Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: update gv11b_gating_reglist.c with CERTC fixes
Fix CERT-C INT-C violations by using safe cast operations. Corresponding
changes added to gen_gating_reglist.c in the tools. This patch
autogenerates gv11b_gating_reglist.c.

JIRA NVGPU-4138

Change-Id: I674356281f909f5f48c055053ed0fcbdd0c8d5fe
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2211381
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Dinesh T <dt@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by Alex Waterman
Parent: dac79ce2f9
Commit: aeea8b128f
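Every hunk below makes the same change: the bare ARRAY_SIZE() return is wrapped in nvgpu_safe_cast_u64_to_u32(). The helper's definition is not part of this diff; the stand-alone sketch below only illustrates the idea of a checked u64-to-u32 narrowing, and its name and abort-on-overflow behaviour are assumptions rather than nvgpu's actual implementation.

/* Hypothetical sketch of a checked u64 -> u32 narrowing helper. The real
 * nvgpu_safe_cast_u64_to_u32() may report an out-of-range value differently.
 */
#include <assert.h>
#include <stdint.h>

static inline uint32_t sketch_safe_cast_u64_to_u32(uint64_t value)
{
	/* Refuse to truncate silently: the value must fit in 32 bits. */
	assert(value <= UINT32_MAX);
	return (uint32_t)value;
}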
@@ -303,7 +303,7 @@ void gv11b_slcg_bus_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_bus_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_bus);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_bus));
 }
 
 const struct gating_desc *gv11b_slcg_bus_get_gating_prod(void)
@@ -330,7 +330,7 @@ void gv11b_slcg_ce2_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_ce2_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_ce2);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_ce2));
 }
 
 const struct gating_desc *gv11b_slcg_ce2_get_gating_prod(void)
@@ -357,7 +357,7 @@ void gv11b_slcg_chiplet_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_chiplet_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_chiplet);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_chiplet));
 }
 
 const struct gating_desc *gv11b_slcg_chiplet_get_gating_prod(void)
@@ -401,7 +401,7 @@ void gv11b_slcg_fb_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_fb_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_fb);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_fb));
 }
 
 const struct gating_desc *gv11b_slcg_fb_get_gating_prod(void)
@@ -428,7 +428,7 @@ void gv11b_slcg_fifo_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_fifo_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_fifo);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_fifo));
 }
 
 const struct gating_desc *gv11b_slcg_fifo_get_gating_prod(void)
@@ -455,7 +455,7 @@ void gv11b_slcg_gr_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_gr_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_gr);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_gr));
 }
 
 const struct gating_desc *gv11b_slcg_gr_get_gating_prod(void)
@@ -482,7 +482,7 @@ void gv11b_slcg_ltc_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_ltc_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_ltc);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_ltc));
 }
 
 const struct gating_desc *gv11b_slcg_ltc_get_gating_prod(void)
@@ -509,7 +509,7 @@ void gv11b_slcg_perf_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_perf_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_perf);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_perf));
 }
 
 const struct gating_desc *gv11b_slcg_perf_get_gating_prod(void)
@@ -536,7 +536,7 @@ void gv11b_slcg_priring_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_priring_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_priring);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_priring));
 }
 
 const struct gating_desc *gv11b_slcg_priring_get_gating_prod(void)
@@ -563,7 +563,7 @@ void gv11b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_pwr_csb_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_pwr_csb);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_pwr_csb));
 }
 
 const struct gating_desc *gv11b_slcg_pwr_csb_get_gating_prod(void)
@@ -590,7 +590,7 @@ void gv11b_slcg_pmu_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_pmu_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_pmu);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_pmu));
 }
 
 const struct gating_desc *gv11b_slcg_pmu_get_gating_prod(void)
@@ -617,7 +617,7 @@ void gv11b_slcg_therm_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_therm_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_therm);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_therm));
 }
 
 const struct gating_desc *gv11b_slcg_therm_get_gating_prod(void)
@@ -644,7 +644,7 @@ void gv11b_slcg_xbar_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_xbar_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_xbar);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_xbar));
 }
 
 const struct gating_desc *gv11b_slcg_xbar_get_gating_prod(void)
@@ -671,7 +671,7 @@ void gv11b_slcg_hshub_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_slcg_hshub_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_slcg_hshub);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_slcg_hshub));
 }
 
 const struct gating_desc *gv11b_slcg_hshub_get_gating_prod(void)
@@ -698,7 +698,7 @@ void gv11b_blcg_bus_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_bus_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_bus);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_bus));
 }
 
 const struct gating_desc *gv11b_blcg_bus_get_gating_prod(void)
@@ -725,7 +725,7 @@ void gv11b_blcg_ce_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_ce_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_ce);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_ce));
 }
 
 const struct gating_desc *gv11b_blcg_ce_get_gating_prod(void)
@@ -752,7 +752,7 @@ void gv11b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_ctxsw_firmware_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_ctxsw_firmware);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_ctxsw_firmware));
 }
 
 const struct gating_desc *gv11b_blcg_ctxsw_firmware_get_gating_prod(void)
@@ -779,7 +779,7 @@ void gv11b_blcg_fb_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_fb_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_fb);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_fb));
 }
 
 const struct gating_desc *gv11b_blcg_fb_get_gating_prod(void)
@@ -806,7 +806,7 @@ void gv11b_blcg_fifo_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_fifo_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_fifo);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_fifo));
 }
 
 const struct gating_desc *gv11b_blcg_fifo_get_gating_prod(void)
@@ -833,7 +833,7 @@ void gv11b_blcg_gr_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_gr_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_gr);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_gr));
 }
 
 const struct gating_desc *gv11b_blcg_gr_get_gating_prod(void)
@@ -860,7 +860,7 @@ void gv11b_blcg_ltc_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_ltc_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_ltc);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_ltc));
 }
 
 const struct gating_desc *gv11b_blcg_ltc_get_gating_prod(void)
@@ -887,7 +887,7 @@ void gv11b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_pwr_csb_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_pwr_csb);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_pwr_csb));
 }
 
 const struct gating_desc *gv11b_blcg_pwr_csb_get_gating_prod(void)
@@ -914,7 +914,7 @@ void gv11b_blcg_pmu_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_pmu_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_pmu);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_pmu));
 }
 
 const struct gating_desc *gv11b_blcg_pmu_get_gating_prod(void)
@@ -941,7 +941,7 @@ void gv11b_blcg_xbar_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_xbar_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_xbar);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_xbar));
 }
 
 const struct gating_desc *gv11b_blcg_xbar_get_gating_prod(void)
@@ -968,7 +968,7 @@ void gv11b_blcg_hshub_load_gating_prod(struct gk20a *g,
 
 u32 gv11b_blcg_hshub_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_blcg_hshub);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_blcg_hshub));
 }
 
 const struct gating_desc *gv11b_blcg_hshub_get_gating_prod(void)
@@ -995,7 +995,7 @@ void gr_gv11b_pg_gr_load_gating_prod(struct gk20a *g,
 
 u32 gr_gv11b_pg_gr_gating_prod_size(void)
 {
-	return ARRAY_SIZE(gv11b_pg_gr);
+	return nvgpu_safe_cast_u64_to_u32(ARRAY_SIZE(gv11b_pg_gr));
 }
 
 const struct gating_desc *gr_gv11b_pg_gr_get_gating_prod(void)
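The motivation is the same in every function above: ARRAY_SIZE() expands to sizeof arithmetic and therefore yields a size_t, which is 64-bit on the platforms nvgpu targets, so returning it from a function declared u32 is an implicit lossy narrowing that CERT-C's integer conversion rules flag. A minimal stand-alone illustration of the before/after pattern, using placeholder names rather than the generated gv11b tables:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const uint32_t demo_regs[] = { 0x100u, 0x104u, 0x108u };

/* Before: the size_t result is narrowed to uint32_t implicitly, which is
 * what the checker reports.
 */
static uint32_t demo_reg_count_unchecked(void)
{
	return DEMO_ARRAY_SIZE(demo_regs);
}

/* After: the narrowing is explicit and range-checked before the cast. */
static uint32_t demo_reg_count_checked(void)
{
	size_t n = DEMO_ARRAY_SIZE(demo_regs);

	assert(n <= UINT32_MAX);
	return (uint32_t)n;
}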