Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: Fix MISRA Rule 10.3 errors in gr.init
Fix MISRA Rule 10.3 violations in gr.init unit: implicit conversion from
essential type "unsigned 64-bit int" to narrower essential type
"unsigned 32-bit int".

Jira NVGPU-3389

Change-Id: Ibf294f515d10d1dd7e26f2730f8b58ecb82285fb
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2115013
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 21f04a94af
Commit: 9e63b64cd0
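Every hunk below applies the same narrowing pattern: shift the 64-bit GPU address down by the register field's alignment, assert that the upper 32 bits are now zero, and only then cast explicitly to u32 before handing the value to the 32-bit register field helpers. The following is a minimal, self-contained sketch of that pattern; ALIGN_BITS and commit_cb_base are illustrative stand-ins, not nvgpu APIs.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for nvgpu's u64_hi32() helper (illustrative only). */
static inline uint32_t u64_hi32(uint64_t v) { return (uint32_t)(v >> 32); }

/* Hypothetical alignment, standing in for gr_scc_bundle_cb_base_addr_39_8_align_bits_v(). */
#define ALIGN_BITS 8U

/* The narrowing pattern used throughout the change: shift, assert, explicit cast. */
static uint32_t commit_cb_base(uint64_t addr)
{
	uint32_t cb_addr;

	addr = addr >> ALIGN_BITS;       /* align the 40-bit address field       */
	assert(u64_hi32(addr) == 0U);    /* upper 32 bits must be clear by now   */
	cb_addr = (uint32_t)addr;        /* explicit, MISRA-visible narrowing    */

	return cb_addr;                  /* value handed to the 39_8 field macro */
}

int main(void)
{
	uint64_t gpu_va = 0x0000004012345600ULL; /* hypothetical buffer address */

	printf("cb base field: 0x%08x\n", commit_cb_base(gpu_va));
	return 0;
}

The explicit cast after the assert is what silences Rule 10.3 without hiding a real truncation: if an address ever failed to fit, the assert would fire instead of the value being silently wrapped.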
@@ -867,25 +867,28 @@ u32 gm20b_gr_init_get_global_ctx_pagepool_buffer_size(struct gk20a *g)
 }
 
 void gm20b_gr_init_commit_global_bundle_cb(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u64 size, bool patch)
+	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch)
 {
 	u32 data;
+	u32 cb_addr;
 	u32 bundle_cb_token_limit = g->ops.gr.init.get_bundle_cb_token_limit(g);
 
-	addr = addr >> U64(gr_scc_bundle_cb_base_addr_39_8_align_bits_v());
+	addr = addr >> gr_scc_bundle_cb_base_addr_39_8_align_bits_v();
 
-	nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %llu",
+	nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %u",
 		addr, size);
+	nvgpu_assert(u64_hi32(addr) == 0U);
 
+	cb_addr = (u32)addr;
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_bundle_cb_base_r(),
-		gr_scc_bundle_cb_base_addr_39_8_f(addr), patch);
+		gr_scc_bundle_cb_base_addr_39_8_f(cb_addr), patch);
 
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_bundle_cb_size_r(),
 		gr_scc_bundle_cb_size_div_256b_f(size) |
 		gr_scc_bundle_cb_size_valid_true_f(), patch);
 
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_swdx_bundle_cb_base_r(),
-		gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(addr), patch);
+		gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(cb_addr), patch);
 
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_swdx_bundle_cb_size_r(),
 		gr_gpcs_swdx_bundle_cb_size_div_256b_f(size) |
@@ -958,23 +961,24 @@ void gm20b_gr_init_commit_global_attrib_cb(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, u32 tpc_count, u32 max_tpc, u64 addr,
 	bool patch)
 {
-	addr = (u64_lo32(addr) >>
-		gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()) |
-		(u64_hi32(addr) <<
-		(32U - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
+	u32 cb_addr;
+
+	addr = addr >> gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v();
 
 	nvgpu_log_info(g, "attrib cb addr : 0x%016llx", addr);
+	nvgpu_assert(u64_hi32(addr) == 0U);
+
+	cb_addr = (u32)addr;
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_setup_attrib_cb_base_r(),
-		gr_gpcs_setup_attrib_cb_base_addr_39_12_f(addr) |
+		gr_gpcs_setup_attrib_cb_base_addr_39_12_f(cb_addr) |
 		gr_gpcs_setup_attrib_cb_base_valid_true_f(), patch);
 
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(),
-		gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(addr) |
+		gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(cb_addr) |
 		gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(), patch);
 
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(),
-		gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(addr) |
+		gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(cb_addr) |
 		gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(), patch);
 }
 
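The hunk above (and the ctxsw_spill hunk further down) also drops the u64_lo32()/u64_hi32() split-and-recombine form in favour of a single 64-bit shift; the two forms agree whenever the shifted address fits in 32 bits, which is exactly what the added nvgpu_assert() checks. A small self-contained check of that equivalence, using stand-in helpers rather than the nvgpu ones:

#include <assert.h>
#include <stdint.h>

/* Stand-ins for nvgpu's u64_lo32()/u64_hi32() helpers (illustrative only). */
static inline uint32_t u64_lo32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t u64_hi32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	const uint32_t bits = 12U;              /* e.g. ..._39_12_align_bits_v() */
	uint64_t addr = 0x000000F123456000ULL;  /* hypothetical 40-bit GPU VA */

	/* Old form: shift the 32-bit halves separately and recombine. */
	uint32_t split = (u64_lo32(addr) >> bits) |
			 (u64_hi32(addr) << (32U - bits));

	/* New form: one 64-bit shift, then prove the result fits in 32 bits. */
	uint64_t plain = addr >> bits;

	assert(u64_hi32(plain) == 0U);          /* what the added nvgpu_assert() checks */
	assert((uint64_t)split == plain);       /* the two forms agree for such addresses */
	return 0;
}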
@@ -82,7 +82,7 @@ u32 gm20b_gr_init_get_global_ctx_cb_buffer_size(struct gk20a *g);
 u32 gm20b_gr_init_get_global_ctx_pagepool_buffer_size(struct gk20a *g);
 
 void gm20b_gr_init_commit_global_bundle_cb(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u64 size, bool patch);
+	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch);
 u32 gm20b_gr_init_pagepool_default_size(struct gk20a *g);
 void gm20b_gr_init_commit_global_pagepool(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, u64 addr, size_t size, bool patch,
@@ -298,30 +298,33 @@ u32 gp10b_gr_init_get_global_attr_cb_size(struct gk20a *g, u32 tpc_count,
 }
 
 void gp10b_gr_init_commit_global_bundle_cb(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u64 size, bool patch)
+	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch)
 {
 	u32 data;
+	u32 cb_addr;
 	u32 bundle_cb_token_limit = g->ops.gr.init.get_bundle_cb_token_limit(g);
 
-	addr = addr >> U64(gr_scc_bundle_cb_base_addr_39_8_align_bits_v());
+	addr = addr >> gr_scc_bundle_cb_base_addr_39_8_align_bits_v();
 
-	nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %llu",
+	nvgpu_log_info(g, "bundle cb addr : 0x%016llx, size : %u",
 		addr, size);
 
+	nvgpu_assert(u64_hi32(addr) == 0U);
+
+	cb_addr = (u32)addr;
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_bundle_cb_base_r(),
-		gr_scc_bundle_cb_base_addr_39_8_f((u32)addr), patch);
+		gr_scc_bundle_cb_base_addr_39_8_f(cb_addr), patch);
 
-	nvgpu_assert(size <= U32_MAX);
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_bundle_cb_size_r(),
-		gr_scc_bundle_cb_size_div_256b_f((u32)size) |
+		gr_scc_bundle_cb_size_div_256b_f(size) |
 		gr_scc_bundle_cb_size_valid_true_f(), patch);
 
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_swdx_bundle_cb_base_r(),
-		gr_gpcs_swdx_bundle_cb_base_addr_39_8_f((u32)addr), patch);
+		gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(cb_addr), patch);
 
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_gpcs_swdx_bundle_cb_size_r(),
-		gr_gpcs_swdx_bundle_cb_size_div_256b_f((u32)size) |
+		gr_gpcs_swdx_bundle_cb_size_div_256b_f(size) |
 		gr_gpcs_swdx_bundle_cb_size_valid_true_f(), patch);
 
 	/* data for state_limit */
@@ -559,16 +562,14 @@ u32 gp10b_gr_init_get_ctx_attrib_cb_size(struct gk20a *g, u32 betacb_size,
 void gp10b_gr_init_commit_ctxsw_spill(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch)
 {
-	addr = (u64_lo32(addr) >>
-		gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v()) |
-		(u64_hi32(addr) <<
-		(32U - gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v()));
+	addr = addr >> gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v();
 
 	size /= gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
 
+	nvgpu_assert(u64_hi32(addr) == 0U);
 	nvgpu_gr_ctx_patch_write(g, gr_ctx,
 		gr_gpc0_swdx_rm_spill_buffer_addr_r(),
-		gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(addr),
+		gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f((u32)addr),
 		patch);
 	nvgpu_gr_ctx_patch_write(g, gr_ctx,
 		gr_gpc0_swdx_rm_spill_buffer_size_r(),
@@ -48,7 +48,7 @@ u32 gp10b_gr_init_get_global_attr_cb_size(struct gk20a *g, u32 tpc_count,
 	u32 max_tpc);
 
 void gp10b_gr_init_commit_global_bundle_cb(struct gk20a *g,
-	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u64 size, bool patch);
+	struct nvgpu_gr_ctx *gr_ctx, u64 addr, u32 size, bool patch);
 u32 gp10b_gr_init_pagepool_default_size(struct gk20a *g);
 void gp10b_gr_init_commit_global_pagepool(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, u64 addr, size_t size, bool patch,
@@ -40,7 +40,7 @@ u32 tu104_gr_init_get_rtv_cb_size(struct gk20a *g)
 
 static void tu104_gr_init_patch_rtv_cb(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx,
-	u64 addr, u32 size, u32 gfxpAddSize, bool patch)
+	u32 addr, u32 size, u32 gfxpAddSize, bool patch)
 {
 	nvgpu_gr_ctx_patch_write(g, gr_ctx, gr_scc_rm_rtv_cb_base_r(),
 		gr_scc_rm_rtv_cb_base_addr_39_8_f(addr), patch);
@@ -56,21 +56,19 @@ static void tu104_gr_init_patch_rtv_cb(struct gk20a *g,
 void tu104_gr_init_commit_rtv_cb(struct gk20a *g, u64 addr,
 	struct nvgpu_gr_ctx *gr_ctx, bool patch)
 {
-	u32 size;
-
-	addr = addr >> U64(gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f());
-	size = (gr_scc_rm_rtv_cb_size_div_256b_default_f() +
+	u32 size = (gr_scc_rm_rtv_cb_size_div_256b_default_f() +
 		gr_scc_rm_rtv_cb_size_div_256b_db_adder_f());
 
-	tu104_gr_init_patch_rtv_cb(g, gr_ctx, addr, size, 0, patch);
+	addr = addr >> gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f();
+
+	nvgpu_assert(u64_hi32(addr) == 0U);
+	tu104_gr_init_patch_rtv_cb(g, gr_ctx, (u32)addr, size, 0, patch);
 }
 
 void tu104_gr_init_commit_gfxp_rtv_cb(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx, bool patch)
 {
 	u64 addr;
-	u64 addr_lo;
-	u64 addr_hi;
 	u32 rtv_cb_size;
 	u32 gfxp_addr_size;
 	struct nvgpu_mem *buf_mem;
@@ -85,14 +83,11 @@ void tu104_gr_init_commit_gfxp_rtv_cb(struct gk20a *g,
 
 	/* GFXP RTV circular buffer */
 	buf_mem = nvgpu_gr_ctx_get_gfxp_rtvcb_ctxsw_buffer(gr_ctx);
-	addr_lo = (u64)(u64_lo32(buf_mem->gpu_va) >>
-		gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f());
-	addr_hi = (u64)(buf_mem->gpu_va);
-	addr = addr_lo |
-		(addr_hi <<
-		(32U - gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f()));
+	addr = buf_mem->gpu_va >>
+		gr_scc_rm_rtv_cb_base_addr_39_8_align_bits_f();
 
-	tu104_gr_init_patch_rtv_cb(g, gr_ctx, addr,
+	nvgpu_assert(u64_hi32(addr) == 0U);
+	tu104_gr_init_patch_rtv_cb(g, gr_ctx, (u32)addr,
 		rtv_cb_size, gfxp_addr_size, patch);
 }
 
@@ -749,7 +749,7 @@ struct gpu_ops {
 			u32 (*get_global_ctx_pagepool_buffer_size)(
 				struct gk20a *g);
 			void (*commit_global_bundle_cb)(struct gk20a *g,
-				struct nvgpu_gr_ctx *ch_ctx, u64 addr, u64 size,
+				struct nvgpu_gr_ctx *ch_ctx, u64 addr, u32 size,
 				bool patch);
 			u32 (*pagepool_default_size)(struct gk20a *g);
 			void (*commit_global_pagepool)(struct gk20a *g,