Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
Synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: gr: fix MISRA 10.3 violations
MISRA Rule 10.3 prohibits assigning an expression to an object of a
narrower essential type or of a different essential type category.
This fixes MISRA 10.3 violations in the gr unit.

JIRA NVGPU-3115

Change-Id: I9817d74eb927f6e52a13d31114e2c579fd65dd32
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2094443
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 257ffe9c75
commit e3f5e6c271
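Before the hunks, a minimal standalone sketch (not from this commit) of
what a Rule 10.3 violation looks like and how the U32() pattern used
below resolves it. The U32() definition here is an assumption modeled
on nvgpu's cast helpers; only the shape of the fix matters.

	/* Standalone sketch of a MISRA C:2012 Rule 10.3 violation and
	 * its fix. U32() is assumed to be an explicit-cast helper in
	 * the style of nvgpu's; this definition is illustrative. */
	#include <stdbool.h>
	#include <stdint.h>

	typedef uint32_t u32;

	#define U32(x) ((u32)(x))	/* assumed definition */

	struct hdr {
		u32 failed;	/* hardware-shared field, deliberately u32 */
	};

	static u32 element_size(bool at_end)
	{
		/* Rule 10.3 violation: sizeof yields size_t, which is
		 * wider than u32 on 64-bit targets:
		 *
		 *	u32 n = sizeof(struct hdr);
		 *
		 * Fix: make the essential-type conversion explicit. */
		return U32(at_end ? sizeof(struct hdr)
				  : 2U * sizeof(struct hdr));
	}

	int main(void)
	{
		struct hdr h;

		/* 'true' is essentially boolean, so assigning it to a
		 * u32 field also needs an explicit conversion. */
		h.failed = U32(true);
		(void)element_size(h.failed != 0U);
		return 0;
	}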
@@ -237,7 +237,7 @@ int nvgpu_gr_intr_handle_notify_pending(struct gk20a *g,
 			((char *)virtual_address + offset);
 
 		min_element_size =
-			(sh_hdr->operation == OP_END ?
+			U32(sh_hdr->operation == OP_END ?
 			 sizeof(struct share_buffer_head) :
 			 sizeof(struct gk20a_cyclestate_buffer_elem));
 
@@ -248,7 +248,7 @@ int nvgpu_gr_intr_handle_notify_pending(struct gk20a *g,
 			nvgpu_err(g,
 				"bad cyclestate buffer header size at offset 0x%x",
 				offset);
-			sh_hdr->failed = true;
+			sh_hdr->failed = U32(true);
 			break;
 		}
 
@@ -273,7 +273,8 @@ int nvgpu_gr_intr_handle_notify_pending(struct gk20a *g,
 				"invalid cycletstats op offset: 0x%x",
 				op_elem->offset_bar0);
 
-			sh_hdr->failed = exit = true;
+			exit = true;
+			sh_hdr->failed = U32(exit);
 			break;
 		}
 
@@ -314,7 +315,7 @@ int nvgpu_gr_intr_handle_notify_pending(struct gk20a *g,
 			exit = true;
 			break;
 		}
-		sh_hdr->completed = true;
+		sh_hdr->completed = U32(true);
 		offset += sh_hdr->size;
 	}
 	nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
@@ -393,7 +393,7 @@ int nvgpu_gr_obj_ctx_alloc_golden_ctx_image(struct gk20a *g,
 	struct nvgpu_mem *inst_block)
 {
 	u32 i;
-	u32 size;
+	u64 size;
 	struct nvgpu_mem *gr_mem;
 	int err = 0;
 	struct netlist_aiv_list *sw_ctx_load = &g->netlist_vars->sw_ctx_load;
@@ -544,13 +544,14 @@ static int nvgpu_gr_obj_ctx_gr_ctx_alloc(struct gk20a *g,
 	struct nvgpu_gr_ctx_desc *gr_ctx_desc, struct nvgpu_gr_ctx *gr_ctx,
 	struct vm_gk20a *vm)
 {
-	u32 size;
+	u64 size;
 	int err = 0;
 
 	nvgpu_log_fn(g, " ");
 
 	size = nvgpu_gr_obj_ctx_get_golden_image_size(golden_image);
-	nvgpu_gr_ctx_set_size(gr_ctx_desc, NVGPU_GR_CTX_CTX, size);
+	nvgpu_assert(size <= U64(U32_MAX));
+	nvgpu_gr_ctx_set_size(gr_ctx_desc, NVGPU_GR_CTX_CTX, U32(size));
 
 	err = nvgpu_gr_ctx_alloc(g, gr_ctx, gr_ctx_desc, vm);
 	if (err != 0) {
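The hunk above widens size to u64 and then narrows it explicitly at the
consumer, guarding the narrowing with an assertion. A minimal sketch of
that assert-then-narrow idiom, with assert() standing in for
nvgpu_assert() and the cast helpers assumed to be plain casts:

	/* Sketch of the assert-then-narrow idiom; assert() stands in
	 * for nvgpu_assert(), U64()/U32() assumed as plain casts. */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t u32;
	typedef uint64_t u64;

	#define U32(x) ((u32)(x))	/* assumed definitions */
	#define U64(x) ((u64)(x))

	static void set_ctx_size(u32 size)	/* consumer takes u32 */
	{
		printf("ctx size: %u\n", size);
	}

	static void propagate_size(u64 size)
	{
		/* Prove the 64-bit value fits before narrowing, so the
		 * explicit U32() cast cannot silently truncate. */
		assert(size <= U64(UINT32_MAX));
		set_ctx_size(U32(size));
	}

	int main(void)
	{
		propagate_size(4096U);
		return 0;
	}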
@@ -859,9 +859,10 @@ void gv11b_gr_init_commit_ctxsw_spill(struct gk20a *g,
 
 	size /= gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
 
+	nvgpu_assert(u64_hi32(addr) == 0U);
 	nvgpu_gr_ctx_patch_write(g, gr_ctx,
 		gr_gpc0_swdx_rm_spill_buffer_addr_r(),
-		gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(addr),
+		gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(U32(addr)),
 		patch);
 	nvgpu_gr_ctx_patch_write(g, gr_ctx,
 		gr_gpc0_swdx_rm_spill_buffer_size_r(),
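The final hunk applies the same idea to an address: assert that the
upper 32 bits are zero before handing the value to a 32-bit register
field. A sketch, with u64_hi32() modeled on the nvgpu helper of the
same name and patch_write() as a hypothetical stand-in for the register
write:

	/* Sketch of the high-half check before narrowing a 64-bit
	 * address; u64_hi32() is modeled on the nvgpu helper of the
	 * same name, patch_write() is a hypothetical stand-in. */
	#include <assert.h>
	#include <stdint.h>

	typedef uint32_t u32;
	typedef uint64_t u64;

	#define U32(x) ((u32)(x))	/* assumed definition */

	static inline u32 u64_hi32(u64 v)
	{
		return (u32)(v >> 32);
	}

	static void patch_write(u32 value)	/* 32-bit register write */
	{
		(void)value;
	}

	int main(void)
	{
		u64 addr = 0x0000000012345600ULL;

		/* Assert the upper half is zero before the explicit
		 * narrowing cast to the 32-bit register argument. */
		assert(u64_hi32(addr) == 0U);
		patch_write(U32(addr));
		return 0;
	}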