mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: gr: fix misra 2.7 violations
Eliminate several Advisory Rule 2.7 violations in gr code. Advisory
Rule 2.7 states that there should be no unused parameters in functions.

Jira NVGPU-3178

Change-Id: I415023a297031884b2d1be667551de2a7d8f23ad
Signed-off-by: Scott Long <scottl@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2174007
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: da19882f4d
Commit: f6fc8a0540
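For context, the sketch below illustrates the general Rule 2.7 fix pattern this change applies: a parameter the function body never reads is removed from the signature, and every call site drops the corresponding argument. The names used here (struct widget, widget_count) are hypothetical examples for illustration only, not nvgpu code.

/*
 * Minimal standalone sketch of a MISRA Advisory Rule 2.7 fix.
 * All identifiers below are made up; they are not part of nvgpu.
 */
#include <stdio.h>

struct widget {
        unsigned int count;
};

/*
 * Before the fix this might have read:
 *   static unsigned int widget_count(void *unused_ctx, const struct widget *w)
 * The context pointer was never used, so it is dropped from the prototype.
 */
static unsigned int widget_count(const struct widget *w)
{
        return w->count;
}

int main(void)
{
        struct widget w = { .count = 3U };

        /* Call site updated to pass one fewer argument. */
        printf("count = %u\n", widget_count(&w));
        return 0;
}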
@@ -646,7 +646,7 @@ int nvgpu_gr_fecs_trace_bind_channel(struct gk20a *g,
 			GK20A_FECS_TRACE_NUM_RECORDS);
 
 	if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA) && subctx != NULL) {
-		mem = nvgpu_gr_subctx_get_ctx_header(g, subctx);
+		mem = nvgpu_gr_subctx_get_ctx_header(subctx);
 	}
 
 	g->ops.gr.ctxsw_prog.set_ts_buffer_ptr(g, mem, addr, aperture_mask);
@@ -54,8 +54,7 @@ static void gr_config_init_pes_tpc(struct gk20a *g,
 	}
 }
 
-static void gr_config_init_gpc_skip_mask(struct gk20a *g,
-		struct nvgpu_gr_config *config,
+static void gr_config_init_gpc_skip_mask(struct nvgpu_gr_config *config,
 		u32 gpc_index)
 {
 	u32 pes_heavy_index;
@@ -327,7 +326,7 @@ struct nvgpu_gr_config *nvgpu_gr_config_init(struct gk20a *g)
 		config->ppc_count = nvgpu_safe_add_u32(config->ppc_count,
 				config->gpc_ppc_count[gpc_index]);
 
-		gr_config_init_gpc_skip_mask(g, config, gpc_index);
+		gr_config_init_gpc_skip_mask(config, gpc_index);
 	}
 
 	gr_config_log_info(g, config);
@@ -720,7 +720,7 @@ void nvgpu_gr_intr_handle_semaphore_pending(struct gk20a *g,
 }
 
 static u32 gr_intr_handle_exception_interrupts(struct gk20a *g,
-		u32 gr_intr, u32 *clear_intr,
+		u32 *clear_intr,
 		struct nvgpu_tsg *tsg, u32 *global_esr,
 		struct nvgpu_gr_intr_info *intr_info,
 		struct nvgpu_gr_isr_data *isr_data)
@@ -949,8 +949,8 @@ int nvgpu_gr_intr_stall_isr(struct gk20a *g)
 	need_reset |= gr_intr_handle_error_interrupts(g, gr_intr,
 			&clear_intr, &intr_info, &isr_data);
 
-	need_reset |= gr_intr_handle_exception_interrupts(g, gr_intr,
-			&clear_intr, tsg, &global_esr, &intr_info, &isr_data);
+	need_reset |= gr_intr_handle_exception_interrupts(g, &clear_intr,
+			tsg, &global_esr, &intr_info, &isr_data);
 
 	if (need_reset != 0U) {
 		nvgpu_rc_gr_fault(g, tsg, isr_data.ch);
@@ -56,7 +56,7 @@ void nvgpu_gr_obj_ctx_commit_inst(struct gk20a *g, struct nvgpu_mem *inst_block,
 	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS)) {
 		nvgpu_gr_subctx_load_ctx_header(g, subctx, gr_ctx, gpu_va);
 
-		ctxheader = nvgpu_gr_subctx_get_ctx_header(g, subctx);
+		ctxheader = nvgpu_gr_subctx_get_ctx_header(subctx);
 		nvgpu_gr_obj_ctx_commit_inst_gpu_va(g, inst_block,
 				ctxheader->gpu_va);
 	} else {
@@ -123,8 +123,7 @@ void nvgpu_gr_subctx_set_patch_ctx(struct gk20a *g,
 			nvgpu_gr_ctx_get_patch_ctx_mem(gr_ctx)->gpu_va);
 }
 
-struct nvgpu_mem *nvgpu_gr_subctx_get_ctx_header(struct gk20a *g,
-	struct nvgpu_gr_subctx *subctx)
+struct nvgpu_mem *nvgpu_gr_subctx_get_ctx_header(struct nvgpu_gr_subctx *subctx)
 {
 	return &subctx->ctx_header;
 }
@@ -43,8 +43,7 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
 void nvgpu_gr_subctx_set_patch_ctx(struct gk20a *g,
 	struct nvgpu_gr_subctx *subctx, struct nvgpu_gr_ctx *gr_ctx);
 
-struct nvgpu_mem *nvgpu_gr_subctx_get_ctx_header(struct gk20a *g,
-	struct nvgpu_gr_subctx *subctx);
+struct nvgpu_mem *nvgpu_gr_subctx_get_ctx_header(struct nvgpu_gr_subctx *subctx);
 
 #ifdef CONFIG_NVGPU_GRAPHICS
 void nvgpu_gr_subctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_subctx *subctx,