gpu: nvgpu: fix MISRA 2.7 violations

MISRA C:2012 Advisory Rule 2.7 states that there should be no unused
parameters in functions.

This patch removes the unused parameter from each of the following
functions, updating all callers to match (a sketch of the pattern
follows the list):

 * nvgpu_channel_ctxsw_timeout_debug_dump_state()
 * nvgpu_channel_destroy()
 * nvgpu_tsg_destroy()
 * nvgpu_rc_pbdma_fault()
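
As a minimal sketch of the pattern (generic names, not code from this
patch): the unused parameter is dropped from the definition and every
caller passes one argument fewer.

    /* Before: 'unused' is accepted but never referenced, which
     * violates MISRA C:2012 Advisory Rule 2.7. */
    static int scale(int value, int unused)
    {
            return value * 2;
    }

    /* After: the dead parameter is gone; call sites change from
     * scale(v, 0) to scale(v). */
    static int scale(int value)
    {
            return value * 2;
    }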

Jira NVGPU-3178

Change-Id: I12ad0d287fd7980533663a9776428ef5d4fd1fb9
Signed-off-by: Scott Long <scottl@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2176066
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Scott Long
Date:      2019-08-15 09:09:47 -07:00
Committed: mobile promotions
Commit:    4277f65834 (parent 3bc6ea5fbd)

5 changed files with 11 additions and 14 deletions

@@ -2251,8 +2251,8 @@ void nvgpu_channel_free_usermode_buffers(struct nvgpu_channel *c)
 	}
 }
 
-static bool nvgpu_channel_ctxsw_timeout_debug_dump_state(struct gk20a *g,
+static bool nvgpu_channel_ctxsw_timeout_debug_dump_state(
 		struct nvgpu_channel *ch)
 {
 	bool verbose = false;
 	if (nvgpu_is_err_notifier_set(ch,
@@ -2282,7 +2282,7 @@ bool nvgpu_channel_mark_error(struct gk20a *g, struct nvgpu_channel *ch)
 {
 	bool verbose;
 
-	verbose = nvgpu_channel_ctxsw_timeout_debug_dump_state(g, ch);
+	verbose = nvgpu_channel_ctxsw_timeout_debug_dump_state(ch);
 	nvgpu_channel_set_has_timedout_and_wakeup_wqs(g, ch);
 
 	return verbose;
@@ -2375,7 +2375,7 @@ void nvgpu_channel_deterministic_unidle(struct gk20a *g)
 	nvgpu_rwsem_up_write(&g->deterministic_busy);
 }
 
-static void nvgpu_channel_destroy(struct gk20a *g, struct nvgpu_channel *c)
+static void nvgpu_channel_destroy(struct nvgpu_channel *c)
 {
 	nvgpu_mutex_destroy(&c->ioctl_lock);
 #ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
@@ -2409,7 +2409,7 @@ void nvgpu_channel_cleanup_sw(struct gk20a *g)
 			nvgpu_channel_kill(ch);
 		}
 
-		nvgpu_channel_destroy(g, ch);
+		nvgpu_channel_destroy(ch);
 	}
 
 	nvgpu_vfree(g, f->channel);
@@ -2498,7 +2498,7 @@ clean_up:
 	for (i = 0; i < chid; i++) {
 		struct nvgpu_channel *ch = &f->channel[i];
-		nvgpu_channel_destroy(g, ch);
+		nvgpu_channel_destroy(ch);
 	}
 
 	nvgpu_vfree(g, f->channel);
 	f->channel = NULL;

@@ -298,7 +298,7 @@ void nvgpu_tsg_unbind_channel_check_ctx_reload(struct nvgpu_tsg *tsg,
 	}
 }
 
-static void nvgpu_tsg_destroy(struct gk20a *g, struct nvgpu_tsg *tsg)
+static void nvgpu_tsg_destroy(struct nvgpu_tsg *tsg)
 {
 	nvgpu_mutex_destroy(&tsg->event_id_list_lock);
 }
@@ -332,7 +332,7 @@ void nvgpu_tsg_cleanup_sw(struct gk20a *g)
 	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
 		struct nvgpu_tsg *tsg = &f->tsg[tsgid];
 
-		nvgpu_tsg_destroy(g, tsg);
+		nvgpu_tsg_destroy(tsg);
 	}
 
 	nvgpu_vfree(g, f->tsg);

@@ -84,8 +84,7 @@ void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
 #endif
 }
 
-void nvgpu_rc_pbdma_fault(struct gk20a *g, struct nvgpu_fifo *f,
-		u32 pbdma_id, u32 error_notifier,
+void nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, u32 error_notifier,
 		struct nvgpu_pbdma_status_info *pbdma_status)
 {
 #ifdef CONFIG_NVGPU_RECOVERY

@@ -90,7 +90,6 @@ void gk20a_fifo_intr_handle_runlist_event(struct gk20a *g)
 
 u32 gk20a_fifo_pbdma_isr(struct gk20a *g)
 {
-	struct nvgpu_fifo *f = &g->fifo;
 	u32 pbdma_id;
 	u32 num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);
 	u32 pbdma_pending_bitmask = nvgpu_readl(g, fifo_intr_pbdma_id_r());
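
Note the knock-on cleanup above: the local f in gk20a_fifo_pbdma_isr()
existed only to be forwarded to nvgpu_rc_pbdma_fault(), so it is deleted
along with the parameter. Nothing is lost, since the fifo stays reachable
through the device pointer; any callee that really needs it can derive it
locally, mirroring the removed line:

    struct nvgpu_fifo *f = &g->fifo;
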
@@ -105,7 +104,7 @@ u32 gk20a_fifo_pbdma_isr(struct gk20a *g)
 			recover = g->ops.pbdma.handle_intr(g, pbdma_id,
 					&error_notifier, &pbdma_status);
 			if (recover) {
-				nvgpu_rc_pbdma_fault(g, f, pbdma_id,
+				nvgpu_rc_pbdma_fault(g, pbdma_id,
 						error_notifier, &pbdma_status);
 			}
 		}

@@ -46,8 +46,7 @@ struct nvgpu_pbdma_status_info;
 void nvgpu_rc_ctxsw_timeout(struct gk20a *g, u32 eng_bitmask,
 		struct nvgpu_tsg *tsg, bool debug_dump);
 
-void nvgpu_rc_pbdma_fault(struct gk20a *g, struct nvgpu_fifo *f,
-		u32 pbdma_id, u32 error_notifier,
+void nvgpu_rc_pbdma_fault(struct gk20a *g, u32 pbdma_id, u32 error_notifier,
 		struct nvgpu_pbdma_status_info *pbdma_status);
 
 void nvgpu_rc_runlist_update(struct gk20a *g, u32 runlist_id);
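
Violations like these are also easy to surface outside a MISRA checker:
GCC and Clang warn about unused parameters with -Wunused-parameter
(enabled by -Wextra). A standalone demonstration, assuming a scratch
file unrelated to this change:

    /* misra_2_7.c -- compile with: gcc -Wextra -c misra_2_7.c
     * GCC reports: warning: unused parameter 'unused_arg'
     * [-Wunused-parameter]
     */
    static int add_one(int x, int unused_arg)
    {
            return x + 1;
    }

    int main(void)
    {
            return add_one(41, 0);
    }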