gpu: nvgpu: gv11b: fix MISRA 10.3 violations

MISRA Rule 10.3 prohibits assigning the value of an expression to an object
with a narrower essential type or of a different essential type category.
This change fixes MISRA 10.3 violations in the gv11b unit.

JIRA NVGPU-3110

Change-Id: I6a5d7648473b35acea1417d86c402b83fc600882
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2093653
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Philip Elcan
Date:      2019-04-08 15:40:51 -04:00
Committer: mobile promotions
Commit:    35e02c6d29
Parent:    cd1254d524

3 changed files with 25 additions and 20 deletions
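
For illustration only (not part of the change): a minimal, standalone
userspace sketch of the cast pattern the diffs below apply. The hweight32()
re-implementation and the U32() macro here are stand-ins assumed for the
sake of a self-contained example; only the cast pattern itself mirrors the
patch.

#include <stdint.h>
#include <stdio.h>

#define U32(x) ((uint32_t)(x))

/* Stand-in for the kernel helper: returns the number of set bits as a
 * signed int, which is what makes an uncast assignment to u32 a Rule 10.3
 * violation. */
static int hweight32(uint32_t w)
{
	int count = 0;

	while (w != 0U) {
		count += (int)(w & 1U);
		w >>= 1U;
	}
	return count;
}

int main(void)
{
	uint32_t ce_pce_map = 0x2DU;	/* example PCE bitmask */
	uint32_t num_pce;

	/* num_pce = hweight32(ce_pce_map);  -- int assigned to u32 (10.3) */
	num_pce = U32(hweight32(ce_pce_map));	/* explicit cast, compliant */

	printf("num PCE: %u\n", (unsigned int)num_pce);
	return 0;
}

With the example bitmask this prints "num PCE: 4"; the point is that the
signed int result of hweight32() is converted to u32 by one explicit,
visible cast rather than by an implicit assignment.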


@@ -60,7 +60,7 @@ u32 gv11b_ce_get_num_pce(struct gk20a *g)
 	u32 num_pce;
 	u32 ce_pce_map = gk20a_readl(g, ce_pce_map_r());
 
-	num_pce = hweight32(ce_pce_map);
+	num_pce = U32(hweight32(ce_pce_map));
 	nvgpu_log_info(g, "num PCE: %d", num_pce);
 	return num_pce;
 }


@@ -107,7 +107,7 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
 					u32 pbdma_id)
 {
 	struct nvgpu_timeout timeout;
-	unsigned long delay = POLL_DELAY_MIN_US; /* in micro seconds */
+	u32 delay = POLL_DELAY_MIN_US; /* in micro seconds */
 	int ret;
 	unsigned int loop_count = 0;
 	struct nvgpu_pbdma_status_info pbdma_status;
@@ -181,9 +181,8 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
 			break;
 		}
 
-		nvgpu_usleep_range(delay, delay * 2UL);
-		delay = min_t(unsigned long,
-				delay << 1, POLL_DELAY_MAX_US);
+		nvgpu_usleep_range(delay, delay * 2U);
+		delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
 	} while (nvgpu_timeout_expired(&timeout) == 0);
 
 	if (ret != 0) {
@@ -198,7 +197,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 					u32 act_eng_id, u32 *reset_eng_bitmask)
 {
 	struct nvgpu_timeout timeout;
-	unsigned long delay = POLL_DELAY_MIN_US; /* in micro seconds */
+	u32 delay = POLL_DELAY_MIN_US; /* in micro seconds */
 	u32 eng_stat;
 	u32 ctx_stat;
 	int ret;
@@ -315,9 +314,8 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
 			ret = 0;
 			break;
 		}
-		nvgpu_usleep_range(delay, delay * 2UL);
-		delay = min_t(unsigned long,
-				delay << 1, POLL_DELAY_MAX_US);
+		nvgpu_usleep_range(delay, delay * 2U);
+		delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
 	} while (nvgpu_timeout_expired(&timeout) == 0);
 
 	if (ret != 0) {
@@ -434,8 +432,9 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	struct fifo_gk20a *f = &g->fifo;
 	unsigned long runlist_served_pbdmas;
 	unsigned long runlist_served_engines;
-	unsigned long pbdma_id;
-	unsigned long act_eng_id;
+	unsigned long bit;
+	u32 pbdma_id;
+	u32 act_eng_id;
 	u32 runlist_id;
 	int ret = 0;
 	u32 tsgid;
@@ -453,13 +452,15 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	runlist_served_pbdmas = f->runlist_info[runlist_id]->pbdma_bitmask;
 	runlist_served_engines = f->runlist_info[runlist_id]->eng_bitmask;
 
-	for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) {
+	for_each_set_bit(bit, &runlist_served_pbdmas, f->num_pbdma) {
+		pbdma_id = U32(bit);
 		ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id);
 	}
 
 	f->runlist_info[runlist_id]->reset_eng_bitmask = 0;
 
-	for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) {
+	for_each_set_bit(bit, &runlist_served_engines, f->max_engines) {
+		act_eng_id = U32(bit);
 		ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
 				&f->runlist_info[runlist_id]->reset_eng_bitmask);
 	}
@@ -695,9 +696,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 {
 	struct tsg_gk20a *tsg = NULL;
 	u32 runlists_mask, rlid, i;
-	unsigned long pbdma_id;
+	unsigned long bit;
+	u32 pbdma_id;
 	struct fifo_runlist_info_gk20a *runlist = NULL;
-	unsigned long engine_id;
+	u32 engine_id;
 	u32 client_type = ~U32(0U);
 	struct fifo_gk20a *f = &g->fifo;
 	u32 runlist_id = FIFO_INVAL_RUNLIST_ID;
@@ -805,8 +807,8 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	if (tsg != NULL) {
 		rlid = f->tsg[id].runlist_id;
 		runlist_served_pbdmas = f->runlist_info[rlid]->pbdma_bitmask;
-		for_each_set_bit(pbdma_id, &runlist_served_pbdmas,
-				f->num_pbdma) {
+		for_each_set_bit(bit, &runlist_served_pbdmas, f->num_pbdma) {
+			pbdma_id = U32(bit);
 			/*
 			 * If pbdma preempt fails the only option is to reset
 			 * GPU. Any sort of hang indicates the entire GPUs
@@ -836,8 +838,9 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 			unsigned long __reset_eng_bitmask =
 				runlist->reset_eng_bitmask;
 
-			for_each_set_bit(engine_id, &__reset_eng_bitmask,
+			for_each_set_bit(bit, &__reset_eng_bitmask,
 					g->fifo.max_engines) {
+				engine_id = U32(bit);
 				if ((tsg != NULL) &&
 					gk20a_fifo_should_defer_engine_reset(g,
 						engine_id, client_type, false)) {
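
Again for illustration only (hypothetical stand-ins, not driver code): a
standalone sketch of the loop-index pattern used in the fifo hunks above.
The kernel's for_each_set_bit() iterator must be unsigned long, so the patch
keeps the raw bit position in an unsigned long named bit and converts it
once with U32() before calling the u32-typed helpers; the sketch emulates
the macro with a plain loop.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define U32(x) ((uint32_t)(x))

/* Stand-in for a u32-typed helper such as gv11b_fifo_poll_pbdma_chan_status(). */
static void poll_pbdma(uint32_t pbdma_id)
{
	printf("polling pbdma %u\n", (unsigned int)pbdma_id);
}

int main(void)
{
	unsigned long runlist_served_pbdmas = 0x15UL;	/* example bitmask */
	unsigned long bit;	/* iterator stays unsigned long */
	uint32_t pbdma_id;
	unsigned long nbits = sizeof(unsigned long) * (unsigned long)CHAR_BIT;

	for (bit = 0UL; bit < nbits; bit++) {
		if ((runlist_served_pbdmas & (1UL << bit)) == 0UL) {
			continue;
		}
		pbdma_id = U32(bit);	/* one explicit conversion per iteration */
		poll_pbdma(pbdma_id);
	}
	return 0;
}

The conversion happens at a single, visible point, so everything inside the
loop body works with a consistent u32 essential type, which is what the
called functions expect.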


@@ -1943,7 +1943,8 @@ u64 gv11b_gr_get_sm_hww_warp_esr_pc(struct gk20a *g, u32 offset)
 int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 			struct channel_gk20a *fault_ch)
 {
-	int sm_id;
+	int ret = 0;
+	u32 sm_id;
 	u32 offset, sm_per_tpc, tpc_id;
 	u32 gpc_offset, gpc_tpc_offset;
 	struct nvgpu_tsg_sm_error_state *sm_error_states = NULL;
@@ -1966,6 +1967,7 @@ int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 	if (tsg == NULL) {
 		nvgpu_err(g, "no valid tsg");
+		ret = -EINVAL;
 		goto record_fail;
 	}
@@ -1975,7 +1977,7 @@ int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 record_fail:
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
-	return sm_id;
+	return ret;
 }
 
 void gv11b_gr_set_hww_esr_report_mask(struct gk20a *g)