gpu: nvgpu: gr: misra 12.1 fixes

MISRA Advisory Rule 12.1 states that the precedence of operators within
expressions should be made explicit.

This change removes the Advisory Rule 12.1 violations from gr code.

Jira NVGPU-3178

Change-Id: I99a60f60f6edcc2acb7343c66d1c4c79752d4acb
Signed-off-by: Scott Long <scottl@nvidia.com>
Reviewed-on: http://git-master.nvidia.com/r/c/linux-nvgpu/+/2276774
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Scott Long
Authored:     2020-01-09 11:08:49 -08:00
Committed by: Alex Waterman
Commit:       7378e16778
Parent:       dfc0f3342a

5 changed files with 24 additions and 24 deletions
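For reference, a minimal sketch (hypothetical code, not taken from this change) of the kind of rewrite Advisory Rule 12.1 asks for: the first function leans on implicit C operator precedence, the second makes the grouping explicit with parentheses, which is the pattern applied throughout the diffs below.

#include <stdbool.h>
#include <stddef.h>

/* Relies on implicit precedence: != and == bind tighter than &&. */
static bool buf_usable_implicit(const void *buf, size_t size)
{
	return buf != NULL && size != 0U;
}

/* MISRA 12.1 style: each sub-expression is explicitly parenthesized. */
static bool buf_usable_explicit(const void *buf, size_t size)
{
	return (buf != NULL) && (size != 0U);
}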


@@ -156,15 +156,15 @@ static int nvgpu_gr_global_ctx_buffer_alloc_vpr(struct gk20a *g,
 static bool nvgpu_gr_global_ctx_buffer_sizes_are_valid(struct gk20a *g,
 		struct nvgpu_gr_global_ctx_buffer_desc *desc)
 {
-	if (desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR].size == 0U ||
-		desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL].size == 0U ||
-		desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE].size == 0U ||
+	if ((desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR].size == 0U) ||
+		(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL].size == 0U) ||
+		(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE].size == 0U) ||
 #ifdef CONFIG_NVGPU_VPR
-		desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR].size == 0U ||
-		desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR].size == 0U ||
-		desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR].size == 0U ||
+		(desc[NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR].size == 0U) ||
+		(desc[NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR].size == 0U) ||
+		(desc[NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR].size == 0U) ||
 #endif
-		desc[NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP].size == 0U) {
+		(desc[NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP].size == 0U)) {
 		return false;
 	}


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -391,7 +391,7 @@ static int nvgpu_gr_init_ctx_state(struct gk20a *g)
 {
 	int err = 0;
-	if (g->gr->golden_image != NULL &&
+	if ((g->gr->golden_image != NULL) &&
 		nvgpu_gr_obj_ctx_is_golden_image_ready(g->gr->golden_image)) {
 		return err;
 	}
@@ -452,7 +452,7 @@ static int gr_init_ecc_init(struct gk20a *g)
 {
 	int err = 0;
-	if (g->ops.gr.ecc.gpc_tpc_ecc_init != NULL && !g->ecc.initialized) {
+	if ((g->ops.gr.ecc.gpc_tpc_ecc_init != NULL) && !g->ecc.initialized) {
 		err = g->ops.gr.ecc.gpc_tpc_ecc_init(g);
 		if (err != 0) {
 			nvgpu_err(g, "failed to init gr gpc/tpc ecc");
@@ -611,7 +611,7 @@ int nvgpu_gr_prepare_sw(struct gk20a *g)
 	 * FECS ECC errors during FECS load need to be handled and reported
 	 * using the ECC counters.
 	 */
-	if (g->ops.gr.ecc.fecs_ecc_init != NULL && !g->ecc.initialized) {
+	if ((g->ops.gr.ecc.fecs_ecc_init != NULL) && !g->ecc.initialized) {
 		err = g->ops.gr.ecc.fecs_ecc_init(g);
 		if (err != 0) {
 			nvgpu_err(g, "failed to init gr fecs ecc");


@@ -66,12 +66,12 @@ static void gr_config_init_gpc_skip_mask(struct nvgpu_gr_config *config,
 	}
 	pes_tpc_cnt = nvgpu_safe_add_u32(
 			config->pes_tpc_count[0][gpc_index],
 			config->pes_tpc_count[1][gpc_index]);
 	pes_heavy_index =
-		config->pes_tpc_count[0][gpc_index] >
-		config->pes_tpc_count[1][gpc_index] ? 0U : 1U;
+		(config->pes_tpc_count[0][gpc_index] >
+		config->pes_tpc_count[1][gpc_index]) ? 0U : 1U;
 	if ((pes_tpc_cnt == 5U) || ((pes_tpc_cnt == 4U) &&
 		(config->pes_tpc_count[0][gpc_index] !=


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -277,7 +277,7 @@ static void nvgpu_gr_falcon_copy_ctxsw_ucode_segments(
 	/* compute a "checksum" for the boot binary to detect its version */
 	segments->boot_signature = 0;
-	for (i = 0; i < segments->boot.size / sizeof(u32); i++) {
+	for (i = 0; i < (segments->boot.size / sizeof(u32)); i++) {
 		segments->boot_signature = nvgpu_gr_checksum_u32(
 				segments->boot_signature, bootimage[i]);
 	}


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -188,7 +188,7 @@ static int gr_intr_handle_illegal_method(struct gk20a *g,
 static void gr_intr_handle_class_error(struct gk20a *g,
 			struct nvgpu_gr_isr_data *isr_data)
 {
-	u32 chid = isr_data->ch != NULL ?
+	u32 chid = (isr_data->ch != NULL) ?
 		isr_data->ch->chid : NVGPU_INVALID_CHANNEL_ID;
 	nvgpu_log_fn(g, " ");
@@ -214,7 +214,7 @@ static void gr_intr_report_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	}
 	ch = nvgpu_gr_intr_get_channel_from_ctx(g, curr_ctx, &tsgid);
-	chid = ch != NULL ? ch->chid : NVGPU_INVALID_CHANNEL_ID;
+	chid = (ch != NULL) ? ch->chid : NVGPU_INVALID_CHANNEL_ID;
 	if (ch != NULL) {
 		nvgpu_channel_put(ch);
 	}
@@ -329,7 +329,7 @@ void nvgpu_gr_intr_report_exception(struct gk20a *g, u32 inst,
 	if (curr_ctx != 0U) {
 		ch = nvgpu_gr_intr_get_channel_from_ctx(g, curr_ctx, &tsgid);
 	}
-	chid = ch != NULL ? ch->chid : NVGPU_INVALID_CHANNEL_ID;
+	chid = (ch != NULL) ? ch->chid : NVGPU_INVALID_CHANNEL_ID;
 	if (ch != NULL) {
 		nvgpu_channel_put(ch);
 	}
@@ -519,7 +519,7 @@ int nvgpu_gr_intr_handle_fecs_error(struct gk20a *g, struct nvgpu_channel *ch,
 	u32 gr_fecs_intr, mailbox_value;
 	int ret = 0;
 	struct nvgpu_fecs_host_intr_status fecs_host_intr;
-	u32 chid = isr_data->ch != NULL ?
+	u32 chid = (isr_data->ch != NULL) ?
 		isr_data->ch->chid : NVGPU_INVALID_CHANNEL_ID;
 	u32 mailbox_id = NVGPU_GR_FALCON_FECS_CTXSW_MAILBOX6;
@@ -896,7 +896,7 @@ static u32 gr_intr_handle_error_interrupts(struct gk20a *g,
 	/* this one happens if someone tries to hit a non-whitelisted
 	 * register using set_falcon[4] */
 	if (intr_info->fw_method != 0U) {
-		u32 ch_id = isr_data->ch != NULL ?
+		u32 ch_id = (isr_data->ch != NULL) ?
 			isr_data->ch->chid : NVGPU_INVALID_CHANNEL_ID;
 		nvgpu_err(g,
 			"firmware method 0x%08x, offset 0x%08x for channel %u",
@@ -938,7 +938,7 @@ static struct nvgpu_tsg *gr_intr_get_channel_from_ctx(struct gk20a *g,
 	ch = nvgpu_gr_intr_get_channel_from_ctx(g, isr_data->curr_ctx, &tsgid);
 	isr_data->ch = ch;
-	channel_id = ch != NULL ? ch->chid : NVGPU_INVALID_CHANNEL_ID;
+	channel_id = (ch != NULL) ? ch->chid : NVGPU_INVALID_CHANNEL_ID;
 	if (ch == NULL) {
 		nvgpu_err(g,