nvgpu: gp106: MISRA 10.1 boolean fixes

Fix violations where a variable of non-boolean type is used as a
boolean in gpu/nvgpu/gp106.
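
MISRA C:2012 Rule 10.1 requires that the controlling expression of an
if/while and the operands of &&, || and ! be essentially Boolean. The
hunks below therefore replace pointer tests such as !ptr with explicit
ptr == NULL comparisons, rewrite integer and bitmask tests as == 0U /
!= 0U comparisons, and change helpers that returned 0/1 through a u8 or
int to return bool. A minimal standalone sketch of the pattern follows;
the names and the main() driver are illustrative and are not taken from
the nvgpu sources.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative helper (not an nvgpu function): the result of the bitwise
 * expression is compared against 0U, so the return value is a real bool. */
static bool flag_is_set(unsigned int mask, unsigned int bit)
{
	/* Non-compliant form: return (mask >> bit) & 0x1; */
	return ((mask >> bit) & 0x1U) != 0U;
}

/* Hypothetical caller showing the pointer and integer cases. */
static int check_config(const void *ptr, unsigned int count)
{
	/* Non-compliant form: if (!ptr) */
	if (ptr == NULL) {
		return -1;
	}

	/* Non-compliant form: if (count && flag_is_set(count, 0)) */
	if ((count != 0U) && flag_is_set(count, 0U)) {
		return 1;
	}

	return 0;
}

int main(void)
{
	unsigned int mask = 0x5U;

	/* Prints 1: mask is non-zero and bit 0 of mask is set. */
	printf("%d\n", check_config(&mask, mask));
	return 0;
}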

JIRA NVGPU-646

Change-Id: I2c56f87b36c6144497a34438006933c34e381ccb
Signed-off-by: Amurthyreddy <amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1815523
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by: Amurthyreddy
Date: 2018-09-10 16:49:38 +05:30
Committed by: Abdul Salam
Parent: 7f6c782ba0
Commit: 9b8185b261
7 changed files with 64 additions and 53 deletions

@@ -123,7 +123,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
gp106_dbg_pmu(g, "requesting PMU ucode in gp106\n");
pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE,
NVGPU_REQUEST_FIRMWARE_NO_SOC);
- if (!pmu_fw) {
+ if (pmu_fw == NULL) {
nvgpu_err(g, "failed to load pmu ucode!!");
return -ENOENT;
}
@@ -133,14 +133,14 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
gp106_dbg_pmu(g, "requesting PMU ucode desc in GM20B\n");
pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC,
NVGPU_REQUEST_FIRMWARE_NO_SOC);
- if (!pmu_desc) {
+ if (pmu_desc == NULL) {
nvgpu_err(g, "failed to load pmu ucode desc!!");
err = -ENOENT;
goto release_img_fw;
}
pmu_sig = nvgpu_request_firmware(g, GM20B_PMU_UCODE_SIG,
NVGPU_REQUEST_FIRMWARE_NO_SOC);
- if (!pmu_sig) {
+ if (pmu_sig == NULL) {
nvgpu_err(g, "failed to load pmu sig!!");
err = -ENOENT;
goto release_desc;
@@ -156,7 +156,7 @@ int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
}
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
- if (!lsf_desc) {
+ if (lsf_desc == NULL) {
err = -ENOMEM;
goto release_sig;
}
@@ -224,12 +224,12 @@ int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
nvgpu_err(g, "no support for GPUID %x", ver);
}
- if (!fecs_sig) {
+ if (fecs_sig == NULL) {
nvgpu_err(g, "failed to load fecs sig");
return -ENOENT;
}
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
- if (!lsf_desc) {
+ if (lsf_desc == NULL) {
err = -ENOMEM;
goto rel_sig;
}
@@ -328,12 +328,12 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
nvgpu_err(g, "no support for GPUID %x", ver);
}
- if (!gpccs_sig) {
+ if (gpccs_sig == NULL) {
nvgpu_err(g, "failed to load gpccs sig");
return -ENOENT;
}
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
- if (!lsf_desc) {
+ if (lsf_desc == NULL) {
err = -ENOMEM;
goto rel_sig;
}
@@ -539,7 +539,8 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
lsfm_discover_and_add_sub_wprs(g, plsfm);
}
- if (plsfm->managed_flcn_cnt && !g->acr.ucode_blob.cpu_va) {
+ if ((plsfm->managed_flcn_cnt != 0U) &&
+ (g->acr.ucode_blob.cpu_va == NULL)) {
/* Generate WPR requirements*/
err = lsf_gen_wpr_requirements(g, plsfm);
if (err) {
@@ -567,10 +568,10 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
return err;
}
- static u8 lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
+ static bool lsfm_falcon_disabled(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
u32 falcon_id)
{
- return (plsfm->disable_mask >> falcon_id) & 0x1;
+ return ((plsfm->disable_mask >> falcon_id) & 0x1U) != 0U;
}
/* Discover all managed falcon ucode images */
@@ -614,7 +615,7 @@ int lsfm_discover_ucode_images(struct gk20a *g,
}
/*Free any ucode image resources if not managing this falcon*/
- if (!(pmu->pmu_mode & PMU_LSFM_MANAGED)) {
+ if ((pmu->pmu_mode & PMU_LSFM_MANAGED) == 0U) {
gp106_dbg_pmu(g, "pmu is not LSFM managed\n");
lsfm_free_ucode_img_res(g, &ucode_img);
}
@@ -935,7 +936,7 @@ void lsfm_init_wpr_contents(struct gk20a *g,
/*If this falcon has a boot loader and related args,
* flush them.*/
- if (!pnode->ucode_img.header) {
+ if (pnode->ucode_img.header == NULL) {
/*Populate gen bl and flush to memory*/
lsfm_fill_flcn_bl_gen_desc(g, pnode);
nvgpu_mem_wr_n(g, ucode,
@@ -1207,7 +1208,7 @@ int lsf_gen_wpr_requirements(struct gk20a *g,
the boot loader data. The host will then copy the loader desc
args to this space within the WPR region (before locking down)
and the HS bin will then copy them to DMEM 0 for the loader. */
- if (!pnode->ucode_img.header) {
+ if (pnode->ucode_img.header == NULL) {
/* Track the size for LSB details filled in later
Note that at this point we don't know what kind of i
boot loader desc, so we just take the size of the

@@ -77,7 +77,7 @@ static void upload_data(struct gk20a *g, u32 dst, u8 *src, u32 size, u8 port)
int gp106_bios_devinit(struct gk20a *g)
{
int err = 0;
- int devinit_completed;
+ bool devinit_completed;
struct nvgpu_timeout timeout;
nvgpu_log_fn(g, " ");
@@ -115,12 +115,12 @@ int gp106_bios_devinit(struct gk20a *g)
PMU_BOOT_TIMEOUT_DEFAULT,
NVGPU_TIMER_RETRY_TIMER);
do {
- devinit_completed = pwr_falcon_cpuctl_halt_intr_v(
- gk20a_readl(g, pwr_falcon_cpuctl_r())) &&
- top_scratch1_devinit_completed_v(
- gk20a_readl(g, top_scratch1_r()));
+ devinit_completed = (pwr_falcon_cpuctl_halt_intr_v(
+ gk20a_readl(g, pwr_falcon_cpuctl_r())) != 0U) &&
+ (top_scratch1_devinit_completed_v(
+ gk20a_readl(g, top_scratch1_r())) != 0U);
nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);
- } while (!devinit_completed && !nvgpu_timeout_expired(&timeout));
+ } while (!devinit_completed && (nvgpu_timeout_expired(&timeout) == 0));
if (nvgpu_timeout_peek_expired(&timeout)) {
err = -ETIMEDOUT;
@@ -199,7 +199,7 @@ int gp106_bios_init(struct gk20a *g)
nvgpu_log_info(g, "reading bios from EEPROM");
g->bios.size = BIOS_SIZE;
g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE);
- if (!g->bios.data) {
+ if (g->bios.data == NULL) {
return -ENOMEM;
}
@@ -243,7 +243,7 @@ int gp106_bios_init(struct gk20a *g)
}
if (nvgpu_is_enabled(g, NVGPU_PMU_RUN_PREOS) &&
- g->ops.bios.preos) {
+ (g->ops.bios.preos != NULL)) {
err = g->ops.bios.preos(g);
if (err) {
nvgpu_err(g, "pre-os failed");

@@ -63,20 +63,20 @@ int gp106_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
p5_info = pstate_get_clk_set_info(g,
CTRL_PERF_PSTATE_P5, clkwhich);
- if (!p5_info) {
+ if (p5_info == NULL) {
return -EINVAL;
}
p0_info = pstate_get_clk_set_info(g,
CTRL_PERF_PSTATE_P0, clkwhich);
- if (!p0_info) {
+ if (p0_info == NULL) {
return -EINVAL;
}
limit_min_mhz = p5_info->min_mhz;
/* WAR for DVCO min */
if (api_domain == CTRL_CLK_DOMAIN_GPC2CLK) {
- if ((pfllobjs->max_min_freq_mhz) &&
+ if ((pfllobjs->max_min_freq_mhz != 0U) &&
(pfllobjs->max_min_freq_mhz >= limit_min_mhz)) {
limit_min_mhz = pfllobjs->max_min_freq_mhz + 1;
}
@@ -109,7 +109,7 @@ int gp106_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
p0_info = pstate_get_clk_set_info(g,
CTRL_PERF_PSTATE_P0, clkwhich);
- if (!p0_info) {
+ if (p0_info == NULL) {
return -EINVAL;
}
@@ -133,7 +133,7 @@ int gp106_init_clk_arbiter(struct gk20a *g)
}
arb = nvgpu_kzalloc(g, sizeof(struct nvgpu_clk_arb));
- if (!arb)
+ if (arb == NULL)
return -ENOMEM;
arb->clk_arb_events_supported = true;
@@ -146,13 +146,13 @@ int gp106_init_clk_arbiter(struct gk20a *g)
nvgpu_spinlock_init(&arb->requests_lock);
arb->mclk_f_points = nvgpu_kcalloc(g, MAX_F_POINTS, sizeof(u16));
- if (!arb->mclk_f_points) {
+ if (arb->mclk_f_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
arb->gpc2clk_f_points = nvgpu_kcalloc(g, MAX_F_POINTS, sizeof(u16));
- if (!arb->gpc2clk_f_points) {
+ if (arb->gpc2clk_f_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
@@ -164,7 +164,7 @@ int gp106_init_clk_arbiter(struct gk20a *g)
table->gpc2clk_points = nvgpu_kcalloc(g, MAX_F_POINTS,
sizeof(struct nvgpu_clk_vf_point));
- if (!table->gpc2clk_points) {
+ if (table->gpc2clk_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
@@ -172,7 +172,7 @@ int gp106_init_clk_arbiter(struct gk20a *g)
table->mclk_points = nvgpu_kcalloc(g, MAX_F_POINTS,
sizeof(struct nvgpu_clk_vf_point));
- if (!table->mclk_points) {
+ if (table->mclk_points == NULL) {
err = -ENOMEM;
goto init_fail;
}
@@ -246,7 +246,7 @@ int gp106_init_clk_arbiter(struct gk20a *g)
nvgpu_smp_mb();
NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
nvgpu_atomic_read(&arb->req_nr), 0);
- } while (!nvgpu_atomic_read(&arb->req_nr));
+ } while (nvgpu_atomic_read(&arb->req_nr) == 0);
return arb->status;
@@ -292,9 +292,10 @@ static u8 nvgpu_clk_arb_find_vf_point(struct nvgpu_clk_arb *arb,
/* pointer to table can be updated by callback */
nvgpu_smp_rmb();
- if (!table)
+ if (table == NULL)
continue;
- if ((!table->gpc2clk_num_points) || (!table->mclk_num_points)) {
+ if ((table->gpc2clk_num_points == 0U) ||
+ (table->mclk_num_points == 0U)) {
nvgpu_err(arb->g, "found empty table");
goto find_exit;
}
@@ -377,7 +378,7 @@ recalculate_vf_point:
mclk_voltuv = mclk_vf->uvolt;
mclk_voltuv_sram = mclk_vf->uvolt_sram;
- } while (!table ||
+ } while ((table == NULL) ||
(NV_ACCESS_ONCE(arb->current_vf_table) != table));
find_exit:
@@ -504,13 +505,14 @@ void gp106_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
/* Query the latest committed request */
nvgpu_list_for_each_entry_safe(dev, tmp,
&session->targets, nvgpu_clk_dev, node) {
- if (!mclk_set && dev->mclk_target_mhz) {
+ if (!mclk_set &&
+ (dev->mclk_target_mhz != 0U)) {
target->mclk =
dev->mclk_target_mhz;
mclk_set = true;
}
if (!gpc2clk_set &&
- dev->gpc2clk_target_mhz) {
+ (dev->gpc2clk_target_mhz != 0U)) {
target->gpc2clk =
dev->gpc2clk_target_mhz;
gpc2clk_set = true;

@@ -59,12 +59,16 @@ unsigned long gp106_clk_measure_freq(struct gk20a *g, u32 api_domain)
}
}
- if (!c) {
+ if (c == NULL) {
return 0;
}
- freq_khz = c->is_counter ? c->scale * gp106_get_rate_cntr(g, c) :
- 0; /* TODO: PLL read */
+ /* TODO: PLL read */
+ if (c->is_counter != 0U) {
+ freq_khz = c->scale * gp106_get_rate_cntr(g, c);
+ } else {
+ freq_khz = 0U;
+ }
/* Convert to HZ */
return freq_khz * 1000UL;
@@ -85,14 +89,14 @@ int gp106_init_clk_support(struct gk20a *g)
clk->clk_namemap = (struct namemap_cfg *)
nvgpu_kzalloc(g, sizeof(struct namemap_cfg) * NUM_NAMEMAPS);
- if (!clk->clk_namemap) {
+ if (clk->clk_namemap == NULL) {
nvgpu_mutex_destroy(&clk->clk_mutex);
return -ENOMEM;
}
clk->namemap_xlat_table = nvgpu_kcalloc(g, NUM_NAMEMAPS, sizeof(u32));
- if (!clk->namemap_xlat_table) {
+ if (clk->namemap_xlat_table == NULL) {
nvgpu_kfree(g, clk->clk_namemap);
nvgpu_mutex_destroy(&clk->clk_mutex);
return -ENOMEM;
@@ -173,7 +177,9 @@ u32 gp106_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c)
struct clk_gk20a *clk = &g->clk;
- if (!c || !c->cntr.reg_ctrl_addr || !c->cntr.reg_cntr_addr) {
+ if ((c == NULL) ||
+ (c->cntr.reg_ctrl_addr == 0U) ||
+ (c->cntr.reg_cntr_addr == 0U)) {
return 0;
}
@@ -194,9 +200,11 @@ u32 gp106_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c)
retries = CLK_DEFAULT_CNTRL_SETTLE_RETRIES;
do {
nvgpu_udelay(CLK_DEFAULT_CNTRL_SETTLE_USECS);
- } while ((--retries) && (cntr = gk20a_readl(g, c->cntr.reg_cntr_addr)));
+ cntr = gk20a_readl(g, c->cntr.reg_cntr_addr);
+ retries--;
+ } while ((retries != 0U) && (cntr != 0U));
- if (!retries) {
+ if (retries == 0U) {
nvgpu_err(g, "unable to settle counter reset, bailing");
goto read_err;
}

@@ -129,7 +129,7 @@ void gr_gp106_cb_size_default(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
- if (!gr->attrib_cb_default_size) {
+ if (gr->attrib_cb_default_size == 0U) {
gr->attrib_cb_default_size = 0x800;
}
gr->alpha_cb_default_size =

@@ -3086,7 +3086,7 @@ static int mclk_get_memclk_table(struct gk20a *g)
(u32)(memclock_table_header.script_list_ptr +
script_index * sizeof(u32)));
- if (!script_ptr) {
+ if (script_ptr == 0U) {
continue;
}
@@ -3141,7 +3141,7 @@ static int mclk_get_memclk_table(struct gk20a *g)
(u32)(memclock_table_header.cmd_script_list_ptr +
cmd_script_index * sizeof(u32)));
- if (!cmd_script_ptr) {
+ if (cmd_script_ptr == 0U) {
continue;
}
@@ -3531,7 +3531,7 @@ static int mclk_debugfs_init(struct gk20a *g)
gpu_root,
g,
&mclk_debug_speed_set_fops);
- if (!d)
+ if (d == NULL)
return -ENOMEM;
d = debugfs_create_file(
@@ -3540,7 +3540,7 @@ static int mclk_debugfs_init(struct gk20a *g)
gpu_root,
g,
&mclk_switch_stats_fops);
- if (!d)
+ if (d == NULL)
return -ENOMEM;
return 0;

@@ -54,7 +54,7 @@ struct nvgpu_clk_session;
#define VF_POINT_GET_PSTATE(a) (((a)->pstates) ?\
__fls((a)->pstates) :\
VF_POINT_INVALID_PSTATE)
- #define VF_POINT_COMMON_PSTATE(a, b) (((a)->pstates & (b)->pstates) ?\
+ #define VF_POINT_COMMON_PSTATE(a, b) (((a)->pstates & (b)->pstates) != 0U ?\
__fls((a)->pstates & (b)->pstates) :\
VF_POINT_INVALID_PSTATE)