gpu: nvgpu: Fix MISRA rule 15.6 violations

MISRA Rule 15.6 requires that the body of every if, else and loop
statement be enclosed in braces, including single-statement bodies.
Fix the violations caused by single-statement if-else and loop
bodies written without braces by adding the required braces.
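
As a minimal illustration (hypothetical code, not taken from this
change), a non-compliant fragment and its braced fix:

    /* Non-compliant: single-statement bodies without braces. */
    static int sum_checked_bad(const int *data, int n, int *total)
    {
            int i;

            if (total == NULL)
                    return -EINVAL;
            for (i = 0; i < n; i++)
                    *total += data[i];
            return 0;
    }

    /* Compliant with Rule 15.6: every body is a compound statement. */
    static int sum_checked(const int *data, int n, int *total)
    {
            int i;

            if (total == NULL) {
                    return -EINVAL;
            }
            for (i = 0; i < n; i++) {
                    *total += data[i];
            }
            return 0;
    }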

JIRA NVGPU-775

Change-Id: Ib70621d39735abae3fd2eb7ccf77f36125e2d7b7
Signed-off-by: Srirangan Madhavan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1928745
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Srirangan Madhavan
Date: 2018-10-17 11:43:03 +05:30
Committed by: mobile promotions
Parent: 482d7e7ca2
Commit: ef5fdac7a6

21 changed files with 122 additions and 63 deletions


@@ -65,8 +65,9 @@ int gk20a_ce_execute_ops(struct gk20a *g,
         struct nvgpu_channel_fence fence = {0, 0};
         struct gk20a_fence *ce_cmd_buf_fence_out = NULL;
-        if (!ce_app->initialised ||ce_app->app_state != NVGPU_CE_ACTIVE)
+        if (!ce_app->initialised || ce_app->app_state != NVGPU_CE_ACTIVE) {
                 goto end;
+        }
         nvgpu_mutex_acquire(&ce_app->app_mutex);
@@ -108,8 +109,9 @@ int gk20a_ce_execute_ops(struct gk20a *g,
                 gk20a_fence_put(*prev_post_fence);
                 *prev_post_fence = NULL;
-                if (ret != 0)
+                if (ret != 0) {
                         goto noop;
+                }
         }
         cmd_buf_gpu_va = (ce_ctx->cmd_buf_mem.gpu_va + (u64)(cmd_buf_read_offset *sizeof(u32)));


@@ -57,8 +57,9 @@ u32 gp106_fuse_read_vin_cal_slope_intercept_fuse(struct gk20a *g,
         /* read gpc0 irrespective of vin id */
         gpc0data = gk20a_readl(g, fuse_vin_cal_gpc0_r());
-        if (gpc0data == 0xFFFFFFFF)
+        if (gpc0data == 0xFFFFFFFF) {
                 return -EINVAL;
+        }
         switch (vin_id) {
         case CTRL_CLK_VIN_ID_GPC0:
@@ -97,8 +98,9 @@ u32 gp106_fuse_read_vin_cal_slope_intercept_fuse(struct gk20a *g,
         default:
                 return -EINVAL;
         }
-        if (data == 0xFFFFFFFF)
+        if (data == 0xFFFFFFFF) {
                 return -EINVAL;
+        }
         gpc0interceptdata = (fuse_vin_cal_gpc0_icpt_int_data_v(gpc0data) <<
                         fuse_vin_cal_gpc0_icpt_frac_data_s()) +
@@ -137,10 +139,11 @@ u32 gp106_fuse_read_vin_cal_slope_intercept_fuse(struct gk20a *g,
                 return -EINVAL;
         }
-        if (fuse_vin_cal_gpc1_delta_icpt_sign_data_v(data))
+        if (fuse_vin_cal_gpc1_delta_icpt_sign_data_v(data)) {
                 *intercept = gpc0interceptdata - interceptdata;
-        else
+        } else {
                 *intercept = gpc0interceptdata + interceptdata;
+        }
         /* slope */
         gpc0slopedata = (fuse_vin_cal_gpc0_slope_int_data_v(gpc0data) <<
@@ -169,10 +172,11 @@ u32 gp106_fuse_read_vin_cal_slope_intercept_fuse(struct gk20a *g,
                 return -EINVAL;
         }
-        if (fuse_vin_cal_gpc1_delta_slope_sign_data_v(data))
+        if (fuse_vin_cal_gpc1_delta_slope_sign_data_v(data)) {
                 *slope = gpc0slopedata - slopedata;
-        else
+        } else {
                 *slope = gpc0slopedata + slopedata;
+        }
         return 0;
 }


@@ -87,8 +87,9 @@ int gk20a_comptag_allocator_init(struct gk20a *g,
         size--;
         allocator->bitmap = nvgpu_vzalloc(g,
                         BITS_TO_LONGS(size) * sizeof(long));
-        if (allocator->bitmap == NULL)
+        if (allocator->bitmap == NULL) {
                 return -ENOMEM;
+        }
         allocator->size = size;


@@ -495,8 +495,9 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
         }
         err = nvgpu_init_mmu_debug(mm);
-        if (err != 0)
+        if (err != 0) {
                 return err;
+        }
         mm->remove_support = nvgpu_remove_mm_support;
         mm->remove_ce_support = nvgpu_remove_mm_ce_support;


@@ -390,10 +390,11 @@ int nvgpu_sec2_process_message(struct nvgpu_sec2 *sec2)
                 msg.hdr.ctrl_flags &= ~PMU_CMD_FLAGS_PMU_MASK;
-                if (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT)
+                if (msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT) {
                         sec2_handle_event(sec2, &msg);
-                else
+                } else {
                         sec2_response_handle(sec2, &msg);
+                }
         }
 exit:
@@ -410,8 +411,9 @@ int nvgpu_sec2_wait_message_cond(struct nvgpu_sec2 *sec2, u32 timeout_ms,
         nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
         do {
-                if (*(u8 *)var == val)
+                if (*(u8 *)var == val) {
                         return 0;
+                }
                 if (g->ops.sec2.is_interrupted(&g->sec2)) {
                         g->ops.sec2.isr(g);


@@ -591,11 +591,13 @@ bool nvgpu_semaphore_reset(struct nvgpu_semaphore_int *hw_sema)
         */
        if (WARN_ON(__nvgpu_semaphore_value_released(threshold + 1U,
-                       current_val)))
+                       current_val))) {
                return false;
+        }
-        if (current_val == threshold)
+        if (current_val == threshold) {
                return false;
+        }
        nvgpu_mem_wr(hw_sema->ch->g, &hw_sema->location.pool->rw_mem,
                        hw_sema->location.offset, threshold);


@@ -31,10 +31,12 @@ int nvgpu_init_therm_support(struct gk20a *g)
         nvgpu_log_fn(g, " ");
-        if (g->ops.therm.init_therm_setup_hw != NULL)
+        if (g->ops.therm.init_therm_setup_hw != NULL) {
                 err = g->ops.therm.init_therm_setup_hw(g);
-        if (err != 0)
+        }
+        if (err != 0) {
                 return err;
+        }
 #ifdef CONFIG_DEBUG_FS
         if (g->ops.therm.therm_debugfs_init)


@@ -124,8 +124,9 @@ void gm20b_therm_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine)
 {
         u32 gate_ctrl;
-        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
                 return;
+        }
         gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
@@ -155,8 +156,9 @@ void gm20b_therm_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
         gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
-        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG))
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
                 return;
+        }
         switch (mode) {
         case ELCG_RUN:


@@ -102,8 +102,9 @@ void gv11b_therm_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
 {
         u32 gate_ctrl;
-        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG))
+        if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
                 return;
+        }
         gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));


@@ -282,8 +282,9 @@ int gk20a_finalize_poweron(struct gk20a *g)
         nvgpu_mutex_acquire(&g->tpc_pg_lock);
         if (g->can_tpc_powergate) {
-                if (g->ops.gr.powergate_tpc != NULL)
+                if (g->ops.gr.powergate_tpc != NULL) {
                         g->ops.gr.powergate_tpc(g);
+                }
         }
         err = gk20a_enable_gr_hw(g);


@@ -70,8 +70,9 @@ static inline u32 pri_get_gpc_num(struct gk20a *g, u32 addr)
         u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
         for (i = 0; i < num_gpcs; i++) {
                 start = gpc_base + (i * gpc_stride);
-                if ((addr >= start) && (addr < (start + gpc_stride)))
+                if ((addr >= start) && (addr < (start + gpc_stride))) {
                         return i;
+                }
         }
         return 0;
 }
@@ -198,8 +199,9 @@ static inline u32 pri_get_be_num(struct gk20a *g, u32 addr)
         u32 rop_stride = nvgpu_get_litter_value(g, GPU_LIT_ROP_STRIDE);
         for (i = 0; i < num_fbps; i++) {
                 start = rop_base + (i * rop_stride);
-                if ((addr >= start) && (addr < (start + rop_stride)))
+                if ((addr >= start) && (addr < (start + rop_stride))) {
                         return i;
+                }
         }
         return 0;
 }


@@ -133,14 +133,16 @@ int gp106_init_clk_arbiter(struct gk20a *g)
         }
         arb = nvgpu_kzalloc(g, sizeof(struct nvgpu_clk_arb));
-        if (arb == NULL)
+        if (arb == NULL) {
                 return -ENOMEM;
+        }
         arb->clk_arb_events_supported = true;
         err = nvgpu_mutex_init(&arb->pstate_lock);
-        if (err != 0)
+        if (err != 0) {
                 goto mutex_fail;
+        }
         nvgpu_spinlock_init(&arb->sessions_lock);
         nvgpu_spinlock_init(&arb->users_lock);
         nvgpu_spinlock_init(&arb->requests_lock);
@@ -206,8 +208,9 @@ int gp106_init_clk_arbiter(struct gk20a *g)
         nvgpu_atomic64_set(&arb->alarm_mask, 0);
         err = nvgpu_clk_notification_queue_alloc(g, &arb->notification_queue,
                         DEFAULT_EVENT_NUMBER);
-        if (err < 0)
+        if (err < 0) {
                 goto init_fail;
+        }
         nvgpu_init_list_node(&arb->users);
         nvgpu_init_list_node(&arb->sessions);
@@ -223,8 +226,9 @@ int gp106_init_clk_arbiter(struct gk20a *g)
         arb->update_arb_work_item.item_type = CLK_ARB_WORK_UPDATE_ARB;
         err = nvgpu_clk_arb_worker_init(g);
-        if (err < 0)
+        if (err < 0) {
                 goto init_fail;
+        }
 #ifdef CONFIG_DEBUG_FS
         arb->debug = &arb->debug_pool[0];
@@ -235,12 +239,14 @@ int gp106_init_clk_arbiter(struct gk20a *g)
         }
 #endif
         err = clk_vf_point_cache(g);
-        if (err < 0)
+        if (err < 0) {
                 goto init_fail;
+        }
         err = nvgpu_clk_arb_update_vf_table(arb);
-        if (err < 0)
+        if (err < 0) {
                 goto init_fail;
+        }
         do {
                 /* Check that first run is completed */
                 nvgpu_smp_mb();
@@ -292,8 +298,9 @@ static u8 nvgpu_clk_arb_find_vf_point(struct nvgpu_clk_arb *arb,
                 /* pointer to table can be updated by callback */
                 nvgpu_smp_rmb();
-                if (table == NULL)
+                if (table == NULL) {
                         continue;
+                }
                 if ((table->gpc2clk_num_points == 0U) ||
                                 (table->mclk_num_points == 0U)) {
                         nvgpu_err(arb->g, "found empty table");
@@ -420,28 +427,34 @@ static int nvgpu_clk_arb_change_vf_point(struct gk20a *g, u16 gpc2clk_target,
         /* descending */
         if (voltuv < arb->voltuv_actual) {
                 status = g->ops.clk.mclk_change(g, mclk_target);
-                if (status < 0)
+                if (status < 0) {
                         return status;
+                }
                 status = volt_set_voltage(g, voltuv, voltuv_sram);
-                if (status < 0)
+                if (status < 0) {
                         return status;
+                }
                 status = clk_set_fll_clks(g, &fllclk);
-                if (status < 0)
+                if (status < 0) {
                         return status;
+                }
         } else {
                 status = clk_set_fll_clks(g, &fllclk);
-                if (status < 0)
+                if (status < 0) {
                         return status;
+                }
                 status = volt_set_voltage(g, voltuv, voltuv_sram);
-                if (status < 0)
+                if (status < 0) {
                         return status;
+                }
                 status = g->ops.clk.mclk_change(g, mclk_target);
-                if (status < 0)
+                if (status < 0) {
                         return status;
+                }
         }
         return 0;
@@ -477,8 +490,9 @@ void gp106_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
         clk_arb_dbg(g, " ");
         /* bail out if gpu is down */
-        if (nvgpu_atomic64_read(&arb->alarm_mask) & EVENT(ALARM_GPU_LOST))
+        if (nvgpu_atomic64_read(&arb->alarm_mask) & EVENT(ALARM_GPU_LOST)) {
                 goto exit_arb;
+        }
 #ifdef CONFIG_DEBUG_FS
         g->ops.ptimer.read_ptimer(g, &t0);
@@ -543,20 +557,24 @@ void gp106_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
         gpc2clk_target = (gpc2clk_target > 0) ? gpc2clk_target :
                         arb->gpc2clk_default_mhz;
-        if (gpc2clk_target < arb->gpc2clk_min)
+        if (gpc2clk_target < arb->gpc2clk_min) {
                 gpc2clk_target = arb->gpc2clk_min;
+        }
-        if (gpc2clk_target > arb->gpc2clk_max)
+        if (gpc2clk_target > arb->gpc2clk_max) {
                 gpc2clk_target = arb->gpc2clk_max;
+        }
         mclk_target = (mclk_target > 0) ? mclk_target :
                         arb->mclk_default_mhz;
-        if (mclk_target < arb->mclk_min)
+        if (mclk_target < arb->mclk_min) {
                 mclk_target = arb->mclk_min;
+        }
-        if (mclk_target > arb->mclk_max)
+        if (mclk_target > arb->mclk_max) {
                 mclk_target = arb->mclk_max;
+        }
         sys2clk_target = 0;
         xbar2clk_target = 0;
@@ -577,9 +595,10 @@ void gp106_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
         }
         if ((gpc2clk_target < gpc2clk_session_target) ||
-                        (mclk_target < mclk_session_target))
+                        (mclk_target < mclk_session_target)) {
                 nvgpu_clk_arb_set_global_alarm(g,
                                 EVENT(ALARM_TARGET_VF_NOT_POSSIBLE));
+        }
         if ((arb->actual->gpc2clk == gpc2clk_target) &&
                         (arb->actual->mclk == mclk_target) &&


@@ -248,11 +248,13 @@ int gp106_clk_domain_get_f_points(
         u8 i;
         struct clk_pmupstate *pclk = &g->clk_pmu;
-        if (pfpointscount == NULL)
+        if (pfpointscount == NULL) {
                 return -EINVAL;
+        }
-        if ((pfreqpointsinmhz == NULL) && (*pfpointscount != 0))
+        if ((pfreqpointsinmhz == NULL) && (*pfpointscount != 0)) {
                 return -EINVAL;
+        }
         BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
                         struct clk_domain *, pdomain, i) {


@@ -76,8 +76,9 @@ static int gr_gv100_scg_estimate_perf(struct gk20a *g,
         u32 *num_tpc_gpc = nvgpu_kzalloc(g, sizeof(u32) *
                         nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS));
-        if (!num_tpc_gpc)
+        if (!num_tpc_gpc) {
                 return -ENOMEM;
+        }
         /* Calculate pix-perf-reduction-rate per GPC and find bottleneck TPC */
         for (gpc_id = 0; gpc_id < gr->gpc_count; gpc_id++) {
@@ -111,8 +112,9 @@ static int gr_gv100_scg_estimate_perf(struct gk20a *g,
                 scg_gpc_pix_perf = scale_factor * num_tpc_gpc[gpc_id] /
                                 gr->gpc_tpc_count[gpc_id];
-                if (min_scg_gpc_pix_perf > scg_gpc_pix_perf)
+                if (min_scg_gpc_pix_perf > scg_gpc_pix_perf) {
                         min_scg_gpc_pix_perf = scg_gpc_pix_perf;
+                }
                 /* Calculate # of surviving PES */
                 for (pes_id = 0; pes_id < gr->gpc_ppc_count[gpc_id]; pes_id++) {
@@ -130,8 +132,9 @@ static int gr_gv100_scg_estimate_perf(struct gk20a *g,
                                 num_tpc_mask &= ~(0x1 << disable_tpc_id);
                                 is_tpc_removed_pes = true;
                         }
-                        if (hweight32(num_tpc_mask))
+                        if (hweight32(num_tpc_mask)) {
                                 scg_num_pes++;
+                        }
                 }
         }
@@ -151,8 +154,9 @@ static int gr_gv100_scg_estimate_perf(struct gk20a *g,
         average_tpcs = scale_factor * average_tpcs / gr->gpc_count;
         for (gpc_id =0; gpc_id < gr->gpc_count; gpc_id++) {
                 diff = average_tpcs - scale_factor * num_tpc_gpc[gpc_id];
-                if (diff < 0)
+                if (diff < 0) {
                         diff = -diff;
+                }
                 deviation += diff;
         }
@@ -194,9 +198,10 @@ void gr_gv100_cb_size_default(struct gk20a *g)
 {
         struct gr_gk20a *gr = &g->gr;
-        if (!gr->attrib_cb_default_size)
+        if (!gr->attrib_cb_default_size) {
                 gr->attrib_cb_default_size =
                         gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
+        }
         gr->alpha_cb_default_size =
                 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
 }
@@ -373,8 +378,9 @@ int gr_gv100_add_ctxsw_reg_pm_fbpa(struct gk20a *g,
         u32 off = *offset;
         u32 active_fbpa_mask;
-        if ((cnt + (regs->count * num_fbpas)) > max_cnt)
+        if ((cnt + (regs->count * num_fbpas)) > max_cnt) {
                 return -EINVAL;
+        }
         active_fbpa_mask = gr_gv100_get_active_fpba_mask(g);


@@ -278,8 +278,9 @@ int gv100_init_gpu_characteristics(struct gk20a *g)
         int err;
         err = gk20a_init_gpu_characteristics(g);
-        if (err != 0)
+        if (err != 0) {
                 return err;
+        }
         __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
         __nvgpu_set_enabled(g, NVGPU_SUPPORT_GET_TEMPERATURE, true);


@@ -53,12 +53,14 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
         u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
         int status = 0;
-        if (falconidmask == 0)
+        if (falconidmask == 0) {
                 return -EINVAL;
+        }
         if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
-                        (1 << LSF_FALCON_ID_GPCCS)))
+                        (1 << LSF_FALCON_ID_GPCCS))) {
                 return -EINVAL;
+        }
         g->pmu_lsf_loaded_falcon_id = 0;
         /* check whether pmu is ready to bootstrap lsf if not wait for it */
@@ -90,8 +92,9 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
         pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
                         &g->pmu_lsf_loaded_falcon_id, 1);
-        if (g->pmu_lsf_loaded_falcon_id != 1)
+        if (g->pmu_lsf_loaded_falcon_id != 1) {
                 status = -ETIMEDOUT;
+        }
 exit:
         return status;


@@ -261,10 +261,11 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte);
  */
 #define pte_dbg(g, attrs, fmt, args...) \
         do { \
-                if (((attrs) != NULL) && ((attrs)->debug)) \
+                if (((attrs) != NULL) && ((attrs)->debug)) { \
                         nvgpu_info(g, fmt, ##args); \
-                else \
+                } else { \
                         nvgpu_log(g, gpu_dbg_pte, fmt, ##args); \
+                } \
         } while (0)
 #endif /* NVGPU_GMMU_H */
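
The pte_dbg hunk above applies the same rule inside a multi-line
macro, where each added brace must sit before the trailing backslash
so the continuation structure of the macro stays intact. A minimal
standalone sketch of that pattern, with hypothetical names (the
"args..."/"##args" named-variadic form is a GNU C extension, as in
the original macro):

    #include <stdio.h>

    /* Braced if-else inside a do { } while (0) macro body. */
    #define log_dbg(debug, fmt, args...) \
            do { \
                    if (debug) { \
                            printf("info: " fmt "\n", ##args); \
                    } else { \
                            printf("dbg: " fmt "\n", ##args); \
                    } \
            } while (0)

    int main(void)
    {
            log_dbg(1, "value=%d", 42); /* prints the "info" variant */
            return 0;
    }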


@@ -90,8 +90,9 @@ struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf,
         struct gk20a_dmabuf_priv *priv;
         priv = dma_buf_get_drvdata(dmabuf, dev);
-        if (WARN_ON(!priv))
+        if (WARN_ON(!priv)) {
                 return ERR_PTR(-EINVAL);
+        }
         nvgpu_mutex_acquire(&priv->lock);
@@ -182,16 +183,18 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
         struct gk20a_buffer_state *s;
         struct device *dev = dev_from_gk20a(g);
-        if (WARN_ON(offset >= (u64)dmabuf->size))
+        if (WARN_ON(offset >= (u64)dmabuf->size)) {
                 return -EINVAL;
+        }
         err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
         if (err)
                 return err;
         priv = dma_buf_get_drvdata(dmabuf, dev);
-        if (WARN_ON(!priv))
+        if (WARN_ON(!priv)) {
                 return -ENOSYS;
+        }
         nvgpu_mutex_acquire(&priv->lock);


@@ -938,12 +938,14 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
         nvgpu_log_fn(g, " ");
         /* not yet supported */
-        if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK))
+        if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_CPU_MASK)) {
                 return -EINVAL;
+        }
         /* not yet supported */
-        if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_VPR))
+        if (WARN_ON(args->in.flags & NVGPU_GPU_ALLOC_VIDMEM_FLAG_VPR)) {
                 return -EINVAL;
+        }
         if (args->in.size & (SZ_4K - 1))
                 return -EINVAL;


@@ -83,8 +83,9 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
         /* FIXME: add support for sparse mappings */
-        if (WARN_ON(!sgt) || WARN_ON(nvgpu_iommuable(g)))
+        if (WARN_ON(!sgt) || WARN_ON(nvgpu_iommuable(g))) {
                 return 0;
+        }
         if (space_to_skip & (page_size - 1))
                 return 0;


@@ -161,8 +161,9 @@ int vgpu_intr_thread(void *dev_id)
                                 (void **)&msg, &size, &sender);
                 if (err == -ETIME)
                         continue;
-                if (WARN_ON(err))
+                if (WARN_ON(err)) {
                         continue;
+                }
                 if (msg->event == TEGRA_VGPU_EVENT_ABORT) {
                         vgpu_ivc_release(handle);