mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single statement blocks. Fix errors due to single statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Icdeede22dd26fd70fae92aa791d35b115ef49e32
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797691
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 97aa9f705a
Commit: 0f97bd4d44
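The change is mechanical: every single-statement if, else if, and else body in the touched gk20a sources gains braces so the branch is a compound statement. A minimal, self-contained sketch of the before/after shape is below; the helper names (init_hal_sketch, probe_before, probe_after) are hypothetical stand-ins, not functions from this driver. The braced form is what each hunk that follows introduces.

#include <stdio.h>

/* Hypothetical stand-in for a HAL probe; not an nvgpu function. */
static int init_hal_sketch(int chip_supported)
{
        return chip_supported ? 0 : -1;
}

/* Non-compliant with MISRA C:2012 Rule 15.6: the if body is a single
 * unbraced statement. */
static int probe_before(int chip_supported)
{
        if (init_hal_sketch(chip_supported))
                return -1;
        return 0;
}

/* Compliant form, matching the pattern applied throughout the hunks below. */
static int probe_after(int chip_supported)
{
        if (init_hal_sketch(chip_supported)) {
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("before: %d, after: %d\n", probe_before(0), probe_after(0));
        return 0;
}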
@@ -42,30 +42,36 @@ int gpu_init_hal(struct gk20a *g)
         case GK20A_GPUID_GM20B:
         case GK20A_GPUID_GM20B_B:
                 nvgpu_log_info(g, "gm20b detected");
-                if (gm20b_init_hal(g))
+                if (gm20b_init_hal(g)) {
                         return -ENODEV;
+                }
                 break;
         case NVGPU_GPUID_GP10B:
-                if (gp10b_init_hal(g))
+                if (gp10b_init_hal(g)) {
                         return -ENODEV;
+                }
                 break;
         case NVGPU_GPUID_GP104:
         case NVGPU_GPUID_GP106:
-                if (gp106_init_hal(g))
+                if (gp106_init_hal(g)) {
                         return -ENODEV;
+                }
                 break;
         case NVGPU_GPUID_GV11B:
-                if (gv11b_init_hal(g))
+                if (gv11b_init_hal(g)) {
                         return -ENODEV;
+                }
                 break;
         case NVGPU_GPUID_GV100:
-                if (gv100_init_hal(g))
+                if (gv100_init_hal(g)) {
                         return -ENODEV;
+                }
                 break;
 #if defined(CONFIG_TEGRA_GPU_NEXT)
         case NVGPU_GPUID_NEXT:
-                if (NVGPU_NEXT_INIT_HAL(g))
+                if (NVGPU_NEXT_INIT_HAL(g)) {
                         return -ENODEV;
+                }
                 break;
 #endif
@@ -93,23 +93,27 @@ int gk20a_init_mm_setup_hw(struct gk20a *g)
         nvgpu_log_fn(g, " ");

         g->ops.fb.set_mmu_page_size(g);
-        if (g->ops.fb.set_use_full_comp_tag_line)
+        if (g->ops.fb.set_use_full_comp_tag_line) {
                 mm->use_full_comp_tag_line =
                         g->ops.fb.set_use_full_comp_tag_line(g);
+        }

         g->ops.fb.init_hw(g);

-        if (g->ops.bus.bar1_bind)
+        if (g->ops.bus.bar1_bind) {
                 g->ops.bus.bar1_bind(g, &mm->bar1.inst_block);
+        }

         if (g->ops.bus.bar2_bind) {
                 err = g->ops.bus.bar2_bind(g, &mm->bar2.inst_block);
-                if (err)
+                if (err) {
                         return err;
+                }
         }

-        if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g))
+        if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) {
                 return -EBUSY;
+        }

         nvgpu_log_fn(g, "done");
         return 0;
@@ -211,8 +215,9 @@ static void __update_pte(struct vm_gk20a *vm,

         pte_w[0] = pte_valid | addr;

-        if (attrs->priv)
+        if (attrs->priv) {
                 pte_w[0] |= gmmu_pte_privilege_true_f();
+        }

         pte_w[1] = __nvgpu_aperture_mask(g, attrs->aperture,
                 gmmu_pte_aperture_sys_mem_ncoh_f(),
@@ -222,9 +227,10 @@ static void __update_pte(struct vm_gk20a *vm,
                 gmmu_pte_comptagline_f((u32)(attrs->ctag >> ctag_shift));

         if (attrs->ctag && vm->mm->use_full_comp_tag_line &&
-            phys_addr & 0x10000)
+            phys_addr & 0x10000) {
                 pte_w[1] |= gmmu_pte_comptagline_f(
                         1 << (gmmu_pte_comptagline_s() - 1));
+        }

         if (attrs->rw_flag == gk20a_mem_flag_read_only) {
                 pte_w[0] |= gmmu_pte_read_only_true_f();
@@ -233,11 +239,13 @@ static void __update_pte(struct vm_gk20a *vm,
                 pte_w[1] |= gmmu_pte_read_disable_true_f();
         }

-        if (!attrs->cacheable)
+        if (!attrs->cacheable) {
                 pte_w[1] |= gmmu_pte_vol_true_f();
+        }

-        if (attrs->ctag)
+        if (attrs->ctag) {
                 attrs->ctag += page_size;
+        }
 }

 static void update_gmmu_pte_locked(struct vm_gk20a *vm,
@@ -254,10 +262,11 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
         u32 pte_w[2] = {0, 0};
         int ctag_shift = ilog2(g->ops.fb.compression_page_size(g));

-        if (phys_addr)
+        if (phys_addr) {
                 __update_pte(vm, pte_w, phys_addr, attrs);
-        else if (attrs->sparse)
+        } else if (attrs->sparse) {
                 __update_pte_sparse(pte_w);
+        }

         pte_dbg(g, attrs,
                 "PTE: i=%-4u size=%-2u offs=%-4u | "
@@ -338,8 +347,9 @@ int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
         nvgpu_vm_get(vm);
         ch->vm = vm;
         err = channel_gk20a_commit_va(ch);
-        if (err)
+        if (err) {
                 ch->vm = NULL;
+        }

         nvgpu_log(gk20a_from_vm(vm), gpu_dbg_map, "Binding ch=%d -> VM:%s",
                   ch->chid, vm->name);
@@ -384,8 +394,9 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
         nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(),
                 ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1)));

-        if (big_page_size && g->ops.mm.set_big_page_size)
+        if (big_page_size && g->ops.mm.set_big_page_size) {
                 g->ops.mm.set_big_page_size(g, inst_block, big_page_size);
+        }
 }

 int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
@@ -422,8 +433,9 @@ int gk20a_mm_fb_flush(struct gk20a *g)

         retries = 100;

-        if (g->ops.mm.get_flush_retries)
+        if (g->ops.mm.get_flush_retries) {
                 retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_FB);
+        }

         nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);

@@ -447,13 +459,15 @@ int gk20a_mm_fb_flush(struct gk20a *g)
                         flush_fb_flush_pending_busy_v()) {
                         nvgpu_log_info(g, "fb_flush 0x%x", data);
                         nvgpu_udelay(5);
-                } else
+                } else {
                         break;
+                }
         } while (!nvgpu_timeout_expired(&timeout));

         if (nvgpu_timeout_peek_expired(&timeout)) {
-                if (g->ops.fb.dump_vpr_wpr_info)
+                if (g->ops.fb.dump_vpr_wpr_info) {
                         g->ops.fb.dump_vpr_wpr_info(g);
+                }
                 ret = -EBUSY;
         }

@@ -474,8 +488,9 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)

         trace_gk20a_mm_l2_invalidate(g->name);

-        if (g->ops.mm.get_flush_retries)
+        if (g->ops.mm.get_flush_retries) {
                 retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_INV);
+        }

         nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);

@@ -494,12 +509,14 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
                         nvgpu_log_info(g, "l2_system_invalidate 0x%x",
                                 data);
                         nvgpu_udelay(5);
-                } else
+                } else {
                         break;
+                }
         } while (!nvgpu_timeout_expired(&timeout));

-        if (nvgpu_timeout_peek_expired(&timeout))
+        if (nvgpu_timeout_peek_expired(&timeout)) {
                 nvgpu_warn(g, "l2_system_invalidate too many retries");
+        }

         trace_gk20a_mm_l2_invalidate_done(g->name);
 }
@@ -526,11 +543,13 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
         nvgpu_log_fn(g, " ");

         gk20a_busy_noresume(g);
-        if (!g->power_on)
+        if (!g->power_on) {
                 goto hw_was_off;
+        }

-        if (g->ops.mm.get_flush_retries)
+        if (g->ops.mm.get_flush_retries) {
                 retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_FLUSH);
+        }

         nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);

@@ -552,15 +571,17 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
                         flush_l2_flush_dirty_pending_busy_v()) {
                         nvgpu_log_info(g, "l2_flush_dirty 0x%x", data);
                         nvgpu_udelay(5);
-                } else
+                } else {
                         break;
+                }
         } while (!nvgpu_timeout_expired_msg(&timeout,
                         "l2_flush_dirty too many retries"));

         trace_gk20a_mm_l2_flush_done(g->name);

-        if (invalidate)
+        if (invalidate) {
                 gk20a_mm_l2_invalidate_locked(g);
+        }

         nvgpu_mutex_release(&mm->l2_op_lock);

@@ -578,11 +599,13 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
         nvgpu_log_fn(g, " ");

         gk20a_busy_noresume(g);
-        if (!g->power_on)
+        if (!g->power_on) {
                 goto hw_was_off;
+        }

-        if (g->ops.mm.get_flush_retries)
+        if (g->ops.mm.get_flush_retries) {
                 retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN);
+        }

         nvgpu_timeout_init(g, &timeout, retries, NVGPU_TIMER_RETRY_TIMER);

@@ -601,8 +624,9 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
                         flush_l2_clean_comptags_pending_busy_v()) {
                         nvgpu_log_info(g, "l2_clean_comptags 0x%x", data);
                         nvgpu_udelay(5);
-                } else
+                } else {
                         break;
+                }
         } while (!nvgpu_timeout_expired_msg(&timeout,
                         "l2_clean_comptags too many retries"));

@@ -51,11 +51,12 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
         u32 i = 0, j = strlen(strings);

         for (; i < j; i++) {
-                if (strings[i] == '%')
+                if (strings[i] == '%') {
                         if (strings[i + 1] == 'x' || strings[i + 1] == 'X') {
                                 *hex_pos = i;
                                 return true;
                         }
+                }
         }
         *hex_pos = -1;
         return false;
@@ -72,8 +73,9 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)

         /* allocate system memory to copy pmu trace buffer */
         tracebuffer = nvgpu_kzalloc(g, GK20A_PMU_TRACE_BUFSIZE);
-        if (tracebuffer == NULL)
+        if (tracebuffer == NULL) {
                 return;
+        }

         /* read pmu traces into system memory buffer */
         nvgpu_mem_rd_n(g, &pmu->trace_buf, 0, tracebuffer,
@@ -85,17 +87,20 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)
         nvgpu_err(g, "dump PMU trace buffer");
         for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
                 for (j = 0; j < 0x40; j++) {
-                        if (trace1[(i / 4) + j])
+                        if (trace1[(i / 4) + j]) {
                                 break;
+                        }
                 }
-                if (j == 0x40)
+                if (j == 0x40) {
                         break;
+                }
                 count = scnprintf(buf, 0x40, "Index %x: ", trace1[(i / 4)]);
                 l = 0;
                 m = 0;
                 while (nvgpu_find_hex_in_string((trace+i+20+m), g, &k)) {
-                        if (k >= 40)
+                        if (k >= 40) {
                                 break;
+                        }
                         strncpy(part_str, (trace+i+20+m), k);
                         part_str[k] = '\0';
                         count += scnprintf((buf + count), 0x40, "%s0x%x",
@@ -277,8 +282,9 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
         struct pmu_mutex *mutex;
         u32 data, owner, max_retry;

-        if (!pmu->initialized)
+        if (!pmu->initialized) {
                 return -EINVAL;
+        }

         BUG_ON(!token);
         BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
@@ -346,8 +352,9 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
         struct pmu_mutex *mutex;
         u32 owner, data;

-        if (!pmu->initialized)
+        if (!pmu->initialized) {
                 return -EINVAL;
+        }

         BUG_ON(!token);
         BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
@@ -364,8 +371,9 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
                 return -EINVAL;
         }

-        if (--mutex->ref_cnt > 0)
+        if (--mutex->ref_cnt > 0) {
                 return -EBUSY;
+        }

         gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
                 pwr_pmu_mutex_value_initial_lock_f());
@@ -386,32 +394,36 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 {
         u32 queue_head_size = 0;

-        if (g->ops.pmu.pmu_get_queue_head_size)
+        if (g->ops.pmu.pmu_get_queue_head_size) {
                 queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
+        }

         BUG_ON(!head || !queue_head_size);

         if (PMU_IS_COMMAND_QUEUE(queue->id)) {

-                if (queue->index >= queue_head_size)
+                if (queue->index >= queue_head_size) {
                         return -EINVAL;
+                }

-                if (!set)
+                if (!set) {
                         *head = pwr_pmu_queue_head_address_v(
                                 gk20a_readl(g,
                                         g->ops.pmu.pmu_get_queue_head(queue->index)));
-                else
+                } else {
                         gk20a_writel(g,
                                 g->ops.pmu.pmu_get_queue_head(queue->index),
                                 pwr_pmu_queue_head_address_f(*head));
+                }
         } else {
-                if (!set)
+                if (!set) {
                         *head = pwr_pmu_msgq_head_val_v(
                                 gk20a_readl(g, pwr_pmu_msgq_head_r()));
-                else
+                } else {
                         gk20a_writel(g,
                                 pwr_pmu_msgq_head_r(),
                                 pwr_pmu_msgq_head_val_f(*head));
+                }
         }

         return 0;
@@ -422,33 +434,36 @@ int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 {
         u32 queue_tail_size = 0;

-        if (g->ops.pmu.pmu_get_queue_tail_size)
+        if (g->ops.pmu.pmu_get_queue_tail_size) {
                 queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
+        }

         BUG_ON(!tail || !queue_tail_size);

         if (PMU_IS_COMMAND_QUEUE(queue->id)) {

-                if (queue->index >= queue_tail_size)
+                if (queue->index >= queue_tail_size) {
                         return -EINVAL;
+                }

-                if (!set)
-                        *tail = pwr_pmu_queue_tail_address_v(
-                                gk20a_readl(g,
-                                        g->ops.pmu.pmu_get_queue_tail(queue->index)));
-                else
+                if (!set) {
+                        *tail = pwr_pmu_queue_tail_address_v(gk20a_readl(g,
+                                g->ops.pmu.pmu_get_queue_tail(queue->index)));
+                } else {
                         gk20a_writel(g,
                                 g->ops.pmu.pmu_get_queue_tail(queue->index),
                                 pwr_pmu_queue_tail_address_f(*tail));
+                }

         } else {
-                if (!set)
+                if (!set) {
                         *tail = pwr_pmu_msgq_tail_val_v(
                                 gk20a_readl(g, pwr_pmu_msgq_tail_r()));
-                else
+                } else {
                         gk20a_writel(g,
                                 pwr_pmu_msgq_tail_r(),
                                 pwr_pmu_msgq_tail_val_f(*tail));
+                }
         }

         return 0;
@@ -459,18 +474,20 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
         struct gk20a *g = gk20a_from_pmu(pmu);
         u32 queue_tail_size = 0;

-        if (g->ops.pmu.pmu_get_queue_tail_size)
+        if (g->ops.pmu.pmu_get_queue_tail_size) {
                 queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
+        }

         BUG_ON(!tail || !queue_tail_size);

-        if (!set)
+        if (!set) {
                 *tail = pwr_pmu_msgq_tail_val_v(
                         gk20a_readl(g, pwr_pmu_msgq_tail_r()));
-        else
+        } else {
                 gk20a_writel(g,
                         pwr_pmu_msgq_tail_r(),
                         pwr_pmu_msgq_tail_val_f(*tail));
+        }
 }

 int gk20a_init_pmu_setup_hw1(struct gk20a *g)
@@ -519,18 +536,20 @@ bool gk20a_pmu_is_engine_in_reset(struct gk20a *g)

         pmc_enable = gk20a_readl(g, mc_enable_r());
         if (mc_enable_pwr_v(pmc_enable) ==
-                mc_enable_pwr_disabled_v())
+                mc_enable_pwr_disabled_v()) {
                 status = true;
+        }

         return status;
 }

 int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset)
 {
-        if (do_reset)
+        if (do_reset) {
                 g->ops.mc.enable(g, mc_enable_pwr_enabled_f());
-        else
+        } else {
                 g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
+        }

         return 0;
 }
@@ -547,8 +566,9 @@ u32 gk20a_pmu_pg_engines_list(struct gk20a *g)

 u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 {
-        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
                 return NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING;
+        }

         return 0;
 }
@@ -567,8 +587,9 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
         struct pmu_cmd cmd;
         u32 seq;

-        if (!pmu->pmu_ready || !entries || !pmu->zbc_ready)
+        if (!pmu->pmu_ready || !entries || !pmu->zbc_ready) {
                 return;
+        }

         memset(&cmd, 0, sizeof(struct pmu_cmd));
         cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -583,8 +604,9 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
                         pmu_handle_zbc_msg, pmu, &seq, ~0);
         pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
                               &pmu->zbc_save_done, 1);
-        if (!pmu->zbc_save_done)
+        if (!pmu->zbc_save_done) {
                 nvgpu_err(g, "ZBC save timeout");
+        }
 }

 int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
@@ -596,11 +618,12 @@ int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,

         switch (msg->msg_type) {
         case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION:
-                if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1))
+                if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) {
                         nvgpu_clk_arb_send_thermal_alarm(pmu->g);
-                else
+                } else {
                         gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d",
                                 msg->hw_slct_msg.mask);
+                }
                 break;
         default:
                 gk20a_dbg_pmu(g, "unkown therm event received %d", msg->msg_type);
@@ -687,8 +710,9 @@ bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu)
                 pwr_falcon_irqstat_exterr_true_f() |
                 pwr_falcon_irqstat_swgen0_true_f();

-        if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint)
+        if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint) {
                 return true;
+        }

         return false;
 }
@@ -727,9 +751,11 @@ void gk20a_pmu_isr(struct gk20a *g)
                 nvgpu_pmu_dump_falcon_stats(pmu);
                 if (gk20a_readl(g, pwr_pmu_mailbox_r
                                 (PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) ==
-                                PMU_MODE_MISMATCH_STATUS_VAL)
-                        if (g->ops.pmu.dump_secure_fuses)
+                                PMU_MODE_MISMATCH_STATUS_VAL) {
+                        if (g->ops.pmu.dump_secure_fuses) {
                                 g->ops.pmu.dump_secure_fuses(g);
+                        }
+                }
         }
         if (intr & pwr_falcon_irqstat_exterr_true_f()) {
                 nvgpu_err(g,
@@ -741,8 +767,9 @@ void gk20a_pmu_isr(struct gk20a *g)
                                 ~pwr_falcon_exterrstat_valid_m());
         }

-        if (g->ops.pmu.handle_ext_irq)
+        if (g->ops.pmu.handle_ext_irq) {
                 g->ops.pmu.handle_ext_irq(g, intr);
+        }

         if (intr & pwr_falcon_irqstat_swgen0_true_f()) {
                 nvgpu_pmu_process_message(pmu);

@@ -36,11 +36,12 @@ static int regop_bsearch_range_cmp(const void *pkey, const void *pelem)
 {
         u32 key = *(u32 *)pkey;
         struct regop_offset_range *prange = (struct regop_offset_range *)pelem;
-        if (key < prange->base)
+        if (key < prange->base) {
                 return -1;
-        else if (prange->base <= key && key < (prange->base +
-                                               (prange->count * 4U)))
+        } else if (prange->base <= key && key < (prange->base +
+                                               (prange->count * 4U))) {
                 return 0;
+        }
         return 1;
 }

@@ -48,8 +49,9 @@ static inline bool linear_search(u32 offset, const u32 *list, int size)
 {
         int i;
         for (i = 0; i < size; i++) {
-                if (list[i] == offset)
+                if (list[i] == offset) {
                         return true;
+                }
         }
         return false;
 }
@@ -111,8 +113,9 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
          * regops implementation, so we return -ENOSYS. This will allow
          * compute apps to run with vgpu. Tools will not work in this
          * configuration and are not required to work at this time. */
-        if (g->is_virtual)
+        if (g->is_virtual) {
                 return -ENOSYS;
+        }

         ok = validate_reg_ops(dbg_s,
                               &ctx_rd_count, &ctx_wr_count,
@@ -134,8 +137,9 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,

         for (i = 0; i < num_ops; i++) {
                 /* if it isn't global then it is done in the ctx ops... */
-                if (ops[i].type != REGOP(TYPE_GLOBAL))
+                if (ops[i].type != REGOP(TYPE_GLOBAL)) {
                         continue;
+                }

                 switch (ops[i].op) {

@@ -358,8 +362,9 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
         }

         valid = check_whitelists(dbg_s, op, offset);
-        if ((op->op == REGOP(READ_64) || op->op == REGOP(WRITE_64)) && valid)
+        if ((op->op == REGOP(READ_64) || op->op == REGOP(WRITE_64)) && valid) {
                 valid = check_whitelists(dbg_s, op, offset + 4);
+        }

         if (valid && (op->type != REGOP(TYPE_GLOBAL))) {
                 err = gr_gk20a_get_ctx_buffer_offsets(dbg_s->g,
@@ -416,10 +421,11 @@ static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
                 }

                 if (reg_op_is_gr_ctx(ops[i].type)) {
-                        if (reg_op_is_read(ops[i].op))
+                        if (reg_op_is_read(ops[i].op)) {
                                 (*ctx_rd_count)++;
-                        else
+                        } else {
                                 (*ctx_wr_count)++;
+                        }
                 }

                 /* if "allow_all" flag enabled, dont validate offset */

@@ -50,16 +50,18 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
                 is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
                 is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);

-                if (is_next || is_ctx_reload)
+                if (is_next || is_ctx_reload) {
                         g->ops.fifo.enable_channel(ch);
+                }
         }

         nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
                 is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
                 is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);

-                if (is_next || is_ctx_reload)
+                if (is_next || is_ctx_reload) {
                         continue;
+                }

                 g->ops.fifo.enable_channel(ch);
         }
@@ -92,8 +94,9 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)

         for (i = 0; i < f->max_runlists; ++i) {
                 runlist = &f->runlist_info[i];
-                if (test_bit(ch->chid, runlist->active_channels))
+                if (test_bit(ch->chid, runlist->active_channels)) {
                         return true;
+                }
         }

         return false;
@@ -124,9 +127,9 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
         ch->tsgid = tsg->tsgid;

         /* all the channel part of TSG should need to be same runlist_id */
-        if (tsg->runlist_id == FIFO_INVAL_TSG_ID)
+        if (tsg->runlist_id == FIFO_INVAL_TSG_ID) {
                 tsg->runlist_id = ch->runlist_id;
-        else if (tsg->runlist_id != ch->runlist_id) {
+        } else if (tsg->runlist_id != ch->runlist_id) {
                 nvgpu_err(tsg->g,
                         "Error: TSG channel should be share same runlist ch[%d] tsg[%d]",
                         ch->runlist_id, tsg->runlist_id);
@@ -180,8 +183,9 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
         struct tsg_gk20a *tsg = NULL;
         int err;

-        if (tsgid >= g->fifo.num_channels)
+        if (tsgid >= g->fifo.num_channels) {
                 return -EINVAL;
+        }

         tsg = &g->fifo.tsg[tsgid];

@@ -214,8 +218,9 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
         case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH:
                 ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid,
                                                         0, level);
-                if (!ret)
+                if (!ret) {
                         tsg->interleave_level = level;
+                }
                 break;
         default:
                 ret = -EINVAL;
@@ -238,8 +243,9 @@ u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg)
 {
         struct gk20a *g = tsg->g;

-        if (!tsg->timeslice_us)
+        if (!tsg->timeslice_us) {
                 return g->ops.fifo.default_timeslice_us(g);
+        }

         return tsg->timeslice_us;
 }
@@ -306,8 +312,9 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
         tsg->tgid = pid;
         tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;

-        if (g->ops.fifo.init_eng_method_buffers)
+        if (g->ops.fifo.init_eng_method_buffers) {
                 g->ops.fifo.init_eng_method_buffers(g, tsg);
+        }

         if (g->ops.fifo.tsg_open) {
                 err = g->ops.fifo.tsg_open(tsg);