Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: gp10b: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces, including single-statement blocks. Fix violations caused by single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Ib5961506b0f95867a57f8c0d7024568785fe7b93
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797332
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 14949fbad6
Commit: 5c9bedf6f6
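Before the hunks, a minimal sketch of the rule for context. MISRA C:2012 Rule 15.6 requires the body of every if, else if, and else to be a compound statement. The fragment below is illustrative only; err and val are hypothetical names, not code from this change:

    /* Illustrative only: err and val are hypothetical. */
    static int clamp_error(int err)
    {
            int val;

            /* Non-compliant under Rule 15.6: unbraced single-statement bodies.
             *
             *     if (err)
             *             return err;
             *     else
             *             val = 0;
             */

            /* Compliant: each body is a compound statement. */
            if (err) {
                    return err;
            } else {
                    val = 0;
            }

            return val;
    }

The braces change no behavior; they prevent a later edit from silently landing outside the intended branch. That is the one transformation applied in every hunk below.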
@@ -51,11 +51,13 @@ void gp10b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 	nvgpu_log(g, gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id);
 
 	/* clear blocking interrupts: they exibit broken behavior */
-	if (ce_intr & ce_intr_status_blockpipe_pending_f())
+	if (ce_intr & ce_intr_status_blockpipe_pending_f()) {
 		clear_intr |= ce_blockpipe_isr(g, ce_intr);
+	}
 
-	if (ce_intr & ce_intr_status_launcherr_pending_f())
+	if (ce_intr & ce_intr_status_launcherr_pending_f()) {
 		clear_intr |= ce_launcherr_isr(g, ce_intr);
+	}
 
 	gk20a_writel(g, ce_intr_status_r(inst_id), clear_intr);
 	return;
@@ -150,8 +150,9 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c)
 	v = nvgpu_mem_rd32(c->g, &c->inst_block,
 			ram_fc_allowed_syncpoints_w());
 	old_syncpt = pbdma_allowed_syncpoints_0_index_v(v);
-	if (c->sync)
+	if (c->sync) {
 		new_syncpt = c->sync->syncpt_id(c->sync);
+	}
 
 	if (new_syncpt && new_syncpt != old_syncpt) {
 		/* disable channel */
@@ -185,9 +186,9 @@ int gp10b_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type,
 	int ret = ENGINE_INVAL_GK20A;
 
 	nvgpu_log_info(g, "engine type %d", engine_type);
-	if (engine_type == top_device_info_type_enum_graphics_v())
+	if (engine_type == top_device_info_type_enum_graphics_v()) {
 		ret = ENGINE_GR_GK20A;
-	else if (engine_type == top_device_info_type_enum_lce_v()) {
+	} else if (engine_type == top_device_info_type_enum_lce_v()) {
 		/* Default assumptions - all the CE engine have separate runlist */
 		ret = ENGINE_ASYNC_CE_GK20A;
 	}
@@ -200,8 +201,9 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
 {
 	if (top_device_info_data_type_v(table_entry) ==
 			top_device_info_data_type_enum2_v()) {
-		if (inst_id)
+		if (inst_id) {
 			*inst_id = top_device_info_data_inst_id_v(table_entry);
+		}
 		if (pri_base) {
 			*pri_base =
 				(top_device_info_data_pri_base_v(table_entry)
@@ -214,9 +216,10 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
 				g->ops.fifo.device_info_fault_id(table_entry);
 			nvgpu_log_info(g, "device info: fault_id: %d", *fault_id);
 		}
-	} else
+	} else {
 		nvgpu_err(g, "unknown device_info_data %d",
 			top_device_info_data_type_v(table_entry));
+	}
 }
 
 void gp10b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
@@ -330,21 +333,23 @@ static const char * const gp10b_hub_client_descs[] = {
 /* fill in mmu fault desc */
 void gp10b_fifo_get_mmu_fault_desc(struct mmu_fault_info *mmfault)
 {
-	if (mmfault->fault_type >= ARRAY_SIZE(gp10b_fault_type_descs))
+	if (mmfault->fault_type >= ARRAY_SIZE(gp10b_fault_type_descs)) {
 		WARN_ON(mmfault->fault_type >=
 				ARRAY_SIZE(gp10b_fault_type_descs));
-	else
+	} else {
 		mmfault->fault_type_desc =
 			gp10b_fault_type_descs[mmfault->fault_type];
+	}
 }
 
 /* fill in mmu fault client description */
 void gp10b_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault)
 {
-	if (mmfault->client_id >= ARRAY_SIZE(gp10b_hub_client_descs))
+	if (mmfault->client_id >= ARRAY_SIZE(gp10b_hub_client_descs)) {
 		WARN_ON(mmfault->client_id >=
 				ARRAY_SIZE(gp10b_hub_client_descs));
-	else
+	} else {
 		mmfault->client_id_desc =
 			gp10b_hub_client_descs[mmfault->client_id];
+	}
 }
@@ -76,18 +76,20 @@ bool gr_gp10b_is_valid_class(struct gk20a *g, u32 class_num)
 
 bool gr_gp10b_is_valid_gfx_class(struct gk20a *g, u32 class_num)
 {
-	if (class_num == PASCAL_A || class_num == MAXWELL_B)
+	if (class_num == PASCAL_A || class_num == MAXWELL_B) {
 		return true;
-	else
+	} else {
 		return false;
+	}
 }
 
 bool gr_gp10b_is_valid_compute_class(struct gk20a *g, u32 class_num)
 {
-	if (class_num == PASCAL_COMPUTE_A || class_num == MAXWELL_COMPUTE_B)
+	if (class_num == PASCAL_COMPUTE_A || class_num == MAXWELL_COMPUTE_B) {
 		return true;
-	else
+	} else {
 		return false;
+	}
 }
 
 
@@ -120,10 +122,11 @@ static void gr_gp10b_sm_lrf_ecc_overcount_war(int single_err,
 			hweight32(sed_status & ded_status);
 	}
 
-	if (*count_to_adjust > over_count)
+	if (*count_to_adjust > over_count) {
 		*count_to_adjust -= over_count;
-	else
+	} else {
 		*count_to_adjust = 0;
+	}
 }
 
 int gr_gp10b_handle_sm_exception(struct gk20a *g,
@@ -407,8 +410,9 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg)
+	if (!tsg) {
 		return -EINVAL;
+	}
 
 	gr_ctx = &tsg->gr_ctx;
 
@@ -740,8 +744,9 @@ void gr_gp10b_cb_size_default(struct gk20a *g)
 {
 	struct gr_gk20a *gr = &g->gr;
 
-	if (!gr->attrib_cb_default_size)
+	if (!gr->attrib_cb_default_size) {
 		gr->attrib_cb_default_size = 0x800;
+	}
 	gr->alpha_cb_default_size =
 		gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
 	gr->attrib_cb_gfxp_default_size =
@@ -765,8 +770,9 @@ void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
 
 	nvgpu_log_fn(g, " ");
 
-	if (alpha_cb_size > gr->alpha_cb_size)
+	if (alpha_cb_size > gr->alpha_cb_size) {
 		alpha_cb_size = gr->alpha_cb_size;
+	}
 
 	gk20a_writel(g, gr_ds_tga_constraintlogic_alpha_r(),
 		(gk20a_readl(g, gr_ds_tga_constraintlogic_alpha_r()) &
@@ -818,8 +824,9 @@ void gr_gp10b_set_circular_buffer_size(struct gk20a *g, u32 data)
 
 	nvgpu_log_fn(g, " ");
 
-	if (cb_size_steady > gr->attrib_cb_size)
+	if (cb_size_steady > gr->attrib_cb_size) {
 		cb_size_steady = gr->attrib_cb_size;
+	}
 	if (gk20a_readl(g, gr_gpc0_ppc0_cbm_beta_cb_size_r()) !=
 		gk20a_readl(g,
 			gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r())) {
@@ -889,8 +896,9 @@ int gr_gp10b_init_ctx_state(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
 	err = gr_gk20a_init_ctx_state(g);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	if (!g->gr.ctx_vars.preempt_image_size) {
 		op.method.addr =
@@ -920,8 +928,9 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 	nvgpu_log_fn(g, " ");
 
 	err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	mem->gpu_va = nvgpu_gmmu_map(vm,
 				mem,
@@ -952,29 +961,35 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 	int err = 0;
 
 	if (g->ops.gr.is_valid_gfx_class(g, class) &&
-			g->gr.ctx_vars.force_preemption_gfxp)
+			g->gr.ctx_vars.force_preemption_gfxp) {
 		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
+	}
 
 	if (g->ops.gr.is_valid_compute_class(g, class) &&
-			g->gr.ctx_vars.force_preemption_cilp)
+			g->gr.ctx_vars.force_preemption_cilp) {
 		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
+	}
 
 	/* check for invalid combinations */
-	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
+	if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0)) {
 		return -EINVAL;
+	}
 
 	if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
-		(compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
+		(compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP)) {
 		return -EINVAL;
+	}
 
 	/* Do not allow lower preemption modes than current ones */
 	if (graphics_preempt_mode &&
-		(graphics_preempt_mode < gr_ctx->graphics_preempt_mode))
+		(graphics_preempt_mode < gr_ctx->graphics_preempt_mode)) {
 		return -EINVAL;
+	}
 
 	if (compute_preempt_mode &&
-		(compute_preempt_mode < gr_ctx->compute_preempt_mode))
+		(compute_preempt_mode < gr_ctx->compute_preempt_mode)) {
 		return -EINVAL;
+	}
 
 	/* set preemption modes */
 	switch (graphics_preempt_mode) {
@@ -1079,15 +1094,18 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	gr_ctx->ctx_id_valid = false;
 
-	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP)
+	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP) {
 		graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP)
+	}
+	if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP) {
 		compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
+	}
 
 	if (graphics_preempt_mode || compute_preempt_mode) {
 		if (g->ops.gr.set_ctxsw_preemption_mode) {
@@ -1097,8 +1115,9 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 				nvgpu_err(g, "set_ctxsw_preemption_mode failed");
 				goto fail_free_gk20a_ctx;
 			}
-		} else
+		} else {
 			goto fail_free_gk20a_ctx;
+		}
 	}
 
 	nvgpu_log_fn(g, "done");
@@ -1177,8 +1196,9 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg)
+	if (!tsg) {
 		return;
+	}
 
 	gr_ctx = &tsg->gr_ctx;
 
@@ -1209,12 +1229,13 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		u32 cbes_reserve;
 
 		if (g->ops.gr.set_preemption_buffer_va) {
-			if (ctxheader->gpu_va)
+			if (ctxheader->gpu_va) {
 				g->ops.gr.set_preemption_buffer_va(g, ctxheader,
 					gr_ctx->preempt_ctxsw_buffer.gpu_va);
-			else
+			} else {
 				g->ops.gr.set_preemption_buffer_va(g, mem,
 					gr_ctx->preempt_ctxsw_buffer.gpu_va);
+			}
 		}
 
 		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, true);
@@ -1237,8 +1258,9 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 			(32 - gr_scc_pagepool_base_addr_39_8_align_bits_v()));
 		size = gr_ctx->pagepool_ctxsw_buffer.size;
 
-		if (size == g->ops.gr.pagepool_default_size(g))
+		if (size == g->ops.gr.pagepool_default_size(g)) {
 			size = gr_scc_pagepool_total_pages_hwmax_v();
+		}
 
 		g->ops.gr.commit_global_pagepool(g, gr_ctx, addr, size, true);
 
@@ -1325,9 +1347,10 @@ int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
 		gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity3_r()));
 	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
 		gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r()));
-	if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2)
+	if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
 		gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
 			gk20a_readl(g, gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r()));
+	}
 	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n",
 		gk20a_readl(g, gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r()));
 	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY0: 0x%x\n",
@@ -1340,9 +1363,10 @@ int gr_gp10b_dump_gr_status_regs(struct gk20a *g,
 		gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_3_r()));
 	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
 		gk20a_readl(g, gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r()));
-	if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2)
+	if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
 		gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
 			gk20a_readl(g, gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r()));
+	}
 	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n",
 		gk20a_readl(g, gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r()));
 	gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_ACTIVITY0: 0x%x\n",
@@ -1415,8 +1439,9 @@ static bool gr_activity_empty_or_preempted(u32 val)
 	while(val) {
 		u32 v = val & 7;
 		if (v != gr_activity_4_gpc0_empty_v() &&
-			v != gr_activity_4_gpc0_preempted_v())
+			v != gr_activity_4_gpc0_preempted_v()) {
 			return false;
+		}
 		val >>= 3;
 	}
 
@@ -1480,10 +1505,11 @@ void gr_gp10b_commit_global_attrib_cb(struct gk20a *g,
 {
 	int attrBufferSize;
 
-	if (gr_ctx->preempt_ctxsw_buffer.gpu_va)
+	if (gr_ctx->preempt_ctxsw_buffer.gpu_va) {
 		attrBufferSize = gr_ctx->betacb_ctxsw_buffer.size;
-	else
+	} else {
 		attrBufferSize = g->ops.gr.calc_global_ctx_buffer_size(g);
+	}
 
 	attrBufferSize /= gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f();
 
@@ -1544,8 +1570,9 @@ int gr_gp10b_load_smid_config(struct gk20a *g)
 	u32 max_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);
 
 	tpc_sm_id = nvgpu_kcalloc(g, gr_cwd_sm_id__size_1_v(), sizeof(u32));
-	if (!tpc_sm_id)
+	if (!tpc_sm_id) {
 		return -ENOMEM;
+	}
 
 	/* Each NV_PGRAPH_PRI_CWD_GPC_TPC_ID can store 4 TPCs.*/
 	for (i = 0; i <= ((g->gr.tpc_count-1) / 4); i++) {
@@ -1557,8 +1584,9 @@ int gr_gp10b_load_smid_config(struct gk20a *g)
 			u32 sm_id = (i * 4) + j;
 			u32 bits;
 
-			if (sm_id >= g->gr.tpc_count)
+			if (sm_id >= g->gr.tpc_count) {
 				break;
+			}
 
 			gpc_index = g->gr.sm_to_cluster[sm_id].gpc_index;
 			tpc_index = g->gr.sm_to_cluster[sm_id].tpc_index;
@@ -1612,12 +1640,13 @@ void gr_gp10b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 	nvgpu_tegra_fuse_write_bypass(g, 0x1);
 	nvgpu_tegra_fuse_write_access_sw(g, 0x0);
 
-	if (g->gr.gpc_tpc_mask[gpc_index] == 0x1)
+	if (g->gr.gpc_tpc_mask[gpc_index] == 0x1) {
 		nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, 0x2);
-	else if (g->gr.gpc_tpc_mask[gpc_index] == 0x2)
+	} else if (g->gr.gpc_tpc_mask[gpc_index] == 0x2) {
 		nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, 0x1);
-	else
+	} else {
 		nvgpu_tegra_fuse_write_opt_gpu_tpc0_disable(g, 0x0);
+	}
 }
 
 void gr_gp10b_get_access_map(struct gk20a *g,
@@ -1709,8 +1738,9 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
 
 	tsg = tsg_gk20a_from_ch(fault_ch);
-	if (!tsg)
+	if (!tsg) {
 		return -EINVAL;
+	}
 
 	gr_ctx = &tsg->gr_ctx;
 
@@ -1789,8 +1819,9 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " ");
 
 	tsg = tsg_gk20a_from_ch(fault_ch);
-	if (!tsg)
+	if (!tsg) {
 		return -EINVAL;
+	}
 
 	gr_ctx = &tsg->gr_ctx;
 
@@ -1831,8 +1862,9 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 
 	if (fault_ch) {
 		tsg = tsg_gk20a_from_ch(fault_ch);
-		if (!tsg)
+		if (!tsg) {
 			return -EINVAL;
+		}
 
 		cilp_enabled = (tsg->gr_ctx.compute_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
@@ -1842,13 +1874,15 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 				gpc, tpc, global_esr);
 
 	if (cilp_enabled && sm_debugger_attached) {
-		if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f())
+		if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f()) {
 			gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset,
 					gr_gpc0_tpc0_sm_hww_global_esr_bpt_int_pending_f());
+		}
 
-		if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f())
+		if (global_esr & gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f()) {
 			gk20a_writel(g, gr_gpc0_tpc0_sm_hww_global_esr_r() + offset,
 					gr_gpc0_tpc0_sm_hww_global_esr_single_step_complete_pending_f());
+		}
 
 		global_mask = gr_gpc0_tpc0_sm_hww_global_esr_sm_to_sm_fault_pending_f() |
 				gr_gpcs_tpcs_sm_hww_global_esr_l1_error_pending_f() |
@@ -1930,12 +1964,14 @@ static int gr_gp10b_get_cilp_preempt_pending_chid(struct gk20a *g, int *__chid)
 	chid = g->gr.cilp_preempt_pending_chid;
 
 	ch = gk20a_channel_get(gk20a_fifo_channel_from_chid(g, chid));
-	if (!ch)
+	if (!ch) {
 		return ret;
+	}
 
 	tsg = tsg_gk20a_from_ch(ch);
-	if (!tsg)
+	if (!tsg) {
 		return -EINVAL;
+	}
 
 	gr_ctx = &tsg->gr_ctx;
 
@@ -1974,13 +2010,15 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g,
 			gr_fecs_host_int_clear_ctxsw_intr1_clear_f());
 
 		ret = gr_gp10b_get_cilp_preempt_pending_chid(g, &chid);
-		if (ret)
+		if (ret) {
 			goto clean_up;
+		}
 
 		ch = gk20a_channel_get(
 			gk20a_fifo_channel_from_chid(g, chid));
-		if (!ch)
+		if (!ch) {
 			goto clean_up;
+		}
 
 
 		/* set preempt_pending to false */
@@ -2014,10 +2052,11 @@ u32 gp10b_gr_get_sm_hww_warp_esr(struct gk20a *g,
 	u32 hww_warp_esr = gk20a_readl(g,
 			gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
 
-	if (!(hww_warp_esr & gr_gpc0_tpc0_sm_hww_warp_esr_addr_valid_m()))
+	if (!(hww_warp_esr & gr_gpc0_tpc0_sm_hww_warp_esr_addr_valid_m())) {
 		hww_warp_esr = set_field(hww_warp_esr,
 			gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_m(),
 			gr_gpc0_tpc0_sm_hww_warp_esr_addr_error_type_none_f());
+	}
 
 	return hww_warp_esr;
 }
@@ -2046,8 +2085,9 @@ bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
 	int err = 0;
 
 	tsg = tsg_gk20a_from_ch(ch);
-	if (!tsg)
+	if (!tsg) {
 		return -EINVAL;
+	}
 
 	gr_ctx = &tsg->gr_ctx;
 
@@ -2058,10 +2098,11 @@ bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
 
 	if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) {
 		err = gr_gp10b_set_cilp_preempt_pending(g, ch);
-		if (err)
+		if (err) {
 			nvgpu_err(g, "unable to set CILP preempt pending");
-		else
+		} else {
 			*cilp_preempt_pending = true;
+		}
 
 		g->ops.gr.resume_all_sms(g);
 	}
@@ -2104,10 +2145,12 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
 
 		ctx_resident = gr_gp10b_suspend_context(ch,
 				&cilp_preempt_pending);
-		if (ctx_resident)
+		if (ctx_resident) {
 			local_ctx_resident_ch_fd = ch_data->channel_fd;
-		if (cilp_preempt_pending)
+		}
+		if (cilp_preempt_pending) {
 			cilp_preempt_pending_ch = ch;
+		}
 	}
 
 	nvgpu_mutex_release(&dbg_s->ch_list_lock);
@@ -2140,16 +2183,18 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
 		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
 				NVGPU_TIMER_CPU_TIMER);
 		do {
-			if (!gr_ctx->cilp_preempt_pending)
+			if (!gr_ctx->cilp_preempt_pending) {
 				break;
+			}
 
 			nvgpu_usleep_range(delay, delay * 2);
 			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 		} while (!nvgpu_timeout_expired(&timeout));
 
 		/* If cilp is still pending at this point, timeout */
-		if (gr_ctx->cilp_preempt_pending)
+		if (gr_ctx->cilp_preempt_pending) {
 			err = -ETIMEDOUT;
+		}
 	}
 
 	*ctx_resident_ch_fd = local_ctx_resident_ch_fd;
@@ -2168,25 +2213,29 @@ int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
 	int err = 0;
 
 	tsg = tsg_gk20a_from_ch(ch);
-	if (!tsg)
+	if (!tsg) {
 		return -EINVAL;
+	}
 
 	gr_ctx = &tsg->gr_ctx;
 	gr_ctx->boosted_ctx = boost;
 	mem = &gr_ctx->mem;
 
 	err = gk20a_disable_channel_tsg(g, ch);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = gk20a_fifo_preempt(g, ch);
-	if (err)
+	if (err) {
 		goto enable_ch;
+	}
 
-	if (g->ops.gr.update_boosted_ctx)
+	if (g->ops.gr.update_boosted_ctx) {
 		g->ops.gr.update_boosted_ctx(g, mem, gr_ctx);
-	else
+	} else {
 		err = -ENOSYS;
+	}
 
 enable_ch:
 	gk20a_enable_channel_tsg(g, ch);
@@ -2216,12 +2265,14 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 	int err = 0;
 
 	class = ch->obj_class;
-	if (!class)
+	if (!class) {
 		return -EINVAL;
+	}
 
 	tsg = tsg_gk20a_from_ch(ch);
-	if (!tsg)
+	if (!tsg) {
 		return -EINVAL;
+	}
 
 	vm = tsg->vm;
 	gr_ctx = &tsg->gr_ctx;
@@ -2229,15 +2280,18 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 
 	/* skip setting anything if both modes are already set */
 	if (graphics_preempt_mode &&
-		(graphics_preempt_mode == gr_ctx->graphics_preempt_mode))
+		(graphics_preempt_mode == gr_ctx->graphics_preempt_mode)) {
 		graphics_preempt_mode = 0;
+	}
 
 	if (compute_preempt_mode &&
-		(compute_preempt_mode == gr_ctx->compute_preempt_mode))
+		(compute_preempt_mode == gr_ctx->compute_preempt_mode)) {
 		compute_preempt_mode = 0;
+	}
 
-	if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
+	if (graphics_preempt_mode == 0 && compute_preempt_mode == 0) {
 		return 0;
+	}
 
 	if (g->ops.gr.set_ctxsw_preemption_mode) {
 
@@ -2257,12 +2311,14 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 	}
 
 	err = gk20a_disable_channel_tsg(g, ch);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	err = gk20a_fifo_preempt(g, ch);
-	if (err)
+	if (err) {
 		goto enable_ch;
+	}
 
 	if (g->ops.gr.update_ctxsw_preemption_mode) {
 		g->ops.gr.update_ctxsw_preemption_mode(ch->g,
@@ -762,8 +762,9 @@ int gp10b_init_hal(struct gk20a *g)
 	__nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, false);
 
 	/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
-	if (gops->fuse.check_priv_security(g))
+	if (gops->fuse.check_priv_security(g)) {
 		return -EINVAL; /* Do not boot gpu */
+	}
 
 	/* priv security dependent ops */
 	if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
@@ -143,8 +143,9 @@ void mc_gp10b_isr_stall(struct gk20a *g)
 			g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) {
 		g->ops.nvlink.isr(g);
 	}
-	if (mc_intr_0 & mc_intr_pfb_pending_f() && g->ops.fb.fbpa_isr)
+	if (mc_intr_0 & mc_intr_pfb_pending_f() && g->ops.fb.fbpa_isr) {
 		g->ops.fb.fbpa_isr(g);
+	}
 
 	nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0);
 
@@ -59,13 +59,15 @@ int gp10b_init_bar2_vm(struct gk20a *g)
 	mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K,
 		mm->bar2.aperture_size - SZ_4K,
 		mm->bar2.aperture_size, false, false, "bar2");
-	if (!mm->bar2.vm)
+	if (!mm->bar2.vm) {
 		return -ENOMEM;
+	}
 
 	/* allocate instance mem for bar2 */
 	err = g->ops.mm.alloc_inst_block(g, inst_block);
-	if (err)
+	if (err) {
 		goto clean_up_va;
+	}
 
 	g->ops.mm.init_inst_block(inst_block, mm->bar2.vm, big_page_size);
 
@@ -129,11 +131,13 @@ static void update_gmmu_pde0_locked(struct vm_gk20a *vm,
 	small_valid = attrs->pgsz == GMMU_PAGE_SIZE_SMALL;
 	big_valid = attrs->pgsz == GMMU_PAGE_SIZE_BIG;
 
-	if (small_valid)
+	if (small_valid) {
 		small_addr = phys_addr >> gmmu_new_dual_pde_address_shift_v();
+	}
 
-	if (big_valid)
+	if (big_valid) {
 		big_addr = phys_addr >> gmmu_new_dual_pde_address_big_shift_v();
+	}
 
 	if (small_valid) {
 		pde_v[2] |=
@@ -195,24 +199,28 @@ static void __update_pte(struct vm_gk20a *vm,
 
 	pte_w[0] = pte_valid | pte_addr | pte_tgt;
 
-	if (attrs->priv)
+	if (attrs->priv) {
 		pte_w[0] |= gmmu_new_pte_privilege_true_f();
+	}
 
 	pte_w[1] = phys_addr >> (24 + gmmu_new_pte_address_shift_v()) |
 		gmmu_new_pte_kind_f(attrs->kind_v) |
 		gmmu_new_pte_comptagline_f((u32)(attrs->ctag /
 				ctag_granularity));
 
-	if (attrs->rw_flag == gk20a_mem_flag_read_only)
+	if (attrs->rw_flag == gk20a_mem_flag_read_only) {
 		pte_w[0] |= gmmu_new_pte_read_only_true_f();
+	}
 
-	if (!attrs->valid && !attrs->cacheable)
+	if (!attrs->valid && !attrs->cacheable) {
 		pte_w[0] |= gmmu_new_pte_read_only_true_f();
-	else if (!attrs->cacheable)
+	} else if (!attrs->cacheable) {
 		pte_w[0] |= gmmu_new_pte_vol_true_f();
+	}
 
-	if (attrs->ctag)
+	if (attrs->ctag) {
 		attrs->ctag += page_size;
+	}
 
 }
 
@@ -235,10 +243,11 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 	u32 pd_offset = pd_offset_from_index(l, pd_idx);
 	u32 pte_w[2] = {0, 0};
 
-	if (phys_addr)
+	if (phys_addr) {
 		__update_pte(vm, pte_w, phys_addr, attrs);
-	else if (attrs->sparse)
+	} else if (attrs->sparse) {
 		__update_pte_sparse(pte_w);
+	}
 
 	pte_dbg(g, attrs,
 		"vm=%s "
@@ -283,8 +292,9 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
 	u32 i;
 	u32 pgsz = GMMU_NR_PAGE_SIZES;
 
-	if (!pd->mem)
+	if (!pd->mem) {
 		return pgsz;
+	}
 
 	for (i = 0; i < GP10B_PDE0_ENTRY_SIZE >> 2; i++) {
 		pde_v[i] = nvgpu_mem_rd32(g, pd->mem, pde_offset + i);
@@ -300,8 +310,9 @@ static u32 gp10b_get_pde0_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
 			gmmu_new_dual_pde_address_small_sys_f(~0))) <<
 			gmmu_new_dual_pde_address_shift_v();
 
-		if (addr)
+		if (addr) {
 			pgsz = GMMU_PAGE_SIZE_SMALL;
+		}
 	}
 
 	if (pde_v[0] & (gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f() |
@@ -179,11 +179,13 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
 
 	/* GM20B PMU supports loading FECS and GPCCS only */
-	if (falconidmask == 0)
+	if (falconidmask == 0) {
 		return -EINVAL;
+	}
 	if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
-				(1 << LSF_FALCON_ID_GPCCS)))
-		return -EINVAL;
+				(1 << LSF_FALCON_ID_GPCCS))) {
+		return -EINVAL;
+	}
 	g->pmu_lsf_loaded_falcon_id = 0;
 	/* check whether pmu is ready to bootstrap lsf if not wait for it */
 	if (!g->pmu_lsf_pmu_wpr_init_done) {
@@ -201,8 +203,9 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	pmu_wait_message_cond(&g->pmu,
 			gk20a_get_gr_idle_timeout(g),
 			&g->pmu_lsf_loaded_falcon_id, falconidmask);
-	if (g->pmu_lsf_loaded_falcon_id != falconidmask)
+	if (g->pmu_lsf_loaded_falcon_id != falconidmask) {
 		return -ETIMEDOUT;
+	}
 	return 0;
 }
 
@@ -247,8 +250,9 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
 		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 				pmu_handle_gr_param_msg, pmu, &seq, ~0);
 
-	} else
+	} else {
 		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -330,8 +334,9 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g)
 		pwr_fbif_transcfg_target_noncoherent_sysmem_f());
 
 	err = g->ops.pmu.pmu_nsbootstrap(pmu);
-	if (err)
+	if (err) {
 		return err;
+	}
 
 	nvgpu_log_fn(g, "done");
 	return 0;