gpu: nvgpu: add error codes to mm_l2_flush

gv11b_mm_l2_flush was not checking the error codes returned by the
functions it calls. MISRA Rule-17.7 requires that the return value of
every non-void function be used. This patch changes the l2_flush HAL
to return an error code, checks the return values of the functions it
calls, and propagates errors upstream.

JIRA NVGPU-677

Change-Id: I9005c6d3a406f9665d318014d21a1da34f87ca30
Signed-off-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1998809
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit e9c00c0da9 (parent 6bddc121c3), authored by Nicolas Benech on
2019-01-17 18:07:35 -05:00 and committed by mobile promotions.
14 changed files with 107 additions and 39 deletions.
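The following is a minimal, hedged C sketch (not code from this patch) of the
two Rule-17.7 idioms the change applies: check-and-propagate, and an explicit
(void) discard where no error path exists. All names here (demo_hal,
demo_l2_flush, demo_suspend, demo_invalidate) are hypothetical; only the
patterns mirror the real diff hunks below.

#include <stdbool.h>
#include <stdio.h>

struct demo_hal {
	/* The HAL op previously returned void; it now returns 0 or -errno. */
	int (*l2_flush)(void *g, bool invalidate);
};

/* Hypothetical implementation that always succeeds. */
static int demo_l2_flush(void *g, bool invalidate)
{
	(void)g;
	(void)invalidate;
	return 0;
}

/* Check-and-propagate, as in nvgpu_mm_suspend() after this patch. */
static int demo_suspend(struct demo_hal *hal, void *g)
{
	int err = hal->l2_flush(g, false);

	if (err != 0) {
		fprintf(stderr, "l2_flush failed err=%d\n", err);
		return err;
	}
	return 0;
}

/* Explicit discard where the caller cannot propagate the error,
 * as in vgpu_mm_l2_invalidate() after this patch. */
static void demo_invalidate(struct demo_hal *hal, void *g)
{
	(void)hal->l2_flush(g, true);
}

int main(void)
{
	struct demo_hal hal = { .l2_flush = demo_l2_flush };

	demo_invalidate(&hal, NULL);
	return demo_suspend(&hal, NULL);
}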


@@ -278,7 +278,9 @@ void nvgpu_gr_global_ctx_load_local_golden_image(struct gk20a *g,
 {
 	/* Channel gr_ctx buffer is gpu cacheable.
	   Flush and invalidate before cpu update. */
-	g->ops.mm.l2_flush(g, true);
+	if (g->ops.mm.l2_flush(g, true) != 0) {
+		nvgpu_err(g, "l2_flush failed");
+	}
 
 	nvgpu_mem_wr_n(g, target_mem, 0, local_golden_image->context,
 		local_golden_image->size);


@@ -260,7 +260,9 @@ void gp10b_ltc_lts_isr(struct gk20a *g, unsigned int ltc, unsigned int slice)
 			nvgpu_writel_check(g,
 				ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
 				ecc_stats_reg_val);
-			g->ops.mm.l2_flush(g, true);
+			if (g->ops.mm.l2_flush(g, true) != 0) {
+				nvgpu_err(g, "l2_flush failed");
+			}
 		}
 		if ((ltc_intr &
 			ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) != 0U) {


@@ -891,11 +891,15 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 	}
 
 	if (batch == NULL) {
-		gk20a_mm_l2_flush(g, true);
+		if (gk20a_mm_l2_flush(g, true) != 0) {
+			nvgpu_err(g, "gk20a_mm_l2_flush[1] failed");
+		}
 		g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
 	} else {
 		if (!batch->gpu_l2_flushed) {
-			gk20a_mm_l2_flush(g, true);
+			if (gk20a_mm_l2_flush(g, true) != 0) {
+				nvgpu_err(g, "gk20a_mm_l2_flush[2] failed");
+			}
 			batch->gpu_l2_flushed = true;
 		}
 		batch->need_tlb_invalidate = true;


@@ -116,12 +116,18 @@ u32 nvgpu_vm_get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 int nvgpu_mm_suspend(struct gk20a *g)
 {
+	int err;
+
 	nvgpu_log_info(g, "MM suspend running...");
 
 	nvgpu_vidmem_thread_pause_sync(&g->mm);
 
 	g->ops.mm.cbc_clean(g);
-	g->ops.mm.l2_flush(g, false);
+	err = g->ops.mm.l2_flush(g, false);
+	if (err != 0) {
+		nvgpu_err(g, "l2_flush failed");
+		return err;
+	}
 
 	if (g->ops.fb.disable_hub_intr != NULL) {
 		g->ops.fb.disable_hub_intr(g);
@@ -133,7 +139,7 @@ int nvgpu_mm_suspend(struct gk20a *g)
 	nvgpu_log_info(g, "MM suspend done!");
-	return 0;
+	return err;
 }
 
 u64 nvgpu_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block)


@@ -81,14 +81,17 @@ static void gr_gk20a_enable_elcg(struct gk20a *g);
 u32 gr_gk20a_get_ctx_id(struct gk20a *g, struct nvgpu_mem *ctx_mem)
 {
-	u32 ctx_id;
+	/* Initialize ctx_id to invalid value */
+	u32 ctx_id = 0;
 
 	/* Channel gr_ctx buffer is gpu cacheable.
	   Flush and invalidate before cpu update. */
-	g->ops.mm.l2_flush(g, true);
-
-	ctx_id = g->ops.gr.ctxsw_prog.get_main_image_ctx_id(g, ctx_mem);
-	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", ctx_id);
+	if (g->ops.mm.l2_flush(g, true) != 0) {
+		nvgpu_err(g, "l2_flush failed");
+	} else {
+		ctx_id = g->ops.gr.ctxsw_prog.get_main_image_ctx_id(g, ctx_mem);
+		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, "ctx_id: 0x%x", ctx_id);
+	}
 
 	return ctx_id;
 }
@@ -1394,7 +1397,11 @@ restore_fe_go_idle:
 		goto clean_up;
 	}
 
-	g->ops.mm.l2_flush(g, true);
+	err = g->ops.mm.l2_flush(g, true);
+	if (err != 0) {
+		nvgpu_err(g, "l2_flush failed");
+		goto clean_up;
+	}
 
 	g->ops.gr.ctxsw_prog.set_zcull_mode_no_ctxsw(g, gr_mem);
 	g->ops.gr.ctxsw_prog.set_zcull_ptr(g, gr_mem, 0);
@@ -1462,7 +1469,11 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 	/* Channel gr_ctx buffer is gpu cacheable.
	   Flush and invalidate before cpu update. */
-	g->ops.mm.l2_flush(g, true);
+	ret = g->ops.mm.l2_flush(g, true);
+	if (ret != 0) {
+		nvgpu_err(g, "l2_flush failed");
+		goto out;
+	}
 
 	g->ops.gr.ctxsw_prog.set_pm_smpc_mode(g, mem, enable_smpc_ctxsw);
@@ -1546,7 +1557,11 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	/* Channel gr_ctx buffer is gpu cacheable.
	   Flush and invalidate before cpu update. */
-	g->ops.mm.l2_flush(g, true);
+	ret = g->ops.mm.l2_flush(g, true);
+	if (ret != 0) {
+		nvgpu_err(g, "l2_flush failed");
+		return ret;
+	}
 
 	if (mode != NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW) {
 		/* Allocate buffer if necessary */
@@ -7404,7 +7419,11 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 		goto cleanup;
 	}
 
-	g->ops.mm.l2_flush(g, true);
+	err = g->ops.mm.l2_flush(g, true);
+	if (err != 0) {
+		nvgpu_err(g, "l2_flush failed");
+		goto cleanup;
+	}
 
 	/* write to appropriate place in context image,
	 * first have to figure out where that really is */


@@ -553,12 +553,13 @@ void gk20a_mm_l2_invalidate(struct gk20a *g)
 	gk20a_idle_nosuspend(g);
 }
 
-void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
+int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
 	struct mm_gk20a *mm = &g->mm;
 	u32 data;
 	struct nvgpu_timeout timeout;
 	u32 retries = 2000;
+	int err = -ETIMEDOUT;
 
 	nvgpu_log_fn(g, " ");
@@ -592,6 +593,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 			nvgpu_log_info(g, "l2_flush_dirty 0x%x", data);
 			nvgpu_udelay(5);
 		} else {
+			err = 0;
 			break;
 		}
 	} while (nvgpu_timeout_expired_msg(&timeout,
@@ -607,6 +609,8 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 hw_was_off:
 	gk20a_idle_nosuspend(g);
+
+	return err;
 }
 
 void gk20a_mm_cbc_clean(struct gk20a *g)


@@ -73,7 +73,7 @@ struct gk20a;
 struct channel_gk20a;
 
 int gk20a_mm_fb_flush(struct gk20a *g);
-void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate);
+int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate);
 void gk20a_mm_cbc_clean(struct gk20a *g);
 void gk20a_mm_l2_invalidate(struct gk20a *g);


@@ -203,18 +203,38 @@ int gv11b_init_mm_setup_hw(struct gk20a *g)
 	return err;
 }
 
-void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
+int gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
+	int err = 0;
+
 	nvgpu_log(g, gpu_dbg_fn, "gv11b_mm_l2_flush");
 
-	g->ops.mm.fb_flush(g);
-	gk20a_mm_l2_flush(g, invalidate);
-	if (g->ops.bus.bar1_bind != NULL) {
-		g->ops.fb.tlb_invalidate(g,
-			g->mm.bar1.vm->pdb.mem);
-	} else {
-		g->ops.mm.fb_flush(g);
-	}
+	err = g->ops.mm.fb_flush(g);
+	if (err != 0) {
+		nvgpu_err(g, "mm.fb_flush()[1] failed err=%d", err);
+		return err;
+	}
+
+	err = gk20a_mm_l2_flush(g, invalidate);
+	if (err != 0) {
+		nvgpu_err(g, "gk20a_mm_l2_flush failed");
+		return err;
+	}
+
+	if (g->ops.bus.bar1_bind != NULL) {
+		err = g->ops.fb.tlb_invalidate(g,
+			g->mm.bar1.vm->pdb.mem);
+		if (err != 0) {
+			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
+			return err;
+		}
+	} else {
+		err = g->ops.mm.fb_flush(g);
+		if (err != 0) {
+			nvgpu_err(g, "mm.fb_flush()[2] failed err=%d", err);
+			return err;
+		}
+	}
+
+	return err;
 }
 
 /*


@@ -33,7 +33,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
 		struct vm_gk20a *vm, u32 big_page_size);
 bool gv11b_mm_mmu_fault_pending(struct gk20a *g);
 int gv11b_init_mm_setup_hw(struct gk20a *g);
-void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate);
+int gv11b_mm_l2_flush(struct gk20a *g, bool invalidate);
 u64 gv11b_gpu_phys_addr(struct gk20a *g,
 		struct nvgpu_gmmu_attrs *attrs, u64 phys);
 void gv11b_mm_fault_info_mem_destroy(struct gk20a *g);


@@ -102,6 +102,7 @@ int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
 	struct gk20a *g = c->g;
 	struct tsg_gk20a *tsg;
 	struct nvgpu_gr_ctx *gr_ctx;
+	int err = 0;
 
 	tsg = tsg_gk20a_from_ch(c);
 	if (tsg == NULL) {
@@ -110,7 +111,11 @@ int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
 
 	gr_ctx = tsg->gr_ctx;
 
-	g->ops.mm.l2_flush(g, true);
+	err = g->ops.mm.l2_flush(g, true);
+	if (err != 0) {
+		nvgpu_err(g, "l2_flush failed");
+		return err;
+	}
 
 	/* set priv access map */
 	g->ops.gr.ctxsw_prog.set_priv_access_map_addr(g, ctxheader,
@@ -129,7 +134,7 @@ int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
 	g->ops.gr.ctxsw_prog.set_type_per_veid_header(g, ctxheader);
 
-	return 0;
+	return err;
 }
 
 static void gv11b_subctx_commit_valid_mask(struct vm_gk20a *vm,


@@ -1094,7 +1094,7 @@ struct gpu_ops {
 				struct channel_gk20a *ch);
 		int (*fb_flush)(struct gk20a *g);
 		void (*l2_invalidate)(struct gk20a *g);
-		void (*l2_flush)(struct gk20a *g, bool invalidate);
+		int (*l2_flush)(struct gk20a *g, bool invalidate);
 		void (*cbc_clean)(struct gk20a *g);
 		void (*set_big_page_size)(struct gk20a *g,
 				struct nvgpu_mem *mem, u32 size);


@@ -607,11 +607,17 @@ static int nvgpu_gpu_ioctl_l2_fb_ops(struct gk20a *g,
 	    (!args->l2_flush && args->l2_invalidate))
 		return -EINVAL;
 
-	if (args->l2_flush)
-		g->ops.mm.l2_flush(g, args->l2_invalidate ? true : false);
+	if (args->l2_flush) {
+		err = g->ops.mm.l2_flush(g, args->l2_invalidate ? true : false);
+		if (err != 0) {
+			nvgpu_err(g, "l2_flush failed");
+			return err;
+		}
+	}
 
-	if (args->fb_flush)
+	if (args->fb_flush) {
 		g->ops.mm.fb_flush(g);
+	}
 
 	return err;
 }


@@ -224,7 +224,7 @@ int vgpu_vm_bind_channel(struct vm_gk20a *vm,
 	return err;
 }
 
-static void vgpu_cache_maint(u64 handle, u8 op)
+static int vgpu_cache_maint(u64 handle, u8 op)
 {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_cache_maint_params *p = &msg.params.cache_maint;
@@ -235,6 +235,7 @@ static void vgpu_cache_maint(u64 handle, u8 op)
 	p->op = op;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
+	return err;
 }
 
 int vgpu_mm_fb_flush(struct gk20a *g)
@@ -242,8 +243,7 @@ int vgpu_mm_fb_flush(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
-	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
-	return 0;
+	return vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_FB_FLUSH);
 }
 
 void vgpu_mm_l2_invalidate(struct gk20a *g)
@@ -251,10 +251,10 @@ void vgpu_mm_l2_invalidate(struct gk20a *g)
 	nvgpu_log_fn(g, " ");
 
-	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
+	(void) vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
 }
 
-void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
+int vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
 	u8 op;
@@ -265,7 +265,7 @@ void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 	else
 		op = TEGRA_VGPU_L2_MAINT_FLUSH;
 
-	vgpu_cache_maint(vgpu_get_handle(g), op);
+	return vgpu_cache_maint(vgpu_get_handle(g), op);
 }
 
 int vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)


@@ -42,7 +42,7 @@ int vgpu_vm_bind_channel(struct vm_gk20a *vm,
 		struct channel_gk20a *ch);
 int vgpu_mm_fb_flush(struct gk20a *g);
 void vgpu_mm_l2_invalidate(struct gk20a *g);
-void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate);
+int vgpu_mm_l2_flush(struct gk20a *g, bool invalidate);
 int vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb);
 void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable);
 #endif