mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 18:42:29 +03:00
gpu: nvgpu: delete gr_gv11b_update_ctxsw_preemption_mode()
There is nothing h/w-specific in gr_gv11b_update_ctxsw_preemption_mode() anymore. Delete it and re-use the gp10b-specific HAL for Volta/TU104. Update gr_gp10b_update_ctxsw_preemption_mode() to call platform-specific HALs if defined. Jira NVGPU-1887 Change-Id: Idae9ebf780b1e76abf847d8b39aa40c0e0560084 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/2084751 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
8586aca4de
commit
f1402db43f
@@ -159,7 +159,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
|
||||
.init_ctxsw_preemption_mode =
|
||||
vgpu_gr_init_ctxsw_preemption_mode,
|
||||
.update_ctxsw_preemption_mode =
|
||||
gr_gv11b_update_ctxsw_preemption_mode,
|
||||
gr_gp10b_update_ctxsw_preemption_mode,
|
||||
.dump_gr_regs = NULL,
|
||||
.update_pc_sampling = vgpu_gr_update_pc_sampling,
|
||||
.get_rop_l2_en_mask = vgpu_gr_rop_l2_en_mask,
|
||||
|
||||
@@ -726,6 +726,15 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
|
||||
|
||||
g->ops.gr.init.commit_cbes_reserve(g, gr_ctx, true);
|
||||
|
||||
if (g->ops.gr.init.gfxp_wfi_timeout != NULL) {
|
||||
g->ops.gr.init.gfxp_wfi_timeout(g, gr_ctx,
|
||||
g->gr.gfxp_wfi_timeout_count, true);
|
||||
}
|
||||
|
||||
if (g->ops.gr.init.commit_gfxp_rtv_cb != NULL) {
|
||||
g->ops.gr.init.commit_gfxp_rtv_cb(g, gr_ctx, true);
|
||||
}
|
||||
|
||||
nvgpu_gr_ctx_patch_write_end(g, gr_ctx, true);
|
||||
}
|
||||
|
||||
|
||||
@@ -1347,65 +1347,6 @@ void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data)
|
||||
}
|
||||
}
|
||||
|
||||
void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
|
||||
struct nvgpu_gr_ctx *gr_ctx, struct nvgpu_gr_subctx *subctx)
|
||||
{
|
||||
int err;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
nvgpu_gr_ctx_set_preemption_modes(g, gr_ctx);
|
||||
|
||||
if (gr_ctx->preempt_ctxsw_buffer.gpu_va != 0ULL) {
|
||||
u64 addr;
|
||||
u32 size;
|
||||
|
||||
if (subctx != NULL) {
|
||||
nvgpu_gr_subctx_set_preemption_buffer_va(g, subctx,
|
||||
gr_ctx);
|
||||
} else {
|
||||
nvgpu_gr_ctx_set_preemption_buffer_va(g, gr_ctx);
|
||||
}
|
||||
|
||||
err = nvgpu_gr_ctx_patch_write_begin(g, gr_ctx, true);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "can't map patch context");
|
||||
goto out;
|
||||
}
|
||||
|
||||
addr = gr_ctx->betacb_ctxsw_buffer.gpu_va;
|
||||
g->ops.gr.init.commit_global_attrib_cb(g, gr_ctx,
|
||||
nvgpu_gr_config_get_tpc_count(g->gr.config),
|
||||
nvgpu_gr_config_get_max_tpc_count(g->gr.config), addr,
|
||||
true);
|
||||
|
||||
addr = gr_ctx->pagepool_ctxsw_buffer.gpu_va;
|
||||
nvgpu_assert(gr_ctx->pagepool_ctxsw_buffer.size <= U32_MAX);
|
||||
size = (u32)gr_ctx->pagepool_ctxsw_buffer.size;
|
||||
|
||||
g->ops.gr.init.commit_global_pagepool(g, gr_ctx, addr, size,
|
||||
true, false);
|
||||
|
||||
addr = gr_ctx->spill_ctxsw_buffer.gpu_va;
|
||||
nvgpu_assert(gr_ctx->spill_ctxsw_buffer.size <= U32_MAX);
|
||||
size = (u32)gr_ctx->spill_ctxsw_buffer.size;
|
||||
|
||||
g->ops.gr.init.commit_ctxsw_spill(g, gr_ctx, addr, size, true);
|
||||
|
||||
g->ops.gr.init.commit_cbes_reserve(g, gr_ctx, true);
|
||||
g->ops.gr.init.gfxp_wfi_timeout(g, gr_ctx,
|
||||
g->gr.gfxp_wfi_timeout_count, true);
|
||||
|
||||
if (g->ops.gr.init.commit_gfxp_rtv_cb != NULL) {
|
||||
g->ops.gr.init.commit_gfxp_rtv_cb(g, gr_ctx, true);
|
||||
}
|
||||
|
||||
nvgpu_gr_ctx_patch_write_end(g, gr_ctx, true);
|
||||
}
|
||||
|
||||
out:
|
||||
nvgpu_log_fn(g, "done");
|
||||
}
|
||||
static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g,
|
||||
struct gk20a_debug_output *o,
|
||||
u32 gpc, u32 tpc, u32 sm, u32 offset)
|
||||
|
||||
@@ -172,8 +172,6 @@ int gr_gv11b_init_preemption_state(struct gk20a *g);
|
||||
void gr_gv11b_init_gfxp_wfi_timeout_count(struct gk20a *g);
|
||||
unsigned long gr_gv11b_get_max_gfxp_wfi_timeout_count(struct gk20a *g);
|
||||
|
||||
void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
|
||||
struct nvgpu_gr_ctx *gr_ctx, struct nvgpu_gr_subctx *subctx);
|
||||
int gr_gv11b_handle_ssync_hww(struct gk20a *g, u32 *ssync_esr);
|
||||
u32 gv11b_gr_sm_offset(struct gk20a *g, u32 sm);
|
||||
|
||||
|
||||
@@ -353,7 +353,7 @@ static const struct gpu_ops gv11b_ops = {
|
||||
.init_ctxsw_preemption_mode =
|
||||
gr_gp10b_init_ctxsw_preemption_mode,
|
||||
.update_ctxsw_preemption_mode =
|
||||
gr_gv11b_update_ctxsw_preemption_mode,
|
||||
gr_gp10b_update_ctxsw_preemption_mode,
|
||||
.dump_gr_regs = gr_gv11b_dump_gr_status_regs,
|
||||
.update_pc_sampling = gr_gm20b_update_pc_sampling,
|
||||
.get_rop_l2_en_mask = gr_gm20b_rop_l2_en_mask,
|
||||
|
||||
@@ -422,7 +422,7 @@ static const struct gpu_ops tu104_ops = {
|
||||
.init_ctxsw_preemption_mode =
|
||||
gr_gp10b_init_ctxsw_preemption_mode,
|
||||
.update_ctxsw_preemption_mode =
|
||||
gr_gv11b_update_ctxsw_preemption_mode,
|
||||
gr_gp10b_update_ctxsw_preemption_mode,
|
||||
.dump_gr_regs = gr_gv11b_dump_gr_status_regs,
|
||||
.update_pc_sampling = gr_gm20b_update_pc_sampling,
|
||||
.get_rop_l2_en_mask = gr_gm20b_rop_l2_en_mask,
|
||||
|
||||
Reference in New Issue
Block a user