gpu: nvgpu: update gr_ctx patch and pm setup functions

The set_patch_addr parameter to nvgpu_gr_ctx_set_patch_ctx was
redundant. Remove it.
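
For illustration, the call shape before and after (the one call site
updated in this diff passed true unconditionally, which is what made
the flag redundant):

	/* before: flag selecting whether to program the patch address */
	nvgpu_gr_ctx_set_patch_ctx(g, gr_ctx, true);

	/* after: patch count and patch address are always programmed together */
	nvgpu_gr_ctx_set_patch_ctx(g, gr_ctx);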

Add new functions: nvgpu_gr_ctx_set_hwpm_pm_mode to set the PM mode,
and nvgpu_gr_ctx_set_hwpm_ptr to set the PM pointer in gr_ctx. Rename
the subctx function to nvgpu_gr_subctx_set_hwpm_ptr.
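
In sketch form, the split replaces one flag-driven function that could
never actually fail with two single-purpose void functions (signatures
as in the header hunks below):

	/* before: one entry point, behavior toggled by set_pm_ptr,
	 * returning an int that was always 0 */
	int nvgpu_gr_ctx_set_hwpm_mode(struct gk20a *g,
		struct nvgpu_gr_ctx *gr_ctx, bool set_pm_ptr);

	/* after: each caller states exactly what it needs */
	void nvgpu_gr_ctx_set_hwpm_pm_mode(struct gk20a *g,
		struct nvgpu_gr_ctx *gr_ctx);
	void nvgpu_gr_ctx_set_hwpm_ptr(struct gk20a *g,
		struct nvgpu_gr_ctx *gr_ctx);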

This simplifies the logic in gr_gk20a_update_hwpm_ctxsw_mode for
setting the PM mode and PM pointer: the channel loop is now needed
only for subcontexts.
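
A sketch of the resulting flow (matching the update_hwpm_ctxsw_mode
hunk below):

	/* PM mode is set once on the TSG's gr_ctx */
	nvgpu_gr_ctx_set_hwpm_pm_mode(g, gr_ctx);

	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS)) {
		/* only subcontexts need the per-channel walk, to update
		 * each channel's ctx header */
		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
		nvgpu_list_for_each_entry(ch, &tsg->ch_list,
				nvgpu_channel, ch_entry) {
			nvgpu_gr_subctx_set_hwpm_ptr(g, ch->subctx, gr_ctx);
		}
		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
	} else {
		nvgpu_gr_ctx_set_hwpm_ptr(g, gr_ctx);
	}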

Bug 3677982

Change-Id: I44acb09f6296ba8d510e278910188864f39e7157
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2743724
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
commit 931e5f8220 (parent 7a956cf5a2)
Author: Sagar Kamble <skamble@nvidia.com>
Date:   2022-07-12 16:10:12 +05:30
Committed-by: mobile promotions

5 changed files with 25 additions and 37 deletions


@@ -977,15 +977,13 @@ void nvgpu_gr_ctx_reset_patch_count(struct gk20a *g,
 	}
 }
 
-void nvgpu_gr_ctx_set_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
-	bool set_patch_addr)
+void nvgpu_gr_ctx_set_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx)
 {
 	g->ops.gr.ctxsw_prog.set_patch_count(g, &gr_ctx->mem,
 		gr_ctx->patch_ctx.data_count);
-	if (set_patch_addr) {
-		g->ops.gr.ctxsw_prog.set_patch_addr(g, &gr_ctx->mem,
-			gr_ctx->patch_ctx.mem.gpu_va);
-	}
+	g->ops.gr.ctxsw_prog.set_patch_addr(g, &gr_ctx->mem,
+		gr_ctx->patch_ctx.mem.gpu_va);
 }
 
 int nvgpu_gr_ctx_alloc_pm_ctx(struct gk20a *g,
@@ -1160,16 +1158,15 @@ int nvgpu_gr_ctx_prepare_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 	return ret;
 }
 
-int nvgpu_gr_ctx_set_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
-	bool set_pm_ptr)
+void nvgpu_gr_ctx_set_hwpm_pm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx)
 {
 	g->ops.gr.ctxsw_prog.set_pm_mode(g, &gr_ctx->mem,
 		gr_ctx->pm_ctx.pm_mode);
-	if (set_pm_ptr) {
-		g->ops.gr.ctxsw_prog.set_pm_ptr(g, &gr_ctx->mem,
-			gr_ctx->pm_ctx.gpu_va);
-	}
-	return 0;
+}
+
+void nvgpu_gr_ctx_set_hwpm_ptr(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx)
+{
+	g->ops.gr.ctxsw_prog.set_pm_ptr(g, &gr_ctx->mem,
+		gr_ctx->pm_ctx.gpu_va);
 }
 
 #endif /* CONFIG_NVGPU_DEBUGGER */


@@ -143,7 +143,7 @@ void nvgpu_gr_subctx_set_preemption_buffer_va(struct gk20a *g,
 #endif /* CONFIG_NVGPU_GFXP */
 
 #ifdef CONFIG_NVGPU_DEBUGGER
-void nvgpu_gr_subctx_set_hwpm_mode(struct gk20a *g,
+void nvgpu_gr_subctx_set_hwpm_ptr(struct gk20a *g,
 	struct nvgpu_gr_subctx *subctx, struct nvgpu_gr_ctx *gr_ctx)
 {
 	g->ops.gr.ctxsw_prog.set_pm_ptr(g, &subctx->ctx_header,


@@ -128,25 +128,18 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		goto out;
 	}
 
-	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
-		if (ch->subctx != NULL) {
-			err = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, false);
-			if (err != 0) {
-				nvgpu_err(g, "chid: %d set_hwpm_mode failed",
-					ch->chid);
-				ret = err;
-				continue;
-			}
-			nvgpu_gr_subctx_set_hwpm_mode(g, ch->subctx, gr_ctx);
-		} else {
-			ret = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, true);
-			break;
-		}
+	nvgpu_gr_ctx_set_hwpm_pm_mode(g, gr_ctx);
+
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS)) {
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
+		nvgpu_list_for_each_entry(ch, &tsg->ch_list,
+				nvgpu_channel, ch_entry) {
+			nvgpu_gr_subctx_set_hwpm_ptr(g, ch->subctx, gr_ctx);
+		}
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
+	} else {
+		nvgpu_gr_ctx_set_hwpm_ptr(g, gr_ctx);
 	}
-	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 out:
 	g->ops.tsg.enable(tsg);
@@ -662,8 +655,7 @@ int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 			nvgpu_gr_ctx_patch_write(g, gr_ctx,
 						 addr, data, true);
 
-			nvgpu_gr_ctx_set_patch_ctx(g, gr_ctx,
-					true);
+			nvgpu_gr_ctx_set_patch_ctx(g, gr_ctx);
 
 			/* we're not caching these on cpu side,
 			   but later watch for it */


@@ -568,8 +568,7 @@ void nvgpu_gr_ctx_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
 void nvgpu_gr_ctx_reset_patch_count(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx);
-void nvgpu_gr_ctx_set_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
-	bool set_patch_addr);
+void nvgpu_gr_ctx_set_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
 
 u32 nvgpu_gr_ctx_get_ctx_id(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
 u32 nvgpu_gr_ctx_read_ctx_id(struct nvgpu_gr_ctx *gr_ctx);
@@ -584,8 +583,8 @@ int nvgpu_gr_ctx_set_smpc_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 int nvgpu_gr_ctx_prepare_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 	u32 mode, bool *skip_update);
-int nvgpu_gr_ctx_set_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
-	bool set_pm_ptr);
+void nvgpu_gr_ctx_set_hwpm_pm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
+void nvgpu_gr_ctx_set_hwpm_ptr(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
 
 #ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 void nvgpu_gr_ctx_set_boosted_ctx(struct nvgpu_gr_ctx *gr_ctx, bool boost);


@@ -107,7 +107,7 @@ void nvgpu_gr_subctx_set_preemption_buffer_va(struct gk20a *g,
 #endif
 
 #ifdef CONFIG_NVGPU_DEBUGGER
-void nvgpu_gr_subctx_set_hwpm_mode(struct gk20a *g,
+void nvgpu_gr_subctx_set_hwpm_ptr(struct gk20a *g,
 	struct nvgpu_gr_subctx *subctx, struct nvgpu_gr_ctx *gr_ctx);
 #endif
 
 #endif /* NVGPU_GR_SUBCTX_H */