Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-22 17:36:20 +03:00
gpu: nvgpu: update gr_ctx patch and pm setup functions
The set_patch_addr parameter to nvgpu_gr_ctx_set_patch_ctx() was redundant;
remove it.

Introduce nvgpu_gr_ctx_set_hwpm_pm_mode() to set the PM mode and
nvgpu_gr_ctx_set_hwpm_ptr() to set the PM pointer in gr_ctx. Rename the
subctx function to nvgpu_gr_subctx_set_hwpm_ptr().

This simplifies the logic in gr_gk20a_update_hwpm_ctxsw_mode() for setting
the PM mode and PM pointer; the channel loop is needed only for
subcontexts.

Bug 3677982

Change-Id: I44acb09f6296ba8d510e278910188864f39e7157
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2743724
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Parent: 7a956cf5a2
Commit: 931e5f8220
Committed by: mobile promotions
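Before the hunks, a minimal, self-contained sketch of the call pattern this commit establishes on the HWPM path. All struct layouts, field names, and values below are hypothetical stand-ins for illustration (the real types live in the nvgpu headers); only the function names and the set-mode-once / set-pointer-per-context flow mirror the diff that follows.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the nvgpu types referenced in the diff. */
struct gk20a { int unused; };
struct nvgpu_gr_ctx { unsigned int pm_mode; unsigned long long pm_gpu_va; };
struct nvgpu_gr_subctx { unsigned long long ctx_header_pm_ptr; };

/* Stub of the new mode-only helper; the real one calls
 * g->ops.gr.ctxsw_prog.set_pm_mode() on the gr_ctx image. */
static void nvgpu_gr_ctx_set_hwpm_pm_mode(struct gk20a *g,
					   struct nvgpu_gr_ctx *gr_ctx)
{
	(void)g;
	printf("PM mode %u -> gr_ctx image\n", gr_ctx->pm_mode);
}

/* Stub of the new pointer-only helper; the real one calls
 * g->ops.gr.ctxsw_prog.set_pm_ptr() on the gr_ctx image. */
static void nvgpu_gr_ctx_set_hwpm_ptr(struct gk20a *g,
				       struct nvgpu_gr_ctx *gr_ctx)
{
	(void)g;
	printf("PM ptr 0x%llx -> gr_ctx image\n", gr_ctx->pm_gpu_va);
}

/* Stub of the renamed subctx helper; the real one writes the PM
 * pointer into the channel's subcontext header instead. */
static void nvgpu_gr_subctx_set_hwpm_ptr(struct gk20a *g,
					  struct nvgpu_gr_subctx *subctx,
					  struct nvgpu_gr_ctx *gr_ctx)
{
	(void)g;
	subctx->ctx_header_pm_ptr = gr_ctx->pm_gpu_va;
}

int main(void)
{
	struct gk20a g = { 0 };
	struct nvgpu_gr_ctx gr_ctx = { .pm_mode = 1U, .pm_gpu_va = 0x40000ULL };
	struct nvgpu_gr_subctx subctx = { 0 };
	bool subctxs = true;	/* stands in for NVGPU_SUPPORT_TSG_SUBCONTEXTS */

	/* The mode is programmed exactly once, on the shared gr_ctx. */
	nvgpu_gr_ctx_set_hwpm_pm_mode(&g, &gr_ctx);

	/* The pointer lands either in each channel's subctx header
	 * (one call per channel in the real loop) or in gr_ctx itself. */
	if (subctxs)
		nvgpu_gr_subctx_set_hwpm_ptr(&g, &subctx, &gr_ctx);
	else
		nvgpu_gr_ctx_set_hwpm_ptr(&g, &gr_ctx);

	printf("subctx PM ptr: 0x%llx\n", subctx.ctx_header_pm_ptr);
	return 0;
}

With the pointer write split out, the channel walk only matters when subcontexts are enabled, which is exactly the simplification visible in the gr_gk20a_update_hwpm_ctxsw_mode hunk below.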
@@ -977,16 +977,14 @@ void nvgpu_gr_ctx_reset_patch_count(struct gk20a *g,
 	}
 }
 
-void nvgpu_gr_ctx_set_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
-	bool set_patch_addr)
+void nvgpu_gr_ctx_set_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx)
 {
 	g->ops.gr.ctxsw_prog.set_patch_count(g, &gr_ctx->mem,
 		gr_ctx->patch_ctx.data_count);
-	if (set_patch_addr) {
-		g->ops.gr.ctxsw_prog.set_patch_addr(g, &gr_ctx->mem,
-			gr_ctx->patch_ctx.mem.gpu_va);
-	}
+	g->ops.gr.ctxsw_prog.set_patch_addr(g, &gr_ctx->mem,
+		gr_ctx->patch_ctx.mem.gpu_va);
 }
 
 int nvgpu_gr_ctx_alloc_pm_ctx(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx,
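The hunk above is the same pattern in miniature: the boolean is gone and the address write is unconditional. Here is a hedged, self-contained sketch with stub types and values (not nvgpu code) of what a caller looks like after the change; the real call-site update appears in the gr_gk20a_ctx_patch_smpc hunk further down.

#include <stdio.h>

/* Hypothetical stand-ins; the real definitions live in the nvgpu headers. */
struct gk20a { int unused; };
struct nvgpu_gr_ctx {
	unsigned int patch_data_count;
	unsigned long long patch_gpu_va;
};

/* Stub of the simplified function: with the bool removed, patch count
 * and patch address are always programmed together, as in the hunk above. */
static void nvgpu_gr_ctx_set_patch_ctx(struct gk20a *g,
					struct nvgpu_gr_ctx *gr_ctx)
{
	(void)g;
	printf("patch count %u, patch addr 0x%llx\n",
	       gr_ctx->patch_data_count, gr_ctx->patch_gpu_va);
}

int main(void)
{
	struct gk20a g = { 0 };
	struct nvgpu_gr_ctx gr_ctx = {
		.patch_data_count = 4U,
		.patch_gpu_va = 0x20000ULL,
	};

	/* Was: nvgpu_gr_ctx_set_patch_ctx(&g, &gr_ctx, true); */
	nvgpu_gr_ctx_set_patch_ctx(&g, &gr_ctx);
	return 0;
}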
@@ -1160,16 +1158,15 @@ int nvgpu_gr_ctx_prepare_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 	return ret;
 }
 
-int nvgpu_gr_ctx_set_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
-	bool set_pm_ptr)
+void nvgpu_gr_ctx_set_hwpm_pm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx)
 {
 	g->ops.gr.ctxsw_prog.set_pm_mode(g, &gr_ctx->mem,
 		gr_ctx->pm_ctx.pm_mode);
-	if (set_pm_ptr) {
-		g->ops.gr.ctxsw_prog.set_pm_ptr(g, &gr_ctx->mem,
-			gr_ctx->pm_ctx.gpu_va);
-	}
+}
 
-	return 0;
+void nvgpu_gr_ctx_set_hwpm_ptr(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx)
+{
+	g->ops.gr.ctxsw_prog.set_pm_ptr(g, &gr_ctx->mem,
+		gr_ctx->pm_ctx.gpu_va);
 }
 
 #endif /* CONFIG_NVGPU_DEBUGGER */
@@ -143,7 +143,7 @@ void nvgpu_gr_subctx_set_preemption_buffer_va(struct gk20a *g,
 #endif /* CONFIG_NVGPU_GFXP */
 
 #ifdef CONFIG_NVGPU_DEBUGGER
-void nvgpu_gr_subctx_set_hwpm_mode(struct gk20a *g,
+void nvgpu_gr_subctx_set_hwpm_ptr(struct gk20a *g,
 	struct nvgpu_gr_subctx *subctx, struct nvgpu_gr_ctx *gr_ctx)
 {
 	g->ops.gr.ctxsw_prog.set_pm_ptr(g, &subctx->ctx_header,
@@ -128,25 +128,18 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		goto out;
 	}
 
-	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
-	nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_channel, ch_entry) {
-		if (ch->subctx != NULL) {
-			err = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, false);
-			if (err != 0) {
-				nvgpu_err(g, "chid: %d set_hwpm_mode failed",
-					ch->chid);
-				ret = err;
-				continue;
-			}
-			nvgpu_gr_subctx_set_hwpm_mode(g, ch->subctx, gr_ctx);
-		} else {
-			ret = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, true);
-			break;
-		}
-	}
-
-	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
+	nvgpu_gr_ctx_set_hwpm_pm_mode(g, gr_ctx);
+
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS)) {
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
+		nvgpu_list_for_each_entry(ch, &tsg->ch_list,
+				nvgpu_channel, ch_entry) {
+			nvgpu_gr_subctx_set_hwpm_ptr(g, ch->subctx, gr_ctx);
+		}
+
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
+	} else {
+		nvgpu_gr_ctx_set_hwpm_ptr(g, gr_ctx);
+	}
 
 out:
 	g->ops.tsg.enable(tsg);
@@ -662,8 +655,7 @@ int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 		nvgpu_gr_ctx_patch_write(g, gr_ctx,
 			addr, data, true);
 
-		nvgpu_gr_ctx_set_patch_ctx(g, gr_ctx,
-			true);
+		nvgpu_gr_ctx_set_patch_ctx(g, gr_ctx);
 
 		/* we're not caching these on cpu side,
 		   but later watch for it */
@@ -568,8 +568,7 @@ void nvgpu_gr_ctx_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
 
 void nvgpu_gr_ctx_reset_patch_count(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx);
-void nvgpu_gr_ctx_set_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
-	bool set_patch_addr);
+void nvgpu_gr_ctx_set_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
 
 u32 nvgpu_gr_ctx_get_ctx_id(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
 u32 nvgpu_gr_ctx_read_ctx_id(struct nvgpu_gr_ctx *gr_ctx);
@@ -584,8 +583,8 @@ int nvgpu_gr_ctx_set_smpc_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 
 int nvgpu_gr_ctx_prepare_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
 	u32 mode, bool *skip_update);
-int nvgpu_gr_ctx_set_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
-	bool set_pm_ptr);
+void nvgpu_gr_ctx_set_hwpm_pm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
+void nvgpu_gr_ctx_set_hwpm_ptr(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
 
 #ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 void nvgpu_gr_ctx_set_boosted_ctx(struct nvgpu_gr_ctx *gr_ctx, bool boost);
@@ -107,7 +107,7 @@ void nvgpu_gr_subctx_set_preemption_buffer_va(struct gk20a *g,
 #endif
 
 #ifdef CONFIG_NVGPU_DEBUGGER
-void nvgpu_gr_subctx_set_hwpm_mode(struct gk20a *g,
+void nvgpu_gr_subctx_set_hwpm_ptr(struct gk20a *g,
 	struct nvgpu_gr_subctx *subctx, struct nvgpu_gr_ctx *gr_ctx);
 #endif
 #endif /* NVGPU_GR_SUBCTX_H */