gpu: nvgpu: add gr/ctx and gr/subctx APIs to set hwpm ctxsw mode

gr_gk20a_update_hwpm_ctxsw_mode() currently validates the incoming
hwpm mode, checks whether it is already set, and if not, sets the new
hwpm mode by calling g->ops.gr.ctxsw_prog HALs

Instead of programming the hwpm mode in gr_gk20a.c, move the
programming to the gr/ctx and gr/subctx units by adding the APIs
below (a caller sketch follows the list)
nvgpu_gr_ctx_prepare_hwpm_mode() - validate the incoming mode and
                                   check if it is already set
nvgpu_gr_ctx_set_hwpm_mode() - set pm mode in graphics context
nvgpu_gr_subctx_set_hwpm_mode() - set pm mode in subcontext
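
For illustration only, a minimal sketch of how a caller such as
gr_gk20a_update_hwpm_ctxsw_mode() could chain these APIs. The helper
name, the subctx argument, and the choice of set_pm_ptr are
assumptions, not taken from this change:

	/*
	 * Sketch of a caller (hypothetical): validate the new mode,
	 * then commit it to the graphics context and, when a
	 * subcontext is in use, to the subcontext header as well.
	 */
	static int update_hwpm_ctxsw_mode_sketch(struct gk20a *g,
			struct nvgpu_gr_ctx *gr_ctx,
			struct nvgpu_gr_subctx *subctx, u32 mode)
	{
		bool skip_update = false;
		int err;

		err = nvgpu_gr_ctx_prepare_hwpm_mode(g, gr_ctx, mode,
				&skip_update);
		if (err != 0) {
			return err;
		}
		if (skip_update) {
			/* requested mode already programmed */
			return 0;
		}

		/*
		 * Assumption: patch the pm buffer VA into the main ctx
		 * image only when no subcontext header will carry it.
		 */
		err = nvgpu_gr_ctx_set_hwpm_mode(g, gr_ctx, subctx == NULL);
		if (err != 0) {
			return err;
		}

		if (subctx != NULL) {
			nvgpu_gr_subctx_set_hwpm_mode(g, subctx, gr_ctx);
		}
		return 0;
	}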

Add a gpu_va field to struct pm_ctx_desc to store the GPU VA to be
programmed into the context image
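
For reference, a hedged sketch of struct pm_ctx_desc with the new
field; the surrounding members are inferred from how the diff below
uses pm_ctx->mem and pm_ctx->pm_mode, not copied from the source:

	struct pm_ctx_desc {
		struct nvgpu_mem mem;	/* pm context buffer */
		u32 pm_mode;		/* currently programmed hwpm ctxsw mode */
		u64 gpu_va;		/* new: VA to program into the ctx image */
	};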

Rename NVGPU_DBG_HWPM_CTXSW_MODE_* to NVGPU_GR_CTX_HWPM_CTXSW_MODE_*
and move them to gr/ctx.h
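
A sketch of the renamed defines as they might appear in gr/ctx.h; the
three names appear in this change, but the numeric values are
assumptions:

	/* hwpm context switch modes, moved to gr/ctx.h */
	#define NVGPU_GR_CTX_HWPM_CTXSW_MODE_NO_CTXSW		(0U)
	#define NVGPU_GR_CTX_HWPM_CTXSW_MODE_CTXSW		(1U)
	#define NVGPU_GR_CTX_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW	(2U)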

Remove the HALs below since they are no longer used; the mode is now
programmed through the set_pm_mode()/set_pm_ptr() HALs (see the
sketch after the list)
g->ops.gr.ctxsw_prog.set_pm_mode_no_ctxsw()
g->ops.gr.ctxsw_prog.set_pm_mode_ctxsw()
g->ops.gr.ctxsw_prog.set_pm_mode_stream_out_ctxsw()
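
For contrast, the ctxsw_prog ops this change relies on instead, as
used in the diff below; the struct name and the exact prototypes are
an inferred sketch, not copied from the source:

	struct gops_gr_ctxsw_prog_sketch {
		/* query HALs: return the hw value for each hwpm mode */
		u32 (*hw_get_pm_mode_no_ctxsw)(void);
		u32 (*hw_get_pm_mode_ctxsw)(void);
		/* NULL on chips without Mode-E (stream out) support */
		u32 (*hw_get_pm_mode_stream_out_ctxsw)(void);
		/* programming HALs: write mode and pm VA into the ctx image */
		void (*set_pm_mode)(struct gk20a *g,
				struct nvgpu_mem *ctx_mem, u32 mode);
		void (*set_pm_ptr)(struct gk20a *g,
				struct nvgpu_mem *ctx_mem, u64 pm_gpu_va);
	};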

Jira NVGPU-1527
Jira NVGPU-1613

Change-Id: Id2a4d498182ec0e3586dc7265f73a25870ca2ef7
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2011093
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit fe27a7f934 (parent dd12b9b320)
Author: Deepak Nibade <dnibade@nvidia.com>
Date: 2019-01-24 15:01:52 +05:30
Committed by: mobile promotions
20 changed files with 147 additions and 173 deletions


@@ -673,3 +673,83 @@ int nvgpu_gr_ctx_set_smpc_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
	return err;
}

int nvgpu_gr_ctx_prepare_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
	u32 mode, bool *skip_update)
{
	struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;

	*skip_update = false;

	if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {
		nvgpu_err(g, "no graphics context allocated");
		return -EFAULT;
	}

	/* Mode-E (stream out) needs a HAL that not all chips provide */
	if ((mode == NVGPU_GR_CTX_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW) &&
	    (g->ops.gr.ctxsw_prog.hw_get_pm_mode_stream_out_ctxsw == NULL)) {
		nvgpu_err(g,
			"Mode-E hwpm context switch mode is not supported");
		return -EINVAL;
	}

	switch (mode) {
	case NVGPU_GR_CTX_HWPM_CTXSW_MODE_CTXSW:
		if (pm_ctx->pm_mode ==
		    g->ops.gr.ctxsw_prog.hw_get_pm_mode_ctxsw()) {
			*skip_update = true;
			return 0;
		}
		pm_ctx->pm_mode = g->ops.gr.ctxsw_prog.hw_get_pm_mode_ctxsw();
		pm_ctx->gpu_va = pm_ctx->mem.gpu_va;
		break;
	case NVGPU_GR_CTX_HWPM_CTXSW_MODE_NO_CTXSW:
		if (pm_ctx->pm_mode ==
		    g->ops.gr.ctxsw_prog.hw_get_pm_mode_no_ctxsw()) {
			*skip_update = true;
			return 0;
		}
		pm_ctx->pm_mode =
			g->ops.gr.ctxsw_prog.hw_get_pm_mode_no_ctxsw();
		/* no pm buffer VA is programmed when ctxsw is disabled */
		pm_ctx->gpu_va = 0;
		break;
	case NVGPU_GR_CTX_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW:
		if (pm_ctx->pm_mode ==
		    g->ops.gr.ctxsw_prog.hw_get_pm_mode_stream_out_ctxsw()) {
			*skip_update = true;
			return 0;
		}
		pm_ctx->pm_mode =
			g->ops.gr.ctxsw_prog.hw_get_pm_mode_stream_out_ctxsw();
		pm_ctx->gpu_va = pm_ctx->mem.gpu_va;
		break;
	default:
		nvgpu_err(g, "invalid hwpm context switch mode");
		return -EINVAL;
	}

	return 0;
}

int nvgpu_gr_ctx_set_hwpm_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
	bool set_pm_ptr)
{
	int err;

	/*
	 * The channel gr_ctx buffer is GPU-cacheable; flush and
	 * invalidate before the CPU update.
	 */
	err = g->ops.mm.l2_flush(g, true);
	if (err != 0) {
		nvgpu_err(g, "l2_flush failed");
		return err;
	}

	g->ops.gr.ctxsw_prog.set_pm_mode(g, &gr_ctx->mem,
		gr_ctx->pm_ctx.pm_mode);
	if (set_pm_ptr) {
		g->ops.gr.ctxsw_prog.set_pm_ptr(g, &gr_ctx->mem,
			gr_ctx->pm_ctx.gpu_va);
	}

	return err;
}
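
The gr/subctx side of this change is not part of the hunk shown
above. A hedged sketch of what nvgpu_gr_subctx_set_hwpm_mode()
plausibly does, assuming the subcontext header is reached through a
ctx_header member and reusing the set_pm_mode()/set_pm_ptr() HALs;
only the function name comes from the commit message, the signature
and field names are assumptions:

	/*
	 * Sketch only: mirror the pm mode and pm buffer VA prepared in
	 * gr_ctx->pm_ctx into the subcontext header.
	 */
	void nvgpu_gr_subctx_set_hwpm_mode(struct gk20a *g,
			struct nvgpu_gr_subctx *subctx,
			struct nvgpu_gr_ctx *gr_ctx)
	{
		g->ops.gr.ctxsw_prog.set_pm_mode(g, &subctx->ctx_header,
			gr_ctx->pm_ctx.pm_mode);
		g->ops.gr.ctxsw_prog.set_pm_ptr(g, &subctx->ctx_header,
			gr_ctx->pm_ctx.gpu_va);
	}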