mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: add gr/ctx API to set smpc ctxsw mode
gpu: nvgpu: add gr/ctx API to set smpc ctxsw mode. gr_gk20a_update_smpc_ctxsw_mode() currently sets the SMPC mode in the context image directly by calling the g->ops.gr.ctxsw_prog HAL. Add a new API, nvgpu_gr_ctx_set_smpc_mode(), in the gr/ctx unit to set the SMPC mode, and use it from gr_gk20a_update_smpc_ctxsw_mode(). Jira NVGPU-1527 Change-Id: Ib9a74781d6bb988caffc2a79345be773fd4942e4 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/2011092 Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
2af1558d42
commit
dd12b9b320
@@ -650,3 +650,26 @@ int nvgpu_gr_ctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int nvgpu_gr_ctx_set_smpc_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
|
||||
bool enable)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {
|
||||
nvgpu_err(g, "no graphics context allocated");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* Channel gr_ctx buffer is gpu cacheable.
|
||||
Flush and invalidate before cpu update. */
|
||||
err = g->ops.mm.l2_flush(g, true);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "l2_flush failed");
|
||||
return err;
|
||||
}
|
||||
|
||||
g->ops.gr.ctxsw_prog.set_pm_smpc_mode(g, &gr_ctx->mem, enable);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -1391,8 +1391,6 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
|
||||
bool enable_smpc_ctxsw)
|
||||
{
|
||||
struct tsg_gk20a *tsg;
|
||||
struct nvgpu_gr_ctx *gr_ctx = NULL;
|
||||
struct nvgpu_mem *mem = NULL;
|
||||
int ret;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
@@ -1402,13 +1400,6 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
gr_ctx = tsg->gr_ctx;
|
||||
mem = &gr_ctx->mem;
|
||||
if (!nvgpu_mem_is_valid(mem)) {
|
||||
nvgpu_err(g, "no graphics context allocated");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
ret = gk20a_disable_channel_tsg(g, c);
|
||||
if (ret != 0) {
|
||||
nvgpu_err(g, "failed to disable channel/TSG");
|
||||
@@ -1421,15 +1412,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Channel gr_ctx buffer is gpu cacheable.
|
||||
Flush and invalidate before cpu update. */
|
||||
ret = g->ops.mm.l2_flush(g, true);
|
||||
if (ret != 0) {
|
||||
nvgpu_err(g, "l2_flush failed");
|
||||
goto out;
|
||||
}
|
||||
|
||||
g->ops.gr.ctxsw_prog.set_pm_smpc_mode(g, mem, enable_smpc_ctxsw);
|
||||
ret = nvgpu_gr_ctx_set_smpc_mode(g, tsg->gr_ctx, enable_smpc_ctxsw);
|
||||
|
||||
out:
|
||||
gk20a_enable_channel_tsg(g, c);
|
||||
|
||||
@@ -182,4 +182,7 @@ u32 nvgpu_gr_ctx_get_ctx_id(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
|
||||
int nvgpu_gr_ctx_init_zcull(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
|
||||
int nvgpu_gr_ctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
|
||||
bool set_zcull_ptr);
|
||||
|
||||
int nvgpu_gr_ctx_set_smpc_mode(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
|
||||
bool enable);
|
||||
#endif /* NVGPU_INCLUDE_GR_CTX_H */
|
||||
|
||||
Reference in New Issue
Block a user