mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23 18:16:01 +03:00)
gpu: nvgpu: move zcull context setup to gr/ctx and gr/subctx units
In gr_gk20a_ctx_zcull_setup(), we configure the context/subcontext with
zcull details. This API currently does it directly by calling the
g->ops.gr.ctxsw_prog HAL.

Move all context/subcontext setup to the gr/ctx and gr/subctx units
respectively.

Define and use the below new APIs for the same:
gr/ctx    : nvgpu_gr_ctx_zcull_setup()
gr/subctx : nvgpu_gr_subctx_zcull_setup()

Jira NVGPU-1527
Jira NVGPU-1613

Change-Id: I1b7b16baea60ea45535c623b5b41351610ca433e
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2011090
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 319eca3498
commit bac95b36d8
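For orientation before the diff: the new split is that nvgpu_gr_ctx_zcull_setup() programs the zcull mode (and, when asked, the zcull buffer pointer) into the graphics context image, while nvgpu_gr_subctx_zcull_setup() programs the pointer into the subcontext header. The sketch below mirrors the caller-side pattern in the gr_gk20a.c hunks further down; it is illustrative only, the function name zcull_setup_example is hypothetical, and it assumes nvgpu's internal types and headers (struct gk20a, struct channel_gk20a, struct nvgpu_gr_ctx).

/* Illustrative sketch only; assumes nvgpu internal headers declaring
 * struct gk20a, struct channel_gk20a, struct nvgpu_gr_ctx and the new
 * nvgpu_gr_ctx_zcull_setup()/nvgpu_gr_subctx_zcull_setup() APIs. */
static int zcull_setup_example(struct gk20a *g, struct channel_gk20a *c,
			       struct nvgpu_gr_ctx *gr_ctx)
{
	int ret;

	if (c->subctx != NULL) {
		/* With subcontexts: write the zcull mode into the context
		 * image only; the buffer pointer goes into the subcontext
		 * header. */
		ret = nvgpu_gr_ctx_zcull_setup(g, gr_ctx, false);
		if (ret == 0) {
			nvgpu_gr_subctx_zcull_setup(g, c->subctx, gr_ctx);
		}
	} else {
		/* Without subcontexts: mode and pointer both go into the
		 * context image. */
		ret = nvgpu_gr_ctx_zcull_setup(g, gr_ctx, true);
	}

	return ret;
}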
@@ -612,3 +612,25 @@ u32 nvgpu_gr_ctx_get_ctx_id(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx)
 	return gr_ctx->ctx_id;
 }
+
+int nvgpu_gr_ctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
+	bool set_zcull_ptr)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (gr_ctx->zcull_ctx.gpu_va == 0ULL &&
+		g->ops.gr.ctxsw_prog.is_zcull_mode_separate_buffer(
+			gr_ctx->zcull_ctx.ctx_sw_mode)) {
+		return -EINVAL;
+	}
+
+	g->ops.gr.ctxsw_prog.set_zcull(g, &gr_ctx->mem,
+		gr_ctx->zcull_ctx.ctx_sw_mode);
+
+	if (set_zcull_ptr) {
+		g->ops.gr.ctxsw_prog.set_zcull_ptr(g, &gr_ctx->mem,
+			gr_ctx->zcull_ctx.gpu_va);
+	}
+
+	return 0;
+}
@@ -107,3 +107,13 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
 	g->ops.gr.ctxsw_prog.set_type_per_veid_header(g, ctxheader);
 }
+
+void nvgpu_gr_subctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_subctx *subctx,
+	struct nvgpu_gr_ctx *gr_ctx)
+{
+	nvgpu_log_fn(g, " ");
+
+	g->ops.gr.ctxsw_prog.set_zcull_ptr(g, &subctx->ctx_header,
+		gr_ctx->zcull_ctx.gpu_va);
+}
@@ -634,19 +634,10 @@ int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
 static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c,
 			struct nvgpu_gr_ctx *gr_ctx)
 {
-	struct nvgpu_mem *mem = NULL;
 	int ret = 0;
 
 	nvgpu_log_fn(g, " ");
 
-	mem = &gr_ctx->mem;
-
-	if (gr_ctx->zcull_ctx.gpu_va == 0ULL &&
-		g->ops.gr.ctxsw_prog.is_zcull_mode_separate_buffer(
-			gr_ctx->zcull_ctx.ctx_sw_mode)) {
-		return -EINVAL;
-	}
-
 	ret = gk20a_disable_channel_tsg(g, c);
 	if (ret != 0) {
 		nvgpu_err(g, "failed to disable channel/TSG");
@@ -659,14 +650,13 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c,
 		return ret;
 	}
 
-	g->ops.gr.ctxsw_prog.set_zcull(g, mem, gr_ctx->zcull_ctx.ctx_sw_mode);
-
 	if (c->subctx != NULL) {
-		g->ops.gr.ctxsw_prog.set_zcull_ptr(g, &c->subctx->ctx_header,
-			gr_ctx->zcull_ctx.gpu_va);
+		ret = nvgpu_gr_ctx_zcull_setup(g, gr_ctx, false);
+		if (ret == 0) {
+			nvgpu_gr_subctx_zcull_setup(g, c->subctx, gr_ctx);
+		}
 	} else {
-		g->ops.gr.ctxsw_prog.set_zcull_ptr(g, mem,
-			gr_ctx->zcull_ctx.gpu_va);
+		ret = nvgpu_gr_ctx_zcull_setup(g, gr_ctx, true);
 	}
 
 	gk20a_enable_channel_tsg(g, c);
@@ -179,4 +179,6 @@ void nvgpu_gr_ctx_patch_write(struct gk20a *g,
 
 u32 nvgpu_gr_ctx_get_ctx_id(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx);
 
+int nvgpu_gr_ctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
+	bool set_zcull_ptr);
 #endif /* NVGPU_INCLUDE_GR_CTX_H */
@@ -43,4 +43,7 @@ void nvgpu_gr_subctx_load_ctx_header(struct gk20a *g,
 	struct nvgpu_gr_subctx *subctx,
 	struct nvgpu_gr_ctx *gr_ctx, u64 gpu_va);
 
+void nvgpu_gr_subctx_zcull_setup(struct gk20a *g, struct nvgpu_gr_subctx *subctx,
+	struct nvgpu_gr_ctx *gr_ctx);
+
 #endif /* NVGPU_GR_SUBCTX_H */