gpu: nvgpu: pass gr_ctx to commit_global_ctx_buffers

Simplify object ownership by passing the gr_ctx to the function directly
instead of looking it up from the TSG via a channel pointer; the caller
already holds the gr_ctx.
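
For illustration, a minimal sketch of the call-site pattern this patch
changes (caller code abbreviated; all names are taken from the diff below):

	/* before: the callee resolved gr_ctx from the channel's TSG itself */
	g->ops.gr.commit_global_ctx_buffers(g, c, true);

	/* after: the caller resolves gr_ctx once and passes it down */
	tsg = tsg_gk20a_from_ch(c);
	if (tsg == NULL) {
		return -EINVAL;
	}
	gr_ctx = tsg->gr_ctx;
	g->ops.gr.commit_global_ctx_buffers(g, gr_ctx, true);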

Jira NVGPU-1149

Change-Id: I710afc48c0ed11b727cc1b9b6f440110aa404693
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1925430
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Konsta Holtta <kholtta@nvidia.com>
Date:      2018-10-10 12:36:59 +03:00
Committer: mobile promotions
Commit:    ca632a2e66 (parent: b9d391d391)

5 changed files with 8 additions and 24 deletions

@@ -791,22 +791,14 @@ u32 gk20a_gr_tpc_offset(struct gk20a *g, u32 tpc)
 }
 
 int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
-			struct channel_gk20a *c, bool patch)
+			struct nvgpu_gr_ctx *gr_ctx, bool patch)
 {
 	struct gr_gk20a *gr = &g->gr;
-	struct tsg_gk20a *tsg;
-	struct nvgpu_gr_ctx *gr_ctx = NULL;
 	u64 addr;
 	u32 size;
 
 	nvgpu_log_fn(g, " ");
 
-	tsg = tsg_gk20a_from_ch(c);
-	if (tsg == NULL) {
-		return -EINVAL;
-	}
-
-	gr_ctx = tsg->gr_ctx;
 	if (patch) {
 		int err;
 		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
@@ -1435,7 +1427,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 	gk20a_writel(g, gr_fe_go_idle_timeout_r(),
 		     gr_fe_go_idle_timeout_count_disabled_f());
 
-	err = g->ops.gr.commit_global_ctx_buffers(g, c, false);
+	err = g->ops.gr.commit_global_ctx_buffers(g, gr_ctx, false);
 	if (err != 0U) {
 		goto clean_up;
 	}
@@ -2932,7 +2924,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 			  "fail to map global ctx buffer");
 		goto out;
 	}
-	g->ops.gr.commit_global_ctx_buffers(g, c, true);
+	g->ops.gr.commit_global_ctx_buffers(g, gr_ctx, true);
 
 	/* commit gr ctx buffer */
 	err = g->ops.gr.commit_inst(c, gr_ctx->mem.gpu_va);

@@ -755,7 +755,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g);
 int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 				    struct channel_gk20a *c);
 int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
-				       struct channel_gk20a *c, bool patch);
+				       struct nvgpu_gr_ctx *gr_ctx, bool patch);
 int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
 				   struct channel_gk20a *c);

@@ -521,7 +521,7 @@ struct gpu_ops {
 		int (*map_global_ctx_buffers)(struct gk20a *g,
 					      struct channel_gk20a *c);
 		int (*commit_global_ctx_buffers)(struct gk20a *g,
-						 struct channel_gk20a *c, bool patch);
+						 struct nvgpu_gr_ctx *gr_ctx, bool patch);
 		u32 (*get_nonpes_aware_tpc)(struct gk20a *g, u32 gpc, u32 tpc);
 		int (*get_offset_in_gpccs_segment)(struct gk20a *g,
 			enum ctxsw_addr_type addr_type, u32 num_tpcs,

@@ -220,25 +220,17 @@ static void gr_tu104_commit_rtv_circular_buffer(struct gk20a *g,
 }
 
 int gr_tu104_commit_global_ctx_buffers(struct gk20a *g,
-			struct channel_gk20a *ch, bool patch)
+			struct nvgpu_gr_ctx *gr_ctx, bool patch)
 {
 	int err;
-	struct tsg_gk20a *tsg;
-	struct nvgpu_gr_ctx *gr_ctx = NULL;
 	u64 addr;
 	u32 size;
 
-	err = gr_gk20a_commit_global_ctx_buffers(g, ch, patch);
+	err = gr_gk20a_commit_global_ctx_buffers(g, gr_ctx, patch);
 	if (err != 0) {
 		return err;
 	}
 
-	tsg = tsg_gk20a_from_ch(ch);
-	if (tsg == NULL) {
-		return -EINVAL;
-	}
-
-	gr_ctx = tsg->gr_ctx;
 	if (patch) {
 		int err;
 		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);

@@ -72,7 +72,7 @@ int gr_tu104_alloc_global_ctx_buffers(struct gk20a *g);
 int gr_tu104_map_global_ctx_buffers(struct gk20a *g,
 				    struct channel_gk20a *ch);
 int gr_tu104_commit_global_ctx_buffers(struct gk20a *g,
-				       struct channel_gk20a *ch, bool patch);
+				       struct nvgpu_gr_ctx *gr_ctx, bool patch);
 
 void gr_tu104_bundle_cb_defaults(struct gk20a *g);
 void gr_tu104_cb_size_default(struct gk20a *g);