gpu: nvgpu: make gr_ctx a pointer in tsg

Remove a dependency on a graphics type from the tsg header by adding a
pointer indirection.

Jira NVGPU-967
Jira NVGPU-1149

Change-Id: I9177e6eedf08bfe4a3b981b67fa8d4d734f9e50f
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1822023
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Konsta Holtta
Date: 2018-09-25 09:23:51 +03:00
Committed by: mobile promotions
Parent: 17261054d6
Commit: b08c613402
14 changed files with 73 additions and 65 deletions
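For context, the technique the commit message names is the standard C one: an embedded struct member forces every includer of the header to see the member's complete type, while a pointer member compiles against a bare forward declaration. Below is a minimal sketch of that pattern with hypothetical stand-in names (gr_ctx_state, gr_ctx_opaque, tsg_embedded, tsg_ptr), not the driver's actual declarations:

/* Embedded member: the complete definition must be visible in this
 * header, dragging the graphics headers into every file including it. */
struct gr_ctx_state { int state; };       /* stand-in for the gr type */

struct tsg_embedded {
	struct gr_ctx_state gr_ctx;       /* complete type required here */
};

/* Pointer member: a forward declaration is enough, so the header no
 * longer depends on the graphics type's definition. */
struct gr_ctx_opaque;                     /* definition lives elsewhere */

struct tsg_ptr {
	struct gr_ctx_opaque *gr_ctx;     /* allocated on open, freed on release */
};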


@@ -311,6 +311,10 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
 	tsg->runlist_id = ~0;
 	tsg->tgid = pid;
 	tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;
+	tsg->gr_ctx = nvgpu_kzalloc(g, sizeof(*tsg->gr_ctx));
+	if (tsg->gr_ctx == NULL) {
+		goto clean_up;
+	}
 	if (g->ops.fifo.init_eng_method_buffers != NULL) {
 		g->ops.fifo.init_eng_method_buffers(g, tsg);
@@ -330,7 +334,8 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
 	return tsg;
 clean_up:
+	nvgpu_kfree(g, tsg->gr_ctx);
+	tsg->gr_ctx = NULL;
 	if(tsg->sm_error_states != NULL) {
 		nvgpu_kfree(g, tsg->sm_error_states);
 		tsg->sm_error_states = NULL;
@@ -350,10 +355,13 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
 		g->ops.fifo.tsg_release(tsg);
 	}
-	if (nvgpu_mem_is_valid(&tsg->gr_ctx.mem)) {
+	if (tsg->gr_ctx != NULL && nvgpu_mem_is_valid(&tsg->gr_ctx->mem)) {
 		gr_gk20a_free_tsg_gr_ctx(tsg);
 	}
+	nvgpu_kfree(g, tsg->gr_ctx);
+	tsg->gr_ctx = NULL;
 	if (g->ops.fifo.deinit_eng_method_buffers != NULL) {
 		g->ops.fifo.deinit_eng_method_buffers(g, tsg);
 	}


@@ -469,7 +469,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	if (!tsg)
 		return -EINVAL;
-	ch_ctx = &tsg->gr_ctx;
+	ch_ctx = tsg->gr_ctx;
 	mem = &ch_ctx->mem;
 	if (!trace)


@@ -107,7 +107,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 	/* Channel gr_ctx buffer is gpu cacheable.
@@ -766,7 +766,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 	if (gr_ctx->zcull_ctx.gpu_va == 0 &&
@@ -836,7 +836,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (patch) {
 		int err;
 		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
@@ -1359,7 +1359,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	gr_mem = &gr_ctx->mem;
 	/* golden ctx is global to all channels. Although only the first
@@ -1609,7 +1609,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 	if (!nvgpu_mem_is_valid(mem)) {
 		nvgpu_err(g, "no graphics context allocated");
@@ -1669,7 +1669,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	pm_ctx = &gr_ctx->pm_ctx;
 	gr_mem = &gr_ctx->mem;
 	if (!nvgpu_mem_is_valid(gr_mem)) {
@@ -1825,7 +1825,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 	if (gr->ctx_vars.local_golden_image == NULL) {
 		return -EINVAL;
@@ -2661,9 +2661,9 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 		return -EINVAL;
 	}
-	g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
-	g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
-	g_bfr_index = tsg->gr_ctx.global_ctx_buffer_index;
+	g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
+	g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;
+	g_bfr_index = tsg->gr_ctx->global_ctx_buffer_index;
 	/* Circular Buffer */
 	if (c->vpr &&
@@ -2744,7 +2744,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[PRIV_ACCESS_MAP_VA] = mem->size;
 	g_bfr_index[PRIV_ACCESS_MAP_VA] = PRIV_ACCESS_MAP;
-	tsg->gr_ctx.global_ctx_buffer_mapped = true;
+	tsg->gr_ctx->global_ctx_buffer_mapped = true;
 #ifdef CONFIG_GK20A_CTXSW_TRACE
 	/* FECS trace buffer */
@@ -2763,7 +2763,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	return 0;
 clean_up:
-	gr_gk20a_unmap_global_ctx_buffers(g, ch_vm, &tsg->gr_ctx);
+	gr_gk20a_unmap_global_ctx_buffers(g, ch_vm, tsg->gr_ctx);
 	return -ENOMEM;
 }
@@ -2812,7 +2812,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
 		struct tsg_gk20a *tsg, u32 class, u32 padding)
 {
-	struct nvgpu_gr_ctx *gr_ctx = &tsg->gr_ctx;
+	struct nvgpu_gr_ctx *gr_ctx = tsg->gr_ctx;
 	int err;
 	if (tsg->vm == NULL) {
@@ -2835,7 +2835,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 {
 	nvgpu_log_fn(g, " ");
-	if (gr_ctx->mem.gpu_va) {
+	if (gr_ctx != NULL) {
 		gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx);
 		gr_gk20a_free_channel_patch_ctx(g, vm, gr_ctx);
 		gr_gk20a_free_channel_pm_ctx(g, vm, gr_ctx);
@@ -2863,7 +2863,7 @@ void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
 		nvgpu_err(g, "No address space bound");
 		return;
 	}
-	tsg->g->ops.gr.free_gr_ctx(g, tsg->vm, &tsg->gr_ctx);
+	tsg->g->ops.gr.free_gr_ctx(g, tsg->vm, tsg->gr_ctx);
 }
 u32 gr_gk20a_get_patch_slots(struct gk20a *g)
@@ -2887,7 +2887,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 		return -EINVAL;
 	}
-	patch_ctx = &tsg->gr_ctx.patch_ctx;
+	patch_ctx = &tsg->gr_ctx->patch_ctx;
 	alloc_size = g->ops.gr.get_patch_slots(g) *
 		PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY;
@@ -2967,7 +2967,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	}
 	tsg = &f->tsg[c->tsgid];
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {
 		tsg->vm = c->vm;
@@ -3661,7 +3661,7 @@ int gr_gk20a_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 		return -EINVAL;
 	}
-	zcull_ctx = &tsg->gr_ctx.zcull_ctx;
+	zcull_ctx = &tsg->gr_ctx->zcull_ctx;
 	zcull_ctx->ctx_sw_mode = mode;
 	zcull_ctx->gpu_va = zcull_va;
@@ -6746,7 +6746,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	g->ops.gr.init_ovr_sm_dsm_perf();
 	g->ops.gr.init_sm_dsm_reg_info();
 	g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs);
@@ -8034,7 +8034,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (ch_is_curr_ctx) {
 		for (pass = 0; pass < 2; pass++) {


@@ -195,7 +195,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
 		return -EINVAL;
 	}
-	ch_ctx = &tsg->gr_ctx;
+	ch_ctx = tsg->gr_ctx;
 	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_ds_tga_constraintlogic_r(),
 		gr_ds_tga_constraintlogic_beta_cbsize_f(gr->attrib_cb_default_size) |
@@ -931,7 +931,7 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
 		return;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
 		nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
 		nvgpu_mem_wr(g, mem,
@@ -1097,7 +1097,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 	if (!nvgpu_mem_is_valid(mem) || c->vpr) {
 		return -EINVAL;


@@ -414,7 +414,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
 		attrib_size_in_chunk = gr->attrib_cb_gfxp_size;
@@ -1199,7 +1199,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		return;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
 		nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option);
@@ -1737,7 +1737,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (gr_ctx->cilp_preempt_pending) {
 		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
@@ -1818,7 +1818,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	/* The ucode is self-clearing, so all we need to do here is
 	   to clear cilp_preempt_pending. */
@@ -1861,7 +1861,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 			return -EINVAL;
 		}
-		cilp_enabled = (tsg->gr_ctx.compute_preempt_mode ==
+		cilp_enabled = (tsg->gr_ctx->compute_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
 	}
@@ -1968,7 +1968,7 @@ static int gr_gp10b_get_cilp_preempt_pending_chid(struct gk20a *g, int *__chid)
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (gr_ctx->cilp_preempt_pending) {
 		*__chid = chid;
@@ -2084,7 +2084,7 @@ bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	*cilp_preempt_pending = false;
@@ -2173,7 +2173,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
 			goto clean_up;
 		}
-		gr_ctx = &tsg->gr_ctx;
+		gr_ctx = tsg->gr_ctx;
 		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
 			NVGPU_TIMER_CPU_TIMER);
@@ -2212,7 +2212,7 @@ int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	gr_ctx->boosted_ctx = boost;
 	mem = &gr_ctx->mem;
@@ -2270,7 +2270,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 	}
 	vm = tsg->vm;
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 	/* skip setting anything if both modes are already set */


@@ -1682,7 +1682,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 		return;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (gr_ctx->graphics_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
@@ -2384,7 +2384,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 			return -EINVAL;
 		}
-		cilp_enabled = (tsg->gr_ctx.compute_preempt_mode ==
+		cilp_enabled = (tsg->gr_ctx->compute_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
 	}


@@ -109,7 +109,7 @@ int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	g->ops.mm.l2_flush(g, true);


@@ -54,7 +54,7 @@ struct tsg_gk20a {
 	struct nvgpu_mem *eng_method_buffers;
-	struct nvgpu_gr_ctx gr_ctx;
+	struct nvgpu_gr_ctx *gr_ctx;
 	struct nvgpu_ref refcount;
 	struct nvgpu_list_node ch_list;
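This hunk is the heart of the change: with the member now a pointer, the tsg header needs only the type's name, not its layout, so a forward declaration can stand in for the previously required graphics include. A sketch of what the header can rely on after this change, assuming only the declarations visible in the hunk:

struct nvgpu_gr_ctx;                     /* opaque here; defined in gr code */

struct tsg_gk20a {
	/* ... */
	struct nvgpu_mem *eng_method_buffers;
	struct nvgpu_gr_ctx *gr_ctx;     /* no gr header include needed */
	struct nvgpu_ref refcount;
	struct nvgpu_list_node ch_list;
	/* ... */
};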


@@ -93,8 +93,8 @@ static int gk20a_fifo_sched_debugfs_seq_show(
 			tsg->timeslice_us,
 			ch->timeout_ms_max,
 			tsg->interleave_level,
-			tsg->gr_ctx.graphics_preempt_mode,
-			tsg->gr_ctx.compute_preempt_mode);
+			tsg->gr_ctx->graphics_preempt_mode,
+			tsg->gr_ctx->compute_preempt_mode);
 		gk20a_channel_put(ch);
 	}
 	return 0;


@@ -91,9 +91,9 @@ static void gk20a_channel_trace_sched_param(
 		ch->timeout_ms_max,
 		gk20a_fifo_interleave_level_name(tsg->interleave_level),
 		gr_gk20a_graphics_preempt_mode_name(
-			tsg->gr_ctx.graphics_preempt_mode),
+			tsg->gr_ctx->graphics_preempt_mode),
 		gr_gk20a_compute_preempt_mode_name(
-			tsg->gr_ctx.compute_preempt_mode));
+			tsg->gr_ctx->compute_preempt_mode));
 }
 /*


@@ -209,9 +209,9 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
 	arg->timeslice = tsg->timeslice_us;
 	arg->graphics_preempt_mode =
-		tsg->gr_ctx.graphics_preempt_mode;
+		tsg->gr_ctx->graphics_preempt_mode;
 	arg->compute_preempt_mode =
-		tsg->gr_ctx.compute_preempt_mode;
+		tsg->gr_ctx->compute_preempt_mode;
 	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);


@@ -175,9 +175,9 @@ int gr_tu104_map_global_ctx_buffers(struct gk20a *g,
 		return -EINVAL;
 	}
-	g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
-	g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
-	g_bfr_index = tsg->gr_ctx.global_ctx_buffer_index;
+	g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
+	g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;
+	g_bfr_index = tsg->gr_ctx->global_ctx_buffer_index;
 	/* RTV circular buffer */
 	mem = &gr->global_ctx_buffer[RTV_CIRCULAR_BUFFER].mem;
@@ -238,7 +238,7 @@ int gr_tu104_commit_global_ctx_buffers(struct gk20a *g,
 		return -EINVAL;
 	}
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (patch) {
 		int err;
 		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);


@@ -261,7 +261,7 @@ int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 		return -EINVAL;
 	vm = tsg->vm;
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	/* skip setting anything if both modes are already set */
 	if (graphics_preempt_mode &&


@@ -186,8 +186,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	if (!tsg)
 		return -EINVAL;
-	g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
-	g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
+	g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
+	g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;
 	/* Circular Buffer */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
@@ -255,7 +255,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	if (err || msg.ret)
 		goto clean_up;
-	tsg->gr_ctx.global_ctx_buffer_mapped = true;
+	tsg->gr_ctx->global_ctx_buffer_mapped = true;
 	return 0;
 clean_up:
@@ -272,14 +272,14 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
 {
 	struct vm_gk20a *ch_vm = tsg->vm;
-	u64 *g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
-	u64 *g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
+	u64 *g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
+	u64 *g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;
 	u32 i;
 	struct gk20a *g = tsg->g;
 	nvgpu_log_fn(g, " ");
-	if (tsg->gr_ctx.global_ctx_buffer_mapped) {
+	if (tsg->gr_ctx->global_ctx_buffer_mapped) {
 		/* server will unmap on channel close */
 		for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
@@ -291,7 +291,7 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
 			}
 		}
-		tsg->gr_ctx.global_ctx_buffer_mapped = false;
+		tsg->gr_ctx->global_ctx_buffer_mapped = false;
 	}
 }
@@ -361,7 +361,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	if (!tsg)
 		return -EINVAL;
-	patch_ctx = &tsg->gr_ctx.patch_ctx;
+	patch_ctx = &tsg->gr_ctx->patch_ctx;
 	patch_ctx->mem.size = 128 * sizeof(u32);
 	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			patch_ctx->mem.size,
@@ -385,7 +385,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 {
-	struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;
+	struct patch_desc *patch_ctx = &tsg->gr_ctx->patch_ctx;
 	struct gk20a *g = tsg->g;
 	nvgpu_log_fn(g, " ");
@@ -401,7 +401,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
 {
-	struct nvgpu_gr_ctx *ch_ctx = &tsg->gr_ctx;
+	struct nvgpu_gr_ctx *ch_ctx = tsg->gr_ctx;
 	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
 	struct gk20a *g = tsg->g;
@@ -466,7 +466,7 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 	if (!tsg)
 		return -EINVAL;
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX;
 	msg.handle = vgpu_get_handle(c->g);
@@ -481,7 +481,7 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
 {
-	struct nvgpu_gr_ctx *gr_ctx = &tsg->gr_ctx;
+	struct nvgpu_gr_ctx *gr_ctx = tsg->gr_ctx;
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
 		&msg.params.tsg_bind_gr_ctx;
@@ -526,7 +526,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 		return -EINVAL;
 	tsg = &f->tsg[c->tsgid];
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {
 		tsg->vm = c->vm;
@@ -1075,7 +1075,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 		return -EINVAL;
 	}
-	ch_ctx = &tsg->gr_ctx;
+	ch_ctx = tsg->gr_ctx;
 	pm_ctx = &ch_ctx->pm_ctx;
 	if (mode == NVGPU_DBG_HWPM_CTXSW_MODE_CTXSW) {