diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c
index 62216a4f7..cf527e55f 100644
--- a/drivers/gpu/nvgpu/common/fifo/tsg.c
+++ b/drivers/gpu/nvgpu/common/fifo/tsg.c
@@ -311,6 +311,10 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
 	tsg->runlist_id = ~0;
 	tsg->tgid = pid;
 	tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;
+	tsg->gr_ctx = nvgpu_kzalloc(g, sizeof(*tsg->gr_ctx));
+	if (tsg->gr_ctx == NULL) {
+		goto clean_up;
+	}
 
 	if (g->ops.fifo.init_eng_method_buffers != NULL) {
 		g->ops.fifo.init_eng_method_buffers(g, tsg);
@@ -330,7 +334,8 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
 	return tsg;
 
 clean_up:
-
+	nvgpu_kfree(g, tsg->gr_ctx);
+	tsg->gr_ctx = NULL;
 	if(tsg->sm_error_states != NULL) {
 		nvgpu_kfree(g, tsg->sm_error_states);
 		tsg->sm_error_states = NULL;
@@ -350,10 +355,13 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
 		g->ops.fifo.tsg_release(tsg);
 	}
 
-	if (nvgpu_mem_is_valid(&tsg->gr_ctx.mem)) {
+	if (tsg->gr_ctx != NULL && nvgpu_mem_is_valid(&tsg->gr_ctx->mem)) {
 		gr_gk20a_free_tsg_gr_ctx(tsg);
 	}
 
+	nvgpu_kfree(g, tsg->gr_ctx);
+	tsg->gr_ctx = NULL;
+
 	if (g->ops.fifo.deinit_eng_method_buffers != NULL) {
 		g->ops.fifo.deinit_eng_method_buffers(g, tsg);
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index 9602d4f8b..70091f7ab 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -469,7 +469,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	if (!tsg)
 		return -EINVAL;
 
-	ch_ctx = &tsg->gr_ctx;
+	ch_ctx = tsg->gr_ctx;
 	mem = &ch_ctx->mem;
 
 	if (!trace)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index ee29fd183..3a03169fb 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -107,7 +107,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 
 	/* Channel gr_ctx buffer is gpu cacheable.
@@ -766,7 +766,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 
 	if (gr_ctx->zcull_ctx.gpu_va == 0 &&
@@ -836,7 +836,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (patch) {
 		int err;
 		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
@@ -1359,7 +1359,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	gr_mem = &gr_ctx->mem;
 
 	/* golden ctx is global to all channels. Although only the first
@@ -1609,7 +1609,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 	if (!nvgpu_mem_is_valid(mem)) {
 		nvgpu_err(g, "no graphics context allocated");
@@ -1669,7 +1669,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	pm_ctx = &gr_ctx->pm_ctx;
 	gr_mem = &gr_ctx->mem;
 	if (!nvgpu_mem_is_valid(gr_mem)) {
@@ -1825,7 +1825,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 	if (gr->ctx_vars.local_golden_image == NULL) {
 		return -EINVAL;
@@ -2661,9 +2661,9 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
-	g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
-	g_bfr_index = tsg->gr_ctx.global_ctx_buffer_index;
+	g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
+	g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;
+	g_bfr_index = tsg->gr_ctx->global_ctx_buffer_index;
 
 	/* Circular Buffer */
 	if (c->vpr &&
@@ -2744,7 +2744,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[PRIV_ACCESS_MAP_VA] = mem->size;
 	g_bfr_index[PRIV_ACCESS_MAP_VA] = PRIV_ACCESS_MAP;
 
-	tsg->gr_ctx.global_ctx_buffer_mapped = true;
+	tsg->gr_ctx->global_ctx_buffer_mapped = true;
 
 #ifdef CONFIG_GK20A_CTXSW_TRACE
 	/* FECS trace buffer */
@@ -2763,7 +2763,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	return 0;
 
 clean_up:
-	gr_gk20a_unmap_global_ctx_buffers(g, ch_vm, &tsg->gr_ctx);
+	gr_gk20a_unmap_global_ctx_buffers(g, ch_vm, tsg->gr_ctx);
 	return -ENOMEM;
 }
 
@@ -2812,7 +2812,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
 			struct tsg_gk20a *tsg, u32 class, u32 padding)
 {
-	struct nvgpu_gr_ctx *gr_ctx = &tsg->gr_ctx;
+	struct nvgpu_gr_ctx *gr_ctx = tsg->gr_ctx;
 	int err;
 
 	if (tsg->vm == NULL) {
@@ -2835,7 +2835,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 {
 	nvgpu_log_fn(g, " ");
 
-	if (gr_ctx->mem.gpu_va) {
+	if (gr_ctx != NULL) {
 		gr_gk20a_unmap_global_ctx_buffers(g, vm, gr_ctx);
 		gr_gk20a_free_channel_patch_ctx(g, vm, gr_ctx);
 		gr_gk20a_free_channel_pm_ctx(g, vm, gr_ctx);
@@ -2863,7 +2863,7 @@ void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
 		nvgpu_err(g, "No address space bound");
 		return;
 	}
-	tsg->g->ops.gr.free_gr_ctx(g, tsg->vm, &tsg->gr_ctx);
+	tsg->g->ops.gr.free_gr_ctx(g, tsg->vm, tsg->gr_ctx);
 }
 
 u32 gr_gk20a_get_patch_slots(struct gk20a *g)
@@ -2887,7 +2887,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	patch_ctx = &tsg->gr_ctx.patch_ctx;
+	patch_ctx = &tsg->gr_ctx->patch_ctx;
 
 	alloc_size = g->ops.gr.get_patch_slots(g) *
 		PATCH_CTX_SLOTS_REQUIRED_PER_ENTRY;
@@ -2967,7 +2967,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	}
 
 	tsg = &f->tsg[c->tsgid];
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {
 		tsg->vm = c->vm;
@@ -3661,7 +3661,7 @@ int gr_gk20a_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 		return -EINVAL;
 	}
 
-	zcull_ctx = &tsg->gr_ctx.zcull_ctx;
+	zcull_ctx = &tsg->gr_ctx->zcull_ctx;
 	zcull_ctx->ctx_sw_mode = mode;
 	zcull_ctx->gpu_va = zcull_va;
 
@@ -6746,7 +6746,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	g->ops.gr.init_ovr_sm_dsm_perf();
 	g->ops.gr.init_sm_dsm_reg_info();
 	g->ops.gr.get_ovr_perf_regs(g, &num_ovr_perf_regs, &ovr_perf_regs);
@@ -8034,7 +8034,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	if (ch_is_curr_ctx) {
 		for (pass = 0; pass < 2; pass++) {
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 6b93736ab..af4b2b97c 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -195,7 +195,7 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	ch_ctx = &tsg->gr_ctx;
+	ch_ctx = tsg->gr_ctx;
 
 	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_ds_tga_constraintlogic_r(),
 		gr_ds_tga_constraintlogic_beta_cbsize_f(gr->attrib_cb_default_size) |
@@ -931,7 +931,7 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
 		return;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
 		nvgpu_log_info(g, "CTA: %x", cta_preempt_option);
 		nvgpu_mem_wr(g, mem,
@@ -1097,7 +1097,7 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 	if (!nvgpu_mem_is_valid(mem) || c->vpr) {
 		return -EINVAL;
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index a6b9ba435..4f0dabc4a 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -414,7 +414,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (gr_ctx->graphics_preempt_mode ==
 		NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
 		attrib_size_in_chunk = gr->attrib_cb_gfxp_size;
@@ -1199,7 +1199,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		return;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (gr_ctx->graphics_preempt_mode ==
 		NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
 		nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option);
@@ -1737,7 +1737,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	if (gr_ctx->cilp_preempt_pending) {
 		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
@@ -1818,7 +1818,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	/* The ucode is self-clearing, so all we need to do here is
 	   to clear cilp_preempt_pending.
 	 */
@@ -1861,7 +1861,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
 			return -EINVAL;
 		}
 
-		cilp_enabled = (tsg->gr_ctx.compute_preempt_mode ==
+		cilp_enabled = (tsg->gr_ctx->compute_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
 	}
 
@@ -1968,7 +1968,7 @@ static int gr_gp10b_get_cilp_preempt_pending_chid(struct gk20a *g, int *__chid)
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	if (gr_ctx->cilp_preempt_pending) {
 		*__chid = chid;
@@ -2084,7 +2084,7 @@ bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	*cilp_preempt_pending = false;
 
@@ -2173,7 +2173,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
 			goto clean_up;
 		}
 
-		gr_ctx = &tsg->gr_ctx;
+		gr_ctx = tsg->gr_ctx;
 
 		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
 				   NVGPU_TIMER_CPU_TIMER);
@@ -2212,7 +2212,7 @@ int gr_gp10b_set_boosted_ctx(struct channel_gk20a *ch,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	gr_ctx->boosted_ctx = boost;
 	mem = &gr_ctx->mem;
 
@@ -2270,7 +2270,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 	}
 
 	vm = tsg->vm;
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	mem = &gr_ctx->mem;
 
 	/* skip setting anything if both modes are already set */
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index aa500115c..1c09c731a 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -1682,7 +1682,7 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 		return;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	if (gr_ctx->graphics_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) {
@@ -2384,7 +2384,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 			return -EINVAL;
 		}
 
-		cilp_enabled = (tsg->gr_ctx.compute_preempt_mode ==
+		cilp_enabled = (tsg->gr_ctx->compute_preempt_mode ==
 			NVGPU_PREEMPTION_MODE_COMPUTE_CILP);
 	}
 
diff --git a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
index 661b06f20..6d2cd77d9 100644
--- a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
@@ -109,7 +109,7 @@ int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	g->ops.mm.l2_flush(g, true);
 
diff --git a/drivers/gpu/nvgpu/include/nvgpu/tsg.h b/drivers/gpu/nvgpu/include/nvgpu/tsg.h
index 68e6938c9..2341090e5 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/tsg.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/tsg.h
@@ -54,7 +54,7 @@ struct tsg_gk20a {
 
 	struct nvgpu_mem *eng_method_buffers;
 
-	struct nvgpu_gr_ctx gr_ctx;
+	struct nvgpu_gr_ctx *gr_ctx;
 
 	struct nvgpu_ref refcount;
 	struct nvgpu_list_node ch_list;
diff --git a/drivers/gpu/nvgpu/os/linux/debug_fifo.c b/drivers/gpu/nvgpu/os/linux/debug_fifo.c
index 0c791a422..7b7b58fc4 100644
--- a/drivers/gpu/nvgpu/os/linux/debug_fifo.c
+++ b/drivers/gpu/nvgpu/os/linux/debug_fifo.c
@@ -93,8 +93,8 @@ static int gk20a_fifo_sched_debugfs_seq_show(
 			tsg->timeslice_us,
 			ch->timeout_ms_max,
 			tsg->interleave_level,
-			tsg->gr_ctx.graphics_preempt_mode,
-			tsg->gr_ctx.compute_preempt_mode);
+			tsg->gr_ctx->graphics_preempt_mode,
+			tsg->gr_ctx->compute_preempt_mode);
 		gk20a_channel_put(ch);
 	}
 	return 0;
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
index b681dcbc4..fa4fe2b1d 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_channel.c
@@ -91,9 +91,9 @@ static void gk20a_channel_trace_sched_param(
 		ch->timeout_ms_max,
 		gk20a_fifo_interleave_level_name(tsg->interleave_level),
 		gr_gk20a_graphics_preempt_mode_name(
-			tsg->gr_ctx.graphics_preempt_mode),
+			tsg->gr_ctx->graphics_preempt_mode),
 		gr_gk20a_compute_preempt_mode_name(
-			tsg->gr_ctx.compute_preempt_mode));
+			tsg->gr_ctx->compute_preempt_mode));
 }
 
 /*
diff --git a/drivers/gpu/nvgpu/os/linux/sched.c b/drivers/gpu/nvgpu/os/linux/sched.c
index 3cf616448..4f9aa782f 100644
--- a/drivers/gpu/nvgpu/os/linux/sched.c
+++ b/drivers/gpu/nvgpu/os/linux/sched.c
@@ -209,9 +209,9 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched,
 	arg->timeslice = tsg->timeslice_us;
 
 	arg->graphics_preempt_mode =
-		tsg->gr_ctx.graphics_preempt_mode;
+		tsg->gr_ctx->graphics_preempt_mode;
 	arg->compute_preempt_mode =
-		tsg->gr_ctx.compute_preempt_mode;
+		tsg->gr_ctx->compute_preempt_mode;
 
 	nvgpu_ref_put(&tsg->refcount, nvgpu_ioctl_tsg_release);
 
diff --git a/drivers/gpu/nvgpu/tu104/gr_tu104.c b/drivers/gpu/nvgpu/tu104/gr_tu104.c
index 141882f55..ae0be3b53 100644
--- a/drivers/gpu/nvgpu/tu104/gr_tu104.c
+++ b/drivers/gpu/nvgpu/tu104/gr_tu104.c
@@ -175,9 +175,9 @@ int gr_tu104_map_global_ctx_buffers(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
-	g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
-	g_bfr_index = tsg->gr_ctx.global_ctx_buffer_index;
+	g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
+	g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;
+	g_bfr_index = tsg->gr_ctx->global_ctx_buffer_index;
 
 	/* RTV circular buffer */
 	mem = &gr->global_ctx_buffer[RTV_CIRCULAR_BUFFER].mem;
@@ -238,7 +238,7 @@ int gr_tu104_commit_global_ctx_buffers(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 	if (patch) {
 		int err;
 		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index 4283c9384..9d9412f85 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -261,7 +261,7 @@ int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
 		return -EINVAL;
 
 	vm = tsg->vm;
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	/* skip setting anything if both modes are already set */
 	if (graphics_preempt_mode &&
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 54f179e91..ca67ee4fb 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -186,8 +186,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	if (!tsg)
 		return -EINVAL;
 
-	g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
-	g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
+	g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
+	g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;
 
 	/* Circular Buffer */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
@@ -255,7 +255,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	if (err || msg.ret)
 		goto clean_up;
 
-	tsg->gr_ctx.global_ctx_buffer_mapped = true;
+	tsg->gr_ctx->global_ctx_buffer_mapped = true;
 	return 0;
 
 clean_up:
@@ -272,14 +272,14 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
 {
 	struct vm_gk20a *ch_vm = tsg->vm;
-	u64 *g_bfr_va = tsg->gr_ctx.global_ctx_buffer_va;
-	u64 *g_bfr_size = tsg->gr_ctx.global_ctx_buffer_size;
+	u64 *g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
+	u64 *g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;
 	u32 i;
 	struct gk20a *g = tsg->g;
 
 	nvgpu_log_fn(g, " ");
 
-	if (tsg->gr_ctx.global_ctx_buffer_mapped) {
+	if (tsg->gr_ctx->global_ctx_buffer_mapped) {
 		/* server will unmap on channel close */
 
 		for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
@@ -291,7 +291,7 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
 			}
 		}
 
-		tsg->gr_ctx.global_ctx_buffer_mapped = false;
+		tsg->gr_ctx->global_ctx_buffer_mapped = false;
 	}
 }
 
@@ -361,7 +361,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	if (!tsg)
 		return -EINVAL;
 
-	patch_ctx = &tsg->gr_ctx.patch_ctx;
+	patch_ctx = &tsg->gr_ctx->patch_ctx;
 	patch_ctx->mem.size = 128 * sizeof(u32);
 	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 						patch_ctx->mem.size,
@@ -385,7 +385,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 
 static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 {
-	struct patch_desc *patch_ctx = &tsg->gr_ctx.patch_ctx;
+	struct patch_desc *patch_ctx = &tsg->gr_ctx->patch_ctx;
 	struct gk20a *g = tsg->g;
 
 	nvgpu_log_fn(g, " ");
@@ -401,7 +401,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 
 static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
 {
-	struct nvgpu_gr_ctx *ch_ctx = &tsg->gr_ctx;
+	struct nvgpu_gr_ctx *ch_ctx = tsg->gr_ctx;
 	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
 	struct gk20a *g = tsg->g;
 
@@ -466,7 +466,7 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 	if (!tsg)
 		return -EINVAL;
 
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTX;
 	msg.handle = vgpu_get_handle(c->g);
@@ -481,7 +481,7 @@ static int vgpu_gr_ch_bind_gr_ctx(struct channel_gk20a *c)
 
 static int vgpu_gr_tsg_bind_gr_ctx(struct tsg_gk20a *tsg)
 {
-	struct nvgpu_gr_ctx *gr_ctx = &tsg->gr_ctx;
+	struct nvgpu_gr_ctx *gr_ctx = tsg->gr_ctx;
 	struct tegra_vgpu_cmd_msg msg = {0};
 	struct tegra_vgpu_tsg_bind_gr_ctx_params *p =
 				&msg.params.tsg_bind_gr_ctx;
@@ -526,7 +526,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 		return -EINVAL;
 
 	tsg = &f->tsg[c->tsgid];
-	gr_ctx = &tsg->gr_ctx;
+	gr_ctx = tsg->gr_ctx;
 
 	if (!nvgpu_mem_is_valid(&gr_ctx->mem)) {
 		tsg->vm = c->vm;
@@ -1075,7 +1075,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	ch_ctx = &tsg->gr_ctx;
+	ch_ctx = tsg->gr_ctx;
 	pm_ctx = &ch_ctx->pm_ctx;
 
 	if (mode == NVGPU_DBG_HWPM_CTXSW_MODE_CTXSW) {
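
The lifetime model this patch introduces: tsg->gr_ctx changes from a struct embedded in tsg_gk20a to a heap-allocated pointer, so gk20a_tsg_open() allocates it with nvgpu_kzalloc() and fails the open if the allocation fails, every consumer reads the pointer directly (gr_ctx = tsg->gr_ctx, or &tsg->gr_ctx->patch_ctx for sub-structures, instead of &tsg->gr_ctx), and gk20a_tsg_release() NULL-checks the pointer before touching GPU-side state, then frees it and resets it to NULL. A minimal stand-alone C sketch of that ownership pattern, using stand-in types and plain calloc()/free() in place of the nvgpu allocators, looks like this:

/*
 * Simplified sketch of the gr_ctx ownership model after this change.
 * The types and helpers below are stand-ins for the real nvgpu
 * structures (tsg_gk20a, nvgpu_gr_ctx, nvgpu_kzalloc/nvgpu_kfree),
 * kept small so only the allocate/check/free pattern is visible.
 */
#include <stdbool.h>
#include <stdlib.h>

struct gr_ctx {
	bool mem_valid;		/* stands in for nvgpu_mem_is_valid(&gr_ctx->mem) */
};

struct tsg {
	struct gr_ctx *gr_ctx;	/* previously embedded by value, now a pointer */
};

/* open: allocate the context object up front; fail the open if that fails */
static int tsg_open(struct tsg *tsg)
{
	tsg->gr_ctx = calloc(1, sizeof(*tsg->gr_ctx));
	if (tsg->gr_ctx == NULL)
		return -1;
	return 0;
}

/* release: only look inside the context when the pointer is valid */
static void tsg_release(struct tsg *tsg)
{
	if (tsg->gr_ctx != NULL && tsg->gr_ctx->mem_valid) {
		/* free GPU-side buffers here (cf. gr_gk20a_free_tsg_gr_ctx) */
	}
	free(tsg->gr_ctx);
	tsg->gr_ctx = NULL;
}

int main(void)
{
	struct tsg tsg = { 0 };

	if (tsg_open(&tsg) != 0)
		return 1;
	tsg_release(&tsg);	/* safe even if gr_ctx was never populated */
	return 0;
}

Because the free paths reset the pointer to NULL after nvgpu_kfree(), a release on the error path of gk20a_tsg_open() and the later gk20a_tsg_release() cannot double-free the same allocation.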