diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index 74dff7a06..0ed2fe67f 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -397,7 +397,11 @@ nvgpu-$(CONFIG_GK20A_VIDMEM) += \
 nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
 	common/vgpu/ltc_vgpu.o \
-	common/vgpu/gr_vgpu.o \
+	common/vgpu/gr/gr_vgpu.o \
+	common/vgpu/gr/ctx_vgpu.o \
+	common/vgpu/gr/vgpu_gr_gm20b.o \
+	common/vgpu/gr/vgpu_gr_gp10b.o \
+	common/vgpu/gr/vgpu_gr_gv11b.o \
 	common/vgpu/fifo/fifo_vgpu.o \
 	common/vgpu/fifo/runlist_vgpu.o \
 	common/vgpu/fifo/vgpu_fifo_gv11b.o \
@@ -411,14 +415,11 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
 	common/vgpu/ecc_vgpu.o \
 	common/vgpu/clk_vgpu.o \
 	common/vgpu/fecs_trace_vgpu.o \
-	common/vgpu/gm20b/vgpu_gr_gm20b.o \
 	common/vgpu/gp10b/vgpu_hal_gp10b.o \
-	common/vgpu/gp10b/vgpu_gr_gp10b.o \
 	common/vgpu/gp10b/vgpu_fuse_gp10b.o \
 	common/vgpu/gp10b/vgpu_mm_gp10b.o \
 	common/vgpu/gv11b/vgpu_gv11b.o \
 	common/vgpu/gv11b/vgpu_hal_gv11b.o \
-	common/vgpu/gv11b/vgpu_gr_gv11b.o \
 	common/vgpu/gv11b/vgpu_subctx_gv11b.o \
 	common/vgpu/gv11b/vgpu_tsg_gv11b.o \
diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources
index f28da6525..86c74fe3b 100644
--- a/drivers/gpu/nvgpu/Makefile.sources
+++ b/drivers/gpu/nvgpu/Makefile.sources
@@ -319,7 +319,11 @@ srcs += common/sim.c \
 	common/vgpu/perf/perf_vgpu.c \
 	common/vgpu/fecs_trace_vgpu.c \
 	common/vgpu/mm_vgpu.c \
-	common/vgpu/gr_vgpu.c \
+	common/vgpu/gr/gr_vgpu.c \
+	common/vgpu/gr/ctx_vgpu.c \
+	common/vgpu/gr/vgpu_gr_gv11b.c \
+	common/vgpu/gr/vgpu_gr_gp10b.c \
+	common/vgpu/gr/vgpu_gr_gm20b.c \
 	common/vgpu/clk_vgpu.c \
 	common/vgpu/debugger_vgpu.c \
 	common/vgpu/ltc_vgpu.c \
@@ -328,12 +332,9 @@ srcs += common/sim.c \
 	common/vgpu/gv11b/vgpu_hal_gv11b.c \
 	common/vgpu/gv11b/vgpu_tsg_gv11b.c \
 	common/vgpu/gv11b/vgpu_subctx_gv11b.c \
-	common/vgpu/gv11b/vgpu_gr_gv11b.c \
 	common/vgpu/gp10b/vgpu_hal_gp10b.c \
 	common/vgpu/gp10b/vgpu_fuse_gp10b.c \
 	common/vgpu/gp10b/vgpu_mm_gp10b.c \
-	common/vgpu/gp10b/vgpu_gr_gp10b.c \
-	common/vgpu/gm20b/vgpu_gr_gm20b.c \
 	hal/bus/bus_gk20a.c \
 	hal/bus/bus_gm20b.c \
 	hal/bus/bus_gp10b.c \
diff --git a/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_hal_gp10b.c b/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_hal_gp10b.c
index 0f77c3221..a172e7050 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_hal_gp10b.c
+++ b/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_hal_gp10b.c
@@ -43,7 +43,8 @@
 
 #include "common/vgpu/fifo/fifo_vgpu.h"
 #include "common/vgpu/fifo/runlist_vgpu.h"
-#include "common/vgpu/gr_vgpu.h"
+#include "common/vgpu/gr/gr_vgpu.h"
+#include "common/vgpu/gr/ctx_vgpu.h"
 #include "common/vgpu/ltc_vgpu.h"
 #include "common/vgpu/mm_vgpu.h"
 #include "common/vgpu/debugger_vgpu.h"
@@ -52,8 +53,8 @@
 #include "common/vgpu/perf/cyclestats_snapshot_vgpu.h"
 #include "gp10b/gp10b.h"
 #include "gp10b/hal_gp10b.h"
-#include "common/vgpu/gm20b/vgpu_gr_gm20b.h"
-#include "vgpu_gr_gp10b.h"
+#include "common/vgpu/gr/vgpu_gr_gm20b.h"
+#include "common/vgpu/gr/vgpu_gr_gp10b.h"
 #include "vgpu_mm_gp10b.h"
 #include "vgpu_fuse_gp10b.h"
 
diff --git a/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.c
new file mode 100644
index 000000000..7876c9117
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.c
@@ -0,0 +1,340 @@
+/*
+ * Virtualized GPU Graphics
+ *
+ * Copyright (c) 2019, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <nvgpu/vgpu/vgpu.h>
+#include <nvgpu/vgpu/tegra_vgpu.h>
+#include <nvgpu/gk20a.h>
+#include <nvgpu/vm.h>
+#include <nvgpu/dma.h>
+#include <nvgpu/gmmu.h>
+#include <nvgpu/bug.h>
+#include <nvgpu/gr/ctx.h>
+#include <nvgpu/gr/global_ctx.h>
+
+#include "ctx_vgpu.h"
+
+int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
+		struct nvgpu_gr_ctx *gr_ctx,
+		struct vm_gk20a *vm)
+{
+	struct tegra_vgpu_cmd_msg msg = {0};
+	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
+	struct gr_gk20a *gr = &g->gr;
+	int err;
+
+	nvgpu_log_fn(g, " ");
+
+	if (gr->ctx_vars.golden_image_size == 0) {
+		return -EINVAL;
+	}
+
+	gr_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
+				gr->ctx_vars.golden_image_size,
+				GMMU_PAGE_SIZE_KERNEL);
+
+	if (!gr_ctx->mem.gpu_va) {
+		return -ENOMEM;
+	}
+	gr_ctx->mem.size = gr->ctx_vars.golden_image_size;
+	gr_ctx->mem.aperture = APERTURE_SYSMEM;
+
+	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
+	msg.handle = vgpu_get_handle(g);
+	p->as_handle = vm->handle;
+	p->gr_ctx_va = gr_ctx->mem.gpu_va;
+	p->tsg_id = gr_ctx->tsgid;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
+
+	if (unlikely(err)) {
+		nvgpu_err(g, "fail to alloc gr_ctx");
+		nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
+		gr_ctx->mem.aperture = APERTURE_INVALID;
+	}
+
+	return err;
+}
+
+void vgpu_gr_free_gr_ctx(struct gk20a *g,
+		struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx)
+{
+	nvgpu_log_fn(g, " ");
+
+	if (gr_ctx->mem.gpu_va) {
+		struct tegra_vgpu_cmd_msg msg;
+		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
+		int err;
+
+		msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
+		msg.handle = vgpu_get_handle(g);
+		p->tsg_id = gr_ctx->tsgid;
+		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+		WARN_ON(err || msg.ret);
+
+		nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
+
+		vgpu_gr_unmap_global_ctx_buffers(g, gr_ctx, vm);
+		vgpu_gr_free_patch_ctx(g, vm, gr_ctx);
+		vgpu_gr_free_pm_ctx(g, vm, gr_ctx);
+
+		nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
+		nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
+		nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
+		nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);
+
+		(void) memset(gr_ctx, 0, sizeof(*gr_ctx));
+	}
+}
+
+int vgpu_gr_alloc_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
+		struct vm_gk20a *ch_vm, u64 virt_ctx)
+{
+	struct patch_desc *patch_ctx;
+	struct tegra_vgpu_cmd_msg msg;
+	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
+	int err;
+
+	nvgpu_log_fn(g, " ");
+
+	patch_ctx = &gr_ctx->patch_ctx;
+	patch_ctx->mem.size = 128 * sizeof(u32);
+	patch_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch_vm,
+					patch_ctx->mem.size,
+					GMMU_PAGE_SIZE_KERNEL);
+	if (!patch_ctx->mem.gpu_va) {
+		return -ENOMEM;
+	}
+
+	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
+	msg.handle = vgpu_get_handle(g);
+	p->handle = virt_ctx;
+	p->patch_ctx_va = patch_ctx->mem.gpu_va;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	if (err || msg.ret) {
+		nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
+		err = -ENOMEM;
+	}
+
+	return err;
+}
+
+void vgpu_gr_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
+		struct nvgpu_gr_ctx *gr_ctx)
+{
+	struct patch_desc *patch_ctx = &gr_ctx->patch_ctx;
+
+	nvgpu_log_fn(g, " ");
+
+	if (patch_ctx->mem.gpu_va) {
+		/* server will free on channel close */
+
+		nvgpu_vm_free_va(vm, patch_ctx->mem.gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
+		patch_ctx->mem.gpu_va = 0;
+	}
+}
+
+int vgpu_gr_alloc_pm_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
+		struct vm_gk20a *vm)
+{
+	struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;
+
+	nvgpu_log_fn(g, " ");
+
+	if (pm_ctx->mem.gpu_va != 0ULL) {
+		return 0;
+	}
+
+	pm_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
+			g->gr.ctx_vars.pm_ctxsw_image_size,
+			GMMU_PAGE_SIZE_KERNEL);
+
+	if (!pm_ctx->mem.gpu_va) {
+		nvgpu_err(g, "failed to map pm ctxt buffer");
+		return -ENOMEM;
+	}
+
+	pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
+	return 0;
+}
+
+void vgpu_gr_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
+		struct nvgpu_gr_ctx *gr_ctx)
+{
+	struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;
+
+	nvgpu_log_fn(g, " ");
+
+	/* check if hwpm was ever initialized.  If not, nothing to do */
+	if (pm_ctx->mem.gpu_va == 0) {
+		return;
+	}
+
+	/* server will free on channel close */
+
+	nvgpu_vm_free_va(vm, pm_ctx->mem.gpu_va,
+			 GMMU_PAGE_SIZE_KERNEL);
+	pm_ctx->mem.gpu_va = 0;
+}
+
+void vgpu_gr_unmap_global_ctx_buffers(struct gk20a *g,
+		struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *ch_vm)
+{
+	u64 *g_bfr_va = gr_ctx->global_ctx_buffer_va;
+	u32 i;
+
+	nvgpu_log_fn(g, " ");
+
+	if (gr_ctx->global_ctx_buffer_mapped) {
+		/* server will unmap on channel close */
+
+		for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
+			if (g_bfr_va[i]) {
+				nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+						 GMMU_PAGE_SIZE_KERNEL);
+				g_bfr_va[i] = 0;
+			}
+		}
+
+		gr_ctx->global_ctx_buffer_mapped = false;
+	}
+}
+
+int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
+		struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
+		struct vm_gk20a *ch_vm, u64 virt_ctx)
+{
+	struct tegra_vgpu_cmd_msg msg;
+	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
+	u64 *g_bfr_va;
+	u64 gpu_va;
+	u32 i;
+	int err;
+
+	nvgpu_log_fn(g, " ");
+
+	g_bfr_va = gr_ctx->global_ctx_buffer_va;
+
+	/* Circular Buffer */
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_CIRCULAR),
+			GMMU_PAGE_SIZE_KERNEL);
+
+	if (!gpu_va) {
+		goto clean_up;
+	}
+	g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
+
+	/* Attribute Buffer */
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_ATTRIBUTE),
+			GMMU_PAGE_SIZE_KERNEL);
+
+	if (!gpu_va) {
+		goto clean_up;
+	}
+	g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
+
+	/* Page Pool */
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_PAGEPOOL),
+			GMMU_PAGE_SIZE_KERNEL);
+	if (!gpu_va) {
+		goto clean_up;
+	}
+	g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
+
+	/* Priv register Access Map */
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP),
+			GMMU_PAGE_SIZE_KERNEL);
+	if (!gpu_va) {
+		goto clean_up;
+	}
+	g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA] = gpu_va;
+
+	/* FECS trace Buffer */
+#ifdef CONFIG_GK20A_CTXSW_TRACE
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
+			nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
+				NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER),
+			GMMU_PAGE_SIZE_KERNEL);
+
+	if (!gpu_va)
+		goto clean_up;
+
+	g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA] = gpu_va;
+#endif
+	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
+	msg.handle = vgpu_get_handle(g);
+	p->handle = virt_ctx;
+	p->cb_va = g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA];
+	p->attr_va = g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA];
+	p->page_pool_va = g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA];
+	p->priv_access_map_va = g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA];
+#ifdef CONFIG_GK20A_CTXSW_TRACE
+	p->fecs_trace_va = g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA];
+#endif
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	if (err || msg.ret) {
+		goto clean_up;
+	}
+
+	gr_ctx->global_ctx_buffer_mapped = true;
+	return 0;
+
+ clean_up:
+	for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
+		if (g_bfr_va[i]) {
+			nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+					 GMMU_PAGE_SIZE_KERNEL);
+			g_bfr_va[i] = 0;
+		}
+	}
+	return -ENOMEM;
+}
+
+/* load saved fresh copy of golden image into channel gr_ctx */
+int vgpu_gr_load_golden_ctx_image(struct gk20a *g, u64 virt_ctx)
+{
+	struct tegra_vgpu_cmd_msg msg;
+	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
+	int err;
+
+	nvgpu_log_fn(g, " ");
+
+	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
+	msg.handle = vgpu_get_handle(g);
+	p->handle = virt_ctx;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+
+	return (err || msg.ret) ? -1 : 0;
+}
diff --git a/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.h b/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.h
new file mode 100644
index 000000000..5e1ed2e67
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/ctx_vgpu.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef CTX_VGPU_H
+#define CTX_VGPU_H
+
+struct gk20a;
+struct nvgpu_gr_ctx;
+struct vm_gk20a;
+struct nvgpu_gr_global_ctx_buffer_desc;
+
+int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
+		struct nvgpu_gr_ctx *gr_ctx,
+		struct vm_gk20a *vm);
+void vgpu_gr_free_gr_ctx(struct gk20a *g,
+		struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx);
+int vgpu_gr_alloc_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
+		struct vm_gk20a *ch_vm, u64 virt_ctx);
+void vgpu_gr_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
+		struct nvgpu_gr_ctx *gr_ctx);
+int vgpu_gr_alloc_pm_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
+		struct vm_gk20a *vm);
+void vgpu_gr_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
+		struct nvgpu_gr_ctx *gr_ctx);
+void vgpu_gr_unmap_global_ctx_buffers(struct gk20a *g,
+		struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *ch_vm);
+int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
+		struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
+		struct vm_gk20a *ch_vm, u64 virt_ctx);
+int vgpu_gr_load_golden_ctx_image(struct gk20a *g, u64 virt_ctx);
+
+#endif
diff --git a/drivers/gpu/nvgpu/common/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
similarity index 78%
rename from drivers/gpu/nvgpu/common/vgpu/gr_vgpu.c
rename to drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
index 2983a881d..865eb6bc9 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.c
@@ -40,11 +40,11 @@
 #include 
 #include 
 
-#include "gr_vgpu.h"
-#include "gk20a/fecs_trace_gk20a.h"
-
 #include 
 
+#include "gr_vgpu.h"
+#include "ctx_vgpu.h"
+
 void vgpu_gr_detect_sm_arch(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
@@ -93,24 +93,6 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
 	return (err || msg.ret) ? -1 : 0;
 }
 
-/* load saved fresh copy of gloden image into channel gr_ctx */
-static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
-					struct channel_gk20a *c)
-{
-	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
-	int err;
-
-	nvgpu_log_fn(g, " ");
-
-	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
-	msg.handle = vgpu_get_handle(g);
-	p->handle = c->virt_ctx;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-
-	return (err || msg.ret) ? -1 : 0;
-}
-
 int vgpu_gr_init_ctx_state(struct gk20a *g)
 {
 	struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
@@ -133,7 +115,7 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
 	return 0;
 }
 
-static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
+int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
 {
 	struct gr_gk20a *gr = &g->gr;
 	u32 size;
@@ -183,293 +165,11 @@
 	return 0;
 }
 
-static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
-		struct channel_gk20a *c)
-{
-	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
-	struct vm_gk20a *ch_vm = c->vm;
-	struct tsg_gk20a *tsg;
-	u64 *g_bfr_va;
-	struct gr_gk20a *gr = &g->gr;
-	u64 gpu_va;
-	u32 i;
-	int err;
-
-	nvgpu_log_fn(g, " ");
-
-	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
-		return -EINVAL;
-	}
-
-	g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
-
-	/* Circular Buffer */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-			nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
-				NVGPU_GR_GLOBAL_CTX_CIRCULAR),
-			GMMU_PAGE_SIZE_KERNEL);
-
-	if (!gpu_va) {
-		goto clean_up;
-	}
-	g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
-
-	/* Attribute Buffer */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-			nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
-				NVGPU_GR_GLOBAL_CTX_ATTRIBUTE),
-			GMMU_PAGE_SIZE_KERNEL);
-
-	if (!gpu_va) {
-		goto clean_up;
-	}
-	g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
-
-	/* Page Pool */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-			nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
-				NVGPU_GR_GLOBAL_CTX_PAGEPOOL),
-			GMMU_PAGE_SIZE_KERNEL);
-	if (!gpu_va) {
-		goto clean_up;
-	}
-	g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
-
-	/* Priv register Access Map */
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-			nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
-				NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP),
-			GMMU_PAGE_SIZE_KERNEL);
-	if (!gpu_va) {
-		goto clean_up;
-	}
-	g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA] = gpu_va;
-
-	/* FECS trace Buffer */
-#ifdef CONFIG_GK20A_CTXSW_TRACE
-	gpu_va = nvgpu_vm_alloc_va(ch_vm,
-			nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
-				NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER),
-			GMMU_PAGE_SIZE_KERNEL);
-
-	if (!gpu_va)
-		goto clean_up;
-
-	g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA] = gpu_va;
-#endif
-	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
-	msg.handle = vgpu_get_handle(g);
-	p->handle = c->virt_ctx;
-	p->cb_va = g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA];
-	p->attr_va = g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA];
-	p->page_pool_va = g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA];
-	p->priv_access_map_va = g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA];
-#ifdef CONFIG_GK20A_CTXSW_TRACE
-	p->fecs_trace_va = g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA];
-#endif
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	if (err || msg.ret) {
-		goto clean_up;
-	}
-
-	tsg->gr_ctx->global_ctx_buffer_mapped = true;
-	return 0;
-
- clean_up:
-	for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
-		if (g_bfr_va[i]) {
-			nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-					 GMMU_PAGE_SIZE_KERNEL);
-			g_bfr_va[i] = 0;
-		}
-	}
-	return -ENOMEM;
-}
-
-static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
-{
-	struct vm_gk20a *ch_vm = tsg->vm;
-	u64 *g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
-	u32 i;
-	struct gk20a *g = tsg->g;
-
-	nvgpu_log_fn(g, " ");
-
-	if (tsg->gr_ctx->global_ctx_buffer_mapped) {
-		/* server will unmap on channel close */
-
-		for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
-			if (g_bfr_va[i]) {
-				nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-						 GMMU_PAGE_SIZE_KERNEL);
-				g_bfr_va[i] = 0;
-			}
-		}
-
-		tsg->gr_ctx->global_ctx_buffer_mapped = false;
-	}
-}
-
-int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
-		struct nvgpu_gr_ctx *gr_ctx,
-		struct vm_gk20a *vm)
-{
-	struct tegra_vgpu_cmd_msg msg = {0};
-	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
-	struct gr_gk20a *gr = &g->gr;
-	int err;
-
-	nvgpu_log_fn(g, " ");
-
-	if (gr->ctx_vars.golden_image_size == 0) {
-		return -EINVAL;
-	}
-
-	gr_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
-				gr->ctx_vars.golden_image_size,
-				GMMU_PAGE_SIZE_KERNEL);
-
-	if (!gr_ctx->mem.gpu_va) {
-		return -ENOMEM;
-	}
-	gr_ctx->mem.size = gr->ctx_vars.golden_image_size;
-	gr_ctx->mem.aperture = APERTURE_SYSMEM;
-
-	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
-	msg.handle = vgpu_get_handle(g);
-	p->as_handle = vm->handle;
-	p->gr_ctx_va = gr_ctx->mem.gpu_va;
-	p->tsg_id = gr_ctx->tsgid;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	err = err ? err : msg.ret;
-
-	if (unlikely(err)) {
-		nvgpu_err(g, "fail to alloc gr_ctx");
-		nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				 GMMU_PAGE_SIZE_KERNEL);
-		gr_ctx->mem.aperture = APERTURE_INVALID;
-	}
-
-	return err;
-}
-
-static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
-		struct channel_gk20a *c)
-{
-	struct tsg_gk20a *tsg;
-	struct patch_desc *patch_ctx;
-	struct vm_gk20a *ch_vm = c->vm;
-	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
-	int err;
-
-	nvgpu_log_fn(g, " ");
-
-	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
-		return -EINVAL;
-	}
-
-	patch_ctx = &tsg->gr_ctx->patch_ctx;
-	patch_ctx->mem.size = 128 * sizeof(u32);
-	patch_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch_vm,
-					patch_ctx->mem.size,
-					GMMU_PAGE_SIZE_KERNEL);
-	if (!patch_ctx->mem.gpu_va) {
-		return -ENOMEM;
-	}
-
-	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
-	msg.handle = vgpu_get_handle(g);
-	p->handle = c->virt_ctx;
-	p->patch_ctx_va = patch_ctx->mem.gpu_va;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	if (err || msg.ret) {
-		nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				 GMMU_PAGE_SIZE_KERNEL);
-		err = -ENOMEM;
-	}
-
-	return err;
-}
-
-static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
-{
-	struct patch_desc *patch_ctx = &tsg->gr_ctx->patch_ctx;
-	struct gk20a *g = tsg->g;
-
-	nvgpu_log_fn(g, " ");
-
-	if (patch_ctx->mem.gpu_va) {
-		/* server will free on channel close */
-
-		nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va,
-				 GMMU_PAGE_SIZE_KERNEL);
-		patch_ctx->mem.gpu_va = 0;
-	}
-}
-
-static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
-{
-	struct nvgpu_gr_ctx *ch_ctx = tsg->gr_ctx;
-	struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
-	struct gk20a *g = tsg->g;
-
-	nvgpu_log_fn(g, " ");
-
-	/* check if hwpm was ever initialized.  If not, nothing to do */
-	if (pm_ctx->mem.gpu_va == 0) {
-		return;
-	}
-
-	/* server will free on channel close */
-
-	nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va,
-			 GMMU_PAGE_SIZE_KERNEL);
-	pm_ctx->mem.gpu_va = 0;
-}
-
-void vgpu_gr_free_gr_ctx(struct gk20a *g,
-		struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx)
-{
-	struct tsg_gk20a *tsg;
-
-	nvgpu_log_fn(g, " ");
-
-	if (gr_ctx->mem.gpu_va) {
-		struct tegra_vgpu_cmd_msg msg;
-		struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
-		int err;
-
-		msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
-		msg.handle = vgpu_get_handle(g);
-		p->tsg_id = gr_ctx->tsgid;
-		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-		WARN_ON(err || msg.ret);
-
-		nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				 GMMU_PAGE_SIZE_KERNEL);
-
-		tsg = &g->fifo.tsg[gr_ctx->tsgid];
-		vgpu_gr_unmap_global_ctx_buffers(tsg);
-		vgpu_gr_free_channel_patch_ctx(tsg);
-		vgpu_gr_free_channel_pm_ctx(tsg);
-
-		nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
-		nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
-		nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
-		nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);
-
-		(void) memset(gr_ctx, 0, sizeof(*gr_ctx));
-	}
-}
-
 int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 {
 	struct gk20a *g = c->g;
 	struct nvgpu_gr_ctx *gr_ctx = NULL;
+	struct gr_gk20a *gr = &g->gr;
 	struct tsg_gk20a *tsg = NULL;
 	int err = 0;
 
@@ -504,14 +204,15 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 			c->vm);
 		if (err) {
 			nvgpu_err(g,
-				"fail to allocate TSG gr ctx buffer, err=%d", err);
+				"fail to allocate TSG gr ctx buffer, err=%d",
+				err);
 			nvgpu_vm_put(tsg->vm);
 			tsg->vm = NULL;
 			goto out;
 		}
 
 		/* allocate patch buffer */
-		err = vgpu_gr_alloc_channel_patch_ctx(g, c);
+		err = vgpu_gr_alloc_patch_ctx(g, gr_ctx, c->vm, c->virt_ctx);
 		if (err) {
 			nvgpu_err(g, "fail to allocate patch buffer");
 			goto out;
@@ -523,7 +224,9 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 			flags);
 
 		/* map global buffer to channel gpu_va and commit */
-		err = vgpu_gr_map_global_ctx_buffers(g, c);
+		err = vgpu_gr_map_global_ctx_buffers(g, gr_ctx,
+				gr->global_ctx_buffer, c->vm,
+				c->virt_ctx);
 		if (err) {
 			nvgpu_err(g, "fail to map global ctx buffer");
 			goto out;
@@ -544,7 +247,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 
 		/* load golden image */
 		err = gr_gk20a_elpg_protected_call(g,
-			vgpu_gr_load_golden_ctx_image(g, c));
+			vgpu_gr_load_golden_ctx_image(g, c->virt_ctx));
 		if (err) {
 			nvgpu_err(g, "fail to load golden ctx image");
 			goto out;
@@ -559,7 +262,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 #ifdef CONFIG_GK20A_CTXSW_TRACE
 		/* for fecs bind channel */
 		err = gr_gk20a_elpg_protected_call(g,
-			vgpu_gr_load_golden_ctx_image(g, c));
+			vgpu_gr_load_golden_ctx_image(g, c->virt_ctx));
 		if (err) {
 			nvgpu_err(g, "fail to load golden ctx image");
 			goto out;
@@ -1158,16 +861,13 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 
 	if (mode != NVGPU_GR_CTX_HWPM_CTXSW_MODE_NO_CTXSW) {
 		/* Allocate buffer if necessary */
-		if (pm_ctx->mem.gpu_va == 0) {
-			pm_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch->vm,
-					g->gr.ctx_vars.pm_ctxsw_image_size,
-					GMMU_PAGE_SIZE_KERNEL);
-
-			if (!pm_ctx->mem.gpu_va) {
-				return -ENOMEM;
-			}
-			pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
+		err = vgpu_gr_alloc_pm_ctx(g, tsg->gr_ctx, ch->vm);
+		if (err != 0) {
+			nvgpu_err(g,
+				"failed to allocate pm ctxt buffer");
+			return err;
 		}
+		pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
 	}
 
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;
diff --git a/drivers/gpu/nvgpu/common/vgpu/gr_vgpu.h b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.h
similarity index 97%
rename from drivers/gpu/nvgpu/common/vgpu/gr_vgpu.h
rename to drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.h
index 7b71c132a..9c5b6f38e 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gr_vgpu.h
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/gr_vgpu.h
@@ -36,6 +36,8 @@ struct dbg_session_gk20a;
 struct tsg_gk20a;
 
 void vgpu_gr_detect_sm_arch(struct gk20a *g);
+int vgpu_gr_init_ctx_state(struct gk20a *g);
+int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g);
 void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
 void vgpu_gr_free_tsg_ctx(struct tsg_gk20a *tsg);
 int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
@@ -55,10 +57,10 @@ int vgpu_gr_add_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
 		struct nvgpu_gr_zbc_entry *zbc_val);
 int vgpu_gr_query_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
 		struct nvgpu_gr_zbc_query_params *query_params);
-int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
-	struct channel_gk20a *ch, u64 sms, bool enable);
 int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
 	struct channel_gk20a *ch, bool enable);
+int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
+	struct channel_gk20a *ch, u64 sms, bool enable);
 int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 	struct channel_gk20a *ch, u64 gpu_va, u32 mode);
 int vgpu_gr_clear_sm_error_state(struct gk20a *g,
diff --git a/drivers/gpu/nvgpu/common/vgpu/gm20b/vgpu_gr_gm20b.c b/drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gm20b.c
similarity index 100%
rename from drivers/gpu/nvgpu/common/vgpu/gm20b/vgpu_gr_gm20b.c
rename to drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gm20b.c
diff --git a/drivers/gpu/nvgpu/common/vgpu/gm20b/vgpu_gr_gm20b.h b/drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gm20b.h
similarity index 100%
rename from drivers/gpu/nvgpu/common/vgpu/gm20b/vgpu_gr_gm20b.h
rename to drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gm20b.h
diff --git a/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gp10b.c
similarity index 99%
rename from drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_gr_gp10b.c
rename to drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gp10b.c
index be8d38d14..594ff7840 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gp10b.c
@@ -30,10 +30,11 @@
 #include 
 #include 
 
-#include "common/vgpu/gm20b/vgpu_gr_gm20b.h"
+#include "ctx_vgpu.h"
+#include "vgpu_gr_gm20b.h"
+#include "vgpu_gr_gp10b.h"
 #include "gp10b/gr_gp10b.h"
 
-#include "vgpu_gr_gp10b.h"
-
 #include 
 
diff --git a/drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_gr_gp10b.h b/drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gp10b.h
similarity index 100%
rename from drivers/gpu/nvgpu/common/vgpu/gp10b/vgpu_gr_gp10b.h
rename to drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gp10b.h
diff --git a/drivers/gpu/nvgpu/common/vgpu/gv11b/vgpu_gr_gv11b.c b/drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gv11b.c
similarity index 94%
rename from drivers/gpu/nvgpu/common/vgpu/gv11b/vgpu_gr_gv11b.c
rename to drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gv11b.c
index f74c60ba6..60a6df7ce 100644
--- a/drivers/gpu/nvgpu/common/vgpu/gv11b/vgpu_gr_gv11b.c
+++ b/drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gv11b.c
@@ -22,10 +22,12 @@
 
 #include 
 
-#include "common/vgpu/gr_vgpu.h"
-#include "vgpu_subctx_gv11b.h"
+#include "gr_vgpu.h"
+#include "ctx_vgpu.h"
 #include "vgpu_gr_gv11b.h"
 
+#include "common/vgpu/gv11b/vgpu_subctx_gv11b.h"
"common/vgpu/gv11b/vgpu_subctx_gv11b.h" + int vgpu_gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va) { int err; diff --git a/drivers/gpu/nvgpu/common/vgpu/gv11b/vgpu_gr_gv11b.h b/drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gv11b.h similarity index 100% rename from drivers/gpu/nvgpu/common/vgpu/gv11b/vgpu_gr_gv11b.h rename to drivers/gpu/nvgpu/common/vgpu/gr/vgpu_gr_gv11b.h diff --git a/drivers/gpu/nvgpu/common/vgpu/gv11b/vgpu_hal_gv11b.c b/drivers/gpu/nvgpu/common/vgpu/gv11b/vgpu_hal_gv11b.c index 9b3815274..1da6feff4 100644 --- a/drivers/gpu/nvgpu/common/vgpu/gv11b/vgpu_hal_gv11b.c +++ b/drivers/gpu/nvgpu/common/vgpu/gv11b/vgpu_hal_gv11b.c @@ -55,7 +55,8 @@ #include "common/vgpu/fifo/fifo_vgpu.h" #include "common/vgpu/fifo/runlist_vgpu.h" -#include "common/vgpu/gr_vgpu.h" +#include "common/vgpu/gr/gr_vgpu.h" +#include "common/vgpu/gr/ctx_vgpu.h" #include "common/vgpu/ltc_vgpu.h" #include "common/vgpu/mm_vgpu.h" #include "common/vgpu/debugger_vgpu.h" @@ -63,9 +64,10 @@ #include "common/vgpu/fecs_trace_vgpu.h" #include "common/vgpu/perf/cyclestats_snapshot_vgpu.h" #include "common/vgpu/fifo/vgpu_fifo_gv11b.h" -#include "common/vgpu/gm20b/vgpu_gr_gm20b.h" +#include "common/vgpu/gr/vgpu_gr_gm20b.h" #include "common/vgpu/gp10b/vgpu_mm_gp10b.h" -#include "common/vgpu/gp10b/vgpu_gr_gp10b.h" +#include "common/vgpu/gr/vgpu_gr_gp10b.h" +#include "common/vgpu/gr/vgpu_gr_gv11b.h" #include "common/falcon/falcon_gk20a.h" @@ -95,7 +97,6 @@ #include #include "vgpu_gv11b.h" -#include "vgpu_gr_gv11b.h" #include "vgpu_subctx_gv11b.h" #include "vgpu_tsg_gv11b.h" diff --git a/drivers/gpu/nvgpu/include/nvgpu/vgpu/vgpu.h b/drivers/gpu/nvgpu/include/nvgpu/vgpu/vgpu.h index f49d0d8c0..9e0d7d2f6 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/vgpu/vgpu.h +++ b/drivers/gpu/nvgpu/include/nvgpu/vgpu/vgpu.h @@ -84,14 +84,8 @@ int vgpu_init_hal_os(struct gk20a *g); int vgpu_get_constants(struct gk20a *g); u64 vgpu_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset); int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info); -int vgpu_gr_alloc_gr_ctx(struct gk20a *g, - struct nvgpu_gr_ctx *gr_ctx, - struct vm_gk20a *vm); -void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm, - struct nvgpu_gr_ctx *gr_ctx); void vgpu_gr_handle_sm_esr_event(struct gk20a *g, struct tegra_vgpu_sm_esr_info *info); -int vgpu_gr_init_ctx_state(struct gk20a *g); int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info); int vgpu_init_mm_support(struct gk20a *g);