mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-23 18:16:01 +03:00
In nvgpu_gr_ctx_alloc_ctxsw_buffers, just return if ctxsw buffers have already been allocated.

Bug 200418468

Change-Id: I0fe0b0d851c2b304243f9fca2c19832806ba40f4
Signed-off-by: Peter Daifuku <pdaifuku@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1991656
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
461 lines
12 KiB
C
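The change described above is the early-return guard at the top of nvgpu_gr_ctx_alloc_ctxsw_buffers() in the listing below: if the ctxsw buffers are already valid, the function reports success without reallocating. As a standalone, hedged illustration of that idempotent-allocation pattern (not part of the nvgpu sources; the struct, helper names, and sizes here are invented for the example):

/* Minimal sketch of an idempotent allocator: a repeated call is a no-op.
 * All names below (demo_buf, demo_alloc_ctxsw_buffers, ...) are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_buf {
	void *cpu_va;	/* stand-in for the driver's buffer bookkeeping */
	size_t size;
};

static int demo_buf_is_valid(const struct demo_buf *buf)
{
	return buf->cpu_va != NULL;
}

static int demo_alloc_ctxsw_buffers(struct demo_buf *buf, size_t size)
{
	/* nothing to do if already initialized */
	if (demo_buf_is_valid(buf)) {
		return 0;
	}

	buf->cpu_va = calloc(1, size);
	if (buf->cpu_va == NULL) {
		return -1;
	}
	buf->size = size;
	return 0;
}

int main(void)
{
	struct demo_buf buf;

	memset(&buf, 0, sizeof(buf));

	/* First call allocates; second call returns early without leaking. */
	printf("first:  %d\n", demo_alloc_ctxsw_buffers(&buf, 4096));
	printf("second: %d\n", demo_alloc_ctxsw_buffers(&buf, 4096));

	free(buf.cpu_va);
	return 0;
}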
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/gk20a.h>
#include <nvgpu/gr/global_ctx.h>
#include <nvgpu/gr/ctx.h>
#include <nvgpu/vm.h>
#include <nvgpu/gmmu.h>

static void nvgpu_gr_ctx_unmap_global_ctx_buffers(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
	struct vm_gk20a *vm);

struct nvgpu_gr_ctx_desc *
nvgpu_gr_ctx_desc_alloc(struct gk20a *g)
{
	struct nvgpu_gr_ctx_desc *desc = nvgpu_kzalloc(g, sizeof(*desc));

	return desc;
}

void nvgpu_gr_ctx_desc_free(struct gk20a *g,
	struct nvgpu_gr_ctx_desc *desc)
{
	nvgpu_kfree(g, desc);
}

void nvgpu_gr_ctx_set_size(struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	enum nvgpu_gr_ctx_index index, u32 size)
{
	gr_ctx_desc->size[index] = size;
}

int nvgpu_gr_ctx_alloc(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	struct vm_gk20a *vm)
{
	int err = 0;

	nvgpu_log_fn(g, " ");

	if (gr_ctx_desc->size[NVGPU_GR_CTX_CTX] == 0U) {
		return -EINVAL;
	}

	err = nvgpu_dma_alloc(g, gr_ctx_desc->size[NVGPU_GR_CTX_CTX],
			&gr_ctx->mem);
	if (err != 0) {
		return err;
	}

	gr_ctx->mem.gpu_va = nvgpu_gmmu_map(vm,
				&gr_ctx->mem,
				gr_ctx->mem.size,
				0, /* not GPU-cacheable */
				gk20a_mem_flag_none, true,
				gr_ctx->mem.aperture);
	if (gr_ctx->mem.gpu_va == 0ULL) {
		/* map failed: report an error instead of falling through
		 * with err still 0 */
		err = -ENOMEM;
		goto err_free_mem;
	}

	return 0;

err_free_mem:
	nvgpu_dma_free(g, &gr_ctx->mem);

	return err;
}

void nvgpu_gr_ctx_free(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
	struct vm_gk20a *vm)
{
	nvgpu_log_fn(g, " ");

	if (gr_ctx != NULL) {
		nvgpu_gr_ctx_unmap_global_ctx_buffers(g, gr_ctx,
			global_ctx_buffer, vm);

		nvgpu_gr_ctx_free_pm_ctx(g, vm, gr_ctx);
		nvgpu_gr_ctx_free_patch_ctx(g, vm, gr_ctx);

		if (nvgpu_mem_is_valid(&gr_ctx->gfxp_rtvcb_ctxsw_buffer)) {
			nvgpu_dma_unmap_free(vm,
				&gr_ctx->gfxp_rtvcb_ctxsw_buffer);
		}
		nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
		nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
		nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
		nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);

		nvgpu_dma_unmap_free(vm, &gr_ctx->mem);
		(void) memset(gr_ctx, 0, sizeof(*gr_ctx));
	}
}

int nvgpu_gr_ctx_alloc_pm_ctx(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	struct vm_gk20a *vm,
	u64 gpu_va)
{
	struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;
	int err;

	if (pm_ctx->mem.gpu_va != 0ULL) {
		return 0;
	}

	err = nvgpu_dma_alloc_sys(g, gr_ctx_desc->size[NVGPU_GR_CTX_PM_CTX],
			&pm_ctx->mem);
	if (err != 0) {
		nvgpu_err(g,
			"failed to allocate pm ctx buffer");
		return err;
	}

	pm_ctx->mem.gpu_va = nvgpu_gmmu_map_fixed(vm,
				&pm_ctx->mem,
				gpu_va,
				pm_ctx->mem.size,
				NVGPU_VM_MAP_CACHEABLE,
				gk20a_mem_flag_none, true,
				pm_ctx->mem.aperture);
	if (pm_ctx->mem.gpu_va == 0ULL) {
		nvgpu_err(g,
			"failed to map pm ctxt buffer");
		nvgpu_dma_free(g, &pm_ctx->mem);
		return -ENOMEM;
	}

	return 0;
}

void nvgpu_gr_ctx_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
	struct nvgpu_gr_ctx *gr_ctx)
{
	struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;

	if (pm_ctx->mem.gpu_va != 0ULL) {
		nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va);

		nvgpu_dma_free(g, &pm_ctx->mem);
	}
}

int nvgpu_gr_ctx_alloc_patch_ctx(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	struct vm_gk20a *vm)
{
	struct patch_desc *patch_ctx = &gr_ctx->patch_ctx;
	int err = 0;

	nvgpu_log(g, gpu_dbg_info, "patch buffer size in entries: %d",
		gr_ctx_desc->size[NVGPU_GR_CTX_PATCH_CTX]);

	err = nvgpu_dma_alloc_map_sys(vm,
			gr_ctx_desc->size[NVGPU_GR_CTX_PATCH_CTX],
			&patch_ctx->mem);
	if (err != 0) {
		return err;
	}

	return 0;
}

void nvgpu_gr_ctx_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
	struct nvgpu_gr_ctx *gr_ctx)
{
	struct patch_desc *patch_ctx = &gr_ctx->patch_ctx;

	if (patch_ctx->mem.gpu_va != 0ULL) {
		nvgpu_gmmu_unmap(vm, &patch_ctx->mem,
				patch_ctx->mem.gpu_va);
	}

	nvgpu_dma_free(g, &patch_ctx->mem);
	patch_ctx->data_count = 0;
}

void nvgpu_gr_ctx_set_zcull_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
	u32 mode, u64 gpu_va)
{
	struct zcull_ctx_desc *zcull_ctx = &gr_ctx->zcull_ctx;

	zcull_ctx->ctx_sw_mode = mode;
	zcull_ctx->gpu_va = gpu_va;
}

static int nvgpu_gr_ctx_alloc_ctxsw_buffer(struct vm_gk20a *vm, size_t size,
	struct nvgpu_mem *mem)
{
	int err;

	err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
	if (err != 0) {
		return err;
	}

	mem->gpu_va = nvgpu_gmmu_map(vm,
				mem,
				mem->aligned_size,
				NVGPU_VM_MAP_CACHEABLE,
				gk20a_mem_flag_none,
				false,
				mem->aperture);
	if (mem->gpu_va == 0ULL) {
		nvgpu_dma_free(vm->mm->g, mem);
		return -ENOMEM;
	}

	return 0;
}

int nvgpu_gr_ctx_alloc_ctxsw_buffers(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_ctx_desc *gr_ctx_desc,
	struct vm_gk20a *vm)
{
	int err;

	/* nothing to do if already initialized */
	if (nvgpu_mem_is_valid(&gr_ctx->preempt_ctxsw_buffer)) {
		return 0;
	}

	if (gr_ctx_desc->size[NVGPU_GR_CTX_PREEMPT_CTXSW] == 0U ||
	    gr_ctx_desc->size[NVGPU_GR_CTX_SPILL_CTXSW] == 0U ||
	    gr_ctx_desc->size[NVGPU_GR_CTX_BETACB_CTXSW] == 0U ||
	    gr_ctx_desc->size[NVGPU_GR_CTX_PAGEPOOL_CTXSW] == 0U) {
		return -EINVAL;
	}

	err = nvgpu_gr_ctx_alloc_ctxsw_buffer(vm,
			gr_ctx_desc->size[NVGPU_GR_CTX_PREEMPT_CTXSW],
			&gr_ctx->preempt_ctxsw_buffer);
	if (err != 0) {
		nvgpu_err(g, "cannot allocate preempt buffer");
		goto fail;
	}

	err = nvgpu_gr_ctx_alloc_ctxsw_buffer(vm,
			gr_ctx_desc->size[NVGPU_GR_CTX_SPILL_CTXSW],
			&gr_ctx->spill_ctxsw_buffer);
	if (err != 0) {
		nvgpu_err(g, "cannot allocate spill buffer");
		goto fail_free_preempt;
	}

	err = nvgpu_gr_ctx_alloc_ctxsw_buffer(vm,
			gr_ctx_desc->size[NVGPU_GR_CTX_BETACB_CTXSW],
			&gr_ctx->betacb_ctxsw_buffer);
	if (err != 0) {
		nvgpu_err(g, "cannot allocate beta buffer");
		goto fail_free_spill;
	}

	err = nvgpu_gr_ctx_alloc_ctxsw_buffer(vm,
			gr_ctx_desc->size[NVGPU_GR_CTX_PAGEPOOL_CTXSW],
			&gr_ctx->pagepool_ctxsw_buffer);
	if (err != 0) {
		nvgpu_err(g, "cannot allocate page pool");
		goto fail_free_betacb;
	}

	if (gr_ctx_desc->size[NVGPU_GR_CTX_GFXP_RTVCB_CTXSW] != 0U) {
		/* allocate into the gfxp rtvcb buffer, not the page pool
		 * buffer that was already allocated above */
		err = nvgpu_gr_ctx_alloc_ctxsw_buffer(vm,
				gr_ctx_desc->size[NVGPU_GR_CTX_GFXP_RTVCB_CTXSW],
				&gr_ctx->gfxp_rtvcb_ctxsw_buffer);
		if (err != 0) {
			nvgpu_err(g, "cannot allocate gfxp rtvcb");
			goto fail_free_pagepool;
		}
	}

	return 0;

fail_free_pagepool:
	nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
fail_free_betacb:
	nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
fail_free_spill:
	nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
fail_free_preempt:
	nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);
fail:
	return err;
}

static void nvgpu_gr_ctx_unmap_global_ctx_buffers(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
	struct vm_gk20a *vm)
{
	u64 *g_bfr_va = gr_ctx->global_ctx_buffer_va;
	int *g_bfr_index = gr_ctx->global_ctx_buffer_index;
	u32 i;

	nvgpu_log_fn(g, " ");

	for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
		nvgpu_gr_global_ctx_buffer_unmap(global_ctx_buffer,
			g_bfr_index[i], vm, g_bfr_va[i]);
	}

	(void) memset(g_bfr_va, 0, sizeof(gr_ctx->global_ctx_buffer_va));
	(void) memset(g_bfr_index, 0, sizeof(gr_ctx->global_ctx_buffer_index));

	gr_ctx->global_ctx_buffer_mapped = false;
}

int nvgpu_gr_ctx_map_global_ctx_buffers(struct gk20a *g,
	struct nvgpu_gr_ctx *gr_ctx,
	struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
	struct vm_gk20a *vm, bool vpr)
{
	u64 *g_bfr_va;
	int *g_bfr_index;
	u64 gpu_va = 0ULL;

	nvgpu_log_fn(g, " ");

	g_bfr_va = gr_ctx->global_ctx_buffer_va;
	g_bfr_index = gr_ctx->global_ctx_buffer_index;

	/* Circular Buffer */
	if (vpr && nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
			NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR)) {
		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
				NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR,
				vm, NVGPU_VM_MAP_CACHEABLE, true);
		g_bfr_index[NVGPU_GR_CTX_CIRCULAR_VA] =
			NVGPU_GR_GLOBAL_CTX_CIRCULAR_VPR;
	} else {
		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
				NVGPU_GR_GLOBAL_CTX_CIRCULAR,
				vm, NVGPU_VM_MAP_CACHEABLE, true);
		g_bfr_index[NVGPU_GR_CTX_CIRCULAR_VA] =
			NVGPU_GR_GLOBAL_CTX_CIRCULAR;
	}
	if (gpu_va == 0ULL) {
		goto clean_up;
	}

	g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;

	/* Attribute Buffer */
	if (vpr && nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR)) {
		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
				NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR,
				vm, NVGPU_VM_MAP_CACHEABLE, false);
		g_bfr_index[NVGPU_GR_CTX_ATTRIBUTE_VA] =
			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE_VPR;
	} else {
		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
				NVGPU_GR_GLOBAL_CTX_ATTRIBUTE,
				vm, NVGPU_VM_MAP_CACHEABLE, false);
		g_bfr_index[NVGPU_GR_CTX_ATTRIBUTE_VA] =
			NVGPU_GR_GLOBAL_CTX_ATTRIBUTE;
	}
	if (gpu_va == 0ULL) {
		goto clean_up;
	}

	g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;

	/* Page Pool */
	if (vpr && nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
			NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR)) {
		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
				NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR,
				vm, NVGPU_VM_MAP_CACHEABLE, true);
		g_bfr_index[NVGPU_GR_CTX_PAGEPOOL_VA] =
			NVGPU_GR_GLOBAL_CTX_PAGEPOOL_VPR;
	} else {
		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
				NVGPU_GR_GLOBAL_CTX_PAGEPOOL,
				vm, NVGPU_VM_MAP_CACHEABLE, true);
		g_bfr_index[NVGPU_GR_CTX_PAGEPOOL_VA] =
			NVGPU_GR_GLOBAL_CTX_PAGEPOOL;
	}
	if (gpu_va == 0ULL) {
		goto clean_up;
	}

	g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;

	/* Priv register Access Map */
	gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
			NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP,
			vm, 0, true);
	if (gpu_va == 0ULL) {
		goto clean_up;
	}

	g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA] = gpu_va;
	g_bfr_index[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA] =
		NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP;

#ifdef CONFIG_GK20A_CTXSW_TRACE
	/* FECS trace buffer */
	if (nvgpu_is_enabled(g, NVGPU_FECS_TRACE_VA)) {
		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
				NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER,
				vm, 0, true);
		if (gpu_va == 0ULL) {
			goto clean_up;
		}

		g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA] = gpu_va;
		g_bfr_index[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA] =
			NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER;
	}
#endif

	/* RTV circular buffer */
	if (nvgpu_gr_global_ctx_buffer_ready(global_ctx_buffer,
			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER)) {
		gpu_va = nvgpu_gr_global_ctx_buffer_map(global_ctx_buffer,
				NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER,
				vm, 0, true);
		if (gpu_va == 0ULL) {
			goto clean_up;
		}

		g_bfr_va[NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA] = gpu_va;
		g_bfr_index[NVGPU_GR_CTX_RTV_CIRCULAR_BUFFER_VA] =
			NVGPU_GR_GLOBAL_CTX_RTV_CIRCULAR_BUFFER;
	}

	gr_ctx->global_ctx_buffer_mapped = true;

	return 0;

clean_up:
	nvgpu_gr_ctx_unmap_global_ctx_buffers(g, gr_ctx, global_ctx_buffer, vm);

	return -ENOMEM;
}

u64 nvgpu_gr_ctx_get_global_ctx_va(struct nvgpu_gr_ctx *gr_ctx,
	enum nvgpu_gr_ctx_global_ctx_va index)
{
	return gr_ctx->global_ctx_buffer_va[index];
}