gpu: nvgpu: vgpu: move vgpu gr files under vgpu/gr

Create a new directory gr under the common vgpu path and move all
common vgpu gr files into that directory.

Move the vgpu gr ctx implementations to a new file ctx_vgpu.c
and create a corresponding header file.
Also change the parameters of some functions in ctx_vgpu.c so
that they no longer access channel/tsg/fifo constructs.
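
For illustration, a rough sketch of that parameter change, using the
prototypes visible in the diff below (the authoritative declarations
live in the new common/vgpu/gr/ctx_vgpu.h):

  /* Before: helpers took a channel and derived the TSG, VM and gr ctx
   * internally via tsg_gk20a_from_ch(). */
  static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
  		struct channel_gk20a *c);

  /* After: callers pass the gr ctx, VM and virtual context handle
   * explicitly, so ctx_vgpu.c no longer dereferences channel/tsg/fifo
   * structures. */
  int vgpu_gr_alloc_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
  		struct vm_gk20a *ch_vm, u64 virt_ctx);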

Jira GVSCI-334

Change-Id: I3498b10db62194df2871eb81fc5c5cb04b42abc3
Signed-off-by: Aparna Das <aparnad@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2013350
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Aparna Das
Authored: 2019-02-20 13:22:15 -08:00
Committed by: mobile promotions
Parent: 094395ed38
Commit: a0bdf6e470
15 changed files with 440 additions and 346 deletions


@@ -397,7 +397,11 @@ nvgpu-$(CONFIG_GK20A_VIDMEM) += \
nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
common/vgpu/ltc_vgpu.o \
common/vgpu/gr_vgpu.o \
common/vgpu/gr/gr_vgpu.o \
common/vgpu/gr/ctx_vgpu.o \
common/vgpu/gr/vgpu_gr_gm20b.o \
common/vgpu/gr/vgpu_gr_gp10b.o \
common/vgpu/gr/vgpu_gr_gv11b.o \
common/vgpu/fifo/fifo_vgpu.o \
common/vgpu/fifo/runlist_vgpu.o \
common/vgpu/fifo/vgpu_fifo_gv11b.o \
@@ -411,14 +415,11 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
common/vgpu/ecc_vgpu.o \
common/vgpu/clk_vgpu.o \
common/vgpu/fecs_trace_vgpu.o \
common/vgpu/gm20b/vgpu_gr_gm20b.o \
common/vgpu/gp10b/vgpu_hal_gp10b.o \
common/vgpu/gp10b/vgpu_gr_gp10b.o \
common/vgpu/gp10b/vgpu_fuse_gp10b.o \
common/vgpu/gp10b/vgpu_mm_gp10b.o \
common/vgpu/gv11b/vgpu_gv11b.o \
common/vgpu/gv11b/vgpu_hal_gv11b.o \
common/vgpu/gv11b/vgpu_gr_gv11b.o \
common/vgpu/gv11b/vgpu_subctx_gv11b.o \
common/vgpu/gv11b/vgpu_tsg_gv11b.o \


@@ -319,7 +319,11 @@ srcs += common/sim.c \
common/vgpu/perf/perf_vgpu.c \
common/vgpu/fecs_trace_vgpu.c \
common/vgpu/mm_vgpu.c \
common/vgpu/gr_vgpu.c \
common/vgpu/gr/gr_vgpu.c \
common/vgpu/gr/ctx_vgpu.c \
common/vgpu/gr/vgpu_gr_gv11b.c \
common/vgpu/gr/vgpu_gr_gp10b.c \
common/vgpu/gr/vgpu_gr_gm20b.c \
common/vgpu/clk_vgpu.c \
common/vgpu/debugger_vgpu.c \
common/vgpu/ltc_vgpu.c \
@@ -328,12 +332,9 @@ srcs += common/sim.c \
common/vgpu/gv11b/vgpu_hal_gv11b.c \
common/vgpu/gv11b/vgpu_tsg_gv11b.c \
common/vgpu/gv11b/vgpu_subctx_gv11b.c \
common/vgpu/gv11b/vgpu_gr_gv11b.c \
common/vgpu/gp10b/vgpu_hal_gp10b.c \
common/vgpu/gp10b/vgpu_fuse_gp10b.c \
common/vgpu/gp10b/vgpu_mm_gp10b.c \
common/vgpu/gp10b/vgpu_gr_gp10b.c \
common/vgpu/gm20b/vgpu_gr_gm20b.c \
hal/bus/bus_gk20a.c \
hal/bus/bus_gm20b.c \
hal/bus/bus_gp10b.c \


@@ -43,7 +43,8 @@
#include "common/vgpu/fifo/fifo_vgpu.h"
#include "common/vgpu/fifo/runlist_vgpu.h"
#include "common/vgpu/gr_vgpu.h"
#include "common/vgpu/gr/gr_vgpu.h"
#include "common/vgpu/gr/ctx_vgpu.h"
#include "common/vgpu/ltc_vgpu.h"
#include "common/vgpu/mm_vgpu.h"
#include "common/vgpu/debugger_vgpu.h"
@@ -52,8 +53,8 @@
#include "common/vgpu/perf/cyclestats_snapshot_vgpu.h"
#include "gp10b/gp10b.h"
#include "gp10b/hal_gp10b.h"
#include "common/vgpu/gm20b/vgpu_gr_gm20b.h"
#include "vgpu_gr_gp10b.h"
#include "common/vgpu/gr/vgpu_gr_gm20b.h"
#include "common/vgpu/gr/vgpu_gr_gp10b.h"
#include "vgpu_mm_gp10b.h"
#include "vgpu_fuse_gp10b.h"


@@ -0,0 +1,340 @@
/*
* Virtualized GPU Graphics
*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/kmem.h>
#include <nvgpu/bug.h>
#include <nvgpu/dma.h>
#include <nvgpu/vgpu/vgpu_ivc.h>
#include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/gr/global_ctx.h>
#include <nvgpu/gr/ctx.h>
#include "ctx_vgpu.h"
int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
struct vm_gk20a *vm)
{
struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
struct gr_gk20a *gr = &g->gr;
int err;
nvgpu_log_fn(g, " ");
if (gr->ctx_vars.golden_image_size == 0) {
return -EINVAL;
}
gr_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
gr->ctx_vars.golden_image_size,
GMMU_PAGE_SIZE_KERNEL);
if (!gr_ctx->mem.gpu_va) {
return -ENOMEM;
}
gr_ctx->mem.size = gr->ctx_vars.golden_image_size;
gr_ctx->mem.aperture = APERTURE_SYSMEM;
msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
msg.handle = vgpu_get_handle(g);
p->as_handle = vm->handle;
p->gr_ctx_va = gr_ctx->mem.gpu_va;
p->tsg_id = gr_ctx->tsgid;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
err = err ? err : msg.ret;
if (unlikely(err)) {
nvgpu_err(g, "fail to alloc gr_ctx");
nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
gr_ctx->mem.aperture = APERTURE_INVALID;
}
return err;
}
void vgpu_gr_free_gr_ctx(struct gk20a *g,
struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx)
{
nvgpu_log_fn(g, " ");
if (gr_ctx->mem.gpu_va) {
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
int err;
msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
msg.handle = vgpu_get_handle(g);
p->tsg_id = gr_ctx->tsgid;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
WARN_ON(err || msg.ret);
nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
vgpu_gr_unmap_global_ctx_buffers(g, gr_ctx, vm);
vgpu_gr_free_patch_ctx(g, vm, gr_ctx);
vgpu_gr_free_pm_ctx(g, vm, gr_ctx);
nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);
(void) memset(gr_ctx, 0, sizeof(*gr_ctx));
}
}
int vgpu_gr_alloc_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
struct vm_gk20a *ch_vm, u64 virt_ctx)
{
struct patch_desc *patch_ctx;
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
int err;
nvgpu_log_fn(g, " ");
patch_ctx = &gr_ctx->patch_ctx;
patch_ctx->mem.size = 128 * sizeof(u32);
patch_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch_vm,
patch_ctx->mem.size,
GMMU_PAGE_SIZE_KERNEL);
if (!patch_ctx->mem.gpu_va) {
return -ENOMEM;
}
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
msg.handle = vgpu_get_handle(g);
p->handle = virt_ctx;
p->patch_ctx_va = patch_ctx->mem.gpu_va;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
if (err || msg.ret) {
nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
err = -ENOMEM;
}
return err;
}
void vgpu_gr_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
struct nvgpu_gr_ctx *gr_ctx)
{
struct patch_desc *patch_ctx = &gr_ctx->patch_ctx;
nvgpu_log_fn(g, " ");
if (patch_ctx->mem.gpu_va) {
/* server will free on channel close */
nvgpu_vm_free_va(vm, patch_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
patch_ctx->mem.gpu_va = 0;
}
}
int vgpu_gr_alloc_pm_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
struct vm_gk20a *vm)
{
struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;
nvgpu_log_fn(g, " ");
if (pm_ctx->mem.gpu_va != 0ULL) {
return 0;
}
pm_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
g->gr.ctx_vars.pm_ctxsw_image_size,
GMMU_PAGE_SIZE_KERNEL);
if (!pm_ctx->mem.gpu_va) {
nvgpu_err(g, "failed to map pm ctxt buffer");
return -ENOMEM;
}
pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
return 0;
}
void vgpu_gr_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
struct nvgpu_gr_ctx *gr_ctx)
{
struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;
nvgpu_log_fn(g, " ");
/* check if hwpm was ever initialized. If not, nothing to do */
if (pm_ctx->mem.gpu_va == 0) {
return;
}
/* server will free on channel close */
nvgpu_vm_free_va(vm, pm_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
pm_ctx->mem.gpu_va = 0;
}
void vgpu_gr_unmap_global_ctx_buffers(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *ch_vm)
{
u64 *g_bfr_va = gr_ctx->global_ctx_buffer_va;
u32 i;
nvgpu_log_fn(g, " ");
if (gr_ctx->global_ctx_buffer_mapped) {
/* server will unmap on channel close */
for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
if (g_bfr_va[i]) {
nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
GMMU_PAGE_SIZE_KERNEL);
g_bfr_va[i] = 0;
}
}
gr_ctx->global_ctx_buffer_mapped = false;
}
}
int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
struct vm_gk20a *ch_vm, u64 virt_ctx)
{
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
u64 *g_bfr_va;
u64 gpu_va;
u32 i;
int err;
nvgpu_log_fn(g, " ");
g_bfr_va = gr_ctx->global_ctx_buffer_va;
/* Circular Buffer */
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_CIRCULAR),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va) {
goto clean_up;
}
g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
/* Attribute Buffer */
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_ATTRIBUTE),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va) {
goto clean_up;
}
g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
/* Page Pool */
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_PAGEPOOL),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va) {
goto clean_up;
}
g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
/* Priv register Access Map */
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va) {
goto clean_up;
}
g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA] = gpu_va;
/* FECS trace Buffer */
#ifdef CONFIG_GK20A_CTXSW_TRACE
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va)
goto clean_up;
g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA] = gpu_va;
#endif
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
msg.handle = vgpu_get_handle(g);
p->handle = virt_ctx;
p->cb_va = g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA];
p->attr_va = g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA];
p->page_pool_va = g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA];
p->priv_access_map_va = g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA];
#ifdef CONFIG_GK20A_CTXSW_TRACE
p->fecs_trace_va = g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA];
#endif
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
if (err || msg.ret) {
goto clean_up;
}
gr_ctx->global_ctx_buffer_mapped = true;
return 0;
clean_up:
for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
if (g_bfr_va[i]) {
nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
GMMU_PAGE_SIZE_KERNEL);
g_bfr_va[i] = 0;
}
}
return -ENOMEM;
}
/* load saved fresh copy of golden image into channel gr_ctx */
int vgpu_gr_load_golden_ctx_image(struct gk20a *g, u64 virt_ctx)
{
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
int err;
nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
msg.handle = vgpu_get_handle(g);
p->handle = virt_ctx;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
return (err || msg.ret) ? -1 : 0;
}


@@ -0,0 +1,51 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef CTX_VGPU_H
#define CTX_VGPU_H
struct gk20a;
struct nvgpu_gr_ctx;
struct vm_gk20a;
struct nvgpu_gr_global_ctx_buffer_desc;
int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
struct vm_gk20a *vm);
void vgpu_gr_free_gr_ctx(struct gk20a *g,
struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx);
int vgpu_gr_alloc_patch_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
struct vm_gk20a *ch_vm, u64 virt_ctx);
void vgpu_gr_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
struct nvgpu_gr_ctx *gr_ctx);
int vgpu_gr_alloc_pm_ctx(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
struct vm_gk20a *vm);
void vgpu_gr_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
struct nvgpu_gr_ctx *gr_ctx);
void vgpu_gr_unmap_global_ctx_buffers(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *ch_vm);
int vgpu_gr_map_global_ctx_buffers(struct gk20a *g, struct nvgpu_gr_ctx *gr_ctx,
struct nvgpu_gr_global_ctx_buffer_desc *global_ctx_buffer,
struct vm_gk20a *ch_vm, u64 virt_ctx);
int vgpu_gr_load_golden_ctx_image(struct gk20a *g, u64 virt_ctx);
#endif
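
As a rough illustration (not verbatim from this change) of how a caller
strings these entry points together, adapted from the vgpu_gr_alloc_obj_ctx
call sites updated later in this diff, with error handling and surrounding
context omitted:

  /* Sketch: c is the channel, gr_ctx its TSG's gr context
   * (assumptions taken from the gr_vgpu.c hunks below). */
  err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, c->vm);
  err = vgpu_gr_alloc_patch_ctx(g, gr_ctx, c->vm, c->virt_ctx);
  err = vgpu_gr_map_global_ctx_buffers(g, gr_ctx,
  		g->gr.global_ctx_buffer, c->vm, c->virt_ctx);
  err = vgpu_gr_load_golden_ctx_image(g, c->virt_ctx);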


@@ -40,11 +40,11 @@
#include <nvgpu/gr/zbc.h>
#include <nvgpu/gr/fecs_trace.h>
#include "gr_vgpu.h"
#include "gk20a/fecs_trace_gk20a.h"
#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
#include "gr_vgpu.h"
#include "ctx_vgpu.h"
void vgpu_gr_detect_sm_arch(struct gk20a *g)
{
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
@@ -93,24 +93,6 @@ static int vgpu_gr_commit_global_ctx_buffers(struct gk20a *g,
return (err || msg.ret) ? -1 : 0;
}
/* load saved fresh copy of golden image into channel gr_ctx */
static int vgpu_gr_load_golden_ctx_image(struct gk20a *g,
struct channel_gk20a *c)
{
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
int err;
nvgpu_log_fn(g, " ");
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_LOAD_GR_GOLDEN_CTX;
msg.handle = vgpu_get_handle(g);
p->handle = c->virt_ctx;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
return (err || msg.ret) ? -1 : 0;
}
int vgpu_gr_init_ctx_state(struct gk20a *g)
{
struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
@@ -133,7 +115,7 @@ int vgpu_gr_init_ctx_state(struct gk20a *g)
return 0;
}
static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
u32 size;
@@ -183,293 +165,11 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
return 0;
}
static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
struct channel_gk20a *c)
{
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
struct vm_gk20a *ch_vm = c->vm;
struct tsg_gk20a *tsg;
u64 *g_bfr_va;
struct gr_gk20a *gr = &g->gr;
u64 gpu_va;
u32 i;
int err;
nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(c);
if (!tsg) {
return -EINVAL;
}
g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
/* Circular Buffer */
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_CIRCULAR),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va) {
goto clean_up;
}
g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA] = gpu_va;
/* Attribute Buffer */
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_ATTRIBUTE),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va) {
goto clean_up;
}
g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA] = gpu_va;
/* Page Pool */
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_PAGEPOOL),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va) {
goto clean_up;
}
g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA] = gpu_va;
/* Priv register Access Map */
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va) {
goto clean_up;
}
g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA] = gpu_va;
/* FECS trace Buffer */
#ifdef CONFIG_GK20A_CTXSW_TRACE
gpu_va = nvgpu_vm_alloc_va(ch_vm,
nvgpu_gr_global_ctx_get_size(gr->global_ctx_buffer,
NVGPU_GR_GLOBAL_CTX_FECS_TRACE_BUFFER),
GMMU_PAGE_SIZE_KERNEL);
if (!gpu_va)
goto clean_up;
g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA] = gpu_va;
#endif
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
msg.handle = vgpu_get_handle(g);
p->handle = c->virt_ctx;
p->cb_va = g_bfr_va[NVGPU_GR_CTX_CIRCULAR_VA];
p->attr_va = g_bfr_va[NVGPU_GR_CTX_ATTRIBUTE_VA];
p->page_pool_va = g_bfr_va[NVGPU_GR_CTX_PAGEPOOL_VA];
p->priv_access_map_va = g_bfr_va[NVGPU_GR_CTX_PRIV_ACCESS_MAP_VA];
#ifdef CONFIG_GK20A_CTXSW_TRACE
p->fecs_trace_va = g_bfr_va[NVGPU_GR_CTX_FECS_TRACE_BUFFER_VA];
#endif
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
if (err || msg.ret) {
goto clean_up;
}
tsg->gr_ctx->global_ctx_buffer_mapped = true;
return 0;
clean_up:
for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
if (g_bfr_va[i]) {
nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
GMMU_PAGE_SIZE_KERNEL);
g_bfr_va[i] = 0;
}
}
return -ENOMEM;
}
static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
{
struct vm_gk20a *ch_vm = tsg->vm;
u64 *g_bfr_va = tsg->gr_ctx->global_ctx_buffer_va;
u32 i;
struct gk20a *g = tsg->g;
nvgpu_log_fn(g, " ");
if (tsg->gr_ctx->global_ctx_buffer_mapped) {
/* server will unmap on channel close */
for (i = 0; i < NVGPU_GR_CTX_VA_COUNT; i++) {
if (g_bfr_va[i]) {
nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
GMMU_PAGE_SIZE_KERNEL);
g_bfr_va[i] = 0;
}
}
tsg->gr_ctx->global_ctx_buffer_mapped = false;
}
}
int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
struct vm_gk20a *vm)
{
struct tegra_vgpu_cmd_msg msg = {0};
struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
struct gr_gk20a *gr = &g->gr;
int err;
nvgpu_log_fn(g, " ");
if (gr->ctx_vars.golden_image_size == 0) {
return -EINVAL;
}
gr_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
gr->ctx_vars.golden_image_size,
GMMU_PAGE_SIZE_KERNEL);
if (!gr_ctx->mem.gpu_va) {
return -ENOMEM;
}
gr_ctx->mem.size = gr->ctx_vars.golden_image_size;
gr_ctx->mem.aperture = APERTURE_SYSMEM;
msg.cmd = TEGRA_VGPU_CMD_GR_CTX_ALLOC;
msg.handle = vgpu_get_handle(g);
p->as_handle = vm->handle;
p->gr_ctx_va = gr_ctx->mem.gpu_va;
p->tsg_id = gr_ctx->tsgid;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
err = err ? err : msg.ret;
if (unlikely(err)) {
nvgpu_err(g, "fail to alloc gr_ctx");
nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
gr_ctx->mem.aperture = APERTURE_INVALID;
}
return err;
}
static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
struct channel_gk20a *c)
{
struct tsg_gk20a *tsg;
struct patch_desc *patch_ctx;
struct vm_gk20a *ch_vm = c->vm;
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_ch_ctx_params *p = &msg.params.ch_ctx;
int err;
nvgpu_log_fn(g, " ");
tsg = tsg_gk20a_from_ch(c);
if (!tsg) {
return -EINVAL;
}
patch_ctx = &tsg->gr_ctx->patch_ctx;
patch_ctx->mem.size = 128 * sizeof(u32);
patch_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch_vm,
patch_ctx->mem.size,
GMMU_PAGE_SIZE_KERNEL);
if (!patch_ctx->mem.gpu_va) {
return -ENOMEM;
}
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_PATCH_CTX;
msg.handle = vgpu_get_handle(g);
p->handle = c->virt_ctx;
p->patch_ctx_va = patch_ctx->mem.gpu_va;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
if (err || msg.ret) {
nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
err = -ENOMEM;
}
return err;
}
static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
{
struct patch_desc *patch_ctx = &tsg->gr_ctx->patch_ctx;
struct gk20a *g = tsg->g;
nvgpu_log_fn(g, " ");
if (patch_ctx->mem.gpu_va) {
/* server will free on channel close */
nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
patch_ctx->mem.gpu_va = 0;
}
}
static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
{
struct nvgpu_gr_ctx *ch_ctx = tsg->gr_ctx;
struct pm_ctx_desc *pm_ctx = &ch_ctx->pm_ctx;
struct gk20a *g = tsg->g;
nvgpu_log_fn(g, " ");
/* check if hwpm was ever initialized. If not, nothing to do */
if (pm_ctx->mem.gpu_va == 0) {
return;
}
/* server will free on channel close */
nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
pm_ctx->mem.gpu_va = 0;
}
void vgpu_gr_free_gr_ctx(struct gk20a *g,
struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx)
{
struct tsg_gk20a *tsg;
nvgpu_log_fn(g, " ");
if (gr_ctx->mem.gpu_va) {
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
int err;
msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
msg.handle = vgpu_get_handle(g);
p->tsg_id = gr_ctx->tsgid;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
WARN_ON(err || msg.ret);
nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
GMMU_PAGE_SIZE_KERNEL);
tsg = &g->fifo.tsg[gr_ctx->tsgid];
vgpu_gr_unmap_global_ctx_buffers(tsg);
vgpu_gr_free_channel_patch_ctx(tsg);
vgpu_gr_free_channel_pm_ctx(tsg);
nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);
(void) memset(gr_ctx, 0, sizeof(*gr_ctx));
}
}
int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
{
struct gk20a *g = c->g;
struct nvgpu_gr_ctx *gr_ctx = NULL;
struct gr_gk20a *gr = &g->gr;
struct tsg_gk20a *tsg = NULL;
int err = 0;
@@ -504,14 +204,15 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
c->vm);
if (err) {
nvgpu_err(g,
"fail to allocate TSG gr ctx buffer, err=%d", err);
"fail to allocate TSG gr ctx buffer, err=%d",
err);
nvgpu_vm_put(tsg->vm);
tsg->vm = NULL;
goto out;
}
/* allocate patch buffer */
err = vgpu_gr_alloc_channel_patch_ctx(g, c);
err = vgpu_gr_alloc_patch_ctx(g, gr_ctx, c->vm, c->virt_ctx);
if (err) {
nvgpu_err(g, "fail to allocate patch buffer");
goto out;
@@ -523,7 +224,9 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
flags);
/* map global buffer to channel gpu_va and commit */
err = vgpu_gr_map_global_ctx_buffers(g, c);
err = vgpu_gr_map_global_ctx_buffers(g, gr_ctx,
gr->global_ctx_buffer, c->vm,
c->virt_ctx);
if (err) {
nvgpu_err(g, "fail to map global ctx buffer");
goto out;
@@ -544,7 +247,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
/* load golden image */
err = gr_gk20a_elpg_protected_call(g,
vgpu_gr_load_golden_ctx_image(g, c));
vgpu_gr_load_golden_ctx_image(g, c->virt_ctx));
if (err) {
nvgpu_err(g, "fail to load golden ctx image");
goto out;
@@ -559,7 +262,7 @@ int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
#ifdef CONFIG_GK20A_CTXSW_TRACE
/* for fecs bind channel */
err = gr_gk20a_elpg_protected_call(g,
vgpu_gr_load_golden_ctx_image(g, c));
vgpu_gr_load_golden_ctx_image(g, c->virt_ctx));
if (err) {
nvgpu_err(g, "fail to load golden ctx image");
goto out;
@@ -1158,16 +861,13 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
if (mode != NVGPU_GR_CTX_HWPM_CTXSW_MODE_NO_CTXSW) {
/* Allocate buffer if necessary */
if (pm_ctx->mem.gpu_va == 0) {
pm_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch->vm,
g->gr.ctx_vars.pm_ctxsw_image_size,
GMMU_PAGE_SIZE_KERNEL);
if (!pm_ctx->mem.gpu_va) {
return -ENOMEM;
}
pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
err = vgpu_gr_alloc_pm_ctx(g, tsg->gr_ctx, ch->vm);
if (err != 0) {
nvgpu_err(g,
"failed to allocate pm ctxt buffer");
return err;
}
pm_ctx->mem.size = g->gr.ctx_vars.pm_ctxsw_image_size;
}
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_SET_HWPM_CTXSW_MODE;


@@ -36,6 +36,8 @@ struct dbg_session_gk20a;
struct tsg_gk20a;
void vgpu_gr_detect_sm_arch(struct gk20a *g);
int vgpu_gr_init_ctx_state(struct gk20a *g);
int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g);
void vgpu_gr_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
void vgpu_gr_free_tsg_ctx(struct tsg_gk20a *tsg);
int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
@@ -55,10 +57,10 @@ int vgpu_gr_add_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
struct nvgpu_gr_zbc_entry *zbc_val);
int vgpu_gr_query_zbc(struct gk20a *g, struct nvgpu_gr_zbc *zbc,
struct nvgpu_gr_zbc_query_params *query_params);
int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 sms, bool enable);
int vgpu_gr_update_smpc_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *ch, bool enable);
int vgpu_gr_set_sm_debug_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 sms, bool enable);
int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
struct channel_gk20a *ch, u64 gpu_va, u32 mode);
int vgpu_gr_clear_sm_error_state(struct gk20a *g,


@@ -30,10 +30,11 @@
#include <nvgpu/gr/config.h>
#include <nvgpu/log.h>
#include "common/vgpu/gm20b/vgpu_gr_gm20b.h"
#include "ctx_vgpu.h"
#include "vgpu_gr_gm20b.h"
#include "vgpu_gr_gp10b.h"
#include "gp10b/gr_gp10b.h"
#include "vgpu_gr_gp10b.h"
#include <nvgpu/hw/gp10b/hw_gr_gp10b.h>


@@ -22,10 +22,12 @@
#include <nvgpu/gk20a.h>
#include "common/vgpu/gr_vgpu.h"
#include "vgpu_subctx_gv11b.h"
#include "gr_vgpu.h"
#include "ctx_vgpu.h"
#include "vgpu_gr_gv11b.h"
#include "common/vgpu/gv11b/vgpu_subctx_gv11b.h"
int vgpu_gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va)
{
int err;


@@ -55,7 +55,8 @@
#include "common/vgpu/fifo/fifo_vgpu.h"
#include "common/vgpu/fifo/runlist_vgpu.h"
#include "common/vgpu/gr_vgpu.h"
#include "common/vgpu/gr/gr_vgpu.h"
#include "common/vgpu/gr/ctx_vgpu.h"
#include "common/vgpu/ltc_vgpu.h"
#include "common/vgpu/mm_vgpu.h"
#include "common/vgpu/debugger_vgpu.h"
@@ -63,9 +64,10 @@
#include "common/vgpu/fecs_trace_vgpu.h"
#include "common/vgpu/perf/cyclestats_snapshot_vgpu.h"
#include "common/vgpu/fifo/vgpu_fifo_gv11b.h"
#include "common/vgpu/gm20b/vgpu_gr_gm20b.h"
#include "common/vgpu/gr/vgpu_gr_gm20b.h"
#include "common/vgpu/gp10b/vgpu_mm_gp10b.h"
#include "common/vgpu/gp10b/vgpu_gr_gp10b.h"
#include "common/vgpu/gr/vgpu_gr_gp10b.h"
#include "common/vgpu/gr/vgpu_gr_gv11b.h"
#include "common/falcon/falcon_gk20a.h"
@@ -95,7 +97,6 @@
#include <nvgpu/gr/zbc.h>
#include "vgpu_gv11b.h"
#include "vgpu_gr_gv11b.h"
#include "vgpu_subctx_gv11b.h"
#include "vgpu_tsg_gv11b.h"


@@ -84,14 +84,8 @@ int vgpu_init_hal_os(struct gk20a *g);
int vgpu_get_constants(struct gk20a *g);
u64 vgpu_mm_bar1_map_userd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset);
int vgpu_gr_isr(struct gk20a *g, struct tegra_vgpu_gr_intr_info *info);
int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
struct nvgpu_gr_ctx *gr_ctx,
struct vm_gk20a *vm);
void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
struct nvgpu_gr_ctx *gr_ctx);
void vgpu_gr_handle_sm_esr_event(struct gk20a *g,
struct tegra_vgpu_sm_esr_info *info);
int vgpu_gr_init_ctx_state(struct gk20a *g);
int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info);
int vgpu_init_mm_support(struct gk20a *g);