gpu: nvgpu: vgpu: create vgpu common gr subctx unit

Add new unit vgpu subctx under common/vgpu/gr to manage
GR subcontext. This unit provides interfaces to allocate
and free subctx header.

Rename the vgpu_subctx_gv11b* files to subctx_vgpu*, renaming the
functions vgpu_gv11b_alloc_subctx_header to vgpu_alloc_subctx_header
and vgpu_gv11b_free_subctx_header to vgpu_free_subctx_header. These
functions are called only if NVGPU_SUPPORT_TSG_SUBCONTEXTS is enabled
or the free_channel_ctx_header HAL op is set, which is the case only
for gv11b virtualization.

Also assign fifo HAL op free_channel_ctx_header to
vgpu_channel_free_ctx_header for vgpu gv11b which in turn
calls vgpu_free_subctx_header.

Jira GVSCI-334

Change-Id: Ib46e7be911632eba01cd21881077683b795f8bad
Signed-off-by: Aparna Das <aparnad@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2075872
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Aparna Das
2019-03-18 15:56:45 -07:00
committed by mobile promotions
parent 60073d2156
commit 3e3ca8a761
8 changed files with 54 additions and 41 deletions

View File

@@ -439,6 +439,7 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
common/vgpu/cbc/cbc_vgpu.o \
common/vgpu/gr/gr_vgpu.o \
common/vgpu/gr/ctx_vgpu.o \
common/vgpu/gr/subctx_vgpu.o \
common/vgpu/fifo/fifo_vgpu.o \
common/vgpu/fifo/runlist_vgpu.o \
common/vgpu/fifo/vgpu_fifo_gv11b.o \
@@ -457,7 +458,6 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
common/vgpu/gp10b/vgpu_hal_gp10b.o \
common/vgpu/gv11b/vgpu_gv11b.o \
common/vgpu/gv11b/vgpu_hal_gv11b.o \
common/vgpu/gv11b/vgpu_subctx_gv11b.o \
common/vgpu/gv11b/vgpu_tsg_gv11b.o \
nvgpu-$(CONFIG_GK20A_CYCLE_STATS) += \

View File

@@ -302,6 +302,7 @@ srcs += common/sim.c \
common/vgpu/mm/vm_vgpu.c \
common/vgpu/gr/gr_vgpu.c \
common/vgpu/gr/ctx_vgpu.c \
common/vgpu/gr/subctx_vgpu.c \
common/vgpu/clk_vgpu.c \
common/vgpu/debugger_vgpu.c \
common/vgpu/ltc/ltc_vgpu.c \
@@ -310,7 +311,6 @@ srcs += common/sim.c \
common/vgpu/gv11b/vgpu_gv11b.c \
common/vgpu/gv11b/vgpu_hal_gv11b.c \
common/vgpu/gv11b/vgpu_tsg_gv11b.c \
common/vgpu/gv11b/vgpu_subctx_gv11b.c \
common/vgpu/gp10b/vgpu_hal_gp10b.c \
hal/bus/bus_gk20a.c \
hal/bus/bus_gm20b.c \

View File

@@ -41,6 +41,7 @@
#include <nvgpu/vm_area.h>
#include "fifo_vgpu.h"
#include "common/vgpu/gr/subctx_vgpu.h"
#include <nvgpu/hw/gk20a/hw_ram_gk20a.h>
@@ -512,3 +513,8 @@ u32 vgpu_channel_count(struct gk20a *g)
return priv->constants.num_channels;
}
/*
 * Fifo HAL op (free_channel_ctx_header) wrapper: releases the channel's
 * GR subcontext header through the common vgpu subctx unit, forwarding
 * the channel's gk20a instance, subctx, VM and virtual-context handle.
 */
void vgpu_channel_free_ctx_header(struct channel_gk20a *c)
{
vgpu_free_subctx_header(c->g, c->subctx, c->vm, c->virt_ctx);
}

View File

@@ -58,4 +58,5 @@ int vgpu_tsg_unbind_channel(struct channel_gk20a *ch);
int vgpu_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice);
int vgpu_enable_tsg(struct tsg_gk20a *tsg);
int vgpu_set_sm_exception_type_mask(struct channel_gk20a *ch, u32 mask);
void vgpu_channel_free_ctx_header(struct channel_gk20a *c);
#endif /* NVGPU_FIFO_VGPU_H */

View File

@@ -46,9 +46,9 @@
#include "gr_vgpu.h"
#include "ctx_vgpu.h"
#include "subctx_vgpu.h"
#include "common/vgpu/perf/cyclestats_snapshot_vgpu.h"
#include "common/vgpu/gv11b/vgpu_subctx_gv11b.h"
void vgpu_gr_detect_sm_arch(struct gk20a *g)
{
@@ -74,8 +74,9 @@ int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
nvgpu_log_fn(g, " ");
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS)) {
err = vgpu_gv11b_alloc_subctx_header(c);
if (err) {
err = vgpu_alloc_subctx_header(g, &c->subctx, c->vm,
c->virt_ctx);
if (err != 0) {
return err;
}
}
@@ -87,7 +88,8 @@ int vgpu_gr_commit_inst(struct channel_gk20a *c, u64 gpu_va)
if (err || msg.ret) {
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS)) {
vgpu_gv11b_free_subctx_header(c);
vgpu_free_subctx_header(g, c->subctx, c->vm,
c->virt_ctx);
}
return -1;
} else {

View File

@@ -20,81 +20,81 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "vgpu_subctx_gv11b.h"
#include <nvgpu/vgpu/vgpu.h>
#include <nvgpu/vgpu/tegra_vgpu.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>
#include <nvgpu/gr/subctx.h>
#include "subctx_vgpu.h"
/*
 * Allocate a GR subcontext header for a vgpu channel.
 *
 * Allocates the nvgpu_gr_subctx bookkeeping struct, reserves a kernel-page
 * GPU VA for the FECS context header in @vm, and asks the vgpu server
 * (TEGRA_VGPU_CMD_ALLOC_CTX_HEADER) to back it for the channel identified
 * by @virt_ctx.  On success the new subctx is returned through @gr_subctx.
 *
 * @g:         gk20a instance (used for allocation, handle lookup, HAL ops)
 * @gr_subctx: out parameter; set only on success
 * @vm:        VM in which the ctx header VA is reserved
 * @virt_ctx:  server-side channel handle
 *
 * Returns 0 on success, -ENOMEM on allocation/VA failure, or the error
 * reported by the vgpu server.  On failure nothing is leaked: the VA is
 * released (if reserved) and the subctx struct is freed.
 */
int vgpu_alloc_subctx_header(struct gk20a *g,
		struct nvgpu_gr_subctx **gr_subctx,
		struct vm_gk20a *vm, u64 virt_ctx)
{
	struct nvgpu_gr_subctx *subctx;
	struct nvgpu_mem *ctxheader;
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_alloc_ctx_header_params *p =
				&msg.params.alloc_ctx_header;
	int err;

	subctx = nvgpu_kzalloc(g, sizeof(*subctx));
	if (subctx == NULL) {
		return -ENOMEM;
	}

	ctxheader = &subctx->ctx_header;

	msg.cmd = TEGRA_VGPU_CMD_ALLOC_CTX_HEADER;
	msg.handle = vgpu_get_handle(g);
	p->ch_handle = virt_ctx;
	/* Reserve VA sized to the FECS header as reported by the HAL. */
	p->ctx_header_va = nvgpu_vm_alloc_va(vm,
			g->ops.gr.ctxsw_prog.hw_get_fecs_header_size(),
			GMMU_PAGE_SIZE_KERNEL);
	if (p->ctx_header_va == 0U) {
		nvgpu_err(g, "alloc va failed for ctx_header");
		err = -ENOMEM;
		goto fail;
	}
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (unlikely(err != 0)) {
		nvgpu_err(g, "alloc ctx_header failed err %d", err);
		/* Server rejected the header; give the VA back. */
		nvgpu_vm_free_va(vm, p->ctx_header_va,
				GMMU_PAGE_SIZE_KERNEL);
		goto fail;
	}
	ctxheader->gpu_va = p->ctx_header_va;

	*gr_subctx = subctx;
	return 0;
fail:
	nvgpu_kfree(g, subctx);
	return err;
}
/*
 * Free a GR subcontext header previously allocated with
 * vgpu_alloc_subctx_header().
 *
 * Tells the vgpu server (TEGRA_VGPU_CMD_FREE_CTX_HEADER) to release the
 * channel's context header, then frees the GPU VA reservation in @vm and
 * the subctx struct itself.  A server-side failure is logged but the local
 * resources are still released.  Safe to call with @subctx == NULL (no-op).
 *
 * @g:        gk20a instance
 * @subctx:   subctx to free; may be NULL
 * @vm:       VM holding the ctx header VA reservation
 * @virt_ctx: server-side channel handle
 */
void vgpu_free_subctx_header(struct gk20a *g, struct nvgpu_gr_subctx *subctx,
		struct vm_gk20a *vm, u64 virt_ctx)
{
	struct nvgpu_mem *ctxheader;
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_free_ctx_header_params *p =
				&msg.params.free_ctx_header;
	int err;

	if (subctx != NULL) {
		ctxheader = &subctx->ctx_header;

		msg.cmd = TEGRA_VGPU_CMD_FREE_CTX_HEADER;
		msg.handle = vgpu_get_handle(g);
		p->ch_handle = virt_ctx;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		err = err ? err : msg.ret;
		if (unlikely(err != 0)) {
			/* best-effort: log and continue local cleanup */
			nvgpu_err(g, "free ctx_header failed err %d", err);
		}
		nvgpu_vm_free_va(vm, ctxheader->gpu_va,
				GMMU_PAGE_SIZE_KERNEL);
		ctxheader->gpu_va = 0;
		nvgpu_kfree(g, subctx);
	}
}

View File

@@ -20,12 +20,17 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_SUBCTX_VGPU_H
#define NVGPU_SUBCTX_VGPU_H

/*
 * Common vgpu GR subcontext unit: allocation and free of the channel
 * subcontext (FECS context) header via the vgpu server.
 * Forward declarations only; a pointer suffices for all parameters.
 */
struct gk20a;
struct nvgpu_gr_subctx;
struct vm_gk20a;

int vgpu_alloc_subctx_header(struct gk20a *g,
		struct nvgpu_gr_subctx **gr_subctx,
		struct vm_gk20a *vm, u64 virt_ctx);
void vgpu_free_subctx_header(struct gk20a *g, struct nvgpu_gr_subctx *subctx,
		struct vm_gk20a *vm, u64 virt_ctx);

#endif /* NVGPU_SUBCTX_VGPU_H */

View File

@@ -99,7 +99,6 @@
#include "vgpu_gv11b.h"
#include "vgpu_subctx_gv11b.h"
#include "vgpu_tsg_gv11b.h"
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
@@ -507,7 +506,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.setup_sw = vgpu_fifo_setup_sw,
.cleanup_sw = vgpu_fifo_cleanup_sw,
.resetup_ramfc = NULL,
.free_channel_ctx_header = vgpu_gv11b_free_subctx_header,
.free_channel_ctx_header = vgpu_channel_free_ctx_header,
.handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
.ring_channel_doorbell = gv11b_ring_channel_doorbell,
.set_sm_exception_type_mask = vgpu_set_sm_exception_type_mask,