gpu: nvgpu: Combine gk20a and gp10b free_gr_ctx
The gp10b version of free_gr_ctx was created to keep gp10b source code changes out of the mainline. gp10b was merged back to mainline a while ago, so this separation is no longer needed. Merge the two variants.

Change-Id: I954b3b677e98e4248f95641ea22e0def4e583c66
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1635127
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 44a1208fec
Commit: ece3d958b3
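The diff below collapses the per-chip free_gr_ctx hooks into a single implementation selected through the gpu_ops table: the Pascal preemption buffers (pagepool, betacb, spill, preempt) are now freed in the common path, so the gp10b-only variants can be deleted. As a rough illustration of that pattern, here is a minimal, self-contained C sketch; the names in it (mem_desc, gr_ctx_desc, gpu_ops, buf_free, combined_free_gr_ctx) are simplified stand-ins invented for the example, not the nvgpu structures or API.

/*
 * Minimal sketch only: models the "one free path behind a gpu_ops
 * function pointer" pattern.  All names are simplified stand-ins for
 * illustration, not the real nvgpu types or helpers.
 */
#include <stdio.h>
#include <stdlib.h>

struct mem_desc {
	void *cpu_va;                       /* stand-in for an nvgpu_mem buffer */
};

struct gr_ctx_desc {
	struct mem_desc mem;                /* main graphics context buffer */
	struct mem_desc preempt_buffers[4]; /* pagepool/betacb/spill/preempt */
};

struct gpu_ops {
	void (*free_gr_ctx)(struct gr_ctx_desc *gr_ctx);
};

static void buf_free(struct mem_desc *m)
{
	free(m->cpu_va);
	m->cpu_va = NULL;
}

/*
 * Combined free path: releases the Pascal-style preemption buffers
 * (a no-op when they were never allocated) and then the base context
 * buffer, so both older and gp10b-class chips can share it.
 */
static void combined_free_gr_ctx(struct gr_ctx_desc *gr_ctx)
{
	size_t i;

	if (!gr_ctx || !gr_ctx->mem.cpu_va)
		return;

	for (i = 0; i < 4; i++)
		buf_free(&gr_ctx->preempt_buffers[i]);

	buf_free(&gr_ctx->mem);
	free(gr_ctx);
}

int main(void)
{
	/* Every chip's ops table now points at the same implementation. */
	struct gpu_ops gk20a_like_ops = { .free_gr_ctx = combined_free_gr_ctx };
	struct gpu_ops gp10b_like_ops = { .free_gr_ctx = combined_free_gr_ctx };
	struct gr_ctx_desc *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return 1;
	ctx->mem.cpu_va = malloc(64);
	ctx->preempt_buffers[0].cpu_va = malloc(64); /* e.g. a pagepool buffer */

	gp10b_like_ops.free_gr_ctx(ctx);
	gk20a_like_ops.free_gr_ctx(NULL);   /* safe: guarded like the patched code */

	printf("context freed through the shared hook\n");
	return 0;
}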
@@ -26,34 +26,6 @@
 
 #include <nvgpu/hw/gp10b/hw_gr_gp10b.h>
 
-void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
-			struct gr_ctx_desc *gr_ctx)
-{
-	struct tegra_vgpu_cmd_msg msg = {0};
-	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
-	int err;
-
-	gk20a_dbg_fn("");
-
-	if (!gr_ctx || !gr_ctx->mem.gpu_va)
-		return;
-
-	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
-	msg.handle = vgpu_get_handle(g);
-	p->gr_ctx_handle = gr_ctx->virt_ctx;
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	WARN_ON(err || msg.ret);
-
-	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, gmmu_page_size_kernel);
-
-	nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
-	nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
-	nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
-	nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);
-
-	nvgpu_kfree(g, gr_ctx);
-}
-
 int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		struct gr_ctx_desc **__gr_ctx,
 		struct vm_gk20a *vm,
@@ -107,7 +79,7 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 	return err;
 
 fail:
-	vgpu_gr_gp10b_free_gr_ctx(g, vm, gr_ctx);
+	vgpu_gr_free_gr_ctx(g, vm, gr_ctx);
 	return err;
 }
 
@@ -19,8 +19,6 @@
 
 #include "gk20a/gk20a.h"
 
-void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
-			struct gr_ctx_desc *gr_ctx);
 int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		struct gr_ctx_desc **__gr_ctx,
 		struct vm_gk20a *vm,
@@ -128,7 +128,7 @@ static const struct gpu_ops vgpu_gp10b_ops = {
 		.pagepool_default_size = gr_gp10b_pagepool_default_size,
 		.init_ctx_state = vgpu_gr_gp10b_init_ctx_state,
 		.alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx,
-		.free_gr_ctx = vgpu_gr_gp10b_free_gr_ctx,
+		.free_gr_ctx = vgpu_gr_free_gr_ctx,
 		.update_ctxsw_preemption_mode =
 			gr_gp10b_update_ctxsw_preemption_mode,
 		.dump_gr_regs = NULL,
@@ -21,6 +21,7 @@
 #include <nvgpu/kmem.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/error_notifier.h>
+#include <nvgpu/dma.h>
 
 #include "vgpu.h"
 #include "gr_vgpu.h"
@@ -317,13 +318,16 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 			struct gr_ctx_desc *gr_ctx)
 {
-	gk20a_dbg_fn("");
-
-	if (gr_ctx && gr_ctx->mem.gpu_va) {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
 	int err;
 
+	gk20a_dbg_fn("");
+
+	if (!gr_ctx || !gr_ctx->mem.gpu_va)
+		return;
+
 	msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
 	msg.handle = vgpu_get_handle(g);
 	p->gr_ctx_handle = gr_ctx->virt_ctx;
@@ -332,8 +336,13 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 
 	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
 			   gmmu_page_size_kernel);
+
+	nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);
+
 	nvgpu_kfree(g, gr_ctx);
-	}
 }
 
 static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
@@ -148,7 +148,7 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.pagepool_default_size = gr_gv11b_pagepool_default_size,
 		.init_ctx_state = vgpu_gr_gp10b_init_ctx_state,
 		.alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx,
-		.free_gr_ctx = vgpu_gr_gp10b_free_gr_ctx,
+		.free_gr_ctx = vgpu_gr_free_gr_ctx,
 		.update_ctxsw_preemption_mode =
 			gr_gp10b_update_ctxsw_preemption_mode,
 		.dump_gr_regs = NULL,
@@ -2786,6 +2786,14 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 	if (!gr_ctx || !gr_ctx->mem.gpu_va)
 		return;
 
+	if (g->ops.gr.dump_ctxsw_stats &&
+	    g->gr.ctx_vars.dump_ctxsw_stats_on_channel_close)
+		g->ops.gr.dump_ctxsw_stats(g, vm, gr_ctx);
+
+	nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);
 	nvgpu_gmmu_unmap(vm, &gr_ctx->mem, gr_ctx->mem.gpu_va);
 	nvgpu_dma_free(g, &gr_ctx->mem);
 	nvgpu_kfree(g, gr_ctx);
@@ -292,7 +292,7 @@ static const struct gpu_ops gp106_ops = {
 		.pagepool_default_size = gr_gp106_pagepool_default_size,
 		.init_ctx_state = gr_gp10b_init_ctx_state,
 		.alloc_gr_ctx = gr_gp10b_alloc_gr_ctx,
-		.free_gr_ctx = gr_gp10b_free_gr_ctx,
+		.free_gr_ctx = gr_gk20a_free_gr_ctx,
 		.update_ctxsw_preemption_mode =
 			gr_gp10b_update_ctxsw_preemption_mode,
 		.dump_gr_regs = gr_gp10b_dump_gr_status_regs,
@@ -1167,27 +1167,6 @@ void gr_gp10b_dump_ctxsw_stats(struct gk20a *g, struct vm_gk20a *vm,
 	nvgpu_mem_end(g, mem);
 }
 
-void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
-			  struct gr_ctx_desc *gr_ctx)
-{
-	gk20a_dbg_fn("");
-
-	if (!gr_ctx)
-		return;
-
-	if (g->ops.gr.dump_ctxsw_stats &&
-	    g->gr.ctx_vars.dump_ctxsw_stats_on_channel_close)
-		g->ops.gr.dump_ctxsw_stats(g, vm, gr_ctx);
-
-	nvgpu_dma_unmap_free(vm, &gr_ctx->pagepool_ctxsw_buffer);
-	nvgpu_dma_unmap_free(vm, &gr_ctx->betacb_ctxsw_buffer);
-	nvgpu_dma_unmap_free(vm, &gr_ctx->spill_ctxsw_buffer);
-	nvgpu_dma_unmap_free(vm, &gr_ctx->preempt_ctxsw_buffer);
-	gr_gk20a_free_gr_ctx(g, vm, gr_ctx);
-	gk20a_dbg_fn("done");
-}
-
-
 void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		struct channel_ctx_gk20a *ch_ctx,
 		struct nvgpu_mem *mem)
@@ -101,8 +101,6 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
 		struct gr_ctx_desc **gr_ctx, struct vm_gk20a *vm,
 		u32 class,
 		u32 flags);
-void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
-			  struct gr_ctx_desc *gr_ctx);
 void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,
 		struct channel_ctx_gk20a *ch_ctx,
 		struct nvgpu_mem *mem);
@@ -256,7 +256,7 @@ static const struct gpu_ops gp10b_ops = {
 		.pagepool_default_size = gr_gp10b_pagepool_default_size,
 		.init_ctx_state = gr_gp10b_init_ctx_state,
 		.alloc_gr_ctx = gr_gp10b_alloc_gr_ctx,
-		.free_gr_ctx = gr_gp10b_free_gr_ctx,
+		.free_gr_ctx = gr_gk20a_free_gr_ctx,
 		.update_ctxsw_preemption_mode =
 			gr_gp10b_update_ctxsw_preemption_mode,
 		.dump_gr_regs = gr_gp10b_dump_gr_status_regs,
@@ -325,7 +325,7 @@ static const struct gpu_ops gv100_ops = {
 		.pagepool_default_size = gr_gv11b_pagepool_default_size,
 		.init_ctx_state = gr_gp10b_init_ctx_state,
 		.alloc_gr_ctx = gr_gp10b_alloc_gr_ctx,
-		.free_gr_ctx = gr_gp10b_free_gr_ctx,
+		.free_gr_ctx = gr_gk20a_free_gr_ctx,
 		.update_ctxsw_preemption_mode =
 			gr_gp10b_update_ctxsw_preemption_mode,
 		.dump_gr_regs = gr_gv11b_dump_gr_status_regs,
@@ -292,7 +292,7 @@ static const struct gpu_ops gv11b_ops = {
 		.pagepool_default_size = gr_gv11b_pagepool_default_size,
 		.init_ctx_state = gr_gp10b_init_ctx_state,
 		.alloc_gr_ctx = gr_gp10b_alloc_gr_ctx,
-		.free_gr_ctx = gr_gp10b_free_gr_ctx,
+		.free_gr_ctx = gr_gk20a_free_gr_ctx,
 		.update_ctxsw_preemption_mode =
 			gr_gv11b_update_ctxsw_preemption_mode,
 		.dump_gr_regs = gr_gv11b_dump_gr_status_regs,