Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: Rename nvgpu DMA APIs
Rename the nvgpu DMA APIs from gk20a_gmmu_alloc* to nvgpu_dma_alloc*. This
better reflects the purpose of the APIs (to allocate DMA suitable memory)
and avoids confusion with GMMU related code.

JIRA NVGPU-12

Change-Id: I673d607db56dd6e44f02008dc7b5293209ef67bf
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325548
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 8f2d4a3f4a
Commit: 50667e097b
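For reference, a minimal sketch of how a call site changes under this rename. The wrapper function and its size argument below are hypothetical; the `struct gk20a`, `struct nvgpu_mem`, and the `nvgpu_dma_*` entry points are the ones touched by this commit, and the header path is assumed rather than taken from the diff.

```c
#include "gk20a/gk20a.h"	/* brings in struct gk20a; nvgpu_mem comes from the
				 * renamed DMA header (path assumed here) */

/* Hypothetical caller. Before this change it would have used
 * gk20a_gmmu_alloc_sys() and gk20a_gmmu_free(); the arguments and
 * semantics are unchanged by the rename. */
static int example_sysmem_alloc(struct gk20a *g, size_t size)
{
	struct nvgpu_mem mem;	/* same DMA descriptor as before the rename */
	int err;

	err = nvgpu_dma_alloc_sys(g, size, &mem);
	if (err)
		return err;

	/* ... use the buffer (mem.cpu_va, etc.) ... */

	nvgpu_dma_free(g, &mem);
	return 0;
}
```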
@@ -23,7 +23,7 @@
 #include "gk20a/gk20a.h"
 
 #if defined(CONFIG_GK20A_VIDMEM)
-static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
+static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 size_t size)
 {
 u64 addr = 0;
@@ -38,11 +38,11 @@ static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 #endif
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
-static void gk20a_dma_flags_to_attrs(unsigned long *attrs,
+static void nvgpu_dma_flags_to_attrs(unsigned long *attrs,
 unsigned long flags)
 #define ATTR_ARG(x) *x
 #else
-static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
+static void nvgpu_dma_flags_to_attrs(struct dma_attrs *attrs,
 unsigned long flags)
 #define ATTR_ARG(x) x
 #endif
@@ -56,12 +56,12 @@ static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
 #undef ATTR_ARG
 }
 
-int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-return gk20a_gmmu_alloc_flags(g, 0, size, mem);
+return nvgpu_dma_alloc_flags(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
+int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 struct nvgpu_mem *mem)
 {
 if (g->mm.vidmem_is_vidmem) {
@@ -71,7 +71,7 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 * using gk20a_gmmu_alloc_map and it's vidmem, or if there's a
 * difference, the user should use the flag explicitly anyway.
 */
-int err = gk20a_gmmu_alloc_flags_vid(g,
+int err = nvgpu_dma_alloc_flags_vid(g,
 flags | NVGPU_DMA_NO_KERNEL_MAPPING,
 size, mem);
 
@@ -83,15 +83,15 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 */
 }
 
-return gk20a_gmmu_alloc_flags_sys(g, flags, size, mem);
+return nvgpu_dma_alloc_flags_sys(g, flags, size, mem);
 }
 
-int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-return gk20a_gmmu_alloc_flags_sys(g, 0, size, mem);
+return nvgpu_dma_alloc_flags_sys(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 size_t size, struct nvgpu_mem *mem)
 {
 struct device *d = dev_from_gk20a(g);
@@ -103,7 +103,7 @@ int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 if (flags) {
 DEFINE_DMA_ATTRS(dma_attrs);
 
-gk20a_dma_flags_to_attrs(&dma_attrs, flags);
+nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 
 if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 mem->pages = dma_alloc_attrs(d,
@@ -149,19 +149,19 @@ fail_free:
 return err;
 }
 
-int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-return gk20a_gmmu_alloc_flags_vid(g,
+return nvgpu_dma_alloc_flags_vid(g,
 NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
 size_t size, struct nvgpu_mem *mem)
 {
-return gk20a_gmmu_alloc_flags_vid_at(g, flags, size, mem, 0);
+return nvgpu_dma_alloc_flags_vid_at(g, flags, size, mem, 0);
 }
 
-int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 size_t size, struct nvgpu_mem *mem, dma_addr_t at)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -185,7 +185,7 @@ int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 
 nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
 before_pending = atomic64_read(&g->mm.vidmem.bytes_pending);
-addr = __gk20a_gmmu_alloc(vidmem_alloc, at, size);
+addr = __nvgpu_dma_alloc(vidmem_alloc, at, size);
 nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
 if (!addr) {
 /*
@@ -237,23 +237,23 @@ fail_physfree:
 #endif
 }
 
-int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 struct nvgpu_mem *mem)
 {
-return gk20a_gmmu_alloc_map_flags(vm, 0, size, mem);
+return nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 size_t size, struct nvgpu_mem *mem)
 {
 if (vm->mm->vidmem_is_vidmem) {
 /*
 * Force the no-kernel-mapping flag on because we don't support
 * the lack of it for vidmem - the user should not care when
-* using gk20a_gmmu_alloc_map and it's vidmem, or if there's a
+* using nvgpu_dma_alloc_map and it's vidmem, or if there's a
 * difference, the user should use the flag explicitly anyway.
 */
-int err = gk20a_gmmu_alloc_map_flags_vid(vm,
+int err = nvgpu_dma_alloc_map_flags_vid(vm,
 flags | NVGPU_DMA_NO_KERNEL_MAPPING,
 size, mem);
 
@@ -265,19 +265,19 @@ int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 */
 }
 
-return gk20a_gmmu_alloc_map_flags_sys(vm, flags, size, mem);
+return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
 struct nvgpu_mem *mem)
 {
-return gk20a_gmmu_alloc_map_flags_sys(vm, 0, size, mem);
+return nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 size_t size, struct nvgpu_mem *mem)
 {
-int err = gk20a_gmmu_alloc_flags_sys(vm->mm->g, flags, size, mem);
+int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem);
 
 if (err)
 return err;
@@ -293,21 +293,21 @@ int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 return 0;
 
 fail_free:
-gk20a_gmmu_free(vm->mm->g, mem);
+nvgpu_dma_free(vm->mm->g, mem);
 return err;
 }
 
-int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
 struct nvgpu_mem *mem)
 {
-return gk20a_gmmu_alloc_map_flags_vid(vm,
+return nvgpu_dma_alloc_map_flags_vid(vm,
 NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 size_t size, struct nvgpu_mem *mem)
 {
-int err = gk20a_gmmu_alloc_flags_vid(vm->mm->g, flags, size, mem);
+int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem);
 
 if (err)
 return err;
@@ -323,11 +323,11 @@ int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 return 0;
 
 fail_free:
-gk20a_gmmu_free(vm->mm->g, mem);
+nvgpu_dma_free(vm->mm->g, mem);
 return err;
 }
 
-static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
+static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 struct device *d = dev_from_gk20a(g);
 
@@ -335,7 +335,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 if (mem->flags) {
 DEFINE_DMA_ATTRS(dma_attrs);
 
-gk20a_dma_flags_to_attrs(&dma_attrs, mem->flags);
+nvgpu_dma_flags_to_attrs(&dma_attrs, mem->flags);
 
 if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 dma_free_attrs(d, mem->size, mem->pages,
@@ -361,7 +361,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 mem->aperture = APERTURE_INVALID;
 }
 
-static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
+static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 bool was_empty;
@@ -393,23 +393,24 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 #endif
 }
 
-void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem)
+void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 switch (mem->aperture) {
 case APERTURE_SYSMEM:
-return gk20a_gmmu_free_sys(g, mem);
+return nvgpu_dma_free_sys(g, mem);
 case APERTURE_VIDMEM:
-return gk20a_gmmu_free_vid(g, mem);
+return nvgpu_dma_free_vid(g, mem);
 default:
 break; /* like free() on "null" memory */
 }
 }
 
-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
+void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
 {
 if (mem->gpu_va)
-gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size, gk20a_mem_flag_none);
+gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size,
+gk20a_mem_flag_none);
 mem->gpu_va = 0;
 
-gk20a_gmmu_free(vm->mm->g, mem);
+nvgpu_dma_free(vm->mm->g, mem);
 }
@@ -53,7 +53,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 
 __lock_sema_sea(sea);
 
-ret = gk20a_gmmu_alloc_flags_sys(gk20a, NVGPU_DMA_NO_KERNEL_MAPPING,
+ret = nvgpu_dma_alloc_flags_sys(gk20a, NVGPU_DMA_NO_KERNEL_MAPPING,
 PAGE_SIZE * SEMAPHORE_POOL_COUNT,
 &sea->sea_mem);
 if (ret)
@@ -54,7 +54,7 @@ static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
 
 for (i = 0; i < cde_ctx->num_bufs; i++) {
 struct nvgpu_mem *mem = cde_ctx->mem + i;
-gk20a_gmmu_unmap_free(cde_ctx->vm, mem);
+nvgpu_dma_unmap_free(cde_ctx->vm, mem);
 }
 
 nvgpu_kfree(cde_ctx->g, cde_ctx->init_convert_cmd);
@@ -247,7 +247,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 
 /* allocate buf */
 mem = cde_ctx->mem + cde_ctx->num_bufs;
-err = gk20a_gmmu_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
+err = nvgpu_dma_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
 if (err) {
 gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
 cde_ctx->num_bufs);
@@ -195,7 +195,7 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 
 if (ce_ctx->cmd_buf_mem.cpu_va) {
 gk20a_ce_free_command_buffer_stored_fence(ce_ctx);
-gk20a_gmmu_unmap_free(ce_ctx->vm, &ce_ctx->cmd_buf_mem);
+nvgpu_dma_unmap_free(ce_ctx->vm, &ce_ctx->cmd_buf_mem);
 }
 
 /* free the channel */
@@ -479,7 +479,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 }
 
 /* allocate command buffer (4096 should be more than enough) from sysmem*/
-err = gk20a_gmmu_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
+err = nvgpu_dma_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
 if (err) {
 gk20a_err(ce_ctx->dev,
 "ce: could not allocate command buffer for CE context");
@@ -523,7 +523,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 
 gk20a_gr_flush_channel_tlb(gr);
 
-gk20a_gmmu_unmap_free(ch_vm, &ch->gpfifo.mem);
+nvgpu_dma_unmap_free(ch_vm, &ch->gpfifo.mem);
 nvgpu_big_free(g, ch->gpfifo.pipe);
 memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
 
@@ -899,7 +899,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
 size = roundup_pow_of_two(c->gpfifo.entry_num *
 2 * 18 * sizeof(u32) / 3);
 
-err = gk20a_gmmu_alloc_map_sys(ch_vm, size, &q->mem);
+err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
 if (err) {
 gk20a_err(d, "%s: memory allocation failed\n", __func__);
 goto clean_up;
@@ -922,7 +922,7 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c)
 if (q->size == 0)
 return;
 
-gk20a_gmmu_unmap_free(ch_vm, &q->mem);
+nvgpu_dma_unmap_free(ch_vm, &q->mem);
 
 memset(q, 0, sizeof(struct priv_cmd_queue));
 }
@@ -1244,7 +1244,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 return -EEXIST;
 }
 
-err = gk20a_gmmu_alloc_map_sys(ch_vm,
+err = nvgpu_dma_alloc_map_sys(ch_vm,
 gpfifo_size * sizeof(struct nvgpu_gpfifo),
 &c->gpfifo.mem);
 if (err) {
@@ -1331,7 +1331,7 @@ clean_up_sync:
 }
 clean_up_unmap:
 nvgpu_big_free(g, c->gpfifo.pipe);
-gk20a_gmmu_unmap_free(ch_vm, &c->gpfifo.mem);
+nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
 clean_up:
 memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
 gk20a_err(d, "fail");
@@ -143,7 +143,7 @@ static int css_hw_enable_snapshot(struct channel_gk20a *ch,
 if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE)
 snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE;
 
-ret = gk20a_gmmu_alloc_map_sys(&g->mm.pmu.vm, snapshot_size,
+ret = nvgpu_dma_alloc_map_sys(&g->mm.pmu.vm, snapshot_size,
 &data->hw_memdesc);
 if (ret)
 return ret;
@@ -192,7 +192,7 @@ static int css_hw_enable_snapshot(struct channel_gk20a *ch,
 
 failed_allocation:
 if (data->hw_memdesc.size) {
-gk20a_gmmu_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
+nvgpu_dma_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
 memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 }
 data->hw_snapshot = NULL;
@@ -220,7 +220,7 @@ static void css_hw_disable_snapshot(struct gr_gk20a *gr)
 perf_pmasys_mem_block_valid_false_f() |
 perf_pmasys_mem_block_target_f(0));
 
-gk20a_gmmu_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
+nvgpu_dma_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
 memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 data->hw_snapshot = NULL;
 
@@ -400,7 +400,7 @@ static int gk20a_fecs_trace_alloc_ring(struct gk20a *g)
 {
 struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-return gk20a_gmmu_alloc_sys(g, GK20A_FECS_TRACE_NUM_RECORDS
+return nvgpu_dma_alloc_sys(g, GK20A_FECS_TRACE_NUM_RECORDS
 * ctxsw_prog_record_timestamp_record_size_in_bytes_v(),
 &trace->trace_buf);
 }
@@ -409,7 +409,7 @@ static void gk20a_fecs_trace_free_ring(struct gk20a *g)
 {
 struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-gk20a_gmmu_free(g, &trace->trace_buf);
+nvgpu_dma_free(g, &trace->trace_buf);
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -483,7 +483,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
 runlist = &f->runlist_info[runlist_id];
 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-gk20a_gmmu_free(g, &runlist->mem[i]);
+nvgpu_dma_free(g, &runlist->mem[i]);
 }
 
 nvgpu_kfree(g, runlist->active_channels);
@@ -544,9 +544,9 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 nvgpu_vfree(g, f->channel);
 nvgpu_vfree(g, f->tsg);
 if (g->ops.mm.is_bar1_supported(g))
-gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
+nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd);
 else
-gk20a_gmmu_free(g, &f->userd);
+nvgpu_dma_free(g, &f->userd);
 
 gk20a_fifo_delete_runlist(f);
 
@@ -686,7 +686,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 f->num_runlist_entries, runlist_size);
 
 for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-int err = gk20a_gmmu_alloc_sys(g, runlist_size,
+int err = nvgpu_dma_alloc_sys(g, runlist_size,
 &runlist->mem[i]);
 if (err) {
 dev_err(d, "memory allocation failed\n");
@@ -940,12 +940,12 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 nvgpu_mutex_init(&f->free_chs_mutex);
 
 if (g->ops.mm.is_bar1_supported(g))
-err = gk20a_gmmu_alloc_map_sys(&g->mm.bar1.vm,
+err = nvgpu_dma_alloc_map_sys(&g->mm.bar1.vm,
 f->userd_entry_size * f->num_channels,
 &f->userd);
 
 else
-err = gk20a_gmmu_alloc_sys(g, f->userd_entry_size *
+err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
 f->num_channels, &f->userd);
 if (err) {
 dev_err(d, "userd memory allocation failed\n");
@@ -980,9 +980,9 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 clean_up:
 gk20a_dbg_fn("fail");
 if (g->ops.mm.is_bar1_supported(g))
-gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
+nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd);
 else
-gk20a_gmmu_free(g, &f->userd);
+nvgpu_dma_free(g, &f->userd);
 
 nvgpu_vfree(g, f->channel);
 f->channel = NULL;
@@ -1938,7 +1938,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 if (enable_hwpm_ctxsw) {
 /* Allocate buffer if necessary */
 if (pm_ctx->mem.gpu_va == 0) {
-ret = gk20a_gmmu_alloc_flags_sys(g,
+ret = nvgpu_dma_alloc_flags_sys(g,
 NVGPU_DMA_NO_KERNEL_MAPPING,
 g->gr.ctx_vars.pm_ctxsw_image_size,
 &pm_ctx->mem);
@@ -1958,7 +1958,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 if (!pm_ctx->mem.gpu_va) {
 gk20a_err(dev_from_gk20a(g),
 "failed to map pm ctxt buffer");
-gk20a_gmmu_free(g, &pm_ctx->mem);
+nvgpu_dma_free(g, &pm_ctx->mem);
 c->g->ops.fifo.enable_channel(c);
 return -ENOMEM;
 }
@@ -2018,7 +2018,7 @@ clean_up_mem:
 cleanup_pm_buf:
 gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 gk20a_mem_flag_none);
-gk20a_gmmu_free(g, &pm_ctx->mem);
+nvgpu_dma_free(g, &pm_ctx->mem);
 memset(&pm_ctx->mem, 0, sizeof(struct nvgpu_mem));
 
 gk20a_enable_channel_tsg(g, c);
@@ -2318,7 +2318,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32),
 g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32));
 
-err = gk20a_gmmu_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
+err = nvgpu_dma_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
 if (err)
 goto clean_up;
 
@@ -2350,7 +2350,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 if (ucode_info->surface_desc.gpu_va)
 gk20a_gmmu_unmap(vm, ucode_info->surface_desc.gpu_va,
 ucode_info->surface_desc.size, gk20a_mem_flag_none);
-gk20a_gmmu_free(g, &ucode_info->surface_desc);
+nvgpu_dma_free(g, &ucode_info->surface_desc);
 
 release_firmware(gpccs_fw);
 gpccs_fw = NULL;
@@ -2700,7 +2700,7 @@ static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
 {
 if (!desc)
 return;
-gk20a_gmmu_free(g, &desc->mem);
+nvgpu_dma_free(g, &desc->mem);
 desc->destroy = NULL;
 }
 
@@ -2710,7 +2710,7 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
 {
 int err = 0;
 
-err = gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 size, &desc->mem);
 if (err)
 return err;
@@ -2953,7 +2953,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 if (!gr_ctx)
 return -ENOMEM;
 
-err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 gr->ctx_vars.buffer_total_size,
 &gr_ctx->mem);
 if (err)
@@ -2973,7 +2973,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 return 0;
 
 err_free_mem:
-gk20a_gmmu_free(g, &gr_ctx->mem);
+nvgpu_dma_free(g, &gr_ctx->mem);
 err_free_ctx:
 nvgpu_kfree(g, gr_ctx);
 gr_ctx = NULL;
@@ -3022,7 +3022,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 
 gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
 gr_ctx->mem.size, gk20a_mem_flag_none);
-gk20a_gmmu_free(g, &gr_ctx->mem);
+nvgpu_dma_free(g, &gr_ctx->mem);
 nvgpu_kfree(g, gr_ctx);
 }
 
@@ -3051,7 +3051,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 
 gk20a_dbg_fn("");
 
-err = gk20a_gmmu_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
+err = nvgpu_dma_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
 128 * sizeof(u32), &patch_ctx->mem);
 if (err)
 return err;
@@ -3071,7 +3071,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
 gk20a_gmmu_unmap(c->vm, patch_ctx->mem.gpu_va,
 patch_ctx->mem.size, gk20a_mem_flag_none);
 
-gk20a_gmmu_free(g, &patch_ctx->mem);
+nvgpu_dma_free(g, &patch_ctx->mem);
 patch_ctx->data_count = 0;
 }
 
@@ -3086,7 +3086,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct channel_gk20a *c)
 gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va,
 pm_ctx->mem.size, gk20a_mem_flag_none);
 
-gk20a_gmmu_free(g, &pm_ctx->mem);
+nvgpu_dma_free(g, &pm_ctx->mem);
 }
 }
 
@@ -3366,10 +3366,10 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
 
 gr_gk20a_free_global_ctx_buffers(g);
 
-gk20a_gmmu_free(g, &gr->mmu_wr_mem);
-gk20a_gmmu_free(g, &gr->mmu_rd_mem);
+nvgpu_dma_free(g, &gr->mmu_wr_mem);
+nvgpu_dma_free(g, &gr->mmu_rd_mem);
 
-gk20a_gmmu_free(g, &gr->compbit_store.mem);
+nvgpu_dma_free(g, &gr->compbit_store.mem);
 
 memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc));
 
@@ -3658,17 +3658,17 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
 {
 int err;
 
-err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
+err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
 if (err)
 goto err;
 
-err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
+err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
 if (err)
 goto err_free_wr_mem;
 return 0;
 
 err_free_wr_mem:
-gk20a_gmmu_free(g, &gr->mmu_wr_mem);
+nvgpu_dma_free(g, &gr->mmu_wr_mem);
 err:
 return -ENOMEM;
 }
@@ -5215,7 +5215,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
 }
 
 if (!pmu->pg_buf.cpu_va) {
-err = gk20a_gmmu_alloc_map_sys(vm, size, &pmu->pg_buf);
+err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
 if (err) {
 gk20a_err(d, "failed to allocate memory\n");
 return -ENOMEM;
@@ -70,7 +70,7 @@ static int gk20a_ltc_alloc_phys_cbc(struct gk20a *g,
 {
 struct gr_gk20a *gr = &g->gr;
 
-return gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
+return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
 compbit_backing_size,
 &gr->compbit_store.mem);
 }
@@ -80,7 +80,7 @@ static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
 {
 struct gr_gk20a *gr = &g->gr;
 
-return gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 compbit_backing_size,
 &gr->compbit_store.mem);
 }
@@ -509,7 +509,7 @@ static void gk20a_remove_mm_support(struct mm_gk20a *mm)
 
 static int gk20a_alloc_sysmem_flush(struct gk20a *g)
 {
-return gk20a_gmmu_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
+return nvgpu_dma_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
 }
 
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -897,9 +897,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 * default.
 */
 if (IS_ENABLED(CONFIG_ARM64))
-err = gk20a_gmmu_alloc(g, len, &entry->mem);
+err = nvgpu_dma_alloc(g, len, &entry->mem);
 else
-err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 len, &entry->mem);
 
 
@@ -929,7 +929,7 @@ void free_gmmu_pages(struct vm_gk20a *vm,
 return;
 }
 
-gk20a_gmmu_free(g, &entry->mem);
+nvgpu_dma_free(g, &entry->mem);
 }
 
 int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
@@ -1756,7 +1756,7 @@ static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
 if (buf->dmabuf_priv)
 buf->dmabuf_priv_delete(buf->dmabuf_priv);
 
-gk20a_gmmu_free(buf->g, buf->mem);
+nvgpu_dma_free(buf->g, buf->mem);
 nvgpu_kfree(buf->g, buf);
 }
 
@@ -1873,7 +1873,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 
 buf->mem->user_mem = true;
 
-err = gk20a_gmmu_alloc_vid(g, bytes, buf->mem);
+err = nvgpu_dma_alloc_vid(g, bytes, buf->mem);
 if (err)
 goto err_memfree;
 
@@ -1896,7 +1896,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 return fd;
 
 err_bfree:
-gk20a_gmmu_free(g, buf->mem);
+nvgpu_dma_free(g, buf->mem);
 err_memfree:
 nvgpu_kfree(g, buf->mem);
 err_kfree:
@@ -4199,7 +4199,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 
 gk20a_dbg_fn("");
 
-err = gk20a_gmmu_alloc(g, ram_in_alloc_size_v(), inst_block);
+err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
 if (err) {
 gk20a_err(dev, "%s: memory allocation failed\n", __func__);
 return err;
@@ -4212,7 +4212,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 void gk20a_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 if (inst_block->size)
-gk20a_gmmu_free(g, inst_block);
+nvgpu_dma_free(g, inst_block);
 }
 
 u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block)
@@ -3151,7 +3151,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
 pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
 pmu->desc->descriptor_size);
 
-err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
+err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
 &pmu->ucode);
 if (err)
 goto err_release_fw;
@@ -3225,7 +3225,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
 INIT_WORK(&pmu->pg_init, pmu_setup_hw);
 
-err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
+err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
 &pmu->seq_buf);
 if (err) {
 gk20a_err(d, "failed to allocate memory\n");
@@ -3242,7 +3242,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
 pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
 
-err = gk20a_gmmu_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
+err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
 &pmu->trace_buf);
 if (err) {
 gk20a_err(d, "failed to allocate pmu trace buffer\n");
@@ -3255,7 +3255,7 @@ skip_init:
 gk20a_dbg_fn("done");
 return 0;
 err_free_seq_buf:
-gk20a_gmmu_unmap_free(vm, &pmu->seq_buf);
+nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
 err_free_seq:
 nvgpu_kfree(g, pmu->seq);
 err_free_mutex:
@@ -4760,7 +4760,7 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 struct vm_gk20a *vm = &mm->pmu.vm;
 int err;
 
-err = gk20a_gmmu_alloc_map_vid(vm, size, mem);
+err = nvgpu_dma_alloc_map_vid(vm, size, mem);
 if (err) {
 gk20a_err(g->dev, "memory allocation failed");
 return -ENOMEM;
@@ -4776,7 +4776,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 struct vm_gk20a *vm = &mm->pmu.vm;
 int err;
 
-err = gk20a_gmmu_alloc_map_sys(vm, size, mem);
+err = nvgpu_dma_alloc_map_sys(vm, size, mem);
 if (err) {
 gk20a_err(g->dev, "failed to allocate memory\n");
 return -ENOMEM;
@@ -4787,7 +4787,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 
 void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
-gk20a_gmmu_free(g, mem);
+nvgpu_dma_free(g, mem);
 memset(mem, 0, sizeof(struct nvgpu_mem));
 }
 
@@ -369,7 +369,7 @@ static int gm20b_alloc_blob_space(struct gk20a *g,
 {
 int err;
 
-err = gk20a_gmmu_alloc_sys(g, size, mem);
+err = nvgpu_dma_alloc_sys(g, size, mem);
 
 return err;
 }
@@ -1115,7 +1115,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 err = -1;
 goto err_release_acr_fw;
 }
-err = gk20a_gmmu_alloc_map_sys(vm, img_size_in_bytes,
+err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
 &acr->acr_ucode);
 if (err) {
 err = -ENOMEM;
@@ -1171,7 +1171,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
 }
 return 0;
 err_free_ucode_map:
-gk20a_gmmu_unmap_free(vm, &acr->acr_ucode);
+nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
 err_release_acr_fw:
 release_firmware(acr_fw);
 acr->acr_fw = NULL;
@@ -1417,7 +1417,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
 /*TODO in code verify that enable PMU is done,
 scrubbing etc is done*/
 /*TODO in code verify that gmmu vm init is done*/
-err = gk20a_gmmu_alloc_flags_sys(g,
+err = nvgpu_dma_alloc_flags_sys(g,
 NVGPU_DMA_READ_ONLY, bl_sz, &acr->hsbl_ucode);
 if (err) {
 gk20a_err(d, "failed to allocate memory\n");
@@ -1475,7 +1475,7 @@ err_unmap_bl:
 gk20a_gmmu_unmap(vm, acr->hsbl_ucode.gpu_va,
 acr->hsbl_ucode.size, gk20a_mem_flag_none);
 err_free_ucode:
-gk20a_gmmu_free(g, &acr->hsbl_ucode);
+nvgpu_dma_free(g, &acr->hsbl_ucode);
 err_done:
 release_firmware(hsbl_fw);
 return err;
@@ -113,13 +113,13 @@ static int gp106_alloc_blob_space(struct gk20a *g,
 * Even though this mem_desc wouldn't be used, the wpr region needs to
 * be reserved in the allocator.
 */
-err = gk20a_gmmu_alloc_flags_vid_at(g,
+err = nvgpu_dma_alloc_flags_vid_at(g,
 NVGPU_DMA_NO_KERNEL_MAPPING, wpr_inf.size,
 &g->acr.wpr_dummy, wpr_inf.wpr_base);
 if (err)
 return err;
 
-return gk20a_gmmu_alloc_flags_vid_at(g,
+return nvgpu_dma_alloc_flags_vid_at(g,
 NVGPU_DMA_NO_KERNEL_MAPPING, wpr_inf.size, mem,
 wpr_inf.nonwpr_base);
 }
@@ -1094,7 +1094,7 @@ static int gp106_bootstrap_hs_flcn(struct gk20a *g)
 err = -1;
 goto err_release_acr_fw;
 }
-err = gk20a_gmmu_alloc_map_sys(vm, img_size_in_bytes,
+err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
 &acr->acr_ucode);
 if (err) {
 err = -ENOMEM;
@@ -1170,7 +1170,7 @@ static int gp106_bootstrap_hs_flcn(struct gk20a *g)
 
 return 0;
 err_free_ucode_map:
-gk20a_gmmu_unmap_free(vm, &acr->acr_ucode);
+nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
 err_release_acr_fw:
 release_firmware(acr_fw);
 acr->acr_fw = NULL;
@@ -226,11 +226,11 @@ static int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 return 0;
 
 fail_free_betacb:
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
 fail_free_spill:
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
 fail_free_preempt:
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 fail:
 return err;
 }
@@ -839,7 +839,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 
 gk20a_dbg_fn("");
 
-err = gk20a_gmmu_alloc_sys(vm->mm->g, size, mem);
+err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
 if (err)
 return err;
 
@@ -859,7 +859,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 return 0;
 
 fail_free:
-gk20a_gmmu_free(vm->mm->g, mem);
+nvgpu_dma_free(vm->mm->g, mem);
 return err;
 }
 
@@ -980,11 +980,11 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
 return 0;
 
 fail_free_betacb:
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
 fail_free_spill:
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
 fail_free_preempt:
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 fail:
 return err;
 }
@@ -1098,10 +1098,10 @@ static void gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 if (g->gr.t18x.ctx_vars.dump_ctxsw_stats_on_channel_close)
 dump_ctx_switch_stats(g, vm, gr_ctx);
 
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
-gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 gr_gk20a_free_gr_ctx(g, vm, gr_ctx);
 gk20a_dbg_fn("done");
 }
@@ -39,7 +39,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
 gk20a_dbg_fn("");
 
 if (!g->mm.bar2_desc.gpu_va) {
-err = gk20a_gmmu_alloc_map_sys(vm, rbfb_size,
+err = nvgpu_dma_alloc_map_sys(vm, rbfb_size,
 &g->mm.bar2_desc);
 if (err) {
 dev_err(dev_from_gk20a(g),
@@ -63,7 +63,7 @@ void gp10b_replayable_pagefault_buffer_deinit(struct gk20a *g)
 {
 struct vm_gk20a *vm = &g->mm.bar2.vm;
 
-gk20a_gmmu_unmap_free(vm, &g->mm.bar2_desc);
+nvgpu_dma_unmap_free(vm, &g->mm.bar2_desc);
 }
 
 u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g)
@@ -24,7 +24,7 @@ struct vm_gk20a;
|
|||||||
struct nvgpu_mem;
|
struct nvgpu_mem;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Flags for the below gk20a_gmmu_{alloc,alloc_map}_flags*
|
* Flags for the below nvgpu_dma_{alloc,alloc_map}_flags*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -45,7 +45,7 @@ struct nvgpu_mem;
|
|||||||
#define NVGPU_DMA_READ_ONLY (1 << 2)
|
#define NVGPU_DMA_READ_ONLY (1 << 2)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gk20a_gmmu_alloc - Allocate DMA memory
|
* nvgpu_dma_alloc - Allocate DMA memory
|
||||||
*
|
*
|
||||||
* @g - The GPU.
|
* @g - The GPU.
|
||||||
* @size - Size of the allocation in bytes.
|
* @size - Size of the allocation in bytes.
|
||||||
@@ -56,10 +56,10 @@ struct nvgpu_mem;
|
|||||||
* memory can be either placed in VIDMEM or SYSMEM, which ever is more
|
* memory can be either placed in VIDMEM or SYSMEM, which ever is more
|
||||||
* convenient for the driver.
|
* convenient for the driver.
|
||||||
*/
|
*/
|
||||||
int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
|
int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gk20a_gmmu_alloc_flags - Allocate DMA memory
|
* nvgpu_dma_alloc_flags - Allocate DMA memory
|
||||||
*
|
*
|
||||||
* @g - The GPU.
|
* @g - The GPU.
|
||||||
* @flags - Flags modifying the operation of the DMA allocation.
|
* @flags - Flags modifying the operation of the DMA allocation.
|
||||||
@@ -77,11 +77,11 @@ int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
|
|||||||
* %NVGPU_DMA_FORCE_CONTIGUOUS
|
* %NVGPU_DMA_FORCE_CONTIGUOUS
|
||||||
* %NVGPU_DMA_READ_ONLY
|
* %NVGPU_DMA_READ_ONLY
|
||||||
*/
|
*/
|
||||||
int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
|
int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
|
||||||
struct nvgpu_mem *mem);
|
struct nvgpu_mem *mem);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gk20a_gmmu_alloc_sys - Allocate DMA memory
|
* nvgpu_dma_alloc_sys - Allocate DMA memory
|
||||||
*
|
*
|
||||||
* @g - The GPU.
|
* @g - The GPU.
|
||||||
* @size - Size of the allocation in bytes.
|
* @size - Size of the allocation in bytes.
|
||||||
@@ -91,10 +91,10 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
|
|||||||
* Returns 0 on success and a suitable error code when there's an error. This
|
* Returns 0 on success and a suitable error code when there's an error. This
|
||||||
* allocates memory specifically in SYSMEM.
|
* allocates memory specifically in SYSMEM.
|
||||||
*/
|
*/
|
||||||
int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
|
int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gk20a_gmmu_alloc_flags_sys - Allocate DMA memory
|
* nvgpu_dma_alloc_flags_sys - Allocate DMA memory
|
||||||
*
|
*
|
||||||
* @g - The GPU.
|
* @g - The GPU.
|
||||||
* @flags - Flags modifying the operation of the DMA allocation.
|
* @flags - Flags modifying the operation of the DMA allocation.
|
||||||
@@ -111,11 +111,11 @@ int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
|
|||||||
* %NVGPU_DMA_FORCE_CONTIGUOUS
|
* %NVGPU_DMA_FORCE_CONTIGUOUS
|
||||||
* %NVGPU_DMA_READ_ONLY
|
* %NVGPU_DMA_READ_ONLY
|
||||||
*/
|
*/
|
||||||
int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
|
int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
|
||||||
size_t size, struct nvgpu_mem *mem);
|
size_t size, struct nvgpu_mem *mem);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gk20a_gmmu_alloc_vid - Allocate DMA memory
|
* nvgpu_dma_alloc_vid - Allocate DMA memory
|
||||||
*
|
*
|
||||||
* @g - The GPU.
|
* @g - The GPU.
|
||||||
* @size - Size of the allocation in bytes.
|
* @size - Size of the allocation in bytes.
|
||||||
@@ -125,10 +125,10 @@ int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
|
|||||||
* Returns 0 on success and a suitable error code when there's an error. This
|
* Returns 0 on success and a suitable error code when there's an error. This
|
||||||
* allocates memory specifically in VIDMEM.
|
* allocates memory specifically in VIDMEM.
|
||||||
*/
|
*/
|
||||||
int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
|
int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gk20a_gmmu_alloc_flags_vid - Allocate DMA memory
|
* nvgpu_dma_alloc_flags_vid - Allocate DMA memory
|
||||||
*
|
*
|
||||||
* @g - The GPU.
|
* @g - The GPU.
|
||||||
* @flags - Flags modifying the operation of the DMA allocation.
|
* @flags - Flags modifying the operation of the DMA allocation.
|
||||||
@@ -144,11 +144,11 @@ int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem);
|
|||||||
* %NVGPU_DMA_NO_KERNEL_MAPPING
|
* %NVGPU_DMA_NO_KERNEL_MAPPING
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
|
int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
|
||||||
size_t size, struct nvgpu_mem *mem);
|
size_t size, struct nvgpu_mem *mem);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gk20a_gmmu_alloc_flags_vid_at - Allocate DMA memory
|
* nvgpu_dma_alloc_flags_vid_at - Allocate DMA memory
|
||||||
*
|
*
|
||||||
* @g - The GPU.
|
* @g - The GPU.
|
||||||
* @flags - Flags modifying the operation of the DMA allocation.
|
* @flags - Flags modifying the operation of the DMA allocation.
|
||||||
@@ -165,29 +165,29 @@ int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
|
|||||||
*
|
*
|
||||||
* %NVGPU_DMA_NO_KERNEL_MAPPING
|
* %NVGPU_DMA_NO_KERNEL_MAPPING
|
||||||
*/
|
*/
|
||||||
int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
|
int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
|
||||||
size_t size, struct nvgpu_mem *mem, dma_addr_t at);
|
size_t size, struct nvgpu_mem *mem, dma_addr_t at);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gk20a_gmmu_free - Free a DMA allocation
|
* nvgpu_dma_free - Free a DMA allocation
|
||||||
*
|
*
|
||||||
* @g - The GPU.
|
* @g - The GPU.
|
||||||
* @mem - An allocation to free.
|
* @mem - An allocation to free.
|
||||||
*
|
*
|
||||||
* Free memory created with any of:
|
* Free memory created with any of:
|
||||||
*
|
*
|
||||||
* gk20a_gmmu_alloc()
|
* nvgpu_dma_alloc()
|
||||||
* gk20a_gmmu_alloc_flags()
|
* nvgpu_dma_alloc_flags()
|
||||||
* gk20a_gmmu_alloc_sys()
|
* nvgpu_dma_alloc_sys()
|
||||||
* gk20a_gmmu_alloc_flags_sys()
|
* nvgpu_dma_alloc_flags_sys()
|
||||||
* gk20a_gmmu_alloc_vid()
|
* nvgpu_dma_alloc_vid()
|
||||||
* gk20a_gmmu_alloc_flags_vid()
|
* nvgpu_dma_alloc_flags_vid()
|
||||||
* gk20a_gmmu_alloc_flags_vid_at()
|
* nvgpu_dma_alloc_flags_vid_at()
|
||||||
*/
|
*/
|
||||||
void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem);
|
void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gk20a_gmmu_alloc_map - Allocate DMA memory and map into GMMU.
|
* nvgpu_dma_alloc_map - Allocate DMA memory and map into GMMU.
|
||||||
*
|
*
|
||||||
* @vm - VM context for GMMU mapping.
|
* @vm - VM context for GMMU mapping.
|
||||||
* @size - Size of the allocation in bytes.
|
* @size - Size of the allocation in bytes.
|
||||||
@@ -198,11 +198,11 @@ void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem);
  * either placed in VIDMEM or SYSMEM, which ever is more convenient for the
  * driver.
  */
-int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_flags - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_flags - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -221,11 +221,11 @@ int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
  * %NVGPU_DMA_FORCE_CONTIGUOUS
  * %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_sys - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_sys - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @size - Size of the allocation in bytes.
@@ -234,11 +234,11 @@ int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
  * Allocate memory suitable for doing DMA and map that memory into the GMMU.
  * This memory will be placed in SYSMEM.
  */
-int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_flags_sys - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_flags_sys - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -255,11 +255,11 @@ int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
  * %NVGPU_DMA_FORCE_CONTIGUOUS
  * %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_vid - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_vid - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @size - Size of the allocation in bytes.
@@ -268,11 +268,11 @@ int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
  * Allocate memory suitable for doing DMA and map that memory into the GMMU.
  * This memory will be placed in VIDMEM.
  */
-int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
 		struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_alloc_map_flags_vid - Allocate DMA memory and map into GMMU.
+ * nvgpu_dma_alloc_map_flags_vid - Allocate DMA memory and map into GMMU.
  *
  * @vm - VM context for GMMU mapping.
  * @flags - Flags modifying the operation of the DMA allocation.
@@ -289,24 +289,24 @@ int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
  * %NVGPU_DMA_FORCE_CONTIGUOUS
  * %NVGPU_DMA_READ_ONLY
  */
-int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 		size_t size, struct nvgpu_mem *mem);
 
 /**
- * gk20a_gmmu_unmap_free - Free a DMA allocation
+ * nvgpu_dma_unmap_free - Free a DMA allocation
  *
  * @g - The GPU.
  * @mem - An allocation to free.
  *
  * Free memory created with any of:
  *
- * gk20a_gmmu_alloc_map()
- * gk20a_gmmu_alloc_map_flags()
- * gk20a_gmmu_alloc_map_sys()
- * gk20a_gmmu_alloc_map_flags_sys()
- * gk20a_gmmu_alloc_map_vid()
- * gk20a_gmmu_alloc_map_flags_vid()
+ * nvgpu_dma_alloc_map()
+ * nvgpu_dma_alloc_map_flags()
+ * nvgpu_dma_alloc_map_sys()
+ * nvgpu_dma_alloc_map_flags_sys()
+ * nvgpu_dma_alloc_map_vid()
+ * nvgpu_dma_alloc_map_flags_vid()
  */
-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem);
+void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem);
 
 #endif
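
As a quick orientation for the new names, here is a minimal sketch of the alloc/free pairing after this rename. The wrapper function, its name, and the 4 KiB size are assumptions made for the example; only the nvgpu_dma_alloc()/nvgpu_dma_free() calls themselves come from the interfaces shown above.

/* Illustrative sketch only: assumes the gk20a and nvgpu DMA headers are in
 * scope and that SZ_4K is available (linux/sizes.h). */
static int example_alloc_and_free(struct gk20a *g)
{
	struct nvgpu_mem scratch = {};
	int err;

	/* System memory, no special flags (was gk20a_gmmu_alloc()). */
	err = nvgpu_dma_alloc(g, SZ_4K, &scratch);
	if (err)
		return err;

	/* ... use the buffer ... */

	/* Frees memory from any nvgpu_dma_alloc*() variant
	 * (was gk20a_gmmu_free()). */
	nvgpu_dma_free(g, &scratch);

	return 0;
}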
@@ -216,7 +216,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 
 	runlist_size = sizeof(u16) * f->num_channels;
 	for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-		int err = gk20a_gmmu_alloc_sys(g, runlist_size,
+		int err = nvgpu_dma_alloc_sys(g, runlist_size,
 				&runlist->mem[i]);
 		if (err) {
 			dev_err(d, "memory allocation failed\n");
@@ -260,7 +260,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 
 	f->userd_entry_size = 1 << ram_userd_base_shift_v();
 
-	err = gk20a_gmmu_alloc_sys(g, f->userd_entry_size * f->num_channels,
+	err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * f->num_channels,
 				&f->userd);
 	if (err) {
 		dev_err(d, "memory allocation failed\n");
@@ -327,7 +327,7 @@ static int vgpu_init_fifo_setup_sw(struct gk20a *g)
 clean_up:
 	gk20a_dbg_fn("fail");
 	/* FIXME: unmap from bar1 */
-	gk20a_gmmu_free(g, &f->userd);
+	nvgpu_dma_free(g, &f->userd);
 
 	memset(&f->userd, 0, sizeof(f->userd));
 
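
The two fifo hunks above both follow an allocate-then-unwind idiom with the renamed sysmem allocator. A hedged sketch of that idiom is below; the wrapper, the hypothetical example_program_hw() step, and the clean_up label are illustrative assumptions, while nvgpu_dma_alloc_sys() and nvgpu_dma_free() are the calls introduced by this change.

/* Hypothetical follow-up step, standing in for the real hardware setup. */
static int example_program_hw(struct gk20a *g, struct nvgpu_mem *buf)
{
	return 0;
}

/* Illustrative only: mirrors the vgpu fifo usage of the renamed APIs. */
static int example_setup(struct gk20a *g, struct nvgpu_mem *buf, size_t bytes)
{
	int err;

	/* was gk20a_gmmu_alloc_sys() */
	err = nvgpu_dma_alloc_sys(g, bytes, buf);
	if (err)
		return err;

	err = example_program_hw(g, buf);
	if (err)
		goto clean_up;

	return 0;

clean_up:
	/* was gk20a_gmmu_free() */
	nvgpu_dma_free(g, buf);
	return err;
}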
@@ -42,10 +42,10 @@ static void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
 			gmmu_page_size_kernel);
 
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
-	gk20a_gmmu_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
+	nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
 
 	nvgpu_kfree(g, gr_ctx);
 }
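
The buffers freed in the hunk above are GMMU-mapped, so their teardown uses nvgpu_dma_unmap_free() rather than nvgpu_dma_free(). A hedged sketch of the matching allocation side, per the header documentation earlier in this diff; the wrapper function and buffer usage are illustrative assumptions, and only buf->gpu_va, nvgpu_dma_alloc_map(), and nvgpu_dma_unmap_free() are taken from the code shown here.

/* Illustrative only: a GMMU-mapped buffer allocated against a VM and torn
 * down with nvgpu_dma_unmap_free(), as the gr_gp10b hunk above does. */
static int example_map_and_unmap(struct vm_gk20a *vm, struct nvgpu_mem *buf,
				size_t bytes)
{
	/* was gk20a_gmmu_alloc_map(); placed in SYSMEM or VIDMEM */
	int err = nvgpu_dma_alloc_map(vm, bytes, buf);

	if (err)
		return err;

	/* ... hand buf->gpu_va to the engine ... */

	/* was gk20a_gmmu_unmap_free(); unmaps from the VM, then frees */
	nvgpu_dma_unmap_free(vm, buf);
	return 0;
}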