mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: Add translation for NVGPU MM flags
Add a translation layer to convert from the NVGPU_AS_* flags to the
new set of NVGPU_VM_MAP_* and NVGPU_VM_AREA_ALLOC_* flags. This allows
the common MM code to not depend on the UAPI header defined for Linux.

In addition to this change a couple of other small changes were made:

1. Deprecate, print a warning, and ignore usage of the
   NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS flag.
2. Move the t19x IO coherence flag from the t19x UAPI header to the
   regular UAPI header.

JIRA NVGPU-293

Change-Id: I146402b0e8617294374e63e78f8826c57cd3b291
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1599802
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent b42fb7ba26
commit 35ae4194a0
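The whole change reduces to a one-way, bit-by-bit translation at the UAPI boundary, as the diff below shows. As a rough standalone sketch of the pattern (the numeric flag values and the helper name translate_linux_flags here are placeholders for illustration, not the real UAPI or core definitions):

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values for illustration only; the real definitions
 * live in the Linux UAPI header (NVGPU_AS_*) and the common nvgpu
 * headers (NVGPU_VM_*). */
#define NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET	(1u << 0)
#define NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE	(1u << 1)
#define NVGPU_VM_MAP_FIXED_OFFSET		(1u << 0)
#define NVGPU_VM_MAP_CACHEABLE			(1u << 1)

/* Translate UAPI flags into core MM flags one bit at a time; the
 * common MM code then only ever sees the NVGPU_VM_* namespace. */
static uint32_t translate_linux_flags(uint32_t uapi_flags)
{
	uint32_t core_flags = 0;

	if (uapi_flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
		core_flags |= NVGPU_VM_MAP_FIXED_OFFSET;
	if (uapi_flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE)
		core_flags |= NVGPU_VM_MAP_CACHEABLE;

	return core_flags;
}

int main(void)
{
	uint32_t in = NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE;

	printf("uapi 0x%x -> core 0x%x\n", in, translate_linux_flags(in));
	return 0;
}

Since this translation is the only point where the two flag namespaces meet, the Linux UAPI header can change without touching the common MM code.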
@@ -1052,9 +1052,9 @@ __releases(&l->cde_app->mutex)
 	/* map the destination buffer */
 	get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map_linux */
 	err = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0,
-				 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE |
-				 NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
-				 NV_KIND_INVALID,
+				 NVGPU_VM_MAP_CACHEABLE |
+				 NVGPU_VM_MAP_DIRECT_KIND_CTRL,
+				 NVGPU_KIND_INVALID,
 				 compbits_kind, /* incompressible kind */
 				 gk20a_mem_flag_none,
 				 map_offset, map_size,

@@ -1284,7 +1284,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	/* map backing store to gpu virtual space */
 	vaddr = nvgpu_gmmu_map(ch->vm, &gr->compbit_store.mem,
 			       g->gr.compbit_store.mem.size,
-			       NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
+			       NVGPU_VM_MAP_CACHEABLE,
 			       gk20a_mem_flag_read_only,
 			       false,
 			       gr->compbit_store.mem.aperture);

@@ -32,6 +32,18 @@
 #include "ioctl_as.h"
 #include "os_linux.h"
 
+static u32 gk20a_as_translate_linux_flags(struct gk20a *g, u32 flags)
+{
+	u32 core_flags = 0;
+
+	if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
+		core_flags |= NVGPU_VM_AREA_ALLOC_FIXED_OFFSET;
+	if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE)
+		core_flags |= NVGPU_VM_AREA_ALLOC_SPARSE;
+
+	return core_flags;
+}
+
 static int gk20a_as_ioctl_bind_channel(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_bind_channel_args *args)

@@ -62,9 +74,13 @@ static int gk20a_as_ioctl_alloc_space(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_alloc_space_args *args)
 {
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+
 	gk20a_dbg_fn("");
 	return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
-				   &args->o_a.offset, args->flags);
+				   &args->o_a.offset,
+				   gk20a_as_translate_linux_flags(g,
+								  args->flags));
 }
 
 static int gk20a_as_ioctl_free_space(

@@ -165,7 +165,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = 0;
 	p->kind = kind_v;
-	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_VM_MAP_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;

@@ -148,7 +148,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = mapping ? 1 : 0;
 	p->kind = kind_v;
-	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_VM_MAP_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;

@@ -37,6 +37,30 @@
 #include "os_linux.h"
 #include "dmabuf.h"
 
+static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
+{
+	u32 core_flags = 0;
+
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
+		core_flags |= NVGPU_VM_MAP_FIXED_OFFSET;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE)
+		core_flags |= NVGPU_VM_MAP_CACHEABLE;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT)
+		core_flags |= NVGPU_VM_MAP_IO_COHERENT;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE)
+		core_flags |= NVGPU_VM_MAP_UNMAPPED_PTE;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC)
+		core_flags |= NVGPU_VM_MAP_L3_ALLOC;
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)
+		core_flags |= NVGPU_VM_MAP_DIRECT_KIND_CTRL;
+
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS)
+		nvgpu_warn(g, "Ignoring deprecated flag: "
+			   "NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS");
+
+	return core_flags;
+}
+
 static struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_reverse(
 		struct vm_gk20a *vm, struct dma_buf *dmabuf, u32 kind)
 {

@@ -102,7 +126,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct nvgpu_mapped_buf *mapped_buffer = NULL;
 
-	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+	if (flags & NVGPU_VM_MAP_FIXED_OFFSET) {
 		mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, map_addr);
 		if (!mapped_buffer)
 			return NULL;

@@ -167,7 +191,7 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 	u64 map_addr = 0ULL;
 	int err = 0;
 
-	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
+	if (flags & NVGPU_VM_MAP_FIXED_OFFSET)
 		map_addr = offset_align;
 
 	sgt = gk20a_mm_pin(dev, dmabuf);

@@ -229,15 +253,16 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 			u64 mapping_size,
 			struct vm_gk20a_mapping_batch *batch)
 {
-	int err = 0;
+	struct gk20a *g = gk20a_from_vm(vm);
 	struct dma_buf *dmabuf;
 	u64 ret_va;
+	int err = 0;
 
 	/* get ref to the mem handle (released on unmap_locked) */
 	dmabuf = dma_buf_get(dmabuf_fd);
 	if (IS_ERR(dmabuf)) {
-		nvgpu_warn(gk20a_from_vm(vm), "%s: fd %d is not a dmabuf",
-			   __func__, dmabuf_fd);
+		nvgpu_warn(g, "%s: fd %d is not a dmabuf",
+			   __func__, dmabuf_fd);
 		return PTR_ERR(dmabuf);
 	}
 

@@ -250,9 +275,9 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	 */
 	if ((mapping_size > dmabuf->size) ||
 	    (buffer_offset > (dmabuf->size - mapping_size))) {
-		nvgpu_err(gk20a_from_vm(vm),
-			  "buf size %llx < (offset(%llx) + map_size(%llx))\n",
-			  (u64)dmabuf->size, buffer_offset, mapping_size);
+		nvgpu_err(g,
+			  "buf size %llx < (offset(%llx) + map_size(%llx))\n",
+			  (u64)dmabuf->size, buffer_offset, mapping_size);
 		return -EINVAL;
 	}
 

@@ -263,7 +288,8 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	}
 
 	err = nvgpu_vm_map_linux(vm, dmabuf, *offset_align,
-				 flags, compr_kind, incompr_kind,
+				 nvgpu_vm_translate_linux_flags(g, flags),
+				 compr_kind, incompr_kind,
 				 gk20a_mem_flag_none,
 				 buffer_offset,
 				 mapping_size,