gpu: nvgpu: Remove support for legacy mapping

Make NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL mandatory for all map
IOCTLs. We'll clean up the legacy kernel code in subsequent patches.

Remove support for NVGPU_AS_IOCTL_MAP_BUFFER. It has been superseded
by NVGPU_AS_IOCTL_MAP_BUFFER_EX.

Remove the legacy definitions of nvgpu_map_buffer_args and the related
flags, and update the in-kernel map calls accordingly by switching to
the newer definitions.
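
For illustration, a map call that satisfies the new requirement could
look like the following minimal sketch (hypothetical as_fd/dmabuf_fd
variables, no error handling; the UAPI header path may differ per
kernel tree):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* header location varies by tree */

/* as_fd: nvgpu address-space fd, dmabuf_fd: buffer to map; both are
 * assumed to have been set up elsewhere. */
static int map_with_direct_kind_ctrl(int as_fd, int dmabuf_fd)
{
	struct nvgpu_as_map_buffer_ex_args args;

	memset(&args, 0, sizeof(args));
	/* mandatory now; map IOCTLs without it fail with -EINVAL */
	args.flags = NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL |
		     NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE;
	args.compr_kind = NV_KIND_INVALID;	/* no compressed mapping */
	args.incompr_kind = 0;			/* assumed pitch-linear kind */
	args.dmabuf_fd = dmabuf_fd;
	args.page_size = 0;			/* 0 := best fit to buffer */

	return ioctl(as_fd, NVGPU_AS_IOCTL_MAP_BUFFER_EX, &args);
}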

Bug 1902982

Change-Id: Ie9a7f02b8d5d0ec7c3722c4481afab6d39b4fbd0
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1560932
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit c22a5af913 (parent 02d281d077)
Author: Sami Kiminki <skiminki@nvidia.com>
Date:   2017-11-02 12:34:57 +02:00

8 changed files with 36 additions and 96 deletions

@@ -1279,7 +1279,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	/* map backing store to gpu virtual space */
 	vaddr = nvgpu_gmmu_map(ch->vm, &gr->compbit_store.mem,
			       g->gr.compbit_store.mem.size,
-			       NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			       NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
			       gk20a_mem_flag_read_only,
			       false,
			       gr->compbit_store.mem.aperture);

@@ -88,8 +88,8 @@ static int gk20a_as_ioctl_map_buffer_ex(
 		compressible_kind = args->compr_kind;
 		incompressible_kind = args->incompr_kind;
 	} else {
-		compressible_kind = args->kind;
-		incompressible_kind = NV_KIND_INVALID;
+		/* unsupported, direct kind control must be used */
+		return -EINVAL;
 	}

 	return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
@@ -100,19 +100,6 @@ static int gk20a_as_ioctl_map_buffer_ex(
			       NULL);
 }

-static int gk20a_as_ioctl_map_buffer(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_map_buffer_args *args)
-{
-	gk20a_dbg_fn("");
-	return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
-				   &args->o_a.offset,
-				   args->flags, NV_KIND_DEFAULT,
-				   NV_KIND_DEFAULT,
-				   0, 0, NULL);
-	/* args->o_a.offset will be set if !err */
-}
-
 static int gk20a_as_ioctl_unmap_buffer(
		struct gk20a_as_share *as_share,
		struct nvgpu_as_unmap_buffer_args *args)
@@ -187,8 +174,9 @@ static int gk20a_as_ioctl_map_buffer_batch(
 			compressible_kind = map_args.compr_kind;
 			incompressible_kind = map_args.incompr_kind;
 		} else {
-			compressible_kind = map_args.kind;
-			incompressible_kind = NV_KIND_INVALID;
+			/* direct kind control must be used */
+			err = -EINVAL;
+			break;
 		}

 		err = nvgpu_vm_map_buffer(
@@ -348,11 +336,6 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		err = gk20a_as_ioctl_free_space(as_share,
			(struct nvgpu_as_free_space_args *)buf);
 		break;
-	case NVGPU_AS_IOCTL_MAP_BUFFER:
-		trace_gk20a_as_ioctl_map_buffer(g->name);
-		err = gk20a_as_ioctl_map_buffer(as_share,
-			(struct nvgpu_as_map_buffer_args *)buf);
-		break;
 	case NVGPU_AS_IOCTL_MAP_BUFFER_EX:
 		trace_gk20a_as_ioctl_map_buffer(g->name);
 		err = gk20a_as_ioctl_map_buffer_ex(as_share,

@@ -680,7 +680,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		.pgsz = pgsz_idx,
 		.kind_v = kind_v,
 		.ctag = (u64)ctag_offset * (u64)ctag_granularity,
-		.cacheable = flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+		.cacheable = flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 		.rw_flag = rw_flag,
 		.sparse = sparse,
 		.priv = priv,

@@ -1738,7 +1738,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		pm_ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm,
					&pm_ctx->mem,
					pm_ctx->mem.size,
-					NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+					NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
					gk20a_mem_flag_none, true,
					pm_ctx->mem.aperture);
 		if (!pm_ctx->mem.gpu_va) {
@@ -2633,7 +2633,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	}

 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2651,7 +2651,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	}

 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
			gk20a_mem_flag_none, false, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2669,7 +2669,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	}

 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2736,7 +2736,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	gr_ctx->mem.gpu_va = nvgpu_gmmu_map(vm,
					&gr_ctx->mem,
					gr_ctx->mem.size,
-					NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE,
+					0, /* not GPU-cacheable */
					gk20a_mem_flag_none, true,
					gr_ctx->mem.aperture);
 	if (!gr_ctx->mem.gpu_va)

@@ -913,7 +913,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 	mem->gpu_va = nvgpu_gmmu_map(vm,
				mem,
				mem->aligned_size,
-				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+				NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
				gk20a_mem_flag_none,
				false,
				mem->aperture);

@@ -169,8 +169,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = 0;
 	p->kind = kind_v;
-	p->cacheable =
-		(flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;

@@ -153,8 +153,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = mapping ? 1 : 0;
 	p->kind = kind_v;
-	p->cacheable =
-		(flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;

@@ -1461,27 +1461,6 @@ struct nvgpu_submit_gpfifo_args {
 	struct nvgpu_fence fence;
 };

-struct nvgpu_map_buffer_args {
-	__u32 flags;
-#define NVGPU_MAP_BUFFER_FLAGS_ALIGN		0x0
-#define NVGPU_MAP_BUFFER_FLAGS_OFFSET		(1 << 0)
-#define NVGPU_MAP_BUFFER_FLAGS_KIND_PITCH	0x0
-#define NVGPU_MAP_BUFFER_FLAGS_KIND_SPECIFIED	(1 << 1)
-#define NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE	0x0
-#define NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE	(1 << 2)
-	__u32 nvmap_handle;
-	union {
-		__u64 offset; /* valid if _offset flag given (in|out) */
-		__u64 align;  /* alignment multiple (0:={1 or n/a}) */
-	} offset_alignment;
-	__u32 kind;
-#define NVGPU_MAP_BUFFER_KIND_GENERIC_16BX2 0xfe
-};
-
-struct nvgpu_unmap_buffer_args {
-	__u64 offset;
-};
-
 struct nvgpu_wait_args {
 #define NVGPU_WAIT_TYPE_NOTIFIER	0x0
 #define NVGPU_WAIT_TYPE_SEMAPHORE	0x1
@@ -1789,22 +1768,12 @@ struct nvgpu_as_bind_channel_args {
  * chosen will be returned back to the caller in the 'page_size' parameter in
  * that case.
  */
-struct nvgpu_as_map_buffer_args {
-	__u32 flags;		/* in/out */
 #define NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET	    (1 << 0)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE	    (1 << 2)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT	    (1 << 4)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE	    (1 << 5)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS (1 << 6)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL  (1 << 8)
-	__u32 reserved;		/* in */
-	__u32 dmabuf_fd;	/* in */
-	__u32 page_size;	/* inout, 0:= best fit to buffer */
-	union {
-		__u64 offset; /* inout, byte address valid iff _FIXED_OFFSET */
-		__u64 align;  /* in, alignment multiple (0:={1 or n/a}) */
-	} o_a;
-};

 /*
  * Mapping dmabuf fds into an address space:
@@ -1816,19 +1785,10 @@ struct nvgpu_as_map_buffer_args {
  * returned back to the caller in the 'page_size' parameter in that case.
  */
 struct nvgpu_as_map_buffer_ex_args {
+	/* NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL must be set */
 	__u32 flags;		/* in/out */
-#define NV_KIND_DEFAULT -1
-	union {
-		/*
-		 * Used if NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL
-		 * is not set.
-		 */
-		__s32 kind;	/* in (-1 represents default) */

 	/*
-	 * If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
-	 * set, this is used, instead. The rules are:
-	 *
 	 * - If both compr_kind and incompr_kind are set
 	 *   (i.e., value is other than NV_KIND_INVALID),
 	 *   kernel attempts to use compr_kind first.
@@ -1837,18 +1797,17 @@ struct nvgpu_as_map_buffer_ex_args {
 	 *   comptags for the buffer. If successful,
 	 *   compr_kind is used as the PTE kind.
 	 *
-	 * - If incompr_kind is set, kernel uses incompr_kind
-	 *   as the PTE kind. Comptags are not allocated.
+	 * - If incompr_kind is set, kernel uses incompr_kind as the
+	 *   PTE kind, if compr_kind cannot be used. Comptags are not
+	 *   allocated.
 	 *
 	 * - If neither compr_kind or incompr_kind is set, the
 	 *   map call will fail.
 	 */
 #define NV_KIND_INVALID -1
 	struct {
 		__s16 compr_kind;
 		__s16 incompr_kind;
 	};
-	};
-
 	__u32 dmabuf_fd;	/* in */
 	__u32 page_size;	/* inout, 0:= best fit to buffer */
@@ -1975,7 +1934,7 @@ struct nvgpu_as_get_va_regions_args {
 };

 struct nvgpu_as_map_buffer_batch_args {
-	__u64 unmaps; /* ptr to array of nvgpu_unmap_buffer_args */
+	__u64 unmaps; /* ptr to array of nvgpu_as_unmap_buffer_args */
 	__u64 maps;   /* ptr to array of nvgpu_as_map_buffer_ex_args */
 	__u32 num_unmaps; /* in: number of unmaps
			   * out: on error, number of successful unmaps */
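
The compr_kind/incompr_kind rules documented above boil down to a small
decision. Here is a sketch of the documented behavior only, not the
kernel's actual code (choose_pte_kind and comptags_ok are illustrative
names):

#include <errno.h>

#define NV_KIND_INVALID -1

/* Returns the PTE kind a map call would use, or -EINVAL if the map
 * must fail. comptags_ok stands in for a successful comptag
 * allocation, which the kernel attempts when compr_kind is given. */
static int choose_pte_kind(short compr_kind, short incompr_kind,
			   int comptags_ok)
{
	if (compr_kind != NV_KIND_INVALID && comptags_ok)
		return compr_kind;	/* compressible mapping */

	if (incompr_kind != NV_KIND_INVALID)
		return incompr_kind;	/* incompressible fallback */

	return -EINVAL;			/* neither kind is usable */
}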