gpu: nvgpu: Remove support for legacy mapping

Make NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL mandatory for all map
IOCTLs. We'll clean up the legacy kernel code in subsequent patches.

Remove support for NVGPU_AS_IOCTL_MAP_BUFFER. It has been superseded
by NVGPU_AS_IOCTL_MAP_BUFFER_EX.
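For userspace, this means every mapping request must go through
NVGPU_AS_IOCTL_MAP_BUFFER_EX (or the batch variant) with
NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL set and both kind fields
filled in. A minimal sketch of a call under the new rules -- the uapi
header name and the exact nvgpu_as_map_buffer_ex_args layout (including
the returned offset field) are assumptions for illustration, not part
of this change:

  /* Hypothetical userspace sketch; header and struct layout assumed. */
  #include <errno.h>
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/nvgpu.h>

  static int64_t map_with_direct_kind_ctrl(int as_fd, int dmabuf_fd,
                                           int16_t compr_kind,
                                           int16_t incompr_kind)
  {
          struct nvgpu_as_map_buffer_ex_args args = {0};

          /* direct kind control is now mandatory */
          args.flags = NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL |
                       NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE;
          args.dmabuf_fd = dmabuf_fd;
          args.compr_kind = compr_kind;     /* preferred (compressible) kind */
          args.incompr_kind = incompr_kind; /* fallback (incompressible) kind */

          if (ioctl(as_fd, NVGPU_AS_IOCTL_MAP_BUFFER_EX, &args) != 0)
                  return -errno;

          return (int64_t)args.offset;      /* GPU VA of the new mapping */
  }

Mapping requests that do not set the direct-kind-control flag are now
rejected with -EINVAL, as the ioctl_as.c hunks below show.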

Remove the legacy definitions around nvgpu_map_buffer_args and the
associated flags, and update the in-kernel map calls accordingly by
switching to the newer definitions.
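In-kernel call sites follow the same pattern as the hunks below: a
GPU-cacheable mapping passes NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE to
nvgpu_gmmu_map(), and a non-cacheable one passes 0 now that
NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE is gone. Roughly (illustrative
fragment; 'mem' is a placeholder nvgpu_mem and the remaining arguments
stay as in each call site):

  /* cacheable, read-only mapping (illustrative) */
  vaddr = nvgpu_gmmu_map(vm, &mem, mem.size,
                         NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
                         gk20a_mem_flag_read_only, false,
                         mem.aperture);

  /* non-cacheable mapping: pass 0 where
   * NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE used to go */
  gpu_va = nvgpu_gmmu_map(vm, &mem, mem.size, 0,
                          gk20a_mem_flag_none, true,
                          mem.aperture);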

Bug 1902982

Change-Id: Ie9a7f02b8d5d0ec7c3722c4481afab6d39b4fbd0
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1560932
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit c22a5af913 (parent 02d281d077)
Author:    Sami Kiminki
Date:      2017-11-02 12:34:57 +02:00
Committer: mobile promotions

8 changed files with 36 additions and 96 deletions


@@ -1279,7 +1279,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	/* map backing store to gpu virtual space */
 	vaddr = nvgpu_gmmu_map(ch->vm, &gr->compbit_store.mem,
 			g->gr.compbit_store.mem.size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			gk20a_mem_flag_read_only,
 			false,
 			gr->compbit_store.mem.aperture);


@@ -88,8 +88,8 @@ static int gk20a_as_ioctl_map_buffer_ex(
 		compressible_kind = args->compr_kind;
 		incompressible_kind = args->incompr_kind;
 	} else {
-		compressible_kind = args->kind;
-		incompressible_kind = NV_KIND_INVALID;
+		/* unsupported, direct kind control must be used */
+		return -EINVAL;
 	}
 
 	return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
@@ -100,19 +100,6 @@ static int gk20a_as_ioctl_map_buffer_ex(
 			NULL);
 }
 
-static int gk20a_as_ioctl_map_buffer(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_map_buffer_args *args)
-{
-	gk20a_dbg_fn("");
-	return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
-			&args->o_a.offset,
-			args->flags, NV_KIND_DEFAULT,
-			NV_KIND_DEFAULT,
-			0, 0, NULL);
-	/* args->o_a.offset will be set if !err */
-}
-
 static int gk20a_as_ioctl_unmap_buffer(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_unmap_buffer_args *args)
@@ -187,8 +174,9 @@ static int gk20a_as_ioctl_map_buffer_batch(
 			compressible_kind = map_args.compr_kind;
 			incompressible_kind = map_args.incompr_kind;
 		} else {
-			compressible_kind = map_args.kind;
-			incompressible_kind = NV_KIND_INVALID;
+			/* direct kind control must be used */
+			err = -EINVAL;
+			break;
 		}
 
 		err = nvgpu_vm_map_buffer(
@@ -348,11 +336,6 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		err = gk20a_as_ioctl_free_space(as_share,
 			(struct nvgpu_as_free_space_args *)buf);
 		break;
-	case NVGPU_AS_IOCTL_MAP_BUFFER:
-		trace_gk20a_as_ioctl_map_buffer(g->name);
-		err = gk20a_as_ioctl_map_buffer(as_share,
-			(struct nvgpu_as_map_buffer_args *)buf);
-		break;
 	case NVGPU_AS_IOCTL_MAP_BUFFER_EX:
 		trace_gk20a_as_ioctl_map_buffer(g->name);
 		err = gk20a_as_ioctl_map_buffer_ex(as_share,


@@ -680,7 +680,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		.pgsz = pgsz_idx,
 		.kind_v = kind_v,
 		.ctag = (u64)ctag_offset * (u64)ctag_granularity,
-		.cacheable = flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+		.cacheable = flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 		.rw_flag = rw_flag,
 		.sparse = sparse,
 		.priv = priv,


@@ -1738,7 +1738,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		pm_ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm,
 				&pm_ctx->mem,
 				pm_ctx->mem.size,
-				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+				NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 				gk20a_mem_flag_none, true,
 				pm_ctx->mem.aperture);
 		if (!pm_ctx->mem.gpu_va) {
@@ -2633,7 +2633,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	}
 
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2651,7 +2651,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	}
 
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			gk20a_mem_flag_none, false, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2669,7 +2669,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	}
 
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2736,7 +2736,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	gr_ctx->mem.gpu_va = nvgpu_gmmu_map(vm,
 				&gr_ctx->mem,
 				gr_ctx->mem.size,
-				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE,
+				0, /* not GPU-cacheable */
 				gk20a_mem_flag_none, true,
 				gr_ctx->mem.aperture);
 	if (!gr_ctx->mem.gpu_va)


@@ -913,7 +913,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 	mem->gpu_va = nvgpu_gmmu_map(vm,
 			mem,
 			mem->aligned_size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			gk20a_mem_flag_none,
 			false,
 			mem->aperture);


@@ -169,8 +169,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = 0;
 	p->kind = kind_v;
-	p->cacheable =
-		(flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;


@@ -153,8 +153,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = mapping ? 1 : 0;
 	p->kind = kind_v;
-	p->cacheable =
-		(flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;