gpu: nvgpu: vgpu: remove vgpu_locked_gmmu_map()

The function is no longer used anywhere; remove it along with its declaration in the vgpu mm header and the now-unused TEGRA_VGPU_CMD_AS_MAP command it issued.

Change-Id: Iad99811e2d356362d16b961464729f5169c36f28
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1649937
Reviewed-by: Aingara Paramakuru <aparamakuru@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Nirav Patel <nipatel@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

Author:    Richard Zhao <rizhao@nvidia.com>
Committed: mobile promotions <svcmobile_promotions@nvidia.com>
Date:      2018-01-31 15:19:26 -08:00
Commit:    2ac0c35245
Parent:    c45462288d

3 changed files with 0 additions and 101 deletions

@@ -80,91 +80,6 @@ int vgpu_init_mm_support(struct gk20a *g)
 	return err;
 }
 
-u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
-			u64 map_offset,
-			struct nvgpu_sgt *sgt,
-			u64 buffer_offset,
-			u64 size,
-			int pgsz_idx,
-			u8 kind_v,
-			u32 ctag_offset,
-			u32 flags,
-			int rw_flag,
-			bool clear_ctags,
-			bool sparse,
-			bool priv,
-			struct vm_gk20a_mapping_batch *batch,
-			enum nvgpu_aperture aperture)
-{
-	int err = 0;
-	struct device *d = dev_from_vm(vm);
-	struct gk20a *g = gk20a_from_vm(vm);
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(d);
-	struct tegra_vgpu_cmd_msg msg;
-	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
-	u64 addr = nvgpu_sgt_get_gpu_addr(g, sgt, sgt->sgl, NULL);
-	u8 prot;
-
-	gk20a_dbg_fn("");
-
-	/* Allocate (or validate when map_offset != 0) the virtual address. */
-	if (!map_offset) {
-		map_offset = __nvgpu_vm_alloc_va(vm, size,
-					pgsz_idx);
-		if (!map_offset) {
-			nvgpu_err(g, "failed to allocate va space");
-			err = -ENOMEM;
-			goto fail;
-		}
-	}
-
-	if (rw_flag == gk20a_mem_flag_read_only)
-		prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
-	else if (rw_flag == gk20a_mem_flag_write_only)
-		prot = TEGRA_VGPU_MAP_PROT_WRITE_ONLY;
-	else
-		prot = TEGRA_VGPU_MAP_PROT_NONE;
-
-	msg.cmd = TEGRA_VGPU_CMD_AS_MAP;
-	msg.handle = vgpu_get_handle(g);
-	p->handle = vm->handle;
-	p->addr = addr;
-	p->gpu_va = map_offset;
-	p->size = size;
-
-	if (pgsz_idx == gmmu_page_size_kernel) {
-		u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
-		if (page_size == vm->gmmu_page_sizes[gmmu_page_size_small]) {
-			pgsz_idx = gmmu_page_size_small;
-		} else if (page_size ==
-				vm->gmmu_page_sizes[gmmu_page_size_big]) {
-			pgsz_idx = gmmu_page_size_big;
-		} else {
-			nvgpu_err(g, "invalid kernel page size %d",
-				page_size);
-			goto fail;
-		}
-	}
-
-	p->pgsz_idx = pgsz_idx;
-	p->iova = mapping ? 1 : 0;
-	p->kind = kind_v;
-	p->cacheable = (flags & NVGPU_VM_MAP_CACHEABLE) ? 1 : 0;
-	p->prot = prot;
-	p->ctag_offset = ctag_offset;
-	p->clear_ctags = clear_ctags;
-
-	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-	err = err ? err : msg.ret;
-	if (err)
-		goto fail;
-
-	/* TLB invalidate handled on server side */
-	return map_offset;
-fail:
-	nvgpu_err(g, "%s: failed with err=%d", __func__, err);
-	return 0;
-}
 void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,
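
For reference, the body deleted above follows the standard vgpu RPC pattern: choose or validate a GPU virtual address, fill a struct tegra_vgpu_cmd_msg describing the mapping, and let the vgpu server perform the actual GMMU programming via vgpu_comm_sendrecv(). Below is a condensed sketch of that pattern, not driver code: only the message/params types, the field names, and vgpu_comm_sendrecv() are taken from the removed function; the wrapper function and its argument list are illustrative.

/* Illustrative only -- shows the RPC shape the removed
 * vgpu_locked_gmmu_map() used: the client packages the request and
 * the vgpu server does the GMMU programming and TLB maintenance. */
static u64 example_vgpu_as_map(struct gk20a *g, struct vm_gk20a *vm,
			u64 addr, u64 gpu_va, u64 size, u8 prot)
{
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_as_map_params *p = &msg.params.as_map;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_AS_MAP;	/* command retired by this change */
	msg.handle = vgpu_get_handle(g);	/* handle to the vgpu server */
	p->handle = vm->handle;			/* target address space */
	p->addr = addr;				/* buffer address (IOVA) */
	p->gpu_va = gpu_va;			/* GPU VA to map at */
	p->size = size;
	p->prot = prot;				/* TEGRA_VGPU_MAP_PROT_* */

	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;		/* transport error, then server status */

	/* TLB invalidation is handled on the server side. */
	return err ? 0 : gpu_va;
}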

@@ -17,21 +17,6 @@
 #ifndef _MM_VGPU_H_
 #define _MM_VGPU_H_
 
-u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
-			u64 map_offset,
-			struct nvgpu_sgt *sgt,
-			u64 buffer_offset,
-			u64 size,
-			int pgsz_idx,
-			u8 kind_v,
-			u32 ctag_offset,
-			u32 flags,
-			int rw_flag,
-			bool clear_ctags,
-			bool sparse,
-			bool priv,
-			struct vm_gk20a_mapping_batch *batch,
-			enum nvgpu_aperture aperture);
 void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,

@@ -50,7 +50,6 @@ enum {
 	TEGRA_VGPU_CMD_AS_ALLOC_SHARE = 7,
 	TEGRA_VGPU_CMD_AS_BIND_SHARE = 8,
 	TEGRA_VGPU_CMD_AS_FREE_SHARE = 9,
-	TEGRA_VGPU_CMD_AS_MAP = 10,
 	TEGRA_VGPU_CMD_AS_UNMAP = 11,
 	TEGRA_VGPU_CMD_CHANNEL_BIND = 13,
 	TEGRA_VGPU_CMD_CHANNEL_UNBIND = 14,
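
Note that the surviving commands keep their explicit values, so dropping TEGRA_VGPU_CMD_AS_MAP does not renumber the client/server protocol. An abbreviated view of the enum above, only to illustrate why the explicit initializers matter:

/* With explicit values, deleting one command leaves the others' wire
 * numbers untouched; implicit enumerators would all shift down by one. */
enum {
	TEGRA_VGPU_CMD_AS_FREE_SHARE = 9,
	/* TEGRA_VGPU_CMD_AS_MAP = 10 removed; the value 10 is now unused */
	TEGRA_VGPU_CMD_AS_UNMAP = 11,
};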