diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c index 5670c8483..61850b3b1 100644 --- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c +++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c @@ -37,7 +37,7 @@ #include -#define __gmmu_dbg(g, attrs, fmt, args...) \ +#define nvgpu_gmmu_dbg(g, attrs, fmt, args...) \ do { \ if ((attrs)->debug) { \ nvgpu_info(g, fmt, ##args); \ @@ -46,7 +46,7 @@ } \ } while (false) -#define __gmmu_dbg_v(g, attrs, fmt, args...) \ +#define nvgpu_gmmu_dbg_v(g, attrs, fmt, args...) \ do { \ if ((attrs)->debug) { \ nvgpu_info(g, fmt, ##args); \ @@ -66,14 +66,14 @@ static u32 pd_size(const struct gk20a_mmu_level *l, * VA will be allocated for you. If addr is non-zero then the buffer will be * mapped at @addr. */ -static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm, - struct nvgpu_mem *mem, - u64 addr, - u64 size, - u32 flags, - enum gk20a_mem_rw_flag rw_flag, - bool priv, - enum nvgpu_aperture aperture) +static u64 nvgpu_gmmu_map_core(struct vm_gk20a *vm, + struct nvgpu_mem *mem, + u64 addr, + u64 size, + u32 flags, + enum gk20a_mem_rw_flag rw_flag, + bool priv, + enum nvgpu_aperture aperture) { struct gk20a *g = gk20a_from_vm(vm); u64 vaddr; @@ -134,8 +134,8 @@ u64 nvgpu_gmmu_map(struct vm_gk20a *vm, bool priv, enum nvgpu_aperture aperture) { - return __nvgpu_gmmu_map(vm, mem, 0, size, flags, rw_flag, priv, - aperture); + return nvgpu_gmmu_map_core(vm, mem, 0, size, flags, rw_flag, priv, + aperture); } /* @@ -150,8 +150,8 @@ u64 nvgpu_gmmu_map_fixed(struct vm_gk20a *vm, bool priv, enum nvgpu_aperture aperture) { - return __nvgpu_gmmu_map(vm, mem, addr, size, flags, rw_flag, priv, - aperture); + return nvgpu_gmmu_map_core(vm, mem, addr, size, flags, rw_flag, priv, + aperture); } void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va) @@ -371,12 +371,12 @@ static int pd_allocate_children(struct vm_gk20a *vm, * phys_addr will always point to a contiguous range - the discontiguous nature * of DMA buffers is taken care of at the layer above this. */ -static int __set_pd_level(struct vm_gk20a *vm, - struct nvgpu_gmmu_pd *pd, - int lvl, - u64 phys_addr, - u64 virt_addr, u64 length, - struct nvgpu_gmmu_attrs *attrs) +static int nvgpu_set_pd_level(struct vm_gk20a *vm, + struct nvgpu_gmmu_pd *pd, + int lvl, + u64 phys_addr, + u64 virt_addr, u64 length, + struct nvgpu_gmmu_attrs *attrs) { int err = 0; u64 pde_range; @@ -390,7 +390,7 @@ static int __set_pd_level(struct vm_gk20a *vm, * offsets into the page table debugging code which makes it easier to * see what level prints are from. */ - static const char *__lvl_debug[] = { + static const char *lvl_debug[] = { "", /* L=0 */ " ", /* L=1 */ " ", /* L=2 */ @@ -400,13 +400,13 @@ static int __set_pd_level(struct vm_gk20a *vm, pde_range = 1ULL << (u64)l->lo_bit[attrs->pgsz]; - __gmmu_dbg_v(g, attrs, - "L=%d %sGPU virt %#-12llx +%#-9llx -> phys %#-12llx", - lvl, - __lvl_debug[lvl], - virt_addr, - length, - phys_addr); + nvgpu_gmmu_dbg_v(g, attrs, + "L=%d %sGPU virt %#-12llx +%#-9llx -> phys %#-12llx", + lvl, + lvl_debug[lvl], + virt_addr, + length, + phys_addr); /* * Iterate across the mapping in chunks the size of this level's PDE. 
@@ -468,12 +468,12 @@ static int __set_pd_level(struct vm_gk20a *vm, attrs); if (next_l->update_entry != NULL) { - err = __set_pd_level(vm, next_pd, - lvl + 1, - phys_addr, - virt_addr, - chunk_size, - attrs); + err = nvgpu_set_pd_level(vm, next_pd, + lvl + 1, + phys_addr, + virt_addr, + chunk_size, + attrs); if (err != 0) { return err; @@ -494,17 +494,18 @@ static int __set_pd_level(struct vm_gk20a *vm, length -= chunk_size; } - __gmmu_dbg_v(g, attrs, "L=%d %s%s", lvl, __lvl_debug[lvl], "ret!"); + nvgpu_gmmu_dbg_v(g, attrs, "L=%d %s%s", lvl, lvl_debug[lvl], + "ret!"); return 0; } -static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm, - struct nvgpu_sgt *sgt, - u64 space_to_skip, - u64 virt_addr, - u64 length, - struct nvgpu_gmmu_attrs *attrs) +static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm, + struct nvgpu_sgt *sgt, + u64 space_to_skip, + u64 virt_addr, + u64 length, + struct nvgpu_gmmu_attrs *attrs) { struct gk20a *g = gk20a_from_vm(vm); struct nvgpu_sgl *sgl; @@ -515,11 +516,11 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm, * This is considered an unmap. Just pass in 0 as the physical * address for the entire GPU range. */ - err = __set_pd_level(vm, &vm->pdb, - 0, - 0, - virt_addr, length, - attrs); + err = nvgpu_set_pd_level(vm, &vm->pdb, + 0, + 0, + virt_addr, length, + attrs); return err; } @@ -546,12 +547,12 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm, io_addr += space_to_skip; - err = __set_pd_level(vm, &vm->pdb, - 0, - io_addr, - virt_addr, - length, - attrs); + err = nvgpu_set_pd_level(vm, &vm->pdb, + 0, + io_addr, + virt_addr, + length, + attrs); return err; } @@ -636,7 +637,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm, mapped_sgl_length = min(length, sgl_contiguous_length - space_to_skip); - err = __set_pd_level(vm, &vm->pdb, + err = nvgpu_set_pd_level(vm, &vm->pdb, 0, phys_addr, virt_addr, @@ -684,12 +685,12 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm, * [*] Note: the "physical" address may actually be an IO virtual address in the * case of SMMU usage. */ -static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm, - struct nvgpu_sgt *sgt, - u64 space_to_skip, - u64 virt_addr, - u64 length, - struct nvgpu_gmmu_attrs *attrs) +static int nvgpu_gmmu_update_page_table(struct vm_gk20a *vm, + struct nvgpu_sgt *sgt, + u64 space_to_skip, + u64 virt_addr, + u64 length, + struct nvgpu_gmmu_attrs *attrs) { struct gk20a *g = gk20a_from_vm(vm); u32 page_size; @@ -712,37 +713,38 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm, */ length = nvgpu_align_map_length(vm, length, attrs); - __gmmu_dbg(g, attrs, - "vm=%s " - "%-5s GPU virt %#-12llx +%#-9llx phys %#-12llx " - "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | " - "kind=%#02x APT=%-6s %c%c%c%c%c", - vm->name, - (sgt != NULL) ? "MAP" : "UNMAP", - virt_addr, - length, - (sgt != NULL) ? nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0ULL, - space_to_skip, - page_size >> 10, - nvgpu_gmmu_perm_str(attrs->rw_flag), - attrs->kind_v, - nvgpu_aperture_str(g, attrs->aperture), - attrs->cacheable ? 'C' : '-', - attrs->sparse ? 'S' : '-', - attrs->priv ? 'P' : '-', - attrs->valid ? 'V' : '-', - attrs->platform_atomic ? 'A' : '-'); + nvgpu_gmmu_dbg(g, attrs, + "vm=%s " + "%-5s GPU virt %#-12llx +%#-9llx phys %#-12llx " + "phys offset: %#-4llx; pgsz: %3dkb perm=%-2s | " + "kind=%#02x APT=%-6s %c%c%c%c%c", + vm->name, + (sgt != NULL) ? "MAP" : "UNMAP", + virt_addr, + length, + (sgt != NULL) ? 
+ nvgpu_sgt_get_phys(g, sgt, sgt->sgl) : 0ULL, + space_to_skip, + page_size >> 10, + nvgpu_gmmu_perm_str(attrs->rw_flag), + attrs->kind_v, + nvgpu_aperture_str(g, attrs->aperture), + attrs->cacheable ? 'C' : '-', + attrs->sparse ? 'S' : '-', + attrs->priv ? 'P' : '-', + attrs->valid ? 'V' : '-', + attrs->platform_atomic ? 'A' : '-'); - err = __nvgpu_gmmu_do_update_page_table(vm, - sgt, - space_to_skip, - virt_addr, - length, - attrs); + err = nvgpu_gmmu_do_update_page_table(vm, + sgt, + space_to_skip, + virt_addr, + length, + attrs); nvgpu_mb(); - __gmmu_dbg(g, attrs, "%-5s Done!", + nvgpu_gmmu_dbg(g, attrs, "%-5s Done!", (sgt != NULL) ? "MAP" : "UNMAP"); return err; @@ -818,8 +820,8 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm, allocated = true; } - err = __nvgpu_gmmu_update_page_table(vm, sgt, buffer_offset, - vaddr, size, &attrs); + err = nvgpu_gmmu_update_page_table(vm, sgt, buffer_offset, + vaddr, size, &attrs); if (err != 0) { nvgpu_err(g, "failed to update ptes on map"); goto fail_validate; @@ -874,8 +876,8 @@ void nvgpu_gmmu_unmap_locked(struct vm_gk20a *vm, } /* unmap here needs to know the page size we assigned at mapping */ - err = __nvgpu_gmmu_update_page_table(vm, NULL, 0, - vaddr, size, &attrs); + err = nvgpu_gmmu_update_page_table(vm, NULL, 0, + vaddr, size, &attrs); if (err != 0) { nvgpu_err(g, "failed to update gmmu ptes on unmap"); } @@ -899,7 +901,7 @@ void nvgpu_gmmu_unmap_locked(struct vm_gk20a *vm, } } -u32 __nvgpu_pte_words(struct gk20a *g) +u32 nvgpu_pte_words(struct gk20a *g) { const struct gk20a_mmu_level *l = g->ops.mm.gmmu.get_mmu_levels(g, SZ_64K); @@ -924,13 +926,13 @@ u32 __nvgpu_pte_words(struct gk20a *g) /* * Recursively walk the pages tables to find the PTE. */ -static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm, - struct nvgpu_gmmu_pd *pd, - u64 vaddr, int lvl, - struct nvgpu_gmmu_attrs *attrs, - u32 *data, - struct nvgpu_gmmu_pd **pd_out, u32 *pd_idx_out, - u32 *pd_offs_out) +static int nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm, + struct nvgpu_gmmu_pd *pd, + u64 vaddr, int lvl, + struct nvgpu_gmmu_attrs *attrs, + u32 *data, + struct nvgpu_gmmu_pd **pd_out, u32 *pd_idx_out, + u32 *pd_offs_out) { const struct gk20a_mmu_level *l = &vm->mmu_levels[lvl]; const struct gk20a_mmu_level *next_l = &vm->mmu_levels[lvl + 1]; @@ -957,10 +959,10 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm, return -EINVAL; } - return __nvgpu_locate_pte(g, vm, pd_next, - vaddr, lvl + 1, attrs, - data, pd_out, pd_idx_out, - pd_offs_out); + return nvgpu_locate_pte(g, vm, pd_next, + vaddr, lvl + 1, attrs, + data, pd_out, pd_idx_out, + pd_offs_out); } if (pd->mem == NULL) { @@ -996,18 +998,18 @@ static int __nvgpu_locate_pte(struct gk20a *g, struct vm_gk20a *vm, return 0; } -int __nvgpu_get_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte) +int nvgpu_get_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte) { struct nvgpu_gmmu_attrs attrs = { .pgsz = 0, }; - return __nvgpu_locate_pte(g, vm, &vm->pdb, - vaddr, 0, &attrs, - pte, NULL, NULL, NULL); + return nvgpu_locate_pte(g, vm, &vm->pdb, + vaddr, 0, &attrs, + pte, NULL, NULL, NULL); } -int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte) +int nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte) { struct nvgpu_gmmu_pd *pd; u32 pd_idx, pd_offs, pte_size, i; @@ -1017,14 +1019,14 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte) }; struct nvgpu_gmmu_attrs *attrs_ptr = &attrs; - err = 
__nvgpu_locate_pte(g, vm, &vm->pdb, - vaddr, 0, &attrs, - NULL, &pd, &pd_idx, &pd_offs); + err = nvgpu_locate_pte(g, vm, &vm->pdb, + vaddr, 0, &attrs, + NULL, &pd, &pd_idx, &pd_offs); if (err != 0) { return err; } - pte_size = __nvgpu_pte_words(g); + pte_size = nvgpu_pte_words(g); for (i = 0; i < pte_size; i++) { nvgpu_pd_write(g, pd, (size_t)pd_offs + (size_t)i, pte[i]); diff --git a/drivers/gpu/nvgpu/hal/mm/mmu_fault/mmu_fault_gv11b.c b/drivers/gpu/nvgpu/hal/mm/mmu_fault/mmu_fault_gv11b.c index f277c1e3d..e08769f45 100644 --- a/drivers/gpu/nvgpu/hal/mm/mmu_fault/mmu_fault_gv11b.c +++ b/drivers/gpu/nvgpu/hal/mm/mmu_fault/mmu_fault_gv11b.c @@ -551,7 +551,7 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g, return -EINVAL; } - err = __nvgpu_get_pte(g, + err = nvgpu_get_pte(g, mmufault->refch->vm, mmufault->fault_addr, &pte[0]); if (err != 0) { nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte not found"); @@ -578,7 +578,7 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g, nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "new pte: %#08x %#08x", pte[1], pte[0]); - err = __nvgpu_set_pte(g, + err = nvgpu_set_pte(g, mmufault->refch->vm, mmufault->fault_addr, &pte[0]); if (err != 0) { nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte not fixed"); @@ -587,7 +587,7 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g, /* invalidate tlb so that GMMU does not use old cached translation */ g->ops.fb.tlb_invalidate(g, mmufault->refch->vm->pdb.mem); - err = __nvgpu_get_pte(g, + err = nvgpu_get_pte(g, mmufault->refch->vm, mmufault->fault_addr, &pte[0]); nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte after tlb invalidate: %#08x %#08x", diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h index 6af666b93..d93ee6d7a 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h @@ -165,16 +165,16 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm, u64 gpu_va); /** - * __nvgpu_pte_words - Compute number of words in a PTE. + * nvgpu_pte_words - Compute number of words in a PTE. * * @g - The GPU. * * This computes and returns the size of a PTE for the passed chip. */ -u32 __nvgpu_pte_words(struct gk20a *g); +u32 nvgpu_pte_words(struct gk20a *g); /** - * __nvgpu_get_pte - Get the contents of a PTE by virtual address + * nvgpu_get_pte - Get the contents of a PTE by virtual address * * @g - The GPU. * @vm - VM to look in. @@ -184,17 +184,17 @@ u32 __nvgpu_pte_words(struct gk20a *g); * Find a PTE in the passed VM based on the passed GPU virtual address. This * will @pte with a copy of the contents of the PTE. @pte must be an array of * u32s large enough to contain the PTE. This can be computed using - * __nvgpu_pte_words(). + * nvgpu_pte_words(). * * If you wish to write to this PTE then you may modify @pte and then use the - * __nvgpu_set_pte(). + * nvgpu_set_pte(). * * This function returns 0 if the PTE is found and -EINVAL otherwise. */ -int __nvgpu_get_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte); +int nvgpu_get_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte); /** - * __nvgpu_set_pte - Set a PTE based on virtual address + * nvgpu_set_pte - Set a PTE based on virtual address * * @g - The GPU. * @vm - VM to look in. @@ -208,11 +208,11 @@ int __nvgpu_get_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte); * the mapping (for instance changing invalid to valid). * * @pte must contain at least the required words for the PTE. See - * __nvgpu_pte_words(). + * nvgpu_pte_words(). 
* * This function returns 0 on success and -EINVAL otherwise. */ -int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte); +int nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte); /* * Native GPU "HAL" functions. diff --git a/drivers/gpu/nvgpu/libnvgpu-drv.export b/drivers/gpu/nvgpu/libnvgpu-drv.export index ebf3f934b..e5addf29b 100644 --- a/drivers/gpu/nvgpu/libnvgpu-drv.export +++ b/drivers/gpu/nvgpu/libnvgpu-drv.export @@ -1,10 +1,8 @@ # Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. -__nvgpu_get_pte __nvgpu_log_dbg __nvgpu_log_msg __nvgpu_readl -__nvgpu_set_pte bitmap_clear bitmap_find_next_zero_area_off bitmap_set @@ -73,6 +71,7 @@ nvgpu_dma_free nvgpu_free nvgpu_free_enabled_flags nvgpu_free_fixed +nvgpu_get_pte nvgpu_gmmu_init_page_table nvgpu_gmmu_map nvgpu_gmmu_map_fixed @@ -159,6 +158,7 @@ nvgpu_runlist_construct_locked nvgpu_rwsem_init nvgpu_tsg_default_timeslice_us nvgpu_set_enabled +nvgpu_set_pte nvgpu_sgt_alignment nvgpu_sgt_create_from_mem nvgpu_sgt_free diff --git a/userspace/units/mm/gmmu/page_table/page_table.c b/userspace/units/mm/gmmu/page_table/page_table.c index 8448016de..3c70c2897 100644 --- a/userspace/units/mm/gmmu/page_table/page_table.c +++ b/userspace/units/mm/gmmu/page_table/page_table.c @@ -476,7 +476,7 @@ static int test_nvgpu_gmmu_map_unmap(struct unit_module *m, * Based on the VA returned from gmmu_map, lookup the corresponding * PTE */ - result = __nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); + result = nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); if (result != 0) { unit_return_fail(m, "PTE lookup failed with code=%d\n", result); } @@ -532,7 +532,7 @@ static int test_nvgpu_gmmu_map_unmap(struct unit_module *m, /* Now unmap the buffer and make sure the PTE is now invalid */ nvgpu_gmmu_unmap(g->mm.pmu.vm, &mem, mem.gpu_va); - result = __nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); + result = nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); if (result != 0) { unit_return_fail(m, "PTE lookup failed with code=%d\n", result); } @@ -636,7 +636,7 @@ static int test_nvgpu_gmmu_init_page_table_fail(struct unit_module *m, /* * Test: test_nvgpu_gmmu_set_pte - * This test targets the __nvgpu_set_pte() function by mapping a buffer, and + * This test targets the nvgpu_set_pte() function by mapping a buffer, and * then trying to alter the validity bit of the corresponding PTE. 
*/ static int test_nvgpu_gmmu_set_pte(struct unit_module *m, @@ -659,7 +659,7 @@ static int test_nvgpu_gmmu_set_pte(struct unit_module *m, unit_return_fail(m, "Failed to map GMMU page"); } - result = __nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); + result = nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); if (result != 0) { unit_return_fail(m, "PTE lookup failed with code=%d\n", result); } @@ -668,19 +668,19 @@ static int test_nvgpu_gmmu_set_pte(struct unit_module *m, pte[0] &= ~(gmmu_new_pte_valid_true_f()); /* Test error case where the VA is not mapped */ - result = __nvgpu_set_pte(g, g->mm.pmu.vm, TEST_INVALID_ADDRESS, + result = nvgpu_set_pte(g, g->mm.pmu.vm, TEST_INVALID_ADDRESS, &pte[0]); if (result == 0) { unit_return_fail(m, "Set PTE succeeded with invalid VA\n"); } /* Now rewrite PTE of the already mapped page */ - result = __nvgpu_set_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); + result = nvgpu_set_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); if (result != 0) { unit_return_fail(m, "Set PTE failed with code=%d\n", result); } - result = __nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); + result = nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]); if (result != 0) { unit_return_fail(m, "PTE lookup failed with code=%d\n", result); } @@ -919,7 +919,7 @@ static int check_pte_valid(struct unit_module *m, struct gk20a *g, u32 pte[2]; int result; - result = __nvgpu_get_pte(g, vm, mem->gpu_va, &pte[0]); + result = nvgpu_get_pte(g, vm, mem->gpu_va, &pte[0]); if (result != 0) { unit_return_fail(m, "PTE lookup failed with code=%d\n", result); } @@ -939,7 +939,7 @@ static int check_pte_invalidated(struct unit_module *m, struct gk20a *g, u32 pte[2]; int result; - result = __nvgpu_get_pte(g, vm, mem->gpu_va, &pte[0]); + result = nvgpu_get_pte(g, vm, mem->gpu_va, &pte[0]); if (result != 0) { unit_return_fail(m, "PTE lookup failed with code=%d\n", result); } diff --git a/userspace/units/mm/vm/vm.c b/userspace/units/mm/vm/vm.c index fd5019946..c472fe5ad 100644 --- a/userspace/units/mm/vm/vm.c +++ b/userspace/units/mm/vm/vm.c @@ -262,7 +262,7 @@ static int map_buffer(struct unit_module *m, /* * Based on the virtual address returned, lookup the corresponding PTE */ - ret = __nvgpu_get_pte(g, vm, mapped_buf->addr, pte); + ret = nvgpu_get_pte(g, vm, mapped_buf->addr, pte); if (ret != 0) { unit_err(m, "PTE lookup failed\n"); ret = UNIT_FAIL;
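
For reference, the renamed PTE helpers keep the read-modify-write pattern already used above by the gv11b page-fault fix and the page_table unit test. Below is a minimal sketch of that pattern against the new names (nvgpu_pte_words(), nvgpu_get_pte(), nvgpu_set_pte(), g->ops.fb.tlb_invalidate()); the wrapper function, its parameters, and the two includes are illustrative assumptions rather than part of this patch, and gmmu_new_pte_valid_true_f() is the chip-specific field helper the unit test uses, provided by the generated hw_gmmu header for the chip in question.

#include <nvgpu/gk20a.h>   /* assumed include for struct gk20a / g->ops */
#include <nvgpu/gmmu.h>    /* nvgpu_pte_words(), nvgpu_get_pte(), nvgpu_set_pte() */

/* Hypothetical helper: mark the PTE backing @vaddr in @vm as invalid. */
static int example_invalidate_pte(struct gk20a *g, struct vm_gk20a *vm,
				  u64 vaddr)
{
	/*
	 * nvgpu_pte_words() reports how many u32 words one PTE occupies;
	 * two words are enough for the chips exercised by the unit test.
	 */
	u32 pte[2] = { 0U, 0U };
	int err;

	if (nvgpu_pte_words(g) > 2U) {
		return -EINVAL;
	}

	/* Look up the PTE for the GPU virtual address. */
	err = nvgpu_get_pte(g, vm, vaddr, &pte[0]);
	if (err != 0) {
		return err;
	}

	/* Clear the valid bit (chip-specific field helper, see the unit test). */
	pte[0] &= ~gmmu_new_pte_valid_true_f();

	/* Write the modified words back to the page table. */
	err = nvgpu_set_pte(g, vm, vaddr, &pte[0]);
	if (err != 0) {
		return err;
	}

	/* Drop any cached translation, as the gv11b fault-fix path does. */
	g->ops.fb.tlb_invalidate(g, vm->pdb.mem);

	return 0;
}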