gpu: nvgpu: rename gk20a_locked_gmmu_map() and move to gmmu.h

Rename the two native GPU GMMU map/unmap functions and update the
HAL initializations to reflect this:

  gk20a_locked_gmmu_map   -> nvgpu_gmmu_map_locked
  gk20a_locked_gmmu_unmap -> nvgpu_gmmu_unmap_locked

This matches what other units do for handling vGPU "HAL" indirection.
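
As a sketch of that indirection (the HAL assignments below come from
this change; the vGPU-side function name is illustrative only and not
part of this patch): native chips plug the common implementation into
the mm HAL, a vGPU build plugs in its own version, and callers always
go through the op pointer.

  /* Native chips install the common implementation in the HAL: */
  g->ops.mm.gmmu_map   = nvgpu_gmmu_map_locked;
  g->ops.mm.gmmu_unmap = nvgpu_gmmu_unmap_locked;

  /* A vGPU build would instead install its own op here; the name
   * below is illustrative only:
   */
  g->ops.mm.gmmu_map   = vgpu_locked_gmmu_map;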

Also move the function declarations to <nvgpu/gmmu.h> since these are
shared among all non-vGPU chips. But since these are still technically
HAL operations, they should never be called directly. This is a bit of
an organizational issue that I have not thought through how to solve
yet.
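
For example (a sketch only, assuming the usual nvgpu_mutex_acquire()
and nvgpu_mutex_release() helpers and the update_gmmu_lock mutex on
struct vm_gk20a), a caller takes the VM lock and dispatches through
the HAL pointer instead of calling nvgpu_gmmu_map_locked() directly:

  /* Dispatch through the HAL with the GMMU lock held. */
  nvgpu_mutex_acquire(&vm->update_gmmu_lock);
  vaddr = g->ops.mm.gmmu_map(vm, vaddr, sgt, buffer_offset, size,
                             pgsz_idx, kind_v, ctag_offset, flags,
                             rw_flag, clear_ctags, sparse, priv,
                             batch, aperture);
  nvgpu_mutex_release(&vm->update_gmmu_lock);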

Ideally they would go into a "hal/mm/gmmu/" include somewhere, but
that A) doesn't yet exist, and B) those are chip specific; these
functions are native specific. Ugh.

JIRA NVGPU-2042

Change-Id: Ibc614f2928630d12eafcec6ce73019628b44ad94
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2099692
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Alex Waterman
2019-04-15 12:19:46 -07:00
committed by mobile promotions
parent 0561bace40
commit 32eea0988c
12 changed files with 48 additions and 48 deletions

View File

@@ -750,7 +750,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 }
 
 /**
- * gk20a_locked_gmmu_map - Map a buffer into the GMMU
+ * nvgpu_gmmu_map_locked - Map a buffer into the GMMU
  *
  * This is for non-vGPU chips. It's part of the HAL at the moment but really
  * should not be. Chip specific stuff is handled at the PTE/PDE programming
@@ -761,7 +761,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
  * mm.gmmu_lock() HAL. So before calling the mm.gmmu_lock() HAL make sure you
  * have the update_gmmu_lock aquired.
  */
-u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
+u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
                         u64 vaddr,
                         struct nvgpu_sgt *sgt,
                         u64 buffer_offset,
@@ -847,7 +847,7 @@ fail_alloc:
         return 0;
 }
 
-void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
+void nvgpu_gmmu_unmap_locked(struct vm_gk20a *vm,
                         u64 vaddr,
                         u64 size,
                         u32 pgsz_idx,

View File

@@ -30,8 +30,6 @@
 #include <nvgpu/rbtree.h>
 #include <nvgpu/kref.h>
 
-enum gk20a_mem_rw_flag;
-
 struct compbit_store_desc {
         struct nvgpu_mem mem;
@@ -83,31 +81,6 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
                 u32 big_page_size);
 int gk20a_init_mm_setup_hw(struct gk20a *g);
 
-u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
-                        u64 vaddr,
-                        struct nvgpu_sgt *sgt,
-                        u64 buffer_offset,
-                        u64 size,
-                        u32 pgsz_idx,
-                        u8 kind_v,
-                        u32 ctag_offset,
-                        u32 flags,
-                        enum gk20a_mem_rw_flag rw_flag,
-                        bool clear_ctags,
-                        bool sparse,
-                        bool priv,
-                        struct vm_gk20a_mapping_batch *batch,
-                        enum nvgpu_aperture aperture);
-
-void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
-                        u64 vaddr,
-                        u64 size,
-                        u32 pgsz_idx,
-                        bool va_allocated,
-                        enum gk20a_mem_rw_flag rw_flag,
-                        bool sparse,
-                        struct vm_gk20a_mapping_batch *batch);
-
 /* vm-as interface */
 struct nvgpu_as_alloc_space_args;
 struct nvgpu_as_free_space_args;

View File

@@ -808,8 +808,8 @@ static const struct gpu_ops gm20b_ops = {
                 .is_fw_defined = gm20b_netlist_is_firmware_defined,
         },
         .mm = {
-                .gmmu_map = gk20a_locked_gmmu_map,
-                .gmmu_unmap = gk20a_locked_gmmu_unmap,
+                .gmmu_map = nvgpu_gmmu_map_locked,
+                .gmmu_unmap = nvgpu_gmmu_unmap_locked,
                 .vm_bind_channel = gk20a_vm_bind_channel,
                 .get_big_page_sizes = gm20b_mm_get_big_page_sizes,
                 .get_default_big_page_size = gm20b_mm_get_default_big_page_size,

View File

@@ -909,8 +909,8 @@ static const struct gpu_ops gp10b_ops = {
                 .is_fw_defined = gp10b_netlist_is_firmware_defined,
         },
         .mm = {
-                .gmmu_map = gk20a_locked_gmmu_map,
-                .gmmu_unmap = gk20a_locked_gmmu_unmap,
+                .gmmu_map = nvgpu_gmmu_map_locked,
+                .gmmu_unmap = nvgpu_gmmu_unmap_locked,
                 .vm_bind_channel = gk20a_vm_bind_channel,
                 .get_big_page_sizes = gm20b_mm_get_big_page_sizes,
                 .get_default_big_page_size = gp10b_mm_get_default_big_page_size,

View File

@@ -1092,8 +1092,8 @@ static const struct gpu_ops gv100_ops = {
                 .is_fw_defined = gv100_netlist_is_firmware_defined,
         },
         .mm = {
-                .gmmu_map = gk20a_locked_gmmu_map,
-                .gmmu_unmap = gk20a_locked_gmmu_unmap,
+                .gmmu_map = nvgpu_gmmu_map_locked,
+                .gmmu_unmap = nvgpu_gmmu_unmap_locked,
                 .vm_bind_channel = gk20a_vm_bind_channel,
                 .get_big_page_sizes = gm20b_mm_get_big_page_sizes,
                 .get_default_big_page_size = gp10b_mm_get_default_big_page_size,

View File

@@ -1068,8 +1068,8 @@ static const struct gpu_ops gv11b_ops = {
                 .is_fw_defined = gv11b_netlist_is_firmware_defined,
         },
         .mm = {
-                .gmmu_map = gk20a_locked_gmmu_map,
-                .gmmu_unmap = gk20a_locked_gmmu_unmap,
+                .gmmu_map = nvgpu_gmmu_map_locked,
+                .gmmu_unmap = nvgpu_gmmu_unmap_locked,
                 .vm_bind_channel = gk20a_vm_bind_channel,
                 .get_big_page_sizes = gm20b_mm_get_big_page_sizes,
                 .get_default_big_page_size = gp10b_mm_get_default_big_page_size,

View File

@@ -38,6 +38,7 @@
 struct vm_gk20a;
 struct nvgpu_mem;
 struct nvgpu_gmmu_pd;
+struct vm_gk20a_mapping_batch;
 
 #define GMMU_PAGE_SIZE_SMALL 0U
 #define GMMU_PAGE_SIZE_BIG 1U
@@ -213,6 +214,32 @@ int __nvgpu_get_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte);
  */
 int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte);
 
+/*
+ * Native GPU "HAL" functions.
+ */
+u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
+                        u64 vaddr,
+                        struct nvgpu_sgt *sgt,
+                        u64 buffer_offset,
+                        u64 size,
+                        u32 pgsz_idx,
+                        u8 kind_v,
+                        u32 ctag_offset,
+                        u32 flags,
+                        enum gk20a_mem_rw_flag rw_flag,
+                        bool clear_ctags,
+                        bool sparse,
+                        bool priv,
+                        struct vm_gk20a_mapping_batch *batch,
+                        enum nvgpu_aperture aperture);
+void nvgpu_gmmu_unmap_locked(struct vm_gk20a *vm,
+                        u64 vaddr,
+                        u64 size,
+                        u32 pgsz_idx,
+                        bool va_allocated,
+                        enum gk20a_mem_rw_flag rw_flag,
+                        bool sparse,
+                        struct vm_gk20a_mapping_batch *batch);
+
 /*
  * Internal debugging routines. Probably not something you want to use.

View File

@@ -28,8 +28,8 @@ gk20a_alloc_inst_block
 gk20a_bus_set_bar0_window
 gk20a_runlist_get_ch_entry
 gk20a_runlist_get_tsg_entry
-gk20a_locked_gmmu_map
-gk20a_locked_gmmu_unmap
+nvgpu_gmmu_map_locked
+nvgpu_gmmu_unmap_locked
 gk20a_ramin_alloc_size
 gk20a_mm_fb_flush
 gm20b_fb_tlb_invalidate

View File

@@ -1130,8 +1130,8 @@ static const struct gpu_ops tu104_ops = {
                 .is_fw_defined = tu104_netlist_is_firmware_defined,
         },
         .mm = {
-                .gmmu_map = gk20a_locked_gmmu_map,
-                .gmmu_unmap = gk20a_locked_gmmu_unmap,
+                .gmmu_map = nvgpu_gmmu_map_locked,
+                .gmmu_unmap = nvgpu_gmmu_unmap_locked,
                 .vm_bind_channel = gk20a_vm_bind_channel,
                 .get_big_page_sizes = gm20b_mm_get_big_page_sizes,
                 .get_default_big_page_size = gp10b_mm_get_default_big_page_size,

View File

@@ -301,8 +301,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         g->ops.mm.get_mmu_levels = gp10b_mm_get_mmu_levels;
         g->ops.mm.alloc_inst_block = gk20a_alloc_inst_block;
         g->ops.mm.init_inst_block = gv11b_init_inst_block;
-        g->ops.mm.gmmu_map = gk20a_locked_gmmu_map;
-        g->ops.mm.gmmu_unmap = gk20a_locked_gmmu_unmap;
+        g->ops.mm.gmmu_map = nvgpu_gmmu_map_locked;
+        g->ops.mm.gmmu_unmap = nvgpu_gmmu_unmap_locked;
         g->ops.mm.gpu_phys_addr = gv11b_gpu_phys_addr;
         g->ops.mm.is_bar1_supported = gv11b_mm_is_bar1_supported;
         g->ops.mm.cache.l2_flush = gv11b_mm_l2_flush;

View File

@@ -122,8 +122,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
         g->ops.mm.get_mmu_levels = gp10b_mm_get_mmu_levels;
         g->ops.mm.alloc_inst_block = gk20a_alloc_inst_block;
         g->ops.mm.init_inst_block = gv11b_init_inst_block;
-        g->ops.mm.gmmu_map = gk20a_locked_gmmu_map;
-        g->ops.mm.gmmu_unmap = gk20a_locked_gmmu_unmap;
+        g->ops.mm.gmmu_map = nvgpu_gmmu_map_locked;
+        g->ops.mm.gmmu_unmap = nvgpu_gmmu_unmap_locked;
         g->ops.mm.gpu_phys_addr = gv11b_gpu_phys_addr;
         g->ops.fb.compression_page_size = gp10b_fb_compression_page_size;
         g->ops.fb.tlb_invalidate = gm20b_fb_tlb_invalidate;

View File

@@ -136,8 +136,8 @@ static int init_test_env(struct unit_module *m, struct gk20a *g)
         g->ops.mm.get_default_big_page_size =
                 gp10b_mm_get_default_big_page_size;
         g->ops.mm.get_mmu_levels = gp10b_mm_get_mmu_levels;
-        g->ops.mm.gmmu_map = gk20a_locked_gmmu_map;
-        g->ops.mm.gmmu_unmap = gk20a_locked_gmmu_unmap;
+        g->ops.mm.gmmu_map = nvgpu_gmmu_map_locked;
+        g->ops.mm.gmmu_unmap = nvgpu_gmmu_unmap_locked;
         g->ops.mm.gpu_phys_addr = gv11b_gpu_phys_addr;
         g->ops.mm.cache.l2_flush = gv11b_mm_l2_flush;
         g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;