Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 02:22:34 +03:00)
gpu: nvgpu: Create hal/mm/gmmu and move gk20a GMMU code
Make a hal/mm/gmmu sub-unit for the GMMU HAL code. Also move the gk20a
specific HAL code there. gp10b will happen in the next patch.

This change also updates all the GMMU related HAL usage, of which there
is quite a bit. Generally the only change is that a .gmmu needs to be
inserted into the HAL path. Each HAL init was also updated.

JIRA NVGPU-2042

Change-Id: I6c46bdfddb8e021f56103d9457fb3e2a226f8947
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2099693
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: a9b2a01001
Commit: efbe371fd5
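The shape of the change, as a minimal self-contained C sketch. All struct
definitions, names, and the 64K return value below are simplified
assumptions for illustration only; the real nvgpu gpu_ops struct has far
more members than shown here.

#include <stdint.h>

/*
 * Hypothetical, trimmed-down view of the HAL ops layout. Before this
 * patch the GMMU ops sat directly under mm; after it they sit under
 * the new mm.gmmu sub-unit.
 */
struct gops_mm_gmmu {
	uint32_t (*get_default_big_page_size)(void);
};

struct gops_mm {
	struct gops_mm_gmmu gmmu;	/* the new hal/mm/gmmu sub-unit */
};

struct gpu_ops {
	struct gops_mm mm;
};

struct gk20a {
	struct gpu_ops ops;
};

/* Stand-in for a chip-specific HAL function such as
 * gp10b_mm_get_default_big_page_size(); the value is illustrative. */
static uint32_t fake_get_default_big_page_size(void)
{
	return 64u * 1024u;
}

int main(void)
{
	struct gk20a g = { 0 };

	/* HAL init: formerly g.ops.mm.get_default_big_page_size = ... */
	g.ops.mm.gmmu.get_default_big_page_size =
		fake_get_default_big_page_size;

	/* Call site: inserting ".gmmu" into the path is the whole change. */
	return g.ops.mm.gmmu.get_default_big_page_size() != 0u ? 0 : 1;
}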
@@ -68,9 +68,9 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 	/* Initialize vm */

 	/* Minimum HALs for vm_init */
-	g->ops.mm.get_default_big_page_size =
+	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
-	g->ops.mm.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;

 	/* Minimum HAL init for PRAMIN */
 	g->ops.bus.set_bar0_window = gk20a_bus_set_bar0_window;
@@ -90,7 +90,7 @@ static struct vm_gk20a *init_vm_env(struct unit_module *m, struct gk20a *g,
 	flags |= GPU_ALLOC_GVA_SPACE;

 	/* Init vm with big_pages disabled */
-	test_vm = nvgpu_vm_init(g, g->ops.mm.get_default_big_page_size(),
+	test_vm = nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
 				low_hole,
 				aperture_size - low_hole,
 				aperture_size,
@@ -296,14 +296,14 @@ static int init_mm(struct unit_module *m, struct gk20a *g)

 	p->mm_is_iommuable = true;

-	g->ops.mm.get_default_big_page_size =
+	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
-	g->ops.mm.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
 	g->ops.mm.alloc_inst_block = gk20a_alloc_inst_block;
 	g->ops.mm.init_inst_block = gv11b_init_inst_block;
-	g->ops.mm.gmmu_map = nvgpu_gmmu_map_locked;
-	g->ops.mm.gmmu_unmap = nvgpu_gmmu_unmap_locked;
-	g->ops.mm.gpu_phys_addr = gv11b_gpu_phys_addr;
+	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
+	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
+	g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
 	g->ops.mm.is_bar1_supported = gv11b_mm_is_bar1_supported;
 	g->ops.mm.cache.l2_flush = gv11b_mm_l2_flush;
 	g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;
@@ -329,7 +329,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	mm->channel.kernel_size = NV_MM_DEFAULT_KERNEL_SIZE;


-	mm->pmu.vm = nvgpu_vm_init(g, g->ops.mm.get_default_big_page_size(),
+	mm->pmu.vm = nvgpu_vm_init(g,
+				   g->ops.mm.gmmu.get_default_big_page_size(),
 				   low_hole,
 				   aperture_size - low_hole,
 				   aperture_size,
@@ -729,7 +730,7 @@ static struct nvgpu_sgt *custom_sgt_create(struct unit_module *m,
 }

 /*
- * Helper function to wrap calls to g->ops.mm.gmmu_map and thus giving
+ * Helper function to wrap calls to g->ops.mm.gmmu.map and thus giving
  * access to more parameters
  */
 static u64 gmmu_map_advanced(struct unit_module *m, struct gk20a *g,
@@ -755,7 +756,7 @@ static u64 gmmu_map_advanced(struct unit_module *m, struct gk20a *g,

 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

-	vaddr = g->ops.mm.gmmu_map(vm, (u64) mem->cpu_va,
+	vaddr = g->ops.mm.gmmu.map(vm, (u64) mem->cpu_va,
 				   sgt,
 				   offset,
 				   mem->size,
@@ -775,7 +776,7 @@ static u64 gmmu_map_advanced(struct unit_module *m, struct gk20a *g,
 }

 /*
- * Helper function to wrap calls to g->ops.mm.gmmu_unmap and thus giving
+ * Helper function to wrap calls to g->ops.mm.gmmu.unmap and thus giving
  * access to more parameters
  */
 static void gmmu_unmap_advanced(struct vm_gk20a *vm, struct nvgpu_mem *mem,
@@ -786,7 +787,7 @@ static void gmmu_unmap_advanced(struct vm_gk20a *vm, struct nvgpu_mem *mem,

 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

-	g->ops.mm.gmmu_unmap(vm,
+	g->ops.mm.gmmu.unmap(vm,
 			     gpu_va,
 			     mem->size,
 			     params->page_size,
@@ -973,9 +974,9 @@ static struct vm_gk20a *init_test_req_vm(struct gk20a *g)
 	/* 1.4. Have a 4GB kernel reserved space */
 	kernel_reserved = 4 * SZ_1G;

-	return nvgpu_vm_init(g, g->ops.mm.get_default_big_page_size(), low_hole,
-			     kernel_reserved - low_hole, aperture_size, big_pages,
-			     true, true, "testmem");
+	return nvgpu_vm_init(g, g->ops.mm.gmmu.get_default_big_page_size(),
+			     low_hole, kernel_reserved - low_hole,
+			     aperture_size, big_pages, true, true, "testmem");
 }

 /* Test case to cover NVGPU-RQCD-45 C1 */
@@ -344,7 +344,7 @@ static int test_nvgpu_mem_iommu_translate(struct unit_module *m,
 	 * Case: mm is iommuable
 	 * Set HAL to enable iommu_translate
 	 */
-	g->ops.mm.get_iommu_bit = gp10b_mm_get_iommu_bit;
+	g->ops.mm.gmmu.get_iommu_bit = gp10b_mm_get_iommu_bit;

 	temp_phys = nvgpu_mem_iommu_translate(g, test_sgl->phys);
 	if (temp_phys == test_sgl->phys) {
@@ -117,14 +117,14 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	p->mm_is_iommuable = true;

 	/* Minimum HALs for page_table */
-	g->ops.mm.get_default_big_page_size =
+	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
-	g->ops.mm.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
 	g->ops.mm.alloc_inst_block = gk20a_alloc_inst_block;
 	g->ops.mm.init_inst_block = gv11b_init_inst_block;
-	g->ops.mm.gmmu_map = nvgpu_gmmu_map_locked;
-	g->ops.mm.gmmu_unmap = nvgpu_gmmu_unmap_locked;
-	g->ops.mm.gpu_phys_addr = gv11b_gpu_phys_addr;
+	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
+	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
+	g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
 	g->ops.fb.compression_page_size = gp10b_fb_compression_page_size;
 	g->ops.fb.tlb_invalidate = gm20b_fb_tlb_invalidate;
 	g->ops.ramin.init_pdb = gp10b_ramin_init_pdb;
@@ -179,7 +179,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	aperture_size = GK20A_PMU_VA_SIZE;
 	mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;

-	mm->pmu.vm = nvgpu_vm_init(g, g->ops.mm.get_default_big_page_size(),
+	mm->pmu.vm = nvgpu_vm_init(g,
+				   g->ops.mm.gmmu.get_default_big_page_size(),
 				   low_hole,
 				   aperture_size - low_hole,
 				   aperture_size,
@@ -193,7 +194,8 @@ static int init_mm(struct unit_module *m, struct gk20a *g)

 	/* BAR2 memory space */
 	mm->bar2.aperture_size = U32(32) << 20U;
-	mm->bar2.vm = nvgpu_vm_init(g, g->ops.mm.get_default_big_page_size(),
+	mm->bar2.vm = nvgpu_vm_init(g,
+				    g->ops.mm.gmmu.get_default_big_page_size(),
 				    SZ_4K, mm->bar2.aperture_size - SZ_4K,
 				    mm->bar2.aperture_size, false, false, false, "bar2");
 	if (mm->bar2.vm == NULL) {
@@ -323,10 +325,10 @@ static int test_page_faults_inst_block(struct unit_module *m, struct gk20a *g,
 	/* Handle some corner cases */
 	if (scenario == 1) {
 		/* Init inst_block with large page size */
-		big_page_size = g->ops.mm.get_default_big_page_size();
+		big_page_size = g->ops.mm.gmmu.get_default_big_page_size();
 	} else if (scenario == 2) {
 		/* Handle branch case in gv11b_init_inst_block() */
-		big_page_size = g->ops.mm.get_default_big_page_size();
+		big_page_size = g->ops.mm.gmmu.get_default_big_page_size();
 		g->ops.ramin.set_big_page_size = NULL;
 	}

@@ -133,12 +133,12 @@ static int init_test_env(struct unit_module *m, struct gk20a *g)
 	g->ops.fb.compression_page_size = gp10b_fb_compression_page_size;
 	g->ops.fb.tlb_invalidate = gm20b_fb_tlb_invalidate;

-	g->ops.mm.get_default_big_page_size =
+	g->ops.mm.gmmu.get_default_big_page_size =
 		gp10b_mm_get_default_big_page_size;
-	g->ops.mm.get_mmu_levels = gp10b_mm_get_mmu_levels;
-	g->ops.mm.gmmu_map = nvgpu_gmmu_map_locked;
-	g->ops.mm.gmmu_unmap = nvgpu_gmmu_unmap_locked;
-	g->ops.mm.gpu_phys_addr = gv11b_gpu_phys_addr;
+	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
+	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
+	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
+	g->ops.mm.gmmu.gpu_phys_addr = gv11b_gpu_phys_addr;
 	g->ops.mm.cache.l2_flush = gv11b_mm_l2_flush;
 	g->ops.mm.cache.fb_flush = gk20a_mm_fb_flush;

@@ -388,7 +388,7 @@ static int test_map_buf(struct unit_module *m, struct gk20a *g, void *__args)
 	unit_info(m, " - Kernel Reserved Size = 0x%llx\n", kernel_reserved);
 	unit_info(m, " - Total Aperture Size = 0x%llx\n", aperture_size);
 	vm = nvgpu_vm_init(g,
-			   g->ops.mm.get_default_big_page_size(),
+			   g->ops.mm.gmmu.get_default_big_page_size(),
 			   low_hole,
 			   kernel_reserved,
 			   aperture_size,
@@ -524,7 +524,7 @@ static int test_map_buf_gpu_va(struct unit_module *m,
 	unit_info(m, " - Kernel Reserved Size = 0x%llx\n", kernel_reserved);
 	unit_info(m, " - Total Aperture Size = 0x%llx\n", aperture_size);
 	vm = nvgpu_vm_init(g,
-			   g->ops.mm.get_default_big_page_size(),
+			   g->ops.mm.gmmu.get_default_big_page_size(),
 			   low_hole,
 			   kernel_reserved,
 			   aperture_size,