gpu: nvgpu: Create hal/mm/gmmu and move gk20a GMMU code

Make a hal/mm/gmmu sub-unit for the GMMU HAL code and move the
gk20a-specific HAL code there. The gp10b code will be moved in the
next patch.

This change also updates all of the GMMU-related HAL usage, of which
there is quite a bit. In most cases the only change is that a .gmmu
step needs to be inserted into the HAL path (see the sketch below).
Each chip's HAL init was also updated to match.

JIRA NVGPU-2042

Change-Id: I6c46bdfddb8e021f56103d9457fb3e2a226f8947
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2099693
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
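
For context, the shape of the HAL change is roughly as follows. This is a
minimal, self-contained sketch, not the actual nvgpu headers: the struct
and function names below are simplified stand-ins for the real gpu_ops
layout. It shows why call sites gain a ".gmmu" step and why each chip's
HAL init has to be touched.

#include <stdio.h>

/* GMMU-specific ops now live in their own sub-unit... */
struct gops_mm_gmmu {
	unsigned int (*get_default_big_page_size)(void);
};

/* ...nested inside the mm ops rather than sitting flat in them. */
struct gops_mm {
	struct gops_mm_gmmu gmmu;
	/* non-GMMU mm ops stay at this level */
};

struct gpu_ops {
	struct gops_mm mm;
};

/* Stand-in for a per-chip (e.g. gk20a) HAL function. */
static unsigned int example_default_big_page_size(void)
{
	return 64U * 1024U; /* illustrative value only */
}

int main(void)
{
	struct gpu_ops ops = { 0 };

	/* Each chip's HAL init now fills in the nested sub-struct... */
	ops.mm.gmmu.get_default_big_page_size =
		example_default_big_page_size;

	/* ...and callers insert ".gmmu" into the HAL path. */
	printf("big page size: %u\n",
	       ops.mm.gmmu.get_default_big_page_size());
	return 0;
}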

@@ -327,7 +327,8 @@ gk20a_ctrl_ioctl_gpu_characteristics(
 	gpu.big_page_size = nvgpu_mm_get_default_big_page_size(g);
 	gpu.pde_coverage_bit_count =
-		g->ops.mm.get_mmu_levels(g, gpu.big_page_size)[0].lo_bit[0];
+		g->ops.mm.gmmu.get_mmu_levels(g,
+			gpu.big_page_size)[0].lo_bit[0];
 	gpu.available_big_page_sizes = nvgpu_mm_get_available_big_page_sizes(g);
 	gpu.sm_arch_sm_version = g->params.sm_arch_sm_version;

@@ -1355,7 +1355,7 @@ static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
 	struct mm_gk20a *mm = &g->mm;
 	int err;
 	u32 virt_size;
-	u32 big_page_size = g->ops.mm.get_default_big_page_size();
+	u32 big_page_size = g->ops.mm.gmmu.get_default_big_page_size();
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

@@ -64,11 +64,11 @@ u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl)
 {
 	if (nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ||
 	    !nvgpu_iommuable(g))
-		return g->ops.mm.gpu_phys_addr(g, NULL,
+		return g->ops.mm.gmmu.gpu_phys_addr(g, NULL,
 				__nvgpu_sgl_phys(g, (struct nvgpu_sgl *)sgl));
 
 	if (sg_dma_address(sgl) == 0)
-		return g->ops.mm.gpu_phys_addr(g, NULL,
+		return g->ops.mm.gmmu.gpu_phys_addr(g, NULL,
 				__nvgpu_sgl_phys(g, (struct nvgpu_sgl *)sgl));
 
 	if (sg_dma_address(sgl) == DMA_ERROR_CODE)
@@ -230,7 +230,7 @@ static u64 nvgpu_mem_linux_sgl_gpu_addr(struct gk20a *g,
 					struct nvgpu_gmmu_attrs *attrs)
 {
 	if (sg_dma_address((struct scatterlist *)sgl) == 0)
-		return g->ops.mm.gpu_phys_addr(g, attrs,
+		return g->ops.mm.gmmu.gpu_phys_addr(g, attrs,
 				__nvgpu_sgl_phys(g, sgl));
 
 	if (sg_dma_address((struct scatterlist *)sgl) == DMA_ERROR_CODE)