gpu: nvgpu: simplify gmmu map calls

Introduce nvgpu_gmmu_map_partial() to map a specific size of a buffer
represented by nvgpu_mem — i.e., what nvgpu_gmmu_map() used to do. Delete
the size parameter from nvgpu_gmmu_map() so that it now maps the
entire buffer. The separate size parameter is a historical artifact from
the time before nvgpu_mem existed; the typical use case is to map the
entire buffer.

Mapping at a certain address with nvgpu_gmmu_map_fixed() still takes the
size parameter.

The returned address still has to be stored somewhere, typically in
mem.gpu_va, by the caller so that the matching unmap variant finds the
right address.

Change-Id: I7d67a0b15d741c6bcee1aecff1678e3216cc28d2
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2601788
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Konsta Hölttä
2021-09-28 09:24:17 +03:00
committed by mobile promotions
parent 6d38ae76ce
commit 1b1d183b9c
15 changed files with 50 additions and 24 deletions

View File

@@ -479,7 +479,7 @@ int test_nvgpu_gmmu_map_unmap(struct unit_module *m, struct gk20a *g,
params->flags, params->rw_flag, params->priv,
params->aperture);
} else {
mem.gpu_va = nvgpu_gmmu_map(g->mm.pmu.vm, &mem, mem.size,
mem.gpu_va = nvgpu_gmmu_map(g->mm.pmu.vm, &mem,
params->flags, params->rw_flag, params->priv,
params->aperture);
}
@@ -607,7 +607,7 @@ int test_nvgpu_gmmu_map_unmap_map_fail(struct unit_module *m, struct gk20a *g,
g->ops.fb.tlb_invalidate = hal_fb_tlb_invalidate_fail;
}
mem.gpu_va = nvgpu_gmmu_map(g->mm.pmu.vm, &mem, mem.size,
mem.gpu_va = nvgpu_gmmu_map(g->mm.pmu.vm, &mem,
NVGPU_VM_MAP_CACHEABLE, gk20a_mem_flag_none,
true, APERTURE_SYSMEM);
@@ -656,7 +656,7 @@ int test_nvgpu_gmmu_set_pte(struct unit_module *m, struct gk20a *g, void *args)
p->mm_is_iommuable = params->is_iommuable;
mem.size = TEST_SIZE;
mem.cpu_va = (void *) TEST_PA_ADDRESS;
mem.gpu_va = nvgpu_gmmu_map(g->mm.pmu.vm, &mem, mem.size,
mem.gpu_va = nvgpu_gmmu_map(g->mm.pmu.vm, &mem,
params->flags, params->rw_flag, params->priv,
params->aperture);
@@ -1053,7 +1053,7 @@ int test_nvgpu_page_table_c1_full(struct unit_module *m, struct gk20a *g,
&test_iommu_sysmem, NULL, vm, mixed_sgt);
} else {
mem[mem_i].gpu_va = nvgpu_gmmu_map(vm, &mem[mem_i],
mem[mem_i].size, NVGPU_VM_MAP_CACHEABLE,
NVGPU_VM_MAP_CACHEABLE,
gk20a_mem_flag_none, true, APERTURE_SYSMEM);
}

View File

@@ -348,7 +348,7 @@ int test_sync_get_ro_map(struct unit_module *m, struct gk20a *g, void *args)
for (branches = 0U; branches <= F_SYNC_GET_RO_MAP_MAX; branches++) {
if (branches == F_SYNC_GET_RO_MAP_PRE_ALLOCATED) {
ch->vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map(ch->vm,
ch->vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map_partial(ch->vm,
&g->syncpt_mem, g->syncpt_unit_size,
0, gk20a_mem_flag_read_only,
false, APERTURE_SYSMEM);