gpu: nvgpu: simplify gmmu unmap calls
Introduce nvgpu_gmmu_unmap_addr() to unmap an nvgpu_mem that was mapped at an address other than mem.gpu_va, which can be the case for buffers that are shared across different address spaces. Delete the address parameter from nvgpu_gmmu_unmap(), as the common case is to store the address in mem.gpu_va when mapping the buffer. Modify some instances of consecutive unmap + free calls to call just nvgpu_dma_unmap_free().

Change-Id: Iecd7c9aa41d04e9f48e055f6bc0c9227cd759c69
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2601787
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 44422db851
parent 9bdb8f1a10
committed by: mobile promotions
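As a quick illustration of the call-site change, here is a minimal sketch of a typical teardown path before and after this commit. The foo_ctx structure and foo_ctx_free() helper are hypothetical; the real call sites are in the diff below.

/* Before: unmap at the VA recorded at map time, then free the backing memory. */
static void foo_ctx_free(struct gk20a *g, struct vm_gk20a *vm, struct foo_ctx *ctx)
{
	nvgpu_gmmu_unmap(vm, &ctx->mem, ctx->mem.gpu_va);
	nvgpu_dma_free(g, &ctx->mem);
}

/* After: the common case, where the mapping lives in mem.gpu_va, is one call. */
static void foo_ctx_free(struct gk20a *g, struct vm_gk20a *vm, struct foo_ctx *ctx)
{
	nvgpu_dma_unmap_free(vm, &ctx->mem);
}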
@@ -163,10 +163,7 @@ void nvgpu_gr_ctx_free_patch_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	struct patch_desc *patch_ctx = &gr_ctx->patch_ctx;

 	if (nvgpu_mem_is_valid(&patch_ctx->mem)) {
-		nvgpu_gmmu_unmap(vm, &patch_ctx->mem,
-			patch_ctx->mem.gpu_va);
-
-		nvgpu_dma_free(g, &patch_ctx->mem);
+		nvgpu_dma_unmap_free(vm, &patch_ctx->mem);
 		patch_ctx->data_count = 0;
 	}
 }

@@ -1027,9 +1024,7 @@ void nvgpu_gr_ctx_free_pm_ctx(struct gk20a *g, struct vm_gk20a *vm,
 	struct pm_ctx_desc *pm_ctx = &gr_ctx->pm_ctx;

 	if (pm_ctx->mem.gpu_va != 0ULL) {
-		nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va);
-
-		nvgpu_dma_free(g, &pm_ctx->mem);
+		nvgpu_dma_unmap_free(vm, &pm_ctx->mem);
 	}
 }

@@ -338,7 +338,7 @@ void nvgpu_gr_global_ctx_buffer_unmap(
 	struct vm_gk20a *vm, u64 gpu_va)
 {
 	if (nvgpu_mem_is_valid(&desc[index].mem)) {
-		nvgpu_gmmu_unmap(vm, &desc[index].mem, gpu_va);
+		nvgpu_gmmu_unmap_addr(vm, &desc[index].mem, gpu_va);
 	}
 }

@@ -75,9 +75,7 @@ void nvgpu_gr_subctx_free(struct gk20a *g,
 {
 	nvgpu_log_fn(g, " ");

-	nvgpu_gmmu_unmap(vm, &subctx->ctx_header,
-		subctx->ctx_header.gpu_va);
-	nvgpu_dma_free(g, &subctx->ctx_header);
+	nvgpu_dma_unmap_free(vm, &subctx->ctx_header);
 	nvgpu_kfree(g, subctx);
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -226,7 +226,7 @@ void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
 {
 	if (mem->gpu_va != 0ULL) {
-		nvgpu_gmmu_unmap(vm, mem, mem->gpu_va);
+		nvgpu_gmmu_unmap(vm, mem);
 	}
 	mem->gpu_va = 0;

@@ -159,7 +159,7 @@ u64 nvgpu_gmmu_map_fixed(struct vm_gk20a *vm,
 		aperture);
 }

-void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
+void nvgpu_gmmu_unmap_addr(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
 {
 	struct gk20a *g = gk20a_from_vm(vm);

@@ -176,6 +176,11 @@ void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }

+void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem)
+{
+	nvgpu_gmmu_unmap_addr(vm, mem, mem->gpu_va);
+}
+
 int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 {
 	u32 pdb_size;

@@ -948,7 +948,7 @@ static void nvgpu_vm_remove(struct vm_gk20a *vm)

 	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
 		(vm->syncpt_ro_map_gpu_va != 0ULL)) {
-		nvgpu_gmmu_unmap(vm, &g->syncpt_mem,
+		nvgpu_gmmu_unmap_addr(vm, &g->syncpt_mem,
 			vm->syncpt_ro_map_gpu_va);
 	}

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -160,7 +160,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 fail_free_submem:
 	nvgpu_dma_free(pool_to_gk20a(p), &p->rw_mem);
 fail_unmap:
-	nvgpu_gmmu_unmap(vm, &p->sema_sea->sea_mem, p->gpu_va_ro);
+	nvgpu_gmmu_unmap_addr(vm, &p->sema_sea->sea_mem, p->gpu_va_ro);
 	gpu_sema_dbg(pool_to_gk20a(p),
 		" %llu: Failed to map semaphore pool!", p->page_idx);
 fail_unlock:

@@ -176,8 +176,8 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,
 {
 	nvgpu_semaphore_sea_lock(p->sema_sea);

-	nvgpu_gmmu_unmap(vm, &p->sema_sea->sea_mem, p->gpu_va_ro);
-	nvgpu_gmmu_unmap(vm, &p->rw_mem, p->gpu_va);
+	nvgpu_gmmu_unmap_addr(vm, &p->sema_sea->sea_mem, p->gpu_va_ro);
+	nvgpu_gmmu_unmap_addr(vm, &p->rw_mem, p->gpu_va);
 	nvgpu_dma_free(pool_to_gk20a(p), &p->rw_mem);

 	p->gpu_va = 0;

@@ -1,7 +1,7 @@
 /*
  * GV11B syncpt cmdbuf
  *
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -100,8 +100,7 @@ int gv11b_syncpt_alloc_buf(struct nvgpu_channel *c,
 void gv11b_syncpt_free_buf(struct nvgpu_channel *c,
 	struct nvgpu_mem *syncpt_buf)
 {
-	nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va);
-	nvgpu_dma_free(c->g, syncpt_buf);
+	nvgpu_dma_unmap_free(c->vm, syncpt_buf);
 }

 int gv11b_syncpt_get_sync_ro_map(struct vm_gk20a *vm,

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),

@@ -125,7 +125,7 @@ int vgpu_gv11b_syncpt_alloc_buf(struct nvgpu_channel *c,
 void vgpu_gv11b_syncpt_free_buf(struct nvgpu_channel *c,
 	struct nvgpu_mem *syncpt_buf)
 {
-	nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va);
+	nvgpu_gmmu_unmap(c->vm, syncpt_buf);
 	nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, GMMU_PAGE_SIZE_KERNEL);
 	nvgpu_dma_free(c->g, syncpt_buf);
 }

@@ -403,10 +403,17 @@ u64 nvgpu_gmmu_map_fixed(struct vm_gk20a *vm,
  *
  * @return None.
  */
-void nvgpu_gmmu_unmap(struct vm_gk20a *vm,
+void nvgpu_gmmu_unmap_addr(struct vm_gk20a *vm,
 		struct nvgpu_mem *mem,
 		u64 gpu_va);

+/**
+ * @brief Unmap a memory mapped by nvgpu_gmmu_map()/nvgpu_gmmu_map_fixed().
+ *
+ * This is like nvgpu_gmmu_unmap_addr() but with the address in nvgpu_mem.gpu_va.
+ */
+void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem);
+
 /**
  * @brief Compute number of words in a PTE.
  *

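The two-function split matters mainly for buffers mapped into more than one address space, where the per-VM GPU VA is tracked outside the nvgpu_mem itself. A minimal sketch of the two teardown patterns, mirroring the syncpt read-only mapping handled later in this patch (vm, mem, and g are whatever the caller already holds):

/* Mapping tracked per-VM (not in g->syncpt_mem.gpu_va): pass the VA explicitly. */
nvgpu_gmmu_unmap_addr(vm, &g->syncpt_mem, vm->syncpt_ro_map_gpu_va);
vm->syncpt_ro_map_gpu_va = 0ULL;

/* Mapping recorded in mem.gpu_va: the short form is enough. */
nvgpu_gmmu_unmap(vm, mem);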
@@ -109,7 +109,7 @@ __must_hold(&cde_app->mutex)

 	/* release mapped memory */
 	gk20a_deinit_cde_img(cde_ctx);
-	nvgpu_gmmu_unmap(vm, &cbc->compbit_store.mem,
+	nvgpu_gmmu_unmap_addr(vm, &cbc->compbit_store.mem,
 		cde_ctx->backing_store_vaddr);

 	/*

@@ -1442,7 +1442,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	return 0;

 err_init_cde_img:
-	nvgpu_gmmu_unmap(ch->vm, &cbc->compbit_store.mem, vaddr);
+	nvgpu_gmmu_unmap_addr(ch->vm, &cbc->compbit_store.mem, vaddr);
 err_map_backingstore:
 err_setup_bind:
 	nvgpu_vm_put(ch->vm);

@@ -409,6 +409,7 @@ nvgpu_gmmu_map
 nvgpu_gmmu_map_locked
 nvgpu_gmmu_map_fixed
 nvgpu_gmmu_unmap
+nvgpu_gmmu_unmap_addr
 nvgpu_gmmu_unmap_locked
 nvgpu_golden_ctx_verif_get_fault_injection
 nvgpu_gr_alloc

@@ -425,6 +425,7 @@ nvgpu_gmmu_map
 nvgpu_gmmu_map_locked
 nvgpu_gmmu_map_fixed
 nvgpu_gmmu_unmap
+nvgpu_gmmu_unmap_addr
 nvgpu_gmmu_unmap_locked
 nvgpu_golden_ctx_verif_get_fault_injection
 nvgpu_gr_alloc

@@ -552,7 +552,7 @@ int test_nvgpu_gmmu_map_unmap(struct unit_module *m, struct gk20a *g,
 	}

 	/* Now unmap the buffer and make sure the PTE is now invalid */
-	nvgpu_gmmu_unmap(g->mm.pmu.vm, &mem, mem.gpu_va);
+	nvgpu_gmmu_unmap(g->mm.pmu.vm, &mem);

 	result = nvgpu_get_pte(g, g->mm.pmu.vm, mem.gpu_va, &pte[0]);
 	if (result != 0) {

@@ -856,7 +856,7 @@ int test_nvgpu_gmmu_map_unmap_adv(struct unit_module *m,
 		g->ops.fb.tlb_invalidate = hal_fb_tlb_invalidate_fail;
 	}

-	nvgpu_gmmu_unmap(g->mm.pmu.vm, &mem, vaddr);
+	nvgpu_gmmu_unmap_addr(g->mm.pmu.vm, &mem, vaddr);

 	if (params->special_unmap_tbl_invalidate_fail) {
 		/* Restore previous op */

@@ -1074,7 +1074,7 @@ int test_nvgpu_page_table_c1_full(struct unit_module *m, struct gk20a *g,
 	}

 	/* 3.3. Free the mapping */
-	nvgpu_gmmu_unmap(vm, &mem[mem_i], mem[mem_i].gpu_va);
+	nvgpu_gmmu_unmap(vm, &mem[mem_i]);

 	/* 3.4. Verify that the mapping has been cleared */
 	if (check_pte_invalidated(m, g, vm, &mem[mem_i]) != 0) {

@@ -1115,7 +1115,7 @@ static int c2_fixed_allocation(struct unit_module *m, struct gk20a *g,
 	}

 	/* Free the mapping */
-	nvgpu_gmmu_unmap(vm, mem_fixed, mem_fixed->gpu_va);
+	nvgpu_gmmu_unmap(vm, mem_fixed);

 	/* Verify that the mapping has been cleared */
 	if (check_pte_invalidated(m, g, vm, mem_fixed) != 0) {

@@ -210,7 +210,7 @@ done:

 	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
 		ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
-		nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
+		nvgpu_gmmu_unmap_addr(ch->vm, &g->syncpt_mem,
 			ch->vm->syncpt_ro_map_gpu_va);
 		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
 	}

@@ -262,7 +262,7 @@ done:

 	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
 		ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
-		nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
+		nvgpu_gmmu_unmap_addr(ch->vm, &g->syncpt_mem,
 			ch->vm->syncpt_ro_map_gpu_va);
 		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
 	}

@@ -305,7 +305,7 @@ done:

 	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
 		ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
-		nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
+		nvgpu_gmmu_unmap_addr(ch->vm, &g->syncpt_mem,
 			ch->vm->syncpt_ro_map_gpu_va);
 		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
 	}

@@ -326,7 +326,7 @@ static void syncpt_ro_map_gpu_va_clear(struct gk20a *g, struct nvgpu_channel *ch
 {
 	if (nvgpu_mem_is_valid(&g->syncpt_mem) &&
 		ch->vm->syncpt_ro_map_gpu_va != 0ULL) {
-		nvgpu_gmmu_unmap(ch->vm, &g->syncpt_mem,
+		nvgpu_gmmu_unmap_addr(ch->vm, &g->syncpt_mem,
 			ch->vm->syncpt_ro_map_gpu_va);
 		ch->vm->syncpt_ro_map_gpu_va = 0ULL;
 	} else if (ch->vm->syncpt_ro_map_gpu_va != 0ULL) {