From 5e7d459927477cddaa178f634165e65252c8d35a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Konsta=20H=C3=B6ltt=C3=A4?=
Date: Mon, 4 Oct 2021 16:25:07 +0300
Subject: [PATCH] gpu: nvgpu: restructure gmmu cache maintenance
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Move the logic that manages tlb invalidation and l2 flushes to separate
functions to keep the complexity manageable and to help reuse the logic.

Bug 200778663

Change-Id: Ib9dd79c1ec92933a59dc2c8e4cd3fa8355433bbe
Signed-off-by: Konsta Hölttä
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2604939
Reviewed-by: Alex Waterman
Reviewed-by: mobile promotions
Tested-by: mobile promotions
GVS: Gerrit_Virtual_Submit
---
 drivers/gpu/nvgpu/common/mm/gmmu/page_table.c | 70 ++++++++++++-------
 1 file changed, 45 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index 9647861ae..52488c140 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
@@ -811,6 +811,47 @@ static int nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
 	return err;
 }
 
+static int nvgpu_gmmu_cache_maint_map(struct gk20a *g, struct vm_gk20a *vm,
+		struct vm_gk20a_mapping_batch *batch)
+{
+	int err = 0;
+
+	if (batch == NULL) {
+		err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		if (err != 0) {
+			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
+		}
+	} else {
+		batch->need_tlb_invalidate = true;
+	}
+
+	return err;
+}
+
+static void nvgpu_gmmu_cache_maint_unmap(struct gk20a *g, struct vm_gk20a *vm,
+		struct vm_gk20a_mapping_batch *batch)
+{
+	int err;
+
+	if (batch == NULL) {
+		if (g->ops.mm.cache.l2_flush(g, true) != 0) {
+			nvgpu_err(g, "gk20a_mm_l2_flush[1] failed");
+		}
+		err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		if (err != 0) {
+			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
+		}
+	} else {
+		if (!batch->gpu_l2_flushed) {
+			if (g->ops.mm.cache.l2_flush(g, true) != 0) {
+				nvgpu_err(g, "gk20a_mm_l2_flush[2] failed");
+			}
+			batch->gpu_l2_flushed = true;
+		}
+		batch->need_tlb_invalidate = true;
+	}
+}
+
 /*
  * This is the true top level GMMU mapping logic. This breaks down the incoming
  * scatter gather table and does actual programming of GPU virtual address to
@@ -1003,14 +1044,9 @@ u64 nvgpu_gmmu_map_locked(struct vm_gk20a *vm,
 		goto fail_validate;
 	}
 
-	if (batch == NULL) {
-		err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
-		if (err != 0) {
-			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
-			goto fail_validate;
-		}
-	} else {
-		batch->need_tlb_invalidate = true;
+	err = nvgpu_gmmu_cache_maint_map(g, vm, batch);
+	if (err != 0) {
+		goto fail_validate;
 	}
 
 	return vaddr;
@@ -1066,23 +1102,7 @@ void nvgpu_gmmu_unmap_locked(struct vm_gk20a *vm,
 		nvgpu_err(g, "failed to update gmmu ptes on unmap");
 	}
 
-	if (batch == NULL) {
-		if (g->ops.mm.cache.l2_flush(g, true) != 0) {
-			nvgpu_err(g, "gk20a_mm_l2_flush[1] failed");
-		}
-		err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
-		if (err != 0) {
-			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
-		}
-	} else {
-		if (!batch->gpu_l2_flushed) {
-			if (g->ops.mm.cache.l2_flush(g, true) != 0) {
-				nvgpu_err(g, "gk20a_mm_l2_flush[2] failed");
-			}
-			batch->gpu_l2_flushed = true;
-		}
-		batch->need_tlb_invalidate = true;
-	}
+	nvgpu_gmmu_cache_maint_unmap(g, vm, batch);
 }
 
 u32 nvgpu_pte_words(struct gk20a *g)
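
Note (illustration only, not part of the patch): a minimal sketch of how the deferred cache maintenance could be reused when a caller batches several unmaps. Only struct vm_gk20a_mapping_batch, its need_tlb_invalidate/gpu_l2_flushed fields, and nvgpu_gmmu_cache_maint_unmap() come from the diff above; the caller function and the explicit finalize step are assumptions for illustration, since the real driver finalizes batches in its own code paths.

/*
 * Illustration only: hypothetical caller showing the batched path.
 * With a non-NULL batch, nvgpu_gmmu_cache_maint_unmap() flushes L2 at
 * most once (gpu_l2_flushed) and merely records that a TLB invalidate
 * is still needed (need_tlb_invalidate); the invalidate is then issued
 * once after all unmaps instead of once per unmap.
 */
static void example_unmap_batch(struct gk20a *g, struct vm_gk20a *vm,
		unsigned int count)
{
	struct vm_gk20a_mapping_batch batch = {
		.gpu_l2_flushed = false,
		.need_tlb_invalidate = false,
	};
	unsigned int i;

	for (i = 0; i < count; i++) {
		/*
		 * In the driver this would happen through
		 * nvgpu_gmmu_unmap_locked(vm, ..., &batch), which ends by
		 * calling the helper below.
		 */
		nvgpu_gmmu_cache_maint_unmap(g, vm, &batch);
	}

	/* Hypothetical finalize step: one deferred TLB invalidate. */
	if (batch.need_tlb_invalidate &&
	    g->ops.fb.tlb_invalidate(g, vm->pdb.mem) != 0) {
		nvgpu_err(g, "deferred tlb_invalidate failed");
	}
}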