From 5ac1e40296c89d59e8a5b36526e335b7c415f1b3 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Mon, 17 Dec 2018 15:49:09 -0800
Subject: [PATCH] gpu: nvgpu: MISRA rule 21.2 fixes in VM

Delete the '__' prefix from the following two functions:

  __nvgpu_vm_alloc_va()
  __nvgpu_vm_free_va()

JIRA NVGPU-1029

Change-Id: I02c6dcb9cbf744b830cacbd5b9ea621abe99e9a7
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1974843
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Vinod Gopalakrishnakurup
Reviewed-by: svc-mobile-coverity
Reviewed-by: svc-mobile-misra
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/gmmu/page_table.c |  6 +--
 drivers/gpu/nvgpu/common/mm/vm.c              |  5 +-
 drivers/gpu/nvgpu/include/nvgpu/vm.h          |  6 +--
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c  |  2 +-
 drivers/gpu/nvgpu/vgpu/gr_vgpu.c              | 52 +++++++++----------
 .../gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c    | 14 ++---
 .../gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c  |  8 +--
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c              |  2 +-
 8 files changed, 46 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
index 6ba21f7f2..facc9d235 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu/page_table.c
@@ -821,7 +821,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	 * GPU VA range. This facilitates fixed mappings.
 	 */
 	if (vaddr == 0ULL) {
-		vaddr = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
+		vaddr = nvgpu_vm_alloc_va(vm, size, pgsz_idx);
 		if (vaddr == 0ULL) {
 			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
@@ -847,7 +847,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,

 fail_validate:
 	if (allocated) {
-		__nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
+		nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 	}
 fail_alloc:
 	nvgpu_err(g, "%s: failed with err=%d", __func__, err);
@@ -879,7 +879,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 	};

 	if (va_allocated) {
-		__nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
+		nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 	}

 	/* unmap here needs to know the page size we assigned at mapping */
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 6ad9acf01..bc1930ef3 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -124,8 +124,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
 	pdb->entries = NULL;
 }

-u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
-
+u64 nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 {
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_allocator *vma = NULL;
@@ -161,7 +160,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 	return addr;
 }

-void __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
+void nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
 {
 	struct nvgpu_allocator *vma = vm->vma[pgsz_idx];

diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index 95faef6e3..28109b4e4 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -324,9 +324,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
  * Ideally the RM server can just batch mappings but until such a time this
  * will be used by the vgpu code.
  */
-u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
-			u32 pgsz_idx);
-void __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
-			u32 pgsz_idx);
+u64 nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx);
+void nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx);

 #endif /* NVGPU_VM_H */
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index 0cf3b5c71..fc75f630d 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -95,7 +95,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,

 	/* Allocate (or validate when map_offset != 0) the virtual address. */
 	if (!map_offset) {
-		map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
+		map_offset = nvgpu_vm_alloc_va(vm, size, pgsz_idx);
 		if (!map_offset) {
 			nvgpu_err(g, "failed to allocate va space");
 			err = -ENOMEM;
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index dbfd8c282..90f344ce7 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -190,7 +190,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;

 	/* Circular Buffer */
-	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[CIRCULAR].mem.size,
 			GMMU_PAGE_SIZE_KERNEL);

@@ -200,7 +200,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;

 	/* Attribute Buffer */
-	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[ATTRIBUTE].mem.size,
 			GMMU_PAGE_SIZE_KERNEL);

@@ -210,7 +210,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;

 	/* Page Pool */
-	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PAGEPOOL].mem.size,
 			GMMU_PAGE_SIZE_KERNEL);
 	if (!gpu_va)
@@ -219,7 +219,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;

 	/* Priv register Access Map */
-	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
 			GMMU_PAGE_SIZE_KERNEL);
 	if (!gpu_va)
@@ -230,7 +230,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,

 	/* FECS trace Buffer */
 #ifdef CONFIG_GK20A_CTXSW_TRACE
-	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+	gpu_va = nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[FECS_TRACE_BUFFER].mem.size,
 			GMMU_PAGE_SIZE_KERNEL);

@@ -261,8 +261,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 clean_up:
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-					   GMMU_PAGE_SIZE_KERNEL);
+			nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+					 GMMU_PAGE_SIZE_KERNEL);
 			g_bfr_va[i] = 0;
 		}
 	}
@@ -284,8 +284,8 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)

 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
-			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-					   GMMU_PAGE_SIZE_KERNEL);
+			nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+					 GMMU_PAGE_SIZE_KERNEL);
 			g_bfr_va[i] = 0;
 			g_bfr_size[i] = 0;
 		}
@@ -313,9 +313,9 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size;
 	gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;

-	gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
-						gr->ctx_vars.buffer_total_size,
-						GMMU_PAGE_SIZE_KERNEL);
+	gr_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
+					       gr->ctx_vars.buffer_total_size,
+					       GMMU_PAGE_SIZE_KERNEL);
 	if (!gr_ctx->mem.gpu_va)
 		return -ENOMEM;
@@ -332,8 +332,8 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,

 	if (unlikely(err)) {
 		nvgpu_err(g, "fail to alloc gr_ctx");
-		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				   GMMU_PAGE_SIZE_KERNEL);
+		nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
 		gr_ctx->mem.aperture = APERTURE_INVALID;
 	}
@@ -358,9 +358,9 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	patch_ctx = &tsg->gr_ctx->patch_ctx;
 	patch_ctx->mem.size = 128 * sizeof(u32);
-	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
-						    patch_ctx->mem.size,
-						    GMMU_PAGE_SIZE_KERNEL);
+	patch_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch_vm,
+						  patch_ctx->mem.size,
+						  GMMU_PAGE_SIZE_KERNEL);
 	if (!patch_ctx->mem.gpu_va)
 		return -ENOMEM;
@@ -370,8 +370,8 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	p->patch_ctx_va = patch_ctx->mem.gpu_va;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
-		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				   GMMU_PAGE_SIZE_KERNEL);
+		nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
 		err = -ENOMEM;
 	}
@@ -388,8 +388,8 @@ static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)

 	if (patch_ctx->mem.gpu_va) {
 		/* server will free on channel close */
-		__nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va,
-				   GMMU_PAGE_SIZE_KERNEL);
+		nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
 		patch_ctx->mem.gpu_va = 0;
 	}
@@ -408,8 +408,8 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)

 	/* server will free on channel close */
-	__nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va,
-			   GMMU_PAGE_SIZE_KERNEL);
+	nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va,
+			 GMMU_PAGE_SIZE_KERNEL);
 	pm_ctx->mem.gpu_va = 0;
 }
@@ -431,8 +431,8 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);

-	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-			   GMMU_PAGE_SIZE_KERNEL);
+	nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+			 GMMU_PAGE_SIZE_KERNEL);

 	tsg = &g->fifo.tsg[gr_ctx->tsgid];
 	vgpu_gr_unmap_global_ctx_buffers(tsg);
@@ -1110,7 +1110,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 	if (mode != NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW) {
 		/* Allocate buffer if necessary */
 		if (pm_ctx->mem.gpu_va == 0) {
-			pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
+			pm_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch->vm,
 					g->gr.ctx_vars.pm_ctxsw_image_size,
 					GMMU_PAGE_SIZE_KERNEL);
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
index fed2b7407..f991c7737 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
@@ -42,7 +42,7 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
 	if (vm->syncpt_ro_map_gpu_va)
 		return 0;

-	vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm,
+	vm->syncpt_ro_map_gpu_va = nvgpu_vm_alloc_va(vm,
 			g->syncpt_unit_size, GMMU_PAGE_SIZE_KERNEL);
 	if (!vm->syncpt_ro_map_gpu_va) {
@@ -63,8 +63,8 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
 		nvgpu_err(g, "mapping read-only va space failed err %d",
 			err);
-		__nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va,
-				   GMMU_PAGE_SIZE_KERNEL);
+		nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
 		vm->syncpt_ro_map_gpu_va = 0;
 		return err;
 	}
@@ -91,7 +91,7 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
 	if (err)
 		return err;

-	syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
+	syncpt_buf->gpu_va = nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
 			GMMU_PAGE_SIZE_KERNEL);
 	if (!syncpt_buf->gpu_va) {
 		nvgpu_err(g, "allocating syncpt va space failed");
@@ -110,8 +110,8 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
 	err = err ? err : msg.ret;
 	if (err) {
 		nvgpu_err(g, "mapping syncpt va space failed err %d", err);
-		__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
-				   GMMU_PAGE_SIZE_KERNEL);
+		nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
 		return err;
 	}
@@ -122,7 +122,7 @@ void vgpu_gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c,
 		struct nvgpu_mem *syncpt_buf)
 {
 	nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va);
-	__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, GMMU_PAGE_SIZE_KERNEL);
+	nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, GMMU_PAGE_SIZE_KERNEL);
 	nvgpu_dma_free(c->g, syncpt_buf);
 }
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
index e49d99ba1..a59c58c38 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
@@ -39,7 +39,7 @@ int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
 	msg.cmd = TEGRA_VGPU_CMD_ALLOC_CTX_HEADER;
 	msg.handle = vgpu_get_handle(c->g);
 	p->ch_handle = c->virt_ctx;
-	p->ctx_header_va = __nvgpu_vm_alloc_va(c->vm,
+	p->ctx_header_va = nvgpu_vm_alloc_va(c->vm,
 			c->g->ops.gr.ctxsw_prog.hw_get_fecs_header_size(),
 			GMMU_PAGE_SIZE_KERNEL);
 	if (!p->ctx_header_va) {
@@ -50,7 +50,7 @@ int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
 	err = err ? err : msg.ret;
 	if (unlikely(err)) {
 		nvgpu_err(c->g, "alloc ctx_header failed err %d", err);
-		__nvgpu_vm_free_va(c->vm, p->ctx_header_va,
+		nvgpu_vm_free_va(c->vm, p->ctx_header_va,
 				GMMU_PAGE_SIZE_KERNEL);
 		return err;
 	}
@@ -75,8 +75,8 @@ void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c)
 		err = err ? err : msg.ret;
 		if (unlikely(err))
 			nvgpu_err(c->g, "free ctx_header failed err %d", err);
-		__nvgpu_vm_free_va(c->vm, ctxheader->gpu_va,
-				   GMMU_PAGE_SIZE_KERNEL);
+		nvgpu_vm_free_va(c->vm, ctxheader->gpu_va,
+				 GMMU_PAGE_SIZE_KERNEL);
 		ctxheader->gpu_va = 0;
 	}
 }
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 214389e66..78709ed64 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -107,7 +107,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 		nvgpu_err(g, "failed to update gmmu ptes on unmap");

 	if (va_allocated) {
-		__nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
+		nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
 	}
 	/* TLB invalidate handled on server side */
 }
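--
Background note: MISRA C:2012 Rule 21.2 states that a reserved identifier or
reserved macro name shall not be declared, and C99 7.1.3 reserves identifiers
beginning with an underscore (in particular a double underscore) for the
implementation. Driver-defined names such as __nvgpu_vm_alloc_va() therefore
encroach on the reserved namespace, and dropping the '__' prefix is the
compliant fix. The sketch below only illustrates the alloc/free pairing that
the patch rewrites in every caller; it is not code from the nvgpu tree, and
do_map(), the stub bodies, and the example values are assumptions made so the
snippet compiles standalone.

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

struct vm_gk20a;	/* opaque VM handle, used only by pointer here */

/*
 * Stand-ins for the renamed API (formerly __nvgpu_vm_alloc_va() and
 * __nvgpu_vm_free_va()); the real definitions live in common/mm/vm.c.
 */
static u64 nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
{
	(void)vm; (void)size; (void)pgsz_idx;
	return 0x100000ULL;	/* pretend-allocated VA base */
}

static void nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
{
	(void)vm; (void)addr; (void)pgsz_idx;
}

/* Hypothetical helper standing in for the real GMMU mapping step. */
static int do_map(struct vm_gk20a *vm, u64 vaddr, u64 size)
{
	(void)vm; (void)vaddr; (void)size;
	return 0;
}

/*
 * The caller pattern seen throughout the patch: allocate a VA range,
 * and on any failure free it with the same pgsz_idx used to allocate.
 */
static int map_buffer_sketch(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
{
	u64 vaddr = nvgpu_vm_alloc_va(vm, size, pgsz_idx);

	if (vaddr == 0ULL)
		return -1;	/* -ENOMEM in the real driver */

	if (do_map(vm, vaddr, size) != 0) {
		nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* NULL vm is fine here: the stubs never dereference it. */
	return map_buffer_sketch(0, 4096ULL, 0);
}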