gpu: nvgpu: MISRA rule 21.2 fixes in VM

Delete the '__' prefix from the following two functions:

  __nvgpu_vm_alloc_va()
  __nvgpu_vm_free_va()
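
Identifiers that begin with an underscore followed by another
underscore or an uppercase letter are reserved for the implementation
(C99 7.1.3), which is what MISRA C:2012 Rule 21.2 flags here. As a
minimal sketch of the rename (prototypes taken from the header change
below; this note is editorial context, not part of the original diff):

  /* Non-compliant: the '__' prefix lands in the reserved namespace. */
  u64  __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx);
  void __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx);

  /* Compliant: ordinary nvgpu_-prefixed names, behavior unchanged. */
  u64  nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx);
  void nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx);

Callers are updated mechanically; no functional change is intended.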

JIRA NVGPU-1029

Change-Id: I02c6dcb9cbf744b830cacbd5b9ea621abe99e9a7
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1974843
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Alex Waterman <alexw@nvidia.com>
Date:      2018-12-17 15:49:09 -08:00
Committer: mobile promotions
Commit:    5ac1e40296 (parent 1a611c9928)

8 changed files with 46 additions and 49 deletions

View File

@@ -821,7 +821,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
          * GPU VA range. This facilitates fixed mappings.
          */
         if (vaddr == 0ULL) {
-                vaddr = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
+                vaddr = nvgpu_vm_alloc_va(vm, size, pgsz_idx);
                 if (vaddr == 0ULL) {
                         nvgpu_err(g, "failed to allocate va space");
                         err = -ENOMEM;
@@ -847,7 +847,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 fail_validate:
         if (allocated) {
-                __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
+                nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
         }
 fail_alloc:
         nvgpu_err(g, "%s: failed with err=%d", __func__, err);
@@ -879,7 +879,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
         };
         if (va_allocated) {
-                __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
+                nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
         }
         /* unmap here needs to know the page size we assigned at mapping */

View File

@@ -124,8 +124,7 @@ static void nvgpu_vm_free_entries(struct vm_gk20a *vm,
         pdb->entries = NULL;
 }
-u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
+u64 nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
 {
         struct gk20a *g = vm->mm->g;
         struct nvgpu_allocator *vma = NULL;
@@ -161,7 +160,7 @@ u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx)
         return addr;
 }
-void __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
+void nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx)
 {
         struct nvgpu_allocator *vma = vm->vma[pgsz_idx];

View File

@@ -324,9 +324,7 @@ struct vm_gk20a *nvgpu_vm_init(struct gk20a *g,
  * Ideally the RM server can just batch mappings but until such a time this
  * will be used by the vgpu code.
  */
-u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size,
-                        u32 pgsz_idx);
-void __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr,
-                        u32 pgsz_idx);
+u64 nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, u32 pgsz_idx);
+void nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, u32 pgsz_idx);
 #endif /* NVGPU_VM_H */

View File

@@ -95,7 +95,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
         /* Allocate (or validate when map_offset != 0) the virtual address. */
         if (!map_offset) {
-                map_offset = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);
+                map_offset = nvgpu_vm_alloc_va(vm, size, pgsz_idx);
                 if (!map_offset) {
                         nvgpu_err(g, "failed to allocate va space");
                         err = -ENOMEM;

View File

@@ -190,7 +190,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
         g_bfr_size = tsg->gr_ctx->global_ctx_buffer_size;
         /* Circular Buffer */
-        gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+        gpu_va = nvgpu_vm_alloc_va(ch_vm,
                         gr->global_ctx_buffer[CIRCULAR].mem.size,
                         GMMU_PAGE_SIZE_KERNEL);
@@ -200,7 +200,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
         g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;
         /* Attribute Buffer */
-        gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+        gpu_va = nvgpu_vm_alloc_va(ch_vm,
                         gr->global_ctx_buffer[ATTRIBUTE].mem.size,
                         GMMU_PAGE_SIZE_KERNEL);
@@ -210,7 +210,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
         g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
         /* Page Pool */
-        gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+        gpu_va = nvgpu_vm_alloc_va(ch_vm,
                         gr->global_ctx_buffer[PAGEPOOL].mem.size,
                         GMMU_PAGE_SIZE_KERNEL);
         if (!gpu_va)
@@ -219,7 +219,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
         g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;
         /* Priv register Access Map */
-        gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+        gpu_va = nvgpu_vm_alloc_va(ch_vm,
                         gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
                         GMMU_PAGE_SIZE_KERNEL);
         if (!gpu_va)
@@ -230,7 +230,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
         /* FECS trace Buffer */
 #ifdef CONFIG_GK20A_CTXSW_TRACE
-        gpu_va = __nvgpu_vm_alloc_va(ch_vm,
+        gpu_va = nvgpu_vm_alloc_va(ch_vm,
                         gr->global_ctx_buffer[FECS_TRACE_BUFFER].mem.size,
                         GMMU_PAGE_SIZE_KERNEL);
@@ -261,8 +261,8 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 clean_up:
         for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
                 if (g_bfr_va[i]) {
-                        __nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-                                           GMMU_PAGE_SIZE_KERNEL);
+                        nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+                                         GMMU_PAGE_SIZE_KERNEL);
                         g_bfr_va[i] = 0;
                 }
         }
@@ -284,8 +284,8 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
         for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
                 if (g_bfr_va[i]) {
-                        __nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-                                           GMMU_PAGE_SIZE_KERNEL);
+                        nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
+                                         GMMU_PAGE_SIZE_KERNEL);
                         g_bfr_va[i] = 0;
                         g_bfr_size[i] = 0;
                 }
@@ -313,9 +313,9 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
         gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size;
         gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;
-        gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
-                                                 gr->ctx_vars.buffer_total_size,
-                                                 GMMU_PAGE_SIZE_KERNEL);
+        gr_ctx->mem.gpu_va = nvgpu_vm_alloc_va(vm,
+                                               gr->ctx_vars.buffer_total_size,
+                                               GMMU_PAGE_SIZE_KERNEL);
         if (!gr_ctx->mem.gpu_va)
                 return -ENOMEM;
@@ -332,8 +332,8 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
         if (unlikely(err)) {
                 nvgpu_err(g, "fail to alloc gr_ctx");
-                __nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-                                   GMMU_PAGE_SIZE_KERNEL);
+                nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+                                 GMMU_PAGE_SIZE_KERNEL);
                 gr_ctx->mem.aperture = APERTURE_INVALID;
         }
@@ -358,9 +358,9 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
         patch_ctx = &tsg->gr_ctx->patch_ctx;
         patch_ctx->mem.size = 128 * sizeof(u32);
-        patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
-                                                    patch_ctx->mem.size,
-                                                    GMMU_PAGE_SIZE_KERNEL);
+        patch_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch_vm,
+                                                  patch_ctx->mem.size,
+                                                  GMMU_PAGE_SIZE_KERNEL);
         if (!patch_ctx->mem.gpu_va)
                 return -ENOMEM;
@@ -370,8 +370,8 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
         p->patch_ctx_va = patch_ctx->mem.gpu_va;
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
         if (err || msg.ret) {
-                __nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-                                   GMMU_PAGE_SIZE_KERNEL);
+                nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
+                                 GMMU_PAGE_SIZE_KERNEL);
                 err = -ENOMEM;
         }
@@ -388,8 +388,8 @@ static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
         if (patch_ctx->mem.gpu_va) {
                 /* server will free on channel close */
-                __nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va,
-                                   GMMU_PAGE_SIZE_KERNEL);
+                nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va,
+                                 GMMU_PAGE_SIZE_KERNEL);
                 patch_ctx->mem.gpu_va = 0;
         }
 }
@@ -408,8 +408,8 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
                 /* server will free on channel close */
-                __nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va,
-                                   GMMU_PAGE_SIZE_KERNEL);
+                nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va,
+                                 GMMU_PAGE_SIZE_KERNEL);
                 pm_ctx->mem.gpu_va = 0;
         }
@@ -431,8 +431,8 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
         err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
         WARN_ON(err || msg.ret);
-        __nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-                           GMMU_PAGE_SIZE_KERNEL);
+        nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
+                         GMMU_PAGE_SIZE_KERNEL);
         tsg = &g->fifo.tsg[gr_ctx->tsgid];
         vgpu_gr_unmap_global_ctx_buffers(tsg);
@@ -1110,7 +1110,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
         if (mode != NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW) {
                 /* Allocate buffer if necessary */
                 if (pm_ctx->mem.gpu_va == 0) {
-                        pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
+                        pm_ctx->mem.gpu_va = nvgpu_vm_alloc_va(ch->vm,
                                 g->gr.ctx_vars.pm_ctxsw_image_size,
                                 GMMU_PAGE_SIZE_KERNEL);

View File

@@ -42,7 +42,7 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
         if (vm->syncpt_ro_map_gpu_va)
                 return 0;
-        vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm,
+        vm->syncpt_ro_map_gpu_va = nvgpu_vm_alloc_va(vm,
                         g->syncpt_unit_size,
                         GMMU_PAGE_SIZE_KERNEL);
         if (!vm->syncpt_ro_map_gpu_va) {
@@ -63,8 +63,8 @@ static int set_syncpt_ro_map_gpu_va_locked(struct vm_gk20a *vm)
                 nvgpu_err(g,
                         "mapping read-only va space failed err %d",
                         err);
-                __nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va,
-                                   GMMU_PAGE_SIZE_KERNEL);
+                nvgpu_vm_free_va(vm, vm->syncpt_ro_map_gpu_va,
+                                 GMMU_PAGE_SIZE_KERNEL);
                 vm->syncpt_ro_map_gpu_va = 0;
                 return err;
         }
@@ -91,7 +91,7 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
         if (err)
                 return err;
-        syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
+        syncpt_buf->gpu_va = nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
                         GMMU_PAGE_SIZE_KERNEL);
         if (!syncpt_buf->gpu_va) {
                 nvgpu_err(g, "allocating syncpt va space failed");
@@ -110,8 +110,8 @@ int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
         err = err ? err : msg.ret;
         if (err) {
                 nvgpu_err(g, "mapping syncpt va space failed err %d", err);
-                __nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
-                                   GMMU_PAGE_SIZE_KERNEL);
+                nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
+                                 GMMU_PAGE_SIZE_KERNEL);
                 return err;
         }
@@ -122,7 +122,7 @@ void vgpu_gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c,
         struct nvgpu_mem *syncpt_buf)
 {
         nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va);
-        __nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, GMMU_PAGE_SIZE_KERNEL);
+        nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, GMMU_PAGE_SIZE_KERNEL);
         nvgpu_dma_free(c->g, syncpt_buf);
 }

View File

@@ -39,7 +39,7 @@ int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
         msg.cmd = TEGRA_VGPU_CMD_ALLOC_CTX_HEADER;
         msg.handle = vgpu_get_handle(c->g);
         p->ch_handle = c->virt_ctx;
-        p->ctx_header_va = __nvgpu_vm_alloc_va(c->vm,
+        p->ctx_header_va = nvgpu_vm_alloc_va(c->vm,
                         c->g->ops.gr.ctxsw_prog.hw_get_fecs_header_size(),
                         GMMU_PAGE_SIZE_KERNEL);
         if (!p->ctx_header_va) {
@@ -50,7 +50,7 @@ int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
         err = err ? err : msg.ret;
         if (unlikely(err)) {
                 nvgpu_err(c->g, "alloc ctx_header failed err %d", err);
-                __nvgpu_vm_free_va(c->vm, p->ctx_header_va,
+                nvgpu_vm_free_va(c->vm, p->ctx_header_va,
                         GMMU_PAGE_SIZE_KERNEL);
                 return err;
         }
@@ -75,8 +75,8 @@ void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c)
                 err = err ? err : msg.ret;
                 if (unlikely(err))
                         nvgpu_err(c->g, "free ctx_header failed err %d", err);
-                __nvgpu_vm_free_va(c->vm, ctxheader->gpu_va,
-                                   GMMU_PAGE_SIZE_KERNEL);
+                nvgpu_vm_free_va(c->vm, ctxheader->gpu_va,
+                                 GMMU_PAGE_SIZE_KERNEL);
                 ctxheader->gpu_va = 0;
         }
 }

View File

@@ -107,7 +107,7 @@ void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
                 nvgpu_err(g, "failed to update gmmu ptes on unmap");
         if (va_allocated) {
-                __nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
+                nvgpu_vm_free_va(vm, vaddr, pgsz_idx);
         }
         /* TLB invalidate handled on server side */
 }