gpu: nvgpu: MISRA rule 21.2 fixes in VM

Rename the __nvgpu_vm_find_mapped_buf*() functions to
nvgpu_vm_find_mapped_buf*(), dropping the '__' prefix. Identifiers
that begin with a double underscore are reserved for the
implementation, so declaring them violates MISRA C:2012 Rule 21.2
("A reserved identifier or reserved macro name shall not be
declared").

JIRA NVGPU-1029

Change-Id: I7144e8705550c77d9169d5ac643b93507abbe56f
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1974839
Reviewed-by: Scott Long <scottl@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Alex Waterman
Authored: 2018-12-17 15:24:29 -08:00
Committer: mobile promotions
Parent: 78c513790a
Commit: 7b8f776822
5 changed files with 15 additions and 15 deletions
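
For reference, C reserves every identifier that begins with a double
underscore for the implementation (C11 7.1.3), which is what brings
these names under MISRA C:2012 Rule 21.2. A minimal sketch of the
before/after pattern, with illustrative names rather than the real
nvgpu declarations:

/* Forward declarations so the prototypes below are self-contained. */
struct vm;
struct mapped_buf;

/* Non-compliant: the '__' prefix puts this name in the namespace
 * reserved for the compiler and standard library (Rule 21.2). */
struct mapped_buf *__find_mapped_buf(struct vm *vm, unsigned long addr);

/* Compliant: the same declaration with the reserved prefix dropped. */
struct mapped_buf *find_mapped_buf(struct vm *vm, unsigned long addr);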

@@ -701,7 +701,7 @@ static void nvgpu_remove_mapped_buf(struct vm_gk20a *vm,
vm->num_user_mapped_buffers--;
}
-struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf(
+struct nvgpu_mapped_buf *nvgpu_vm_find_mapped_buf(
struct vm_gk20a *vm, u64 addr)
{
struct nvgpu_rbtree_node *node = NULL;
@@ -715,7 +715,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf(
return mapped_buffer_from_rbtree_node(node);
}
-struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range(
+struct nvgpu_mapped_buf *nvgpu_vm_find_mapped_buf_range(
struct vm_gk20a *vm, u64 addr)
{
struct nvgpu_rbtree_node *node = NULL;
@@ -729,7 +729,7 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range(
return mapped_buffer_from_rbtree_node(node);
}
-struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than(
+struct nvgpu_mapped_buf *nvgpu_vm_find_mapped_buf_less_than(
struct vm_gk20a *vm, u64 addr)
{
struct nvgpu_rbtree_node *node = NULL;
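
The three lookups renamed above appear to differ only in how they
probe the VM's red-black tree of mapped buffers: an exact match on a
buffer's start address, a range match for the buffer whose interval
contains the address, and a less-than match for the buffer with the
highest start address below it. A simplified, self-contained sketch of
those semantics, with a plain array standing in for the rbtree and all
names illustrative rather than the nvgpu API:

#include <stddef.h>
#include <stdint.h>

struct buf { uint64_t addr, size; };

/* Exact match: the buffer whose start address equals 'addr'. */
static struct buf *find_buf(struct buf *b, size_t n, uint64_t addr)
{
	for (size_t i = 0; i < n; i++)
		if (b[i].addr == addr)
			return &b[i];
	return NULL;
}

/* Range match: the buffer whose [addr, addr + size) contains 'addr'. */
static struct buf *find_buf_range(struct buf *b, size_t n, uint64_t addr)
{
	for (size_t i = 0; i < n; i++)
		if (addr >= b[i].addr && addr - b[i].addr < b[i].size)
			return &b[i];
	return NULL;
}

/* Less-than match: the buffer with the highest start address below 'addr'. */
static struct buf *find_buf_less_than(struct buf *b, size_t n, uint64_t addr)
{
	struct buf *best = NULL;

	for (size_t i = 0; i < n; i++)
		if (b[i].addr < addr && (best == NULL || b[i].addr > best->addr))
			best = &b[i];
	return best;
}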
@@ -1210,7 +1210,7 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
+mapped_buffer = nvgpu_vm_find_mapped_buf(vm, offset);
if (mapped_buffer == NULL) {
goto done;
}

@@ -80,7 +80,7 @@ int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
/* check that this mapping does not collide with existing
* mappings by checking the buffer with the highest GPU VA
* that is less than our buffer end */
-buffer = __nvgpu_vm_find_mapped_buf_less_than(
+buffer = nvgpu_vm_find_mapped_buf_less_than(
vm, map_addr + map_size);
if (buffer != NULL && buffer->addr + buffer->size > map_addr) {
nvgpu_warn(g, "overlapping buffer map requested");
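
The comment in this hunk compresses a useful invariant: among all
existing mappings, only the one with the highest GPU VA below the new
buffer's end can collide with it, so a single less-than lookup
replaces a full scan. A self-contained sketch of that test, with
illustrative types rather than the nvgpu API:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct buf { uint64_t addr, size; };

/* 'nearest' is the mapping with the highest start address strictly
 * below map_addr + map_size, i.e. the less-than lookup's result.
 * If even that mapping ends at or before map_addr, nothing overlaps. */
static bool collides(const struct buf *nearest, uint64_t map_addr)
{
	return nearest != NULL && nearest->addr + nearest->size > map_addr;
}

int main(void)
{
	struct buf existing = { 0x1000, 0x2000 }; /* occupies [0x1000, 0x3000) */

	/* A new mapping at [0x2000, 0x3000) overlaps the tail of 'existing'. */
	assert(collides(&existing, 0x2000));

	/* A new mapping at [0x3000, 0x4000) starts exactly at its end: fine. */
	assert(!collides(&existing, 0x3000));
	return 0;
}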

@@ -282,11 +282,11 @@ u64 nvgpu_os_buf_get_size(struct nvgpu_os_buffer *os_buf);
/*
* These all require the VM update lock to be held.
*/
-struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf(
+struct nvgpu_mapped_buf *nvgpu_vm_find_mapped_buf(
struct vm_gk20a *vm, u64 addr);
-struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range(
+struct nvgpu_mapped_buf *nvgpu_vm_find_mapped_buf_range(
struct vm_gk20a *vm, u64 addr);
-struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than(
+struct nvgpu_mapped_buf *nvgpu_vm_find_mapped_buf_less_than(
struct vm_gk20a *vm, u64 addr);
int nvgpu_insert_mapped_buf(struct vm_gk20a *vm,
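
The lock comment above is the contract every caller in this diff
follows: take vm->update_gmmu_lock, perform the lookup, and finish
using the result before releasing the lock (see the nvgpu_vm_unmap()
and gpuva_to_iova_base() hunks). A sketch of a conforming caller; the
helper name addr_is_mapped() is hypothetical, and the snippet assumes
the nvgpu headers rather than compiling standalone:

/* Hypothetical helper, shown only to illustrate the locking contract. */
static bool addr_is_mapped(struct vm_gk20a *vm, u64 addr)
{
	struct nvgpu_mapped_buf *mapped_buffer;

	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
	mapped_buffer = nvgpu_vm_find_mapped_buf(vm, addr);
	nvgpu_mutex_release(&vm->update_gmmu_lock);

	/* The pointer may go stale once the lock is dropped, so only the
	 * yes/no answer escapes the critical section. */
	return mapped_buffer != NULL;
}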

@@ -68,7 +68,7 @@ static dma_addr_t gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr)
struct gk20a *g = gk20a_from_vm(vm);
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-buffer = __nvgpu_vm_find_mapped_buf(vm, gpu_vaddr);
+buffer = nvgpu_vm_find_mapped_buf(vm, gpu_vaddr);
if (buffer)
addr = nvgpu_mem_get_addr_sgl(g, buffer->os_priv.sgt->sgl);
nvgpu_mutex_release(&vm->update_gmmu_lock);

@@ -62,7 +62,7 @@ static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
return core_flags;
}
-static struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_reverse(
+static struct nvgpu_mapped_buf *nvgpu_vm_find_mapped_buf_reverse(
struct vm_gk20a *vm, struct dma_buf *dmabuf, u32 kind)
{
struct nvgpu_rbtree_node *node = NULL;
@@ -95,7 +95,7 @@ int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-mapped_buffer = __nvgpu_vm_find_mapped_buf_range(vm, gpu_va);
+mapped_buffer = nvgpu_vm_find_mapped_buf_range(vm, gpu_va);
if (!mapped_buffer) {
nvgpu_mutex_release(&vm->update_gmmu_lock);
return -EINVAL;
@@ -129,7 +129,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
struct nvgpu_mapped_buf *mapped_buffer = NULL;
if (flags & NVGPU_VM_MAP_FIXED_OFFSET) {
-mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, map_addr);
+mapped_buffer = nvgpu_vm_find_mapped_buf(vm, map_addr);
if (!mapped_buffer)
return NULL;
@@ -138,9 +138,9 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
return NULL;
} else {
mapped_buffer =
-			__nvgpu_vm_find_mapped_buf_reverse(vm,
-							   os_buf->dmabuf,
-							   kind);
+			nvgpu_vm_find_mapped_buf_reverse(vm,
+							 os_buf->dmabuf,
+							 kind);
if (!mapped_buffer)
return NULL;
}
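
Unlike the address-keyed lookups, the reverse variant renamed in this
hunk searches by backing buffer: given a dma_buf and a kind, it finds
an existing mapping so that a repeated map call can presumably reuse
it instead of creating a duplicate. A simplified, self-contained
sketch of that search, with a linear walk and illustrative types
standing in for the driver's rbtree enumeration:

#include <stddef.h>
#include <stdint.h>

struct mapping { const void *dmabuf; uint32_t kind; };

/* Return the first mapping backed by 'dmabuf' with the requested kind,
 * or NULL if the buffer has not been mapped that way yet. */
static struct mapping *find_mapping_reverse(struct mapping *maps, size_t n,
					    const void *dmabuf, uint32_t kind)
{
	for (size_t i = 0; i < n; i++)
		if (maps[i].dmabuf == dmabuf && maps[i].kind == kind)
			return &maps[i];
	return NULL;
}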