gpu: nvgpu: mm: fix MISRA 4.7 violations in vm.c

This fixes MISRA rule 4.7 violations in the function nvgpu_vm_map(). The
violations were caused by using ERR_PTR() to encode error information in
the returned pointer. Rather than returning errors through a pointer,
change the API to return an int and pass the mapped buffer back through
a pointer argument.
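
For illustration, here is a minimal, self-contained sketch of the two
error-reporting styles (not nvgpu code: tracking_buf, map_old, and
map_new are invented names, and the ERR_PTR()/IS_ERR()/PTR_ERR()
helpers are simplified stand-ins for the kernel macros):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Kernel-style helpers: a negative errno is cast into the top
     * range of the address space and carried inside the pointer. */
    #define MAX_ERRNO     4095
    #define ERR_PTR(err)  ((void *)(intptr_t)(err))
    #define IS_ERR(ptr)   ((uintptr_t)(ptr) >= (uintptr_t)-(intptr_t)MAX_ERRNO)
    #define PTR_ERR(ptr)  ((long)(intptr_t)(ptr))

    struct tracking_buf { int id; };

    /* Old style: the error is punned into the returned pointer. */
    static struct tracking_buf *map_old(int fail)
    {
        static struct tracking_buf buf;

        if (fail)
            return ERR_PTR(-EINVAL);
        return &buf;
    }

    /* New style: a plain int carries the error and the buffer comes
     * back through an out-parameter; no int/pointer conversion. */
    static int map_new(int fail, struct tracking_buf **out)
    {
        static struct tracking_buf buf;

        *out = NULL;
        if (fail)
            return -EINVAL;
        *out = &buf;
        return 0;
    }

    int main(void)
    {
        struct tracking_buf *b = map_old(1);

        if (IS_ERR(b))
            printf("old style: err=%ld\n", PTR_ERR(b));

        if (map_new(1, &b) != 0)
            printf("new style: err reported as plain int\n");
        return 0;
    }

With the new style, callers test a plain int for errors instead of
decoding a pointer value.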

JIRA NVGPU-3332

Change-Id: I2852a6de808d9203b8c7826e2b8211bab97ccd16
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2114027
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vinod Gopalakrishnakurup <vinodg@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Philip Elcan, 2019-05-07 11:43:31 -04:00
Committed by: mobile promotions
Commit: bceac52f0b (parent 43c6e208fd)
4 changed files with 63 additions and 56 deletions


@@ -866,18 +866,19 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 	nvgpu_big_free(vm->mm->g, mapped_buffers);
 }
 
-struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
-                                      struct nvgpu_os_buffer *os_buf,
-                                      struct nvgpu_sgt *sgt,
-                                      u64 map_addr,
-                                      u64 map_size,
-                                      u64 phys_offset,
-                                      enum gk20a_mem_rw_flag rw,
-                                      u32 flags,
-                                      s16 compr_kind,
-                                      s16 incompr_kind,
-                                      struct vm_gk20a_mapping_batch *batch,
-                                      enum nvgpu_aperture aperture)
+int nvgpu_vm_map(struct vm_gk20a *vm,
+                 struct nvgpu_os_buffer *os_buf,
+                 struct nvgpu_sgt *sgt,
+                 u64 map_addr,
+                 u64 map_size,
+                 u64 phys_offset,
+                 enum gk20a_mem_rw_flag rw,
+                 u32 flags,
+                 s16 compr_kind,
+                 s16 incompr_kind,
+                 struct vm_gk20a_mapping_batch *batch,
+                 enum nvgpu_aperture aperture,
+                 struct nvgpu_mapped_buf **mapped_buffer_arg)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 	struct nvgpu_mapped_buf *mapped_buffer = NULL;
@@ -901,12 +902,14 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 	 */
 	u8 pte_kind;
 
+	*mapped_buffer_arg = NULL;
+
 	if (vm->userspace_managed &&
 	    (flags & NVGPU_VM_MAP_FIXED_OFFSET) == 0U) {
 		nvgpu_err(g,
 			  "non-fixed-offset mapping not available on "
 			  "userspace managed address spaces");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	binfo.flags = flags;
@@ -936,7 +939,8 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 		if (mapped_buffer != NULL) {
 			nvgpu_ref_get(&mapped_buffer->ref);
 			nvgpu_mutex_release(&vm->update_gmmu_lock);
-			return mapped_buffer;
+			*mapped_buffer_arg = mapped_buffer;
+			return 0;
 		}
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
 	}
@@ -947,8 +951,9 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 	mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer));
 	if (mapped_buffer == NULL) {
 		nvgpu_warn(g, "oom allocating tracking buffer");
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	}
+	*mapped_buffer_arg = mapped_buffer;
 
 	align = nvgpu_sgt_alignment(g, sgt);
 	if (g->mm.disable_bigpage) {
@@ -1151,7 +1156,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
-	return mapped_buffer;
+	return 0;
 
 clean_up:
 	if (mapped_buffer->addr != 0ULL) {
@@ -1169,7 +1174,7 @@ clean_up:
 clean_up_nolock:
 	nvgpu_kfree(g, mapped_buffer);
 
-	return ERR_PTR(err);
+	return err;
 }
 
 /*


@@ -255,18 +255,19 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
                                                u32 flags,
                                                int kind);
 
-struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
-                                      struct nvgpu_os_buffer *os_buf,
-                                      struct nvgpu_sgt *sgt,
-                                      u64 map_addr,
-                                      u64 map_size,
-                                      u64 phys_offset,
-                                      enum gk20a_mem_rw_flag rw,
-                                      u32 flags,
-                                      s16 compr_kind,
-                                      s16 incompr_kind,
-                                      struct vm_gk20a_mapping_batch *batch,
-                                      enum nvgpu_aperture aperture);
+int nvgpu_vm_map(struct vm_gk20a *vm,
+                 struct nvgpu_os_buffer *os_buf,
+                 struct nvgpu_sgt *sgt,
+                 u64 map_addr,
+                 u64 map_size,
+                 u64 phys_offset,
+                 enum gk20a_mem_rw_flag rw,
+                 u32 flags,
+                 s16 compr_kind,
+                 s16 incompr_kind,
+                 struct vm_gk20a_mapping_batch *batch,
+                 enum nvgpu_aperture aperture,
+                 struct nvgpu_mapped_buf **mapped_buffer_arg);
 
 void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
 		    struct vm_gk20a_mapping_batch *batch);


@@ -219,23 +219,23 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
 		goto clean_up;
 	}
 
-	mapped_buffer = nvgpu_vm_map(vm,
-	                             &os_buf,
-	                             nvgpu_sgt,
-	                             map_addr,
-	                             mapping_size,
-	                             buffer_offset,
-	                             rw_flag,
-	                             flags,
-	                             compr_kind,
-	                             incompr_kind,
-	                             batch,
-	                             gk20a_dmabuf_aperture(g, dmabuf));
+	err = nvgpu_vm_map(vm,
+	                   &os_buf,
+	                   nvgpu_sgt,
+	                   map_addr,
+	                   mapping_size,
+	                   buffer_offset,
+	                   rw_flag,
+	                   flags,
+	                   compr_kind,
+	                   incompr_kind,
+	                   batch,
+	                   gk20a_dmabuf_aperture(g, dmabuf),
+	                   &mapped_buffer);
 
 	nvgpu_sgt_free(g, nvgpu_sgt);
 
-	if (IS_ERR(mapped_buffer)) {
-		err = PTR_ERR(mapped_buffer);
+	if (err != 0) {
 		goto clean_up;
 	}
 


@@ -227,19 +227,20 @@ static int map_buffer(struct unit_module *m,
 		}
 	}
 
-	mapped_buf = nvgpu_vm_map(vm,
-	                          &os_buf,
-	                          sgt,
-	                          gpu_va,
-	                          buf_size,
-	                          0,
-	                          gk20a_mem_flag_none,
-	                          NVGPU_VM_MAP_CACHEABLE,
-	                          0,
-	                          0,
-	                          NULL,
-	                          APERTURE_SYSMEM);
-	if (mapped_buf == NULL) {
+	ret = nvgpu_vm_map(vm,
+	                   &os_buf,
+	                   sgt,
+	                   gpu_va,
+	                   buf_size,
+	                   0,
+	                   gk20a_mem_flag_none,
+	                   NVGPU_VM_MAP_CACHEABLE,
+	                   0,
+	                   0,
+	                   NULL,
+	                   APERTURE_SYSMEM,
+	                   &mapped_buf);
+	if (ret != 0) {
 		unit_err(m, "Failed to map buffer into the GPU virtual address"
 			 " space\n");
 		ret = UNIT_FAIL;