gpu: nvgpu: Fix some dma.[ch] MISRA violations

This doesn't correspond to a specific rule; it just cleans up
the violations introduced by http://git-master/r/1799807.

JIRA NVGPU-990

Change-Id: Ia20af754da9ad60f81d58ba00bf781a8c441827b
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1804887
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:     Alex Waterman
Date:       2018-08-22 15:06:23 -07:00
Committed:  mobile promotions
Commit:     c1b66bc6a9 (parent cdbe89a272)

2 changed files with 19 additions and 10 deletions
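
Context for the hunks below: the dma.c changes wrap single-statement if bodies in braces and stop returning void expressions from nvgpu_dma_free() (a sketch of that switch idiom follows the dma.c hunks), while the dma.h change defines the DMA flag bits with the driver's BIT32() helper instead of raw (1 << n) shifts (a sketch follows the dma.h hunk). The minimal sketch here illustrates the braced early-return plus vidmem-to-sysmem fallback shape with hypothetical toy functions; it is not the real nvgpu allocation code.

#include <stdio.h>

/* Toy stand-ins for the vidmem/sysmem allocation paths (hypothetical). */
static int toy_alloc_vid(void) { return -1; }  /* pretend vidmem is exhausted */
static int toy_alloc_sys(void) { return 0; }

static int toy_alloc(void)
{
	int err = toy_alloc_vid();

	/* MISRA-style cleanup: even a single-statement if body gets braces. */
	if (!err) {
		return 0;
	}
	/*
	 * Fall back to sysmem (which may then also fail) in case
	 * vidmem is exhausted.
	 */
	return toy_alloc_sys();
}

int main(void)
{
	printf("alloc result: %d\n", toy_alloc());
	return 0;
}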

dma.c

@@ -50,8 +50,10 @@ int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 				NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, mem);
-		if (!err)
+		if (!err) {
 			return 0;
+		}
 		/*
 		 * Fall back to sysmem (which may then also fail) in case
 		 * vidmem is exhausted.
@@ -105,8 +107,10 @@ int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 				flags | NVGPU_DMA_NO_KERNEL_MAPPING,
 				size, mem);
-		if (!err)
+		if (!err) {
 			return 0;
+		}
 		/*
 		 * Fall back to sysmem (which may then also fail) in case
 		 * vidmem is exhausted.
@@ -127,8 +131,9 @@ int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 {
 	int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem);
-	if (err)
+	if (err) {
 		return err;
+	}
 	mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
 				     gk20a_mem_flag_none, false,
@@ -157,8 +162,9 @@ int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 {
 	int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem);
-	if (err)
+	if (err) {
 		return err;
+	}
 	mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
 				     gk20a_mem_flag_none, false,
@@ -179,9 +185,11 @@ void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	switch (mem->aperture) {
 	case APERTURE_SYSMEM:
-		return nvgpu_dma_free_sys(g, mem);
+		nvgpu_dma_free_sys(g, mem);
+		break;
 	case APERTURE_VIDMEM:
-		return nvgpu_dma_free_vid(g, mem);
+		nvgpu_dma_free_vid(g, mem);
+		break;
 	default:
 		break; /* like free() on "null" memory */
 	}
@@ -189,8 +197,9 @@ void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
 {
-	if (mem->gpu_va)
+	if (mem->gpu_va) {
 		nvgpu_gmmu_unmap(vm, mem, mem->gpu_va);
+	}
 	mem->gpu_va = 0;
 	nvgpu_dma_free(vm->mm->g, mem);
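
The nvgpu_dma_free() hunk above replaces return nvgpu_dma_free_sys(g, mem); with a plain call followed by break. A return statement that carries an expression is not valid in a function whose return type is void under standard C (compilers often accept it as an extension), so compliance checkers flag it. A toy sketch of the resulting shape, using hypothetical stand-in helpers rather than the real nvgpu functions:

#include <stdio.h>

/* Hypothetical stand-ins for the aperture-specific free helpers. */
static void toy_free_sys(void) { printf("sysmem free\n"); }
static void toy_free_vid(void) { printf("vidmem free\n"); }

/* Void function: call the helper and break instead of "return helper();". */
static void toy_free(int aperture)
{
	switch (aperture) {
	case 0:
		toy_free_sys();
		break;
	case 1:
		toy_free_vid();
		break;
	default:
		break; /* like free() on "null" memory */
	}
}

int main(void)
{
	toy_free(0);
	toy_free(1);
	toy_free(2);
	return 0;
}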

dma.h

@@ -37,18 +37,18 @@ struct nvgpu_mem;
  * Don't create a virtual kernel mapping for the buffer but only allocate it;
  * this may save some resources. The buffer can be mapped later explicitly.
  */
-#define NVGPU_DMA_NO_KERNEL_MAPPING	(1 << 0)
+#define NVGPU_DMA_NO_KERNEL_MAPPING	BIT32(0)
 /*
  * Don't allow building the buffer from individual pages but require a
  * physically contiguous block.
  */
-#define NVGPU_DMA_FORCE_CONTIGUOUS	(1 << 1)
+#define NVGPU_DMA_FORCE_CONTIGUOUS	BIT32(1)
 /*
  * Make the mapping read-only.
  */
-#define NVGPU_DMA_READ_ONLY		(1 << 2)
+#define NVGPU_DMA_READ_ONLY		BIT32(2)
 /**
  * nvgpu_iommuable - Check if GPU is behind IOMMU
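
The dma.h hunk swaps the raw (1 << n) shifts for BIT32(n). The actual BIT32() definition lives elsewhere in the nvgpu headers and is not part of this diff; assuming it expands to an unsigned 32-bit single-bit mask (something along the lines of (u32)1U << (i)), the flag values are unchanged and only their type moves from a signed int shift to an explicit unsigned 32-bit value. A small stand-alone sketch of that assumption:

#include <stdint.h>

/* Assumed shape of the helper; the real nvgpu BIT32() may differ in detail. */
#define BIT32(i)	((uint32_t)1U << (i))

#define NVGPU_DMA_NO_KERNEL_MAPPING	BIT32(0)
#define NVGPU_DMA_FORCE_CONTIGUOUS	BIT32(1)
#define NVGPU_DMA_READ_ONLY		BIT32(2)

/* The numeric values match the old (1 << n) definitions. */
_Static_assert(NVGPU_DMA_NO_KERNEL_MAPPING == (1 << 0), "bit 0");
_Static_assert(NVGPU_DMA_FORCE_CONTIGUOUS == (1 << 1), "bit 1");
_Static_assert(NVGPU_DMA_READ_ONLY == (1 << 2), "bit 2");

int main(void)
{
	/* Flags still combine with bitwise OR, as before. */
	uint32_t flags = NVGPU_DMA_NO_KERNEL_MAPPING | NVGPU_DMA_READ_ONLY;

	return (flags == 0x5U) ? 0 : 1;
}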