gpu: nvgpu: fix MISRA 17.7 violations in mm

MISRA Rule 17.7 requires that the value returned by a non-void function
be used; an explicit cast to void also satisfies the rule. The fix is
either to use the return value or to change the function to return
void. This patch contains fixes for all 17.7 violations in common/mm
code.
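
For illustration only, the two compliant patterns look roughly like
this (a generic sketch, not code from this patch; the helper name
do_invalidate is made up):

    #include <stdio.h>

    /* Hypothetical helper that reports failure via its return value. */
    static int do_invalidate(int id)
    {
            return (id < 0) ? -1 : 0;
    }

    int main(void)
    {
            int err;

            /* Pattern 1: consume the return value and handle the error. */
            err = do_invalidate(3);
            if (err != 0) {
                    printf("do_invalidate() failed err=%d\n", err);
                    return err;
            }

            /* Pattern 2: explicitly discard a value that is deliberately unused. */
            (void) do_invalidate(-1);

            return 0;
    }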

JIRA NVGPU-3034

Change-Id: Ica4a0b00e08aea3af3774b9068c72bc59b9fe4b2
Signed-off-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2084068
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Nicolas Benech <nbenech@nvidia.com>
Authored:  2019-03-28 13:31:20 -04:00
Committed: mobile promotions
Commit:    bd1ae5c9e1
Parent:    63b17cb482
4 changed files with 69 additions and 14 deletions


@@ -827,7 +827,11 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 	}
 
 	if (batch == NULL) {
-		g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		if (err != 0) {
+			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
+			goto fail_validate;
+		}
 	} else {
 		batch->need_tlb_invalidate = true;
 	}
@@ -881,7 +885,10 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
 		if (gk20a_mm_l2_flush(g, true) != 0) {
 			nvgpu_err(g, "gk20a_mm_l2_flush[1] failed");
 		}
-		g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		if (err != 0) {
+			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
+		}
 	} else {
 		if (!batch->gpu_l2_flushed) {
 			if (gk20a_mm_l2_flush(g, true) != 0) {


@@ -42,12 +42,16 @@
 void nvgpu_vidmem_destroy(struct gk20a *g)
 {
 	struct nvgpu_timeout timeout;
+	int err;
 
 	if (!g->ops.fb.get_vidmem_size) {
 		return;
 	}
 
-	nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER);
+	err = nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER);
+	if (err != 0) {
+		nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
+	}
 
 	/*
 	 * Ensure that the thread runs one last time to flush anything in the
@@ -119,9 +123,13 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
 	if (gk20a_fence_out) {
 		struct nvgpu_timeout timeout;
 
-		nvgpu_timeout_init(g, &timeout,
+		err = nvgpu_timeout_init(g, &timeout,
 				   nvgpu_get_poll_timeout(g),
 				   NVGPU_TIMER_CPU_TIMER);
+		if (err != 0) {
+			nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
+			return err;
+		}
 
 		do {
 			err = gk20a_fence_wait(g, gk20a_fence_out,
@@ -228,11 +236,15 @@ static void nvgpu_vidmem_clear_pending_allocs(struct mm_gk20a *mm)
 {
 	struct gk20a *g = mm->g;
 	struct nvgpu_mem *mem;
+	int err;
 
 	vidmem_dbg(g, "Running VIDMEM clearing thread:");
 
 	while ((mem = nvgpu_vidmem_clear_list_dequeue(mm)) != NULL) {
-		nvgpu_vidmem_clear(g, mem);
+		err = nvgpu_vidmem_clear(g, mem);
+		if (err != 0) {
+			nvgpu_err(g, "nvgpu_vidmem_clear() failed err=%d", err);
+		}
 
 		WARN_ON(nvgpu_atomic64_sub_return((long)mem->aligned_size,
 					&g->mm.vidmem.bytes_pending) < 0);
@@ -344,7 +356,13 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
 	}
 
 	/* Reserve bootstrap region in vidmem allocator */
-	nvgpu_alloc_reserve_carveout(&g->mm.vidmem.allocator, &bootstrap_co);
+	err = nvgpu_alloc_reserve_carveout(&g->mm.vidmem.allocator,
+					   &bootstrap_co);
+	if (err != 0) {
+		nvgpu_err(g, "nvgpu_alloc_reserve_carveout() failed err=%d",
+			  err);
+		goto fail;
+	}
 
 	mm->vidmem.base = base;
 	mm->vidmem.size = size - base;
@@ -358,9 +376,28 @@ int nvgpu_vidmem_init(struct mm_gk20a *mm)
 	nvgpu_atomic64_set(&mm->vidmem.bytes_pending, 0);
 	nvgpu_init_list_node(&mm->vidmem.clear_list_head);
-	nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
-	nvgpu_mutex_init(&mm->vidmem.clearing_thread_lock);
-	nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);
+	err = nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
+	if (err != 0) {
+		nvgpu_err(g, "nvgpu_mutex_init(list_mutex) failed err=%d",
+			  err);
+		goto fail;
+	}
+
+	err = nvgpu_mutex_init(&mm->vidmem.clearing_thread_lock);
+	if (err != 0) {
+		nvgpu_err(g, "nvgpu_mutex_init(thread_lock) failed err=%d",
+			  err);
+		goto fail;
+	}
+
+	err = nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);
+	if (err != 0) {
+		nvgpu_err(g, "nvgpu_mutex_init(first_clear) failed err=%d",
+			  err);
+		goto fail;
+	}
 
 	nvgpu_atomic_set(&mm->vidmem.pause_count, 0);
 
 	/*
@@ -462,9 +499,13 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 	if (gk20a_last_fence) {
 		struct nvgpu_timeout timeout;
 
-		nvgpu_timeout_init(g, &timeout,
+		err = nvgpu_timeout_init(g, &timeout,
 				   nvgpu_get_poll_timeout(g),
 				   NVGPU_TIMER_CPU_TIMER);
+		if (err != 0) {
+			nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
+			return err;
+		}
 
 		do {
 			err = gk20a_fence_wait(g, gk20a_last_fence,


@@ -178,12 +178,17 @@ void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch)
 void nvgpu_vm_mapping_batch_finish_locked(
 	struct vm_gk20a *vm, struct vm_gk20a_mapping_batch *mapping_batch)
 {
+	int err;
+
 	/* hanging kref_put batch pointer? */
 	WARN_ON(vm->kref_put_batch == mapping_batch);
 
 	if (mapping_batch->need_tlb_invalidate) {
 		struct gk20a *g = gk20a_from_vm(vm);
 
-		g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		err = g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+		if (err != 0) {
+			nvgpu_err(g, "fb.tlb_invalidate() failed err=%d", err);
+		}
 	}
 }


@@ -256,14 +256,16 @@ static inline bool __nvgpu_atomic64_sub_and_test(long x, nvgpu_atomic64_t *v)
 }
 
 /*
- * The following are defined for the lockless allocator in the driver that
- * uses the cmpxchg() operation directly instead of nvgpu_atomic_cmpxchg().
+ * The following is only used by the lockless allocator and makes direct use
+ * of the cmpxchg function. For POSIX, this is translated to a call to
+ * nvgpu_atomic_cmpxchg.
  */
 #define cmpxchg(p, old, new)					\
 	({							\
 		typeof(*(p)) tmp = old;				\
 								\
-		atomic_compare_exchange_strong(p, &tmp, new);	\
+		(void) nvgpu_atomic_cmpxchg((nvgpu_atomic_t *) p, tmp,	\
+				new);				\
 		tmp;						\
 	})
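
For reference, the Linux cmpxchg() that this POSIX macro stands in for
returns the value that was read from the target location. A minimal
standalone C11 sketch of that contract, using only standard atomics and
none of the nvgpu wrappers:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Store new_val in *p only if *p equals old; return the value read. */
    static int cmpxchg_int(atomic_int *p, int old, int new_val)
    {
            int expected = old;

            /* On failure, expected is updated with the current value of *p. */
            atomic_compare_exchange_strong(p, &expected, new_val);
            return expected;
    }

    int main(void)
    {
            atomic_int v = 5;

            printf("%d\n", cmpxchg_int(&v, 5, 7));  /* prints 5; v is now 7 */
            printf("%d\n", cmpxchg_int(&v, 5, 9));  /* prints 7; v unchanged */
            return 0;
    }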