gpu: nvgpu: Fix mutex MISRA 17.7 violations

MISRA Rule 17.7 requires the return value of every non-void function to be used.
The fix is either to use the return value or to change the function to return
void. This patch fixes the calls to nvgpu_mutex_init and improves the related
error handling.

JIRA NVGPU-677

Change-Id: I609fa138520cc7ccfdd5aa0e7fd28c8ca0b3a21c
Signed-off-by: Nicolas Benech <nbenech@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1805598
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Nicolas Benech
Date:      2018-08-23 16:23:52 -04:00
Committed: mobile promotions
Commit:    2eface802a (parent b44c7fdb11)

18 changed files with 168 additions and 68 deletions
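The recurring shape of the fix is sketched below. This is a minimal, self-contained illustration only: `fake_mutex`, `fake_mutex_init`, and `fake_unit_sw_init` are hypothetical stand-ins for the nvgpu types and helpers (`struct nvgpu_mutex`, `nvgpu_mutex_init`, `nvgpu_flcn_sw_init`, `nvgpu_err`), assuming the usual 0-on-success, negative-errno-on-failure convention.

```c
/*
 * Sketch of the MISRA 17.7 fix pattern used throughout this change:
 * every call to the mutex-init helper has its return value checked,
 * and functions that previously returned void now return int so the
 * caller can propagate the failure. Names here are stand-ins, not the
 * real driver API.
 */
#include <stdio.h>
#include <pthread.h>

struct fake_mutex { pthread_mutex_t m; };

/* Stand-in for nvgpu_mutex_init(): 0 on success, -errno on failure. */
static int fake_mutex_init(struct fake_mutex *mu)
{
	return -pthread_mutex_init(&mu->m, NULL);
}

struct fake_unit {
	struct fake_mutex lock;
	int ready;
};

/* Before: a void sw-init silently ignored the mutex-init result.
 * After: the result is checked, logged, and returned to the caller. */
static int fake_unit_sw_init(struct fake_unit *u)
{
	int err = fake_mutex_init(&u->lock);

	if (err != 0) {
		fprintf(stderr, "Error in lock mutex initialization\n");
		return err;
	}

	u->ready = 1;
	return 0;
}

int main(void)
{
	struct fake_unit u = { 0 };
	int err = fake_unit_sw_init(&u);

	if (err != 0) {
		fprintf(stderr, "sw init failed: %d\n", err);
		return 1;
	}

	printf("unit initialized\n");
	return 0;
}
```

The same idea drives the signature changes in the diffs below (e.g. `nvgpu_flcn_sw_init` and the `falcon_hal_sw_init` HAL op going from `void` to `int`): once the mutex-init result can fail, every layer above it has to be able to report that failure.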

View File

@@ -397,10 +397,11 @@ int nvgpu_flcn_bl_bootstrap(struct nvgpu_falcon *flcn,
return status;
}
void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
int nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
{
struct nvgpu_falcon *flcn = NULL;
struct gpu_ops *gops = &g->ops;
int err = 0;
switch (flcn_id) {
case FALCON_ID_PMU:
@@ -431,12 +432,15 @@ void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
break;
default:
nvgpu_err(g, "Invalid/Unsupported falcon ID %x", flcn_id);
err = -ENODEV;
break;
};
/* call to HAL method to assign flcn base & ops to selected falcon */
if (flcn) {
flcn->g = g;
gops->falcon.falcon_hal_sw_init(flcn);
if (err != 0) {
return err;
}
/* call to HAL method to assign flcn base & ops to selected falcon */
flcn->g = g;
return gops->falcon.falcon_hal_sw_init(flcn);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -66,7 +66,12 @@ int gk20a_comptag_allocator_init(struct gk20a *g,
struct gk20a_comptag_allocator *allocator,
unsigned long size)
{
nvgpu_mutex_init(&allocator->lock);
int err = nvgpu_mutex_init(&allocator->lock);
if (err != 0) {
nvgpu_err(g, "Error in allocator.lock mutex initialization");
return err;
}
/*
* 0th comptag is special and is never used. The base for this bitmap

View File

@@ -1,4 +1,6 @@
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -375,7 +377,7 @@ clean_up_vm:
static int nvgpu_init_mm_setup_sw(struct gk20a *g)
{
struct mm_gk20a *mm = &g->mm;
int err;
int err = 0;
if (mm->sw_ready) {
nvgpu_log_info(g, "skip init");
@@ -383,7 +385,11 @@ static int nvgpu_init_mm_setup_sw(struct gk20a *g)
}
mm->g = g;
nvgpu_mutex_init(&mm->l2_op_lock);
err = nvgpu_mutex_init(&mm->l2_op_lock);
if (err != 0) {
nvgpu_err(g, "Error in l2_op_lock mutex initialization");
return err;
}
/*TBD: make channel vm size configurable */
mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -

View File

@@ -90,6 +90,8 @@ int nvgpu_pd_cache_init(struct gk20a *g)
{
struct nvgpu_pd_cache *cache;
u32 i;
int err = 0;
/*
* This gets called from finalize_poweron() so we need to make sure we
@@ -111,9 +113,15 @@ int nvgpu_pd_cache_init(struct gk20a *g)
}
cache->mem_tree = NULL;
g->mm.pd_cache = cache;
nvgpu_mutex_init(&cache->lock);
err = nvgpu_mutex_init(&cache->lock);
if (err != 0) {
nvgpu_err(g, "Error in cache.lock initialization");
nvgpu_kfree(g, cache);
return err;
}
g->mm.pd_cache = cache;
pd_dbg(g, "PD cache initialized!");
return 0;

View File

@@ -284,7 +284,7 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
bool userspace_managed,
char *name)
{
int err;
int err = 0;
char alloc_name[32];
u64 kernel_vma_flags;
u64 user_vma_start, user_vma_limit;
@@ -476,8 +476,19 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
vm->mapped_buffers = NULL;
nvgpu_mutex_init(&vm->syncpt_ro_map_lock);
nvgpu_mutex_init(&vm->update_gmmu_lock);
err = nvgpu_mutex_init(&vm->syncpt_ro_map_lock);
if (err != 0) {
nvgpu_err(g,
"Error in syncpt_ro_map_lock mutex initialization");
goto clean_up_allocators;
}
err = nvgpu_mutex_init(&vm->update_gmmu_lock);
if (err != 0) {
nvgpu_err(g, "Error in update_gmmu_lock mutex initialization");
goto clean_up_ro_map_lock;
}
nvgpu_ref_init(&vm->ref);
nvgpu_init_list_node(&vm->vm_area_list);
@@ -489,12 +500,16 @@ int __nvgpu_vm_init(struct mm_gk20a *mm,
if (vm->va_limit > 4ULL * SZ_1G) {
err = nvgpu_init_sema_pool(vm);
if (err) {
goto clean_up_allocators;
goto clean_up_gmmu_lock;
}
}
return 0;
clean_up_gmmu_lock:
nvgpu_mutex_destroy(&vm->update_gmmu_lock);
clean_up_ro_map_lock:
nvgpu_mutex_destroy(&vm->syncpt_ro_map_lock);
clean_up_allocators:
if (nvgpu_alloc_initialized(&vm->kernel)) {
nvgpu_alloc_destroy(&vm->kernel);

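The `__nvgpu_vm_init` hunk above also introduces ordered cleanup labels (`clean_up_gmmu_lock`, `clean_up_ro_map_lock`, `clean_up_allocators`) so that a failure destroys only what was already initialized, in reverse order. The sketch below shows that unwind pattern under the same stand-in assumptions as before; `demo_vm_init` and the pthread-based helpers are illustrative, not the nvgpu API.

```c
/*
 * Reverse-order unwind: each successfully initialized mutex gets its own
 * cleanup label, and a later failure jumps to the label that tears down
 * only the resources acquired so far.
 */
#include <pthread.h>
#include <stdio.h>

static int demo_vm_init(pthread_mutex_t *ro_map_lock, pthread_mutex_t *gmmu_lock)
{
	int err;

	err = pthread_mutex_init(ro_map_lock, NULL);
	if (err != 0) {
		fprintf(stderr, "Error in ro_map_lock mutex initialization\n");
		return -err;
	}

	err = pthread_mutex_init(gmmu_lock, NULL);
	if (err != 0) {
		fprintf(stderr, "Error in gmmu_lock mutex initialization\n");
		goto clean_up_ro_map_lock;	/* undo only the first init */
	}

	return 0;

clean_up_ro_map_lock:
	pthread_mutex_destroy(ro_map_lock);
	return -err;
}

int main(void)
{
	pthread_mutex_t a, b;

	return demo_vm_init(&a, &b) == 0 ? 0 : 1;
}
```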
View File

@@ -696,6 +696,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
u32 active_engine_id, pbdma_id, engine_id;
int flags = nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ?
NVGPU_DMA_FORCE_CONTIGUOUS : 0;
int err = 0;
nvgpu_log_fn(g, " ");
@@ -733,7 +734,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
f->num_runlist_entries, runlist_size);
for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
int err = nvgpu_dma_alloc_flags_sys(g, flags,
err = nvgpu_dma_alloc_flags_sys(g, flags,
runlist_size,
&runlist->mem[i]);
if (err) {
@@ -741,7 +742,13 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
goto clean_up_runlist;
}
}
nvgpu_mutex_init(&runlist->runlist_lock);
err = nvgpu_mutex_init(&runlist->runlist_lock);
if (err != 0) {
nvgpu_err(g,
"Error in runlist_lock mutex initialization");
goto clean_up_runlist;
}
/* None of buffers is pinned if this value doesn't change.
Otherwise, one of them (cur_buffer) must have been pinned. */
@@ -773,7 +780,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
clean_up_runlist:
gk20a_fifo_delete_runlist(f);
nvgpu_log_fn(g, "fail");
return -ENOMEM;
return err;
}
u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g)

View File

@@ -707,9 +707,10 @@ void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
gk20a_falcon_engine_dependency_ops(flcn);
}
void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
{
struct gk20a *g = flcn->g;
int err = 0;
switch (flcn->flcn_id) {
case FALCON_ID_PMU:
@@ -726,28 +727,35 @@ void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
flcn->flcn_base = FALCON_FECS_BASE;
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = false;
break;
break;
case FALCON_ID_GPCCS:
flcn->flcn_base = FALCON_GPCCS_BASE;
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = false;
break;
break;
case FALCON_ID_NVDEC:
flcn->flcn_base = FALCON_NVDEC_BASE;
flcn->is_falcon_supported = false;
flcn->is_interrupt_enabled = false;
break;
break;
default:
flcn->is_falcon_supported = false;
nvgpu_err(g, "Invalid flcn request");
err = -ENODEV;
break;
}
if (flcn->is_falcon_supported) {
nvgpu_mutex_init(&flcn->copy_lock);
gk20a_falcon_ops(flcn);
err = nvgpu_mutex_init(&flcn->copy_lock);
if (err != 0) {
nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
} else {
gk20a_falcon_ops(flcn);
}
} else {
nvgpu_log_info(g, "falcon 0x%x not supported on %s",
flcn->flcn_id, g->name);
}
return err;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,7 +23,7 @@
#define __FLCN_GK20A_H__
void gk20a_falcon_ops(struct nvgpu_falcon *flcn);
void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn);
#endif /* __FLCN_GK20A_H__ */

View File

@@ -150,7 +150,7 @@ int gk20a_prepare_poweroff(struct gk20a *g)
int gk20a_finalize_poweron(struct gk20a *g)
{
int err;
int err = 0;
#if defined(CONFIG_TEGRA_GK20A_NVHOST)
u32 nr_pages;
#endif
@@ -182,9 +182,21 @@ int gk20a_finalize_poweron(struct gk20a *g)
}
/* init interface layer support for PMU falcon */
nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
nvgpu_flcn_sw_init(g, FALCON_ID_SEC2);
nvgpu_flcn_sw_init(g, FALCON_ID_NVDEC);
err = nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
if (err != 0) {
nvgpu_err(g, "failed to sw init FALCON_ID_PMU");
goto done;
}
err = nvgpu_flcn_sw_init(g, FALCON_ID_SEC2);
if (err != 0) {
nvgpu_err(g, "failed to sw init FALCON_ID_SEC2");
goto done;
}
err = nvgpu_flcn_sw_init(g, FALCON_ID_NVDEC);
if (err != 0) {
nvgpu_err(g, "failed to sw init FALCON_ID_NVDEC");
goto done;
}
if (g->ops.bios.init) {
err = g->ops.bios.init(g);

View File

@@ -1263,7 +1263,7 @@ struct gpu_ops {
u32 (*get_link_control_status)(struct gk20a *g);
} xve;
struct {
void (*falcon_hal_sw_init)(struct nvgpu_falcon *flcn);
int (*falcon_hal_sw_init)(struct nvgpu_falcon *flcn);
} falcon;
struct {
void (*enable_priv_ring)(struct gk20a *g);

View File

@@ -3983,10 +3983,14 @@ static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
{
struct zbc_entry zbc_val;
u32 i;
int err;
u32 i = 0;
int err = 0;
nvgpu_mutex_init(&gr->zbc_lock);
err = nvgpu_mutex_init(&gr->zbc_lock);
if (err != 0) {
nvgpu_err(g, "Error in zbc_lock mutex initialization");
return err;
}
/* load default color table */
zbc_val.type = GK20A_ZBC_TYPE_COLOR;
@@ -4749,7 +4753,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
static int gk20a_init_gr_setup_sw(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
int err;
int err = 0;
nvgpu_log_fn(g, " ");
@@ -4761,7 +4765,11 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
gr->g = g;
#if defined(CONFIG_GK20A_CYCLE_STATS)
nvgpu_mutex_init(&g->gr.cs_lock);
err = nvgpu_mutex_init(&g->gr.cs_lock);
if (err != 0) {
nvgpu_err(g, "Error in gr.cs_lock mutex initialization");
return err;
}
#endif
err = gr_gk20a_init_gr_config(g, gr);
@@ -4802,7 +4810,12 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
if (g->ops.gr.init_gfxp_wfi_timeout_count)
g->ops.gr.init_gfxp_wfi_timeout_count(g);
nvgpu_mutex_init(&gr->ctx_mutex);
err = nvgpu_mutex_init(&gr->ctx_mutex);
if (err != 0) {
nvgpu_err(g, "Error in gr.ctx_mutex initialization");
goto clean_up;
}
nvgpu_spinlock_init(&gr->ch_tlb_lock);
gr->remove_support = gk20a_remove_gr_support;
@@ -4869,12 +4882,16 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
int gk20a_init_gr_support(struct gk20a *g)
{
u32 err;
int err = 0;
nvgpu_log_fn(g, " ");
/* this is required before gr_gk20a_init_ctx_state */
nvgpu_mutex_init(&g->gr.fecs_mutex);
err = nvgpu_mutex_init(&g->gr.fecs_mutex);
if (err != 0) {
nvgpu_err(g, "Error in gr.fecs_mutex initialization");
return err;
}
err = gr_gk20a_init_ctxsw(g);
if (err)

View File

@@ -53,9 +53,10 @@ static void gp106_falcon_ops(struct nvgpu_falcon *flcn)
gp106_falcon_engine_dependency_ops(flcn);
}
void gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
int gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
{
struct gk20a *g = flcn->g;
int err = 0;
switch (flcn->flcn_id) {
case FALCON_ID_PMU:
@@ -72,28 +73,35 @@ void gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
flcn->flcn_base = FALCON_FECS_BASE;
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = false;
break;
break;
case FALCON_ID_GPCCS:
flcn->flcn_base = FALCON_GPCCS_BASE;
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = false;
break;
break;
case FALCON_ID_NVDEC:
flcn->flcn_base = FALCON_NVDEC_BASE;
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = true;
break;
break;
default:
flcn->is_falcon_supported = false;
nvgpu_err(g, "Invalid flcn request");
err = -ENODEV;
break;
}
if (flcn->is_falcon_supported) {
nvgpu_mutex_init(&flcn->copy_lock);
gp106_falcon_ops(flcn);
err = nvgpu_mutex_init(&flcn->copy_lock);
if (err != 0) {
nvgpu_err(g, "Error in copy_lock mutex initialization");
} else {
gp106_falcon_ops(flcn);
}
} else {
nvgpu_info(g, "falcon 0x%x not supported on %s",
flcn->flcn_id, g->name);
}
return err;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,6 +22,6 @@
#ifndef __FLCN_GP106_H__
#define __FLCN_GP106_H__
void gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
int gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
#endif /* __FLCN_GP106_H__ */

View File

@@ -29,27 +29,29 @@
#include <nvgpu/hw/gv100/hw_falcon_gv100.h>
void gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
int gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
{
struct gk20a *g = flcn->g;
int err = 0;
switch (flcn->flcn_id) {
case FALCON_ID_MINION:
if (flcn->flcn_id == FALCON_ID_MINION) {
flcn->flcn_base = g->nvlink.minion_base;
flcn->is_falcon_supported = true;
flcn->is_interrupt_enabled = true;
break;
default:
break;
}
if (flcn->is_falcon_supported) {
nvgpu_mutex_init(&flcn->copy_lock);
err = nvgpu_mutex_init(&flcn->copy_lock);
if (err != 0) {
nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
return err;
}
gk20a_falcon_ops(flcn);
} else {
/*
* Fall back
*/
gp106_falcon_hal_sw_init(flcn);
err = gp106_falcon_hal_sw_init(flcn);
}
return err;
}

View File

@@ -22,6 +22,6 @@
#ifndef __FLCN_GV100_H__
#define __FLCN_GV100_H__
void gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
int gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn);
#endif /* __FLCN_GV100_H__ */

View File

@@ -2715,7 +2715,11 @@ int gv100_nvlink_early_init(struct gk20a *g)
goto nvlink_init_exit;
}
nvgpu_flcn_sw_init(g, FALCON_ID_MINION);
err = nvgpu_flcn_sw_init(g, FALCON_ID_MINION);
if (err != 0) {
nvgpu_err(g, "failed to sw init FALCON_ID_MINION");
goto nvlink_init_exit;
}
g->nvlink.discovered_links &= ~g->nvlink.link_disable_mask;
nvgpu_log(g, gpu_dbg_nvlink, "link_disable_mask = 0x%08x (from VBIOS)",

View File

@@ -167,11 +167,15 @@ static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
{
int err;
int err = 0;
nvgpu_log_fn(g, " ");
nvgpu_mutex_init(&g->mm.hub_isr_mutex);
err = nvgpu_mutex_init(&g->mm.hub_isr_mutex);
if (err != 0) {
nvgpu_err(g, "Error in hub_isr_mutex initialization");
return err;
}
err = gv11b_mm_mmu_fault_info_buf_init(g);

View File

@@ -29,12 +29,12 @@
/*
* Falcon Id Defines
*/
#define FALCON_ID_PMU (0)
#define FALCON_ID_FECS (2)
#define FALCON_ID_GPCCS (3)
#define FALCON_ID_NVDEC (4)
#define FALCON_ID_SEC2 (7)
#define FALCON_ID_MINION (10)
#define FALCON_ID_PMU (0U)
#define FALCON_ID_FECS (2U)
#define FALCON_ID_GPCCS (3U)
#define FALCON_ID_NVDEC (4U)
#define FALCON_ID_SEC2 (7U)
#define FALCON_ID_MINION (10U)
/*
* Falcon Base address Defines
@@ -317,6 +317,6 @@ int nvgpu_flcn_queue_push(struct nvgpu_falcon *flcn,
void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue);
void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id);
int nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id);
#endif /* __FALCON_H__ */