gpu: nvgpu: fix coverity defects

Fix the following Coverity defects:
  ioctl_prof.c: resource leak
  ioctl_dbg.c: logically dead code
  global_ctx.c: identical code for branches
  therm_dev.c: resource leak
  pmu_pstate.c: unused value
  nvgpu_mem.c: dead default in switch
  tsg.c: dereference before null check
  nvlink_gv100.c: logically dead code
  nvlink.c: out-of-bounds write
  fifo_vgpu.c: dereference null return value
  pmu_pg.c: dereference before null check
  fw_ver_ops.c: identical code for different branches
  boardobjgrp.c: dereference after null check
  boardobjgrp.c: dereference before null check
  boardobjgrp.c: dereference after null check
  engines.c: dereference before null check
  nvgpu_init.c: unused value
  nvgpu_init.c Unused value

CID 10127875
CID 10127820
CID 10063535
CID 10059311
CID 10127863
CID 9875900
CID 9865875
CID 9858045
CID 9852644
CID 9852635
CID 9852232
CID 9847593
CID 9847051
CID 9846056
CID 9846055
CID 9846054
CID 9842821

Bug 3460991

Change-Id: I91c215a545d07eb0e5b236849d5a8440ed6fe18d
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2657444
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Sagar Kamble <skamble@nvidia.com>
Date:      2022-01-20 21:04:56 +05:30
Committer: mobile promotions
Parent:    a3f3249c76
Commit:    29a0a146ac

15 changed files with 61 additions and 63 deletions

engines.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -509,10 +509,6 @@ void nvgpu_engine_reset(struct gk20a *g, u32 engine_id)
 	nvgpu_log_fn(g, " ");

-	if (g == NULL) {
-		return;
-	}
-
 	nvgpu_swprofile_begin_sample(prof);

 	dev = nvgpu_engine_get_active_eng_info(g, engine_id);
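
Coverity flags "dereference before null check" when a pointer is used first and tested against NULL afterwards: either the early use can crash, or the late test is dead. Here nvgpu_log_fn(g, " ") already receives g before the g == NULL guard, so the dead guard is dropped (callers hand in a valid g). The tsg.c and pmu_pg.c hunks below clear the same pattern. A minimal sketch of the defect shape, with hypothetical names rather than the nvgpu code:

  struct gpu { int id; };
  static void log_fn(struct gpu *g) { (void)g->id; }  /* dereferences g */
  static void do_reset(struct gpu *g) { g->id = 0; }

  static void engine_reset(struct gpu *g)
  {
          log_fn(g);           /* g dereferenced here ... */
          if (g == NULL) {     /* ... so this late check is dead */
                  return;
          }
          do_reset(g);
  }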

tsg.c

@@ -1180,7 +1180,7 @@ int nvgpu_tsg_set_mmu_debug_mode(struct nvgpu_channel *ch, bool enable)
 	u32 fb_refcnt;
 	struct nvgpu_tsg *tsg = nvgpu_tsg_from_ch(ch);

-	if ((ch == NULL) || (tsg == NULL)) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}

 	g = ch->g;

global_ctx.c

@@ -260,9 +260,7 @@ static int nvgpu_gr_global_ctx_buffer_sys_alloc(struct gk20a *g,
 	err = nvgpu_gr_global_ctx_buffer_alloc_sys(g, desc,
 			NVGPU_GR_GLOBAL_CTX_PRIV_ACCESS_MAP);
-	if (err != 0) {
-		goto fail;
-	}

 fail:
 	return err;
 }
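
This is the "identical code for branches" checker: with err == 0 and err != 0 both falling through to fail: and returning err, the conditional goto changes nothing. The fw_ver_ops.c hunk below clears the same checker in its literal form (both arms assigned the same NULL). A compilable sketch, assuming a hypothetical do_alloc():

  static int do_alloc(void) { return 0; }

  static int buffer_sys_alloc(void)
  {
          int err = do_alloc();

          if (err != 0) {      /* flagged: both outcomes reach fail: */
                  goto fail;
          }
  fail:
          return err;
  }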

nvgpu_init.c

@@ -347,8 +347,9 @@ int nvgpu_prepare_poweroff(struct gk20a *g)
 	}

 #ifdef CONFIG_NVGPU_GSP_STRESS_TEST
-	ret = nvgpu_gsp_stress_test_halt(g, true);
-	if (ret != 0) {
+	tmp_ret = nvgpu_gsp_stress_test_halt(g, true);
+	if (tmp_ret != 0) {
+		ret = tmp_ret;
 		nvgpu_err(g, "Failed to halt GSP stress test");
 	}
 #endif
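
"Unused value" means an assignment is overwritten before anyone reads it: the halt status stored in ret was lost to a later assignment, so a GSP failure could never reach the caller. Routing the status through tmp_ret and folding it into ret only on failure keeps it observable. A sketch of the idiom with hypothetical helpers:

  static int halt_gsp(void)   { return -1; }
  static int halt_other(void) { return 0; }

  static int prepare_poweroff(void)
  {
          int ret = 0;
          int tmp_ret;

          tmp_ret = halt_gsp();              /* was: ret = halt_gsp(); */
          if (tmp_ret != 0) {
                  ret = tmp_ret;
          }
          tmp_ret = halt_other();            /* no longer clobbers ret */
          if ((tmp_ret != 0) && (ret == 0)) {
                  ret = tmp_ret;             /* keep the first error */
          }
          return ret;
  }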

nvgpu_mem.c

@@ -41,19 +41,6 @@ u32 nvgpu_aperture_mask_raw(struct gk20a *g, enum nvgpu_aperture aperture,
 {
 	u32 ret_mask = 0;

-	if ((aperture == APERTURE_INVALID) || (aperture >= APERTURE_MAX_ENUM)) {
-		nvgpu_do_assert_print(g, "Bad aperture");
-		return 0;
-	}
-
-	/*
-	 * Some iGPUs treat sysmem (i.e SoC DRAM) as vidmem. In these cases the
-	 * "sysmem" aperture should really be translated to VIDMEM.
-	 */
-	if (!nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE)) {
-		aperture = APERTURE_VIDMEM;
-	}
-
 	switch (aperture) {
 	case APERTURE_SYSMEM_COH:
 		ret_mask = sysmem_coh_mask;
@@ -69,6 +56,15 @@ u32 nvgpu_aperture_mask_raw(struct gk20a *g, enum nvgpu_aperture aperture,
 		ret_mask = 0;
 		break;
 	}
+
+	/*
+	 * Some iGPUs treat sysmem (i.e SoC DRAM) as vidmem. In these cases the
+	 * "sysmem" aperture should really be translated to VIDMEM.
+	 */
+	if (!nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE) && ret_mask != 0) {
+		ret_mask = vidmem_mask;
+	}
+
 	return ret_mask;
 }
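
Forcing aperture = APERTURE_VIDMEM ahead of the switch (after the early return for invalid values) meant the switch's default arm could never execute - the "dead default in switch". Translating after the switch lets default reject bad apertures by yielding 0, and the ret_mask != 0 test stops invalid input from being promoted to vidmem. The shape of the fix as a self-contained sketch:

  enum ap { AP_INVALID, AP_SYSMEM, AP_VIDMEM, AP_MAX };

  static unsigned int ap_mask(enum ap a, int honors_aperture)
  {
          unsigned int mask;

          switch (a) {             /* validate first: default is reachable */
          case AP_SYSMEM: mask = 0x1U; break;
          case AP_VIDMEM: mask = 0x2U; break;
          default:        mask = 0U;   break;  /* bad aperture -> 0 */
          }
          if (!honors_aperture && (mask != 0U)) {
                  mask = 0x2U;     /* sysmem doubles as vidmem on such iGPUs */
          }
          return mask;
  }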

nvlink.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -310,13 +310,20 @@ exit:
 int nvgpu_nvlink_link_early_init(struct gk20a *g)
 {
+	u32 discovered_links;
 	u32 link_id;
 	int ret = 0;

 	/*
 	 * First check the topology and setup connectivity
 	 * HACK: we are only enabling one link for now!!!
 	 */
-	link_id = (u32)(nvgpu_ffs(g->nvlink.discovered_links) - 1UL);
+	discovered_links = nvgpu_ffs(g->nvlink.discovered_links);
+	if (discovered_links == 0) {
+		nvgpu_err(g, "discovered links is 0");
+		return -EINVAL;
+	}
+	link_id = (u32)(discovered_links - 1UL);
+
 	g->nvlink.links[link_id].remote_info.is_connected = true;
 	g->nvlink.links[link_id].remote_info.device_type =
 					nvgpu_nvlink_endp_tegra;
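
nvgpu_ffs() follows the usual find-first-set contract and returns 0 when no bit is set; subtracting 1 unconditionally therefore wraps an empty discovered_links mask to 0xFFFFFFFF after the u32 cast and writes far past the end of links[] - the out-of-bounds write. Userspace ffs(3) has the same contract:

  #include <strings.h>             /* ffs() */

  #define MAX_LINKS 4U
  static int links[MAX_LINKS];

  static int init_first_link(unsigned int discovered)
  {
          int bit = ffs((int)discovered);      /* 0 when discovered == 0 */

          if (bit == 0) {
                  return -1;       /* was: wraps to index 0xFFFFFFFF */
          }
          links[bit - 1] = 1;      /* in bounds once bit >= 1 */
          return 0;
  }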

boardobjgrp.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -66,14 +66,16 @@ static int
 obj_insert_final(struct boardobjgrp *pboardobjgrp,
 		struct pmu_board_obj *obj, u8 index)
 {
-	struct gk20a *g = pboardobjgrp->g;
-
-	nvgpu_log_info(g, " ");
+	struct gk20a *g;

 	if (pboardobjgrp == NULL) {
 		return -EINVAL;
 	}

+	g = pboardobjgrp->g;
+
+	nvgpu_log_info(g, " ");
+
 	if (obj == NULL) {
 		return -EINVAL;
 	}
@@ -448,7 +450,7 @@ static int pmu_set_impl(struct gk20a *g,
 		return -EINVAL;
 	}

-	if ((pcmd->buf == NULL) &&
+	if ((pcmd->buf == NULL) ||
 		(pboardobjgrp->pmu.rpc_func_id ==
 		BOARDOBJGRP_GRP_RPC_FUNC_ID_INVALID)) {
 		return -EINVAL;
@@ -511,7 +513,7 @@ pmu_get_status_impl(struct gk20a *g, struct boardobjgrp *pboardobjgrp,
 		return -EINVAL;
 	}

-	if ((pcmd->buf == NULL) &&
+	if ((pcmd->buf == NULL) ||
 		(pboardobjgrp->pmu.rpc_func_id ==
 		BOARDOBJGRP_GRP_RPC_FUNC_ID_INVALID)) {
 		return -EINVAL;
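
Three fixes in one file. obj_insert_final() read pboardobjgrp->g before the pboardobjgrp == NULL test, so the read moves below it. In pmu_set_impl() and pmu_get_status_impl(), the && let a request with only one bad field through: a NULL pcmd->buf paired with a valid RPC id skipped the early return and was dereferenced later. By De Morgan, || is the guard that rejects either bad input. A sketch with hypothetical names:

  #include <string.h>

  #define ID_INVALID 0xFFU

  static int pmu_set(unsigned char *buf, unsigned int id,
                     const void *payload, size_t len)
  {
          /* was: (buf == NULL) && (id == ID_INVALID), which passes a
           * NULL buf whenever the id happens to be valid */
          if ((buf == NULL) || (id == ID_INVALID)) {
                  return -1;
          }
          memcpy(buf, payload, len);   /* buf checked on every path */
          return 0;
  }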

fw_ver_ops.c

@@ -1423,11 +1423,7 @@ int nvgpu_pmu_init_fw_ver_ops(struct gk20a *g,
 			pmu_get_init_msg_sw_mngd_area_off_v5;
 		fw_ops->get_init_msg_sw_mngd_area_size =
 			pmu_get_init_msg_sw_mngd_area_size_v5;
-		if (app_version == APP_VERSION_GV10X) {
-			fw_ops->clk.clk_set_boot_clk = NULL;
-		} else {
-			fw_ops->clk.clk_set_boot_clk = NULL;
-		}
+		fw_ops->clk.clk_set_boot_clk = NULL;
 	} else {
 		fw_ops->get_init_msg_queue_params =
 			pmu_get_init_msg_queue_params_v4;

pmu_pg.c

@@ -694,8 +694,7 @@ static int pmu_pg_init_powergating(struct gk20a *g, struct nvgpu_pmu *pmu,
 			pg_engine_id++) {

 		if ((BIT32(pg_engine_id) & pg_engine_id_list) != 0U) {
-			if (pmu != NULL &&
-				nvgpu_pmu_get_fw_state(g, pmu) ==
+			if (nvgpu_pmu_get_fw_state(g, pmu) ==
 				PMU_FW_STATE_INIT_RECEIVED) {
 				nvgpu_pmu_fw_state_change(g, pmu,
 					PMU_FW_STATE_ELPG_BOOTING, false);

pmu_pstate.c

@@ -1,7 +1,7 @@
 /*
  * general p state infrastructure
  *
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -192,6 +192,9 @@ int nvgpu_pmu_pstate_pmu_setup(struct gk20a *g)

 	if (g->ops.clk.support_pmgr_domain) {
 		err = pmgr_domain_pmu_setup(g);
+		if (err != 0) {
+			return err;
+		}
 	}

 	err = g->ops.clk.perf_pmu_vfe_load(g);
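
The same "unused value" checker as in nvgpu_init.c above, resolved the other way: the status of pmgr_domain_pmu_setup() was overwritten by the perf_pmu_vfe_load() call before being read, so a setup failure vanished. Here the value is consumed by returning early. Sketch with hypothetical helpers:

  static int pmgr_setup(void) { return 0; }
  static int vfe_load(void)   { return 0; }

  static int pstate_setup(int support_pmgr)
  {
          int err;

          if (support_pmgr) {
                  err = pmgr_setup();
                  if (err != 0) {
                          return err;   /* consume err before reassigning */
                  }
          }
          return vfe_load();
  }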

therm_dev.c

@@ -82,30 +82,27 @@ static struct pmu_board_obj *therm_device_construct(struct gk20a *g,
 	struct therm_device *ptherm_device = NULL;
 	int status = 0;

+	if (pmu_board_obj_get_type(pargs) !=
+			NV_VBIOS_THERM_DEVICE_1X_ENTRY_CLASS_GPU) {
+		nvgpu_err(g, "unsupported therm_device class - 0x%x",
+			pmu_board_obj_get_type(pargs));
+		return NULL;
+	}
+
 	ptherm_device = nvgpu_kzalloc(g, sizeof(struct therm_device));
 	if (ptherm_device == NULL) {
 		return NULL;
 	}

 	obj = (struct pmu_board_obj *)(void *)ptherm_device;

-	if (pmu_board_obj_get_type(pargs) ==
-			NV_VBIOS_THERM_DEVICE_1X_ENTRY_CLASS_GPU) {
-		status = construct_therm_device_gpu(g, obj, pargs);
-	} else {
-		nvgpu_err(g, "unsupported therm_device class - 0x%x",
-			pmu_board_obj_get_type(pargs));
-		return NULL;
-	}
+	status = construct_therm_device_gpu(g, obj, pargs);

-	if(status != 0) {
-		obj = NULL;
+	if (status != 0) {
 		nvgpu_err(g,
 			"could not allocate memory for therm_device");
-		if (obj != NULL) {
-			nvgpu_kfree(g, obj);
-		}
+		nvgpu_kfree(g, obj);
+		obj = NULL;
 	}

 	return obj;
 }
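
The leak had two sources. Rejecting an unsupported class only after nvgpu_kzalloc() succeeded meant that early return NULL dropped the allocation; and the failure path cleared obj before its own if (obj != NULL) guard, so the nvgpu_kfree() it protected could never run. The fix validates before allocating and frees before clearing. The corrected shape as a userspace sketch:

  #include <stdlib.h>

  struct dev { int type; };
  static int dev_init(struct dev *d) { d->type = 1; return 0; }

  static struct dev *dev_construct(int type)
  {
          struct dev *d;

          if (type != 1) {
                  return NULL;     /* reject before allocating */
          }
          d = calloc(1, sizeof(*d));
          if (d == NULL) {
                  return NULL;
          }
          if (dev_init(d) != 0) {
                  free(d);         /* free first ... */
                  d = NULL;        /* ... then clear the pointer */
          }
          return d;
  }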

fifo_vgpu.c

@@ -1,7 +1,7 @@
 /*
  * Virtualized GPU Fifo
  *
- * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -113,6 +113,11 @@ int vgpu_fifo_isr(struct gk20a *g, struct tegra_vgpu_fifo_intr_info *info)
 	nvgpu_err(g, "fifo intr (%d) on ch %u",
 		info->type, info->chid);

+	if (ch == NULL) {
+		nvgpu_err(g, "Invalid channel");
+		return -EINVAL;
+	}
+
 	switch (info->type) {
 	case TEGRA_VGPU_FIFO_INTR_PBDMA:
 		g->ops.channel.set_error_notifier(ch,
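
"Dereference null return value": the lookup that produces ch can return NULL for a stale or bogus channel id from the host, yet every arm of the switch hands ch to set_error_notifier(). One guard ahead of the switch covers all arms at once. A condensed paraphrase of the fixed flow (lookup name hypothetical):

  struct channel *ch = channel_from_id(g, info->chid);  /* may be NULL */

  if (ch == NULL) {
          nvgpu_err(g, "Invalid channel");
          return -EINVAL;
  }
  /* every switch arm below may now dereference ch safely */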

nvlink_gv100.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -76,9 +76,6 @@ static const char *gv100_device_type_to_str(u32 type)
 	if (type == NVL_DEVICE(ioctrlmif_multicast)) {
 		return "IOCTRLMIF MULTICAST";
 	}
-	if (type == NVL_DEVICE(nvltlc_multicast)) {
-		return "NVLTLC MULTICAST";
-	}
 	return "UNKNOWN";
 }
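
"Logically dead code" means the comparison can never be true by the time control reaches it - most plausibly because an identical nvltlc_multicast test already returns earlier in the if-chain (the hunk only shows the tail of the function, so this is inferred). Deleting the unreachable copy silences the checker without changing behavior:

  static const char *type_to_str(unsigned int type)
  {
          if (type == 1U) {
                  return "NVLTLC MULTICAST";      /* first test wins */
          }
          if (type == 2U) {
                  return "IOCTRLMIF MULTICAST";
          }
          if (type == 1U) {                       /* unreachable duplicate */
                  return "NVLTLC MULTICAST";
          }
          return "UNKNOWN";
  }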

ioctl_dbg.c

@@ -1,7 +1,7 @@
 /*
  * Tegra GK20A GPU Debugger/Profiler Driver
  *
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -2446,13 +2446,13 @@ static int nvgpu_gpu_access_sysmem_gpu_va(struct gk20a *g, u8 cmd, u32 size,
 	ret = dma_buf_vmap(dmabuf, &map);
 	cpu_va = ret ? NULL : map.vaddr;
-#else
-	cpu_va = (u8 *)dma_buf_vmap(dmabuf) + offset;
-#endif

 	if (!cpu_va) {
 		return -ENOMEM;
 	}
+#else
+	cpu_va = (u8 *)dma_buf_vmap(dmabuf) + offset;
+#endif

 	switch (cmd) {
 	case NVGPU_DBG_GPU_IOCTL_ACCESS_GPUVA_CMD_READ:
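
In the legacy branch, cpu_va is dma_buf_vmap(dmabuf) plus offset, and a nonzero offset makes the sum non-NULL even when the mapping itself failed, so !cpu_va could never fire there - logically dead code. The commit therefore scopes the NULL check to the new-API branch, where map.vaddr genuinely can be NULL. The general hazard in sketch form:

  static int access_mapped(void *vaddr, unsigned long offset)
  {
          /* flagged: vaddr + offset is never NULL once offset != 0 */
          char *cpu_va = (char *)vaddr + offset;

          if (cpu_va == NULL) {    /* logically dead for nonzero offset */
                  return -1;
          }
          return 0;
  }

Checking the raw pointer before applying the offset (or keeping the check only where NULL is representable, as done here) removes the dead test.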

ioctl_prof.c

@@ -810,6 +810,7 @@ static int nvgpu_prof_ioctl_vab_reserve(struct nvgpu_profiler_object *prof,
 			sizeof(struct nvgpu_vab_range_checker) *
 			arg->num_range_checkers)) {
 		gk20a_idle(g);
+		nvgpu_kfree(g, ckr);
 		return -EFAULT;
 	}
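
A classic early-return leak: the copy_from_user() failure path dropped the power reference via gk20a_idle() but returned without freeing ckr, the range-checker array allocated earlier in the function; every exit after an allocation has to unwind it. A condensed sketch using the helpers visible in this diff (the surrounding function body is assumed):

  ckr = nvgpu_kzalloc(g, size);
  if (ckr == NULL) {
          return -ENOMEM;
  }
  if (copy_from_user(ckr, user_ptr, size) != 0) {
          gk20a_idle(g);
          nvgpu_kfree(g, ckr);    /* the previously missing unwind */
          return -EFAULT;
  }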