gpu: nvgpu: fix cert-c issues in common.fifo unit

Fix cert-c violations of the following rule in common/fifo/*:
INT30-C: Unsigned integer operation may wrap.

Jira NVGPU-3881

Change-Id: Ifd1994960774cc0e190610c67d0e3f4334b73cf0
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2166535
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Debarshi Dutta
Date: 2019-08-02 10:34:43 +05:30
Committed by: mobile promotions
Parent: b1175cba64
Commit: 5980d4c44f
4 changed files with 24 additions and 17 deletions
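The pattern applied across all four files is the same: plain unsigned arithmetic on counters and allocation sizes is routed through the checked helpers declared in <nvgpu/safe_ops.h> (nvgpu_safe_add_u32, nvgpu_safe_sub_u32, nvgpu_safe_mult_u64), so that a wrap can never propagate silently. The diff only shows the call sites; as a rough, non-authoritative sketch of the semantics such helpers are commonly given, the snippet below assumes they assert (BUG) on wrap and that U32_MAX/U64_MAX come from <nvgpu/types.h>. The sketch_* names are illustrative only and are not the actual nvgpu implementations.

#include <nvgpu/types.h> /* u32, u64, U32_MAX, U64_MAX */
#include <nvgpu/bug.h>   /* BUG() */

/*
 * Illustrative sketch only: wrap-checked arithmetic in the spirit of
 * <nvgpu/safe_ops.h>. Assumption: the real helpers assert on wrap so
 * callers never observe a wrapped result; the actual nvgpu
 * implementations may differ in detail.
 */
static inline u32 sketch_safe_add_u32(u32 a, u32 b)
{
	/* a + b wraps exactly when the true sum exceeds U32_MAX */
	if (a > U32_MAX - b) {
		BUG();
	}
	return a + b;
}

static inline u32 sketch_safe_sub_u32(u32 a, u32 b)
{
	/* a - b wraps exactly when b is larger than a */
	if (a < b) {
		BUG();
	}
	return a - b;
}

static inline u64 sketch_safe_mult_u64(u64 a, u64 b)
{
	/* a * b wraps exactly when b != 0 and a exceeds U64_MAX / b */
	if (b != 0ULL && a > U64_MAX / b) {
		BUG();
	}
	return a * b;
}

Under that reading, for example, the split of "if (--tsg->num_active_channels == 0U)" into a checked subtract followed by a separate zero test in the runlist change below preserves the original control flow while making the underflow case detectable.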

View File

@@ -54,6 +54,7 @@
#include <nvgpu/fifo/userd.h>
#include <nvgpu/fence.h>
#include <nvgpu/preempt.h>
+#include <nvgpu/safe_ops.h>
#ifdef CONFIG_NVGPU_DEBUGGER
#include <nvgpu/gr/gr.h>
#endif
@@ -90,7 +91,7 @@ static struct nvgpu_channel *allocate_channel(struct nvgpu_fifo *f)
nvgpu_list_del(&ch->free_chs);
WARN_ON(nvgpu_atomic_read(&ch->ref_count) != 0);
WARN_ON(ch->referenceable);
-f->used_channels++;
+f->used_channels = nvgpu_safe_add_u32(f->used_channels, 1U);
}
nvgpu_mutex_release(&f->free_chs_mutex);
@@ -115,7 +116,7 @@ static void free_channel(struct nvgpu_fifo *f,
nvgpu_mutex_acquire(&f->free_chs_mutex);
/* add to head to increase visibility of timing-related bugs */
nvgpu_list_add(&ch->free_chs, &f->free_chs);
-f->used_channels--;
+f->used_channels = nvgpu_safe_sub_u32(f->used_channels, 1U);
nvgpu_mutex_release(&f->free_chs_mutex);
/*

View File

@@ -41,6 +41,7 @@
#include <nvgpu/gr/gr_falcon.h>
#include <nvgpu/gr/gr.h>
#include <nvgpu/fifo.h>
+#include <nvgpu/safe_ops.h>
#define FECS_METHOD_WFI_RESTORE 0x80000U
@@ -477,14 +478,14 @@ int nvgpu_engine_setup_sw(struct gk20a *g)
size_t size;
f->max_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
-size = f->max_engines * sizeof(*f->engine_info);
+size = nvgpu_safe_mult_u64(f->max_engines, sizeof(*f->engine_info));
f->engine_info = nvgpu_kzalloc(g, size);
if (f->engine_info == NULL) {
nvgpu_err(g, "no mem for engine info");
return -ENOMEM;
}
-size = f->max_engines * sizeof(u32);
+size = nvgpu_safe_mult_u64(f->max_engines, sizeof(u32));
f->active_engines_list = nvgpu_kzalloc(g, size);
if (f->active_engines_list == NULL) {
nvgpu_err(g, "no mem for active engine list");
@@ -983,7 +984,8 @@ u32 nvgpu_engine_mmu_fault_id_to_veid(struct gk20a *g, u32 mmu_fault_id,
num_subctx = f->max_subctx_count;
if (mmu_fault_id >= gr_eng_fault_id &&
-mmu_fault_id < (gr_eng_fault_id + num_subctx)) {
+mmu_fault_id < nvgpu_safe_add_u32(gr_eng_fault_id,
+num_subctx)) {
veid = mmu_fault_id - gr_eng_fault_id;
}

View File

@@ -29,6 +29,7 @@
#include <nvgpu/bug.h>
#include <nvgpu/dma.h>
#include <nvgpu/rc.h>
+#include <nvgpu/safe_ops.h>
#ifdef CONFIG_NVGPU_LS_PMU
#include <nvgpu/pmu/mutex.h>
#endif
@@ -116,7 +117,7 @@ static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
nvgpu_log_info(g, "rl entries left %d runlist [0] %x [1] %x",
*entries_left,
(*runlist_entry)[0], (*runlist_entry)[1]);
-count++;
+count = nvgpu_safe_add_u32(count, 1U);
*runlist_entry += runlist_entry_words;
(*entries_left)--;
}
@@ -344,7 +345,8 @@ static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
} else {
/* new, and belongs to a tsg */
nvgpu_set_bit(tsg->tsgid, runlist->active_tsgs);
-tsg->num_active_channels++;
+tsg->num_active_channels = nvgpu_safe_add_u32(
+tsg->num_active_channels, 1U);
}
} else {
if (!nvgpu_test_and_clear_bit(ch->chid,
@@ -352,7 +354,9 @@ static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
/* wasn't there */
return false;
} else {
-if (--tsg->num_active_channels == 0U) {
+tsg->num_active_channels = nvgpu_safe_sub_u32(
+tsg->num_active_channels, 1U);
+if (tsg->num_active_channels == 0U) {
/* was the only member of this tsg */
nvgpu_clear_bit(tsg->tsgid,
runlist->active_tsgs);
@@ -724,21 +728,21 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
f->runlist_entry_size = g->ops.runlist.entry_size(g);
f->num_runlist_entries = g->ops.runlist.length_max(g);
f->max_runlists = g->ops.runlist.count_max();
-f->runlist_info = nvgpu_kzalloc(g,
-sizeof(*f->runlist_info) * f->max_runlists);
+f->runlist_info = nvgpu_kzalloc(g, nvgpu_safe_mult_u64(
+sizeof(*f->runlist_info), f->max_runlists));
if (f->runlist_info == NULL) {
goto clean_up_runlist;
}
for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
if (nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
-num_runlists++;
+num_runlists = nvgpu_safe_add_u32(num_runlists, 1U);
}
}
f->num_runlists = num_runlists;
-f->active_runlist_info = nvgpu_kzalloc(g,
-sizeof(*f->active_runlist_info) * num_runlists);
+f->active_runlist_info = nvgpu_kzalloc(g, nvgpu_safe_mult_u64(
+sizeof(*f->active_runlist_info), num_runlists));
if (f->active_runlist_info == NULL) {
goto clean_up_runlist;
}
@@ -757,7 +761,7 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
runlist = &f->active_runlist_info[i];
runlist->runlist_id = runlist_id;
f->runlist_info[runlist_id] = runlist;
-i++;
+i = nvgpu_safe_add_u32(i, 1U);
runlist->active_channels =
nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,

View File

@@ -33,6 +33,7 @@
#include <nvgpu/gr/config.h>
#include <nvgpu/gr/ctx.h>
#include <nvgpu/runlist.h>
+#include <nvgpu/safe_ops.h>
void nvgpu_tsg_disable(struct nvgpu_tsg *tsg)
{
@@ -794,9 +795,8 @@ int nvgpu_tsg_alloc_sm_error_states_mem(struct gk20a *g,
return -EINVAL;
}
-tsg->sm_error_states = nvgpu_kzalloc(g,
-sizeof(struct nvgpu_tsg_sm_error_state)
-* num_sm);
+tsg->sm_error_states = nvgpu_kzalloc(g, nvgpu_safe_mult_u64(
+sizeof(struct nvgpu_tsg_sm_error_state), num_sm));
if (tsg->sm_error_states == NULL) {
nvgpu_err(g, "sm_error_states mem allocation failed");
return -ENOMEM;