gpu: nvgpu: fifo: cleanup MISRA 10.3 violations

MISRA Rule 10.3 prohibits assigning a value to an object of a different size
or essential type. This fixes a number of such violations in the common/fifo
code.
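
For illustration, the recurring pattern in this change is: compute in a wide
temporary, assert that the result fits the narrower destination, then cast
explicitly. Below is a minimal standalone sketch of that pattern; it is not
nvgpu code, the function and parameter names are made up for the example, and
assert()/uint32_t/uint64_t stand in for nvgpu_assert() and u32/u64.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the fix pattern: do the arithmetic in a wide temporary,
 * range-check it, then narrow with an explicit cast.
 */
static uint32_t queue_entry_count(uint64_t mem_size_bytes)
{
	/*
	 * Violating form: a 64-bit result assigned straight to a u32,
	 * e.g.  q->size = q->mem.size / sizeof(u32);
	 */
	uint64_t tmp_size = mem_size_bytes / sizeof(uint32_t);

	/* Fixed form: assert the value fits, then cast explicitly. */
	assert(tmp_size <= UINT32_MAX);
	return (uint32_t)tmp_size;
}

int main(void)
{
	printf("%" PRIu32 " entries\n", queue_entry_count(4096));
	return 0;
}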

JIRA NVGPU-1008

Change-Id: I138c27eb86f6e0f9481c39a94d6632e2b4360af8
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2009940
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Philip Elcan <pelcan@nvidia.com>
Date:      2019-02-01 16:54:37 -05:00
Committer: mobile promotions
Commit:    fa81cf9000 (parent ff80b0e6c1)

4 changed files with 26 additions and 20 deletions


@@ -765,7 +765,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c,
 	struct gk20a *g = c->g;
 	struct vm_gk20a *ch_vm = c->vm;
 	struct priv_cmd_queue *q = &c->priv_cmd_q;
-	u32 size;
+	u64 size, tmp_size;
 	int err = 0;
 	bool gpfifo_based = false;
@@ -798,12 +798,14 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c,
 	 *
 	 * num_in_flight * (8 + 10) * 4 bytes
 	 */
-	size = num_in_flight * 18U * (u32)sizeof(u32);
+	size = num_in_flight * 18UL * sizeof(u32);
 	if (gpfifo_based) {
 		size = 2U * size / 3U;
 	}
-	size = PAGE_ALIGN(roundup_pow_of_two(size));
+	tmp_size = PAGE_ALIGN(roundup_pow_of_two(size));
+	nvgpu_assert(tmp_size <= U32_MAX);
+	size = (u32)tmp_size;
 	err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
 	if (err != 0) {
@@ -811,7 +813,9 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c,
 		goto clean_up;
 	}
-	q->size = q->mem.size / sizeof (u32);
+	tmp_size = q->mem.size / sizeof(u32);
+	nvgpu_assert(tmp_size <= U32_MAX);
+	q->size = (u32)tmp_size;
 	return 0;
@@ -914,8 +918,8 @@ int channel_gk20a_alloc_job(struct channel_gk20a *c,
 	int err = 0;
 	if (channel_gk20a_is_prealloc_enabled(c)) {
-		int put = c->joblist.pre_alloc.put;
-		int get = c->joblist.pre_alloc.get;
+		u32 put = c->joblist.pre_alloc.put;
+		u32 get = c->joblist.pre_alloc.get;
 		/*
 		 * ensure all subsequent reads happen after reading get.
@@ -982,7 +986,7 @@ void channel_gk20a_joblist_unlock(struct channel_gk20a *c)
 static struct channel_gk20a_job *channel_gk20a_joblist_peek(
 		struct channel_gk20a *c)
 {
-	int get;
+	u32 get;
 	struct channel_gk20a_job *job = NULL;
 	if (channel_gk20a_is_prealloc_enabled(c)) {
@@ -1025,8 +1029,8 @@ static void channel_gk20a_joblist_delete(struct channel_gk20a *c,
 bool channel_gk20a_joblist_is_empty(struct channel_gk20a *c)
 {
 	if (channel_gk20a_is_prealloc_enabled(c)) {
-		int get = c->joblist.pre_alloc.get;
-		int put = c->joblist.pre_alloc.put;
+		u32 get = c->joblist.pre_alloc.get;
+		u32 put = c->joblist.pre_alloc.put;
 		return !(CIRC_CNT(put, get, c->joblist.pre_alloc.length));
 	}
@@ -2319,7 +2323,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	c->g = NULL;
 	c->chid = chid;
-	nvgpu_atomic_set(&c->bound, false);
+	nvgpu_atomic_set(&c->bound, 0);
 	nvgpu_spinlock_init(&c->ref_obtain_lock);
 	nvgpu_atomic_set(&c->ref_count, 0);
 	c->referenceable = false;


@@ -75,7 +75,7 @@ void tu104_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
 int tu104_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
 {
 	struct nvgpu_timeout timeout;
-	unsigned long delay = GR_IDLE_CHECK_DEFAULT;
+	u32 delay = GR_IDLE_CHECK_DEFAULT;
 	int ret = -ETIMEDOUT;
 	ret = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
@@ -92,7 +92,7 @@ int tu104_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
 			break;
 		}
-		nvgpu_usleep_range(delay, delay * 2UL);
+		nvgpu_usleep_range(delay, delay * 2U);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (nvgpu_timeout_expired(&timeout) == 0);


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,7 @@
 #include <nvgpu/utils.h>
 #include <nvgpu/channel_sync.h>
 #include <nvgpu/channel_sync_syncpt.h>
+#include <nvgpu/bug.h>
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
@@ -98,7 +99,8 @@ static int nvgpu_submit_prepare_syncs(struct channel_gk20a *c,
 	}
 	if (sync_fence) {
-		wait_fence_fd = fence->id;
+		nvgpu_assert(fence->id <= (u32)INT_MAX);
+		wait_fence_fd = (int)fence->id;
 		err = nvgpu_channel_sync_wait_fence_fd(c->sync,
 			wait_fence_fd, job->wait_cmd, max_wait_cmds);
 	} else {
@@ -194,8 +196,8 @@ static void nvgpu_submit_append_priv_cmdbuf(struct channel_gk20a *c,
 		pbdma_gp_entry1_length_f(cmd->size)
 	};
-	nvgpu_mem_wr_n(g, gpfifo_mem, c->gpfifo.put * sizeof(x),
-			&x, sizeof(x));
+	nvgpu_mem_wr_n(g, gpfifo_mem, c->gpfifo.put * (u32)sizeof(x),
+			&x, (u32)sizeof(x));
 	if (cmd->mem->aperture == APERTURE_SYSMEM) {
 		trace_gk20a_push_cmdbuf(g->name, 0, cmd->size, 0,
@@ -255,9 +257,9 @@ static void nvgpu_submit_append_gpfifo_common(struct channel_gk20a *c,
 	struct nvgpu_mem *gpfifo_mem = &c->gpfifo.mem;
 	/* in bytes */
 	u32 gpfifo_size =
-		c->gpfifo.entry_num * sizeof(struct nvgpu_gpfifo_entry);
-	u32 len = num_entries * sizeof(struct nvgpu_gpfifo_entry);
-	u32 start = c->gpfifo.put * sizeof(struct nvgpu_gpfifo_entry);
+		c->gpfifo.entry_num * (u32)sizeof(struct nvgpu_gpfifo_entry);
+	u32 len = num_entries * (u32)sizeof(struct nvgpu_gpfifo_entry);
+	u32 start = c->gpfifo.put * (u32)sizeof(struct nvgpu_gpfifo_entry);
 	u32 end = start + len; /* exclusive */
 	if (end > gpfifo_size) {


@@ -99,7 +99,7 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
 	for (i = 0; i < f->max_runlists; ++i) {
 		runlist = &f->runlist_info[i];
-		if (test_bit(ch->chid, runlist->active_channels)) {
+		if (test_bit((int)ch->chid, runlist->active_channels)) {
 			return true;
 		}
 	}