gpu: nvgpu: fifo: fix MISRA 10.3 violations

MISRA Rule 10.3 prohibits assigning a value to an object of a narrower
essential type or of a different essential type category. This change
fixes MISRA 10.3 violations in the common/fifo unit.

JIRA NVGPU-3023

Change-Id: Ibab6704e8d3cffd37c6c0e31ba6fc6c0bb7b517b
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2087812
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Philip Elcan
2019-04-01 11:07:04 -04:00
committed by mobile promotions
parent 295ff82fc9
commit c0b65e8b05
13 changed files with 23 additions and 12 deletions

View File

@@ -668,7 +668,7 @@ void __gk20a_channel_kill(struct channel_gk20a *ch)
} }
struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
s32 runlist_id, u32 runlist_id,
bool is_privileged_channel, bool is_privileged_channel,
pid_t pid, pid_t tid) pid_t pid, pid_t tid)
{ {

View File

@@ -55,7 +55,7 @@ void gm20b_channel_bind(struct channel_gk20a *c)
~ccsr_channel_enable_set_f(~U32(0U))) | ~ccsr_channel_enable_set_f(~U32(0U))) |
ccsr_channel_enable_set_true_f()); ccsr_channel_enable_set_true_f());
nvgpu_smp_wmb(); nvgpu_smp_wmb();
nvgpu_atomic_set(&c->bound, true); nvgpu_atomic_set(&c->bound, (int)true);
} }
u32 gm20b_channel_count(struct gk20a *g) u32 gm20b_channel_count(struct gk20a *g)

View File

@@ -38,7 +38,7 @@ void gv11b_channel_unbind(struct channel_gk20a *ch)
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (nvgpu_atomic_cmpxchg(&ch->bound, true, false) != 0) { if (nvgpu_atomic_cmpxchg(&ch->bound, (int)true, (int)false) != 0) {
gk20a_writel(g, ccsr_channel_inst_r(ch->chid), gk20a_writel(g, ccsr_channel_inst_r(ch->chid),
ccsr_channel_inst_ptr_f(0) | ccsr_channel_inst_ptr_f(0) |
ccsr_channel_inst_bind_false_f()); ccsr_channel_inst_bind_false_f());

View File

@@ -666,7 +666,7 @@ static void nvgpu_init_runlist_enginfo(struct gk20a *g, struct fifo_gk20a *f)
if ((engine_info != NULL) && if ((engine_info != NULL) &&
(engine_info->runlist_id == runlist->runlist_id)) { (engine_info->runlist_id == runlist->runlist_id)) {
runlist->eng_bitmask |= BIT(active_engine_id); runlist->eng_bitmask |= BIT32(active_engine_id);
} }
} }
nvgpu_log(g, gpu_dbg_info, "runlist %d : act eng bitmask 0x%x", nvgpu_log(g, gpu_dbg_info, "runlist %d : act eng bitmask 0x%x",

View File

@@ -362,7 +362,7 @@ void nvgpu_init_mm_ce_context(struct gk20a *g)
(g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)) { (g->mm.vidmem.ce_ctx_id == NVGPU_CE_INVAL_CTX_ID)) {
g->mm.vidmem.ce_ctx_id = g->mm.vidmem.ce_ctx_id =
gk20a_ce_create_context(g, gk20a_ce_create_context(g,
(int)nvgpu_engine_get_fast_ce_runlist_id(g), nvgpu_engine_get_fast_ce_runlist_id(g),
-1, -1,
-1); -1);

View File

@@ -435,7 +435,7 @@ void gk20a_ce_suspend(struct gk20a *g)
/* CE app utility functions */ /* CE app utility functions */
u32 gk20a_ce_create_context(struct gk20a *g, u32 gk20a_ce_create_context(struct gk20a *g,
int runlist_id, u32 runlist_id,
int timeslice, int timeslice,
int runlist_level) int runlist_level)
{ {

View File

@@ -123,7 +123,7 @@ void gk20a_ce_destroy(struct gk20a *g);
/* CE app utility functions */ /* CE app utility functions */
u32 gk20a_ce_create_context(struct gk20a *g, u32 gk20a_ce_create_context(struct gk20a *g,
int runlist_id, u32 runlist_id,
int timeslice, int timeslice,
int runlist_level); int runlist_level);
int gk20a_ce_execute_ops(struct gk20a *g, int gk20a_ce_execute_ops(struct gk20a *g,

View File

@@ -420,7 +420,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch);
/* runlist_id -1 is synonym for NVGPU_ENGINE_GR_GK20A runlist id */ /* runlist_id -1 is synonym for NVGPU_ENGINE_GR_GK20A runlist id */
struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g, struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
s32 runlist_id, u32 runlist_id,
bool is_privileged_channel, bool is_privileged_channel,
pid_t pid, pid_t tid); pid_t pid, pid_t tid);

View File

@@ -32,6 +32,8 @@ struct fifo_gk20a;
struct channel_gk20a; struct channel_gk20a;
#define RUNLIST_APPEND_FAILURE 0xffffffffU #define RUNLIST_APPEND_FAILURE 0xffffffffU
#define RUNLIST_INVALID_ID U32_MAX
u32 nvgpu_runlist_construct_locked(struct fifo_gk20a *f, u32 nvgpu_runlist_construct_locked(struct fifo_gk20a *f,
struct fifo_runlist_info_gk20a *runlist, struct fifo_runlist_info_gk20a *runlist,
u32 buf_id, u32 buf_id,

View File

@@ -35,6 +35,7 @@
#include <nvgpu/firmware.h> #include <nvgpu/firmware.h>
#include <nvgpu/os_sched.h> #include <nvgpu/os_sched.h>
#include <nvgpu/channel.h> #include <nvgpu/channel.h>
#include <nvgpu/runlist.h>
#include <nvgpu/utils.h> #include <nvgpu/utils.h>
#include <nvgpu/gk20a.h> #include <nvgpu/gk20a.h>
#include <nvgpu/string.h> #include <nvgpu/string.h>
@@ -1339,7 +1340,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
ch = gk20a_open_new_channel_with_cb(g, gk20a_cde_finished_ctx_cb, ch = gk20a_open_new_channel_with_cb(g, gk20a_cde_finished_ctx_cb,
cde_ctx, cde_ctx,
-1, RUNLIST_INVALID_ID,
false); false);
if (!ch) { if (!ch) {
nvgpu_warn(g, "cde: gk20a channel not available"); nvgpu_warn(g, "cde: gk20a channel not available");

View File

@@ -96,7 +96,7 @@ void nvgpu_channel_remove_support_linux(struct nvgpu_os_linux *l);
struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g, struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
void (*update_fn)(struct channel_gk20a *, void *), void (*update_fn)(struct channel_gk20a *, void *),
void *update_fn_data, void *update_fn_data,
int runlist_id, u32 runlist_id,
bool is_privileged_channel); bool is_privileged_channel);
#endif #endif

View File

@@ -437,9 +437,17 @@ static int __gk20a_channel_open(struct gk20a *g,
int err; int err;
struct channel_gk20a *ch; struct channel_gk20a *ch;
struct channel_priv *priv; struct channel_priv *priv;
u32 tmp_runlist_id;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
nvgpu_assert(runlist_id >= -1);
if (runlist_id == -1) {
tmp_runlist_id = NVGPU_ENGINE_GR_GK20A;
} else {
tmp_runlist_id = runlist_id;
}
g = gk20a_get(g); g = gk20a_get(g);
if (!g) if (!g)
return -ENODEV; return -ENODEV;
@@ -458,7 +466,7 @@ static int __gk20a_channel_open(struct gk20a *g,
goto fail_busy; goto fail_busy;
} }
/* All the user space channel should be non privilege */ /* All the user space channel should be non privilege */
ch = gk20a_open_new_channel(g, runlist_id, false, ch = gk20a_open_new_channel(g, tmp_runlist_id, false,
nvgpu_current_pid(g), nvgpu_current_tid(g)); nvgpu_current_pid(g), nvgpu_current_tid(g));
gk20a_idle(g); gk20a_idle(g);
if (!ch) { if (!ch) {

View File

@@ -246,7 +246,7 @@ static void nvgpu_channel_work_completion_cancel_sync(struct channel_gk20a *ch)
struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g, struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
void (*update_fn)(struct channel_gk20a *, void *), void (*update_fn)(struct channel_gk20a *, void *),
void *update_fn_data, void *update_fn_data,
int runlist_id, u32 runlist_id,
bool is_privileged_channel) bool is_privileged_channel)
{ {
struct channel_gk20a *ch; struct channel_gk20a *ch;