gpu: nvgpu: Code updates for MISRA violations

Code related to the MC module is updated to fix
the following MISRA violations:

Rule 10.1: Operands shall not be of an inappropriate
essential type.
Rule 10.3: The value of an expression shall not be
assigned to an object with a narrower essential type.
Rule 10.4: Both operands of an operator shall have
the same essential type category.
Rule 14.4: The controlling expression of an if
statement shall have essentially Boolean type.
Rule 15.6: The body of an if statement shall be a
compound statement (enclosed in braces).
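
Illustrative before/after sketches of these patterns (the identifiers
are generic placeholders, not the exact nvgpu code):

  /* Rules 10.1/10.4: an unsuffixed constant is essentially signed,
   * so suffix it to keep both operands of & unsigned. */
  u32 mask = 0xffffffffU;            /* was: u32 mask = 0xffffffff; */

  /* Rule 10.3: make the enum-to-u32 conversion explicit instead of
   * assigning across essential type categories. */
  u32 ops = (u32)(flag_a | flag_b);  /* was: int ops = flag_a | flag_b; */

  /* Rule 14.4: compare bitmask tests against 0U so the if() controls
   * an essentially Boolean expression. */
  if ((intr & pending_mask) != 0U) { /* was: if (intr & pending_mask) */
          handle_pending();
  }

  /* Rule 15.6: every if/else body becomes a compound statement. */
  if (cond) {                        /* was: if (cond) do_a(); else do_b(); */
          do_a();
  } else {
          do_b();
  }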

JIRA NVGPU-646
JIRA NVGPU-659
JIRA NVGPU-671

Change-Id: Ia7ada40068eab5c164b8bad99bf8103b37a2fbc9
Signed-off-by: Vinod G <vinodg@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1720926
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Vinod G, 2018-05-16 10:43:13 -07:00
Committed-by: mobile promotions
Commit: ac687c95d3 (parent: de67fb18fb)
11 changed files with 120 additions and 92 deletions

gk20a/fifo_gk20a.c:

@@ -2672,7 +2672,7 @@ void gk20a_fifo_isr(struct gk20a *g)
 	return;
 }
 
-int gk20a_fifo_nonstall_isr(struct gk20a *g)
+u32 gk20a_fifo_nonstall_isr(struct gk20a *g)
 {
 	u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
 	u32 clear_intr = 0;

gk20a/fifo_gk20a.h:

@@ -21,8 +21,8 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#ifndef __FIFO_GK20A_H__
-#define __FIFO_GK20A_H__
+#ifndef FIFO_GK20A_H
+#define FIFO_GK20A_H
 
 #include "channel_gk20a.h"
 #include "tsg_gk20a.h"
@@ -103,10 +103,10 @@ struct fifo_runlist_info_gk20a {
 };
 
 enum {
-	ENGINE_GR_GK20A = 0,
-	ENGINE_GRCE_GK20A = 1,
-	ENGINE_ASYNC_CE_GK20A = 2,
-	ENGINE_INVAL_GK20A
+	ENGINE_GR_GK20A = 0U,
+	ENGINE_GRCE_GK20A = 1U,
+	ENGINE_ASYNC_CE_GK20A = 2U,
+	ENGINE_INVAL_GK20A = 3U,
 };
 
 struct fifo_pbdma_exception_info_gk20a {
@@ -140,7 +140,7 @@ struct fifo_engine_info_gk20a {
 };
 
 enum {
-	PROFILE_IOCTL_ENTRY = 0,
+	PROFILE_IOCTL_ENTRY = 0U,
 	PROFILE_ENTRY,
 	PROFILE_JOB_TRACKING,
 	PROFILE_APPEND,
@@ -231,7 +231,7 @@ int gk20a_init_fifo_support(struct gk20a *g);
 int gk20a_init_fifo_setup_hw(struct gk20a *g);
 
 void gk20a_fifo_isr(struct gk20a *g);
-int gk20a_fifo_nonstall_isr(struct gk20a *g);
+u32 gk20a_fifo_nonstall_isr(struct gk20a *g);
 
 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid);
 int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
@@ -454,4 +454,4 @@ void gk20a_fifo_add_sema_cmd(struct gk20a *g,
 		struct nvgpu_semaphore *s, u64 sema_va,
 		struct priv_cmd_entry *cmd,
 		u32 off, bool acquire, bool wfi);
-#endif /*__GR_GK20A_H__*/
+#endif /* FIFO_GK20A_H */

gk20a/gk20a.h:

@@ -1076,7 +1076,7 @@ struct gpu_ops {
 		u32 (*intr_nonstall)(struct gk20a *g);
 		void (*intr_nonstall_pause)(struct gk20a *g);
 		void (*intr_nonstall_resume)(struct gk20a *g);
-		int (*isr_nonstall)(struct gk20a *g);
+		u32 (*isr_nonstall)(struct gk20a *g);
 		void (*enable)(struct gk20a *g, u32 units);
 		void (*disable)(struct gk20a *g, u32 units);
 		void (*reset)(struct gk20a *g, u32 units);

gk20a/gr_gk20a.c:

@@ -6134,18 +6134,18 @@ int gk20a_gr_isr(struct gk20a *g)
 	return 0;
 }
 
-int gk20a_gr_nonstall_isr(struct gk20a *g)
+u32 gk20a_gr_nonstall_isr(struct gk20a *g)
 {
-	int ops = 0;
+	u32 ops = 0;
 	u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r());
 
 	nvgpu_log(g, gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr);
 
-	if (gr_intr & gr_intr_nonstall_trap_pending_f()) {
+	if ((gr_intr & gr_intr_nonstall_trap_pending_f()) != 0U) {
 		/* Clear the interrupt */
 		gk20a_writel(g, gr_intr_nonstall_r(),
 			gr_intr_nonstall_trap_pending_f());
-		ops |= (gk20a_nonstall_ops_wakeup_semaphore |
+		ops |= (u32)(gk20a_nonstall_ops_wakeup_semaphore |
 			gk20a_nonstall_ops_post_events);
 	}
 	return ops;

gk20a/gr_gk20a.h:

@@ -24,6 +24,8 @@
 #ifndef GR_GK20A_H
 #define GR_GK20A_H
 
+#include <nvgpu/types.h>
+
 #include "gr_ctx_gk20a.h"
 #include "mm_gk20a.h"
@@ -566,7 +568,7 @@ int gk20a_init_gr_channel(struct channel_gk20a *ch_gk20a);
 int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
 
 int gk20a_gr_isr(struct gk20a *g);
-int gk20a_gr_nonstall_isr(struct gk20a *g);
+u32 gk20a_gr_nonstall_isr(struct gk20a *g);
 
 /* zcull */
 u32 gr_gk20a_get_ctxsw_zcull_size(struct gk20a *g, struct gr_gk20a *gr);
@@ -603,15 +605,17 @@ u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
 #define gr_gk20a_elpg_protected_call(g, func) \
 	({ \
 		int err = 0; \
-		if (g->support_pmu && g->elpg_enabled) {\
+		if ((g->support_pmu) && (g->elpg_enabled)) {\
 			err = nvgpu_pmu_disable_elpg(g); \
-			if (err) \
+			if (err != 0) {\
 				nvgpu_pmu_enable_elpg(g); \
+			} \
 		} \
-		if (!err) { \
+		if (err == 0) { \
 			err = func; \
-			if (g->support_pmu && g->elpg_enabled) \
+			if ((g->support_pmu) && (g->elpg_enabled)) {\
 				nvgpu_pmu_enable_elpg(g); \
+			} \
 		} \
 		err; \
 	})
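
Note: gr_gk20a_elpg_protected_call() is a GNU C statement expression;
the trailing "err;" is the value the macro yields, so the added braces
do not change its result. An illustrative (hypothetical) call site:

  int err = gr_gk20a_elpg_protected_call(g, update_gr_state(g));
  /* update_gr_state(g) stands in for any int-valued expression; it is
   * evaluated with ELPG disabled and its result lands in err. */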

gk20a/mc_gk20a.c:

@@ -45,7 +45,7 @@ void mc_gk20a_isr_stall(struct gk20a *g)
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 
-		if (mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) {
+		if ((mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) != 0U) {
 			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
 			/* GR Engine */
 			if (engine_enum == ENGINE_GR_GK20A) {
@@ -55,28 +55,33 @@ void mc_gk20a_isr_stall(struct gk20a *g)
 			/* CE Engine */
 			if (((engine_enum == ENGINE_GRCE_GK20A) ||
 				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
-				g->ops.ce2.isr_stall){
+				(g->ops.ce2.isr_stall != NULL)) {
 				g->ops.ce2.isr_stall(g,
 					g->fifo.engine_info[active_engine_id].inst_id,
 					g->fifo.engine_info[active_engine_id].pri_base);
 			}
 		}
 	}
-	if (mc_intr_0 & mc_intr_0_pfifo_pending_f())
+	if ((mc_intr_0 & mc_intr_0_pfifo_pending_f()) != 0U) {
 		gk20a_fifo_isr(g);
-	if (mc_intr_0 & mc_intr_0_pmu_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_pmu_pending_f()) != 0U) {
 		gk20a_pmu_isr(g);
-	if (mc_intr_0 & mc_intr_0_priv_ring_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_priv_ring_pending_f()) != 0U) {
 		g->ops.priv_ring.isr(g);
-	if (mc_intr_0 & mc_intr_0_ltc_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_ltc_pending_f()) != 0U) {
 		g->ops.ltc.isr(g);
-	if (mc_intr_0 & mc_intr_0_pbus_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_0_pbus_pending_f()) != 0U) {
 		g->ops.bus.isr(g);
+	}
 }
 
-int mc_gk20a_isr_nonstall(struct gk20a *g)
+u32 mc_gk20a_isr_nonstall(struct gk20a *g)
 {
-	int ops = 0;
+	u32 ops = 0;
 	u32 mc_intr_1;
 	u32 engine_id_idx;
 	u32 active_engine_id = 0;
@@ -84,8 +89,9 @@ int mc_gk20a_isr_nonstall(struct gk20a *g)
 	mc_intr_1 = g->ops.mc.intr_nonstall(g);
 
-	if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1))
+	if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1) != 0U) {
 		ops |= gk20a_fifo_nonstall_isr(g);
+	}
 
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
 			engine_id_idx++) {
@@ -94,19 +100,20 @@ int mc_gk20a_isr_nonstall(struct gk20a *g)
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 		engine_info = &g->fifo.engine_info[active_engine_id];
 
-		if (mc_intr_1 & engine_info->intr_mask) {
+		if ((mc_intr_1 & engine_info->intr_mask) != 0U) {
 			engine_enum = engine_info->engine_enum;
 			/* GR Engine */
-			if (engine_enum == ENGINE_GR_GK20A)
+			if (engine_enum == ENGINE_GR_GK20A) {
 				ops |= gk20a_gr_nonstall_isr(g);
+			}
 
 			/* CE Engine */
 			if (((engine_enum == ENGINE_GRCE_GK20A) ||
 				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
-				g->ops.ce2.isr_nonstall)
+				(g->ops.ce2.isr_nonstall != NULL)) {
 				ops |= g->ops.ce2.isr_nonstall(g,
 					engine_info->inst_id,
 					engine_info->pri_base);
+			}
 		}
 	}
@@ -219,7 +226,7 @@ void gk20a_mc_enable(struct gk20a *g, u32 units)
 	pmc = gk20a_readl(g, mc_enable_r());
 	pmc |= units;
 	gk20a_writel(g, mc_enable_r(), pmc);
-	gk20a_readl(g, mc_enable_r());
+	pmc = gk20a_readl(g, mc_enable_r());
 	nvgpu_spinlock_release(&g->mc_enable_lock);
 
 	nvgpu_udelay(20);
@@ -228,10 +235,11 @@ void gk20a_mc_enable(struct gk20a *g, u32 units)
 void gk20a_mc_reset(struct gk20a *g, u32 units)
 {
 	g->ops.mc.disable(g, units);
-	if (units & gk20a_fifo_get_all_ce_engine_reset_mask(g))
+	if ((units & gk20a_fifo_get_all_ce_engine_reset_mask(g)) != 0U) {
 		nvgpu_udelay(500);
-	else
+	} else {
 		nvgpu_udelay(20);
+	}
 	g->ops.mc.enable(g, units);
 }
@@ -239,19 +247,22 @@ u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev)
 {
 	u32 val = __nvgpu_readl(g, mc_boot_0_r());
 
-	if (val == 0xffffffff)
-		return val;
+	if (val != 0xffffffffU) {
-	if (arch)
-		*arch = mc_boot_0_architecture_v(val) <<
-			NVGPU_GPU_ARCHITECTURE_SHIFT;
+		if (arch != NULL) {
+			*arch = mc_boot_0_architecture_v(val) <<
+				NVGPU_GPU_ARCHITECTURE_SHIFT;
+		}
-	if (impl)
-		*impl = mc_boot_0_implementation_v(val);
+		if (impl != NULL) {
+			*impl = mc_boot_0_implementation_v(val);
+		}
-	if (rev)
-		*rev = (mc_boot_0_major_revision_v(val) << 4) |
-			mc_boot_0_minor_revision_v(val);
+		if (rev != NULL) {
+			*rev = (mc_boot_0_major_revision_v(val) << 4) |
+				mc_boot_0_minor_revision_v(val);
+		}
+	}
 
 	return val;
 }
@@ -259,7 +270,7 @@ u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev)
 bool mc_gk20a_is_intr1_pending(struct gk20a *g,
 			enum nvgpu_unit unit, u32 mc_intr_1)
 {
-	u32 mask = 0;
+	u32 mask = 0U;
 	bool is_pending;
 
 	switch (unit) {
@@ -270,11 +281,11 @@ bool mc_gk20a_is_intr1_pending(struct gk20a *g,
 		break;
 	}
 
-	if (mask == 0) {
+	if (mask == 0U) {
 		nvgpu_err(g, "unknown unit %d", unit);
 		is_pending = false;
 	} else {
-		is_pending = (mc_intr_1 & mask) ? true : false;
+		is_pending = ((mc_intr_1 & mask) != 0U) ? true : false;
 	}
 
 	return is_pending;
@@ -284,9 +295,12 @@ void mc_gk20a_handle_intr_nonstall(struct gk20a *g, u32 ops)
 {
 	bool semaphore_wakeup, post_events;
 
-	semaphore_wakeup = ops & gk20a_nonstall_ops_wakeup_semaphore;
-	post_events = ops & gk20a_nonstall_ops_post_events;
+	semaphore_wakeup = (((ops & (u32)gk20a_nonstall_ops_wakeup_semaphore) != 0U) ?
+					true : false);
+	post_events = (((ops & (u32)gk20a_nonstall_ops_post_events) != 0U) ?
+					true : false);
 
-	if (semaphore_wakeup)
+	if (semaphore_wakeup) {
 		g->ops.semaphore_wakeup(g, post_events);
+	}
 }
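
The int-to-u32 return type changes above (and in the fifo/gr hunks) keep
the nonstall "ops" value an unsigned flag set end to end: each per-unit
ISR returns a u32 bitmask of gk20a_nonstall_ops_* flags, the caller ORs
them together, and the handler decodes them into booleans. A condensed
sketch of that flow (simplified, not the exact nvgpu call chain):

  u32 ops = 0U;
  ops |= gk20a_fifo_nonstall_isr(g);      /* each ISR returns u32 flags */
  ops |= gk20a_gr_nonstall_isr(g);
  mc_gk20a_handle_intr_nonstall(g, ops);  /* flags decoded into bools */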

gk20a/mc_gk20a.h:

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ u32 mc_gk20a_intr_stall(struct gk20a *g);
 void mc_gk20a_intr_stall_pause(struct gk20a *g);
 void mc_gk20a_intr_stall_resume(struct gk20a *g);
 u32 mc_gk20a_intr_nonstall(struct gk20a *g);
-int mc_gk20a_isr_nonstall(struct gk20a *g);
+u32 mc_gk20a_isr_nonstall(struct gk20a *g);
 void mc_gk20a_intr_nonstall_pause(struct gk20a *g);
 void mc_gk20a_intr_nonstall_resume(struct gk20a *g);
 void gk20a_mc_enable(struct gk20a *g, u32 units);
@@ -42,4 +42,4 @@ u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev);
 bool mc_gk20a_is_intr1_pending(struct gk20a *g,
 			enum nvgpu_unit unit, u32 mc_intr_1);
 void mc_gk20a_handle_intr_nonstall(struct gk20a *g, u32 ops);
-#endif
+#endif /* MC_GK20A_H */

gp10b/mc_gp10b.c:

@@ -23,7 +23,6 @@
  */
 
 #include "gk20a/gk20a.h"
 #include "gk20a/mc_gk20a.h"
 #include "mc_gp10b.h"
@@ -37,7 +36,7 @@ void mc_gp10b_intr_enable(struct gk20a *g)
 	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);
 
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
-		0xffffffff);
+		0xffffffffU);
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
 		mc_intr_pfifo_pending_f() |
 		mc_intr_priv_ring_pending_f() |
@@ -49,7 +48,7 @@ void mc_gp10b_intr_enable(struct gk20a *g)
 		g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
 
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
-		0xffffffff);
+		0xffffffffU);
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
 		mc_intr_pfifo_pending_f() |
 		eng_intr_mask;
@@ -92,7 +91,7 @@ void mc_gp10b_isr_stall(struct gk20a *g)
 	for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) {
 		active_engine_id = g->fifo.active_engines_list[engine_id_idx];
 
-		if (mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) {
+		if ((mc_intr_0 & g->fifo.engine_info[active_engine_id].intr_mask) != 0U) {
 			engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
 			/* GR Engine */
 			if (engine_enum == ENGINE_GR_GK20A) {
@@ -102,29 +101,36 @@ void mc_gp10b_isr_stall(struct gk20a *g)
 			/* CE Engine */
 			if (((engine_enum == ENGINE_GRCE_GK20A) ||
 				(engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
-				g->ops.ce2.isr_stall){
+				(g->ops.ce2.isr_stall != NULL)) {
 				g->ops.ce2.isr_stall(g,
 					g->fifo.engine_info[active_engine_id].inst_id,
 					g->fifo.engine_info[active_engine_id].pri_base);
 			}
 		}
 	}
-	if (g->ops.mc.is_intr_hub_pending &&
-		g->ops.mc.is_intr_hub_pending(g, mc_intr_0))
+	if ((g->ops.mc.is_intr_hub_pending != NULL) &&
+		g->ops.mc.is_intr_hub_pending(g, mc_intr_0)) {
 		g->ops.fb.hub_isr(g);
-	if (mc_intr_0 & mc_intr_pfifo_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_pfifo_pending_f()) != 0U) {
 		gk20a_fifo_isr(g);
-	if (mc_intr_0 & mc_intr_pmu_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_pmu_pending_f()) != 0U) {
 		gk20a_pmu_isr(g);
-	if (mc_intr_0 & mc_intr_priv_ring_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_priv_ring_pending_f()) != 0U) {
 		g->ops.priv_ring.isr(g);
-	if (mc_intr_0 & mc_intr_ltc_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_ltc_pending_f()) != 0U) {
 		g->ops.ltc.isr(g);
-	if (mc_intr_0 & mc_intr_pbus_pending_f())
+	}
+	if ((mc_intr_0 & mc_intr_pbus_pending_f()) != 0U) {
 		g->ops.bus.isr(g);
-	if (g->ops.mc.is_intr_nvlink_pending &&
-		g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0))
+	}
+	if ((g->ops.mc.is_intr_nvlink_pending != NULL) &&
+		g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) {
 		g->ops.nvlink.isr(g);
+	}
 
 	nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0);
@@ -137,7 +143,7 @@ u32 mc_gp10b_intr_stall(struct gk20a *g)
 void mc_gp10b_intr_stall_pause(struct gk20a *g)
 {
-	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), 0xffffffff);
+	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), 0xffffffffU);
 }
 
 void mc_gp10b_intr_stall_resume(struct gk20a *g)
@@ -154,7 +160,7 @@ u32 mc_gp10b_intr_nonstall(struct gk20a *g)
 void mc_gp10b_intr_nonstall_pause(struct gk20a *g)
 {
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
-		0xffffffff);
+		0xffffffffU);
 }
 
 void mc_gp10b_intr_nonstall_resume(struct gk20a *g)
@@ -177,11 +183,11 @@ bool mc_gp10b_is_intr1_pending(struct gk20a *g,
 		break;
 	}
 
-	if (mask == 0) {
+	if (mask == 0U) {
 		nvgpu_err(g, "unknown unit %d", unit);
 		is_pending = false;
 	} else {
-		is_pending = (mc_intr_1 & mask) ? true : false;
+		is_pending = ((mc_intr_1 & mask) != 0U) ? true : false;
 	}
 
 	return is_pending;

gv100/mc_gv100.c:

@@ -38,9 +38,9 @@ void mc_gv100_intr_enable(struct gk20a *g)
 	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);
 
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
-		0xffffffff);
+		0xffffffffU);
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
-		0xffffffff);
+		0xffffffffU);
 	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
 
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
@@ -69,7 +69,7 @@ void mc_gv100_intr_enable(struct gk20a *g)
 bool gv100_mc_is_intr_nvlink_pending(struct gk20a *g, u32 mc_intr_0)
 {
-	return ((mc_intr_0 & mc_intr_nvlink_pending_f()) ? true : false);
+	return (((mc_intr_0 & mc_intr_nvlink_pending_f()) != 0U) ? true : false);
 }
 
 bool gv100_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
@@ -78,8 +78,9 @@ bool gv100_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
 	u32 stall_intr, eng_intr_mask;
 
 	eng_intr_mask = gk20a_fifo_act_eng_interrupt_mask(g, act_eng_id);
-	if (mc_intr_0 & eng_intr_mask)
+	if ((mc_intr_0 & eng_intr_mask) != 0U) {
 		return true;
+	}
 
 	stall_intr = mc_intr_pfifo_pending_f() |
 		mc_intr_hub_pending_f() |
@@ -87,8 +88,9 @@ bool gv100_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
 		mc_intr_pbus_pending_f() |
 		mc_intr_ltc_pending_f() |
 		mc_intr_nvlink_pending_f();
-	if (mc_intr_0 & stall_intr)
+	if ((mc_intr_0 & stall_intr) != 0U) {
 		return true;
+	}
 
 	return false;
 }

gv11b/fb_gv11b.h:

@@ -1,7 +1,7 @@
 /*
  * GV11B FB
  *
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -37,11 +37,11 @@
 #define	FAULT_BUF_INVALID	0
 #define	FAULT_BUF_VALID		1
 
-#define HUB_INTR_TYPE_OTHER		1	/* bit 0 */
-#define HUB_INTR_TYPE_NONREPLAY		2	/* bit 1 */
-#define HUB_INTR_TYPE_REPLAY		4	/* bit 2 */
-#define HUB_INTR_TYPE_ECC_UNCORRECTED	8	/* bit 3 */
-#define HUB_INTR_TYPE_ACCESS_COUNTER	16	/* bit 4 */
+#define HUB_INTR_TYPE_OTHER		1U	/* bit 0 */
+#define HUB_INTR_TYPE_NONREPLAY		2U	/* bit 1 */
+#define HUB_INTR_TYPE_REPLAY		4U	/* bit 2 */
+#define HUB_INTR_TYPE_ECC_UNCORRECTED	8U	/* bit 3 */
+#define HUB_INTR_TYPE_ACCESS_COUNTER	16U	/* bit 4 */
 #define HUB_INTR_TYPE_ALL	(HUB_INTR_TYPE_OTHER | \
 				HUB_INTR_TYPE_NONREPLAY | \
 				HUB_INTR_TYPE_REPLAY | \
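
Because HUB_INTR_TYPE_ALL is built by OR-ing these masks and then used
in unsigned mask arithmetic (e.g. passed to gv11b_fb_disable_hub_intr()
in the mc_gv11b.c hunks below), the U suffixes keep the whole expression
in the unsigned essential type category; illustratively:

  u32 intr_type_mask = HUB_INTR_TYPE_ALL;  /* unsigned throughout */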

gv11b/mc_gv11b.c:

@@ -1,7 +1,7 @@
 /*
  * GV11B master
  *
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,7 +22,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/types.h>
+#include <nvgpu/types.h>
 
 #include "gk20a/gk20a.h"
@@ -38,9 +38,9 @@ void mc_gv11b_intr_enable(struct gk20a *g)
 	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);
 
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
-		0xffffffff);
+		0xffffffffU);
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
-		0xffffffff);
+		0xffffffffU);
 	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
 
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
@@ -68,7 +68,7 @@ void mc_gv11b_intr_enable(struct gk20a *g)
 bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0)
 {
-	return ((mc_intr_0 & mc_intr_hub_pending_f()) ? true : false);
+	return (((mc_intr_0 & mc_intr_hub_pending_f()) != 0U) ? true : false);
 }
 
 bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
@@ -77,16 +77,18 @@ bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
 	u32 stall_intr, eng_intr_mask;
 
 	eng_intr_mask = gk20a_fifo_act_eng_interrupt_mask(g, act_eng_id);
-	if (mc_intr_0 & eng_intr_mask)
+	if ((mc_intr_0 & eng_intr_mask) != 0U) {
 		return true;
+	}
 
 	stall_intr = mc_intr_pfifo_pending_f() |
 		mc_intr_hub_pending_f() |
 		mc_intr_priv_ring_pending_f() |
 		mc_intr_pbus_pending_f() |
 		mc_intr_ltc_pending_f();
-	if (mc_intr_0 & stall_intr)
+	if ((mc_intr_0 & stall_intr) != 0U) {
 		return true;
+	}
 
 	return false;
 }