Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-25 02:52:51 +03:00)
gpu: nvgpu: Fix 10.1 MISRA issues in common/nvlink
Fix the MISRA 10.1 violations by not allowing non-boolean type variables
to be used as boolean.

JIRA NVGPU-1921

Change-Id: Iccfc1794575530500da652a7f9db9f27c4187231
Signed-off-by: Tejal Kudav <tkudav@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2011066
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Dinesh T <dt@nvidia.com>
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 05e3be8177
Commit: 558af783ef
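For context, the hunks below rewrite controlling expressions built from unsigned and pointer operands, such as if (!intr) and if (!g), into explicit comparisons so the tested expression is essentially boolean, which is the usage MISRA C:2012 Rule 10.1 restricts for logical operators. A minimal standalone sketch of the before/after pattern follows; the names and types are hypothetical and are not nvgpu code.

    /* Illustrative sketch only; names and types are hypothetical. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool intr_pending(const void *dev, uint32_t intr_status)
    {
            /*
             * Before (flagged): non-boolean operands used directly as
             * boolean controlling expressions:
             *
             *     if (!dev || !intr_status)
             *             return false;
             *
             * After: compare against NULL / 0U explicitly.
             */
            if ((dev == NULL) || (intr_status == 0U)) {
                    return false;
            }
            return true;
    }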
@@ -163,7 +163,7 @@ bool gv100_nvlink_minion_falcon_isr(struct gk20a *g)
 	intr = MINION_REG_RD32(g, minion_falcon_irqstat_r()) &
 			MINION_REG_RD32(g, minion_falcon_irqmask_r());
 
-	if (!intr) {
+	if (intr == 0U) {
 		return true;
 	}
 
@@ -240,25 +240,25 @@ static bool gv100_nvlink_minion_isr(struct gk20a *g) {
 	intr = MINION_REG_RD32(g, minion_minion_intr_r()) &
 			MINION_REG_RD32(g, minion_minion_intr_stall_en_r());
 
-	if (minion_minion_intr_falcon_stall_v(intr) ||
-			minion_minion_intr_falcon_nostall_v(intr)) {
+	if ((minion_minion_intr_falcon_stall_v(intr) != 0U) ||
+			(minion_minion_intr_falcon_nostall_v(intr) != 0U)) {
 		gv100_nvlink_minion_falcon_isr(g);
 	}
 
-	if (minion_minion_intr_fatal_v(intr)) {
+	if (minion_minion_intr_fatal_v(intr) != 0U) {
 		gv100_nvlink_minion_falcon_intr_enable(g, false);
 		MINION_REG_WR32(g, minion_minion_intr_r(),
 				minion_minion_intr_fatal_f(1));
 	}
 
-	if (minion_minion_intr_nonfatal_v(intr)) {
+	if (minion_minion_intr_nonfatal_v(intr) != 0U) {
 		MINION_REG_WR32(g, minion_minion_intr_r(),
 				minion_minion_intr_nonfatal_f(1));
 	}
 
 	links = minion_minion_intr_link_v(intr) & g->nvlink.enabled_links;
 
-	if (links) {
+	if (links != 0UL) {
 		for_each_set_bit(i, &links, 32) {
 			gv100_nvlink_minion_link_isr(g, i);
 		}
@@ -310,7 +310,7 @@ static void gv100_nvlink_tlc_get_intr(struct gk20a *g, u32 link_id)
 static void gv100_nvlink_tlc_isr(struct gk20a *g, u32 link_id)
 {
 
-	if (g->nvlink.tlc_rx_err_status_0[link_id]) {
+	if (g->nvlink.tlc_rx_err_status_0[link_id] != 0U) {
 		/* All TLC RX 0 errors are fatal. Notify and disable */
 		nvgpu_err(g, "Fatal TLC RX 0 interrupt on link %d mask: %x",
 			link_id, g->nvlink.tlc_rx_err_status_0[link_id]);
@@ -319,7 +319,7 @@ static void gv100_nvlink_tlc_isr(struct gk20a *g, u32 link_id)
 		TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_0_r(),
 			g->nvlink.tlc_rx_err_status_0[link_id]);
 	}
-	if (g->nvlink.tlc_rx_err_status_1[link_id]) {
+	if (g->nvlink.tlc_rx_err_status_1[link_id] != 0U) {
 		/* All TLC RX 1 errors are fatal. Notify and disable */
 		nvgpu_err(g, "Fatal TLC RX 1 interrupt on link %d mask: %x",
 			link_id, g->nvlink.tlc_rx_err_status_1[link_id]);
@@ -328,7 +328,7 @@ static void gv100_nvlink_tlc_isr(struct gk20a *g, u32 link_id)
 		TLC_REG_WR32(g, link_id, nvtlc_rx_err_status_1_r(),
 			g->nvlink.tlc_rx_err_status_1[link_id]);
 	}
-	if (g->nvlink.tlc_tx_err_status_0[link_id]) {
+	if (g->nvlink.tlc_tx_err_status_0[link_id] != 0U) {
 		/* All TLC TX 0 errors are fatal. Notify and disable */
 		nvgpu_err(g, "Fatal TLC TX 0 interrupt on link %d mask: %x",
 			link_id, g->nvlink.tlc_tx_err_status_0[link_id]);
@@ -408,7 +408,7 @@ static void gv100_nvlink_dlpl_isr(struct gk20a *g, u32 link_id)
 	intr = DLPL_REG_RD32(g, link_id, nvl_intr_r()) &
 		DLPL_REG_RD32(g, link_id, nvl_intr_stall_en_r());
 
-	if (!intr) {
+	if (intr == 0U) {
 		return;
 	}
 
@@ -36,7 +36,7 @@ static u32 nvgpu_nvlink_get_link(struct gk20a *g)
 {
 	u32 link_id;
 
-	if (!g) {
+	if (g == NULL) {
 		return NVLINK_MAX_LINKS_SW;
 	}
 
@@ -107,10 +107,10 @@ static bool gv100_nvlink_minion_is_running(struct gk20a *g)
 {
 
 	/* if minion is booted and not halted, it is running */
-	if ((MINION_REG_RD32(g, minion_minion_status_r()) &
-			minion_minion_status_status_f(1)) &&
-			(!minion_falcon_irqstat_halt_v(
-			MINION_REG_RD32(g, minion_falcon_irqstat_r())))) {
+	if (((MINION_REG_RD32(g, minion_minion_status_r()) &
+			minion_minion_status_status_f(1)) != 0U) &&
+			((minion_falcon_irqstat_halt_v(
+			MINION_REG_RD32(g, minion_falcon_irqstat_r()))) == 0U)) {
 		return true;
 	}
 
@@ -136,7 +136,7 @@ static int gv100_nvlink_minion_load(struct gk20a *g)
 
 	/* get mem unlock ucode binary */
 	nvgpu_minion_fw = nvgpu_request_firmware(g, "minion.bin", 0);
-	if (!nvgpu_minion_fw) {
+	if (nvgpu_minion_fw == NULL) {
 		nvgpu_err(g, "minion ucode get fail");
 		err = -ENOENT;
 		goto exit;
@@ -185,7 +185,8 @@ static int gv100_nvlink_minion_load(struct gk20a *g)
 		nvgpu_usleep_range(delay, delay * 2);
 		delay = min_t(unsigned long,
 				delay << 1, GR_IDLE_CHECK_MAX);
-	} while (!nvgpu_timeout_expired_msg(&timeout, " minion boot timeout"));
+	} while (nvgpu_timeout_expired_msg(&timeout,
+				"minion boot timeout") == 0);
 
 	/* Service interrupts */
 	g->ops.nvlink.intr.minion_falcon_isr(g);
@@ -239,9 +240,10 @@ static u32 gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
 		delay = min_t(unsigned long,
 				delay << 1, GR_IDLE_CHECK_MAX);
 
-	} while (!nvgpu_timeout_expired_msg(&timeout, " minion cmd timeout"));
+	} while (nvgpu_timeout_expired_msg(&timeout,
+				"minion cmd timeout") == 0);
 
-	if (nvgpu_timeout_peek_expired(&timeout)) {
+	if (nvgpu_timeout_peek_expired(&timeout) != 0) {
 		return -ETIMEDOUT;
 	}
 
@@ -323,7 +325,7 @@ static int gv100_nvlink_minion_init_uphy(struct gk20a *g, unsigned long mask,
 	}
 
 	/* Check if INIT PLL is done on link */
-	if (!(BIT(master_pll) & g->nvlink.init_pll_done)) {
+	if ((BIT(master_pll) & g->nvlink.init_pll_done) == 0U) {
 		err = gv100_nvlink_minion_send_command(g, master_pll,
 				g->nvlink.initpll_cmd, 0, sync);
 		if (err != 0) {
@@ -536,10 +538,10 @@ int gv100_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
 		}
 		nvgpu_udelay(5);
 
-	} while((!nvgpu_timeout_expired_msg(&timeout, "timeout on pll on")) &&
-		links_off);
+	} while ((nvgpu_timeout_expired_msg(&timeout, "timeout on pll on") == 0)
+			&& (links_off != 0U));
 
-	if (nvgpu_timeout_peek_expired(&timeout)) {
+	if (nvgpu_timeout_peek_expired(&timeout) != 0) {
 		return -ETIMEDOUT;
 	}
 
@@ -719,10 +721,10 @@ static int gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask)
 			break;
 		}
 		nvgpu_udelay(5);
-	} while(!nvgpu_timeout_expired_msg(&timeout,
-			"timeout on rxcal"));
+	} while (nvgpu_timeout_expired_msg(&timeout,
+			"timeout on rxcal") == 0);
 
-	if (nvgpu_timeout_peek_expired(&timeout)) {
+	if (nvgpu_timeout_peek_expired(&timeout) != 0) {
 		return -ETIMEDOUT;
 	}
 	}
@@ -792,7 +794,8 @@ int gv100_nvlink_discover_link(struct gk20a *g)
 	/*
 	 * Process Entry 0 & 1 of IOCTRL table to find table size
 	 */
-	if (g->nvlink.ioctrl_table && g->nvlink.ioctrl_table[0].pri_base_addr) {
+	if ((g->nvlink.ioctrl_table != NULL) &&
+			(g->nvlink.ioctrl_table[0].pri_base_addr != 0U)) {
 		ioctrl_entry_addr = g->nvlink.ioctrl_table[0].pri_base_addr;
 		table_entry = gk20a_readl(g, ioctrl_entry_addr);
 		ioctrl_info_entry_type = nvlinkip_discovery_common_device_v(table_entry);
@@ -813,7 +816,7 @@ int gv100_nvlink_discover_link(struct gk20a *g)
 
 	device_table = nvgpu_kzalloc(g, ioctrl_discovery_size *
 			sizeof(struct nvgpu_nvlink_device_list));
-	if (!device_table) {
+	if (device_table == NULL) {
 		nvgpu_err(g, " Unable to allocate nvlink device table");
 		return -ENOMEM;
 	}
@@ -1126,7 +1129,7 @@ int gv100_nvlink_discover_link(struct gk20a *g)
 	nvgpu_log(g, gpu_dbg_nvlink, "+ TLC MCAST Base: 0x%08x", g->nvlink.tl_multicast_base);
 	nvgpu_log(g, gpu_dbg_nvlink, "+ MIF MCAST Base: 0x%08x", g->nvlink.mif_multicast_base);
 
-	if (!g->nvlink.minion_version) {
+	if (g->nvlink.minion_version == 0U) {
 		nvgpu_err(g, "Unsupported MINION version");
 
 		nvgpu_kfree(g, device_table);
@@ -1161,7 +1164,7 @@ int gv100_nvlink_discover_ioctrl(struct gk20a *g)
 
 	ioctrl_table = nvgpu_kzalloc(g, ioctrl_num_entries *
 			sizeof(struct nvgpu_nvlink_ioctrl_list));
-	if (!ioctrl_table) {
+	if (ioctrl_table == NULL) {
 		nvgpu_err(g, "Failed to allocate memory for nvlink io table");
 		return -ENOMEM;
 	}
@@ -1171,7 +1174,7 @@ int gv100_nvlink_discover_ioctrl(struct gk20a *g)
 
 		ret = g->ops.top.get_device_info(g, &dev_info,
 				NVGPU_ENGINE_IOCTRL, i);
-		if (ret) {
+		if (ret != 0) {
 			nvgpu_err(g, "Failed to parse dev_info table"
 					"for engine %d",
 					NVGPU_ENGINE_IOCTRL);
@@ -1275,7 +1278,7 @@ u32 gv100_nvlink_link_get_state(struct gk20a *g, u32 link_id)
 u32 gv100_nvlink_link_get_mode(struct gk20a *g, u32 link_id)
 {
 	u32 state;
-	if (!(BIT(link_id) & g->nvlink.discovered_links)) {
+	if ((BIT(link_id) & g->nvlink.discovered_links) == 0U) {
 		return nvgpu_nvlink_link__last;
 	}
 
@@ -1319,7 +1322,7 @@ int gv100_nvlink_link_set_mode(struct gk20a *g, u32 link_id, u32 mode)
 
 	nvgpu_log(g, gpu_dbg_nvlink, "link :%d, mode:%u", link_id, mode);
 
-	if (!(BIT(link_id) & g->nvlink.enabled_links)) {
+	if ((BIT(link_id) & g->nvlink.enabled_links) == 0U) {
 		return -EINVAL;
 	}
 
@@ -1438,9 +1441,10 @@ static u32 gv100_nvlink_link_sublink_check_change(struct gk20a *g, u32 link_id)
 			return -EFAULT;
 		}
 		nvgpu_udelay(5);
-	} while(!nvgpu_timeout_expired_msg(&timeout, "timeout on sublink rdy"));
+	} while (nvgpu_timeout_expired_msg(&timeout,
+				"timeout on sublink rdy") == 0);
 
-	if (nvgpu_timeout_peek_expired(&timeout)) {
+	if (nvgpu_timeout_peek_expired(&timeout) != 0) {
 		return -ETIMEDOUT;
 	}
 	return 0;
@@ -1454,7 +1458,7 @@ int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id,
 	u32 tx_sublink_state = nvgpu_nvlink_sublink_tx__last;
 	u32 reg;
 
-	if (!(BIT(link_id) & g->nvlink.enabled_links)) {
+	if ((BIT(link_id) & g->nvlink.enabled_links) == 0U) {
 		return -EINVAL;
 	}
 
@@ -1619,7 +1623,7 @@ u32 gv100_nvlink_link_get_sublink_mode(struct gk20a *g, u32 link_id,
 {
 	u32 state;
 
-	if (!(BIT(link_id) & g->nvlink.discovered_links)) {
+	if ((BIT(link_id) & g->nvlink.discovered_links) == 0U) {
 		if (!is_rx_sublink) {
 			return nvgpu_nvlink_sublink_tx__last;
 		}
 
@@ -81,10 +81,10 @@ int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id)
 			break;
 		}
 		nvgpu_udelay(NV_NVLINK_TIMEOUT_DELAY_US);
-	} while (!nvgpu_timeout_expired_msg(
+	} while (nvgpu_timeout_expired_msg(
 			&timeout,
 			"RXDET status check timed out on link %u",
-			link_id));
+			link_id) == 0);
 
 	return -ETIMEDOUT;
 }
@@ -121,10 +121,10 @@ int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
 				break;
 			}
 			nvgpu_udelay(NV_NVLINK_TIMEOUT_DELAY_US);
-		} while ((!nvgpu_timeout_expired_msg(&timeout,
+		} while (nvgpu_timeout_expired_msg(&timeout,
 				"Timed out setting pll on link %u",
-				link_id)));
-		if (nvgpu_timeout_peek_expired(&timeout)) {
+				link_id) == 0);
+		if (nvgpu_timeout_peek_expired(&timeout) != 0) {
 			return -ETIMEDOUT;
 		}
 	}
@@ -152,8 +152,8 @@ u32 tu104_nvlink_link_get_tx_sublink_state(struct gk20a *g, u32 link_id)
 			return nvl_sl0_slsm_status_tx_primary_state_v(reg);
 		}
 		nvgpu_udelay(NV_NVLINK_TIMEOUT_DELAY_US);
-	} while (!nvgpu_timeout_expired_msg(&timeout,
-			"Timeout on TX SLSM substate = stable check"));
+	} while (nvgpu_timeout_expired_msg(&timeout,
+			"Timeout on TX SLSM substate = stable check") == 0);
 
 	nvgpu_log(g, gpu_dbg_nvlink, "TX SLSM primary state :%u, substate:%u",
 		nvl_sl0_slsm_status_tx_primary_state_v(reg),
@@ -184,8 +184,8 @@ u32 tu104_nvlink_link_get_rx_sublink_state(struct gk20a *g, u32 link_id)
 			return nvl_sl1_slsm_status_rx_primary_state_v(reg);
 		}
 		nvgpu_udelay(NV_NVLINK_TIMEOUT_DELAY_US);
-	} while (!nvgpu_timeout_expired_msg(&timeout,
-			"Timeout on RX SLSM substate = stable check"));
+	} while (nvgpu_timeout_expired_msg(&timeout,
+			"Timeout on RX SLSM substate = stable check") == 0);
 
 	nvgpu_log(g, gpu_dbg_nvlink, "RX SLSM primary state :%u, substate:%u",
 		nvl_sl1_slsm_status_rx_primary_state_v(reg),
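Note: the same conversion is applied to the do/while timeout polling loops throughout this change; the negated call !nvgpu_timeout_expired_msg(...) becomes an explicit == 0 comparison on its int result. A minimal standalone sketch of that loop shape, using a hypothetical poll_expired() helper rather than the nvgpu timeout API:

    #include <stdint.h>

    /* Hypothetical stand-in for nvgpu_timeout_expired_msg(): returns
     * non-zero once the polling budget is exhausted. */
    static int poll_expired(uint32_t *remaining_polls)
    {
            if (*remaining_polls == 0U) {
                    return -1;      /* expired */
            }
            (*remaining_polls)--;
            return 0;
    }

    static int wait_for_ready(volatile const uint32_t *status_reg)
    {
            uint32_t remaining_polls = 1000U;

            do {
                    if ((*status_reg & 0x1U) != 0U) {       /* explicit bit test */
                            return 0;
                    }
            /*
             * Before: } while (!poll_expired(&remaining_polls));
             * After:  compare the int result against 0 explicitly.
             */
            } while (poll_expired(&remaining_polls) == 0);

            return -1;      /* timed out */
    }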