Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: misra 12.1 fixes
MISRA Advisory Rule 12.1 states that the precedence of operators within
expressions should be made explicit. This change removes the Advisory
Rule 12.1 violations from various common units.

Jira NVGPU-3178

Change-Id: I4b77238afdb929c81320efa93ac105f9e69af9cd
Signed-off-by: Scott Long <scottl@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2277480
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: Alex Waterman
Parent: a54c207c37
Commit: 5ee9a446b5
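The pattern repeated throughout the diff below is simply to wrap each
sub-expression in parentheses so operator precedence is stated rather than
implied. A minimal C sketch of the rule, using a hypothetical helper rather
than code taken verbatim from this commit:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical range check in the spirit of falcon_memcpy_params_check(). */
static bool copy_params_ok(size_t offset, size_t size, size_t mem_size)
{
	/*
	 * Flagged by MISRA C:2012 Rule 12.1 (advisory): the reader must
	 * recall that '<' and '<=' bind tighter than '&&':
	 *
	 *     return offset < mem_size && offset + size <= mem_size;
	 */

	/* Compliant form: precedence made explicit. The logic and the
	 * generated code are unchanged; only the source is clarified. */
	return (offset < mem_size) && ((offset + size) <= mem_size);
}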
@@ -204,7 +204,7 @@ static int falcon_memcpy_params_check(struct nvgpu_falcon *flcn,
 
 	mem_size = g->ops.falcon.get_mem_size(flcn, mem_type);
 
-	if (!(offset < mem_size && (offset + size) <= mem_size)) {
+	if (!((offset < mem_size) && ((offset + size) <= mem_size))) {
 		nvgpu_err(g, "flcn-id 0x%x, copy overflow ",
 			flcn->flcn_id);
 		nvgpu_err(g, "total size 0x%x, offset 0x%x, copy size 0x%x",

@@ -1,7 +1,7 @@
 /*
  * GK20A Graphics
  *
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -163,7 +163,7 @@ void nvgpu_sw_quiesce_remove_support(struct gk20a *g)
 void nvgpu_sw_quiesce(struct gk20a *g)
 {
 #ifndef CONFIG_NVGPU_RECOVERY
-	if (g->is_virtual || g->enabled_flags == NULL ||
+	if (g->is_virtual || (g->enabled_flags == NULL) ||
 			nvgpu_is_enabled(g, NVGPU_DISABLE_SW_QUIESCE)) {
 		goto fail;
 	}

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -63,7 +63,7 @@ int nvgpu_init_ltc_support(struct gk20a *g)
 		g->ops.ltc.init_fs_state(g);
 	}
 
-	if (g->ops.ltc.ecc_init != NULL && !g->ecc.initialized) {
+	if ((g->ops.ltc.ecc_init != NULL) && !g->ecc.initialized) {
 		err = g->ops.ltc.ecc_init(g);
 		if (err != 0) {
 			nvgpu_kfree(g, ltc);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -450,7 +450,7 @@ clean_up:
 
 static bool nvgpu_netlist_is_valid(int net, u32 major_v, u32 major_v_hw)
 {
-	if (net != NETLIST_FINAL && major_v != major_v_hw) {
+	if ((net != NETLIST_FINAL) && (major_v != major_v_hw)) {
 		return false;
 	}
 	return true;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -74,7 +74,7 @@ static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 
 void nvgpu_pmu_enable_irq(struct gk20a *g, bool enable)
 {
-	if (g->pmu != NULL && g->ops.pmu.pmu_enable_irq != NULL) {
+	if ((g->pmu != NULL) && (g->ops.pmu.pmu_enable_irq != NULL)) {
 		nvgpu_mutex_acquire(&g->pmu->isr_mutex);
 		g->ops.pmu.pmu_enable_irq(g->pmu, enable);
 		g->pmu->isr_enabled = enable;
@@ -180,7 +180,7 @@ int nvgpu_pmu_early_init(struct gk20a *g)
 	pmu->g = g;
 	pmu->flcn = &g->pmu_flcn;
 
-	if (g->ops.pmu.ecc_init != NULL && !g->ecc.initialized) {
+	if ((g->ops.pmu.ecc_init != NULL) && !g->ecc.initialized) {
 		err = g->ops.pmu.ecc_init(g);
 		if (err != 0) {
 			nvgpu_kfree(g, pmu);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -299,7 +299,7 @@ void nvgpu_rc_mmu_fault(struct gk20a *g, u32 act_eng_bitmask,
 	g->ops.fifo.recover(g, act_eng_bitmask,
 		id, id_type, rc_type, mmufault);
 #else
-	if (id != INVAL_ID && id_type == ID_TYPE_TSG) {
+	if ((id != INVAL_ID) && (id_type == ID_TYPE_TSG)) {
 		struct nvgpu_tsg *tsg = &g->fifo.tsg[id];
 		nvgpu_tsg_set_ctx_mmu_error(g, tsg);
 		nvgpu_tsg_mark_error(g, tsg);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -410,8 +410,8 @@ void nvgpu_rbtree_range_search(u64 key,
 	struct nvgpu_rbtree_node *curr = root;
 
 	while (curr != NULL) {
-		if (key >= curr->key_start &&
-				key < curr->key_end) {
+		if ((key >= curr->key_start) &&
+				(key < curr->key_end)) {
 			*node = curr;
 			return;
 		} else if (key < curr->key_start) {

@@ -43,7 +43,7 @@ int nvgpu_strnadd_u32(char *dst, const u32 value, size_t size, u32 radix)
 	char *p;
 	u32 digit;
 
-	if (radix < 2U || radix > 16U) {
+	if ((radix < 2U) || (radix > 16U)) {
 		return 0;
 	}
 
@@ -81,7 +81,7 @@ int nvgpu_strnadd_u32(char *dst, const u32 value, size_t size, u32 radix)
 
 bool nvgpu_mem_is_word_aligned(struct gk20a *g, u8 *addr)
 {
-	if ((unsigned long)addr % 4UL != 0UL) {
+	if (((unsigned long)addr % 4UL) != 0UL) {
 		nvgpu_log_info(g, "addr (%p) not 4-byte aligned", addr);
 		return false;
 	}