mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 02:22:34 +03:00
gpu: nvgpu: fifo: misra 12.1 fixes
MISRA Advisory Rule 12.1 states that the precedence of operators within expressions should be made explicit. This change removes the Advisory Rule 12.1 violations from fifo code. Jira NVGPU-3178 Change-Id: I487d039c5be8024b21ec87d520d86763f9338d2a Signed-off-by: Scott Long <scottl@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2276793 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
Alex Waterman
parent
08e52125e3
commit
3b4b418330
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* GK20A Graphics channel
|
||||
*
|
||||
* Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -1981,7 +1981,7 @@ NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 2, NVGPU_MISRA(Rule, 15_6), "Bug
|
||||
/* Also, more puts than gets. ref_count can go to 0 only if
|
||||
* the channel is closing. Channel is probably going to get
|
||||
* stuck. */
|
||||
WARN_ON(nvgpu_atomic_read(&ch->ref_count) == 0 && ch->referenceable);
|
||||
WARN_ON((nvgpu_atomic_read(&ch->ref_count) == 0) && ch->referenceable);
|
||||
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 10_3))
|
||||
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 14_4))
|
||||
NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -773,11 +773,11 @@ u32 nvgpu_engine_get_mask_on_id(struct gk20a *g, u32 id, bool is_tsg)
|
||||
|
||||
busy = engine_status.is_busy;
|
||||
|
||||
if (busy && ctx_id == id) {
|
||||
if ((is_tsg && type ==
|
||||
ENGINE_STATUS_CTX_ID_TYPE_TSGID) ||
|
||||
(!is_tsg && type ==
|
||||
ENGINE_STATUS_CTX_ID_TYPE_CHID)) {
|
||||
if (busy && (ctx_id == id)) {
|
||||
if ((is_tsg && (type ==
|
||||
ENGINE_STATUS_CTX_ID_TYPE_TSGID)) ||
|
||||
(!is_tsg && (type ==
|
||||
ENGINE_STATUS_CTX_ID_TYPE_CHID))) {
|
||||
engines |= BIT32(engine_id);
|
||||
}
|
||||
}
|
||||
@@ -944,7 +944,7 @@ u32 nvgpu_engine_get_runlist_busy_engines(struct gk20a *g, u32 runlist_id)
|
||||
&engine_status);
|
||||
engine_busy = engine_status.is_busy;
|
||||
|
||||
if (engine_busy && engine_runlist == runlist_id) {
|
||||
if (engine_busy && (engine_runlist == runlist_id)) {
|
||||
eng_bitmask |= BIT32(engine_id);
|
||||
}
|
||||
}
|
||||
@@ -1004,9 +1004,9 @@ u32 nvgpu_engine_mmu_fault_id_to_veid(struct gk20a *g, u32 mmu_fault_id,
|
||||
|
||||
num_subctx = f->max_subctx_count;
|
||||
|
||||
if (mmu_fault_id >= gr_eng_fault_id &&
|
||||
mmu_fault_id < nvgpu_safe_add_u32(gr_eng_fault_id,
|
||||
num_subctx)) {
|
||||
if ((mmu_fault_id >= gr_eng_fault_id) &&
|
||||
(mmu_fault_id < nvgpu_safe_add_u32(gr_eng_fault_id,
|
||||
num_subctx))) {
|
||||
veid = mmu_fault_id - gr_eng_fault_id;
|
||||
}
|
||||
|
||||
|
||||
@@ -427,7 +427,7 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
|
||||
|
||||
runlist = f->runlist_info[runlist_id];
|
||||
/* double buffering, swap to next */
|
||||
buf_id = runlist->cur_buffer == 0U ? 1U : 0U;
|
||||
buf_id = (runlist->cur_buffer == 0U) ? 1U : 0U;
|
||||
|
||||
ret = gk20a_runlist_reconstruct_locked(g, runlist_id, buf_id,
|
||||
add_entries);
|
||||
@@ -851,7 +851,8 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
|
||||
struct nvgpu_fifo *f = &g->fifo;
|
||||
struct nvgpu_runlist_info *runlist;
|
||||
|
||||
bool bitmask_disabled = (act_eng_bitmask == 0U && pbdma_bitmask == 0U);
|
||||
bool bitmask_disabled = ((act_eng_bitmask == 0U) &&
|
||||
(pbdma_bitmask == 0U));
|
||||
|
||||
/* engine and/or pbdma ids are known */
|
||||
if (!bitmask_disabled) {
|
||||
|
||||
@@ -791,9 +791,9 @@ void nvgpu_tsg_release(struct nvgpu_ref *ref)
|
||||
struct nvgpu_tsg *tsg = tsg_gk20a_from_ref(ref);
|
||||
struct gk20a *g = tsg->g;
|
||||
|
||||
if (tsg->gr_ctx != NULL && nvgpu_mem_is_valid(
|
||||
nvgpu_gr_ctx_get_ctx_mem(tsg->gr_ctx)) &&
|
||||
tsg->vm != NULL) {
|
||||
if ((tsg->gr_ctx != NULL) &&
|
||||
nvgpu_mem_is_valid(nvgpu_gr_ctx_get_ctx_mem(tsg->gr_ctx)) &&
|
||||
(tsg->vm != NULL)) {
|
||||
g->ops.gr.setup.free_gr_ctx(g, tsg->vm, tsg->gr_ctx);
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user