mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 18:42:29 +03:00
gpu: nvgpu: clk_arb: fix MISRA 10.4 violations
MISRA Rule 10.4 requires that both operands of an operator in which the
usual arithmetic conversions are performed shall have the same essential
type category.

JIRA NVGPU-3159

Change-Id: I94857ab64ef0a9aab0cc3b0cc6c905ee14f917c2
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2104521
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
0e1e142aa9
commit
1a46d4efca
@@ -57,7 +57,7 @@ int nvgpu_clk_notification_queue_alloc(struct gk20a *g,
 
 void nvgpu_clk_notification_queue_free(struct gk20a *g,
 		struct nvgpu_clk_notification_queue *queue) {
-	if (queue->size > 0) {
+	if (queue->size > 0U) {
 		nvgpu_kfree(g, queue->notifications);
 		queue->size = 0;
 		nvgpu_atomic_set(&queue->head, 0);
@@ -72,7 +72,7 @@ static void nvgpu_clk_arb_queue_notification(struct gk20a *g,
 	u32 queue_index;
 	u64 timestamp;
 
-	queue_index = (nvgpu_atomic_inc_return(&queue->tail)) % queue->size;
+	queue_index = U32(nvgpu_atomic_inc_return(&queue->tail)) % queue->size;
 
 	/* get current timestamp */
 	timestamp = (u64) nvgpu_hr_timestamp();
@@ -94,8 +94,8 @@ void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm)
 		current_mask = (u64)nvgpu_atomic64_read(&arb->alarm_mask);
 		/* atomic operations are strong so they do not need masks */
 
-		refcnt = ((u32) (current_mask >> 32)) + 1;
-		alarm_mask = (u32) (current_mask & ~0) | alarm;
+		refcnt = ((u32) (current_mask >> 32)) + 1U;
+		alarm_mask = (u32) (current_mask & ~U32(0)) | alarm;
 		new_mask = ((u64) refcnt << 32) | alarm_mask;
 
 	} while (unlikely(current_mask !=
@@ -359,7 +359,7 @@ void nvgpu_clk_arb_clear_global_alarm(struct gk20a *g, u32 alarm)
 		current_mask = (u64)nvgpu_atomic64_read(&arb->alarm_mask);
 		/* atomic operations are strong so they do not need masks */
 
-		refcnt = ((u32) (current_mask >> 32)) + 1;
+		refcnt = ((u32) (current_mask >> 32)) + 1U;
 		alarm_mask = (u32) (current_mask & ~alarm);
 		new_mask = ((u64) refcnt << 32) | alarm_mask;
 
@@ -647,10 +647,10 @@ bool nvgpu_clk_arb_is_valid_domain(struct gk20a *g, u32 api_domain)
 
 	switch (api_domain) {
 	case NVGPU_CLK_DOMAIN_MCLK:
-		return (clk_domains & CTRL_CLK_DOMAIN_MCLK) != 0;
+		return (clk_domains & CTRL_CLK_DOMAIN_MCLK) != 0U;
 
 	case NVGPU_CLK_DOMAIN_GPCCLK:
-		return (clk_domains & CTRL_CLK_DOMAIN_GPCCLK) != 0;
+		return (clk_domains & CTRL_CLK_DOMAIN_GPCCLK) != 0U;
 
 	default:
 		return false;
@@ -91,7 +91,7 @@ struct nvgpu_clk_session;
 #define LOCAL_ALARM_MASK (EVENT(ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE) | \
 				EVENT(VF_UPDATE))
 
-#define _WRAPGTEQ(a, b) ((a-b) > 0)
+#define _WRAPGTEQ(a, b) ((a-b) > (typeof(a))0)
 
 /*
  * NVGPU_POLL* defines equivalent to the POLL* linux defines
Reference in New Issue
Block a user