From a2ce1dfd370a55abb0030c7e51bf9a1158953854 Mon Sep 17 00:00:00 2001
From: Philip Elcan
Date: Tue, 22 Jan 2019 15:50:38 -0500
Subject: [PATCH] gpu: nvgpu: clk: casts for atomic ops in clk_arb

Add the appropriate casts for the atomic ops in clk_arb.c to
eliminate a number of MISRA 10.3 violations.

JIRA NVGPU-1008

Change-Id: Ie098969584734f366901f8b2aaf1e2788fc18753
Signed-off-by: Philip Elcan
Reviewed-on: https://git-master.nvidia.com/r/2001230
Reviewed-by: svc-mobile-coverity
Reviewed-by: svc-mobile-misra
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/pmu/clk/clk_arb.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/pmu/clk/clk_arb.c b/drivers/gpu/nvgpu/common/pmu/clk/clk_arb.c
index d057ba9fd..ba6a20390 100644
--- a/drivers/gpu/nvgpu/common/pmu/clk/clk_arb.c
+++ b/drivers/gpu/nvgpu/common/pmu/clk/clk_arb.c
@@ -92,7 +92,7 @@ void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm)
 	u64 new_mask;
 
 	do {
-		current_mask = nvgpu_atomic64_read(&arb->alarm_mask);
+		current_mask = (u64)nvgpu_atomic64_read(&arb->alarm_mask);
 		/* atomic operations are strong so they do not need masks */
 
 		refcnt = ((u32) (current_mask >> 32)) + 1;
@@ -101,7 +101,7 @@ void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm)
 
 	} while (unlikely(current_mask !=
 		(u64)nvgpu_atomic64_cmpxchg(&arb->alarm_mask,
-			current_mask, new_mask)));
+			(long int)current_mask, (long int)new_mask)));
 
 	nvgpu_clk_arb_queue_notification(g, &arb->notification_queue, alarm);
 }
@@ -261,14 +261,14 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 	size_t size;
 	int index;
 
-	enabled_mask = nvgpu_atomic_read(&dev->enabled_mask);
+	enabled_mask = (u32)nvgpu_atomic_read(&dev->enabled_mask);
 	size = arb->notification_queue.size;
 
 	/* queue global arbiter notifications in buffer */
 	do {
-		tail = nvgpu_atomic_read(&arb->notification_queue.tail);
+		tail = (u32)nvgpu_atomic_read(&arb->notification_queue.tail);
 		/* copy items to the queue */
-		queue_index = nvgpu_atomic_read(&dev->queue.tail);
+		queue_index = (u32)nvgpu_atomic_read(&dev->queue.tail);
 		head = dev->arb_queue_head;
 		head = (tail - head) < arb->notification_queue.size ?
 			head : tail - arb->notification_queue.size;
@@ -298,7 +298,7 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 	} while (unlikely(nvgpu_atomic_read(&arb->notification_queue.tail) !=
 			(int)tail));
 
-	nvgpu_atomic_set(&dev->queue.tail, queue_index);
+	nvgpu_atomic_set(&dev->queue.tail, (int)queue_index);
 	/* update the last notification we processed from global queue */
 
 	dev->arb_queue_head = tail;
@@ -336,7 +336,7 @@ u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 	}
 
 	if (poll_mask) {
-		nvgpu_atomic_set(&dev->poll_mask, poll_mask);
+		nvgpu_atomic_set(&dev->poll_mask, (int)poll_mask);
 		nvgpu_clk_arb_event_post_event(dev);
 	}
 
@@ -353,7 +353,7 @@ void nvgpu_clk_arb_clear_global_alarm(struct gk20a *g, u32 alarm)
 	u64 new_mask;
 
 	do {
-		current_mask = nvgpu_atomic64_read(&arb->alarm_mask);
+		current_mask = (u64)nvgpu_atomic64_read(&arb->alarm_mask);
 		/* atomic operations are strong so they do not need masks */
 
 		refcnt = ((u32) (current_mask >> 32)) + 1;
@@ -362,7 +362,7 @@ void nvgpu_clk_arb_clear_global_alarm(struct gk20a *g, u32 alarm)
 
 	} while (unlikely(current_mask !=
 		(u64)nvgpu_atomic64_cmpxchg(&arb->alarm_mask,
-			current_mask, new_mask)));
+			(long int)current_mask, (long int)new_mask)));
 }
 
 /*
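
For reviewers, a minimal standalone sketch of the MISRA C:2012 Rule 10.3
pattern these casts address. Rule 10.3 forbids assigning an expression to an
object of a different essential type category (here signed <-> unsigned)
without an explicit cast. The stub type and functions below
(atomic64_stub_t, atomic64_read_stub(), atomic64_cmpxchg_stub()) are
hypothetical stand-ins for the nvgpu atomic wrappers, which are assumed here
to take and return signed long values:

    #include <stdint.h>

    typedef struct { long v; } atomic64_stub_t;

    /* Stand-in for nvgpu_atomic64_read(): returns a signed long. */
    static long atomic64_read_stub(const atomic64_stub_t *a)
    {
            return a->v;
    }

    /* Stand-in for nvgpu_atomic64_cmpxchg(): signed long arguments. */
    static long atomic64_cmpxchg_stub(atomic64_stub_t *a, long old,
                                      long new_val)
    {
            long cur = a->v;

            if (cur == old) {
                    a->v = new_val;
            }
            return cur;
    }

    static void rule_10_3_example(atomic64_stub_t *mask)
    {
            uint64_t current_mask;

            /* Non-compliant: implicit long (signed) -> uint64_t (unsigned):
             * current_mask = atomic64_read_stub(mask);
             */

            /* Compliant: the essential-type conversion is made explicit. */
            current_mask = (uint64_t)atomic64_read_stub(mask);

            /* Same idea for the cmpxchg arguments: unsigned -> signed. */
            (void)atomic64_cmpxchg_stub(mask, (long)current_mask,
                                        (long)(current_mask | 1UL));
    }

The value and bit pattern are unchanged in each case; the casts only make
the conversion visible to the MISRA checker, which is why the patch is
functionally a no-op.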