gpu: nvgpu: cond: use u32 for COND_WAIT timeout

The timeout parameter to the NVGPU_COND_WAIT and
NVGPU_COND_WAIT_INTERRUPTIBLE macros was weakly typed: any integer
type was accepted and silently converted. This updates the macros to
require a u32 for the timeout.

Callers of the macros are updated as necessary to stay compliant.

This addresses MISRA Rule 10.3 violations caused by implicit
conversions between types of different size or essential type.

JIRA NVGPU-1008

Change-Id: I12368dfa81b137c35bd056668c1867f03a73b7aa
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2017503
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:    Philip Elcan <pelcan@nvidia.com>
Date:      2019-02-08 13:25:07 -05:00
Committer: mobile promotions
Commit:    c02bccd6db (parent: 13f37f9c70)

16 changed files with 59 additions and 32 deletions
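
As background on the enforcement trick used below: a macro has no typed
parameters, so the updated macros declare a dead u32 *tmp initialized
from a NULL pointer cast to the argument's own type. If the caller
passes anything other than a u32, the two pointer types are
incompatible and the compiler warns (or errors under -Werror). The
following is a minimal standalone sketch of the idea, not code from
this change; REQUIRE_U32 is an illustrative name, and the typeof and
statement-expression extensions assume GCC or Clang.

#include <stdio.h>

typedef unsigned int u32;

#define REQUIRE_U32(x) \
({ \
	/* u32 * and typeof(x) * are incompatible unless x is a u32, */ \
	/* so this warns (e.g. -Wincompatible-pointer-types on GCC). */ \
	u32 *tmp = (typeof(x) *)NULL; \
	(void)tmp; \
	(x); \
})

int main(void)
{
	u32 ok = 5000U;

	printf("%u\n", REQUIRE_U32(ok)); /* compiles cleanly */
	/* REQUIRE_U32(5000L); would warn: incompatible pointer types */
	return 0;
}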

@@ -286,7 +286,7 @@ void gk20a_wait_until_counter_is_N(
 		if (NVGPU_COND_WAIT(
 			    c,
 			    nvgpu_atomic_read(counter) == wait_value,
-			    5000) == 0) {
+			    5000U) == 0) {
 			break;
 		}
@@ -1832,7 +1832,7 @@ static int gk20a_channel_poll_worker(void *arg)
 {
 	struct gk20a *g = (struct gk20a *)arg;
 	struct gk20a_worker *worker = &g->channel_worker;
-	unsigned long watchdog_interval = 100; /* milliseconds */
+	u32 watchdog_interval = 100; /* milliseconds */
 	struct nvgpu_timeout timeout;
 	int get = 0;

@@ -82,11 +82,11 @@ void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
 	NVGPU_COND_WAIT(&g->sw_irq_stall_last_handled_cond,
 		cyclic_delta(stall_irq_threshold,
 			nvgpu_atomic_read(&g->sw_irq_stall_last_handled))
-		<= 0, 0);
+		<= 0, 0U);
 	/* wait until all non-stalling irqs are handled */
 	NVGPU_COND_WAIT(&g->sw_irq_nonstall_last_handled_cond,
 		cyclic_delta(nonstall_irq_threshold,
 			nvgpu_atomic_read(&g->sw_irq_nonstall_last_handled))
-		<= 0, 0);
+		<= 0, 0U);
 }

@@ -271,7 +271,7 @@ static int nvgpu_vidmem_clear_pending_allocs_thr(void *mm_ptr)
 			nvgpu_thread_should_stop(
 				&mm->vidmem.clearing_thread) ||
 			!nvgpu_list_empty(&mm->vidmem.clear_list_head),
-			0);
+			0U);
 		if (ret == -ERESTARTSYS) {
 			continue;
 		}

@@ -474,7 +474,8 @@ static int nvgpu_clk_arb_poll_worker(void *arg)
 		ret = NVGPU_COND_WAIT_INTERRUPTIBLE(
 			&worker->wq,
 			nvgpu_clk_arb_worker_pending(g, get) ||
-			nvgpu_thread_should_stop(&worker->poll_task), 0);
+			nvgpu_thread_should_stop(&worker->poll_task),
+			0U);
 		if (nvgpu_thread_should_stop(&worker->poll_task)) {
 			break;

@@ -45,7 +45,7 @@ static int pmu_set_boot_clk_runcb_fn(void *arg)
 	while (true) {
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&vfe_init->wq,
 			(vfe_init->state_change ||
-			nvgpu_thread_should_stop(&vfe_init->state_task)), 0);
+			nvgpu_thread_should_stop(&vfe_init->state_task)), 0U);
 		if (nvgpu_thread_should_stop(&vfe_init->state_task)) {
 			break;
 		}

@@ -45,7 +45,7 @@ static int pmu_set_boot_clk_runcb_fn(void *arg)
 	while (true) {
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&vfe_init->wq,
 			(vfe_init->state_change ||
-			nvgpu_thread_should_stop(&vfe_init->state_task)), 0);
+			nvgpu_thread_should_stop(&vfe_init->state_task)), 0U);
 		if (nvgpu_thread_should_stop(&vfe_init->state_task)) {
 			break;
 		}

@@ -627,7 +627,7 @@ static int nvgpu_pg_init_task(void *arg)
 	while (true) {
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&pg_init->wq,
-			(pg_init->state_change == true), 0);
+			(pg_init->state_change == true), 0U);
 		pmu->pg_init.state_change = false;
 		pmu_state = NV_ACCESS_ONCE(pmu->pmu_state);

@@ -215,7 +215,7 @@ static int nvgpu_semaphore_fence_wait(struct gk20a_fence *f, long timeout)
 	return NVGPU_COND_WAIT_INTERRUPTIBLE(
 		f->semaphore_wq,
 		!nvgpu_semaphore_is_acquired(f->semaphore),
-		timeout);
+		(u32)timeout);
 }

 static bool nvgpu_semaphore_fence_is_expired(struct gk20a_fence *f)

@@ -3739,7 +3739,7 @@ int gk20a_init_gr_support(struct gk20a *g)
 /* Wait until GR is initialized */
 void gk20a_gr_wait_initialized(struct gk20a *g)
 {
-	NVGPU_COND_WAIT(&g->gr.init_wq, g->gr.initialized, 0);
+	NVGPU_COND_WAIT(&g->gr.init_wq, g->gr.initialized, 0U);
 }

 #define NVA297_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dcU

@@ -215,7 +215,7 @@ int gp10b_init_clk_arbiter(struct gk20a *g)
 		/* Check that first run is completed */
 		nvgpu_smp_mb();
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
-			nvgpu_atomic_read(&arb->req_nr) != 0, 0);
+			nvgpu_atomic_read(&arb->req_nr) != 0, 0U);
 	} while (nvgpu_atomic_read(&arb->req_nr) == 0);

 	/* Once the default request is completed, reduce the usage count */

@@ -262,7 +262,7 @@ int gv100_init_clk_arbiter(struct gk20a *g)
 		/* Check that first run is completed */
 		nvgpu_smp_mb();
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
-			nvgpu_atomic_read(&arb->req_nr), 0);
+			nvgpu_atomic_read(&arb->req_nr), 0U);
 	} while (nvgpu_atomic_read(&arb->req_nr) == 0);

 	return arb->status;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -30,7 +30,11 @@ struct nvgpu_cond {
  *
  * @c - The condition variable to sleep on
  * @condition - The condition that needs to be true
- * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait
+ * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait.
+ *               This parameter must be a u32. Since this is a macro, this is
+ *               enforced by assigning a typecast NULL pointer to a u32 tmp
+ *               variable which will generate a compiler warning (or error if
+ *               the warning is configured as an error).
  *
  * Wait for a condition to become true. Returns -ETIMEOUT if
  * the wait timed out with condition false.
@@ -38,10 +42,12 @@ struct nvgpu_cond {
 #define NVGPU_COND_WAIT(c, condition, timeout_ms) \
 ({\
 	int ret = 0; \
-	long _timeout_ms = timeout_ms;\
-	if (_timeout_ms > 0) { \
+	/* This is the assignment to enforce a u32 for timeout_ms */ \
+	u32 *tmp = (typeof(timeout_ms) *)NULL; \
+	(void)tmp; \
+	if (timeout_ms > 0U) { \
 		long _ret = wait_event_timeout((c)->wq, condition, \
-				msecs_to_jiffies(_timeout_ms)); \
+				msecs_to_jiffies(timeout_ms)); \
 		if (_ret == 0) \
 			ret = -ETIMEDOUT; \
 	} else { \
@@ -55,7 +61,11 @@ struct nvgpu_cond {
  *
  * @c - The condition variable to sleep on
  * @condition - The condition that needs to be true
- * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait
+ * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait.
+ *               This parameter must be a u32. Since this is a macro, this is
+ *               enforced by assigning a typecast NULL pointer to a u32 tmp
+ *               variable which will generate a compiler warning (or error if
+ *               the warning is configured as an error).
  *
  * Wait for a condition to become true. Returns -ETIMEOUT if
  * the wait timed out with condition false or -ERESTARTSYS on
@@ -64,10 +74,12 @@ struct nvgpu_cond {
 #define NVGPU_COND_WAIT_INTERRUPTIBLE(c, condition, timeout_ms) \
 ({ \
 	int ret = 0; \
-	long _timeout_ms = timeout_ms;\
-	if (_timeout_ms > 0) { \
-		long _ret = wait_event_interruptible_timeout((c)->wq, condition, \
-				msecs_to_jiffies(_timeout_ms)); \
+	/* This is the assignment to enforce a u32 for timeout_ms */ \
+	u32 *tmp = (typeof(timeout_ms) *)NULL; \
+	(void)tmp; \
+	if (timeout_ms > 0U) { \
+		long _ret = wait_event_interruptible_timeout((c)->wq, \
+				condition, msecs_to_jiffies(timeout_ms)); \
 		if (_ret == 0) \
 			ret = -ETIMEDOUT; \
 		else if (_ret == -ERESTARTSYS) \

@@ -39,7 +39,11 @@ struct nvgpu_cond {
  *
  * @c - The condition variable to sleep on
  * @condition - The condition that needs to be true
- * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait
+ * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait.
+ *               This parameter must be a u32. Since this is a macro, this is
+ *               enforced by assigning a typecast NULL pointer to a u32 tmp
+ *               variable which will generate a compiler warning (or error if
+ *               the warning is configured as an error).
  *
  * Wait for a condition to become true. Returns -ETIMEOUT if
  * the wait timed out with condition false.
@@ -48,14 +52,24 @@
 ({ \
 	int ret = 0; \
 	struct timespec ts; \
+	long tmp_timeout_ms; \
+	/* This is the assignment to enforce a u32 for timeout_ms */ \
+	u32 *tmp = (typeof(timeout_ms) *)NULL; \
+	(void)tmp; \
+	if ((sizeof(long) <= sizeof(u32)) && \
+	    ((timeout_ms) >= (u32)LONG_MAX)) { \
+		tmp_timeout_ms = LONG_MAX; \
+	} else { \
+		tmp_timeout_ms = (long)(timeout_ms); \
+	} \
 	nvgpu_mutex_acquire(&(c)->mutex); \
-	if (timeout_ms == 0) { \
+	if (tmp_timeout_ms == 0) { \
 		ret = pthread_cond_wait(&(c)->cond, \
 			&(c)->mutex.lock.mutex); \
 	} else { \
 		clock_gettime(CLOCK_REALTIME, &ts); \
-		ts.tv_sec += timeout_ms / 1000; \
-		ts.tv_nsec += (timeout_ms % 1000) * 1000000; \
+		ts.tv_sec += tmp_timeout_ms / 1000; \
+		ts.tv_nsec += (tmp_timeout_ms % 1000) * 1000000; \
 		if (ts.tv_nsec >= 1000000000) { \
 			ts.tv_sec += 1; \
 			ts.tv_nsec %= 1000000000; \
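
For reference, the saturation added in the POSIX variant above narrows
the u32 timeout into the long used for the timespec arithmetic; on an
ILP32 target (sizeof(long) == 4) a u32 can exceed LONG_MAX, so the
value is clamped first. Below is a hedged standalone sketch of just
that step, with clamp_timeout_ms as an illustrative name, not nvgpu
code.

#include <limits.h>

typedef unsigned int u32;

/* Mirror of the clamp in the macro: saturate before converting so a
 * large u32 timeout cannot overflow long on 32-bit targets. */
static long clamp_timeout_ms(u32 timeout_ms)
{
	if ((sizeof(long) <= sizeof(u32)) &&
	    (timeout_ms >= (u32)LONG_MAX)) {
		return LONG_MAX;
	}
	return (long)timeout_ms;
}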


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -109,7 +109,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
-			!ring_is_empty(hdr), 0);
+			!ring_is_empty(hdr), 0U);
 		if (err)
 			return err;
 		nvgpu_mutex_acquire(&dev->write_lock);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -203,7 +203,7 @@ static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf,
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
-			__pending_event(dev, &info), 0);
+			__pending_event(dev, &info), 0U);
 		if (err)
 			return err;
 		if (info.timestamp)

@@ -55,7 +55,7 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&sched->readout_wq,
-			sched->status, 0);
+			sched->status, 0U);
 		if (err)
 			return err;
 		nvgpu_mutex_acquire(&sched->status_lock);