Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: cond: use u32 for COND_WAIT timeout
The type for the timeout parameter to the NVGPU_COND_WAIT and
NVGPU_COND_WAIT_INTERRUPTIBLE macros was too weak. This updates these
macros to require a u32 for the timeout. Users of the macros are
updated to be compliant as necessary.

This addresses MISRA 10.3 violations for implicit conversions of types
of different size or essential type.

JIRA NVGPU-1008

Change-Id: I12368dfa81b137c35bd056668c1867f03a73b7aa
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2017503
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 13f37f9c70
Commit: c02bccd6db
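Background on the mechanism: the macros updated below enforce the u32 requirement at compile time by initializing a u32 pointer from a NULL pointer cast (via GNU C typeof) to the caller's argument type. A minimal standalone sketch of that idiom follows; REQUIRE_U32() and example() are hypothetical names used only for illustration, not code from this change.

/* Sketch of the compile-time check the macros now perform on timeout_ms.
 * REQUIRE_U32() is a hypothetical name; the real macros inline this check. */
typedef unsigned int u32;

#define REQUIRE_U32(x) \
do { \
	u32 *tmp = (typeof(x) *)NULL; /* incompatible unless x is a u32 */ \
	(void)tmp; \
} while (0)

static void example(void)
{
	u32 ok = 5000U;
	unsigned long bad = 5000UL;

	REQUIRE_U32(ok);	/* accepted: both sides are u32 * */
	REQUIRE_U32(bad);	/* warning: incompatible pointer types (error under -Werror) */
	(void)ok;
	(void)bad;
}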
@@ -286,7 +286,7 @@ void gk20a_wait_until_counter_is_N(
 		if (NVGPU_COND_WAIT(
 			c,
 			nvgpu_atomic_read(counter) == wait_value,
-			5000) == 0) {
+			5000U) == 0) {
 			break;
 		}
 
@@ -1832,7 +1832,7 @@ static int gk20a_channel_poll_worker(void *arg)
 {
 	struct gk20a *g = (struct gk20a *)arg;
 	struct gk20a_worker *worker = &g->channel_worker;
-	unsigned long watchdog_interval = 100; /* milliseconds */
+	u32 watchdog_interval = 100; /* milliseconds */
 	struct nvgpu_timeout timeout;
 	int get = 0;
 
@@ -82,11 +82,11 @@ void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
 	NVGPU_COND_WAIT(&g->sw_irq_stall_last_handled_cond,
 		cyclic_delta(stall_irq_threshold,
 			nvgpu_atomic_read(&g->sw_irq_stall_last_handled))
-		<= 0, 0);
+		<= 0, 0U);
 
 	/* wait until all non-stalling irqs are handled */
 	NVGPU_COND_WAIT(&g->sw_irq_nonstall_last_handled_cond,
 		cyclic_delta(nonstall_irq_threshold,
 			nvgpu_atomic_read(&g->sw_irq_nonstall_last_handled))
-		<= 0, 0);
+		<= 0, 0U);
 }
@@ -271,7 +271,7 @@ static int nvgpu_vidmem_clear_pending_allocs_thr(void *mm_ptr)
 			nvgpu_thread_should_stop(
 				&mm->vidmem.clearing_thread) ||
 			!nvgpu_list_empty(&mm->vidmem.clear_list_head),
-			0);
+			0U);
 		if (ret == -ERESTARTSYS) {
 			continue;
 		}
@@ -474,7 +474,8 @@ static int nvgpu_clk_arb_poll_worker(void *arg)
 		ret = NVGPU_COND_WAIT_INTERRUPTIBLE(
 				&worker->wq,
 				nvgpu_clk_arb_worker_pending(g, get) ||
-				nvgpu_thread_should_stop(&worker->poll_task), 0);
+				nvgpu_thread_should_stop(&worker->poll_task),
+				0U);
 
 		if (nvgpu_thread_should_stop(&worker->poll_task)) {
 			break;
@@ -45,7 +45,7 @@ static int pmu_set_boot_clk_runcb_fn(void *arg)
 	while (true) {
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&vfe_init->wq,
 			(vfe_init->state_change ||
-			nvgpu_thread_should_stop(&vfe_init->state_task)), 0);
+			nvgpu_thread_should_stop(&vfe_init->state_task)), 0U);
 		if (nvgpu_thread_should_stop(&vfe_init->state_task)) {
 			break;
 		}
@@ -45,7 +45,7 @@ static int pmu_set_boot_clk_runcb_fn(void *arg)
 	while (true) {
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&vfe_init->wq,
 			(vfe_init->state_change ||
-			nvgpu_thread_should_stop(&vfe_init->state_task)), 0);
+			nvgpu_thread_should_stop(&vfe_init->state_task)), 0U);
 		if (nvgpu_thread_should_stop(&vfe_init->state_task)) {
 			break;
 		}
@@ -627,7 +627,7 @@ static int nvgpu_pg_init_task(void *arg)
 	while (true) {
 
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&pg_init->wq,
-			(pg_init->state_change == true), 0);
+			(pg_init->state_change == true), 0U);
 
 		pmu->pg_init.state_change = false;
 		pmu_state = NV_ACCESS_ONCE(pmu->pmu_state);
@@ -215,7 +215,7 @@ static int nvgpu_semaphore_fence_wait(struct gk20a_fence *f, long timeout)
 	return NVGPU_COND_WAIT_INTERRUPTIBLE(
 		f->semaphore_wq,
 		!nvgpu_semaphore_is_acquired(f->semaphore),
-		timeout);
+		(u32)timeout);
 }
 
 static bool nvgpu_semaphore_fence_is_expired(struct gk20a_fence *f)
@@ -3739,7 +3739,7 @@ int gk20a_init_gr_support(struct gk20a *g)
 /* Wait until GR is initialized */
 void gk20a_gr_wait_initialized(struct gk20a *g)
 {
-	NVGPU_COND_WAIT(&g->gr.init_wq, g->gr.initialized, 0);
+	NVGPU_COND_WAIT(&g->gr.init_wq, g->gr.initialized, 0U);
 }
 
 #define NVA297_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dcU
@@ -215,7 +215,7 @@ int gp10b_init_clk_arbiter(struct gk20a *g)
 		/* Check that first run is completed */
 		nvgpu_smp_mb();
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
-			nvgpu_atomic_read(&arb->req_nr) != 0, 0);
+			nvgpu_atomic_read(&arb->req_nr) != 0, 0U);
 	} while (nvgpu_atomic_read(&arb->req_nr) == 0);
 
 	/* Once the default request is completed, reduce the usage count */
@@ -262,7 +262,7 @@ int gv100_init_clk_arbiter(struct gk20a *g)
 		/* Check that first run is completed */
 		nvgpu_smp_mb();
 		NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
-			nvgpu_atomic_read(&arb->req_nr), 0);
+			nvgpu_atomic_read(&arb->req_nr), 0U);
 	} while (nvgpu_atomic_read(&arb->req_nr) == 0);
 	return arb->status;
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -30,7 +30,11 @@ struct nvgpu_cond {
  *
  * @c - The condition variable to sleep on
  * @condition - The condition that needs to be true
- * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait
+ * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait.
+ *               This parameter must be a u32. Since this is a macro, this is
+ *               enforced by assigning a typecast NULL pointer to a u32 tmp
+ *               variable which will generate a compiler warning (or error if
+ *               the warning is configured as an error).
  *
  * Wait for a condition to become true. Returns -ETIMEOUT if
  * the wait timed out with condition false.
@@ -38,10 +42,12 @@ struct nvgpu_cond {
 #define NVGPU_COND_WAIT(c, condition, timeout_ms) \
 ({\
 	int ret = 0; \
-	long _timeout_ms = timeout_ms;\
-	if (_timeout_ms > 0) { \
+	/* This is the assignment to enforce a u32 for timeout_ms */ \
+	u32 *tmp = (typeof(timeout_ms) *)NULL; \
+	(void)tmp; \
+	if (timeout_ms > 0U) { \
 		long _ret = wait_event_timeout((c)->wq, condition, \
-				msecs_to_jiffies(_timeout_ms)); \
+				msecs_to_jiffies(timeout_ms)); \
 		if (_ret == 0) \
 			ret = -ETIMEDOUT; \
 	} else { \
@@ -55,7 +61,11 @@ struct nvgpu_cond {
  *
  * @c - The condition variable to sleep on
  * @condition - The condition that needs to be true
- * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait
+ * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait.
+ *               This parameter must be a u32. Since this is a macro, this is
+ *               enforced by assigning a typecast NULL pointer to a u32 tmp
+ *               variable which will generate a compiler warning (or error if
+ *               the warning is configured as an error).
  *
  * Wait for a condition to become true. Returns -ETIMEOUT if
  * the wait timed out with condition false or -ERESTARTSYS on
@@ -64,10 +74,12 @@ struct nvgpu_cond {
 #define NVGPU_COND_WAIT_INTERRUPTIBLE(c, condition, timeout_ms) \
 ({ \
 	int ret = 0; \
-	long _timeout_ms = timeout_ms;\
-	if (_timeout_ms > 0) { \
-		long _ret = wait_event_interruptible_timeout((c)->wq, condition, \
-				msecs_to_jiffies(_timeout_ms)); \
+	/* This is the assignment to enforce a u32 for timeout_ms */ \
+	u32 *tmp = (typeof(timeout_ms) *)NULL; \
+	(void)tmp; \
+	if (timeout_ms > 0U) { \
+		long _ret = wait_event_interruptible_timeout((c)->wq, \
+				condition, msecs_to_jiffies(timeout_ms)); \
 		if (_ret == 0) \
 			ret = -ETIMEDOUT; \
 		else if (_ret == -ERESTARTSYS) \
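With both Linux macros updated, callers must pass the timeout as a u32 expression, which is why the hunks above switch literals to a U suffix and timeout variables to u32. A hypothetical caller-side sketch for context (wait_for_work, worker_cond and work_pending are illustrative names, not from this patch):

/* Hypothetical caller; only the u32 timeout handling mirrors the patch. */
static int wait_for_work(struct gk20a *g)
{
	u32 timeout_ms = 100U;	/* previously this would have been unsigned long */
	int err;

	err = NVGPU_COND_WAIT(&g->worker_cond, work_pending(g), timeout_ms);
	if (err == -ETIMEDOUT) {
		/* condition did not become true within timeout_ms */
	}
	return err;
}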
@@ -39,7 +39,11 @@ struct nvgpu_cond {
  *
  * @c - The condition variable to sleep on
  * @condition - The condition that needs to be true
- * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait
+ * @timeout_ms - Timeout in milliseconds, or 0 for infinite wait.
+ *               This parameter must be a u32. Since this is a macro, this is
+ *               enforced by assigning a typecast NULL pointer to a u32 tmp
+ *               variable which will generate a compiler warning (or error if
+ *               the warning is configured as an error).
  *
  * Wait for a condition to become true. Returns -ETIMEOUT if
  * the wait timed out with condition false.
@@ -48,14 +52,24 @@ struct nvgpu_cond {
 ({ \
 	int ret = 0; \
 	struct timespec ts; \
+	long tmp_timeout_ms; \
+	/* This is the assignment to enforce a u32 for timeout_ms */ \
+	u32 *tmp = (typeof(timeout_ms) *)NULL; \
+	(void)tmp; \
+	if ((sizeof(long) <= sizeof(u32)) && \
+		((timeout_ms) >= (u32)LONG_MAX)) { \
+		tmp_timeout_ms = LONG_MAX; \
+	} else { \
+		tmp_timeout_ms = (long)(timeout_ms); \
+	} \
 	nvgpu_mutex_acquire(&(c)->mutex); \
-	if (timeout_ms == 0) { \
+	if (tmp_timeout_ms == 0) { \
 		ret = pthread_cond_wait(&(c)->cond, \
 				&(c)->mutex.lock.mutex); \
 	} else { \
 		clock_gettime(CLOCK_REALTIME, &ts); \
-		ts.tv_sec += timeout_ms / 1000; \
-		ts.tv_nsec += (timeout_ms % 1000) * 1000000; \
+		ts.tv_sec += tmp_timeout_ms / 1000; \
+		ts.tv_nsec += (tmp_timeout_ms % 1000) * 1000000; \
 		if (ts.tv_nsec >= 1000000000) { \
 			ts.tv_sec += 1; \
 			ts.tv_nsec %= 1000000000; \
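Note on the POSIX variant above: because timeout_ms is now unsigned, it is first clamped and converted to a signed long before the timespec arithmetic, so a full-range u32 cannot overflow long on builds where long is 32 bits. The same clamp restated as a plain helper, purely for readability (a sketch, not code from the patch):

#include <limits.h>

typedef unsigned int u32;

/* Sketch: the clamp the macro performs before converting ms to a timespec. */
static long clamp_timeout_ms(u32 timeout_ms)
{
	if ((sizeof(long) <= sizeof(u32)) && (timeout_ms >= (u32)LONG_MAX))
		return LONG_MAX;	/* avoid signed overflow when long is 32-bit */
	return (long)timeout_ms;
}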
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -109,7 +109,7 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
 	if (filp->f_flags & O_NONBLOCK)
 		return -EAGAIN;
 	err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
-		!ring_is_empty(hdr), 0);
+		!ring_is_empty(hdr), 0U);
 	if (err)
 		return err;
 	nvgpu_mutex_acquire(&dev->write_lock);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -203,7 +203,7 @@ static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf,
 	if (filp->f_flags & O_NONBLOCK)
 		return -EAGAIN;
 	err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
-		__pending_event(dev, &info), 0);
+		__pending_event(dev, &info), 0U);
 	if (err)
 		return err;
 	if (info.timestamp)
@@ -55,7 +55,7 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
 	if (filp->f_flags & O_NONBLOCK)
 		return -EAGAIN;
 	err = NVGPU_COND_WAIT_INTERRUPTIBLE(&sched->readout_wq,
-		sched->status, 0);
+		sched->status, 0U);
 	if (err)
 		return err;
 	nvgpu_mutex_acquire(&sched->status_lock);