mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a
boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I91baa5cf1d38081161336bde5fbc06661b741273
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807133
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by: mobile promotions
parent: 2eface802a
commit: cf7850ee33
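The same transformation is applied throughout the diff below: an integer or pointer that was tested implicitly as a boolean gets an explicit comparison against 0, 0U, NULL, or 0ULL, and flags that only ever hold true/false are retyped as bool. As a rough sketch of the idea (hypothetical names, not code from this commit), a check written like the "before" function violates MISRA C:2012 Rule 10.1, and the "after" function shows the compliant form used here:

#include <stdbool.h>
#include <stddef.h>

struct obj {
    int ref_count;
};

/* Before: non-boolean operands (pointer, int, unsigned int) used
 * directly in boolean contexts; flagged by MISRA C:2012 Rule 10.1. */
static bool is_idle_before(const struct obj *o, unsigned int threshold)
{
    return !o || (!o->ref_count && threshold);
}

/* After: every operand of '!', '&&' and the condition itself is made
 * essentially boolean by an explicit comparison, mirroring the diff. */
static bool is_idle_after(const struct obj *o, unsigned int threshold)
{
    return (o == NULL) ||
           ((o->ref_count == 0) && (threshold != 0U));
}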
@@ -75,13 +75,13 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 ch = nvgpu_list_first_entry(&f->free_chs, channel_gk20a,
 free_chs);
 nvgpu_list_del(&ch->free_chs);
-WARN_ON(nvgpu_atomic_read(&ch->ref_count));
+WARN_ON(nvgpu_atomic_read(&ch->ref_count) != 0);
 WARN_ON(ch->referenceable);
 f->used_channels++;
 }
 nvgpu_mutex_release(&f->free_chs_mutex);

-if (g->aggressive_sync_destroy_thresh &&
+if ((g->aggressive_sync_destroy_thresh != 0U) &&
 (f->used_channels >
 g->aggressive_sync_destroy_thresh)) {
 g->aggressive_sync_destroy = true;
@@ -108,7 +108,7 @@ static void free_channel(struct fifo_gk20a *f,
 * this is fine then because no new channels would be created.
 */
 if (!nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING)) {
-if (g->aggressive_sync_destroy_thresh &&
+if ((g->aggressive_sync_destroy_thresh != 0U) &&
 (f->used_channels <
 g->aggressive_sync_destroy_thresh)) {
 g->aggressive_sync_destroy = false;
@@ -251,7 +251,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 }

 nvgpu_usleep_range(1000, 3000);
-} while (!nvgpu_timeout_expired(&timeout));
+} while (nvgpu_timeout_expired(&timeout) == 0);

 if (!channel_idle) {
 nvgpu_err(ch->g, "jobs not freed for channel %d",
@@ -469,7 +469,7 @@ unbind:
 ch->vpr = false;
 ch->vm = NULL;

-WARN_ON(ch->sync);
+WARN_ON(ch->sync != NULL);

 /* unlink all debug sessions */
 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -799,7 +799,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,

 nvgpu_log_fn(c->g, "size %d", orig_size);

-if (!e) {
+if (e == NULL) {
 nvgpu_err(c->g,
 "ch %d: priv cmd entry is null",
 c->chid);
@@ -889,7 +889,7 @@ int channel_gk20a_alloc_job(struct channel_gk20a *c,
 } else {
 *job_out = nvgpu_kzalloc(c->g,
 sizeof(struct channel_gk20a_job));
-if (!*job_out) {
+if (*job_out == NULL) {
 err = -ENOMEM;
 }
 }
@@ -1004,7 +1004,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 size_t size;
 struct priv_cmd_entry *entries = NULL;

-if (channel_gk20a_is_prealloc_enabled(c) || !num_jobs) {
+if ((channel_gk20a_is_prealloc_enabled(c)) || (num_jobs == 0U)) {
 return -EINVAL;
 }

@@ -1018,7 +1018,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 c->joblist.pre_alloc.jobs = nvgpu_vzalloc(c->g,
 num_jobs * size);
 }
-if (!c->joblist.pre_alloc.jobs) {
+if (c->joblist.pre_alloc.jobs == NULL) {
 err = -ENOMEM;
 goto clean_up;
 }
@@ -1032,7 +1032,7 @@ static int channel_gk20a_prealloc_resources(struct channel_gk20a *c,
 if (num_jobs <= ULONG_MAX / (size << 1)) {
 entries = nvgpu_vzalloc(c->g, (num_jobs << 1) * size);
 }
-if (!entries) {
+if (entries == NULL) {
 err = -ENOMEM;
 goto clean_up_joblist;
 }
@@ -1172,7 +1172,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 if (c->gpfifo.mem.aperture == APERTURE_VIDMEM) {
 c->gpfifo.pipe = nvgpu_big_malloc(g,
 gpfifo_size * gpfifo_entry_size);
-if (!c->gpfifo.pipe) {
+if (c->gpfifo.pipe == NULL) {
 err = -ENOMEM;
 goto clean_up_unmap;
 }
@@ -1188,10 +1188,10 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,

 g->ops.fifo.setup_userd(c);

-if (!g->aggressive_sync_destroy_thresh) {
+if (g->aggressive_sync_destroy_thresh == 0U) {
 nvgpu_mutex_acquire(&c->sync_lock);
 c->sync = gk20a_channel_sync_create(c, false);
-if (!c->sync) {
+if (c->sync == NULL) {
 err = -ENOMEM;
 nvgpu_mutex_release(&c->sync_lock);
 goto clean_up_unmap;
@@ -1433,7 +1433,7 @@ void gk20a_channel_timeout_restart_all_channels(struct gk20a *g)
 for (chid = 0; chid < f->num_channels; chid++) {
 struct channel_gk20a *ch = &f->channel[chid];

-if (!gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) == NULL) {
 continue;
 }

@@ -1483,7 +1483,7 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 return;
 }

-if (!nvgpu_timeout_peek_expired(&ch->timeout.timer)) {
+if (nvgpu_timeout_peek_expired(&ch->timeout.timer) == 0) {
 /* Seems stuck but waiting to time out */
 return;
 }
@@ -1637,7 +1637,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
 }
 nvgpu_spinlock_release(&g->channel_worker.items_lock);

-if (!ch) {
+if (ch == NULL) {
 /*
 * Woke up for some other reason, but there are no
 * other reasons than a channel added in the items list
@@ -1776,7 +1776,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 /*
 * Warn if worker thread cannot run
 */
-if (WARN_ON(__nvgpu_channel_worker_start(g))) {
+if (WARN_ON(__nvgpu_channel_worker_start(g) != 0)) {
 nvgpu_warn(g, "channel worker cannot run!");
 return;
 }
@@ -1788,7 +1788,7 @@ static void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 * the time we end up here (e.g., if the client got killed); if so, just
 * return.
 */
-if (!gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) == NULL) {
 nvgpu_info(g, "cannot get ch ref for worker!");
 return;
 }
@@ -1814,7 +1814,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
 struct priv_cmd_queue *q = &c->priv_cmd_q;
 struct gk20a *g = c->g;

-if (!e) {
+if (e == NULL) {
 return 0;
 }

@@ -1906,11 +1906,11 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 struct vm_gk20a *vm;
 struct channel_gk20a_job *job;
 struct gk20a *g;
-int job_finished = 0;
+bool job_finished = false;
 bool watchdog_on = false;

 c = gk20a_channel_get(c);
-if (!c) {
+if (c == NULL) {
 return;
 }

@@ -1970,9 +1970,9 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 break;
 }

-WARN_ON(!c->sync);
+WARN_ON(c->sync == NULL);

-if (c->sync) {
+if (c->sync != NULL) {
 if (c->has_os_fence_framework_support &&
 g->os_channel.os_fence_framework_inst_exists(c)) {
 g->os_channel.signal_os_fence_framework(c);
@@ -2024,7 +2024,7 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 nvgpu_smp_wmb();

 channel_gk20a_free_job(c, job);
-job_finished = 1;
+job_finished = true;

 /*
 * Deterministic channels have a channel-wide power reference;
@@ -2042,7 +2042,8 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,

 nvgpu_mutex_release(&c->joblist.cleanup_lock);

-if (job_finished && g->os_channel.work_completion_signal) {
+if ((job_finished) &&
+(g->os_channel.work_completion_signal != NULL)) {
 g->os_channel.work_completion_signal(c);
 }

@@ -2089,7 +2090,7 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
 for (chid = 0; chid < f->num_channels; chid++) {
 struct channel_gk20a *ch = &f->channel[chid];

-if (!gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) == NULL) {
 continue;
 }

@@ -2127,7 +2128,7 @@ void gk20a_channel_deterministic_unidle(struct gk20a *g)
 for (chid = 0; chid < f->num_channels; chid++) {
 struct channel_gk20a *ch = &f->channel[chid];

-if (!gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) == NULL) {
 continue;
 }

@@ -2237,7 +2238,7 @@ int gk20a_channel_suspend(struct gk20a *g)

 for (chid = 0; chid < f->num_channels; chid++) {
 struct channel_gk20a *ch = &f->channel[chid];
-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 nvgpu_log_info(g, "suspend channel %d", chid);
 /* disable channel */
 gk20a_disable_channel_tsg(g, ch);
@@ -98,7 +98,7 @@ void gm20b_priv_ring_isr(struct gk20a *g)
 /* poll for clear interrupt done */
 cmd = pri_ringmaster_command_cmd_v(
 gk20a_readl(g, pri_ringmaster_command_r()));
-while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && retry) {
+while ((cmd != pri_ringmaster_command_cmd_no_cmd_v()) && (retry != 0)) {
 nvgpu_udelay(20);
 retry--;
 cmd = pri_ringmaster_command_cmd_v(
@@ -181,7 +181,7 @@ void gp10b_priv_ring_isr(struct gk20a *g)
 }

 status1 = status1 & (~(BIT(gpc)));
-if (!status1) {
+if (status1 == 0U) {
 break;
 }
 }
@@ -196,7 +196,7 @@ void gp10b_priv_ring_isr(struct gk20a *g)
 /* poll for clear interrupt done */
 cmd = pri_ringmaster_command_cmd_v(
 gk20a_readl(g, pri_ringmaster_command_r()));
-while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && retry) {
+while ((cmd != pri_ringmaster_command_cmd_no_cmd_v()) && (retry != 0)) {
 nvgpu_udelay(20);
 cmd = pri_ringmaster_command_cmd_v(
 gk20a_readl(g, pri_ringmaster_command_r()));
@@ -46,7 +46,7 @@ void gk20a_ptimer_isr(struct gk20a *g)
 nvgpu_err(g, "PRI timeout: ADR 0x%08x "
 "%s DATA 0x%08x",
 timer_pri_timeout_save_0_addr_v(save0) << 2,
-timer_pri_timeout_save_0_write_v(save0) ?
+(timer_pri_timeout_save_0_write_v(save0) != 0U) ?
 "WRITE" : "READ", save1);

 gk20a_writel(g, timer_pri_timeout_save_0_r(), 0);
@@ -67,7 +67,7 @@ int gk20a_read_ptimer(struct gk20a *g, u64 *value)
 unsigned int i = 0;
 u32 gpu_timestamp_hi_prev = 0;

-if (!value) {
+if (value == NULL) {
 return -EINVAL;
 }

@@ -96,7 +96,7 @@ static void insert_fixup(struct nvgpu_rbtree_node **root,
 if (x->parent == x->parent->parent->left) {
 struct nvgpu_rbtree_node *y = x->parent->parent->right;

-if (y && y->is_red) {
+if ((y != NULL) && (y->is_red)) {
 /* uncle is RED */
 x->parent->is_red = false;
 y->is_red = false;
@@ -119,7 +119,7 @@ static void insert_fixup(struct nvgpu_rbtree_node **root,
 /* mirror image of above code */
 struct nvgpu_rbtree_node *y = x->parent->parent->left;

-if (y && y->is_red) {
+if ((y != NULL) && (y->is_red)) {
 /* uncle is RED */
 x->parent->is_red = false;
 y->is_red = false;
@@ -189,7 +189,7 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 struct nvgpu_rbtree_node *parent_of_x,
 struct nvgpu_rbtree_node *x)
 {
-while ((x != *root) && (!x || !x->is_red)) {
+while ((x != *root) && ((x == NULL) || (!x->is_red))) {
 /*
 * NULL nodes are sentinel nodes. If we delete a sentinel
 * node (x==NULL) it must have a parent node (or be the root).
@@ -200,21 +200,21 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 if ((parent_of_x != NULL) && (x == parent_of_x->left)) {
 struct nvgpu_rbtree_node *w = parent_of_x->right;

-if (w && w->is_red) {
+if ((w != NULL) && (w->is_red)) {
 w->is_red = false;
 parent_of_x->is_red = true;
 rotate_left(root, parent_of_x);
 w = parent_of_x->right;
 }

-if (!w || ((!w->left || !w->left->is_red)
-&& (!w->right || !w->right->is_red))) {
-if (w) {
+if ((w == NULL) || (((w->left == NULL) || (!w->left->is_red)) &&
+((w->right == NULL) || (!w->right->is_red)))) {
+if (w != NULL) {
 w->is_red = true;
 }
 x = parent_of_x;
 } else {
-if (!w->right || !w->right->is_red) {
+if ((w->right == NULL) || (!w->right->is_red)) {
 w->left->is_red = false;
 w->is_red = true;
 rotate_right(root, w);
@@ -229,21 +229,21 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 } else if (parent_of_x != NULL) {
 struct nvgpu_rbtree_node *w = parent_of_x->left;

-if (w && w->is_red) {
+if ((w != NULL) && (w->is_red)) {
 w->is_red = false;
 parent_of_x->is_red = true;
 rotate_right(root, parent_of_x);
 w = parent_of_x->left;
 }

-if (!w || ((!w->right || !w->right->is_red)
-&& (!w->left || !w->left->is_red))) {
-if (w) {
+if ((w == NULL) || (((w->right == NULL) || (!w->right->is_red)) &&
+((w->left == NULL) || (!w->left->is_red)))) {
+if (w != NULL) {
 w->is_red = true;
 }
 x = parent_of_x;
 } else {
-if (!w->left || !w->left->is_red) {
+if ((w->left == NULL) || (!w->left->is_red)) {
 w->right->is_red = false;
 w->is_red = true;
 rotate_left(root, w);
@@ -259,7 +259,7 @@ static void _delete_fixup(struct nvgpu_rbtree_node **root,
 parent_of_x = x->parent;
 }

-if (x) {
+if (x != NULL) {
 x->is_red = false;
 }
 }
@@ -276,7 +276,7 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,
 z = node;

 /* unlink */
-if (!z->left || !z->right) {
+if ((z->left == NULL) || (z->right == NULL)) {
 /* y has a SENTINEL node as a child */
 y = z;
 } else {
@@ -296,7 +296,7 @@ void nvgpu_rbtree_unlink(struct nvgpu_rbtree_node *node,

 /* remove y from the parent chain */
 parent_of_x = y->parent;
-if (x) {
+if (x != NULL) {
 x->parent = parent_of_x;
 }

@@ -431,7 +431,7 @@ void nvgpu_rbtree_enum_next(struct nvgpu_rbtree_node **node,
 {
 struct nvgpu_rbtree_node *curr = NULL;

-if (root && *node) {
+if ((root != NULL) && (*node != NULL)) {
 /* if we don't have a right subtree return the parent */
 curr = *node;

@@ -90,7 +90,7 @@ out:

 void nvgpu_semaphore_sea_destroy(struct gk20a *g)
 {
-if (!g->sema_sea) {
+if (g->sema_sea == NULL) {
 return;
 }

@@ -111,7 +111,7 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
 }

 g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea));
-if (!g->sema_sea) {
+if (g->sema_sea == NULL) {
 return NULL;
 }

@@ -163,7 +163,7 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
 int ret;

 p = nvgpu_kzalloc(sea->gk20a, sizeof(*p));
-if (!p) {
+if (p == NULL) {
 return -ENOMEM;
 }

@@ -234,13 +234,13 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 p->sema_sea->map_size,
 0, gk20a_mem_flag_read_only, 0,
 p->sema_sea->sea_mem.aperture);
-if (!addr) {
+if (addr == 0ULL) {
 err = -ENOMEM;
 goto fail_unlock;
 }

 p->gpu_va_ro = addr;
-p->mapped = 1;
+p->mapped = true;

 gpu_sema_dbg(pool_to_gk20a(p),
 " %d: GPU read-only VA = 0x%llx",
@@ -262,7 +262,7 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 gk20a_mem_flag_none, 0,
 p->rw_mem.aperture);

-if (!addr) {
+if (addr == 0ULL) {
 err = -ENOMEM;
 goto fail_free_submem;
 }
@@ -305,7 +305,7 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,

 p->gpu_va = 0;
 p->gpu_va_ro = 0;
-p->mapped = 0;
+p->mapped = false;

 __unlock_sema_sea(p->sema_sea);

@@ -324,7 +324,9 @@ static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref)
 struct nvgpu_semaphore_sea *s = p->sema_sea;

 /* Freeing a mapped pool is a bad idea. */
-WARN_ON(p->mapped || p->gpu_va || p->gpu_va_ro);
+WARN_ON((p->mapped) ||
+(p->gpu_va != 0ULL) ||
+(p->gpu_va_ro != 0ULL));

 __lock_sema_sea(s);
 nvgpu_list_del(&p->pool_list_entry);
@@ -370,7 +372,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 struct nvgpu_semaphore_pool *p = ch->vm->sema_pool;
 int current_value;

-BUG_ON(!p);
+BUG_ON(p == NULL);

 nvgpu_mutex_acquire(&p->pool_lock);

@@ -383,7 +385,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 }

 hw_sema = nvgpu_kzalloc(ch->g, sizeof(struct nvgpu_semaphore_int));
-if (!hw_sema) {
+if (hw_sema == NULL) {
 ret = -ENOMEM;
 goto fail_free_idx;
 }
@@ -416,7 +418,7 @@ void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch)
 struct nvgpu_semaphore_int *hw_sema = ch->hw_sema;
 int idx = hw_sema->location.offset / SEMAPHORE_SIZE;

-BUG_ON(!p);
+BUG_ON(p == NULL);

 nvgpu_mutex_acquire(&p->pool_lock);

@@ -439,7 +441,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
 struct nvgpu_semaphore *s;
 int ret;

-if (!ch->hw_sema) {
+if (ch->hw_sema == NULL) {
 ret = __nvgpu_init_hw_sema(ch);
 if (ret) {
 return NULL;
@@ -447,7 +449,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
 }

 s = nvgpu_kzalloc(ch->g, sizeof(*s));
-if (!s) {
+if (s == NULL) {
 return NULL;
 }

@@ -619,7 +621,7 @@ void nvgpu_semaphore_prepare(struct nvgpu_semaphore *s,
 WARN_ON(s->incremented);

 nvgpu_atomic_set(&s->value, next);
-s->incremented = 1;
+s->incremented = true;

 gpu_sema_verbose_dbg(s->g, "INCR sema for c=%d (%u)",
 hw_sema->ch->chid, next);
@@ -44,8 +44,8 @@ int gp106_get_internal_sensor_curr_temp(struct gk20a *g, u32 *temp_f24_8)

 readval = gk20a_readl(g, therm_temp_sensor_tsense_r());

-if (!(therm_temp_sensor_tsense_state_v(readval) &
-therm_temp_sensor_tsense_state_valid_v())) {
+if ((therm_temp_sensor_tsense_state_v(readval) &
+therm_temp_sensor_tsense_state_valid_v()) == 0U) {
 nvgpu_err(g,
 "Attempt to read temperature while sensor is OFF!");
 err = -EINVAL;
@@ -291,7 +291,7 @@ int nvgpu_bios_parse_rom(struct gk20a *g)
 bool found = false;
 unsigned int i;

-while (!last) {
+while (last == 0) {
 struct pci_exp_rom *pci_rom;
 struct pci_data_struct *pci_data;
 struct pci_ext_data_struct *pci_ext_data;
@@ -790,7 +790,7 @@ s8 nvgpu_bios_read_s8(struct gk20a *g, u32 offset)
 {
 u32 val;
 val = __nvgpu_bios_readbyte(g, offset);
-val = val & 0x80U ? (val | ~0xffU) : val;
+val = ((val & 0x80U) != 0U) ? (val | ~0xffU) : val;

 return (s8) val;
 }
@@ -827,7 +827,7 @@ static void nvgpu_bios_init_xmemsel_zm_nv_reg_array(struct gk20a *g, bool *condi

 strap = gk20a_readl(g, gc6_sci_strap_r()) & 0xfU;

-index = g->bios.mem_strap_xlat_tbl_ptr ?
+index = (g->bios.mem_strap_xlat_tbl_ptr != 0U) ?
 nvgpu_bios_read_u8(g, g->bios.mem_strap_xlat_tbl_ptr +
 strap) : strap;

@@ -111,7 +111,7 @@ int xve_get_speed_gp106(struct gk20a *g, u32 *xve_link_speed)
 if (link_speed == xve_link_control_status_link_speed_link_speed_8p0_v())
 real_link_speed = GPU_XVE_SPEED_8P0;

-if (!real_link_speed)
+if (real_link_speed == 0U)
 return -ENODEV;

 *xve_link_speed = real_link_speed;
@@ -147,7 +147,7 @@ static void set_xve_l0s_mask(struct gk20a *g, bool status)
 static void set_xve_l1_mask(struct gk20a *g, int status)
 {
 u32 xve_priv;
-u32 status_bit = status ? 1 : 0;
+u32 status_bit = (status != 0) ? 1 : 0;

 xve_priv = g->ops.xve.xve_readl(g, xve_priv_xv_r());

@@ -242,7 +242,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 (xp_pl_link_config_ltssm_directive_f(pl_link_config) ==
 xp_pl_link_config_ltssm_directive_normal_operations_v()))
 break;
-} while (!nvgpu_timeout_expired(&timeout));
+} while (nvgpu_timeout_expired(&timeout) == 0);

 if (nvgpu_timeout_peek_expired(&timeout)) {
 err_status = -ETIMEDOUT;
@@ -313,7 +313,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 if (pl_link_config ==
 gk20a_readl(g, xp_pl_link_config_r(0)))
 break;
-} while (!nvgpu_timeout_expired(&timeout));
+} while (nvgpu_timeout_expired(&timeout) == 0);

 if (nvgpu_timeout_peek_expired(&timeout)) {
 err_status = -ETIMEDOUT;
@@ -348,7 +348,7 @@ static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 (xp_pl_link_config_ltssm_directive_f(pl_link_config) ==
 xp_pl_link_config_ltssm_directive_normal_operations_v()))
 break;
-} while (!nvgpu_timeout_expired(&timeout));
+} while (nvgpu_timeout_expired(&timeout) == 0);

 if (nvgpu_timeout_peek_expired(&timeout)) {
 err_status = -ETIMEDOUT;
@@ -330,7 +330,7 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte);
 */
 #define pte_dbg(g, attrs, fmt, args...) \
 do { \
-if (attrs && attrs->debug) \
+if ((attrs != NULL) && (attrs->debug)) \
 nvgpu_info(g, fmt, ##args); \
 else \
 nvgpu_log(g, gpu_dbg_pte, fmt, ##args); \
@@ -22,6 +22,7 @@

 #ifndef NVGPU_LIST_H
 #define NVGPU_LIST_H
+#include <nvgpu/types.h>

 struct nvgpu_list_node {
 struct nvgpu_list_node *prev;
@@ -57,7 +58,7 @@ static inline void nvgpu_list_del(struct nvgpu_list_node *node)
 nvgpu_init_list_node(node);
 }

-static inline int nvgpu_list_empty(struct nvgpu_list_node *head)
+static inline bool nvgpu_list_empty(struct nvgpu_list_node *head)
 {
 return head->next == head;
 }
@@ -72,7 +72,7 @@ struct nvgpu_semaphore {
 struct nvgpu_semaphore_loc location;

 nvgpu_atomic_t value;
-int incremented;
+bool incremented;

 struct nvgpu_ref ref;
 };
@@ -99,7 +99,7 @@ struct nvgpu_semaphore_pool {
 */
 struct nvgpu_mem rw_mem;

-int mapped;
+bool mapped;

 /*
 * Sometimes a channel can be released before other channels are