nvgpu: gk20a: MISRA 10.1 boolean fixes

Fix violations where a non-boolean variable is used as a boolean
in gpu/nvgpu/gk20a.

JIRA NVGPU-646

Change-Id: Id02068c77f9385adb82c27ef1994a3f88499de48
Signed-off-by: Amurthyreddy <amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1829584
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Amurthyreddy
2018-09-18 10:44:05 +05:30
committed by mobile promotions
parent 1b2a0833e0
commit d522a2ddfc
33 changed files with 324 additions and 163 deletions

View File

@@ -218,7 +218,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH: case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH:
ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid, ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid,
0, level); 0, level);
if (!ret) { if (ret == 0) {
tsg->interleave_level = level; tsg->interleave_level = level;
} }
break; break;
@@ -227,7 +227,7 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
break; break;
} }
return ret ? ret : g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true); return (ret != 0) ? ret : g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true);
} }
int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
@@ -243,7 +243,7 @@ u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg)
{ {
struct gk20a *g = tsg->g; struct gk20a *g = tsg->g;
if (!tsg->timeslice_us) { if (tsg->timeslice_us == 0U) {
return g->ops.fifo.default_timeslice_us(g); return g->ops.fifo.default_timeslice_us(g);
} }

View File

@@ -144,7 +144,7 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
nvgpu_ref_put(&ce_ctx->tsg->refcount, gk20a_tsg_release); nvgpu_ref_put(&ce_ctx->tsg->refcount, gk20a_tsg_release);
/* housekeeping on app */ /* housekeeping on app */
if (list->prev && list->next) { if ((list->prev != NULL) && (list->next != NULL)) {
nvgpu_list_del(list); nvgpu_list_del(list);
} }
@@ -167,8 +167,8 @@ static inline unsigned int gk20a_ce_get_method_size(int request_operation,
while (chunk) { while (chunk) {
iterations++; iterations++;
shift = MAX_CE_ALIGN(chunk) ? __ffs(MAX_CE_ALIGN(chunk)) : shift = (MAX_CE_ALIGN(chunk) != 0ULL) ?
MAX_CE_SHIFT; __ffs(MAX_CE_ALIGN(chunk)) : MAX_CE_SHIFT;
width = chunk >> shift; width = chunk >> shift;
height = 1 << shift; height = 1 << shift;
width = MAX_CE_ALIGN(width); width = MAX_CE_ALIGN(width);
@@ -203,7 +203,7 @@ int gk20a_ce_prepare_submit(u64 src_buf,
/* failure case handling */ /* failure case handling */
if ((gk20a_ce_get_method_size(request_operation, size) > if ((gk20a_ce_get_method_size(request_operation, size) >
max_cmd_buf_size) || (!size) || max_cmd_buf_size) || (size == 0ULL) ||
(request_operation > NVGPU_CE_MEMSET)) { (request_operation > NVGPU_CE_MEMSET)) {
return 0; return 0;
} }
@@ -239,8 +239,8 @@ int gk20a_ce_prepare_submit(u64 src_buf,
* pix per line 2Gb * pix per line 2Gb
*/ */
shift = MAX_CE_ALIGN(chunk) ? __ffs(MAX_CE_ALIGN(chunk)) : shift = (MAX_CE_ALIGN(chunk) != 0ULL) ?
MAX_CE_SHIFT; __ffs(MAX_CE_ALIGN(chunk)) : MAX_CE_SHIFT;
height = chunk >> shift; height = chunk >> shift;
width = 1 << shift; width = 1 << shift;
height = MAX_CE_ALIGN(height); height = MAX_CE_ALIGN(height);
@@ -448,7 +448,7 @@ u32 gk20a_ce_create_context(struct gk20a *g,
} }
ce_ctx = nvgpu_kzalloc(g, sizeof(*ce_ctx)); ce_ctx = nvgpu_kzalloc(g, sizeof(*ce_ctx));
if (!ce_ctx) { if (ce_ctx == NULL) {
return ctx_id; return ctx_id;
} }
@@ -466,7 +466,7 @@ u32 gk20a_ce_create_context(struct gk20a *g,
/* allocate a tsg if needed */ /* allocate a tsg if needed */
ce_ctx->tsg = gk20a_tsg_open(g, nvgpu_current_pid(g)); ce_ctx->tsg = gk20a_tsg_open(g, nvgpu_current_pid(g));
if (!ce_ctx->tsg) { if (ce_ctx->tsg == NULL) {
nvgpu_err(g, "ce: gk20a tsg not available"); nvgpu_err(g, "ce: gk20a tsg not available");
err = -ENOMEM; err = -ENOMEM;
goto end; goto end;
@@ -475,7 +475,7 @@ u32 gk20a_ce_create_context(struct gk20a *g,
/* always kernel client needs privileged channel */ /* always kernel client needs privileged channel */
ce_ctx->ch = gk20a_open_new_channel(g, runlist_id, true, ce_ctx->ch = gk20a_open_new_channel(g, runlist_id, true,
nvgpu_current_pid(g), nvgpu_current_tid(g)); nvgpu_current_pid(g), nvgpu_current_tid(g));
if (!ce_ctx->ch) { if (ce_ctx->ch == NULL) {
nvgpu_err(g, "ce: gk20a channel not available"); nvgpu_err(g, "ce: gk20a channel not available");
err = -ENOMEM; err = -ENOMEM;
goto end; goto end;

View File

@@ -86,7 +86,7 @@ inline bool gk20a_fence_is_valid(struct gk20a_fence *f)
int gk20a_fence_install_fd(struct gk20a_fence *f, int fd) int gk20a_fence_install_fd(struct gk20a_fence *f, int fd)
{ {
if (!f || !gk20a_fence_is_valid(f) || if ((f == NULL) || !gk20a_fence_is_valid(f) ||
!nvgpu_os_fence_is_initialized(&f->os_fence)) { !nvgpu_os_fence_is_initialized(&f->os_fence)) {
return -EINVAL; return -EINVAL;
} }
@@ -99,7 +99,7 @@ int gk20a_fence_install_fd(struct gk20a_fence *f, int fd)
int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f, int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f,
unsigned long timeout) unsigned long timeout)
{ {
if (f && gk20a_fence_is_valid(f)) { if ((f != NULL) && gk20a_fence_is_valid(f)) {
if (!nvgpu_platform_is_silicon(g)) { if (!nvgpu_platform_is_silicon(g)) {
timeout = MAX_SCHEDULE_TIMEOUT; timeout = MAX_SCHEDULE_TIMEOUT;
} }
@@ -110,7 +110,7 @@ int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f,
bool gk20a_fence_is_expired(struct gk20a_fence *f) bool gk20a_fence_is_expired(struct gk20a_fence *f)
{ {
if (f && gk20a_fence_is_valid(f) && f->ops) { if ((f != NULL) && gk20a_fence_is_valid(f) && (f->ops != NULL)) {
return f->ops->is_expired(f); return f->ops->is_expired(f);
} else { } else {
return true; return true;
@@ -129,7 +129,7 @@ int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count)
fence_pool = nvgpu_vzalloc(c->g, size); fence_pool = nvgpu_vzalloc(c->g, size);
} }
if (!fence_pool) { if (fence_pool == NULL) {
return -ENOMEM; return -ENOMEM;
} }
@@ -190,7 +190,7 @@ void gk20a_init_fence(struct gk20a_fence *f,
const struct gk20a_fence_ops *ops, const struct gk20a_fence_ops *ops,
struct nvgpu_os_fence os_fence) struct nvgpu_os_fence os_fence)
{ {
if (!f) { if (f == NULL) {
return; return;
} }
f->ops = ops; f->ops = ops;
@@ -233,7 +233,7 @@ int gk20a_fence_from_semaphore(
struct gk20a_fence *f = fence_out; struct gk20a_fence *f = fence_out;
gk20a_init_fence(f, &nvgpu_semaphore_fence_ops, os_fence); gk20a_init_fence(f, &nvgpu_semaphore_fence_ops, os_fence);
if (!f) { if (f == NULL) {
return -EINVAL; return -EINVAL;
} }

View File

@@ -87,7 +87,9 @@ u32 gk20a_fifo_get_engine_ids(struct gk20a *g,
u32 active_engine_id = 0; u32 active_engine_id = 0;
struct fifo_engine_info_gk20a *info = NULL; struct fifo_engine_info_gk20a *info = NULL;
if (g && engine_id_sz && (engine_enum < ENGINE_INVAL_GK20A)) { if ((g != NULL) &&
(engine_id_sz != 0U) &&
(engine_enum < ENGINE_INVAL_GK20A)) {
f = &g->fifo; f = &g->fifo;
for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) { for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
active_engine_id = f->active_engines_list[engine_id_idx]; active_engine_id = f->active_engines_list[engine_id_idx];
@@ -113,7 +115,7 @@ struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 e
u32 engine_id_idx; u32 engine_id_idx;
struct fifo_engine_info_gk20a *info = NULL; struct fifo_engine_info_gk20a *info = NULL;
if (!g) { if (g == NULL) {
return info; return info;
} }
@@ -128,7 +130,7 @@ struct fifo_engine_info_gk20a *gk20a_fifo_get_engine_info(struct gk20a *g, u32 e
} }
} }
if (!info) { if (info == NULL) {
nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id); nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
} }
@@ -141,7 +143,7 @@ bool gk20a_fifo_is_valid_engine_id(struct gk20a *g, u32 engine_id)
u32 engine_id_idx; u32 engine_id_idx;
bool valid = false; bool valid = false;
if (!g) { if (g == NULL) {
return valid; return valid;
} }
@@ -172,7 +174,7 @@ u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g)
gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id, gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id,
1, ENGINE_GR_GK20A); 1, ENGINE_GR_GK20A);
if (!gr_engine_cnt) { if (gr_engine_cnt == 0U) {
nvgpu_err(g, "No GR engine available on this device!"); nvgpu_err(g, "No GR engine available on this device!");
} }
@@ -188,7 +190,7 @@ u32 gk20a_fifo_get_all_ce_engine_reset_mask(struct gk20a *g)
struct fifo_engine_info_gk20a *engine_info; struct fifo_engine_info_gk20a *engine_info;
u32 active_engine_id = 0; u32 active_engine_id = 0;
if (!g) { if (g == NULL) {
return reset_mask; return reset_mask;
} }
@@ -217,7 +219,7 @@ u32 gk20a_fifo_get_fast_ce_runlist_id(struct gk20a *g)
struct fifo_engine_info_gk20a *engine_info; struct fifo_engine_info_gk20a *engine_info;
u32 active_engine_id = 0; u32 active_engine_id = 0;
if (!g) { if (g == NULL) {
return ce_runlist_id; return ce_runlist_id;
} }
@@ -248,7 +250,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)
gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id, gr_engine_cnt = gk20a_fifo_get_engine_ids(g, &gr_engine_id,
1, ENGINE_GR_GK20A); 1, ENGINE_GR_GK20A);
if (!gr_engine_cnt) { if (gr_engine_cnt == 0U) {
nvgpu_err(g, nvgpu_err(g,
"No GR engine available on this device!"); "No GR engine available on this device!");
goto end; goto end;
@@ -274,7 +276,7 @@ bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id)
u32 active_engine_id; u32 active_engine_id;
struct fifo_engine_info_gk20a *engine_info; struct fifo_engine_info_gk20a *engine_info;
if (!g) { if (g == NULL) {
return false; return false;
} }
@@ -283,7 +285,8 @@ bool gk20a_fifo_is_valid_runlist_id(struct gk20a *g, u32 runlist_id)
for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) { for (engine_id_idx = 0; engine_id_idx < f->num_engines; ++engine_id_idx) {
active_engine_id = f->active_engines_list[engine_id_idx]; active_engine_id = f->active_engines_list[engine_id_idx];
engine_info = gk20a_fifo_get_engine_info(g, active_engine_id); engine_info = gk20a_fifo_get_engine_info(g, active_engine_id);
if (engine_info && (engine_info->runlist_id == runlist_id)) { if ((engine_info != NULL) &&
(engine_info->runlist_id == runlist_id)) {
return true; return true;
} }
} }
@@ -441,7 +444,8 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
} }
} }
if (!top_device_info_chain_v(table_entry)) { if (top_device_info_chain_v(table_entry) ==
top_device_info_chain_disable_v()) {
if (engine_enum < ENGINE_INVAL_GK20A) { if (engine_enum < ENGINE_INVAL_GK20A) {
struct fifo_engine_info_gk20a *info = struct fifo_engine_info_gk20a *info =
&g->fifo.engine_info[engine_id]; &g->fifo.engine_info[engine_id];
@@ -465,7 +469,8 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
info->engine_enum = engine_enum; info->engine_enum = engine_enum;
if (!fault_id && (engine_enum == ENGINE_GRCE_GK20A)) { if ((fault_id == 0U) &&
(engine_enum == ENGINE_GRCE_GK20A)) {
fault_id = 0x1b; fault_id = 0x1b;
} }
info->fault_id = fault_id; info->fault_id = fault_id;
@@ -508,8 +513,9 @@ u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g)
intr_mask = g->fifo.engine_info[active_engine_id].intr_mask; intr_mask = g->fifo.engine_info[active_engine_id].intr_mask;
engine_enum = g->fifo.engine_info[active_engine_id].engine_enum; engine_enum = g->fifo.engine_info[active_engine_id].engine_enum;
if (((engine_enum == ENGINE_GRCE_GK20A) || if (((engine_enum == ENGINE_GRCE_GK20A) ||
(engine_enum == ENGINE_ASYNC_CE_GK20A)) && (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
(!g->ops.ce2.isr_stall || !g->ops.ce2.isr_nonstall)) { ((g->ops.ce2.isr_stall == NULL) ||
(g->ops.ce2.isr_nonstall == NULL))) {
continue; continue;
} }
@@ -526,7 +532,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
struct fifo_runlist_info_gk20a *runlist; struct fifo_runlist_info_gk20a *runlist;
struct gk20a *g = NULL; struct gk20a *g = NULL;
if (!f || !f->runlist_info) { if ((f == NULL) || (f->runlist_info == NULL)) {
return; return;
} }
@@ -706,7 +712,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
f->runlist_info = nvgpu_kzalloc(g, f->runlist_info = nvgpu_kzalloc(g,
sizeof(struct fifo_runlist_info_gk20a) * sizeof(struct fifo_runlist_info_gk20a) *
f->max_runlists); f->max_runlists);
if (!f->runlist_info) { if (f->runlist_info == NULL) {
goto clean_up_runlist; goto clean_up_runlist;
} }
@@ -719,14 +725,14 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
runlist->active_channels = runlist->active_channels =
nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels, nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
BITS_PER_BYTE)); BITS_PER_BYTE));
if (!runlist->active_channels) { if (runlist->active_channels == NULL) {
goto clean_up_runlist; goto clean_up_runlist;
} }
runlist->active_tsgs = runlist->active_tsgs =
nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels, nvgpu_kzalloc(g, DIV_ROUND_UP(f->num_channels,
BITS_PER_BYTE)); BITS_PER_BYTE));
if (!runlist->active_tsgs) { if (runlist->active_tsgs == NULL) {
goto clean_up_runlist; goto clean_up_runlist;
} }
@@ -768,7 +774,8 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
active_engine_id = f->active_engines_list[engine_id]; active_engine_id = f->active_engines_list[engine_id];
engine_info = &f->engine_info[active_engine_id]; engine_info = &f->engine_info[active_engine_id];
if (engine_info && engine_info->runlist_id == runlist_id) { if ((engine_info != NULL) &&
(engine_info->runlist_id == runlist_id)) {
runlist->eng_bitmask |= BIT(active_engine_id); runlist->eng_bitmask |= BIT(active_engine_id);
} }
} }
@@ -939,8 +946,11 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
sizeof(*f->engine_info)); sizeof(*f->engine_info));
f->active_engines_list = nvgpu_kzalloc(g, f->max_engines * sizeof(u32)); f->active_engines_list = nvgpu_kzalloc(g, f->max_engines * sizeof(u32));
if (!(f->channel && f->tsg && f->pbdma_map && f->engine_info && if (!((f->channel != NULL) &&
f->active_engines_list)) { (f->tsg != NULL) &&
(f->pbdma_map != NULL) &&
(f->engine_info != NULL) &&
(f->active_engines_list != NULL))) {
err = -ENOMEM; err = -ENOMEM;
goto clean_up; goto clean_up;
} }
@@ -1130,7 +1140,7 @@ gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr)
{ {
struct fifo_gk20a *f = &g->fifo; struct fifo_gk20a *f = &g->fifo;
unsigned int ci; unsigned int ci;
if (unlikely(!f->channel)) { if (unlikely(f->channel == NULL)) {
return NULL; return NULL;
} }
for (ci = 0; ci < f->num_channels; ci++) { for (ci = 0; ci < f->num_channels; ci++) {
@@ -1139,7 +1149,7 @@ gk20a_refch_from_inst_ptr(struct gk20a *g, u64 inst_ptr)
ch = gk20a_channel_get(&f->channel[ci]); ch = gk20a_channel_get(&f->channel[ci]);
/* only alive channels are searched */ /* only alive channels are searched */
if (!ch) { if (ch == NULL) {
continue; continue;
} }
@@ -1263,11 +1273,11 @@ static void get_exception_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id,
mmfault->client_id_desc = does_not_exist[0]; mmfault->client_id_desc = does_not_exist[0];
if ((mmfault->client_type == if ((mmfault->client_type ==
fifo_intr_mmu_fault_info_engine_subid_hub_v()) fifo_intr_mmu_fault_info_engine_subid_hub_v())
&& g->ops.fifo.get_mmu_fault_client_desc) { && (g->ops.fifo.get_mmu_fault_client_desc != NULL)) {
g->ops.fifo.get_mmu_fault_client_desc(mmfault); g->ops.fifo.get_mmu_fault_client_desc(mmfault);
} else if ((mmfault->client_type == } else if ((mmfault->client_type ==
fifo_intr_mmu_fault_info_engine_subid_gpc_v()) fifo_intr_mmu_fault_info_engine_subid_gpc_v())
&& g->ops.fifo.get_mmu_fault_gpc_desc) { && (g->ops.fifo.get_mmu_fault_gpc_desc != NULL)) {
g->ops.fifo.get_mmu_fault_gpc_desc(mmfault); g->ops.fifo.get_mmu_fault_gpc_desc(mmfault);
} }
} }
@@ -1311,7 +1321,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
if (!g) { if (g == NULL) {
return; return;
} }
@@ -1393,7 +1403,7 @@ bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
enum fifo_engine engine_enum = ENGINE_INVAL_GK20A; enum fifo_engine engine_enum = ENGINE_INVAL_GK20A;
struct fifo_engine_info_gk20a *engine_info; struct fifo_engine_info_gk20a *engine_info;
if (!g) { if (g == NULL) {
return false; return false;
} }
@@ -1431,7 +1441,7 @@ static bool gk20a_fifo_ch_timeout_debug_dump_state(struct gk20a *g,
struct channel_gk20a *refch) struct channel_gk20a *refch)
{ {
bool verbose = true; bool verbose = true;
if (!refch) { if (refch == NULL) {
return verbose; return verbose;
} }
@@ -1560,7 +1570,7 @@ int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
} else { } else {
engines = gk20a_fifo_engines_on_id(g, ch->chid, false); engines = gk20a_fifo_engines_on_id(g, ch->chid, false);
} }
if (!engines) { if (engines == 0U) {
goto clean_up; goto clean_up;
} }
@@ -1724,7 +1734,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
refch = ch; refch = ch;
} }
if (ch && gk20a_is_channel_marked_as_tsg(ch)) { if ((ch != NULL) && gk20a_is_channel_marked_as_tsg(ch)) {
tsg = &g->fifo.tsg[ch->tsgid]; tsg = &g->fifo.tsg[ch->tsgid];
} }
@@ -1733,7 +1743,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
bool defer = gk20a_fifo_should_defer_engine_reset(g, bool defer = gk20a_fifo_should_defer_engine_reset(g,
engine_id, mmfault_info.client_type, engine_id, mmfault_info.client_type,
fake_fault); fake_fault);
if ((ch || tsg) && defer) { if (((ch != NULL) || (tsg != NULL)) && defer) {
g->fifo.deferred_fault_engines |= BIT(engine_id); g->fifo.deferred_fault_engines |= BIT(engine_id);
/* handled during channel free */ /* handled during channel free */
@@ -2165,7 +2175,7 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
goto fail_enable_tsg; goto fail_enable_tsg;
} }
if (g->ops.fifo.tsg_verify_channel_status && !tsg_timedout) { if ((g->ops.fifo.tsg_verify_channel_status != NULL) && !tsg_timedout) {
err = g->ops.fifo.tsg_verify_channel_status(ch); err = g->ops.fifo.tsg_verify_channel_status(ch);
if (err != 0) { if (err != 0) {
goto fail_enable_tsg; goto fail_enable_tsg;
@@ -2856,15 +2866,15 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
nvgpu_timeout_init(g, &timeout, gk20a_fifo_get_preempt_timeout(g), nvgpu_timeout_init(g, &timeout, gk20a_fifo_get_preempt_timeout(g),
NVGPU_TIMER_CPU_TIMER); NVGPU_TIMER_CPU_TIMER);
do { do {
if (!(gk20a_readl(g, fifo_preempt_r()) & if ((gk20a_readl(g, fifo_preempt_r()) &
fifo_preempt_pending_true_f())) { fifo_preempt_pending_true_f()) == 0U) {
ret = 0; ret = 0;
break; break;
} }
nvgpu_usleep_range(delay, delay * 2); nvgpu_usleep_range(delay, delay * 2);
delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout)); } while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) { if (ret != 0) {
nvgpu_err(g, "preempt timeout: id: %u id_type: %d ", nvgpu_err(g, "preempt timeout: id: %u id_type: %d ",
@@ -2886,7 +2896,7 @@ void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
nvgpu_rwsem_down_read(&tsg->ch_list_lock); nvgpu_rwsem_down_read(&tsg->ch_list_lock);
nvgpu_list_for_each_entry(ch, &tsg->ch_list, nvgpu_list_for_each_entry(ch, &tsg->ch_list,
channel_gk20a, ch_entry) { channel_gk20a, ch_entry) {
if (!gk20a_channel_get(ch)) { if (gk20a_channel_get(ch) == NULL) {
continue; continue;
} }
g->ops.fifo.set_error_notifier(ch, g->ops.fifo.set_error_notifier(ch,
@@ -2952,7 +2962,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
ret = __locked_fifo_preempt(g, chid, false); ret = __locked_fifo_preempt(g, chid, false);
if (!mutex_ret) { if (mutex_ret == 0U) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
} }
@@ -2996,7 +3006,7 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
ret = __locked_fifo_preempt(g, tsgid, true); ret = __locked_fifo_preempt(g, tsgid, true);
if (!mutex_ret) { if (mutex_ret == 0U) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
} }
@@ -3059,7 +3069,7 @@ void gk20a_fifo_set_runlist_state(struct gk20a *g, u32 runlists_mask,
gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state); gk20a_fifo_sched_disable_rw(g, runlists_mask, runlist_state);
if (!mutex_ret) { if (mutex_ret == 0U) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
} }
} }
@@ -3168,7 +3178,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
} }
clean_up: clean_up:
if (!mutex_ret) { if (mutex_ret == 0U) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
} }
@@ -3262,7 +3272,7 @@ int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
nvgpu_usleep_range(delay, delay * 2); nvgpu_usleep_range(delay, delay * 2);
delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout)); } while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) { if (ret != 0) {
nvgpu_err(g, "runlist wait timeout: runlist id: %u", nvgpu_err(g, "runlist wait timeout: runlist id: %u",
@@ -3392,7 +3402,7 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
} }
/* append entries from higher level if this level is empty */ /* append entries from higher level if this level is empty */
if (!count && !last_level) { if ((count == 0U) && !last_level) {
runlist_entry = gk20a_runlist_construct_locked(f, runlist_entry = gk20a_runlist_construct_locked(f,
runlist, runlist,
cur_level + 1, cur_level + 1,
@@ -3408,7 +3418,7 @@ u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
* *
* ex. dropping from MEDIUM to LOW, need to insert HIGH * ex. dropping from MEDIUM to LOW, need to insert HIGH
*/ */
if (interleave_enabled && count && !prev_empty && !last_level) { if (interleave_enabled && (count != 0U) && !prev_empty && !last_level) {
runlist_entry = gk20a_runlist_construct_locked(f, runlist_entry = gk20a_runlist_construct_locked(f,
runlist, runlist,
cur_level + 1, cur_level + 1,
@@ -3505,7 +3515,7 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
runlist->active_channels) == 1) { runlist->active_channels) == 1) {
return 0; return 0;
} }
if (tsg && ++tsg->num_active_channels) { if ((tsg != NULL) && (++tsg->num_active_channels != 0)) {
set_bit((int)f->channel[chid].tsgid, set_bit((int)f->channel[chid].tsgid,
runlist->active_tsgs); runlist->active_tsgs);
} }
@@ -3514,7 +3524,8 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
runlist->active_channels) == 0) { runlist->active_channels) == 0) {
return 0; return 0;
} }
if (tsg && --tsg->num_active_channels == 0U) { if ((tsg != NULL) &&
(--tsg->num_active_channels == 0U)) {
clear_bit((int)f->channel[chid].tsgid, clear_bit((int)f->channel[chid].tsgid,
runlist->active_tsgs); runlist->active_tsgs);
} }
@@ -3529,13 +3540,13 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx", nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx",
runlist_id, (u64)runlist_iova); runlist_id, (u64)runlist_iova);
if (!runlist_iova) { if (runlist_iova == 0ULL) {
ret = -EINVAL; ret = -EINVAL;
goto clean_up; goto clean_up;
} }
runlist_entry_base = runlist->mem[new_buf].cpu_va; runlist_entry_base = runlist->mem[new_buf].cpu_va;
if (!runlist_entry_base) { if (runlist_entry_base == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
goto clean_up; goto clean_up;
} }
@@ -3552,7 +3563,7 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
g->runlist_interleave, g->runlist_interleave,
true, true,
&max_entries); &max_entries);
if (!runlist_end) { if (runlist_end == NULL) {
ret = -E2BIG; ret = -E2BIG;
goto clean_up; goto clean_up;
} }
@@ -3593,7 +3604,7 @@ int gk20a_fifo_update_runlist_ids(struct gk20a *g, u32 runlist_ids, u32 chid,
int errcode; int errcode;
unsigned long ulong_runlist_ids = (unsigned long)runlist_ids; unsigned long ulong_runlist_ids = (unsigned long)runlist_ids;
if (!g) { if (g == NULL) {
goto end; goto end;
} }
@@ -3628,12 +3639,12 @@ static int __locked_fifo_reschedule_preempt_next(struct channel_gk20a *ch,
g, &gr_eng_id, 1, ENGINE_GR_GK20A)) { g, &gr_eng_id, 1, ENGINE_GR_GK20A)) {
return ret; return ret;
} }
if (!(runlist->eng_bitmask & (1 << gr_eng_id))) { if ((runlist->eng_bitmask & BIT32(gr_eng_id)) == 0U) {
return ret; return ret;
} }
if (wait_preempt && gk20a_readl(g, fifo_preempt_r()) & if (wait_preempt && ((gk20a_readl(g, fifo_preempt_r()) &
fifo_preempt_pending_true_f()) { fifo_preempt_pending_true_f()) != 0U)) {
return ret; return ret;
} }
@@ -3688,7 +3699,7 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
int ret = 0; int ret = 0;
runlist = &g->fifo.runlist_info[ch->runlist_id]; runlist = &g->fifo.runlist_info[ch->runlist_id];
if (!nvgpu_mutex_tryacquire(&runlist->runlist_lock)) { if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) {
return -EBUSY; return -EBUSY;
} }
@@ -3704,7 +3715,7 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next,
gk20a_fifo_runlist_wait_pending(g, ch->runlist_id); gk20a_fifo_runlist_wait_pending(g, ch->runlist_id);
if (!mutex_ret) { if (mutex_ret == 0U) {
nvgpu_pmu_mutex_release( nvgpu_pmu_mutex_release(
&g->pmu, PMU_MUTEX_ID_FIFO, &token); &g->pmu, PMU_MUTEX_ID_FIFO, &token);
} }
@@ -3737,7 +3748,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 chid,
ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add, ret = gk20a_fifo_update_runlist_locked(g, runlist_id, chid, add,
wait_for_finish); wait_for_finish);
if (!mutex_ret) { if (mutex_ret == 0U) {
nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
} }
@@ -3812,7 +3823,8 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g)
for (i = 0; i < host_num_engines; i++) { for (i = 0; i < host_num_engines; i++) {
do { do {
u32 status = gk20a_readl(g, fifo_engine_status_r(i)); u32 status = gk20a_readl(g, fifo_engine_status_r(i));
if (!fifo_engine_status_engine_v(status)) { if (fifo_engine_status_engine_v(status) ==
fifo_engine_status_engine_idle_v()) {
ret = 0; ret = 0;
break; break;
} }
@@ -3820,7 +3832,7 @@ int gk20a_fifo_wait_engine_idle(struct gk20a *g)
nvgpu_usleep_range(delay, delay * 2); nvgpu_usleep_range(delay, delay * 2);
delay = min_t(unsigned long, delay = min_t(unsigned long,
delay << 1, GR_IDLE_CHECK_MAX); delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout)); } while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) { if (ret != 0) {
nvgpu_log_info(g, "cannot idle engine %u", i); nvgpu_log_info(g, "cannot idle engine %u", i);
@@ -3936,7 +3948,7 @@ void gk20a_dump_channel_status_ramfc(struct gk20a *g,
hw_sema = c->hw_sema; hw_sema = c->hw_sema;
} }
if (!ch_state) { if (ch_state == NULL) {
return; return;
} }
@@ -3951,9 +3963,11 @@ void gk20a_dump_channel_status_ramfc(struct gk20a *g,
ch_state->refs, ch_state->refs,
ch_state->deterministic ? ", deterministic" : ""); ch_state->deterministic ? ", deterministic" : "");
gk20a_debug_output(o, "channel status: %s in use %s %s\n", gk20a_debug_output(o, "channel status: %s in use %s %s\n",
ccsr_channel_enable_v(channel) ? "" : "not", (ccsr_channel_enable_v(channel) ==
ccsr_channel_enable_in_use_v()) ? "" : "not",
gk20a_decode_ccsr_chan_status(status), gk20a_decode_ccsr_chan_status(status),
ccsr_channel_busy_v(channel) ? "busy" : "not busy"); (ccsr_channel_busy_v(channel) ==
ccsr_channel_busy_true_v()) ? "busy" : "not busy");
gk20a_debug_output(o, "RAMFC : TOP: %016llx PUT: %016llx GET: %016llx " gk20a_debug_output(o, "RAMFC : TOP: %016llx PUT: %016llx GET: %016llx "
"FETCH: %016llx\nHEADER: %08x COUNT: %08x\n" "FETCH: %016llx\nHEADER: %08x COUNT: %08x\n"
"SYNCPOINT %08x %08x SEMAPHORE %08x %08x %08x %08x\n", "SYNCPOINT %08x %08x SEMAPHORE %08x %08x %08x %08x\n",
@@ -4004,7 +4018,7 @@ void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
struct ch_state **ch_state; struct ch_state **ch_state;
ch_state = nvgpu_kzalloc(g, sizeof(*ch_state) * f->num_channels); ch_state = nvgpu_kzalloc(g, sizeof(*ch_state) * f->num_channels);
if (!ch_state) { if (ch_state == NULL) {
gk20a_debug_output(o, "cannot alloc memory for channels\n"); gk20a_debug_output(o, "cannot alloc memory for channels\n");
return; return;
} }
@@ -4017,7 +4031,7 @@ void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
ram_in_alloc_size_v()); ram_in_alloc_size_v());
/* ref taken stays to below loop with /* ref taken stays to below loop with
* successful allocs */ * successful allocs */
if (!ch_state[chid]) { if (ch_state[chid] == NULL) {
gk20a_channel_put(ch); gk20a_channel_put(ch);
} }
} }
@@ -4025,7 +4039,7 @@ void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
for (chid = 0; chid < f->num_channels; chid++) { for (chid = 0; chid < f->num_channels; chid++) {
struct channel_gk20a *ch = &f->channel[chid]; struct channel_gk20a *ch = &f->channel[chid];
if (!ch_state[chid]) { if (ch_state[chid] == NULL) {
continue; continue;
} }
@@ -4062,10 +4076,12 @@ void gk20a_dump_pbdma_status(struct gk20a *g,
gk20a_debug_output(o, gk20a_debug_output(o,
"id: %d (%s), next_id: %d (%s) chan status: %s\n", "id: %d (%s), next_id: %d (%s) chan status: %s\n",
fifo_pbdma_status_id_v(status), fifo_pbdma_status_id_v(status),
fifo_pbdma_status_id_type_v(status) ? (fifo_pbdma_status_id_type_v(status) ==
fifo_pbdma_status_id_type_tsgid_v()) ?
"tsg" : "channel", "tsg" : "channel",
fifo_pbdma_status_next_id_v(status), fifo_pbdma_status_next_id_v(status),
fifo_pbdma_status_next_id_type_v(status) ? (fifo_pbdma_status_next_id_type_v(status) ==
fifo_pbdma_status_next_id_type_tsgid_v()) ?
"tsg" : "channel", "tsg" : "channel",
gk20a_decode_pbdma_chan_eng_ctx_status(chan_status)); gk20a_decode_pbdma_chan_eng_ctx_status(chan_status));
gk20a_debug_output(o, "PBDMA_PUT: %016llx PBDMA_GET: %016llx " gk20a_debug_output(o, "PBDMA_PUT: %016llx PBDMA_GET: %016llx "
@@ -4102,10 +4118,12 @@ void gk20a_dump_eng_status(struct gk20a *g,
gk20a_debug_output(o, gk20a_debug_output(o,
"id: %d (%s), next_id: %d (%s), ctx status: %s ", "id: %d (%s), next_id: %d (%s), ctx status: %s ",
fifo_engine_status_id_v(status), fifo_engine_status_id_v(status),
fifo_engine_status_id_type_v(status) ? (fifo_engine_status_id_type_v(status) ==
fifo_engine_status_id_type_tsgid_v()) ?
"tsg" : "channel", "tsg" : "channel",
fifo_engine_status_next_id_v(status), fifo_engine_status_next_id_v(status),
fifo_engine_status_next_id_type_v(status) ? (fifo_engine_status_next_id_type_v(status) ==
fifo_engine_status_next_id_type_tsgid_v()) ?
"tsg" : "channel", "tsg" : "channel",
gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status)); gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status));
@@ -4337,7 +4355,7 @@ u32 gk20a_fifo_pbdma_acquire_val(u64 timeout)
val = pbdma_acquire_retry_man_2_f() | val = pbdma_acquire_retry_man_2_f() |
pbdma_acquire_retry_exp_2_f(); pbdma_acquire_retry_exp_2_f();
if (!timeout) { if (timeout == 0ULL) {
return val; return val;
} }

View File

@@ -98,9 +98,8 @@ static bool gk20a_is_falcon_cpu_halted(struct nvgpu_falcon *flcn)
struct gk20a *g = flcn->g; struct gk20a *g = flcn->g;
u32 base_addr = flcn->flcn_base; u32 base_addr = flcn->flcn_base;
return (gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()) & return ((gk20a_readl(g, base_addr + falcon_falcon_cpuctl_r()) &
falcon_falcon_cpuctl_halt_intr_m() ? falcon_falcon_cpuctl_halt_intr_m()) != 0U);
true : false);
} }
static bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn) static bool gk20a_is_falcon_idle(struct nvgpu_falcon *flcn)
@@ -422,9 +421,9 @@ static u32 gk20a_falcon_mailbox_read(struct nvgpu_falcon *flcn,
u32 data = 0; u32 data = 0;
if (mailbox_index < FALCON_MAILBOX_COUNT) { if (mailbox_index < FALCON_MAILBOX_COUNT) {
data = gk20a_readl(g, flcn->flcn_base + (mailbox_index ? data = gk20a_readl(g, flcn->flcn_base + (mailbox_index != 0U ?
falcon_falcon_mailbox1_r() : falcon_falcon_mailbox1_r() :
falcon_falcon_mailbox0_r())); falcon_falcon_mailbox0_r()));
} else { } else {
nvgpu_err(g, "incorrect mailbox id %d", mailbox_index); nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);
} }
@@ -438,10 +437,11 @@ static void gk20a_falcon_mailbox_write(struct nvgpu_falcon *flcn,
struct gk20a *g = flcn->g; struct gk20a *g = flcn->g;
if (mailbox_index < FALCON_MAILBOX_COUNT) { if (mailbox_index < FALCON_MAILBOX_COUNT) {
gk20a_writel(g, flcn->flcn_base + (mailbox_index ? gk20a_writel(g,
falcon_falcon_mailbox1_r() : flcn->flcn_base + (mailbox_index != 0U ?
falcon_falcon_mailbox0_r()), falcon_falcon_mailbox1_r() :
data); falcon_falcon_mailbox0_r()),
data);
} else { } else {
nvgpu_err(g, "incorrect mailbox id %d", mailbox_index); nvgpu_err(g, "incorrect mailbox id %d", mailbox_index);
} }

View File

@@ -138,7 +138,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
* Before probing the GPU make sure the GPU's state is cleared. This is * Before probing the GPU make sure the GPU's state is cleared. This is
* relevant for rebind operations. * relevant for rebind operations.
*/ */
if (g->ops.xve.reset_gpu && !g->gpu_reset_done) { if ((g->ops.xve.reset_gpu != NULL) && !g->gpu_reset_done) {
g->ops.xve.reset_gpu(g); g->ops.xve.reset_gpu(g);
g->gpu_reset_done = true; g->gpu_reset_done = true;
} }
@@ -356,7 +356,8 @@ int gk20a_finalize_poweron(struct gk20a *g)
} }
} }
if (g->ops.pmu_ver.clk.clk_set_boot_clk && nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) { if ((g->ops.pmu_ver.clk.clk_set_boot_clk != NULL) &&
nvgpu_is_enabled(g, NVGPU_PMU_PSTATE)) {
g->ops.pmu_ver.clk.clk_set_boot_clk(g); g->ops.pmu_ver.clk.clk_set_boot_clk(g);
} else { } else {
err = nvgpu_clk_arb_init_arbiter(g); err = nvgpu_clk_arb_init_arbiter(g);
@@ -392,7 +393,8 @@ int gk20a_finalize_poweron(struct gk20a *g)
if (g->ops.xve.available_speeds) { if (g->ops.xve.available_speeds) {
u32 speed; u32 speed;
if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_ASPM) && g->ops.xve.disable_aspm) { if (!nvgpu_is_enabled(g, NVGPU_SUPPORT_ASPM) &&
(g->ops.xve.disable_aspm != NULL)) {
g->ops.xve.disable_aspm(g); g->ops.xve.disable_aspm(g);
} }
@@ -446,7 +448,7 @@ int gk20a_wait_for_idle(struct gk20a *g)
int wait_length = 150; /* 3 second overall max wait. */ int wait_length = 150; /* 3 second overall max wait. */
int target_usage_count = 0; int target_usage_count = 0;
if (!g) { if (g == NULL) {
return -ENODEV; return -ENODEV;
} }
@@ -474,7 +476,7 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNC_FENCE_FDS, true); __nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNC_FENCE_FDS, true);
} }
if (g->ops.mm.support_sparse && g->ops.mm.support_sparse(g)) { if ((g->ops.mm.support_sparse != NULL) && g->ops.mm.support_sparse(g)) {
__nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true); __nvgpu_set_enabled(g, NVGPU_SUPPORT_SPARSE_ALLOCS, true);
} }
@@ -563,9 +565,9 @@ struct gk20a * __must_check gk20a_get(struct gk20a *g)
nvgpu_log(g, gpu_dbg_shutdown, "GET: refs currently %d %s", nvgpu_log(g, gpu_dbg_shutdown, "GET: refs currently %d %s",
nvgpu_atomic_read(&g->refcount.refcount), nvgpu_atomic_read(&g->refcount.refcount),
success ? "" : "(FAILED)"); (success != 0) ? "" : "(FAILED)");
return success ? g : NULL; return (success != 0) ? g : NULL;
} }
/** /**

View File

@@ -38,7 +38,7 @@ static int gr_gk20a_alloc_load_netlist_u32(struct gk20a *g, u32 *src, u32 len,
struct u32_list_gk20a *u32_list) struct u32_list_gk20a *u32_list)
{ {
u32_list->count = (len + sizeof(u32) - 1) / sizeof(u32); u32_list->count = (len + sizeof(u32) - 1) / sizeof(u32);
if (!alloc_u32_list_gk20a(g, u32_list)) { if (alloc_u32_list_gk20a(g, u32_list) == NULL) {
return -ENOMEM; return -ENOMEM;
} }
@@ -51,7 +51,7 @@ static int gr_gk20a_alloc_load_netlist_av(struct gk20a *g, u32 *src, u32 len,
struct av_list_gk20a *av_list) struct av_list_gk20a *av_list)
{ {
av_list->count = len / sizeof(struct av_gk20a); av_list->count = len / sizeof(struct av_gk20a);
if (!alloc_av_list_gk20a(g, av_list)) { if (alloc_av_list_gk20a(g, av_list) == NULL) {
return -ENOMEM; return -ENOMEM;
} }
@@ -64,7 +64,7 @@ static int gr_gk20a_alloc_load_netlist_av64(struct gk20a *g, u32 *src, u32 len,
struct av64_list_gk20a *av64_list) struct av64_list_gk20a *av64_list)
{ {
av64_list->count = len / sizeof(struct av64_gk20a); av64_list->count = len / sizeof(struct av64_gk20a);
if (!alloc_av64_list_gk20a(g, av64_list)) { if (alloc_av64_list_gk20a(g, av64_list) == NULL) {
return -ENOMEM; return -ENOMEM;
} }
@@ -77,7 +77,7 @@ static int gr_gk20a_alloc_load_netlist_aiv(struct gk20a *g, u32 *src, u32 len,
struct aiv_list_gk20a *aiv_list) struct aiv_list_gk20a *aiv_list)
{ {
aiv_list->count = len / sizeof(struct aiv_gk20a); aiv_list->count = len / sizeof(struct aiv_gk20a);
if (!alloc_aiv_list_gk20a(g, aiv_list)) { if (alloc_aiv_list_gk20a(g, aiv_list) == NULL) {
return -ENOMEM; return -ENOMEM;
} }
@@ -116,7 +116,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
} }
netlist_fw = nvgpu_request_firmware(g, name, 0); netlist_fw = nvgpu_request_firmware(g, name, 0);
if (!netlist_fw) { if (netlist_fw == NULL) {
nvgpu_warn(g, "failed to load netlist %s", name); nvgpu_warn(g, "failed to load netlist %s", name);
continue; continue;
} }

View File

@@ -114,7 +114,7 @@ int gk20a_init_mm_setup_hw(struct gk20a *g)
} }
} }
if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) { if ((gk20a_mm_fb_flush(g) != 0) || (gk20a_mm_fb_flush(g) != 0)) {
return -EBUSY; return -EBUSY;
} }
@@ -229,8 +229,9 @@ static void __update_pte(struct vm_gk20a *vm,
gmmu_pte_kind_f(attrs->kind_v) | gmmu_pte_kind_f(attrs->kind_v) |
gmmu_pte_comptagline_f((u32)(attrs->ctag >> ctag_shift)); gmmu_pte_comptagline_f((u32)(attrs->ctag >> ctag_shift));
if (attrs->ctag && vm->mm->use_full_comp_tag_line && if ((attrs->ctag != 0ULL) &&
phys_addr & 0x10000) { vm->mm->use_full_comp_tag_line &&
((phys_addr & 0x10000ULL) != 0ULL)) {
pte_w[1] |= gmmu_pte_comptagline_f( pte_w[1] |= gmmu_pte_comptagline_f(
1 << (gmmu_pte_comptagline_s() - 1)); 1 << (gmmu_pte_comptagline_s() - 1));
} }
@@ -397,7 +398,7 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(), nvgpu_mem_wr32(g, inst_block, ram_in_adr_limit_hi_w(),
ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1))); ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit - 1)));
if (big_page_size && g->ops.mm.set_big_page_size) { if ((big_page_size != 0U) && (g->ops.mm.set_big_page_size != NULL)) {
g->ops.mm.set_big_page_size(g, inst_block, big_page_size); g->ops.mm.set_big_page_size(g, inst_block, big_page_size);
} }
} }
@@ -465,7 +466,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
} else { } else {
break; break;
} }
} while (!nvgpu_timeout_expired(&timeout)); } while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) { if (nvgpu_timeout_peek_expired(&timeout)) {
if (g->ops.fb.dump_vpr_info) { if (g->ops.fb.dump_vpr_info) {
@@ -518,7 +519,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
} else { } else {
break; break;
} }
} while (!nvgpu_timeout_expired(&timeout)); } while (nvgpu_timeout_expired(&timeout) == 0);
if (nvgpu_timeout_peek_expired(&timeout)) { if (nvgpu_timeout_peek_expired(&timeout)) {
nvgpu_warn(g, "l2_system_invalidate too many retries"); nvgpu_warn(g, "l2_system_invalidate too many retries");
@@ -580,8 +581,8 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
} else { } else {
break; break;
} }
} while (!nvgpu_timeout_expired_msg(&timeout, } while (nvgpu_timeout_expired_msg(&timeout,
"l2_flush_dirty too many retries")); "l2_flush_dirty too many retries") == 0);
trace_gk20a_mm_l2_flush_done(g->name); trace_gk20a_mm_l2_flush_done(g->name);
@@ -633,8 +634,8 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
} else { } else {
break; break;
} }
} while (!nvgpu_timeout_expired_msg(&timeout, } while (nvgpu_timeout_expired_msg(&timeout,
"l2_clean_comptags too many retries")); "l2_clean_comptags too many retries") == 0);
nvgpu_mutex_release(&mm->l2_op_lock); nvgpu_mutex_release(&mm->l2_op_lock);

View File

@@ -287,7 +287,7 @@ int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
return -EINVAL; return -EINVAL;
} }
BUG_ON(!token); BUG_ON(token == NULL);
BUG_ON(!PMU_MUTEX_ID_IS_VALID(id)); BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
BUG_ON(id > pmu->mutex_cnt); BUG_ON(id > pmu->mutex_cnt);
@@ -357,7 +357,7 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
return -EINVAL; return -EINVAL;
} }
BUG_ON(!token); BUG_ON(token == NULL);
BUG_ON(!PMU_MUTEX_ID_IS_VALID(id)); BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
BUG_ON(id > pmu->mutex_cnt); BUG_ON(id > pmu->mutex_cnt);
@@ -399,7 +399,7 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
queue_head_size = g->ops.pmu.pmu_get_queue_head_size(); queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
} }
BUG_ON(!head || !queue_head_size); BUG_ON((head == NULL) || (queue_head_size == 0U));
if (PMU_IS_COMMAND_QUEUE(queue->id)) { if (PMU_IS_COMMAND_QUEUE(queue->id)) {
@@ -439,7 +439,7 @@ int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size(); queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
} }
BUG_ON(!tail || !queue_tail_size); BUG_ON((tail == NULL) || (queue_tail_size == 0U));
if (PMU_IS_COMMAND_QUEUE(queue->id)) { if (PMU_IS_COMMAND_QUEUE(queue->id)) {
@@ -479,7 +479,7 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size(); queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
} }
BUG_ON(!tail || !queue_tail_size); BUG_ON((tail == NULL) || (queue_tail_size == 0U));
if (!set) { if (!set) {
*tail = pwr_pmu_msgq_tail_val_v( *tail = pwr_pmu_msgq_tail_val_v(
@@ -542,7 +542,7 @@ static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
{ {
struct nvgpu_pmu *pmu = param; struct nvgpu_pmu *pmu = param;
gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE"); gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE");
pmu->zbc_save_done = 1; pmu->zbc_save_done = true;
} }
void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries) void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
@@ -551,7 +551,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
struct pmu_cmd cmd; struct pmu_cmd cmd;
u32 seq; u32 seq;
if (!pmu->pmu_ready || !entries || !pmu->zbc_ready) { if (!pmu->pmu_ready || (entries == 0U) || !pmu->zbc_ready) {
return; return;
} }
@@ -561,7 +561,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
cmd.cmd.zbc.cmd_type = g->pmu_ver_cmd_id_zbc_table_update; cmd.cmd.zbc.cmd_type = g->pmu_ver_cmd_id_zbc_table_update;
cmd.cmd.zbc.entry_mask = ZBC_MASK(entries); cmd.cmd.zbc.entry_mask = ZBC_MASK(entries);
pmu->zbc_save_done = 0; pmu->zbc_save_done = false;
gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE"); gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE");
nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
@@ -702,7 +702,7 @@ void gk20a_pmu_isr(struct gk20a *g)
gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr); gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr);
intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask; intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask;
if (!intr || pmu->pmu_state == PMU_STATE_OFF) { if ((intr == 0U) || (pmu->pmu_state == PMU_STATE_OFF)) {
gk20a_writel(g, pwr_falcon_irqsclr_r(), intr); gk20a_writel(g, pwr_falcon_irqsclr_r(), intr);
nvgpu_mutex_release(&pmu->isr_mutex); nvgpu_mutex_release(&pmu->isr_mutex);
return; return;

View File

@@ -286,60 +286,60 @@ static bool check_whitelists(struct dbg_session_gk20a *dbg_s,
if (op->type == REGOP(TYPE_GLOBAL)) { if (op->type == REGOP(TYPE_GLOBAL)) {
/* search global list */ /* search global list */
valid = g->ops.regops.get_global_whitelist_ranges && valid = (g->ops.regops.get_global_whitelist_ranges != NULL) &&
!!bsearch(&offset, (bsearch(&offset,
g->ops.regops.get_global_whitelist_ranges(), g->ops.regops.get_global_whitelist_ranges(),
g->ops.regops.get_global_whitelist_ranges_count(), g->ops.regops.get_global_whitelist_ranges_count(),
sizeof(*g->ops.regops.get_global_whitelist_ranges()), sizeof(*g->ops.regops.get_global_whitelist_ranges()),
regop_bsearch_range_cmp); regop_bsearch_range_cmp) != NULL);
/* if debug session and channel is bound search context list */ /* if debug session and channel is bound search context list */
if ((!valid) && (!dbg_s->is_profiler && ch)) { if ((!valid) && (!dbg_s->is_profiler) && (ch != NULL)) {
/* binary search context list */ /* binary search context list */
valid = g->ops.regops.get_context_whitelist_ranges && valid = (g->ops.regops.get_context_whitelist_ranges != NULL) &&
!!bsearch(&offset, (bsearch(&offset,
g->ops.regops.get_context_whitelist_ranges(), g->ops.regops.get_context_whitelist_ranges(),
g->ops.regops.get_context_whitelist_ranges_count(), g->ops.regops.get_context_whitelist_ranges_count(),
sizeof(*g->ops.regops.get_context_whitelist_ranges()), sizeof(*g->ops.regops.get_context_whitelist_ranges()),
regop_bsearch_range_cmp); regop_bsearch_range_cmp) != NULL);
} }
/* if debug session and channel is bound search runcontrol list */ /* if debug session and channel is bound search runcontrol list */
if ((!valid) && (!dbg_s->is_profiler && ch)) { if ((!valid) && (!dbg_s->is_profiler) && (ch != NULL)) {
valid = g->ops.regops.get_runcontrol_whitelist && valid = (g->ops.regops.get_runcontrol_whitelist != NULL) &&
linear_search(offset, linear_search(offset,
g->ops.regops.get_runcontrol_whitelist(), g->ops.regops.get_runcontrol_whitelist(),
g->ops.regops.get_runcontrol_whitelist_count()); g->ops.regops.get_runcontrol_whitelist_count());
} }
} else if (op->type == REGOP(TYPE_GR_CTX)) { } else if (op->type == REGOP(TYPE_GR_CTX)) {
/* it's a context-relative op */ /* it's a context-relative op */
if (!ch) { if (ch == NULL) {
nvgpu_err(dbg_s->g, "can't perform ctx regop unless bound"); nvgpu_err(dbg_s->g, "can't perform ctx regop unless bound");
op->status = REGOP(STATUS_UNSUPPORTED_OP); op->status = REGOP(STATUS_UNSUPPORTED_OP);
return valid; return valid;
} }
/* binary search context list */ /* binary search context list */
valid = g->ops.regops.get_context_whitelist_ranges && valid = (g->ops.regops.get_context_whitelist_ranges != NULL) &&
!!bsearch(&offset, (bsearch(&offset,
g->ops.regops.get_context_whitelist_ranges(), g->ops.regops.get_context_whitelist_ranges(),
g->ops.regops.get_context_whitelist_ranges_count(), g->ops.regops.get_context_whitelist_ranges_count(),
sizeof(*g->ops.regops.get_context_whitelist_ranges()), sizeof(*g->ops.regops.get_context_whitelist_ranges()),
regop_bsearch_range_cmp); regop_bsearch_range_cmp) != NULL);
/* if debug session and channel is bound search runcontrol list */ /* if debug session and channel is bound search runcontrol list */
if ((!valid) && (!dbg_s->is_profiler && ch)) { if ((!valid) && (!dbg_s->is_profiler) && (ch != NULL)) {
valid = g->ops.regops.get_runcontrol_whitelist && valid = (g->ops.regops.get_runcontrol_whitelist != NULL) &&
linear_search(offset, linear_search(offset,
g->ops.regops.get_runcontrol_whitelist(), g->ops.regops.get_runcontrol_whitelist(),
g->ops.regops.get_runcontrol_whitelist_count()); g->ops.regops.get_runcontrol_whitelist_count());
} }
} else if (op->type == REGOP(TYPE_GR_CTX_QUAD)) { } else if (op->type == REGOP(TYPE_GR_CTX_QUAD)) {
valid = g->ops.regops.get_qctl_whitelist && valid = (g->ops.regops.get_qctl_whitelist != NULL) &&
linear_search(offset, linear_search(offset,
g->ops.regops.get_qctl_whitelist(), g->ops.regops.get_qctl_whitelist(),
g->ops.regops.get_qctl_whitelist_count()); g->ops.regops.get_qctl_whitelist_count());
} }
return valid; return valid;
@@ -390,7 +390,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
return -EINVAL; return -EINVAL;
} }
} }
if (!num_offsets) { if (num_offsets == 0U) {
op->status |= REGOP(STATUS_INVALID_OFFSET); op->status |= REGOP(STATUS_INVALID_OFFSET);
return -EINVAL; return -EINVAL;
} }
@@ -447,11 +447,11 @@ static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
/* exported for tools like cyclestats, etc */ /* exported for tools like cyclestats, etc */
bool is_bar0_global_offset_whitelisted_gk20a(struct gk20a *g, u32 offset) bool is_bar0_global_offset_whitelisted_gk20a(struct gk20a *g, u32 offset)
{ {
bool valid = !!bsearch(&offset, bool valid = bsearch(&offset,
g->ops.regops.get_global_whitelist_ranges(), g->ops.regops.get_global_whitelist_ranges(),
g->ops.regops.get_global_whitelist_ranges_count(), g->ops.regops.get_global_whitelist_ranges_count(),
sizeof(*g->ops.regops.get_global_whitelist_ranges()), sizeof(*g->ops.regops.get_global_whitelist_ranges()),
regop_bsearch_range_cmp); regop_bsearch_range_cmp) != NULL;
return valid; return valid;
} }

View File

@@ -100,6 +100,10 @@ static inline u32 ccsr_channel_enable_v(u32 r)
{ {
return (r >> 0U) & 0x1U; return (r >> 0U) & 0x1U;
} }
static inline u32 ccsr_channel_enable_in_use_v(void)
{
return 0x00000001U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v) static inline u32 ccsr_channel_enable_set_f(u32 v)
{ {
return (v & 0x1U) << 10U; return (v & 0x1U) << 10U;
@@ -160,4 +164,8 @@ static inline u32 ccsr_channel_busy_v(u32 r)
{ {
return (r >> 28U) & 0x1U; return (r >> 28U) & 0x1U;
} }
static inline u32 ccsr_channel_busy_true_v(void)
{
return 0x00000001U;
}
#endif #endif

View File

@@ -520,6 +520,10 @@ static inline u32 fifo_engine_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_engine_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_faulted_v(u32 r) static inline u32 fifo_engine_status_faulted_v(u32 r)
{ {
return (r >> 30U) & 0x1U; return (r >> 30U) & 0x1U;
@@ -608,6 +612,10 @@ static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_pbdma_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chsw_v(u32 r) static inline u32 fifo_pbdma_status_chsw_v(u32 r)
{ {
return (r >> 15U) & 0x1U; return (r >> 15U) & 0x1U;

View File

@@ -96,6 +96,10 @@ static inline u32 top_device_info_chain_enable_v(void)
{ {
return 0x00000001U; return 0x00000001U;
} }
static inline u32 top_device_info_chain_disable_v(void)
{
return 0x00000000U;
}
static inline u32 top_device_info_engine_enum_v(u32 r) static inline u32 top_device_info_engine_enum_v(u32 r)
{ {
return (r >> 26U) & 0xfU; return (r >> 26U) & 0xfU;

View File

@@ -100,6 +100,10 @@ static inline u32 ccsr_channel_enable_v(u32 r)
{ {
return (r >> 0U) & 0x1U; return (r >> 0U) & 0x1U;
} }
static inline u32 ccsr_channel_enable_in_use_v(void)
{
return 0x00000001U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v) static inline u32 ccsr_channel_enable_set_f(u32 v)
{ {
return (v & 0x1U) << 10U; return (v & 0x1U) << 10U;
@@ -160,4 +164,8 @@ static inline u32 ccsr_channel_busy_v(u32 r)
{ {
return (r >> 28U) & 0x1U; return (r >> 28U) & 0x1U;
} }
static inline u32 ccsr_channel_busy_true_v(void)
{
return 0x00000001U;
}
#endif #endif

View File

@@ -472,6 +472,10 @@ static inline u32 fifo_engine_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_engine_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_faulted_v(u32 r) static inline u32 fifo_engine_status_faulted_v(u32 r)
{ {
return (r >> 30U) & 0x1U; return (r >> 30U) & 0x1U;
@@ -560,6 +564,10 @@ static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_pbdma_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chsw_v(u32 r) static inline u32 fifo_pbdma_status_chsw_v(u32 r)
{ {
return (r >> 15U) & 0x1U; return (r >> 15U) & 0x1U;

View File

@@ -116,6 +116,10 @@ static inline u32 top_device_info_chain_enable_v(void)
{ {
return 0x00000001U; return 0x00000001U;
} }
static inline u32 top_device_info_chain_disable_v(void)
{
return 0x00000000U;
}
static inline u32 top_device_info_engine_enum_v(u32 r) static inline u32 top_device_info_engine_enum_v(u32 r)
{ {
return (r >> 26U) & 0xfU; return (r >> 26U) & 0xfU;

View File

@@ -100,6 +100,10 @@ static inline u32 ccsr_channel_enable_v(u32 r)
{ {
return (r >> 0U) & 0x1U; return (r >> 0U) & 0x1U;
} }
static inline u32 ccsr_channel_enable_in_use_v(void)
{
return 0x00000001U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v) static inline u32 ccsr_channel_enable_set_f(u32 v)
{ {
return (v & 0x1U) << 10U; return (v & 0x1U) << 10U;
@@ -160,4 +164,8 @@ static inline u32 ccsr_channel_busy_v(u32 r)
{ {
return (r >> 28U) & 0x1U; return (r >> 28U) & 0x1U;
} }
static inline u32 ccsr_channel_busy_true_v(void)
{
return 0x00000001U;
}
#endif #endif

View File

@@ -472,6 +472,10 @@ static inline u32 fifo_engine_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_engine_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_faulted_v(u32 r) static inline u32 fifo_engine_status_faulted_v(u32 r)
{ {
return (r >> 30U) & 0x1U; return (r >> 30U) & 0x1U;
@@ -560,6 +564,10 @@ static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_pbdma_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chsw_v(u32 r) static inline u32 fifo_pbdma_status_chsw_v(u32 r)
{ {
return (r >> 15U) & 0x1U; return (r >> 15U) & 0x1U;

View File

@@ -124,6 +124,10 @@ static inline u32 top_device_info_chain_enable_v(void)
{ {
return 0x00000001U; return 0x00000001U;
} }
static inline u32 top_device_info_chain_disable_v(void)
{
return 0x00000000U;
}
static inline u32 top_device_info_engine_enum_v(u32 r) static inline u32 top_device_info_engine_enum_v(u32 r)
{ {
return (r >> 26U) & 0xfU; return (r >> 26U) & 0xfU;

View File

@@ -100,6 +100,10 @@ static inline u32 ccsr_channel_enable_v(u32 r)
{ {
return (r >> 0U) & 0x1U; return (r >> 0U) & 0x1U;
} }
static inline u32 ccsr_channel_enable_in_use_v(void)
{
return 0x00000001U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v) static inline u32 ccsr_channel_enable_set_f(u32 v)
{ {
return (v & 0x1U) << 10U; return (v & 0x1U) << 10U;
@@ -160,4 +164,8 @@ static inline u32 ccsr_channel_busy_v(u32 r)
{ {
return (r >> 28U) & 0x1U; return (r >> 28U) & 0x1U;
} }
static inline u32 ccsr_channel_busy_true_v(void)
{
return 0x00000001U;
}
#endif #endif

View File

@@ -476,6 +476,10 @@ static inline u32 fifo_engine_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_engine_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_faulted_v(u32 r) static inline u32 fifo_engine_status_faulted_v(u32 r)
{ {
return (r >> 30U) & 0x1U; return (r >> 30U) & 0x1U;
@@ -564,6 +568,10 @@ static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_pbdma_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chsw_v(u32 r) static inline u32 fifo_pbdma_status_chsw_v(u32 r)
{ {
return (r >> 15U) & 0x1U; return (r >> 15U) & 0x1U;

View File

@@ -116,6 +116,10 @@ static inline u32 top_device_info_chain_enable_v(void)
{ {
return 0x00000001U; return 0x00000001U;
} }
static inline u32 top_device_info_chain_disable_v(void)
{
return 0x00000000U;
}
static inline u32 top_device_info_engine_enum_v(u32 r) static inline u32 top_device_info_engine_enum_v(u32 r)
{ {
return (r >> 26U) & 0xfU; return (r >> 26U) & 0xfU;

View File

@@ -100,6 +100,10 @@ static inline u32 ccsr_channel_enable_v(u32 r)
{ {
return (r >> 0U) & 0x1U; return (r >> 0U) & 0x1U;
} }
static inline u32 ccsr_channel_enable_in_use_v(void)
{
return 0x00000001U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v) static inline u32 ccsr_channel_enable_set_f(u32 v)
{ {
return (v & 0x1U) << 10U; return (v & 0x1U) << 10U;
@@ -184,4 +188,8 @@ static inline u32 ccsr_channel_busy_v(u32 r)
{ {
return (r >> 28U) & 0x1U; return (r >> 28U) & 0x1U;
} }
static inline u32 ccsr_channel_busy_true_v(void)
{
return 0x00000001U;
}
#endif #endif

View File

@@ -396,6 +396,10 @@ static inline u32 fifo_engine_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_engine_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_eng_reload_v(u32 r) static inline u32 fifo_engine_status_eng_reload_v(u32 r)
{ {
return (r >> 29U) & 0x1U; return (r >> 29U) & 0x1U;
@@ -488,6 +492,10 @@ static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_pbdma_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chsw_v(u32 r) static inline u32 fifo_pbdma_status_chsw_v(u32 r)
{ {
return (r >> 15U) & 0x1U; return (r >> 15U) & 0x1U;

View File

@@ -132,6 +132,10 @@ static inline u32 top_device_info_chain_enable_v(void)
{ {
return 0x00000001U; return 0x00000001U;
} }
static inline u32 top_device_info_chain_disable_v(void)
{
return 0x00000000U;
}
static inline u32 top_device_info_engine_enum_v(u32 r) static inline u32 top_device_info_engine_enum_v(u32 r)
{ {
return (r >> 26U) & 0xfU; return (r >> 26U) & 0xfU;

View File

@@ -100,6 +100,10 @@ static inline u32 ccsr_channel_enable_v(u32 r)
{ {
return (r >> 0U) & 0x1U; return (r >> 0U) & 0x1U;
} }
static inline u32 ccsr_channel_enable_in_use_v(void)
{
return 0x00000001U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v) static inline u32 ccsr_channel_enable_set_f(u32 v)
{ {
return (v & 0x1U) << 10U; return (v & 0x1U) << 10U;
@@ -184,4 +188,8 @@ static inline u32 ccsr_channel_busy_v(u32 r)
{ {
return (r >> 28U) & 0x1U; return (r >> 28U) & 0x1U;
} }
static inline u32 ccsr_channel_busy_true_v(void)
{
return 0x00000001U;
}
#endif #endif

View File

@@ -492,6 +492,10 @@ static inline u32 fifo_engine_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_engine_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_eng_reload_v(u32 r) static inline u32 fifo_engine_status_eng_reload_v(u32 r)
{ {
return (r >> 29U) & 0x1U; return (r >> 29U) & 0x1U;
@@ -624,6 +628,10 @@ static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_pbdma_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chsw_v(u32 r) static inline u32 fifo_pbdma_status_chsw_v(u32 r)
{ {
return (r >> 15U) & 0x1U; return (r >> 15U) & 0x1U;

View File

@@ -124,6 +124,10 @@ static inline u32 top_device_info_chain_enable_v(void)
{ {
return 0x00000001U; return 0x00000001U;
} }
static inline u32 top_device_info_chain_disable_v(void)
{
return 0x00000000U;
}
static inline u32 top_device_info_engine_enum_v(u32 r) static inline u32 top_device_info_engine_enum_v(u32 r)
{ {
return (r >> 26U) & 0xfU; return (r >> 26U) & 0xfU;

View File

@@ -100,6 +100,10 @@ static inline u32 ccsr_channel_enable_v(u32 r)
{ {
return (r >> 0U) & 0x1U; return (r >> 0U) & 0x1U;
} }
static inline u32 ccsr_channel_enable_in_use_v(void)
{
return 0x00000001U;
}
static inline u32 ccsr_channel_enable_set_f(u32 v) static inline u32 ccsr_channel_enable_set_f(u32 v)
{ {
return (v & 0x1U) << 10U; return (v & 0x1U) << 10U;
@@ -184,4 +188,8 @@ static inline u32 ccsr_channel_busy_v(u32 r)
{ {
return (r >> 28U) & 0x1U; return (r >> 28U) & 0x1U;
} }
static inline u32 ccsr_channel_busy_true_v(void)
{
return 0x00000001U;
}
#endif #endif

View File

@@ -380,6 +380,10 @@ static inline u32 fifo_engine_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_engine_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_engine_status_eng_reload_v(u32 r) static inline u32 fifo_engine_status_eng_reload_v(u32 r)
{ {
return (r >> 29U) & 0x1U; return (r >> 29U) & 0x1U;
@@ -472,6 +476,10 @@ static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
{ {
return 0x00000000U; return 0x00000000U;
} }
static inline u32 fifo_pbdma_status_next_id_type_tsgid_v(void)
{
return 0x00000001U;
}
static inline u32 fifo_pbdma_status_chsw_v(u32 r) static inline u32 fifo_pbdma_status_chsw_v(u32 r)
{ {
return (r >> 15U) & 0x1U; return (r >> 15U) & 0x1U;

View File

@@ -132,6 +132,10 @@ static inline u32 top_device_info_chain_enable_v(void)
{ {
return 0x00000001U; return 0x00000001U;
} }
static inline u32 top_device_info_chain_disable_v(void)
{
return 0x00000000U;
}
static inline u32 top_device_info_engine_enum_v(u32 r) static inline u32 top_device_info_engine_enum_v(u32 r)
{ {
return (r >> 26U) & 0xfU; return (r >> 26U) & 0xfU;

View File

@@ -74,7 +74,7 @@ struct nvgpu_os_fence {
/* /*
* This API is used to validate the nvgpu_os_fence * This API is used to validate the nvgpu_os_fence
*/ */
static inline int nvgpu_os_fence_is_initialized(struct nvgpu_os_fence *fence) static inline bool nvgpu_os_fence_is_initialized(struct nvgpu_os_fence *fence)
{ {
return (fence->ops != NULL); return (fence->ops != NULL);
} }

View File

@@ -342,7 +342,7 @@ struct nvgpu_pmu {
u32 perfmon_query; u32 perfmon_query;
u32 zbc_save_done; bool zbc_save_done;
u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE]; u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE];