Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 09:12:24 +03:00)
gpu: nvgpu: MISRA 14.4 boolean fixes
MISRA rule 14.4 does not allow a non-boolean variable to be used as a boolean in the controlling expression of an if statement or an iteration statement. Fix violations where a non-boolean variable is used as a boolean in the controlling expression of if and loop statements.

JIRA NVGPU-1022

Change-Id: I957f8ca1fa0eb00928c476960da1e6e420781c09
Signed-off-by: Amurthyreddy <amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1941002
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: f9ca193a60
Commit: 710aab6ba4
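For context only, the following sketch shows the general shape of the MISRA C:2012 Rule 14.4 rewrite that the diff below applies throughout nvgpu; the function and variable names here are hypothetical and are not taken from the driver:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical illustration of the Rule 14.4 pattern: controlling
     * expressions must be essentially Boolean, so pointers are compared
     * against NULL, unsigned values against 0U, and integer loop constants
     * become the boolean literal "true". */
    static unsigned int count_set_words(const unsigned int *words, unsigned int n)
    {
        unsigned int i = 0U;
        unsigned int count = 0U;

        if (words == NULL) {          /* instead of: if (!words) */
            return 0U;
        }

        while (true) {                /* instead of: while (1) */
            if (i == n) {
                break;
            }
            if (words[i] != 0U) {     /* instead of: if (words[i]) */
                count++;
            }
            i++;
        }

        return count;
    }

The same substitution is applied mechanically in every hunk below: `while (1)` becomes `while (true)`, `do { ... } while (0)` macros become `while (false)`, pointer tests gain `!= NULL`, and integer tests gain `!= 0`, `!= 0U`, or `!= 0ULL` as appropriate.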
@@ -59,7 +59,7 @@ int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn)
 }

 nvgpu_usleep_range(100, 200);
-} while (1);
+} while (true);

 return 0;
 }
@@ -1957,7 +1957,7 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 /* Synchronize with abort cleanup that needs the jobs. */
 nvgpu_mutex_acquire(&c->joblist.cleanup_lock);

-while (1) {
+while (true) {
 bool completed;

 channel_gk20a_joblist_lock(c);
@@ -130,7 +130,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 return 0;
 }

-while (1) {
+while (true) {
 const u32 iter_max = min(min + max_lines - 1, max);
 bool full_cache_op = true;

@@ -155,7 +155,7 @@ int gp10b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 return 0;
 }

-while (1) {
+while (true) {
 const u32 iter_max = min(min + max_lines - 1, max);
 bool full_cache_op = true;

@@ -154,7 +154,7 @@ int ltc_tu104_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
 return 0;
 }

-while (1) {
+while (true) {
 const u32 iter_max = min(min + max_lines - 1, max);
 bool full_cache_op = true;

@@ -43,7 +43,7 @@
 } else { \
 nvgpu_log(g, gpu_dbg_map, fmt, ##args); \
 } \
-} while (0)
+} while (false)

 #define __gmmu_dbg_v(g, attrs, fmt, args...) \
 do { \
@@ -52,7 +52,7 @@
 } else { \
 nvgpu_log(g, gpu_dbg_map_v, fmt, ##args); \
 } \
-} while (0)
+} while (false)

 static int pd_allocate(struct vm_gk20a *vm,
 struct nvgpu_gmmu_pd *pd,
@@ -100,7 +100,7 @@ static void nvgpu_lockless_free(struct nvgpu_allocator *a, u64 addr)

 alloc_dbg(a, "Free node # %llu @ addr 0x%llx", cur_idx, addr);

-while (1) {
+while (true) {
 head = NV_ACCESS_ONCE(pa->head);
 NV_ACCESS_ONCE(pa->next[cur_idx]) = head;
 ret = cmpxchg(&pa->head, head, cur_idx);
@@ -249,7 +249,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 } else {
 break;
 }
-} while (1);
+} while (true);

 if (err != 0) {
 nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
@@ -132,7 +132,7 @@ static int sec2_write_cmd(struct nvgpu_sec2 *sec2,
 } else {
 break;
 }
-} while (1);
+} while (true);

 if (err != 0) {
 nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
@@ -40,13 +40,13 @@
 gpu_sema_verbose_dbg((s)->gk20a, "Acquiring sema lock..."); \
 nvgpu_mutex_acquire(&(s)->sea_lock); \
 gpu_sema_verbose_dbg((s)->gk20a, "Sema lock aquried!"); \
-} while (0)
+} while (false)

 #define __unlock_sema_sea(s) \
 do { \
 nvgpu_mutex_release(&(s)->sea_lock); \
 gpu_sema_verbose_dbg((s)->gk20a, "Released sema lock"); \
-} while (0)
+} while (false)

 /*
 * Return the sema_sea pointer.
@@ -115,7 +115,7 @@ static void gk20a_ce_put_fences(struct gk20a_gpu_ctx *ce_ctx)

 for (i = 0; i < NVGPU_CE_MAX_INFLIGHT_JOBS; i++) {
 struct gk20a_fence **fence = &ce_ctx->postfences[i];
-if (*fence) {
+if (*fence != NULL) {
 gk20a_fence_put(*fence);
 }
 *fence = NULL;
@@ -164,7 +164,7 @@ static inline unsigned int gk20a_ce_get_method_size(int request_operation,
 u64 chunk = size;
 u32 height, width;

-while (chunk) {
+while (chunk != 0ULL) {
 iterations++;

 shift = (MAX_CE_ALIGN(chunk) != 0ULL) ?
@@ -233,7 +233,7 @@ int gk20a_ce_prepare_submit(u64 src_buf,
 * 1 <------ ffs ------->
 * <-----------up to 30 bits----------->
 */
-while (chunk) {
+while (chunk != 0ULL) {
 u32 width, height, shift;

 /*
@@ -47,11 +47,11 @@ static void gk20a_fence_free(struct nvgpu_ref *ref)
 f->os_fence.ops->drop_ref(&f->os_fence);
 }

-if (f->semaphore) {
+if (f->semaphore != NULL) {
 nvgpu_semaphore_put(f->semaphore);
 }

-if (f->allocator) {
+if (f->allocator != NULL) {
 if (nvgpu_alloc_initialized(f->allocator)) {
 nvgpu_free(f->allocator, (u64)(uintptr_t)f);
 }
@@ -62,14 +62,14 @@ static void gk20a_fence_free(struct nvgpu_ref *ref)

 void gk20a_fence_put(struct gk20a_fence *f)
 {
-if (f) {
+if (f != NULL) {
 nvgpu_ref_put(&f->ref, gk20a_fence_free);
 }
 }

 struct gk20a_fence *gk20a_fence_get(struct gk20a_fence *f)
 {
-if (f) {
+if (f != NULL) {
 nvgpu_ref_get(&f->ref);
 }
 return f;
@@ -168,7 +168,7 @@ struct gk20a_fence *gk20a_alloc_fence(struct channel_gk20a *c)
 sizeof(struct gk20a_fence));

 /* clear the node and reset the allocator pointer */
-if (fence) {
+if (fence != NULL) {
 (void) memset(fence, 0, sizeof(*fence));
 fence->allocator = &c->fence_allocator;
 }
@@ -177,7 +177,7 @@ struct gk20a_fence *gk20a_alloc_fence(struct channel_gk20a *c)
 fence = nvgpu_kzalloc(c->g, sizeof(struct gk20a_fence));
 }

-if (fence) {
+if (fence != NULL) {
 nvgpu_ref_init(&fence->ref);
 fence->g = c->g;
 }
@@ -257,7 +257,7 @@ u32 gk20a_fifo_get_gr_runlist_id(struct gk20a *g)

 engine_info = gk20a_fifo_get_engine_info(g, gr_engine_id);

-if (engine_info) {
+if (engine_info != NULL) {
 gr_runlist_id = engine_info->runlist_id;
 } else {
 nvgpu_err(g,
@@ -304,7 +304,7 @@ static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)

 engine_info = gk20a_fifo_get_engine_info(g, engine_id);

-if (engine_info) {
+if (engine_info != NULL) {
 fault_id = engine_info->fault_id;
 } else {
 nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
@@ -346,7 +346,7 @@ enum fifo_engine gk20a_fifo_engine_enum_from_type(struct gk20a *g,
 * comparsion logic with GR runlist_id in init_engine_info() */
 ret = ENGINE_ASYNC_CE_GK20A;
 /* inst_id starts from CE0 to CE2 */
-if (inst_id) {
+if (inst_id != NULL) {
 *inst_id = (engine_type - top_device_info_type_enum_copy0_v());
 }
 }
@@ -381,7 +381,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 u32 runlist_bit;

 if (entry == top_device_info_entry_enum_v()) {
-if (top_device_info_engine_v(table_entry)) {
+if (top_device_info_engine_v(table_entry) != 0U) {
 engine_id =
 top_device_info_engine_enum_v(table_entry);
 nvgpu_log_info(g, "info: engine_id %d",
@@ -389,7 +389,7 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 }


-if (top_device_info_runlist_v(table_entry)) {
+if (top_device_info_runlist_v(table_entry) != 0U) {
 runlist_id =
 top_device_info_runlist_enum_v(table_entry);
 nvgpu_log_info(g, "gr info: runlist_id %d", runlist_id);
@@ -416,13 +416,13 @@ int gk20a_fifo_init_engine_info(struct fifo_gk20a *f)
 }
 }

-if (top_device_info_intr_v(table_entry)) {
+if (top_device_info_intr_v(table_entry) != 0U) {
 intr_id =
 top_device_info_intr_enum_v(table_entry);
 nvgpu_log_info(g, "gr info: intr_id %d", intr_id);
 }

-if (top_device_info_reset_v(table_entry)) {
+if (top_device_info_reset_v(table_entry) != 0U) {
 reset_id =
 top_device_info_reset_enum_v(table_entry);
 nvgpu_log_info(g, "gr info: reset_id %d",
@@ -492,7 +492,7 @@ u32 gk20a_fifo_act_eng_interrupt_mask(struct gk20a *g, u32 act_eng_id)
 struct fifo_engine_info_gk20a *engine_info = NULL;

 engine_info = gk20a_fifo_get_engine_info(g, act_eng_id);
-if (engine_info) {
+if (engine_info != NULL) {
 return engine_info->intr_mask;
 }

@@ -1329,7 +1329,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)

 engine_info = gk20a_fifo_get_engine_info(g, engine_id);

-if (engine_info) {
+if (engine_info != NULL) {
 engine_enum = engine_info->engine_enum;
 }

@@ -1339,7 +1339,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)

 if (engine_enum == ENGINE_GR_GK20A) {
 if (g->support_pmu && g->can_elpg) {
-if (nvgpu_pmu_disable_elpg(g)) {
+if (nvgpu_pmu_disable_elpg(g) != 0) {
 nvgpu_err(g, "failed to set disable elpg");
 }
 }
@@ -1354,7 +1354,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 #endif
 if (!nvgpu_platform_is_simulation(g)) {
 /*HALT_PIPELINE method, halt GR engine*/
-if (gr_gk20a_halt_pipe(g)) {
+if (gr_gk20a_halt_pipe(g) != 0) {
 nvgpu_err(g, "failed to HALT gr pipe");
 }
 /*
@@ -1411,7 +1411,7 @@ bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,

 engine_info = gk20a_fifo_get_engine_info(g, engine_id);

-if (engine_info) {
+if (engine_info != NULL) {
 engine_enum = engine_info->engine_enum;
 }

@@ -1459,7 +1459,7 @@ static bool gk20a_fifo_ch_timeout_debug_dump_state(struct gk20a *g,
 static void gk20a_fifo_set_has_timedout_and_wake_up_wqs(struct gk20a *g,
 struct channel_gk20a *refch)
 {
-if (refch) {
+if (refch != NULL) {
 /* mark channel as faulted */
 refch->has_timedout = true;
 nvgpu_smp_wmb();
@@ -1489,7 +1489,7 @@ bool gk20a_fifo_error_tsg(struct gk20a *g,

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 if (gk20a_fifo_error_ch(g, ch)) {
 verbose = true;
 }
@@ -1521,7 +1521,7 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
 gk20a_channel_put(ch);
 }
@@ -1545,7 +1545,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 ch->has_timedout = true;
 if (ch->g->ops.fifo.ch_abort_clean_up != NULL) {
 ch->g->ops.fifo.ch_abort_clean_up(ch);
@@ -1615,7 +1615,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(

 /* Disable power management */
 if (g->support_pmu && g->can_elpg) {
-if (nvgpu_pmu_disable_elpg(g)) {
+if (nvgpu_pmu_disable_elpg(g) != 0) {
 nvgpu_err(g, "failed to set disable elpg");
 }
 }
@@ -1643,7 +1643,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 grfifo_ctl | gr_gpfifo_ctl_access_f(0) |
 gr_gpfifo_ctl_semaphore_access_f(0));

-if (mmu_fault_engines) {
+if (mmu_fault_engines != 0U) {
 fault_id = mmu_fault_engines;
 fake_fault = true;
 } else {
@@ -1757,7 +1757,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 } else {
 /* if lock is already taken, a reset is taking place
 so no need to repeat */
-if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex)) {
+if (nvgpu_mutex_tryacquire(&g->fifo.gr_reset_mutex) != 0) {
 gk20a_fifo_reset_engine(g, engine_id);
 nvgpu_mutex_release(&g->fifo.gr_reset_mutex);
 }
@@ -1769,7 +1769,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 * For non fake mmu fault, both tsg and ch pointers
 * could be valid. Check tsg first.
 */
-if (tsg)
+if (tsg != NULL)
 gk20a_ctxsw_trace_tsg_reset(g, tsg);
 else if (ch)
 gk20a_ctxsw_trace_channel_reset(g, ch);
@@ -1778,7 +1778,7 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 /*
 * Disable the channel/TSG from hw and increment syncpoints.
 */
-if (tsg) {
+if (tsg != NULL) {
 if (g->fifo.deferred_reset_pending) {
 gk20a_disable_tsg(tsg);
 } else {
@@ -1791,11 +1791,11 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
 }

 /* put back the ref taken early above */
-if (refch) {
+if (refch != NULL) {
 gk20a_channel_put(ch);
 }
-} else if (ch) {
-if (refch) {
+} else if (ch != NULL) {
+if (refch != NULL) {
 if (g->fifo.deferred_reset_pending) {
 g->ops.fifo.disable_channel(ch);
 } else {
@@ -1933,13 +1933,13 @@ void gk20a_fifo_recover_ch(struct gk20a *g, u32 chid, bool verbose, int rc_type)

 engines = gk20a_fifo_engines_on_id(g, chid, false);

-if (engines) {
+if (engines != 0U) {
 gk20a_fifo_recover(g, engines, chid, false, true, verbose,
 rc_type);
 } else {
 struct channel_gk20a *ch = &g->fifo.channel[chid];

-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 gk20a_channel_abort(ch, false);

 if (gk20a_fifo_error_ch(g, ch)) {
@@ -1966,7 +1966,7 @@ void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose,

 engines = gk20a_fifo_engines_on_id(g, tsgid, true);

-if (engines) {
+if (engines != 0U) {
 gk20a_fifo_recover(g, engines, tsgid, true, true, verbose,
 rc_type);
 } else {
@@ -2051,7 +2051,7 @@ void gk20a_fifo_teardown_ch_tsg(struct gk20a *g, u32 __engine_ids,
 }
 }

-if (mmu_fault_engines) {
+if (mmu_fault_engines != 0U) {
 /*
 * sched error prevents recovery, and ctxsw error will retrigger
 * every 100ms. Disable the sched error to allow recovery.
@@ -2118,7 +2118,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,

 nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
 channel_gk20a, ch_entry) {
-if (gk20a_channel_get(ch_tsg)) {
+if (gk20a_channel_get(ch_tsg) != NULL) {
 g->ops.fifo.set_error_notifier(ch_tsg,
 err_code);
 gk20a_channel_put(ch_tsg);
@@ -2294,7 +2294,7 @@ bool gk20a_fifo_check_ch_ctxsw_timeout(struct channel_gk20a *ch,
 bool progress = false;
 struct gk20a *g = ch->g;

-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 recover = gk20a_channel_update_and_check_timeout(ch,
 g->fifo_eng_timeout_us / 1000,
 &progress);
@@ -2328,7 +2328,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 * maximum timeout without progress (update in gpfifo pointers).
 */
 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 recover = gk20a_channel_update_and_check_timeout(ch,
 *ms, &progress);
 if (progress || recover) {
@@ -2351,7 +2351,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 gk20a_channel_put(ch);
 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
 channel_gk20a, ch_entry) {
-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 ch->g->ops.fifo.set_error_notifier(ch,
 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
 if (ch->timeout_debug_dump) {
@@ -2373,7 +2373,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 *ms = g->fifo_eng_timeout_us / 1000;
 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
 channel_gk20a, ch_entry) {
-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 ch->timeout_accumulated_ms = *ms;
 gk20a_channel_put(ch);
 }
@@ -2701,7 +2701,7 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g,
 == fifo_pbdma_status_id_type_chid_v()) {
 struct channel_gk20a *ch = &f->channel[id];

-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 g->ops.fifo.set_error_notifier(ch, error_notifier);
 gk20a_fifo_recover_ch(g, id, true, RC_TYPE_PBDMA_FAULT);
 gk20a_channel_put(ch);
@@ -2714,7 +2714,7 @@ static void gk20a_fifo_pbdma_fault_rc(struct gk20a *g,
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(ch, &tsg->ch_list,
 channel_gk20a, ch_entry) {
-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 g->ops.fifo.set_error_notifier(ch,
 error_notifier);
 gk20a_channel_put(ch);
@@ -2735,7 +2735,7 @@ u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f,
 u32 error_notifier = NVGPU_ERR_NOTIFIER_PBDMA_ERROR;
 unsigned int rc_type = RC_TYPE_NO_RC;

-if (pbdma_intr_0) {
+if (pbdma_intr_0 != 0U) {
 nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr,
 "pbdma id %d intr_0 0x%08x pending",
 pbdma_id, pbdma_intr_0);
@@ -2747,7 +2747,7 @@ u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g, struct fifo_gk20a *f,
 gk20a_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0);
 }

-if (pbdma_intr_1) {
+if (pbdma_intr_1 != 0U) {
 nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr,
 "pbdma id %d intr_1 0x%08x pending",
 pbdma_id, pbdma_intr_1);
@@ -2774,7 +2774,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
 u32 pbdma_pending = gk20a_readl(g, fifo_intr_pbdma_id_r());

 for (i = 0; i < host_num_pbdma; i++) {
-if (fifo_intr_pbdma_id_status_v(pbdma_pending, i)) {
+if (fifo_intr_pbdma_id_status_v(pbdma_pending, i) != 0U) {
 nvgpu_log(g, gpu_dbg_intr, "pbdma id %d intr pending", i);
 clear_intr |=
 gk20a_fifo_handle_pbdma_intr(g, f, i, RC_YES);
@@ -2918,7 +2918,7 @@ void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 nvgpu_err(g,
 "preempt channel %d timeout", id);

-if (gk20a_channel_get(ch)) {
+if (gk20a_channel_get(ch) != NULL) {
 g->ops.fifo.set_error_notifier(ch,
 NVGPU_ERR_NOTIFIER_FIFO_ERROR_IDLE_TIMEOUT);
 gk20a_fifo_recover_ch(g, id, true,
@@ -3190,7 +3190,7 @@ clean_up:

 if (err != 0) {
 nvgpu_log_fn(g, "failed");
-if (gk20a_fifo_enable_engine_activity(g, eng_info)) {
+if (gk20a_fifo_enable_engine_activity(g, eng_info) != 0) {
 nvgpu_err(g,
 "failed to enable gr engine activity");
 }
@@ -3254,7 +3254,7 @@ static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
 }
 }

-if (engines) {
+if (engines != 0U) {
 gk20a_fifo_recover(g, engines, ~(u32)0, false, false, true,
 RC_TYPE_RUNLIST_UPDATE_TIMEOUT);
 }
@@ -3295,7 +3295,7 @@ void gk20a_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
 ram_rl_entry_type_tsg_f() |
 ram_rl_entry_tsg_length_f(tsg->num_active_channels);

-if (tsg->timeslice_timeout) {
+if (tsg->timeslice_timeout != 0U) {
 runlist_entry_0 |=
 ram_rl_entry_timeslice_scale_f(tsg->timeslice_scale) |
 ram_rl_entry_timeslice_timeout_f(tsg->timeslice_timeout);
@@ -4157,7 +4157,7 @@ void gk20a_dump_channel_status_ramfc(struct gk20a *g,
 info->inst.semaphorec,
 info->inst.semaphored);

-if (info->sema.addr) {
+if (info->sema.addr != 0ULL) {
 gk20a_debug_output(o, "SEMA STATE: value: 0x%08x "
 "next_val: 0x%08x addr: 0x%010llx\n",
 info->sema.value,
@@ -4317,10 +4317,10 @@ void gk20a_dump_eng_status(struct gk20a *g,
 "tsg" : "channel",
 gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status));

-if (fifo_engine_status_faulted_v(status)) {
+if (fifo_engine_status_faulted_v(status) != 0U) {
 gk20a_debug_output(o, "faulted ");
 }
-if (fifo_engine_status_engine_v(status)) {
+if (fifo_engine_status_engine_v(status) != 0U) {
 gk20a_debug_output(o, "busy ");
 }
 gk20a_debug_output(o, "\n");
@@ -4349,7 +4349,7 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a)

 nvgpu_log_fn(g, " ");

-if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, (int)true, (int)false)) {
+if (nvgpu_atomic_cmpxchg(&ch_gk20a->bound, (int)true, (int)false) != 0) {
 gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->chid),
 ccsr_channel_inst_ptr_f(0) |
 ccsr_channel_inst_bind_false_f());
@@ -201,7 +201,7 @@ static int gk20a_flcn_copy_from_dmem(struct nvgpu_falcon *flcn,

 nvgpu_log_fn(g, " src dmem offset - %x, size - %x", src, size);

-if (flcn_mem_overflow_check(flcn, src, size, MEM_DMEM)) {
+if (flcn_mem_overflow_check(flcn, src, size, MEM_DMEM) != 0) {
 nvgpu_err(g, "incorrect parameters");
 return -EINVAL;
 }
@@ -246,7 +246,7 @@ static int gk20a_flcn_copy_to_dmem(struct nvgpu_falcon *flcn,

 nvgpu_log_fn(g, "dest dmem offset - %x, size - %x", dst, size);

-if (flcn_mem_overflow_check(flcn, dst, size, MEM_DMEM)) {
+if (flcn_mem_overflow_check(flcn, dst, size, MEM_DMEM) != 0) {
 nvgpu_err(g, "incorrect parameters");
 return -EINVAL;
 }
@@ -304,7 +304,7 @@ static int gk20a_flcn_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,

 nvgpu_log_info(g, "download %d bytes from 0x%x", size, src);

-if (flcn_mem_overflow_check(flcn, src, size, MEM_IMEM)) {
+if (flcn_mem_overflow_check(flcn, src, size, MEM_IMEM) != 0) {
 nvgpu_err(g, "incorrect parameters");
 return -EINVAL;
 }
@@ -352,7 +352,7 @@ static int gk20a_flcn_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,

 nvgpu_log_info(g, "upload %d bytes to 0x%x", size, dst);

-if (flcn_mem_overflow_check(flcn, dst, size, MEM_IMEM)) {
+if (flcn_mem_overflow_check(flcn, dst, size, MEM_IMEM) != 0) {
 nvgpu_err(g, "incorrect parameters");
 return -EINVAL;
 }
@@ -373,7 +373,7 @@ static int gk20a_flcn_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
 falcon_falcon_imemc_secure_f(sec ? 1U : 0U));

 for (i = 0; i < words; i++) {
-if (i % 64 == 0) {
+if (i % 64U == 0U) {
 /* tag is always 256B aligned */
 gk20a_writel(g, base_addr + falcon_falcon_imemt_r(0),
 tag);
@@ -385,7 +385,7 @@ static int gk20a_flcn_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
 }

 /* WARNING : setting remaining bytes in block to 0x0 */
-while (i % 64) {
+while (i % 64U != 0U) {
 gk20a_writel(g, base_addr + falcon_falcon_imemd_r(port), 0);
 i++;
 }
@@ -537,11 +537,11 @@ static void gk20a_free_cb(struct nvgpu_ref *refcount)

 gk20a_ce_destroy(g);

-if (g->remove_support) {
+if (g->remove_support != NULL) {
 g->remove_support(g);
 }

-if (g->free) {
+if (g->free != NULL) {
 g->free(g);
 }
 }
@@ -355,13 +355,13 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
 NVGPU_TIMER_CPU_TIMER);

 while (check == WAIT_UCODE_LOOP) {
-if (nvgpu_timeout_expired(&timeout)) {
+if (nvgpu_timeout_expired(&timeout) != 0) {
 check = WAIT_UCODE_TIMEOUT;
 }

 reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(mailbox_id));

-if (mailbox_ret) {
+if (mailbox_ret != NULL) {
 *mailbox_ret = reg;
 }

@@ -788,7 +788,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 ctxsw_prog_main_image_zcull_o(),
 gr_ctx->zcull_ctx.ctx_sw_mode);

-if (ctxheader->gpu_va) {
+if (ctxheader->gpu_va != 0ULL) {
 g->ops.gr.write_zcull_ptr(g, ctxheader,
 gr_ctx->zcull_ctx.gpu_va);
 } else {
@@ -1505,7 +1505,7 @@ restore_fe_go_idle:
 }

 /* load method init */
-if (sw_method_init->count) {
+if (sw_method_init->count != 0U) {
 gk20a_writel(g, gr_pri_mme_shadow_raw_data_r(),
 sw_method_init->l[0].value);
 gk20a_writel(g, gr_pri_mme_shadow_raw_index_r(),
@@ -1774,7 +1774,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,

 nvgpu_mem_wr(g, gr_mem, ctxsw_prog_main_image_pm_o(), data);

-if (ctxheader->gpu_va) {
+if (ctxheader->gpu_va != 0ULL) {
 struct channel_gk20a *ch;

 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
@@ -2090,7 +2090,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 return 0;

 clean_up:
-if (ucode_info->surface_desc.gpu_va) {
+if (ucode_info->surface_desc.gpu_va != 0ULL) {
 nvgpu_gmmu_unmap(vm, &ucode_info->surface_desc,
 ucode_info->surface_desc.gpu_va);
 }
@@ -2486,7 +2486,7 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)

 for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
 /* destroy exists iff buffer is allocated */
-if (gr->global_ctx_buffer[i].destroy) {
+if (gr->global_ctx_buffer[i].destroy != NULL) {
 gr->global_ctx_buffer[i].destroy(g,
 &gr->global_ctx_buffer[i]);
 }
@@ -2618,7 +2618,7 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct gk20a *g,
 nvgpu_log_fn(g, " ");

 for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
-if (g_bfr_index[i]) {
+if (g_bfr_index[i] != 0) {
 struct nvgpu_mem *mem;

 /*
@@ -2909,7 +2909,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g,

 nvgpu_log_fn(g, " ");

-if (patch_ctx->mem.gpu_va) {
+if (patch_ctx->mem.gpu_va != 0ULL) {
 nvgpu_gmmu_unmap(vm, &patch_ctx->mem,
 patch_ctx->mem.gpu_va);
 }
@@ -2926,7 +2926,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct gk20a *g,

 nvgpu_log_fn(g, " ");

-if (pm_ctx->mem.gpu_va) {
+if (pm_ctx->mem.gpu_va != 0ULL) {
 nvgpu_gmmu_unmap(vm, &pm_ctx->mem, pm_ctx->mem.gpu_va);

 nvgpu_dma_free(g, &pm_ctx->mem);
@@ -3111,7 +3111,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
 nvgpu_vfree(g, gr->ctx_vars.local_golden_image);
 gr->ctx_vars.local_golden_image = NULL;

-if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map) {
+if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map != NULL) {
 nvgpu_big_free(g, gr->ctx_vars.hwpm_ctxsw_buffer_offset_map);
 }
 gr->ctx_vars.hwpm_ctxsw_buffer_offset_map = NULL;
@@ -3484,7 +3484,7 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
 break;
 }

-if (gr->map_tiles) {
+if (gr->map_tiles != NULL) {
 if (gr->map_tile_count != gr->tpc_count) {
 delete_map = true;
 }
@@ -4111,7 +4111,7 @@ int _gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
 ret = gr_gk20a_add_zbc(g, gr, zbc_val);

 clean_up:
-if (gk20a_fifo_enable_engine_activity(g, gr_info)) {
+if (gk20a_fifo_enable_engine_activity(g, gr_info) != 0) {
 nvgpu_err(g,
 "failed to enable gr engine activity");
 }
@@ -4530,7 +4530,7 @@ restore_fe_go_idle:
 }

 /* load method init */
-if (sw_method_init->count) {
+if (sw_method_init->count != 0U) {
 gk20a_writel(g, gr_pri_mme_shadow_raw_data_r(),
 sw_method_init->l[0].value);
 gk20a_writel(g, gr_pri_mme_shadow_raw_index_r(),
@@ -5117,7 +5117,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
 channel_gk20a, ch_entry) {
-if (gk20a_channel_get(ch_tsg)) {
+if (gk20a_channel_get(ch_tsg) != NULL) {
 g->ops.fifo.set_error_notifier(ch_tsg,
 error_notifier);
 gk20a_channel_put(ch_tsg);
@@ -5241,7 +5241,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,

 nvgpu_err(g, "trapped data low 0x%08x",
 gk20a_readl(g, gr_trapped_data_lo_r()));
-if (gr_trapped_addr_datahigh_v(isr_data->addr)) {
+if (gr_trapped_addr_datahigh_v(isr_data->addr) != 0U) {
 nvgpu_err(g, "trapped data high 0x%08x",
 gk20a_readl(g, gr_trapped_data_hi_r()));
 }
@@ -5511,7 +5511,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx(

 unlock:
 nvgpu_spinlock_release(&gr->ch_tlb_lock);
-if (curr_tsgid) {
+if (curr_tsgid != NULL) {
 *curr_tsgid = tsgid;
 }
 return ret;
@@ -5880,7 +5880,7 @@ int gk20a_gr_isr(struct gk20a *g)
 isr_data.class_num = gr_fe_object_table_nvclass_v(obj_table);

 ch = gk20a_gr_get_channel_from_ctx(g, isr_data.curr_ctx, &tsgid);
-if (ch) {
+if (ch != NULL) {
 isr_data.chid = ch->chid;
 } else {
 isr_data.chid = FIFO_INVAL_CHANNEL_ID;
@@ -6105,7 +6105,7 @@ int gk20a_gr_isr(struct gk20a *g)
 gk20a_fifo_recover(g, gr_engine_id,
 tsgid, true, true, true,
 RC_TYPE_GR_FAULT);
-} else if (ch) {
+} else if (ch != NULL) {
 gk20a_fifo_recover(g, gr_engine_id,
 ch->chid, false, true, true,
 RC_TYPE_GR_FAULT);
@@ -6130,7 +6130,7 @@ int gk20a_gr_isr(struct gk20a *g)
 grfifo_ctl | gr_gpfifo_ctl_access_f(1) |
 gr_gpfifo_ctl_semaphore_access_f(1));

-if (gr_intr) {
+if (gr_intr != 0U) {
 nvgpu_err(g,
 "unhandled gr interrupt 0x%08x", gr_intr);
 }
@@ -6140,7 +6140,7 @@ int gk20a_gr_isr(struct gk20a *g)
 gk20a_gr_post_bpt_events(g, tsg, global_esr);
 }

-if (ch) {
+if (ch != NULL) {
 gk20a_channel_put(ch);
 }

@@ -6754,7 +6754,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 nvgpu_mem_wr(g, mem,
 ctxsw_prog_main_image_patch_count_o(),
 gr_ctx->patch_ctx.data_count);
-if (ctxheader->gpu_va) {
+if (ctxheader->gpu_va != 0ULL) {
 nvgpu_mem_wr(g, ctxheader,
 ctxsw_prog_main_image_patch_adr_lo_o(),
 vaddr_lo);
@@ -7622,7 +7622,7 @@ static int add_ctxsw_buffer_map_entries_gpcs(struct gk20a *g,
 &g->netlist_vars->ctxsw_regs.pm_tpc,
 count, offset, max_cnt, base, num_tpcs,
 tpc_in_gpc_stride,
-(tpc_in_gpc_stride - 1))) {
+(tpc_in_gpc_stride - 1)) != 0) {
 return -EINVAL;
 }

@@ -7632,7 +7632,7 @@ static int add_ctxsw_buffer_map_entries_gpcs(struct gk20a *g,
 &g->netlist_vars->ctxsw_regs.pm_ppc,
 count, offset, max_cnt, base, num_ppcs,
 ppc_in_gpc_stride,
-(ppc_in_gpc_stride - 1))) {
+(ppc_in_gpc_stride - 1)) != 0) {
 return -EINVAL;
 }

@@ -7640,40 +7640,40 @@ static int add_ctxsw_buffer_map_entries_gpcs(struct gk20a *g,
 if (add_ctxsw_buffer_map_entries_pmgpc(g, map,
 &g->netlist_vars->ctxsw_regs.pm_gpc,
 count, offset, max_cnt, base,
-(gpc_stride - 1))) {
+(gpc_stride - 1)) != 0) {
 return -EINVAL;
 }

 base = NV_XBAR_MXBAR_PRI_GPC_GNIC_STRIDE * gpc_num;
 if (add_ctxsw_buffer_map_entries(map,
 &g->netlist_vars->ctxsw_regs.pm_ucgpc,
-count, offset, max_cnt, base, ~0)) {
+count, offset, max_cnt, base, ~0) != 0) {
 return -EINVAL;
 }

 base = (g->ops.gr.get_pmm_per_chiplet_offset() * gpc_num);
 if (add_ctxsw_buffer_map_entries(map,
 &g->netlist_vars->ctxsw_regs.perf_gpc,
-count, offset, max_cnt, base, ~0)) {
+count, offset, max_cnt, base, ~0) != 0) {
 return -EINVAL;
 }

 base = (NV_PERF_PMMGPCROUTER_STRIDE * gpc_num);
 if (add_ctxsw_buffer_map_entries(map,
 &g->netlist_vars->ctxsw_regs.gpc_router,
-count, offset, max_cnt, base, ~0)) {
+count, offset, max_cnt, base, ~0) != 0) {
 return -EINVAL;
 }

 /* Counter Aggregation Unit, if available */
-if (g->netlist_vars->ctxsw_regs.pm_cau.count) {
+if (g->netlist_vars->ctxsw_regs.pm_cau.count != 0U) {
 base = gpc_base + (gpc_stride * gpc_num)
 + tpc_in_gpc_base;
 if (add_ctxsw_buffer_map_entries_subunits(map,
 &g->netlist_vars->ctxsw_regs.pm_cau,
 count, offset, max_cnt, base, num_tpcs,
 tpc_in_gpc_stride,
-(tpc_in_gpc_stride - 1))) {
+(tpc_in_gpc_stride - 1)) != 0) {
 return -EINVAL;
 }
 }
@@ -7785,19 +7785,19 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)

 /* Add entries from _LIST_pm_ctx_reg_SYS */
 if (add_ctxsw_buffer_map_entries_pmsys(map, &g->netlist_vars->ctxsw_regs.pm_sys,
-&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0)) {
+&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0) != 0) {
 goto cleanup;
 }

 /* Add entries from _LIST_nv_perf_ctx_reg_SYS */
 if (add_ctxsw_buffer_map_entries(map, &g->netlist_vars->ctxsw_regs.perf_sys,
-&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0)) {
+&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0) != 0) {
 goto cleanup;
 }

 /* Add entries from _LIST_nv_perf_sysrouter_ctx_reg*/
 if (add_ctxsw_buffer_map_entries(map, &g->netlist_vars->ctxsw_regs.perf_sys_router,
-&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0)) {
+&count, &offset, hwpm_ctxsw_reg_count_max, 0, ~0) != 0) {
 goto cleanup;
 }

@@ -7819,7 +7819,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
 hwpm_ctxsw_reg_count_max, 0,
 g->gr.num_fbps,
 g->ops.gr.get_pmm_per_chiplet_offset(),
-~0)) {
+~0) != 0) {
 goto cleanup;
 }

@@ -7828,7 +7828,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
 &g->netlist_vars->ctxsw_regs.fbp_router,
 &count, &offset,
 hwpm_ctxsw_reg_count_max, 0, g->gr.num_fbps,
-NV_PERF_PMM_FBP_ROUTER_STRIDE, ~0)) {
+NV_PERF_PMM_FBP_ROUTER_STRIDE, ~0) != 0) {
 goto cleanup;
 }

@@ -7846,7 +7846,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
 if (add_ctxsw_buffer_map_entries(map,
 &g->netlist_vars->ctxsw_regs.pm_rop,
 &count, &offset,
-hwpm_ctxsw_reg_count_max, 0, ~0)) {
+hwpm_ctxsw_reg_count_max, 0, ~0) != 0) {
 goto cleanup;
 }

@@ -7855,7 +7855,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
 &g->netlist_vars->ctxsw_regs.pm_ltc,
 &count, &offset,
 hwpm_ctxsw_reg_count_max, 0,
-num_ltc, ltc_stride, ~0)) {
+num_ltc, ltc_stride, ~0) != 0) {
 goto cleanup;
 }

@@ -7863,7 +7863,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)

 /* Add GPC entries */
 if (add_ctxsw_buffer_map_entries_gpcs(g, map, &count, &offset,
-hwpm_ctxsw_reg_count_max)) {
+hwpm_ctxsw_reg_count_max) != 0) {
 goto cleanup;
 }

@@ -8208,11 +8208,11 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 }

 cleanup:
-if (offsets) {
+if (offsets != NULL) {
 nvgpu_kfree(g, offsets);
 }

-if (gr_ctx->patch_ctx.mem.cpu_va) {
+if (gr_ctx->patch_ctx.mem.cpu_va != NULL) {
 gr_gk20a_ctx_patch_write_end(g, gr_ctx, gr_ctx_ready);
 }

@@ -8250,7 +8250,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 num_ctx_rd_ops, ch_is_curr_ctx);

 tmp_err = gr_gk20a_enable_ctxsw(g);
-if (tmp_err) {
+if (tmp_err != 0) {
 nvgpu_err(g, "unable to restart ctxsw!");
 err = tmp_err;
 }
@@ -247,7 +247,7 @@ static void __update_pte(struct vm_gk20a *vm,
|
|||||||
pte_w[1] |= gmmu_pte_vol_true_f();
|
pte_w[1] |= gmmu_pte_vol_true_f();
|
||||||
}
|
}
|
||||||
|
|
||||||
if (attrs->ctag) {
|
if (attrs->ctag != 0ULL) {
|
||||||
attrs->ctag += page_size;
|
attrs->ctag += page_size;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -266,7 +266,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
|
|||||||
u32 pte_w[2] = {0, 0};
|
u32 pte_w[2] = {0, 0};
|
||||||
int ctag_shift = ilog2(g->ops.fb.compression_page_size(g));
|
int ctag_shift = ilog2(g->ops.fb.compression_page_size(g));
|
||||||
|
|
||||||
if (phys_addr) {
|
if (phys_addr != 0ULL) {
|
||||||
__update_pte(vm, pte_w, phys_addr, attrs);
|
__update_pte(vm, pte_w, phys_addr, attrs);
|
||||||
} else if (attrs->sparse) {
|
} else if (attrs->sparse) {
|
||||||
__update_pte_sparse(pte_w);
|
__update_pte_sparse(pte_w);
|
||||||
@@ -468,7 +468,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
|
|||||||
}
|
}
|
||||||
} while (nvgpu_timeout_expired(&timeout) == 0);
|
} while (nvgpu_timeout_expired(&timeout) == 0);
|
||||||
|
|
||||||
if (nvgpu_timeout_peek_expired(&timeout)) {
|
if (nvgpu_timeout_peek_expired(&timeout) != 0) {
|
||||||
if (g->ops.fb.dump_vpr_info != NULL) {
|
if (g->ops.fb.dump_vpr_info != NULL) {
|
||||||
g->ops.fb.dump_vpr_info(g);
|
g->ops.fb.dump_vpr_info(g);
|
||||||
}
|
}
|
||||||
@@ -521,7 +521,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
         }
     } while (nvgpu_timeout_expired(&timeout) == 0);
 
-    if (nvgpu_timeout_peek_expired(&timeout)) {
+    if (nvgpu_timeout_peek_expired(&timeout) != 0) {
         nvgpu_warn(g, "l2_system_invalidate too many retries");
     }
 
@@ -88,7 +88,7 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)
     nvgpu_err(g, "dump PMU trace buffer");
     for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
         for (j = 0; j < 0x40; j++) {
-            if (trace1[(i / 4) + j]) {
+            if (trace1[(i / 4U) + j] != 0U) {
                 break;
             }
         }
@@ -129,7 +129,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
     }
 
     /* be sure that ctx info is in place if there are ctx ops */
-    if (ctx_wr_count | ctx_rd_count) {
+    if ((ctx_wr_count | ctx_rd_count) != 0U) {
         if (!gr_context_info_available(&g->gr)) {
             nvgpu_err(g, "gr context data not available");
             return -ENODEV;
@@ -218,7 +218,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
         }
     }
 
-    if (ctx_wr_count | ctx_rd_count) {
+    if ((ctx_wr_count | ctx_rd_count) != 0U) {
         err = gr_gk20a_exec_ctx_ops(ch, ops, num_ops,
                 ctx_wr_count, ctx_rd_count,
                 is_current_ctx);
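Both exec_regops_gk20a hunks above turn the bitwise-OR test on the write/read counters into an explicit comparison against 0U. A minimal standalone sketch of the same pattern (hypothetical names, not the driver's own code) shows that the old and new forms take the branch under exactly the same conditions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's ctx_wr_count / ctx_rd_count. */
static bool any_ctx_ops(uint32_t wr_count, uint32_t rd_count)
{
    /*
     * MISRA 14.4 wants the controlling expression to be essentially
     * boolean, so the bitwise OR of the two counters is compared against
     * 0U instead of being used directly.  The OR is non-zero exactly when
     * at least one counter is non-zero, so the branch is taken in the same
     * cases as the old "if (wr_count | rd_count)".
     */
    return (wr_count | rd_count) != 0U;
}

int main(void)
{
    printf("%d %d %d\n",
           (int)any_ctx_ops(0U, 0U),  /* 0: no context ops */
           (int)any_ctx_ops(3U, 0U),  /* 1: write ops only */
           (int)any_ctx_ops(0U, 7U)); /* 1: read ops only */
    return 0;
}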
@@ -97,7 +97,7 @@ static void dump_gpc_pll(struct gk20a *g, struct pll *gpll, u32 last_cfg)
         \
     nvgpu_info(g, " " #__addr_str__ " [0x%x] = 0x%x", \
         __addr__, __data__); \
-} while (0)
+} while (false)
 
     nvgpu_info(g, "GPCPLL DUMP:");
     nvgpu_info(g, " gpcpll s/w M=%u N=%u P=%u\n", gpll->M, gpll->N, gpll->PL);
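The dump_gpc_pll hunk above is the first of several that change a statement-macro terminator from `} while (0)` to `} while (false)`. A small self-contained sketch of the idiom (a hypothetical logging macro, not one of nvgpu's) illustrates why the do/while wrapper is there and why the literal false satisfies MISRA 14.4:

#include <stdbool.h>
#include <stdio.h>

/*
 * Statement-like macro wrapped in do { ... } while (false).  The wrapper
 * makes the expansion behave as a single statement, so it composes safely
 * with an unbraced if/else; using the literal false instead of 0 keeps the
 * controlling expression essentially boolean, as MISRA 14.4 requires.
 */
#define LOG_PAIR(tag, a, b) \
    do { \
        printf("%s: %d %d\n", (tag), (a), (b)); \
    } while (false)

int main(void)
{
    int x = 1;

    if (x == 1)
        LOG_PAIR("pair", 2, 3); /* expands to exactly one statement */
    else
        LOG_PAIR("pair", 0, 0);

    return 0;
}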
@@ -65,7 +65,7 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
     struct gk20a *g = tsg->g;
     u32 runlist_entry_0 = ram_rl_entry_type_tsg_v();
 
-    if (tsg->timeslice_timeout) {
+    if (tsg->timeslice_timeout != 0U) {
         runlist_entry_0 |=
             ram_rl_entry_tsg_timeslice_scale_f(tsg->timeslice_scale) |
             ram_rl_entry_tsg_timeslice_timeout_f(tsg->timeslice_timeout);
@@ -280,7 +280,7 @@ void channel_gv11b_unbind(struct channel_gk20a *ch)
 
     nvgpu_log_fn(g, " ");
 
-    if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
+    if (nvgpu_atomic_cmpxchg(&ch->bound, true, false) != 0) {
         gk20a_writel(g, ccsr_channel_inst_r(ch->chid),
             ccsr_channel_inst_ptr_f(0) |
             ccsr_channel_inst_bind_false_f());
@@ -372,7 +372,7 @@ void gv11b_dump_channel_status_ramfc(struct gk20a *g,
         info->inst.sem_payload,
         info->inst.sem_execute);
 
-    if (info->sema.addr) {
+    if (info->sema.addr != 0ULL) {
         gk20a_debug_output(o, "SEMA STATE: value: 0x%08x "
             "next_val: 0x%08x addr: 0x%010llx\n",
             info->sema.value,
@@ -405,13 +405,13 @@ void gv11b_dump_eng_status(struct gk20a *g,
         "tsg" : "channel",
         gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status));
 
-    if (fifo_engine_status_eng_reload_v(status)) {
+    if (fifo_engine_status_eng_reload_v(status) != 0U) {
         gk20a_debug_output(o, "ctx_reload ");
     }
-    if (fifo_engine_status_faulted_v(status)) {
+    if (fifo_engine_status_faulted_v(status) != 0U) {
         gk20a_debug_output(o, "faulted ");
     }
-    if (fifo_engine_status_engine_v(status)) {
+    if (fifo_engine_status_engine_v(status) != 0U) {
         gk20a_debug_output(o, "busy ");
     }
     gk20a_debug_output(o, "\n");
@@ -594,7 +594,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
         if (ctx_stat ==
             fifo_engine_status_ctx_status_ctxsw_switch_v()) {
             /* Eng save hasn't started yet. Continue polling */
-            if (eng_intr_pending) {
+            if (eng_intr_pending != 0U) {
                 /* if eng intr, stop polling */
                 *reset_eng_bitmask |= BIT(act_eng_id);
                 ret = 0;
@@ -607,7 +607,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
             fifo_engine_status_ctx_status_ctxsw_save_v()) {
 
             if (id == fifo_engine_status_id_v(eng_stat)) {
-                if (eng_intr_pending) {
+                if (eng_intr_pending != 0U) {
                     /* preemption will not finish */
                     *reset_eng_bitmask |= BIT(act_eng_id);
                     ret = 0;
@@ -623,7 +623,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
             fifo_engine_status_ctx_status_ctxsw_load_v()) {
 
             if (id == fifo_engine_status_next_id_v(eng_stat)) {
-                if (eng_intr_pending) {
+                if (eng_intr_pending != 0U) {
                     /* preemption will not finish */
                     *reset_eng_bitmask |= BIT(act_eng_id);
                     ret = 0;
@@ -764,7 +764,7 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
 
     if (id_type == ID_TYPE_UNKNOWN) {
         for (rlid = 0; rlid < f->max_runlists; rlid++) {
-            if (act_eng_bitmask) {
+            if (act_eng_bitmask != 0U) {
                 /* eng ids are known */
                 runlist = &f->runlist_info[rlid];
                 if (runlist->eng_bitmask & act_eng_bitmask) {
@@ -867,7 +867,7 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
     }
     nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
-    if (last_ch) {
+    if (last_ch != NULL) {
         g->ops.fifo.ring_channel_doorbell(last_ch);
     }
 
@@ -1063,7 +1063,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
          * to multiple runlists, then abort all runlists
          */
         for (rlid = 0; rlid < f->max_runlists; rlid++) {
-            if (act_eng_bitmask) {
+            if (act_eng_bitmask != 0U) {
                 /* eng ids are known */
                 runlist = &f->runlist_info[rlid];
                 if (runlist->eng_bitmask & act_eng_bitmask) {
@@ -1109,7 +1109,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 
     /* Disable power management */
     if (g->support_pmu && g->elpg_enabled) {
-        if (nvgpu_pmu_disable_elpg(g)) {
+        if (nvgpu_pmu_disable_elpg(g) != 0) {
             nvgpu_err(g, "failed to set disable elpg");
         }
     }
@@ -1136,7 +1136,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
                 mmfault->faulted_engine);
     }
 
-    if (tsg) {
+    if (tsg != NULL) {
         gk20a_disable_tsg(tsg);
     }
 
@@ -1198,7 +1198,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
                  * taking place so no need to repeat
                  */
                 if (nvgpu_mutex_tryacquire(
-                    &g->fifo.gr_reset_mutex)) {
+                    &g->fifo.gr_reset_mutex) != 0) {
 
                     gk20a_fifo_reset_engine(g,
                         engine_id);
@@ -1212,10 +1212,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
     }
 
 #ifdef CONFIG_GK20A_CTXSW_TRACE
-    if (tsg)
+    if (tsg != NULL)
         gk20a_ctxsw_trace_tsg_reset(g, tsg);
 #endif
-    if (tsg) {
+    if (tsg != NULL) {
         if (g->fifo.deferred_reset_pending) {
             gk20a_disable_tsg(tsg);
         } else {
@@ -835,10 +835,10 @@ static int gr_gv11b_handle_gpcmmu_ecc_exception(struct gk20a *g, u32 gpc,
             gr_gpc0_mmu_l1tlb_ecc_status_reset_task_f());
 
     /* Handle overflow */
-    if (corrected_overflow) {
+    if (corrected_overflow != 0U) {
         corrected_delta += (0x1UL << gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_s());
     }
-    if (uncorrected_overflow) {
+    if (uncorrected_overflow != 0U) {
         uncorrected_delta += (0x1UL << gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_s());
     }
 
@@ -1707,13 +1707,13 @@ void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
                 cta_preempt_option);
     }
 
-    if (gr_ctx->preempt_ctxsw_buffer.gpu_va) {
+    if (gr_ctx->preempt_ctxsw_buffer.gpu_va != 0ULL) {
         u32 addr;
         u32 size;
         u32 cbes_reserve;
 
         if (g->ops.gr.set_preemption_buffer_va != NULL) {
-            if (ctxheader->gpu_va) {
+            if (ctxheader->gpu_va != 0ULL) {
                 g->ops.gr.set_preemption_buffer_va(g, ctxheader,
                     gr_ctx->preempt_ctxsw_buffer.gpu_va);
             } else {
@@ -2016,7 +2016,7 @@ int gr_gv11b_dump_gr_status_regs(struct gk20a *g,
 
 static bool gr_activity_empty_or_preempted(u32 val)
 {
-    while (val) {
+    while (val != 0U) {
         u32 v = val & 7;
         if (v != gr_activity_4_gpc0_empty_v() &&
             v != gr_activity_4_gpc0_preempted_v()) {
@@ -2082,7 +2082,7 @@ void gr_gv11b_commit_global_attrib_cb(struct gk20a *g,
 {
     int attrBufferSize;
 
-    if (gr_ctx->preempt_ctxsw_buffer.gpu_va) {
+    if (gr_ctx->preempt_ctxsw_buffer.gpu_va != 0ULL) {
         attrBufferSize = gr_ctx->betacb_ctxsw_buffer.size;
     } else {
         attrBufferSize = g->ops.gr.calc_global_ctx_buffer_size(g);
@@ -2179,7 +2179,7 @@ static int gr_gv11b_handle_warp_esr_error_mmu_nack(struct gk20a *g,
     int err = 0;
 
     fault_ch = gk20a_channel_get(fault_ch);
-    if (fault_ch) {
+    if (fault_ch != NULL) {
         if (!fault_ch->mmu_nack_handled) {
             /* recovery is not done for the channel implying mmu
              * nack interrupt is serviced before mmu fault. Force
@@ -2296,7 +2296,7 @@ static int gr_gv11b_handle_all_warp_esr_errors(struct gk20a *g,
         return 0;
     }
 
-    if (fault_ch) {
+    if (fault_ch != NULL) {
         tsg = &g->fifo.tsg[fault_ch->tsgid];
 
         /*
@@ -2317,7 +2317,7 @@ static int gr_gv11b_handle_all_warp_esr_errors(struct gk20a *g,
         nvgpu_rwsem_down_read(&tsg->ch_list_lock);
         nvgpu_list_for_each_entry(ch_tsg, &tsg->ch_list,
                 channel_gk20a, ch_entry) {
-            if (gk20a_channel_get(ch_tsg)) {
+            if (gk20a_channel_get(ch_tsg) != NULL) {
                 g->ops.fifo.set_error_notifier(ch_tsg,
                         NVGPU_ERR_NOTIFIER_GR_EXCEPTION);
                 gk20a_channel_put(ch_tsg);
@@ -2377,7 +2377,7 @@ int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
         return ret;
     }
 
-    if (fault_ch) {
+    if (fault_ch != NULL) {
         tsg = tsg_gk20a_from_ch(fault_ch);
         if (!tsg) {
             return -EINVAL;
@@ -3120,10 +3120,10 @@ void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
     nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
             "sm tpc esr sm sel reg val: 0x%x", reg_val);
     *esr_sm_sel = 0;
-    if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val)) {
+    if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val) != 0U) {
         *esr_sm_sel = 1;
     }
-    if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val)) {
+    if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val) != 0U) {
         *esr_sm_sel |= 1 << 1;
     }
     nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
@@ -4386,12 +4386,12 @@ static int gr_gv11b_ecc_scrub_is_done(struct gk20a *g,
                 break;
             }
 
-            if (nvgpu_timeout_expired(&timeout)) {
+            if (nvgpu_timeout_expired(&timeout) != 0) {
                 return -ETIMEDOUT;
             }
 
             nvgpu_udelay(ECC_SCRUBBING_TIMEOUT_DEFAULT);
-        } while (1);
+        } while (true);
     }
 }
 
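The gr_gv11b_ecc_scrub_is_done hunk keeps the same poll-until-done-or-timeout shape but writes the unconditional loop as `while (true)`. A rough standalone sketch of that loop shape, with a fake readiness check and a simple countdown standing in for the nvgpu timeout helpers, is:

#include <stdbool.h>
#include <stdio.h>

/* Fake readiness check standing in for the ECC scrub status registers. */
static bool scrub_done(int *polls)
{
    return ++(*polls) >= 5; /* pretend the hardware finishes after 5 polls */
}

/* Poll until done or until the budget (a stand-in for nvgpu_timeout) runs out. */
static int poll_scrub(void)
{
    int polls = 0;
    int budget = 100;

    do {
        if (scrub_done(&polls)) {
            return 0;  /* success, like the break in the hunk above */
        }
        if (--budget == 0) {
            return -1; /* would be -ETIMEDOUT in the driver */
        }
        /* the driver delays here with nvgpu_udelay() */
    } while (true);    /* true, not 1: the controlling expression stays boolean */
}

int main(void)
{
    printf("poll_scrub() = %d\n", poll_scrub());
    return 0;
}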
@@ -4658,19 +4658,19 @@ void gr_gv11b_ecc_init_scrub_reg(struct gk20a *g)
 
     gr_gv11b_detect_ecc_enabled_units(g);
 
-    if (gr_gv11b_ecc_scrub_sm_lrf(g)) {
+    if (gr_gv11b_ecc_scrub_sm_lrf(g) != 0) {
         nvgpu_warn(g, "ECC SCRUB SM LRF Failed");
     }
-    if (gr_gv11b_ecc_scrub_sm_l1_data(g)) {
+    if (gr_gv11b_ecc_scrub_sm_l1_data(g) != 0) {
         nvgpu_warn(g, "ECC SCRUB SM L1 DATA Failed");
     }
-    if (gr_gv11b_ecc_scrub_sm_l1_tag(g)) {
+    if (gr_gv11b_ecc_scrub_sm_l1_tag(g) != 0) {
         nvgpu_warn(g, "ECC SCRUB SM L1 TAG Failed");
     }
-    if (gr_gv11b_ecc_scrub_sm_cbu(g)) {
+    if (gr_gv11b_ecc_scrub_sm_cbu(g) != 0) {
         nvgpu_warn(g, "ECC SCRUB SM CBU Failed");
     }
-    if (gr_gv11b_ecc_scrub_sm_icahe(g)) {
+    if (gr_gv11b_ecc_scrub_sm_icahe(g) != 0) {
         nvgpu_warn(g, "ECC SCRUB SM ICACHE Failed");
     }
 
@@ -341,10 +341,10 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0)
                 pwr_pmu_falcon_ecc_status_reset_task_f());
 
             /* update counters per slice */
-            if (corrected_overflow) {
+            if (corrected_overflow != 0U) {
                 corrected_delta += (0x1UL << pwr_pmu_falcon_ecc_corrected_err_count_total_s());
             }
-            if (uncorrected_overflow) {
+            if (uncorrected_overflow != 0U) {
                 uncorrected_delta += (0x1UL << pwr_pmu_falcon_ecc_uncorrected_err_count_total_s());
             }
 
@@ -49,7 +49,7 @@ void gv11b_free_subctx_header(struct channel_gk20a *c)
 
     nvgpu_log(g, gpu_dbg_fn, "gv11b_free_subctx_header");
 
-    if (ctxheader->gpu_va) {
+    if (ctxheader->gpu_va != 0ULL) {
         nvgpu_gmmu_unmap(c->vm, ctxheader, ctxheader->gpu_va);
 
         nvgpu_dma_free(g, ctxheader);
@@ -303,7 +303,7 @@ static inline void nvgpu_alloc_disable_dbg(struct nvgpu_allocator *a)
             seq_printf(seq, fmt "\n", ##arg); \
         else \
             alloc_dbg(allocator, fmt, ##arg); \
-    } while (0)
+    } while (false)
 #endif
 
 #define do_alloc_dbg(a, fmt, arg...) \
@@ -323,7 +323,7 @@ static inline void nvgpu_alloc_disable_dbg(struct nvgpu_allocator *a)
     do { \
         if ((a)->debug) \
             do_alloc_dbg((a), fmt, ##arg); \
-    } while (0)
+    } while (false)
 #else
 #define alloc_dbg(a, fmt, arg...) do_alloc_dbg(a, fmt, ##arg)
 #endif
@@ -321,7 +321,7 @@ do { \
     (pboardobjgrp)->pmu.unitid = PMU_UNIT_##_ENG; \
     (pboardobjgrp)->pmu.classid = \
         NV_PMU_##_ENG##_BOARDOBJGRP_CLASS_ID_##_CLASS; \
-} while (0)
+} while (false)
 
 #define BOARDOBJGRP_PMU_CMD_GRP_SET_CONSTRUCT(g, pboardobjgrp, eng, ENG, \
     class, CLASS) \
@@ -117,7 +117,7 @@ struct nvgpu_clk_session;
     do { \
         nvgpu_log(g, gpu_dbg_clk_arb, \
             fmt, ##args); \
-    } while (0)
+    } while (false)
 
 struct nvgpu_clk_notification {
     u32 notification;
@@ -266,6 +266,6 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte);
     } else { \
         nvgpu_log(g, gpu_dbg_pte, fmt, ##args); \
     } \
-} while (0)
+} while (false)
 
 #endif /* NVGPU_GMMU_H */
@@ -172,7 +172,7 @@ extern u64 nvgpu_dbg_mask;
         if (((log_mask) & nvgpu_dbg_mask) != 0) \
             __nvgpu_log_msg(NULL, __func__, __LINE__, \
                     NVGPU_DEBUG, fmt "\n", ##arg); \
-    } while (0)
+    } while (false)
 
 /*
  * Some convenience macros.
@@ -156,7 +156,7 @@ enum {
         _stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr), \
             (sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
             (_size), NULL, NULL, false); \
-    } while (0)
+    } while (false)
 
 /* RPC blocking call to copy back data from PMU to _prpc */
 #define PMU_RPC_EXECUTE_CPB(_stat, _pmu, _unit, _func, _prpc, _size)\
@@ -170,7 +170,7 @@ enum {
         _stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr), \
             (sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
             (_size), NULL, NULL, true); \
-    } while (0)
+    } while (false)
 
 /* RPC non-blocking with call_back handler option */
 #define PMU_RPC_EXECUTE_CB(_stat, _pmu, _unit, _func, _prpc, _size, _cb, _cbp)\
@@ -184,7 +184,7 @@ enum {
         _stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr), \
             (sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
             (_size), _cb, _cbp, false); \
-    } while (0)
+    } while (false)
 
 typedef void (*pmu_callback)(struct gk20a *g, struct pmu_msg *msg, void *param,
     u32 handle, u32 status);
@@ -192,7 +192,7 @@ static inline unsigned long __hweight64(uint64_t x)
 
 #define __packed __attribute__((packed))
 
-#define IS_ENABLED(config) 0
+#define IS_ENABLED(config) false
 
 #define MAX_ERRNO 4095
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -28,7 +28,7 @@ struct seq_file;
             seq_printf(s, fmt, ##msg); \
         else \
             pr_info(fmt, ##msg); \
-    } while (0)
+    } while (false)
 
 #define MAX_STACK_TRACE 20
 
@@ -72,7 +72,7 @@ static char *nvgpu_dma_flags_to_str(struct gk20a *g, unsigned long flags)
             strncat(buf, str_flag, bytes_available); \
             bytes_available -= strlen(str_flag); \
         } \
-    } while (0)
+    } while (false)
 
     APPEND_FLAG(NVGPU_DMA_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING ");
     APPEND_FLAG(NVGPU_DMA_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS ");