gpu: nvgpu: remove duplicate \n from log messages

nvgpu_log/info/warn/err() internally append a \n to the end of the
message. Hence, callers should not include a \n of their own; doing so
results in a duplicate \n being printed, which shows up as empty lines
in the log. Remove the duplicate \n from all err/warn messages.
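
For illustration, a minimal before/after sketch of the call pattern
being fixed (the message text comes from one of the call sites below;
the surrounding context is abbreviated):

    /* Before: the trailing \n plus the \n nvgpu_err() appends
     * internally produces the message followed by an empty log line. */
    nvgpu_err(g, "failed to power on gpu\n");

    /* After: no trailing \n; the logging function emits exactly one. */
    nvgpu_err(g, "failed to power on gpu");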

Bug 1928311

Change-Id: I99362c5327f36146f28ba63d4e68181589735c39
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-on: http://git-master/r/1487232
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Stephen Warren <swarren@nvidia.com>
Date:         2017-05-22 12:27:40 -06:00
Committed by: mobile promotions
Parent:       726900b843
Commit:       2e338c77ea

25 changed files with 123 additions and 123 deletions

@@ -203,7 +203,7 @@ static int gk20a_init_error_notifier(struct channel_gk20a *ch,
         if (end > dmabuf->size || end < sizeof(struct nvgpu_notification)) {
                 dma_buf_put(dmabuf);
-                nvgpu_err(ch->g, "gk20a_init_error_notifier: invalid offset\n");
+                nvgpu_err(ch->g, "gk20a_init_error_notifier: invalid offset");
                 return -EINVAL;
         }
@@ -462,7 +462,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
         if (end > dmabuf->size || end < sizeof(struct notification)) {
                 dma_buf_put(dmabuf);
-                nvgpu_err(g, "invalid notifier offset\n");
+                nvgpu_err(g, "invalid notifier offset");
                 return -EINVAL;
         }

@@ -357,7 +357,7 @@ static int nvgpu_gpu_ioctl_set_mmu_debug_mode(
         struct nvgpu_gpu_mmu_debug_mode_args *args)
 {
         if (gk20a_busy(g)) {
-                nvgpu_err(g, "failed to power on gpu\n");
+                nvgpu_err(g, "failed to power on gpu");
                 return -EINVAL;
         }
@@ -559,7 +559,7 @@ static inline int get_timestamps_zipper(struct gk20a *g,
         unsigned int i = 0;
         if (gk20a_busy(g)) {
-                nvgpu_err(g, "GPU not powered on\n");
+                nvgpu_err(g, "GPU not powered on");
                 err = -EINVAL;
                 goto end;
         }
@@ -598,7 +598,7 @@ static int nvgpu_gpu_get_cpu_time_correlation_info(
                 get_cpu_timestamp = get_cpu_timestamp_timeofday;
                 break;
         default:
-                nvgpu_err(g, "invalid cpu clock source id\n");
+                nvgpu_err(g, "invalid cpu clock source id");
                 return -EINVAL;
         }
@@ -663,7 +663,7 @@ static int nvgpu_gpu_get_engine_info(
                         break;
                 default:
-                        nvgpu_err(g, "Unmapped engine enum %u\n",
+                        nvgpu_err(g, "Unmapped engine enum %u",
                                 engine_enum);
                         continue;
                 }

@@ -819,7 +819,7 @@ void nvgpu_kmem_fini(struct gk20a *g, int flags)
                 if (flags & NVGPU_KMEM_FINI_WARN) {
                         WARN(1, "Letting %d allocs leak!!\n", count);
                 } else if (flags & NVGPU_KMEM_FINI_BUG) {
-                        nvgpu_err(g, "Letting %d allocs leak!!\n", count);
+                        nvgpu_err(g, "Letting %d allocs leak!!", count);
                         BUG();
                 }
         }

@@ -399,7 +399,7 @@ clean_up:
         gk20a_mm_unpin(g->dev, dmabuf, bfr.sgt);
         nvgpu_mutex_release(&vm->update_gmmu_lock);
-        nvgpu_log_info(g, "err=%d\n", err);
+        nvgpu_log_info(g, "err=%d", err);
         return 0;
 }

@@ -315,7 +315,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
         } while (!nvgpu_timeout_expired(&timeout));
         if (!channel_idle) {
-                nvgpu_err(ch->g, "jobs not freed for channel %d\n",
+                nvgpu_err(ch->g, "jobs not freed for channel %d",
                                 ch->hw_chid);
                 return -EBUSY;
         }
@@ -336,7 +336,7 @@ int gk20a_channel_set_runlist_interleave(struct channel_gk20a *ch,
         int ret;
         if (gk20a_is_channel_marked_as_tsg(ch)) {
-                nvgpu_err(g, "invalid operation for TSG!\n");
+                nvgpu_err(g, "invalid operation for TSG!");
                 return -EINVAL;
         }
@@ -916,7 +916,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
         err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
         if (err) {
-                nvgpu_err(g, "%s: memory allocation failed\n", __func__);
+                nvgpu_err(g, "%s: memory allocation failed", __func__);
                 goto clean_up;
         }
@@ -1032,7 +1032,7 @@ static int channel_gk20a_alloc_job(struct channel_gk20a *c,
                         *job_out = &c->joblist.pre_alloc.jobs[put];
                 else {
                         nvgpu_warn(c->g,
-                                        "out of job ringbuffer space\n");
+                                        "out of job ringbuffer space");
                         err = -EAGAIN;
                 }
         } else {
@@ -1261,7 +1261,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
                         gpfifo_size * sizeof(struct nvgpu_gpfifo),
                         &c->gpfifo.mem);
         if (err) {
-                nvgpu_err(g, "%s: memory allocation failed\n", __func__);
+                nvgpu_err(g, "%s: memory allocation failed", __func__);
                 goto clean_up;
         }
@@ -1906,7 +1906,7 @@ int gk20a_free_priv_cmdbuf(struct channel_gk20a *c, struct priv_cmd_entry *e)
                 /* read the entry's valid flag before reading its contents */
                 rmb();
                 if ((q->get != e->off) && e->off != 0)
-                        nvgpu_err(g, "requests out-of-order, ch=%d\n",
+                        nvgpu_err(g, "requests out-of-order, ch=%d",
                                         c->hw_chid);
                 q->get = e->off + e->size;
         }

@@ -503,7 +503,7 @@ static void gk20a_channel_semaphore_launcher(
                 fence, fence->name);
         err = sync_fence_wait(fence, -1);
         if (err < 0)
-                nvgpu_err(g, "error waiting pre-fence: %d\n", err);
+                nvgpu_err(g, "error waiting pre-fence: %d", err);
         gk20a_dbg_info(
                 "wait completed (%d) for fence %p '%s', triggering gpu work",

@@ -241,7 +241,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
         ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
         if (!ch) {
                 nvgpu_err(dbg_s->g,
-                        "no channel bound to dbg session\n");
+                        "no channel bound to dbg session");
                 return -EINVAL;
         }
@@ -759,7 +759,7 @@ static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
                         write_size);
         nvgpu_mutex_release(&g->dbg_sessions_lock);
         if (err) {
-                nvgpu_err(g, "copy_to_user failed!\n");
+                nvgpu_err(g, "copy_to_user failed!");
                 return err;
         }
@@ -1197,7 +1197,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
         /* be sure that ctx info is in place */
         if (!g->is_virtual &&
                 !gr_context_info_available(dbg_s, &g->gr)) {
-                nvgpu_err(g, "gr context data not available\n");
+                nvgpu_err(g, "gr context data not available");
                 return -ENODEV;
         }
@@ -1414,7 +1414,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
         ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
         if (!ch_gk20a) {
                 nvgpu_err(g,
-                        "no bound channel for smpc ctxsw mode update\n");
+                        "no bound channel for smpc ctxsw mode update");
                 err = -EINVAL;
                 goto clean_up;
         }
@@ -1423,7 +1423,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
                         args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
         if (err) {
                 nvgpu_err(g,
-                        "error (%d) during smpc ctxsw mode update\n", err);
+                        "error (%d) during smpc ctxsw mode update", err);
                 goto clean_up;
         }
@@ -1466,7 +1466,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
         ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
         if (!ch_gk20a) {
                 nvgpu_err(g,
-                        "no bound channel for pm ctxsw mode update\n");
+                        "no bound channel for pm ctxsw mode update");
                 err = -EINVAL;
                 goto clean_up;
         }
@@ -1475,7 +1475,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
                         args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW);
         if (err)
                 nvgpu_err(g,
-                        "error (%d) during pm ctxsw mode update\n", err);
+                        "error (%d) during pm ctxsw mode update", err);
         /* gk20a would require a WAR to set the core PM_ENABLE bit, not
          * added here with gk20a being deprecated
@@ -1528,7 +1528,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
         err = gr_gk20a_enable_ctxsw(g);
         if (err)
-                nvgpu_err(g, "unable to restart ctxsw!\n");
+                nvgpu_err(g, "unable to restart ctxsw!");
 clean_up:
         nvgpu_mutex_release(&g->dbg_sessions_lock);

@@ -152,7 +152,7 @@ u32 gk20a_fifo_get_gr_engine_id(struct gk20a *g)
                         1, ENGINE_GR_GK20A);
         if (!gr_engine_cnt) {
-                nvgpu_err(g, "No GR engine available on this device!\n");
+                nvgpu_err(g, "No GR engine available on this device!");
         }
         return gr_engine_id;
@@ -693,7 +693,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
                 int err = nvgpu_dma_alloc_sys(g, runlist_size,
                                 &runlist->mem[i]);
                 if (err) {
-                        nvgpu_err(g, "memory allocation failed\n");
+                        nvgpu_err(g, "memory allocation failed");
                         goto clean_up_runlist;
                 }
         }
@@ -947,7 +947,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
         err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
                         f->num_channels, &f->userd);
         if (err) {
-                nvgpu_err(g, "userd memory allocation failed\n");
+                nvgpu_err(g, "userd memory allocation failed");
                 goto clean_up;
         }
         gk20a_dbg(gpu_dbg_map, "userd gpu va = 0x%llx", f->userd.gpu_va);
@@ -1001,7 +1001,7 @@ void gk20a_fifo_handle_runlist_event(struct gk20a *g)
 {
         u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
-        gk20a_dbg(gpu_dbg_intr, "runlist event %08x\n",
+        gk20a_dbg(gpu_dbg_intr, "runlist event %08x",
                 runlist_event);
         gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
@@ -1259,7 +1259,7 @@ static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
         u32 intr;
         intr = gk20a_readl(g, fifo_intr_chsw_error_r());
-        nvgpu_err(g, "chsw: %08x\n", intr);
+        nvgpu_err(g, "chsw: %08x", intr);
         gk20a_fecs_dump_falcon_stats(g);
         gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
 }
@@ -1545,7 +1545,7 @@ static bool gk20a_fifo_handle_mmu_fault(
                 nvgpu_err(g, "%s mmu fault on engine %d, "
                         "engine subid %d (%s), client %d (%s), "
                         "addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
-                        "inst_ptr 0x%llx\n",
+                        "inst_ptr 0x%llx",
                         fake_fault ? "fake" : "",
                         engine_id,
                         f.engine_subid_v, f.engine_subid_desc,
@@ -2136,7 +2136,7 @@ bool gk20a_fifo_handle_sched_error(struct gk20a *g)
                 /* could not find the engine - should never happen */
                 if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
-                        nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine\n",
+                        nvgpu_err(g, "fifo sched error : 0x%08x, failed to find engine",
                                 sched_error);
                         ret = false;
                         goto err;
@@ -2193,7 +2193,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
         if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
                 /* pio mode is unused. this shouldn't happen, ever. */
                 /* should we clear it or just leave it pending? */
-                nvgpu_err(g, "fifo pio error!\n");
+                nvgpu_err(g, "fifo pio error!");
                 BUG_ON(1);
         }
@@ -2547,7 +2547,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
                 struct channel_gk20a *ch = NULL;
                 nvgpu_err(g,
-                        "preempt TSG %d timeout\n", id);
+                        "preempt TSG %d timeout", id);
                 down_read(&tsg->ch_list_lock);
                 list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
@@ -2563,7 +2563,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
                 struct channel_gk20a *ch = &g->fifo.channel[id];
                 nvgpu_err(g,
-                        "preempt channel %d timeout\n", id);
+                        "preempt channel %d timeout", id);
                 if (gk20a_channel_get(ch)) {
                         gk20a_set_error_notifier(ch,
@@ -2746,7 +2746,7 @@ int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
                         &g->fifo.engine_info[active_engine_id]);
                 if (err) {
                         nvgpu_err(g,
-                                "failed to enable engine %d activity\n", active_engine_id);
+                                "failed to enable engine %d activity", active_engine_id);
                         ret = err;
                 }
         }
@@ -2819,7 +2819,7 @@ clean_up:
                 gk20a_dbg_fn("failed");
                 if (gk20a_fifo_enable_engine_activity(g, eng_info))
                         nvgpu_err(g,
-                                "failed to enable gr engine activity\n");
+                                "failed to enable gr engine activity");
         } else {
                 gk20a_dbg_fn("done");
         }
@@ -2839,7 +2839,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
                                 &g->fifo.engine_info[active_engine_id],
                                 wait_for_idle);
                 if (err) {
-                        nvgpu_err(g, "failed to disable engine %d activity\n",
+                        nvgpu_err(g, "failed to disable engine %d activity",
                                 active_engine_id);
                         ret = err;
                         break;
@@ -2853,7 +2853,7 @@ int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
                                 &g->fifo.engine_info[active_engine_id]);
                         if (err)
                                 nvgpu_err(g,
-                                        "failed to re-enable engine %d activity\n",
+                                        "failed to re-enable engine %d activity",
                                         active_engine_id);
                 }
         }
@@ -4108,7 +4108,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
         struct gk20a *g = ch->g;
         if (gk20a_is_channel_marked_as_tsg(ch)) {
-                nvgpu_err(g, "invalid operation for TSG!\n");
+                nvgpu_err(g, "invalid operation for TSG!");
                 return -EINVAL;
         }
@@ -4127,7 +4127,7 @@ int gk20a_fifo_set_timeslice(struct channel_gk20a *ch, u32 timeslice)
 int gk20a_fifo_set_priority(struct channel_gk20a *ch, u32 priority)
 {
         if (gk20a_is_channel_marked_as_tsg(ch)) {
-                nvgpu_err(ch->g, "invalid operation for TSG!\n");
+                nvgpu_err(ch->g, "invalid operation for TSG!");
                 return -EINVAL;
         }

@@ -346,7 +346,7 @@ int gk20a_finalize_poweron(struct gk20a *g)
                 speed = 1 << (fls(speed) - 1);
                 err = g->ops.xve.set_speed(g, speed);
                 if (err) {
-                        nvgpu_err(g, "Failed to set PCIe bus speed!\n");
+                        nvgpu_err(g, "Failed to set PCIe bus speed!");
                         goto done;
                 }
         }

@@ -784,13 +784,13 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
         ret = gk20a_disable_channel_tsg(g, c);
         if (ret) {
-                nvgpu_err(g, "failed to disable channel/TSG\n");
+                nvgpu_err(g, "failed to disable channel/TSG");
                 goto clean_up;
         }
         ret = gk20a_fifo_preempt(g, c);
         if (ret) {
                 gk20a_enable_channel_tsg(g, c);
-                nvgpu_err(g, "failed to preempt channel/TSG\n");
+                nvgpu_err(g, "failed to preempt channel/TSG");
                 goto clean_up;
         }
@@ -1857,13 +1857,13 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
         ret = gk20a_disable_channel_tsg(g, c);
         if (ret) {
-                nvgpu_err(g, "failed to disable channel/TSG\n");
+                nvgpu_err(g, "failed to disable channel/TSG");
                 goto out;
         }
         ret = gk20a_fifo_preempt(g, c);
         if (ret) {
                 gk20a_enable_channel_tsg(g, c);
-                nvgpu_err(g, "failed to preempt channel/TSG\n");
+                nvgpu_err(g, "failed to preempt channel/TSG");
                 goto out;
         }
@@ -1925,14 +1925,14 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
         ret = gk20a_disable_channel_tsg(g, c);
         if (ret) {
-                nvgpu_err(g, "failed to disable channel/TSG\n");
+                nvgpu_err(g, "failed to disable channel/TSG");
                 return ret;
         }
         ret = gk20a_fifo_preempt(g, c);
         if (ret) {
                 gk20a_enable_channel_tsg(g, c);
-                nvgpu_err(g, "failed to preempt channel/TSG\n");
+                nvgpu_err(g, "failed to preempt channel/TSG");
                 return ret;
         }
@@ -2213,7 +2213,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
                                         false,
                                         ucode_info->surface_desc.aperture);
         if (!ucode_info->surface_desc.gpu_va) {
-                nvgpu_err(g, "failed to update gmmu ptes\n");
+                nvgpu_err(g, "failed to update gmmu ptes");
                 return -ENOMEM;
         }
@@ -2977,7 +2977,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
         int err;
         if (!tsg->vm) {
-                nvgpu_err(tsg->g, "No address space bound\n");
+                nvgpu_err(tsg->g, "No address space bound");
                 return -ENOMEM;
         }
@@ -3017,7 +3017,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
 {
         if (!tsg->vm) {
-                nvgpu_err(tsg->g, "No address space bound\n");
+                nvgpu_err(tsg->g, "No address space bound");
                 return;
         }
         tsg->g->ops.gr.free_gr_ctx(tsg->g, tsg->vm, tsg->tsg_gr_ctx);
@@ -3942,7 +3942,7 @@ static void gr_gk20a_detect_sm_arch(struct gk20a *g)
         if (raw_version == gr_gpc0_tpc0_sm_arch_spa_version_smkepler_lp_v())
                 version = 0x320; /* SM 3.2 */
         else
-                nvgpu_err(g, "Unknown SM version 0x%x\n",
+                nvgpu_err(g, "Unknown SM version 0x%x",
                         raw_version);
         /* on Kepler, SM version == SPA version */
@@ -4056,7 +4056,7 @@ clean_up:
         ret = gk20a_fifo_enable_engine_activity(g, gr_info);
         if (ret) {
                 nvgpu_err(g,
-                        "failed to enable gr engine activity\n");
+                        "failed to enable gr engine activity");
         }
 }
@@ -4181,7 +4181,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
         case GK20A_ZBC_TYPE_COLOR:
                 if (index >= GK20A_ZBC_TABLE_SIZE) {
                         nvgpu_err(g,
-                                "invalid zbc color table index\n");
+                                "invalid zbc color table index");
                         return -EINVAL;
                 }
                 for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
@@ -4196,7 +4196,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
         case GK20A_ZBC_TYPE_DEPTH:
                 if (index >= GK20A_ZBC_TABLE_SIZE) {
                         nvgpu_err(g,
-                                "invalid zbc depth table index\n");
+                                "invalid zbc depth table index");
                         return -EINVAL;
                 }
                 query_params->depth = gr->zbc_dep_tbl[index].depth;
@@ -4209,13 +4209,13 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
                                         query_params);
                 } else {
                         nvgpu_err(g,
-                                "invalid zbc table type\n");
+                                "invalid zbc table type");
                         return -EINVAL;
                 }
                 break;
         default:
                 nvgpu_err(g,
-                        "invalid zbc table type\n");
+                        "invalid zbc table type");
                 return -EINVAL;
         }
@@ -4305,7 +4305,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
                 gr->max_default_color_index = 3;
         else {
                 nvgpu_err(g,
-                        "fail to load default zbc color table\n");
+                        "fail to load default zbc color table");
                 return err;
         }
@@ -4324,7 +4324,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
                 gr->max_default_depth_index = 2;
         else {
                 nvgpu_err(g,
-                        "fail to load default zbc depth table\n");
+                        "fail to load default zbc depth table");
                 return err;
         }
@@ -5212,7 +5212,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
         if (!pmu->pg_buf.cpu_va) {
                 err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
                 if (err) {
-                        nvgpu_err(g, "failed to allocate memory\n");
+                        nvgpu_err(g, "failed to allocate memory");
                         return -ENOMEM;
                 }
         }
@@ -5589,7 +5589,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
         gk20a_gr_set_error_notifier(g, isr_data,
                 NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT);
         nvgpu_err(g,
-                "gr semaphore timeout\n");
+                "gr semaphore timeout");
         return -EINVAL;
 }
@@ -5601,7 +5601,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
                 NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
         /* This is an unrecoverable error, reset is needed */
         nvgpu_err(g,
-                "gr semaphore timeout\n");
+                "gr semaphore timeout");
         return -EINVAL;
 }
@@ -5615,7 +5615,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g,
                 gk20a_gr_set_error_notifier(g, isr_data,
                         NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
                 nvgpu_err(g, "invalid method class 0x%08x"
-                        ", offset 0x%08x address 0x%08x\n",
+                        ", offset 0x%08x address 0x%08x",
                         isr_data->class_num, isr_data->offset, isr_data->addr);
         }
         return ret;
@@ -5675,7 +5675,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
                 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
         nvgpu_err(g,
                 "class error 0x%08x, offset 0x%08x,"
-                " unhandled intr 0x%08x for channel %u\n",
+                " unhandled intr 0x%08x for channel %u",
                 isr_data->class_num, isr_data->offset,
                 gr_class_error, isr_data->chid);
@@ -5690,7 +5690,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g,
         gk20a_gr_set_error_notifier(g, isr_data,
                 NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
         nvgpu_err(g,
-                "firmware method 0x%08x, offset 0x%08x for channel %u\n",
+                "firmware method 0x%08x, offset 0x%08x for channel %u",
                 isr_data->class_num, isr_data->offset,
                 isr_data->chid);
         return -EINVAL;
@@ -5768,7 +5768,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
                         if (offset + sizeof(struct share_buffer_head) > buffer_size ||
                             offset + sizeof(struct share_buffer_head) < offset) {
                                 nvgpu_err(g,
-                                        "cyclestats buffer overrun at offset 0x%x\n",
+                                        "cyclestats buffer overrun at offset 0x%x",
                                         offset);
                                 break;
                         }
@@ -5786,7 +5786,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
                             offset + sh_hdr->size > buffer_size ||
                             offset + sh_hdr->size < offset) {
                                 nvgpu_err(g,
-                                        "bad cyclestate buffer header size at offset 0x%x\n",
+                                        "bad cyclestate buffer header size at offset 0x%x",
                                         offset);
                                 sh_hdr->failed = true;
                                 break;
@@ -5810,7 +5810,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
                                 if (!valid) {
                                         nvgpu_err(g,
-                                                "invalid cycletstats op offset: 0x%x\n",
+                                                "invalid cycletstats op offset: 0x%x",
                                                 op_elem->offset_bar0);
                                         sh_hdr->failed = exit = true;
@@ -6065,7 +6065,7 @@ static int gk20a_gr_update_sm_error_state(struct gk20a *g,
         err = gr_gk20a_disable_ctxsw(g);
         if (err) {
-                nvgpu_err(g, "unable to stop gr ctxsw\n");
+                nvgpu_err(g, "unable to stop gr ctxsw");
                 goto fail;
         }
@@ -6125,7 +6125,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g,
         err = gr_gk20a_disable_ctxsw(g);
         if (err) {
-                nvgpu_err(g, "unable to stop gr ctxsw\n");
+                nvgpu_err(g, "unable to stop gr ctxsw");
                 goto fail;
         }
@@ -6179,7 +6179,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
         warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr);
         if (!sm_debugger_attached) {
-                nvgpu_err(g, "sm hww global %08x warp %08x\n",
+                nvgpu_err(g, "sm hww global %08x warp %08x",
                         global_esr, warp_esr);
                 return -EFAULT;
         }
@@ -6199,7 +6199,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
                                 &early_exit,
                                 &ignore_debugger);
                 if (ret) {
-                        nvgpu_err(g, "could not pre-process sm error!\n");
+                        nvgpu_err(g, "could not pre-process sm error!");
                         return ret;
                 }
         }
@@ -6241,7 +6241,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
         if (do_warp_sync) {
                 ret = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, true);
                 if (ret) {
-                        nvgpu_err(g, "sm did not lock down!\n");
+                        nvgpu_err(g, "sm did not lock down!");
                         return ret;
                 }
         }
@@ -7357,7 +7357,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
         num_gpcs = *(u32 *)(context + ctxsw_prog_main_image_num_gpcs_o());
         if (gpc_num >= num_gpcs) {
                 nvgpu_err(g,
-                        "GPC 0x%08x is greater than total count 0x%08x!\n",
+                        "GPC 0x%08x is greater than total count 0x%08x!",
                         gpc_num, num_gpcs);
                 return -EINVAL;
         }
@@ -7378,7 +7378,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
         context += ctxsw_prog_ucode_header_size_in_bytes();
         if (!check_local_header_magic(context)) {
                 nvgpu_err(g,
-                        "Invalid local header: magic value\n");
+                        "Invalid local header: magic value");
                 return -EINVAL;
         }
@@ -7409,7 +7409,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
                 if (chk_addr != addr) {
                         nvgpu_err(g,
-                                "Oops addr miss-match! : 0x%08x != 0x%08x\n",
+                                "Oops addr miss-match! : 0x%08x != 0x%08x",
                                 addr, chk_addr);
                         return -EINVAL;
                 }
@@ -7440,7 +7440,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
                 if (chk_addr != addr) {
                         nvgpu_err(g,
-                                "Oops addr miss-match! : 0x%08x != 0x%08x\n",
+                                "Oops addr miss-match! : 0x%08x != 0x%08x",
                                 addr, chk_addr);
                         return -EINVAL;
@@ -7509,7 +7509,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
          * extended buffer? */
         if (offset_to_segment > offset_to_segment_end) {
                 nvgpu_err(g,
-                        "Overflow ctxsw buffer! 0x%08x > 0x%08x\n",
+                        "Overflow ctxsw buffer! 0x%08x > 0x%08x",
                         offset_to_segment, offset_to_segment_end);
                 return -EINVAL;
         }
@@ -7710,7 +7710,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
         context += ctxsw_prog_ucode_header_size_in_bytes();
         if (!check_local_header_magic(context)) {
                 nvgpu_err(g,
-                        "Invalid FECS local header: magic value\n");
+                        "Invalid FECS local header: magic value");
                 return -EINVAL;
         }
         data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o());
@@ -7745,7 +7745,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
         if ((gpc_num + 1) > num_gpcs) {
                 nvgpu_err(g,
-                        "GPC %d not in this context buffer.\n",
+                        "GPC %d not in this context buffer.",
                         gpc_num);
                 return -EINVAL;
         }
@@ -7755,7 +7755,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
                 context += ctxsw_prog_ucode_header_size_in_bytes();
                 if (!check_local_header_magic(context)) {
                         nvgpu_err(g,
-                                "Invalid GPCCS local header: magic value\n");
+                                "Invalid GPCCS local header: magic value");
                         return -EINVAL;
                 }
@@ -7772,7 +7772,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
                 if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) {
                         nvgpu_err(g,
-                                "GPC %d TPC %d not in this context buffer.\n",
+                                "GPC %d TPC %d not in this context buffer.",
                                 gpc_num, tpc_num);
                         return -EINVAL;
                 }
@@ -8547,7 +8547,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
         tmp_err = gr_gk20a_enable_ctxsw(g);
         if (tmp_err) {
-                nvgpu_err(g, "unable to restart ctxsw!\n");
+                nvgpu_err(g, "unable to restart ctxsw!");
                 err = tmp_err;
         }
@@ -8718,7 +8718,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
         nvgpu_err(g,
                 "GPC%d TPC%d: timed out while trying to lock down SM", gpc, tpc);
         nvgpu_err(g,
-                "STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
+                "STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx",
                 gr_gpc0_tpc0_sm_dbgr_status0_r() + offset, dbgr_status0, dbgr_control0,
                 warps_valid, warps_paused, warps_trapped);
@@ -8739,7 +8739,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
         /* if an SM debugger isn't attached, skip suspend */
         if (!gk20a_gr_sm_debugger_attached(g)) {
                 nvgpu_err(g,
-                        "SM debugger not attached, skipping suspend!\n");
+                        "SM debugger not attached, skipping suspend!");
                 return;
         }
@@ -8754,7 +8754,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
                         global_esr_mask, check_errors);
         if (err) {
                 nvgpu_err(g,
-                        "SuspendSm failed\n");
+                        "SuspendSm failed");
                 return;
         }
 }
@@ -8770,7 +8770,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
         /* if an SM debugger isn't attached, skip suspend */
         if (!gk20a_gr_sm_debugger_attached(g)) {
                 nvgpu_err(g,
-                        "SM debugger not attached, skipping suspend!\n");
+                        "SM debugger not attached, skipping suspend!");
                 return;
         }
@@ -8791,7 +8791,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
                         global_esr_mask, check_errors);
                 if (err) {
                         nvgpu_err(g,
-                                "SuspendAllSms failed\n");
+                                "SuspendAllSms failed");
                         return;
                 }
         }
@@ -9099,7 +9099,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
         err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0);
         if (err)
-                nvgpu_err(g, "Failed to access register\n");
+                nvgpu_err(g, "Failed to access register");
         nvgpu_kfree(g, ops);
         return err;
 }
@@ -9237,7 +9237,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
         err = gr_gk20a_enable_ctxsw(g);
         if (err)
-                nvgpu_err(g, "unable to restart ctxsw!\n");
+                nvgpu_err(g, "unable to restart ctxsw!");
         *ctx_resident_ch_fd = local_ctx_resident_ch_fd;
@@ -9275,7 +9275,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
         err = gr_gk20a_enable_ctxsw(g);
         if (err)
-                nvgpu_err(g, "unable to restart ctxsw!\n");
+                nvgpu_err(g, "unable to restart ctxsw!");
         *ctx_resident_ch_fd = local_ctx_resident_ch_fd;

@@ -185,7 +185,7 @@ static void gk20a_ltc_isr(struct gk20a *g)
         u32 intr;
         intr = gk20a_readl(g, ltc_ltc0_ltss_intr_r());
-        nvgpu_err(g, "ltc: %08x\n", intr);
+        nvgpu_err(g, "ltc: %08x", intr);
         gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr);
 }

@@ -1171,7 +1171,7 @@ fail_validate:
         if (allocated)
                 __nvgpu_vm_free_va(vm, map_offset, pgsz_idx);
 fail_alloc:
-        nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
+        nvgpu_err(g, "%s: failed with err=%d", __func__, err);
         return 0;
 }
@@ -2670,7 +2670,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
         err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
         if (err) {
-                nvgpu_err(g, "%s: memory allocation failed\n", __func__);
+                nvgpu_err(g, "%s: memory allocation failed", __func__);
                 return err;
         }

@@ -2193,7 +2193,7 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
                         get_pmu_sequence_out_alloc_ptr_v0;
                 break;
         default:
-                nvgpu_err(g, "PMU code version not supported version: %d\n",
+                nvgpu_err(g, "PMU code version not supported version: %d",
                         pmu->desc->app_version);
                 err = -EINVAL;
                 goto fail_pmu_seq;
@@ -3227,7 +3227,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
         err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
                         &pmu->seq_buf);
         if (err) {
-                nvgpu_err(g, "failed to allocate memory\n");
+                nvgpu_err(g, "failed to allocate memory");
                 goto err_free_seq;
         }
@@ -3244,7 +3244,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
         err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
                         &pmu->trace_buf);
         if (err) {
-                nvgpu_err(g, "failed to allocate pmu trace buffer\n");
+                nvgpu_err(g, "failed to allocate pmu trace buffer");
                 goto err_free_seq_buf;
         }
@@ -4542,7 +4542,7 @@ void pmu_dump_falcon_stats(struct pmu_gk20a *pmu)
                 nvgpu_err(g, "PMU_FALCON_REG_SP : 0x%x",
                         gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
         }
-        nvgpu_err(g, "elpg stat: %d\n",
+        nvgpu_err(g, "elpg stat: %d",
                 pmu->elpg_stat);
         /* PMU may crash due to FECS crash. Dump FECS status */
@@ -4671,7 +4671,7 @@ static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
         return true;
 invalid_cmd:
-        nvgpu_err(g, "invalid pmu cmd :\n"
+        nvgpu_err(g, "invalid pmu cmd :"
                 "queue_id=%d,\n"
                 "cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n"
                 "payload in=%p, in_size=%d, in_offset=%d,\n"
@@ -4756,7 +4756,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
         err = nvgpu_dma_alloc_map_sys(vm, size, mem);
         if (err) {
-                nvgpu_err(g, "failed to allocate memory\n");
+                nvgpu_err(g, "failed to allocate memory");
                 return -ENOMEM;
         }

@@ -636,7 +636,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
         /* support only 24-bit 4-byte aligned offsets */
         if (offset & 0xFF000003) {
-                nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset);
+                nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x", offset);
                 op->status |= REGOP(STATUS_INVALID_OFFSET);
                 return -EINVAL;
         }
@@ -674,7 +674,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
         }
         if (!valid) {
-                nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x\n", offset);
+                nvgpu_err(dbg_s->g, "invalid regop offset: 0x%x", offset);
                 op->status |= REGOP(STATUS_INVALID_OFFSET);
                 return -EINVAL;
         }

@@ -78,14 +78,14 @@ static int alloc_and_kmap_iopage(struct gk20a *g,
         if (!*page) {
                 err = -ENOMEM;
-                nvgpu_err(g, "couldn't allocate io page\n");
+                nvgpu_err(g, "couldn't allocate io page");
                 goto fail;
         }
         *kvaddr = kmap(*page);
         if (!*kvaddr) {
                 err = -ENOMEM;
-                nvgpu_err(g, "couldn't kmap io page\n");
+                nvgpu_err(g, "couldn't kmap io page");
                 goto fail;
         }
         *phys = page_to_phys(*page);
@@ -119,7 +119,7 @@ int gk20a_init_sim_support(struct platform_device *pdev)
         if (!(g->sim.send_bfr.kvaddr && g->sim.recv_bfr.kvaddr &&
               g->sim.msg_bfr.kvaddr)) {
-                nvgpu_err(g, "couldn't allocate all sim buffers\n");
+                nvgpu_err(g, "couldn't allocate all sim buffers");
                 goto fail;
         }
@@ -269,7 +269,7 @@ static int rpc_recv_poll(struct gk20a *g)
                         (u64)recv_phys_addr_lo << PAGE_SHIFT;
         if (recv_phys_addr != g->sim.msg_bfr.phys) {
-                nvgpu_err(g, "%s Error in RPC reply\n",
+                nvgpu_err(g, "%s Error in RPC reply",
                         __func__);
                 return -1;
         }
@@ -296,21 +296,21 @@ static int issue_rpc_and_wait(struct gk20a *g)
         err = rpc_send_message(g);
         if (err) {
-                nvgpu_err(g, "%s failed rpc_send_message\n",
+                nvgpu_err(g, "%s failed rpc_send_message",
                         __func__);
                 return err;
         }
         err = rpc_recv_poll(g);
         if (err) {
-                nvgpu_err(g, "%s failed rpc_recv_poll\n",
+                nvgpu_err(g, "%s failed rpc_recv_poll",
                         __func__);
                 return err;
         }
         /* Now check if RPC really succeeded */
         if (*sim_msg_hdr(g, sim_msg_result_r()) != sim_msg_result_success_v()) {
-                nvgpu_err(g, "%s received failed status!\n",
+                nvgpu_err(g, "%s received failed status!",
                         __func__);
                 return -(*sim_msg_hdr(g, sim_msg_result_r()));
         }

@@ -95,7 +95,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
                 tsg->runlist_id = ch->runlist_id;
         else if (tsg->runlist_id != ch->runlist_id) {
                 nvgpu_err(tsg->g,
-                        "Error: TSG channel should be share same runlist ch[%d] tsg[%d]\n",
+                        "Error: TSG channel should be share same runlist ch[%d] tsg[%d]",
                         ch->runlist_id, tsg->runlist_id);
                 return -EINVAL;
         }

@@ -1418,7 +1418,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
         err = nvgpu_dma_alloc_flags_sys(g,
                         NVGPU_DMA_READ_ONLY, bl_sz, &acr->hsbl_ucode);
         if (err) {
-                nvgpu_err(g, "failed to allocate memory\n");
+                nvgpu_err(g, "failed to allocate memory");
                 goto err_done;
         }

@@ -225,7 +225,7 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
         if (val == 2) {
                 return base * 2;
         } else if (val != 1) {
-                nvgpu_err(g, "Invalid number of active ltcs: %08x\n", val);
+                nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
         }
         return base;

@@ -529,7 +529,7 @@ static ssize_t xve_link_speed_write(struct file *filp,
         else if (strncmp(kbuff, "Gen3", check_len) == 0)
                 link_speed = GPU_XVE_SPEED_8P0;
         else
-                nvgpu_err(g, "%s: Unknown PCIe speed: %s\n",
+                nvgpu_err(g, "%s: Unknown PCIe speed: %s",
                         __func__, kbuff);
         if (!link_speed)

@@ -35,7 +35,7 @@ static ssize_t ecc_enable_store(struct device *dev,
                 err = g->ops.pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd
                         (g, ecc_mask);
                 if (err)
-                        nvgpu_err(g, "ECC override did not happen\n");
+                        nvgpu_err(g, "ECC override did not happen");
         } else
                 return -EINVAL;
         return count;
@@ -90,7 +90,7 @@ void gp10b_create_sysfs(struct device *dev)
         error |= device_create_file(dev, &dev_attr_ecc_enable);
         error |= device_create_file(dev, &dev_attr_czf_bypass);
         if (error)
-                nvgpu_err(g, "Failed to create sysfs attributes!\n");
+                nvgpu_err(g, "Failed to create sysfs attributes!");
 }
 void gp10b_remove_sysfs(struct device *dev)

@@ -1631,7 +1631,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
         ret = gk20a_disable_channel_tsg(g, fault_ch);
         if (ret) {
                 nvgpu_err(g,
-                        "CILP: failed to disable channel/TSG!\n");
+                        "CILP: failed to disable channel/TSG!");
                 return ret;
         }
@@ -1833,7 +1833,7 @@ static int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
                 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
                 ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
                 if (ret) {
-                        nvgpu_err(g, "CILP: error while setting CILP preempt pending!\n");
+                        nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
                         return ret;
                 }

@@ -102,7 +102,7 @@ static u32 therm_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
                         &handlerparams->success, 1);
         if (handlerparams->success == 0) {
-                nvgpu_err(g, "could not process cmd\n");
+                nvgpu_err(g, "could not process cmd");
                 status = -ETIMEDOUT;
                 goto exit;
         }

@@ -428,7 +428,7 @@ static int vgpu_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
         if (err || msg.ret) {
                 nvgpu_err(g,
-                        "preempt channel %d failed\n", hw_chid);
+                        "preempt channel %d failed", hw_chid);
                 err = -ENOMEM;
         }
@@ -452,7 +452,7 @@ static int vgpu_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
         if (err) {
                 nvgpu_err(g,
-                        "preempt tsg %u failed\n", tsgid);
+                        "preempt tsg %u failed", tsgid);
         }
         return err;

@@ -141,7 +141,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
                         vm->gmmu_page_sizes[gmmu_page_size_big]) {
                         pgsz_idx = gmmu_page_size_big;
                 } else {
-                        nvgpu_err(g, "invalid kernel page size %d\n",
+                        nvgpu_err(g, "invalid kernel page size %d",
                                 page_size);
                         goto fail;
                 }
@@ -172,7 +172,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 fail:
         if (handle)
                 tegra_gr_comm_oob_put_ptr(handle);
-        nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
+        nvgpu_err(g, "%s: failed with err=%d", __func__, err);
         return 0;
 }

@@ -110,7 +110,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
                 map_offset = __nvgpu_vm_alloc_va(vm, size,
                                 pgsz_idx);
                 if (!map_offset) {
-                        nvgpu_err(g, "failed to allocate va space\n");
+                        nvgpu_err(g, "failed to allocate va space");
                         err = -ENOMEM;
                         goto fail;
                 }
@@ -138,7 +138,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
                         vm->gmmu_page_sizes[gmmu_page_size_big]) {
                         pgsz_idx = gmmu_page_size_big;
                 } else {
-                        nvgpu_err(g, "invalid kernel page size %d\n",
+                        nvgpu_err(g, "invalid kernel page size %d",
                                 page_size);
                         goto fail;
                 }
@@ -160,7 +160,7 @@ static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
         return map_offset;
 fail:
-        nvgpu_err(g, "%s: failed with err=%d\n", __func__, err);
+        nvgpu_err(g, "%s: failed with err=%d", __func__, err);
         return 0;
 }