gpu: nvgpu: gv11b: Use new error macros

gk20a_err() and gk20a_warn() require a struct device pointer,
which is not portable across operating systems. The new nvgpu_err()
and nvgpu_warn() macros take a struct gk20a pointer instead. Convert
gv11b code to use the more portable macros.

JIRA NVGPU-16

Change-Id: I8c0d8944f625e3c5b16a9f5a2a59d95a680f4e55
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1459822
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author:    Terje Bergstrom
Date:      2017-04-10 11:09:13 -07:00
Committer: mobile promotions
Commit:    7fe4b6572b (parent 1a426c981c)

3 changed files with 32 additions and 47 deletions
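For illustration, a minimal compilable sketch of the call-site conversion described above. The struct gk20a body and the nvgpu_err() definition here are simplified stand-ins, not the real nvgpu definitions (those live in gk20a.h and <nvgpu/log.h>); only the argument change mirrors the commit.

/*
 * Stand-in types and macro: the real nvgpu_err() takes a struct gk20a
 * pointer and resolves the device internally, which is the point of
 * the conversion.
 */
#include <stdio.h>

struct gk20a {
	const char *name;	/* stand-in field identifying the GPU */
};

/* New style: pass the struct gk20a pointer directly. */
#define nvgpu_err(g, fmt, ...) \
	fprintf(stderr, "%s: " fmt "\n", (g)->name, ##__VA_ARGS__)

static int report_preempt_query_failure(struct gk20a *g, int err)
{
	if (err) {
		/* Before: gk20a_err(dev_from_gk20a(g), "..."); */
		nvgpu_err(g, "query preempt image size failed");
		return err;
	}
	return 0;
}

int main(void)
{
	struct gk20a g = { .name = "gv11b" };

	/* Simulate a failed FECS method to exercise the error path. */
	return report_preempt_query_failure(&g, -1) ? 1 : 0;
}

Passing struct gk20a keeps logging call sites free of Linux-specific struct device lookups, which is what makes the macros usable from OS-independent code.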

gv11b/fifo_gv11b.c

@@ -875,8 +875,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	/* Disable power management */
 	if (support_gk20a_pmu(g->dev) && g->elpg_enabled) {
 		if (gk20a_pmu_disable_elpg(g))
-			gk20a_err(dev_from_gk20a(g),
-				"failed to set disable elpg");
+			nvgpu_err(g, "failed to set disable elpg");
 	}
 	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
 		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
@@ -943,7 +942,7 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 			gk20a_channel_abort(refch, false);
 			gk20a_channel_put(refch);
 		} else {
-			gk20a_err(dev_from_gk20a(g), "id unknown, abort runlist");
+			nvgpu_err(g, "id unknown, abort runlist");
 			for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
 					runlist_id++) {
 				if (runlists_mask & BIT(runlist_id))

gv11b/gr_gv11b.c

@@ -22,6 +22,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/dma.h>
+#include <nvgpu/log.h>
 
 #include "gk20a/gk20a.h"
 #include "gk20a/gr_gk20a.h"
@@ -234,8 +235,7 @@ static int gr_gv11b_zbc_s_query_table(struct gk20a *g, struct gr_gk20a *gr,
 	u32 index = query_params->index_size;
 
 	if (index >= GK20A_ZBC_TABLE_SIZE) {
-		gk20a_err(dev_from_gk20a(g),
-			"invalid zbc stencil table index\n");
+		nvgpu_err(g, "invalid zbc stencil table index");
 		return -EINVAL;
 	}
 	query_params->depth = gr->zbc_s_tbl[index].stencil;
@@ -332,8 +332,7 @@ static int gr_gv11b_load_stencil_default_tbl(struct gk20a *g,
 	if (!err) {
 		gr->max_default_s_index = 3;
 	} else {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to load default zbc stencil table\n");
+		nvgpu_err(g, "fail to load default zbc stencil table");
 		return err;
 	}
 
@@ -628,8 +627,7 @@ static int gr_gv11b_init_ctx_state(struct gk20a *g)
 		op.mailbox.ret = &g->gr.t18x.ctx_vars.preempt_image_size;
 		err = gr_gk20a_submit_fecs_method_op(g, op, false);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"query preempt image size failed");
+			nvgpu_err(g, "query preempt image size failed");
 			return err;
 		}
 	}
@@ -717,8 +715,7 @@ static int gr_gv11b_alloc_gr_ctx(struct gk20a *g,
 			g->gr.t18x.ctx_vars.preempt_image_size,
 			&(*gr_ctx)->t18x.preempt_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(vm->mm->g),
-				  "cannot allocate preempt buffer");
+			nvgpu_err(vm->mm->g, "cannot allocate preempt buffer");
 			goto fail_free_gk20a_ctx;
 		}
 
@@ -726,8 +723,7 @@ static int gr_gv11b_alloc_gr_ctx(struct gk20a *g,
 			spill_size,
 			&(*gr_ctx)->t18x.spill_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(vm->mm->g),
-				  "cannot allocate spill buffer");
+			nvgpu_err(vm->mm->g, "cannot allocate spill buffer");
 			goto fail_free_preempt;
 		}
 
@@ -735,8 +731,7 @@ static int gr_gv11b_alloc_gr_ctx(struct gk20a *g,
 			attrib_cb_size,
 			&(*gr_ctx)->t18x.betacb_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(vm->mm->g),
-				  "cannot allocate beta buffer");
+			nvgpu_err(vm->mm->g, "cannot allocate beta buffer");
 			goto fail_free_spill;
 		}
 
@@ -744,8 +739,7 @@ static int gr_gv11b_alloc_gr_ctx(struct gk20a *g,
 			pagepool_size,
 			&(*gr_ctx)->t18x.pagepool_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(vm->mm->g),
-				  "cannot allocate page pool");
+			nvgpu_err(vm->mm->g, "cannot allocate page pool");
 			goto fail_free_betacb;
 		}
 
@@ -785,29 +779,28 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
 		WARN_ON("Cannot map context");
 		return;
 	}
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
 		nvgpu_mem_rd(g, mem,
 				ctxsw_prog_main_image_magic_value_o()),
 		ctxsw_prog_main_image_magic_value_v_value_v());
-	gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "NUM_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "WFI_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_wfi_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CTA_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cta_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "GFXP_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_gfxp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CILP_SAVE_OPERATIONS : %d",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_num_cilp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g),
-		"image gfx preemption option (GFXP is 1) %x\n",
+	nvgpu_err(g, "image gfx preemption option (GFXP is 1) %x",
 		nvgpu_mem_rd(g, mem,
 			ctxsw_prog_main_image_graphics_preemption_options_o()));
 	nvgpu_mem_end(g, mem);
@@ -868,8 +861,7 @@ static void gr_gv11b_update_ctxsw_preemption_mode(struct gk20a *g,
 		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"can't map patch context");
+			nvgpu_err(g, "can't map patch context");
 			goto out;
 		}
 
@@ -1118,7 +1110,7 @@ static int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
 	} while (!nvgpu_timeout_expired(&timeout));
 
-	gk20a_err(dev_from_gk20a(g),
+	nvgpu_err(g,
 		"timeout, ctxsw busy : %d, gr busy : %d, %08x, %08x, %08x, %08x",
 		ctxsw_active, gr_busy, activity0, activity1, activity2, activity4);
@@ -1272,15 +1264,13 @@ static int gr_gv11b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a
 	ret = gk20a_disable_channel_tsg(g, fault_ch);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to disable channel/TSG!\n");
+		nvgpu_err(g, "CILP: failed to disable channel/TSG!");
 		return ret;
 	}
 
 	ret = g->ops.fifo.update_runlist(g, fault_ch->runlist_id, ~0, true, false);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to restart runlist 0!");
+		nvgpu_err(g, "CILP: failed to restart runlist 0!");
 		return ret;
 	}
@@ -1319,7 +1309,7 @@ static int gr_gv11b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
"CILP: looking up ctx id");
ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->t18x.ctx_id);
if (ret) {
gk20a_err(dev_from_gk20a(g), "CILP: error looking up ctx id!\n");
nvgpu_err(g, "CILP: error looking up ctx id!");
return ret;
}
gr_ctx->t18x.ctx_id_valid = true;
@@ -1343,8 +1333,7 @@ static int gr_gv11b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
 			.cond.fail = GR_IS_UCODE_OP_SKIP});
 
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to enable ctxsw interrupt!");
+		nvgpu_err(g, "CILP: failed to enable ctxsw interrupt!");
 		return ret;
 	}
@@ -1357,8 +1346,7 @@ static int gr_gv11b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
 	ret = gr_gv11b_disable_channel_or_tsg(g, fault_ch);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to disable channel!!");
+		nvgpu_err(g, "CILP: failed to disable channel!!");
 		return ret;
 	}
@@ -1472,7 +1460,7 @@ static int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
 		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
 		ret = gr_gv11b_set_cilp_preempt_pending(g, fault_ch);
 		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error while setting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
 			return ret;
 		}
@@ -1562,7 +1550,7 @@ static int gr_gv11b_handle_fecs_error(struct gk20a *g,
 		/* set preempt_pending to false */
 		ret = gr_gv11b_clear_cilp_preempt_pending(g, ch);
 		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error while unsetting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while unsetting CILP preempt pending!");
 			gk20a_channel_put(ch);
 			goto clean_up;
 		}
@@ -2002,8 +1990,7 @@ void gr_gv11b_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
 				therm_gate_ctrl_eng_clk_auto_f());
 		break;
 	default:
-		gk20a_err(dev_from_gk20a(g),
-			"invalid elcg mode %d", mode);
+		nvgpu_err(g, "invalid elcg mode %d", mode);
 	}
 
 	gk20a_writel(g, therm_gate_ctrl_r(engine), gate_ctrl);

gv11b/subctx_gv11b.c

@@ -16,12 +16,13 @@
  * this program.
  */
 
-#include <nvgpu/dma.h>
-
 #include "gk20a/gk20a.h"
 #include "gv11b/subctx_gv11b.h"
 
+#include <nvgpu/dma.h>
+#include <nvgpu/log.h>
+
 #include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
 #include <nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h>
@@ -58,8 +59,7 @@ int gv11b_alloc_subctx_header(struct channel_gk20a *c)
 			gr->ctx_vars.golden_image_size,
 			&ctx->mem);
 	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			  "failed to allocate sub ctx header");
+		nvgpu_err(g, "failed to allocate sub ctx header");
 		return ret;
 	}
 	ctx->mem.gpu_va = gk20a_gmmu_map(c->vm,
@@ -69,8 +69,7 @@ int gv11b_alloc_subctx_header(struct channel_gk20a *c)
 			gk20a_mem_flag_none, true,
 			ctx->mem.aperture);
 	if (!ctx->mem.gpu_va) {
-		gk20a_err(dev_from_gk20a(g),
-			  "failed to map ctx header");
+		nvgpu_err(g, "failed to map ctx header");
 		nvgpu_dma_free(g, &ctx->mem);
 		return -ENOMEM;
 	}