gpu: nvgpu: gp10b: Use new error macros
gk20a_err() and gk20a_warn() require a struct device pointer, which is
not portable across operating systems. The new nvgpu_err() and
nvgpu_warn() macros take struct gk20a pointer. Convert code to use the
more portable macros.

JIRA NVGPU-16

Change-Id: I8dc0ddf3b6ea38af6300c27558b60786c163da6d
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1457344
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
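The conversion at each call site is mechanical: drop the struct device
argument (g->dev or dev_from_gk20a(g)) and pass the struct gk20a pointer
instead. A minimal before/after sketch, assuming the message-only call
shapes used throughout this diff (the wrapper function names here are
hypothetical, for illustration only):

	/* Before: the old macros need a struct device *, tying callers
	 * to the Linux driver model. */
	static void report_errors_old(struct gk20a *g)
	{
		gk20a_err(dev_from_gk20a(g), "write timestamp record failed");
		gk20a_warn(g->dev,
			"when bypass_smmu is 1, disable_bigpage must be 1 too");
	}

	/* After: the OS-agnostic macros take struct gk20a * directly.
	 * Most converted sites in this diff also drop the trailing "\n",
	 * which suggests the new macros terminate the line themselves. */
	static void report_errors_new(struct gk20a *g)
	{
		nvgpu_err(g, "write timestamp record failed");
		nvgpu_warn(g,
			"when bypass_smmu is 1, disable_bigpage must be 1 too");
	}

Where a call site only has a struct device * in scope (the sysfs
handlers below), the commit first recovers the gk20a pointer with
get_gk20a(dev) before calling nvgpu_err().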
@@ -16,6 +16,8 @@
 #include "gk20a/gk20a.h"
 #include "cde_gp10b.h"

+#include <nvgpu/log.h>
+
 enum gp10b_programs {
 	GP10B_PROG_HPASS = 0,
 	GP10B_PROG_HPASS_4K = 1,

@@ -46,7 +48,7 @@ static void gp10b_cde_get_program_numbers(struct gk20a *g,
 	}
 	if (g->mm.bypass_smmu) {
 		if (!g->mm.disable_bigpage) {
-			gk20a_warn(g->dev,
+			nvgpu_warn(g,
				"when bypass_smmu is 1, disable_bigpage must be 1 too");
 		}
 		hprog |= 1;

@@ -37,7 +37,7 @@ static int gp10b_fecs_trace_flush(struct gk20a *g)
 	err = gr_gk20a_elpg_protected_call(g,
			gr_gk20a_submit_fecs_method_op(g, op, false));
 	if (err)
-		gk20a_err(dev_from_gk20a(g), "write timestamp record failed");
+		nvgpu_err(g, "write timestamp record failed");

 	return err;
 }

@@ -228,7 +228,7 @@ static void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry,
			gk20a_dbg_info("device info: fault_id: %d", *fault_id);
 		}
 	} else
-		gk20a_err(g->dev, "unknown device_info_data %d",
+		nvgpu_err(g, "unknown device_info_data %d",
			top_device_info_data_type_v(table_entry));
 }

@@ -32,7 +32,7 @@ static ssize_t ecc_enable_store(struct device *dev,
		err = g->ops.pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd
			(g, ecc_mask);
		if (err)
-			dev_err(dev, "ECC override did not happen\n");
+			nvgpu_err(g, "ECC override did not happen\n");
	} else
		return -EINVAL;
	return count;

@@ -51,11 +51,12 @@ static DEVICE_ATTR(ecc_enable, ROOTRW, ecc_enable_read, ecc_enable_store);

 void gp10b_create_sysfs(struct device *dev)
 {
+	struct gk20a *g = get_gk20a(dev);
 	int error = 0;

 	error |= device_create_file(dev, &dev_attr_ecc_enable);
 	if (error)
-		dev_err(dev, "Failed to create sysfs attributes!\n");
+		nvgpu_err(g, "Failed to create sysfs attributes!\n");
 }

 void gp10b_remove_sysfs(struct device *dev)

@@ -818,8 +818,7 @@ static int gr_gp10b_init_ctx_state(struct gk20a *g)
		op.mailbox.ret = &g->gr.t18x.ctx_vars.preempt_image_size;
		err = gr_gk20a_submit_fecs_method_op(g, op, false);
		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"query preempt image size failed");
+			nvgpu_err(g, "query preempt image size failed");
			return err;
		}
	}

@@ -921,8 +920,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
				g->gr.t18x.ctx_vars.preempt_image_size,
				&gr_ctx->t18x.preempt_ctxsw_buffer);
		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate preempt buffer");
+			nvgpu_err(g, "cannot allocate preempt buffer");
			goto fail;
		}

@@ -930,8 +928,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
				spill_size,
				&gr_ctx->t18x.spill_ctxsw_buffer);
		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate spill buffer");
+			nvgpu_err(g, "cannot allocate spill buffer");
			goto fail_free_preempt;
		}

@@ -939,8 +936,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
				attrib_cb_size,
				&gr_ctx->t18x.betacb_ctxsw_buffer);
		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate beta buffer");
+			nvgpu_err(g, "cannot allocate beta buffer");
			goto fail_free_spill;
		}

@@ -948,8 +944,7 @@ static int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
				pagepool_size,
				&gr_ctx->t18x.pagepool_ctxsw_buffer);
		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate page pool");
+			nvgpu_err(g, "cannot allocate page pool");
			goto fail_free_betacb;
		}

@@ -1016,8 +1011,7 @@ static int gr_gp10b_alloc_gr_ctx(struct gk20a *g,
		err = g->ops.gr.set_ctxsw_preemption_mode(g, *gr_ctx, vm,
			class, graphics_preempt_mode, compute_preempt_mode);
		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"set_ctxsw_preemption_mode failed");
+			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
			goto fail_free_gk20a_ctx;
		}
	} else

@@ -1044,44 +1038,44 @@ static void dump_ctx_switch_stats(struct gk20a *g, struct vm_gk20a *vm,
		WARN_ON("Cannot map context");
		return;
	}
-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_magic_value_o : %x (expect %x)\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_magic_value_o : %x (expect %x)",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_magic_value_o()),
		ctxsw_prog_main_image_magic_value_v_value_v());

-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi : %x",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_o()));

-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_ptr : %x",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_context_timestamp_buffer_ptr_o()));

-	gk20a_err(dev_from_gk20a(g), "ctxsw_prog_main_image_context_timestamp_buffer_control : %x\n",
+	nvgpu_err(g, "ctxsw_prog_main_image_context_timestamp_buffer_control : %x",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_context_timestamp_buffer_control_o()));

-	gk20a_err(dev_from_gk20a(g), "NUM_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "NUM_SAVE_OPERATIONS : %d",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_num_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "WFI_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "WFI_SAVE_OPERATIONS : %d",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_num_wfi_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CTA_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CTA_SAVE_OPERATIONS : %d",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_num_cta_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "GFXP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "GFXP_SAVE_OPERATIONS : %d",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_num_gfxp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g), "CILP_SAVE_OPERATIONS : %d\n",
+	nvgpu_err(g, "CILP_SAVE_OPERATIONS : %d",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_num_cilp_save_ops_o()));
-	gk20a_err(dev_from_gk20a(g),
-		"image gfx preemption option (GFXP is 1) %x\n",
+	nvgpu_err(g,
+		"image gfx preemption option (GFXP is 1) %x",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_graphics_preemption_options_o()));
-	gk20a_err(dev_from_gk20a(g),
-		"image compute preemption option (CTA is 1) %x\n",
+	nvgpu_err(g,
+		"image compute preemption option (CTA is 1) %x",
		nvgpu_mem_rd(g, mem,
				ctxsw_prog_main_image_compute_preemption_options_o()));
	nvgpu_mem_end(g, mem);

@@ -1154,8 +1148,7 @@ static void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g,

		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"can't map patch context");
+			nvgpu_err(g, "can't map patch context");
			goto out;
		}

@@ -1403,7 +1396,7 @@ static int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms,
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired(&timeout));

-	gk20a_err(dev_from_gk20a(g),
+	nvgpu_err(g,
		"timeout, ctxsw busy : %d, gr busy : %d, %08x, %08x, %08x, %08x",
		ctxsw_active, gr_busy, activity0, activity1, activity2, activity4);

@@ -1617,14 +1610,14 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a

	ret = gk20a_disable_channel_tsg(g, fault_ch);
	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
			"CILP: failed to disable channel/TSG!\n");
		return ret;
	}

	ret = g->ops.fifo.update_runlist(g, fault_ch->runlist_id, ~0, true, false);
	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
+		nvgpu_err(g,
			"CILP: failed to restart runlist 0!");
		return ret;
	}

@@ -1664,7 +1657,7 @@ static int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
			"CILP: looking up ctx id");
		ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->t18x.ctx_id);
		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error looking up ctx id!\n");
+			nvgpu_err(g, "CILP: error looking up ctx id!");
			return ret;
		}
		gr_ctx->t18x.ctx_id_valid = true;

@@ -1688,8 +1681,7 @@ static int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2
			.cond.fail = GR_IS_UCODE_OP_SKIP});

	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to enable ctxsw interrupt!");
+		nvgpu_err(g, "CILP: failed to enable ctxsw interrupt!");
		return ret;
	}

@@ -1702,8 +1694,7 @@ static int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, struct channel_gk2

	ret = gr_gp10b_disable_channel_or_tsg(g, fault_ch);
	if (ret) {
-		gk20a_err(dev_from_gk20a(g),
-			"CILP: failed to disable channel!!");
+		nvgpu_err(g, "CILP: failed to disable channel!!");
		return ret;
	}

@@ -1822,7 +1813,7 @@ static int gr_gp10b_pre_process_sm_exception(struct gk20a *g,
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
		ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error while setting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while setting CILP preempt pending!\n");
			return ret;
		}

@@ -1912,7 +1903,7 @@ static int gr_gp10b_handle_fecs_error(struct gk20a *g,
		/* set preempt_pending to false */
		ret = gr_gp10b_clear_cilp_preempt_pending(g, ch);
		if (ret) {
-			gk20a_err(dev_from_gk20a(g), "CILP: error while unsetting CILP preempt pending!\n");
+			nvgpu_err(g, "CILP: error while unsetting CILP preempt pending!");
			gk20a_channel_put(ch);
			goto clean_up;
		}

@@ -1976,8 +1967,7 @@ static bool gr_gp10b_suspend_context(struct channel_gk20a *ch,
	if (gr_ctx->compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP) {
		err = gr_gp10b_set_cilp_preempt_pending(g, ch);
		if (err)
-			gk20a_err(dev_from_gk20a(g),
-				"unable to set CILP preempt pending\n");
+			nvgpu_err(g, "unable to set CILP preempt pending");
		else
			*cilp_preempt_pending = true;

@@ -2009,7 +1999,7 @@ static int gr_gp10b_suspend_contexts(struct gk20a *g,

	err = gr_gk20a_disable_ctxsw(g);
	if (err) {
-		gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
+		nvgpu_err(g, "unable to stop gr ctxsw");
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		goto clean_up;
	}

@@ -2159,8 +2149,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
		err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
						graphics_preempt_mode, compute_preempt_mode);
		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"set_ctxsw_preemption_mode failed");
+			nvgpu_err(g, "set_ctxsw_preemption_mode failed");
			return err;
		}
	}

@@ -2181,8 +2170,7 @@ static int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,

		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"can't map patch context");
+			nvgpu_err(g, "can't map patch context");
			goto enable_ch;
		}
		g->ops.gr.commit_global_cb_manager(g, ch, true);

@@ -2245,8 +2233,7 @@ static int gp10b_gr_fuse_override(struct gk20a *g)
			g->gr.t18x.fecs_feature_override_ecc_val = value;
			break;
		default:
-			gk20a_err(dev_from_gk20a(g),
-				"ignore unknown fuse override %08x", fuse);
+			nvgpu_err(g, "ignore unknown fuse override %08x", fuse);
			break;
		}
	}

@@ -178,7 +178,7 @@ static int gp10b_get_litter_value(struct gk20a *g, int value)
		ret = 0;
		break;
	default:
-		gk20a_err(dev_from_gk20a(g), "Missing definition %d", value);
+		nvgpu_err(g, "Missing definition %d", value);
		BUG();
		break;
	}

@@ -18,6 +18,8 @@
 #include "gk20a/gk20a.h"
 #include "gm20b/ltc_gm20b.h"

+#include <nvgpu/log.h>
+
 #include <nvgpu/hw/gp10b/hw_mc_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_ltc_gp10b.h>

@@ -128,8 +130,7 @@ static void gp10b_ltc_isr(struct gk20a *g)
	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);

	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
-	gk20a_err(dev_from_gk20a(g), "mc_ltc_intr: %08x",
-		mc_intr);
+	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
	for (ltc = 0; ltc < g->ltc_count; ltc++) {
		if ((mc_intr & 1 << ltc) == 0)
			continue;

@@ -142,7 +143,7 @@ static void gp10b_ltc_isr(struct gk20a *g)
				ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) {
			u32 ecc_stats_reg_val;

-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
				"Single bit error detected in GPU L2!");

			ecc_stats_reg_val =

@@ -162,7 +163,7 @@ static void gp10b_ltc_isr(struct gk20a *g)
				ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) {
			u32 ecc_stats_reg_val;

-			gk20a_err(dev_from_gk20a(g),
+			nvgpu_err(g,
				"Double bit error detected in GPU L2!");

			ecc_stats_reg_val =

@@ -177,7 +178,7 @@ static void gp10b_ltc_isr(struct gk20a *g)
					ecc_stats_reg_val);
		}

-		gk20a_err(dev_from_gk20a(g), "ltc%d, slice %d: %08x",
+		nvgpu_err(g, "ltc%d, slice %d: %08x",
			ltc, slice, ltc_intr);
		gk20a_writel(g, ltc_ltc0_lts0_intr_r() +
			   ltc_stride * ltc + lts_stride * slice,

@@ -24,6 +24,8 @@
 #include "pmu_gp10b.h"
 #include "gp10b_sysfs.h"

+#include <nvgpu/log.h>
+
 #include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
 #include <nvgpu/hw/gp10b/hw_fuse_gp10b.h>

@@ -192,8 +194,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
			&g->ops.pmu.lspmuwprinitdone, 1);
		/* check again if it still not ready indicate an error */
		if (!g->ops.pmu.lspmuwprinitdone) {
-			gk20a_err(dev_from_gk20a(g),
-				"PMU not ready to load LSF");
+			nvgpu_err(g, "PMU not ready to load LSF");
			return -ETIMEDOUT;
		}
	}

@@ -213,7 +214,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
	gk20a_dbg_fn("");

	if (status != 0) {
-		gk20a_err(dev_from_gk20a(g), "GR PARAM cmd aborted");
+		nvgpu_err(g, "GR PARAM cmd aborted");
		/* TBD: disable ELPG */
		return;
	}

@@ -378,12 +379,12 @@ static void pmu_dump_security_fuses_gp10b(struct gk20a *g)
 {
	u32 val;

-	gk20a_err(dev_from_gk20a(g), "FUSE_OPT_SEC_DEBUG_EN_0 : 0x%x",
+	nvgpu_err(g, "FUSE_OPT_SEC_DEBUG_EN_0 : 0x%x",
		gk20a_readl(g, fuse_opt_sec_debug_en_r()));
-	gk20a_err(dev_from_gk20a(g), "FUSE_OPT_PRIV_SEC_EN_0 : 0x%x",
+	nvgpu_err(g, "FUSE_OPT_PRIV_SEC_EN_0 : 0x%x",
		gk20a_readl(g, fuse_opt_priv_sec_en_r()));
	tegra_fuse_readl(FUSE_GCPLEX_CONFIG_FUSE_0, &val);
-	gk20a_err(dev_from_gk20a(g), "FUSE_GCPLEX_CONFIG_FUSE_0 : 0x%x",
+	nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0 : 0x%x",
		val);
 }

@@ -42,8 +42,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g)
		err = nvgpu_dma_alloc_map_sys(vm, rbfb_size,
				&g->mm.bar2_desc);
		if (err) {
-			dev_err(dev_from_gk20a(g),
-				"%s Error in replayable fault buffer\n", __func__);
+			nvgpu_err(g, "Error in replayable fault buffer");
			return err;
		}
	}

@@ -75,8 +74,8 @@ u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g)
	get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r());

	if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
-		dev_err(dev_from_gk20a(g), "%s Error in replayable fault buffer\n",
-			__func__);
+		nvgpu_err(g, "Error in replayable fault buffer");
+
	gk20a_dbg_fn("done");
	return get_idx;
 }

@@ -89,8 +88,8 @@ u32 gp10b_replayable_pagefault_buffer_put_index(struct gk20a *g)
	put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r());

	if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v())
-		dev_err(dev_from_gk20a(g), "%s Error in UVM\n",
-			__func__);
+		nvgpu_err(g, "Error in UVM");
+
	gk20a_dbg_fn("done");
	return put_idx;
 }