gpu: nvgpu: gp106: Use new error macros

gk20a_err() and gk20a_warn() require a struct device pointer,
which is not portable across operating systems. The new nvgpu_err()
and nvgpu_warn() macros take a struct gk20a pointer instead. Convert
the gp106 code to use the more portable macros.
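
The conversion pattern is the same throughout the patch. Below is a
minimal sketch; the macro body is only a hypothetical illustration of
how a Linux backend could hide the device lookup, not the actual
nvgpu definition:

	/* Before: the caller must fetch the Linux struct device. */
	gk20a_err(dev_from_gk20a(g), "failed to load pmu ucode!!");

	/* After: the caller passes the OS-agnostic struct gk20a. */
	nvgpu_err(g, "failed to load pmu ucode!!");

	/* Hypothetical Linux-side sketch, for illustration only: */
	#define nvgpu_err(g, fmt, args...) \
		dev_err(dev_from_gk20a(g), "%s: " fmt "\n", __func__, ##args)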

JIRA NVGPU-16

Change-Id: I18955b4c46c082883ee0bf589ab17cd66ab0add2
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1457346
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -150,7 +150,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE,
 			NVGPU_REQUEST_FIRMWARE_NO_SOC);
 	if (!pmu_fw) {
-		gk20a_err(dev_from_gk20a(g), "failed to load pmu ucode!!");
+		nvgpu_err(g, "failed to load pmu ucode!!");
 		return -ENOENT;
 	}
 	g->acr.pmu_fw = pmu_fw;
@@ -160,14 +160,14 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC,
 			NVGPU_REQUEST_FIRMWARE_NO_SOC);
 	if (!pmu_desc) {
-		gk20a_err(dev_from_gk20a(g), "failed to load pmu ucode desc!!");
+		nvgpu_err(g, "failed to load pmu ucode desc!!");
 		err = -ENOENT;
 		goto release_img_fw;
 	}
 	pmu_sig = nvgpu_request_firmware(g, GM20B_PMU_UCODE_SIG,
 			NVGPU_REQUEST_FIRMWARE_NO_SOC);
 	if (!pmu_sig) {
-		gk20a_err(dev_from_gk20a(g), "failed to load pmu sig!!");
+		nvgpu_err(g, "failed to load pmu sig!!");
 		err = -ENOENT;
 		goto release_desc;
 	}
@@ -177,8 +177,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 	err = gk20a_init_pmu(pmu);
 	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"failed to set function pointers\n");
+		nvgpu_err(g, "failed to set function pointers");
 		goto release_sig;
 	}
@@ -229,11 +228,11 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 			NVGPU_REQUEST_FIRMWARE_NO_SOC);
 		break;
 	default:
-		gk20a_err(g->dev, "no support for GPUID %x", ver);
+		nvgpu_err(g, "no support for GPUID %x", ver);
 	}
 	if (!fecs_sig) {
-		gk20a_err(dev_from_gk20a(g), "failed to load fecs sig");
+		nvgpu_err(g, "failed to load fecs sig");
 		return -ENOENT;
 	}
 	lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
@@ -315,11 +314,11 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
 			NVGPU_REQUEST_FIRMWARE_NO_SOC);
 		break;
 	default:
-		gk20a_err(g->dev, "no support for GPUID %x", ver);
+		nvgpu_err(g, "no support for GPUID %x", ver);
 	}
 	if (!gpccs_sig) {
-		gk20a_err(dev_from_gk20a(g), "failed to load gpccs sig");
+		nvgpu_err(g, "failed to load gpccs sig");
 		return -ENOENT;
 	}
 	lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc_v1));
@@ -1067,7 +1066,7 @@ static int gp106_bootstrap_hs_flcn(struct gk20a *g)
 			GM20B_HSBIN_PMU_UCODE_IMAGE,
 			NVGPU_REQUEST_FIRMWARE_NO_SOC);
 	if (!acr_fw) {
-		gk20a_err(dev_from_gk20a(g), "pmu ucode get fail");
+		nvgpu_err(g, "pmu ucode get fail");
 		return -ENOENT;
 	}
 	acr->acr_fw = acr_fw;
@@ -1090,7 +1089,7 @@ static int gp106_bootstrap_hs_flcn(struct gk20a *g)
 			acr->fw_hdr->patch_loc),
 			(u32 *)(acr_fw->data +
 			acr->fw_hdr->patch_sig)) < 0) {
-		gk20a_err(dev_from_gk20a(g), "patch signatures fail");
+		nvgpu_err(g, "patch signatures fail");
 		err = -1;
 		goto err_release_acr_fw;
 	}

@@ -192,8 +192,7 @@ static u32 gp106_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c) {
 	} while ((--retries) && (cntr = gk20a_readl(g, c->cntr.reg_cntr_addr)));
 	if (!retries) {
-		gk20a_err(dev_from_gk20a(g),
-			"unable to settle counter reset, bailing");
+		nvgpu_err(g, "unable to settle counter reset, bailing");
 		goto read_err;
 	}
 	/* Program counter */

@@ -30,7 +30,7 @@ static int gr_gp106_get_netlist_name(struct gk20a *g, int index, char *name)
 			GP106_NETLIST_IMAGE_FW_NAME);
 		break;
 	default:
-		gk20a_err(g->dev, "no support for GPUID %x", ver);
+		nvgpu_err(g, "no support for GPUID %x", ver);
 	}
 	return 0;

@@ -167,8 +167,7 @@ static int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 			g->gr.t18x.ctx_vars.preempt_image_size,
 			&gr_ctx->t18x.preempt_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate preempt buffer");
+			nvgpu_err(g, "cannot allocate preempt buffer");
 			goto fail;
 		}
@@ -176,8 +175,7 @@ static int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 			spill_size,
 			&gr_ctx->t18x.spill_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate spill buffer");
+			nvgpu_err(g, "cannot allocate spill buffer");
 			goto fail_free_preempt;
 		}
@@ -185,8 +183,7 @@ static int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 			attrib_cb_size,
 			&gr_ctx->t18x.betacb_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate beta buffer");
+			nvgpu_err(g, "cannot allocate beta buffer");
 			goto fail_free_spill;
 		}
@@ -194,8 +191,7 @@ static int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 			pagepool_size,
 			&gr_ctx->t18x.pagepool_ctxsw_buffer);
 		if (err) {
-			gk20a_err(dev_from_gk20a(g),
-				"cannot allocate page pool");
+			nvgpu_err(g, "cannot allocate page pool");
 			goto fail_free_betacb;
 		}

@@ -77,7 +77,7 @@ static int gp106_pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
 		gk20a_writel(g, pwr_falcon_engine_r(),
 			pwr_falcon_engine_reset_true_f());
 		gk20a_readl(g, pwr_falcon_engine_r());
-		gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
+		nvgpu_err(g, "Falcon mem scrubbing timeout");
 		return -ETIMEDOUT;
 	} else {
 		/* DISBALE */
@@ -202,7 +202,7 @@ static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
 	gk20a_dbg_fn("");
 	if (status != 0) {
-		gk20a_err(dev_from_gk20a(g), "PG PARAM cmd aborted");
+		nvgpu_err(g, "PG PARAM cmd aborted");
 		return;
 	}
@@ -222,7 +222,7 @@ static int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
 	status = init_rppg(g);
 	if (status != 0) {
-		gk20a_err(dev_from_gk20a(g), "RPPG init Failed");
+		nvgpu_err(g, "RPPG init Failed");
 		return -1;
 	}
@@ -386,8 +386,7 @@ static int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 			&g->ops.pmu.lspmuwprinitdone, 1);
 		/* check again if it still not ready indicate an error */
 		if (!g->ops.pmu.lspmuwprinitdone) {
-			gk20a_err(dev_from_gk20a(g),
-				"PMU not ready to load LSF");
+			nvgpu_err(g, "PMU not ready to load LSF");
 			return -ETIMEDOUT;
 		}
 	}

@@ -72,7 +72,7 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
 	} while (!nvgpu_timeout_expired(&to));
 	if (completion) {
-		gk20a_err(dev_from_gk20a(g), "ACR boot timed out");
+		nvgpu_err(g, "ACR boot timed out");
 		return completion;
 	}
@@ -81,8 +81,7 @@ int sec2_wait_for_halt(struct gk20a *g, unsigned int timeout)
 	data = gk20a_readl(g, psec_falcon_mailbox0_r());
 	if (data) {
-		gk20a_err(dev_from_gk20a(g),
-			"ACR boot failed, err %x", data);
+		nvgpu_err(g, "ACR boot failed, err %x", data);
 		completion = -EAGAIN;
 	}
@@ -100,14 +99,12 @@ void sec2_copy_to_dmem(struct pmu_gk20a *pmu,
 	u32 *src_u32 = (u32*)src;
 	if (size == 0) {
-		gk20a_err(dev_from_gk20a(g),
-			"size is zero");
+		nvgpu_err(g, "size is zero");
 		return;
 	}
 	if (dst & 0x3) {
-		gk20a_err(dev_from_gk20a(g),
-			"dst (0x%08x) not 4-byte aligned", dst);
+		nvgpu_err(g, "dst (0x%08x) not 4-byte aligned", dst);
 		return;
 	}
@@ -137,8 +134,7 @@ void sec2_copy_to_dmem(struct pmu_gk20a *pmu,
 	data = gk20a_readl(g, psec_falcon_dmemc_r(port)) & addr_mask;
 	size = ALIGN(size, 4);
 	if (data != dst + size) {
-		gk20a_err(dev_from_gk20a(g),
-			"copy failed. bytes written %d, expected %d",
+		nvgpu_err(g, "copy failed. bytes written %d, expected %d",
 			data - dst, size);
 	}
 	nvgpu_mutex_release(&pmu->pmu_copy_lock);

@@ -32,13 +32,12 @@ static int gp106_get_internal_sensor_curr_temp(struct gk20a *g, u32 *temp_f24_8)
 	if (!(therm_temp_sensor_tsense_state_v(readval) &
 		therm_temp_sensor_tsense_state_valid_v())) {
-		gk20a_err(dev_from_gk20a(g),
-			"Attempt to read temperature while sensor is OFF!\n");
+		nvgpu_err(g,
+			"Attempt to read temperature while sensor is OFF!");
 		err = -EINVAL;
 	} else if (therm_temp_sensor_tsense_state_v(readval) &
 		therm_temp_sensor_tsense_state_shadow_v()) {
-		gk20a_err(dev_from_gk20a(g),
-			"Reading temperature from SHADOWed sensor!\n");
+		nvgpu_err(g, "Reading temperature from SHADOWed sensor!");
 	}
 	// Convert from F9.5 -> F27.5 -> F24.8.
@@ -71,7 +70,7 @@ static void gp106_therm_debugfs_init(struct gk20a *g) {
 	dbgentry = debugfs_create_file(
 		"temp", S_IRUGO, platform->debugfs, g, &therm_ctrl_fops);
 	if (!dbgentry)
-		gk20a_err(dev_from_gk20a(g), "debugfs entry create failed for therm_curr_temp");
+		nvgpu_err(g, "debugfs entry create failed for therm_curr_temp");
 }
 #endif

@@ -522,7 +522,7 @@ static ssize_t xve_link_speed_write(struct file *filp,
 	else if (strncmp(kbuff, "Gen3", check_len) == 0)
 		link_speed = GPU_XVE_SPEED_8P0;
 	else
-		gk20a_err(g->dev, "%s: Unknown PCIe speed: %s\n",
+		nvgpu_err(g, "%s: Unknown PCIe speed: %s\n",
 			__func__, kbuff);
 	if (!link_speed)