gpu: nvgpu: gm20b: Use new error macros

gk20a_err() and gk20a_warn() require a struct device pointer,
which is not portable across operating systems. The new nvgpu_err()
and nvgpu_warn() macros take struct gk20a pointer. Convert code
to use the more portable macros.

JIRA NVGPU-16

Change-Id: Ic27fb98e03a982e5a1cf672cb4e8f87ecea10a5b
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1457345
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Terje Bergstrom
2017-04-06 11:01:46 -07:00
committed by mobile promotions
parent 85f27cec5d
commit bb72b7e2ed
8 changed files with 47 additions and 62 deletions

View File

@@ -134,7 +134,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
gm20b_dbg_pmu("requesting PMU ucode in GM20B\n");
pmu_fw = nvgpu_request_firmware(g, GM20B_PMU_UCODE_IMAGE, 0);
if (!pmu_fw) {
gk20a_err(dev_from_gk20a(g), "failed to load pmu ucode!!");
nvgpu_err(g, "failed to load pmu ucode!!");
return -ENOENT;
}
g->acr.pmu_fw = pmu_fw;
@@ -143,13 +143,13 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
gm20b_dbg_pmu("requesting PMU ucode desc in GM20B\n");
pmu_desc = nvgpu_request_firmware(g, GM20B_PMU_UCODE_DESC, 0);
if (!pmu_desc) {
gk20a_err(dev_from_gk20a(g), "failed to load pmu ucode desc!!");
nvgpu_err(g, "failed to load pmu ucode desc!!");
err = -ENOENT;
goto release_img_fw;
}
pmu_sig = nvgpu_request_firmware(g, GM20B_PMU_UCODE_SIG, 0);
if (!pmu_sig) {
gk20a_err(dev_from_gk20a(g), "failed to load pmu sig!!");
nvgpu_err(g, "failed to load pmu sig!!");
err = -ENOENT;
goto release_desc;
}
@@ -197,7 +197,7 @@ static int fecs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
fecs_sig = nvgpu_request_firmware(g, GM20B_FECS_UCODE_SIG, 0);
if (!fecs_sig) {
gk20a_err(dev_from_gk20a(g), "failed to load fecs sig");
nvgpu_err(g, "failed to load fecs sig");
return -ENOENT;
}
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc));
@@ -267,7 +267,7 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG, 0);
if (!gpccs_sig) {
gk20a_err(dev_from_gk20a(g), "failed to load gpccs sig");
nvgpu_err(g, "failed to load gpccs sig");
return -ENOENT;
}
lsf_desc = nvgpu_kzalloc(g, sizeof(struct lsf_ucode_desc));
@@ -412,12 +412,12 @@ int prepare_ucode_blob(struct gk20a *g)
sgt = nvgpu_kzalloc(g, sizeof(*sgt));
if (!sgt) {
gk20a_err(dev_from_gk20a(g), "failed to allocate memory\n");
nvgpu_err(g, "failed to allocate memory");
return -ENOMEM;
}
err = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (err) {
gk20a_err(dev_from_gk20a(g), "failed to allocate sg_table\n");
nvgpu_err(g, "failed to allocate sg_table");
goto free_sgt;
}
page = phys_to_page(wpr_addr);
@@ -1088,7 +1088,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
/*First time init case*/
acr_fw = nvgpu_request_firmware(g, GM20B_HSBIN_PMU_UCODE_IMAGE, 0);
if (!acr_fw) {
gk20a_err(dev_from_gk20a(g), "pmu ucode get fail");
nvgpu_err(g, "pmu ucode get fail");
return -ENOENT;
}
acr->acr_fw = acr_fw;
@@ -1111,7 +1111,7 @@ static int gm20b_bootstrap_hs_flcn(struct gk20a *g)
acr->fw_hdr->patch_loc),
(u32 *)(acr_fw->data +
acr->fw_hdr->patch_sig)) < 0) {
gk20a_err(dev_from_gk20a(g), "patch signatures fail");
nvgpu_err(g, "patch signatures fail");
err = -1;
goto err_release_acr_fw;
}
@@ -1386,7 +1386,6 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
{
struct mm_gk20a *mm = &g->mm;
struct vm_gk20a *vm = &mm->pmu.vm;
struct device *d = dev_from_gk20a(g);
int err = 0;
u32 bl_sz;
struct acr_desc *acr = &g->acr;
@@ -1399,7 +1398,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
hsbl_fw = nvgpu_request_firmware(g,
GM20B_HSBIN_PMU_BL_UCODE_IMAGE, 0);
if (!hsbl_fw) {
gk20a_err(dev_from_gk20a(g), "pmu ucode load fail");
nvgpu_err(g, "pmu ucode load fail");
return -ENOENT;
}
acr->hsbl_fw = hsbl_fw;
@@ -1420,7 +1419,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
err = nvgpu_dma_alloc_flags_sys(g,
NVGPU_DMA_READ_ONLY, bl_sz, &acr->hsbl_ucode);
if (err) {
gk20a_err(d, "failed to allocate memory\n");
nvgpu_err(g, "failed to allocate memory");
goto err_done;
}
@@ -1430,7 +1429,7 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
gk20a_mem_flag_read_only, false,
acr->hsbl_ucode.aperture);
if (!acr->hsbl_ucode.gpu_va) {
gk20a_err(d, "failed to map pmu ucode memory!!");
nvgpu_err(g, "failed to map pmu ucode memory!!");
goto err_free_ucode;
}
@@ -1506,7 +1505,7 @@ static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
} while (!nvgpu_timeout_expired(&timeout));
if (ret) {
gk20a_err(dev_from_gk20a(g), "ACR boot timed out");
nvgpu_err(g, "ACR boot timed out");
return ret;
}
@@ -1514,8 +1513,7 @@ static int pmu_wait_for_halt(struct gk20a *g, unsigned int timeout_ms)
gm20b_dbg_pmu("ACR capabilities %x\n", g->acr.capabilities);
data = gk20a_readl(g, pwr_falcon_mailbox0_r());
if (data) {
gk20a_err(dev_from_gk20a(g),
"ACR boot failed, err %x", data);
nvgpu_err(g, "ACR boot failed, err %x", data);
ret = -EAGAIN;
}

View File

@@ -310,7 +310,7 @@ static int clk_config_calibration_params(struct gk20a *g)
* (non-production config), report error, but allow to use
* boot internal calibration with default slope.
*/
gk20a_err(dev_from_gk20a(g), "ADC coeff are not fused\n");
nvgpu_err(g, "ADC coeff are not fused");
return -EINVAL;
}
return 0;
@@ -532,7 +532,7 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
} while (delay > 0);
if (delay <= 0) {
gk20a_err(dev_from_gk20a(g), "GPCPLL calibration timeout");
nvgpu_err(g, "GPCPLL calibration timeout");
return -ETIMEDOUT;
}
@@ -564,8 +564,7 @@ static void clk_setup_slide(struct gk20a *g, u32 clk_u)
step_b = 0x05;
break;
default:
gk20a_err(dev_from_gk20a(g), "Unexpected reference rate %u kHz",
clk_u);
nvgpu_err(g, "Unexpected reference rate %u kHz", clk_u);
BUG();
}
@@ -671,7 +670,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
if (ramp_timeout <= 0) {
gk20a_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout");
nvgpu_err(g, "gpcpll dynamic ramp timeout");
return -ETIMEDOUT;
}
return 0;
@@ -1041,7 +1040,7 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
ret = clk_program_gpc_pll(g, &gpll_safe, 1);
if (ret) {
gk20a_err(dev_from_gk20a(g), "Safe dvfs program fail\n");
nvgpu_err(g, "Safe dvfs program fail");
return ret;
}
}
@@ -1154,8 +1153,7 @@ static int gm20b_init_clk_setup_sw(struct gk20a *g)
#endif
if (IS_ERR(ref)) {
gk20a_err(dev_from_gk20a(g),
"failed to get GPCPLL reference clock");
nvgpu_err(g, "failed to get GPCPLL reference clock");
err = -EINVAL;
goto fail;
}
@@ -1163,8 +1161,7 @@ static int gm20b_init_clk_setup_sw(struct gk20a *g)
clk->gpc_pll.id = GK20A_GPC_PLL;
clk->gpc_pll.clk_in = clk_get_rate(ref) / KHZ;
if (clk->gpc_pll.clk_in == 0) {
gk20a_err(dev_from_gk20a(g),
"GPCPLL reference clock is zero");
nvgpu_err(g, "GPCPLL reference clock is zero");
err = -EINVAL;
goto fail;
}
@@ -1327,8 +1324,7 @@ int gm20b_register_gpcclk(struct gk20a *g) {
clk->hw.init = &init;
c = clk_register(g->dev, &clk->hw);
if (IS_ERR(c)) {
gk20a_err(dev_from_gk20a(g),
"Failed to register GPCPLL clock");
nvgpu_err(g, "Failed to register GPCPLL clock");
return -EINVAL;
}
@@ -1405,8 +1401,7 @@ static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
/* gpc_pll.freq is changed to new value here */
if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params,
&freq, true)) {
gk20a_err(dev_from_gk20a(g),
"failed to set pll target for %d", freq);
nvgpu_err(g, "failed to set pll target for %d", freq);
return -EINVAL;
}
}
@@ -1442,8 +1437,7 @@ static int set_pll_freq(struct gk20a *g, int allow_slide)
* Just report error but not restore PLL since dvfs could already change
* voltage even when programming failed.
*/
gk20a_err(dev_from_gk20a(g), "failed to set pll to %d",
clk->gpc_pll.freq);
nvgpu_err(g, "failed to set pll to %d", clk->gpc_pll.freq);
return err;
}

View File

@@ -121,7 +121,7 @@ static void gm20b_fb_dump_vpr_wpr_info(struct gk20a *g)
val &= ~0x3;
val |= fb_mmu_vpr_info_index_addr_lo_v();
gk20a_writel(g, fb_mmu_vpr_info_r(), val);
gk20a_err(dev_from_gk20a(g), "VPR: %08x %08x %08x %08x",
nvgpu_err(g, "VPR: %08x %08x %08x %08x",
gk20a_readl(g, fb_mmu_vpr_info_r()),
gk20a_readl(g, fb_mmu_vpr_info_r()),
gk20a_readl(g, fb_mmu_vpr_info_r()),
@@ -131,7 +131,7 @@ static void gm20b_fb_dump_vpr_wpr_info(struct gk20a *g)
val &= ~0xf;
val |= (fb_mmu_wpr_info_index_allow_read_v());
gk20a_writel(g, fb_mmu_wpr_info_r(), val);
gk20a_err(dev_from_gk20a(g), "WPR: %08x %08x %08x %08x %08x %08x",
nvgpu_err(g, "WPR: %08x %08x %08x %08x %08x %08x",
gk20a_readl(g, fb_mmu_wpr_info_r()),
gk20a_readl(g, fb_mmu_wpr_info_r()),
gk20a_readl(g, fb_mmu_wpr_info_r()),

View File

@@ -21,6 +21,7 @@
#include "fifo_gm20b.h"
#include <nvgpu/timers.h>
#include <nvgpu/log.h>
#include <nvgpu/hw/gm20b/hw_ccsr_gm20b.h>
#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
@@ -64,7 +65,7 @@ static inline u32 gm20b_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
if (engine_info) {
fault_id = engine_info->fault_id;
} else {
gk20a_err(g->dev, "engine_id is not in active list/invalid %d", engine_id);
nvgpu_err(g, "engine_id is not in active list/invalid %d", engine_id);
}
return fault_id;
}
@@ -80,8 +81,7 @@ static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
/* trigger faults for all bad engines */
for_each_set_bit(engine_id, &engine_ids, 32) {
if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) {
gk20a_err(dev_from_gk20a(g),
"faulting unknown engine %ld", engine_id);
nvgpu_err(g, "faulting unknown engine %ld", engine_id);
} else {
u32 mmu_id = gm20b_engine_id_to_mmu_id(g,
engine_id);
@@ -107,7 +107,7 @@ static void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
} while (!nvgpu_timeout_expired(&timeout));
if (ret)
gk20a_err(dev_from_gk20a(g), "mmu fault timeout");
nvgpu_err(g, "mmu fault timeout");
/* release mmu fault trigger */
for_each_set_bit(engine_id, &engine_ids, 32)
@@ -136,7 +136,7 @@ static void gm20b_device_info_data_parse(struct gk20a *g,
top_device_info_data_fault_id_enum_v(table_entry);
}
} else
gk20a_err(g->dev, "unknown device_info_data %d",
nvgpu_err(g, "unknown device_info_data %d",
top_device_info_data_type_v(table_entry));
}

View File

@@ -21,6 +21,7 @@
#include <dt-bindings/soc/gm20b-fuse.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include "gk20a/gk20a.h"
#include "gk20a/gr_gk20a.h"
@@ -754,8 +755,7 @@ static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
(1 << LSF_FALCON_ID_GPCCS));
}
if (err) {
gk20a_err(dev_from_gk20a(g),
"Unable to recover GR falcon");
nvgpu_err(g, "Unable to recover GR falcon");
return err;
}
@@ -775,8 +775,7 @@ static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
err = g->ops.pmu.load_lsfalcon_ucode(g, falcon_id_mask);
if (err) {
gk20a_err(dev_from_gk20a(g),
"Unable to boot GPCCS\n");
nvgpu_err(g, "Unable to boot GPCCS");
return err;
}
}
@@ -1294,7 +1293,7 @@ static int gm20b_gr_update_sm_error_state(struct gk20a *g,
err = gr_gk20a_disable_ctxsw(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw\n");
nvgpu_err(g, "unable to stop gr ctxsw");
goto fail;
}
@@ -1356,7 +1355,7 @@ static int gm20b_gr_clear_sm_error_state(struct gk20a *g,
err = gr_gk20a_disable_ctxsw(g);
if (err) {
gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw\n");
nvgpu_err(g, "unable to stop gr ctxsw");
goto fail;
}
@@ -1434,8 +1433,7 @@ static int gm20b_gr_fuse_override(struct gk20a *g)
gm20b_gr_tpc_disable_override(g, value);
break;
default:
gk20a_err(dev_from_gk20a(g),
"ignore unknown fuse override %08x", fuse);
nvgpu_err(g, "ignore unknown fuse override %08x", fuse);
break;
}
}

View File

@@ -169,7 +169,7 @@ static int gm20b_get_litter_value(struct gk20a *g, int value)
ret = 0;
break;
default:
gk20a_err(dev_from_gk20a(g), "Missing definition %d", value);
nvgpu_err(g, "Missing definition %d", value);
BUG();
break;
}

View File

@@ -153,8 +153,7 @@ int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
} while (!nvgpu_timeout_expired(&timeout));
if (nvgpu_timeout_peek_expired(&timeout)) {
gk20a_err(dev_from_gk20a(g),
"comp tag clear timeout\n");
nvgpu_err(g, "comp tag clear timeout");
err = -EBUSY;
goto out;
}
@@ -201,8 +200,7 @@ void gm20b_ltc_isr(struct gk20a *g)
u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
mc_intr = gk20a_readl(g, mc_intr_ltc_r());
gk20a_err(dev_from_gk20a(g), "mc_ltc_intr: %08x",
mc_intr);
nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
for (ltc = 0; ltc < g->ltc_count; ltc++) {
if ((mc_intr & 1 << ltc) == 0)
continue;
@@ -210,7 +208,7 @@ void gm20b_ltc_isr(struct gk20a *g)
ltc_intr = gk20a_readl(g, ltc_ltc0_lts0_intr_r() +
ltc_stride * ltc +
lts_stride * slice);
gk20a_err(dev_from_gk20a(g), "ltc%d, slice %d: %08x",
nvgpu_err(g, "ltc%d, slice %d: %08x",
ltc, slice, ltc_intr);
gk20a_writel(g, ltc_ltc0_lts0_intr_r() +
ltc_stride * ltc +
@@ -226,8 +224,7 @@ u32 gm20b_ltc_cbc_fix_config(struct gk20a *g, int base)
if (val == 2) {
return base * 2;
} else if (val != 1) {
gk20a_err(dev_from_gk20a(g),
"Invalid number of active ltcs: %08x\n", val);
nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
}
return base;
@@ -335,8 +332,7 @@ static int gm20b_determine_L2_size_bytes(struct gk20a *g)
ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
sets = 16;
} else {
dev_err(dev_from_gk20a(g),
"Unknown constant %u for active sets",
nvgpu_err(g, "Unknown constant %u for active sets",
(unsigned)active_sets_value);
sets = 0;
}

View File

@@ -242,8 +242,7 @@ static int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
&g->ops.pmu.lspmuwprinitdone, 1);
/* check again if it still not ready indicate an error */
if (!g->ops.pmu.lspmuwprinitdone) {
gk20a_err(dev_from_gk20a(g),
"PMU not ready to load LSF");
nvgpu_err(g, "PMU not ready to load LSF");
return -ETIMEDOUT;
}
}
@@ -266,12 +265,12 @@ static void pmu_dump_security_fuses_gm20b(struct gk20a *g)
{
u32 val;
gk20a_err(dev_from_gk20a(g), "FUSE_OPT_SEC_DEBUG_EN_0 : 0x%x",
nvgpu_err(g, "FUSE_OPT_SEC_DEBUG_EN_0 : 0x%x",
gk20a_readl(g, fuse_opt_sec_debug_en_r()));
gk20a_err(dev_from_gk20a(g), "FUSE_OPT_PRIV_SEC_EN_0 : 0x%x",
nvgpu_err(g, "FUSE_OPT_PRIV_SEC_EN_0 : 0x%x",
gk20a_readl(g, fuse_opt_priv_sec_en_r()));
tegra_fuse_readl(FUSE_GCPLEX_CONFIG_FUSE_0, &val);
gk20a_err(dev_from_gk20a(g), "FUSE_GCPLEX_CONFIG_FUSE_0 : 0x%x",
nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0 : 0x%x",
val);
}