mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: gm20b: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces, including single statement blocks. Fix errors due to single statement if blocks without braces, introducing the braces.

JIRA NVGPU-671

Change-Id: I1651ae8ee680bdeb48606569c4e8c2fc7cb87f20
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1805077
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 8676b2e65b
commit 4032e8915a
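For context, MISRA C:2012 Rule 15.6 requires the body of every if/else (and loop) statement to be a compound statement, i.e. enclosed in braces even when it is a single statement. A minimal sketch of the pattern applied throughout this change (illustrative only; the helper name is hypothetical and not code from the diff below):

/* Non-compliant: single-statement if body without braces */
static int check_err_noncompliant(int err)
{
	if (err)
		return err;
	return 0;
}

/* Compliant per MISRA 15.6: the controlled body is a compound statement */
static int check_err_compliant(int err)
{
	if (err) {
		return err;
	}
	return 0;
}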
@@ -238,8 +238,9 @@ static int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
struct nvgpu_firmware *gpccs_sig;
int err;

if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS))
if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
return -ENOENT;
}

gpccs_sig = nvgpu_request_firmware(g, T18x_GPCCS_UCODE_SIG, 0);
if (!gpccs_sig) {
@@ -381,20 +382,23 @@ int prepare_ucode_blob(struct gk20a *g)
/* Discover all managed falcons*/
err = lsfm_discover_ucode_images(g, plsfm);
gm20b_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
if (err)
if (err) {
goto free_sgt;
}

if (plsfm->managed_flcn_cnt && !g->acr.ucode_blob.cpu_va) {
/* Generate WPR requirements*/
err = lsf_gen_wpr_requirements(g, plsfm);
if (err)
if (err) {
goto free_sgt;
}

/*Alloc memory to hold ucode blob contents*/
err = g->ops.pmu.alloc_blob_space(g, plsfm->wpr_size
, &g->acr.ucode_blob);
if (err)
if (err) {
goto free_sgt;
}

gm20b_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
plsfm->managed_flcn_cnt, plsfm->wpr_size);
@@ -428,8 +432,9 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
/* Obtain the PMU ucode image and add it to the list if required*/
memset(&ucode_img, 0, sizeof(ucode_img));
status = pmu_ucode_details(g, &ucode_img);
if (status)
if (status) {
return status;
}

/* The falon_id is formed by grabbing the static base
* falon_id from the image and adding the
@@ -441,8 +446,9 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
if (!lsfm_falcon_disabled(g, plsfm, falcon_id)) {
pmu->falcon_id = falcon_id;
if (lsfm_add_ucode_img(g, plsfm, &ucode_img,
pmu->falcon_id) == 0)
pmu->falcon_id) == 0) {
pmu->pmu_mode |= PMU_LSFM_MANAGED;
}

plsfm->managed_flcn_cnt++;
} else {
@@ -480,8 +486,9 @@ static int lsfm_discover_ucode_images(struct gk20a *g,
/* Do not manage non-FB ucode*/
if (lsfm_add_ucode_img(g,
plsfm, &ucode_img, falcon_id)
== 0)
== 0) {
plsfm->managed_flcn_cnt++;
}
} else {
gm20b_dbg_pmu(g, "not managed %d\n",
ucode_img.lsf_desc->falcon_id);
@@ -513,18 +520,22 @@ int gm20b_pmu_populate_loader_cfg(struct gk20a *g,
u64 addr_code, addr_data;
u32 addr_args;

if (p_img->desc == NULL) /*This means its a header based ucode,
and so we do not fill BL gen desc structure*/
if (p_img->desc == NULL) {
/*
* This means its a header based ucode,
* and so we do not fill BL gen desc structure
*/
return -EINVAL;
}
desc = p_img->desc;
/*
Calculate physical and virtual addresses for various portions of
the PMU ucode image
Calculate the 32-bit addresses for the application code, application
data, and bootloader code. These values are all based on IM_BASE.
The 32-bit addresses will be the upper 32-bits of the virtual or
physical addresses of each respective segment.
*/
* Calculate physical and virtual addresses for various portions of
* the PMU ucode image
* Calculate the 32-bit addresses for the application code, application
* data, and bootloader code. These values are all based on IM_BASE.
* The 32-bit addresses will be the upper 32-bits of the virtual or
* physical addresses of each respective segment.
*/
addr_base = p_lsfm->lsb_header.ucode_off;
g->ops.pmu.get_wpr(g, &wpr_inf);
addr_base += wpr_inf.wpr_base;
@@ -584,19 +595,23 @@ int gm20b_flcn_populate_bl_dmem_desc(struct gk20a *g,
struct pmu_ucode_desc *desc;
u64 addr_code, addr_data;

if (p_img->desc == NULL) /*This means its a header based ucode,
and so we do not fill BL gen desc structure*/
if (p_img->desc == NULL) {
/*
* This means its a header based ucode,
* and so we do not fill BL gen desc structure
*/
return -EINVAL;
}
desc = p_img->desc;

/*
Calculate physical and virtual addresses for various portions of
the PMU ucode image
Calculate the 32-bit addresses for the application code, application
data, and bootloader code. These values are all based on IM_BASE.
The 32-bit addresses will be the upper 32-bits of the virtual or
physical addresses of each respective segment.
*/
* Calculate physical and virtual addresses for various portions of
* the PMU ucode image
* Calculate the 32-bit addresses for the application code, application
* data, and bootloader code. These values are all based on IM_BASE.
* The 32-bit addresses will be the upper 32-bits of the virtual or
* physical addresses of each respective segment.
*/
addr_base = p_lsfm->lsb_header.ucode_off;
g->ops.pmu.get_wpr(g, &wpr_inf);
addr_base += wpr_inf.wpr_base;
@@ -642,9 +657,10 @@ static int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,

if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
gm20b_dbg_pmu(g, "pmu write flcn bl gen desc\n");
if (pnode->wpr_header.falcon_id == pmu->falcon_id)
if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
&pnode->bl_gen_desc_size);
}
}

/* Failed to find the falcon requested. */
@@ -795,9 +811,10 @@ static void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
u32 full_app_size = 0;
u32 data = 0;

if (pnode->ucode_img.lsf_desc)
if (pnode->ucode_img.lsf_desc) {
memcpy(&pnode->lsb_header.signature, pnode->ucode_img.lsf_desc,
sizeof(struct lsf_ucode_desc));
}
pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;

/* The remainder of the LSB depends on the loader usage */
@@ -865,8 +882,9 @@ static int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr *plsfm,

struct lsfm_managed_ucode_img *pnode;
pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_managed_ucode_img));
if (pnode == NULL)
if (pnode == NULL) {
return -ENOMEM;
}

/* Keep a copy of the ucode image info locally */
memcpy(&pnode->ucode_img, ucode_image, sizeof(struct flcn_ucode_img));
@@ -919,11 +937,12 @@ static void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr *plsfm)
while (cnt) {
mg_ucode_img = plsfm->ucode_img_list;
if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
LSF_FALCON_ID_PMU)
LSF_FALCON_ID_PMU) {
lsfm_free_ucode_img_res(g, &mg_ucode_img->ucode_img);
else
} else {
lsfm_free_nonpmu_ucode_img_res(g,
&mg_ucode_img->ucode_img);
}
plsfm->ucode_img_list = mg_ucode_img->next;
nvgpu_kfree(g, mg_ucode_img);
cnt--;
@@ -1110,8 +1129,9 @@ int gm20b_bootstrap_hs_flcn(struct gk20a *g)
((acr_ucode_header_t210_load[2]) >> 8);
bl_dmem_desc->data_dma_base1 = 0x0;
bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
} else
} else {
acr->acr_dmem_desc->nonwpr_ucode_blob_size = 0;
}
status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
if (status != 0) {
err = status;
@@ -1274,10 +1294,12 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
pmu->isr_enabled = true;
nvgpu_mutex_release(&pmu->isr_mutex);

if (g->ops.pmu.setup_apertures)
if (g->ops.pmu.setup_apertures) {
g->ops.pmu.setup_apertures(g);
if (g->ops.pmu.update_lspmu_cmdline_args)
}
if (g->ops.pmu.update_lspmu_cmdline_args) {
g->ops.pmu.update_lspmu_cmdline_args(g);
}

/*disable irqs for hs falcon booting as we will poll for halt*/
nvgpu_mutex_acquire(&pmu->isr_mutex);
@@ -1287,8 +1309,9 @@ int gm20b_init_pmu_setup_hw1(struct gk20a *g,
/*Clearing mailbox register used to reflect capabilities*/
gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
err = bl_bootstrap(pmu, desc, bl_sz);
if (err)
if (err) {
return err;
}
return 0;
}
@@ -1362,8 +1385,9 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
*/

if (g->ops.pmu.falcon_clear_halt_interrupt_status(g,
gk20a_get_gr_idle_timeout(g)))
gk20a_get_gr_idle_timeout(g))) {
goto err_unmap_bl;
}

gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
pwr_falcon_mmu_phys_sec_r()));
@@ -1377,12 +1401,13 @@ int pmu_exec_gen_bl(struct gk20a *g, void *desc, u8 b_wait_for_halt)
ACR_COMPLETION_TIMEOUT_MS);
if (err == 0) {
/* Clear the HALT interrupt */
if (g->ops.pmu.falcon_clear_halt_interrupt_status(g,
gk20a_get_gr_idle_timeout(g)))
if (g->ops.pmu.falcon_clear_halt_interrupt_status(g,
gk20a_get_gr_idle_timeout(g))) {
goto err_unmap_bl;
}
} else {
goto err_unmap_bl;
}
else
goto err_unmap_bl;
}
gm20b_dbg_pmu(g, "after waiting for halt, err %x\n", err);
gm20b_dbg_pmu(g, "phys sec reg %x\n", gk20a_readl(g,
@@ -1447,8 +1472,9 @@ int clear_halt_interrupt_status(struct gk20a *g, unsigned int timeout_ms)
struct nvgpu_pmu *pmu = &g->pmu;
int status = 0;

if (nvgpu_flcn_clear_halt_intr_status(pmu->flcn, timeout_ms))
if (nvgpu_flcn_clear_halt_intr_status(pmu->flcn, timeout_ms)) {
status = -EBUSY;
}

return status;
}
@@ -121,8 +121,9 @@ static u32 get_interim_pldiv(struct gk20a *g, u32 old_pl, u32 new_pl)
{
u32 pl;

if ((g->clk.gpc_pll.id == GM20B_GPC_PLL_C1) || (old_pl & new_pl))
if ((g->clk.gpc_pll.id == GM20B_GPC_PLL_C1) || (old_pl & new_pl)) {
return 0;
}

pl = old_pl | BIT(ffs(new_pl) - 1); /* pl never 0 */
new_pl |= BIT(ffs(old_pl) - 1);
@@ -163,8 +164,9 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
best_PL = pll_params->min_PL;

target_vco_f = target_clk_f + target_clk_f / 50;
if (max_vco_f < target_vco_f)
if (max_vco_f < target_vco_f) {
max_vco_f = target_vco_f;
}

/* Set PL search boundaries. */
high_PL = nvgpu_div_to_pl((max_vco_f + target_vco_f - 1) / target_vco_f);
@@ -184,22 +186,27 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
for (m = pll_params->min_M; m <= pll_params->max_M; m++) {
u_f = ref_clk_f / m;

if (u_f < pll_params->min_u)
if (u_f < pll_params->min_u) {
break;
if (u_f > pll_params->max_u)
}
if (u_f > pll_params->max_u) {
continue;
}

n = (target_vco_f * m) / ref_clk_f;
n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

if (n > pll_params->max_N)
if (n > pll_params->max_N) {
break;
}

for (; n <= n2; n++) {
if (n < pll_params->min_N)
if (n < pll_params->min_N) {
continue;
if (n > pll_params->max_N)
}
if (n > pll_params->max_N) {
break;
}

vco_f = ref_clk_f * n / m;
@@ -231,9 +238,10 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
found_match:
BUG_ON(best_delta == ~0U);

if (best_fit && best_delta != 0)
if (best_fit && best_delta != 0) {
gk20a_dbg_clk(g, "no best match for target @ %dMHz on gpc_pll",
target_clk_f);
}

pll->M = best_M;
pll->N = best_N;
@@ -278,11 +286,13 @@ static int nvgpu_fuse_calib_gpcpll_get_adc(struct gk20a *g,
int ret;

ret = nvgpu_tegra_fuse_read_reserved_calib(g, &val);
if (ret)
if (ret) {
return ret;
}

if (!fuse_get_gpcpll_adc_rev(val))
if (!fuse_get_gpcpll_adc_rev(val)) {
return -EINVAL;
}

*slope_uv = fuse_get_gpcpll_adc_slope_uv(val);
*intercept_uv = fuse_get_gpcpll_adc_intercept_uv(val);
@@ -521,8 +531,9 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
*/
clk_setup_slide(g, g->clk.gpc_pll.clk_in);

if (calibrated)
if (calibrated) {
return 0;
}

/*
* If calibration parameters are not fused, start internal calibration,
@@ -544,8 +555,9 @@ static int clk_enbale_pll_dvfs(struct gk20a *g)
/* Wait for internal calibration done (spec < 2us). */
do {
data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
if (trim_sys_gpcpll_dvfs1_dfs_cal_done_v(data))
if (trim_sys_gpcpll_dvfs1_dfs_cal_done_v(data)) {
break;
}
nvgpu_udelay(1);
delay--;
} while (delay > 0);
@@ -623,11 +635,13 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
sdm_old = trim_sys_gpcpll_cfg2_sdm_din_v(coeff);
if ((gpll->dvfs.n_int == nold) &&
(gpll->dvfs.sdm_din == sdm_old))
(gpll->dvfs.sdm_din == sdm_old)) {
return 0;
}
} else {
if (gpll->N == nold)
if (gpll->N == nold) {
return 0;
}

/* dynamic ramp setup based on update rate */
clk_setup_slide(g, gpll->clk_in / gpll->M);
@@ -674,8 +688,9 @@ static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
ramp_timeout--;
data = gk20a_readl(
g, trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_r());
if (trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(data))
if (trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(data)) {
break;
}
} while (ramp_timeout > 0);

if ((gpll->mode == GPC_PLL_MODE_DVFS) && (ramp_timeout > 0)) {
@@ -836,8 +851,9 @@ static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
do {
nvgpu_udelay(1);
cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f())
if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f()) {
goto pll_locked;
}
} while (--timeout > 0);

/* PLL is messed up. What can we do here? */
@@ -883,8 +899,9 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,

nvgpu_log_fn(g, " ");

if (!nvgpu_platform_is_silicon(g))
if (!nvgpu_platform_is_silicon(g)) {
return 0;
}

/* get old coefficients */
coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
@@ -901,19 +918,22 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
can_slide = allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg);

if (can_slide && (gpll_new->M == gpll.M) && (gpll_new->PL == gpll.PL))
if (can_slide && (gpll_new->M == gpll.M) && (gpll_new->PL == gpll.PL)) {
return clk_slide_gpc_pll(g, gpll_new);
}

/* slide down to NDIV_LO */
if (can_slide) {
int ret;
gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
gpll.clk_in);
if (gpll.mode == GPC_PLL_MODE_DVFS)
if (gpll.mode == GPC_PLL_MODE_DVFS) {
clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
}
ret = clk_slide_gpc_pll(g, &gpll);
if (ret)
if (ret) {
return ret;
}
}
pldiv_only = can_slide && (gpll_new->M == gpll.M);
@@ -962,13 +982,15 @@ static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
if (allow_slide) {
gpll.N = DIV_ROUND_UP(gpll_new->M * gpc_pll_params.min_vco,
gpll_new->clk_in);
if (gpll.mode == GPC_PLL_MODE_DVFS)
if (gpll.mode == GPC_PLL_MODE_DVFS) {
clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
}
}
if (pldiv_only)
if (pldiv_only) {
clk_change_pldiv_under_bypass(g, &gpll);
else
} else {
clk_lock_gpc_pll_under_bypass(g, &gpll);
}

#if PLDIV_GLITCHLESS
coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
@@ -1003,8 +1025,9 @@ static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll)
{
u32 nsafe, nmin;

if (gpll->freq > g->clk.dvfs_safe_max_freq)
if (gpll->freq > g->clk.dvfs_safe_max_freq) {
gpll->freq = gpll->freq * (100 - DVFS_SAFE_MARGIN) / 100;
}

nmin = DIV_ROUND_UP(gpll->M * gpc_pll_params.min_vco, gpll->clk_in);
nsafe = gpll->M * gpll->freq / gpll->clk_in;
@@ -1054,8 +1077,9 @@ static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
* - voltage is not changing, so DVFS detection settings are the same
*/
if (!allow_slide || !gpll_new->enabled ||
(gpll_old->dvfs.mv == gpll_new->dvfs.mv))
(gpll_old->dvfs.mv == gpll_new->dvfs.mv)) {
return clk_program_gpc_pll(g, gpll_new, allow_slide);
}

/*
* Interim step for changing DVFS detection settings: low enough
@@ -1129,8 +1153,9 @@ static int clk_disable_gpcpll(struct gk20a *g, int allow_slide)
gpll.M = trim_sys_gpcpll_coeff_mdiv_v(coeff);
gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
gpll.clk_in);
if (gpll.mode == GPC_PLL_MODE_DVFS)
if (gpll.mode == GPC_PLL_MODE_DVFS) {
clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
}
clk_slide_gpc_pll(g, &gpll);
}
@@ -1174,8 +1199,9 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)
nvgpu_log_fn(g, " ");

err = nvgpu_mutex_init(&clk->clk_mutex);
if (err)
if (err) {
return err;
}

if (clk->sw_ready) {
nvgpu_log_fn(g, "skip init");
@@ -1184,12 +1210,14 @@ int gm20b_init_clk_setup_sw(struct gk20a *g)

if (clk->gpc_pll.id == GM20B_GPC_PLL_C1) {
gpc_pll_params = gpc_pll_params_c1;
if (!clk->pll_poweron_uv)
if (!clk->pll_poweron_uv) {
clk->pll_poweron_uv = BOOT_GPU_UV_C1;
}
} else {
gpc_pll_params = gpc_pll_params_b1;
if (!clk->pll_poweron_uv)
if (!clk->pll_poweron_uv) {
clk->pll_poweron_uv = BOOT_GPU_UV_B1;
}
}

clk->gpc_pll.clk_in = g->ops.clk.get_ref_clock_rate(g) / KHZ;
@@ -1254,8 +1282,9 @@ int gm20b_clk_prepare(struct clk_gk20a *clk)
int ret = 0;

nvgpu_mutex_acquire(&clk->clk_mutex);
if (!clk->gpc_pll.enabled && clk->clk_hw_on)
if (!clk->gpc_pll.enabled && clk->clk_hw_on) {
ret = set_pll_freq(clk->g, 1);
}
nvgpu_mutex_release(&clk->clk_mutex);
return ret;
}
@@ -1263,8 +1292,9 @@ int gm20b_clk_prepare(struct clk_gk20a *clk)
void gm20b_clk_unprepare(struct clk_gk20a *clk)
{
nvgpu_mutex_acquire(&clk->clk_mutex);
if (clk->gpc_pll.enabled && clk->clk_hw_on)
if (clk->gpc_pll.enabled && clk->clk_hw_on) {
clk_disable_gpcpll(clk->g, 1);
}
nvgpu_mutex_release(&clk->clk_mutex);
}
@@ -1287,8 +1317,9 @@ int gm20b_gpcclk_set_rate(struct clk_gk20a *clk, unsigned long rate,
nvgpu_mutex_acquire(&clk->clk_mutex);
old_freq = clk->gpc_pll.freq;
ret = set_pll_target(clk->g, rate_gpu_to_gpc2clk(rate), old_freq);
if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on)
if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on) {
ret = set_pll_freq(clk->g, 1);
}
nvgpu_mutex_release(&clk->clk_mutex);

return ret;
@@ -1303,15 +1334,17 @@ long gm20b_round_rate(struct clk_gk20a *clk, unsigned long rate,
struct gk20a *g = clk->g;

maxrate = g->ops.clk.get_maxrate(g, CTRL_CLK_DOMAIN_GPCCLK);
if (rate > maxrate)
if (rate > maxrate) {
rate = maxrate;
}

nvgpu_mutex_acquire(&clk->clk_mutex);
freq = rate_gpu_to_gpc2clk(rate);
if (freq > gpc_pll_params.max_freq)
if (freq > gpc_pll_params.max_freq) {
freq = gpc_pll_params.max_freq;
else if (freq < gpc_pll_params.min_freq)
} else if (freq < gpc_pll_params.min_freq) {
freq = gpc_pll_params.min_freq;
}

tmp_pll = clk->gpc_pll;
clk_config_pll(clk, &tmp_pll, &gpc_pll_params, &freq, true);
@@ -1366,8 +1399,9 @@ static int gm20b_init_clk_setup_hw(struct gk20a *g)
gk20a_writel(g, therm_clk_slowdown_r(0), data);
gk20a_readl(g, therm_clk_slowdown_r(0));

if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS)
if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS) {
return clk_enbale_pll_dvfs(g);
}

return 0;
}
@@ -1376,10 +1410,11 @@ static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
{
struct clk_gk20a *clk = &g->clk;

if (freq > gpc_pll_params.max_freq)
if (freq > gpc_pll_params.max_freq) {
freq = gpc_pll_params.max_freq;
else if (freq < gpc_pll_params.min_freq)
} else if (freq < gpc_pll_params.min_freq) {
freq = gpc_pll_params.min_freq;
}

if (freq != old_freq) {
/* gpc_pll.freq is changed to new value here */
@@ -1403,12 +1438,14 @@ static int set_pll_freq(struct gk20a *g, int allow_slide)
/* If programming with dynamic sliding failed, re-try under bypass */
if (clk->gpc_pll.mode == GPC_PLL_MODE_DVFS) {
err = clk_program_na_gpc_pll(g, &clk->gpc_pll, allow_slide);
if (err && allow_slide)
if (err && allow_slide) {
err = clk_program_na_gpc_pll(g, &clk->gpc_pll, 0);
}
} else {
err = clk_program_gpc_pll(g, &clk->gpc_pll, allow_slide);
if (err && allow_slide)
if (err && allow_slide) {
err = clk_program_gpc_pll(g, &clk->gpc_pll, 0);
}
}

if (!err) {
@@ -1437,26 +1474,31 @@ int gm20b_init_clk_support(struct gk20a *g)

err = gm20b_init_clk_setup_hw(g);
nvgpu_mutex_release(&clk->clk_mutex);
if (err)
if (err) {
return err;
}

/* FIXME: this effectively prevents host level clock gating */
err = g->ops.clk.prepare_enable(&g->clk);
if (err)
if (err) {
return err;
}

/* The prev call may not enable PLL if gbus is unbalanced - force it */
nvgpu_mutex_acquire(&clk->clk_mutex);
if (!clk->gpc_pll.enabled)
if (!clk->gpc_pll.enabled) {
err = set_pll_freq(g, 1);
}
nvgpu_mutex_release(&clk->clk_mutex);
if (err)
if (err) {
return err;
}

if (!clk->debugfs_set && g->ops.clk.init_debugfs) {
err = g->ops.clk.init_debugfs(g);
if (err)
if (err) {
return err;
}
clk->debugfs_set = true;
}
@@ -1471,8 +1513,9 @@ int gm20b_suspend_clk_support(struct gk20a *g)

/* The prev call may not disable PLL if gbus is unbalanced - force it */
nvgpu_mutex_acquire(&g->clk.clk_mutex);
if (g->clk.gpc_pll.enabled)
if (g->clk.gpc_pll.enabled) {
ret = clk_disable_gpcpll(g, 1);
}
g->clk.clk_hw_on = false;
nvgpu_mutex_release(&g->clk.clk_mutex);
@@ -1488,12 +1531,14 @@ int gm20b_clk_get_voltage(struct clk_gk20a *clk, u64 *val)
u32 det_out;
int err;

if (clk->gpc_pll.mode != GPC_PLL_MODE_DVFS)
if (clk->gpc_pll.mode != GPC_PLL_MODE_DVFS) {
return -ENOSYS;
}

err = gk20a_busy(g);
if (err)
if (err) {
return err;
}

nvgpu_mutex_acquire(&g->clk.clk_mutex);
@@ -1519,8 +1564,9 @@ int gm20b_clk_get_gpcclk_clock_counter(struct clk_gk20a *clk, u64 *val)
u32 count1, count2;

err = gk20a_busy(g);
if (err)
if (err) {
return err;
}

nvgpu_mutex_acquire(&g->clk.clk_mutex);
@@ -1559,8 +1605,9 @@ int gm20b_clk_get_gpcclk_clock_counter(struct clk_gk20a *clk, u64 *val)

gk20a_idle(g);

if (count1 != count2)
if (count1 != count2) {
return -EBUSY;
}

return 0;
}
@@ -1571,11 +1618,13 @@ int gm20b_clk_pll_reg_write(struct gk20a *g, u32 reg, u32 val)
(reg > trim_sys_gpcpll_dvfs2_r())) &&
(reg != trim_sys_sel_vco_r()) &&
(reg != trim_sys_gpc2clk_out_r()) &&
(reg != trim_sys_bypassctrl_r()))
(reg != trim_sys_bypassctrl_r())) {
return -EPERM;
}

if (reg == trim_sys_gpcpll_dvfs2_r())
if (reg == trim_sys_gpcpll_dvfs2_r()) {
reg = trim_gpc_bcast_gpcpll_dvfs2_r();
}

nvgpu_mutex_acquire(&g->clk.clk_mutex);
if (!g->clk.clk_hw_on) {
@@ -99,9 +99,10 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
} else {
u32 mmu_id = gm20b_engine_id_to_mmu_id(g,
engine_id);
if (mmu_id != (u32)~0)
if (mmu_id != (u32)~0) {
gk20a_writel(g, fifo_trigger_mmu_fault_r(mmu_id),
fifo_trigger_mmu_fault_enable_f(1));
}
}
}
@@ -120,8 +121,9 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
} while (!nvgpu_timeout_expired(&timeout));

if (ret)
if (ret) {
nvgpu_err(g, "mmu fault timeout");
}

/* release mmu fault trigger */
for_each_set_bit(engine_id, &engine_ids, 32) {
@@ -150,9 +152,10 @@ void gm20b_device_info_data_parse(struct gk20a *g,
*fault_id =
top_device_info_data_fault_id_enum_v(table_entry);
}
} else
} else {
nvgpu_err(g, "unknown device_info_data %d",
top_device_info_data_type_v(table_entry));
}
}

void gm20b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
@@ -250,10 +253,11 @@ static const char * const gm20b_gpc_client_descs[] = {

void gm20b_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault)
{
if (mmfault->client_id >= ARRAY_SIZE(gm20b_gpc_client_descs))
if (mmfault->client_id >= ARRAY_SIZE(gm20b_gpc_client_descs)) {
WARN_ON(mmfault->client_id >=
ARRAY_SIZE(gm20b_gpc_client_descs));
else
} else {
mmfault->client_id_desc =
gm20b_gpc_client_descs[mmfault->client_id];
}
}
@@ -89,9 +89,10 @@ void gr_gm20b_cb_size_default(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;

if (!gr->attrib_cb_default_size)
if (!gr->attrib_cb_default_size) {
gr->attrib_cb_default_size =
gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
}
gr->alpha_cb_default_size =
gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
}
@@ -189,8 +190,9 @@ int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
nvgpu_log_fn(g, " ");

tsg = tsg_gk20a_from_ch(c);
if (!tsg)
if (!tsg) {
return -EINVAL;
}

ch_ctx = &tsg->gr_ctx;
@@ -338,8 +340,9 @@ void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
/* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF)
return; */

if (alpha_cb_size > gr->alpha_cb_size)
if (alpha_cb_size > gr->alpha_cb_size) {
alpha_cb_size = gr->alpha_cb_size;
}

gk20a_writel(g, gr_ds_tga_constraintlogic_r(),
(gk20a_readl(g, gr_ds_tga_constraintlogic_r()) &
@@ -385,8 +388,9 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)

nvgpu_log_fn(g, " ");

if (cb_size > gr->attrib_cb_size)
if (cb_size > gr->attrib_cb_size) {
cb_size = gr->attrib_cb_size;
}

gk20a_writel(g, gr_ds_tga_constraintlogic_r(),
(gk20a_readl(g, gr_ds_tga_constraintlogic_r()) &
@@ -485,18 +489,20 @@ bool gr_gm20b_is_valid_class(struct gk20a *g, u32 class_num)

bool gr_gm20b_is_valid_gfx_class(struct gk20a *g, u32 class_num)
{
if (class_num == MAXWELL_B)
if (class_num == MAXWELL_B) {
return true;
else
} else {
return false;
}
}

bool gr_gm20b_is_valid_compute_class(struct gk20a *g, u32 class_num)
{
if (class_num == MAXWELL_COMPUTE_B)
if (class_num == MAXWELL_COMPUTE_B) {
return true;
else
} else {
return false;
}
}
@@ -511,8 +517,9 @@ static u32 _sm_dsm_perf_ctrl_regs[2];

void gr_gm20b_init_sm_dsm_reg_info(void)
{
if (_sm_dsm_perf_ctrl_regs[0] != 0)
if (_sm_dsm_perf_ctrl_regs[0] != 0) {
return;
}

_sm_dsm_perf_ctrl_regs[0] =
gr_pri_gpc0_tpc0_sm_dsm_perf_counter_control0_r();
@@ -619,8 +626,9 @@ int gr_gm20b_load_smid_config(struct gk20a *g)
u32 tpc_index, gpc_index;

tpc_sm_id = nvgpu_kcalloc(g, gr_cwd_sm_id__size_1_v(), sizeof(u32));
if (!tpc_sm_id)
if (!tpc_sm_id) {
return -ENOMEM;
}

/* Each NV_PGRAPH_PRI_CWD_GPC_TPC_ID can store 4 TPCs.*/
for (i = 0; i <= ((g->gr.tpc_count-1) / 4); i++) {
@@ -632,8 +640,9 @@ int gr_gm20b_load_smid_config(struct gk20a *g)
u32 sm_id = (i * 4) + j;
u32 bits;

if (sm_id >= g->gr.tpc_count)
if (sm_id >= g->gr.tpc_count) {
break;
}

gpc_index = g->gr.sm_to_cluster[sm_id].gpc_index;
tpc_index = g->gr.sm_to_cluster[sm_id].tpc_index;
@@ -663,8 +672,9 @@ int gr_gm20b_init_fs_state(struct gk20a *g)
nvgpu_log_fn(g, " ");

err = gr_gk20a_init_fs_state(g);
if (err)
if (err) {
return err;
}

g->ops.gr.load_tpc_mask(g);
@@ -731,8 +741,9 @@ u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr)
for (i = 0; i < num_tpcs; i++) {
start = tpc_in_gpc_base + (i * tpc_in_gpc_stride);
if ((addr >= start) &&
(addr < (start + tpc_in_gpc_stride)))
(addr < (start + tpc_in_gpc_stride))) {
return i;
}
}
return 0;
}
@@ -793,10 +804,12 @@ int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
} else {
/* bind WPR VA inst block */
gr_gk20a_load_falcon_bind_instblk(g);
if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_FECS))
if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_FECS)) {
falcon_id_mask |= (1 << LSF_FALCON_ID_FECS);
if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_GPCCS))
}
if (g->ops.pmu.is_lazy_bootstrap(LSF_FALCON_ID_GPCCS)) {
falcon_id_mask |= (1 << LSF_FALCON_ID_GPCCS);
}

err = g->ops.pmu.load_lsfalcon_ucode(g, falcon_id_mask);
@@ -856,11 +869,13 @@ int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
nvgpu_log_fn(g, " ");

err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags);
if (err)
if (err) {
return err;
}

if (class == MAXWELL_COMPUTE_B)
if (class == MAXWELL_COMPUTE_B) {
gr_ctx->compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
}

nvgpu_log_fn(g, "done");
@@ -879,8 +894,9 @@ void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
nvgpu_log_fn(g, " ");

tsg = tsg_gk20a_from_ch(c);
if (!tsg)
if (!tsg) {
return;
}

gr_ctx = &tsg->gr_ctx;
if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) {
@@ -941,9 +957,10 @@ int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity3_r()));
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r()));
if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2)
if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r()));
}
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r()));
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY0: 0x%x\n",
@@ -956,9 +973,10 @@ int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_3_r()));
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r()));
if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2)
if (gr->gpc_tpc_count && gr->gpc_tpc_count[0] == 2) {
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r()));
}
gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n",
gk20a_readl(g, gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r()));
gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_ACTIVITY0: 0x%x\n",
@@ -1042,13 +1060,15 @@ int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
nvgpu_log_fn(c->g, " ");

tsg = tsg_gk20a_from_ch(c);
if (!tsg)
if (!tsg) {
return -EINVAL;
}

gr_ctx = &tsg->gr_ctx;
mem = &gr_ctx->mem;
if (!nvgpu_mem_is_valid(mem) || c->vpr)
if (!nvgpu_mem_is_valid(mem) || c->vpr) {
return -EINVAL;
}


v = nvgpu_mem_rd(c->g, mem, ctxsw_prog_main_image_pm_o());
@@ -1349,8 +1369,9 @@ int gm20b_gr_update_sm_error_state(struct gk20a *g,
int err = 0;

tsg = tsg_gk20a_from_ch(ch);
if (!tsg)
if (!tsg) {
return -EINVAL;
}

ch_ctx = &tsg->gr_ctx;
@@ -1374,8 +1395,9 @@ int gm20b_gr_update_sm_error_state(struct gk20a *g,
gm20b_gr_write_sm_error_state(g, offset, tsg_sm_error_states);
} else {
err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx, false);
if (err)
if (err) {
goto enable_ctxsw;
}

gr_gk20a_ctx_patch_write(g, ch_ctx,
gr_gpcs_tpcs_sm_hww_global_esr_report_mask_r() + offset,
@@ -716,8 +716,9 @@ int gm20b_init_hal(struct gk20a *g)
__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);

/* Read fuses to check if gpu needs to boot in secure/non-secure mode */
if (gops->fuse.check_priv_security(g))
if (gops->fuse.check_priv_security(g)) {
return -EINVAL; /* Do not boot gpu */
}

/* priv security dependent ops */
if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
@@ -42,10 +42,11 @@ void gm20b_mm_set_big_page_size(struct gk20a *g,
val = nvgpu_mem_rd32(g, mem, ram_in_big_page_size_w());
val &= ~ram_in_big_page_size_m();

if (size == SZ_64K)
if (size == SZ_64K) {
val |= ram_in_big_page_size_64kb_f();
else
} else {
val |= ram_in_big_page_size_128kb_f();
}

nvgpu_mem_wr32(g, mem, ram_in_big_page_size_w(), val);
nvgpu_log_fn(g, "done");
@@ -131,8 +131,9 @@ static void pmu_handle_acr_init_wpr_msg(struct gk20a *g, struct pmu_msg *msg,

gm20b_dbg_pmu(g, "reply PMU_ACR_CMD_ID_INIT_WPR_REGION");

if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS)
if (msg->msg.acr.acrmsg.errorcode == PMU_ACR_SUCCESS) {
g->pmu_lsf_pmu_wpr_init_done = 1;
}
nvgpu_log_fn(g, "done");
}
@@ -189,8 +190,9 @@ static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,

do {
reg = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(0));
if (reg == val)
if (reg == val) {
return 0;
}
nvgpu_udelay(delay);
} while (!nvgpu_timeout_expired(&timeout));
@@ -233,8 +235,9 @@ int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
unsigned long timeout = gk20a_get_gr_idle_timeout(g);

/* GM20B PMU supports loading FECS only */
if (!(falconidmask == (1 << LSF_FALCON_ID_FECS)))
if (!(falconidmask == (1 << LSF_FALCON_ID_FECS))) {
return -EINVAL;
}
/* check whether pmu is ready to bootstrap lsf if not wait for it */
if (!g->pmu_lsf_pmu_wpr_init_done) {
pmu_wait_message_cond(&g->pmu,