gpu: nvgpu: gp106: Fix MISRA 15.6 violations

MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix the violations caused by
single-statement if blocks without braces by introducing the braces.
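
For illustration, the pattern being fixed looks like this (a minimal
sketch of the rule, not code taken from the diff below; do_cleanup()
is a placeholder, not an nvgpu function):

    /* Non-compliant with MISRA 15.6: unbraced single-statement bodies */
    if (err)
            return err;
    else
            do_cleanup();

    /* Compliant: every if and else body is a compound statement */
    if (err) {
            return err;
    } else {
            do_cleanup();
    }

Bracing every body avoids dangling-else ambiguity and prevents a later
edit from silently adding a statement outside the intended body.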

JIRA NVGPU-671

Change-Id: I8493274995ed8de526902dd0ca0808b2972e28aa
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1796806
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

Author: Srirangan
Date: 2018-08-27 11:29:01 +05:30
Committed by: mobile promotions
Parent: 2f97e683fe
Commit: e3710e5431

12 changed files with 161 additions and 88 deletions


@@ -90,8 +90,9 @@ int gp106_alloc_blob_space(struct gk20a *g,
         struct wpr_carveout_info wpr_inf;
         int err;
-        if (mem->size)
+        if (mem->size) {
                 return 0;
+        }
         g->ops.pmu.get_wpr(g, &wpr_inf);
@@ -102,8 +103,9 @@ int gp106_alloc_blob_space(struct gk20a *g,
         err = nvgpu_dma_alloc_vid_at(g,
                         wpr_inf.size,
                         &g->acr.wpr_dummy, wpr_inf.wpr_base);
-        if (err)
+        if (err) {
                 return err;
+        }
         return nvgpu_dma_alloc_vid_at(g,
                         wpr_inf.size, mem,
@@ -291,8 +293,9 @@ int gpccs_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img)
         struct nvgpu_firmware *gpccs_sig = NULL;
         int err;
-        if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS))
+        if (!nvgpu_is_enabled(g, NVGPU_SEC_SECUREGPCCS)) {
                 return -ENOENT;
+        }
         switch (ver) {
         case NVGPU_GPUID_GP104:
@@ -415,8 +418,9 @@ static u32 lsfm_discover_and_add_sub_wprs(struct gk20a *g,
         if (size_4K) {
                 pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_sub_wpr));
-                if (pnode == NULL)
+                if (pnode == NULL) {
                         return -ENOMEM;
+                }
                 pnode->sub_wpr_header.use_case_id = sub_wpr_index;
                 pnode->sub_wpr_header.size_4K = size_4K;
@@ -460,23 +464,27 @@ int gp106_prepare_ucode_blob(struct gk20a *g)
         /* Discover all managed falcons*/
         err = lsfm_discover_ucode_images(g, plsfm);
         gp106_dbg_pmu(g, " Managed Falcon cnt %d\n", plsfm->managed_flcn_cnt);
-        if (err)
+        if (err) {
                 goto exit_err;
+        }
-        if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR))
+        if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
                 lsfm_discover_and_add_sub_wprs(g, plsfm);
+        }
         if (plsfm->managed_flcn_cnt && !g->acr.ucode_blob.cpu_va) {
                 /* Generate WPR requirements*/
                 err = lsf_gen_wpr_requirements(g, plsfm);
-                if (err)
+                if (err) {
                         goto exit_err;
+                }
                 /*Alloc memory to hold ucode blob contents*/
                 err = g->ops.pmu.alloc_blob_space(g, plsfm->wpr_size
                         ,&g->acr.ucode_blob);
-                if (err)
+                if (err) {
                         goto exit_err;
+                }
                 gp106_dbg_pmu(g, "managed LS falcon %d, WPR size %d bytes.\n",
                         plsfm->managed_flcn_cnt, plsfm->wpr_size);
@@ -512,13 +520,15 @@ int lsfm_discover_ucode_images(struct gk20a *g,
         /* Obtain the PMU ucode image and add it to the list if required*/
         memset(&ucode_img, 0, sizeof(ucode_img));
         status = pmu_ucode_details(g, &ucode_img);
-        if (status)
+        if (status) {
                 return status;
+        }
         if (ucode_img.lsf_desc != NULL) {
                 /* The falon_id is formed by grabbing the static base
                  * falon_id from the image and adding the
-                 * engine-designated falcon instance.*/
+                 * engine-designated falcon instance.
+                 */
                 pmu->pmu_mode |= PMU_SECURE_MODE;
                 falcon_id = ucode_img.lsf_desc->falcon_id +
                         ucode_img.flcn_inst;
@@ -526,8 +536,9 @@ int lsfm_discover_ucode_images(struct gk20a *g,
                 if (!lsfm_falcon_disabled(g, plsfm, falcon_id)) {
                         pmu->falcon_id = falcon_id;
                         if (lsfm_add_ucode_img(g, plsfm, &ucode_img,
-                                pmu->falcon_id) == 0)
+                                pmu->falcon_id) == 0) {
                                 pmu->pmu_mode |= PMU_LSFM_MANAGED;
+                        }
                        plsfm->managed_flcn_cnt++;
                 } else {
@@ -566,8 +577,9 @@ int lsfm_discover_ucode_images(struct gk20a *g,
                                 /* Do not manage non-FB ucode*/
                                 if (lsfm_add_ucode_img(g,
                                         plsfm, &ucode_img, falcon_id)
-                                        == 0)
+                                        == 0) {
                                         plsfm->managed_flcn_cnt++;
+                                }
                         } else {
                                 gp106_dbg_pmu(g, "not managed %d\n",
                                         ucode_img.lsf_desc->falcon_id);
@@ -599,17 +611,20 @@ int gp106_pmu_populate_loader_cfg(struct gk20a *g,
         u64 addr_code, addr_data;
         u32 addr_args;
-        if (p_img->desc == NULL) /*This means its a header based ucode,
-                                  and so we do not fill BL gen desc structure*/
+        if (p_img->desc == NULL) {
+                /* This means its a header based ucode,
+                 * and so we do not fill BL gen desc structure
+                 */
                 return -EINVAL;
+        }
         desc = p_img->desc;
         /*
-        Calculate physical and virtual addresses for various portions of
-        the PMU ucode image
-        Calculate the 32-bit addresses for the application code, application
-        data, and bootloader code. These values are all based on IM_BASE.
-        The 32-bit addresses will be the upper 32-bits of the virtual or
-        physical addresses of each respective segment.
+         * Calculate physical and virtual addresses for various portions of
+         * the PMU ucode image
+         * Calculate the 32-bit addresses for the application code, application
+         * data, and bootloader code. These values are all based on IM_BASE.
+         * The 32-bit addresses will be the upper 32-bits of the virtual or
+         * physical addresses of each respective segment.
         */
         addr_base = p_lsfm->lsb_header.ucode_off;
         g->ops.pmu.get_wpr(g, &wpr_inf);
@@ -670,18 +685,21 @@ int gp106_flcn_populate_bl_dmem_desc(struct gk20a *g,
         struct pmu_ucode_desc_v1 *desc;
         u64 addr_code, addr_data;
-        if (p_img->desc == NULL) /*This means its a header based ucode,
-                                  and so we do not fill BL gen desc structure*/
+        if (p_img->desc == NULL) {
+                /* This means its a header based ucode,
+                 * and so we do not fill BL gen desc structure
+                 */
                 return -EINVAL;
+        }
         desc = p_img->desc;
         /*
-        Calculate physical and virtual addresses for various portions of
-        the PMU ucode image
-        Calculate the 32-bit addresses for the application code, application
-        data, and bootloader code. These values are all based on IM_BASE.
-        The 32-bit addresses will be the upper 32-bits of the virtual or
-        physical addresses of each respective segment.
+         * Calculate physical and virtual addresses for various portions of
+         * the PMU ucode image
+         * Calculate the 32-bit addresses for the application code, application
+         * data, and bootloader code. These values are all based on IM_BASE.
+         * The 32-bit addresses will be the upper 32-bits of the virtual or
+         * physical addresses of each respective segment.
         */
         addr_base = p_lsfm->lsb_header.ucode_off;
         g->ops.pmu.get_wpr(g, &wpr_inf);
@@ -728,10 +746,11 @@ int lsfm_fill_flcn_bl_gen_desc(struct gk20a *g,
         if (pmu->pmu_mode & PMU_LSFM_MANAGED) {
                 gp106_dbg_pmu(g, "pmu write flcn bl gen desc\n");
-                if (pnode->wpr_header.falcon_id == pmu->falcon_id)
+                if (pnode->wpr_header.falcon_id == pmu->falcon_id) {
                         return g->ops.pmu.pmu_populate_loader_cfg(g, pnode,
                                 &pnode->bl_gen_desc_size);
+                }
         }
         /* Failed to find the falcon requested. */
         return -ENOENT;
@@ -784,8 +803,9 @@ void lsfm_init_wpr_contents(struct gk20a *g,
         memset(&last_wpr_hdr, 0, sizeof(struct lsf_wpr_header_v1));
         i = 0;
-        if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR))
+        if (nvgpu_is_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR)) {
                 lsfm_init_sub_wpr_contents(g, plsfm, ucode);
+        }
         /*
          * Walk the managed falcons, flush WPR and LSB headers to FB.
@@ -918,9 +938,10 @@ void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
         u32 full_app_size = 0;
         u32 data = 0;
-        if (pnode->ucode_img.lsf_desc)
+        if (pnode->ucode_img.lsf_desc) {
                 memcpy(&pnode->lsb_header.signature, pnode->ucode_img.lsf_desc,
                         sizeof(struct lsf_ucode_desc_v1));
+        }
         pnode->lsb_header.ucode_size = pnode->ucode_img.data_size;
         /* The remainder of the LSB depends on the loader usage */
@@ -974,10 +995,11 @@ void lsfm_fill_static_lsb_hdr_info(struct gk20a *g,
                         pnode->lsb_header.flags = data;
                 }
-                if(g->ops.pmu.is_priv_load(falcon_id))
+                if (g->ops.pmu.is_priv_load(falcon_id)) {
                         pnode->lsb_header.flags |=
                                 NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE;
+                }
         }
 }
 /* Adds a ucode image to the list of managed ucode images managed. */
@@ -987,8 +1009,9 @@ int lsfm_add_ucode_img(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm,
         struct lsfm_managed_ucode_img_v2 *pnode;
         pnode = nvgpu_kzalloc(g, sizeof(struct lsfm_managed_ucode_img_v2));
-        if (pnode == NULL)
+        if (pnode == NULL) {
                 return -ENOMEM;
+        }
         /* Keep a copy of the ucode image info locally */
         memcpy(&pnode->ucode_img, ucode_image, sizeof(struct flcn_ucode_img_v1));
@@ -1043,11 +1066,12 @@ void free_acr_resources(struct gk20a *g, struct ls_flcn_mgr_v1 *plsfm)
         while (cnt) {
                 mg_ucode_img = plsfm->ucode_img_list;
                 if (mg_ucode_img->ucode_img.lsf_desc->falcon_id ==
-                                LSF_FALCON_ID_PMU)
+                                LSF_FALCON_ID_PMU) {
                         lsfm_free_ucode_img_res(g, &mg_ucode_img->ucode_img);
-                else
+                } else {
                         lsfm_free_nonpmu_ucode_img_res(g,
                                 &mg_ucode_img->ucode_img);
+                }
                 plsfm->ucode_img_list = mg_ucode_img->next;
                 nvgpu_kfree(g, mg_ucode_img);
                 cnt--;
@@ -1279,8 +1303,9 @@ int gp106_bootstrap_hs_flcn(struct gk20a *g)
                         acr->acr_ucode.gpu_va +
                         (acr_ucode_header_t210_load[2]));
                 bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
-        } else
+        } else {
                 acr->acr_dmem_desc->nonwpr_ucode_blob_size = 0;
+        }
         status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
         if (status != 0) {


@@ -123,8 +123,9 @@ int gp106_bios_devinit(struct gk20a *g)
                 nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);
         } while (!devinit_completed && !nvgpu_timeout_expired(&timeout));
-        if (nvgpu_timeout_peek_expired(&timeout))
+        if (nvgpu_timeout_peek_expired(&timeout)) {
                 err = -ETIMEDOUT;
+        }
         nvgpu_flcn_clear_halt_intr_status(g->pmu.flcn,
                 gk20a_get_gr_idle_timeout(g));
@@ -138,8 +139,9 @@ int gp106_bios_preos_wait_for_halt(struct gk20a *g)
 {
         int err = 0;
-        if (nvgpu_flcn_wait_for_halt(g->pmu.flcn, PMU_BOOT_TIMEOUT_MAX / 1000))
+        if (nvgpu_flcn_wait_for_halt(g->pmu.flcn, PMU_BOOT_TIMEOUT_MAX / 1000)) {
                 err = -ETIMEDOUT;
+        }
         return err;
 }
@@ -155,8 +157,9 @@ int gp106_bios_preos(struct gk20a *g)
                 goto out;
         }
-        if (g->ops.bios.preos_reload_check)
+        if (g->ops.bios.preos_reload_check) {
                 g->ops.bios.preos_reload_check(g);
+        }
         upload_code(g, g->bios.preos.bootloader_phys_base,
                 g->bios.preos.bootloader,
@@ -190,17 +193,20 @@ int gp106_bios_init(struct gk20a *g)
         nvgpu_log_fn(g, " ");
-        if (g->bios_is_init)
+        if (g->bios_is_init) {
                 return 0;
+        }
         nvgpu_log_info(g, "reading bios from EEPROM");
         g->bios.size = BIOS_SIZE;
         g->bios.data = nvgpu_vmalloc(g, BIOS_SIZE);
-        if (!g->bios.data)
+        if (!g->bios.data) {
                 return -ENOMEM;
+        }
-        if (g->ops.xve.disable_shadow_rom)
+        if (g->ops.xve.disable_shadow_rom) {
                 g->ops.xve.disable_shadow_rom(g);
+        }
         for (i = 0; i < g->bios.size/4; i++) {
                 u32 val = be32_to_cpu(gk20a_readl(g, 0x300000 + i*4));
@@ -209,12 +215,14 @@ int gp106_bios_init(struct gk20a *g)
                 g->bios.data[(i*4)+2] = (val >> 8) & 0xff;
                 g->bios.data[(i*4)+3] = val & 0xff;
         }
-        if (g->ops.xve.enable_shadow_rom)
+        if (g->ops.xve.enable_shadow_rom) {
                 g->ops.xve.enable_shadow_rom(g);
+        }
         err = nvgpu_bios_parse_rom(g);
-        if (err)
+        if (err) {
                 goto free_firmware;
+        }
         if (g->bios.vbios_version < g->vbios_min_version) {
                 nvgpu_err(g, "unsupported VBIOS version %08x",
@@ -254,7 +262,8 @@ int gp106_bios_init(struct gk20a *g)
         return 0;
 free_firmware:
-        if (g->bios.data)
+        if (g->bios.data) {
                 nvgpu_vfree(g, g->bios.data);
+        }
         return err;
 }


@@ -55,20 +55,24 @@ int gp106_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
         p5_info = pstate_get_clk_set_info(g,
                         CTRL_PERF_PSTATE_P5, clkwhich);
-        if (!p5_info)
+        if (!p5_info) {
                 return -EINVAL;
+        }
         p0_info = pstate_get_clk_set_info(g,
                         CTRL_PERF_PSTATE_P0, clkwhich);
-        if (!p0_info)
+        if (!p0_info) {
                 return -EINVAL;
+        }
         limit_min_mhz = p5_info->min_mhz;
         /* WAR for DVCO min */
-        if (api_domain == CTRL_CLK_DOMAIN_GPC2CLK)
+        if (api_domain == CTRL_CLK_DOMAIN_GPC2CLK) {
                 if ((pfllobjs->max_min_freq_mhz) &&
-                        (pfllobjs->max_min_freq_mhz >= limit_min_mhz))
+                        (pfllobjs->max_min_freq_mhz >= limit_min_mhz)) {
                         limit_min_mhz = pfllobjs->max_min_freq_mhz + 1;
+                }
+        }
         *min_mhz = limit_min_mhz;
         *max_mhz = p0_info->max_mhz;
@@ -97,8 +101,9 @@ int gp106_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
         p0_info = pstate_get_clk_set_info(g,
                         CTRL_PERF_PSTATE_P0, clkwhich);
-        if (!p0_info)
+        if (!p0_info) {
                 return -EINVAL;
+        }
         *default_mhz = p0_info->max_mhz;


@@ -68,8 +68,9 @@ unsigned long gp106_clk_measure_freq(struct gk20a *g, u32 api_domain)
                 }
         }
-        if (!c)
+        if (!c) {
                 return 0;
+        }
         freq_khz = c->is_counter ? c->scale * gp106_get_rate_cntr(g, c) :
                 0; /* TODO: PLL read */
@@ -86,8 +87,9 @@ int gp106_init_clk_support(struct gk20a *g)
         nvgpu_log_fn(g, " ");
         err = nvgpu_mutex_init(&clk->clk_mutex);
-        if (err)
+        if (err) {
                 return err;
+        }
         clk->clk_namemap = (struct namemap_cfg *)
                 nvgpu_kzalloc(g, sizeof(struct namemap_cfg) * NUM_NAMEMAPS);
@@ -185,8 +187,9 @@ static u32 gp106_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c) {
         struct clk_gk20a *clk = &g->clk;
-        if (!c || !c->cntr.reg_ctrl_addr || !c->cntr.reg_cntr_addr)
+        if (!c || !c->cntr.reg_ctrl_addr || !c->cntr.reg_cntr_addr) {
                 return 0;
+        }
         nvgpu_mutex_acquire(&clk->clk_mutex);


@@ -85,21 +85,23 @@ static const char * const gp106_gpc_client_descs[] = {
 void gp106_fifo_get_mmu_fault_gpc_desc(struct mmu_fault_info *mmfault)
 {
-        if (mmfault->client_id >= ARRAY_SIZE(gp106_gpc_client_descs))
+        if (mmfault->client_id >= ARRAY_SIZE(gp106_gpc_client_descs)) {
                 WARN_ON(mmfault->client_id >=
                         ARRAY_SIZE(gp106_gpc_client_descs));
-        else
+        } else {
                 mmfault->client_id_desc =
                         gp106_gpc_client_descs[mmfault->client_id];
+        }
 }
 /* fill in mmu fault client description */
 void gp106_fifo_get_mmu_fault_client_desc(struct mmu_fault_info *mmfault)
 {
-        if (mmfault->client_id >= ARRAY_SIZE(gp106_hub_client_descs))
+        if (mmfault->client_id >= ARRAY_SIZE(gp106_hub_client_descs)) {
                 WARN_ON(mmfault->client_id >=
                         ARRAY_SIZE(gp106_hub_client_descs));
-        else
+        } else {
                 mmfault->client_id_desc =
                         gp106_hub_client_descs[mmfault->client_id];
+        }
 }


@@ -92,7 +92,8 @@ void gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
         if (flcn->is_falcon_supported) {
                 nvgpu_mutex_init(&flcn->copy_lock);
                 gp106_falcon_ops(flcn);
-        } else
+        } else {
                 nvgpu_info(g, "falcon 0x%x not supported on %s",
                         flcn->flcn_id, g->name);
+        }
 }


@@ -129,8 +129,9 @@ void gr_gp106_cb_size_default(struct gk20a *g)
 {
         struct gr_gk20a *gr = &g->gr;
-        if (!gr->attrib_cb_default_size)
+        if (!gr->attrib_cb_default_size) {
                 gr->attrib_cb_default_size = 0x800;
+        }
         gr->alpha_cb_default_size =
                 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
         gr->attrib_cb_gfxp_default_size =
@@ -147,20 +148,24 @@ int gr_gp106_set_ctxsw_preemption_mode(struct gk20a *g,
 {
         int err = 0;
-        if (class == PASCAL_B && g->gr.ctx_vars.force_preemption_gfxp)
+        if (class == PASCAL_B && g->gr.ctx_vars.force_preemption_gfxp) {
                 graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
+        }
         if (class == PASCAL_COMPUTE_B &&
-                        g->gr.ctx_vars.force_preemption_cilp)
+                        g->gr.ctx_vars.force_preemption_cilp) {
                 compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
+        }
         /* check for invalid combinations */
-        if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
+        if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0)) {
                 return -EINVAL;
+        }
         if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
-                (compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
+                (compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP)) {
                 return -EINVAL;
+        }
         /* set preemption modes */
         switch (graphics_preempt_mode) {


@@ -224,8 +224,9 @@ static int gp106_init_gpu_characteristics(struct gk20a *g)
         int err;
         err = gk20a_init_gpu_characteristics(g);
-        if (err)
+        if (err) {
                 return err;
+        }
         __nvgpu_set_enabled(g, NVGPU_SUPPORT_GET_VOLTAGE, true);
         __nvgpu_set_enabled(g, NVGPU_SUPPORT_GET_CURRENT, true);
@@ -868,8 +869,10 @@ int gp106_init_hal(struct gk20a *g)
         __nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, false);
         /* Read fuses to check if gpu needs to boot in secure/non-secure mode */
-        if (gops->fuse.check_priv_security(g))
-                return -EINVAL; /* Do not boot gpu */
+        if (gops->fuse.check_priv_security(g)) {
+                /* Do not boot gpu */
+                return -EINVAL;
+        }
         g->pmu_lsf_pmu_wpr_init_done = 0;
         g->bootstrap_owner = LSF_FALCON_ID_SEC2;


@@ -3079,8 +3079,9 @@ static int mclk_get_memclk_table(struct gk20a *g)
                 memcpy(&memclock_base_entry, mem_entry_ptr,
                         memclock_table_header.base_entry_size);
-                if (memclock_base_entry.maximum == 0)
+                if (memclock_base_entry.maximum == 0) {
                         continue;
+                }
                 script_index = BIOS_GET_FIELD(memclock_base_entry.flags1,
                         VBIOS_MEMORY_CLOCK_BASE_ENTRY_11_FLAGS1_SCRIPT_INDEX);
@@ -3089,8 +3090,9 @@ static int mclk_get_memclk_table(struct gk20a *g)
                         memclock_table_header.script_list_ptr +
                         script_index * sizeof(u32));
-                if (!script_ptr)
+                if (!script_ptr) {
                         continue;
+                }
                 /* Link and execute shadow scripts */
@@ -3107,9 +3109,10 @@ static int mclk_get_memclk_table(struct gk20a *g)
                         for (shadow_idx = 0; shadow_idx <
                                 fb_fbpa_fbio_delay_priv_max_v();
                                 ++shadow_idx) {
-                                if (idx_to_ptr_tbl[shadow_idx] == 0)
+                                if (idx_to_ptr_tbl[shadow_idx] == 0) {
                                         break;
+                                }
                         }
                         if (shadow_idx > fb_fbpa_fbio_delay_priv_max_v()) {
                                 nvgpu_err(g, "invalid shadow reg script index");
@@ -3142,15 +3145,17 @@ static int mclk_get_memclk_table(struct gk20a *g)
                         memclock_table_header.cmd_script_list_ptr +
                         cmd_script_index * sizeof(u32));
-                if (!cmd_script_ptr)
+                if (!cmd_script_ptr) {
                         continue;
+                }
                 /* Link and execute cmd shadow scripts */
                 for (cmd_idx = 0; cmd_idx <= fb_fbpa_fbio_cmd_delay_cmd_priv_max_v();
                         ++cmd_idx) {
-                        if (cmd_script_ptr == idx_to_cmd_ptr_tbl[cmd_idx])
+                        if (cmd_script_ptr == idx_to_cmd_ptr_tbl[cmd_idx]) {
                                 break;
+                        }
                 }
                 /* script has not been executed before */
                 if (cmd_idx > fb_fbpa_fbio_cmd_delay_cmd_priv_max_v()) {
@@ -3158,9 +3163,10 @@ static int mclk_get_memclk_table(struct gk20a *g)
                         for (cmd_idx = 0; cmd_idx <
                                 fb_fbpa_fbio_cmd_delay_cmd_priv_max_v();
                                 ++cmd_idx) {
-                                if (idx_to_cmd_ptr_tbl[cmd_idx] == 0)
+                                if (idx_to_cmd_ptr_tbl[cmd_idx] == 0) {
                                         break;
+                                }
                         }
                         if (cmd_idx > fb_fbpa_fbio_cmd_delay_cmd_priv_max_v()) {
                                 nvgpu_err(g,
@@ -3220,12 +3226,14 @@ int gp106_mclk_init(struct gk20a *g)
         mclk = &g->clk_pmu.clk_mclk;
         err = nvgpu_mutex_init(&mclk->mclk_lock);
-        if (err)
+        if (err) {
                 return err;
+        }
         err = nvgpu_mutex_init(&mclk->data_lock);
-        if (err)
+        if (err) {
                 goto fail_mclk_mutex;
+        }
         /* FBPA gain WAR */
         gk20a_writel(g, fb_fbpa_fbio_iref_byte_rx_ctrl_r(), 0x22222222);
@@ -3326,15 +3334,17 @@ int gp106_mclk_change(struct gk20a *g, u16 val)
         nvgpu_mutex_acquire(&mclk->mclk_lock);
-        if (!mclk->init)
+        if (!mclk->init) {
                 goto exit_status;
+        }
         speed = (val < mclk->p5_min) ? GP106_MCLK_LOW_SPEED :
                 (val < mclk->p0_min) ? GP106_MCLK_MID_SPEED :
                         GP106_MCLK_HIGH_SPEED;
-        if (speed == mclk->speed)
+        if (speed == mclk->speed) {
                 goto exit_status;
+        }
         seq_script_ptr = m->scripts[mclk->speed][speed].addr;
         seq_script_size = m->scripts[mclk->speed][speed].size;


@@ -39,8 +39,9 @@ size_t gp106_mm_get_vidmem_size(struct gk20a *g)
         u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
         size_t bytes = ((size_t)mag << scale) * SZ_1M;
-        if (ecc)
+        if (ecc) {
                 bytes = bytes / 16 * 15;
+        }
         return bytes;
 }


@@ -54,8 +54,9 @@ bool gp106_pmu_is_engine_in_reset(struct gk20a *g)
         bool status = false;
         reg_reset = gk20a_readl(g, pwr_falcon_engine_r());
-        if (reg_reset == pwr_falcon_engine_reset_true_f())
+        if (reg_reset == pwr_falcon_engine_reset_true_f()) {
                 status = true;
+        }
         return status;
 }
@@ -82,11 +83,13 @@ int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset)
 u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 {
-        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
+        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
                 return NVGPU_PMU_GR_FEATURE_MASK_RPPG;
+        }
-        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
+        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
                 return NVGPU_PMU_MS_FEATURE_MASK_ALL;
+        }
         return 0;
 }
@@ -274,11 +277,13 @@ int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
         u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
         /* GM20B PMU supports loading FECS and GPCCS only */
-        if (falconidmask == 0)
+        if (falconidmask == 0) {
                 return -EINVAL;
+        }
         if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
-                (1 << LSF_FALCON_ID_GPCCS)))
+                (1 << LSF_FALCON_ID_GPCCS))) {
                 return -EINVAL;
+        }
         g->pmu_lsf_loaded_falcon_id = 0;
         /* check whether pmu is ready to bootstrap lsf if not wait for it */
         if (!g->pmu_lsf_pmu_wpr_init_done) {
@@ -296,7 +301,8 @@ int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
         pmu_wait_message_cond(&g->pmu,
                 gk20a_get_gr_idle_timeout(g),
                 &g->pmu_lsf_loaded_falcon_id, falconidmask);
-        if (g->pmu_lsf_loaded_falcon_id != falconidmask)
+        if (g->pmu_lsf_loaded_falcon_id != falconidmask) {
                 return -ETIMEDOUT;
+        }
         return 0;
 }


@@ -41,8 +41,9 @@ int gp106_sec2_clear_halt_interrupt_status(struct gk20a *g,
 {
         int status = 0;
-        if (nvgpu_flcn_clear_halt_intr_status(&g->sec2_flcn, timeout))
+        if (nvgpu_flcn_clear_halt_intr_status(&g->sec2_flcn, timeout)) {
                 status = -EBUSY;
+        }
         return status;
 }
@@ -166,8 +167,9 @@ void init_pmu_setup_hw1(struct gk20a *g)
         g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
         g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
                 pmu, GK20A_PMU_DMAIDX_VIRT);
-        if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface)
+        if (g->ops.pmu_ver.config_pmu_cmdline_args_super_surface) {
                 g->ops.pmu_ver.config_pmu_cmdline_args_super_surface(pmu);
+        }
         nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
                 (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
@@ -222,8 +224,9 @@ int init_sec2_setup_hw1(struct gk20a *g,
                 psec_fbif_transcfg_target_noncoherent_sysmem_f());
         err = bl_bootstrap_sec2(pmu, desc, bl_sz);
-        if (err)
+        if (err) {
                 return err;
+        }
         return 0;
 }
}