gpu: nvgpu: pmu: fix MISRA 10.3 violations

This fixes a number of MISRA 10.3 violations in the common/pmu/pmu_g*
files. MISRA C:2012 Rule 10.3 states that the value of an expression
shall not be assigned to an object with a narrower essential type or of
a different essential type category. The violations are fixed by adding
explicit casts, by asserting that a value fits before narrowing it, by
declaring variables with the correct types, and by using the
ARRAY_SIZE() and BIT32() helpers.
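
For example, the recurring header-size fix computes the size in a
size_t, asserts that it fits, and only then narrows it. A minimal
sketch of the pattern, using the same types and helpers the patch
itself uses:

    /* Non-compliant: the size_t result of the addition is
     * implicitly narrowed to the u8 hdr.size field. */
    cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_zbc_cmd);

    /* Compliant: compute wide, range-check, then cast explicitly. */
    tmp_size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_zbc_cmd);
    nvgpu_assert(tmp_size <= (size_t)U8_MAX);
    cmd.hdr.size = (u8)tmp_size;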

JIRA NVGPU-1008

Change-Id: If29f70697ab397e5716d3a0b087b3b5c2232cf0f
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2017608
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Authored by Philip Elcan on 2019-02-04 17:07:37 -05:00; committed by mobile promotions
parent a2a6ed903e
commit c493342dc0
6 changed files with 72 additions and 44 deletions

View File

@@ -49,7 +49,7 @@
 bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 {
-	u32 i = 0, j = strlen(strings);
+	u32 i = 0, j = (u32)strlen(strings);
 	for (; i < j; i++) {
 		if (strings[i] == '%') {
@@ -59,7 +59,7 @@ bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 			}
 		}
 	}
-	*hex_pos = -1;
+	*hex_pos = U32_MAX;
 	return false;
 }
@@ -189,18 +189,20 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
 	struct mm_gk20a *mm = &g->mm;
 	struct pmu_ucode_desc *desc =
 		(struct pmu_ucode_desc *)(void *)pmu->fw_image->data;
-	u64 addr_code, addr_data, addr_load;
+	u32 addr_code, addr_data, addr_load;
 	u32 i, blocks, addr_args;
 	int err;
+	u64 tmp_addr;
 	nvgpu_log_fn(g, " ");
 	gk20a_writel(g, pwr_falcon_itfen_r(),
 		gk20a_readl(g, pwr_falcon_itfen_r()) |
 		pwr_falcon_itfen_ctxen_enable_f());
+	tmp_addr = nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12;
+	nvgpu_assert(u64_hi32(tmp_addr) == 0U);
 	gk20a_writel(g, pwr_pmu_new_instblk_r(),
-		pwr_pmu_new_instblk_ptr_f(
-		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
+		pwr_pmu_new_instblk_ptr_f((u32)tmp_addr) |
 		pwr_pmu_new_instblk_valid_f(1) |
 		pwr_pmu_new_instblk_target_sys_coh_f());
@@ -249,7 +251,7 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
 	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args);
 	g->ops.pmu.write_dmatrfbase(g,
-		U32(addr_load) - (desc->bootloader_imem_offset >> U32(8)));
+		addr_load - (desc->bootloader_imem_offset >> U32(8)));
 	blocks = ((desc->bootloader_size + 0xFFU) & ~0xFFU) >> 8;
@@ -528,7 +530,7 @@ bool gk20a_is_pmu_supported(struct gk20a *g)
 u32 gk20a_pmu_pg_engines_list(struct gk20a *g)
 {
-	return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS);
+	return BIT32(PMU_PG_ELPG_ENGINE_ID_GRAPHICS);
 }
 u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
@@ -553,6 +555,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 seq;
+	size_t tmp_size;
 	if (!pmu->pmu_ready || (entries == 0U) || !pmu->zbc_ready) {
 		return;
@@ -560,7 +563,9 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_zbc_cmd);
+	tmp_size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_zbc_cmd);
+	nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+	cmd.hdr.size = (u8)tmp_size;
 	cmd.cmd.zbc.cmd_type = g->pmu_ver_cmd_id_zbc_table_update;
 	cmd.cmd.zbc.entry_mask = ZBC_MASK(entries);

View File

@@ -28,6 +28,7 @@
 #include <nvgpu/enabled.h>
 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/bug.h>
 #include "pmu_gk20a.h"
 #include "pmu_gm20b.h"
@@ -99,14 +100,13 @@ static struct pg_init_sequence_list _pginitseq_gm20b[] = {
 int gm20b_pmu_setup_elpg(struct gk20a *g)
 {
 	int ret = 0;
-	u32 reg_writes;
-	u32 index;
+	size_t reg_writes;
+	size_t index;
 	nvgpu_log_fn(g, " ");
 	if (g->elpg_enabled) {
-		reg_writes = ((sizeof(_pginitseq_gm20b) /
-			sizeof((_pginitseq_gm20b)[0])));
+		reg_writes = ARRAY_SIZE(_pginitseq_gm20b);
 		/* Initialize registers with production values*/
 		for (index = 0; index < reg_writes; index++) {
 			gk20a_writel(g, _pginitseq_gm20b[index].regaddr,
@@ -137,14 +137,17 @@ int gm20b_pmu_init_acr(struct gk20a *g)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 seq;
+	size_t tmp_size;
 	nvgpu_log_fn(g, " ");
 	/* init ACR */
 	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_ACR;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE +
-		sizeof(struct pmu_acr_cmd_init_wpr_details);
+	tmp_size = PMU_CMD_HDR_SIZE +
+		sizeof(struct pmu_acr_cmd_init_wpr_details);
+	nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+	cmd.hdr.size = (u8)tmp_size;
 	cmd.cmd.acr.init_wpr.cmd_type = PMU_ACR_CMD_ID_INIT_WPR_REGION;
 	cmd.cmd.acr.init_wpr.regionid = 0x01U;
 	cmd.cmd.acr.init_wpr.wproffset = 0x00U;
@@ -173,7 +176,7 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
 static int pmu_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout_ms,
 	u32 val)
 {
-	unsigned long delay = GR_FECS_POLL_INTERVAL;
+	u32 delay = GR_FECS_POLL_INTERVAL;
 	u32 reg;
 	struct nvgpu_timeout timeout;
@@ -198,6 +201,7 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 seq;
+	size_t tmp_size;
 	nvgpu_log_fn(g, " ");
@@ -206,8 +210,10 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
 	/* send message to load FECS falcon */
 	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_ACR;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE +
-		sizeof(struct pmu_acr_cmd_bootstrap_falcon);
+	tmp_size = PMU_CMD_HDR_SIZE +
+		sizeof(struct pmu_acr_cmd_bootstrap_falcon);
+	nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+	cmd.hdr.size = (u8)tmp_size;
 	cmd.cmd.acr.bootstrap_falcon.cmd_type =
 		PMU_ACR_CMD_ID_BOOTSTRAP_FALCON;
 	cmd.cmd.acr.bootstrap_falcon.flags = flags;
@@ -224,9 +230,9 @@ void gm20b_pmu_load_lsf(struct gk20a *g, u32 falcon_id, u32 flags)
 int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 {
-	u32 err = 0;
+	int err = 0;
 	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
-	unsigned long timeout = gk20a_get_gr_idle_timeout(g);
+	u32 timeout = gk20a_get_gr_idle_timeout(g);
 	/* GM20B PMU supports loading FECS only */
 	if (!(falconidmask == BIT32(FALCON_ID_FECS))) {
@@ -352,15 +358,17 @@ static int gm20b_bl_bootstrap(struct gk20a *g,
 	struct nvgpu_falcon_bl_info *bl_info)
 {
 	struct mm_gk20a *mm = &g->mm;
+	u64 tmp_addr;
 	nvgpu_log_fn(g, " ");
 	gk20a_writel(g, pwr_falcon_itfen_r(),
 		gk20a_readl(g, pwr_falcon_itfen_r()) |
 		pwr_falcon_itfen_ctxen_enable_f());
+	tmp_addr = nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12U;
+	nvgpu_assert(u64_hi32(tmp_addr) == 0U);
 	gk20a_writel(g, pwr_pmu_new_instblk_r(),
-		pwr_pmu_new_instblk_ptr_f(
-		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12U) |
+		pwr_pmu_new_instblk_ptr_f((u32)tmp_addr) |
 		pwr_pmu_new_instblk_valid_f(1U) |
 		(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?
 		pwr_pmu_new_instblk_target_sys_coh_f() :

View File

@@ -28,6 +28,7 @@
 #include <nvgpu/enabled.h>
 #include <nvgpu/io.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/bug.h>
 #include "pmu_gk20a.h"
 #include "pmu_gm20b.h"
@@ -144,6 +145,7 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 seq;
+	size_t tmp_size;
 	nvgpu_log_fn(g, " ");
@@ -152,8 +154,10 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
 	/* send message to load FECS falcon */
 	(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_ACR;
-	cmd.hdr.size = PMU_CMD_HDR_SIZE +
-		sizeof(struct pmu_acr_cmd_bootstrap_multiple_falcons);
+	tmp_size = PMU_CMD_HDR_SIZE +
+		sizeof(struct pmu_acr_cmd_bootstrap_multiple_falcons);
+	nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+	cmd.hdr.size = (u8)tmp_size;
 	cmd.cmd.acr.boot_falcons.cmd_type =
 		PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS;
 	cmd.cmd.acr.boot_falcons.flags = flags;
@@ -199,9 +203,10 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	}
 	/* load falcon(s) */
 	gp10b_pmu_load_multiple_falcons(g, falconidmask, flags);
+	nvgpu_assert(falconidmask <= U8_MAX);
 	pmu_wait_message_cond(&g->pmu,
 		gk20a_get_gr_idle_timeout(g),
-		&g->pmu_lsf_loaded_falcon_id, falconidmask);
+		&g->pmu_lsf_loaded_falcon_id, (u8)falconidmask);
 	if (g->pmu_lsf_loaded_falcon_id != falconidmask) {
 		return -ETIMEDOUT;
 	}
@@ -230,12 +235,15 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 seq;
+	size_t tmp_size;
 	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 		cmd.hdr.unit_id = PMU_UNIT_PG;
-		cmd.hdr.size = PMU_CMD_HDR_SIZE +
+		tmp_size = PMU_CMD_HDR_SIZE +
 			sizeof(struct pmu_pg_cmd_gr_init_param_v2);
+		nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+		cmd.hdr.size = (u8)tmp_size;
 		cmd.cmd.pg.gr_init_param_v2.cmd_type =
 			PMU_PG_CMD_ID_PG_PARAM;
 		cmd.cmd.pg.gr_init_param_v2.sub_cmd_id =
@@ -283,14 +291,13 @@ int gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 int gp10b_pmu_setup_elpg(struct gk20a *g)
 {
 	int ret = 0;
-	u32 reg_writes;
-	u32 index;
+	size_t reg_writes;
+	size_t index;
 	nvgpu_log_fn(g, " ");
 	if (g->elpg_enabled) {
-		reg_writes = ((sizeof(_pginitseq_gp10b) /
-			sizeof((_pginitseq_gp10b)[0])));
+		reg_writes = ARRAY_SIZE(_pginitseq_gp10b);
 		/* Initialize registers with production values*/
 		for (index = 0; index < reg_writes; index++) {
 			gk20a_writel(g, _pginitseq_gp10b[index].regaddr,

View File

@@ -31,6 +31,7 @@
 #include <nvgpu/gk20a.h>
 #include <nvgpu/nvgpu_err.h>
 #include <nvgpu/firmware.h>
+#include <nvgpu/bug.h>
 #include "pmu_gp10b.h"
 #include "pmu_gp106.h"
@@ -136,14 +137,13 @@ static void gv11b_pmu_report_ecc_error(struct gk20a *g, u32 inst,
 int gv11b_pmu_setup_elpg(struct gk20a *g)
 {
 	int ret = 0;
-	u32 reg_writes;
-	u32 index;
+	size_t reg_writes;
+	size_t index;
 	nvgpu_log_fn(g, " ");
 	if (g->elpg_enabled) {
-		reg_writes = ((sizeof(_pginitseq_gv11b) /
-			sizeof((_pginitseq_gv11b)[0])));
+		reg_writes = ARRAY_SIZE(_pginitseq_gv11b);
 		/* Initialize registers with production values*/
 		for (index = 0; index < reg_writes; index++) {
 			gk20a_writel(g, _pginitseq_gv11b[index].regaddr,
@@ -166,10 +166,11 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
 	struct mm_gk20a *mm = &g->mm;
 	struct pmu_ucode_desc *desc =
 		(struct pmu_ucode_desc *)(void *)pmu->fw_image->data;
-	u64 addr_code_lo, addr_data_lo, addr_load_lo;
-	u64 addr_code_hi, addr_data_hi;
+	u32 addr_code_lo, addr_data_lo, addr_load_lo;
+	u32 addr_code_hi, addr_data_hi;
 	u32 i, blocks, addr_args;
 	int err;
+	u64 tmp_addr;
 	nvgpu_log_fn(g, " ");
@@ -177,9 +178,10 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
 		gk20a_readl(g, pwr_falcon_itfen_r()) |
 		pwr_falcon_itfen_ctxen_enable_f());
+	tmp_addr = nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> ALIGN_4KB;
+	nvgpu_assert(u64_hi32(tmp_addr) == 0U);
 	gk20a_writel(g, pwr_pmu_new_instblk_r(),
-		pwr_pmu_new_instblk_ptr_f(
-		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> ALIGN_4KB) |
+		pwr_pmu_new_instblk_ptr_f((u32)tmp_addr) |
 		pwr_pmu_new_instblk_valid_f(1) |
 		(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?
 		pwr_pmu_new_instblk_target_sys_coh_f() :
@@ -248,7 +250,7 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
 	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args);
 	g->ops.pmu.write_dmatrfbase(g,
-		U32(addr_load_lo) -
+		addr_load_lo -
 		(desc->bootloader_imem_offset >> U32(8)));
 	blocks = ((desc->bootloader_size + 0xFFU) & ~0xFFU) >> 8U;
@@ -325,10 +327,10 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0)
 	/* update counters per slice */
 	if (corrected_overflow != 0U) {
-		corrected_delta += (0x1UL << pwr_pmu_falcon_ecc_corrected_err_count_total_s());
+		corrected_delta += BIT32(pwr_pmu_falcon_ecc_corrected_err_count_total_s());
 	}
 	if (uncorrected_overflow != 0U) {
-		uncorrected_delta += (0x1UL << pwr_pmu_falcon_ecc_uncorrected_err_count_total_s());
+		uncorrected_delta += BIT32(pwr_pmu_falcon_ecc_uncorrected_err_count_total_s());
 	}
 	g->ecc.pmu.pmu_ecc_corrected_err_count[0].counter += corrected_delta;
@@ -446,12 +448,15 @@ int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 seq;
+	size_t tmp_size;
 	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 		cmd.hdr.unit_id = PMU_UNIT_PG;
-		cmd.hdr.size = PMU_CMD_HDR_SIZE +
+		tmp_size = PMU_CMD_HDR_SIZE +
 			sizeof(struct pmu_pg_cmd_gr_init_param_v1);
+		nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+		cmd.hdr.size = (u8)tmp_size;
 		cmd.cmd.pg.gr_init_param_v1.cmd_type =
 			PMU_PG_CMD_ID_PG_PARAM;
 		cmd.cmd.pg.gr_init_param_v1.sub_cmd_id =
@@ -475,12 +480,15 @@ int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 seq;
+	size_t tmp_size;
 	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 		(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
 		cmd.hdr.unit_id = PMU_UNIT_PG;
-		cmd.hdr.size = PMU_CMD_HDR_SIZE +
+		tmp_size = PMU_CMD_HDR_SIZE +
 			sizeof(struct pmu_pg_cmd_sub_feature_mask_update);
+		nvgpu_assert(tmp_size <= (size_t)U8_MAX);
+		cmd.hdr.size = (u8)tmp_size;
 		cmd.cmd.pg.sf_mask_update.cmd_type =
 			PMU_PG_CMD_ID_PG_PARAM;
 		cmd.cmd.pg.sf_mask_update.sub_cmd_id =

View File

@@ -1813,7 +1813,7 @@ struct gk20a {
 	struct gpu_ops ops;
 	u32 mc_intr_mask_restore[4];
 	/*used for change of enum zbc update cmd id from ver 0 to ver1*/
-	u32 pmu_ver_cmd_id_zbc_table_update;
+	u8 pmu_ver_cmd_id_zbc_table_update;
 	bool pmu_lsf_pmu_wpr_init_done;
 	u32 pmu_lsf_loaded_falcon_id;

View File

@@ -157,7 +157,7 @@ enum pmu_seq_state {
 		(_prpc)->hdr.flags = 0x0; \
 		\
 		_stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr), \
-			(sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
+			(u16)(sizeof(*(_prpc)) - sizeof((_prpc)->scratch)), \
 			(_size), NULL, NULL, false); \
 	} while (false)