gpu: nvgpu: pmu: fix MISRA 10.3 issues in pmu_ipc

MISRA Rule 10.3 prohibits implicitly assigning the value of an expression
to an object with a narrower essential type or of a different essential
type category. This fixes MISRA 10.3 violations in pmu_ipc.c.
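
The pattern applied throughout the fix is to do the arithmetic in a wider
type, range-check the result, and only then narrow with an explicit cast.
Below is a minimal standalone sketch of that idiom, using standard C types
and assert() in place of nvgpu's u16/u32 typedefs and nvgpu_assert():

    #include <assert.h>
    #include <stdint.h>

    /* Non-compliant: the u16 operands are promoted to int, and the int
     * result is implicitly narrowed back to uint16_t on assignment. */
    static uint16_t size_sum_bad(uint16_t a, uint16_t b)
    {
            uint16_t s = a + b;   /* MISRA 10.3 violation */
            return s;
    }

    /* Compliant: compute in a wider type, check the range explicitly,
     * then narrow with a visible cast. */
    static uint16_t size_sum_ok(uint16_t a, uint16_t b)
    {
            uint32_t tmp = (uint32_t)a + (uint32_t)b;

            assert(tmp <= UINT16_MAX);
            return (uint16_t)tmp;
    }

    int main(void)
    {
            uint16_t ok  = size_sum_ok(40000u, 20000u);  /* 60000 fits  */
            uint16_t bad = size_sum_bad(40000u, 30000u); /* wraps: 4464 */
            return (ok == 60000u && bad == 4464u) ? 0 : 1;
    }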

JIRA NVGPU-2841

Change-Id: I97e236ce8e64407ab776611c512caee13c9186a0
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2027768
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 0a57c4b93e
parent 0990135166
Author:    Philip Elcan
Date:      2019-02-25 14:21:50 -05:00
Committed: mobile promotions

2 changed files with 15 additions and 10 deletions

pmu_ipc.c

@@ -665,7 +665,7 @@ static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd,
 	struct nvgpu_pmu *pmu = &g->pmu;
 	struct nv_falcon_fbq_hdr *fbq_hdr = NULL;
 	struct pmu_cmd *flcn_cmd = NULL;
-	u16 fbq_size_needed = 0;
+	u32 fbq_size_needed = 0;
 	u16 heap_offset = 0;
 	u64 tmp;
 	int err = 0;
@@ -679,16 +679,16 @@ static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd,
 	if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID) {
 		if (payload != NULL) {
-			fbq_size_needed = payload->rpc.size_rpc +
-				payload->rpc.size_scratch;
+			fbq_size_needed = (u32)payload->rpc.size_rpc +
+				(u32)payload->rpc.size_scratch;
 		}
 	} else {
 		if (payload != NULL) {
 			if (payload->in.offset != 0U) {
 				if (payload->in.buf != payload->out.buf) {
-					fbq_size_needed = (u16)payload->in.size;
+					fbq_size_needed = payload->in.size;
 				} else {
-					fbq_size_needed = (u16)max(payload->in.size,
+					fbq_size_needed = max(payload->in.size,
 						payload->out.size);
 				}
 			}
@@ -701,9 +701,11 @@ static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd,
 		}
 	}
-	fbq_size_needed = fbq_size_needed +
+	tmp = fbq_size_needed +
 		sizeof(struct nv_falcon_fbq_hdr) +
 		cmd->hdr.size;
+	nvgpu_assert(tmp <= (size_t)U32_MAX);
+	fbq_size_needed = (u32)tmp;
 	fbq_size_needed = ALIGN_UP(fbq_size_needed, 4);
@@ -723,14 +725,17 @@ static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd,
 		nvgpu_engine_fb_queue_get_element_size(queue));
 	/* Need to save room for both FBQ hdr, and the CMD */
-	seq->buffer_size_used = sizeof(struct nv_falcon_fbq_hdr) +
-		cmd->hdr.size;
+	tmp = sizeof(struct nv_falcon_fbq_hdr) +
+		cmd->hdr.size;
+	nvgpu_assert(tmp <= (size_t)U16_MAX);
+	seq->buffer_size_used = (u16)tmp;
 	/* copy cmd into the work buffer */
 	nvgpu_memcpy((u8 *)flcn_cmd, (u8 *)cmd, cmd->hdr.size);
 	/* Fill in FBQ hdr, and offset in seq structure */
-	fbq_hdr->heap_size = fbq_size_needed;
+	nvgpu_assert(fbq_size_needed < U16_MAX);
+	fbq_hdr->heap_size = (u16)fbq_size_needed;
 	fbq_hdr->heap_offset = heap_offset;
 	seq->fbq_heap_offset = heap_offset;
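
For reference, the ALIGN_UP() in the hunk above rounds fbq_size_needed up
to a 4-byte boundary. Below is a sketch of the usual power-of-two round-up
idiom behind such a macro (nvgpu's actual definition may differ):

    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to the next multiple of a; a must be a power of two. */
    #define ALIGN_UP(x, a)  (((x) + ((a) - 1U)) & ~((a) - 1U))

    int main(void)
    {
            /* A 13-byte frame padded to the next 4-byte boundary is 16. */
            printf("%u\n", (unsigned)ALIGN_UP(13U, 4U));
            return 0;
    }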

(header declaring struct pmu_sequence)

@@ -314,7 +314,7 @@ struct pmu_sequence {
 	 * has been used so far, as the outbound frame is assembled
 	 * (first FB Queue hdr, then CMD, then payloads).
 	 */
-	u32 buffer_size_used;
+	u16 buffer_size_used;
 	/* offset to out data in the queue element */
 	u16 fbq_out_offset_in_queue_element;
 };