gpu: nvgpu: common: fix MISRA 10.4 violations

MISRA Rule 10.4 only allows arithmetic operations on operands of the
same essential type category.

Adding "U" at the end of the integer literals to have same type of
operands when an arithmetic operation is performed.

This fix violations where an arithmetic operation is performed on
signed and unsigned int types.
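
For illustration only, a minimal sketch of the pattern being applied
(hypothetical names, not taken from the nvgpu sources): a u32 operand is
essentially unsigned, so comparing it against a plain "0" mixes essential
type categories, while "0U" keeps both operands in the unsigned category:

    #include <stdint.h>

    /* Hypothetical example, not nvgpu code. */
    static int check_size(uint32_t size)
    {
            if (size == 0) {        /* non-compliant: "0" is essentially signed */
                    return -1;
            }
            if (size == 0U) {       /* compliant: both operands essentially unsigned */
                    return -1;
            }
            return 0;
    }

The hunks below apply the same change to the size, offset, fb_size and
DMEM-size checks, and to the "delay * 2" multiplication.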

Jira NVGPU-992

Change-Id: Iab512139a025e035ec82a9dd74245bcf1f3869fb
Signed-off-by: Sai Nikhil <snikhil@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1789425
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -184,9 +184,9 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 		goto invalid_cmd;
 	}
-	if ((payload->in.buf != NULL && payload->in.size == 0) ||
-		(payload->out.buf != NULL && payload->out.size == 0) ||
-		(payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0)) {
+	if ((payload->in.buf != NULL && payload->in.size == 0U) ||
+		(payload->out.buf != NULL && payload->out.size == 0U) ||
+		(payload->rpc.prpc != NULL && payload->rpc.size_rpc == 0U)) {
 		goto invalid_cmd;
 	}
@@ -207,8 +207,8 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	}
-	if ((payload->in.offset != 0 && payload->in.buf == NULL) ||
-		(payload->out.offset != 0 && payload->out.buf == NULL)) {
+	if ((payload->in.offset != 0U && payload->in.buf == NULL) ||
+		(payload->out.offset != 0U && payload->out.buf == NULL)) {
 		goto invalid_cmd;
 	}
@@ -316,7 +316,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 		seq->out_payload = payload->out.buf;
 	}
-	if (payload && payload->in.offset != 0) {
+	if (payload && payload->in.offset != 0U) {
 		pv->set_pmu_allocation_ptr(pmu, &in,
 			((u8 *)&cmd->cmd + payload->in.offset));
@@ -335,7 +335,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 			goto clean_up;
 		}
-		if (payload->in.fb_size != 0x0) {
+		if (payload->in.fb_size != 0x0U) {
 			seq->in_mem = nvgpu_kzalloc(g,
 				sizeof(struct nvgpu_mem));
 			if (!seq->in_mem) {
@@ -365,7 +365,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 			pv->pmu_allocation_get_dmem_offset(pmu, in));
 	}
-	if (payload && payload->out.offset != 0) {
+	if (payload && payload->out.offset != 0U) {
 		pv->set_pmu_allocation_ptr(pmu, &out,
 			((u8 *)&cmd->cmd + payload->out.offset));
 		pv->pmu_allocation_set_dmem_size(pmu, out,
@@ -381,7 +381,7 @@ static int pmu_cmd_payload_extract(struct gk20a *g, struct pmu_cmd *cmd,
 			goto clean_up;
 		}
-		if (payload->out.fb_size != 0x0) {
+		if (payload->out.fb_size != 0x0U) {
 			seq->out_mem = nvgpu_kzalloc(g,
 				sizeof(struct nvgpu_mem));
 			if (!seq->out_mem) {
@@ -534,7 +534,7 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 		}
 	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_out_a_ptr(seq)) != 0) {
+		pv->get_pmu_seq_out_a_ptr(seq)) != 0U) {
 		nvgpu_flcn_copy_from_dmem(pmu->flcn,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 				pv->get_pmu_seq_out_a_ptr(seq)),
@@ -546,13 +546,13 @@ static int pmu_response_handle(struct nvgpu_pmu *pmu,
 		seq->callback = NULL;
 	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_in_a_ptr(seq)) != 0) {
+		pv->get_pmu_seq_in_a_ptr(seq)) != 0U) {
 		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 				pv->get_pmu_seq_in_a_ptr(seq)));
 	}
 	if (pv->pmu_allocation_get_dmem_size(pmu,
-		pv->get_pmu_seq_out_a_ptr(seq)) != 0) {
+		pv->get_pmu_seq_out_a_ptr(seq)) != 0U) {
 		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 				pv->get_pmu_seq_out_a_ptr(seq)));
@@ -748,7 +748,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
 			gk20a_pmu_isr(g);
 		}
-		nvgpu_usleep_range(delay, delay * 2);
+		nvgpu_usleep_range(delay, delay * 2U);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 	} while (!nvgpu_timeout_expired(&timeout));