gpu: nvgpu: MISRA 10.1 fixes to pmu/sec2

MISRA Rule 10.1 states that operands shall not be of an
inappropriate essential type.

For example, the use of bitwise OR on signed values is not
permitted.

Both the pmu_read_message() and sec2_read_message() routines
do this in some cases when an error (or unexpected number of
bytes) is returned from the falcon queue pop/rewind routines.

This patch eliminates the MISRA violations by modifying these
cases to return the falcon queue operation error unmodified in the
corresponding status argument (or to use -EINVAL when the
requested number of bytes isn't returned).

To reduce code duplication new pmu_falcon_queue_read() and
sec2_falcon_queue_read() routines are added here to wrap the
code that handles the error for the respective units.

Note that higher up in the call sequence (tu104_sec2_isr() in the
sec2_read_message() case and gk20a_pmu_isr() in the pmu_read_message()
case) the actual status value is either checked only for non-zero or
ignored altogether.  So it appears no existing code would depend on the
bitwise OR result anyway.

JIRA NVGPU-650

Change-Id: Id303523ac096f1989e612044082e0a62ae8179c2
Signed-off-by: Scott Long <scottl@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1972624
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Scott Long
2018-12-13 14:31:23 -08:00
committed by mobile promotions
parent 9ad31113e8
commit d5f26aa074
2 changed files with 76 additions and 39 deletions

View File

@@ -638,12 +638,36 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
return err;
}
static bool pmu_read_message(struct nvgpu_pmu *pmu,
struct nvgpu_falcon_queue *queue,
struct pmu_msg *msg, int *status)
static bool pmu_falcon_queue_read(struct nvgpu_pmu *pmu,
struct nvgpu_falcon_queue *queue, void *data,
u32 bytes_to_read, int *status)
{
struct gk20a *g = gk20a_from_pmu(pmu);
u32 read_size, bytes_read;
u32 bytes_read;
int err;
err = nvgpu_falcon_queue_pop(pmu->flcn, queue, data,
bytes_to_read, &bytes_read);
if (err != 0) {
nvgpu_err(g, "fail to read msg: err %d", err);
*status = err;
return false;
}
if (bytes_read != bytes_to_read) {
nvgpu_err(g, "fail to read requested bytes: 0x%x != 0x%x",
bytes_to_read, bytes_read);
*status = -EINVAL;
return false;
}
return true;
}
static bool pmu_read_message(struct nvgpu_pmu *pmu,
struct nvgpu_falcon_queue *queue, struct pmu_msg *msg, int *status)
{
struct gk20a *g = gk20a_from_pmu(pmu);
u32 read_size;
u32 queue_id;
int err;
@@ -655,11 +679,9 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
queue_id = nvgpu_falcon_queue_get_id(queue);
err = nvgpu_falcon_queue_pop(pmu->flcn, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
if (!pmu_falcon_queue_read(pmu, queue, &msg->hdr, PMU_MSG_HDR_SIZE,
status)) {
nvgpu_err(g, "fail to read msg from queue %d", queue_id);
*status = err | -EINVAL;
goto clean_up;
}
@@ -667,16 +689,14 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
err = nvgpu_falcon_queue_rewind(pmu->flcn, queue);
if (err != 0) {
nvgpu_err(g, "fail to rewind queue %d", queue_id);
*status = err | -EINVAL;
*status = err;
goto clean_up;
}
/* read again after rewind */
err = nvgpu_falcon_queue_pop(pmu->flcn, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
nvgpu_err(g,
"fail to read msg from queue %d", queue_id);
*status = err | -EINVAL;
if (!pmu_falcon_queue_read(pmu, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
goto clean_up;
}
}
@@ -690,12 +710,10 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
if (msg->hdr.size > PMU_MSG_HDR_SIZE) {
read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
err = nvgpu_falcon_queue_pop(pmu->flcn, queue, &msg->msg,
read_size, &bytes_read);
if (err != 0 || bytes_read != read_size) {
nvgpu_err(g,
"fail to read msg from queue %d", queue_id);
*status = err;
if (!pmu_falcon_queue_read(pmu, queue, &msg->msg, read_size,
status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
goto clean_up;
}
}

View File

@@ -241,12 +241,37 @@ static int sec2_handle_event(struct nvgpu_sec2 *sec2,
return err;
}
static bool sec2_falcon_queue_read(struct nvgpu_sec2 *sec2,
struct nvgpu_falcon_queue *queue, void *data,
u32 bytes_to_read, int *status)
{
struct gk20a *g = sec2->g;
u32 bytes_read;
int err;
err = nvgpu_falcon_queue_pop(sec2->flcn, queue, data,
bytes_to_read, &bytes_read);
if (err != 0) {
nvgpu_err(g, "fail to read msg: err %d", err);
*status = err;
return false;
}
if (bytes_read != bytes_to_read) {
nvgpu_err(g, "fail to read requested bytes: 0x%x != 0x%x",
bytes_to_read, bytes_read);
*status = -EINVAL;
return false;
}
return true;
}
static bool sec2_read_message(struct nvgpu_sec2 *sec2,
struct nvgpu_falcon_queue *queue,
struct nv_flcn_msg_sec2 *msg, int *status)
{
struct gk20a *g = sec2->g;
u32 read_size, bytes_read;
u32 read_size;
u32 queue_id;
int err;
@@ -258,11 +283,9 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
queue_id = nvgpu_falcon_queue_get_id(queue);
err = nvgpu_falcon_queue_pop(sec2->flcn, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if ((err != 0) || (bytes_read != PMU_MSG_HDR_SIZE)) {
if (!sec2_falcon_queue_read(sec2, queue, &msg->hdr, PMU_MSG_HDR_SIZE,
status)) {
nvgpu_err(g, "fail to read msg from queue %d", queue_id);
*status = err | -EINVAL;
goto clean_up;
}
@@ -270,17 +293,15 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
err = nvgpu_falcon_queue_rewind(sec2->flcn, queue);
if (err != 0) {
nvgpu_err(g, "fail to rewind queue %d", queue_id);
*status = err | -EINVAL;
*status = err;
goto clean_up;
}
/* read again after rewind */
err = nvgpu_falcon_queue_pop(sec2->flcn, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if ((err != 0) || (bytes_read != PMU_MSG_HDR_SIZE)) {
nvgpu_err(g,
"fail to read msg from queue %d", queue_id);
*status = err | -EINVAL;
if (!sec2_falcon_queue_read(sec2, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
goto clean_up;
}
}
@@ -294,12 +315,10 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
if (msg->hdr.size > PMU_MSG_HDR_SIZE) {
read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
err = nvgpu_falcon_queue_pop(sec2->flcn, queue, &msg->msg,
read_size, &bytes_read);
if ((err != 0) || (bytes_read != read_size)) {
nvgpu_err(g,
"fail to read msg from queue %d", queue_id);
*status = err;
if (!sec2_falcon_queue_read(sec2, queue, &msg->msg, read_size,
status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
goto clean_up;
}
}