diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
index 888cb6b35..803ec1050 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_queue.c
@@ -472,6 +472,21 @@ void nvgpu_falcon_queue_free(struct nvgpu_falcon *flcn,
 	(void) memset(queue, 0, sizeof(struct nvgpu_falcon_queue));
 }
 
+u32 nvgpu_falcon_queue_get_id(struct nvgpu_falcon_queue *queue)
+{
+	return queue->id;
+}
+
+u32 nvgpu_falcon_queue_get_index(struct nvgpu_falcon_queue *queue)
+{
+	return queue->index;
+}
+
+u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue)
+{
+	return queue->size;
+}
+
 int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_queue *queue)
 {
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.c b/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.c
index 65484c0ea..e568b4120 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_gk20a.c
@@ -395,6 +395,8 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	u32 *head, bool set)
 {
 	u32 queue_head_size = 0;
+	u32 queue_id = nvgpu_falcon_queue_get_id(queue);
+	u32 queue_index = nvgpu_falcon_queue_get_index(queue);
 
 	if (g->ops.pmu.pmu_get_queue_head_size != NULL) {
 		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
@@ -402,19 +404,19 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 
 	BUG_ON((head == NULL) || (queue_head_size == 0U));
 
-	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
+	if (PMU_IS_COMMAND_QUEUE(queue_id)) {
 
-		if (queue->index >= queue_head_size) {
+		if (queue_index >= queue_head_size) {
 			return -EINVAL;
 		}
 
 		if (!set) {
 			*head = pwr_pmu_queue_head_address_v(
 				gk20a_readl(g,
-				g->ops.pmu.pmu_get_queue_head(queue->index)));
+				g->ops.pmu.pmu_get_queue_head(queue_index)));
 		} else {
 			gk20a_writel(g,
-				g->ops.pmu.pmu_get_queue_head(queue->index),
+				g->ops.pmu.pmu_get_queue_head(queue_index),
 				pwr_pmu_queue_head_address_f(*head));
 		}
 	} else {
@@ -435,6 +437,8 @@ int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	u32 *tail, bool set)
 {
 	u32 queue_tail_size = 0;
+	u32 queue_id = nvgpu_falcon_queue_get_id(queue);
+	u32 queue_index = nvgpu_falcon_queue_get_index(queue);
 
 	if (g->ops.pmu.pmu_get_queue_tail_size != NULL) {
 		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
@@ -442,18 +446,18 @@ int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 
 	BUG_ON((tail == NULL) || (queue_tail_size == 0U));
 
-	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
+	if (PMU_IS_COMMAND_QUEUE(queue_id)) {
 
-		if (queue->index >= queue_tail_size) {
+		if (queue_index >= queue_tail_size) {
 			return -EINVAL;
 		}
 
 		if (!set) {
 			*tail = pwr_pmu_queue_tail_address_v(gk20a_readl(g,
-				g->ops.pmu.pmu_get_queue_tail(queue->index)));
+				g->ops.pmu.pmu_get_queue_tail(queue_index)));
 		} else {
 			gk20a_writel(g,
-				g->ops.pmu.pmu_get_queue_tail(queue->index),
+				g->ops.pmu.pmu_get_queue_tail(queue_index),
 				pwr_pmu_queue_tail_address_f(*tail));
 		}
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 2cd962911..2585d5abc 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -153,6 +153,7 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct nvgpu_falcon_queue *queue;
+	u32 queue_size;
 	u32 in_size, out_size;
 
 	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id)) {
@@ -160,11 +161,12 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	}
 
 	queue = &pmu->queue[queue_id];
+	queue_size = nvgpu_falcon_queue_get_size(queue);
 	if (cmd->hdr.size < PMU_CMD_HDR_SIZE) {
 		goto invalid_cmd;
 	}
 
-	if (cmd->hdr.size > (queue->size >> 1)) {
+	if (cmd->hdr.size > (queue_size >> 1)) {
 		goto invalid_cmd;
 	}
 
@@ -633,6 +635,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	u32 read_size, bytes_read;
+	u32 queue_id;
 	int err;
 
 	*status = 0;
@@ -641,10 +644,12 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 		return false;
 	}
 
+	queue_id = nvgpu_falcon_queue_get_id(queue);
+
 	err = nvgpu_falcon_queue_pop(pmu->flcn, queue, &msg->hdr,
 		PMU_MSG_HDR_SIZE, &bytes_read);
 	if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
-		nvgpu_err(g, "fail to read msg from queue %d", queue->id);
+		nvgpu_err(g, "fail to read msg from queue %d", queue_id);
 		*status = err | -EINVAL;
 		goto clean_up;
 	}
@@ -652,7 +657,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 	if (msg->hdr.unit_id == PMU_UNIT_REWIND) {
 		err = nvgpu_falcon_queue_rewind(pmu->flcn, queue);
 		if (err != 0) {
-			nvgpu_err(g, "fail to rewind queue %d", queue->id);
+			nvgpu_err(g, "fail to rewind queue %d", queue_id);
 			*status = err | -EINVAL;
 			goto clean_up;
 		}
@@ -661,7 +666,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 			PMU_MSG_HDR_SIZE, &bytes_read);
 		if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
 			nvgpu_err(g,
-				"fail to read msg from queue %d", queue->id);
+				"fail to read msg from queue %d", queue_id);
 			*status = err | -EINVAL;
 			goto clean_up;
 		}
@@ -669,7 +674,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 
 	if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) {
 		nvgpu_err(g, "read invalid unit_id %d from queue %d",
-			msg->hdr.unit_id, queue->id);
+			msg->hdr.unit_id, queue_id);
 		*status = -EINVAL;
 		goto clean_up;
 	}
@@ -680,7 +685,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 			read_size, &bytes_read);
 		if (err != 0 || bytes_read != read_size) {
 			nvgpu_err(g,
-				"fail to read msg from queue %d", queue->id);
+				"fail to read msg from queue %d", queue_id);
 			*status = err;
 			goto clean_up;
 		}
diff --git a/drivers/gpu/nvgpu/common/sec2/sec2_ipc.c b/drivers/gpu/nvgpu/common/sec2/sec2_ipc.c
index f401a9cdb..17ae081fa 100644
--- a/drivers/gpu/nvgpu/common/sec2/sec2_ipc.c
+++ b/drivers/gpu/nvgpu/common/sec2/sec2_ipc.c
@@ -82,6 +82,7 @@ static bool sec2_validate_cmd(struct nvgpu_sec2 *sec2,
 {
 	struct gk20a *g = sec2->g;
 	struct nvgpu_falcon_queue *queue;
+	u32 queue_size;
 
 	if (queue_id != SEC2_NV_CMDQ_LOG_ID) {
 		goto invalid_cmd;
@@ -92,7 +93,8 @@ static bool sec2_validate_cmd(struct nvgpu_sec2 *sec2,
 		goto invalid_cmd;
 	}
 
-	if (cmd->hdr.size > (queue->size >> 1)) {
+	queue_size = nvgpu_falcon_queue_get_size(queue);
+	if (cmd->hdr.size > (queue_size >> 1)) {
 		goto invalid_cmd;
 	}
 
@@ -245,6 +247,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 {
 	struct gk20a *g = sec2->g;
 	u32 read_size, bytes_read;
+	u32 queue_id;
 	int err;
 
 	*status = 0U;
@@ -253,10 +256,12 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 		return false;
 	}
 
+	queue_id = nvgpu_falcon_queue_get_id(queue);
+
 	err = nvgpu_falcon_queue_pop(sec2->flcn, queue, &msg->hdr,
 		PMU_MSG_HDR_SIZE, &bytes_read);
 	if ((err != 0) || (bytes_read != PMU_MSG_HDR_SIZE)) {
-		nvgpu_err(g, "fail to read msg from queue %d", queue->id);
+		nvgpu_err(g, "fail to read msg from queue %d", queue_id);
 		*status = err | -EINVAL;
 		goto clean_up;
 	}
@@ -264,7 +269,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 	if (msg->hdr.unit_id == NV_SEC2_UNIT_REWIND) {
 		err = nvgpu_falcon_queue_rewind(sec2->flcn, queue);
 		if (err != 0) {
-			nvgpu_err(g, "fail to rewind queue %d", queue->id);
+			nvgpu_err(g, "fail to rewind queue %d", queue_id);
 			*status = err | -EINVAL;
 			goto clean_up;
 		}
@@ -274,7 +279,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 			PMU_MSG_HDR_SIZE, &bytes_read);
 		if ((err != 0) || (bytes_read != PMU_MSG_HDR_SIZE)) {
 			nvgpu_err(g,
-				"fail to read msg from queue %d", queue->id);
+				"fail to read msg from queue %d", queue_id);
 			*status = err | -EINVAL;
 			goto clean_up;
 		}
@@ -282,7 +287,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 
 	if (!NV_SEC2_UNITID_IS_VALID(msg->hdr.unit_id)) {
 		nvgpu_err(g, "read invalid unit_id %d from queue %d",
-			msg->hdr.unit_id, queue->id);
+			msg->hdr.unit_id, queue_id);
 		*status = -EINVAL;
 		goto clean_up;
 	}
@@ -293,7 +298,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 			read_size, &bytes_read);
 		if ((err != 0) || (bytes_read != read_size)) {
 			nvgpu_err(g,
-				"fail to read msg from queue %d", queue->id);
+				"fail to read msg from queue %d", queue_id);
 			*status = err;
 			goto clean_up;
 		}
diff --git a/drivers/gpu/nvgpu/include/nvgpu/falcon.h b/drivers/gpu/nvgpu/include/nvgpu/falcon.h
index ab92081d8..baf95763a 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/falcon.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/falcon.h
@@ -280,6 +280,9 @@ int nvgpu_falcon_queue_push(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_queue *queue, void *data, u32 size);
 void nvgpu_falcon_queue_free(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_queue *queue);
+u32 nvgpu_falcon_queue_get_id(struct nvgpu_falcon_queue *queue);
+u32 nvgpu_falcon_queue_get_index(struct nvgpu_falcon_queue *queue);
+u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue);
 
 int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id);
 
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
index 53e505f02..1c2651d97 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
@@ -814,8 +814,8 @@ struct gpu_ops {
 		u32 (*pmu_allocation_get_fb_size)(
 				struct nvgpu_pmu *pmu, void *pmu_alloc_ptr);
 		void (*get_pmu_init_msg_pmu_queue_params)(
-				struct nvgpu_falcon_queue *queue, u32 id,
-				void *pmu_init_msg);
+				struct nvgpu_falcon_queue *queue,
+				u32 id, void *pmu_init_msg);
 		void *(*get_pmu_msg_pmu_init_msg_ptr)(
 				struct pmu_init_msg *init);
 		u16 (*get_pmu_init_msg_pmu_sw_mg_off)(
diff --git a/drivers/gpu/nvgpu/tu104/sec2_tu104.c b/drivers/gpu/nvgpu/tu104/sec2_tu104.c
index de1deae0a..dbf9f5abe 100644
--- a/drivers/gpu/nvgpu/tu104/sec2_tu104.c
+++ b/drivers/gpu/nvgpu/tu104/sec2_tu104.c
@@ -240,17 +240,21 @@ int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	u32 *head, bool set)
 {
 	u32 queue_head_size = 8;
+	u32 queue_id, queue_index;
 
-	if (queue->id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
-		if (queue->index >= queue_head_size) {
+	queue_id = nvgpu_falcon_queue_get_id(queue);
+	queue_index = nvgpu_falcon_queue_get_index(queue);
+
+	if (queue_id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
+		if (queue_index >= queue_head_size) {
 			return -EINVAL;
 		}
 
 		if (!set) {
 			*head = psec_queue_head_address_v(
-				gk20a_readl(g, psec_queue_head_r(queue->index)));
+				gk20a_readl(g, psec_queue_head_r(queue_index)));
 		} else {
-			gk20a_writel(g, psec_queue_head_r(queue->index),
+			gk20a_writel(g, psec_queue_head_r(queue_index),
 				psec_queue_head_address_f(*head));
 		}
 	} else {
@@ -271,18 +275,22 @@ int tu104_sec2_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	u32 *tail, bool set)
 {
 	u32 queue_tail_size = 8;
+	u32 queue_id, queue_index;
 
-	if (queue->id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
-		if (queue->index >= queue_tail_size) {
+	queue_id = nvgpu_falcon_queue_get_id(queue);
+	queue_index = nvgpu_falcon_queue_get_index(queue);
+
+	if (queue_id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
+		if (queue_index >= queue_tail_size) {
 			return -EINVAL;
 		}
 
 		if (!set) {
 			*tail = psec_queue_tail_address_v(
-				gk20a_readl(g, psec_queue_tail_r(queue->index)));
+				gk20a_readl(g, psec_queue_tail_r(queue_index)));
 		} else {
 			gk20a_writel(g,
-				psec_queue_tail_r(queue->index),
+				psec_queue_tail_r(queue_index),
 				psec_queue_tail_address_f(*tail));
 		}
 	} else {
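
For reference, a minimal standalone sketch of the accessor pattern this patch applies. The getter bodies match the new falcon_queue.c functions above; everything else (the stand-in struct layout, the typedef, and the dump_queue_info() helper) is a hypothetical mirror for illustration only, not nvgpu source. The real struct nvgpu_falcon_queue lives behind the nvgpu falcon headers.

/* Illustrative sketch only: stand-in definitions, not nvgpu code. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Hypothetical mirror of the private queue state the getters wrap. */
struct nvgpu_falcon_queue {
	u32 id;		/* logical queue ID (command vs. message queue) */
	u32 index;	/* HW register index used for head/tail lookup */
	u32 size;	/* queue size in bytes */
};

/* Accessors as added by this patch: trivial reads, but they keep
 * every caller off the struct layout so it can later become opaque. */
u32 nvgpu_falcon_queue_get_id(struct nvgpu_falcon_queue *queue)
{
	return queue->id;
}

u32 nvgpu_falcon_queue_get_index(struct nvgpu_falcon_queue *queue)
{
	return queue->index;
}

u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue)
{
	return queue->size;
}

/* Hypothetical caller, mirroring the pattern used in the patch:
 * read each field once through a getter into a local, then use
 * only the locals from there on. */
static void dump_queue_info(struct nvgpu_falcon_queue *queue)
{
	u32 queue_id = nvgpu_falcon_queue_get_id(queue);
	u32 queue_size = nvgpu_falcon_queue_get_size(queue);

	/* Half the queue is headroom: a command may not exceed it,
	 * matching the cmd->hdr.size > (queue_size >> 1) checks. */
	printf("queue %u: size %u bytes, max cmd %u bytes\n",
	       (unsigned)queue_id, (unsigned)queue_size,
	       (unsigned)(queue_size >> 1));
}

int main(void)
{
	struct nvgpu_falcon_queue q = { .id = 0, .index = 0, .size = 256 };

	dump_queue_info(&q);
	return 0;
}

The design point the sketch demonstrates: once no caller dereferences queue->id, queue->index, or queue->size directly, the struct definition can move out of the shared header entirely, and the falcon queue code is free to change its internal representation without touching PMU or SEC2 code.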