Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add falcon queue field getters
To eliminate direct accesses to the falcon queue members id, index and
size, introduce the getters falcon_queue_get_id|index|size.

JIRA NVGPU-1459

Change-Id: Ic01e36bde0bad522087f49e5c70ac875f58ca10f
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1958400
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: 8ebf2f0f26
Commit: d13059701f
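The change at each call site is mechanical: every direct read of queue->id,
queue->index or queue->size is routed through the new accessor. A minimal
before/after sketch of one such call site (illustrative only, reusing
identifiers that appear in this diff; it assumes g, queue, head and
queue_head_size are in scope as in gk20a_pmu_queue_head(), and the real code
returns -EINVAL on an out-of-range index rather than folding the checks into
one condition):

	/* Before: engine code read struct nvgpu_falcon_queue members directly. */
	if (PMU_IS_COMMAND_QUEUE(queue->id) &&
	    queue->index < queue_head_size) {
		*head = pwr_pmu_queue_head_address_v(gk20a_readl(g,
			g->ops.pmu.pmu_get_queue_head(queue->index)));
	}

	/* After: the same logic goes through the getters, so knowledge of the
	 * queue layout stays inside the common falcon queue code. */
	u32 queue_id = nvgpu_falcon_queue_get_id(queue);
	u32 queue_index = nvgpu_falcon_queue_get_index(queue);

	if (PMU_IS_COMMAND_QUEUE(queue_id) &&
	    queue_index < queue_head_size) {
		*head = pwr_pmu_queue_head_address_v(gk20a_readl(g,
			g->ops.pmu.pmu_get_queue_head(queue_index)));
	}

Because the getters return the fields by value, a later patch could make
struct nvgpu_falcon_queue opaque to the engine units (PMU, SEC2) without
touching these call sites again.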
@@ -472,6 +472,21 @@ void nvgpu_falcon_queue_free(struct nvgpu_falcon *flcn,
 	(void) memset(queue, 0, sizeof(struct nvgpu_falcon_queue));
 }
 
+u32 nvgpu_falcon_queue_get_id(struct nvgpu_falcon_queue *queue)
+{
+	return queue->id;
+}
+
+u32 nvgpu_falcon_queue_get_index(struct nvgpu_falcon_queue *queue)
+{
+	return queue->index;
+}
+
+u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue)
+{
+	return queue->size;
+}
+
 int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_queue *queue)
 {
@@ -395,6 +395,8 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	u32 *head, bool set)
 {
 	u32 queue_head_size = 0;
+	u32 queue_id = nvgpu_falcon_queue_get_id(queue);
+	u32 queue_index = nvgpu_falcon_queue_get_index(queue);
 
 	if (g->ops.pmu.pmu_get_queue_head_size != NULL) {
 		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
@@ -402,19 +404,19 @@ int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 
 	BUG_ON((head == NULL) || (queue_head_size == 0U));
 
-	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
+	if (PMU_IS_COMMAND_QUEUE(queue_id)) {
 
-		if (queue->index >= queue_head_size) {
+		if (queue_index >= queue_head_size) {
 			return -EINVAL;
 		}
 
 		if (!set) {
 			*head = pwr_pmu_queue_head_address_v(
 				gk20a_readl(g,
-					g->ops.pmu.pmu_get_queue_head(queue->index)));
+					g->ops.pmu.pmu_get_queue_head(queue_index)));
 		} else {
 			gk20a_writel(g,
-				g->ops.pmu.pmu_get_queue_head(queue->index),
+				g->ops.pmu.pmu_get_queue_head(queue_index),
 				pwr_pmu_queue_head_address_f(*head));
 		}
 	} else {
@@ -435,6 +437,8 @@ int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	u32 *tail, bool set)
 {
 	u32 queue_tail_size = 0;
+	u32 queue_id = nvgpu_falcon_queue_get_id(queue);
+	u32 queue_index = nvgpu_falcon_queue_get_index(queue);
 
 	if (g->ops.pmu.pmu_get_queue_tail_size != NULL) {
 		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
@@ -442,18 +446,18 @@ int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 
 	BUG_ON((tail == NULL) || (queue_tail_size == 0U));
 
-	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
+	if (PMU_IS_COMMAND_QUEUE(queue_id)) {
 
-		if (queue->index >= queue_tail_size) {
+		if (queue_index >= queue_tail_size) {
 			return -EINVAL;
 		}
 
 		if (!set) {
 			*tail = pwr_pmu_queue_tail_address_v(gk20a_readl(g,
-				g->ops.pmu.pmu_get_queue_tail(queue->index)));
+				g->ops.pmu.pmu_get_queue_tail(queue_index)));
 		} else {
 			gk20a_writel(g,
-				g->ops.pmu.pmu_get_queue_tail(queue->index),
+				g->ops.pmu.pmu_get_queue_tail(queue_index),
 				pwr_pmu_queue_tail_address_f(*tail));
 		}
 
@@ -153,6 +153,7 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct nvgpu_falcon_queue *queue;
+	u32 queue_size;
 	u32 in_size, out_size;
 
 	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id)) {
@@ -160,11 +161,12 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	}
 
 	queue = &pmu->queue[queue_id];
+	queue_size = nvgpu_falcon_queue_get_size(queue);
 	if (cmd->hdr.size < PMU_CMD_HDR_SIZE) {
 		goto invalid_cmd;
 	}
 
-	if (cmd->hdr.size > (queue->size >> 1)) {
+	if (cmd->hdr.size > (queue_size >> 1)) {
 		goto invalid_cmd;
 	}
 
@@ -633,6 +635,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	u32 read_size, bytes_read;
+	u32 queue_id;
 	int err;
 
 	*status = 0;
@@ -641,10 +644,12 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 		return false;
 	}
 
+	queue_id = nvgpu_falcon_queue_get_id(queue);
+
 	err = nvgpu_falcon_queue_pop(pmu->flcn, queue, &msg->hdr,
 		PMU_MSG_HDR_SIZE, &bytes_read);
 	if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
-		nvgpu_err(g, "fail to read msg from queue %d", queue->id);
+		nvgpu_err(g, "fail to read msg from queue %d", queue_id);
 		*status = err | -EINVAL;
 		goto clean_up;
 	}
@@ -652,7 +657,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 	if (msg->hdr.unit_id == PMU_UNIT_REWIND) {
 		err = nvgpu_falcon_queue_rewind(pmu->flcn, queue);
 		if (err != 0) {
-			nvgpu_err(g, "fail to rewind queue %d", queue->id);
+			nvgpu_err(g, "fail to rewind queue %d", queue_id);
 			*status = err | -EINVAL;
 			goto clean_up;
 		}
@@ -661,7 +666,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 			PMU_MSG_HDR_SIZE, &bytes_read);
 		if (err != 0 || bytes_read != PMU_MSG_HDR_SIZE) {
 			nvgpu_err(g,
-				"fail to read msg from queue %d", queue->id);
+				"fail to read msg from queue %d", queue_id);
 			*status = err | -EINVAL;
 			goto clean_up;
 		}
@@ -669,7 +674,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 
 	if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) {
 		nvgpu_err(g, "read invalid unit_id %d from queue %d",
-			msg->hdr.unit_id, queue->id);
+			msg->hdr.unit_id, queue_id);
 		*status = -EINVAL;
 		goto clean_up;
 	}
@@ -680,7 +685,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu,
 		read_size, &bytes_read);
 	if (err != 0 || bytes_read != read_size) {
 		nvgpu_err(g,
-			"fail to read msg from queue %d", queue->id);
+			"fail to read msg from queue %d", queue_id);
 		*status = err;
 		goto clean_up;
 	}
@@ -82,6 +82,7 @@ static bool sec2_validate_cmd(struct nvgpu_sec2 *sec2,
 {
 	struct gk20a *g = sec2->g;
 	struct nvgpu_falcon_queue *queue;
+	u32 queue_size;
 
 	if (queue_id != SEC2_NV_CMDQ_LOG_ID) {
 		goto invalid_cmd;
@@ -92,7 +93,8 @@ static bool sec2_validate_cmd(struct nvgpu_sec2 *sec2,
 		goto invalid_cmd;
 	}
 
-	if (cmd->hdr.size > (queue->size >> 1)) {
+	queue_size = nvgpu_falcon_queue_get_size(queue);
+	if (cmd->hdr.size > (queue_size >> 1)) {
 		goto invalid_cmd;
 	}
 
@@ -245,6 +247,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 {
 	struct gk20a *g = sec2->g;
 	u32 read_size, bytes_read;
+	u32 queue_id;
 	int err;
 
 	*status = 0U;
@@ -253,10 +256,12 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 		return false;
 	}
 
+	queue_id = nvgpu_falcon_queue_get_id(queue);
+
 	err = nvgpu_falcon_queue_pop(sec2->flcn, queue, &msg->hdr,
 		PMU_MSG_HDR_SIZE, &bytes_read);
 	if ((err != 0) || (bytes_read != PMU_MSG_HDR_SIZE)) {
-		nvgpu_err(g, "fail to read msg from queue %d", queue->id);
+		nvgpu_err(g, "fail to read msg from queue %d", queue_id);
 		*status = err | -EINVAL;
 		goto clean_up;
 	}
@@ -264,7 +269,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 	if (msg->hdr.unit_id == NV_SEC2_UNIT_REWIND) {
 		err = nvgpu_falcon_queue_rewind(sec2->flcn, queue);
 		if (err != 0) {
-			nvgpu_err(g, "fail to rewind queue %d", queue->id);
+			nvgpu_err(g, "fail to rewind queue %d", queue_id);
 			*status = err | -EINVAL;
 			goto clean_up;
 		}
@@ -274,7 +279,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 			PMU_MSG_HDR_SIZE, &bytes_read);
 		if ((err != 0) || (bytes_read != PMU_MSG_HDR_SIZE)) {
 			nvgpu_err(g,
-				"fail to read msg from queue %d", queue->id);
+				"fail to read msg from queue %d", queue_id);
 			*status = err | -EINVAL;
 			goto clean_up;
 		}
@@ -282,7 +287,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 
 	if (!NV_SEC2_UNITID_IS_VALID(msg->hdr.unit_id)) {
 		nvgpu_err(g, "read invalid unit_id %d from queue %d",
-			msg->hdr.unit_id, queue->id);
+			msg->hdr.unit_id, queue_id);
 		*status = -EINVAL;
 		goto clean_up;
 	}
@@ -293,7 +298,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 		read_size, &bytes_read);
 	if ((err != 0) || (bytes_read != read_size)) {
 		nvgpu_err(g,
-			"fail to read msg from queue %d", queue->id);
+			"fail to read msg from queue %d", queue_id);
 		*status = err;
 		goto clean_up;
 	}
@@ -280,6 +280,9 @@ int nvgpu_falcon_queue_push(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_queue *queue, void *data, u32 size);
 void nvgpu_falcon_queue_free(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_queue *queue);
+u32 nvgpu_falcon_queue_get_id(struct nvgpu_falcon_queue *queue);
+u32 nvgpu_falcon_queue_get_index(struct nvgpu_falcon_queue *queue);
+u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue);
 
 int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id);
 
@@ -814,8 +814,8 @@ struct gpu_ops {
 		u32 (*pmu_allocation_get_fb_size)(
 				struct nvgpu_pmu *pmu, void *pmu_alloc_ptr);
 		void (*get_pmu_init_msg_pmu_queue_params)(
-				struct nvgpu_falcon_queue *queue, u32 id,
-				void *pmu_init_msg);
+				struct nvgpu_falcon_queue *queue,
+				u32 id, void *pmu_init_msg);
 		void *(*get_pmu_msg_pmu_init_msg_ptr)(
 				struct pmu_init_msg *init);
 		u16 (*get_pmu_init_msg_pmu_sw_mg_off)(
@@ -240,17 +240,21 @@ int tu104_sec2_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	u32 *head, bool set)
 {
 	u32 queue_head_size = 8;
+	u32 queue_id, queue_index;
 
-	if (queue->id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
-		if (queue->index >= queue_head_size) {
+	queue_id = nvgpu_falcon_queue_get_id(queue);
+	queue_index = nvgpu_falcon_queue_get_index(queue);
+
+	if (queue_id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
+		if (queue_index >= queue_head_size) {
 			return -EINVAL;
 		}
 
 		if (!set) {
 			*head = psec_queue_head_address_v(
-				gk20a_readl(g, psec_queue_head_r(queue->index)));
+				gk20a_readl(g, psec_queue_head_r(queue_index)));
 		} else {
-			gk20a_writel(g, psec_queue_head_r(queue->index),
+			gk20a_writel(g, psec_queue_head_r(queue_index),
 				psec_queue_head_address_f(*head));
 		}
 	} else {
@@ -271,18 +275,22 @@ int tu104_sec2_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
 	u32 *tail, bool set)
 {
 	u32 queue_tail_size = 8;
+	u32 queue_id, queue_index;
 
-	if (queue->id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
-		if (queue->index >= queue_tail_size) {
+	queue_id = nvgpu_falcon_queue_get_id(queue);
+	queue_index = nvgpu_falcon_queue_get_index(queue);
+
+	if (queue_id <= SEC2_NV_CMDQ_LOG_ID__LAST) {
+		if (queue_index >= queue_tail_size) {
 			return -EINVAL;
 		}
 
 		if (!set) {
 			*tail = psec_queue_tail_address_v(
-				gk20a_readl(g, psec_queue_tail_r(queue->index)));
+				gk20a_readl(g, psec_queue_tail_r(queue_index)));
 		} else {
 			gk20a_writel(g,
-				psec_queue_tail_r(queue->index),
+				psec_queue_tail_r(queue_index),
 				psec_queue_tail_address_f(*tail));
 		}
 	} else {