gpu: nvgpu: make flcn queues struct nvgpu_falcon_queue*

To allow moving the struct nvgpu_falcon_queue members into the falcon
private header, convert the falcon queues held by the PMU and SEC2 units
to struct nvgpu_falcon_queue pointers, allocated in
nvgpu_falcon_queue_init() and freed in nvgpu_falcon_queue_free()
(see the sketch below).

JIRA NVGPU-1594

Change-Id: Icf8ef929f8256aadd46956164bd418958ba4756f
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1968243
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:       Sagar Kamble <skamble@nvidia.com>
Date:         2018-12-06 16:38:42 +05:30
Committed-by: mobile promotions
Parent:       5efc446a06
Commit:       d2242ac909

10 changed files with 127 additions and 33 deletions
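
In short: the owning structs stop embedding queue storage and hold opaque
pointers instead, so the queue internals can later move out of the public
header. A minimal before/after sketch, abridged from the pmu.h hunk below:

	/* Before: queue storage embedded in the owner, so the full
	 * struct nvgpu_falcon_queue definition must stay public. */
	struct nvgpu_pmu {
		struct nvgpu_falcon_queue queue[PMU_QUEUE_COUNT];
		/* ... */
	};

	/* After: the owner holds only pointers; nvgpu_falcon_queue_init()
	 * allocates each queue, and the struct definition can become
	 * private to the falcon code. */
	struct nvgpu_pmu {
		struct nvgpu_falcon_queue *queue[PMU_QUEUE_COUNT];
		/* ... */
	};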

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -330,6 +330,10 @@ int nvgpu_falcon_queue_push(struct nvgpu_falcon *flcn,
 {
 	int err = 0;
 
+	if ((flcn == NULL) || (queue == NULL)) {
+		return -EINVAL;
+	}
+
 	if (queue->oflag != OFLAG_WRITE) {
 		nvgpu_err(flcn->g, "flcn-%d, queue-%d not opened for write",
 			flcn->flcn_id, queue->id);
@@ -371,6 +375,10 @@ int nvgpu_falcon_queue_pop(struct nvgpu_falcon *flcn,
 {
 	int err = 0;
 
+	if ((flcn == NULL) || (queue == NULL)) {
+		return -EINVAL;
+	}
+
 	if (queue->oflag != OFLAG_READ) {
 		nvgpu_err(flcn->g, "flcn-%d, queue-%d, not opened for read",
 			flcn->flcn_id, queue->id);
@@ -412,6 +420,10 @@ int nvgpu_falcon_queue_rewind(struct nvgpu_falcon *flcn,
 {
 	int err = 0;
 
+	if ((flcn == NULL) || (queue == NULL)) {
+		return -EINVAL;
+	}
+
 	/* acquire mutex */
 	nvgpu_mutex_acquire(&queue->mutex);
@@ -433,6 +445,10 @@ bool nvgpu_falcon_queue_is_empty(struct nvgpu_falcon *flcn,
 	u32 q_tail = 0;
 	int err = 0;
 
+	if ((flcn == NULL) || (queue == NULL)) {
+		return true;
+	}
+
 	/* acquire mutex */
 	nvgpu_mutex_acquire(&queue->mutex);
@@ -458,16 +474,25 @@ exit:
 }
 
 void nvgpu_falcon_queue_free(struct nvgpu_falcon *flcn,
-	struct nvgpu_falcon_queue *queue)
+	struct nvgpu_falcon_queue **queue_p)
 {
-	nvgpu_log(flcn->g, gpu_dbg_pmu, "flcn id-%d q-id %d: index %d ",
-		flcn->flcn_id, queue->id, queue->index);
+	struct nvgpu_falcon_queue *queue = NULL;
+	struct gk20a *g = flcn->g;
+
+	if ((queue_p == NULL) || (*queue_p == NULL)) {
+		return;
+	}
+
+	queue = *queue_p;
+	nvgpu_pmu_dbg(g, "flcn id-%d q-id %d: index %d ",
+		flcn->flcn_id, queue->id, queue->index);
 
 	/* destroy mutex */
 	nvgpu_mutex_destroy(&queue->mutex);
 
 	/* clear data*/
 	(void) memset(queue, 0, sizeof(struct nvgpu_falcon_queue));
+	nvgpu_kfree(g, queue);
+	*queue_p = NULL;
 }
 
 u32 nvgpu_falcon_queue_get_id(struct nvgpu_falcon_queue *queue)
@@ -486,12 +511,24 @@ u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue)
 }
 
 int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
-	struct nvgpu_falcon_queue *queue,
+	struct nvgpu_falcon_queue **queue_p,
 	struct nvgpu_falcon_queue_params params)
 {
+	struct nvgpu_falcon_queue *queue = NULL;
+	struct gk20a *g = flcn->g;
 	int err = 0;
 
+	if (queue_p == NULL) {
+		return -EINVAL;
+	}
+
+	queue = (struct nvgpu_falcon_queue *)
+		nvgpu_kmalloc(g, sizeof(struct nvgpu_falcon_queue));
+	if (queue == NULL) {
+		return -ENOMEM;
+	}
+
 	queue->id = params.id;
 	queue->index = params.index;
 	queue->offset = params.offset;
@@ -514,19 +551,23 @@ int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
 		break;
 	default:
 		err = -EINVAL;
-		goto exit;
 		break;
 	}
 
+	if (err != 0) {
+		nvgpu_err(flcn->g, "flcn-%d queue-%d, init failed",
+			flcn->flcn_id, queue->id);
+		nvgpu_kfree(g, queue);
+		goto exit;
+	}
+
 	/* init mutex */
 	err = nvgpu_mutex_init(&queue->mutex);
-
-exit:
 	if (err != 0) {
 		nvgpu_err(flcn->g, "flcn-%d queue-%d, init failed",
 			flcn->flcn_id, queue->id);
+		goto exit;
 	}
 
+	*queue_p = queue;
+
+exit:
 	return err;
 }
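
A caller-side sketch of the new contract (hypothetical caller; flcn and
params stand in for values set up elsewhere): init allocates the queue and
publishes it through queue_p only on success; free releases it and NULLs
the caller's pointer.

	static int example_queue_setup(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_queue_params params)
	{
		struct nvgpu_falcon_queue *queue = NULL;
		int err;

		err = nvgpu_falcon_queue_init(flcn, &queue, params);
		if (err != 0) {
			return err; /* queue is still NULL, nothing to clean up */
		}

		/* ... use the opaque handle via queue push/pop/rewind ... */

		nvgpu_falcon_queue_free(flcn, &queue);
		/* queue == NULL here, so a repeated free is a harmless no-op */
		return 0;
	}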

View File

@@ -371,7 +371,8 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 	struct pmu_v *pv = &g->ops.pmu_ver;
 	union pmu_init_msg_pmu *init;
 	struct pmu_sha1_gid_data gid_data;
-	u32 i, tail = 0;
+	u32 i, j, tail = 0;
+	int err;
 
 	nvgpu_log_fn(g, " ");
@@ -421,7 +422,14 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 	}
 
 	for (i = 0; i < PMU_QUEUE_COUNT; i++) {
-		nvgpu_pmu_queue_init(pmu, i, init);
+		err = nvgpu_pmu_queue_init(pmu, i, init);
+		if (err != 0) {
+			for (j = 0; j < i; j++) {
+				nvgpu_pmu_queue_free(pmu, j);
+			}
+			nvgpu_err(g, "PMU queue init failed");
+			return err;
+		}
 	}
 
 	if (!nvgpu_alloc_initialized(&pmu->dmem)) {
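
The error path above is the classic partial-initialization unwind: queue i
failed inside init and was never published, so only the 0..i-1 entries that
did get published are freed. A generic sketch of the idiom, with
hypothetical thing_init()/thing_free() standing in for the queue init/free
pair:

	/* Hypothetical init/fini pair, standing in for
	 * nvgpu_pmu_queue_init()/nvgpu_pmu_queue_free(). */
	struct thing;
	int thing_init(struct thing **t);
	void thing_free(struct thing **t);

	static int init_all(struct thing *arr[], unsigned int n)
	{
		unsigned int i, j;
		int err;

		for (i = 0; i < n; i++) {
			err = thing_init(&arr[i]);
			if (err != 0) {
				/* entry i was never published; unwind only
				 * the 0..i-1 entries that were */
				for (j = 0; j < i; j++) {
					thing_free(&arr[j]);
				}
				return err;
			}
		}

		return 0;
	}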
@@ -587,7 +595,7 @@
 	nvgpu_mutex_release(&pmu->isr_mutex);
 
 	for (i = 0U; i < PMU_QUEUE_COUNT; i++) {
-		nvgpu_falcon_queue_free(pmu->flcn, &pmu->queue[i]);
+		nvgpu_pmu_queue_free(pmu, i);
 	}
 
 	nvgpu_pmu_state_change(g, PMU_STATE_OFF, false);

View File

@@ -1,7 +1,7 @@
 /*
 * GK20A PMU (aka. gPMU outside gk20a context)
 *
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -746,7 +746,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 	gk20a_writel(g, pwr_falcon_irqsclr_r(), intr);
 
 	if (recheck) {
-		queue = &pmu->queue[PMU_MESSAGE_QUEUE];
+		queue = pmu->queue[PMU_MESSAGE_QUEUE];
 		if (!nvgpu_falcon_queue_is_empty(pmu->flcn, queue)) {
 			gk20a_writel(g, pwr_falcon_irqsset_r(),
 				pwr_falcon_irqsset_swgen0_set_f());

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -149,6 +149,24 @@ exit:
 	return err;
 }
 
+void nvgpu_pmu_queue_free(struct nvgpu_pmu *pmu, u32 id)
+{
+	struct gk20a *g = gk20a_from_pmu(pmu);
+
+	if (!PMU_IS_COMMAND_QUEUE(id) && !PMU_IS_MESSAGE_QUEUE(id)) {
+		nvgpu_err(g, "invalid queue-id %d", id);
+		goto exit;
+	}
+
+	if (pmu->queue[id] == NULL) {
+		goto exit;
+	}
+
+	nvgpu_falcon_queue_free(pmu->flcn, &pmu->queue[id]);
+
+exit:
+	return;
+}
+
 static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	struct pmu_msg *msg, struct pmu_payload *payload,
 	u32 queue_id)
@@ -162,7 +180,7 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 		goto invalid_cmd;
 	}
 
-	queue = &pmu->queue[queue_id];
+	queue = pmu->queue[queue_id];
 	queue_size = nvgpu_falcon_queue_get_size(queue);
 	if (cmd->hdr.size < PMU_CMD_HDR_SIZE) {
 		goto invalid_cmd;
@@ -243,7 +261,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	nvgpu_log_fn(g, " ");
 
-	queue = &pmu->queue[queue_id];
+	queue = pmu->queue[queue_id];
 	nvgpu_timeout_init(g, &timeout, U32_MAX, NVGPU_TIMER_CPU_TIMER);
 
 	do {
@@ -744,7 +762,7 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
 	}
 
 	while (pmu_read_message(pmu,
-		&pmu->queue[PMU_MESSAGE_QUEUE], &msg, &status)) {
+		pmu->queue[PMU_MESSAGE_QUEUE], &msg, &status)) {
 
 		nvgpu_pmu_dbg(g, "read msg hdr: ");
 		nvgpu_pmu_dbg(g, "unit_id = 0x%08x, size = 0x%08x",
View File

@@ -79,6 +79,24 @@ exit:
 	return err;
 }
 
+void nvgpu_sec2_queue_free(struct nvgpu_sec2 *sec2, u32 id)
+{
+	struct gk20a *g = sec2->g;
+
+	if (!(id == SEC2_NV_CMDQ_LOG_ID) && !(id == SEC2_NV_MSGQ_LOG_ID)) {
+		nvgpu_err(g, "invalid queue-id %d", id);
+		goto exit;
+	}
+
+	if (sec2->queue[id] == NULL) {
+		goto exit;
+	}
+
+	nvgpu_falcon_queue_free(sec2->flcn, &sec2->queue[id]);
+
+exit:
+	return;
+}
+
 static void sec2_seq_init(struct nvgpu_sec2 *sec2)
 {
 	u32 i = 0;
@@ -177,7 +195,7 @@ int nvgpu_sec2_destroy(struct gk20a *g)
 	nvgpu_mutex_release(&sec2->isr_mutex);
 
 	for (i = 0; i < SEC2_QUEUE_NUM; i++) {
-		nvgpu_falcon_queue_free(sec2->flcn, &sec2->queue[i]);
+		nvgpu_sec2_queue_free(sec2, i);
 	}
 
 	sec2->sec2_ready = false;

View File

@@ -88,7 +88,7 @@ static bool sec2_validate_cmd(struct nvgpu_sec2 *sec2,
 		goto invalid_cmd;
 	}
 
-	queue = &sec2->queue[queue_id];
+	queue = sec2->queue[queue_id];
 	if (cmd->hdr.size < PMU_CMD_HDR_SIZE) {
 		goto invalid_cmd;
 	}
@@ -123,7 +123,7 @@ static int sec2_write_cmd(struct nvgpu_sec2 *sec2,
 	nvgpu_log_fn(g, " ");
 
-	queue = &sec2->queue[queue_id];
+	queue = sec2->queue[queue_id];
 	nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
 
 	do {
@@ -334,7 +334,7 @@ static int sec2_process_init_msg(struct nvgpu_sec2 *sec2,
 {
 	struct gk20a *g = sec2->g;
 	struct sec2_init_msg_sec2_init *sec2_init;
-	u32 i, tail = 0;
+	u32 i, j, tail = 0;
 	int err = 0;
 
 	g->ops.sec2.msgq_tail(g, sec2, &tail, QUEUE_GET);
@@ -369,7 +369,14 @@ static int sec2_process_init_msg(struct nvgpu_sec2 *sec2,
 	sec2_init = &msg->msg.init.sec2_init;
 
 	for (i = 0; i < SEC2_QUEUE_NUM; i++) {
-		nvgpu_sec2_queue_init(sec2, i, sec2_init);
+		err = nvgpu_sec2_queue_init(sec2, i, sec2_init);
+		if (err != 0) {
+			for (j = 0; j < i; j++) {
+				nvgpu_sec2_queue_free(sec2, j);
+			}
+			nvgpu_err(g, "SEC2 queue init failed");
+			return err;
+		}
 	}
 
 	if (!nvgpu_alloc_initialized(&sec2->dmem)) {
@@ -404,7 +411,7 @@ int nvgpu_sec2_process_message(struct nvgpu_sec2 *sec2)
 	}
 
 	while (sec2_read_message(sec2,
-		&sec2->queue[SEC2_NV_MSGQ_LOG_ID], &msg, &status)) {
+		sec2->queue[SEC2_NV_MSGQ_LOG_ID], &msg, &status)) {
 
 		nvgpu_sec2_dbg(g, "read msg hdr: ");
 		nvgpu_sec2_dbg(g, "unit_id = 0x%08x, size = 0x%08x",

View File

@@ -278,7 +278,7 @@ u32 nvgpu_falcon_get_id(struct nvgpu_falcon *flcn);
 
 /* queue public functions */
 int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
-	struct nvgpu_falcon_queue *queue,
+	struct nvgpu_falcon_queue **queue_p,
 	struct nvgpu_falcon_queue_params params);
 bool nvgpu_falcon_queue_is_empty(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_queue *queue);
@@ -290,7 +290,7 @@ int nvgpu_falcon_queue_pop(struct nvgpu_falcon *flcn,
 int nvgpu_falcon_queue_push(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_queue *queue, void *data, u32 size);
 void nvgpu_falcon_queue_free(struct nvgpu_falcon *flcn,
-	struct nvgpu_falcon_queue *queue);
+	struct nvgpu_falcon_queue **queue_p);
 u32 nvgpu_falcon_queue_get_id(struct nvgpu_falcon_queue *queue);
 u32 nvgpu_falcon_queue_get_index(struct nvgpu_falcon_queue *queue);
 u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue);

View File

@@ -324,7 +324,7 @@ struct nvgpu_pmu {
 	struct pmu_sha1_gid gid_info;
 
-	struct nvgpu_falcon_queue queue[PMU_QUEUE_COUNT];
+	struct nvgpu_falcon_queue *queue[PMU_QUEUE_COUNT];
 
 	struct pmu_sequence *seq;
 	unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE];
@@ -429,6 +429,7 @@ int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
 int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu, u32 id,
 	union pmu_init_msg_pmu *init);
+void nvgpu_pmu_queue_free(struct nvgpu_pmu *pmu, u32 id);
 
 /* send a cmd to pmu */
 int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,

View File

@@ -71,7 +71,7 @@ struct nvgpu_sec2 {
 	struct nvgpu_falcon *flcn;
 	u32 falcon_id;
 
-	struct nvgpu_falcon_queue queue[SEC2_QUEUE_NUM];
+	struct nvgpu_falcon_queue *queue[SEC2_QUEUE_NUM];
 
 	struct sec2_sequence *seq;
 	unsigned long sec2_seq_tbl[SEC2_SEQ_TBL_SIZE];
@@ -111,5 +111,6 @@ int nvgpu_init_sec2_support(struct gk20a *g);
 int nvgpu_sec2_destroy(struct gk20a *g);
 int nvgpu_sec2_queue_init(struct nvgpu_sec2 *sec2, u32 id,
 	struct sec2_init_msg_sec2_init *init);
+void nvgpu_sec2_queue_free(struct nvgpu_sec2 *sec2, u32 id);
 
 #endif /* NVGPU_SEC2_H */

View File

@@ -425,7 +425,7 @@ void tu104_sec2_isr(struct gk20a *g)
 	gk20a_writel(g, psec_falcon_irqsclr_r(), intr);
 
 	if (recheck) {
-		queue = &sec2->queue[SEC2_NV_MSGQ_LOG_ID];
+		queue = sec2->queue[SEC2_NV_MSGQ_LOG_ID];
 		if (!nvgpu_falcon_queue_is_empty(sec2->flcn, queue)) {
 			gk20a_writel(g, psec_falcon_irqsset_r(),
 				psec_falcon_irqsset_swgen0_set_f());