gpu: nvgpu: rename falcon queues to engine queues

As we plan to move the queue implementations out of the falcon unit,
rename them as follows (see the sketch after this list):
1. engine_mem_queue - Generic implementation.
2. engine_dmem_queue - DMEM queue implementation of engine_mem_queue.
3. engine_emem_queue - EMEM queue implementation of engine_mem_queue.
4. engine_fb_queue - FB queue implementation.
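
For orientation, a minimal sketch (not part of this change, and the
helper name is hypothetical) of how the renamed pieces relate; the
dispatch mirrors the switch in nvgpu_engine_mem_queue_init() further
down in this diff:

    /* Bind the queue-type specific ops into the generic mem queue. */
    static int engine_mem_queue_bind_ops(struct nvgpu_engine_mem_queue *queue)
    {
            switch (queue->queue_type) {
            case QUEUE_TYPE_DMEM:
                    engine_dmem_queue_init(queue);  /* binds DMEM push/pop */
                    return 0;
            case QUEUE_TYPE_EMEM:
                    engine_emem_queue_init(queue);  /* binds EMEM push/pop */
                    return 0;
            default:
                    /* FB queues use the separate nvgpu_engine_fb_queue_* API */
                    return -EINVAL;
            }
    }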

JIRA NVGPU-1994

Change-Id: Ic81dcc154b3383d9f75fe57cc01269bda2698b25
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2016288
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Sagar Kamble
Date: 2019-02-11 15:06:27 +05:30
Committed-by: mobile promotions
Parent: c5dde07a31
Commit: ece30fc2f9

19 changed files with 271 additions and 271 deletions

@@ -320,10 +320,10 @@ nvgpu-y += \
common/falcon/falcon_gp106.o \
common/falcon/falcon_gv100.o \
common/falcon/falcon_tu104.o \
common/falcon/falcon_queue.o \
common/falcon/falcon_dmem_queue.o \
common/falcon/falcon_emem_queue.o \
common/falcon/falcon_fb_queue.o \
common/falcon/engine_mem_queue.o \
common/falcon/engine_dmem_queue.o \
common/falcon/engine_emem_queue.o \
common/falcon/engine_fb_queue.o \
common/init/hal_init.o \
common/sec2/sec2.o \
common/sec2/sec2_ipc.o \

@@ -123,10 +123,10 @@ srcs += common/sim.c \
common/falcon/falcon_gp106.c \
common/falcon/falcon_gv100.c \
common/falcon/falcon_tu104.c \
common/falcon/falcon_queue.c \
common/falcon/falcon_dmem_queue.c \
common/falcon/falcon_emem_queue.c \
common/falcon/falcon_fb_queue.c \
common/falcon/engine_mem_queue.c \
common/falcon/engine_dmem_queue.c \
common/falcon/engine_emem_queue.c \
common/falcon/engine_fb_queue.c \
common/gr/ctxsw_prog/ctxsw_prog_gm20b.c \
common/gr/ctxsw_prog/ctxsw_prog_gp10b.c \
common/gr/ctxsw_prog/ctxsw_prog_gv11b.c \

@@ -23,13 +23,13 @@
#include <nvgpu/falcon.h>
#include <nvgpu/log.h>
#include "falcon_queue_priv.h"
#include "falcon_dmem_queue.h"
#include "engine_mem_queue_priv.h"
#include "engine_dmem_queue.h"
#include "falcon_priv.h"
/* DMEM-Q specific ops */
static int falcon_dmem_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 dst, void *data, u32 size)
static int engine_dmem_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, u32 dst, void *data, u32 size)
{
struct gk20a *g = queue->g;
int err = 0;
@@ -45,8 +45,8 @@ exit:
return err;
}
static int falcon_dmem_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 src, void *data, u32 size)
static int engine_dmem_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, u32 src, void *data, u32 size)
{
struct gk20a *g = queue->g;
int err = 0;
@@ -63,8 +63,8 @@ exit:
}
/* assign DMEM queue type specific ops */
void falcon_dmem_queue_init(struct nvgpu_falcon_queue *queue)
void engine_dmem_queue_init(struct nvgpu_engine_mem_queue *queue)
{
queue->push = falcon_dmem_queue_push;
queue->pop = falcon_dmem_queue_pop;
queue->push = engine_dmem_queue_push;
queue->pop = engine_dmem_queue_pop;
}

@@ -20,9 +20,9 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FALCON_DMEM_QUEUE_H
#define NVGPU_FALCON_DMEM_QUEUE_H
#ifndef NVGPU_ENGINE_DMEM_QUEUE_H
#define NVGPU_ENGINE_DMEM_QUEUE_H
void falcon_dmem_queue_init(struct nvgpu_falcon_queue *queue);
void engine_dmem_queue_init(struct nvgpu_engine_mem_queue *queue);
#endif /* NVGPU_FALCON_DMEM_QUEUE_H */
#endif /* NVGPU_ENGINE_DMEM_QUEUE_H */

@@ -23,13 +23,13 @@
#include <nvgpu/falcon.h>
#include <nvgpu/log.h>
#include "falcon_queue_priv.h"
#include "falcon_emem_queue.h"
#include "engine_mem_queue_priv.h"
#include "engine_emem_queue.h"
#include "falcon_priv.h"
/* EMEM-Q specific ops */
static int falcon_emem_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 dst, void *data, u32 size)
static int engine_emem_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, u32 dst, void *data, u32 size)
{
struct gk20a *g = queue->g;
int err = 0;
@@ -45,8 +45,8 @@ exit:
return err;
}
static int falcon_emem_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 src, void *data, u32 size)
static int engine_emem_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, u32 src, void *data, u32 size)
{
struct gk20a *g = queue->g;
int err = 0;
@@ -63,8 +63,8 @@ exit:
}
/* assign EMEM queue type specific ops */
void falcon_emem_queue_init(struct nvgpu_falcon_queue *queue)
void engine_emem_queue_init(struct nvgpu_engine_mem_queue *queue)
{
queue->push = falcon_emem_queue_push;
queue->pop = falcon_emem_queue_pop;
queue->push = engine_emem_queue_push;
queue->pop = engine_emem_queue_pop;
}

@@ -20,9 +20,9 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FALCON_EMEM_QUEUE_H
#define NVGPU_FALCON_EMEM_QUEUE_H
#ifndef NVGPU_ENGINE_EMEM_QUEUE_H
#define NVGPU_ENGINE_EMEM_QUEUE_H
void falcon_emem_queue_init(struct nvgpu_falcon_queue *queue);
void engine_emem_queue_init(struct nvgpu_engine_mem_queue *queue);
#endif /* NVGPU_FALCON_EMEM_QUEUE_H */
#endif /* NVGPU_ENGINE_EMEM_QUEUE_H */

@@ -29,14 +29,14 @@
#include <nvgpu/pmu.h>
#include <nvgpu/string.h>
#include <nvgpu/kmem.h>
#include <nvgpu/falcon_fb_queue.h>
#include <nvgpu/engine_fb_queue.h>
#include "falcon_priv.h"
#include "falcon_fb_queue_priv.h"
#include "engine_fb_queue_priv.h"
/* FB-Q ops */
static int falcon_fb_queue_head(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 *head, bool set)
static int engine_fb_queue_head(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, u32 *head, bool set)
{
int err = -EINVAL;
@@ -48,8 +48,8 @@ static int falcon_fb_queue_head(struct nvgpu_falcon *flcn,
return err;
}
static int falcon_fb_queue_tail(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 *tail, bool set)
static int engine_fb_queue_tail(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, u32 *tail, bool set)
{
struct gk20a *g = flcn->g;
int err = -EINVAL;
@@ -67,14 +67,14 @@ static int falcon_fb_queue_tail(struct nvgpu_falcon *flcn,
return err;
}
static inline u32 falcon_fb_queue_get_next(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 head)
static inline u32 engine_fb_queue_get_next(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, u32 head)
{
return (head + 1U) % queue->size;
}
static bool falcon_fb_queue_has_room(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue,
static bool engine_fb_queue_has_room(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue,
u32 size)
{
u32 head = 0;
@@ -94,14 +94,14 @@ static bool falcon_fb_queue_has_room(struct nvgpu_falcon *flcn,
goto exit;
}
next_head = falcon_fb_queue_get_next(flcn, queue, head);
next_head = engine_fb_queue_get_next(flcn, queue, head);
exit:
return next_head != tail;
}
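
As a hedged aside on the ring arithmetic above: positions advance modulo
queue->size, and the queue reports no room when the slot after head would
collide with tail, so one element always stays unused to tell a full ring
from an empty one (head == tail). A standalone sketch with hypothetical
names:

    /* Ring positions advance modulo the queue size. */
    static inline u32 fbq_next(u32 pos, u32 size)
    {
            return (pos + 1U) % size;
    }

    /* Full when head's successor meets tail; empty when head == tail. */
    static bool fbq_has_room(u32 head, u32 tail, u32 size)
    {
            return fbq_next(head, size) != tail;
    }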
static int falcon_fb_queue_write(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 offset,
static int engine_fb_queue_write(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, u32 offset,
u8 *src, u32 size)
{
struct gk20a *g = flcn->g;
@@ -137,8 +137,8 @@ exit:
return err;
}
static int falcon_fb_queue_set_element_use_state(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 queue_pos, bool set)
static int engine_fb_queue_set_element_use_state(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, u32 queue_pos, bool set)
{
int err = 0;
@@ -166,8 +166,8 @@ exit:
return err;
}
static int falcon_fb_queue_is_element_in_use(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue,
static int engine_fb_queue_is_element_in_use(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue,
u32 queue_pos, bool *in_use)
{
int err = 0;
@@ -183,8 +183,8 @@ exit:
return err;
}
static int falcon_fb_queue_sweep(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue)
static int engine_fb_queue_sweep(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue)
{
u32 head;
u32 tail;
@@ -205,7 +205,7 @@ static int falcon_fb_queue_sweep(struct nvgpu_falcon *flcn,
* can be made available.
*/
while (tail != head) {
if (falcon_fb_queue_is_element_in_use(flcn, queue,
if (engine_fb_queue_is_element_in_use(flcn, queue,
tail, &in_use) != 0) {
break;
}
@@ -214,7 +214,7 @@ static int falcon_fb_queue_sweep(struct nvgpu_falcon *flcn,
break;
}
tail = falcon_fb_queue_get_next(flcn, queue, tail);
tail = engine_fb_queue_get_next(flcn, queue, tail);
}
/* Update tail */
@@ -224,50 +224,50 @@ exit:
return err;
}
u32 nvgpu_falcon_fb_queue_get_position(struct nvgpu_falcon_fb_queue *queue)
u32 nvgpu_engine_fb_queue_get_position(struct nvgpu_engine_fb_queue *queue)
{
return queue->position;
}
/* return the queue element size */
u32 nvgpu_falcon_fb_queue_get_element_size(struct nvgpu_falcon_fb_queue *queue)
u32 nvgpu_engine_fb_queue_get_element_size(struct nvgpu_engine_fb_queue *queue)
{
return queue->fbq.element_size;
}
/* return the queue offset from super surface FBQ's */
u32 nvgpu_falcon_fb_queue_get_offset(struct nvgpu_falcon_fb_queue *queue)
u32 nvgpu_engine_fb_queue_get_offset(struct nvgpu_engine_fb_queue *queue)
{
return queue->fbq.fb_offset;
}
/* lock work buffer of queue */
void nvgpu_falcon_fb_queue_lock_work_buffer(struct nvgpu_falcon_fb_queue *queue)
void nvgpu_engine_fb_queue_lock_work_buffer(struct nvgpu_engine_fb_queue *queue)
{
/* acquire work buffer mutex */
nvgpu_mutex_acquire(&queue->fbq.work_buffer_mutex);
}
/* unlock work buffer of queue */
void nvgpu_falcon_fb_queue_unlock_work_buffer(
struct nvgpu_falcon_fb_queue *queue)
void nvgpu_engine_fb_queue_unlock_work_buffer(
struct nvgpu_engine_fb_queue *queue)
{
/* release work buffer mutex */
nvgpu_mutex_release(&queue->fbq.work_buffer_mutex);
}
/* return a pointer of queue work buffer */
u8 *nvgpu_falcon_fb_queue_get_work_buffer(struct nvgpu_falcon_fb_queue *queue)
u8 *nvgpu_engine_fb_queue_get_work_buffer(struct nvgpu_engine_fb_queue *queue)
{
return queue->fbq.work_buffer;
}
int nvgpu_falcon_fb_queue_free_element(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 queue_pos)
int nvgpu_engine_fb_queue_free_element(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, u32 queue_pos)
{
int err = 0;
err = falcon_fb_queue_set_element_use_state(flcn, queue,
err = engine_fb_queue_set_element_use_state(flcn, queue,
queue_pos, false);
if (err != 0) {
nvgpu_err(flcn->g, "fb queue element %d free failed",
@@ -275,15 +275,15 @@ int nvgpu_falcon_fb_queue_free_element(struct nvgpu_falcon *flcn,
goto exit;
}
err = falcon_fb_queue_sweep(flcn, queue);
err = engine_fb_queue_sweep(flcn, queue);
exit:
return err;
}
/* queue is_empty check with lock */
bool nvgpu_falcon_fb_queue_is_empty(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue)
bool nvgpu_engine_fb_queue_is_empty(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue)
{
u32 q_head = 0;
u32 q_tail = 0;
@@ -317,13 +317,13 @@ exit:
return q_head == q_tail;
}
static int falcon_fb_queue_prepare_write(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 size)
static int engine_fb_queue_prepare_write(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, u32 size)
{
int err = 0;
/* make sure there's enough free space for the write */
if (!falcon_fb_queue_has_room(flcn, queue, size)) {
if (!engine_fb_queue_has_room(flcn, queue, size)) {
nvgpu_pmu_dbg(flcn->g, "queue full: queue-id %d: index %d",
queue->id, queue->index);
err = -EAGAIN;
@@ -342,8 +342,8 @@ exit:
}
/* queue push operation with lock */
int nvgpu_falcon_fb_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, void *data, u32 size)
int nvgpu_engine_fb_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, void *data, u32 size)
{
struct gk20a *g;
int err = 0;
@@ -366,7 +366,7 @@ int nvgpu_falcon_fb_queue_push(struct nvgpu_falcon *flcn,
/* acquire mutex */
nvgpu_mutex_acquire(&queue->mutex);
err = falcon_fb_queue_prepare_write(flcn, queue, size);
err = engine_fb_queue_prepare_write(flcn, queue, size);
if (err != 0) {
goto unlock_mutex;
}
@@ -378,7 +378,7 @@ int nvgpu_falcon_fb_queue_push(struct nvgpu_falcon *flcn,
}
/* Set queue element in use */
if (falcon_fb_queue_set_element_use_state(flcn, queue,
if (engine_fb_queue_set_element_use_state(flcn, queue,
queue->position, true) != 0) {
nvgpu_err(g,
"fb-queue element in use map is in invalid state");
@@ -387,13 +387,13 @@ int nvgpu_falcon_fb_queue_push(struct nvgpu_falcon *flcn,
}
/* write data to FB */
err = falcon_fb_queue_write(flcn, queue, queue->position, data, size);
err = engine_fb_queue_write(flcn, queue, queue->position, data, size);
if (err != 0) {
nvgpu_err(g, "write to fb-queue failed");
goto unlock_mutex;
}
queue->position = falcon_fb_queue_get_next(flcn, queue,
queue->position = engine_fb_queue_get_next(flcn, queue,
queue->position);
err = queue->head(flcn, queue, &queue->position, QUEUE_SET);
@@ -416,8 +416,8 @@ exit:
}
/* queue pop operation with lock */
int nvgpu_falcon_fb_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, void *data, u32 size,
int nvgpu_engine_fb_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, void *data, u32 size,
u32 *bytes_read)
{
struct gk20a *g;
@@ -495,7 +495,7 @@ int nvgpu_falcon_fb_queue_pop(struct nvgpu_falcon *flcn,
if (queue->fbq.read_position >= hdr->size) {
queue->fbq.read_position = 0U;
/* Increment queue index. */
queue->position = falcon_fb_queue_get_next(flcn, queue,
queue->position = engine_fb_queue_get_next(flcn, queue,
queue->position);
}
@@ -520,10 +520,10 @@ exit:
return err;
}
void nvgpu_falcon_fb_queue_free(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue **queue_p)
void nvgpu_engine_fb_queue_free(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue **queue_p)
{
struct nvgpu_falcon_fb_queue *queue = NULL;
struct nvgpu_engine_fb_queue *queue = NULL;
struct gk20a *g = flcn->g;
if ((queue_p == NULL) || (*queue_p == NULL)) {
@@ -545,11 +545,11 @@ void nvgpu_falcon_fb_queue_free(struct nvgpu_falcon *flcn,
*queue_p = NULL;
}
int nvgpu_falcon_fb_queue_init(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue **queue_p,
struct nvgpu_falcon_fb_queue_params params)
int nvgpu_engine_fb_queue_init(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue **queue_p,
struct nvgpu_engine_fb_queue_params params)
{
struct nvgpu_falcon_fb_queue *queue = NULL;
struct nvgpu_engine_fb_queue *queue = NULL;
struct gk20a *g = flcn->g;
int err = 0;
@@ -557,8 +557,8 @@ int nvgpu_falcon_fb_queue_init(struct nvgpu_falcon *flcn,
return -EINVAL;
}
queue = (struct nvgpu_falcon_fb_queue *)
nvgpu_kmalloc(g, sizeof(struct nvgpu_falcon_fb_queue));
queue = (struct nvgpu_engine_fb_queue *)
nvgpu_kmalloc(g, sizeof(struct nvgpu_engine_fb_queue));
if (queue == NULL) {
return -ENOMEM;
@@ -579,8 +579,8 @@ int nvgpu_falcon_fb_queue_init(struct nvgpu_falcon *flcn,
queue->position = 0U;
queue->head = falcon_fb_queue_head;
queue->tail = falcon_fb_queue_tail;
queue->head = engine_fb_queue_head;
queue->tail = engine_fb_queue_tail;
/* init mutex */
err = nvgpu_mutex_init(&queue->mutex);

@@ -20,10 +20,10 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FALCON_FB_QUEUE_PRIV_H
#define NVGPU_FALCON_FB_QUEUE_PRIV_H
#ifndef NVGPU_ENGINE_FB_QUEUE_PRIV_H
#define NVGPU_ENGINE_FB_QUEUE_PRIV_H
struct nvgpu_falcon_fb_queue {
struct nvgpu_engine_fb_queue {
struct gk20a *g;
/* used by nvgpu, for command LPQ/HPQ */
@@ -93,9 +93,9 @@ struct nvgpu_falcon_fb_queue {
/* queue ops */
int (*tail)(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 *tail, bool set);
struct nvgpu_engine_fb_queue *queue, u32 *tail, bool set);
int (*head)(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 *head, bool set);
struct nvgpu_engine_fb_queue *queue, u32 *head, bool set);
};
#endif /* NVGPU_FALCON_FB_QUEUE_PRIV_H */
#endif /* NVGPU_ENGINE_FB_QUEUE_PRIV_H */

@@ -23,16 +23,16 @@
#include <nvgpu/lock.h>
#include <nvgpu/pmu.h>
#include "falcon_queue_priv.h"
#include "engine_mem_queue_priv.h"
#include "falcon_priv.h"
#include "falcon_dmem_queue.h"
#include "falcon_emem_queue.h"
#include "engine_dmem_queue.h"
#include "engine_emem_queue.h"
/* common falcon queue ops */
static int falcon_queue_head(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 *head, bool set)
static int engine_mem_queue_head(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, u32 *head, bool set)
{
int err = -ENOSYS;
int err = -EINVAL;
if (flcn->flcn_engine_dep_ops.queue_head != NULL) {
err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue->id,
@@ -42,10 +42,10 @@ static int falcon_queue_head(struct nvgpu_falcon *flcn,
return err;
}
static int falcon_queue_tail(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 *tail, bool set)
static int engine_mem_queue_tail(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, u32 *tail, bool set)
{
int err = -ENOSYS;
int err = -EINVAL;
if (flcn->flcn_engine_dep_ops.queue_tail != NULL) {
err = flcn->flcn_engine_dep_ops.queue_tail(flcn->g, queue->id,
@@ -55,8 +55,8 @@ static int falcon_queue_tail(struct nvgpu_falcon *flcn,
return err;
}
static bool falcon_queue_has_room(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 size, bool *need_rewind)
static bool engine_mem_queue_has_room(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, u32 size, bool *need_rewind)
{
u32 q_head = 0;
u32 q_tail = 0;
@@ -100,8 +100,8 @@ exit:
return size <= q_free;
}
static int falcon_queue_rewind(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue)
static int engine_mem_queue_rewind(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue)
{
struct gk20a *g = flcn->g;
struct pmu_cmd cmd;
@@ -129,7 +129,7 @@ static int falcon_queue_rewind(struct nvgpu_falcon *flcn,
if (queue->oflag == OFLAG_READ) {
err = queue->tail(flcn, queue, &queue->position,
QUEUE_SET);
if (err != 0){
if (err != 0) {
nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
flcn->flcn_id, queue->id);
goto exit;
@@ -140,14 +140,14 @@ exit:
return err;
}
static int falcon_queue_prepare_write(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 size)
static int engine_mem_queue_prepare_write(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, u32 size)
{
bool q_rewind = false;
int err = 0;
/* make sure there's enough free space for the write */
if (!falcon_queue_has_room(flcn, queue, size, &q_rewind)) {
if (!engine_mem_queue_has_room(flcn, queue, size, &q_rewind)) {
nvgpu_pmu_dbg(flcn->g, "queue full: queue-id %d: index %d",
queue->id, queue->index);
err = -EAGAIN;
@@ -162,7 +162,7 @@ static int falcon_queue_prepare_write(struct nvgpu_falcon *flcn,
}
if (q_rewind) {
err = falcon_queue_rewind(flcn, queue);
err = engine_mem_queue_rewind(flcn, queue);
}
exit:
@@ -172,8 +172,8 @@ exit:
/* queue public functions */
/* queue push operation with lock */
int nvgpu_falcon_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, void *data, u32 size)
int nvgpu_engine_mem_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, void *data, u32 size)
{
int err = 0;
@@ -191,7 +191,7 @@ int nvgpu_falcon_queue_push(struct nvgpu_falcon *flcn,
/* acquire mutex */
nvgpu_mutex_acquire(&queue->mutex);
err = falcon_queue_prepare_write(flcn, queue, size);
err = engine_mem_queue_prepare_write(flcn, queue, size);
if (err != 0) {
goto unlock_mutex;
}
@@ -206,7 +206,7 @@ int nvgpu_falcon_queue_push(struct nvgpu_falcon *flcn,
queue->position += ALIGN(size, QUEUE_ALIGNMENT);
err = queue->head(flcn, queue, &queue->position, QUEUE_SET);
if (err != 0){
if (err != 0) {
nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
flcn->flcn_id, queue->id);
}
@@ -219,8 +219,8 @@ exit:
}
/* queue pop operation with lock */
int nvgpu_falcon_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, void *data, u32 size,
int nvgpu_engine_mem_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, void *data, u32 size,
u32 *bytes_read)
{
struct gk20a *g = flcn->g;
@@ -284,7 +284,7 @@ int nvgpu_falcon_queue_pop(struct nvgpu_falcon *flcn,
queue->position += ALIGN(size, QUEUE_ALIGNMENT);
err = queue->tail(flcn, queue, &queue->position, QUEUE_SET);
if (err != 0){
if (err != 0) {
nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
flcn->flcn_id, queue->id);
goto unlock_mutex;
@@ -299,8 +299,8 @@ exit:
return err;
}
int nvgpu_falcon_queue_rewind(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue)
int nvgpu_engine_mem_queue_rewind(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue)
{
int err = 0;
@@ -311,7 +311,7 @@ int nvgpu_falcon_queue_rewind(struct nvgpu_falcon *flcn,
/* acquire mutex */
nvgpu_mutex_acquire(&queue->mutex);
err = falcon_queue_rewind(flcn, queue);
err = engine_mem_queue_rewind(flcn, queue);
/* release mutex */
nvgpu_mutex_release(&queue->mutex);
@@ -320,8 +320,8 @@ int nvgpu_falcon_queue_rewind(struct nvgpu_falcon *flcn,
}
/* queue is_empty check with lock */
bool nvgpu_falcon_queue_is_empty(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue)
bool nvgpu_engine_mem_queue_is_empty(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue)
{
u32 q_head = 0;
u32 q_tail = 0;
@@ -355,10 +355,10 @@ exit:
return q_head == q_tail;
}
void nvgpu_falcon_queue_free(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue **queue_p)
void nvgpu_engine_mem_queue_free(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue **queue_p)
{
struct nvgpu_falcon_queue *queue = NULL;
struct nvgpu_engine_mem_queue *queue = NULL;
struct gk20a *g = flcn->g;
if ((queue_p == NULL) || (*queue_p == NULL)) {
@@ -377,16 +377,16 @@ void nvgpu_falcon_queue_free(struct nvgpu_falcon *flcn,
*queue_p = NULL;
}
u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue)
u32 nvgpu_engine_mem_queue_get_size(struct nvgpu_engine_mem_queue *queue)
{
return queue->size;
}
int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue **queue_p,
struct nvgpu_falcon_queue_params params)
int nvgpu_engine_mem_queue_init(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue **queue_p,
struct nvgpu_engine_mem_queue_params params)
{
struct nvgpu_falcon_queue *queue = NULL;
struct nvgpu_engine_mem_queue *queue = NULL;
struct gk20a *g = flcn->g;
int err = 0;
@@ -394,8 +394,8 @@ int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
return -EINVAL;
}
queue = (struct nvgpu_falcon_queue *)
nvgpu_kmalloc(g, sizeof(struct nvgpu_falcon_queue));
queue = (struct nvgpu_engine_mem_queue *)
nvgpu_kmalloc(g, sizeof(struct nvgpu_engine_mem_queue));
if (queue == NULL) {
return -ENOMEM;
@@ -410,8 +410,8 @@ int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
queue->oflag = params.oflag;
queue->queue_type = params.queue_type;
queue->head = falcon_queue_head;
queue->tail = falcon_queue_tail;
queue->head = engine_mem_queue_head;
queue->tail = engine_mem_queue_tail;
nvgpu_log(g, gpu_dbg_pmu,
"flcn id-%d q-id %d: index %d, offset 0x%08x, size 0x%08x",
@@ -420,10 +420,10 @@ int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
switch (queue->queue_type) {
case QUEUE_TYPE_DMEM:
falcon_dmem_queue_init(queue);
engine_dmem_queue_init(queue);
break;
case QUEUE_TYPE_EMEM:
falcon_emem_queue_init(queue);
engine_emem_queue_init(queue);
break;
default:
err = -EINVAL;

@@ -20,8 +20,8 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FALCON_QUEUE_PRIV_H
#define NVGPU_FALCON_QUEUE_PRIV_H
#ifndef NVGPU_ENGINE_MEM_QUEUE_PRIV_H
#define NVGPU_ENGINE_MEM_QUEUE_PRIV_H
#include <nvgpu/lock.h>
#include <nvgpu/types.h>
@@ -29,7 +29,7 @@
struct gk20a;
struct nvgpu_falcon;
struct nvgpu_falcon_queue {
struct nvgpu_engine_mem_queue {
struct gk20a *g;
/* Queue Type (queue_type) */
u8 queue_type;
@@ -52,17 +52,17 @@ struct nvgpu_falcon_queue {
/* queue type(DMEM-Q/EMEM-Q) specific ops */
int (*push)(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue,
struct nvgpu_engine_mem_queue *queue,
u32 dst, void *data, u32 size);
int (*pop)(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue,
struct nvgpu_engine_mem_queue *queue,
u32 src, void *data, u32 size);
/* engine specific ops */
int (*head)(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 *head, bool set);
struct nvgpu_engine_mem_queue *queue, u32 *head, bool set);
int (*tail)(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, u32 *tail, bool set);
struct nvgpu_engine_mem_queue *queue, u32 *tail, bool set);
};
#endif /* NVGPU_FALCON_QUEUE_PRIV_H */
#endif /* NVGPU_ENGINE_MEM_QUEUE_PRIV_H */
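
As a hedged illustration of the ops table above: DMEM and EMEM queues
differ only in the push/pop callbacks bound at init time, so generic code
can move data without knowing the backing memory. The wrapper below is
hypothetical:

    static int mem_queue_xfer(struct nvgpu_falcon *flcn,
                    struct nvgpu_engine_mem_queue *queue,
                    u32 offset, void *data, u32 size, bool write)
    {
            /* push/pop were bound by engine_{d,e}mem_queue_init() */
            return write ? queue->push(flcn, queue, offset, data, size) :
                            queue->pop(flcn, queue, offset, data, size);
    }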

@@ -30,7 +30,7 @@
#include <nvgpu/bug.h>
#include <nvgpu/firmware.h>
#include <nvgpu/falcon.h>
#include <nvgpu/falcon_queue.h>
#include <nvgpu/engine_mem_queue.h>
#include <nvgpu/mm.h>
#include <nvgpu/io.h>
#include <nvgpu/clk_arb.h>

@@ -28,8 +28,8 @@
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
#include <nvgpu/falcon.h>
#include <nvgpu/falcon_queue.h>
#include <nvgpu/falcon_fb_queue.h>
#include <nvgpu/engine_mem_queue.h>
#include <nvgpu/engine_fb_queue.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>
@@ -111,7 +111,7 @@ int nvgpu_pmu_queue_init_fb(struct nvgpu_pmu *pmu,
u32 id, union pmu_init_msg_pmu *init)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct nvgpu_falcon_fb_queue_params params = {0};
struct nvgpu_engine_fb_queue_params params = {0};
u32 oflag = 0;
int err = 0;
u32 tmp_id = id;
@@ -176,7 +176,7 @@ int nvgpu_pmu_queue_init_fb(struct nvgpu_pmu *pmu,
}
params.index = init->v5.queue_index[tmp_id];
err = nvgpu_falcon_fb_queue_init(pmu->flcn, &pmu->fb_queue[id], params);
err = nvgpu_engine_fb_queue_init(pmu->flcn, &pmu->fb_queue[id], params);
if (err != 0) {
nvgpu_err(g, "queue-%d init failed", id);
}
@@ -190,7 +190,7 @@ int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu,
u32 id, union pmu_init_msg_pmu *init)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct nvgpu_falcon_queue_params params = {0};
struct nvgpu_engine_mem_queue_params params = {0};
u32 oflag = 0;
int err = 0;
@@ -222,7 +222,7 @@ int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu,
&params.index,
&params.offset,
&params.size);
err = nvgpu_falcon_queue_init(pmu->flcn, &pmu->queue[id], params);
err = nvgpu_engine_mem_queue_init(pmu->flcn, &pmu->queue[id], params);
if (err != 0) {
nvgpu_err(g, "queue-%d init failed", id);
}
@@ -245,13 +245,13 @@ void nvgpu_pmu_queue_free(struct nvgpu_pmu *pmu, u32 id)
goto exit;
}
nvgpu_falcon_fb_queue_free(pmu->flcn, &pmu->fb_queue[id]);
nvgpu_engine_fb_queue_free(pmu->flcn, &pmu->fb_queue[id]);
} else {
if (pmu->queue[id] == NULL) {
goto exit;
}
nvgpu_falcon_queue_free(pmu->flcn, &pmu->queue[id]);
nvgpu_engine_mem_queue_free(pmu->flcn, &pmu->queue[id]);
}
exit:
@@ -263,8 +263,8 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
u32 queue_id)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct nvgpu_falcon_fb_queue *fb_queue = NULL;
struct nvgpu_falcon_queue *queue = NULL;
struct nvgpu_engine_fb_queue *fb_queue = NULL;
struct nvgpu_engine_mem_queue *queue = NULL;
u32 queue_size;
u32 in_size, out_size;
@@ -274,10 +274,10 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
if (pmu->queue_type == QUEUE_TYPE_FB) {
fb_queue = pmu->fb_queue[queue_id];
queue_size = nvgpu_falcon_fb_queue_get_element_size(fb_queue);
queue_size = nvgpu_engine_fb_queue_get_element_size(fb_queue);
} else {
queue = pmu->queue[queue_id];
queue_size = nvgpu_falcon_queue_get_size(queue);
queue_size = nvgpu_engine_mem_queue_get_size(queue);
}
if (cmd->hdr.size < PMU_CMD_HDR_SIZE) {
@@ -353,8 +353,8 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
u32 queue_id)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct nvgpu_falcon_fb_queue *fb_queue = NULL;
struct nvgpu_falcon_queue *queue = NULL;
struct nvgpu_engine_fb_queue *fb_queue = NULL;
struct nvgpu_engine_mem_queue *queue = NULL;
struct nvgpu_timeout timeout;
int err;
@@ -365,12 +365,12 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
do {
if (pmu->queue_type == QUEUE_TYPE_FB) {
fb_queue = pmu->fb_queue[queue_id];
err = nvgpu_falcon_fb_queue_push(pmu->flcn, fb_queue,
err = nvgpu_engine_fb_queue_push(pmu->flcn, fb_queue,
cmd, cmd->hdr.size);
} else {
queue = pmu->queue[queue_id];
err = nvgpu_falcon_queue_push(pmu->flcn, queue,
cmd, cmd->hdr.size);
err = nvgpu_engine_mem_queue_push(pmu->flcn, queue,
cmd, cmd->hdr.size);
}
if (err == -EAGAIN && nvgpu_timeout_expired(&timeout) == 0) {
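
The hunk above is truncated; as a hedged sketch, the complete write
pattern retries a full queue (-EAGAIN) under a CPU timeout, matching the
sec2 path later in this change (simplified to the mem-queue branch):

    nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
    do {
            err = nvgpu_engine_mem_queue_push(pmu->flcn, queue,
                            cmd, cmd->hdr.size);
            if (err == -EAGAIN && nvgpu_timeout_expired(&timeout) == 0) {
                    nvgpu_usleep_range(1000U, 2000U); /* queue full: back off */
            } else {
                    break;
            }
    } while (true);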
@@ -431,7 +431,7 @@ static int pmu_cmd_payload_setup_rpc(struct gk20a *g, struct pmu_cmd *cmd,
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_v *pv = &g->ops.pmu_ver;
struct nvgpu_falcon_fb_queue *queue = seq->cmd_queue;
struct nvgpu_engine_fb_queue *queue = seq->cmd_queue;
struct falcon_payload_alloc alloc;
int err = 0;
@@ -452,7 +452,7 @@ static int pmu_cmd_payload_setup_rpc(struct gk20a *g, struct pmu_cmd *cmd,
if (pmu->queue_type == QUEUE_TYPE_FB) {
/* copy payload to FBQ work buffer */
nvgpu_memcpy((u8 *)
nvgpu_falcon_fb_queue_get_work_buffer(queue) +
nvgpu_engine_fb_queue_get_work_buffer(queue) +
alloc.dmem_offset,
(u8 *)payload->rpc.prpc, payload->rpc.size_rpc);
@@ -547,7 +547,7 @@ static int pmu_cmd_payload_setup(struct gk20a *g, struct pmu_cmd *cmd,
if (pmu->queue_type == QUEUE_TYPE_FB) {
/* copy payload to FBQ work buffer */
nvgpu_memcpy((u8 *)
nvgpu_falcon_fb_queue_get_work_buffer(
nvgpu_engine_fb_queue_get_work_buffer(
seq->cmd_queue) +
alloc.dmem_offset,
(u8 *)payload->in.buf,
@@ -643,7 +643,7 @@ clean_up:
}
static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd,
struct nvgpu_falcon_fb_queue *queue, struct pmu_payload *payload,
struct nvgpu_engine_fb_queue *queue, struct pmu_payload *payload,
struct pmu_sequence *seq)
{
struct nvgpu_pmu *pmu = &g->pmu;
@@ -655,10 +655,10 @@ static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd,
int err = 0;
fbq_hdr = (struct nv_falcon_fbq_hdr *)
nvgpu_falcon_fb_queue_get_work_buffer(queue);
nvgpu_engine_fb_queue_get_work_buffer(queue);
flcn_cmd = (struct pmu_cmd *)
(nvgpu_falcon_fb_queue_get_work_buffer(queue) +
(nvgpu_engine_fb_queue_get_work_buffer(queue) +
sizeof(struct nv_falcon_fbq_hdr));
if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID) {
@@ -703,8 +703,8 @@ static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd,
seq->out_payload_fb_queue = false;
/* clear work queue buffer */
memset(nvgpu_falcon_fb_queue_get_work_buffer(queue), 0,
nvgpu_falcon_fb_queue_get_element_size(queue));
memset(nvgpu_engine_fb_queue_get_work_buffer(queue), 0,
nvgpu_engine_fb_queue_get_element_size(queue));
/* Need to save room for both FBQ hdr, and the CMD */
seq->buffer_size_used = sizeof(struct nv_falcon_fbq_hdr) +
@@ -722,7 +722,7 @@ static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd,
* save queue index in seq structure
* so can free queue element when response is received
*/
seq->fbq_element_index = nvgpu_falcon_fb_queue_get_position(queue);
seq->fbq_element_index = nvgpu_engine_fb_queue_get_position(queue);
exit:
return err;
@@ -735,7 +735,7 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
{
struct nvgpu_pmu *pmu = &g->pmu;
struct pmu_sequence *seq = NULL;
struct nvgpu_falcon_fb_queue *fb_queue = NULL;
struct nvgpu_engine_fb_queue *fb_queue = NULL;
int err;
nvgpu_log_fn(g, " ");
@@ -782,7 +782,7 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
seq->cmd_queue = fb_queue;
/* Lock the FBQ work buffer */
nvgpu_falcon_fb_queue_lock_work_buffer(fb_queue);
nvgpu_engine_fb_queue_lock_work_buffer(fb_queue);
/* Create FBQ work buffer & copy cmd to FBQ work buffer */
err = pmu_fbq_cmd_setup(g, cmd, fb_queue, payload, seq);
@@ -798,7 +798,7 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
* in call pmu_fbq_cmd_setup()
*/
cmd = (struct pmu_cmd *)
(nvgpu_falcon_fb_queue_get_work_buffer(fb_queue) +
(nvgpu_engine_fb_queue_get_work_buffer(fb_queue) +
sizeof(struct nv_falcon_fbq_hdr));
}
@@ -824,7 +824,7 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
exit:
if (pmu->queue_type == QUEUE_TYPE_FB) {
/* Unlock the FBQ work buffer */
nvgpu_falcon_fb_queue_unlock_work_buffer(fb_queue);
nvgpu_engine_fb_queue_unlock_work_buffer(fb_queue);
}
nvgpu_log_fn(g, "Done, err %x", err);
@@ -843,10 +843,10 @@ static int pmu_payload_extract(struct nvgpu_pmu *pmu,
if (seq->out_payload_fb_queue) {
fbq_payload_offset =
nvgpu_falcon_fb_queue_get_offset(seq->cmd_queue) +
nvgpu_engine_fb_queue_get_offset(seq->cmd_queue) +
seq->fbq_out_offset_in_queue_element +
(seq->fbq_element_index *
nvgpu_falcon_fb_queue_get_element_size(seq->cmd_queue));
nvgpu_engine_fb_queue_get_element_size(seq->cmd_queue));
nvgpu_mem_rd_n(g, &pmu->super_surface_buf, fbq_payload_offset,
seq->out_payload,
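
A short note on the addressing above: FB queue elements are fixed-size
slots inside the super surface, so the out-payload of element N is read
from a byte offset computed as below (a hedged restatement of the code):

    /* fbq_payload_offset =
     *       fb_offset of the queue            (start of this FBQ)
     *     + fbq_out_offset_in_queue_element   (payload offset in a slot)
     *     + N * element_size                  (N = seq->fbq_element_index)
     */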
@@ -888,7 +888,7 @@ static void pmu_payload_fbq_free(struct nvgpu_pmu *pmu,
* set FBQ element work buffer to NULL
* Clear the in use bit for the queue entry this CMD used.
*/
nvgpu_falcon_fb_queue_free_element(pmu->flcn, seq->cmd_queue,
nvgpu_engine_fb_queue_free_element(pmu->flcn, seq->cmd_queue,
seq->fbq_element_index);
}
@@ -1044,23 +1044,23 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
return err;
}
static bool pmu_falcon_queue_read(struct nvgpu_pmu *pmu,
static bool pmu_engine_mem_queue_read(struct nvgpu_pmu *pmu,
u32 queue_id, void *data,
u32 bytes_to_read, int *status)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct nvgpu_falcon_fb_queue *fb_queue = NULL;
struct nvgpu_falcon_queue *queue = NULL;
struct nvgpu_engine_fb_queue *fb_queue = NULL;
struct nvgpu_engine_mem_queue *queue = NULL;
u32 bytes_read;
int err;
if (pmu->queue_type == QUEUE_TYPE_FB) {
fb_queue = pmu->fb_queue[queue_id];
err = nvgpu_falcon_fb_queue_pop(pmu->flcn, fb_queue, data,
err = nvgpu_engine_fb_queue_pop(pmu->flcn, fb_queue, data,
bytes_to_read, &bytes_read);
} else {
queue = pmu->queue[queue_id];
err = nvgpu_falcon_queue_pop(pmu->flcn, queue, data,
err = nvgpu_engine_mem_queue_pop(pmu->flcn, queue, data,
bytes_to_read, &bytes_read);
}
@@ -1081,16 +1081,16 @@ static bool pmu_falcon_queue_read(struct nvgpu_pmu *pmu,
bool nvgpu_pmu_queue_is_empty(struct nvgpu_pmu *pmu, u32 queue_id)
{
struct nvgpu_falcon_queue *queue = NULL;
struct nvgpu_falcon_fb_queue *fb_queue = NULL;
struct nvgpu_engine_mem_queue *queue = NULL;
struct nvgpu_engine_fb_queue *fb_queue = NULL;
bool empty = true;
if (pmu->queue_type == QUEUE_TYPE_FB) {
fb_queue = pmu->fb_queue[queue_id];
empty = nvgpu_falcon_fb_queue_is_empty(pmu->flcn, fb_queue);
empty = nvgpu_engine_fb_queue_is_empty(pmu->flcn, fb_queue);
} else {
queue = pmu->queue[queue_id];
empty = nvgpu_falcon_queue_is_empty(pmu->flcn, queue);
empty = nvgpu_engine_mem_queue_is_empty(pmu->flcn, queue);
}
return empty;
@@ -1100,7 +1100,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, u32 queue_id,
struct pmu_msg *msg, int *status)
{
struct gk20a *g = gk20a_from_pmu(pmu);
struct nvgpu_falcon_queue *queue = NULL;
struct nvgpu_engine_mem_queue *queue = NULL;
u32 read_size;
int err;
@@ -1110,8 +1110,8 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, u32 queue_id,
return false;
}
if (!pmu_falcon_queue_read(pmu, queue_id, &msg->hdr, PMU_MSG_HDR_SIZE,
status)) {
if (!pmu_engine_mem_queue_read(pmu, queue_id, &msg->hdr,
PMU_MSG_HDR_SIZE, status)) {
nvgpu_err(g, "fail to read msg from queue %d", queue_id);
goto clean_up;
}
@@ -1119,7 +1119,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, u32 queue_id,
if (msg->hdr.unit_id == PMU_UNIT_REWIND) {
if (pmu->queue_type != QUEUE_TYPE_FB) {
queue = pmu->queue[queue_id];
err = nvgpu_falcon_queue_rewind(pmu->flcn, queue);
err = nvgpu_engine_mem_queue_rewind(pmu->flcn, queue);
if (err != 0) {
nvgpu_err(g, "fail to rewind queue %d",
queue_id);
@@ -1129,7 +1129,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, u32 queue_id,
}
/* read again after rewind */
if (!pmu_falcon_queue_read(pmu, queue_id, &msg->hdr,
if (!pmu_engine_mem_queue_read(pmu, queue_id, &msg->hdr,
PMU_MSG_HDR_SIZE, status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
@@ -1146,8 +1146,8 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, u32 queue_id,
if (msg->hdr.size > PMU_MSG_HDR_SIZE) {
read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
if (!pmu_falcon_queue_read(pmu, queue_id, &msg->msg, read_size,
status)) {
if (!pmu_engine_mem_queue_read(pmu, queue_id, &msg->msg,
read_size, status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
goto clean_up;

@@ -32,7 +32,7 @@ int nvgpu_sec2_queue_init(struct nvgpu_sec2 *sec2, u32 id,
struct sec2_init_msg_sec2_init *init)
{
struct gk20a *g = sec2->g;
struct nvgpu_falcon_queue_params params = {0};
struct nvgpu_engine_mem_queue_params params = {0};
u32 queue_log_id = 0;
u32 oflag = 0;
int err = 0;
@@ -68,7 +68,7 @@ int nvgpu_sec2_queue_init(struct nvgpu_sec2 *sec2, u32 id,
params.oflag = oflag;
params.queue_type = QUEUE_TYPE_EMEM;
err = nvgpu_falcon_queue_init(sec2->flcn,
err = nvgpu_engine_mem_queue_init(sec2->flcn,
&sec2->queue[queue_log_id],
params);
if (err != 0) {
@@ -92,7 +92,7 @@ void nvgpu_sec2_queue_free(struct nvgpu_sec2 *sec2, u32 id)
goto exit;
}
nvgpu_falcon_queue_free(sec2->flcn, &sec2->queue[id]);
nvgpu_engine_mem_queue_free(sec2->flcn, &sec2->queue[id]);
exit:
return;
}

@@ -81,7 +81,7 @@ static bool sec2_validate_cmd(struct nvgpu_sec2 *sec2,
struct nv_flcn_cmd_sec2 *cmd, u32 queue_id)
{
struct gk20a *g = sec2->g;
struct nvgpu_falcon_queue *queue;
struct nvgpu_engine_mem_queue *queue;
u32 queue_size;
if (queue_id != SEC2_NV_CMDQ_LOG_ID) {
@@ -93,7 +93,7 @@ static bool sec2_validate_cmd(struct nvgpu_sec2 *sec2,
goto invalid_cmd;
}
queue_size = nvgpu_falcon_queue_get_size(queue);
queue_size = nvgpu_engine_mem_queue_get_size(queue);
if (cmd->hdr.size > (queue_size >> 1)) {
goto invalid_cmd;
}
@@ -117,7 +117,7 @@ static int sec2_write_cmd(struct nvgpu_sec2 *sec2,
unsigned long timeout_ms)
{
struct gk20a *g = sec2->g;
struct nvgpu_falcon_queue *queue;
struct nvgpu_engine_mem_queue *queue;
struct nvgpu_timeout timeout;
int err;
@@ -127,7 +127,7 @@ static int sec2_write_cmd(struct nvgpu_sec2 *sec2,
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
do {
err = nvgpu_falcon_queue_push(g->sec2.flcn, queue, cmd,
err = nvgpu_engine_mem_queue_push(g->sec2.flcn, queue, cmd,
cmd->hdr.size);
if ((err == -EAGAIN) && (nvgpu_timeout_expired(&timeout) == 0)) {
nvgpu_usleep_range(1000U, 2000U);
@@ -241,15 +241,15 @@ static int sec2_handle_event(struct nvgpu_sec2 *sec2,
return err;
}
static bool sec2_falcon_queue_read(struct nvgpu_sec2 *sec2,
struct nvgpu_falcon_queue *queue, void *data,
static bool sec2_engine_mem_queue_read(struct nvgpu_sec2 *sec2,
struct nvgpu_engine_mem_queue *queue, void *data,
u32 bytes_to_read, int *status)
{
struct gk20a *g = sec2->g;
u32 bytes_read;
int err;
err = nvgpu_falcon_queue_pop(sec2->flcn, queue, data,
err = nvgpu_engine_mem_queue_pop(sec2->flcn, queue, data,
bytes_to_read, &bytes_read);
if (err != 0) {
nvgpu_err(g, "fail to read msg: err %d", err);
@@ -269,25 +269,25 @@ static bool sec2_falcon_queue_read(struct nvgpu_sec2 *sec2,
static bool sec2_read_message(struct nvgpu_sec2 *sec2,
u32 queue_id, struct nv_flcn_msg_sec2 *msg, int *status)
{
struct nvgpu_falcon_queue *queue = sec2->queue[queue_id];
struct nvgpu_engine_mem_queue *queue = sec2->queue[queue_id];
struct gk20a *g = sec2->g;
u32 read_size;
int err;
*status = 0U;
if (nvgpu_falcon_queue_is_empty(sec2->flcn, queue)) {
if (nvgpu_engine_mem_queue_is_empty(sec2->flcn, queue)) {
return false;
}
if (!sec2_falcon_queue_read(sec2, queue, &msg->hdr, PMU_MSG_HDR_SIZE,
status)) {
if (!sec2_engine_mem_queue_read(sec2, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, status)) {
nvgpu_err(g, "fail to read msg from queue %d", queue_id);
goto clean_up;
}
if (msg->hdr.unit_id == NV_SEC2_UNIT_REWIND) {
err = nvgpu_falcon_queue_rewind(sec2->flcn, queue);
err = nvgpu_engine_mem_queue_rewind(sec2->flcn, queue);
if (err != 0) {
nvgpu_err(g, "fail to rewind queue %d", queue_id);
*status = err;
@@ -295,7 +295,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
}
/* read again after rewind */
if (!sec2_falcon_queue_read(sec2, queue, &msg->hdr,
if (!sec2_engine_mem_queue_read(sec2, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
@@ -312,8 +312,8 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
if (msg->hdr.size > PMU_MSG_HDR_SIZE) {
read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
if (!sec2_falcon_queue_read(sec2, queue, &msg->msg, read_size,
status)) {
if (!sec2_engine_mem_queue_read(sec2, queue, &msg->msg,
read_size, status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
goto clean_up;

@@ -20,16 +20,16 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FALCON_FB_QUEUE_H
#define NVGPU_FALCON_FB_QUEUE_H
#ifndef NVGPU_ENGINE_FB_QUEUE_H
#define NVGPU_ENGINE_FB_QUEUE_H
#include <nvgpu/types.h>
struct gk20a;
struct nvgpu_falcon;
struct nvgpu_falcon_fb_queue;
struct nvgpu_engine_fb_queue;
struct nvgpu_falcon_fb_queue_params {
struct nvgpu_engine_fb_queue_params {
/* logical queue identifier */
u32 id;
/* physical queue index */
@@ -51,27 +51,27 @@ struct nvgpu_falcon_fb_queue_params {
};
/* queue public functions */
int nvgpu_falcon_fb_queue_init(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue **queue_p,
struct nvgpu_falcon_fb_queue_params params);
bool nvgpu_falcon_fb_queue_is_empty(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue);
int nvgpu_falcon_fb_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, void *data, u32 size,
int nvgpu_engine_fb_queue_init(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue **queue_p,
struct nvgpu_engine_fb_queue_params params);
bool nvgpu_engine_fb_queue_is_empty(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue);
int nvgpu_engine_fb_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, void *data, u32 size,
u32 *bytes_read);
int nvgpu_falcon_fb_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, void *data, u32 size);
void nvgpu_falcon_fb_queue_free(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue **queue_p);
u32 nvgpu_falcon_fb_queue_get_position(struct nvgpu_falcon_fb_queue *queue);
u32 nvgpu_falcon_fb_queue_get_element_size(struct nvgpu_falcon_fb_queue *queue);
u32 nvgpu_falcon_fb_queue_get_offset(struct nvgpu_falcon_fb_queue *queue);
u8 *nvgpu_falcon_fb_queue_get_work_buffer(struct nvgpu_falcon_fb_queue *queue);
int nvgpu_falcon_fb_queue_free_element(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_fb_queue *queue, u32 queue_pos);
void nvgpu_falcon_fb_queue_lock_work_buffer(
struct nvgpu_falcon_fb_queue *queue);
void nvgpu_falcon_fb_queue_unlock_work_buffer(
struct nvgpu_falcon_fb_queue *queue);
int nvgpu_engine_fb_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, void *data, u32 size);
void nvgpu_engine_fb_queue_free(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue **queue_p);
u32 nvgpu_engine_fb_queue_get_position(struct nvgpu_engine_fb_queue *queue);
u32 nvgpu_engine_fb_queue_get_element_size(struct nvgpu_engine_fb_queue *queue);
u32 nvgpu_engine_fb_queue_get_offset(struct nvgpu_engine_fb_queue *queue);
u8 *nvgpu_engine_fb_queue_get_work_buffer(struct nvgpu_engine_fb_queue *queue);
int nvgpu_engine_fb_queue_free_element(struct nvgpu_falcon *flcn,
struct nvgpu_engine_fb_queue *queue, u32 queue_pos);
void nvgpu_engine_fb_queue_lock_work_buffer(
struct nvgpu_engine_fb_queue *queue);
void nvgpu_engine_fb_queue_unlock_work_buffer(
struct nvgpu_engine_fb_queue *queue);
#endif /* NVGPU_FALCON_FB_QUEUE_H */
#endif /* NVGPU_ENGINE_FB_QUEUE_H */
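
A hedged usage sketch of the renamed public API (setup and error handling
elided; the PMU code in this change follows this shape):

    struct nvgpu_engine_fb_queue *queue = NULL;
    struct nvgpu_engine_fb_queue_params params = {0};
    int err;

    params.id = id;        /* logical queue id */
    params.index = index;  /* physical queue index */
    err = nvgpu_engine_fb_queue_init(flcn, &queue, params);
    if (err == 0) {
            err = nvgpu_engine_fb_queue_push(flcn, queue, cmd, size);
            /* ... pop / free_element on the response path ... */
            nvgpu_engine_fb_queue_free(flcn, &queue);
    }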

@@ -20,8 +20,8 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_FALCON_QUEUE_H
#define NVGPU_FALCON_QUEUE_H
#ifndef NVGPU_ENGINE_MEM_QUEUE_H
#define NVGPU_ENGINE_MEM_QUEUE_H
#include <nvgpu/types.h>
@@ -31,9 +31,9 @@
#define QUEUE_TYPE_FB 0x2U
struct nvgpu_falcon;
struct nvgpu_falcon_queue;
struct nvgpu_engine_mem_queue;
struct nvgpu_falcon_queue_params {
struct nvgpu_engine_mem_queue_params {
/* Queue Type (queue_type) */
u8 queue_type;
/* current write position */
@@ -51,20 +51,20 @@ struct nvgpu_falcon_queue_params {
};
/* queue public functions */
int nvgpu_falcon_queue_init(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue **queue_p,
struct nvgpu_falcon_queue_params params);
bool nvgpu_falcon_queue_is_empty(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue);
int nvgpu_falcon_queue_rewind(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue);
int nvgpu_falcon_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, void *data, u32 size,
int nvgpu_engine_mem_queue_init(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue **queue_p,
struct nvgpu_engine_mem_queue_params params);
bool nvgpu_engine_mem_queue_is_empty(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue);
int nvgpu_engine_mem_queue_rewind(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue);
int nvgpu_engine_mem_queue_pop(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, void *data, u32 size,
u32 *bytes_read);
int nvgpu_falcon_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue *queue, void *data, u32 size);
void nvgpu_falcon_queue_free(struct nvgpu_falcon *flcn,
struct nvgpu_falcon_queue **queue_p);
u32 nvgpu_falcon_queue_get_size(struct nvgpu_falcon_queue *queue);
int nvgpu_engine_mem_queue_push(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue *queue, void *data, u32 size);
void nvgpu_engine_mem_queue_free(struct nvgpu_falcon *flcn,
struct nvgpu_engine_mem_queue **queue_p);
u32 nvgpu_engine_mem_queue_get_size(struct nvgpu_engine_mem_queue *queue);
#endif /* NVGPU_FALCON_QUEUE_H */
#endif /* NVGPU_ENGINE_MEM_QUEUE_H */

@@ -34,7 +34,7 @@
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmuif/gpmu_super_surf_if.h>
#include <nvgpu/falcon.h>
#include <nvgpu/falcon_queue.h>
#include <nvgpu/engine_mem_queue.h>
#define nvgpu_pmu_dbg(g, fmt, args...) \
nvgpu_log(g, gpu_dbg_pmu, fmt, ##args)
@@ -299,7 +299,7 @@ struct pmu_sequence {
void *cb_params;
/* fb queue that is associated with this seq */
struct nvgpu_falcon_fb_queue *cmd_queue;
struct nvgpu_engine_fb_queue *cmd_queue;
/* fbq element that is associated with this seq */
u8 *fbq_work_buffer;
u32 fbq_element_index;
@@ -352,10 +352,10 @@ struct nvgpu_pmu {
struct pmu_sha1_gid gid_info;
struct nvgpu_falcon_queue *queue[PMU_QUEUE_COUNT];
struct nvgpu_engine_mem_queue *queue[PMU_QUEUE_COUNT];
u32 queue_type;
struct nvgpu_falcon_fb_queue *fb_queue[PMU_QUEUE_COUNT];
struct nvgpu_engine_fb_queue *fb_queue[PMU_QUEUE_COUNT];
struct pmu_sequence *seq;
unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE];

@@ -30,7 +30,7 @@
#include <nvgpu/lock.h>
#include <nvgpu/flcnif_cmn.h>
#include <nvgpu/falcon.h>
#include <nvgpu/falcon_queue.h>
#include <nvgpu/engine_mem_queue.h>
#include <nvgpu/sec2if/sec2_cmd_if.h>
#include <nvgpu/sec2if/sec2_if_sec2.h>
@@ -72,7 +72,7 @@ struct nvgpu_sec2 {
struct nvgpu_falcon *flcn;
u32 falcon_id;
struct nvgpu_falcon_queue *queue[SEC2_QUEUE_NUM];
struct nvgpu_engine_mem_queue *queue[SEC2_QUEUE_NUM];
struct sec2_sequence *seq;
unsigned long sec2_seq_tbl[SEC2_SEQ_TBL_SIZE];

@@ -27,7 +27,7 @@
#include <nvgpu/io.h>
#include <nvgpu/timers.h>
#include <nvgpu/falcon.h>
#include <nvgpu/falcon_queue.h>
#include <nvgpu/engine_mem_queue.h>
#include <nvgpu/sec2.h>
#include "sec2_tu104.h"
@@ -372,7 +372,7 @@ bool tu104_sec2_is_interrupted(struct nvgpu_sec2 *sec2)
void tu104_sec2_isr(struct gk20a *g)
{
struct nvgpu_sec2 *sec2 = &g->sec2;
struct nvgpu_falcon_queue *queue;
struct nvgpu_engine_mem_queue *queue;
u32 intr, mask;
bool recheck = false;
@@ -419,7 +419,7 @@ void tu104_sec2_isr(struct gk20a *g)
if (recheck) {
queue = sec2->queue[SEC2_NV_MSGQ_LOG_ID];
if (!nvgpu_falcon_queue_is_empty(sec2->flcn, queue)) {
if (!nvgpu_engine_mem_queue_is_empty(sec2->flcn, queue)) {
gk20a_writel(g, psec_falcon_irqsset_r(),
psec_falcon_irqsset_swgen0_set_f());
}