Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: falcon engine EMEM queue support
- Removed the _dmem postfix from functions that are common to DMEM and
  EMEM queues, and made changes as needed.
- Defined flcn_queue_push_emem() & flcn_queue_pop_emem() to write/read
  queue data to/from EMEM.
- Defined flcn_queue_init_emem_queue() to assign the EMEM-specific
  functions that support the EMEM queue type.
- Defined QUEUE_TYPE_DMEM to support DMEM-based queues.
- Defined QUEUE_TYPE_EMEM to support EMEM-based queues.
- Modified nvgpu_flcn_queue_init() to call the queue-type-specific
  flcn_queue_init_dmem/emem_queue() function to assign its ops.

JIRA NVGPU-1161

Change-Id: I06333fa318b7ca4137c977ad63f5a857e7b36cc8
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1841084
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 4dafb2e492
parent 628e2c7901
committed by mobile promotions
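In effect, a falcon queue now records its backing store in a queue_type field and has its ops bound at init time. A minimal caller-side sketch of the new flow, assuming only the fields and helpers added in the diff below (the id and oflag values are placeholders, and the wrapper function is hypothetical):

/*
 * Sketch only: bring up an EMEM-backed falcon queue using the
 * interfaces added by this change. The id/oflag values are
 * hypothetical; error handling beyond the return code is elided.
 */
static int init_emem_cmd_queue(struct nvgpu_falcon *flcn,
        struct nvgpu_falcon_queue *queue)
{
        queue->id = 0;                        /* placeholder queue id */
        queue->oflag = OFLAG_WRITE;           /* host pushes commands */
        queue->queue_type = QUEUE_TYPE_EMEM;  /* select EMEM ops */

        /*
         * Binds head/tail/has_room/rewind to the common ops and
         * push/pop to the EMEM accessors; returns -EINVAL for an
         * unrecognized queue_type.
         */
        return nvgpu_flcn_queue_init(flcn, queue);
}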
@@ -25,8 +25,8 @@
 #include <nvgpu/pmu.h>
 #include <nvgpu/falcon.h>

-/* DMEM-Q specific ops */
-static int flcn_queue_head_dmem(struct nvgpu_falcon *flcn,
+/* common falcon queue ops */
+static int flcn_queue_head(struct nvgpu_falcon *flcn,
         struct nvgpu_falcon_queue *queue, u32 *head, bool set)
 {
         int err = -ENOSYS;
@@ -39,7 +39,7 @@ static int flcn_queue_head_dmem(struct nvgpu_falcon *flcn,
         return err;
 }

-static int flcn_queue_tail_dmem(struct nvgpu_falcon *flcn,
+static int flcn_queue_tail(struct nvgpu_falcon *flcn,
         struct nvgpu_falcon_queue *queue, u32 *tail, bool set)
 {
         int err = -ENOSYS;
@@ -52,7 +52,7 @@ static int flcn_queue_tail_dmem(struct nvgpu_falcon *flcn,
         return err;
 }

-static bool flcn_queue_has_room_dmem(struct nvgpu_falcon *flcn,
+static bool flcn_queue_has_room(struct nvgpu_falcon *flcn,
         struct nvgpu_falcon_queue *queue, u32 size, bool *need_rewind)
 {
         u32 q_head = 0;
@@ -97,6 +97,124 @@ exit:
         return size <= q_free;
 }

+static int flcn_queue_rewind(struct nvgpu_falcon *flcn,
+        struct nvgpu_falcon_queue *queue)
+{
+        struct gk20a *g = flcn->g;
+        struct pmu_cmd cmd;
+        int err = 0;
+
+        if (queue->oflag == OFLAG_WRITE) {
+                cmd.hdr.unit_id = PMU_UNIT_REWIND;
+                cmd.hdr.size = (u8)PMU_CMD_HDR_SIZE;
+                err = queue->push(flcn, queue, &cmd, cmd.hdr.size);
+                if (err != 0) {
+                        nvgpu_err(g, "flcn-%d queue-%d, rewind request failed",
+                                flcn->flcn_id, queue->id);
+                        goto exit;
+                } else {
+                        nvgpu_pmu_dbg(g, "flcn-%d queue-%d, rewinded",
+                                flcn->flcn_id, queue->id);
+                }
+        }
+
+        /* update queue position */
+        queue->position = queue->offset;
+
+        if (queue->oflag == OFLAG_READ) {
+                err = queue->tail(flcn, queue, &queue->position,
+                        QUEUE_SET);
+                if (err != 0) {
+                        nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
+                                flcn->flcn_id, queue->id);
+                        goto exit;
+                }
+        }
+
+exit:
+        return err;
+}
+
+/* EMEM-Q specific ops */
+static int flcn_queue_push_emem(struct nvgpu_falcon *flcn,
+        struct nvgpu_falcon_queue *queue, void *data, u32 size)
+{
+        int err = 0;
+
+        err = nvgpu_flcn_copy_to_emem(flcn, queue->position, data, size, 0);
+        if (err != 0) {
+                nvgpu_err(flcn->g, "flcn-%d, queue-%d", flcn->flcn_id,
+                        queue->id);
+                nvgpu_err(flcn->g, "emem queue write failed");
+                goto exit;
+        }
+
+        queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+
+exit:
+        return err;
+}
+
+static int flcn_queue_pop_emem(struct nvgpu_falcon *flcn,
+        struct nvgpu_falcon_queue *queue, void *data, u32 size,
+        u32 *bytes_read)
+{
+        struct gk20a *g = flcn->g;
+        u32 q_tail = queue->position;
+        u32 q_head = 0;
+        u32 used = 0;
+        int err = 0;
+
+        *bytes_read = 0;
+
+        err = queue->head(flcn, queue, &q_head, QUEUE_GET);
+        if (err != 0) {
+                nvgpu_err(flcn->g, "flcn-%d, queue-%d, head GET failed",
+                        flcn->flcn_id, queue->id);
+                goto exit;
+        }
+
+        if (q_head == q_tail) {
+                goto exit;
+        } else if (q_head > q_tail) {
+                used = q_head - q_tail;
+        } else {
+                used = queue->offset + queue->size - q_tail;
+        }
+
+        if (size > used) {
+                nvgpu_warn(g, "queue size smaller than request read");
+                size = used;
+        }
+
+        err = nvgpu_flcn_copy_from_emem(flcn, q_tail, data, size, 0);
+        if (err != 0) {
+                nvgpu_err(g, "flcn-%d, queue-%d", flcn->flcn_id,
+                        queue->id);
+                nvgpu_err(flcn->g, "emem queue read failed");
+                goto exit;
+        }
+
+        queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+        *bytes_read = size;
+
+exit:
+        return err;
+}
+
+/* assign EMEM queue type specific ops */
+static void flcn_queue_init_emem_queue(struct nvgpu_falcon *flcn,
+        struct nvgpu_falcon_queue *queue)
+{
+        queue->head = flcn_queue_head;
+        queue->tail = flcn_queue_tail;
+        queue->has_room = flcn_queue_has_room;
+        queue->rewind = flcn_queue_rewind;
+        queue->push = flcn_queue_push_emem;
+        queue->pop = flcn_queue_pop_emem;
+}
+
 /* DMEM-Q specific ops */
 static int flcn_queue_push_dmem(struct nvgpu_falcon *flcn,
         struct nvgpu_falcon_queue *queue, void *data, u32 size)
 {
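flcn_queue_pop_emem() above treats the queue as a ring inside the window [offset, offset + size). To make the wrap-around branch concrete, here is a standalone sketch of the same used-bytes arithmetic (hypothetical values and names, not driver code):

#include <stdio.h>

/*
 * Mirrors the used-space logic in flcn_queue_pop_emem(): when the head
 * has wrapped behind the tail, only the bytes from the tail up to the
 * end of the queue window are readable in this pass.
 */
static unsigned int queue_used_bytes(unsigned int offset, unsigned int size,
        unsigned int head, unsigned int tail)
{
        if (head == tail)
                return 0;                       /* queue empty */
        else if (head > tail)
                return head - tail;             /* contiguous data */
        else
                return offset + size - tail;    /* data up to wrap point */
}

int main(void)
{
        /* Hypothetical 0x100-byte queue window at offset 0x1000. */
        printf("%#x\n", queue_used_bytes(0x1000, 0x100, 0x1050, 0x1020)); /* 0x30 */
        printf("%#x\n", queue_used_bytes(0x1000, 0x100, 0x1010, 0x10f0)); /* 0x10 */
        return 0;
}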
@@ -163,54 +281,16 @@ exit:
         return err;
 }

-static int flcn_queue_rewind_dmem(struct nvgpu_falcon *flcn,
-        struct nvgpu_falcon_queue *queue)
-{
-        struct gk20a *g = flcn->g;
-        struct pmu_cmd cmd;
-        int err = 0;
-
-        if (queue->oflag == OFLAG_WRITE) {
-                cmd.hdr.unit_id = PMU_UNIT_REWIND;
-                cmd.hdr.size = (u8)PMU_CMD_HDR_SIZE;
-                err = queue->push(flcn, queue, &cmd, cmd.hdr.size);
-                if (err != 0) {
-                        nvgpu_err(g, "flcn-%d queue-%d, rewind request failed",
-                                flcn->flcn_id, queue->id);
-                        goto exit;
-                } else {
-                        nvgpu_pmu_dbg(g, "flcn-%d queue-%d, rewinded",
-                                flcn->flcn_id, queue->id);
-                }
-        }
-
-        /* update queue position */
-        queue->position = queue->offset;
-
-        if (queue->oflag == OFLAG_READ) {
-                err = queue->tail(flcn, queue, &queue->position,
-                        QUEUE_SET);
-                if (err != 0) {
-                        nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
-                                flcn->flcn_id, queue->id);
-                        goto exit;
-                }
-        }
-
-exit:
-        return err;
-}
-
 /* assign DMEM queue type specific ops */
 static void flcn_queue_init_dmem_queue(struct nvgpu_falcon *flcn,
         struct nvgpu_falcon_queue *queue)
 {
-        queue->head = flcn_queue_head_dmem;
-        queue->tail = flcn_queue_tail_dmem;
-        queue->has_room = flcn_queue_has_room_dmem;
+        queue->head = flcn_queue_head;
+        queue->tail = flcn_queue_tail;
+        queue->has_room = flcn_queue_has_room;
         queue->push = flcn_queue_push_dmem;
         queue->pop = flcn_queue_pop_dmem;
-        queue->rewind = flcn_queue_rewind_dmem;
+        queue->rewind = flcn_queue_rewind;
 }

 static int flcn_queue_prepare_write(struct nvgpu_falcon *flcn,
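Note why flcn_queue_rewind() could become common in the first place: it never touches DMEM or EMEM directly; it issues the rewind command through queue->push, so the indirection picks the right backend automatically. A generic, self-contained sketch of that dispatch pattern (demo names, not the driver's):

#include <stdio.h>
#include <string.h>

/* Stand-ins for the DMEM and EMEM copy primitives. */
static int demo_push_dmem(char *buf, const char *data, size_t n)
{
        printf("dmem write\n");
        memcpy(buf, data, n);
        return 0;
}

static int demo_push_emem(char *buf, const char *data, size_t n)
{
        printf("emem write\n");
        memcpy(buf, data, n);
        return 0;
}

struct demo_queue {
        char storage[64];
        int (*push)(char *buf, const char *data, size_t n);
};

/* Backend-agnostic logic, analogous to flcn_queue_rewind(): it only
 * ever calls through the push pointer. */
static int demo_rewind(struct demo_queue *q)
{
        return q->push(q->storage, "REWIND", sizeof("REWIND"));
}

int main(void)
{
        struct demo_queue dmem_q = { .push = demo_push_dmem };
        struct demo_queue emem_q = { .push = demo_push_emem };

        /* Same rewind body, two different backends. */
        printf("%d %d\n", demo_rewind(&dmem_q), demo_rewind(&emem_q));
        return 0;
}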
@@ -403,13 +483,21 @@ int nvgpu_flcn_queue_init(struct nvgpu_falcon *flcn,
                 flcn->flcn_id, queue->id, queue->index,
                 queue->offset, queue->size);

-        /* init mutex */
-        err = nvgpu_mutex_init(&queue->mutex);
-        if (err != 0) {
-                goto exit;
-        }
-
-        flcn_queue_init_dmem_queue(flcn, queue);
+        switch (queue->queue_type) {
+        case QUEUE_TYPE_DMEM:
+                flcn_queue_init_dmem_queue(flcn, queue);
+                break;
+        case QUEUE_TYPE_EMEM:
+                flcn_queue_init_emem_queue(flcn, queue);
+                break;
+        default:
+                err = -EINVAL;
+                goto exit;
+                break;
+        }
+
+        /* init mutex */
+        err = nvgpu_mutex_init(&queue->mutex);

 exit:
         if (err != 0) {
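The switch in nvgpu_flcn_queue_init() is also the single place a future backing store would hook in. Purely illustrative, assuming a hypothetical QUEUE_TYPE_FB define and init helper existed (neither is part of this change):

/*
 * Illustration only: how a hypothetical third backing store would
 * slot into the dispatch above. QUEUE_TYPE_FB and
 * flcn_queue_init_fb_queue() are invented for this sketch.
 */
static int bind_queue_ops(struct nvgpu_falcon *flcn,
        struct nvgpu_falcon_queue *queue)
{
        int err = 0;

        switch (queue->queue_type) {
        case QUEUE_TYPE_DMEM:
                flcn_queue_init_dmem_queue(flcn, queue);
                break;
        case QUEUE_TYPE_EMEM:
                flcn_queue_init_emem_queue(flcn, queue);
                break;
        case QUEUE_TYPE_FB:     /* hypothetical third backend */
                flcn_queue_init_fb_queue(flcn, queue);
                break;
        default:
                err = -EINVAL;  /* unknown type: fail before ops bind */
                break;
        }

        return err;
}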
@@ -134,6 +134,7 @@ int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu,
         queue = &pmu->queue[id];
         queue->id = id;
         queue->oflag = oflag;
+        queue->queue_type = QUEUE_TYPE_DMEM;
         g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init);

         err = nvgpu_flcn_queue_init(pmu->flcn, queue);
@@ -84,7 +84,7 @@
 #define FALCON_MAILBOX_0 0x0
 #define FALCON_MAILBOX_1 0x1
 #define FALCON_MAILBOX_COUNT 0x02
-#define FALCON_BLOCK_SIZE 0x100
+#define FALCON_BLOCK_SIZE 0x100U

 #define GET_IMEM_TAG(IMEM_ADDR) (IMEM_ADDR >> 8)
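The only change in the hunk above is the U suffix on FALCON_BLOCK_SIZE, which makes the constant unsigned so size arithmetic against u32 values stays in one signedness. A small sketch of the kind of expression this keeps clean (the helper is hypothetical):

#define FALCON_BLOCK_SIZE 0x100U

/*
 * unsigned / unsigned throughout: no implicit signed-to-unsigned
 * conversion and nothing for -Wsign-compare to flag.
 */
static unsigned int blocks_needed(unsigned int bytes)
{
        return (bytes + FALCON_BLOCK_SIZE - 1U) / FALCON_BLOCK_SIZE;
}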
@@ -168,6 +168,10 @@ struct gk20a;
 struct nvgpu_falcon;
 struct nvgpu_falcon_bl_info;

+/* Queue Type */
+#define QUEUE_TYPE_DMEM 0x0U
+#define QUEUE_TYPE_EMEM 0x1U
+
 struct nvgpu_falcon_queue {
+
+        /* Queue Type (queue_type) */