Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: falcon queue support
-Renamed "struct pmu_queue" to "struct nvgpu_falcon_queue" & moved to falcon.h -Renamed pmu_queue_* functions to flcn_queue_* & moved to new file falcon_queue.c -Created ops for queue functions in struct nvgpu_falcon_queue to support different queue types like DMEM/FB-Q. -Created ops in nvgpu_falcon_engine_dependency_ops to add engine specific queue functionality & assigned correct HAL functions in hal*.c file. -Made changes in dependent functions as needed to replace struct pmu_queue & calling queue functions using nvgpu_falcon_queue data structure. -Replaced input param "struct nvgpu_pmu *pmu" with "struct gk20a *g" for pmu ops pmu_queue_head/pmu_queue_tail & also for functions gk20a_pmu_queue_head()/ gk20a_pmu_queue_tail(). -Made changes in nvgpu_pmu_queue_init() to use nvgpu_falcon_queue for PMU queue. -Modified Makefile to include falcon_queue.o -Modified Makefile.sources to include falcon_queue.c Change-Id: I956328f6631b7154267fd5a29eaa1826190d99d1 Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1776070 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: d32692ae24
Commit: 2d454db04f
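The change description above introduces a generic falcon queue layer. The sketch below is an illustration only and is not part of the commit: it shows how a caller might describe a queue and post a command through the new public API, mirroring the flow that nvgpu_pmu_queue_init() and pmu_write_cmd() follow later in this diff. The helper name and the geometry values (index, offset, size) are placeholders; in the real driver they come from the PMU init message.

#include <nvgpu/falcon.h>
#include <nvgpu/pmu.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>

/* hypothetical helper, for illustration only */
static int example_post_cmd(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, struct pmu_cmd *cmd)
{
	int err;

	/* describe the queue before handing it to the common code */
	queue->id = PMU_COMMAND_QUEUE_HPQ;	/* logical queue identifier */
	queue->index = 0;			/* physical queue index (placeholder) */
	queue->offset = 0x1000;			/* DMEM offset (placeholder) */
	queue->size = 0x100;			/* size in bytes (placeholder) */
	queue->oflag = OFLAG_WRITE;		/* command queue: nvgpu pushes */

	/* initializes the queue mutex and plugs in the DMEM queue ops */
	err = nvgpu_flcn_queue_init(flcn, queue);
	if (err != 0) {
		return err;
	}

	/* locked push; returns -EAGAIN when the queue has no room */
	return nvgpu_flcn_queue_push(flcn, queue, cmd, cmd->hdr.size);
}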
Makefile:
@@ -170,6 +170,7 @@ nvgpu-y += \
	common/rbtree.o \
	common/vbios/bios.o \
	common/falcon/falcon.o \
	common/falcon/falcon_queue.o \
	common/pmu/pmu.o \
	common/pmu/pmu_ipc.o \
	common/pmu/pmu_fw.o \

Makefile.sources:
@@ -52,6 +52,7 @@ srcs := common/mm/nvgpu_allocator.c \
	common/ecc.c \
	common/vbios/bios.c \
	common/falcon/falcon.c \
	common/falcon/falcon_queue.c \
	common/pmu/pmu.c \
	common/pmu/pmu_ipc.c \
	common/pmu/pmu_fw.c \
drivers/gpu/nvgpu/common/falcon/falcon_queue.c (new file, 422 lines)
@@ -0,0 +1,422 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/lock.h>
#include <nvgpu/timers.h>
#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>

/* DMEM-Q specific ops */
static int flcn_queue_head_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, u32 *head, bool set)
{
	int err = -ENOSYS;

	if (flcn->flcn_engine_dep_ops.queue_head != NULL) {
		err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue,
			head, set);
	}

	return err;
}

static int flcn_queue_tail_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, u32 *tail, bool set)
{
	int err = -ENOSYS;

	if (flcn->flcn_engine_dep_ops.queue_tail != NULL) {
		err = flcn->flcn_engine_dep_ops.queue_tail(flcn->g, queue,
			tail, set);
	}

	return err;
}

static bool flcn_queue_has_room_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, u32 size, bool *need_rewind)
{
	u32 q_head = 0;
	u32 q_tail = 0;
	u32 q_free = 0;
	bool q_rewind = false;
	int err = 0;

	size = ALIGN(size, QUEUE_ALIGNMENT);

	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "queue head GET failed");
		goto exit;
	}

	err = queue->tail(flcn, queue, &q_tail, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "queue tail GET failed");
		goto exit;
	}

	if (q_head >= q_tail) {
		q_free = queue->offset + queue->size - q_head;
		q_free -= (u32)PMU_CMD_HDR_SIZE;

		if (size > q_free) {
			q_rewind = true;
			q_head = queue->offset;
		}
	}

	if (q_head < q_tail) {
		q_free = q_tail - q_head - 1U;
	}

	if (need_rewind != NULL) {
		*need_rewind = q_rewind;
	}

exit:
	return size <= q_free;
}

static int flcn_queue_push_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size)
{
	int err = 0;

	err = nvgpu_flcn_copy_to_dmem(flcn, queue->position, data, size, 0);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d, queue-%d", flcn->flcn_id,
			queue->id);
		nvgpu_err(flcn->g, "dmem queue write failed");
		goto exit;
	}

	queue->position += ALIGN(size, QUEUE_ALIGNMENT);

exit:
	return err;
}

static int flcn_queue_pop_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size,
	u32 *bytes_read)
{
	struct gk20a *g = flcn->g;
	u32 q_tail = queue->position;
	u32 q_head = 0;
	u32 used = 0;
	int err = 0;

	*bytes_read = 0;

	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d, queue-%d, head GET failed",
			flcn->flcn_id, queue->id);
		goto exit;
	}

	if (q_head == q_tail) {
		goto exit;
	} else if (q_head > q_tail) {
		used = q_head - q_tail;
	} else {
		used = queue->offset + queue->size - q_tail;
	}

	if (size > used) {
		nvgpu_warn(g, "queue size smaller than request read");
		size = used;
	}

	err = nvgpu_flcn_copy_from_dmem(flcn, q_tail, data, size, 0);
	if (err != 0) {
		nvgpu_err(g, "flcn-%d, queue-%d", flcn->flcn_id,
			queue->id);
		nvgpu_err(flcn->g, "dmem queue read failed");
		goto exit;
	}

	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
	*bytes_read = size;

exit:
	return err;
}

static int flcn_queue_rewind_dmem(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	struct gk20a *g = flcn->g;
	struct pmu_cmd cmd;
	int err = 0;

	if (queue->oflag == OFLAG_WRITE) {
		cmd.hdr.unit_id = PMU_UNIT_REWIND;
		cmd.hdr.size = (u8)PMU_CMD_HDR_SIZE;
		err = queue->push(flcn, queue, &cmd, cmd.hdr.size);
		if (err != 0) {
			nvgpu_err(g, "flcn-%d queue-%d, rewind request failed",
				flcn->flcn_id, queue->id);
			goto exit;
		} else {
			nvgpu_pmu_dbg(g, "flcn-%d queue-%d, rewinded",
				flcn->flcn_id, queue->id);
		}
	}

	/* update queue position */
	queue->position = queue->offset;

	if (queue->oflag == OFLAG_READ) {
		err = queue->tail(flcn, queue, &queue->position,
			QUEUE_SET);
		if (err != 0) {
			nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
				flcn->flcn_id, queue->id);
			goto exit;
		}
	}

exit:
	return err;
}

/* assign DMEM queue type specific ops */
static void flcn_queue_init_dmem_queue(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	queue->head = flcn_queue_head_dmem;
	queue->tail = flcn_queue_tail_dmem;
	queue->has_room = flcn_queue_has_room_dmem;
	queue->push = flcn_queue_push_dmem;
	queue->pop = flcn_queue_pop_dmem;
	queue->rewind = flcn_queue_rewind_dmem;
}

static int flcn_queue_prepare_write(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, u32 size)
{
	bool q_rewind = false;
	int err = 0;

	/* make sure there's enough free space for the write */
	if (!queue->has_room(flcn, queue, size, &q_rewind)) {
		nvgpu_pmu_dbg(flcn->g, "queue full: queue-id %d: index %d",
			queue->id, queue->index);
		err = -EAGAIN;
		goto exit;
	}

	err = queue->head(flcn, queue, &queue->position, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, position GET failed",
			flcn->flcn_id, queue->id);
		goto exit;
	}

	if (q_rewind) {
		err = queue->rewind(flcn, queue);
	}

exit:
	return err;
}

/* queue public functions */

/* queue push operation with lock */
int nvgpu_flcn_queue_push(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size)
{
	int err = 0;

	if (queue->oflag != OFLAG_WRITE) {
		nvgpu_err(flcn->g, "flcn-%d, queue-%d not opened for write",
			flcn->flcn_id, queue->id);
		err = -EINVAL;
		goto exit;
	}

	/* acquire mutex */
	nvgpu_mutex_acquire(&queue->mutex);

	err = flcn_queue_prepare_write(flcn, queue, size);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, fail to open",
			flcn->flcn_id, queue->id);
		goto unlock_mutex;
	}

	err = queue->push(flcn, queue, data, size);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, fail to write",
			flcn->flcn_id, queue->id);
	}

	err = queue->head(flcn, queue, &queue->position, QUEUE_SET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
			flcn->flcn_id, queue->id);
	}

unlock_mutex:
	/* release mutex */
	nvgpu_mutex_release(&queue->mutex);
exit:
	return err;
}

/* queue pop operation with lock */
int nvgpu_flcn_queue_pop(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size,
	u32 *bytes_read)
{
	int err = 0;

	if (queue->oflag != OFLAG_READ) {
		nvgpu_err(flcn->g, "flcn-%d, queue-%d, not opened for read",
			flcn->flcn_id, queue->id);
		err = -EINVAL;
		goto exit;
	}

	/* acquire mutex */
	nvgpu_mutex_acquire(&queue->mutex);

	err = queue->tail(flcn, queue, &queue->position, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, position GET failed",
			flcn->flcn_id, queue->id);
		goto unlock_mutex;
	}

	err = queue->pop(flcn, queue, data, size, bytes_read);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, fail to read",
			flcn->flcn_id, queue->id);
	}

	err = queue->tail(flcn, queue, &queue->position, QUEUE_SET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, position SET failed",
			flcn->flcn_id, queue->id);
	}

unlock_mutex:
	/* release mutex */
	nvgpu_mutex_release(&queue->mutex);
exit:
	return err;
}

int nvgpu_flcn_queue_rewind(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	int err = 0;

	/* acquire mutex */
	nvgpu_mutex_acquire(&queue->mutex);

	if (queue->rewind != NULL) {
		err = queue->rewind(flcn, queue);
	}

	/* release mutex */
	nvgpu_mutex_release(&queue->mutex);

	return err;
}

/* queue is_empty check with lock */
bool nvgpu_flcn_queue_is_empty(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	u32 q_head = 0;
	u32 q_tail = 0;
	int err = 0;

	/* acquire mutex */
	nvgpu_mutex_acquire(&queue->mutex);

	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, head GET failed",
			flcn->flcn_id, queue->id);
		goto exit;
	}

	err = queue->tail(flcn, queue, &q_tail, QUEUE_GET);
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, tail GET failed",
			flcn->flcn_id, queue->id);
		goto exit;
	}

exit:
	/* release mutex */
	nvgpu_mutex_release(&queue->mutex);

	return q_head == q_tail;
}

void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	nvgpu_log(flcn->g, gpu_dbg_pmu, "flcn id-%d q-id %d: index %d ",
		flcn->flcn_id, queue->id, queue->index);

	/* destroy mutex */
	nvgpu_mutex_destroy(&queue->mutex);

	/* clear data */
	memset(queue, 0, sizeof(struct nvgpu_falcon_queue));
}

int nvgpu_flcn_queue_init(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue)
{
	struct gk20a *g = flcn->g;
	int err = 0;

	nvgpu_log(g, gpu_dbg_pmu,
		"flcn id-%d q-id %d: index %d, offset 0x%08x, size 0x%08x",
		flcn->flcn_id, queue->id, queue->index,
		queue->offset, queue->size);

	/* init mutex */
	err = nvgpu_mutex_init(&queue->mutex);
	if (err != 0) {
		goto exit;
	}

	flcn_queue_init_dmem_queue(flcn, queue);

exit:
	if (err != 0) {
		nvgpu_err(flcn->g, "flcn-%d queue-%d, init failed",
			flcn->flcn_id, queue->id);
	}

	return err;
}
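Worked example (illustration only, not part of the commit) of the free-space check in flcn_queue_has_room_dmem() above. The standalone user-space program below reproduces the same circular-buffer arithmetic with arbitrary numbers; HDR_SIZE stands in for PMU_CMD_HDR_SIZE, and the in-tree version additionally aligns the request size to QUEUE_ALIGNMENT before the check.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 4u /* stands in for PMU_CMD_HDR_SIZE */

static bool has_room(uint32_t offset, uint32_t qsize, uint32_t head,
	uint32_t tail, uint32_t size, bool *need_rewind)
{
	uint32_t q_free = 0;
	bool rewind = false;

	if (head >= tail) {
		/* free space runs from head to the end of the queue,
		 * keeping HDR_SIZE spare for a rewind command */
		q_free = offset + qsize - head - HDR_SIZE;
		if (size > q_free) {
			rewind = true;
			head = offset; /* pretend the writer wrapped */
		}
	}
	if (head < tail) {
		q_free = tail - head - 1u;
	}

	*need_rewind = rewind;
	return size <= q_free;
}

int main(void)
{
	bool rewind = false;
	/* queue at DMEM offset 0x800, 0x100 bytes long, writer near the end */
	bool fits = has_room(0x800, 0x100, 0x8f0, 0x830, 0x20, &rewind);

	printf("fits=%d need_rewind=%d\n", fits, rewind); /* fits=1 need_rewind=1 */
	return 0;
}

With these values the writer sits near the end of DMEM, so the 0x20-byte request only fits after a rewind back to the queue start; that is exactly the case flcn_queue_prepare_write() handles by calling queue->rewind() when need_rewind is reported.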
@@ -517,8 +517,9 @@ int nvgpu_pmu_destroy(struct gk20a *g)
	pmu->isr_enabled = false;
	nvgpu_mutex_release(&pmu->isr_mutex);

	for (i = 0; i < PMU_QUEUE_COUNT; i++)
		nvgpu_mutex_destroy(&pmu->queue[i].mutex);
	for (i = 0; i < PMU_QUEUE_COUNT; i++) {
		nvgpu_flcn_queue_free(pmu->flcn, &pmu->queue[i]);
	}

	nvgpu_pmu_state_change(g, PMU_STATE_OFF, false);
	pmu->pmu_ready = false;

@@ -848,7 +848,8 @@ static void perfmon_cmd_init_set_mov_avg_v1(struct pmu_perfmon_cmd *pc,
	init->samples_in_moving_avg = value;
}

static void get_pmu_init_msg_pmu_queue_params_v1(struct pmu_queue *queue,
static void get_pmu_init_msg_pmu_queue_params_v1(
	struct nvgpu_falcon_queue *queue,
	u32 id, void *pmu_init_msg)
{
	struct pmu_init_msg_pmu_v1 *init =
@@ -859,7 +860,8 @@ static void get_pmu_init_msg_pmu_queue_params_v1(struct pmu_queue *queue,
	queue->size = init->queue_info[id].size;
}

static void get_pmu_init_msg_pmu_queue_params_v4(struct pmu_queue *queue,
static void get_pmu_init_msg_pmu_queue_params_v4(
	struct nvgpu_falcon_queue *queue,
	u32 id, void *pmu_init_msg)
{
	struct pmu_init_msg_pmu_v4 *init = pmu_init_msg;
@@ -885,7 +887,8 @@ static void get_pmu_init_msg_pmu_queue_params_v4(struct pmu_queue *queue,
	queue->offset = init->queue_offset + current_ptr;
}

static void get_pmu_init_msg_pmu_queue_params_v5(struct pmu_queue *queue,
static void get_pmu_init_msg_pmu_queue_params_v5(
	struct nvgpu_falcon_queue *queue,
	u32 id, void *pmu_init_msg)
{
	struct pmu_init_msg_pmu_v5 *init = pmu_init_msg;
@@ -911,7 +914,8 @@ static void get_pmu_init_msg_pmu_queue_params_v5(struct pmu_queue *queue,
	queue->offset = init->queue_offset + current_ptr;
}

static void get_pmu_init_msg_pmu_queue_params_v3(struct pmu_queue *queue,
static void get_pmu_init_msg_pmu_queue_params_v3(
	struct nvgpu_falcon_queue *queue,
	u32 id, void *pmu_init_msg)
{
	struct pmu_init_msg_pmu_v3 *init =
@@ -26,6 +26,7 @@
#include <nvgpu/timers.h>
#include <nvgpu/bug.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/falcon.h>

#include "gk20a/gk20a.h"

@@ -100,295 +101,56 @@ int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
	return g->ops.pmu.pmu_mutex_release(pmu, id, token);
}

/* queue */
/* PMU falcon queue init */
int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu,
	u32 id, union pmu_init_msg_pmu *init)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct pmu_queue *queue = &pmu->queue[id];
	int err;
	struct nvgpu_falcon_queue *queue = NULL;
	u32 oflag = 0;
	int err = 0;

	err = nvgpu_mutex_init(&queue->mutex);
	if (err)
		return err;
	if (PMU_IS_COMMAND_QUEUE(id)) {
		/*
		 * set OFLAG_WRITE for command queue
		 * i.e, push from nvgpu &
		 * pop form falcon ucode
		 */
		oflag = OFLAG_WRITE;
	} else if (PMU_IS_MESSAGE_QUEUE(id)) {
		/*
		 * set OFLAG_READ for message queue
		 * i.e, push from falcon ucode &
		 * pop form nvgpu
		 */
		oflag = OFLAG_READ;
	} else {
		nvgpu_err(g, "invalid queue-id %d", id);
		err = -EINVAL;
		goto exit;
	}

	queue->id = id;
	/* init queue parameters */
	queue = &pmu->queue[id];
	queue->id = id;
	queue->oflag = oflag;
	g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(queue, id, init);
	queue->mutex_id = id;

	nvgpu_pmu_dbg(g, "queue %d: index %d, offset 0x%08x, size 0x%08x",
		id, queue->index, queue->offset, queue->size);

	return 0;
}

static int pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
	u32 *head, bool set)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	return g->ops.pmu.pmu_queue_head(pmu, queue, head, set);
}

static int pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
	u32 *tail, bool set)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	return g->ops.pmu.pmu_queue_tail(pmu, queue, tail, set);
}

static inline void pmu_queue_read(struct nvgpu_pmu *pmu,
	u32 offset, u8 *dst, u32 size)
{
	nvgpu_flcn_copy_from_dmem(pmu->flcn, offset, dst, size, 0);
}

static inline void pmu_queue_write(struct nvgpu_pmu *pmu,
	u32 offset, u8 *src, u32 size)
{
	nvgpu_flcn_copy_to_dmem(pmu->flcn, offset, src, size, 0);
}


static int pmu_queue_lock(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue)
{
	int err;

	if (PMU_IS_MESSAGE_QUEUE(queue->id))
		return 0;

	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
		nvgpu_mutex_acquire(&queue->mutex);
		return 0;
	err = nvgpu_flcn_queue_init(pmu->flcn, queue);
	if (err != 0) {
		nvgpu_err(g, "queue-%d init failed", queue->id);
	}

	err = nvgpu_pmu_mutex_acquire(pmu, queue->mutex_id, &queue->mutex_lock);
exit:
	return err;
}

static int pmu_queue_unlock(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue)
{
	int err;

	if (PMU_IS_MESSAGE_QUEUE(queue->id))
		return 0;

	if (PMU_IS_SW_COMMAND_QUEUE(queue->id)) {
		nvgpu_mutex_release(&queue->mutex);
		return 0;
	}

	err = nvgpu_pmu_mutex_release(pmu, queue->mutex_id, &queue->mutex_lock);
	return err;
}

/* called by pmu_read_message, no lock */
bool nvgpu_pmu_queue_is_empty(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue)
{
	u32 head, tail;

	pmu_queue_head(pmu, queue, &head, QUEUE_GET);
	if (queue->opened && queue->oflag == OFLAG_READ)
		tail = queue->position;
	else
		pmu_queue_tail(pmu, queue, &tail, QUEUE_GET);

	return head == tail;
}

static bool pmu_queue_has_room(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue, u32 size, bool *need_rewind)
{
	u32 head, tail;
	bool rewind = false;
	unsigned int free;

	size = ALIGN(size, QUEUE_ALIGNMENT);

	pmu_queue_head(pmu, queue, &head, QUEUE_GET);
	pmu_queue_tail(pmu, queue, &tail, QUEUE_GET);
	if (head >= tail) {
		free = queue->offset + queue->size - head;
		free -= PMU_CMD_HDR_SIZE;

		if (size > free) {
			rewind = true;
			head = queue->offset;
		}
	}

	if (head < tail)
		free = tail - head - 1;

	if (need_rewind)
		*need_rewind = rewind;

	return size <= free;
}

static int pmu_queue_push(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue, void *data, u32 size)
{
	struct gk20a *g = pmu->g;

	nvgpu_log_fn(g, " ");

	if (!queue->opened && queue->oflag == OFLAG_WRITE) {
		nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for write");
		return -EINVAL;
	}

	pmu_queue_write(pmu, queue->position, data, size);
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
	return 0;
}

static int pmu_queue_pop(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue, void *data, u32 size,
	u32 *bytes_read)
{
	u32 head, tail, used;

	*bytes_read = 0;

	if (!queue->opened && queue->oflag == OFLAG_READ) {
		nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for read");
		return -EINVAL;
	}

	pmu_queue_head(pmu, queue, &head, QUEUE_GET);
	tail = queue->position;

	if (head == tail)
		return 0;

	if (head > tail)
		used = head - tail;
	else
		used = queue->offset + queue->size - tail;

	if (size > used) {
		nvgpu_warn(gk20a_from_pmu(pmu),
			"queue size smaller than request read");
		size = used;
	}

	pmu_queue_read(pmu, tail, data, size);
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
	*bytes_read = size;
	return 0;
}

static void pmu_queue_rewind(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct pmu_cmd cmd;

	nvgpu_log_fn(g, " ");

	if (!queue->opened) {
		nvgpu_err(gk20a_from_pmu(pmu), "queue not opened");
		return;
	}

	if (queue->oflag == OFLAG_WRITE) {
		cmd.hdr.unit_id = PMU_UNIT_REWIND;
		cmd.hdr.size = PMU_CMD_HDR_SIZE;
		pmu_queue_push(pmu, queue, &cmd, cmd.hdr.size);
		nvgpu_pmu_dbg(g, "queue %d rewinded", queue->id);
	}

	queue->position = queue->offset;
}

/* open for read and lock the queue */
static int pmu_queue_open_read(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue)
{
	int err;

	err = pmu_queue_lock(pmu, queue);
	if (err)
		return err;

	if (queue->opened)
		BUG();

	pmu_queue_tail(pmu, queue, &queue->position, QUEUE_GET);
	queue->oflag = OFLAG_READ;
	queue->opened = true;

	return 0;
}

/* open for write and lock the queue
 * make sure there's enough free space for the write
 * */
static int pmu_queue_open_write(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue, u32 size)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	bool rewind = false;
	int err;

	err = pmu_queue_lock(pmu, queue);
	if (err)
		return err;

	if (queue->opened)
		BUG();

	if (!pmu_queue_has_room(pmu, queue, size, &rewind)) {
		nvgpu_pmu_dbg(g, "queue full: queue-id %d: index %d",
			queue->id, queue->index);
		pmu_queue_unlock(pmu, queue);
		return -EAGAIN;
	}

	pmu_queue_head(pmu, queue, &queue->position, QUEUE_GET);
	queue->oflag = OFLAG_WRITE;
	queue->opened = true;

	if (rewind)
		pmu_queue_rewind(pmu, queue);

	return 0;
}

/* close and unlock the queue */
static int pmu_queue_close(struct nvgpu_pmu *pmu,
	struct pmu_queue *queue, bool commit)
{
	if (!queue->opened)
		return 0;

	if (commit) {
		if (queue->oflag == OFLAG_READ)
			pmu_queue_tail(pmu, queue,
				&queue->position, QUEUE_SET);
		else
			pmu_queue_head(pmu, queue,
				&queue->position, QUEUE_SET);
	}

	queue->opened = false;

	pmu_queue_unlock(pmu, queue);

	return 0;
}

static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
	struct pmu_msg *msg, struct pmu_payload *payload,
	u32 queue_id)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct pmu_queue *queue;
	struct nvgpu_falcon_queue *queue;
	u32 in_size, out_size;

	if (!PMU_IS_SW_COMMAND_QUEUE(queue_id))
@@ -459,7 +221,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
	u32 queue_id, unsigned long timeout_ms)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct pmu_queue *queue;
	struct nvgpu_falcon_queue *queue;
	struct nvgpu_timeout timeout;
	int err;

@@ -469,22 +231,13 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
	nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);

	do {
		err = pmu_queue_open_write(pmu, queue, cmd->hdr.size);
		err = nvgpu_flcn_queue_push(pmu->flcn, queue, cmd, cmd->hdr.size);
		if (err == -EAGAIN && !nvgpu_timeout_expired(&timeout))
			nvgpu_usleep_range(1000, 2000);
		else
			break;
	} while (1);

	if (err)
		goto clean_up;

	pmu_queue_push(pmu, queue, cmd, cmd->hdr.size);


	err = pmu_queue_close(pmu, queue, true);

clean_up:
	if (err)
		nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
	else
@@ -840,8 +593,9 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
	return err;
}

static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
	struct pmu_msg *msg, int *status)
static bool pmu_read_message(struct nvgpu_pmu *pmu,
	struct nvgpu_falcon_queue *queue,
	struct pmu_msg *msg, int *status)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	u32 read_size, bytes_read;
@@ -849,17 +603,11 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,

	*status = 0;

	if (nvgpu_pmu_queue_is_empty(pmu, queue))
		return false;

	err = pmu_queue_open_read(pmu, queue);
	if (err) {
		nvgpu_err(g, "fail to open queue %d for read", queue->id);
		*status = err;
	if (nvgpu_flcn_queue_is_empty(pmu->flcn, queue)) {
		return false;
	}

	err = pmu_queue_pop(pmu, queue, &msg->hdr,
	err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr,
		PMU_MSG_HDR_SIZE, &bytes_read);
	if (err || bytes_read != PMU_MSG_HDR_SIZE) {
		nvgpu_err(g, "fail to read msg from queue %d", queue->id);
@@ -868,9 +616,14 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
	}

	if (msg->hdr.unit_id == PMU_UNIT_REWIND) {
		pmu_queue_rewind(pmu, queue);
		err = nvgpu_flcn_queue_rewind(pmu->flcn, queue);
		if (err != 0) {
			nvgpu_err(g, "fail to rewind queue %d", queue->id);
			*status = err | -EINVAL;
			goto clean_up;
		}
		/* read again after rewind */
		err = pmu_queue_pop(pmu, queue, &msg->hdr,
		err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->hdr,
			PMU_MSG_HDR_SIZE, &bytes_read);
		if (err || bytes_read != PMU_MSG_HDR_SIZE) {
			nvgpu_err(g,
@@ -889,7 +642,7 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,

	if (msg->hdr.size > PMU_MSG_HDR_SIZE) {
		read_size = msg->hdr.size - PMU_MSG_HDR_SIZE;
		err = pmu_queue_pop(pmu, queue, &msg->msg,
		err = nvgpu_flcn_queue_pop(pmu->flcn, queue, &msg->msg,
			read_size, &bytes_read);
		if (err || bytes_read != read_size) {
			nvgpu_err(g,
@@ -899,19 +652,9 @@ static bool pmu_read_message(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
		}
	}

	err = pmu_queue_close(pmu, queue, true);
	if (err) {
		nvgpu_err(g, "fail to close queue %d", queue->id);
		*status = err;
		return false;
	}

	return true;

clean_up:
	err = pmu_queue_close(pmu, queue, false);
	if (err)
		nvgpu_err(g, "fail to close queue %d", queue->id);
	return false;
}
@@ -649,12 +649,15 @@ void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn)

static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops =
		&flcn->flcn_engine_dep_ops;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset;
		flcn_eng_dep_ops->queue_head = g->ops.pmu.pmu_queue_head;
		flcn_eng_dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail;
		break;
	default:
		/* NULL assignment make sure

@@ -771,7 +771,7 @@ struct gpu_ops {
		u32 (*pmu_allocation_get_fb_size)(
			struct nvgpu_pmu *pmu, void *pmu_alloc_ptr);
		void (*get_pmu_init_msg_pmu_queue_params)(
			struct pmu_queue *queue, u32 id,
			struct nvgpu_falcon_queue *queue, u32 id,
			void *pmu_init_msg);
		void *(*get_pmu_msg_pmu_init_msg_ptr)(
			struct pmu_init_msg *init);
@@ -1003,10 +1003,10 @@ struct gpu_ops {
		u32 (*pmu_get_queue_head_size)(void);
		u32 (*pmu_get_queue_tail_size)(void);
		u32 (*pmu_get_queue_tail)(u32 i);
		int (*pmu_queue_head)(struct nvgpu_pmu *pmu,
			struct pmu_queue *queue, u32 *head, bool set);
		int (*pmu_queue_tail)(struct nvgpu_pmu *pmu,
			struct pmu_queue *queue, u32 *tail, bool set);
		int (*pmu_queue_head)(struct gk20a *g,
			struct nvgpu_falcon_queue *queue, u32 *head, bool set);
		int (*pmu_queue_tail)(struct gk20a *g,
			struct nvgpu_falcon_queue *queue, u32 *tail, bool set);
		void (*pmu_msgq_tail)(struct nvgpu_pmu *pmu,
			u32 *tail, bool set);
		u32 (*pmu_mutex_size)(void);

@@ -377,10 +377,9 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
	return 0;
}

int gk20a_pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *head, bool set)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	u32 queue_head_size = 0;

	if (g->ops.pmu.pmu_get_queue_head_size)
@@ -414,10 +413,9 @@ int gk20a_pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
	return 0;
}

int gk20a_pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *tail, bool set)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	u32 queue_tail_size = 0;

	if (g->ops.pmu.pmu_get_queue_tail_size)
@@ -692,7 +690,7 @@ bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu)
void gk20a_pmu_isr(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_queue *queue;
	struct nvgpu_falcon_queue *queue;
	u32 intr, mask;
	bool recheck = false;

@@ -749,9 +747,10 @@ void gk20a_pmu_isr(struct gk20a *g)

	if (recheck) {
		queue = &pmu->queue[PMU_MESSAGE_QUEUE];
		if (!nvgpu_pmu_queue_is_empty(pmu, queue))
		if (!nvgpu_flcn_queue_is_empty(pmu->flcn, queue)) {
			gk20a_writel(g, pwr_falcon_irqsset_r(),
				pwr_falcon_irqsset_swgen0_set_f());
		}
	}

	nvgpu_mutex_release(&pmu->isr_mutex);

@@ -49,9 +49,9 @@ void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id);
int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);

int gk20a_pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *head, bool set);
int gk20a_pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue,
int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *tail, bool set);
void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set);

@@ -28,12 +28,15 @@

static void gp106_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
{
	struct gk20a *g = flcn->g;
	struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops =
		&flcn->flcn_engine_dep_ops;

	switch (flcn->flcn_id) {
	case FALCON_ID_PMU:
		flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset;
		flcn_eng_dep_ops->queue_head = g->ops.pmu.pmu_queue_head;
		flcn_eng_dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail;
		break;
	case FALCON_ID_SEC2:
		flcn_eng_dep_ops->reset_eng = gp106_sec2_reset;
@@ -167,6 +167,44 @@ struct gk20a;
struct nvgpu_falcon;
struct nvgpu_falcon_bl_info;

struct nvgpu_falcon_queue {

	/* Queue Type (queue_type) */
	u8 queue_type;

	/* used by nvgpu, for command LPQ/HPQ */
	struct nvgpu_mutex mutex;

	/* current write position */
	u32 position;
	/* physical dmem offset where this queue begins */
	u32 offset;
	/* logical queue identifier */
	u32 id;
	/* physical queue index */
	u32 index;
	/* in bytes */
	u32 size;
	/* open-flag */
	u32 oflag;

	/* queue type(DMEM-Q/FB-Q) specific ops */
	int (*rewind)(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_queue *queue);
	int (*pop)(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_queue *queue, void *data, u32 size,
		u32 *bytes_read);
	int (*push)(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_queue *queue, void *data, u32 size);
	bool (*has_room)(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_queue *queue, u32 size,
		bool *need_rewind);
	int (*tail)(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_queue *queue, u32 *tail, bool set);
	int (*head)(struct nvgpu_falcon *flcn,
		struct nvgpu_falcon_queue *queue, u32 *head, bool set);
};

struct nvgpu_falcon_version_ops {
	void (*start_cpu_secure)(struct nvgpu_falcon *flcn);
	void (*write_dmatrfbase)(struct nvgpu_falcon *flcn, u32 addr);
@@ -175,6 +213,11 @@ struct nvgpu_falcon_version_ops {
/* ops which are falcon engine specific */
struct nvgpu_falcon_engine_dependency_ops {
	int (*reset_eng)(struct gk20a *g);
	int (*queue_head)(struct gk20a *g, struct nvgpu_falcon_queue *queue,
		u32 *head, bool set);
	int (*queue_tail)(struct gk20a *g, struct nvgpu_falcon_queue *queue,
		u32 *tail, bool set);
	void (*msgq_tail)(struct gk20a *g, u32 *tail, bool set);
};

struct nvgpu_falcon_ops {
@@ -259,6 +302,21 @@ void nvgpu_flcn_dump_stats(struct nvgpu_falcon *flcn);
int nvgpu_flcn_bl_bootstrap(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_bl_info *bl_info);

/* queue public functions */
int nvgpu_flcn_queue_init(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue);
bool nvgpu_flcn_queue_is_empty(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue);
int nvgpu_flcn_queue_rewind(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue);
int nvgpu_flcn_queue_pop(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size,
	u32 *bytes_read);
int nvgpu_flcn_queue_push(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue, void *data, u32 size);
void nvgpu_flcn_queue_free(struct nvgpu_falcon *flcn,
	struct nvgpu_falcon_queue *queue);

void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id);

#endif /* __FALCON_H__ */
@@ -32,6 +32,7 @@
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/flcnif_cmn.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/falcon.h>

#define nvgpu_pmu_dbg(g, fmt, args...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##args)
@@ -266,30 +267,6 @@ struct pmu_ucode_desc_v1 {
	u32 compressed;
};

struct pmu_queue {

	/* used by hw, for BIOS/SMI queue */
	u32 mutex_id;
	u32 mutex_lock;
	/* used by sw, for LPQ/HPQ queue */
	struct nvgpu_mutex mutex;

	/* current write position */
	u32 position;
	/* physical dmem offset where this queue begins */
	u32 offset;
	/* logical queue identifier */
	u32 id;
	/* physical queue index */
	u32 index;
	/* in bytes */
	u32 size;

	/* open-flag */
	u32 oflag;
	bool opened; /* opened implies locked */
};

struct pmu_mutex {
	u32 id;
	u32 index;
@@ -345,7 +322,7 @@ struct nvgpu_pmu {

	struct pmu_sha1_gid gid_info;

	struct pmu_queue queue[PMU_QUEUE_COUNT];
	struct nvgpu_falcon_queue queue[PMU_QUEUE_COUNT];

	struct pmu_sequence *seq;
	unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE];
@@ -450,7 +427,6 @@ int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);

int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu, u32 id,
	union pmu_init_msg_pmu *init);
bool nvgpu_pmu_queue_is_empty(struct nvgpu_pmu *pmu, struct pmu_queue *queue);

/* send a cmd to pmu */
int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,

@@ -27,15 +27,11 @@
 * commands to the PMU
 */
/* write by sw, read by pmu, protected by sw mutex lock */
#define PMU_COMMAND_QUEUE_HPQ		0
#define PMU_COMMAND_QUEUE_HPQ		0U
/* write by sw, read by pmu, protected by sw mutex lock */
#define PMU_COMMAND_QUEUE_LPQ		1
/* read/write by sw/hw, protected by hw pmu mutex, id = 2 */
#define PMU_COMMAND_QUEUE_BIOS		2
/* read/write by sw/hw, protected by hw pmu mutex, id = 3 */
#define PMU_COMMAND_QUEUE_SMI		3
#define PMU_COMMAND_QUEUE_LPQ		1U
/* write by pmu, read by sw, accessed by interrupt handler, no lock */
#define PMU_MESSAGE_QUEUE		4
#define PMU_MESSAGE_QUEUE		4U
#define PMU_QUEUE_COUNT			5

#define PMU_IS_COMMAND_QUEUE(id)	\
@@ -48,15 +44,13 @@
#define PMU_IS_MESSAGE_QUEUE(id)	\
	((id) == PMU_MESSAGE_QUEUE)

enum {
	OFLAG_READ = 0,
	OFLAG_WRITE
};
#define OFLAG_READ	0U
#define OFLAG_WRITE	1U

#define QUEUE_SET	(true)
#define QUEUE_GET	(false)

#define QUEUE_ALIGNMENT	(4)
#define QUEUE_ALIGNMENT	(4U)

/* An enumeration containing all valid logical mutex identifiers */
enum {