gpu: nvgpu: add cmd post support

Add command post support to send commands to GSP nvriscv.

NVGPU-6784

Signed-off-by: Ramesh Mylavarapu <rmylavarapu@nvidia.com>
Change-Id: Ib7fde3712c24a5b4f0f58d7788e67d29a1e351a2
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2590763
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Ramesh Mylavarapu
2021-09-08 00:19:21 +05:30
committed by mobile promotions
parent 085f94bf89
commit 3c980954c4
7 changed files with 220 additions and 2 deletions

View File

@@ -316,6 +316,8 @@ gsp:
common/gsp/ipc/gsp_seq.h,
common/gsp/ipc/gsp_queue.c,
common/gsp/ipc/gsp_queue.h,
common/gsp/ipc/gsp_cmd.c,
common/gsp/ipc/gsp_cmd.h,
include/nvgpu/gsp.h ]
engine_queues:

View File

@@ -412,7 +412,8 @@ nvgpu-$(CONFIG_NVGPU_GSP_SCHEDULER) += \
common/gsp/gsp_init.o \
common/gsp/gsp_bootstrap.o \
common/gsp/ipc/gsp_seq.o \
common/gsp/ipc/gsp_queue.o
common/gsp/ipc/gsp_queue.o \
common/gsp/ipc/gsp_cmd.o
endif
ifeq ($(CONFIG_NVGPU_GSP_STRESS_TEST),y)

View File

@@ -182,7 +182,8 @@ ifeq ($(CONFIG_NVGPU_GSP_SCHEDULER),1)
srcs += common/gsp/gsp_init.c \
common/gsp/gsp_bootstrap.c \
common/gsp/ipc/gsp_seq.c \
common/gsp/ipc/gsp_queue.c
common/gsp/ipc/gsp_queue.c \
common/gsp/ipc/gsp_cmd.c
endif
# Source files below are functionally safe (FuSa) and must always be included.

View File

@@ -62,6 +62,8 @@ struct nvgpu_gsp {
struct nvgpu_engine_mem_queue *queues[GSP_QUEUE_NUM];
u32 command_ack;
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
struct gsp_stress_test gsp_test;
#endif

View File

@@ -0,0 +1,152 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/pmu.h>
#include <nvgpu/log.h>
#include <nvgpu/gsp.h>
#include "../gsp_priv.h"
#include "gsp_seq.h"
#include "gsp_queue.h"
#include "gsp_cmd.h"
/*
 * Check whether a GSP unit ID lies inside the valid range
 * [0, NV_GSP_UNIT_END).
 *
 * Returns a non-zero value when the ID is valid, zero otherwise.
 */
u8 gsp_unit_id_is_valid(u8 id)
{
	return (id < NV_GSP_UNIT_END) ? 1U : 0U;
}
/*
 * Sanity-check a GSP command before it is pushed to a queue.
 *
 * A command is valid when it targets the GSP command queue, carries at
 * least a full header, occupies no more than half of the destination
 * queue, and addresses a valid GSP unit.
 *
 * Returns true when the command is valid, false otherwise (details are
 * logged via nvgpu_err).
 */
static bool gsp_validate_cmd(struct nvgpu_gsp *gsp,
	struct nv_flcn_cmd_gsp *cmd, u32 queue_id)
{
	struct gk20a *g = gsp->g;
	u32 queue_size;

	/* Commands may only be posted to the GSP command queue. */
	if (queue_id != GSP_NV_CMDQ_LOG_ID) {
		goto invalid_cmd;
	}

	/*
	 * Use the GSP header size defined in gsp_cmd.h rather than the PMU
	 * header size, so this unit stays self-consistent.
	 */
	if (cmd->hdr.size < GSP_CMD_HDR_SIZE) {
		goto invalid_cmd;
	}

	/*
	 * A single command must not take more than half of the queue, so
	 * that the queue can always make forward progress.
	 */
	queue_size = nvgpu_gsp_queue_get_size(gsp->queues, queue_id);
	if (cmd->hdr.size > (queue_size >> 1)) {
		goto invalid_cmd;
	}

	if (!gsp_unit_id_is_valid(cmd->hdr.unit_id)) {
		goto invalid_cmd;
	}

	return true;

invalid_cmd:
	nvgpu_err(g, "invalid gsp cmd :");
	/*
	 * %u matches the unsigned queue_id/header fields (%d with a u32 is
	 * undefined for values above INT_MAX); trailing \n dropped to match
	 * the nvgpu_err call above.
	 */
	nvgpu_err(g, "queue_id=%u, cmd_size=%u, cmd_unit_id=%u",
		queue_id, cmd->hdr.size, cmd->hdr.unit_id);
	return false;
}
/*
 * Push a command onto the given GSP queue, retrying on -EAGAIN (queue
 * full) until @timeout_ms expires.
 *
 * Returns 0 on success, or the last error from timeout init / queue push.
 */
static int gsp_write_cmd(struct nvgpu_gsp *gsp,
	struct nv_flcn_cmd_gsp *cmd, u32 queue_id,
	u32 timeout_ms)
{
	struct gk20a *g = gsp->g;
	struct nvgpu_timeout timeout;
	int err;

	nvgpu_log_fn(g, " ");

	err = nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
	if (err != 0) {
		return err;
	}

	for (;;) {
		err = nvgpu_gsp_queue_push(gsp->queues, queue_id, gsp->gsp_flcn,
				cmd, cmd->hdr.size);
		if ((err != -EAGAIN) ||
			(nvgpu_timeout_expired(&timeout) != 0)) {
			break;
		}
		/* Queue is full; back off briefly before retrying. */
		nvgpu_usleep_range(1000U, 2000U);
	}

	if (err != 0) {
		nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
	}

	return err;
}
/*
 * Post a command to the GSP command queue.
 *
 * A sequence is acquired to track completion of the command; @callback
 * (with @cb_param) is invoked when the matching response message arrives.
 * @timeout (ms) bounds the retry loop while writing into the queue.
 *
 * Returns 0 on success, -EINVAL for a NULL or invalid command, or the
 * error from sequence acquisition / queue write. On a failed queue write
 * the acquired sequence is released again.
 */
int nvgpu_gsp_cmd_post(struct gk20a *g, struct nv_flcn_cmd_gsp *cmd,
	u32 queue_id, gsp_callback callback,
	void *cb_param, u32 timeout)
{
	struct nvgpu_gsp *gsp = g->gsp;
	struct gsp_sequence *seq = NULL;
	int err = 0;

	if (cmd == NULL) {
		nvgpu_err(g, "gsp cmd buffer is empty");
		err = -EINVAL;
		goto exit;
	}

	/* Sanity check the command input. */
	if (!gsp_validate_cmd(gsp, cmd, queue_id)) {
		err = -EINVAL;
		goto exit;
	}

	/* Attempt to reserve a sequence for this command. */
	err = nvgpu_gsp_seq_acquire(g, gsp->sequences, &seq,
			callback, cb_param);
	if (err != 0) {
		goto exit;
	}

	/* Set the sequence number in the command header. */
	cmd->hdr.seq_id = nvgpu_gsp_seq_get_id(seq);

	/*
	 * Single assignment: the original zero-initialization of ctrl_flags
	 * was a dead store, immediately overwritten by this value.
	 */
	cmd->hdr.ctrl_flags = PMU_CMD_FLAGS_STATUS;

	nvgpu_gsp_seq_set_state(seq, GSP_SEQ_STATE_USED);

	err = gsp_write_cmd(gsp, cmd, queue_id, timeout);
	if (err != 0) {
		/* Queue write failed; return the sequence for reuse. */
		gsp_seq_release(gsp->sequences, seq);
	}

exit:
	return err;
}
/*
 * Report the highest command-queue log ID supported by the GSP
 * interface. @g is unused but kept for API symmetry with the other
 * nvgpu_gsp_* entry points.
 */
u32 nvgpu_gsp_get_last_cmd_id(struct gk20a *g)
{
	(void)g;

	return GSP_NV_CMDQ_LOG_ID__LAST;
}

View File

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_GSP_CMD_IF_H
#define NVGPU_GSP_CMD_IF_H

#include <nvgpu/types.h>

#include "gsp_seq.h"

struct gk20a;

/* Logical queue IDs shared between nvgpu and the GSP nvriscv. */
#define GSP_NV_CMDQ_LOG_ID 0U
#define GSP_NV_CMDQ_LOG_ID__LAST 0U
#define GSP_NV_MSGQ_LOG_ID 1U

/* GSP unit IDs carried in gsp_hdr.unit_id. */
#define NV_GSP_UNIT_REWIND NV_FLCN_UNIT_ID_REWIND
#define NV_GSP_UNIT_NULL 0x01U
#define NV_GSP_UNIT_INIT 0x02U
#define NV_GSP_UNIT_END 0x0AU /* first ID past the valid range */

/* Byte sizes of the common message/command header below. */
#define GSP_MSG_HDR_SIZE U32(sizeof(struct gsp_hdr))
#define GSP_CMD_HDR_SIZE U32(sizeof(struct gsp_hdr))

/*
 * Header prepended to every GSP command and message.
 * size is the total length in bytes, header included.
 */
struct gsp_hdr {
u8 unit_id;
u8 size;
u8 ctrl_flags;
u8 seq_id;
};

/* A GSP command; payload (if any) follows the header in the queue. */
struct nv_flcn_cmd_gsp {
struct gsp_hdr hdr;
};

/* Returns non-zero when @id is a valid GSP unit ID. */
u8 gsp_unit_id_is_valid(u8 id);

/* command handling methods*/
int nvgpu_gsp_cmd_post(struct gk20a *g, struct nv_flcn_cmd_gsp *cmd,
u32 queue_id, gsp_callback callback, void *cb_param, u32 timeout);
#endif /* NVGPU_GSP_CMD_IF_H */

View File

@@ -32,6 +32,7 @@ void nvgpu_gsp_isr_support(struct gk20a *g, bool enable);
void nvgpu_gsp_isr_mutex_aquire(struct gk20a *g);
void nvgpu_gsp_isr_mutex_release(struct gk20a *g);
bool nvgpu_gsp_is_isr_enable(struct gk20a *g);
u32 nvgpu_gsp_get_last_cmd_id(struct gk20a *g);
struct nvgpu_falcon *nvgpu_gsp_falcon_instance(struct gk20a *g);
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
int nvgpu_gsp_stress_test_bootstrap(struct gk20a *g, bool start);