mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-24 10:34:43 +03:00
Open source GPL/LGPL release
drivers/gpu/nvgpu/common/pmu/ipc/pmu_cmd.c (new file, 748 lines)
@@ -0,0 +1,748 @@
/*
 * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/enabled.h>
#include <nvgpu/pmu.h>
#include <nvgpu/log.h>
#include <nvgpu/timers.h>
#include <nvgpu/bug.h>
#include <nvgpu/pmu/pmuif/nvgpu_cmdif.h>
#include <nvgpu/falcon.h>
#include <nvgpu/engine_fb_queue.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/string.h>
#include <nvgpu/pmu/seq.h>
#include <nvgpu/pmu/queue.h>
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/msg.h>
#include <nvgpu/pmu/fw.h>
#include <nvgpu/pmu/allocator.h>

static bool pmu_validate_in_out_payload(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
	struct pmu_in_out_payload_desc *payload)
{
	u32 size;

	if (payload->offset != 0U && payload->buf == NULL) {
		return false;
	}

	if (payload->buf == NULL) {
		return true;
	}

	if (payload->size == 0U) {
		return false;
	}

	size = PMU_CMD_HDR_SIZE;
	size += payload->offset;
	size += pmu->fw->ops.get_allocation_struct_size(pmu);

	if (size > cmd->hdr.size) {
		return false;
	}

	return true;
}

static bool pmu_validate_rpc_payload(struct pmu_payload *payload)
{
	if (payload->rpc.prpc == NULL) {
		return true;
	}

	if (payload->rpc.size_rpc == 0U) {
		goto invalid_cmd;
	}

	return true;

invalid_cmd:

	return false;
}
|
||||
|
||||
static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
|
||||
struct pmu_payload *payload, u32 queue_id)
|
||||
{
|
||||
struct gk20a *g = pmu->g;
|
||||
u32 queue_size;
|
||||
|
||||
if (cmd == NULL) {
|
||||
nvgpu_err(g, "PMU cmd buffer is NULL");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!PMU_IS_SW_COMMAND_QUEUE(queue_id)) {
|
||||
goto invalid_cmd;
|
||||
}
|
||||
|
||||
if (cmd->hdr.size < PMU_CMD_HDR_SIZE) {
|
||||
goto invalid_cmd;
|
||||
}
|
||||
|
||||
queue_size = nvgpu_pmu_queue_get_size(&pmu->queues, queue_id);
|
||||
|
||||
if (cmd->hdr.size > (queue_size >> 1)) {
|
||||
goto invalid_cmd;
|
||||
}
|
||||
|
||||
if (!PMU_UNIT_ID_IS_VALID(cmd->hdr.unit_id)) {
|
||||
goto invalid_cmd;
|
||||
}
|
||||
|
||||
if (payload == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (payload->in.buf == NULL && payload->out.buf == NULL &&
|
||||
payload->rpc.prpc == NULL) {
|
||||
goto invalid_cmd;
|
||||
}
|
||||
|
||||
if (!pmu_validate_in_out_payload(pmu, cmd, &payload->in)) {
|
||||
goto invalid_cmd;
|
||||
}
|
||||
|
||||
if (!pmu_validate_in_out_payload(pmu, cmd, &payload->out)) {
|
||||
goto invalid_cmd;
|
||||
}
|
||||
|
||||
if (!pmu_validate_rpc_payload(payload)) {
|
||||
goto invalid_cmd;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
invalid_cmd:
|
||||
nvgpu_err(g, "invalid pmu cmd :\n"
|
||||
"queue_id=%d,\n"
|
||||
"cmd_size=%d, cmd_unit_id=%d,\n"
|
||||
"payload in=%p, in_size=%d, in_offset=%d,\n"
|
||||
"payload out=%p, out_size=%d, out_offset=%d",
|
||||
queue_id, cmd->hdr.size, cmd->hdr.unit_id,
|
||||
&payload->in, payload->in.size, payload->in.offset,
|
||||
&payload->out, payload->out.size, payload->out.offset);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
|
||||
u32 queue_id)
|
||||
{
|
||||
struct gk20a *g = pmu->g;
|
||||
struct nvgpu_timeout timeout;
|
||||
int err;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
err = nvgpu_timeout_init(g, &timeout, U32_MAX, NVGPU_TIMER_CPU_TIMER);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "failed to init timer");
|
||||
return err;
|
||||
}
|
||||
|
||||
do {
|
||||
err = nvgpu_pmu_queue_push(&pmu->queues, pmu->flcn,
|
||||
queue_id, cmd);
|
||||
if (nvgpu_timeout_expired(&timeout) == 0 && err == -EAGAIN) {
|
||||
nvgpu_usleep_range(1000, 2000);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
} while (true);
|
||||
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
|
||||
} else {
|
||||
nvgpu_log_fn(g, "done");
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void pmu_payload_deallocate(struct gk20a *g,
|
||||
struct falcon_payload_alloc *alloc)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
|
||||
if (alloc->dmem_offset != 0U) {
|
||||
nvgpu_free(&pmu->dmem, alloc->dmem_offset);
|
||||
}
|
||||
}
|
||||
|
||||
static int pmu_payload_allocate(struct gk20a *g, struct pmu_sequence *seq,
|
||||
struct falcon_payload_alloc *alloc)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
u16 buffer_size;
|
||||
int err = 0;
|
||||
u64 tmp;
|
||||
|
||||
if (nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {
|
||||
buffer_size = nvgpu_pmu_seq_get_buffer_size(seq);
|
||||
nvgpu_pmu_seq_set_fbq_out_offset(seq, buffer_size);
|
||||
/* Save target address in FBQ work buffer. */
|
||||
alloc->dmem_offset = buffer_size;
|
||||
buffer_size += alloc->dmem_size;
|
||||
nvgpu_pmu_seq_set_buffer_size(seq, buffer_size);
|
||||
} else {
|
||||
tmp = nvgpu_alloc(&pmu->dmem, alloc->dmem_size);
|
||||
nvgpu_assert(tmp <= U32_MAX);
|
||||
alloc->dmem_offset = (u32)tmp;
|
||||
if (alloc->dmem_offset == 0U) {
|
||||
err = -ENOMEM;
|
||||
goto clean_up;
|
||||
}
|
||||
}
|
||||
|
||||
clean_up:
|
||||
if (err != 0) {
|
||||
pmu_payload_deallocate(g, alloc);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int pmu_cmd_payload_setup_rpc(struct gk20a *g, struct pmu_cmd *cmd,
|
||||
struct pmu_payload *payload, struct pmu_sequence *seq)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops;
|
||||
struct nvgpu_engine_fb_queue *queue = nvgpu_pmu_seq_get_cmd_queue(seq);
|
||||
struct falcon_payload_alloc alloc;
|
||||
int err = 0;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
(void) memset(&alloc, 0, sizeof(struct falcon_payload_alloc));
|
||||
|
||||
alloc.dmem_size = payload->rpc.size_rpc +
|
||||
payload->rpc.size_scratch;
|
||||
|
||||
err = pmu_payload_allocate(g, seq, &alloc);
|
||||
if (err != 0) {
|
||||
goto clean_up;
|
||||
}
|
||||
|
||||
alloc.dmem_size = payload->rpc.size_rpc;
|
||||
|
||||
if (nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {
|
||||
/* copy payload to FBQ work buffer */
|
||||
nvgpu_memcpy((u8 *)
|
||||
nvgpu_engine_fb_queue_get_work_buffer(queue) +
|
||||
alloc.dmem_offset,
|
||||
(u8 *)payload->rpc.prpc, alloc.dmem_size);
|
||||
|
||||
alloc.dmem_offset += nvgpu_pmu_seq_get_fbq_heap_offset(seq);
|
||||
|
||||
nvgpu_pmu_seq_set_in_payload_fb_queue(seq, true);
|
||||
nvgpu_pmu_seq_set_out_payload_fb_queue(seq, true);
|
||||
} else {
|
||||
err = nvgpu_falcon_copy_to_dmem(pmu->flcn, alloc.dmem_offset,
|
||||
payload->rpc.prpc, payload->rpc.size_rpc, 0);
|
||||
if (err != 0) {
|
||||
pmu_payload_deallocate(g, &alloc);
|
||||
goto clean_up;
|
||||
}
|
||||
}
|
||||
|
||||
cmd->cmd.rpc.rpc_dmem_size = payload->rpc.size_rpc;
|
||||
cmd->cmd.rpc.rpc_dmem_ptr = alloc.dmem_offset;
|
||||
|
||||
nvgpu_pmu_seq_set_out_payload(seq, payload->rpc.prpc);
|
||||
g->pmu->fw->ops.allocation_set_dmem_size(pmu,
|
||||
fw_ops->get_seq_out_alloc_ptr(seq),
|
||||
payload->rpc.size_rpc);
|
||||
g->pmu->fw->ops.allocation_set_dmem_offset(pmu,
|
||||
fw_ops->get_seq_out_alloc_ptr(seq),
|
||||
alloc.dmem_offset);
|
||||
|
||||
clean_up:
|
||||
if (err != 0) {
|
||||
nvgpu_log_fn(g, "fail");
|
||||
} else {
|
||||
nvgpu_log_fn(g, "done");
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int pmu_cmd_in_payload_setup(struct gk20a *g, struct pmu_cmd *cmd,
|
||||
struct pmu_payload *payload, struct pmu_sequence *seq)
|
||||
{
|
||||
struct nvgpu_engine_fb_queue *fb_queue =
|
||||
nvgpu_pmu_seq_get_cmd_queue(seq);
|
||||
struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops;
|
||||
struct falcon_payload_alloc alloc;
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
void *in = NULL;
|
||||
int err = 0;
|
||||
u32 offset;
|
||||
|
||||
(void) memset(&alloc, 0, sizeof(struct falcon_payload_alloc));
|
||||
|
||||
if (payload != NULL && payload->in.offset != 0U) {
|
||||
fw_ops->set_allocation_ptr(pmu, &in,
|
||||
((u8 *)&cmd->cmd + payload->in.offset));
|
||||
|
||||
if (payload->in.buf != payload->out.buf) {
|
||||
fw_ops->allocation_set_dmem_size(pmu, in,
|
||||
(u16)payload->in.size);
|
||||
} else {
|
||||
fw_ops->allocation_set_dmem_size(pmu, in,
|
||||
(u16)max(payload->in.size, payload->out.size));
|
||||
}
|
||||
|
||||
alloc.dmem_size = fw_ops->allocation_get_dmem_size(pmu, in);
|
||||
|
||||
err = pmu_payload_allocate(g, seq, &alloc);
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
*(fw_ops->allocation_get_dmem_offset_addr(pmu, in)) =
|
||||
alloc.dmem_offset;
|
||||
|
||||
if (nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {
|
||||
/* copy payload to FBQ work buffer */
|
||||
nvgpu_memcpy((u8 *)
|
||||
nvgpu_engine_fb_queue_get_work_buffer(
|
||||
fb_queue) +
|
||||
alloc.dmem_offset,
|
||||
(u8 *)payload->in.buf,
|
||||
payload->in.size);
|
||||
|
||||
alloc.dmem_offset +=
|
||||
nvgpu_pmu_seq_get_fbq_heap_offset(seq);
|
||||
*(fw_ops->allocation_get_dmem_offset_addr(pmu,
|
||||
in)) = alloc.dmem_offset;
|
||||
|
||||
nvgpu_pmu_seq_set_in_payload_fb_queue(seq, true);
|
||||
} else {
|
||||
offset =
|
||||
fw_ops->allocation_get_dmem_offset(pmu,
|
||||
in);
|
||||
err = nvgpu_falcon_copy_to_dmem(pmu->flcn,
|
||||
offset, payload->in.buf,
|
||||
payload->in.size, 0);
|
||||
if (err != 0) {
|
||||
pmu_payload_deallocate(g, &alloc);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
fw_ops->allocation_set_dmem_size(pmu,
|
||||
fw_ops->get_seq_in_alloc_ptr(seq),
|
||||
fw_ops->allocation_get_dmem_size(pmu, in));
|
||||
fw_ops->allocation_set_dmem_offset(pmu,
|
||||
fw_ops->get_seq_in_alloc_ptr(seq),
|
||||
fw_ops->allocation_get_dmem_offset(pmu, in));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pmu_cmd_out_payload_setup(struct gk20a *g, struct pmu_cmd *cmd,
|
||||
struct pmu_payload *payload, struct pmu_sequence *seq)
|
||||
{
|
||||
struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops;
|
||||
struct falcon_payload_alloc alloc;
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
void *in = NULL, *out = NULL;
|
||||
int err = 0;
|
||||
|
||||
(void) memset(&alloc, 0, sizeof(struct falcon_payload_alloc));
|
||||
|
||||
if (payload != NULL && payload->out.offset != 0U) {
|
||||
fw_ops->set_allocation_ptr(pmu, &out,
|
||||
((u8 *)&cmd->cmd + payload->out.offset));
|
||||
fw_ops->allocation_set_dmem_size(pmu, out,
|
||||
(u16)payload->out.size);
|
||||
|
||||
if (payload->in.buf != payload->out.buf) {
|
||||
alloc.dmem_size =
|
||||
fw_ops->allocation_get_dmem_size(pmu, out);
|
||||
|
||||
err = pmu_payload_allocate(g, seq, &alloc);
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
*(fw_ops->allocation_get_dmem_offset_addr(pmu,
|
||||
out)) = alloc.dmem_offset;
|
||||
} else {
|
||||
WARN_ON(payload->in.offset == 0U);
|
||||
|
||||
fw_ops->set_allocation_ptr(pmu, &in,
|
||||
((u8 *)&cmd->cmd + payload->in.offset));
|
||||
|
||||
fw_ops->allocation_set_dmem_offset(pmu, out,
|
||||
fw_ops->allocation_get_dmem_offset(pmu,
|
||||
in));
|
||||
}
|
||||
|
||||
if (nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {
|
||||
if (payload->in.buf != payload->out.buf) {
|
||||
*(fw_ops->allocation_get_dmem_offset_addr(pmu,
|
||||
out)) +=
|
||||
nvgpu_pmu_seq_get_fbq_heap_offset(seq);
|
||||
}
|
||||
|
||||
nvgpu_pmu_seq_set_out_payload_fb_queue(seq, true);
|
||||
}
|
||||
|
||||
fw_ops->allocation_set_dmem_size(pmu,
|
||||
fw_ops->get_seq_out_alloc_ptr(seq),
|
||||
fw_ops->allocation_get_dmem_size(pmu, out));
|
||||
fw_ops->allocation_set_dmem_offset(pmu,
|
||||
fw_ops->get_seq_out_alloc_ptr(seq),
|
||||
fw_ops->allocation_get_dmem_offset(pmu, out));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pmu_cmd_payload_setup(struct gk20a *g, struct pmu_cmd *cmd,
|
||||
struct pmu_payload *payload, struct pmu_sequence *seq)
|
||||
{
|
||||
struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops;
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
void *in = NULL;
|
||||
int err = 0;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
if (payload != NULL) {
|
||||
nvgpu_pmu_seq_set_out_payload(seq, payload->out.buf);
|
||||
}
|
||||
|
||||
err = pmu_cmd_in_payload_setup(g, cmd, payload, seq);
|
||||
if (err != 0) {
|
||||
goto exit;
|
||||
}
|
||||
|
||||
err = pmu_cmd_out_payload_setup(g, cmd, payload, seq);
|
||||
if (err != 0) {
|
||||
goto clean_up;
|
||||
}
|
||||
|
||||
goto exit;
|
||||
|
||||
clean_up:
|
||||
if (payload->in.offset != 0U) {
|
||||
fw_ops->set_allocation_ptr(pmu, &in,
|
||||
((u8 *)&cmd->cmd + payload->in.offset));
|
||||
|
||||
nvgpu_free(&pmu->dmem,
|
||||
fw_ops->allocation_get_dmem_offset(pmu,
|
||||
in));
|
||||
}
|
||||
|
||||
exit:
|
||||
if (err != 0) {
|
||||
nvgpu_log_fn(g, "fail");
|
||||
} else {
|
||||
nvgpu_log_fn(g, "done");
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int pmu_fbq_cmd_setup(struct gk20a *g, struct pmu_cmd *cmd,
|
||||
struct nvgpu_engine_fb_queue *queue, struct pmu_payload *payload,
|
||||
struct pmu_sequence *seq)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
struct nv_falcon_fbq_hdr *fbq_hdr = NULL;
|
||||
struct pmu_cmd *flcn_cmd = NULL;
|
||||
u32 fbq_size_needed = 0;
|
||||
u16 heap_offset = 0;
|
||||
u64 tmp;
|
||||
int err = 0;
|
||||
|
||||
fbq_hdr = (struct nv_falcon_fbq_hdr *)
|
||||
nvgpu_engine_fb_queue_get_work_buffer(queue);
|
||||
|
||||
flcn_cmd = (struct pmu_cmd *)
|
||||
(nvgpu_engine_fb_queue_get_work_buffer(queue) +
|
||||
sizeof(struct nv_falcon_fbq_hdr));
|
||||
|
||||
if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID) {
|
||||
if (payload != NULL) {
|
||||
fbq_size_needed = (u32)payload->rpc.size_rpc +
|
||||
(u32)payload->rpc.size_scratch;
|
||||
}
|
||||
} else {
|
||||
err = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
tmp = fbq_size_needed +
|
||||
sizeof(struct nv_falcon_fbq_hdr) +
|
||||
cmd->hdr.size;
|
||||
nvgpu_assert(tmp <= (size_t)U32_MAX);
|
||||
fbq_size_needed = (u32)tmp;
|
||||
|
||||
fbq_size_needed = ALIGN_UP(fbq_size_needed, 4U);
|
||||
|
||||
/* Check for allocator pointer and proceed */
|
||||
if (pmu->dmem.priv != NULL) {
|
||||
tmp = nvgpu_alloc(&pmu->dmem, fbq_size_needed);
|
||||
}
|
||||
nvgpu_assert(tmp <= U32_MAX);
|
||||
heap_offset = (u16) tmp;
|
||||
if (heap_offset == 0U) {
|
||||
err = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* clear work queue buffer */
|
||||
(void) memset(nvgpu_engine_fb_queue_get_work_buffer(queue), 0,
|
||||
nvgpu_engine_fb_queue_get_element_size(queue));
|
||||
|
||||
/* Need to save room for both FBQ hdr, and the CMD */
|
||||
tmp = sizeof(struct nv_falcon_fbq_hdr) +
|
||||
cmd->hdr.size;
|
||||
nvgpu_assert(tmp <= (size_t)U16_MAX);
|
||||
nvgpu_pmu_seq_set_buffer_size(seq, (u16)tmp);
|
||||
|
||||
/* copy cmd into the work buffer */
|
||||
nvgpu_memcpy((u8 *)flcn_cmd, (u8 *)cmd, cmd->hdr.size);
|
||||
|
||||
/* Fill in FBQ hdr, and offset in seq structure */
|
||||
nvgpu_assert(fbq_size_needed < U16_MAX);
|
||||
fbq_hdr->heap_size = (u16)fbq_size_needed;
|
||||
fbq_hdr->heap_offset = heap_offset;
|
||||
nvgpu_pmu_seq_set_fbq_heap_offset(seq, heap_offset);
|
||||
|
||||
/*
|
||||
* save queue index in seq structure
|
||||
* so can free queue element when response is received
|
||||
*/
|
||||
nvgpu_pmu_seq_set_fbq_element_index(seq,
|
||||
nvgpu_engine_fb_queue_get_position(queue));
|
||||
|
||||
exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
|
||||
struct pmu_payload *payload,
|
||||
u32 queue_id, pmu_callback callback, void *cb_param)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
struct pmu_sequence *seq = NULL;
|
||||
struct nvgpu_engine_fb_queue *fb_queue = NULL;
|
||||
int err;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
if (!nvgpu_pmu_get_fw_ready(g, pmu)) {
|
||||
nvgpu_warn(g, "PMU is not ready");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!pmu_validate_cmd(pmu, cmd, payload, queue_id)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
err = nvgpu_pmu_seq_acquire(g, pmu->sequences, &seq, callback,
|
||||
cb_param);
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
cmd->hdr.seq_id = nvgpu_pmu_seq_get_id(seq);
|
||||
|
||||
cmd->hdr.ctrl_flags = 0;
|
||||
cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_STATUS;
|
||||
cmd->hdr.ctrl_flags |= PMU_CMD_FLAGS_INTR;
|
||||
|
||||
if (nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {
|
||||
fb_queue = nvgpu_pmu_fb_queue(&pmu->queues, queue_id);
|
||||
/* Save the queue in the seq structure. */
|
||||
nvgpu_pmu_seq_set_cmd_queue(seq, fb_queue);
|
||||
|
||||
/* Lock the FBQ work buffer */
|
||||
nvgpu_engine_fb_queue_lock_work_buffer(fb_queue);
|
||||
|
||||
/* Create FBQ work buffer & copy cmd to FBQ work buffer */
|
||||
err = pmu_fbq_cmd_setup(g, cmd, fb_queue, payload, seq);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "FBQ cmd setup failed");
|
||||
nvgpu_pmu_seq_release(g, pmu->sequences, seq);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/*
|
||||
* change cmd pointer to point to FBQ work
|
||||
* buffer as cmd copied to FBQ work buffer
|
||||
* in call pmu_fbq_cmd_setup()
|
||||
*/
|
||||
cmd = (struct pmu_cmd *)
|
||||
(nvgpu_engine_fb_queue_get_work_buffer(fb_queue) +
|
||||
sizeof(struct nv_falcon_fbq_hdr));
|
||||
}
|
||||
|
||||
if (cmd->cmd.rpc.cmd_type == NV_PMU_RPC_CMD_ID) {
|
||||
err = pmu_cmd_payload_setup_rpc(g, cmd, payload, seq);
|
||||
} else {
|
||||
err = pmu_cmd_payload_setup(g, cmd, payload, seq);
|
||||
}
|
||||
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "payload setup failed");
|
||||
pmu->fw->ops.allocation_set_dmem_size(pmu,
|
||||
pmu->fw->ops.get_seq_in_alloc_ptr(seq), 0);
|
||||
pmu->fw->ops.allocation_set_dmem_size(pmu,
|
||||
pmu->fw->ops.get_seq_out_alloc_ptr(seq), 0);
|
||||
|
||||
nvgpu_pmu_seq_release(g, pmu->sequences, seq);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
nvgpu_pmu_seq_set_state(seq, PMU_SEQ_STATE_USED);
|
||||
|
||||
err = pmu_write_cmd(pmu, cmd, queue_id);
|
||||
if (err != 0) {
|
||||
nvgpu_pmu_seq_set_state(seq, PMU_SEQ_STATE_PENDING);
|
||||
}
|
||||
|
||||
exit:
|
||||
if (nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {
|
||||
/* Unlock the FBQ work buffer */
|
||||
nvgpu_engine_fb_queue_unlock_work_buffer(fb_queue);
|
||||
}
|
||||
|
||||
nvgpu_log_fn(g, "Done, err %x", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
|
||||
u16 size_rpc, u16 size_scratch, pmu_callback caller_cb,
|
||||
void *caller_cb_param, bool is_copy_back)
|
||||
{
|
||||
struct gk20a *g = pmu->g;
|
||||
struct pmu_cmd cmd;
|
||||
struct pmu_payload payload;
|
||||
struct rpc_handler_payload *rpc_payload = NULL;
|
||||
pmu_callback callback = NULL;
|
||||
void *rpc_buff = NULL;
|
||||
int status = 0;
|
||||
|
||||
if (nvgpu_can_busy(g) == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!nvgpu_pmu_get_fw_ready(g, pmu)) {
|
||||
nvgpu_warn(g, "PMU is not ready to process RPC");
|
||||
status = EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (caller_cb == NULL) {
|
||||
rpc_payload = nvgpu_kzalloc(g,
|
||||
sizeof(struct rpc_handler_payload) + size_rpc);
|
||||
if (rpc_payload == NULL) {
|
||||
status = ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
rpc_payload->rpc_buff = (u8 *)rpc_payload +
|
||||
sizeof(struct rpc_handler_payload);
|
||||
rpc_payload->is_mem_free_set =
|
||||
is_copy_back ? false : true;
|
||||
|
||||
/* assign default RPC handler*/
|
||||
callback = nvgpu_pmu_rpc_handler;
|
||||
} else {
|
||||
if (caller_cb_param == NULL) {
|
||||
nvgpu_err(g, "Invalid cb param addr");
|
||||
status = EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
rpc_payload = nvgpu_kzalloc(g,
|
||||
sizeof(struct rpc_handler_payload));
|
||||
if (rpc_payload == NULL) {
|
||||
status = ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
rpc_payload->rpc_buff = caller_cb_param;
|
||||
rpc_payload->is_mem_free_set = true;
|
||||
callback = caller_cb;
|
||||
WARN_ON(is_copy_back);
|
||||
}
|
||||
|
||||
rpc_buff = rpc_payload->rpc_buff;
|
||||
(void) memset(&cmd, 0, sizeof(struct pmu_cmd));
|
||||
(void) memset(&payload, 0, sizeof(struct pmu_payload));
|
||||
|
||||
cmd.hdr.unit_id = rpc->unit_id;
|
||||
cmd.hdr.size = (u8)(PMU_CMD_HDR_SIZE + sizeof(struct nv_pmu_rpc_cmd));
|
||||
cmd.cmd.rpc.cmd_type = NV_PMU_RPC_CMD_ID;
|
||||
cmd.cmd.rpc.flags = rpc->flags;
|
||||
|
||||
nvgpu_memcpy((u8 *)rpc_buff, (u8 *)rpc, size_rpc);
|
||||
payload.rpc.prpc = rpc_buff;
|
||||
payload.rpc.size_rpc = size_rpc;
|
||||
payload.rpc.size_scratch = size_scratch;
|
||||
|
||||
status = nvgpu_pmu_cmd_post(g, &cmd, &payload,
|
||||
PMU_COMMAND_QUEUE_LPQ, callback,
|
||||
rpc_payload);
|
||||
if (status != 0) {
|
||||
nvgpu_err(g, "Failed to execute RPC status=0x%x, func=0x%x",
|
||||
status, rpc->function);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/*
|
||||
* Option act like blocking call, which waits till RPC request
|
||||
* executes on PMU & copy back processed data to rpc_buff
|
||||
* to read data back in nvgpu
|
||||
*/
|
||||
if (is_copy_back) {
|
||||
/* wait till RPC execute in PMU & ACK */
|
||||
if (nvgpu_pmu_wait_fw_ack_status(g, pmu,
|
||||
nvgpu_get_poll_timeout(g),
|
||||
&rpc_payload->complete, 1U) != 0) {
|
||||
nvgpu_err(g, "PMU wait timeout expired.");
|
||||
status = -ETIMEDOUT;
|
||||
goto cleanup;
|
||||
}
|
||||
/* copy back data to caller */
|
||||
nvgpu_memcpy((u8 *)rpc, (u8 *)rpc_buff, size_rpc);
|
||||
/* free allocated memory */
|
||||
nvgpu_kfree(g, rpc_payload);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
nvgpu_kfree(g, rpc_payload);
|
||||
exit:
|
||||
return status;
|
||||
}

drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c (new file, 639 lines)
@@ -0,0 +1,639 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <nvgpu/pmu/allocator.h>
|
||||
#include <nvgpu/engine_fb_queue.h>
|
||||
#include <nvgpu/engine_queue.h>
|
||||
#include <nvgpu/pmu/msg.h>
|
||||
#include <nvgpu/string.h>
|
||||
#include <nvgpu/gk20a.h>
|
||||
#include <nvgpu/nvgpu_init.h>
|
||||
#include <nvgpu/pmu/lsfm.h>
|
||||
#include <nvgpu/pmu/super_surface.h>
|
||||
#include <nvgpu/pmu/pmu_perfmon.h>
|
||||
#include <nvgpu/pmu/pmu_pg.h>
|
||||
#include <nvgpu/pmu/fw.h>
|
||||
#include <nvgpu/pmu/seq.h>
|
||||
|
||||
static int pmu_payload_extract(struct nvgpu_pmu *pmu, struct pmu_sequence *seq)
|
||||
{
|
||||
struct nvgpu_engine_fb_queue *fb_queue =
|
||||
nvgpu_pmu_seq_get_cmd_queue(seq);
|
||||
struct gk20a *g = pmu->g;
|
||||
struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops;
|
||||
u32 fbq_payload_offset = 0U;
|
||||
int err = 0;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
if (nvgpu_pmu_seq_get_out_payload_fb_queue(seq)) {
|
||||
fbq_payload_offset =
|
||||
nvgpu_engine_fb_queue_get_offset(fb_queue) +
|
||||
nvgpu_pmu_seq_get_fbq_out_offset(seq) +
|
||||
(nvgpu_pmu_seq_get_fbq_element_index(seq) *
|
||||
nvgpu_engine_fb_queue_get_element_size(fb_queue));
|
||||
|
||||
nvgpu_mem_rd_n(g, nvgpu_pmu_super_surface_mem(g,
|
||||
pmu, pmu->super_surface), fbq_payload_offset,
|
||||
nvgpu_pmu_seq_get_out_payload(seq),
|
||||
fw_ops->allocation_get_dmem_size(pmu,
|
||||
fw_ops->get_seq_out_alloc_ptr(seq)));
|
||||
|
||||
} else {
|
||||
if (fw_ops->allocation_get_dmem_size(pmu,
|
||||
fw_ops->get_seq_out_alloc_ptr(seq)) != 0U) {
|
||||
err = nvgpu_falcon_copy_from_dmem(pmu->flcn,
|
||||
fw_ops->allocation_get_dmem_offset(pmu,
|
||||
fw_ops->get_seq_out_alloc_ptr(seq)),
|
||||
nvgpu_pmu_seq_get_out_payload(seq),
|
||||
fw_ops->allocation_get_dmem_size(pmu,
|
||||
fw_ops->get_seq_out_alloc_ptr(seq)),
|
||||
0);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "PMU falcon DMEM copy failed");
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void pmu_payload_free(struct nvgpu_pmu *pmu, struct pmu_sequence *seq)
|
||||
{
|
||||
struct nvgpu_engine_fb_queue *fb_queue =
|
||||
nvgpu_pmu_seq_get_cmd_queue(seq);
|
||||
struct gk20a *g = pmu->g;
|
||||
struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops;
|
||||
void *seq_in_ptr = fw_ops->get_seq_in_alloc_ptr(seq);
|
||||
void *seq_out_ptr = fw_ops->get_seq_out_alloc_ptr(seq);
|
||||
int err;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
if (nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {
|
||||
/* Check for allocator pointer and proceed */
|
||||
if (pmu->dmem.priv != NULL) {
|
||||
nvgpu_free(&pmu->dmem,
|
||||
nvgpu_pmu_seq_get_fbq_heap_offset(seq));
|
||||
}
|
||||
|
||||
/*
|
||||
* free FBQ allocated work buffer
|
||||
* set FBQ element work buffer to NULL
|
||||
* Clear the in use bit for the queue entry this CMD used.
|
||||
*/
|
||||
err = nvgpu_engine_fb_queue_free_element(fb_queue,
|
||||
nvgpu_pmu_seq_get_fbq_element_index(seq));
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "fb queue element free failed %d", err);
|
||||
}
|
||||
} else {
|
||||
/* free DMEM space payload*/
|
||||
if (fw_ops->allocation_get_dmem_size(pmu,
|
||||
seq_in_ptr) != 0U) {
|
||||
nvgpu_free(&pmu->dmem,
|
||||
fw_ops->allocation_get_dmem_offset(pmu,
|
||||
seq_in_ptr));
|
||||
|
||||
fw_ops->allocation_set_dmem_size(pmu,
|
||||
seq_in_ptr, 0);
|
||||
}
|
||||
|
||||
if (fw_ops->allocation_get_dmem_size(pmu,
|
||||
seq_out_ptr) != 0U) {
|
||||
nvgpu_free(&pmu->dmem,
|
||||
fw_ops->allocation_get_dmem_offset(pmu,
|
||||
seq_out_ptr));
|
||||
|
||||
fw_ops->allocation_set_dmem_size(pmu,
|
||||
seq_out_ptr, 0);
|
||||
}
|
||||
}
|
||||
|
||||
nvgpu_pmu_seq_payload_free(g, seq);
|
||||
}
|
||||
|
||||
static int pmu_response_handle(struct nvgpu_pmu *pmu,
|
||||
struct pmu_msg *msg)
|
||||
{
|
||||
struct gk20a *g = pmu->g;
|
||||
enum pmu_seq_state state;
|
||||
struct pmu_sequence *seq;
|
||||
int err = 0;
|
||||
u8 id;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
seq = nvgpu_pmu_sequences_get_seq(pmu->sequences, msg->hdr.seq_id);
|
||||
state = nvgpu_pmu_seq_get_state(seq);
|
||||
id = nvgpu_pmu_seq_get_id(seq);
|
||||
|
||||
if (state != PMU_SEQ_STATE_USED) {
|
||||
nvgpu_err(g, "msg for an unknown sequence %u", (u32) id);
|
||||
err = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (msg->hdr.unit_id == PMU_UNIT_RC &&
|
||||
msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) {
|
||||
nvgpu_err(g, "unhandled cmd: seq %u", (u32) id);
|
||||
err = -EINVAL;
|
||||
} else {
|
||||
err = pmu_payload_extract(pmu, seq);
|
||||
}
|
||||
|
||||
exit:
|
||||
/*
|
||||
* free allocated space for payload in
|
||||
* DMEM/FB-surface/FB_QUEUE as data is
|
||||
* copied to buffer pointed by
|
||||
* seq->out_payload
|
||||
*/
|
||||
pmu_payload_free(pmu, seq);
|
||||
|
||||
nvgpu_pmu_seq_callback(g, seq, msg, err);
|
||||
|
||||
nvgpu_pmu_seq_release(g, pmu->sequences, seq);
|
||||
|
||||
/* TBD: notify client waiting for available dmem */
|
||||
|
||||
nvgpu_log_fn(g, "done err %d", err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
{
	int err = 0;
	struct gk20a *g = pmu->g;

	nvgpu_log_fn(g, " ");
	switch (msg->hdr.unit_id) {
	case PMU_UNIT_PERFMON:
	case PMU_UNIT_PERFMON_T18X:
		err = nvgpu_pmu_perfmon_event_handler(g, pmu, msg);
		break;
	case PMU_UNIT_PERF:
		if (g->ops.pmu_perf.handle_pmu_perf_event != NULL) {
			err = g->ops.pmu_perf.handle_pmu_perf_event(g,
				(void *)&msg->hdr);
		} else {
			WARN_ON(true);
		}
		break;
	default:
		nvgpu_log_info(g, "Received invalid PMU unit event");
		break;
	}

	return err;
}
|
||||
|
||||
static bool pmu_engine_mem_queue_read(struct nvgpu_pmu *pmu,
|
||||
u32 queue_id, void *data,
|
||||
u32 bytes_to_read, int *status)
|
||||
{
|
||||
struct gk20a *g = pmu->g;
|
||||
u32 bytes_read;
|
||||
int err;
|
||||
|
||||
err = nvgpu_pmu_queue_pop(&pmu->queues, pmu->flcn, queue_id, data,
|
||||
bytes_to_read, &bytes_read);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "fail to read msg: err %d", err);
|
||||
*status = err;
|
||||
return false;
|
||||
}
|
||||
if (bytes_read != bytes_to_read) {
|
||||
nvgpu_err(g, "fail to read requested bytes: 0x%x != 0x%x",
|
||||
bytes_to_read, bytes_read);
|
||||
*status = -EINVAL;
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool pmu_read_message(struct nvgpu_pmu *pmu, u32 queue_id,
|
||||
struct pmu_msg *msg, int *status)
|
||||
{
|
||||
struct gk20a *g = pmu->g;
|
||||
u32 read_size;
|
||||
int err;
|
||||
|
||||
*status = 0;
|
||||
|
||||
if (nvgpu_pmu_queue_is_empty(&pmu->queues, queue_id)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!pmu_engine_mem_queue_read(pmu, queue_id, &msg->hdr,
|
||||
PMU_MSG_HDR_SIZE, status)) {
|
||||
nvgpu_err(g, "fail to read msg from queue %d", queue_id);
|
||||
goto clean_up;
|
||||
}
|
||||
|
||||
if (msg->hdr.unit_id == PMU_UNIT_REWIND) {
|
||||
if (!nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {
|
||||
err = nvgpu_pmu_queue_rewind(&pmu->queues, queue_id,
|
||||
pmu->flcn);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "fail to rewind queue %d",
|
||||
queue_id);
|
||||
*status = err;
|
||||
goto clean_up;
|
||||
}
|
||||
}
|
||||
|
||||
/* read again after rewind */
|
||||
if (!pmu_engine_mem_queue_read(pmu, queue_id, &msg->hdr,
|
||||
PMU_MSG_HDR_SIZE, status)) {
|
||||
nvgpu_err(g, "fail to read msg from queue %d",
|
||||
queue_id);
|
||||
goto clean_up;
|
||||
}
|
||||
}
|
||||
|
||||
if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) {
|
||||
nvgpu_err(g, "read invalid unit_id %d from queue %d",
|
||||
msg->hdr.unit_id, queue_id);
|
||||
*status = -EINVAL;
|
||||
goto clean_up;
|
||||
}
|
||||
|
||||
if (msg->hdr.size > PMU_MSG_HDR_SIZE) {
|
||||
read_size = U32(msg->hdr.size) - PMU_MSG_HDR_SIZE;
|
||||
if (!pmu_engine_mem_queue_read(pmu, queue_id, &msg->msg,
|
||||
read_size, status)) {
|
||||
nvgpu_err(g, "fail to read msg from queue %d",
|
||||
queue_id);
|
||||
goto clean_up;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
clean_up:
|
||||
return false;
|
||||
}
|
||||
|
||||
static void pmu_read_init_msg_fb(struct gk20a *g, struct nvgpu_pmu *pmu,
|
||||
u32 element_index, u32 size, void *buffer)
|
||||
{
|
||||
u32 fbq_msg_queue_ss_offset = 0U;
|
||||
|
||||
fbq_msg_queue_ss_offset =
|
||||
nvgpu_pmu_get_ss_msg_fbq_element_offset(g, pmu,
|
||||
pmu->super_surface, element_index);
|
||||
|
||||
fbq_msg_queue_ss_offset = nvgpu_safe_add_u32(fbq_msg_queue_ss_offset,
|
||||
(u32)sizeof(struct nv_falcon_fbq_msgq_hdr));
|
||||
nvgpu_mem_rd_n(g, nvgpu_pmu_super_surface_mem(g,
|
||||
pmu, pmu->super_surface), fbq_msg_queue_ss_offset,
|
||||
buffer, size);
|
||||
}
|
||||
|
||||
static int pmu_process_init_msg_fb(struct gk20a *g, struct nvgpu_pmu *pmu,
|
||||
struct pmu_msg *msg)
|
||||
{
|
||||
u32 tail = 0U;
|
||||
int err = 0;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_GET);
|
||||
|
||||
pmu_read_init_msg_fb(g, pmu, tail, PMU_MSG_HDR_SIZE,
|
||||
(void *)&msg->hdr);
|
||||
|
||||
if (msg->hdr.unit_id != PMU_UNIT_INIT_DGPU &&
|
||||
msg->hdr.unit_id != PMU_UNIT_CMDMGMT) {
|
||||
nvgpu_err(g, "FB MSG Q: expecting init msg");
|
||||
err = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
pmu_read_init_msg_fb(g, pmu, tail, msg->hdr.size,
|
||||
(void *)&msg->hdr);
|
||||
if (msg->event_rpc.cmdmgmt_init.hdr.function !=
|
||||
PMU_INIT_MSG_TYPE_PMU_INIT) {
|
||||
nvgpu_err(g, "FB MSG Q: expecting pmu init msg");
|
||||
err = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* Queue is not yet constructed, so inline next element code here.*/
|
||||
tail++;
|
||||
if (tail >= NV_PMU_FBQ_MSG_NUM_ELEMENTS) {
|
||||
tail = 0U;
|
||||
}
|
||||
|
||||
g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_SET);
|
||||
|
||||
exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int pmu_process_init_msg_dmem(struct gk20a *g, struct nvgpu_pmu *pmu,
|
||||
struct pmu_msg *msg)
|
||||
{
|
||||
u32 tail = 0U;
|
||||
int err = 0;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_GET);
|
||||
|
||||
err = nvgpu_falcon_copy_from_dmem(pmu->flcn, tail,
|
||||
(u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "PMU falcon DMEM copy failed");
|
||||
goto exit;
|
||||
}
|
||||
if (msg->hdr.unit_id != PMU_UNIT_INIT) {
|
||||
nvgpu_err(g, "expecting init msg");
|
||||
err = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
err = nvgpu_falcon_copy_from_dmem(pmu->flcn, tail + PMU_MSG_HDR_SIZE,
|
||||
(u8 *)&msg->msg, (u32)msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "PMU falcon DMEM copy failed");
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
|
||||
nvgpu_err(g, "expecting pmu init msg");
|
||||
err = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
tail += NVGPU_ALIGN(U32(msg->hdr.size), PMU_DMEM_ALIGNMENT);
|
||||
g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_SET);
|
||||
|
||||
exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int pmu_gid_info_dmem_read(struct nvgpu_pmu *pmu,
|
||||
union pmu_init_msg_pmu *init)
|
||||
{
|
||||
struct pmu_fw_ver_ops *fw_ops = &pmu->fw->ops;
|
||||
struct pmu_sha1_gid *gid_info = &pmu->gid_info;
|
||||
struct pmu_sha1_gid_data gid_data;
|
||||
int err = 0;
|
||||
|
||||
if (!gid_info->valid) {
|
||||
err = nvgpu_falcon_copy_from_dmem(pmu->flcn,
|
||||
fw_ops->get_init_msg_sw_mngd_area_off(init),
|
||||
(u8 *)&gid_data,
|
||||
(u32)sizeof(struct pmu_sha1_gid_data), 0);
|
||||
if (err != 0) {
|
||||
nvgpu_err(pmu->g, "PMU falcon DMEM copy failed");
|
||||
goto exit;
|
||||
}
|
||||
|
||||
gid_info->valid =
|
||||
(gid_data.signature == PMU_SHA1_GID_SIGNATURE);
|
||||
|
||||
if (gid_info->valid) {
|
||||
if (sizeof(gid_info->gid) !=
|
||||
sizeof(gid_data.gid)) {
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
nvgpu_memcpy((u8 *)gid_info->gid, (u8 *)gid_data.gid,
|
||||
sizeof(gid_info->gid));
|
||||
}
|
||||
}
|
||||
|
||||
exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int pmu_process_init_msg(struct nvgpu_pmu *pmu,
|
||||
struct pmu_msg *msg)
|
||||
{
|
||||
struct gk20a *g = pmu->g;
|
||||
struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops;
|
||||
union pmu_init_msg_pmu *init;
|
||||
int err = 0;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
nvgpu_pmu_dbg(g, "init received\n");
|
||||
|
||||
(void)memset((void *)msg, 0x0, sizeof(struct pmu_msg));
|
||||
|
||||
init = fw_ops->get_init_msg_ptr(&(msg->msg.init));
|
||||
|
||||
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_RTOS_FBQ)) {
|
||||
err = pmu_process_init_msg_fb(g, pmu, msg);
|
||||
if (err != 0) {
|
||||
goto exit;
|
||||
}
|
||||
} else {
|
||||
err = pmu_process_init_msg_dmem(g, pmu, msg);
|
||||
if (err != 0) {
|
||||
goto exit;
|
||||
}
|
||||
|
||||
err = pmu_gid_info_dmem_read(pmu, init);
|
||||
if (err != 0) {
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
err = nvgpu_pmu_queues_init(g, init, &pmu->queues,
|
||||
nvgpu_pmu_super_surface_mem(g, pmu,
|
||||
pmu->super_surface));
|
||||
if (err != 0) {
|
||||
goto exit;
|
||||
}
|
||||
|
||||
nvgpu_pmu_allocator_dmem_init(g, pmu, &pmu->dmem, init);
|
||||
|
||||
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE)) {
|
||||
err = nvgpu_pmu_ss_create_ssmd_lookup_table(g,
|
||||
pmu, pmu->super_surface);
|
||||
if (err != 0) {
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
nvgpu_pmu_set_fw_ready(g, pmu, true);
|
||||
|
||||
nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_INIT_RECEIVED, true);
|
||||
exit:
|
||||
nvgpu_pmu_dbg(g, "init received end, err %x", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
|
||||
{
|
||||
struct pmu_msg msg;
|
||||
int status;
|
||||
struct gk20a *g = pmu->g;
|
||||
int err;
|
||||
|
||||
if (nvgpu_can_busy(g) == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (unlikely(!nvgpu_pmu_get_fw_ready(g, pmu))) {
|
||||
err = pmu_process_init_msg(pmu, &msg);
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
err = nvgpu_pmu_lsfm_int_wpr_region(g, pmu, pmu->lsfm);
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
if (nvgpu_is_enabled(g, NVGPU_PMU_PERFMON)) {
|
||||
err = nvgpu_pmu_perfmon_initialization(g, pmu,
|
||||
pmu->pmu_perfmon);
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
while (pmu_read_message(pmu, PMU_MESSAGE_QUEUE, &msg, &status)) {
|
||||
|
||||
if (nvgpu_can_busy(g) == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
nvgpu_pmu_dbg(g, "read msg hdr: ");
|
||||
nvgpu_pmu_dbg(g, "unit_id = 0x%08x, size = 0x%08x",
|
||||
msg.hdr.unit_id, msg.hdr.size);
|
||||
nvgpu_pmu_dbg(g, "ctrl_flags = 0x%08x, seq_id = 0x%08x",
|
||||
msg.hdr.ctrl_flags, msg.hdr.seq_id);
|
||||
|
||||
msg.hdr.ctrl_flags &= ~PMU_CMD_FLAGS_PMU_MASK;
|
||||
|
||||
if ((msg.hdr.ctrl_flags == PMU_CMD_FLAGS_EVENT) ||
|
||||
(msg.hdr.ctrl_flags == PMU_CMD_FLAGS_RPC_EVENT)) {
|
||||
err = pmu_handle_event(pmu, &msg);
|
||||
} else {
|
||||
err = pmu_response_handle(pmu, &msg);
|
||||
}
|
||||
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
|
||||
struct nv_pmu_rpc_header rpc,
|
||||
struct rpc_handler_payload *rpc_payload)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
|
||||
switch (msg->hdr.unit_id) {
|
||||
case PMU_UNIT_ACR:
|
||||
nvgpu_pmu_lsfm_rpc_handler(g, rpc_payload);
|
||||
break;
|
||||
case PMU_UNIT_PERFMON_T18X:
|
||||
case PMU_UNIT_PERFMON:
|
||||
nvgpu_pmu_perfmon_rpc_handler(g, pmu, &rpc, rpc_payload);
|
||||
break;
|
||||
case PMU_UNIT_VOLT:
|
||||
if (pmu->volt->volt_rpc_handler != NULL) {
|
||||
pmu->volt->volt_rpc_handler(g, &rpc);
|
||||
}
|
||||
break;
|
||||
case PMU_UNIT_CLK:
|
||||
nvgpu_pmu_dbg(g, "reply PMU_UNIT_CLK");
|
||||
break;
|
||||
case PMU_UNIT_PERF:
|
||||
nvgpu_pmu_dbg(g, "reply PMU_UNIT_PERF");
|
||||
break;
|
||||
case PMU_UNIT_THERM:
|
||||
if (pmu->therm_rpc_handler != NULL) {
|
||||
pmu->therm_rpc_handler(g, pmu, &rpc);
|
||||
}
|
||||
break;
|
||||
case PMU_UNIT_PG_LOADING:
|
||||
case PMU_UNIT_PG:
|
||||
if (pmu->pg->rpc_handler != NULL) {
|
||||
pmu->pg->rpc_handler(g, pmu, &rpc);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
nvgpu_err(g, " Invalid RPC response, stats 0x%x",
|
||||
rpc.flcn_status);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void nvgpu_pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
|
||||
void *param, u32 status)
|
||||
{
|
||||
struct nv_pmu_rpc_header rpc;
|
||||
struct rpc_handler_payload *rpc_payload =
|
||||
(struct rpc_handler_payload *)param;
|
||||
|
||||
if (nvgpu_can_busy(g) == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
(void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
|
||||
nvgpu_memcpy((u8 *)&rpc, (u8 *)rpc_payload->rpc_buff,
|
||||
sizeof(struct nv_pmu_rpc_header));
|
||||
|
||||
if (rpc.flcn_status != 0U) {
|
||||
nvgpu_err(g,
|
||||
"failed RPC response, unit-id=0x%x, func=0x%x, status=0x%x",
|
||||
rpc.unit_id, rpc.function, rpc.flcn_status);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
pmu_rpc_handler(g, msg, rpc, rpc_payload);
|
||||
|
||||
exit:
|
||||
rpc_payload->complete = true;
|
||||
|
||||
/* free allocated memory */
|
||||
if (rpc_payload->is_mem_free_set) {
|
||||
nvgpu_kfree(g, rpc_payload);
|
||||
}
|
||||
}

void pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
	void *var, u8 val)
{
	struct gk20a *g = pmu->g;

	if (nvgpu_pmu_wait_fw_ack_status(g, pmu, timeout_ms, var, val) != 0) {
		nvgpu_err(g, "PMU wait timeout expired.");
	}
}

drivers/gpu/nvgpu/common/pmu/ipc/pmu_queue.c (new file, 329 lines)
@@ -0,0 +1,329 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <nvgpu/engine_mem_queue.h>
|
||||
#include <nvgpu/engine_fb_queue.h>
|
||||
#include <nvgpu/engine_queue.h>
|
||||
#include <nvgpu/pmu/cmd.h>
|
||||
#include <nvgpu/pmu/queue.h>
|
||||
#include <nvgpu/gk20a.h>
|
||||
#include <nvgpu/pmu/super_surface.h>
|
||||
|
||||
/* FB queue init */
|
||||
static int pmu_fb_queue_init(struct gk20a *g, struct pmu_queues *queues,
|
||||
u32 id, union pmu_init_msg_pmu *init,
|
||||
struct nvgpu_mem *super_surface_buf)
|
||||
{
|
||||
struct nvgpu_pmu *pmu = g->pmu;
|
||||
struct nvgpu_engine_fb_queue_params params = {0};
|
||||
u32 oflag = 0;
|
||||
int err = 0;
|
||||
u32 tmp_id = id;
|
||||
|
||||
/* init queue parameters */
|
||||
if (PMU_IS_COMMAND_QUEUE(id)) {
|
||||
|
||||
/* currently PMU FBQ support SW command queue only */
|
||||
if (!PMU_IS_SW_COMMAND_QUEUE(id)) {
|
||||
queues->queue[id] = NULL;
|
||||
err = 0;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/*
|
||||
* set OFLAG_WRITE for command queue
|
||||
* i.e, push from nvgpu &
|
||||
* pop form falcon ucode
|
||||
*/
|
||||
oflag = OFLAG_WRITE;
|
||||
|
||||
params.super_surface_mem = super_surface_buf;
|
||||
params.fbq_offset =
|
||||
nvgpu_pmu_get_ss_cmd_fbq_offset(g, pmu,
|
||||
pmu->super_surface, id);
|
||||
params.size = NV_PMU_FBQ_CMD_NUM_ELEMENTS;
|
||||
params.fbq_element_size = NV_PMU_FBQ_CMD_ELEMENT_SIZE;
|
||||
} else if (PMU_IS_MESSAGE_QUEUE(id)) {
|
||||
/*
|
||||
* set OFLAG_READ for message queue
|
||||
* i.e, push from falcon ucode &
|
||||
* pop form nvgpu
|
||||
*/
|
||||
oflag = OFLAG_READ;
|
||||
|
||||
params.super_surface_mem = super_surface_buf;
|
||||
params.fbq_offset =
|
||||
nvgpu_pmu_get_ss_msg_fbq_offset(g, pmu,
|
||||
pmu->super_surface);
|
||||
params.size = NV_PMU_FBQ_MSG_NUM_ELEMENTS;
|
||||
params.fbq_element_size = NV_PMU_FBQ_MSG_ELEMENT_SIZE;
|
||||
} else {
|
||||
nvgpu_err(g, "invalid queue-id %d", id);
|
||||
err = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
params.g = g;
|
||||
params.flcn_id = FALCON_ID_PMU;
|
||||
params.id = id;
|
||||
params.oflag = oflag;
|
||||
params.queue_head = g->ops.pmu.pmu_queue_head;
|
||||
params.queue_tail = g->ops.pmu.pmu_queue_tail;
|
||||
|
||||
if (tmp_id == PMU_COMMAND_QUEUE_HPQ) {
|
||||
tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3;
|
||||
} else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) {
|
||||
tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3;
|
||||
} else {
|
||||
tmp_id = PMU_QUEUE_MSG_IDX_FOR_V5;
|
||||
}
|
||||
|
||||
params.index = init->v5.queue_phy_id[tmp_id];
|
||||
|
||||
err = nvgpu_engine_fb_queue_init(&queues->fb_queue[id], params);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "queue-%d init failed", id);
|
||||
}
|
||||
|
||||
exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
/* DMEM queue init */
|
||||
static int pmu_dmem_queue_init(struct gk20a *g, struct pmu_queues *queues,
|
||||
u32 id, union pmu_init_msg_pmu *init)
|
||||
{
|
||||
struct nvgpu_engine_mem_queue_params params = {0};
|
||||
u32 oflag = 0;
|
||||
int err = 0;
|
||||
|
||||
if (PMU_IS_COMMAND_QUEUE(id)) {
|
||||
/*
|
||||
* set OFLAG_WRITE for command queue
|
||||
* i.e, push from nvgpu &
|
||||
* pop form falcon ucode
|
||||
*/
|
||||
oflag = OFLAG_WRITE;
|
||||
} else if (PMU_IS_MESSAGE_QUEUE(id)) {
|
||||
/*
|
||||
* set OFLAG_READ for message queue
|
||||
* i.e, push from falcon ucode &
|
||||
* pop form nvgpu
|
||||
*/
|
||||
oflag = OFLAG_READ;
|
||||
} else {
|
||||
nvgpu_err(g, "invalid queue-id %d", id);
|
||||
err = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* init queue parameters */
|
||||
params.g = g;
|
||||
params.flcn_id = FALCON_ID_PMU;
|
||||
params.id = id;
|
||||
params.oflag = oflag;
|
||||
params.queue_head = g->ops.pmu.pmu_queue_head;
|
||||
params.queue_tail = g->ops.pmu.pmu_queue_tail;
|
||||
params.queue_type = QUEUE_TYPE_DMEM;
|
||||
g->pmu->fw->ops.get_init_msg_queue_params(id, init,
|
||||
¶ms.index,
|
||||
¶ms.offset,
|
||||
¶ms.size);
|
||||
err = nvgpu_engine_mem_queue_init(&queues->queue[id], params);
|
||||
if (err != 0) {
|
||||
nvgpu_err(g, "queue-%d init failed", id);
|
||||
}
|
||||
|
||||
exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void pmu_queue_free(struct gk20a *g, struct pmu_queues *queues, u32 id)
|
||||
{
|
||||
if (!PMU_IS_COMMAND_QUEUE(id) && !PMU_IS_MESSAGE_QUEUE(id)) {
|
||||
nvgpu_err(g, "invalid queue-id %d", id);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (queues->queue_type == QUEUE_TYPE_FB) {
|
||||
if (queues->fb_queue[id] == NULL) {
|
||||
goto exit;
|
||||
}
|
||||
|
||||
nvgpu_engine_fb_queue_free(&queues->fb_queue[id]);
|
||||
} else {
|
||||
if (queues->queue[id] == NULL) {
|
||||
goto exit;
|
||||
}
|
||||
|
||||
nvgpu_engine_mem_queue_free(&queues->queue[id]);
|
||||
}
|
||||
|
||||
exit:
|
||||
return;
|
||||
}
|
||||
|
||||
int nvgpu_pmu_queues_init(struct gk20a *g,
	union pmu_init_msg_pmu *init,
	struct pmu_queues *queues,
	struct nvgpu_mem *super_surface_buf)
{
	u32 i = 0U;
	u32 j = 0U;
	int err;

	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_RTOS_FBQ)) {
		queues->queue_type = QUEUE_TYPE_FB;
		for (i = 0; i < PMU_QUEUE_COUNT; i++) {
			err = pmu_fb_queue_init(g, queues, i, init,
				super_surface_buf);
			if (err != 0) {
				for (j = 0; j < i; j++) {
					pmu_queue_free(g, queues, j);
				}
				nvgpu_err(g, "PMU queue init failed");
				return err;
			}
		}
	} else {
		queues->queue_type = QUEUE_TYPE_DMEM;
		for (i = 0; i < PMU_QUEUE_COUNT; i++) {
			err = pmu_dmem_queue_init(g, queues, i, init);
			if (err != 0) {
				for (j = 0; j < i; j++) {
					pmu_queue_free(g, queues, j);
				}
				nvgpu_err(g, "PMU queue init failed");
				return err;
			}
		}
	}

	return 0;
}
|
||||
|
||||
void nvgpu_pmu_queues_free(struct gk20a *g, struct pmu_queues *queues)
|
||||
{
|
||||
u32 i = 0U;
|
||||
|
||||
for (i = 0U; i < PMU_QUEUE_COUNT; i++) {
|
||||
pmu_queue_free(g, queues, i);
|
||||
}
|
||||
}
|
||||
|
||||
u32 nvgpu_pmu_queue_get_size(struct pmu_queues *queues, u32 queue_id)
|
||||
{
|
||||
struct nvgpu_engine_fb_queue *fb_queue = NULL;
|
||||
struct nvgpu_engine_mem_queue *queue = NULL;
|
||||
u32 queue_size;
|
||||
|
||||
if (queues->queue_type == QUEUE_TYPE_FB) {
|
||||
fb_queue = queues->fb_queue[queue_id];
|
||||
queue_size = nvgpu_engine_fb_queue_get_element_size(fb_queue);
|
||||
} else {
|
||||
queue = queues->queue[queue_id];
|
||||
queue_size = nvgpu_engine_mem_queue_get_size(queue);
|
||||
}
|
||||
|
||||
return queue_size;
|
||||
}
|
||||
|
||||
int nvgpu_pmu_queue_push(struct pmu_queues *queues, struct nvgpu_falcon *flcn,
|
||||
u32 queue_id, struct pmu_cmd *cmd)
|
||||
{
|
||||
struct nvgpu_engine_fb_queue *fb_queue = NULL;
|
||||
struct nvgpu_engine_mem_queue *queue = NULL;
|
||||
int err;
|
||||
|
||||
if (queues->queue_type == QUEUE_TYPE_FB) {
|
||||
fb_queue = queues->fb_queue[queue_id];
|
||||
err = nvgpu_engine_fb_queue_push(fb_queue,
|
||||
cmd, cmd->hdr.size);
|
||||
} else {
|
||||
queue = queues->queue[queue_id];
|
||||
err = nvgpu_engine_mem_queue_push(flcn, queue,
|
||||
cmd, cmd->hdr.size);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int nvgpu_pmu_queue_pop(struct pmu_queues *queues, struct nvgpu_falcon *flcn,
|
||||
u32 queue_id, void *data, u32 bytes_to_read,
|
||||
u32 *bytes_read)
|
||||
{
|
||||
struct nvgpu_engine_fb_queue *fb_queue = NULL;
|
||||
struct nvgpu_engine_mem_queue *queue = NULL;
|
||||
int err;
|
||||
|
||||
if (queues->queue_type == QUEUE_TYPE_FB) {
|
||||
fb_queue = queues->fb_queue[queue_id];
|
||||
err = nvgpu_engine_fb_queue_pop(fb_queue, data,
|
||||
bytes_to_read, bytes_read);
|
||||
} else {
|
||||
queue = queues->queue[queue_id];
|
||||
err = nvgpu_engine_mem_queue_pop(flcn, queue, data,
|
||||
bytes_to_read, bytes_read);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
bool nvgpu_pmu_queue_is_empty(struct pmu_queues *queues, u32 queue_id)
|
||||
{
|
||||
struct nvgpu_engine_mem_queue *queue = NULL;
|
||||
struct nvgpu_engine_fb_queue *fb_queue = NULL;
|
||||
bool empty;
|
||||
|
||||
if (queues->queue_type == QUEUE_TYPE_FB) {
|
||||
fb_queue = queues->fb_queue[queue_id];
|
||||
empty = nvgpu_engine_fb_queue_is_empty(fb_queue);
|
||||
} else {
|
||||
queue = queues->queue[queue_id];
|
||||
empty = nvgpu_engine_mem_queue_is_empty(queue);
|
||||
}
|
||||
|
||||
return empty;
|
||||
}
|
||||
|
||||
bool nvgpu_pmu_fb_queue_enabled(struct pmu_queues *queues)
|
||||
{
|
||||
return queues->queue_type == QUEUE_TYPE_FB;
|
||||
}
|
||||
|
||||
struct nvgpu_engine_fb_queue *nvgpu_pmu_fb_queue(struct pmu_queues *queues,
|
||||
u32 queue_id)
|
||||
{
|
||||
return queues->fb_queue[queue_id];
|
||||
}
|
||||
|
||||
int nvgpu_pmu_queue_rewind(struct pmu_queues *queues, u32 queue_id,
|
||||
struct nvgpu_falcon *flcn)
|
||||
{
|
||||
struct nvgpu_engine_mem_queue *queue = queues->queue[queue_id];
|
||||
|
||||
if (queues->queue_type == QUEUE_TYPE_FB) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return nvgpu_engine_mem_queue_rewind(flcn, queue);
|
||||
}

drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c (new file, 263 lines)
@@ -0,0 +1,263 @@
|
||||
/*
|
||||
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <nvgpu/pmu/seq.h>
|
||||
#include <nvgpu/bitops.h>
|
||||
#include <nvgpu/errno.h>
|
||||
#include <nvgpu/kmem.h>
|
||||
#include <nvgpu/bug.h>
|
||||
#include <nvgpu/pmu.h>
|
||||
|
||||
struct nvgpu_pmu;
|
||||
|
||||
void nvgpu_pmu_sequences_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
|
||||
struct pmu_sequences *sequences)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
(void) memset(sequences->seq, 0,
|
||||
sizeof(struct pmu_sequence) * PMU_MAX_NUM_SEQUENCES);
|
||||
(void) memset(sequences->pmu_seq_tbl, 0,
|
||||
sizeof(sequences->pmu_seq_tbl));
|
||||
|
||||
for (i = 0; i < PMU_MAX_NUM_SEQUENCES; i++) {
|
||||
sequences->seq[i].id = (u8)i;
|
||||
}
|
||||
}
|
||||
|
||||
int nvgpu_pmu_sequences_init(struct gk20a *g, struct nvgpu_pmu *pmu,
|
||||
struct pmu_sequences **sequences_p)
|
||||
{
|
||||
int err = 0;
|
||||
struct pmu_sequences *sequences;
|
||||
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
if (*sequences_p != NULL) {
|
||||
/* skip alloc/reinit for unrailgate sequence */
|
||||
nvgpu_pmu_dbg(g, "skip sequences init for unrailgate sequence");
|
||||
goto exit;
|
||||
}
|
||||
|
||||
sequences = (struct pmu_sequences *)
|
||||
nvgpu_kzalloc(g, sizeof(struct pmu_sequences));
|
||||
if (sequences == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
sequences->seq = (struct pmu_sequence *)
|
||||
nvgpu_kzalloc(g, PMU_MAX_NUM_SEQUENCES *
|
||||
sizeof(struct pmu_sequence));
|
||||
if (sequences->seq == NULL) {
|
||||
nvgpu_kfree(g, sequences);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
nvgpu_mutex_init(&sequences->pmu_seq_lock);
|
||||
|
||||
*sequences_p = sequences;
|
||||
exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_sequences_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
|
||||
struct pmu_sequences *sequences)
|
||||
{
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
if (sequences == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
nvgpu_mutex_destroy(&sequences->pmu_seq_lock);
|
||||
if (sequences->seq != NULL) {
|
||||
nvgpu_kfree(g, sequences->seq);
|
||||
}
|
||||
nvgpu_kfree(g, sequences);
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_payload_free(struct gk20a *g, struct pmu_sequence *seq)
|
||||
{
|
||||
nvgpu_log_fn(g, " ");
|
||||
|
||||
seq->out_payload_fb_queue = false;
|
||||
seq->in_payload_fb_queue = false;
|
||||
seq->fbq_heap_offset = 0;
|
||||
seq->in_mem = NULL;
|
||||
seq->out_mem = NULL;
|
||||
}
|
||||
|
||||
int nvgpu_pmu_seq_acquire(struct gk20a *g,
	struct pmu_sequences *sequences,
	struct pmu_sequence **pseq,
	pmu_callback callback, void *cb_params)
{
	struct pmu_sequence *seq;
	unsigned long index;

	nvgpu_mutex_acquire(&sequences->pmu_seq_lock);
	index = find_first_zero_bit(sequences->pmu_seq_tbl,
		sizeof(sequences->pmu_seq_tbl));
	if (index >= sizeof(sequences->pmu_seq_tbl)) {
		nvgpu_err(g, "no free sequence available");
		nvgpu_mutex_release(&sequences->pmu_seq_lock);
		return -EAGAIN;
	}
	nvgpu_assert(index <= U32_MAX);
	nvgpu_set_bit((u32)index, sequences->pmu_seq_tbl);
	nvgpu_mutex_release(&sequences->pmu_seq_lock);

	seq = &sequences->seq[index];
	seq->state = PMU_SEQ_STATE_PENDING;
	seq->callback = callback;
	seq->cb_params = cb_params;
	seq->out_payload = NULL;
	seq->in_payload_fb_queue = false;
	seq->out_payload_fb_queue = false;

	*pseq = seq;
	return 0;
}

void nvgpu_pmu_seq_release(struct gk20a *g,
	struct pmu_sequences *sequences,
	struct pmu_sequence *seq)
{
	seq->state = PMU_SEQ_STATE_FREE;
	seq->callback = NULL;
	seq->cb_params = NULL;
	seq->out_payload = NULL;

	nvgpu_mutex_acquire(&sequences->pmu_seq_lock);
	nvgpu_clear_bit(seq->id, sequences->pmu_seq_tbl);
	nvgpu_mutex_release(&sequences->pmu_seq_lock);
}
|
||||
|
||||
u16 nvgpu_pmu_seq_get_fbq_out_offset(struct pmu_sequence *seq)
|
||||
{
|
||||
return seq->fbq_out_offset_in_queue_element;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_set_fbq_out_offset(struct pmu_sequence *seq, u16 size)
|
||||
{
|
||||
seq->fbq_out_offset_in_queue_element = size;
|
||||
}
|
||||
|
||||
u16 nvgpu_pmu_seq_get_buffer_size(struct pmu_sequence *seq)
|
||||
{
|
||||
return seq->buffer_size_used;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_set_buffer_size(struct pmu_sequence *seq, u16 size)
|
||||
{
|
||||
seq->buffer_size_used = size;
|
||||
}
|
||||
|
||||
struct nvgpu_engine_fb_queue *nvgpu_pmu_seq_get_cmd_queue(
|
||||
struct pmu_sequence *seq)
|
||||
{
|
||||
return seq->cmd_queue;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_set_cmd_queue(struct pmu_sequence *seq,
|
||||
struct nvgpu_engine_fb_queue *fb_queue)
|
||||
{
|
||||
seq->cmd_queue = fb_queue;
|
||||
}
|
||||
|
||||
u16 nvgpu_pmu_seq_get_fbq_heap_offset(struct pmu_sequence *seq)
|
||||
{
|
||||
return seq->fbq_heap_offset;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_set_fbq_heap_offset(struct pmu_sequence *seq, u16 size)
|
||||
{
|
||||
seq->fbq_heap_offset = size;
|
||||
}
|
||||
|
||||
u8 *nvgpu_pmu_seq_get_out_payload(struct pmu_sequence *seq)
|
||||
{
|
||||
return seq->out_payload;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_set_out_payload(struct pmu_sequence *seq, u8 *payload)
|
||||
{
|
||||
seq->out_payload = payload;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_set_in_payload_fb_queue(struct pmu_sequence *seq, bool state)
|
||||
{
|
||||
seq->in_payload_fb_queue = state;
|
||||
}
|
||||
|
||||
bool nvgpu_pmu_seq_get_out_payload_fb_queue(struct pmu_sequence *seq)
|
||||
{
|
||||
return seq->out_payload_fb_queue;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_set_out_payload_fb_queue(struct pmu_sequence *seq,
|
||||
bool state)
|
||||
{
|
||||
seq->out_payload_fb_queue = state;
|
||||
}
|
||||
|
||||
u32 nvgpu_pmu_seq_get_fbq_element_index(struct pmu_sequence *seq)
|
||||
{
|
||||
return seq->fbq_element_index;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_set_fbq_element_index(struct pmu_sequence *seq, u32 index)
|
||||
{
|
||||
seq->fbq_element_index = index;
|
||||
}
|
||||
|
||||
u8 nvgpu_pmu_seq_get_id(struct pmu_sequence *seq)
|
||||
{
|
||||
return seq->id;
|
||||
}
|
||||
|
||||
enum pmu_seq_state nvgpu_pmu_seq_get_state(struct pmu_sequence *seq)
|
||||
{
|
||||
return seq->state;
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_set_state(struct pmu_sequence *seq, enum pmu_seq_state state)
|
||||
{
|
||||
seq->state = state;
|
||||
}
|
||||
|
||||
struct pmu_sequence *nvgpu_pmu_sequences_get_seq(struct pmu_sequences *seqs,
|
||||
u8 id)
|
||||
{
|
||||
return &seqs->seq[id];
|
||||
}
|
||||
|
||||
void nvgpu_pmu_seq_callback(struct gk20a *g, struct pmu_sequence *seq,
|
||||
struct pmu_msg *msg, int err)
|
||||
{
|
||||
if (seq->callback != NULL) {
|
||||
seq->callback(g, msg, seq->cb_params, err);
|
||||
}
|
||||
}