gpu: nvgpu: PMU init message read from FB support

- Added the NVGPU_SUPPORT_PMU_RTOS_FBQ feature flag to enable
  FBQ support.
- Added support to read the PMU RTOS init message from the
  FBQ message queue, process the init message, and
  construct the FB queues for further communication
  with the PMU RTOS ucode (a simplified sketch of this
  read path follows the commit metadata below).
- Added functions to init the FB command/message queues
  as per the init message inputs from the PMU RTOS ucode.

JIRA NVGPU-1578
Bug 2487534

Change-Id: Ib6c2b26af3927339bd4fb25396350b3f4d222737
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2004020
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Mahantesh Kumbar
Date: 2019-01-25 16:22:48 +05:30
Committed by: mobile promotions
Parent: 4e59575af2
Commit: b0b96732f7
6 changed files with 225 additions and 31 deletions
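For context on how the new FBQ read path addresses elements inside the super surface, here is a minimal standalone sketch. It mirrors what pmu_read_init_msg_fb() in the diff does (compute an offsetof()-based position of an FBQ message-queue element and copy it out), but the types, constants, and plain memcpy() on a CPU mapping below are simplified stand-ins, not the real nv_pmu_super_surface, NV_PMU_FBQ_* or nvgpu_mem_rd_n() definitions.

/* Sketch only: simplified stand-ins for the real super-surface layout. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define FBQ_MSG_NUM_ELEMENTS	16U	/* stand-in for NV_PMU_FBQ_MSG_NUM_ELEMENTS */
#define FBQ_MSG_ELEMENT_SIZE	64U	/* stand-in for NV_PMU_FBQ_MSG_ELEMENT_SIZE */

struct fbq_msg_queue {
	uint8_t element[FBQ_MSG_NUM_ELEMENTS][FBQ_MSG_ELEMENT_SIZE];
};

struct super_surface {
	/* other super-surface members elided */
	struct fbq_msg_queue msg_queue;
};

/* Copy one FBQ message element out of a CPU-visible super-surface mapping. */
static void read_fbq_msg_element(const void *ss_base, uint32_t idx,
				 void *dst, size_t size)
{
	/* locate element[idx] by its offset inside the super surface */
	size_t off = offsetof(struct super_surface, msg_queue.element) +
		     (size_t)idx * FBQ_MSG_ELEMENT_SIZE;

	memcpy(dst, (const uint8_t *)ss_base + off, size);
}

In the driver itself the message-queue tail index plays the role of idx: the header is read first to validate unit_id and learn the size, then the full init message is read from the same element, as shown in the first hunk below.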


@@ -365,6 +365,101 @@ exit:
 	return err;
 }
 
+static void pmu_read_init_msg_fb(struct gk20a *g, struct nvgpu_pmu *pmu,
+	u32 element_index, u32 size, void *buffer)
+{
+	u32 fbq_msg_queue_ss_offset = 0U;
+
+	fbq_msg_queue_ss_offset = (u32)offsetof(
+		struct nv_pmu_super_surface,
+		fbq.msg_queue.element[element_index]);
+
+	nvgpu_mem_rd_n(g, &pmu->super_surface_buf, fbq_msg_queue_ss_offset,
+		buffer, size);
+}
+
+static int pmu_process_init_msg_fb(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct pmu_msg *msg)
+{
+	u32 tail = 0U;
+	int err = 0;
+
+	nvgpu_log_fn(g, " ");
+
+	g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_GET);
+
+	pmu_read_init_msg_fb(g, pmu, tail, PMU_MSG_HDR_SIZE,
+		(void *)&msg->hdr);
+
+	if (msg->hdr.unit_id != PMU_UNIT_INIT) {
+		nvgpu_err(g, "FB MSG Q: expecting init msg");
+		err = -EINVAL;
+		goto exit;
+	}
+
+	pmu_read_init_msg_fb(g, pmu, tail, msg->hdr.size,
+		(void *)&msg->hdr);
+
+	if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
+		nvgpu_err(g, "FB MSG Q: expecting pmu init msg");
+		err = -EINVAL;
+		goto exit;
+	}
+
+	/* Queue is not yet constructed, so inline next element code here. */
+	tail++;
+	if (tail >= NV_PMU_FBQ_MSG_NUM_ELEMENTS) {
+		tail = 0U;
+	}
+
+	g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_SET);
+
+exit:
+	return err;
+}
+
+static int pmu_process_init_msg_dmem(struct gk20a *g, struct nvgpu_pmu *pmu,
+	struct pmu_msg *msg)
+{
+	u32 tail = 0U;
+	int err = 0;
+
+	nvgpu_log_fn(g, " ");
+
+	g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_GET);
+
+	err = nvgpu_falcon_copy_from_dmem(pmu->flcn, tail,
+		(u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
+	if (err != 0) {
+		nvgpu_err(g, "PMU falcon DMEM copy failed");
+		goto exit;
+	}
+
+	if (msg->hdr.unit_id != PMU_UNIT_INIT) {
+		nvgpu_err(g, "expecting init msg");
+		err = -EINVAL;
+		goto exit;
+	}
+
+	err = nvgpu_falcon_copy_from_dmem(pmu->flcn, tail + PMU_MSG_HDR_SIZE,
+		(u8 *)&msg->msg, (u32)msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
+	if (err != 0) {
+		nvgpu_err(g, "PMU falcon DMEM copy failed");
+		goto exit;
+	}
+
+	if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
+		nvgpu_err(g, "expecting pmu init msg");
+		err = -EINVAL;
+		goto exit;
+	}
+
+	tail += ALIGN(msg->hdr.size, PMU_DMEM_ALIGNMENT);
+	g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_SET);
+
+exit:
+	return err;
+}
+
 int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 			struct pmu_msg *msg)
 {
@@ -372,33 +467,25 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 	struct pmu_v *pv = &g->ops.pmu_ver;
 	union pmu_init_msg_pmu *init;
 	struct pmu_sha1_gid_data gid_data;
-	u32 i, j, tail = 0;
-	int err;
+	int err = 0;
+	u32 i = 0U;
+	u32 j = 0U;
 
 	nvgpu_log_fn(g, " ");
 
 	nvgpu_pmu_dbg(g, "init received\n");
 
-	g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_GET);
-
-	nvgpu_falcon_copy_from_dmem(pmu->flcn, tail,
-		(u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
-	if (msg->hdr.unit_id != PMU_UNIT_INIT) {
-		nvgpu_err(g, "expecting init msg");
-		return -EINVAL;
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_RTOS_FBQ)) {
+		err = pmu_process_init_msg_fb(g, pmu, msg);
+	} else {
+		err = pmu_process_init_msg_dmem(g, pmu, msg);
 	}
 
-	nvgpu_falcon_copy_from_dmem(pmu->flcn, tail + PMU_MSG_HDR_SIZE,
-		(u8 *)&msg->msg, (u32)msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
-	if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
-		nvgpu_err(g, "expecting init msg");
-		return -EINVAL;
+	/* error check for above init message process */
+	if (err != 0) {
+		goto exit;
 	}
 
-	tail += ALIGN(msg->hdr.size, PMU_DMEM_ALIGNMENT);
-	g->ops.pmu.pmu_msgq_tail(pmu, &tail, QUEUE_SET);
-
 	init = pv->get_pmu_msg_pmu_init_msg_ptr(&(msg->msg.init));
 	if (!pmu->gid_info.valid) {
 		u32 *gid_hdr_data = (u32 *)(gid_data.signature);
@@ -422,14 +509,30 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 		}
 	}
 
-	for (i = 0; i < PMU_QUEUE_COUNT; i++) {
-		err = nvgpu_pmu_queue_init(pmu, i, init);
-		if (err != 0) {
-			for (j = 0; j < i; j++) {
-				nvgpu_pmu_queue_free(pmu, j);
-			}
-			nvgpu_err(g, "PMU queue init failed");
-			return err;
-		}
-	}
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_RTOS_FBQ)) {
+		pmu->queue_type = QUEUE_TYPE_FB;
+		for (i = 0; i < PMU_QUEUE_COUNT; i++) {
+			err = nvgpu_pmu_queue_init_fb(pmu, i, init);
+			if (err != 0) {
+				for (j = 0; j < i; j++) {
+					nvgpu_pmu_queue_free(pmu, j);
+				}
+				nvgpu_err(g, "PMU queue init failed");
+				return err;
+			}
+		}
+	} else {
+		pmu->queue_type = QUEUE_TYPE_DMEM;
+		for (i = 0; i < PMU_QUEUE_COUNT; i++) {
+			err = nvgpu_pmu_queue_init(pmu, i, init);
+			if (err != 0) {
+				for (j = 0; j < i; j++) {
+					nvgpu_pmu_queue_free(pmu, j);
+				}
+				nvgpu_err(g, "PMU queue init failed");
+				return err;
+			}
+		}
+	}
@@ -450,9 +553,9 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
 	nvgpu_pmu_state_change(g, PMU_STATE_INIT_RECEIVED, true);
 
-	nvgpu_pmu_dbg(g, "init received end\n");
-	return 0;
+exit:
+	nvgpu_pmu_dbg(g, "init received end, err %x", err);
+	return err;
 }
 
 static void pmu_setup_hw_enable_elpg(struct gk20a *g)


@@ -26,6 +26,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
+#include <nvgpu/pmuif/gpmu_super_surf_if.h>
 #include <nvgpu/falcon.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/string.h>
@@ -103,7 +104,89 @@ int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 	return g->ops.pmu.pmu_mutex_release(pmu, id, token);
 }
 
-/* PMU falcon queue init */
+/* FB queue init */
+int nvgpu_pmu_queue_init_fb(struct nvgpu_pmu *pmu,
+	u32 id, union pmu_init_msg_pmu *init)
+{
+	struct gk20a *g = gk20a_from_pmu(pmu);
+	struct nvgpu_falcon_queue_params params = {0};
+	u32 oflag = 0;
+	int err = 0;
+	u32 tmp_id = id;
+
+	/* init queue parameters */
+	if (PMU_IS_COMMAND_QUEUE(id)) {
+		/* currently PMU FBQ supports SW command queues only */
+		if (!PMU_IS_SW_COMMAND_QUEUE(id)) {
+			pmu->queue[id] = NULL;
+			err = 0;
+			goto exit;
+		}
+
+		/*
+		 * set OFLAG_WRITE for command queue
+		 * i.e. push from nvgpu &
+		 * pop from falcon ucode
+		 */
+		oflag = OFLAG_WRITE;
+
+		params.super_surface_mem =
+			&pmu->super_surface_buf;
+		params.fbq_offset = (u32)offsetof(
+			struct nv_pmu_super_surface,
+			fbq.cmd_queues.queue[id]);
+		params.size = NV_PMU_FBQ_CMD_NUM_ELEMENTS;
+		params.fbq_element_size = NV_PMU_FBQ_CMD_ELEMENT_SIZE;
+	} else if (PMU_IS_MESSAGE_QUEUE(id)) {
+		/*
+		 * set OFLAG_READ for message queue
+		 * i.e. push from falcon ucode &
+		 * pop from nvgpu
+		 */
+		oflag = OFLAG_READ;
+
+		params.super_surface_mem =
+			&pmu->super_surface_buf;
+		params.fbq_offset = (u32)offsetof(
+			struct nv_pmu_super_surface,
+			fbq.msg_queue);
+		params.size = NV_PMU_FBQ_MSG_NUM_ELEMENTS;
+		params.fbq_element_size = NV_PMU_FBQ_MSG_ELEMENT_SIZE;
+	} else {
+		nvgpu_err(g, "invalid queue-id %d", id);
+		err = -EINVAL;
+		goto exit;
+	}
+
+	params.id = id;
+	params.oflag = oflag;
+	params.queue_type = QUEUE_TYPE_FB;
+
+	if (tmp_id == PMU_COMMAND_QUEUE_HPQ) {
+		tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3;
+	} else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) {
+		tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3;
+	} else if (tmp_id == PMU_MESSAGE_QUEUE) {
+		tmp_id = PMU_QUEUE_MSG_IDX_FOR_V5;
+	} else {
+		/* return if queue id not supported */
+		goto exit;
+	}
+
+	params.index = init->v5.queue_index[tmp_id];
+	params.offset = init->v5.queue_offset;
+
+	err = nvgpu_falcon_queue_init(pmu->flcn, &pmu->queue[id], params);
+	if (err != 0) {
+		nvgpu_err(g, "queue-%d init failed", id);
+	}
+
+exit:
+	return err;
+}
+
+/* DMEM queue init */
 int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu,
 	u32 id, union pmu_init_msg_pmu *init)
 {


@@ -174,10 +174,13 @@ struct gk20a;
 /* SEC2 RTOS support*/
 #define NVGPU_SUPPORT_SEC2_RTOS			69
+/* PMU RTOS FBQ support*/
+#define NVGPU_SUPPORT_PMU_RTOS_FBQ		70
+
 /*
  * Must be greater than the largest bit offset in the above list.
  */
-#define NVGPU_MAX_ENABLED_BITS			70U
+#define NVGPU_MAX_ENABLED_BITS			71U
 
 /**
  * nvgpu_is_enabled - Check if the passed flag is enabled.


@@ -325,6 +325,7 @@ struct nvgpu_pmu {
 	struct pmu_sha1_gid gid_info;
 
 	struct nvgpu_falcon_queue *queue[PMU_QUEUE_COUNT];
+	u32 queue_type;
 
 	struct pmu_sequence *seq;
 	unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE];
@@ -431,6 +432,9 @@ int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu, u32 id,
 		union pmu_init_msg_pmu *init);
 void nvgpu_pmu_queue_free(struct nvgpu_pmu *pmu, u32 id);
+int nvgpu_pmu_queue_init_fb(struct nvgpu_pmu *pmu,
+	u32 id, union pmu_init_msg_pmu *init);
+
 /* send a cmd to pmu */
 int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 	struct pmu_msg *msg, struct pmu_payload *payload,


@@ -88,7 +88,7 @@ struct pmu_sha1_gid_data {
 /* PMU INIT MSG */
 enum {
-	PMU_INIT_MSG_TYPE_PMU_INIT = 0,
+	PMU_INIT_MSG_TYPE_PMU_INIT = 0U,
 };
 
 struct pmu_init_msg_pmu_v1 {


@@ -1215,6 +1215,7 @@ int tu104_init_hal(struct gk20a *g)
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_MULTIPLE_WPR, true);
 	nvgpu_set_enabled(g, NVGPU_FECS_TRACE_VA, true);
 	nvgpu_set_enabled(g, NVGPU_SUPPORT_SEC2_RTOS, true);
+	nvgpu_set_enabled(g, NVGPU_SUPPORT_PMU_RTOS_FBQ, false);
 
 	/* for now */
 	gops->clk.support_clk_freq_controller = false;