gpu: nvgpu: dGPU PMU init message changes

-dGPU PMU init message interface updated to support RPC-style init:
 the PMU init message is now an RPC event, with the changes needed to
 handle the RPC event during the init stage.
-Added new RPC header struct pmu_nvgpu_rpc_header, the header sent
 from the PMU to NvGPU as part of RPC events received from the PMU.
-GID info moved to the super-surface for dGPU, so the GID info fetch
 from DMEM is removed for dGPU and kept for iGPU only.
-PMU_UNIT_INIT value for dGPU init changed (new unit ID
 PMU_UNIT_INIT_DGPU).

JIRA NVGPU-3723

Change-Id: I016bd1150494007a56905db23b4769e693ecd5da
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2153141
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Mahantesh Kumbar
2019-07-26 22:03:33 +05:30
committed by mobile promotions
parent 0fa454ac31
commit 6be751ed11
7 changed files with 117 additions and 93 deletions
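
For orientation, a minimal sketch of the check this change introduces on
the init path (all identifiers are taken from the diffs below; the helper
itself is illustrative and not part of the commit):

/* Sketch: does this FB-queue message carry the dGPU init RPC event? */
static bool pmu_msg_is_dgpu_init_rpc(struct pmu_msg *msg)
{
	/* dGPU init now arrives on its own unit ID ... */
	if (msg->hdr.unit_id != PMU_UNIT_INIT_DGPU) {
		return false;
	}
	/* ... and the payload is an RPC event whose header carries a
	 * function ID instead of a plain msg_type. */
	return msg->event_rpc.cmdmgmt_init.hdr.function ==
		PMU_INIT_MSG_TYPE_PMU_INIT;
}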

View File

@@ -417,8 +417,8 @@ static void *pmu_get_init_msg_ptr_v4(struct pmu_init_msg *init)
 static u16 pmu_get_init_msg_sw_mngd_area_off_v5(
 	union pmu_init_msg_pmu *init_msg)
 {
-	struct pmu_init_msg_pmu_v5 *init =
-		(struct pmu_init_msg_pmu_v5 *)(&init_msg->v5);
+	struct pmu_nvgpu_rpc_struct_cmdmgmt_init *init =
+		(struct pmu_nvgpu_rpc_struct_cmdmgmt_init *)(&init_msg->v5);
 
 	return init->sw_managed_area_offset;
 }
@@ -435,8 +435,8 @@ static u16 pmu_get_init_msg_sw_mngd_area_off_v4(
 static u16 pmu_get_init_msg_sw_mngd_area_size_v5(
 	union pmu_init_msg_pmu *init_msg)
 {
-	struct pmu_init_msg_pmu_v5 *init =
-		(struct pmu_init_msg_pmu_v5 *)(&init_msg->v5);
+	struct pmu_nvgpu_rpc_struct_cmdmgmt_init *init =
+		(struct pmu_nvgpu_rpc_struct_cmdmgmt_init *)(&init_msg->v5);
 
 	return init->sw_managed_area_size;
 }
@@ -917,33 +917,6 @@ static void pmu_get_init_msg_queue_params_v4(
 	*offset = init->queue_offset + current_ptr;
 }
 
-static void pmu_get_init_msg_queue_params_v5(
-	u32 id, void *init_msg, u32 *index, u32 *offset, u32 *size)
-{
-	struct pmu_init_msg_pmu_v5 *init = init_msg;
-	u32 current_ptr = 0;
-	u32 i;
-
-	if (id == PMU_COMMAND_QUEUE_HPQ) {
-		id = PMU_QUEUE_HPQ_IDX_FOR_V3;
-	} else if (id == PMU_COMMAND_QUEUE_LPQ) {
-		id = PMU_QUEUE_LPQ_IDX_FOR_V3;
-	} else if (id == PMU_MESSAGE_QUEUE) {
-		id = PMU_QUEUE_MSG_IDX_FOR_V5;
-	} else {
-		return;
-	}
-
-	*index = init->queue_index[id];
-	*size = init->queue_size[id];
-	if (id != 0U) {
-		for (i = 0 ; i < id; i++) {
-			current_ptr += init->queue_size[i];
-		}
-	}
-	*offset = init->queue_offset + current_ptr;
-}
-
 static void *pmu_get_sequence_in_alloc_ptr_v3(struct pmu_sequence *seq)
 {
 	return (void *)(&seq->in_v3);
@@ -1302,8 +1275,6 @@ int nvgpu_pmu_init_fw_ver_ops(struct gk20a *g,
 			pmu_allocation_get_fb_size_v3;
 
 	if (app_version == APP_VERSION_GV10X ||
 		app_version == APP_VERSION_TU10X) {
-		fw_ops->get_init_msg_queue_params =
-			pmu_get_init_msg_queue_params_v5;
 		fw_ops->get_init_msg_ptr =
 			pmu_get_init_msg_ptr_v5;
 		fw_ops->get_init_msg_sw_mngd_area_off =

View File

@@ -350,7 +350,7 @@ static int pmu_process_init_msg_fb(struct gk20a *g, struct nvgpu_pmu *pmu,
 	pmu_read_init_msg_fb(g, pmu, tail, PMU_MSG_HDR_SIZE,
 		(void *)&msg->hdr);
 
-	if (msg->hdr.unit_id != PMU_UNIT_INIT) {
+	if (msg->hdr.unit_id != PMU_UNIT_INIT_DGPU) {
 		nvgpu_err(g, "FB MSG Q: expecting init msg");
 		err = -EINVAL;
 		goto exit;
@@ -358,8 +358,8 @@ static int pmu_process_init_msg_fb(struct gk20a *g, struct nvgpu_pmu *pmu,
 	pmu_read_init_msg_fb(g, pmu, tail, msg->hdr.size,
 		(void *)&msg->hdr);
 
-	if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
+	if (msg->event_rpc.cmdmgmt_init.hdr.function !=
+		PMU_INIT_MSG_TYPE_PMU_INIT) {
 		nvgpu_err(g, "FB MSG Q: expecting pmu init msg");
 		err = -EINVAL;
 		goto exit;
@@ -419,54 +419,72 @@ exit:
 	return err;
 }
 
+static int pmu_gid_info_dmem_read(struct nvgpu_pmu *pmu,
+		union pmu_init_msg_pmu *init)
+{
+	struct pmu_fw_ver_ops *fw_ops = &pmu->fw->ops;
+	struct pmu_sha1_gid *gid_info = &pmu->gid_info;
+	struct pmu_sha1_gid_data gid_data;
+	int err = 0;
+
+	if (!gid_info->valid) {
+		err = nvgpu_falcon_copy_from_dmem(pmu->flcn,
+			fw_ops->get_init_msg_sw_mngd_area_off(init),
+			(u8 *)&gid_data,
+			(u32)sizeof(struct pmu_sha1_gid_data), 0);
+		if (err != 0) {
+			nvgpu_err(pmu->g, "PMU falcon DMEM copy failed");
+			goto exit;
+		}
+
+		gid_info->valid =
+			(gid_data.signature == PMU_SHA1_GID_SIGNATURE);
+		if (gid_info->valid) {
+			if (sizeof(gid_info->gid) !=
+				sizeof(gid_data.gid)) {
+				WARN_ON(1);
+			}
+			nvgpu_memcpy((u8 *)gid_info->gid, (u8 *)gid_data.gid,
+				sizeof(gid_info->gid));
+		}
+	}
+
+exit:
+	return err;
+}
+
 static int pmu_process_init_msg(struct nvgpu_pmu *pmu,
 		struct pmu_msg *msg)
 {
 	struct gk20a *g = pmu->g;
 	struct pmu_fw_ver_ops *fw_ops = &g->pmu->fw->ops;
 	union pmu_init_msg_pmu *init;
-	struct pmu_sha1_gid_data gid_data;
 	int err = 0;
 
 	nvgpu_log_fn(g, " ");
 
 	nvgpu_pmu_dbg(g, "init received\n");
 
-	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_RTOS_FBQ)) {
-		err = pmu_process_init_msg_fb(g, pmu, msg);
-	} else {
-		err = pmu_process_init_msg_dmem(g, pmu, msg);
-	}
-
-	/* error check for above init message process*/
-	if (err != 0) {
-		goto exit;
-	}
-
 	(void)memset((void *)msg, 0x0, sizeof(struct pmu_msg));
 	init = fw_ops->get_init_msg_ptr(&(msg->msg.init));
 
-	if (!pmu->gid_info.valid) {
-		u32 *gid_hdr_data = &gid_data.signature;
-
-		err = nvgpu_falcon_copy_from_dmem(pmu->flcn,
-			fw_ops->get_init_msg_sw_mngd_area_off(init),
-			gid_data.sign_bytes,
-			(u32)sizeof(struct pmu_sha1_gid_data), 0);
-		if (err != 0) {
-			nvgpu_err(g, "PMU falcon DMEM copy failed");
-			goto exit;
-		}
-		pmu->gid_info.valid =
-			(*gid_hdr_data == PMU_SHA1_GID_SIGNATURE);
-		if (pmu->gid_info.valid) {
-			WARN_ON(sizeof(pmu->gid_info.gid) !=
-				sizeof(gid_data.gid));
-			nvgpu_memcpy((u8 *)pmu->gid_info.gid,
-				(u8 *)gid_data.gid,
-				sizeof(pmu->gid_info.gid));
-		}
-	}
+	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_PMU_RTOS_FBQ)) {
+		err = pmu_process_init_msg_fb(g, pmu, msg);
+		if (err != 0) {
+			goto exit;
+		}
+	} else {
+		err = pmu_process_init_msg_dmem(g, pmu, msg);
+		if (err != 0) {
+			goto exit;
+		}
+
+		err = pmu_gid_info_dmem_read(pmu, init);
+		if (err != 0) {
+			goto exit;
+		}
+	}
@@ -474,7 +492,7 @@ static int pmu_process_init_msg(struct nvgpu_pmu *pmu,
 		nvgpu_pmu_super_surface_mem(g, pmu,
 			pmu->super_surface));
 	if (err != 0) {
-		return err;
+		goto exit;
 	}
 
 	nvgpu_pmu_dmem_allocator_init(g, pmu, &pmu->dmem, init);

View File

@@ -97,7 +97,7 @@ static int pmu_fb_queue_init(struct gk20a *g, struct pmu_queues *queues,
 		tmp_id = PMU_QUEUE_MSG_IDX_FOR_V5;
 	}
 
-	params.index = init->v5.queue_index[tmp_id];
+	params.index = init->v5.queue_phy_id[tmp_id];
 
 	err = nvgpu_engine_fb_queue_init(&queues->fb_queue[id], params);
 	if (err != 0) {

View File

@@ -64,19 +64,24 @@ struct pmu_sha1_gid_data {
 struct pmu_msg {
 	struct pmu_hdr hdr;
 	union {
-		struct pmu_init_msg init;
-		struct pmu_perfmon_msg perfmon;
-		struct pmu_pg_msg pg;
-		struct pmu_rc_msg rc;
-		struct pmu_acr_msg acr;
-		struct nv_pmu_boardobj_msg boardobj;
-		struct nv_pmu_perf_msg perf;
-		struct nv_pmu_volt_msg volt;
-		struct nv_pmu_clk_msg clk;
-		struct nv_pmu_pmgr_msg pmgr;
-		struct nv_pmu_therm_msg therm;
-		struct nv_pmu_rpc_msg rpc;
-	} msg;
+		union {
+			struct pmu_init_msg init;
+			struct pmu_perfmon_msg perfmon;
+			struct pmu_pg_msg pg;
+			struct pmu_rc_msg rc;
+			struct pmu_acr_msg acr;
+			struct nv_pmu_boardobj_msg boardobj;
+			struct nv_pmu_perf_msg perf;
+			struct nv_pmu_volt_msg volt;
+			struct nv_pmu_clk_msg clk;
+			struct nv_pmu_pmgr_msg pmgr;
+			struct nv_pmu_therm_msg therm;
+			struct nv_pmu_rpc_msg rpc;
+		} msg;
+		union {
+			struct pmu_nvgpu_rpc_struct_cmdmgmt_init cmdmgmt_init;
+		} event_rpc;
+	};
 };
 
 int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu);

View File

@@ -108,4 +108,14 @@ struct nv_pmu_rpc_header {
 	u32 exec_time_pmu_ns;
 };
 
+/* PMU to NVGPU RPC header structure. */
+struct pmu_nvgpu_rpc_header {
+	/* Identifies the unit servicing the requested RPC. */
+	u8 unit_id;
+	/* Identifies the requested RPC (within the unit). */
+	u8 function;
+	/* Time at which the RPC was sent from the PMU for dispatch in nvgpu. */
+	struct falc_u64 rpc_transfer_time;
+};
+
 #endif /* NVGPU_PMUIF_CMN_H */
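
As a usage note: assuming falc_u64 is the usual {lo, hi} pair of u32
words, a hypothetical helper could fold the header's timestamp into a
plain u64 for latency accounting (this helper is not part of the commit):

/* Sketch: recover the 64-bit PMU transfer timestamp from the header. */
static u64 pmu_rpc_transfer_time(struct pmu_nvgpu_rpc_header *hdr)
{
	return ((u64)hdr->rpc_transfer_time.hi << 32) |
		(u64)hdr->rpc_transfer_time.lo;
}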

View File

@@ -42,6 +42,7 @@
 #define PMU_UNIT_THERM		U8(0x14)
 #define PMU_UNIT_PMGR		U8(0x18)
 #define PMU_UNIT_VOLT		U8(0x0E)
+#define PMU_UNIT_INIT_DGPU	U8(0x0F)
 #define PMU_UNIT_END		U8(0x23)
 
 #define PMU_UNIT_INVALID	U8(0xFF)

View File

@@ -112,26 +112,45 @@ struct pmu_init_msg_pmu_v4 {
 	u8 dummy[18];
 };
 
-struct pmu_init_msg_pmu_v5 {
-	u8 msg_type;
-	falcon_status flcn_status;
-	u8 queue_index[PMU_QUEUE_COUNT_FOR_V5];
-	u16 queue_size[PMU_QUEUE_COUNT_FOR_V5];
-	u16 queue_offset;
-	u16 sw_managed_area_offset;
-	u16 sw_managed_area_size;
-	u16 os_debug_entry_point;
-	u8 dummy[18];
-};
+/* RPC */
+/* Defines the structure that holds data used to execute INIT RPC. */
+struct pmu_nvgpu_rpc_struct_cmdmgmt_init {
+	/* Must be the first field in an RPC structure. */
+	struct pmu_nvgpu_rpc_header hdr;
+	/*
+	 * Notifies the RM if the PMU has encountered any critical
+	 * error that would prevent it from operating correctly.
+	 */
+	falcon_status status;
+	/*
+	 * PMU command and message queue locations and sizes are determined
+	 * at PMU build time. The first one starts at @ref queues_start and
+	 * each next one follows the previous.
+	 */
+	u8 queue_phy_id[PMU_QUEUE_COUNT_FOR_V5];
+	/* Array of sizes for each queue. */
+	u16 queue_size[PMU_QUEUE_COUNT_FOR_V5];
+	/* Offset in DMEM to the first queue. */
+	u16 queues_start;
+	/* Offset in DMEM to the first byte of the nvgpu Managed Heap. */
+	u16 sw_managed_area_offset;
+	/* Size (in bytes) of the RM Managed Heap. */
+	u16 sw_managed_area_size;
+	u8 pad;
+	/*
+	 * DMEM address of the PMU's DEBUG_INFO. Set to
+	 * RM_OS_DEBUG_ENTRY_POINT_INVALID if an entry point is
+	 * not provided.
+	 */
+	u16 os_debug_entry_point;
+	/* BRSS data. */
+	u8 brss_data[24];
+};
 
 union pmu_init_msg_pmu {
 	struct pmu_init_msg_pmu_v1 v1;
 	struct pmu_init_msg_pmu_v4 v4;
-	struct pmu_init_msg_pmu_v5 v5;
+	struct pmu_nvgpu_rpc_struct_cmdmgmt_init v5;
 };
struct pmu_init_msg {
@@ -139,7 +158,7 @@ struct pmu_init_msg {
 	union {
 		u8 msg_type;
 		struct pmu_init_msg_pmu_v1 pmu_init_v1;
 		struct pmu_init_msg_pmu_v4 pmu_init_v4;
-		struct pmu_init_msg_pmu_v5 pmu_init_v5;
+		struct pmu_nvgpu_rpc_struct_cmdmgmt_init pmu_init_v5;
 	};
 };
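
The queue layout comment above implies how per-queue DMEM offsets are
derived. A hedged sketch, mirroring the logic of the removed
pmu_get_init_msg_queue_params_v5() against the new field names (the
helper itself is hypothetical, not part of this change):

/* Sketch: DMEM offset of queue i. The first queue starts at
 * queues_start; each next one follows the previous. */
static u32 pmu_cmdmgmt_init_queue_offset(
		struct pmu_nvgpu_rpc_struct_cmdmgmt_init *init, u32 i)
{
	u32 offset = init->queues_start;
	u32 j;

	for (j = 0U; j < i; j++) {
		offset += init->queue_size[j];
	}

	return offset;
}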