gpu: nvgpu: add a new PMU RPC: ASYNC_CMD_RESP RPC

- When DISALLOW cmd is sent from driver to PMU the actual
  completion of the disallow will be acknowledged by PMU
  via a new RPC: ASYNC_CMD_RESP.
- Disallow needs a delayed ACK from PMU in order to disable
  the ELPG.
- If ELPG is already engaged, the DISALLOW cmd will trigger
  ELPG exit and then transition to PMU_PG_STATE_DISALLOW.
- After this whole process is completed, PMU will send
  DISALLOW_ACK through ASYNC_CMD_RESP RPC.
- After disallow command is sent from the driver, NvGPU driver
  waits/polls for disallow command ack. This is sent immediately
  by RPC framework of PMU.
- Then, the driver will poll/wait for ASYNC_CMD_RESP event which
  is the delayed DISALLOW ACK.
- The driver captures the ASYNC_CMD_RESP RPC sent from PMU.
- Set disallow_state to PMU_ELPG_STAT_OFF.
- If the driver does not wait/poll for this delayed disallow
  ack from PMU, PMU halt issues can result: the driver progresses
  while PMU is still processing the DISALLOW cmd, which can lead
  to errors.

Bug 3430273
Bug 3439350

Change-Id: If2acf8391d18cd3c6b8b07e3bf6577667ec99eea
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2631214
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Divya
2021-10-20 06:39:05 +00:00
committed by mobile promotions
parent b6d349dcf6
commit 6a21dd929f
6 changed files with 114 additions and 4 deletions

View File

@@ -201,6 +201,11 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
WARN_ON(true);
}
break;
case PMU_UNIT_PG:
if (pmu->pg->process_rpc_event != NULL) {
err = pmu->pg->process_rpc_event(g, (void *)&msg->hdr);
}
break;
default:
nvgpu_log_info(g, "Received invalid PMU unit event");
break;

View File

@@ -373,6 +373,53 @@ static int ga10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
return err;
}
/*
 * Handle the delayed ACK carried by an ASYNC_CMD_RESP RPC from the PMU.
 *
 * The PMU sends this once an asynchronous PG command has fully
 * completed; for a DISALLOW ack on the GR engine we mark the disallow
 * transition finished so the waiter polling disallow_state can proceed.
 *
 * @g:       GPU device pointer.
 * @ctrl_id: PG engine id the ack applies to (PMU_PG_ELPG_ENGINE_ID_*).
 * @msg_id:  async command id being acknowledged (PMU_PG_MSG_ASYNC_CMD_*).
 *
 * Returns 0 on success, -EINVAL for an unknown engine or message id.
 */
static int ga10b_pmu_pg_handle_async_cmd_resp(struct gk20a *g, u32 ctrl_id,
		u32 msg_id)
{
	int err = 0;

	switch (msg_id) {
	case PMU_PG_MSG_ASYNC_CMD_DISALLOW:
		if (ctrl_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
			/* Delayed DISALLOW_ACK: ELPG disallow completed. */
			g->pmu->pg->disallow_state = PMU_ELPG_STAT_OFF;
		} else if (ctrl_id == PMU_PG_ELPG_ENGINE_ID_MS_LTC) {
			/* To-do for MS_LTC */
		} else {
			nvgpu_err(g, "Invalid engine id");
			err = -EINVAL;
		}
		break;
	default:
		/* msg_id is u32: use the unsigned conversion specifier. */
		nvgpu_err(g, "Invalid message id: %u", msg_id);
		err = -EINVAL;
		break;
	}

	return err;
}
/*
 * Dispatch an RPC event received from the PMU PG unit.
 *
 * Only ASYNC_CMD_RESP (the delayed ack for asynchronous PG commands
 * such as DISALLOW) is recognised here; any other RPC function id is
 * logged and rejected with -EINVAL.
 *
 * @g:      GPU device pointer.
 * @pmumsg: raw PMU message, laid out as struct pmu_nvgpu_rpc_pg_event.
 */
static int ga10b_pmu_pg_process_rpc_event(struct gk20a *g, void *pmumsg)
{
	struct pmu_nvgpu_rpc_pg_event *event =
		(struct pmu_nvgpu_rpc_pg_event *)pmumsg;
	struct pmu_rm_rpc_struct_lpwr_pg_async_cmd_resp *resp;

	if (event->rpc_hdr.function != PMU_NV_RPC_ID_LPWR_PG_ASYNC_CMD_RESP) {
		nvgpu_err(g, "Invalid PMU RPC: 0x%x", event->rpc_hdr.function);
		return -EINVAL;
	}

	/* The RPC payload starts at the RPC header. */
	resp = (struct pmu_rm_rpc_struct_lpwr_pg_async_cmd_resp *)
			(void *)(&event->rpc_hdr);

	return ga10b_pmu_pg_handle_async_cmd_resp(g, resp->ctrl_id,
			resp->msg_id);
}
void nvgpu_ga10b_pg_sw_init(struct gk20a *g,
struct nvgpu_pmu_pg *pg)
{
@@ -392,4 +439,5 @@ void nvgpu_ga10b_pg_sw_init(struct gk20a *g,
pg->hw_load_zbc = NULL;
pg->rpc_handler = ga10b_pg_rpc_handler;
pg->init_send = ga10b_pmu_pg_init_send;
pg->process_rpc_event = ga10b_pmu_pg_process_rpc_event;
}

View File

@@ -266,6 +266,24 @@ struct pmu_rpc_struct_lpwr_loading_pg_ctrl_buf_load
u32 scratch[1];
};
/*!
 * RPC payload for PG_ASYNC_CMD_RESP — the delayed acknowledgement the
 * PMU sends once an asynchronous PG command (e.g. DISALLOW) has fully
 * completed. Layout must match the PMU firmware's definition.
 */
struct pmu_rm_rpc_struct_lpwr_pg_async_cmd_resp {
	/*!
	 * Must be first field in RPC structure.
	 */
	struct nv_pmu_rpc_header hdr;
	/*!
	 * Control ID of the Async PG Command
	 * (PG engine id, e.g. PMU_PG_ELPG_ENGINE_ID_GRAPHICS).
	 */
	u8 ctrl_id;
	/*!
	 * Message ID of the Async PG Command
	 * (PMU_PG_MSG_ASYNC_CMD_*).
	 */
	u8 msg_id;
};
/*
* Brief Statistics structure for PG features
*/

View File

@@ -400,6 +400,10 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
if ((BIT32(pg_engine_id) & pg_engine_id_list) != 0U) {
if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
if (pmu->pg->process_rpc_event != NULL) {
pmu->pg->disallow_state =
PMU_ELPG_STAT_OFF_PENDING;
}
} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
pmu->pg->mscg_transition_state =
PMU_ELPG_STAT_OFF_PENDING;
@@ -439,6 +443,24 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
ret = -EBUSY;
goto exit_unlock;
}
/*
* PMU will send ASYNC_CMD_RESP when disallow
* command is successfully completed and ELPG
* is exited.
* Wait for DISALLOW_ACK RPC event from
* PMU.
*/
if (pmu->pg->process_rpc_event != NULL) {
ptr = &pmu->pg->disallow_state;
pmu_wait_message_cond(pmu,
nvgpu_get_poll_timeout(g),
ptr, PMU_ELPG_STAT_OFF);
if (*ptr != PMU_ELPG_STAT_OFF) {
nvgpu_err(g, "DISALLOW_ACK failed");
goto exit_unlock;
}
}
}
}

View File

@@ -72,6 +72,7 @@ struct nvgpu_pg_init {
struct nvgpu_pmu_pg {
u32 elpg_stat;
u32 disallow_state;
u32 elpg_ms_stat;
#define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1U /* msec */
struct nvgpu_pg_init pg_init;
@@ -120,6 +121,7 @@ struct nvgpu_pmu_pg {
void (*rpc_handler)(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nv_pmu_rpc_header *rpc, struct rpc_handler_payload *rpc_payload);
int (*init_send)(struct gk20a *g, struct nvgpu_pmu *pmu, u8 pg_engine_id);
int (*process_rpc_event)(struct gk20a *g, void *pmumsg);
};
/*PG defines used by nvpgu-pmu*/

View File

@@ -39,12 +39,22 @@
/* RPC function calls supported by PG unit */
#define NV_PMU_RPC_ID_PG_LOADING_PRE_INIT 0x00U
#define NV_PMU_RPC_ID_PG_LOADING_POST_INIT 0x01U
#define NV_PMU_RPC_ID_PG_LOADING_INIT 0x0AU
#define NV_PMU_RPC_ID_PG_LOADING_INIT 0x0AU
#define NV_PMU_RPC_ID_PG_LOADING_BUF_LOAD 0x0BU
#define NV_PMU_RPC_ID_PG_ALLOW 0x04U
#define NV_PMU_RPC_ID_PG_DISALLOW 0x05U
#define NV_PMU_RPC_ID_PG_ALLOW 0x04U
#define NV_PMU_RPC_ID_PG_DISALLOW 0x05U
#define NV_PMU_RPC_ID_PG_THRESHOLD_UPDATE 0x06U
#define NV_PMU_RPC_ID_PG_SFM_UPDATE 0x08U
#define NV_PMU_RPC_ID_PG_SFM_UPDATE 0x08U
/* PG unit RPC functions sent by PMU */
#define PMU_NV_RPC_ID_LPWR_PG_ASYNC_CMD_RESP 0x00U
#define PMU_NV_RPC_ID_LPWR_PG_LOG_FLUSHED 0x01U
#define PMU_NV_RPC_ID_LPWR_PG_IDLE_SNAP 0x02U
/* Async PG message IDs (carried in the msg_id field of ASYNC_CMD_RESP) */
enum {
	/* Delayed ack: a DISALLOW command has fully completed on the PMU. */
	PMU_PG_MSG_ASYNC_CMD_DISALLOW,
};
/* PG message */
enum {
@@ -422,4 +432,9 @@ struct pmu_pg_stats {
u32 pg_gating_deny_cnt;
};
/*
 * PG unit RPC event as received from the PMU: the generic PMU message
 * header followed by the RPC header whose 'function' field identifies
 * the RPC (PMU_NV_RPC_ID_LPWR_PG_*).
 */
struct pmu_nvgpu_rpc_pg_event {
	/* Generic PMU message header; must come first. */
	struct pmu_hdr msg_hdr;
	/* RPC header; payload structs overlay memory starting here. */
	struct pmu_nvgpu_rpc_header rpc_hdr;
};
#endif /* NVGPU_PMUIF_PG_H */