gpu: nvgpu: async cmd resp for gv11b

- When the DISALLOW cmd is sent from the driver to PMU, the actual
  completion of the disallow is acknowledged by PMU via a
  PG EVENT: ASYNC_CMD_RESP.
- Disallow needs a delayed ACK from PMU in order to disable
  ELPG.
- If ELPG is already engaged, the DISALLOW cmd triggers an ELPG
  exit and then a transition to PMU_PG_STATE_DISALLOW.
- Once this whole process is complete, PMU sends DISALLOW_ACK
  through an ASYNC_CMD_RESP msg.
- After the disallow command is sent, the NvGPU driver waits/polls
  for the disallow command ack, which is sent immediately by the
  msg framework of PMU.
- The driver then polls/waits for the ASYNC_CMD_RESP event, which
  is the delayed DISALLOW ACK.
- The driver captures the ASYNC_CMD_RESP sent by PMU and sets
  disallow_state to ELPG_OFF.
- If the driver does not wait/poll for this delayed disallow ack
  from PMU, errors can result because PMU is still processing the
  DISALLOW cmd while the driver has already progressed further
  (see the sketch below).
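
As a rough illustration of this sequence only (not the literal nvgpu code:
disallow_elpg_sketch() and send_disallow_cmd() are hypothetical stand-ins,
and the pmu_wait_message_cond() arguments are assumed from the truncated
call in the diff below; elpg_stat, disallow_state, PMU_ELPG_STAT_OFF_PENDING
and PMU_ELPG_STAT_OFF are taken from the change itself):

static int disallow_elpg_sketch(struct gk20a *g, struct nvgpu_pmu *pmu)
{
	void *ptr;

	/* Mark both the cmd ack and the delayed ack as pending. */
	pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
	if (pmu->pg->process_pg_event != NULL) {
		pmu->pg->disallow_state = PMU_ELPG_STAT_OFF_PENDING;
	}

	/*
	 * 1) Post DISALLOW to PMU (send_disallow_cmd() is a hypothetical
	 *    helper standing in for the real cmd-post path); PMU's msg
	 *    framework acks the cmd immediately.
	 */
	send_disallow_cmd(g, pmu, PMU_PG_ELPG_ENGINE_ID_GRAPHICS);

	/* 2) Poll for the immediate cmd ack. */
	pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
		&pmu->pg->elpg_stat, PMU_ELPG_STAT_OFF);

	/*
	 * 3) Poll for the delayed DISALLOW ack: the ASYNC_CMD_RESP PG
	 *    event handler (gv11b_pmu_pg_process_pg_event) sets
	 *    disallow_state to PMU_ELPG_STAT_OFF once PMU has really
	 *    finished processing the DISALLOW cmd.
	 */
	if (pmu->pg->process_pg_event != NULL) {
		ptr = &pmu->pg->disallow_state;
		pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
			ptr, PMU_ELPG_STAT_OFF);
	}

	return 0;
}

Step 3 is the new part: without it the driver could move on while PMU is
still processing the DISALLOW cmd, which is the race this change closes.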

Bug 3580271

Change-Id: I332180c05b6a398107f065d54e9718b7038fb1b2
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2689500
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
Author: Divya
Date: 2022-03-29 19:18:49 +00:00
Committed by: mobile promotions
Parent: 43ba356132
Commit: fb019bf43a
6 changed files with 43 additions and 10 deletions


@@ -202,8 +202,8 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
 		}
 		break;
 	case PMU_UNIT_PG:
-		if (pmu->pg->process_rpc_event != NULL) {
-			err = pmu->pg->process_rpc_event(g, (void *)&msg->hdr);
+		if (pmu->pg->process_pg_event != NULL) {
+			err = pmu->pg->process_pg_event(g, (void *)&msg->hdr);
 		}
 		break;
 	default:


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -433,7 +433,7 @@ static int ga10b_pmu_pg_handle_idle_snap_rpc(struct gk20a *g,
 	return err;
 }
 
-static int ga10b_pmu_pg_process_rpc_event(struct gk20a *g, void *pmumsg)
+static int ga10b_pmu_pg_process_pg_event(struct gk20a *g, void *pmumsg)
 {
 	int err = 0;
 	struct pmu_nv_rpc_struct_lpwr_pg_async_cmd_resp *async_cmd;
@@ -483,5 +483,5 @@ void nvgpu_ga10b_pg_sw_init(struct gk20a *g,
 	pg->hw_load_zbc = NULL;
 	pg->rpc_handler = ga10b_pg_rpc_handler;
 	pg->init_send = ga10b_pmu_pg_init_send;
-	pg->process_rpc_event = ga10b_pmu_pg_process_rpc_event;
+	pg->process_pg_event = ga10b_pmu_pg_process_pg_event;
 }


@@ -27,6 +27,7 @@
#include <nvgpu/pmu/cmd.h>
#include <nvgpu/pmu/pmu_pg.h>
#include "pmu_pg.h"
#include "pg_sw_gv11b.h"
#include "pg_sw_gp106.h"
#include "pg_sw_gm20b.h"
@@ -134,6 +135,30 @@ int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
 	return 0;
 }
 
+static int gv11b_pmu_pg_process_pg_event(struct gk20a *g, void *pmumsg)
+{
+	int err = 0;
+	struct pmu_msg *msg = (struct pmu_msg *) pmumsg;
+
+	switch (msg->msg.pg.async_cmd_resp.msg_id) {
+	case PMU_PG_MSG_ASYNC_CMD_DISALLOW:
+		if (msg->msg.pg.async_cmd_resp.ctrl_id ==
+				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
+			g->pmu->pg->disallow_state = PMU_ELPG_STAT_OFF;
+		} else {
+			nvgpu_err(g, "Invalid engine id");
+			err = -EINVAL;
+		}
+		break;
+	default:
+		nvgpu_err(g, "Invalid message id: %d",
+			msg->msg.pg.async_cmd_resp.msg_id);
+		err = -EINVAL;
+		break;
+	}
+	return err;
+}
+
 void nvgpu_gv11b_pg_sw_init(struct gk20a *g,
 		struct nvgpu_pmu_pg *pg)
 {
@@ -153,4 +178,5 @@ void nvgpu_gv11b_pg_sw_init(struct gk20a *g,
 	pg->hw_load_zbc = gm20b_pmu_pg_elpg_hw_load_zbc;
 	pg->rpc_handler = NULL;
 	pg->init_send = gm20b_pmu_pg_init_send;
+	pg->process_pg_event = gv11b_pmu_pg_process_pg_event;
 }


@@ -402,7 +402,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 		if ((BIT32(pg_engine_id) & pg_engine_id_list) != 0U) {
 			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
 				pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
-				if (pmu->pg->process_rpc_event != NULL) {
+				if (pmu->pg->process_pg_event != NULL) {
 					pmu->pg->disallow_state =
 						PMU_ELPG_STAT_OFF_PENDING;
 				}
@@ -453,7 +453,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 				 * Wait for DISALLOW_ACK RPC event from
 				 * PMU.
 				 */
-				if (pmu->pg->process_rpc_event != NULL) {
+				if (pmu->pg->process_pg_event != NULL) {
 					ptr = &pmu->pg->disallow_state;
 					pmu_wait_message_cond(pmu,
 						nvgpu_get_poll_timeout(g),


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -128,7 +128,7 @@ struct nvgpu_pmu_pg {
 	void (*rpc_handler)(struct gk20a *g, struct nvgpu_pmu *pmu,
 		struct nv_pmu_rpc_header *rpc, struct rpc_handler_payload *rpc_payload);
 	int (*init_send)(struct gk20a *g, struct nvgpu_pmu *pmu, u8 pg_engine_id);
-	int (*process_rpc_event)(struct gk20a *g, void *pmumsg);
+	int (*process_pg_event)(struct gk20a *g, void *pmumsg);
 };
 
 /*PG defines used by nvpgu-pmu*/


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -117,12 +117,19 @@ struct pmu_pg_msg_eng_buf_stat {
 	u8 status;
 };
 
+struct pmu_pg_msg_async_cmd_resp {
+	u8 msg_type;
+	u8 ctrl_id;
+	u8 msg_id;
+};
+
 struct pmu_pg_msg {
 	union {
 		u8 msg_type;
 		struct pmu_pg_msg_elpg_msg elpg_msg;
 		struct pmu_pg_msg_stat stat;
 		struct pmu_pg_msg_eng_buf_stat eng_buf_stat;
+		struct pmu_pg_msg_async_cmd_resp async_cmd_resp;
 		/* TBD: other pg messages */
 		union pmu_ap_msg ap_msg;
 		struct nv_pmu_rppg_msg rppg_msg;