gpu: nvgpu: ga10b: Add new RPCs for AELPG

- Add AP_INIT RPC to initialize the AELPG feature.
- Add AP_CTRL_INIT_AND_ENABLE RPC to program the
  APCTRL parameters (idle thresholds, residency and
  cycle-count limits) for Adaptive ELPG.
- Add AP_CTRL_ENABLE and AP_CTRL_DISABLE RPCs to
  send AELPG enable/disable requests to the PMU via
  the sysfs nodes.
- Restructure the RPC handler based on the PG_LOADING
  and PG unit IDs. This is needed to handle the
  different types of new RPCs from the PMU.

JIRA NVGPU-7182

Change-Id: If00b00730507f17ff1883a67094f7e16da5b81ea
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2728286
(cherry picked from commit fffb58703bd718600e8c983dcd1c81d9abe83802)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2603161
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Authored by Divya on 2021-09-30 09:14:05 +00:00; committed by mobile promotions.
parent 3fb2a2e209 commit ee5053f7be
12 changed files with 453 additions and 71 deletions
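
For context, a minimal standalone model of the handler split this commit describes. The types and handlers below are simplified stand-ins, not the actual nvgpu structures; only the two RPC function IDs are taken from the defines added later in this diff. Note why the split is needed: NV_PMU_RPC_ID_PG_AP_CTRL_DISABLE (PG unit) and NV_PMU_RPC_ID_PG_LOADING_AP_INIT (PG_LOADING unit) share the value 0x0F, so a single shared switch could no longer tell them apart.

#include <stdio.h>

enum pmu_unit { PMU_UNIT_PG, PMU_UNIT_PG_LOADING };

struct rpc_msg { unsigned int function; };

struct pmu_pg {
	/* one handler per PMU unit, as in the restructured pmu_rpc_handler() */
	void (*pg_rpc_handler)(struct rpc_msg *rpc);
	void (*pg_loading_rpc_handler)(struct rpc_msg *rpc);
};

static void pg_handler(struct rpc_msg *rpc)
{
	printf("PG unit RPC 0x%02x\n", rpc->function);
}

static void pg_loading_handler(struct rpc_msg *rpc)
{
	printf("PG_LOADING unit RPC 0x%02x\n", rpc->function);
}

/* dispatch on the unit id first, then on the per-unit function id */
static void dispatch(struct pmu_pg *pg, enum pmu_unit unit, struct rpc_msg *rpc)
{
	switch (unit) {
	case PMU_UNIT_PG_LOADING:
		if (pg->pg_loading_rpc_handler != NULL)
			pg->pg_loading_rpc_handler(rpc);
		break;
	case PMU_UNIT_PG:
		if (pg->pg_rpc_handler != NULL)
			pg->pg_rpc_handler(rpc);
		break;
	}
}

int main(void)
{
	struct pmu_pg pg = { pg_handler, pg_loading_handler };
	struct rpc_msg ap_enable = { 0x0E };	/* NV_PMU_RPC_ID_PG_AP_CTRL_ENABLE */
	struct rpc_msg ap_init = { 0x0F };	/* NV_PMU_RPC_ID_PG_LOADING_AP_INIT */

	dispatch(&pg, PMU_UNIT_PG, &ap_enable);
	dispatch(&pg, PMU_UNIT_PG_LOADING, &ap_init);
	return 0;
}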


@@ -593,9 +593,14 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
}
break;
+case PMU_UNIT_PG_LOADING:
+if (pmu->pg->pg_loading_rpc_handler != NULL) {
+pmu->pg->pg_loading_rpc_handler(g, pmu, &rpc,
+rpc_payload);
+}
+break;
case PMU_UNIT_PG:
-if (pmu->pg->rpc_handler != NULL) {
-pmu->pg->rpc_handler(g, pmu, &rpc, rpc_payload);
+if (pmu->pg->pg_rpc_handler != NULL) {
+pmu->pg->pg_rpc_handler(g, pmu, &rpc, rpc_payload);
}
break;
default:


@@ -300,15 +300,6 @@ static void ga10b_pg_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
nvgpu_log_fn(g, " ");
switch (rpc->function) {
-case NV_PMU_RPC_ID_PG_LOADING_PRE_INIT:
-nvgpu_pmu_dbg(g, "Reply to PG_PRE_INIT");
-break;
-case NV_PMU_RPC_ID_PG_LOADING_POST_INIT:
-nvgpu_pmu_dbg(g, "Reply to PG_POST_INIT");
-break;
-case NV_PMU_RPC_ID_PG_LOADING_INIT:
-nvgpu_pmu_dbg(g, "Reply to PG_INIT");
-break;
case NV_PMU_RPC_ID_PG_THRESHOLD_UPDATE:
nvgpu_pmu_dbg(g, "Reply to PG_THRESHOLD_UPDATE");
break;
@@ -316,11 +307,6 @@ static void ga10b_pg_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
nvgpu_pmu_dbg(g, "Reply to PG_SFM_UPDATE");
nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_ELPG_BOOTED, true);
break;
-case NV_PMU_RPC_ID_PG_LOADING_BUF_LOAD:
-nvgpu_pmu_dbg(g, "Reply to PG_LOADING_BUF_LOAD");
-pmu->pg->buf_loaded = true;
-nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_LOADING_ZBC, true);
-break;
case NV_PMU_RPC_ID_PG_ALLOW:
nvgpu_pmu_dbg(g, "Reply to PG_ALLOW");
rpc_allow = (struct pmu_rpc_struct_lpwr_pg_ctrl_allow *)rpc_payload->rpc_buff;
@@ -346,10 +332,49 @@ static void ga10b_pg_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
case NV_PMU_RPC_ID_PG_PG_CTRL_STATS_GET:
nvgpu_pmu_dbg(g, "Reply to PG_STATS_GET");
break;
+case NV_PMU_RPC_ID_PG_AP_CTRL_ENABLE:
+nvgpu_pmu_dbg(g, "Reply to AP_CTRL_ENABLE");
+break;
+case NV_PMU_RPC_ID_PG_AP_CTRL_DISABLE:
+nvgpu_pmu_dbg(g, "Reply to AP_CTRL_DISABLE");
+break;
default:
nvgpu_err(g,
"unsupported PG rpc function : 0x%x", rpc->function);
break;
}
}
+static void ga10b_pg_loading_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
+struct nv_pmu_rpc_header *rpc, struct rpc_handler_payload *rpc_payload)
+{
+nvgpu_log_fn(g, " ");
+switch (rpc->function) {
+case NV_PMU_RPC_ID_PG_LOADING_PRE_INIT:
+nvgpu_pmu_dbg(g, "Reply to PG_PRE_INIT");
+break;
+case NV_PMU_RPC_ID_PG_LOADING_POST_INIT:
+nvgpu_pmu_dbg(g, "Reply to PG_POST_INIT");
+break;
+case NV_PMU_RPC_ID_PG_LOADING_INIT:
+nvgpu_pmu_dbg(g, "Reply to PG_INIT");
+break;
+case NV_PMU_RPC_ID_PG_LOADING_BUF_LOAD:
+nvgpu_pmu_dbg(g, "Reply to PG_LOADING_BUF_LOAD");
+pmu->pg->buf_loaded = true;
+nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_LOADING_ZBC, true);
+break;
+case NV_PMU_RPC_ID_PG_LOADING_AP_INIT:
+nvgpu_pmu_dbg(g, "Reply to AP_INIT");
+break;
+case NV_PMU_RPC_ID_PG_LOADING_AP_CTRL_INIT_AND_ENABLE:
+nvgpu_pmu_dbg(g, "Reply to AP_CTRL_INIT_AND_ENABLE");
+break;
+default:
+nvgpu_err(g,
+"unsupported PG_LOADING rpc function : 0x%x", rpc->function);
+break;
+}
+}
@@ -465,6 +490,87 @@ static int ga10b_pmu_pg_process_pg_event(struct gk20a *g, void *pmumsg)
return err;
}
+static int ga10b_pmu_pg_aelpg_init(struct gk20a *g)
+{
+struct pmu_rpc_struct_lpwr_loading_ap_init rpc;
+struct nvgpu_pmu *pmu = g->pmu;
+int status;
+nvgpu_log_fn(g, " ");
+(void) memset(&rpc, 0,
+sizeof(struct pmu_rpc_struct_lpwr_loading_ap_init));
+PMU_RPC_EXECUTE_CPB(status, pmu, PG_LOADING, AP_INIT, &rpc, 0);
+return status;
+}
+static int ga10b_pmu_pg_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
+{
+struct pmu_rpc_struct_lpwr_loading_ap_ctrl_init_and_enable rpc;
+struct nvgpu_pmu *pmu = g->pmu;
+int status;
+nvgpu_log_fn(g, " ");
+(void) memset(&rpc, 0,
+sizeof(struct pmu_rpc_struct_lpwr_loading_ap_ctrl_init_and_enable));
+rpc.ctrl_id = ctrl_id;
+rpc.min_idle_threshold_us = NV_PMU_PG_AP_IDLE_FILTER_MIN_DEFAULT_US;
+rpc.max_idle_threshold_us = NV_PMU_PG_AP_IDLE_FILTER_MAX_DEFAULT_US;
+rpc.breakeven_resident_time_us =
+NV_PMU_PG_AP_BREAK_EVEN_RESIDENT_TIME_DEFAULT_US;
+rpc.max_cycles_per_sample = NV_PMU_PG_AP_CYCLES_PER_SAMPLE_MAX_DEFAULT;
+rpc.min_residency = NV_PMU_PG_AP_MIN_RESIDENCY_DEFAULT;
+switch (ctrl_id) {
+case PMU_AP_CTRL_ID_GRAPHICS:
+rpc.base_multiplier = NV_PMU_PG_AP_BASE_MULTIPLIER_DEFAULT;
+break;
+default:
+nvgpu_err(g, "Invalid ctrl_id:%u for %s", ctrl_id, __func__);
+break;
+}
+PMU_RPC_EXECUTE_CPB(status, pmu, PG_LOADING,
+AP_CTRL_INIT_AND_ENABLE, &rpc, 0);
+return status;
+}
+static int ga10b_pmu_pg_aelpg_disable(struct gk20a *g, u8 ctrl_id)
+{
+struct pmu_rpc_struct_lpwr_ap_ctrl_disable rpc;
+struct nvgpu_pmu *pmu = g->pmu;
+int status;
+nvgpu_log_fn(g, " ");
+(void) memset(&rpc, 0,
+sizeof(struct pmu_rpc_struct_lpwr_ap_ctrl_disable));
+PMU_RPC_EXECUTE_CPB(status, pmu, PG, AP_CTRL_DISABLE, &rpc, 0);
+return status;
+}
+static int ga10b_pmu_pg_aelpg_enable(struct gk20a *g, u8 ctrl_id)
+{
+struct pmu_rpc_struct_lpwr_ap_ctrl_enable rpc;
+struct nvgpu_pmu *pmu = g->pmu;
+int status;
+nvgpu_log_fn(g, " ");
+(void) memset(&rpc, 0,
+sizeof(struct pmu_rpc_struct_lpwr_ap_ctrl_enable));
+PMU_RPC_EXECUTE_CPB(status, pmu, PG, AP_CTRL_ENABLE, &rpc, 0);
+return status;
+}
void nvgpu_ga10b_pg_sw_init(struct gk20a *g,
struct nvgpu_pmu_pg *pg)
{
@@ -482,7 +588,12 @@ void nvgpu_ga10b_pg_sw_init(struct gk20a *g,
pg->alloc_dmem = NULL;
pg->load_buff = ga10b_pmu_pg_load_buff;
pg->hw_load_zbc = NULL;
-pg->rpc_handler = ga10b_pg_rpc_handler;
+pg->aelpg_init = ga10b_pmu_pg_aelpg_init;
+pg->aelpg_init_and_enable = ga10b_pmu_pg_aelpg_init_and_enable;
+pg->aelpg_enable = ga10b_pmu_pg_aelpg_enable;
+pg->aelpg_disable = ga10b_pmu_pg_aelpg_disable;
+pg->pg_loading_rpc_handler = ga10b_pg_loading_rpc_handler;
+pg->pg_rpc_handler = ga10b_pg_rpc_handler;
pg->init_send = ga10b_pmu_pg_init_send;
pg->process_pg_event = ga10b_pmu_pg_process_pg_event;
}


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -315,8 +315,8 @@ struct pmu_nv_rpc_struct_lpwr_pg_idle_snap {
};
/*
- * Brief Statistics structure for PG features
- */
+ * Brief Statistics structure for PG features
+ */
struct pmu_pg_stats_v3
{
/* Number of time PMU successfully engaged sleep state */
@@ -379,6 +379,116 @@ struct pmu_rpc_struct_lpwr_pg_ctrl_stats_get {
};
+/*
+* Defines the structure that holds data used to execute AP_INIT RPC.
+*/
+struct pmu_rpc_struct_lpwr_loading_ap_init {
+/*
+* [IN/OUT] Must be first field in RPC structure
+*/
+struct nv_pmu_rpc_header hdr;
+/*
+* [NONE] Must be last field in RPC structure.
+* Used as variable size scratch space on RM managed DMEM heap
+* for this RPC.
+*/
+u32 scratch[1];
+};
+/*
+* Defines the structure that holds data used to execute
+* AP_CTRL_INIT_AND_ENABLE RPC.
+*/
+struct pmu_rpc_struct_lpwr_loading_ap_ctrl_init_and_enable {
+/*
+* [IN/OUT] Must be first field in RPC structure.
+*/
+struct nv_pmu_rpc_header hdr;
+/*
+* [OUT] Address of the dmem for stats data
+*/
+u32 stats_dmem_offset;
+/*
+* [IN] Minimum idle threshold in Us
+*/
+u32 min_idle_threshold_us;
+/*
+* [IN] Maximum idle threshold in Us
+*/
+u32 max_idle_threshold_us;
+/*
+* [IN] Break-even resident time for one cycle of parent feature
+*/
+u16 breakeven_resident_time_us;
+/*
+* [IN] Maximum number of allowed power feature cycles per sample
+*/
+u16 max_cycles_per_sample;
+/*
+* [IN] Minimum targeted residency
+*/
+u8 min_residency;
+/*
+* [IN] AP_CTRL index
+*/
+u8 ctrl_id;
+/*
+* [IN] Base multiplier for centralised LPWR callback
+*/
+u8 base_multiplier;
+/*
+* [IN] NV_TRUE if ApCtrl requires SW Histograms
+*/
+bool sw_hist_enabled;
+/*
+* [NONE] Must be last field in RPC structure.
+* Used as variable size scratch space on RM managed DMEM heap
+* for this RPC.
+*/
+u32 scratch[1];
+};
+/*
+* Defines the structure that holds data used to execute AP_CTRL_ENABLE RPC.
+*/
+struct pmu_rpc_struct_lpwr_ap_ctrl_enable {
+/*
+* [IN/OUT] Must be first field in RPC structure.
+*/
+struct nv_pmu_rpc_header hdr;
+/*
+* [IN] AP_CTRL index
+*/
+u8 ctrl_id;
+/*
+* [NONE] Must be last field in RPC structure.
+* Used as variable size scratch space on RM managed DMEM heap
+* for this RPC.
+*/
+u32 scratch[1];
+};
+/*
+* Defines the structure that holds data used to execute
+* AP_CTRL_DISABLE RPC.
+*/
+struct pmu_rpc_struct_lpwr_ap_ctrl_disable {
+/*
+* [IN/OUT] Must be first field in RPC structure.
+*/
+struct nv_pmu_rpc_header hdr;
+/*
+* [IN] AP_CTRL index
+*/
+u8 ctrl_id;
+/*
+* [NONE] Must be last field in RPC structure.
+* Used as variable size scratch space on RM managed DMEM heap
+* for this RPC.
+*/
+u32 scratch[1];
+};
void nvgpu_ga10b_pg_sw_init(struct gk20a *g, struct nvgpu_pmu_pg *pg);
u32 ga10b_pmu_pg_engines_list(struct gk20a *g);
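
Side note: the "must be first field" / "must be last field" layout contract documented in the new RPC structures above can be checked at compile time. A minimal sketch with placeholder types (these are assumed stand-ins, not the real nvgpu headers):

#include <stddef.h>
#include <stdint.h>

struct nv_pmu_rpc_header { uint32_t placeholder; };	/* stand-in header */

struct ap_ctrl_enable_rpc {
	struct nv_pmu_rpc_header hdr;	/* must be first */
	uint8_t ctrl_id;
	uint32_t scratch[1];		/* must be last */
};

/* hdr first: its offset must be zero */
_Static_assert(offsetof(struct ap_ctrl_enable_rpc, hdr) == 0,
	"RPC header must be the first field");
/* scratch last: nothing but it may end the struct; holds on common ABIs */
_Static_assert(offsetof(struct ap_ctrl_enable_rpc, scratch) + sizeof(uint32_t[1])
	== sizeof(struct ap_ctrl_enable_rpc),
	"scratch must be the last field");

int main(void) { return 0; }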


@@ -345,6 +345,74 @@ int gm20b_pmu_pg_init_send(struct gk20a *g, struct nvgpu_pmu *pmu,
return err;
}
+int gm20b_pmu_pg_aelpg_init(struct gk20a *g)
+{
+/* Remove reliance on app_ctrl field. */
+union pmu_ap_cmd ap_cmd;
+int status;
+ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
+ap_cmd.init.pg_sampling_period_us = g->pmu->pg->aelpg_param[0];
+status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
+return status;
+}
+int gm20b_pmu_pg_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
+{
+struct nvgpu_pmu *pmu = g->pmu;
+int status = 0;
+union pmu_ap_cmd ap_cmd;
+ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
+ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
+ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
+pmu->pg->aelpg_param[1];
+ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
+pmu->pg->aelpg_param[2];
+ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
+pmu->pg->aelpg_param[3];
+ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
+pmu->pg->aelpg_param[4];
+switch (ctrl_id) {
+case PMU_AP_CTRL_ID_GRAPHICS:
+break;
+default:
+nvgpu_err(g, "Invalid ctrl_id:%u for %s", ctrl_id, __func__);
+break;
+}
+status = nvgpu_pmu_ap_send_command(g, &ap_cmd, true);
+return status;
+}
+int gm20b_pmu_pg_aelpg_enable(struct gk20a *g, u8 ctrl_id)
+{
+int status = 0;
+union pmu_ap_cmd ap_cmd;
+/* Enable AELPG */
+ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_ENABLE_CTRL;
+ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
+status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
+return status;
+}
+int gm20b_pmu_pg_aelpg_disable(struct gk20a *g, u8 ctrl_id)
+{
+int status = 0;
+union pmu_ap_cmd ap_cmd;
+/* Disable AELPG */
+ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
+ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
+status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
+return status;
+}
void nvgpu_gm20b_pg_sw_init(struct gk20a *g,
struct nvgpu_pmu_pg *pg)
{
@@ -365,6 +433,11 @@ void nvgpu_gm20b_pg_sw_init(struct gk20a *g,
pg->alloc_dmem = gm20b_pmu_pg_elpg_alloc_dmem;
pg->load_buff = gm20b_pmu_pg_elpg_load_buff;
pg->hw_load_zbc = gm20b_pmu_pg_elpg_hw_load_zbc;
-pg->rpc_handler = NULL;
+pg->pg_loading_rpc_handler = NULL;
+pg->pg_rpc_handler = NULL;
pg->init_send = gm20b_pmu_pg_init_send;
+pg->aelpg_init = gm20b_pmu_pg_aelpg_init;
+pg->aelpg_init_and_enable = gm20b_pmu_pg_aelpg_init_and_enable;
+pg->aelpg_enable = gm20b_pmu_pg_aelpg_enable;
+pg->aelpg_disable = gm20b_pmu_pg_aelpg_disable;
}


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -49,5 +49,8 @@ int gm20b_pmu_pg_elpg_load_buff(struct gk20a *g, struct nvgpu_pmu *pmu);
int gm20b_pmu_pg_elpg_hw_load_zbc(struct gk20a *g, struct nvgpu_pmu *pmu);
int gm20b_pmu_pg_init_send(struct gk20a *g, struct nvgpu_pmu *pmu,
u8 pg_engine_id);
+int gm20b_pmu_pg_aelpg_init(struct gk20a *g);
+int gm20b_pmu_pg_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
+int gm20b_pmu_pg_aelpg_enable(struct gk20a *g, u8 ctrl_id);
+int gm20b_pmu_pg_aelpg_disable(struct gk20a *g, u8 ctrl_id);
#endif /* NVGPU_PMU_PG_SW_GM20B_H */


@@ -122,6 +122,11 @@ void nvgpu_gp10b_pg_sw_init(struct gk20a *g,
pg->alloc_dmem = gm20b_pmu_pg_elpg_alloc_dmem;
pg->load_buff = gm20b_pmu_pg_elpg_load_buff;
pg->hw_load_zbc = gm20b_pmu_pg_elpg_hw_load_zbc;
-pg->rpc_handler = NULL;
+pg->pg_loading_rpc_handler = NULL;
+pg->pg_rpc_handler = NULL;
pg->init_send = gm20b_pmu_pg_init_send;
+pg->aelpg_init = gm20b_pmu_pg_aelpg_init;
+pg->aelpg_init_and_enable = gm20b_pmu_pg_aelpg_init_and_enable;
+pg->aelpg_enable = gm20b_pmu_pg_aelpg_enable;
+pg->aelpg_disable = gm20b_pmu_pg_aelpg_disable;
}


@@ -176,7 +176,12 @@ void nvgpu_gv11b_pg_sw_init(struct gk20a *g,
pg->alloc_dmem = gm20b_pmu_pg_elpg_alloc_dmem;
pg->load_buff = gm20b_pmu_pg_elpg_load_buff;
pg->hw_load_zbc = gm20b_pmu_pg_elpg_hw_load_zbc;
-pg->rpc_handler = NULL;
+pg->pg_loading_rpc_handler = NULL;
+pg->pg_rpc_handler = NULL;
pg->init_send = gm20b_pmu_pg_init_send;
pg->process_pg_event = gv11b_pmu_pg_process_pg_event;
+pg->aelpg_init = gm20b_pmu_pg_aelpg_init;
+pg->aelpg_init_and_enable = gm20b_pmu_pg_aelpg_init_and_enable;
+pg->aelpg_enable = gm20b_pmu_pg_aelpg_enable;
+pg->aelpg_disable = gm20b_pmu_pg_aelpg_disable;
}


@@ -31,42 +31,79 @@ int nvgpu_aelpg_init(struct gk20a *g)
{
int status = 0;
-/* Remove reliance on app_ctrl field. */
-union pmu_ap_cmd ap_cmd;
+if (g->pmu->pg->aelpg_init == NULL) {
+nvgpu_err(g, "PG AELPG init function not assigned");
+return -EINVAL;
+}
+status = g->pmu->pg->aelpg_init(g);
-ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
-ap_cmd.init.pg_sampling_period_us = g->pmu->pg->aelpg_param[0];
+if (status != 0) {
+nvgpu_err(g, "aelpg_init FAILED err=%d",
+status);
+} else {
+nvgpu_pmu_dbg(g, "done");
+}
-status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
return status;
}
-int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
+int nvgpu_aelpg_init_and_enable(struct gk20a *g, u32 ctrl_id)
{
-struct nvgpu_pmu *pmu = g->pmu;
int status = 0;
-union pmu_ap_cmd ap_cmd;
-ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
-ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
-ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
-pmu->pg->aelpg_param[1];
-ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
-pmu->pg->aelpg_param[2];
-ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
-pmu->pg->aelpg_param[3];
-ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
-pmu->pg->aelpg_param[4];
+if (g->pmu->pg->aelpg_init_and_enable == NULL) {
+nvgpu_err(g, "PG AELPG init and Enable function not assigned");
+return -EINVAL;
+}
+status = g->pmu->pg->aelpg_init_and_enable(g, ctrl_id);
-switch (ctrl_id) {
-case PMU_AP_CTRL_ID_GRAPHICS:
-break;
-default:
-nvgpu_err(g, "Invalid ctrl_id:%u for %s", ctrl_id, __func__);
-break;
-}
+if (status != 0) {
+nvgpu_err(g, "aelpg_init_and_enable FAILED err=%d",
+status);
+} else {
+nvgpu_pmu_dbg(g, "done");
+}
return status;
}
+int nvgpu_aelpg_enable(struct gk20a *g, u32 ctrl_id)
+{
+int status = 0;
+if (g->pmu->pg->aelpg_enable == NULL) {
+nvgpu_err(g, "AELPG Enable function not assigned");
+return -EINVAL;
+}
+status = g->pmu->pg->aelpg_enable(g, ctrl_id);
+if (status != 0) {
+nvgpu_err(g, "aelpg_enable FAILED err=%d",
+status);
+} else {
+nvgpu_pmu_dbg(g, "done");
+}
+return status;
+}
+int nvgpu_aelpg_disable(struct gk20a *g, u32 ctrl_id)
+{
+int status = 0;
+if (g->pmu->pg->aelpg_disable == NULL) {
+nvgpu_err(g, "AELPG Disable function not assigned");
+return -EINVAL;
+}
+status = g->pmu->pg->aelpg_disable(g, ctrl_id);
+if (status != 0) {
+nvgpu_err(g, "aelpg_disable FAILED err=%d",
+status);
+} else {
+nvgpu_pmu_dbg(g, "done");
+}
-status = nvgpu_pmu_ap_send_command(g, &ap_cmd, true);
return status;
}
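
The four nvgpu_aelpg_*() wrappers above share one pattern: validate that the chip-specific HAL op is assigned, delegate to it, and log the outcome. A standalone model of that guard-then-delegate shape (simplified types, illustrative only; fake_ga10b_enable is a hypothetical stand-in):

#include <stdio.h>
#include <errno.h>

struct pg_ops {
	int (*aelpg_enable)(unsigned int ctrl_id);
};

/* mirrors nvgpu_aelpg_enable(): a NULL op means "unsupported", not a crash */
static int aelpg_enable_wrapper(const struct pg_ops *ops, unsigned int ctrl_id)
{
	int status;

	if (ops->aelpg_enable == NULL) {
		fprintf(stderr, "AELPG enable op not assigned\n");
		return -EINVAL;
	}

	status = ops->aelpg_enable(ctrl_id);
	if (status != 0)
		fprintf(stderr, "aelpg_enable failed: %d\n", status);
	return status;
}

static int fake_ga10b_enable(unsigned int ctrl_id)
{
	printf("would send AP_CTRL_ENABLE RPC for ctrl_id %u\n", ctrl_id);
	return 0;
}

int main(void)
{
	struct pg_ops ga10b = { fake_ga10b_enable };
	struct pg_ops stub = { NULL };	/* chip without AELPG support */

	aelpg_enable_wrapper(&ga10b, 1);
	aelpg_enable_wrapper(&stub, 1);	/* returns -EINVAL */
	return 0;
}

This is also why the gm20b/gp10b/gv11b init functions in this diff explicitly NULL out the handler fields they do not implement: the common layer probes the pointers instead of assuming every chip provides every op.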


@@ -126,8 +126,16 @@ struct nvgpu_pmu_pg {
u8 pg_engine_id);
int (*load_buff)(struct gk20a *g, struct nvgpu_pmu *pmu);
int (*hw_load_zbc)(struct gk20a *g, struct nvgpu_pmu *pmu);
-void (*rpc_handler)(struct gk20a *g, struct nvgpu_pmu *pmu,
-struct nv_pmu_rpc_header *rpc, struct rpc_handler_payload *rpc_payload);
+int (*aelpg_init)(struct gk20a *g);
+int (*aelpg_init_and_enable)(struct gk20a *g, u8 ctrl_id);
+int (*aelpg_enable)(struct gk20a *g, u8 ctrl_id);
+int (*aelpg_disable)(struct gk20a *g, u8 ctrl_id);
+void (*pg_loading_rpc_handler)(struct gk20a *g, struct nvgpu_pmu *pmu,
+struct nv_pmu_rpc_header *rpc,
+struct rpc_handler_payload *rpc_payload);
+void (*pg_rpc_handler)(struct gk20a *g, struct nvgpu_pmu *pmu,
+struct nv_pmu_rpc_header *rpc,
+struct rpc_handler_payload *rpc_payload);
int (*init_send)(struct gk20a *g, struct nvgpu_pmu *pmu, u8 pg_engine_id);
int (*process_pg_event)(struct gk20a *g, void *pmumsg);
};
@@ -166,7 +174,9 @@ int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
/* AELPG */
int nvgpu_aelpg_init(struct gk20a *g);
-int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
+int nvgpu_aelpg_init_and_enable(struct gk20a *g, u32 ctrl_id);
+int nvgpu_aelpg_enable(struct gk20a *g, u32 ctrl_id);
+int nvgpu_aelpg_disable(struct gk20a *g, u32 ctrl_id);
int nvgpu_pmu_ap_send_command(struct gk20a *g,
union pmu_ap_cmd *p_ap_cmd, bool b_block);


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+* Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -38,6 +38,24 @@
#define PMU_AP_IDLE_MASK_HIST_IDX_2 (5)
#define PMU_AP_IDLE_MASK_HIST_IDX_3 (6)
+/*
+* Default values for AP parameters
+*
+* IDLE_FILTER_MIN_DEFAULT_US : Default minimum idle threshold value in usec
+* IDLE_FILTER_MIN_DI_US : Minimum idle threshold for DI in usec
+* IDLE_FILTER_MAX_DEFAULT_US : Default maximum idle threshold value in usec
+* IDLE_FILTER_MAX_DI_US : Maximum idle threshold for DI in usec
+* MIN_RESIDENCY_DEFAULT : Default minimum residency per AP sample
+* BREAKEVEN_RESIDENT_TIME_DEFAULT_US : Default breakeven resident time per cycle
+* BREAKEVEN_RESIDENT_TIME_DI_US : Breakeven resident time per cycle for DI
+* CYCLES_PER_SAMPLE_MAX_DEFAULT : Maximum power feature cycles per AP sample
+*/
+#define NV_PMU_PG_AP_BASE_MULTIPLIER_DEFAULT (1)
+#define NV_PMU_PG_AP_IDLE_FILTER_MIN_DEFAULT_US (100)
+#define NV_PMU_PG_AP_IDLE_FILTER_MAX_DEFAULT_US (70000)
+#define NV_PMU_PG_AP_MIN_RESIDENCY_DEFAULT (1)
+#define NV_PMU_PG_AP_BREAK_EVEN_RESIDENT_TIME_DEFAULT_US (1300)
+#define NV_PMU_PG_AP_CYCLES_PER_SAMPLE_MAX_DEFAULT (100)
/* Mapping between AP_CTRLs and Histograms */
#define PMU_AP_HISTOGRAM_IDX_GRAPHICS (PMU_AP_HISTOGRAM(1))


@@ -36,16 +36,25 @@
#define PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE (0x00000007U)
#define PMU_PG_ELPG_ENGINE_MAX PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE
-/* RPC function calls supported by PG unit */
-#define NV_PMU_RPC_ID_PG_LOADING_PRE_INIT 0x00U
-#define NV_PMU_RPC_ID_PG_LOADING_POST_INIT 0x01U
-#define NV_PMU_RPC_ID_PG_LOADING_INIT 0x0AU
-#define NV_PMU_RPC_ID_PG_LOADING_BUF_LOAD 0x0BU
+/* RPC function calls supported by PG_LOADING unit */
+#define NV_PMU_RPC_ID_PG_LOADING_PRE_INIT 0x00U
+#define NV_PMU_RPC_ID_PG_LOADING_POST_INIT 0x01U
+#define NV_PMU_RPC_ID_PG_LOADING_INIT 0x0AU
+#define NV_PMU_RPC_ID_PG_LOADING_BUF_LOAD 0x0BU
+#define NV_PMU_RPC_ID_PG_LOADING_AP_INIT 0x0FU
+#define NV_PMU_RPC_ID_PG_LOADING_AP_CTRL_INIT_AND_ENABLE 0x10U
+/* RPC calls serviced by PG unit */
#define NV_PMU_RPC_ID_PG_ALLOW 0x04U
#define NV_PMU_RPC_ID_PG_DISALLOW 0x05U
#define NV_PMU_RPC_ID_PG_THRESHOLD_UPDATE 0x06U
#define NV_PMU_RPC_ID_PG_PG_CTRL_STATS_GET 0x07U
#define NV_PMU_RPC_ID_PG_SFM_UPDATE 0x09U
+#define NV_PMU_RPC_ID_PG_AP_CTRL_ENABLE 0x0EU
+#define NV_PMU_RPC_ID_PG_AP_CTRL_DISABLE 0x0FU
+#define NV_PMU_RPC_ID_PG_AP_CTRL_KICK 0x10U
/* PG unit RPC functions sent by PMU */
#define PMU_NV_RPC_ID_LPWR_PG_ASYNC_CMD_RESP 0x00U
@@ -76,7 +85,6 @@
enum {
PMU_PG_MSG_ASYNC_CMD_DISALLOW,
};
/* PG message */
enum {
PMU_PG_ELPG_MSG_INIT_ACK,


@@ -668,7 +668,6 @@ static ssize_t aelpg_param_store(struct device *dev,
{
struct gk20a *g = get_gk20a(dev);
int status = 0;
-union pmu_ap_cmd ap_cmd;
int *paramlist = NULL;
int ret = 0;
u32 defaultparam[5] = {
@@ -702,9 +701,11 @@ static ssize_t aelpg_param_store(struct device *dev,
*/
if (g->aelpg_enabled && nvgpu_pmu_get_fw_ready(g, g->pmu)) {
/* Disable AELPG */
-ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
-ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
+status = nvgpu_aelpg_disable(g, PMU_AP_CTRL_ID_GRAPHICS);
+if (status != 0) {
+nvgpu_err(g, "AELPG disable failed");
+return count;
+}
/* Enable AELPG */
nvgpu_aelpg_init(g);
@@ -739,7 +740,6 @@ static ssize_t aelpg_enable_store(struct device *dev,
struct gk20a *g = get_gk20a(dev);
unsigned long val = 0;
int status = 0;
-union pmu_ap_cmd ap_cmd;
int err;
if (kstrtoul(buf, 10, &val) < 0)
@@ -759,15 +759,12 @@ static ssize_t aelpg_enable_store(struct device *dev,
if (val && !g->aelpg_enabled) {
g->aelpg_enabled = true;
/* Enable AELPG */
-ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_ENABLE_CTRL;
-ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
+status = nvgpu_aelpg_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
} else if (!val && g->aelpg_enabled) {
g->aelpg_enabled = false;
/* Disable AELPG */
-ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
-ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
+status = nvgpu_aelpg_disable(g, PMU_AP_CTRL_ID_GRAPHICS);
}
} else {
nvgpu_info(g, "PMU is not ready, AELPG request failed");