nvgpu: gv11b: Add/Update PMU cmds for ELPG.

This patch:
- Adds the PMU command needed for enabling ELPG,
  i.e. the command to update the sub-feature mask that enables ELPG.
- Adds a new version of the PG-GR init param command function
  which uses the updated command interface (see the payload sketch below).

JIRA GPUT19X-20.

Change-Id: If969c018e2e28264fdc9c897892eb28b021d12f2
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: http://git-master/r/1504873
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
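
For context, here is a minimal sketch of the two PG command payloads this
patch fills in, roughly as they might be declared in the PMU interface
headers (those headers are not part of this diff). The field names come from
the code below; the field widths, their ordering, and the use of the kernel
u8/u32 types from <linux/types.h> are assumptions.

/* Sketch only: plausible payload layouts, not the actual header definitions. */
struct pmu_pg_cmd_gr_init_param_v1 {
	u8 cmd_type;		/* PMU_PG_CMD_ID_PG_PARAM */
	u8 sub_cmd_id;		/* PMU_PG_PARAM_CMD_GR_INIT_PARAM */
	u32 featuremask;	/* e.g. PMU_PG_FEATURE_GR_POWER_GATING_ENABLED */
};

/* New with this patch: payload for the sub-feature mask update command. */
struct pmu_pg_cmd_sub_feature_mask_update {
	u8 cmd_type;		/* PMU_PG_CMD_ID_PG_PARAM */
	u8 sub_cmd_id;		/* PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE */
	u8 ctrl_id;		/* engine, e.g. PMU_PG_ELPG_ENGINE_ID_GRAPHICS */
	u32 enabled_mask;	/* sub-features to enable, e.g. GR power gating */
};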

@@ -27,6 +27,9 @@
#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
#define gv11b_dbg_pmu(fmt, arg...) \
	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

#define ALIGN_4KB 12

static bool gv11b_is_pmu_supported(struct gk20a *g)
@@ -147,6 +150,90 @@ static int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
	return 0;
}

/* Completion handler for the ELPG sub-feature mask update command. */
static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	gk20a_dbg_fn("");

	if (status != 0) {
		nvgpu_err(g, "Sub-feature mask update cmd aborted\n");
		return;
	}

	gv11b_dbg_pmu("sub-feature mask update is acknowledged from PMU %x\n",
		msg->msg.pg.msg_type);
}

/* Completion handler for the PG-GR init param command. */
static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	gk20a_dbg_fn("");

	if (status != 0) {
		nvgpu_err(g, "GR PARAM cmd aborted\n");
		return;
	}

	gv11b_dbg_pmu("GR PARAM is acknowledged from PMU %x\n",
		msg->msg.pg.msg_type);
}

/* Send the GR init parameters (v1 command interface) to the PMU. */
static int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;

	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
		memset(&cmd, 0, sizeof(struct pmu_cmd));
		cmd.hdr.unit_id = PMU_UNIT_PG;
		cmd.hdr.size = PMU_CMD_HDR_SIZE +
				sizeof(struct pmu_pg_cmd_gr_init_param_v1);
		cmd.cmd.pg.gr_init_param_v1.cmd_type =
				PMU_PG_CMD_ID_PG_PARAM;
		cmd.cmd.pg.gr_init_param_v1.sub_cmd_id =
				PMU_PG_PARAM_CMD_GR_INIT_PARAM;
		cmd.cmd.pg.gr_init_param_v1.featuremask =
				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED;

		gv11b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n");
		gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
				pmu_handle_pg_param_msg, pmu, &seq, ~0);
	} else
		return -EINVAL;

	return 0;
}

/* Tell the PMU which ELPG sub-features to enable for the GR engine. */
static int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;

	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
		memset(&cmd, 0, sizeof(struct pmu_cmd));
		cmd.hdr.unit_id = PMU_UNIT_PG;
		cmd.hdr.size = PMU_CMD_HDR_SIZE +
				sizeof(struct pmu_pg_cmd_sub_feature_mask_update);
		cmd.cmd.pg.sf_mask_update.cmd_type =
				PMU_PG_CMD_ID_PG_PARAM;
		cmd.cmd.pg.sf_mask_update.sub_cmd_id =
				PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE;
		cmd.cmd.pg.sf_mask_update.ctrl_id =
				PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
		cmd.cmd.pg.sf_mask_update.enabled_mask =
				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED;

		gv11b_dbg_pmu("cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n");
		gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
				pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0);
	} else
		return -EINVAL;

	return 0;
}

void gv11b_init_pmu_ops(struct gpu_ops *gops)
{
@@ -165,4 +252,6 @@ void gv11b_init_pmu_ops(struct gpu_ops *gops)
	gops->pmu.pmu_msgq_tail = gk20a_pmu_msgq_tail;
	gops->pmu.pmu_mutex_size = pwr_pmu_mutex__size_1_v;
	gops->pmu.pmu_elpg_statistics = gp106_pmu_elpg_statistics;
	gops->pmu.pmu_pg_init_param = gv11b_pg_gr_init;
	gops->pmu.pmu_pg_set_sub_feature_mask = gv11b_pg_set_subfeature_mask;
}
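
For reference, a hedged illustration of how a common PG init path could invoke
the two hooks registered above. The caller name and control flow here are
hypothetical and assume the in-tree nvgpu types (struct gk20a, struct gpu_ops);
only the op names and the (struct gk20a *, u32 pg_engine_id) signature come
from this change.

/* Hypothetical caller: not part of this patch. */
static int example_pg_engine_param_init(struct gk20a *g, u32 pg_engine_id)
{
	int err = 0;

	/* Send the per-engine GR init parameters first. */
	if (g->ops.pmu.pmu_pg_init_param)
		err = g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);
	if (err)
		return err;

	/* Then tell the PMU which ELPG sub-features to enable. */
	if (g->ops.pmu.pmu_pg_set_sub_feature_mask)
		err = g->ops.pmu.pmu_pg_set_sub_feature_mask(g, pg_engine_id);

	return err;
}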