gpu: nvgpu: Support ELPG feature on nvgpu-next

Changes:
 -Implemented pg init_send ops for legacy chips.
 -Implemented RPC response handler.
 -Added pg rpc function call macros for nvgpu-next.

NVGPU-5192
NVGPU-5195
NVGPU-5196

Signed-off-by: rmylavarapu <rmylavarapu@nvidia.com>
Change-Id: I4e99d3929d7db796434aaeaa6f5773e9aac9fd32
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2391029
Reviewed-by: automaticguardword <automaticguardword@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
rmylavarapu
2020-07-30 14:13:30 +05:30
committed by Alex Waterman
parent 2dfa05ba50
commit d0c01fc14c
9 changed files with 109 additions and 81 deletions

View File

@@ -30,6 +30,7 @@
#include <nvgpu/pmu/lsfm.h>
#include <nvgpu/pmu/super_surface.h>
#include <nvgpu/pmu/pmu_perfmon.h>
#include <nvgpu/pmu/pmu_pg.h>
#include <nvgpu/pmu/fw.h>
#include <nvgpu/pmu/seq.h>
@@ -578,6 +579,12 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
pmu->therm_rpc_handler(g, pmu, &rpc);
}
break;
case PMU_UNIT_PG_LOADING:
case PMU_UNIT_PG:
if (pmu->pg->rpc_handler != NULL) {
pmu->pg->rpc_handler(g, pmu, &rpc);
}
break;
default:
nvgpu_err(g, " Invalid RPC response, stats 0x%x",
rpc.flcn_status);

View File

@@ -267,6 +267,78 @@ int gm20b_pmu_pg_elpg_hw_load_zbc(struct gk20a *g, struct nvgpu_pmu *pmu)
pmu_handle_pg_buf_config_msg, pmu);
}
/**
 * gm20b_pmu_pg_init_send() - Run the PG init command sequence on the PMU.
 * @g:            GPU driver context.
 * @pmu:          PMU descriptor whose pg ops are dispatched.
 * @pg_engine_id: PG engine to initialize (e.g. GRAPHICS or MS).
 *
 * Configures the idle counters for the engine, then posts the INIT,
 * STAT_ALLOC_DMEM and DISALLOW commands through the chip-assigned pg ops,
 * and finally applies the sub-feature mask when the chip provides one.
 * The init, alloc_dmem and disallow ops are mandatory; init_param and
 * set_sub_feature_mask are optional and skipped when NULL.
 *
 * Return: 0 on success, -EINVAL if a mandatory op is not assigned, or the
 * error code propagated from the first failing op.
 */
int gm20b_pmu_pg_init_send(struct gk20a *g, struct nvgpu_pmu *pmu,
	u8 pg_engine_id)
{
	int err = 0;

	nvgpu_log_fn(g, " ");

	g->ops.pmu.pmu_pg_idle_counter_config(g, pg_engine_id);

	/* Optional chip hook: program extra PG init parameters first. */
	if (pmu->pg->init_param != NULL) {
		err = pmu->pg->init_param(g, pg_engine_id);
		if (err != 0) {
			nvgpu_err(g, "init_param failed err=%d", err);
			return err;
		}
	}

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT");
	if (pmu->pg->init == NULL) {
		nvgpu_err(g, "PG init function not assigned");
		return -EINVAL;
	}
	err = pmu->pg->init(g, pmu, pg_engine_id);
	if (err != 0) {
		/* No trailing '\n': nvgpu_err terminates the line itself,
		 * matching the other messages in this function. */
		nvgpu_err(g, "PMU_PG_ELPG_CMD_INIT cmd failed");
		return err;
	}

	/* alloc dmem for powergating state log */
	nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
	if (pmu->pg->alloc_dmem == NULL) {
		nvgpu_err(g, "PG alloc dmem function not assigned");
		return -EINVAL;
	}
	err = pmu->pg->alloc_dmem(g, pmu, pg_engine_id);
	if (err != 0) {
		nvgpu_err(g, "PMU_PG_STAT_CMD_ALLOC_DMEM cmd failed");
		return err;
	}

	/*
	 * Disallow ELPG initially.
	 * PMU ucode requires a disallow cmd before allow cmd;
	 * set for wait_event PMU_ELPG_STAT_OFF.
	 */
	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
		pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF;
	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
		pmu->pg->mscg_transition_state = PMU_ELPG_STAT_OFF;
	}

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
	if (pmu->pg->disallow == NULL) {
		nvgpu_err(g, "PG disallow function not assigned");
		return -EINVAL;
	}
	err = pmu->pg->disallow(g, pmu, pg_engine_id);
	if (err != 0) {
		nvgpu_err(g, "PMU_PG_ELPG_CMD_DISALLOW cmd failed");
		return err;
	}

	/* Optional chip hook: enable/disable PG sub-features. */
	if (pmu->pg->set_sub_feature_mask != NULL) {
		err = pmu->pg->set_sub_feature_mask(g, pg_engine_id);
		if (err != 0) {
			nvgpu_err(g, "set_sub_feature_mask failed err=%d",
				err);
			return err;
		}
	}

	return err;
}
void nvgpu_gm20b_pg_sw_init(struct gk20a *g,
struct nvgpu_pmu_pg *pg)
{
@@ -285,4 +357,6 @@ void nvgpu_gm20b_pg_sw_init(struct gk20a *g,
pg->alloc_dmem = gm20b_pmu_pg_elpg_alloc_dmem;
pg->load_buff = gm20b_pmu_pg_elpg_load_buff;
pg->hw_load_zbc = gm20b_pmu_pg_elpg_hw_load_zbc;
pg->rpc_handler = NULL;
pg->init_send = gm20b_pmu_pg_init_send;
}

View File

@@ -47,5 +47,7 @@ int gm20b_pmu_pg_elpg_alloc_dmem(struct gk20a *g, struct nvgpu_pmu *pmu,
u8 pg_engine_id);
int gm20b_pmu_pg_elpg_load_buff(struct gk20a *g, struct nvgpu_pmu *pmu);
int gm20b_pmu_pg_elpg_hw_load_zbc(struct gk20a *g, struct nvgpu_pmu *pmu);
int gm20b_pmu_pg_init_send(struct gk20a *g, struct nvgpu_pmu *pmu,
u8 pg_engine_id);
#endif /* NVGPU_PMU_PG_SW_GM20B_H */

View File

@@ -117,5 +117,6 @@ void nvgpu_gp10b_pg_sw_init(struct gk20a *g,
pg->alloc_dmem = gm20b_pmu_pg_elpg_alloc_dmem;
pg->load_buff = gm20b_pmu_pg_elpg_load_buff;
pg->hw_load_zbc = gm20b_pmu_pg_elpg_hw_load_zbc;
pg->rpc_handler = NULL;
pg->init_send = gm20b_pmu_pg_init_send;
}

View File

@@ -144,5 +144,6 @@ void nvgpu_gv11b_pg_sw_init(struct gk20a *g,
pg->alloc_dmem = gm20b_pmu_pg_elpg_alloc_dmem;
pg->load_buff = gm20b_pmu_pg_elpg_load_buff;
pg->hw_load_zbc = gm20b_pmu_pg_elpg_hw_load_zbc;
pg->rpc_handler = NULL;
pg->init_send = gm20b_pmu_pg_init_send;
}

View File

@@ -491,79 +491,6 @@ void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
}
}
/*
 * Drive the PMU PG initialization sequence for one PG engine:
 * idle-counter setup, then INIT -> STAT_ALLOC_DMEM -> DISALLOW via the
 * chip-assigned pg ops, finishing with the optional sub-feature mask.
 * Mandatory ops (init, alloc_dmem, disallow) must be assigned; optional
 * ops (init_param, set_sub_feature_mask) are skipped when NULL.
 * Returns 0 on success, -EINVAL for a missing mandatory op, or the
 * error from the first failing op.
 */
static int pmu_pg_init_send(struct gk20a *g, u8 pg_engine_id)
{
	struct nvgpu_pmu *pmu = g->pmu;
	int ret = 0;

	nvgpu_log_fn(g, " ");

	g->ops.pmu.pmu_pg_idle_counter_config(g, pg_engine_id);

	/* Optional per-chip PG parameter programming. */
	if (pmu->pg->init_param != NULL) {
		ret = pmu->pg->init_param(g, pg_engine_id);
		if (ret != 0) {
			nvgpu_err(g, "init_param failed err=%d", ret);
			return ret;
		}
	}

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT");
	if (pmu->pg->init == NULL) {
		nvgpu_err(g, "PG init function not assigned");
		return -EINVAL;
	}
	ret = pmu->pg->init(g, pmu, pg_engine_id);
	if (ret != 0) {
		nvgpu_err(g, "PMU_PG_ELPG_CMD_INIT cmd failed\n");
		return ret;
	}

	/* alloc dmem for powergating state log */
	nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
	if (pmu->pg->alloc_dmem == NULL) {
		nvgpu_err(g, "PG alloc dmem function not assigned");
		return -EINVAL;
	}
	ret = pmu->pg->alloc_dmem(g, pmu, pg_engine_id);
	if (ret != 0) {
		nvgpu_err(g, "PMU_PG_STAT_CMD_ALLOC_DMEM cmd failed\n");
		return ret;
	}

	/*
	 * Disallow ELPG initially: PMU ucode requires a disallow cmd
	 * before the first allow cmd; set state for wait_event
	 * PMU_ELPG_STAT_OFF.
	 */
	switch (pg_engine_id) {
	case PMU_PG_ELPG_ENGINE_ID_GRAPHICS:
		pmu->pg->elpg_stat = PMU_ELPG_STAT_OFF;
		break;
	case PMU_PG_ELPG_ENGINE_ID_MS:
		pmu->pg->mscg_transition_state = PMU_ELPG_STAT_OFF;
		break;
	default:
		break;
	}

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
	if (pmu->pg->disallow == NULL) {
		nvgpu_err(g, "PG disallow function not assigned");
		return -EINVAL;
	}
	ret = pmu->pg->disallow(g, pmu, pg_engine_id);
	if (ret != 0) {
		nvgpu_err(g, "PMU_PG_ELPG_CMD_DISALLOW cmd failed\n");
		return ret;
	}

	/* Optional per-chip sub-feature mask programming. */
	if (pmu->pg->set_sub_feature_mask == NULL) {
		return ret;
	}
	ret = pmu->pg->set_sub_feature_mask(g, pg_engine_id);
	if (ret != 0) {
		nvgpu_err(g, "set_sub_feature_mask failed err=%d",
			ret);
	}
	return ret;
}
static int pmu_pg_init_powergating(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nvgpu_pmu_pg *pg)
{
@@ -590,8 +517,8 @@ static int pmu_pg_init_powergating(struct gk20a *g, struct nvgpu_pmu *pmu,
nvgpu_pmu_fw_state_change(g, pmu,
PMU_FW_STATE_ELPG_BOOTING, false);
}
/* Error print handled by pmu_pg_init_send */
err = pmu_pg_init_send(g, pg_engine_id);
/* Error print handled by pmu->pg->init_send */
err = pmu->pg->init_send(g, pmu, pg_engine_id);
if (err != 0) {
return err;
}
@@ -950,12 +877,15 @@ int nvgpu_pmu_pg_init(struct gk20a *g, struct nvgpu_pmu *pmu,
break;
case NVGPU_GPUID_GV11B:
#if defined(CONFIG_NVGPU_NEXT)
case NVGPU_NEXT_GPUID:
#endif
nvgpu_gv11b_pg_sw_init(g, *pg_p);
break;
#if defined(CONFIG_NVGPU_NEXT) && defined(CONFIG_NVGPU_NON_FUSA)
case NVGPU_NEXT_GPUID:
nvgpu_next_pg_sw_init(g, *pg_p);
break;
#endif
default:
nvgpu_kfree(g, *pg_p);
err = -EINVAL;

View File

@@ -112,6 +112,9 @@ struct nvgpu_pmu_pg {
u8 pg_engine_id);
int (*load_buff)(struct gk20a *g, struct nvgpu_pmu *pmu);
int (*hw_load_zbc)(struct gk20a *g, struct nvgpu_pmu *pmu);
void (*rpc_handler)(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nv_pmu_rpc_header *rpc);
int (*init_send)(struct gk20a *g, struct nvgpu_pmu *pmu, u8 pg_engine_id);
};
/* PG defines used by nvgpu-pmu */

View File

@@ -30,6 +30,7 @@
#define PMU_UNIT_REWIND U8(0x00)
#define PMU_UNIT_CMDMGMT U8(0x01)
#define PMU_UNIT_PG U8(0x03)
#define PMU_UNIT_PG_LOADING U8(0x06)
#define PMU_UNIT_INIT U8(0x07)
#define PMU_UNIT_ACR U8(0x0A)
#define PMU_UNIT_PERFMON_T18X U8(0x11)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,15 @@
#define PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE (0x00000005U)
#define PMU_PG_ELPG_ENGINE_MAX PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE
/* RPC function calls supported by PG unit */
#define NV_PMU_RPC_ID_PG_LOADING_PRE_INIT 0x00U
#define NV_PMU_RPC_ID_PG_LOADING_INIT 0x0AU
#define NV_PMU_RPC_ID_PG_LOADING_BUF_LOAD 0x0BU
#define NV_PMU_RPC_ID_PG_ALLOW 0x04U
#define NV_PMU_RPC_ID_PG_DISALLOW 0x05U
#define NV_PMU_RPC_ID_PG_THRESHOLD_UPDATE 0x06U
#define NV_PMU_RPC_ID_PG_SFM_UPDATE 0x08U
/* PG message */
enum {
PMU_PG_ELPG_MSG_INIT_ACK,