Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: add rpc handlers for therm, volt, perfmon and acr
RPC handlers for therm, volt, perfmon and acr were open-coded in pmu_rpc_handler. Instead, add the implementations to their respective units. To avoid dereferencing struct nvgpu_pmu (and the resulting circular dependency), pass the gk20a struct as input to nvgpu_pmu_rpc_execute and the other pmu_ipc.c functions.

JIRA NVGPU-1970

Change-Id: I6ea046960936923e69242bf90e8e25958cfba85e
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2079145
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Ankur Kishore <ankkishore@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit b8b02885af (parent 9545644033), committed by mobile promotions.
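With this change, pmu_rpc_handler only routes each reply to the unit that owns it; the function-level decoding moves into the perfmon, volt and therm units. A minimal sketch of the resulting dispatch, reconstructed from the hunks below (not copied verbatim from the tree, and omitting the other units the real switch still handles):

    /* pmu_ipc.c (sketch): forward unit RPC replies to their unit handlers */
    switch (rpc.unit_id) {
    case PMU_UNIT_PERFMON_T18X:
    case PMU_UNIT_PERFMON:
        nvgpu_pmu_perfmon_rpc_handler(g, pmu, &rpc, rpc_payload);
        break;
    case PMU_UNIT_VOLT:
        nvgpu_pmu_volt_rpc_handler(g, &rpc);
        break;
    case PMU_UNIT_THERM:
        nvgpu_pmu_therm_rpc_handler(g, &rpc);
        break;
    default:
        nvgpu_err(g, "Invalid RPC response, stats 0x%x", rpc.flcn_status);
        break;
    }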
@@ -24,6 +24,7 @@
#include <nvgpu/dma.h>
#include <nvgpu/firmware.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/pmuif/gpmuif_cmn.h>

#include "acr_priv.h"
#include "acr_sw_gm20b.h"
@@ -154,5 +155,3 @@ int nvgpu_acr_init(struct gk20a *g, struct nvgpu_acr **acr)
done:
    return err;
}
@@ -33,6 +33,8 @@
#include <nvgpu/string.h>
#include <nvgpu/pmu/seq.h>
#include <nvgpu/pmu/queue.h>
#include <nvgpu/pmu/volt.h>
#include <nvgpu/pmu/therm.h>

static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
        struct pmu_payload *payload, u32 queue_id)
@@ -643,7 +645,6 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
    struct nvgpu_pmu *pmu = &g->pmu;
    struct rpc_handler_payload *rpc_payload =
        (struct rpc_handler_payload *)param;
    struct nv_pmu_rpc_struct_perfmon_query *rpc_param;

    (void) memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
    nvgpu_memcpy((u8 *)&rpc, (u8 *)rpc_payload->rpc_buff,
@@ -673,49 +674,10 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
        break;
    case PMU_UNIT_PERFMON_T18X:
    case PMU_UNIT_PERFMON:
        switch (rpc.function) {
        case NV_PMU_RPC_ID_PERFMON_T18X_INIT:
            nvgpu_pmu_dbg(g,
                "reply NV_PMU_RPC_ID_PERFMON_INIT");
            pmu->perfmon_ready = true;
            break;
        case NV_PMU_RPC_ID_PERFMON_T18X_START:
            nvgpu_pmu_dbg(g,
                "reply NV_PMU_RPC_ID_PERFMON_START");
            break;
        case NV_PMU_RPC_ID_PERFMON_T18X_STOP:
            nvgpu_pmu_dbg(g,
                "reply NV_PMU_RPC_ID_PERFMON_STOP");
            break;
        case NV_PMU_RPC_ID_PERFMON_T18X_QUERY:
            nvgpu_pmu_dbg(g,
                "reply NV_PMU_RPC_ID_PERFMON_QUERY");
            rpc_param = (struct nv_pmu_rpc_struct_perfmon_query *)
                rpc_payload->rpc_buff;
            pmu->load = rpc_param->sample_buffer[0];
            pmu->perfmon_query = 1;
            /* set perfmon_query to 1 after load is copied */
            break;
        }
        nvgpu_pmu_perfmon_rpc_handler(g, pmu, &rpc, rpc_payload);
        break;
    case PMU_UNIT_VOLT:
        switch (rpc.function) {
        case NV_PMU_RPC_ID_VOLT_BOARD_OBJ_GRP_CMD:
            nvgpu_pmu_dbg(g,
                "reply NV_PMU_RPC_ID_VOLT_BOARD_OBJ_GRP_CMD");
            break;
        case NV_PMU_RPC_ID_VOLT_VOLT_SET_VOLTAGE:
            nvgpu_pmu_dbg(g,
                "reply NV_PMU_RPC_ID_VOLT_VOLT_SET_VOLTAGE");
            break;
        case NV_PMU_RPC_ID_VOLT_VOLT_RAIL_GET_VOLTAGE:
            nvgpu_pmu_dbg(g,
                "reply NV_PMU_RPC_ID_VOLT_VOLT_RAIL_GET_VOLTAGE");
            break;
        case NV_PMU_RPC_ID_VOLT_LOAD:
            nvgpu_pmu_dbg(g,
                "reply NV_PMU_RPC_ID_VOLT_LOAD");
        }
        nvgpu_pmu_volt_rpc_handler(g, &rpc);
        break;
    case PMU_UNIT_CLK:
        nvgpu_pmu_dbg(g, "reply PMU_UNIT_CLK");
@@ -724,17 +686,8 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
        nvgpu_pmu_dbg(g, "reply PMU_UNIT_PERF");
        break;
    case PMU_UNIT_THERM:
        switch (rpc.function) {
        case NV_PMU_RPC_ID_THERM_BOARD_OBJ_GRP_CMD:
            nvgpu_pmu_dbg(g,
                "reply NV_PMU_RPC_ID_THERM_BOARD_OBJ_GRP_CMD");
            break;
        default:
            nvgpu_pmu_dbg(g, "reply PMU_UNIT_THERM");
            break;
        }
        break;
        /* TBD case will be added */
        nvgpu_pmu_therm_rpc_handler(g, &rpc);
        break;
    default:
        nvgpu_err(g, " Invalid RPC response, stats 0x%x",
            rpc.flcn_status);
@@ -53,6 +53,41 @@ static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
    return unit_id;
}

void nvgpu_pmu_perfmon_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
        struct nv_pmu_rpc_header *rpc,
        struct rpc_handler_payload *rpc_payload)
{
    struct nv_pmu_rpc_struct_perfmon_query *rpc_param;

    switch (rpc->function) {
    case NV_PMU_RPC_ID_PERFMON_T18X_INIT:
        nvgpu_pmu_dbg(g,
            "reply NV_PMU_RPC_ID_PERFMON_INIT");
        pmu->perfmon_ready = true;
        break;
    case NV_PMU_RPC_ID_PERFMON_T18X_START:
        nvgpu_pmu_dbg(g,
            "reply NV_PMU_RPC_ID_PERFMON_START");
        break;
    case NV_PMU_RPC_ID_PERFMON_T18X_STOP:
        nvgpu_pmu_dbg(g,
            "reply NV_PMU_RPC_ID_PERFMON_STOP");
        break;
    case NV_PMU_RPC_ID_PERFMON_T18X_QUERY:
        nvgpu_pmu_dbg(g,
            "reply NV_PMU_RPC_ID_PERFMON_QUERY");
        rpc_param = (struct nv_pmu_rpc_struct_perfmon_query *)
            rpc_payload->rpc_buff;
        pmu->load = rpc_param->sample_buffer[0];
        pmu->perfmon_query = 1;
        /* set perfmon_query to 1 after load is copied */
        break;
    default:
        nvgpu_pmu_dbg(g, "invalid reply");
        break;
    }
}

int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu)
{
    struct gk20a *g = gk20a_from_pmu(pmu);
@@ -256,3 +256,16 @@ int nvgpu_therm_configure_therm_alert(struct gk20a *g)
exit:
    return status;
}

void nvgpu_pmu_therm_rpc_handler(struct gk20a *g, struct nv_pmu_rpc_header *rpc)
{
    switch (rpc->function) {
    case NV_PMU_RPC_ID_THERM_BOARD_OBJ_GRP_CMD:
        nvgpu_pmu_dbg(g,
            "reply NV_PMU_RPC_ID_THERM_BOARD_OBJ_GRP_CMD");
        break;
    default:
        nvgpu_pmu_dbg(g, "reply PMU_UNIT_THERM");
        break;
    }
}
@@ -137,3 +137,28 @@ int nvgpu_volt_get_voltage(struct gk20a *g, u32 volt_domain, u32 *voltage_uv)
    return volt_rail_get_voltage(g,
        (u8)volt_domain, voltage_uv);
}

void nvgpu_pmu_volt_rpc_handler(struct gk20a *g, struct nv_pmu_rpc_header *rpc)
{
    switch (rpc->function) {
    case NV_PMU_RPC_ID_VOLT_BOARD_OBJ_GRP_CMD:
        nvgpu_pmu_dbg(g,
            "reply NV_PMU_RPC_ID_VOLT_BOARD_OBJ_GRP_CMD");
        break;
    case NV_PMU_RPC_ID_VOLT_VOLT_SET_VOLTAGE:
        nvgpu_pmu_dbg(g,
            "reply NV_PMU_RPC_ID_VOLT_VOLT_SET_VOLTAGE");
        break;
    case NV_PMU_RPC_ID_VOLT_VOLT_RAIL_GET_VOLTAGE:
        nvgpu_pmu_dbg(g,
            "reply NV_PMU_RPC_ID_VOLT_VOLT_RAIL_GET_VOLTAGE");
        break;
    case NV_PMU_RPC_ID_VOLT_LOAD:
        nvgpu_pmu_dbg(g,
            "reply NV_PMU_RPC_ID_VOLT_LOAD");
        break;
    default:
        nvgpu_pmu_dbg(g, "invalid reply");
        break;
    }
}
@@ -27,6 +27,7 @@ struct gk20a;
struct nvgpu_falcon;
struct nvgpu_firmware;
struct nvgpu_acr;
struct nv_pmu_rpc_header;

int nvgpu_acr_init(struct gk20a *g, struct nvgpu_acr **acr);
int nvgpu_acr_alloc_blob_prerequisite(struct gk20a *g, struct nvgpu_acr *acr,
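The new handlers take the RPC header by pointer, so the public headers only need the type name; a forward declaration of struct nv_pmu_rpc_header (as in the hunk above) is enough and keeps the unit headers from pulling in the PMU IPC definitions. A minimal sketch of that pattern, using a hypothetical handler name:

    /* hypothetical unit header: declare the handler with opaque types only */
    struct gk20a;
    struct nv_pmu_rpc_header;    /* forward declaration, full RPC definitions not needed */

    void nvgpu_pmu_example_rpc_handler(struct gk20a *g,
            struct nv_pmu_rpc_header *rpc);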
@@ -341,6 +341,9 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
        u32 queue_id, pmu_callback callback, void *cb_param);

/* perfmon */
void nvgpu_pmu_perfmon_rpc_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
        struct nv_pmu_rpc_header *rpc,
        struct rpc_handler_payload *rpc_payload);
int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu);
int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu);
int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu);
@@ -31,5 +31,7 @@ int nvgpu_therm_domain_pmu_setup(struct gk20a *g);
int nvgpu_therm_pmu_init_pmupstate(struct gk20a *g);
void nvgpu_therm_pmu_free_pmupstate(struct gk20a *g);
int nvgpu_therm_configure_therm_alert(struct gk20a *g);
void nvgpu_pmu_therm_rpc_handler(struct gk20a *g,
        struct nv_pmu_rpc_header *rpc);

#endif /* NVGPU_PMU_THREM_H */
@@ -108,5 +108,6 @@ int nvgpu_volt_rail_volt_dev_register(struct gk20a *g, struct voltage_rail
        *pvolt_rail, u8 volt_dev_idx, u8 operation_type);
u8 nvgpu_volt_rail_vbios_volt_domain_convert_to_internal
        (struct gk20a *g, u8 vbios_volt_domain);
void nvgpu_pmu_volt_rpc_handler(struct gk20a *g, struct nv_pmu_rpc_header *rpc);

#endif /* NVGPU_PMU_VOLT_H */