diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c
index 8636ed9a1..6522afddd 100644
--- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c
+++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c
@@ -190,7 +190,7 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
 	switch (msg->hdr.unit_id) {
 	case PMU_UNIT_PERFMON:
 	case PMU_UNIT_PERFMON_T18X:
-		err = nvgpu_pmu_handle_perfmon_event(pmu, &msg->msg.perfmon);
+		err = nvgpu_pmu_perfmon_event_handler(g, pmu, msg);
 		break;
 	case PMU_UNIT_PERF:
 		if (g->ops.pmu_perf.handle_pmu_perf_event != NULL) {
diff --git a/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon.c b/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon.c
index b98dafce0..d976eca68 100644
--- a/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon.c
+++ b/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -141,12 +142,13 @@ int nvgpu_pmu_initialize_perfmon(struct gk20a *g, struct nvgpu_pmu *pmu,
 		break;
 	case NVGPU_GPUID_GV11B:
-#if defined(CONFIG_NVGPU_NEXT)
-	case NVGPU_NEXT_GPUID:
-#endif
 		nvgpu_gv11b_perfmon_sw_init(g, *perfmon_ptr);
 		break;
-
+#if defined(CONFIG_NVGPU_NEXT)
+	case NVGPU_NEXT_GPUID:
+		nvgpu_next_perfmon_sw_init(g, *perfmon_ptr);
+		break;
+#endif
 	default:
 		nvgpu_kfree(g, *perfmon_ptr);
 		err = -EINVAL;
@@ -457,24 +459,25 @@ void nvgpu_pmu_reset_load_counters(struct gk20a *g)
 	gk20a_idle(g);
 }
 
-int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
-		struct pmu_perfmon_msg *msg)
+int nvgpu_pmu_handle_perfmon_event(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg)
 {
-	struct gk20a *g = pmu->g;
-
+	struct pmu_perfmon_msg *perfmon_msg = &msg->msg.perfmon;
 	nvgpu_log_fn(g, " ");
 
-	switch (msg->msg_type) {
+	switch (perfmon_msg->msg_type) {
 	case PMU_PERFMON_MSG_ID_INCREASE_EVENT:
 		nvgpu_pmu_dbg(g, "perfmon increase event: ");
 		nvgpu_pmu_dbg(g, "state_id %d, ground_id %d, pct %d",
-			msg->gen.state_id, msg->gen.group_id, msg->gen.data);
+			perfmon_msg->gen.state_id, perfmon_msg->gen.group_id,
+			perfmon_msg->gen.data);
 		(pmu->pmu_perfmon->perfmon_events_cnt)++;
 		break;
 	case PMU_PERFMON_MSG_ID_DECREASE_EVENT:
 		nvgpu_pmu_dbg(g, "perfmon decrease event: ");
 		nvgpu_pmu_dbg(g, "state_id %d, ground_id %d, pct %d",
-			msg->gen.state_id, msg->gen.group_id, msg->gen.data);
+			perfmon_msg->gen.state_id, perfmon_msg->gen.group_id,
+			perfmon_msg->gen.data);
 		(pmu->pmu_perfmon->perfmon_events_cnt)++;
 		break;
 	case PMU_PERFMON_MSG_ID_INIT_EVENT:
@@ -483,7 +486,45 @@ int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
 		break;
 	default:
 		nvgpu_pmu_dbg(g, "Invalid msgtype:%u for %s",
-			msg->msg_type, __func__);
+			perfmon_msg->msg_type, __func__);
+		break;
+	}
+
+	/* restart sampling */
+	if (pmu->pmu_perfmon->perfmon_sampling_enabled) {
+		return nvgpu_pmu_perfmon_start_sample(g, pmu,
+			pmu->pmu_perfmon);
+	}
+
+	return 0;
+}
+
+int nvgpu_pmu_handle_perfmon_event_rpc(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg)
+{
+	struct pmu_nvgpu_rpc_perfmon_init *perfmon_rpc =
+		&msg->event_rpc.perfmon_init;
+
+
+	nvgpu_log_fn(g, " ");
+
+	switch (perfmon_rpc->rpc_hdr.function) {
+	case PMU_RPC_ID_PERFMON_CHANGE_EVENT:
+		if (((struct pmu_nvgpu_rpc_perfmon_change *)
+			(void *)perfmon_rpc)->b_increase) {
+			nvgpu_pmu_dbg(g, "perfmon increase event");
+		} else {
+			nvgpu_pmu_dbg(g, "perfmon decrease event");
+		}
+		(pmu->pmu_perfmon->perfmon_events_cnt)++;
+		break;
+	case PMU_RPC_ID_PERFMON_INIT_EVENT:
+		nvgpu_pmu_dbg(g, "perfmon init event");
+		pmu->pmu_perfmon->perfmon_ready = true;
+		break;
+	default:
+		nvgpu_pmu_dbg(g, "invalid perfmon event %d",
+			perfmon_rpc->rpc_hdr.function);
 		break;
 	}
 
@@ -672,3 +713,9 @@ int nvgpu_pmu_perfmon_get_sample(struct gk20a *g,
 
 	return perfmon->get_samples_rpc(pmu);
 }
+
+int nvgpu_pmu_perfmon_event_handler(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg)
+{
+	return pmu->pmu_perfmon->perfmon_event_handler(g, pmu, msg);
+}
diff --git a/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon_sw_gm20b.c b/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon_sw_gm20b.c
index 11caa163d..2a15d2ad2 100644
--- a/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon_sw_gm20b.c
+++ b/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon_sw_gm20b.c
@@ -36,5 +36,7 @@ void nvgpu_gm20b_perfmon_sw_init(struct gk20a *g,
 	perfmon->stop_sampling = nvgpu_pmu_perfmon_stop_sampling;
 	perfmon->get_samples_rpc = NULL;
 
+	perfmon->perfmon_event_handler =
+		nvgpu_pmu_handle_perfmon_event;
 }
diff --git a/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon_sw_gv11b.c b/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon_sw_gv11b.c
index 78e89529b..cdf4f9263 100644
--- a/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon_sw_gv11b.c
+++ b/drivers/gpu/nvgpu/common/pmu/perfmon/pmu_perfmon_sw_gv11b.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,5 +36,7 @@ void nvgpu_gv11b_perfmon_sw_init(struct gk20a *g,
 		nvgpu_pmu_perfmon_stop_sampling_rpc;
 	perfmon->get_samples_rpc = nvgpu_pmu_perfmon_get_samples_rpc;
 
+	perfmon->perfmon_event_handler =
+		nvgpu_pmu_handle_perfmon_event;
 }
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/msg.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/msg.h
index 660c03fe1..46e813ef3 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu/msg.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/msg.h
@@ -71,6 +71,8 @@ struct pmu_msg {
 		} msg;
 		union {
 			struct pmu_nvgpu_rpc_struct_cmdmgmt_init cmdmgmt_init;
+			struct pmu_nvgpu_rpc_perfmon_init perfmon_init;
+			struct pmu_nvgpu_rpc_perfmon_change perfmon_change;
 		} event_rpc;
 	};
 };
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/pmu_perfmon.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/pmu_perfmon.h
index f9f238b2d..5da205a4c 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu/pmu_perfmon.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/pmu_perfmon.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,9 +30,9 @@
 
 struct gk20a;
 struct nvgpu_pmu;
-struct pmu_perfmon_msg;
 struct rpc_handler_payload;
 struct nv_pmu_rpc_header;
+struct pmu_msg;
 
 /* pmu load const defines */
 #define PMU_BUSY_CYCLES_NORM_MAX	(1000U)
@@ -52,6 +52,8 @@ struct nvgpu_pmu_perfmon {
 	int (*start_sampling)(struct nvgpu_pmu *pmu);
 	int (*stop_sampling)(struct nvgpu_pmu *pmu);
 	int (*get_samples_rpc)(struct nvgpu_pmu *pmu);
+	int (*perfmon_event_handler)(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg);
 };
 
 /* perfmon */
@@ -67,8 +69,10 @@ int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu);
 int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu);
 int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu);
 int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu);
-int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
-		struct pmu_perfmon_msg *msg);
+int nvgpu_pmu_handle_perfmon_event(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg);
+int nvgpu_pmu_handle_perfmon_event_rpc(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg);
 int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu);
 int nvgpu_pmu_load_norm(struct gk20a *g, u32 *load);
 int nvgpu_pmu_load_update(struct gk20a *g);
@@ -91,5 +95,7 @@ int nvgpu_pmu_perfmon_stop_sample(struct gk20a *g,
 		struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon);
 int nvgpu_pmu_perfmon_get_sample(struct gk20a *g,
 		struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon);
+int nvgpu_pmu_perfmon_event_handler(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg);
 
 #endif /* NVGPU_PMU_PERFMON_H */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/pmuif/perfmon.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/pmuif/perfmon.h
index c7f1a8eb2..18a57447d 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu/pmuif/perfmon.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/pmuif/perfmon.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -37,6 +37,12 @@
 
 #define NV_PMU_PERFMON_MAX_COUNTERS 10U
 
+/*!
+* RPC Event calls serviced by PERFMON unit.
+*/
+#define PMU_RPC_ID_PERFMON_CHANGE_EVENT 0x00U
+#define PMU_RPC_ID_PERFMON_INIT_EVENT 0x01U
+
 enum pmu_perfmon_cmd_start_fields {
 	COUNTER_ALLOC
 };
@@ -168,6 +174,24 @@ struct pmu_perfmon_msg {
 	};
 };
 
+/*!
+ * Defines the structure that holds data used to execute CHANGE_EVENT RPC.
+ */
+struct pmu_nvgpu_rpc_perfmon_change {
+	struct pmu_nvgpu_rpc_header rpc_hdr;
+	bool b_increase;
+	u8 state_id;
+	u8 groupId;
+	u8 data;
+};
+
+/*!
+ * Defines the structure that holds data used to execute INIT_EVENT RPC.
+ */
+struct pmu_nvgpu_rpc_perfmon_init {
+	struct pmu_nvgpu_rpc_header rpc_hdr;
+};
+
 /* PFERMON RPC interface*/
 /*
  * RPC calls serviced by PERFMON unit.
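Note (illustrative, not part of the patch): the hunks above route PMU_UNIT_PERFMON events through the new per-chip perfmon_event_handler hook, yet the sw_init functions shown (gm20b, gv11b) both install the legacy message-queue handler nvgpu_pmu_handle_perfmon_event; the new nvgpu_pmu_handle_perfmon_event_rpc has no visible user here, presumably being wired up by nvgpu_next_perfmon_sw_init(), whose body is outside this diff. The sketch below is only a guess at how a chip that reports perfmon events through the event_rpc union might hook it up; the function name and the exact set of ops chosen are assumptions, not code from this change, and only identifiers visible in the hunks above are used.

/*
 * Hypothetical example only -- not part of the patch above.
 * A sw_init for a chip using the RPC-based perfmon event path
 * could install the new handler roughly like this.
 */
#include <nvgpu/pmu/pmu_perfmon.h>

static void example_rpc_perfmon_sw_init(struct gk20a *g,
		struct nvgpu_pmu_perfmon *perfmon)
{
	/* RPC-based sampling entry points, as already wired for gv11b */
	perfmon->start_sampling = nvgpu_pmu_perfmon_start_sampling_rpc;
	perfmon->stop_sampling = nvgpu_pmu_perfmon_stop_sampling_rpc;
	perfmon->get_samples_rpc = nvgpu_pmu_perfmon_get_samples_rpc;

	/* route PMU_UNIT_PERFMON events to the new RPC event handler */
	perfmon->perfmon_event_handler = nvgpu_pmu_handle_perfmon_event_rpc;
}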