Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
Synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: Support Perfmon events for nvgpu-next
Created Perfmon event handling for nvgpu-next. The nvgpu-next PMU sends
perfmon events in the form of RPC events. The events are:

- Change event: indicates whether the event is an increase or a decrease
  event.
- Init event: indicates that perfmon init has completed in the PMU.

NVGPU-5202
NVGPU-5205
NVGPU-5206

Signed-off-by: rmylavarapu <rmylavarapu@nvidia.com>
Change-Id: Ida7e77dbaf70d2b594a0801c91a168dcb4a860bd
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2395358
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: Alex Waterman
Parent: 9937a40b60
Commit: 641cc6a59c
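The mechanism underlying the diff below is a per-chip function pointer:
nvgpu_pmu_perfmon_event_handler() forwards every PMU_UNIT_PERFMON message to
whatever handler the chip's *_perfmon_sw_init() installed, so gm20b/gv11b keep
the legacy pmu_perfmon_msg parser (nvgpu_pmu_handle_perfmon_event) while
nvgpu-next installs the RPC parser (nvgpu_pmu_handle_perfmon_event_rpc). What
follows is a minimal, self-contained C sketch of that dispatch scheme, not
nvgpu code; every struct layout below is a simplified stand-in for
illustration only.

#include <stdio.h>

/* Simplified stand-ins for the nvgpu types involved (not the real layouts). */
struct gk20a { int dummy; };
struct pmu_msg { int is_rpc_event; };
struct nvgpu_pmu;

struct nvgpu_pmu_perfmon {
	/* Chip-specific hook, installed once by a *_perfmon_sw_init(). */
	int (*perfmon_event_handler)(struct gk20a *g,
			struct nvgpu_pmu *pmu, struct pmu_msg *msg);
};

struct nvgpu_pmu {
	struct nvgpu_pmu_perfmon *pmu_perfmon;
};

/* Legacy path: would parse struct pmu_perfmon_msg (gm20b/gv11b in the patch). */
static int handle_perfmon_event(struct gk20a *g, struct nvgpu_pmu *pmu,
		struct pmu_msg *msg)
{
	(void)g; (void)pmu; (void)msg;
	printf("legacy perfmon message handler\n");
	return 0;
}

/* nvgpu-next path: would parse the msg->event_rpc union added by the patch. */
static int handle_perfmon_event_rpc(struct gk20a *g, struct nvgpu_pmu *pmu,
		struct pmu_msg *msg)
{
	(void)g; (void)pmu; (void)msg;
	printf("RPC perfmon event handler\n");
	return 0;
}

/* Mirrors nvgpu_pmu_perfmon_event_handler(): one call site, per-chip body. */
static int perfmon_event_handler(struct gk20a *g, struct nvgpu_pmu *pmu,
		struct pmu_msg *msg)
{
	return pmu->pmu_perfmon->perfmon_event_handler(g, pmu, msg);
}

int main(void)
{
	struct gk20a g = { 0 };
	struct pmu_msg msg = { 1 };
	/* The chip init picks the handler once; dispatch stays generic. */
	struct nvgpu_pmu_perfmon perfmon = { handle_perfmon_event_rpc };
	struct nvgpu_pmu pmu = { &perfmon };

	return perfmon_event_handler(&g, &pmu, &msg);
}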
@@ -190,7 +190,7 @@ static int pmu_handle_event(struct nvgpu_pmu *pmu, struct pmu_msg *msg)
 	switch (msg->hdr.unit_id) {
 	case PMU_UNIT_PERFMON:
 	case PMU_UNIT_PERFMON_T18X:
-		err = nvgpu_pmu_handle_perfmon_event(pmu, &msg->msg.perfmon);
+		err = nvgpu_pmu_perfmon_event_handler(g, pmu, msg);
 		break;
 	case PMU_UNIT_PERF:
 		if (g->ops.pmu_perf.handle_pmu_perf_event != NULL) {
@@ -28,6 +28,7 @@
 #include <nvgpu/enabled.h>
 #include <nvgpu/pmu.h>
 #include <nvgpu/pmu/cmd.h>
+#include <nvgpu/pmu/msg.h>
 #include <nvgpu/log.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/pmu/pmuif/nvgpu_cmdif.h>
@@ -141,12 +142,13 @@ int nvgpu_pmu_initialize_perfmon(struct gk20a *g, struct nvgpu_pmu *pmu,
 		break;
 
 	case NVGPU_GPUID_GV11B:
-#if defined(CONFIG_NVGPU_NEXT)
-	case NVGPU_NEXT_GPUID:
-#endif
 		nvgpu_gv11b_perfmon_sw_init(g, *perfmon_ptr);
 		break;
-
+#if defined(CONFIG_NVGPU_NEXT)
+	case NVGPU_NEXT_GPUID:
+		nvgpu_next_perfmon_sw_init(g, *perfmon_ptr);
+		break;
+#endif
 	default:
 		nvgpu_kfree(g, *perfmon_ptr);
 		err = -EINVAL;
@@ -457,24 +459,25 @@ void nvgpu_pmu_reset_load_counters(struct gk20a *g)
 	gk20a_idle(g);
 }
 
-int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
-		struct pmu_perfmon_msg *msg)
+int nvgpu_pmu_handle_perfmon_event(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg)
 {
-	struct gk20a *g = pmu->g;
-
+	struct pmu_perfmon_msg *perfmon_msg = &msg->msg.perfmon;
 	nvgpu_log_fn(g, " ");
 
-	switch (msg->msg_type) {
+	switch (perfmon_msg->msg_type) {
 	case PMU_PERFMON_MSG_ID_INCREASE_EVENT:
 		nvgpu_pmu_dbg(g, "perfmon increase event: ");
 		nvgpu_pmu_dbg(g, "state_id %d, ground_id %d, pct %d",
-			msg->gen.state_id, msg->gen.group_id, msg->gen.data);
+			perfmon_msg->gen.state_id, perfmon_msg->gen.group_id,
+			perfmon_msg->gen.data);
 		(pmu->pmu_perfmon->perfmon_events_cnt)++;
 		break;
 	case PMU_PERFMON_MSG_ID_DECREASE_EVENT:
 		nvgpu_pmu_dbg(g, "perfmon decrease event: ");
 		nvgpu_pmu_dbg(g, "state_id %d, ground_id %d, pct %d",
-			msg->gen.state_id, msg->gen.group_id, msg->gen.data);
+			perfmon_msg->gen.state_id, perfmon_msg->gen.group_id,
+			perfmon_msg->gen.data);
 		(pmu->pmu_perfmon->perfmon_events_cnt)++;
 		break;
 	case PMU_PERFMON_MSG_ID_INIT_EVENT:
@@ -483,7 +486,45 @@ int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
 		break;
 	default:
 		nvgpu_pmu_dbg(g, "Invalid msgtype:%u for %s",
-			msg->msg_type, __func__);
+			perfmon_msg->msg_type, __func__);
 		break;
 	}
+
+	/* restart sampling */
+	if (pmu->pmu_perfmon->perfmon_sampling_enabled) {
+		return nvgpu_pmu_perfmon_start_sample(g, pmu,
+			pmu->pmu_perfmon);
+	}
+
+	return 0;
+}
+
+int nvgpu_pmu_handle_perfmon_event_rpc(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg)
+{
+	struct pmu_nvgpu_rpc_perfmon_init *perfmon_rpc =
+		&msg->event_rpc.perfmon_init;
+
+
+	nvgpu_log_fn(g, " ");
+
+	switch (perfmon_rpc->rpc_hdr.function) {
+	case PMU_RPC_ID_PERFMON_CHANGE_EVENT:
+		if (((struct pmu_nvgpu_rpc_perfmon_change *)
+			(void *)perfmon_rpc)->b_increase) {
+			nvgpu_pmu_dbg(g, "perfmon increase event");
+		} else {
+			nvgpu_pmu_dbg(g, "perfmon decrease event");
+		}
+		(pmu->pmu_perfmon->perfmon_events_cnt)++;
+		break;
+	case PMU_RPC_ID_PERFMON_INIT_EVENT:
+		nvgpu_pmu_dbg(g, "perfmon init event");
+		pmu->pmu_perfmon->perfmon_ready = true;
+		break;
+	default:
+		nvgpu_pmu_dbg(g, "invalid perfmon event %d",
+			perfmon_rpc->rpc_hdr.function);
+		break;
+	}
 
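Both RPC event payloads in the handler above begin with
pmu_nvgpu_rpc_header, so the handler branches on rpc_hdr.function first and
only then re-views the buffer as the larger pmu_nvgpu_rpc_perfmon_change;
casting through (void *) is a common way to keep the compiler from warning
about the pointer conversion. Below is a minimal stand-alone C sketch of the
same header-first decode, using hypothetical simplified layouts rather than
the real nvgpu structs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified layouts: each event begins with a common header. */
struct rpc_header { uint8_t function; };

#define RPC_PERFMON_CHANGE_EVENT 0x00u
#define RPC_PERFMON_INIT_EVENT   0x01u

struct rpc_perfmon_change {
	struct rpc_header hdr;	/* must be first, as in the real structs */
	bool b_increase;
	uint8_t state_id, group_id, data;
};

struct rpc_perfmon_init {
	struct rpc_header hdr;
};

/* Header-first decode: branch on hdr.function, then re-view the buffer. */
static void handle_event(void *buf)
{
	struct rpc_header *hdr = buf;

	switch (hdr->function) {
	case RPC_PERFMON_CHANGE_EVENT: {
		struct rpc_perfmon_change *chg = buf;

		printf("change event: %s\n",
			chg->b_increase ? "increase" : "decrease");
		break;
	}
	case RPC_PERFMON_INIT_EVENT:
		printf("init event: perfmon ready\n");
		break;
	default:
		printf("invalid perfmon event %u\n", (unsigned)hdr->function);
		break;
	}
}

int main(void)
{
	struct rpc_perfmon_change chg = {
		.hdr = { RPC_PERFMON_CHANGE_EVENT }, .b_increase = true
	};
	struct rpc_perfmon_init init = { .hdr = { RPC_PERFMON_INIT_EVENT } };

	handle_event(&chg);
	handle_event(&init);
	return 0;
}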
@@ -672,3 +713,9 @@ int nvgpu_pmu_perfmon_get_sample(struct gk20a *g,
 
 	return perfmon->get_samples_rpc(pmu);
 }
+
+int nvgpu_pmu_perfmon_event_handler(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg)
+{
+	return pmu->pmu_perfmon->perfmon_event_handler(g, pmu, msg);
+}
@@ -36,5 +36,7 @@ void nvgpu_gm20b_perfmon_sw_init(struct gk20a *g,
 	perfmon->stop_sampling =
 		nvgpu_pmu_perfmon_stop_sampling;
 	perfmon->get_samples_rpc = NULL;
+	perfmon->perfmon_event_handler =
+		nvgpu_pmu_handle_perfmon_event;
 }
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,5 +36,7 @@ void nvgpu_gv11b_perfmon_sw_init(struct gk20a *g,
 		nvgpu_pmu_perfmon_stop_sampling_rpc;
 	perfmon->get_samples_rpc =
 		nvgpu_pmu_perfmon_get_samples_rpc;
+	perfmon->perfmon_event_handler =
+		nvgpu_pmu_handle_perfmon_event;
 }
 
@@ -71,6 +71,8 @@ struct pmu_msg {
 	} msg;
 	union {
 		struct pmu_nvgpu_rpc_struct_cmdmgmt_init cmdmgmt_init;
+		struct pmu_nvgpu_rpc_perfmon_init perfmon_init;
+		struct pmu_nvgpu_rpc_perfmon_change perfmon_change;
 	} event_rpc;
 	};
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,9 +30,9 @@
 
 struct gk20a;
 struct nvgpu_pmu;
-struct pmu_perfmon_msg;
 struct rpc_handler_payload;
 struct nv_pmu_rpc_header;
+struct pmu_msg;
 
 /* pmu load const defines */
 #define PMU_BUSY_CYCLES_NORM_MAX (1000U)
@@ -52,6 +52,8 @@ struct nvgpu_pmu_perfmon {
 	int (*start_sampling)(struct nvgpu_pmu *pmu);
 	int (*stop_sampling)(struct nvgpu_pmu *pmu);
 	int (*get_samples_rpc)(struct nvgpu_pmu *pmu);
+	int (*perfmon_event_handler)(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg);
 };
 
 /* perfmon */
@@ -67,8 +69,10 @@ int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu);
 int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu);
 int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu);
 int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu);
-int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
-		struct pmu_perfmon_msg *msg);
+int nvgpu_pmu_handle_perfmon_event(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg);
+int nvgpu_pmu_handle_perfmon_event_rpc(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg);
 int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu);
 int nvgpu_pmu_load_norm(struct gk20a *g, u32 *load);
 int nvgpu_pmu_load_update(struct gk20a *g);
@@ -91,5 +95,7 @@ int nvgpu_pmu_perfmon_stop_sample(struct gk20a *g,
 		struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon);
 int nvgpu_pmu_perfmon_get_sample(struct gk20a *g,
 		struct nvgpu_pmu *pmu, struct nvgpu_pmu_perfmon *perfmon);
+int nvgpu_pmu_perfmon_event_handler(struct gk20a *g,
+		struct nvgpu_pmu *pmu, struct pmu_msg *msg);
 
 #endif /* NVGPU_PMU_PERFMON_H */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -37,6 +37,12 @@
 
 #define NV_PMU_PERFMON_MAX_COUNTERS 10U
 
+/*!
+ * RPC Event calls serviced by PERFMON unit.
+ */
+#define PMU_RPC_ID_PERFMON_CHANGE_EVENT 0x00U
+#define PMU_RPC_ID_PERFMON_INIT_EVENT 0x01U
+
 enum pmu_perfmon_cmd_start_fields {
 	COUNTER_ALLOC
 };
@@ -168,6 +174,24 @@ struct pmu_perfmon_msg {
 	};
 };
 
+/*!
+ * Defines the structure that holds data used to execute CHANGE_EVENT RPC.
+ */
+struct pmu_nvgpu_rpc_perfmon_change {
+	struct pmu_nvgpu_rpc_header rpc_hdr;
+	bool b_increase;
+	u8 state_id;
+	u8 groupId;
+	u8 data;
+};
+
+/*!
+ * Defines the structure that holds data used to execute INIT_EVENT RPC.
+ */
+struct pmu_nvgpu_rpc_perfmon_init {
+	struct pmu_nvgpu_rpc_header rpc_hdr;
+};
+
 /* PFERMON RPC interface*/
 /*
  * RPC calls serviced by PERFMON unit.