gpu: nvgpu: perform support_ls_pmu check

Perform the support_ls_pmu check before dereferencing pmu from the GPU
struct g, to avoid a possible kernel panic (NULL-pointer dereference)
when LS PMU support is not enabled.

JIRA NVGPU-9283

Change-Id: I65caac449f884164d797dedc2041d6ee4292e326
Signed-off-by: Rajesh Devaraj <rdevaraj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2868250
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Ramalingam C <ramalingamc@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Rajesh Devaraj
2023-03-09 07:31:29 +00:00
committed by mobile promotions
parent 4ec683975a
commit 4f96f59c15

View File

@@ -173,9 +173,15 @@ void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
/* PG enable/disable */
int nvgpu_pmu_pg_global_enable(struct gk20a *g, bool enable_pg)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
int status = 0;
if (!g->support_ls_pmu) {
return 0;
}
pmu = g->pmu;
if (!is_pg_supported(g, pmu->pg)) {
return status;
}
@@ -244,14 +250,19 @@ static int pmu_enable_elpg_locked(struct gk20a *g, u8 pg_engine_id)
int nvgpu_pmu_enable_elpg(struct gk20a *g)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
u8 pg_engine_id;
u32 pg_engine_id_list = 0;
int ret = 0;
if (!g->support_ls_pmu) {
return 0;
}
nvgpu_log_fn(g, " ");
pmu = g->pmu;
if (!is_pg_supported(g, g->pmu->pg)) {
return ret;
}
@@ -335,12 +346,18 @@ static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
int nvgpu_pmu_disable_elpg(struct gk20a *g)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
int ret = 0;
u8 pg_engine_id;
u32 pg_engine_id_list = 0;
u32 *ptr = NULL;
if (!g->support_ls_pmu) {
return 0;
}
pmu = g->pmu;
nvgpu_log_fn(g, " ");
if (!is_pg_supported(g, pmu->pg)) {
@@ -475,9 +492,15 @@ exit_unlock:
int nvgpu_pmu_reenable_elpg(struct gk20a *g)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
int ret = 0;
if (!g->support_ls_pmu) {
return 0;
}
pmu = g->pmu;
nvgpu_log_fn(g, " ");
if (!is_pg_supported(g, pmu->pg)) {
@@ -505,10 +528,16 @@ exit:
int nvgpu_pmu_disable_elpg_ms(struct gk20a *g)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
int ret = 0;
u32 *ptr = NULL;
if (!g->support_ls_pmu) {
return 0;
}
pmu = g->pmu;
nvgpu_log_fn(g, " ");
if (!is_pg_supported(g, pmu->pg)) {
@@ -588,9 +617,15 @@ exit_unlock:
int nvgpu_pmu_enable_elpg_ms(struct gk20a *g)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
int status = 0;
if (!g->support_ls_pmu) {
return 0;
}
pmu = g->pmu;
nvgpu_log_fn(g, " ");
if (!is_pg_supported(g, g->pmu->pg)) {
@@ -791,10 +826,16 @@ static int pmu_pg_setup_hw_load_zbc(struct gk20a *g, struct nvgpu_pmu *pmu,
int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
struct pmu_pg_stats_data *pg_stat_data)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
u32 pg_engine_id_list = 0;
int err = 0;
if (!g->support_ls_pmu) {
return 0;
}
pmu = g->pmu;
if (!is_pg_supported(g, pmu->pg) || !pmu->pg->initialized) {
pg_stat_data->ingating_time = 0;
pg_stat_data->ungating_time = 0;
@@ -1138,7 +1179,13 @@ void nvgpu_pmu_pg_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, u8 state)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
if (!g->support_ls_pmu) {
return;
}
pmu = g->pmu;
if (!is_pg_supported(g, pmu->pg)) {
return;
@@ -1150,7 +1197,13 @@ void nvgpu_pmu_set_golden_image_initialized(struct gk20a *g, u8 state)
int nvgpu_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
struct pmu_pg_stats_data *pg_stat_data)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
if (!g->support_ls_pmu) {
return 0;
}
pmu = g->pmu;
if (!is_pg_supported(g, pmu->pg)) {
return 0;
@@ -1161,7 +1214,13 @@ int nvgpu_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
void nvgpu_pmu_save_zbc(struct gk20a *g, u32 entries)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
if (!g->support_ls_pmu) {
return;
}
pmu = g->pmu;
if (!is_pg_supported(g, pmu->pg)) {
return;
@@ -1172,7 +1231,13 @@ void nvgpu_pmu_save_zbc(struct gk20a *g, u32 entries)
bool nvgpu_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id)
{
struct nvgpu_pmu *pmu = g->pmu;
struct nvgpu_pmu *pmu;
if (!g->support_ls_pmu) {
return false;
}
pmu = g->pmu;
if (!is_pg_supported(g, pmu->pg)) {
return false;