gpu: nvgpu: reorder elpg and elpg_ms flags check

- In the rmmod path, the gr struct is freed first and
  then info_mem_destroy is called, which makes an
  elpg_ms protected call. That call in turn waits for
  gr init, where the gr struct is accessed. This could
  lead to a NULL access (see the sketch below).
- Move the elpg and elpg_ms flag checks ahead of the
  pg initialized check and the gr wait to avoid the
  NULL access.
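
In isolation, the failure mode looks roughly like the following
standalone sketch. The stub types and names are illustrative, not
the real nvgpu symbols, and it assumes the feature flags are
already cleared by the time the rmmod teardown runs:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct gr { bool initialized; };

struct gpu {
	struct gr *gr;		/* freed early in the rmmod path */
	bool elpg_enabled;	/* assumed cleared before teardown */
	bool elpg_ms_enabled;
	bool pg_initialized;
};

/* Stands in for g->ops.gr.init.wait_initialized(): touches gr. */
static void wait_gr_initialized(struct gpu *g)
{
	while (!g->gr->initialized) {	/* NULL access if gr is gone */
	}
}

static void elpg_ms_disable(struct gpu *g)
{
	/* After the fix: the cheap flag checks come first, so the
	 * gr wait is skipped entirely when the feature is off. */
	if (g->elpg_enabled && g->elpg_ms_enabled &&
			g->pg_initialized) {
		wait_gr_initialized(g);
		/* ... send the PMU disable command ... */
	}
}

int main(void)
{
	/* rmmod: gr already freed, flags already cleared. */
	struct gpu g = { .gr = NULL, .elpg_enabled = false,
			 .elpg_ms_enabled = false, .pg_initialized = true };

	elpg_ms_disable(&g);	/* returns without touching g->gr */
	printf("teardown path completed without NULL access\n");
	return 0;
}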

Bug 3848290

Change-Id: I088d89d7876405cc7abedb777884b442726e992f
Signed-off-by: Divya <dsinghatwari@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2811131
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author: Divya
Date: 2022-11-17 12:53:03 +00:00
Committed-by: mobile promotions
Parent: 54c2b7a546
Commit: b4b98aba02


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -129,13 +129,19 @@ int nvgpu_pg_elpg_ms_enable(struct gk20a *g)
 		return 0;
 	}
 
-	if (g->pmu->pg->initialized) {
+	/*
+	 * If elpg and elpg_ms flags are set to true
+	 * and pg is initialized then only we wait
+	 * for gr init. In rmmod path, the gr struct
+	 * could be freed earlier. In order to avoid
+	 * NULL access for gr, we check for these
+	 * conditions then proceed further.
+	 */
+	if ((g->elpg_enabled) && (g->elpg_ms_enabled) &&
+			(g->pmu->pg->initialized)) {
 		g->ops.gr.init.wait_initialized(g);
 		nvgpu_mutex_acquire(&g->cg_pg_lock);
-		if ((g->elpg_enabled) && (g->elpg_ms_enabled)) {
-			err = nvgpu_pmu_enable_elpg_ms(g);
-		}
+		err = nvgpu_pmu_enable_elpg_ms(g);
 		nvgpu_mutex_release(&g->cg_pg_lock);
 	}
 #endif
@@ -152,15 +158,18 @@ int nvgpu_pg_elpg_ms_disable(struct gk20a *g)
 		return 0;
 	}
 
-	if (g->pmu->pg->initialized) {
-		g->ops.gr.init.wait_initialized(g);
-		nvgpu_mutex_acquire(&g->cg_pg_lock);
-		if ((g->elpg_enabled) && (g->elpg_ms_enabled)) {
-			err = nvgpu_pmu_disable_elpg_ms(g);
+	/*
+	 * If elpg and elpg_ms flags are set to true
+	 * then only we check further conditions.
+	 */
+	if ((g->elpg_enabled) && (g->elpg_ms_enabled)) {
+		if (g->pmu->pg->initialized) {
+			g->ops.gr.init.wait_initialized(g);
+			nvgpu_mutex_acquire(&g->cg_pg_lock);
+			err = nvgpu_pmu_disable_elpg_ms(g);
+			nvgpu_mutex_release(&g->cg_pg_lock);
 		}
-		nvgpu_mutex_release(&g->cg_pg_lock);
 	}
 #endif
 	return err;
 }
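
Both hunks rely on C's left-to-right short-circuit evaluation: once
the flags test false, nothing to the right of them is evaluated, so
the gr wait behind the condition is never reached. A minimal
demonstration of that ordering guarantee (stub types, illustrative
only; in the reported bug it is the gr struct behind
wait_initialized() that is freed, and whether the pmu pointer chain
itself can be stale at that point is purely an assumption of this
sketch):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct pg  { bool initialized; };
struct pmu { struct pg *pg; };

struct gpu {
	bool elpg_enabled;
	bool elpg_ms_enabled;
	struct pmu *pmu;	/* assumed possibly invalid at teardown */
};

static bool should_wait_for_gr(struct gpu *g)
{
	/* Flags first: if either is false, g->pmu->pg is never read. */
	return g->elpg_enabled && g->elpg_ms_enabled &&
			g->pmu->pg->initialized;
}

int main(void)
{
	struct gpu g = { .elpg_enabled = false,
			 .elpg_ms_enabled = false, .pmu = NULL };

	/* Safe despite pmu == NULL: evaluation stops at the flags. */
	assert(!should_wait_for_gr(&g));
	return 0;
}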