mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: cleanup the seq for railgate seq
- Perfmon commands are non-blocking calls, so a response may or may not
  arrive during the railgate sequence for the perfmon command sent as
  part of the nvgpu_pmu_destroy call.
- If the response is missed, the allocated payload is never freed and
  the allocation info remains in the seq data structure.
- This state is carried forward across multiple railgate/rail-ungate
  sequences and causes a memory leak when a new allocation request is
  made for the same seq-id.
- Clean up the sequence data struct as part of the nvgpu_pmu_destroy
  call by freeing the memory if cb_params is not NULL.

Bug 3747586
Bug 3722721

Change-Id: I1a0f192197769acec12993ae575277e38c9ca9ca
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2763054
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Divya Singhatwaria <dsinghatwari@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Tested-by: Divya Singhatwaria <dsinghatwari@nvidia.com>
commit 8c36750fd8
parent dabf933944
committed by: mobile promotions
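Editor's note on the failure mode: the leak arises because a sequence slot keeps its cb_params pointer when a response never arrives, and a later command reusing the same seq-id overwrites that pointer. The following is a minimal, self-contained C sketch of this pattern, illustrative only; apart from cb_params, none of the names model actual nvgpu identifiers, and plain malloc/free stand in for the driver's allocator (the patch itself uses nvgpu_kfree).

	/* Hypothetical model of the leak fixed above; names other than
	 * cb_params are illustrative, not nvgpu code. */
	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_SEQ 4

	struct seq {
		int id;
		void *cb_params;   /* payload owned by the in-flight command */
	};

	static struct seq seq_tbl[MAX_SEQ];

	/* Non-blocking send: allocates the payload and parks it on the seq
	 * slot. If the response never arrives before railgate, nobody
	 * frees it. */
	static void send_cmd(int id, size_t payload_len)
	{
		struct seq *s = &seq_tbl[id % MAX_SEQ];

		/* The bug the commit fixes: a payload still parked on this
		 * slot from a missed response is overwritten and leaked. */
		s->id = id;
		s->cb_params = malloc(payload_len);
	}

	/* Response path: frees the payload. Skipped when the response is
	 * missed during railgate. */
	static void handle_response(int id)
	{
		struct seq *s = &seq_tbl[id % MAX_SEQ];

		free(s->cb_params);
		s->cb_params = NULL;
	}

	/* Mirror of the patch: on teardown, sweep the table and free
	 * anything a missed response left behind. */
	static void sequences_cleanup(void)
	{
		for (int i = 0; i < MAX_SEQ; i++) {
			if (seq_tbl[i].cb_params != NULL) {
				printf("seq id-%d: freeing orphaned cb_params\n",
				       seq_tbl[i].id);
				free(seq_tbl[i].cb_params);
				seq_tbl[i].cb_params = NULL;
			}
		}
	}

	int main(void)
	{
		send_cmd(0, 64);
		handle_response(0);   /* normal case: payload freed */

		send_cmd(1, 64);      /* railgate hits before the response... */
		sequences_cleanup();  /* ...so teardown must free the payload */
		return 0;
	}

The cleanup loop mirrors the new nvgpu_pmu_sequences_cleanup() in the diff below: sweep every slot, free any payload a missed response left behind, and NULL the pointer so the sweep stays idempotent.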
@@ -49,6 +49,28 @@ void nvgpu_pmu_sequences_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
 	}
 }
 
+void nvgpu_pmu_sequences_cleanup(struct gk20a *g, struct nvgpu_pmu *pmu,
+		struct pmu_sequences *sequences)
+{
+	u32 i;
+
+	(void)pmu;
+
+	nvgpu_log_fn(g, " ");
+
+	if (sequences == NULL) {
+		return;
+	}
+
+	for (i = 0; i < PMU_MAX_NUM_SEQUENCES; i++) {
+		if (sequences->seq[i].cb_params != NULL) {
+			nvgpu_info(g, "seq id-%d Free CBP ", sequences->seq[i].id);
+			nvgpu_kfree(g, sequences->seq[i].cb_params);
+			sequences->seq[i].cb_params = NULL;
+		}
+	}
+}
+
 int nvgpu_pmu_sequences_init(struct gk20a *g, struct nvgpu_pmu *pmu,
 		struct pmu_sequences **sequences_p)
 {
@@ -123,6 +123,8 @@ int nvgpu_pmu_destroy(struct gk20a *g, struct nvgpu_pmu *pmu)
 		nvgpu_pmu_ss_fbq_flush(g, pmu);
 	}
 
+	nvgpu_pmu_sequences_cleanup(g, pmu, pmu->sequences);
+
 	nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_OFF, false);
 	nvgpu_pmu_set_fw_ready(g, pmu, false);
 	nvgpu_pmu_lsfm_clean(g, pmu, pmu->lsfm);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -94,6 +94,8 @@ struct pmu_sequences {
 
 void nvgpu_pmu_sequences_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
 		struct pmu_sequences *sequences);
+void nvgpu_pmu_sequences_cleanup(struct gk20a *g, struct nvgpu_pmu *pmu,
+		struct pmu_sequences *sequences);
 int nvgpu_pmu_sequences_init(struct gk20a *g, struct nvgpu_pmu *pmu,
 		struct pmu_sequences **sequences_p);
 void nvgpu_pmu_sequences_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,