From 8c36750fd8202a796e7d4e4ecf9ae03d3aa11dfa Mon Sep 17 00:00:00 2001
From: Mahantesh Kumbar
Date: Thu, 18 Aug 2022 11:18:26 +0000
Subject: [PATCH] gpu: nvgpu: cleanup the seq for railgate seq

- Perfmon cmds are non-blocking calls and response may/may-not come
  during railgate sequence for the perfmon command sent as part of
  nvgpu_pmu_destroy call.
- if response is missed then payload allocated will not be freed and
  allocation info will be present as part seq data structure.
- This will be carried forward for multiple railgate/ rail-ungate
  sequence and that will cause the memleak when new allocation
  request is made for same seq-id.
- Cleanup the sequence data struct as part of nvgpu_pmu_destroy call
  by freeing the memory if cb_params is not NULL.

Bug 3747586
Bug 3722721

Change-Id: I1a0f192197769acec12993ae575277e38c9ca9ca
Signed-off-by: Mahantesh Kumbar
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2763054
Reviewed-by: svc-mobile-coverity
Reviewed-by: svc-mobile-cert
Reviewed-by: Divya Singhatwaria
GVS: Gerrit_Virtual_Submit
Tested-by: Divya Singhatwaria
---
 drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c   | 22 ++++++++++++++++++++
 drivers/gpu/nvgpu/common/pmu/pmu_rtos_init.c |  2 ++
 drivers/gpu/nvgpu/include/nvgpu/pmu/seq.h    |  4 +++-
 3 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c
index fbe6b010c..544a7f243 100644
--- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c
+++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c
@@ -49,6 +49,28 @@ void nvgpu_pmu_sequences_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
 	}
 }
 
+void nvgpu_pmu_sequences_cleanup(struct gk20a *g, struct nvgpu_pmu *pmu,
+		struct pmu_sequences *sequences)
+{
+	u32 i;
+
+	(void)pmu;
+
+	nvgpu_log_fn(g, " ");
+
+	if (sequences == NULL) {
+		return;
+	}
+
+	for (i = 0; i < PMU_MAX_NUM_SEQUENCES; i++) {
+		if (sequences->seq[i].cb_params != NULL) {
+			nvgpu_info(g, "seq id-%d Free CBP ",
+					sequences->seq[i].id);
+			nvgpu_kfree(g, sequences->seq[i].cb_params);
+			sequences->seq[i].cb_params = NULL;
+		}
+	}
+}
+
 int nvgpu_pmu_sequences_init(struct gk20a *g, struct nvgpu_pmu *pmu,
 		struct pmu_sequences **sequences_p)
 {
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_rtos_init.c b/drivers/gpu/nvgpu/common/pmu/pmu_rtos_init.c
index 36f5a71cf..4142567b2 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_rtos_init.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_rtos_init.c
@@ -123,6 +123,8 @@ int nvgpu_pmu_destroy(struct gk20a *g, struct nvgpu_pmu *pmu)
 		nvgpu_pmu_ss_fbq_flush(g, pmu);
 	}
 
+	nvgpu_pmu_sequences_cleanup(g, pmu, pmu->sequences);
+
 	nvgpu_pmu_fw_state_change(g, pmu, PMU_FW_STATE_OFF, false);
 	nvgpu_pmu_set_fw_ready(g, pmu, false);
 	nvgpu_pmu_lsfm_clean(g, pmu, pmu->lsfm);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/seq.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/seq.h
index 49d95ee46..2f82c54fb 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu/seq.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/seq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -94,6 +94,8 @@ struct pmu_sequences {
 
 void nvgpu_pmu_sequences_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
 		struct pmu_sequences *sequences);
+void nvgpu_pmu_sequences_cleanup(struct gk20a *g, struct nvgpu_pmu *pmu,
+		struct pmu_sequences *sequences);
 int nvgpu_pmu_sequences_init(struct gk20a *g, struct nvgpu_pmu *pmu,
 		struct pmu_sequences **sequences_p);
 void nvgpu_pmu_sequences_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,