diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_cmd.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_cmd.c
index dd8929ede..4e05bbe5d 100644
--- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_cmd.c
+++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_cmd.c
@@ -104,6 +104,10 @@ static bool pmu_validate_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	}
 
 	queue_size = nvgpu_pmu_queue_get_size(&pmu->queues, queue_id);
+	if (queue_size == 0) {
+		nvgpu_err(g, "queue_size invalid");
+		return false;
+	}
 
 	if (cmd->hdr.size > (queue_size >> 1)) {
 		goto invalid_cmd;
@@ -579,6 +583,11 @@ int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 	if (nvgpu_pmu_fb_queue_enabled(&pmu->queues)) {
 		fb_queue = nvgpu_pmu_fb_queue(&pmu->queues, queue_id);
+		if (fb_queue == NULL) {
+			nvgpu_pmu_seq_release(g, pmu->sequences, seq);
+			return -EINVAL;
+		}
+
 		/* Save the queue in the seq structure. */
 		nvgpu_pmu_seq_set_cmd_queue(seq, fb_queue);
 
@@ -717,7 +726,13 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, u8 *rpc,
 	if (status != 0) {
 		nvgpu_err(g, "Failed to execute RPC status=0x%x, func=0x%x",
 			status, rpc_header->function);
+		/*
+		 * The command post failed, so free the
+		 * allocated memory and return the
+		 * status to the caller.
+		 */
-		goto cleanup;
+		nvgpu_kfree(g, rpc_payload);
+		return status;
 	}
 
 	/*
@@ -735,24 +750,32 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, u8 *rpc,
 			status = -ETIMEDOUT;
 			goto cleanup;
 		} else if (fw_ack_status == PMU_FW_ACK_DRIVER_SHUTDOWN) {
-			/* free allocated memory */
-			nvgpu_kfree(g, rpc_payload);
-			/* release the sequence */
-			seq = nvgpu_pmu_sequences_get_seq(pmu->sequences,
-				cmd.hdr.seq_id);
-			nvgpu_pmu_seq_release(g, pmu->sequences, seq);
+			status = 0;
+			goto cleanup;
 		} else {
 			/* copy back data to caller */
-			nvgpu_memcpy(rpc, (u8 *)rpc_buff, size_rpc);
-			/* free allocated memory */
-			nvgpu_kfree(g, rpc_payload);
+			nvgpu_memcpy((u8 *)rpc, (u8 *)rpc_buff, size_rpc);
+
+			seq = nvgpu_pmu_sequences_get_seq(pmu->sequences,
+				cmd.hdr.seq_id);
+
+			if (seq->seq_free_status == false) {
+				/*
+				 * Free the allocated memory and set
+				 * seq_free_status to true to
+				 * synchronise the memory free.
+				 */
+				nvgpu_kfree(g, rpc_payload);
+				seq->seq_free_status = true;
+			}
 		}
 	}
 
 	return 0;
 
 cleanup:
-	nvgpu_kfree(g, rpc_payload);
+	seq = nvgpu_pmu_sequences_get_seq(pmu->sequences,
+		cmd.hdr.seq_id);
+	/* free allocated memory and release the seq */
+	nvgpu_pmu_seq_free_release(g, pmu->sequences, seq);
 exit:
 	return status;
 }
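Note: the nvgpu_pmu_rpc_execute() hunks above split the failure path from the completion path. A failed nvgpu_pmu_cmd_post() now frees rpc_payload directly and returns, while the normal completion path frees it only when seq->seq_free_status shows the asynchronous handler has not already done so. The sketch below is a minimal standalone model of that free-once idea, not nvgpu code; every name in it is invented for illustration:

/*
 * Standalone model (invented names, not nvgpu code) of the
 * seq_free_status idea: two completion paths may race to free the
 * same payload, so whichever takes the lock first frees it and the
 * other observes the flag and skips the free.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct seq_model {
	pthread_mutex_t lock;
	bool freed;		/* mirrors seq->seq_free_status */
	void *payload;		/* mirrors rpc_payload */
};

static void free_once(struct seq_model *s, const char *who)
{
	pthread_mutex_lock(&s->lock);
	if (!s->freed) {
		free(s->payload);
		s->payload = NULL;
		s->freed = true;
		printf("%s freed the payload\n", who);
	} else {
		printf("%s skipped: already freed\n", who);
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct seq_model s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.freed = false,		/* cleared again on each acquire */
		.payload = malloc(64),
	};

	free_once(&s, "rpc caller");	/* nvgpu_pmu_rpc_execute path */
	free_once(&s, "msg handler");	/* nvgpu_pmu_rpc_handler path */
	return 0;
}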
diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c
index ace5f5b95..39ec45d24 100644
--- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c
+++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_msg.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -616,6 +616,8 @@ void nvgpu_pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 	struct nv_pmu_rpc_header rpc;
 	struct rpc_handler_payload *rpc_payload =
 		(struct rpc_handler_payload *)param;
+	struct pmu_sequence *seq = NULL;
+	struct nvgpu_pmu *pmu = g->pmu;
 
 	(void)status;
 
@@ -639,9 +641,11 @@ void nvgpu_pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 
 exit:
 	rpc_payload->complete = true;
 
-	/* free allocated memory */
+	/* free allocated memory and release the sequence */
 	if (rpc_payload->is_mem_free_set) {
-		nvgpu_kfree(g, rpc_payload);
+		seq = nvgpu_pmu_sequences_get_seq(pmu->sequences,
+			msg->hdr.seq_id);
+		nvgpu_pmu_seq_free_release(g, pmu->sequences, seq);
 	}
 }
diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_queue.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_queue.c
index 4660e4f9b..5cc94ee13 100644
--- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_queue.c
+++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_queue.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -238,7 +238,13 @@ u32 nvgpu_pmu_queue_get_size(struct pmu_queues *queues, u32 queue_id)
 
 	if (queues->queue_type == QUEUE_TYPE_FB) {
 		fb_queue = queues->fb_queue[queue_id];
-		queue_size = nvgpu_engine_fb_queue_get_element_size(fb_queue);
+		if (fb_queue != NULL) {
+			queue_size =
+				nvgpu_engine_fb_queue_get_element_size(fb_queue);
+		} else {
+			/* when fb_queue is NULL, return size 0 */
+			return 0;
+		}
 	} else {
 		queue = queues->queue[queue_id];
 		queue_size = nvgpu_engine_mem_queue_get_size(queue);
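Note: with the pmu_queue.c change above, nvgpu_pmu_queue_get_size() uses 0 as an in-band "no backing queue" value, which the new guard in pmu_validate_cmd() (first pmu_cmd.c hunk) rejects before the size check, instead of dereferencing a NULL fb_queue. A standalone sketch of that consumption pattern, with invented names and a simplified queue type, would look like:

/*
 * Standalone sketch (invented names) of how the zero return is
 * consumed: size 0 unambiguously means "no backing queue", so
 * validation fails fast rather than comparing against a bogus size.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct queue_model {
	uint32_t element_size;
};

/* mirrors nvgpu_pmu_queue_get_size() returning 0 for a NULL queue */
static uint32_t queue_get_size(const struct queue_model *q)
{
	return (q != NULL) ? q->element_size : 0;
}

/* mirrors the new guard added to pmu_validate_cmd() */
static bool validate_cmd_size(const struct queue_model *q, uint32_t cmd_size)
{
	uint32_t queue_size = queue_get_size(q);

	if (queue_size == 0) {
		fprintf(stderr, "queue_size invalid\n");
		return false;
	}
	/* same rule as the existing check: fit within half the queue */
	return cmd_size <= (queue_size >> 1);
}

int main(void)
{
	struct queue_model q = { .element_size = 256 };

	printf("%d\n", validate_cmd_size(&q, 100));	/* 1: fits */
	printf("%d\n", validate_cmd_size(NULL, 100));	/* 0: no queue */
	return 0;
}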
diff --git a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c
index 544a7f243..230c60b1c 100644
--- a/drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c
+++ b/drivers/gpu/nvgpu/common/pmu/ipc/pmu_seq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -49,6 +49,33 @@ void nvgpu_pmu_sequences_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
 	}
 }
 
+void nvgpu_pmu_seq_free_release(struct gk20a *g,
+	struct pmu_sequences *sequences,
+	struct pmu_sequence *seq)
+{
+	seq->state = PMU_SEQ_STATE_FREE;
+	seq->callback = NULL;
+	seq->out_payload = NULL;
+
+	nvgpu_mutex_acquire(&sequences->pmu_seq_lock);
+
+	/*
+	 * Check whether the seq->id bit is set in the seq table.
+	 * If set, free cb_params when seq_free_status is false,
+	 * which means the memory has not been freed yet, and
+	 * then clear the seq->id bit.
+	 */
+	if (nvgpu_test_bit(seq->id, sequences->pmu_seq_tbl)) {
+		if (seq->cb_params != NULL && seq->seq_free_status == false) {
+			nvgpu_kfree(g, seq->cb_params);
+			seq->cb_params = NULL;
+			seq->seq_free_status = true;
+			nvgpu_clear_bit(seq->id, sequences->pmu_seq_tbl);
+		}
+	}
+	nvgpu_mutex_release(&sequences->pmu_seq_lock);
+}
+
 void nvgpu_pmu_sequences_cleanup(struct gk20a *g, struct nvgpu_pmu *pmu,
 	struct pmu_sequences *sequences)
 {
@@ -64,9 +91,9 @@ void nvgpu_pmu_sequences_cleanup(struct gk20a *g, struct nvgpu_pmu *pmu,
 
 	for (i = 0; i < PMU_MAX_NUM_SEQUENCES; i++) {
 		if (sequences->seq[i].cb_params != NULL) {
-			nvgpu_info(g, "seq id-%d Free CBP ", sequences->seq[i].id);
-			nvgpu_kfree(g, sequences->seq[i].cb_params);
-			sequences->seq[i].cb_params = NULL;
+			nvgpu_pmu_seq_free_release(g, sequences,
+				&sequences->seq[i]);
+			nvgpu_pmu_dbg(g, "sequences cleanup done");
 		}
 	}
 }
@@ -165,6 +192,7 @@ int nvgpu_pmu_seq_acquire(struct gk20a *g,
 	seq->out_payload = NULL;
 	seq->in_payload_fb_queue = false;
 	seq->out_payload_fb_queue = false;
+	seq->seq_free_status = false;
 
 	*pseq = seq;
 	return 0;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu/seq.h b/drivers/gpu/nvgpu/include/nvgpu/pmu/seq.h
index 2f82c54fb..a26be2fe8 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu/seq.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu/seq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -49,6 +49,7 @@ enum pmu_seq_state {
 struct pmu_sequence {
 	u8 id;
 	enum pmu_seq_state state;
+	bool seq_free_status;
 	union {
 		struct pmu_allocation_v1 in_v1;
 		struct pmu_allocation_v2 in_v2;
@@ -96,6 +97,9 @@ void nvgpu_pmu_sequences_sw_setup(struct gk20a *g, struct nvgpu_pmu *pmu,
 	struct pmu_sequences *sequences);
 void nvgpu_pmu_sequences_cleanup(struct gk20a *g, struct nvgpu_pmu *pmu,
 	struct pmu_sequences *sequences);
+void nvgpu_pmu_seq_free_release(struct gk20a *g,
+	struct pmu_sequences *sequences,
+	struct pmu_sequence *seq);
 int nvgpu_pmu_sequences_init(struct gk20a *g, struct nvgpu_pmu *pmu,
 	struct pmu_sequences **sequences_p);
 void nvgpu_pmu_sequences_deinit(struct gk20a *g, struct nvgpu_pmu *pmu,
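Note: as far as the diff shows, the intended lifecycle is that nvgpu_pmu_seq_acquire() resets seq_free_status to false, either the RPC caller or the message handler performs the one allowed free of cb_params, and nvgpu_pmu_seq_free_release() makes that free idempotent under pmu_seq_lock. Below is a minimal standalone model of the release helper, with invented names, a plain bool array in place of the pmu_seq_tbl bitmap, and a pthread mutex in place of nvgpu_mutex:

/*
 * Standalone model (invented names, not nvgpu code) of
 * nvgpu_pmu_seq_free_release(): free cb_params at most once per
 * acquired sequence, tracked by seq_free_status, and clear the
 * in-use slot when the free happens here.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define MAX_SEQ 64

struct seq_entry {
	unsigned int id;
	bool seq_free_status;	/* true once cb_params was freed */
	void *cb_params;
};

struct seq_table {
	pthread_mutex_t lock;
	bool in_use[MAX_SEQ];	/* stands in for the pmu_seq_tbl bits */
	struct seq_entry seq[MAX_SEQ];
};

static void seq_free_release(struct seq_table *t, struct seq_entry *s)
{
	pthread_mutex_lock(&t->lock);
	if (t->in_use[s->id] &&
	    s->cb_params != NULL && !s->seq_free_status) {
		free(s->cb_params);
		s->cb_params = NULL;
		s->seq_free_status = true;
		t->in_use[s->id] = false;
	}
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct seq_table t = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct seq_entry *s = &t.seq[3];

	s->id = 3;
	s->seq_free_status = false;	/* as reset by acquire */
	s->cb_params = malloc(32);
	t.in_use[3] = true;

	seq_free_release(&t, s);	/* frees and clears the slot */
	seq_free_release(&t, s);	/* second call is a safe no-op */
	return 0;
}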