From a45f684085adaf58f14c6e8d05531969491e7f66 Mon Sep 17 00:00:00 2001
From: Nagaraj P N
Date: Thu, 27 Mar 2025 15:38:51 +0530
Subject: [PATCH] vse: allocate priv struct during init

Allocate the tegra_vse_priv_data structure once per crypto node during
driver init, instead of allocating and freeing it for every crypto
request.

Also, update the gmac crypto ctx free logic so that the ahash request
and transform are released by the caller on all exit paths rather than
inside the sign/verify init helper.

Bug 4881474

Change-Id: I5e2f10814b3a1f41ff098f2c602ee2de431f6cf5
Signed-off-by: Nagaraj P N
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3287699
GVS: buildbot_gerritrpt
Reviewed-by: Sandeep Trasi
Reviewed-by: Ambika Prasad
---
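Note: the pattern change, in condensed form (an illustrative sketch only;
the identifiers are taken from the diff below, but error handling and
surrounding code are abbreviated, not verbatim from the driver):

	/* Before: every request path allocated and freed its own priv */
	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	/* ... build IVC message, send, wait for completion ... */
	devm_kfree(se_dev->dev, priv);

	/* After: probe allocates one priv per crypto node ... */
	crypto_dev->priv = devm_kzalloc(&pdev->dev,
			sizeof(struct tegra_vse_priv_data), GFP_KERNEL);

	/* ... and the request paths reuse it via the node mapping */
	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[node_id].priv;
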
 drivers/crypto/tegra-hv-vse-safety.c   | 182 ++++++++-----------------
 drivers/crypto/tegra-hv-vse.h          |   2 +
 drivers/crypto/tegra-nvvse-cryptodev.c |  24 ++--
 3 files changed, 71 insertions(+), 137 deletions(-)

diff --git a/drivers/crypto/tegra-hv-vse-safety.c b/drivers/crypto/tegra-hv-vse-safety.c
index 23fb7cb9..145e6345 100644
--- a/drivers/crypto/tegra-hv-vse-safety.c
+++ b/drivers/crypto/tegra-hv-vse-safety.c
@@ -1320,7 +1320,7 @@ static int tegra_hv_vse_safety_sha_op(struct tegra_virtual_se_sha_context *sha_c
 	struct tegra_virtual_se_ivc_msg_t ivc_req_msg = {0};
 	union tegra_virtual_se_sha_args *psha;
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[sha_ctx->node_id].ivck;
-	struct tegra_vse_priv_data priv = {0};
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[sha_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	u64 msg_len = 0, temp_len = 0;
 	uint32_t engine_id;
@@ -1436,22 +1436,22 @@ static int tegra_hv_vse_safety_sha_op(struct tegra_virtual_se_sha_context *sha_c
 	psha->op_hash.src_buf_size = msg_len;

 	priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg.ivc_hdr.tag;
-	priv_data_ptr->priv_data = (unsigned int *)&priv;
-	priv.cmd = VIRTUAL_SE_PROCESS;
-	priv.se_dev = se_dev;
-	init_completion(&priv.alg_complete);
+	priv_data_ptr->priv_data = (unsigned int *)priv;
+	priv->cmd = VIRTUAL_SE_PROCESS;
+	priv->se_dev = se_dev;
+	init_completion(&priv->alg_complete);

-	err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, &priv, &ivc_req_msg,
+	err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, &ivc_req_msg,
 			sizeof(struct tegra_virtual_se_ivc_msg_t), sha_ctx->node_id);
 	if (err) {
 		dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err);
 		goto exit;
 	}

-	if (priv.rx_status != 0) {
-		err = status_to_errno(priv.rx_status);
+	if (priv->rx_status != 0) {
+		err = status_to_errno(priv->rx_status);
 		dev_err(se_dev->dev, "%s: SE server returned error %u\n",
-			__func__, priv.rx_status);
+			__func__, priv->rx_status);
 		goto exit;
 	}

@@ -1692,7 +1692,7 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req,
 	struct tegra_virtual_se_hmac_sha_args *phmac;
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[hmac_ctx->node_id].ivck;
 	int err = 0;
-	struct tegra_vse_priv_data priv = {0};
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[hmac_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	const struct tegra_vse_dma_buf *src, *hash, *match;

@@ -1873,22 +1873,22 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req,
 	}

 	priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg.ivc_hdr.tag;
-	priv_data_ptr->priv_data = (unsigned int *)&priv;
-	priv.cmd = VIRTUAL_SE_PROCESS;
-	priv.se_dev = se_dev;
-	init_completion(&priv.alg_complete);
+	priv_data_ptr->priv_data = (unsigned int *)priv;
+	priv->cmd = VIRTUAL_SE_PROCESS;
+	priv->se_dev = se_dev;
+	init_completion(&priv->alg_complete);

-	err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, &priv, &ivc_req_msg,
+	err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, &ivc_req_msg,
 			sizeof(struct tegra_virtual_se_ivc_msg_t), hmac_ctx->node_id);
 	if (err) {
 		dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err);
 		goto unmap_exit;
 	}

-	if (priv.rx_status != 0) {
-		err = status_to_errno(priv.rx_status);
+	if (priv->rx_status != 0) {
+		err = status_to_errno(priv->rx_status);
 		dev_err(se_dev->dev, "%s: SE server returned error %u\n",
-			__func__, priv.rx_status);
+			__func__, priv->rx_status);
 		goto unmap_exit;
 	}

@@ -1896,9 +1896,9 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req,
 	if (hmac_ctx->request_type == TEGRA_HV_VSE_HMAC_SHA_VERIFY) {
 		if (se_dev->chipdata->hmac_verify_hw_support == false) {
 			ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_HMAC_GET_VERIFY;
-			priv.cmd = VIRTUAL_SE_PROCESS;
-			init_completion(&priv.alg_complete);
-			err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, &priv,
+			priv->cmd = VIRTUAL_SE_PROCESS;
+			init_completion(&priv->alg_complete);
+			err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv,
 					&ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t),
 					hmac_ctx->node_id);
 			if (err) {
@@ -1907,15 +1907,15 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req,
 				goto unmap_exit;
 			}

-			if (priv.rx_status == 0) {
+			if (priv->rx_status == 0) {
 				hmac_ctx->result = 0;
-			} else if (priv.rx_status == TEGRA_VIRTUAL_SE_ERR_MAC_INVALID) {
+			} else if (priv->rx_status == TEGRA_VIRTUAL_SE_ERR_MAC_INVALID) {
 				dev_dbg(se_dev->dev, "%s: tag mismatch", __func__);
 				hmac_ctx->result = 1;
 			} else {
-				err = status_to_errno(priv.rx_status);
+				err = status_to_errno(priv->rx_status);
 				dev_err(se_dev->dev, "%s: SE server returned error %u\n",
-					__func__, priv.rx_status);
+					__func__, priv->rx_status);
 			}
 		} else {
 			if (memcmp(match->buf_ptr, &matchcode, 4) == 0) {
@@ -2163,16 +2163,11 @@ static int tegra_hv_vse_safety_process_aes_req(struct tegra_virtual_se_dev *se_d
 	struct tegra_hv_ivc_cookie *pivck;
 	int err = 0;
 	struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL;
-	struct tegra_vse_priv_data *priv = NULL;
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[aes_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	union tegra_virtual_se_aes_args *aes;
 	const struct tegra_vse_dma_buf *src;

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		err = -ENOMEM;
-		goto exit;
-	}
 	priv->req = req;

 	ivc_req_msg =
@@ -2267,9 +2262,6 @@ static int tegra_hv_vse_safety_process_aes_req(struct tegra_virtual_se_dev *se_d
 	err = status_to_errno(priv->rx_status);

 exit:
-	if (priv)
-		devm_kfree(se_dev->dev, priv);
-
 	if (ivc_req_msg)
 		devm_kfree(se_dev->dev, ivc_req_msg);

@@ -2491,7 +2483,7 @@ static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req,
 	struct tegra_virtual_se_ivc_msg_t *ivc_req_msg;
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[cmac_ctx->node_id].ivck;
 	int err = 0;
-	struct tegra_vse_priv_data *priv = NULL;
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[cmac_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	uint32_t tsec_fw_err;
 	const struct tegra_vse_dma_buf *src, *mac, *fw_status;
@@ -2501,12 +2493,6 @@ static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req,
 	if (!ivc_req_msg)
 		return -ENOMEM;

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		devm_kfree(se_dev->dev, ivc_req_msg);
-		return -ENOMEM;
-	}
-
 	ivc_tx = &ivc_req_msg->tx[0];
 	ivc_hdr = &ivc_req_msg->ivc_hdr;
 	ivc_hdr->num_reqs = 1;
@@ -2631,7 +2617,6 @@ static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req,
 	}

 free_mem:
-	devm_kfree(se_dev->dev, priv);
 	devm_kfree(se_dev->dev, ivc_req_msg);

 	return err;
@@ -2649,7 +2634,7 @@ static int tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(
 	struct tegra_virtual_se_ivc_msg_t *ivc_req_msg;
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[cmac_ctx->node_id].ivck;
 	int err = 0;
-	struct tegra_vse_priv_data *priv = NULL;
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[cmac_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	u32 match_code = SE_HW_VALUE_MATCH_CODE;
 	u32 mac_buf_size = 16;
@@ -2660,12 +2645,6 @@ static int tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(
 	if (!ivc_req_msg)
 		return -ENOMEM;

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		devm_kfree(se_dev->dev, ivc_req_msg);
-		return -ENOMEM;
-	}
-
 	src = tegra_hv_vse_get_dma_buf(cmac_ctx->node_id, AES_SRC_BUF_IDX,
 			cmac_ctx->user_src_buf_size);
 	if (!src) {
@@ -2777,7 +2756,6 @@ static int tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(
 	}

 free_mem:
-	devm_kfree(se_dev->dev, priv);
 	devm_kfree(se_dev->dev, ivc_req_msg);

 	return err;
@@ -2795,7 +2773,7 @@ static int tegra_hv_vse_safety_cmac_sv_op(struct ahash_request *req,
 	u32 blocks_to_process, last_block_bytes = 0;
 	unsigned int total_len;
 	int err = 0;
-	struct tegra_vse_priv_data *priv = NULL;
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[cmac_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	const struct tegra_vse_dma_buf *src;

@@ -2804,12 +2782,6 @@ static int tegra_hv_vse_safety_cmac_sv_op(struct ahash_request *req,
 	if (!ivc_req_msg)
 		return -ENOMEM;

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		devm_kfree(se_dev->dev, ivc_req_msg);
-		return -ENOMEM;
-	}
-
 	blocks_to_process = cmac_ctx->user_src_buf_size /
 			TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE;
 	/* num of bytes less than block size */
@@ -2964,7 +2936,6 @@ static int tegra_hv_vse_safety_cmac_sv_op(struct ahash_request *req,
 	}

 free_mem:
-	devm_kfree(se_dev->dev, priv);
 	devm_kfree(se_dev->dev, ivc_req_msg);

 	return err;
@@ -3192,12 +3163,7 @@ int tegra_hv_vse_safety_tsec_get_keyload_status(uint32_t node_id, uint32_t *err_
 	if (!ivc_req_msg)
 		return -ENOMEM;

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		devm_kfree(se_dev->dev, ivc_req_msg);
-		dev_err(se_dev->dev, "Priv Data allocation failed\n");
-		return -ENOMEM;
-	}
+	priv = g_crypto_to_ivc_map[node_id].priv;

 	ivc_hdr = &ivc_req_msg->ivc_hdr;
 	ivc_tx = &ivc_req_msg->tx[0];
@@ -3240,7 +3206,6 @@ int tegra_hv_vse_safety_tsec_get_keyload_status(uint32_t node_id, uint32_t *err_
 	}

 free_exit:
-	devm_kfree(se_dev->dev, priv);
 	devm_kfree(se_dev->dev, ivc_req_msg);

 	return err;
@@ -3547,13 +3512,6 @@ static int tegra_hv_vse_safety_get_random(struct tegra_virtual_se_rng_context *r
 	if (!ivc_req_msg)
 		return -ENOMEM;

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		dev_err(se_dev->dev, "Priv Data allocation failed\n");
-		devm_kfree(se_dev->dev, ivc_req_msg);
-		return 0;
-	}
-
 	if (is_hw_req == CRYPTODEV_RNG) {
 		src = tegra_hv_vse_get_dma_buf(rng_ctx->node_id, AES_SRC_BUF_IDX,
 				TEGRA_VIRTUAL_SE_RNG_DT_SIZE);
@@ -3561,8 +3519,11 @@ static int tegra_hv_vse_safety_get_random(struct tegra_virtual_se_rng_context *r
 			dev_err(se_dev->dev, "%s src is NULL\n", __func__);
 			return -ENOMEM;
 		}
-	} else
+		priv = g_crypto_to_ivc_map[rng_ctx->node_id].priv;
+	} else {
 		src = &rng_ctx->hwrng_dma_buf;
+		priv = rng_ctx->priv;
+	}

 	ivc_tx = &ivc_req_msg->tx[0];
 	ivc_hdr = &ivc_req_msg->ivc_hdr;
@@ -3603,7 +3564,6 @@ static int tegra_hv_vse_safety_get_random(struct tegra_virtual_se_rng_context *r
 		}
 	}
 exit:
-	devm_kfree(se_dev->dev, priv);
 	devm_kfree(se_dev->dev, ivc_req_msg);

 	return dlen;
@@ -3751,7 +3711,7 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req,
 	struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
 	struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck;
-	struct tegra_vse_priv_data *priv = NULL;
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[aes_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	int err = 0;
 	const struct tegra_vse_dma_buf *src, *aad, *tag;
@@ -3805,13 +3765,6 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req,
 		}
 	}

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-
-	if (!priv) {
-		err = -ENOMEM;
-		goto free_exit;
-	}
-
 	ivc_req_msg = devm_kzalloc(se_dev->dev,
 			sizeof(*ivc_req_msg), GFP_KERNEL);
 	if (!ivc_req_msg) {
@@ -3967,9 +3920,6 @@ free_exit:
 	if (ivc_req_msg)
 		devm_kfree(se_dev->dev, ivc_req_msg);

-	if (priv)
-		devm_kfree(se_dev->dev, priv);
-
 	return err;
 }

@@ -3983,7 +3933,7 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req,
 	struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
 	struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck;
-	struct tegra_vse_priv_data *priv = NULL;
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[aes_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	int err = 0;
 	u32 match_code = SE_HW_VALUE_MATCH_CODE;
@@ -4038,13 +3988,6 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req,
 		}
 	}

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-
-	if (!priv) {
-		err = -ENOMEM;
-		goto free_exit;
-	}
-
 	ivc_req_msg = devm_kzalloc(se_dev->dev,
 			sizeof(*ivc_req_msg), GFP_KERNEL);
 	if (!ivc_req_msg) {
@@ -4174,9 +4117,6 @@ free_exit:
 	if (ivc_req_msg)
 		devm_kfree(se_dev->dev, ivc_req_msg);

-	if (priv)
-		devm_kfree(se_dev->dev, priv);
-
 	return err;
 }

@@ -4409,11 +4349,7 @@ static int tegra_hv_vse_aes_gmac_sv_init(struct ahash_request *req)
 		goto exit;
 	}

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		err = -ENOMEM;
-		goto exit;
-	}
+	priv = g_crypto_to_ivc_map[gmac_ctx->node_id].priv;

 	ivc_req_msg = devm_kzalloc(se_dev->dev,
 			sizeof(*ivc_req_msg), GFP_KERNEL);
@@ -4480,9 +4416,6 @@ free_exit:
 	if (ivc_req_msg)
 		devm_kfree(se_dev->dev, ivc_req_msg);

-	if (priv)
-		devm_kfree(se_dev->dev, priv);
-
 exit:
 	return err;
 }
@@ -4504,7 +4437,7 @@ static int tegra_hv_vse_aes_gmac_sv_op(struct ahash_request *req,
 	struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
 	struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
 	struct tegra_hv_ivc_cookie *pivck;
-	struct tegra_vse_priv_data *priv = NULL;
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[gmac_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	int err = 0;
 	const struct tegra_vse_dma_buf *aad, *tag;
@@ -4539,12 +4472,6 @@ static int tegra_hv_vse_aes_gmac_sv_op(struct ahash_request *req,
 		}
 	}

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		err = -ENOMEM;
-		goto free_exit;
-	}
-
 	ivc_req_msg = devm_kzalloc(se_dev->dev,
 			sizeof(*ivc_req_msg), GFP_KERNEL);
 	if (!ivc_req_msg) {
@@ -4657,9 +4584,6 @@ free_exit:
 	if (ivc_req_msg)
 		devm_kfree(se_dev->dev, ivc_req_msg);

-	if (priv)
-		devm_kfree(se_dev->dev, priv);
-
 exit:
 	return err;
 }
@@ -4672,7 +4596,7 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req,
 	struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
 	struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
 	struct tegra_hv_ivc_cookie *pivck;
-	struct tegra_vse_priv_data *priv = NULL;
+	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[gmac_ctx->node_id].priv;
 	struct tegra_vse_tag *priv_data_ptr;
 	int err = 0;
 	u32 match_code = SE_HW_VALUE_MATCH_CODE;
@@ -4737,12 +4661,6 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req,
 		}
 	}

-	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		err = -ENOMEM;
-		goto free_exit;
-	}
-
 	ivc_req_msg = devm_kzalloc(se_dev->dev,
 			sizeof(*ivc_req_msg), GFP_KERNEL);
 	if (!ivc_req_msg) {
@@ -4851,9 +4769,6 @@ free_exit:
 	if (ivc_req_msg)
 		devm_kfree(se_dev->dev, ivc_req_msg);

-	if (priv)
-		devm_kfree(se_dev->dev, priv);
-
 exit:
 	return err;
 }
@@ -5529,6 +5444,12 @@ static int tegra_hv_vse_safety_register_hwrng(struct tegra_virtual_se_dev *se_de
 	if (!rng_ctx->hwrng_dma_buf.buf_ptr)
 		return -ENOMEM;

+	rng_ctx->priv = devm_kzalloc(se_dev->dev, sizeof(struct tegra_vse_priv_data), GFP_KERNEL);
+	if (!rng_ctx->priv) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	vse_hwrng->name = "tegra_hv_vse_safety";
 	vse_hwrng->read = tegra_hv_vse_safety_hwrng_read;
 	vse_hwrng->quality = 1024;
@@ -5540,6 +5461,7 @@ out:
 	if (rng_ctx) {
 		dma_free_coherent(se_dev->dev, TEGRA_VIRTUAL_SE_RNG_DT_SIZE,
 			rng_ctx->hwrng_dma_buf.buf_ptr, rng_ctx->hwrng_dma_buf.buf_iova);
+		devm_kfree(se_dev->dev, rng_ctx->priv);
 		devm_kfree(se_dev->dev, rng_ctx);
 	}
 	if (vse_hwrng)
@@ -6001,6 +5923,13 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)

 		crypto_dev = &g_crypto_to_ivc_map[s_node_id];

+		crypto_dev->priv = devm_kzalloc(&pdev->dev, sizeof(struct tegra_vse_priv_data), GFP_KERNEL);
+		if (!crypto_dev->priv) {
+			dev_err(se_dev->dev, "Error: failed to allocate priv data\n");
+			err = -ENOMEM;
+			goto exit;
+		}
+
 		err = of_property_read_u32_index(np, "nvidia,ivccfg",
 				cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_IVC_ID_OFFSET, &ivc_id);
 		if (err) {
@@ -6312,6 +6241,11 @@ static int tegra_hv_vse_safety_remove(struct platform_device *pdev)

 	tegra_hv_vse_safety_unregister_hwrng(platform_get_drvdata(pdev));

+	for (i = 0U; i < MAX_NUMBER_MISC_DEVICES; i++)
+		if ((g_crypto_to_ivc_map[i].node_in_use)
+			&& (g_crypto_to_ivc_map[i].se_dev->dev == &pdev->dev))
+			devm_kfree(&pdev->dev, g_crypto_to_ivc_map[i].priv);
+
 	for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
 		crypto_unregister_ahash(&sha_algs[i]);

diff --git a/drivers/crypto/tegra-hv-vse.h b/drivers/crypto/tegra-hv-vse.h
index 674367c3..d595a303 100644
--- a/drivers/crypto/tegra-hv-vse.h
+++ b/drivers/crypto/tegra-hv-vse.h
@@ -86,6 +86,7 @@ struct crypto_dev_to_ivc_map {
 	bool node_in_use;
 	bool is_zero_copy_node;
 	struct tegra_virtual_se_dev *se_dev;
+	struct tegra_vse_priv_data *priv;
 };

 struct tegra_virtual_se_dev {
@@ -108,6 +109,7 @@ struct tegra_virtual_se_rng_context {
 	/* Security Engine device */
 	struct tegra_virtual_se_dev *se_dev;
 	struct tegra_vse_dma_buf hwrng_dma_buf;
+	struct tegra_vse_priv_data *priv;
 	/*Crypto dev instance*/
 	uint32_t node_id;
 };
diff --git a/drivers/crypto/tegra-nvvse-cryptodev.c b/drivers/crypto/tegra-nvvse-cryptodev.c
index 6fc1c47c..5b37c832 100644
--- a/drivers/crypto/tegra-nvvse-cryptodev.c
+++ b/drivers/crypto/tegra-nvvse-cryptodev.c
@@ -894,7 +894,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify_init(struct tnvvse_crypto_ctx *ctx
 	driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
 	if (driver_name == NULL) {
 		CRYPTODEV_ERR("%s(): Failed to get driver name\n", __func__);
-		goto free_tfm;
+		goto out;
 	}
 	pr_debug("%s(): Algo name gmac-vse(aes), driver name %s\n", __func__,
 			driver_name);
@@ -912,7 +912,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify_init(struct tnvvse_crypto_ctx *ctx
 	ret = crypto_ahash_setkey(tfm, key_as_keyslot, klen);
 	if (ret) {
 		CRYPTODEV_ERR("%s(): Failed to set keys for gmac-vse(aes): %d\n", __func__, ret);
-		goto free_buf;
+		goto out;
 	}

 	if (gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_SIGN)
@@ -924,7 +924,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify_init(struct tnvvse_crypto_ctx *ctx
 	if (ret) {
 		CRYPTODEV_ERR("%s(): Failed to ahash_init for gmac-vse(aes): ret=%d\n",
 			__func__, ret);
-		goto free_buf;
+		goto out;
 	}

 	sha_state->req = req;
@@ -934,12 +934,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify_init(struct tnvvse_crypto_ctx *ctx
 	memset(sha_state->result_buff, 0, TEGRA_NVVSE_AES_GCM_TAG_SIZE);

 	ret = 0;

-	goto out;
-free_buf:
-	kfree(sha_state->in_buf);
-free_tfm:
-	crypto_free_ahash(tfm);
 out:
 	return ret;
 }
@@ -951,7 +946,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
 	struct tegra_virtual_se_aes_gmac_context *gmac_ctx;
 	struct crypto_ahash *tfm;
 	uint8_t iv[TEGRA_NVVSE_AES_GCM_IV_LEN];
-	struct ahash_request *req;
+	struct ahash_request *req = NULL;
 	int ret = -EINVAL;

 	if (ctx->is_zero_copy_node) {
@@ -999,13 +994,13 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
 			(gmac_sign_verify_ctl->tag_length != TEGRA_NVVSE_AES_GCM_TAG_SIZE)) {
 		CRYPTODEV_ERR("%s(): Failed due to invalid tag length (%d) invalid",
 				__func__, gmac_sign_verify_ctl->tag_length);
-		goto done;
+		goto free_req;
 	}

 	ret = tnvvse_crypto_aes_gmac_sign_verify_init(ctx, gmac_sign_verify_ctl, req);
 	if (ret) {
 		CRYPTODEV_ERR("%s(): Failed to init: %d\n", __func__, ret);
-		goto done;
+		goto free_req;
 	}

 	if (gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_SIGN)
@@ -1021,7 +1016,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
 		if (ret) {
 			CRYPTODEV_ERR("%s(): Failed to ahash_update for gmac-vse(aes): %d\n",
 					__func__, ret);
-			goto free_tfm;
+			goto free_req;
 		}
 	} else {
 		if (gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_SIGN) {
@@ -1041,7 +1036,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
 		if (ret) {
 			CRYPTODEV_ERR("%s(): Failed to ahash_finup for gmac-vse(aes): %d\n",
 					__func__, ret);
-			goto free_tfm;
+			goto free_req;
 		}
 	}

@@ -1050,6 +1045,9 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
 		gmac_sign_verify_ctl->result = gmac_ctx->result;
 	}

+free_req:
+	ahash_request_free(req);
+
 free_tfm:
 	crypto_free_ahash(tfm);