crypto: tegra: Fix size of allocated buffer

Allocate the buffer based on the request size instead of using a
fixed buffer length. Operations which require a larger buffer would
otherwise fail. A similar fix was already applied for the AES
algorithms; fix the same for the HASH algorithms as well.
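
In outline, the change replaces a fixed-length DMA allocation with one
sized from the request. A minimal sketch of the pattern follows; the
struct and helper names are illustrative, not the driver's literal code:

#include <linux/dma-mapping.h>

/*
 * Illustrative only: size the coherent buffer from what the request
 * actually needs rather than a fixed length, so large requests cannot
 * exceed the buffer and small ones do not pin extra DMA memory.
 */
struct se_buf {
	void *buf;
	dma_addr_t addr;
	size_t size;
};

static int se_buf_alloc(struct device *dev, struct se_buf *b, size_t need)
{
	b->size = need;
	b->buf = dma_alloc_coherent(dev, b->size, &b->addr, GFP_KERNEL);
	return b->buf ? 0 : -ENOMEM;
}

static void se_buf_free(struct device *dev, struct se_buf *b)
{
	if (b->buf)
		dma_free_coherent(dev, b->size, b->buf, b->addr);
	b->buf = NULL;
}

With this shape, the buffer moves out of init() and is allocated in the
update/final paths, where the required size is known, and freed once the
command completes.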

Bug 4908156

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Change-Id: Idd2c1ceae1a85434a5a51154a17dce8c927bb66c
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3234055
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
(cherry picked from commit 3415677f0f)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3235205
Author:    Akhil R <akhilrajeev@nvidia.com>
Date:      2024-10-21 11:33:32 +05:30
Committer: Amulya Yarlagadda
Parent:    ff0c2e64e9
Commit:    f41b74b8c3
3 changed files with 49 additions and 32 deletions


@@ -1572,6 +1572,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
 		return 0;
 	}
 
+	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+					      &rctx->datbuf.addr, GFP_KERNEL);
+	if (!rctx->datbuf.buf)
+		return -ENOMEM;
+
 	/* Copy the previous residue first */
 	if (rctx->residue.size)
 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
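
The length used for the allocation above is derived from the bytes in
this update plus any residue carried over, with the tail held back for
final(). A hedged sketch of that computation, with illustrative names
and assuming a block-aligned engine:

#include <linux/types.h>

/*
 * Illustrative sketch: how many bytes an update pass processes.
 * 'nbytes' is the new data, 'residue' what a previous update left
 * over, 'blk_size' the cipher block size. CMAC must keep at least
 * one full block back for the final() pass.
 */
static size_t cmac_update_len(size_t nbytes, size_t residue, size_t blk_size)
{
	size_t total = nbytes + residue;
	size_t keep = total % blk_size;

	if (!keep && total)
		keep = blk_size;	/* reserve the last block for final() */

	return total - keep;		/* length to allocate and process now */
}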
@@ -1618,14 +1623,26 @@ static int tegra_cmac_do_final(struct ahash_request *req)
 	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
 		return crypto_shash_tfm_digest(ctx->fallback_tfm,
-					rctx->datbuf.buf, 0, req->result);
+					NULL, 0, req->result);
 	}
 
-	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
 	rctx->datbuf.size = rctx->residue.size;
 	rctx->total_len += rctx->residue.size;
 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
 
+	if (rctx->residue.size) {
+		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+						      &rctx->datbuf.addr, GFP_KERNEL);
+		if (!rctx->datbuf.buf) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+	}
+
 	/* Prepare command and submit */
 	cmdlen = tegra_cmac_prep_cmd(se, rctx);
 	ret = tegra_se_host1x_submit(se, cmdlen);
@@ -1640,8 +1657,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
 		writel(0, se->base + se->hw->regs->result + (i * 4));
 
 out:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
-			  rctx->datbuf.buf, rctx->datbuf.addr);
+	if (rctx->residue.size)
+		dma_free_coherent(se->dev, rctx->datbuf.size,
+				  rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
 			  rctx->residue.buf, rctx->residue.addr);
 	return ret;
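
The out/out_free split above is the usual kernel unwind idiom: since
datbuf is now mapped only when there is residue, the error path must
free only what was actually allocated. A generic, hypothetical sketch
of the idiom, not the driver code:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * Illustrative only: free in reverse order of allocation, and jump
 * past the free of anything that was never mapped.
 */
static int two_buf_op(struct device *dev)
{
	void *a, *b;
	dma_addr_t pa, pb;
	int ret = 0;

	a = dma_alloc_coherent(dev, SZ_4K, &pa, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = dma_alloc_coherent(dev, SZ_4K, &pb, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto out_free;		/* 'b' was never mapped */
	}

	/* ... submit work using both buffers ... */

	dma_free_coherent(dev, SZ_4K, b, pb);
out_free:
	dma_free_coherent(dev, SZ_4K, a, pa);
	return ret;
}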
@@ -1757,15 +1776,9 @@ static int tegra_cmac_init(struct ahash_request *req)
 	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
 					       &rctx->residue.addr, GFP_KERNEL);
 	if (!rctx->residue.buf)
-		goto resbuf_fail;
+		return -ENOMEM;
 
 	rctx->residue.size = 0;
-
-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
-					      &rctx->datbuf.addr, GFP_KERNEL);
-	if (!rctx->datbuf.buf)
-		goto datbuf_fail;
-
 	rctx->datbuf.size = 0;
 
 	/* Clear any previous result */
@@ -1773,12 +1786,6 @@ static int tegra_cmac_init(struct ahash_request *req)
 		writel(0, se->base + se->hw->regs->result + (i * 4));
 
 	return 0;
-
-datbuf_fail:
-	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
-			  rctx->residue.addr);
-resbuf_fail:
-	return -ENOMEM;
 }
 
 static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,