crypto: tegra: Fix size of buffer allocated

Allocate the buffer based on the request size instead of using a fixed
buffer length. Operations that require a larger buffer may fail with a
fixed-size allocation. A similar patch was already applied for the AES
algorithms; apply the same fix for the HASH algorithms as well.

Bug 4908156

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Change-Id: Idd2c1ceae1a85434a5a51154a17dce8c927bb66c
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3234055
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
This commit is contained in:
Akhil R
2024-10-21 11:33:32 +05:30
committed by mobile promotions
parent 91416b264c
commit 3415677f0f
3 changed files with 49 additions and 32 deletions

View File

@@ -1572,6 +1572,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
return 0; return 0;
} }
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
return -ENOMEM;
/* Copy the previous residue first */ /* Copy the previous residue first */
if (rctx->residue.size) if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -1618,14 +1623,26 @@ static int tegra_cmac_do_final(struct ahash_request *req)
if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) { if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
return crypto_shash_tfm_digest(ctx->fallback_tfm, return crypto_shash_tfm_digest(ctx->fallback_tfm,
rctx->datbuf.buf, 0, req->result); NULL, 0, req->result);
} }
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
rctx->datbuf.size = rctx->residue.size; rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size; rctx->total_len += rctx->residue.size;
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
if (rctx->residue.size) {
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf) {
ret = -ENOMEM;
goto out_free;
}
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
/* Prepare command and submit */ /* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(se, rctx); cmdlen = tegra_cmac_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, cmdlen); ret = tegra_se_host1x_submit(se, cmdlen);
@@ -1640,8 +1657,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
writel(0, se->base + se->hw->regs->result + (i * 4)); writel(0, se->base + se->hw->regs->result + (i * 4));
out: out:
dma_free_coherent(se->dev, SE_SHA_BUFLEN, if (rctx->residue.size)
rctx->datbuf.buf, rctx->datbuf.addr); dma_free_coherent(se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2, dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
rctx->residue.buf, rctx->residue.addr); rctx->residue.buf, rctx->residue.addr);
return ret; return ret;
@@ -1757,15 +1776,9 @@ static int tegra_cmac_init(struct ahash_request *req)
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
&rctx->residue.addr, GFP_KERNEL); &rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf) if (!rctx->residue.buf)
goto resbuf_fail; return -ENOMEM;
rctx->residue.size = 0; rctx->residue.size = 0;
rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
goto datbuf_fail;
rctx->datbuf.size = 0; rctx->datbuf.size = 0;
/* Clear any previous result */ /* Clear any previous result */
@@ -1773,12 +1786,6 @@ static int tegra_cmac_init(struct ahash_request *req)
writel(0, se->base + se->hw->regs->result + (i * 4)); writel(0, se->base + se->hw->regs->result + (i * 4));
return 0; return 0;
datbuf_fail:
dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
rctx->residue.addr);
resbuf_fail:
return -ENOMEM;
} }
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,

View File

@@ -335,6 +335,11 @@ static int tegra_sha_do_update(struct ahash_request *req)
return 0; return 0;
} }
rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
return -ENOMEM;
/* Copy the previous residue first */ /* Copy the previous residue first */
if (rctx->residue.size) if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -371,6 +376,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
if (!(rctx->task & SHA_FINAL)) if (!(rctx->task & SHA_FINAL))
tegra_sha_copy_hash_result(ctx->se, rctx); tegra_sha_copy_hash_result(ctx->se, rctx);
dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
return ret; return ret;
} }
@@ -383,15 +391,24 @@ static int tegra_sha_do_final(struct ahash_request *req)
u32 *cpuvaddr = se->cmdbuf->addr; u32 *cpuvaddr = se->cmdbuf->addr;
int size, ret = 0; int size, ret = 0;
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
rctx->datbuf.size = rctx->residue.size; rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size; rctx->total_len += rctx->residue.size;
rctx->config = tegra_sha_get_config(rctx->alg) | rctx->config = tegra_sha_get_config(rctx->alg) |
SE_SHA_DST_MEMORY; SE_SHA_DST_MEMORY;
size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); if (rctx->residue.size) {
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf) {
ret = -ENOMEM;
goto out_free;
}
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, size); ret = tegra_se_host1x_submit(se, size);
if (ret) if (ret)
goto out; goto out;
@@ -400,8 +417,10 @@ static int tegra_sha_do_final(struct ahash_request *req)
memcpy(req->result, rctx->digest.buf, rctx->digest.size); memcpy(req->result, rctx->digest.buf, rctx->digest.size);
out: out:
dma_free_coherent(se->dev, SE_SHA_BUFLEN, if (rctx->residue.size)
rctx->datbuf.buf, rctx->datbuf.addr); dma_free_coherent(se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm), dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
rctx->residue.buf, rctx->residue.addr); rctx->residue.buf, rctx->residue.addr);
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
@@ -540,19 +559,11 @@ static int tegra_sha_init(struct ahash_request *req)
if (!rctx->residue.buf) if (!rctx->residue.buf)
goto resbuf_fail; goto resbuf_fail;
rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
goto datbuf_fail;
return 0; return 0;
datbuf_fail:
dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
rctx->residue.addr);
resbuf_fail: resbuf_fail:
dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf, dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->datbuf.addr); rctx->digest.addr);
digbuf_fail: digbuf_fail:
return -ENOMEM; return -ENOMEM;
} }

View File

@@ -342,7 +342,6 @@
#define SE_CRYPTO_CTR_REG_COUNT 4 #define SE_CRYPTO_CTR_REG_COUNT 4
#define SE_MAX_KEYSLOT 15 #define SE_MAX_KEYSLOT 15
#define SE_MAX_MEM_ALLOC SZ_4M #define SE_MAX_MEM_ALLOC SZ_4M
#define SE_SHA_BUFLEN 0x2000
#define SHA_FIRST BIT(0) #define SHA_FIRST BIT(0)
#define SHA_UPDATE BIT(1) #define SHA_UPDATE BIT(1)