crypto: tegra: Transfer HASH init function to crypto engine

The ahash init() function was called asynchronously to the crypto engine
queue. This could corrupt the request context if there is an ongoing
operation on the same request. Queue the init function through the crypto
engine queue as well so that this scenario is avoided.

Bug 4883011

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Change-Id: I7d5e4629fbd47215c7d7748b675030c1cb63e5ea
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3328439
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Akhil R
2024-12-11 11:46:53 +05:30
committed by Jon Hunter
parent 77c91508d5
commit be083d7bc7
4 changed files with 216 additions and 165 deletions

View File

@@ -1794,6 +1794,45 @@ static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqct
se->base + se->hw->regs->result + (i * 4));
}
/*
 * tegra_cmac_do_init() - engine-side INIT step of a CMAC request.
 *
 * Called from tegra_cmac_do_one_req() on the crypto engine thread when
 * SHA_INIT is set in rctx->task, so it is serialized with the UPDATE and
 * FINAL steps of the same request instead of racing them.
 *
 * Allocates the DMA-coherent residue buffer (sized 2 * block size —
 * presumably to hold a carried-over partial block plus new data; TODO
 * confirm) and the digest buffer, then clears the previous hardware
 * result registers.
 *
 * Return: 0 on success, -ENOMEM if either DMA allocation fails.
 */
static int tegra_cmac_do_init(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int i;
/* Fresh request: nothing hashed yet, no residue carried over. */
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
/* OR, not assign: keep SHA_UPDATE/SHA_FINAL bits queued by digest(). */
rctx->task |= SHA_FIRST;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->digest.size = crypto_ahash_digestsize(tfm);
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
return -ENOMEM;
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
&rctx->digest.addr, GFP_KERNEL);
if (!rctx->digest.buf)
goto resbuf_free;
/* Clear any previous result */
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(0, se->base + se->hw->regs->result + (i * 4));
return 0;
resbuf_free:
/* Size matches the rctx->blk_size * 2 allocation above. */
dma_free_coherent(se->dev, rctx->blk_size * 2,
rctx->residue.buf, rctx->residue.addr);
return -ENOMEM;
}
static int tegra_cmac_do_update(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
@@ -1889,10 +1928,6 @@ static int tegra_cmac_do_final(struct ahash_request *req)
NULL, 0, req->result);
}
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = se->regcfg->cfg(ctx->final_alg, 0);
if (rctx->residue.size) {
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
&rctx->datbuf.addr, GFP_KERNEL);
@@ -1904,7 +1939,9 @@ static int tegra_cmac_do_final(struct ahash_request *req)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = se->regcfg->cfg(ctx->final_alg, 0);
/* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(se, rctx);
@@ -1942,16 +1979,31 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret;
if (rctx->task & SHA_INIT) {
ret = tegra_cmac_do_init(req);
if (ret)
goto out;
rctx->task &= ~SHA_INIT;
}
if (rctx->task & SHA_UPDATE) {
ret = tegra_cmac_do_update(req);
if (ret)
goto out;
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_cmac_do_final(req);
if (ret)
goto out;
rctx->task &= ~SHA_FINAL;
}
out:
crypto_finalize_hash_request(se->engine, req, ret);
return 0;
@@ -2020,58 +2072,6 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
/*
 * NOTE(review): pre-patch implementation of tegra_cmac_init(), removed by
 * this commit. It ran synchronously in the caller's context, racing any
 * operation for the same request already queued on the crypto engine —
 * the bug this commit fixes by moving the work to tegra_cmac_do_init().
 */
static int tegra_cmac_init(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int i;
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->key_id = 0;
rctx->task = SHA_FIRST;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->digest.size = crypto_ahash_digestsize(tfm);
/* Retrieve the key slot for CMAC */
if (ctx->key_id) {
rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id);
if (!rctx->key_id)
return -ENOMEM;
}
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
&rctx->digest.addr, GFP_KERNEL);
if (!rctx->digest.buf)
goto digbuf_fail;
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
goto resbuf_fail;
/* Redundant: both fields were already zeroed above. */
rctx->residue.size = 0;
rctx->datbuf.size = 0;
/* Clear any previous result */
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(0, se->base + se->hw->regs->result + (i * 4));
return 0;
resbuf_fail:
/*
 * NOTE(review): digest.buf was allocated with rctx->digest.size but is
 * freed here with rctx->blk_size — a DMA-API size mismatch in the old
 * code (the replacement path should free with rctx->digest.size).
 */
dma_free_coherent(se->dev, rctx->blk_size, rctx->digest.buf,
rctx->digest.addr);
digbuf_fail:
if (rctx->key_id != ctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
return -ENOMEM;
}
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -2088,6 +2088,17 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}
/*
 * tegra_cmac_init() - queue the INIT step of a CMAC request.
 *
 * Only marks the request with SHA_INIT and hands it to the crypto
 * engine; the actual context setup runs in tegra_cmac_do_init() on
 * the engine thread, serialized with any pending operation for the
 * same request.
 */
static int tegra_cmac_init(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct tegra_cmac_ctx *ctx;

	ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	rctx->task = SHA_INIT;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -2127,8 +2138,7 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
tegra_cmac_init(req);
rctx->task |= SHA_UPDATE | SHA_FINAL;
rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

View File

@@ -306,6 +306,44 @@ static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_re
se->base + se->hw->regs->result + (i * 4));
}
/*
 * tegra_sha_do_init() - engine-side INIT step of a SHA/HMAC request.
 *
 * Called from tegra_sha_do_one_req() on the crypto engine thread when
 * SHA_INIT is set in rctx->task, so it cannot race the UPDATE/FINAL
 * steps of the same request. Delegates to the software fallback when
 * the tfm is configured for it.
 *
 * Allocates the DMA-coherent digest and residue buffers and snapshots
 * the algorithm and key id from the tfm context into the request
 * context.
 *
 * Return: 0 on success, -ENOMEM if either DMA allocation fails, or the
 * fallback init's result when ctx->fallback is set.
 */
static int tegra_sha_do_init(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
if (ctx->fallback)
return tegra_sha_fallback_init(req);
/* Fresh request: nothing hashed yet, no residue carried over. */
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->key_id = ctx->key_id;
/* OR, not assign: keep SHA_UPDATE/SHA_FINAL bits queued by digest(). */
rctx->task |= SHA_FIRST;
rctx->alg = ctx->alg;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->digest.size = crypto_ahash_digestsize(tfm);
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
&rctx->digest.addr, GFP_KERNEL);
if (!rctx->digest.buf)
goto digbuf_fail;
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
goto resbuf_fail;
return 0;
resbuf_fail:
/* Size matches the rctx->digest.size allocation above. */
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
digbuf_fail:
return -ENOMEM;
}
static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
@@ -443,19 +481,34 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret = 0;
if (rctx->task & SHA_INIT) {
ret = tegra_sha_do_init(req);
if (ret)
goto out;
rctx->task &= ~SHA_INIT;
}
if (rctx->task & SHA_UPDATE) {
ret = tegra_sha_do_update(req);
if (ret)
goto out;
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_sha_do_final(req);
if (ret)
goto out;
rctx->task &= ~SHA_FINAL;
if (rctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
}
out:
crypto_finalize_hash_request(se->engine, req, ret);
return 0;
@@ -539,54 +592,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
/*
 * NOTE(review): pre-patch implementation of tegra_sha_init(), removed by
 * this commit. It ran synchronously in the caller's context, racing any
 * operation for the same request already queued on the crypto engine —
 * the bug this commit fixes by moving the work to tegra_sha_do_init().
 */
static int tegra_sha_init(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
if (ctx->fallback)
return tegra_sha_fallback_init(req);
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->key_id = 0;
rctx->task = SHA_FIRST;
rctx->alg = ctx->alg;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->digest.size = crypto_ahash_digestsize(tfm);
/* Retrieve the key slot for HMAC */
if (ctx->key_id) {
rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id);
if (!rctx->key_id)
return -ENOMEM;
}
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
&rctx->digest.addr, GFP_KERNEL);
if (!rctx->digest.buf)
goto digbuf_fail;
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
goto resbuf_fail;
return 0;
resbuf_fail:
/* Freed with the same rctx->digest.size used for allocation. */
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
digbuf_fail:
if (rctx->key_id != ctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
return -ENOMEM;
}
static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
unsigned int keylen)
{
@@ -612,6 +617,17 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}
/*
 * tegra_sha_init() - queue the INIT step of a SHA/HMAC request.
 *
 * Defers all request-context setup to tegra_sha_do_init() on the
 * crypto engine thread, so init cannot corrupt the context of an
 * operation already in flight for the same request.
 */
static int tegra_sha_init(struct ahash_request *req)
{
	struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
	struct tegra_sha_ctx *ctx;

	ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	rctx->task = SHA_INIT;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_update(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
@@ -663,8 +679,7 @@ static int tegra_sha_digest(struct ahash_request *req)
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
tegra_sha_init(req);
rctx->task |= SHA_UPDATE | SHA_FINAL;
rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

View File

@@ -1169,6 +1169,58 @@ static int tegra_sm4_cmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct te
return i;
}
/*
 * tegra_sm4_cmac_do_init() - engine-side INIT step of an SM4-CMAC request.
 *
 * Called from tegra_sm4_cmac_do_one_req() on the crypto engine thread when
 * SHA_INIT is set in rctx->task, serialized with the UPDATE/FINAL steps of
 * the same request.
 *
 * Resolves the key slot, allocates the DMA-coherent digest and residue
 * buffers, and clears the previous hardware result registers.
 *
 * Return: 0 on success, -ENOMEM on key-slot lookup or DMA allocation
 * failure.
 */
static int tegra_sm4_cmac_do_init(struct ahash_request *req)
{
	struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int i;

	/* Fresh request: nothing hashed yet, no residue carried over. */
	rctx->total_len = 0;
	rctx->datbuf.size = 0;
	rctx->residue.size = 0;
	rctx->key_id = 0;
	/*
	 * OR in SHA_FIRST instead of assigning: digest() queues
	 * SHA_INIT | SHA_UPDATE | SHA_FINAL in one request, and a plain
	 * assignment here wiped the pending UPDATE/FINAL bits, completing
	 * the request without ever hashing. This also matches
	 * tegra_cmac_do_init() and tegra_sha_do_init().
	 */
	rctx->task |= SHA_FIRST;
	rctx->blk_size = crypto_ahash_blocksize(tfm);
	rctx->digest.size = crypto_ahash_digestsize(tfm);

	/* Retrieve the key slot for CMAC */
	if (ctx->key_id) {
		rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id);
		if (!rctx->key_id)
			return -ENOMEM;
	}

	rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
					      &rctx->digest.addr, GFP_KERNEL);
	if (!rctx->digest.buf)
		goto digbuf_fail;

	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
					       &rctx->residue.addr, GFP_KERNEL);
	if (!rctx->residue.buf)
		goto resbuf_fail;

	/* Clear any previous result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	return 0;

resbuf_fail:
	/*
	 * Free with the size the digest buffer was allocated with
	 * (previously freed with rctx->blk_size — a DMA-API mismatch).
	 */
	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
			  rctx->digest.addr);
digbuf_fail:
	if (rctx->key_id != ctx->key_id)
		tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);

	return -ENOMEM;
}
static int tegra_sm4_cmac_do_update(struct ahash_request *req)
{
struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req);
@@ -1290,16 +1342,31 @@ static int tegra_sm4_cmac_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret = -EINVAL;
if (rctx->task & SHA_INIT) {
ret = tegra_sm4_cmac_do_init(req);
if (ret)
goto out;
rctx->task &= ~SHA_INIT;
}
if (rctx->task & SHA_UPDATE) {
ret = tegra_sm4_cmac_do_update(req);
if (ret)
goto out;
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_sm4_cmac_do_final(req);
if (ret)
goto out;
rctx->task &= ~SHA_FINAL;
}
out:
crypto_finalize_hash_request(se->engine, req, ret);
return ret;
@@ -1342,58 +1409,6 @@ static void tegra_sm4_cmac_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
/*
 * NOTE(review): pre-patch implementation of tegra_sm4_cmac_init(), removed
 * by this commit. It ran synchronously in the caller's context, racing any
 * operation for the same request already queued on the crypto engine — the
 * bug this commit fixes by moving the work to tegra_sm4_cmac_do_init().
 */
static int tegra_sm4_cmac_init(struct ahash_request *req)
{
struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int i;
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->key_id = 0;
rctx->task = SHA_FIRST;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->digest.size = crypto_ahash_digestsize(tfm);
/* Retrieve the key slot for CMAC */
if (ctx->key_id) {
rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id);
if (!rctx->key_id)
return -ENOMEM;
}
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
&rctx->digest.addr, GFP_KERNEL);
if (!rctx->digest.buf)
goto digbuf_fail;
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
goto resbuf_fail;
/* Redundant: both fields were already zeroed above. */
rctx->residue.size = 0;
rctx->datbuf.size = 0;
/* Clear any previous result */
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(0, se->base + se->hw->regs->result + (i * 4));
return 0;
resbuf_fail:
/*
 * NOTE(review): digest.buf was allocated with rctx->digest.size but is
 * freed here with rctx->blk_size — a DMA-API size mismatch in the old
 * code, carried over into the replacement tegra_sm4_cmac_do_init().
 */
dma_free_coherent(se->dev, rctx->blk_size, rctx->digest.buf,
rctx->digest.addr);
digbuf_fail:
if (rctx->key_id != ctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
return -ENOMEM;
}
static int tegra_sm4_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -1407,6 +1422,17 @@ static int tegra_sm4_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}
/*
 * tegra_sm4_cmac_init() - queue the INIT step of an SM4-CMAC request.
 *
 * Only flags the request with SHA_INIT and transfers it to the crypto
 * engine; the real setup happens in tegra_sm4_cmac_do_init() on the
 * engine thread, serialized with any pending work for this request.
 */
static int tegra_sm4_cmac_init(struct ahash_request *req)
{
	struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct tegra_sm4_cmac_ctx *ctx;

	ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	rctx->task = SHA_INIT;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sm4_cmac_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -1446,8 +1472,7 @@ static int tegra_sm4_cmac_digest(struct ahash_request *req)
struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req);
tegra_sm4_cmac_init(req);
rctx->task |= SHA_UPDATE | SHA_FINAL;
rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

View File

@@ -532,8 +532,9 @@
#define SE_GCM_VERIFY_OK 0x5a5a5a5a
#define SHA_FIRST BIT(0)
#define SHA_UPDATE BIT(1)
#define SHA_FINAL BIT(2)
#define SHA_INIT BIT(1)
#define SHA_UPDATE BIT(2)
#define SHA_FINAL BIT(3)
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
#define CRYPTO_REGISTER(alg, x) \