Files
linux-nv-oot/drivers/crypto/tegra/tegra-se-hash.c
Akhil R f725f36d1e crypto: tegra: Use separate buffer for setkey
The buffer used to send commands to host1x was shared by all tasks
in the engine. This causes a problem with the setkey() function, as it
is called asynchronously to the crypto engine queue. Modifying the
shared cmdbuf in setkey() can corrupt an ongoing host1x task and, in
turn, break the encryption/decryption operation. Hence, use a separate
cmdbuf for setkey().

Bug 4883011

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Change-Id: Ia6a5376e2c8dfc98e11414666ebf9ade41f10fee
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3328437
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
2025-07-24 10:19:18 +00:00
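
To make the fix concrete, below is a minimal sketch of what a dedicated setkey command buffer can look like. The allocation helpers tegra_se_alloc_cmdbuf()/tegra_se_free_cmdbuf(), the tegra_se_prep_keyslot_cmd() builder and the function name itself are placeholders for illustration, not the driver's actual API; only tegra_se_host1x_submit() matches a call used in the file below.

/*
 * Illustrative sketch only: give key programming its own cmdbuf so that
 * setkey(), which runs asynchronously to the crypto engine queue, never
 * rewrites the shared se->cmdbuf of an in-flight host1x job. The helpers
 * tegra_se_alloc_cmdbuf(), tegra_se_free_cmdbuf() and
 * tegra_se_prep_keyslot_cmd() are assumed names, not the real API.
 */
static int tegra_se_program_key(struct tegra_se *se, const u8 *key,
                                unsigned int keylen, u32 keyslot)
{
        struct tegra_se_cmdbuf *keybuf;
        unsigned int cmdlen;
        int ret;

        /* Private buffer, independent of se->cmdbuf used by queued requests */
        keybuf = tegra_se_alloc_cmdbuf(se->dev, SZ_4K);
        if (!keybuf)
                return -ENOMEM;

        /* Build the keyslot-load command words into the private buffer */
        cmdlen = tegra_se_prep_keyslot_cmd(se, keybuf->addr, key, keylen, keyslot);

        ret = tegra_se_host1x_submit(se, keybuf, cmdlen);

        tegra_se_free_cmdbuf(keybuf);
        return ret;
}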


// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver to handle HASH algorithms using NVIDIA Security Engine.
*/
#include <nvidia/conftest.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
#include <crypto/sm3.h>
#include <crypto/internal/des.h>
#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include "tegra-se.h"
struct tegra_sha_ctx {
#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
struct crypto_engine_ctx enginectx;
#endif
struct tegra_se *se;
unsigned int alg;
bool fallback;
u32 key_id;
struct crypto_ahash *fallback_tfm;
};
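/* Per-request context; also serves as the export()/import() state blob */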
struct tegra_sha_reqctx {
struct scatterlist *src_sg;
struct tegra_se_datbuf datbuf;
struct tegra_se_datbuf residue;
struct tegra_se_datbuf digest;
unsigned int alg;
unsigned int config;
unsigned int total_len;
unsigned int blk_size;
unsigned int task;
u32 key_id;
u32 result[HASH_RESULT_REG_COUNT];
struct ahash_request fallback_req;
};
static int tegra_sha_get_config(u32 alg)
{
int cfg = 0;
switch (alg) {
case SE_ALG_SHA1:
cfg |= SE_SHA_ENC_ALG_SHA;
cfg |= SE_SHA_ENC_MODE_SHA1;
break;
case SE_ALG_HMAC_SHA224:
cfg |= SE_SHA_ENC_ALG_HMAC;
fallthrough;
case SE_ALG_SHA224:
cfg |= SE_SHA_ENC_ALG_SHA;
cfg |= SE_SHA_ENC_MODE_SHA224;
break;
case SE_ALG_HMAC_SHA256:
cfg |= SE_SHA_ENC_ALG_HMAC;
fallthrough;
case SE_ALG_SHA256:
cfg |= SE_SHA_ENC_ALG_SHA;
cfg |= SE_SHA_ENC_MODE_SHA256;
break;
case SE_ALG_HMAC_SHA384:
cfg |= SE_SHA_ENC_ALG_HMAC;
fallthrough;
case SE_ALG_SHA384:
cfg |= SE_SHA_ENC_ALG_SHA;
cfg |= SE_SHA_ENC_MODE_SHA384;
break;
case SE_ALG_HMAC_SHA512:
cfg |= SE_SHA_ENC_ALG_HMAC;
fallthrough;
case SE_ALG_SHA512:
cfg |= SE_SHA_ENC_ALG_SHA;
cfg |= SE_SHA_ENC_MODE_SHA512;
break;
case SE_ALG_SHA3_224:
cfg |= SE_SHA_ENC_ALG_SHA;
cfg |= SE_SHA_ENC_MODE_SHA3_224;
break;
case SE_ALG_SHA3_256:
cfg |= SE_SHA_ENC_ALG_SHA;
cfg |= SE_SHA_ENC_MODE_SHA3_256;
break;
case SE_ALG_SHA3_384:
cfg |= SE_SHA_ENC_ALG_SHA;
cfg |= SE_SHA_ENC_MODE_SHA3_384;
break;
case SE_ALG_SHA3_512:
cfg |= SE_SHA_ENC_ALG_SHA;
cfg |= SE_SHA_ENC_MODE_SHA3_512;
break;
case SE_ALG_SM3_256:
cfg |= SE_SHA_ENC_ALG_SM3;
cfg |= SE_SHA_ENC_MODE_SM3_256;
break;
default:
return -EINVAL;
}
return cfg;
}
static int tegra_sha_fallback_init(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
rctx->fallback_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_ahash_init(&rctx->fallback_req);
}
static int tegra_sha_fallback_update(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
rctx->fallback_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.nbytes = req->nbytes;
rctx->fallback_req.src = req->src;
return crypto_ahash_update(&rctx->fallback_req);
}
static int tegra_sha_fallback_final(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
rctx->fallback_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.result = req->result;
return crypto_ahash_final(&rctx->fallback_req);
}
static int tegra_sha_fallback_finup(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
rctx->fallback_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.nbytes = req->nbytes;
rctx->fallback_req.src = req->src;
rctx->fallback_req.result = req->result;
return crypto_ahash_finup(&rctx->fallback_req);
}
static int tegra_sha_fallback_digest(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
rctx->fallback_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.nbytes = req->nbytes;
rctx->fallback_req.src = req->src;
rctx->fallback_req.result = req->result;
return crypto_ahash_digest(&rctx->fallback_req);
}
static int tegra_sha_fallback_import(struct ahash_request *req, const void *in)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
rctx->fallback_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_ahash_import(&rctx->fallback_req, in);
}
static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
rctx->fallback_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_ahash_export(&rctx->fallback_req, out);
}
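/*
* Build the host1x command stream for one SHA/HMAC operation: program the
* message length/left registers, the config, the input and digest buffer
* addresses, an optional key slot and the operation trigger, followed by a
* syncpoint increment. Returns the number of words written to cpuvaddr.
*/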
static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
struct tegra_sha_reqctx *rctx)
{
u64 msg_len, msg_left;
int i = 0;
msg_len = rctx->total_len * 8;
msg_left = rctx->datbuf.size * 8;
/*
* If the IN_ADDR_HI_0.SZ sent to the HASH engine is larger than
* SHA_MSG_LEFT_[0-3], HW treats it as the last buffer and processes
* the data. Therefore, add an extra byte to msg_left if this is not
* the last buffer.
*/
if (rctx->task & SHA_UPDATE) {
msg_left += 8;
msg_len += 8;
}
cpuvaddr[i++] = host1x_opcode_setpayload(8);
cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_MSG_LENGTH);
cpuvaddr[i++] = lower_32_bits(msg_len);
cpuvaddr[i++] = upper_32_bits(msg_len);
cpuvaddr[i++] = 0;
cpuvaddr[i++] = 0;
cpuvaddr[i++] = lower_32_bits(msg_left);
cpuvaddr[i++] = upper_32_bits(msg_left);
cpuvaddr[i++] = 0;
cpuvaddr[i++] = 0;
cpuvaddr[i++] = host1x_opcode_setpayload(6);
cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG);
cpuvaddr[i++] = rctx->config;
if (rctx->task & SHA_FIRST) {
cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT;
rctx->task &= ~SHA_FIRST;
} else {
cpuvaddr[i++] = 0;
}
cpuvaddr[i++] = rctx->datbuf.addr;
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
SE_ADDR_HI_SZ(rctx->datbuf.size));
cpuvaddr[i++] = rctx->digest.addr;
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
SE_ADDR_HI_SZ(rctx->digest.size));
if (rctx->key_id) {
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG);
cpuvaddr[i++] = SE_AES_KEY_INDEX(rctx->key_id);
}
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION);
cpuvaddr[i++] = SE_SHA_OP_WRSTALL |
SE_SHA_OP_START |
SE_SHA_OP_LASTBUF;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x",
msg_len, msg_left, rctx->config);
return i;
}
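/* Save and restore the intermediate hash result registers across requests */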
static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
{
int i;
for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
}
static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
{
int i;
for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
writel(rctx->result[i],
se->base + se->hw->regs->result + (i * 4));
}
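/*
* Process all complete blocks of the request through the engine, always
* holding back at least one block as residue so that final() has data
* left to close the hash with.
*/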
static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct tegra_se *se = ctx->se;
unsigned int nblks, nresidue, cmdlen;
int ret;
u32 *cpuvaddr = se->cmdbuf->addr;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
/*
* If nbytes is a multiple of the block size and there is no residue,
* then reserve the last block as residue so that it can be processed
* in final().
*/
if (!nresidue && nblks) {
nresidue = rctx->blk_size;
nblks--;
}
rctx->src_sg = req->src;
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
rctx->total_len += rctx->datbuf.size;
/*
* If nbytes is less than a block size, copy it to the residue buffer
* and return. The bytes will be processed in final().
*/
if (nblks < 1) {
scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
rctx->src_sg, 0, req->nbytes, 0);
rctx->residue.size += req->nbytes;
return 0;
}
rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
return -ENOMEM;
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
rctx->src_sg, 0, req->nbytes - nresidue, 0);
scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
req->nbytes - nresidue, nresidue, 0);
/* Update residue value with the residue after current block */
rctx->residue.size = nresidue;
rctx->config = tegra_sha_get_config(rctx->alg) |
SE_SHA_DST_HASH_REG;
/*
* If this is not the first 'update' call, write the previously saved
* intermediate results back to the registers so that they get picked
* up. This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FIRST))
tegra_sha_paste_hash_result(se, rctx);
cmdlen = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/*
* If this is not the final update, copy the intermediate results
* from the registers so that they can be used in the next 'update'
* call. This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FINAL))
tegra_sha_copy_hash_result(se, rctx);
dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
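/* Hash the remaining residue, copy out the digest and release the DMA buffers */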
static int tegra_sha_do_final(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
u32 *cpuvaddr = se->cmdbuf->addr;
int cmdlen, ret = 0;
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
SE_SHA_DST_MEMORY;
if (rctx->residue.size) {
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf) {
ret = -ENOMEM;
goto out_free;
}
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
cmdlen = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
goto out;
/* Copy result */
memcpy(req->result, rctx->digest.buf, rctx->digest.size);
out:
if (rctx->residue.size)
dma_free_coherent(se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
rctx->residue.buf, rctx->residue.addr);
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
return ret;
}
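/* crypto_engine callback: run the queued update/final work for one request */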
static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int ret = 0;
if (rctx->task & SHA_UPDATE) {
ret = tegra_sha_do_update(req);
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_sha_do_final(req);
rctx->task &= ~SHA_FINAL;
if (rctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
}
crypto_finalize_hash_request(se->engine, req, ret);
return 0;
}
static void tegra_sha_init_fallback(struct crypto_ahash *tfm, struct tegra_sha_ctx *ctx,
const char *algname)
{
unsigned int statesize;
ctx->fallback_tfm = crypto_alloc_ahash(algname, 0, CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback_tfm)) {
dev_warn(ctx->se->dev,
"failed to allocate fallback for %s\n", algname);
ctx->fallback_tfm = NULL;
return;
}
statesize = crypto_ahash_statesize(ctx->fallback_tfm);
if (statesize > sizeof(struct tegra_sha_reqctx))
crypto_hash_alg_common(tfm)->statesize = statesize;
/* Update reqsize if fallback is added */
crypto_ahash_set_reqsize(tfm,
sizeof(struct tegra_sha_reqctx) +
crypto_ahash_reqsize(ctx->fallback_tfm));
}
static int tegra_sha_cra_init(struct crypto_tfm *tfm)
{
struct tegra_sha_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
struct tegra_se_alg *se_alg;
const char *algname;
int ret;
algname = crypto_tfm_alg_name(tfm);
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
#else
se_alg = container_of(alg, struct tegra_se_alg, alg.ahash);
#endif
crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_sha_reqctx));
ctx->se = se_alg->se_dev;
ctx->fallback = false;
ctx->key_id = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
dev_err(ctx->se->dev, "invalid algorithm\n");
return ret;
}
if (se_alg->alg_base)
tegra_sha_init_fallback(ahash_tfm, ctx, algname);
ctx->alg = ret;
#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
ctx->enginectx.op.do_one_request = tegra_sha_do_one_req;
#endif
return 0;
}
static void tegra_sha_cra_exit(struct crypto_tfm *tfm)
{
struct tegra_sha_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback_tfm)
crypto_free_ahash(ctx->fallback_tfm);
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
static int tegra_sha_init(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
if (ctx->fallback)
return tegra_sha_fallback_init(req);
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->key_id = 0;
rctx->task = SHA_FIRST;
rctx->alg = ctx->alg;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->digest.size = crypto_ahash_digestsize(tfm);
/* Retrieve the key slot for HMAC */
if (ctx->key_id) {
rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id);
if (!rctx->key_id)
return -ENOMEM;
}
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
&rctx->digest.addr, GFP_KERNEL);
if (!rctx->digest.buf)
goto digbuf_fail;
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
goto resbuf_fail;
return 0;
resbuf_fail:
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
digbuf_fail:
if (rctx->key_id != ctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
return -ENOMEM;
}
static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
unsigned int keylen)
{
if (!ctx->fallback_tfm) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
ctx->fallback = true;
return crypto_ahash_setkey(ctx->fallback_tfm, key, keylen);
}
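/*
* AES-sized keys (128/192/256 bit) are loaded into a hardware keyslot;
* any other key length is handled by the software fallback.
*/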
static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
if (aes_check_keylen(keylen))
return tegra_hmac_fallback_setkey(ctx, key, keylen);
ctx->fallback = false;
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}
static int tegra_sha_update(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
if (ctx->fallback)
return tegra_sha_fallback_update(req);
rctx->task |= SHA_UPDATE;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_final(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
if (ctx->fallback)
return tegra_sha_fallback_final(req);
rctx->task |= SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_finup(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
if (ctx->fallback)
return tegra_sha_fallback_finup(req);
rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_digest(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
ret = tegra_sha_init(req);
if (ret)
return ret;
rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_export(struct ahash_request *req, void *out)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
if (ctx->fallback)
return tegra_sha_fallback_export(req, out);
memcpy(out, rctx, sizeof(*rctx));
return 0;
}
static int tegra_sha_import(struct ahash_request *req, const void *in)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
if (ctx->fallback)
return tegra_sha_fallback_import(req, in);
memcpy(rctx, in, sizeof(*rctx));
return 0;
}
static struct tegra_se_alg tegra_hash_algs[] = {
{
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "tegra-se-sha1",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sha224",
.cra_driver_name = "tegra-se-sha224",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "tegra-se-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sha384",
.cra_driver_name = "tegra-se-sha384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sha512",
.cra_driver_name = "tegra-se-sha512",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SHA3_224_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sha3-224",
.cra_driver_name = "tegra-se-sha3-224",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA3_224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SHA3_256_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sha3-256",
.cra_driver_name = "tegra-se-sha3-256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA3_256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SHA3_384_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sha3-384",
.cra_driver_name = "tegra-se-sha3-384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SHA3_512_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sha3-512",
.cra_driver_name = "tegra-se-sha3-512",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA3_512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg_base = "sha224",
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.setkey = tegra_hmac_setkey,
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "tegra-se-hmac-sha224",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg_base = "sha256",
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.setkey = tegra_hmac_setkey,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "tegra-se-hmac-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg_base = "sha384",
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.setkey = tegra_hmac_setkey,
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "tegra-se-hmac-sha384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}, {
.alg_base = "sha512",
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.setkey = tegra_hmac_setkey,
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "tegra-se-hmac-sha512",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
}
};
static struct tegra_se_alg tegra_sm3_algs[] = {
{
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_sha_init,
.update = tegra_sha_update,
.final = tegra_sha_final,
.finup = tegra_sha_finup,
.digest = tegra_sha_digest,
.export = tegra_sha_export,
.import = tegra_sha_import,
.halg.digestsize = SM3_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_sha_reqctx),
.halg.base = {
.cra_name = "sm3",
.cra_driver_name = "tegra-se-sm3",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_sha_cra_init,
.cra_exit = tegra_sha_cra_exit,
}
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_sha_do_one_req,
#endif
}
},
};
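/* Build the key access control (KAC) manifest used when loading HMAC keys */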
static int tegra_hash_kac_manifest(u32 user, u32 alg, u32 keylen)
{
int manifest;
manifest = SE_KAC_USER_NS;
switch (alg) {
case SE_ALG_HMAC_SHA224:
case SE_ALG_HMAC_SHA256:
case SE_ALG_HMAC_SHA384:
case SE_ALG_HMAC_SHA512:
manifest |= SE_KAC_HMAC;
break;
default:
return -EINVAL;
}
switch (keylen) {
case AES_KEYSIZE_128:
manifest |= SE_KAC_SIZE_128;
break;
case AES_KEYSIZE_192:
manifest |= SE_KAC_SIZE_192;
break;
case AES_KEYSIZE_256:
default:
manifest |= SE_KAC_SIZE_256;
break;
}
return manifest;
}
static int tegra_hash_kac2_manifest(u32 user, u32 alg, u32 keylen)
{
int manifest;
manifest = SE_KAC2_USER(user) | SE_KAC2_ORIGIN_SW;
manifest |= SE_KAC2_DECRYPT_EN | SE_KAC2_ENCRYPT_EN;
manifest |= SE_KAC2_SUBTYPE_SHA | SE_KAC2_TYPE_SYM;
switch (alg) {
case SE_ALG_HMAC_SHA224:
case SE_ALG_HMAC_SHA256:
case SE_ALG_HMAC_SHA384:
case SE_ALG_HMAC_SHA512:
manifest |= SE_KAC2_HMAC;
break;
default:
return -EINVAL;
}
switch (keylen) {
case AES_KEYSIZE_128:
manifest |= SE_KAC2_SIZE_128;
break;
case AES_KEYSIZE_192:
manifest |= SE_KAC2_SIZE_192;
break;
case AES_KEYSIZE_256:
default:
manifest |= SE_KAC2_SIZE_256;
break;
}
return manifest;
}
struct tegra_se_regcfg tegra234_hash_regcfg = {
.manifest = tegra_hash_kac_manifest,
};
struct tegra_se_regcfg tegra264_hash_regcfg = {
.manifest = tegra_hash_kac2_manifest,
};
static void tegra_hash_set_regcfg(struct tegra_se *se)
{
if (se->hw->kac_ver > 1)
se->regcfg = &tegra264_hash_regcfg;
else
se->regcfg = &tegra234_hash_regcfg;
}
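/* Register the SHA/HMAC (and, where supported, SM3) ahash algorithms */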
int tegra_init_hash(struct tegra_se *se)
{
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
struct ahash_engine_alg *alg;
#else
struct ahash_alg *alg;
#endif
int i, ret;
tegra_hash_set_regcfg(se);
for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++) {
tegra_hash_algs[i].se_dev = se;
alg = &tegra_hash_algs[i].alg.ahash;
ret = CRYPTO_REGISTER(ahash, alg);
if (ret) {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
dev_err(se->dev, "failed to register %s\n",
alg->base.halg.base.cra_name);
#else
dev_err(se->dev, "failed to register %s\n",
alg->halg.base.cra_name);
#endif
goto sha_err;
}
}
if (!se->hw->support_sm_alg)
return 0;
for (i = 0; i < ARRAY_SIZE(tegra_sm3_algs); i++) {
tegra_sm3_algs[i].se_dev = se;
alg = &tegra_sm3_algs[i].alg.ahash;
ret = CRYPTO_REGISTER(ahash, alg);
if (ret) {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
dev_err(se->dev, "failed to register %s\n",
alg->base.halg.base.cra_name);
#else
dev_err(se->dev, "failed to register %s\n",
alg->halg.base.cra_name);
#endif
goto sm3_err;
}
}
dev_info(se->dev, "registered HASH algorithms\n");
return 0;
sm3_err:
for (--i; i >= 0; i--)
CRYPTO_UNREGISTER(ahash, &tegra_sm3_algs[i].alg.ahash);
i = ARRAY_SIZE(tegra_hash_algs);
sha_err:
for (--i; i >= 0; i--)
CRYPTO_UNREGISTER(ahash, &tegra_hash_algs[i].alg.ahash);
return ret;
}
void tegra_deinit_hash(struct tegra_se *se)
{
int i;
for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++)
CRYPTO_UNREGISTER(ahash, &tegra_hash_algs[i].alg.ahash);
if (!se->hw->support_sm_alg)
return;
for (i = 0; i < ARRAY_SIZE(tegra_sm3_algs); i++)
CRYPTO_UNREGISTER(ahash, &tegra_sm3_algs[i].alg.ahash);
}