linux-nv-oot/drivers/crypto/tegra/tegra-se-aes.c
Akhil R fc72250a45 crypto: tegra: Reserve keyslots to allocate dynamically
The HW supports storing only 15 keys at a time. This limits the number
of tfms that can work without failures. Reserve keyslots to solve this
and use the reserved ones during the encryption/decryption operation.
This allows users to retain hardware-protected keys and faster
operations when only a limited number of tfms is in use, while not
halting operations when there are more tfms.

Bug 4883011

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Change-Id: I7de17eb1acf3b5a9f55a42e9df2aa8b64e20cb6d
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3347661
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
2025-07-24 10:19:18 +00:00


// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
*/
#include <nvidia/conftest.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include "tegra-se.h"
struct tegra_aes_ctx {
#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
struct crypto_engine_ctx enginectx;
#endif
struct tegra_se *se;
u32 alg;
u32 ivsize;
u32 key1_id;
u32 key2_id;
u32 keylen;
u8 key1[AES_MAX_KEY_SIZE];
u8 key2[AES_MAX_KEY_SIZE];
};
struct tegra_aes_reqctx {
struct tegra_se_datbuf datbuf;
bool encrypt;
u32 config;
u32 crypto_config;
u32 len;
u32 *iv;
u32 key1_id;
u32 key2_id;
};
struct tegra_aead_ctx {
#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
struct crypto_engine_ctx enginectx;
#endif
struct tegra_se *se;
unsigned int authsize;
u32 alg;
u32 mac_alg;
u32 final_alg;
u32 verify_alg;
u32 keylen;
u32 key_id;
u8 key[AES_MAX_KEY_SIZE];
};
struct tegra_aead_reqctx {
struct tegra_se_datbuf inbuf;
struct tegra_se_datbuf outbuf;
struct scatterlist *src_sg;
struct scatterlist *dst_sg;
unsigned int assoclen;
unsigned int cryptlen;
unsigned int authsize;
bool encrypt;
u32 config;
u32 crypto_config;
u32 key_id;
u32 iv[4];
u8 authdata[16];
};
struct tegra_cmac_ctx {
#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
struct crypto_engine_ctx enginectx;
#endif
struct tegra_se *se;
u32 alg;
u32 final_alg;
u32 key_id;
u32 keylen;
u8 key[AES_MAX_KEY_SIZE];
struct crypto_shash *fallback_tfm;
};
struct tegra_cmac_reqctx {
struct scatterlist *src_sg;
struct tegra_se_datbuf datbuf;
struct tegra_se_datbuf digest;
struct tegra_se_datbuf residue;
unsigned int total_len;
unsigned int blk_size;
unsigned int task;
u32 crypto_config;
u32 config;
u32 key_id;
u32 *iv;
u32 result[CMAC_RESULT_REG_COUNT];
};
/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
do {
--bits;
nums += counter[bits];
counter[bits] = nums & 0xff;
nums >>= 8;
} while (bits && nums);
}
static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
unsigned int offset;
offset = req->cryptlen - ctx->ivsize;
if (rctx->encrypt)
memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
else
scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
}
static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
int num;
if (ctx->alg == SE_ALG_CBC) {
tegra_cbc_iv_copyback(req, ctx);
} else if (ctx->alg == SE_ALG_CTR) {
num = req->cryptlen / ctx->ivsize;
if (req->cryptlen % ctx->ivsize)
num++;
ctr_iv_inc(req->iv, ctx->ivsize, num);
}
}
static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
{
switch (alg) {
case SE_ALG_CMAC:
case SE_ALG_CMAC_FINAL:
case SE_ALG_GMAC:
case SE_ALG_GCM:
case SE_ALG_GCM_FINAL:
return 0;
case SE_ALG_CBC:
if (encrypt)
return SE_CRYPTO_CFG_CBC_ENCRYPT;
else
return SE_CRYPTO_CFG_CBC_DECRYPT;
case SE_ALG_ECB:
if (encrypt)
return SE_CRYPTO_CFG_ECB_ENCRYPT;
else
return SE_CRYPTO_CFG_ECB_DECRYPT;
case SE_ALG_XTS:
if (encrypt)
return SE_CRYPTO_CFG_XTS_ENCRYPT;
else
return SE_CRYPTO_CFG_XTS_DECRYPT;
case SE_ALG_CTR:
return SE_CRYPTO_CFG_CTR;
case SE_ALG_CBC_MAC:
return SE_CRYPTO_CFG_CBC_MAC;
default:
break;
}
return -EINVAL;
}
static int tegra234_aes_cfg(u32 alg, bool encrypt)
{
switch (alg) {
case SE_ALG_CBC:
case SE_ALG_ECB:
case SE_ALG_XTS:
case SE_ALG_CTR:
if (encrypt)
return SE_CFG_AES_ENCRYPT;
else
return SE_CFG_AES_DECRYPT;
case SE_ALG_GMAC:
if (encrypt)
return SE_CFG_GMAC_ENCRYPT;
else
return SE_CFG_GMAC_DECRYPT;
case SE_ALG_GCM:
if (encrypt)
return SE_CFG_GCM_ENCRYPT;
else
return SE_CFG_GCM_DECRYPT;
case SE_ALG_GCM_FINAL:
if (encrypt)
return SE_CFG_GCM_FINAL_ENCRYPT;
else
return SE_CFG_GCM_FINAL_DECRYPT;
case SE_ALG_CMAC:
return SE_CFG_CMAC | SE_AES_DST_HASH_REG;
case SE_ALG_CMAC_FINAL:
return SE_CFG_CMAC;
case SE_ALG_CBC_MAC:
return SE_AES_ENC_ALG_AES_ENC |
SE_AES_DST_HASH_REG;
}
return -EINVAL;
}
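/*
 * Build the host1x command stream for one AES cipher operation: load the
 * IV into the linear counter registers (if any), program the last-block
 * count and residue bits, set up the config registers and the in-place
 * source/destination address of the bounce buffer, start the engine and
 * finish with a syncpoint increment.
 */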
static unsigned int tegra_aes_prep_cmd(struct tegra_se *se, struct tegra_aes_reqctx *rctx)
{
unsigned int data_count, res_bits, i = 0, j;
u32 *cpuvaddr = se->cmdbuf->addr;
dma_addr_t addr = rctx->datbuf.addr;
data_count = rctx->len / AES_BLOCK_SIZE;
res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;
/*
 * The hardware processes data_count + 1 blocks.
 * Decrement data_count when there is no residue.
 */
if (!res_bits)
data_count--;
if (rctx->iv) {
cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
cpuvaddr[i++] = rctx->iv[j];
}
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
SE_LAST_BLOCK_RES_BITS(res_bits);
cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
cpuvaddr[i++] = rctx->config;
cpuvaddr[i++] = rctx->crypto_config;
/* Source address setting */
cpuvaddr[i++] = lower_32_bits(addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);
/* Destination address setting */
cpuvaddr[i++] = lower_32_bits(addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
SE_ADDR_HI_SZ(rctx->len);
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
SE_AES_OP_START;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
return i;
}
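/*
 * Handle one skcipher request on the crypto engine. The keyslot for the
 * key programmed via setkey() is resolved per request; if no keyslot is
 * held (or none could be resolved), the key bytes saved in the tfm context
 * are loaded into a reserved keyslot for the duration of this request and
 * invalidated again once the operation completes.
 */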
static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
{
struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
struct tegra_se *se = ctx->se;
unsigned int cmdlen;
int ret;
/* The key in ctx might be stored in KDS. Get its keyslot index for this request */
if (ctx->key1_id)
rctx->key1_id = tegra_key_get_idx(ctx->se, ctx->key1_id);
/* Use reserved keyslots if keyslots are unavailable */
if (!ctx->key1_id || !rctx->key1_id) {
ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
ctx->keylen, ctx->alg, &rctx->key1_id);
if (ret)
goto out;
}
rctx->key2_id = 0;
/* If there are 2 keys stored (for XTS), retrieve them both */
if (ctx->alg == SE_ALG_XTS) {
if (ctx->key2_id)
rctx->key2_id = tegra_key_get_idx(ctx->se, ctx->key2_id);
if (!ctx->key2_id || !rctx->key2_id) {
ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
ctx->keylen, ctx->alg, &rctx->key2_id);
if (ret)
goto key1_free;
}
}
/* Set buffer size as a multiple of AES_BLOCK_SIZE */
rctx->datbuf.size = ((req->cryptlen / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE;
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf) {
ret = -ENOMEM;
goto key2_free;
}
rctx->iv = (u32 *)req->iv;
rctx->len = req->cryptlen;
/* Pad input to AES Block size */
if (ctx->alg != SE_ALG_XTS) {
if (rctx->len % AES_BLOCK_SIZE)
rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
}
scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
/* Update crypto_config with Local KSLT IDs */
rctx->crypto_config |= SE_AES_KEY_INDEX(rctx->key1_id);
if (rctx->key2_id)
rctx->crypto_config |= SE_AES_KEY2_INDEX(rctx->key2_id);
/* Prepare the command and submit for execution */
cmdlen = tegra_aes_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* Copy the result */
tegra_aes_update_iv(req, ctx);
scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
/* Free the buffer */
dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
key2_free:
if (tegra_key_is_reserved(rctx->key2_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key2_id, ctx->alg);
else if (rctx->key2_id != ctx->key2_id)
tegra_key_invalidate(ctx->se, rctx->key2_id, ctx->alg);
key1_free:
if (tegra_key_is_reserved(rctx->key1_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key1_id, ctx->alg);
else if (rctx->key1_id != ctx->key1_id)
tegra_key_invalidate(ctx->se, rctx->key1_id, ctx->alg);
out:
crypto_finalize_skcipher_request(se->engine, req, ret);
return 0;
}
static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
{
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct tegra_se_alg *se_alg;
const char *algname;
int ret;
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
#else
se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher);
#endif
crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
ctx->ivsize = crypto_skcipher_ivsize(tfm);
ctx->se = se_alg->se_dev;
ctx->key1_id = 0;
ctx->key2_id = 0;
ctx->keylen = 0;
algname = crypto_tfm_alg_name(&tfm->base);
ret = se_algname_to_algid(algname);
if (ret < 0) {
dev_err(ctx->se->dev, "invalid algorithm\n");
return ret;
}
ctx->alg = ret;
#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
ctx->enginectx.op.do_one_request = tegra_aes_do_one_req;
#endif
return 0;
}
static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
{
struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
if (ctx->key1_id)
tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
if (ctx->key2_id)
tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
}
static int tegra_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
if (ret) {
ctx->keylen = keylen;
memcpy(ctx->key1, key, keylen);
}
return 0;
}
static int tegra_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 len = keylen / 2;
int ret;
ret = xts_verify_key(tfm, key, keylen);
if (ret || aes_check_keylen(len)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
ret = tegra_key_submit(ctx->se, key, len,
ctx->alg, &ctx->key1_id);
if (ret) {
ctx->keylen = len;
memcpy(ctx->key1, key, len);
}
ret = tegra_key_submit(ctx->se, key + len, len,
ctx->alg, &ctx->key2_id);
if (ret) {
ctx->keylen = len;
memcpy(ctx->key2, key + len, len);
}
return 0;
}
static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
{
int manifest;
manifest = SE_KAC_USER_NS;
switch (alg) {
case SE_ALG_CBC:
case SE_ALG_ECB:
case SE_ALG_CTR:
manifest |= SE_KAC_ENC;
break;
case SE_ALG_XTS:
manifest |= SE_KAC_XTS;
break;
case SE_ALG_GCM:
manifest |= SE_KAC_GCM;
break;
case SE_ALG_CMAC:
manifest |= SE_KAC_CMAC;
break;
case SE_ALG_CBC_MAC:
manifest |= SE_KAC_ENC;
break;
default:
return -EINVAL;
}
switch (keylen) {
case AES_KEYSIZE_128:
manifest |= SE_KAC_SIZE_128;
break;
case AES_KEYSIZE_192:
manifest |= SE_KAC_SIZE_192;
break;
case AES_KEYSIZE_256:
manifest |= SE_KAC_SIZE_256;
break;
default:
return -EINVAL;
}
return manifest;
}
static int tegra_aes_kac2_manifest(u32 user, u32 alg, u32 keylen)
{
int manifest;
manifest = SE_KAC2_USER(user) | SE_KAC2_ORIGIN_SW;
manifest |= SE_KAC2_DECRYPT_EN | SE_KAC2_ENCRYPT_EN;
manifest |= SE_KAC2_TYPE_SYM | SE_KAC2_SUBTYPE_AES;
switch (alg) {
case SE_ALG_CBC:
case SE_ALG_ECB:
case SE_ALG_CTR:
manifest |= SE_KAC2_ENC;
break;
case SE_ALG_XTS:
manifest |= SE_KAC2_XTS;
break;
case SE_ALG_GCM:
manifest |= SE_KAC2_GCM;
break;
case SE_ALG_CMAC:
manifest |= SE_KAC2_CMAC;
break;
case SE_ALG_CBC_MAC:
manifest |= SE_KAC2_ENC;
break;
default:
return -EINVAL;
}
switch (keylen) {
case AES_KEYSIZE_128:
manifest |= SE_KAC2_SIZE_128;
break;
case AES_KEYSIZE_192:
manifest |= SE_KAC2_SIZE_192;
break;
case AES_KEYSIZE_256:
manifest |= SE_KAC2_SIZE_256;
break;
default:
return -EINVAL;
}
return manifest;
}
static inline int tegra264_aes_crypto_cfg(u32 alg, bool encrypt)
{
u32 cfg = SE_AES_CRYPTO_CFG_SCC_DIS;
switch (alg) {
case SE_ALG_ECB:
case SE_ALG_SM4_ECB:
case SE_ALG_CMAC:
case SE_ALG_GMAC:
break;
case SE_ALG_CTR:
cfg |= SE_AES_IV_SEL_REG |
SE_AES_CRYPTO_CFG_CTR_CNTN(1);
break;
case SE_ALG_CBC:
case SE_ALG_CBC_MAC:
case SE_ALG_XTS:
case SE_ALG_GCM:
case SE_ALG_GCM_FINAL:
case SE_ALG_GCM_VERIFY:
cfg |= SE_AES_IV_SEL_REG;
break;
default:
return -EINVAL;
}
return cfg;
}
static int tegra264_aes_cfg(u32 alg, bool encrypt)
{
switch (alg) {
case SE_ALG_CBC:
if (encrypt)
return SE_CFG_CBC_ENCRYPT;
else
return SE_CFG_CBC_DECRYPT;
case SE_ALG_ECB:
if (encrypt)
return SE_CFG_ECB_ENCRYPT;
else
return SE_CFG_ECB_DECRYPT;
case SE_ALG_CTR:
if (encrypt)
return SE_CFG_CTR_ENCRYPT;
else
return SE_CFG_CTR_DECRYPT;
case SE_ALG_XTS:
if (encrypt)
return SE_CFG_XTS_ENCRYPT;
else
return SE_CFG_XTS_DECRYPT;
case SE_ALG_GMAC:
if (encrypt)
return SE_CFG_GMAC_ENCRYPT;
else
return SE_CFG_GMAC_DECRYPT;
case SE_ALG_GCM:
if (encrypt)
return SE_CFG_GCM_ENCRYPT;
else
return SE_CFG_GCM_DECRYPT;
case SE_ALG_GCM_FINAL:
if (encrypt)
return SE_CFG_GCM_FINAL_ENCRYPT;
else
return SE_CFG_GCM_FINAL_DECRYPT;
case SE_ALG_GCM_VERIFY:
return SE_CFG_GCM_VERIFY;
case SE_ALG_CMAC:
return SE_CFG_CMAC | SE_AES_DST_KEYTABLE;
case SE_ALG_CMAC_FINAL:
return SE_CFG_CMAC;
case SE_ALG_CBC_MAC:
return SE_CFG_CBC_MAC | SE_AES_DST_HASH_REG;
}
return -EINVAL;
}
static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
struct tegra_se *se = ctx->se;
if (ctx->alg != SE_ALG_XTS) {
if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
return -EINVAL;
}
} else if (req->cryptlen < XTS_BLOCK_SIZE) {
dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
return -EINVAL;
}
if (!req->cryptlen)
return 0;
rctx->encrypt = encrypt;
rctx->config = se->regcfg->cfg(ctx->alg, encrypt);
rctx->crypto_config = se->regcfg->crypto_cfg(ctx->alg, encrypt);
return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}
static int tegra_aes_encrypt(struct skcipher_request *req)
{
return tegra_aes_crypt(req, true);
}
static int tegra_aes_decrypt(struct skcipher_request *req)
{
return tegra_aes_crypt(req, false);
}
static struct tegra_se_alg tegra_aes_algs[] = {
{
.alg.skcipher = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_aes_cra_init,
.exit = tegra_aes_cra_exit,
.setkey = tegra_aes_setkey,
.encrypt = tegra_aes_encrypt,
.decrypt = tegra_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-tegra",
.cra_priority = 500,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_aes_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_aes_do_one_req,
#endif
}
}, {
.alg.skcipher = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_aes_cra_init,
.exit = tegra_aes_cra_exit,
.setkey = tegra_aes_setkey,
.encrypt = tegra_aes_encrypt,
.decrypt = tegra_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-tegra",
.cra_priority = 500,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_aes_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_aes_do_one_req,
#endif
}
}, {
.alg.skcipher = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_aes_cra_init,
.exit = tegra_aes_cra_exit,
.setkey = tegra_aes_setkey,
.encrypt = tegra_aes_encrypt,
.decrypt = tegra_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-tegra",
.cra_priority = 500,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct tegra_aes_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_aes_do_one_req,
#endif
}
}, {
.alg.skcipher = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_aes_cra_init,
.exit = tegra_aes_cra_exit,
.setkey = tegra_xts_setkey,
.encrypt = tegra_aes_encrypt,
.decrypt = tegra_aes_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-tegra",
.cra_priority = 500,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_aes_ctx),
.cra_alignmask = (__alignof__(u64) - 1),
.cra_module = THIS_MODULE,
},
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_aes_do_one_req,
#endif
}
},
};
struct tegra_se_regcfg tegra234_aes_regcfg = {
.cfg = tegra234_aes_cfg,
.crypto_cfg = tegra234_aes_crypto_cfg,
.manifest = tegra_aes_kac_manifest,
};
struct tegra_se_regcfg tegra264_aes_regcfg = {
.cfg = tegra264_aes_cfg,
.crypto_cfg = tegra264_aes_crypto_cfg,
.manifest = tegra_aes_kac2_manifest
};
static void tegra_aes_set_regcfg(struct tegra_se *se)
{
if (se->hw->kac_ver > 1)
se->regcfg = &tegra264_aes_regcfg;
else
se->regcfg = &tegra234_aes_regcfg;
}
static unsigned int tegra_gmac_prep_cmd(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
unsigned int data_count, res_bits, i = 0, j;
u32 *cpuvaddr = se->cmdbuf->addr;
data_count = (rctx->assoclen / AES_BLOCK_SIZE);
res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;
/*
 * The hardware processes data_count + 1 blocks.
 * Decrement data_count when there is no residue.
 */
if (!res_bits)
data_count--;
cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
cpuvaddr[i++] = rctx->iv[j];
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
SE_LAST_BLOCK_RES_BITS(res_bits);
cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
cpuvaddr[i++] = rctx->config;
cpuvaddr[i++] = rctx->crypto_config;
cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
SE_ADDR_HI_SZ(rctx->assoclen);
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
SE_AES_OP_START;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
return i;
}
static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
unsigned int data_count, res_bits, i = 0, j;
u32 *cpuvaddr = se->cmdbuf->addr, op;
data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
SE_AES_OP_LASTBUF | SE_AES_OP_START;
/*
* If there is no assoc data,
* this will be the init command
*/
if (!rctx->assoclen)
op |= SE_AES_OP_INIT;
/*
 * The hardware processes data_count + 1 blocks.
 * Decrement data_count when there is no residue.
 */
if (!res_bits)
data_count--;
cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
cpuvaddr[i++] = rctx->iv[j];
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
SE_LAST_BLOCK_RES_BITS(res_bits);
cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
cpuvaddr[i++] = rctx->config;
cpuvaddr[i++] = rctx->crypto_config;
/* Source Address */
cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
SE_ADDR_HI_SZ(rctx->cryptlen);
/* Destination Address */
cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
SE_ADDR_HI_SZ(rctx->cryptlen);
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
cpuvaddr[i++] = op;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
return i;
}
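/*
 * Build the command stream for the GCM final (or verify) operation: program
 * the AAD and message lengths in bits, reload the IV and point the engine at
 * the source and tag buffers before starting the operation.
 */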
static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
struct tegra_aead_reqctx *rctx)
{
unsigned int i = 0, j;
u32 op;
op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
SE_AES_OP_LASTBUF | SE_AES_OP_START;
/*
* Set init for zero sized vector
*/
if (!rctx->assoclen && !rctx->cryptlen)
op |= SE_AES_OP_INIT;
cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
cpuvaddr[i++] = rctx->assoclen * 8;
cpuvaddr[i++] = 0;
cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
cpuvaddr[i++] = rctx->cryptlen * 8;
cpuvaddr[i++] = 0;
cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
cpuvaddr[i++] = rctx->iv[j];
cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
cpuvaddr[i++] = rctx->config;
cpuvaddr[i++] = rctx->crypto_config;
cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
SE_ADDR_HI_SZ(rctx->authsize);
/* Destination Address */
cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
cpuvaddr[i++] = op;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
return i;
}
static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
struct tegra_se *se = ctx->se;
unsigned int cmdlen;
scatterwalk_map_and_copy(rctx->inbuf.buf,
rctx->src_sg, 0, rctx->assoclen, 0);
rctx->config = se->regcfg->cfg(ctx->mac_alg, rctx->encrypt);
rctx->crypto_config = se->regcfg->crypto_cfg(ctx->mac_alg, rctx->encrypt) |
SE_AES_KEY_INDEX(rctx->key_id);
cmdlen = tegra_gmac_prep_cmd(se, rctx);
return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
struct tegra_se *se = ctx->se;
int cmdlen, ret;
scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
rctx->assoclen, rctx->cryptlen, 0);
rctx->config = se->regcfg->cfg(ctx->alg, rctx->encrypt);
rctx->crypto_config = se->regcfg->crypto_cfg(ctx->alg, rctx->encrypt) |
SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_crypt_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
/* Copy the result */
scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
rctx->assoclen, rctx->cryptlen, 1);
return 0;
}
static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
struct tegra_se *se = ctx->se;
u32 *cpuvaddr = se->cmdbuf->addr;
int cmdlen, ret, offset;
rctx->config = se->regcfg->cfg(ctx->final_alg, rctx->encrypt);
rctx->crypto_config = se->regcfg->crypto_cfg(ctx->final_alg, rctx->encrypt) |
SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
if (rctx->encrypt) {
/* Copy the result */
offset = rctx->assoclen + rctx->cryptlen;
scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
offset, rctx->authsize, 1);
}
return 0;
}
static int tegra_gcm_hw_verify(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx, u8 *mac)
{
struct tegra_se *se = ctx->se;
u32 result, *cpuvaddr = se->cmdbuf->addr;
int cmdlen, ret;
memcpy(rctx->inbuf.buf, mac, rctx->authsize);
rctx->inbuf.size = rctx->authsize;
rctx->config = se->regcfg->cfg(ctx->verify_alg, rctx->encrypt);
rctx->crypto_config = se->regcfg->crypto_cfg(ctx->verify_alg, rctx->encrypt) |
SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
memcpy(&result, rctx->outbuf.buf, 4);
if (result != SE_GCM_VERIFY_OK)
return -EBADMSG;
return 0;
}
static int tegra_gcm_do_verify(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
struct tegra_se *se = ctx->se;
int offset, ret;
u8 mac[16];
offset = rctx->assoclen + rctx->cryptlen;
scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);
if (se->hw->support_aad_verify)
ret = tegra_gcm_hw_verify(ctx, rctx, mac);
else
ret = crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize);
if (ret)
return -EBADMSG;
return 0;
}
static inline int tegra_ccm_check_iv(const u8 *iv)
{
/* iv[0] gives value of q-1
* 2 <= q <= 8 as per NIST 800-38C notation
* 2 <= L <= 8, so 1 <= L' <= 7. as per rfc 3610 notation
*/
if (iv[0] < 1 || iv[0] > 7) {
pr_debug("ccm_check_iv failed %d\n", iv[0]);
return -EINVAL;
}
return 0;
}
static unsigned int tegra_cbcmac_prep_cmd(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
unsigned int data_count, i = 0;
u32 *cpuvaddr = se->cmdbuf->addr;
data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);
cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
cpuvaddr[i++] = rctx->config;
cpuvaddr[i++] = rctx->crypto_config;
cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
SE_ADDR_HI_SZ(rctx->inbuf.size);
cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF | SE_AES_OP_START;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
return i;
}
static unsigned int tegra_ctr_prep_cmd(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
unsigned int i = 0, j;
u32 *cpuvaddr = se->cmdbuf->addr;
cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
cpuvaddr[i++] = rctx->iv[j];
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
cpuvaddr[i++] = rctx->config;
cpuvaddr[i++] = rctx->crypto_config;
/* Source address setting */
cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
SE_ADDR_HI_SZ(rctx->inbuf.size);
/* Destination address setting */
cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
SE_ADDR_HI_SZ(rctx->inbuf.size);
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
SE_AES_OP_START;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
rctx->config, rctx->crypto_config);
return i;
}
static int tegra_ccm_do_cbcmac(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
int cmdlen;
rctx->config = se->regcfg->cfg(SE_ALG_CBC_MAC, rctx->encrypt);
rctx->crypto_config = se->regcfg->crypto_cfg(SE_ALG_CBC_MAC,
rctx->encrypt) | SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_cbcmac_prep_cmd(se, rctx);
return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
__be32 data;
memset(block, 0, csize);
block += csize;
if (csize >= 4)
csize = 4;
else if (msglen > (1 << (8 * csize)))
return -EOVERFLOW;
data = cpu_to_be32(msglen);
memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
return 0;
}
static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
{
unsigned int q, t;
u8 *q_ptr, *iv = (u8 *)rctx->iv;
memcpy(nonce, rctx->iv, 16);
/*** 1. Prepare Flags Octet ***/
/* Encode t (mac length) */
t = rctx->authsize;
nonce[0] |= (((t - 2) / 2) << 3);
/* Adata */
if (rctx->assoclen)
nonce[0] |= (1 << 6);
/*** Encode Q - message length ***/
q = iv[0] + 1;
q_ptr = nonce + 16 - q;
return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
}
static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
{
int len = 0;
/* add control info for associated data
* RFC 3610 and NIST Special Publication 800-38C
*/
if (a < 65280) {
*(__be16 *)adata = cpu_to_be16(a);
len = 2;
} else {
*(__be16 *)adata = cpu_to_be16(0xfffe);
*(__be32 *)&adata[2] = cpu_to_be32(a);
len = 6;
}
return len;
}
static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
{
unsigned int padlen = 16 - (len % 16);
u8 padding[16] = {0};
if (padlen == 16)
return 0;
memcpy(buf, padding, padlen);
return padlen;
}
static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
{
unsigned int alen = 0, offset = 0;
u8 nonce[16], adata[16];
int ret;
ret = tegra_ccm_format_nonce(rctx, nonce);
if (ret)
return ret;
memcpy(rctx->inbuf.buf, nonce, 16);
offset = 16;
if (rctx->assoclen) {
alen = tegra_ccm_format_adata(adata, rctx->assoclen);
memcpy(rctx->inbuf.buf + offset, adata, alen);
offset += alen;
scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
rctx->src_sg, 0, rctx->assoclen, 0);
offset += rctx->assoclen;
offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
rctx->assoclen + alen);
}
return offset;
}
static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
u32 result[16];
int i, ret;
/* Read and clear Result */
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
result[i] = readl(se->base + se->hw->regs->result + (i * 4));
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(0, se->base + se->hw->regs->result + (i * 4));
if (rctx->encrypt) {
memcpy(rctx->authdata, result, rctx->authsize);
} else {
ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
if (ret)
return -EBADMSG;
}
return 0;
}
static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
/* Copy result */
scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
rctx->assoclen, rctx->cryptlen, 1);
if (rctx->encrypt)
scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
rctx->assoclen + rctx->cryptlen,
rctx->authsize, 1);
else
memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);
return 0;
}
static int tegra_ccm_compute_auth(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
struct scatterlist *sg;
int offset, ret;
offset = tegra_ccm_format_blocks(rctx);
if (offset < 0)
return -EINVAL;
/* Copy plain text to the buffer */
sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;
scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
sg, rctx->assoclen,
rctx->cryptlen, 0);
offset += rctx->cryptlen;
offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
rctx->inbuf.size = offset;
ret = tegra_ccm_do_cbcmac(se, rctx);
if (ret)
return ret;
return tegra_ccm_mac_result(se, rctx);
}
static int tegra_ccm_do_ctr(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
unsigned int cmdlen, offset = 0;
struct scatterlist *sg = rctx->src_sg;
int ret;
rctx->config = se->regcfg->cfg(SE_ALG_CTR, rctx->encrypt);
rctx->crypto_config = se->regcfg->crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
SE_AES_KEY_INDEX(rctx->key_id);
/* Copy the authdata to the top of the buffer for encryption/decryption */
if (rctx->encrypt)
memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
else
scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
rctx->assoclen + rctx->cryptlen,
rctx->authsize, 0);
offset += rctx->authsize;
offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);
/* Copy the payload, if any; with no payload, proceed straight to submission */
if (rctx->cryptlen) {
scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
rctx->assoclen, rctx->cryptlen, 0);
offset += rctx->cryptlen;
offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
}
rctx->inbuf.size = offset;
/* Prepare command and submit */
cmdlen = tegra_ctr_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
return tegra_ccm_ctr_result(se, rctx);
}
static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
struct tegra_aead_reqctx *rctx)
{
u8 *iv = (u8 *)rctx->iv;
int ret, i;
memcpy(iv, req->iv, 16);
ret = tegra_ccm_check_iv(iv);
if (ret)
return ret;
/* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of
* zero to encrypt auth tag.
* req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
*/
memset(iv + 15 - iv[0], 0, iv[0] + 1);
/* Clear any previous result */
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(0, se->base + se->hw->regs->result + (i * 4));
return 0;
}
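/*
 * CCM is carried out as two passes on the engine: a CBC-MAC pass over
 * B_0 || AAD || payload to compute the tag and a CTR pass to encrypt or
 * decrypt the payload and the tag. For encryption the MAC is computed first
 * and then encrypted in the CTR pass; for decryption the CTR pass runs first
 * so that the recovered plaintext can be authenticated.
 */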
static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
{
struct aead_request *req = container_of(areq, struct aead_request, base);
struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct tegra_se *se = ctx->se;
int ret;
rctx->src_sg = req->src;
rctx->dst_sg = req->dst;
rctx->assoclen = req->assoclen;
rctx->authsize = crypto_aead_authsize(tfm);
if (rctx->encrypt)
rctx->cryptlen = req->cryptlen;
else
rctx->cryptlen = req->cryptlen - ctx->authsize;
/* The key in ctx might be stored in KDS. Get its keyslot index for this request */
if (ctx->key_id)
rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id);
/* Use reserved keyslots if keyslots are unavailable */
if (!ctx->key_id || !rctx->key_id) {
ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
ctx->keylen, ctx->alg, &rctx->key_id);
if (ret)
goto out;
}
rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
/* Allocate buffers required */
rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
&rctx->inbuf.addr, GFP_KERNEL);
if (!rctx->inbuf.buf) {
ret = -ENOMEM;
goto key_free;
}
rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
goto inbuf_free;
}
ret = tegra_ccm_crypt_init(req, se, rctx);
if (ret)
goto outbuf_free;
if (rctx->encrypt) {
/* CBC MAC Operation */
ret = tegra_ccm_compute_auth(se, rctx);
if (ret)
goto outbuf_free;
/* CTR operation */
ret = tegra_ccm_do_ctr(se, rctx);
if (ret)
goto outbuf_free;
} else {
/* CTR operation */
ret = tegra_ccm_do_ctr(se, rctx);
if (ret)
goto outbuf_free;
/* CBC MAC Operation */
ret = tegra_ccm_compute_auth(se, rctx);
if (ret)
goto outbuf_free;
}
outbuf_free:
dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
inbuf_free:
dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
key_free:
if (tegra_key_is_reserved(rctx->key_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
else if (rctx->key_id != ctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
out:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
}
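/*
 * GCM is carried out as up to three engine operations: GMAC over the
 * associated data (if any), GCM encrypt/decrypt over the payload (if any)
 * and a final operation that produces the tag. On decryption the tag is
 * either checked by the hardware (GCM_VERIFY) or compared against the
 * computed tag in software, depending on hardware support.
 */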
static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
{
struct aead_request *req = container_of(areq, struct aead_request, base);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
struct tegra_se *se = ctx->se;
int ret;
rctx->src_sg = req->src;
rctx->dst_sg = req->dst;
rctx->assoclen = req->assoclen;
rctx->authsize = crypto_aead_authsize(tfm);
if (rctx->encrypt)
rctx->cryptlen = req->cryptlen;
else
rctx->cryptlen = req->cryptlen - ctx->authsize;
/* The key in ctx might be stored in KDS. Get its keyslot index for this request */
if (ctx->key_id)
rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id);
/* Use reserved keyslots if keyslots are unavailable */
if (!ctx->key_id || !rctx->key_id) {
ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
ctx->keylen, ctx->alg, &rctx->key_id);
if (ret)
goto key_err;
}
/* Allocate buffers required */
rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
&rctx->inbuf.addr, GFP_KERNEL);
if (!rctx->inbuf.buf) {
ret = -ENOMEM;
goto out_finalize;
}
rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
goto outbuf_err;
}
memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
rctx->iv[3] = (1 << 24);
/* If there is associated data perform GMAC operation */
if (rctx->assoclen) {
ret = tegra_gcm_do_gmac(ctx, rctx);
if (ret)
goto out;
}
/* GCM Encryption/Decryption operation */
if (rctx->cryptlen) {
ret = tegra_gcm_do_crypt(ctx, rctx);
if (ret)
goto out;
}
/*
 * GCM_FINAL operation. It can be skipped on decryption when the
 * hardware verifies the MAC itself.
 */
if (rctx->encrypt || !se->hw->support_aad_verify) {
ret = tegra_gcm_do_final(ctx, rctx);
if (ret)
goto out;
}
if (!rctx->encrypt)
ret = tegra_gcm_do_verify(ctx, rctx);
out:
if (tegra_key_is_reserved(rctx->key_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
else if (rctx->key_id != ctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
key_err:
dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
outbuf_err:
dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
out_finalize:
/* Finalize the request with the result of the operation */
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
}
static int tegra_ccm_cra_init(struct crypto_aead *tfm)
{
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_alg *alg = crypto_aead_alg(tfm);
struct tegra_se_alg *se_alg;
const char *algname;
int ret;
algname = crypto_tfm_alg_name(&tfm->base);
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
#else
se_alg = container_of(alg, struct tegra_se_alg, alg.aead);
#endif
crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
ctx->keylen = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
dev_err(ctx->se->dev, "invalid algorithm\n");
return ret;
}
ctx->alg = ret;
#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
ctx->enginectx.op.do_one_request = tegra_ccm_do_one_req;
#endif
return 0;
}
static int tegra_gcm_cra_init(struct crypto_aead *tfm)
{
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_alg *alg = crypto_aead_alg(tfm);
struct tegra_se_alg *se_alg;
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
#else
se_alg = container_of(alg, struct tegra_se_alg, alg.aead);
#endif
crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
ctx->keylen = 0;
ctx->alg = SE_ALG_GCM;
ctx->final_alg = SE_ALG_GCM_FINAL;
ctx->verify_alg = SE_ALG_GCM_VERIFY;
ctx->mac_alg = SE_ALG_GMAC;
#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
ctx->enginectx.op.do_one_request = tegra_gcm_do_one_req;
#endif
return 0;
}
static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
switch (authsize) {
case 4:
case 6:
case 8:
case 10:
case 12:
case 14:
case 16:
break;
default:
return -EINVAL;
}
ctx->authsize = authsize;
return 0;
}
static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
ret = crypto_gcm_check_authsize(authsize);
if (ret)
return ret;
ctx->authsize = authsize;
return 0;
}
static void tegra_aead_cra_exit(struct crypto_aead *tfm)
{
struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);
if (ctx->key_id)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
rctx->encrypt = encrypt;
return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
}
static int tegra_aead_encrypt(struct aead_request *req)
{
return tegra_aead_crypt(req, true);
}
static int tegra_aead_decrypt(struct aead_request *req)
{
return tegra_aead_crypt(req, false);
}
static int tegra_aead_setkey(struct crypto_aead *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
if (ret) {
ctx->keylen = keylen;
memcpy(ctx->key, key, keylen);
}
return 0;
}
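/*
 * Build the command stream for one CMAC chunk. The first chunk of a request
 * loads a zero IV (and sets INIT on hardware that requires it); only the
 * final chunk sets the FINAL bit together with any residue bits, while
 * update chunks process whole blocks.
 */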
static unsigned int tegra_cmac_prep_cmd(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
unsigned int data_count, res_bits = 0, i = 0, j;
u32 *cpuvaddr = se->cmdbuf->addr, op;
data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);
op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;
if (!(rctx->task & SHA_UPDATE)) {
op |= SE_AES_OP_FINAL;
res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
}
if (!res_bits && data_count)
data_count--;
if (rctx->task & SHA_FIRST) {
/*
 * T264 needs INIT to be set for the first operation, whereas
 * T234 returns an error if INIT is set. Differentiate T264
 * from T234 based on the CFG value.
 */
if ((rctx->config & SE_AES_DST_KEYTABLE) == SE_AES_DST_KEYTABLE)
op |= SE_AES_OP_INIT;
rctx->task &= ~SHA_FIRST;
cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
/* Load 0 IV */
for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
cpuvaddr[i++] = 0;
}
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
SE_LAST_BLOCK_RES_BITS(res_bits);
cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
cpuvaddr[i++] = rctx->config;
cpuvaddr[i++] = rctx->crypto_config;
/* Source Address */
cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
SE_ADDR_HI_SZ(rctx->datbuf.size);
/* Destination Address */
cpuvaddr[i++] = rctx->digest.addr;
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
SE_ADDR_HI_SZ(rctx->digest.size));
cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
cpuvaddr[i++] = op;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "cfg %#x\n", rctx->config);
return i;
}
static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
int i;
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
}
static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
int i;
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(rctx->result[i],
se->base + se->hw->regs->result + (i * 4));
}
static int tegra_cmac_do_init(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int i;
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->task |= SHA_FIRST;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->digest.size = crypto_ahash_digestsize(tfm);
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
return -ENOMEM;
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
&rctx->digest.addr, GFP_KERNEL);
if (!rctx->digest.buf)
goto resbuf_free;
/* Clear any previous result */
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(0, se->base + se->hw->regs->result + (i * 4));
return 0;
resbuf_free:
dma_free_coherent(se->dev, rctx->blk_size * 2,
rctx->residue.buf, rctx->residue.addr);
return -ENOMEM;
}
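/*
 * Handle a CMAC update. At least one block plus any residue bytes are held
 * back in the residue buffer so that final() always has data to close the
 * CMAC with; the remaining whole blocks are submitted to the engine and the
 * intermediate result registers are saved across submissions.
 */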
static int tegra_cmac_do_update(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
unsigned int nblks, nresidue, cmdlen;
int ret;
if (!req->nbytes)
return 0;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
/*
 * Reserve the last block as residue, to be processed during final().
 */
if (!nresidue && nblks) {
nresidue += rctx->blk_size;
nblks--;
}
rctx->src_sg = req->src;
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
rctx->total_len += rctx->datbuf.size;
rctx->config = se->regcfg->cfg(ctx->alg, 0);
rctx->crypto_config = se->regcfg->crypto_cfg(ctx->alg, 0) |
SE_AES_KEY_INDEX(rctx->key_id);
/*
 * If the total data is not more than one block, keep it in the
 * residue buffer and return. The bytes will be processed in final().
 */
if (nblks < 1) {
scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
rctx->src_sg, 0, req->nbytes, 0);
rctx->residue.size += req->nbytes;
return 0;
}
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
return -ENOMEM;
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
rctx->src_sg, 0, req->nbytes - nresidue, 0);
scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
req->nbytes - nresidue, nresidue, 0);
/* Update residue value with the residue after current block */
rctx->residue.size = nresidue;
/*
 * If this is not the first task, paste the previously saved
 * intermediate result back into the registers so that it gets
 * picked up by the hardware.
 */
if (!(rctx->task & SHA_FIRST))
tegra_cmac_paste_result(ctx->se, rctx);
cmdlen = tegra_cmac_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
tegra_cmac_copy_result(ctx->se, rctx);
return ret;
}
static int tegra_cmac_do_final(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int ret = 0, i, cmdlen;
if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
return crypto_shash_tfm_digest(ctx->fallback_tfm,
NULL, 0, req->result);
}
if (rctx->residue.size) {
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf) {
ret = -ENOMEM;
goto out_free;
}
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = se->regcfg->cfg(ctx->final_alg, 0);
/*
 * If this is not the first task, paste the previously saved
 * intermediate result back into the registers so that it gets
 * picked up by the hardware.
 */
if (!(rctx->task & SHA_FIRST))
tegra_cmac_paste_result(ctx->se, rctx);
/* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
goto out;
memcpy(req->result, rctx->digest.buf, rctx->digest.size);
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(0, se->base + se->hw->regs->result + (i * 4));
if (rctx->key_id != ctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
out:
if (rctx->residue.size)
dma_free_coherent(se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
rctx->residue.buf, rctx->residue.addr);
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
return ret;
}
static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int ret;
if (rctx->task & SHA_INIT) {
ret = tegra_cmac_do_init(req);
if (ret)
goto out;
rctx->task &= ~SHA_INIT;
}
/* The key in ctx might be stored in KDS. Get its keyslot index for this request */
if (ctx->key_id)
rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id);
/* Use reserved keyslots if keyslots are unavailable */
if (!ctx->key_id || !rctx->key_id) {
ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
ctx->keylen, ctx->alg, &rctx->key_id);
if (ret)
goto out;
}
if (rctx->task & SHA_UPDATE) {
ret = tegra_cmac_do_update(req);
if (ret)
goto out;
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_cmac_do_final(req);
if (ret)
goto out;
rctx->task &= ~SHA_FINAL;
}
out:
if (tegra_key_is_reserved(rctx->key_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
else if (rctx->key_id != ctx->key_id)
tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg);
crypto_finalize_hash_request(se->engine, req, ret);
return 0;
}
static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
const char *algname)
{
unsigned int statesize;
ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback_tfm)) {
dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
ctx->fallback_tfm = NULL;
return;
}
statesize = crypto_shash_statesize(ctx->fallback_tfm);
if (statesize > sizeof(struct tegra_cmac_reqctx))
crypto_hash_alg_common(tfm)->statesize = statesize;
}
static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
{
struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
struct tegra_se_alg *se_alg;
const char *algname;
algname = crypto_tfm_alg_name(tfm);
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
#else
se_alg = container_of(alg, struct tegra_se_alg, alg.ahash);
#endif
crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
ctx->alg = SE_ALG_CMAC;
ctx->final_alg = SE_ALG_CMAC_FINAL;
#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
ctx->enginectx.op.do_one_request = tegra_cmac_do_one_req;
#endif
tegra_cmac_init_fallback(ahash_tfm, ctx, algname);
return 0;
}
static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
{
struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback_tfm)
crypto_free_shash(ctx->fallback_tfm);
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
if (ctx->fallback_tfm)
crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
if (ret) {
ctx->keylen = keylen;
memcpy(ctx->key, key, keylen);
}
return 0;
}
static int tegra_cmac_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
rctx->task = SHA_INIT;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
rctx->task |= SHA_UPDATE;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
rctx->task |= SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_export(struct ahash_request *req, void *out)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
memcpy(out, rctx, sizeof(*rctx));
return 0;
}
static int tegra_cmac_import(struct ahash_request *req, const void *in)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
memcpy(rctx, in, sizeof(*rctx));
return 0;
}
static struct tegra_se_alg tegra_aead_algs[] = {
{
.alg.aead = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_gcm_cra_init,
.exit = tegra_aead_cra_exit,
.setkey = tegra_aead_setkey,
.setauthsize = tegra_gcm_setauthsize,
.encrypt = tegra_aead_encrypt,
.decrypt = tegra_aead_decrypt,
.maxauthsize = AES_BLOCK_SIZE,
.ivsize = GCM_AES_IV_SIZE,
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-tegra",
.cra_priority = 500,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct tegra_aead_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_gcm_do_one_req,
#endif
}
}, {
.alg.aead = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_ccm_cra_init,
.exit = tegra_aead_cra_exit,
.setkey = tegra_aead_setkey,
.setauthsize = tegra_ccm_setauthsize,
.encrypt = tegra_aead_encrypt,
.decrypt = tegra_aead_decrypt,
.maxauthsize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.chunksize = AES_BLOCK_SIZE,
.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "ccm-aes-tegra",
.cra_priority = 500,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct tegra_aead_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_ccm_do_one_req,
#endif
}
}
};
static struct tegra_se_alg tegra_cmac_algs[] = {
{
.alg.ahash = {
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
.base = {
#endif
.init = tegra_cmac_init,
.setkey = tegra_cmac_setkey,
.update = tegra_cmac_update,
.final = tegra_cmac_final,
.finup = tegra_cmac_finup,
.digest = tegra_cmac_digest,
.export = tegra_cmac_export,
.import = tegra_cmac_import,
.halg.digestsize = AES_BLOCK_SIZE,
.halg.statesize = sizeof(struct tegra_cmac_reqctx),
.halg.base = {
.cra_name = "cmac(aes)",
.cra_driver_name = "cmac-aes-tegra",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_cmac_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_cmac_cra_init,
.cra_exit = tegra_cmac_cra_exit,
},
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
},
.op.do_one_request = tegra_cmac_do_one_req,
#endif
}
}
};
#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
int tegra_init_aes(struct tegra_se *se)
{
struct aead_engine_alg *aead_alg;
struct ahash_engine_alg *ahash_alg;
struct skcipher_engine_alg *sk_alg;
int i, ret;
tegra_aes_set_regcfg(se);
for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
sk_alg = &tegra_aes_algs[i].alg.skcipher;
tegra_aes_algs[i].se_dev = se;
ret = CRYPTO_REGISTER(skcipher, sk_alg);
if (ret) {
dev_err(se->dev, "failed to register %s\n",
sk_alg->base.base.cra_name);
goto err_aes;
}
}
for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
aead_alg = &tegra_aead_algs[i].alg.aead;
tegra_aead_algs[i].se_dev = se;
ret = CRYPTO_REGISTER(aead, aead_alg);
if (ret) {
dev_err(se->dev, "failed to register %s\n",
aead_alg->base.base.cra_name);
goto err_aead;
}
}
for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
ahash_alg = &tegra_cmac_algs[i].alg.ahash;
tegra_cmac_algs[i].se_dev = se;
ret = CRYPTO_REGISTER(ahash, ahash_alg);
if (ret) {
dev_err(se->dev, "failed to register %s\n",
ahash_alg->base.halg.base.cra_name);
goto err_cmac;
}
}
return 0;
err_cmac:
for (--i; i >= 0; i--)
CRYPTO_UNREGISTER(ahash, &tegra_cmac_algs[i].alg.ahash);
i = ARRAY_SIZE(tegra_aead_algs);
err_aead:
for (--i; i >= 0; i--)
CRYPTO_UNREGISTER(aead, &tegra_aead_algs[i].alg.aead);
i = ARRAY_SIZE(tegra_aes_algs);
err_aes:
for (--i; i >= 0; i--)
CRYPTO_UNREGISTER(skcipher, &tegra_aes_algs[i].alg.skcipher);
return ret;
}
#else
int tegra_init_aes(struct tegra_se *se)
{
struct aead_alg *aead_alg;
struct ahash_alg *ahash_alg;
struct skcipher_alg *sk_alg;
int i, ret;
tegra_aes_set_regcfg(se);
for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
sk_alg = &tegra_aes_algs[i].alg.skcipher;
tegra_aes_algs[i].se_dev = se;
ret = CRYPTO_REGISTER(skcipher, sk_alg);
if (ret) {
dev_err(se->dev, "failed to register %s\n",
sk_alg->base.cra_name);
goto err_aes;
}
}
for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
aead_alg = &tegra_aead_algs[i].alg.aead;
tegra_aead_algs[i].se_dev = se;
ret = CRYPTO_REGISTER(aead, aead_alg);
if (ret) {
dev_err(se->dev, "failed to register %s\n",
aead_alg->base.cra_name);
goto err_aead;
}
}
for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
ahash_alg = &tegra_cmac_algs[i].alg.ahash;
tegra_cmac_algs[i].se_dev = se;
ret = CRYPTO_REGISTER(ahash, ahash_alg);
if (ret) {
dev_err(se->dev, "failed to register %s\n",
ahash_alg->halg.base.cra_name);
goto err_cmac;
}
}
return 0;
err_cmac:
for (--i; i >= 0; i--)
CRYPTO_UNREGISTER(ahash, &tegra_cmac_algs[i].alg.ahash);
i = ARRAY_SIZE(tegra_aead_algs);
err_aead:
for (--i; i >= 0; i--)
CRYPTO_UNREGISTER(aead, &tegra_aead_algs[i].alg.aead);
i = ARRAY_SIZE(tegra_aes_algs);
err_aes:
for (--i; i >= 0; i--)
CRYPTO_UNREGISTER(skcipher, &tegra_aes_algs[i].alg.skcipher);
return ret;
}
#endif
void tegra_deinit_aes(struct tegra_se *se)
{
int i;
for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
CRYPTO_UNREGISTER(skcipher, &tegra_aes_algs[i].alg.skcipher);
for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
CRYPTO_UNREGISTER(aead, &tegra_aead_algs[i].alg.aead);
for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
CRYPTO_UNREGISTER(ahash, &tegra_cmac_algs[i].alg.ahash);
}