diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index f35a7a1d..bf6b149c 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -5,9 +5,7 @@ ifdef CONFIG_TEGRA_HOST1X obj-m += tegra-hv-vse-safety.o obj-m += tegra-nvvse-cryptodev.o ifdef CONFIG_CRYPTO_ENGINE -ifndef CONFIG_SKIP_CRYPTO obj-m += tegra/ endif endif -endif obj-m += tegra-se-nvrng.o diff --git a/drivers/crypto/tegra/Makefile b/drivers/crypto/tegra/Makefile index 28cbedb6..0586fe4f 100644 --- a/drivers/crypto/tegra/Makefile +++ b/drivers/crypto/tegra/Makefile @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. ccflags-y += -I$(srctree.nvidia)/drivers/gpu/host1x/include diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index 532b99c4..a8d17199 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -4,11 +4,8 @@ * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine. */ -#include - #include #include -#include #include #include #include @@ -18,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -25,12 +23,9 @@ #include "tegra-se.h" struct tegra_aes_ctx { -#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX struct crypto_engine_ctx enginectx; -#endif struct tegra_se *se; u32 alg; - u32 keylen; u32 ivsize; u32 key1_id; u32 key2_id; @@ -38,16 +33,15 @@ struct tegra_aes_ctx { struct tegra_aes_reqctx { struct tegra_se_datbuf datbuf; - struct tegra_se *se; bool encrypt; - u32 cfg; - u32 crypto_cfg; + u32 config; + u32 crypto_config; + u32 len; + u32 *iv; }; struct tegra_aead_ctx { -#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX struct crypto_engine_ctx enginectx; -#endif struct tegra_se *se; unsigned int authsize; u32 alg; @@ -72,13 +66,11 @@ struct tegra_aead_reqctx { }; struct tegra_cmac_ctx { -#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX struct crypto_engine_ctx enginectx; -#endif - struct crypto_shash *fallback_tfm; struct tegra_se *se; unsigned int alg; u32 key_id; + struct crypto_shash *fallback_tfm; }; struct tegra_cmac_reqctx { @@ -92,6 +84,7 @@ struct tegra_cmac_reqctx { u32 config; u32 key_id; u32 *iv; + u32 result[CMAC_RESULT_REG_COUNT]; }; /* increment counter (128-bit int) */ @@ -108,29 +101,28 @@ static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums) static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx) { struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req); - unsigned int off; + unsigned int offset; - off = req->cryptlen - ctx->ivsize; + offset = req->cryptlen - ctx->ivsize; if (rctx->encrypt) - memcpy(req->iv, rctx->datbuf.buf + off, ctx->ivsize); + memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize); else - sg_pcopy_to_buffer(req->src, sg_nents(req->src), - req->iv, ctx->ivsize, off); + scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0); } static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx) { - int sz; + int num; if (ctx->alg == SE_ALG_CBC) { tegra_cbc_iv_copyback(req, ctx); } else if (ctx->alg == SE_ALG_CTR) { - sz = req->cryptlen / ctx->ivsize; + num = req->cryptlen / ctx->ivsize; if (req->cryptlen % ctx->ivsize) - sz++; + num++; - ctr_iv_inc(req->iv, ctx->ivsize, sz); + ctr_iv_inc(req->iv, ctx->ivsize, num); } } @@ -160,8 +152,6 @@ static int 
tegra234_aes_crypto_cfg(u32 alg, bool encrypt) case SE_ALG_CTR: return SE_CRYPTO_CFG_CTR; - case SE_ALG_OFB: - return SE_CRYPTO_CFG_OFB; case SE_ALG_CBC_MAC: return SE_CRYPTO_CFG_CBC_MAC; @@ -179,7 +169,6 @@ static int tegra234_aes_cfg(u32 alg, bool encrypt) case SE_ALG_ECB: case SE_ALG_XTS: case SE_ALG_CTR: - case SE_ALG_OFB: if (encrypt) return SE_CFG_AES_ENCRYPT; else @@ -209,154 +198,104 @@ static int tegra234_aes_cfg(u32 alg, bool encrypt) case SE_ALG_CBC_MAC: return SE_AES_ENC_ALG_AES_ENC | SE_AES_DST_HASH_REG; - } return -EINVAL; - } -static int tegra_aes_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, u32 *iv, - int len, dma_addr_t addr, int cfg, int cryp_cfg) +static unsigned int tegra_aes_prep_cmd(struct tegra_se *se, struct tegra_aes_reqctx *rctx) { - int i = 0, j; + unsigned int data_count, res_bits, i = 0, j; + u32 *cpuvaddr = se->cmdbuf->addr; + dma_addr_t addr = rctx->datbuf.addr; - if (iv) { + data_count = rctx->len / AES_BLOCK_SIZE; + res_bits = (rctx->len % AES_BLOCK_SIZE) * 8; + + /* + * Hardware processes data_count + 1 blocks. + * Reduce 1 block if there is no residue + */ + if (!res_bits) + data_count--; + + if (rctx->iv) { cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->linear_ctr); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) - cpuvaddr[i++] = iv[j]; + cpuvaddr[i++] = rctx->iv[j]; } - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->last_blk, 1); - cpuvaddr[i++] = (len / AES_BLOCK_SIZE) - 1; - cpuvaddr[i++] = host1x_opcode_incr(se->hw->regs->config, 6); - cpuvaddr[i++] = cfg; - cpuvaddr[i++] = cryp_cfg; + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | + SE_LAST_BLOCK_RES_BITS(res_bits); + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; /* Source address setting */ cpuvaddr[i++] = lower_32_bits(addr); - cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(len); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len); /* Destination address setting */ cpuvaddr[i++] = lower_32_bits(addr); cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | - SE_ADDR_HI_SZ(len); + SE_ADDR_HI_SZ(rctx->len); - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF | SE_AES_OP_START; - cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); - dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", cfg, cryp_cfg); + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); return i; } -static int tegra_aes_prep_req(struct crypto_engine *engine, void *areq) +static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) { - struct skcipher_request *req; - struct tegra_aes_ctx *ctx; - struct tegra_aes_reqctx *rctx; + struct skcipher_request *req = container_of(areq, struct skcipher_request, base); + struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req); + struct tegra_se *se = ctx->se; + unsigned int cmdlen; int ret; - req = 
container_of(areq, struct skcipher_request, base); - ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - rctx = skcipher_request_ctx(req); - - rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) { - ret = -ENOMEM; - goto buf_err; - } + rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; rctx->datbuf.size = SE_AES_BUFLEN; - memset(rctx->datbuf.buf, 0, SE_AES_BUFLEN); + rctx->iv = (u32 *)req->iv; + rctx->len = req->cryptlen; - return 0; + /* Pad input to AES Block size */ + if (ctx->alg != SE_ALG_XTS) { + if (rctx->len % AES_BLOCK_SIZE) + rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE); + } -buf_err: - crypto_finalize_skcipher_request(ctx->se->engine, req, ret); + scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0); - return ret; -} + /* Prepare the command and submit for execution */ + cmdlen = tegra_aes_prep_cmd(se, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); -static int tegra_aes_unprep_req(struct crypto_engine *engine, void *areq) -{ - struct skcipher_request *req; - struct tegra_aes_ctx *ctx; - struct tegra_aes_reqctx *rctx; - - req = container_of(areq, struct skcipher_request, base); - ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - rctx = skcipher_request_ctx(req); + /* Copy the result */ + tegra_aes_update_iv(req, ctx); + scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1); + /* Free the buffer */ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, rctx->datbuf.buf, rctx->datbuf.addr); - return 0; -} - -static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) -{ - unsigned int len, src_nents, dst_nents, size; - u32 *cpuvaddr, *iv, config, crypto_config; - struct tegra_aes_reqctx *rctx; - struct skcipher_request *req; - struct tegra_aes_ctx *ctx; - struct tegra_se *se; - int ret; - -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - ret = tegra_aes_prep_req(engine, areq); - if (ret != 0) - return ret; -#endif - - req = container_of(areq, struct skcipher_request, base); - rctx = skcipher_request_ctx(req); - ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - se = ctx->se; - iv = (u32 *)req->iv; - - cpuvaddr = se->cmdbuf->addr; - len = req->cryptlen; - - /* Pad input to AES Block size */ - if (len % AES_BLOCK_SIZE) - len += AES_BLOCK_SIZE - (len % AES_BLOCK_SIZE); - - src_nents = sg_nents(req->src); - sg_copy_to_buffer(req->src, src_nents, rctx->datbuf.buf, req->cryptlen); - - config = tegra234_aes_cfg(ctx->alg, rctx->encrypt); - crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt); - crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id); - if (ctx->key2_id) - crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id); - - size = tegra_aes_prep_cmd(se, cpuvaddr, iv, len, rctx->datbuf.addr, - config, crypto_config); - - ret = tegra_se_host1x_submit(se, size); - - /* Copy the result */ - dst_nents = sg_nents(req->dst); - tegra_aes_update_iv(req, ctx); - sg_copy_from_buffer(req->dst, dst_nents, rctx->datbuf.buf, req->cryptlen); - crypto_finalize_skcipher_request(se->engine, req, ret); -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - if (ret != 0) - return ret; - - ret = tegra_aes_unprep_req(engine, areq); -#endif - return ret; + return 0; } static int tegra_aes_cra_init(struct crypto_skcipher *tfm) @@ -367,32 +306,27 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm) const char *algname; int ret; -#ifdef 
NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
-	se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
-#else
 	se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher);
-#endif
 
 	crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
 
+	ctx->ivsize = crypto_skcipher_ivsize(tfm);
 	ctx->se = se_alg->se_dev;
 	ctx->key1_id = 0;
 	ctx->key2_id = 0;
-	ctx->ivsize = crypto_skcipher_ivsize(tfm);
 
 	algname = crypto_tfm_alg_name(&tfm->base);
 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
-		dev_err(ctx->se->dev, "Invalid algorithm\n");
+		dev_err(ctx->se->dev, "invalid algorithm\n");
 		return ret;
 	}
 
 	ctx->alg = ret;
-#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
-	ctx->enginectx.op.prepare_request = tegra_aes_prep_req;
+
+	ctx->enginectx.op.prepare_request = NULL;
+	ctx->enginectx.op.unprepare_request = NULL;
 	ctx->enginectx.op.do_one_request = tegra_aes_do_one_req;
-	ctx->enginectx.op.unprepare_request = tegra_aes_unprep_req;
-#endif
 
 	return 0;
 }
@@ -402,19 +336,19 @@ static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
 {
 	struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
 
 	if (ctx->key1_id)
-		tegra_key_invalidate(ctx->se, ctx->key1_id);
+		tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
 
 	if (ctx->key2_id)
-		tegra_key_invalidate(ctx->se, ctx->key2_id);
+		tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
 }
 
 static int tegra_aes_setkey(struct crypto_skcipher *tfm,
-				const u8 *key, u32 keylen)
+			    const u8 *key, u32 keylen)
 {
 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	if (aes_check_keylen(keylen)) {
-		dev_err(ctx->se->dev, "key length validation failed\n");
+		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
 		return -EINVAL;
 	}
 
@@ -422,27 +356,23 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm,
 }
 
 static int tegra_xts_setkey(struct crypto_skcipher *tfm,
-				const u8 *key, u32 keylen)
+			    const u8 *key, u32 keylen)
 {
 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	u32 len = keylen / 2;
 	int ret;
-	u32 len;
 
-	len = keylen / 2;
-	if (aes_check_keylen(len)) {
-		dev_err(ctx->se->dev, "key length validation failed\n");
+	ret = xts_verify_key(tfm, key, keylen);
+	if (ret || aes_check_keylen(len)) {
+		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
 		return -EINVAL;
 	}
 
 	ret = tegra_key_submit(ctx->se, key, len,
-			ctx->alg, &ctx->key1_id);
+			       ctx->alg, &ctx->key1_id);
 	if (ret)
 		return ret;
 
-	ret = tegra_key_submit(ctx->se, key + len, len,
-			ctx->alg, &ctx->key2_id);
-	if (ret)
-		return ret;
-
-	return 0;
+	return tegra_key_submit(ctx->se, key + len, len,
+				ctx->alg, &ctx->key2_id);
 }
 
@@ -457,7 +387,6 @@ static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
 	case SE_ALG_CBC:
 	case SE_ALG_ECB:
 	case SE_ALG_CTR:
-	case SE_ALG_OFB:
 		manifest |= SE_KAC_ENC;
 		break;
 	case SE_ALG_XTS:
@@ -493,191 +422,138 @@ static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
 	return manifest;
 }
 
-static int tegra_aes_encrypt(struct skcipher_request *req)
-{
-	struct tegra_aes_ctx *ctx;
-	struct tegra_aes_reqctx *rctx;
-
-	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	rctx = skcipher_request_ctx(req);
-	rctx->encrypt = true;
+static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
+
+	if (ctx->alg != SE_ALG_XTS) {
+		if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
+			dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
+
return -EINVAL; + } + } else if (req->cryptlen < XTS_BLOCK_SIZE) { + dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen); + return -EINVAL; + } + + if (!req->cryptlen) + return 0; + + rctx->encrypt = encrypt; + rctx->config = tegra234_aes_cfg(ctx->alg, encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt); + rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id); + + if (ctx->key2_id) + rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id); return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); } +static int tegra_aes_encrypt(struct skcipher_request *req) +{ + return tegra_aes_crypt(req, true); +} + static int tegra_aes_decrypt(struct skcipher_request *req) { - struct tegra_aes_ctx *ctx; - struct tegra_aes_reqctx *rctx; - - ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - rctx = skcipher_request_ctx(req); - rctx->encrypt = false; - - return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); + return tegra_aes_crypt(req, false); } static struct tegra_se_alg tegra_aes_algs[] = { { .alg.skcipher = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .init = tegra_aes_cra_init, + .exit = tegra_aes_cra_exit, + .setkey = tegra_aes_setkey, + .encrypt = tegra_aes_encrypt, + .decrypt = tegra_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, .base = { -#endif - .init = tegra_aes_cra_init, - .exit = tegra_aes_cra_exit, - .setkey = tegra_xts_setkey, - .encrypt = tegra_aes_encrypt, - .decrypt = tegra_aes_decrypt, - .min_keysize = 2 * AES_MIN_KEY_SIZE, - .max_keysize = 2 * AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - - .base = { - .cra_name = "xts(aes)", - .cra_driver_name = "xts-aes-tegra", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_aes_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_aes_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, }, -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_aes_do_one_req, -#endif } }, { .alg.skcipher = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .init = tegra_aes_cra_init, + .exit = tegra_aes_cra_exit, + .setkey = tegra_aes_setkey, + .encrypt = tegra_aes_encrypt, + .decrypt = tegra_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, .base = { -#endif - .init = tegra_aes_cra_init, - .exit = tegra_aes_cra_exit, - .setkey = tegra_aes_setkey, - .encrypt = tegra_aes_encrypt, - .decrypt = tegra_aes_decrypt, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - - .base = { - .cra_name = "cbc(aes)", - .cra_driver_name = "cbc-aes-tegra", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_aes_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_aes_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, }, -#ifdef 
NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_aes_do_one_req, -#endif } }, { .alg.skcipher = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .init = tegra_aes_cra_init, + .exit = tegra_aes_cra_exit, + .setkey = tegra_aes_setkey, + .encrypt = tegra_aes_encrypt, + .decrypt = tegra_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, .base = { -#endif - .init = tegra_aes_cra_init, - .exit = tegra_aes_cra_exit, - .setkey = tegra_aes_setkey, - .encrypt = tegra_aes_encrypt, - .decrypt = tegra_aes_decrypt, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - - .base = { - .cra_name = "ecb(aes)", - .cra_driver_name = "ecb-aes-tegra", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_aes_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct tegra_aes_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, }, -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_aes_do_one_req, -#endif } }, { .alg.skcipher = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .init = tegra_aes_cra_init, + .exit = tegra_aes_cra_exit, + .setkey = tegra_xts_setkey, + .encrypt = tegra_aes_encrypt, + .decrypt = tegra_aes_decrypt, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, .base = { -#endif - .init = tegra_aes_cra_init, - .exit = tegra_aes_cra_exit, - .setkey = tegra_aes_setkey, - .encrypt = tegra_aes_encrypt, - .decrypt = tegra_aes_decrypt, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - - .base = { - .cra_name = "ctr(aes)", - .cra_driver_name = "ctr-aes-tegra", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = AES_BLOCK_SIZE, + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-tegra", + .cra_priority = 500, + .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_aes_ctx), - .cra_alignmask = 0, + .cra_alignmask = (__alignof__(u64) - 1), .cra_module = THIS_MODULE, }, -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_aes_do_one_req, -#endif - } - }, { - .alg.skcipher = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif - .init = tegra_aes_cra_init, - .exit = tegra_aes_cra_exit, - .setkey = tegra_aes_setkey, - .encrypt = tegra_aes_encrypt, - .decrypt = tegra_aes_decrypt, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - - .base = { - .cra_name = "ofb(aes)", - .cra_driver_name = "ofb-aes-tegra", - .cra_priority = 500, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_aes_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_aes_do_one_req, -#endif } }, }; -static int tegra_gmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, - struct tegra_aead_reqctx *rctx) +static unsigned int tegra_gmac_prep_cmd(struct tegra_se *se, struct tegra_aead_reqctx *rctx) { 
- unsigned int i = 0; - unsigned int data_count, res_bits; + unsigned int data_count, res_bits, i = 0; + u32 *cpuvaddr = se->cmdbuf->addr; - data_count = (rctx->assoclen/AES_BLOCK_SIZE); + data_count = (rctx->assoclen / AES_BLOCK_SIZE); res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8; /* @@ -687,37 +566,35 @@ static int tegra_gmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, if (!res_bits) data_count--; - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | SE_LAST_BLOCK_RES_BITS(res_bits); - cpuvaddr[i++] = host1x_opcode_incr(se->hw->regs->config, 4); + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4); cpuvaddr[i++] = rctx->config; cpuvaddr[i++] = rctx->crypto_config; cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) | SE_ADDR_HI_SZ(rctx->assoclen); - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL | SE_AES_OP_INIT | SE_AES_OP_LASTBUF | SE_AES_OP_START; - cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); return i; } -static int tegra_gcm_crypt_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, - struct tegra_aead_reqctx *rctx) +static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_se *se, struct tegra_aead_reqctx *rctx) { - unsigned int i = 0, j; - unsigned int data_count, res_bits; - u32 op; + unsigned int data_count, res_bits, i = 0, j; + u32 *cpuvaddr = se->cmdbuf->addr, op; - data_count = (rctx->cryptlen/AES_BLOCK_SIZE); + data_count = (rctx->cryptlen / AES_BLOCK_SIZE); res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8; op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL | SE_AES_OP_LASTBUF | SE_AES_OP_START; @@ -737,15 +614,15 @@ static int tegra_gcm_crypt_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, data_count--; cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->linear_ctr); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) cpuvaddr[i++] = rctx->iv[j]; - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | SE_LAST_BLOCK_RES_BITS(res_bits); - cpuvaddr[i++] = host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); cpuvaddr[i++] = rctx->config; cpuvaddr[i++] = rctx->crypto_config; @@ -759,10 +636,10 @@ static int tegra_gcm_crypt_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | SE_ADDR_HI_SZ(rctx->cryptlen); - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); cpuvaddr[i++] = op; - cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); @@ -771,9 +648,9 @@ static int tegra_gcm_crypt_prep_cmd(struct tegra_se *se, u32 
*cpuvaddr, } static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr, - struct tegra_aead_reqctx *rctx) + struct tegra_aead_reqctx *rctx) { - int i = 0, j; + unsigned int i = 0, j; u32 op; op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL | @@ -785,21 +662,20 @@ static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr, if (!rctx->assoclen && !rctx->cryptlen) op |= SE_AES_OP_INIT; - - cpuvaddr[i++] = host1x_opcode_incr(se->hw->regs->aad_len, 2); + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2); cpuvaddr[i++] = rctx->assoclen * 8; cpuvaddr[i++] = 0; - cpuvaddr[i++] = host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2); + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2); cpuvaddr[i++] = rctx->cryptlen * 8; cpuvaddr[i++] = 0; cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->linear_ctr); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) cpuvaddr[i++] = rctx->iv[j]; - cpuvaddr[i++] = host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); cpuvaddr[i++] = rctx->config; cpuvaddr[i++] = rctx->crypto_config; cpuvaddr[i++] = 0; @@ -808,12 +684,12 @@ static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr, /* Destination Address */ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr); cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | - SE_ADDR_HI_SZ(rctx->authsize); + SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */ - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); cpuvaddr[i++] = op; - cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); @@ -825,27 +701,24 @@ static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr, static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) { struct tegra_se *se = ctx->se; - u32 *cpuvaddr = se->cmdbuf->addr; - unsigned int nents, size; + unsigned int cmdlen; - nents = sg_nents(rctx->src_sg); scatterwalk_map_and_copy(rctx->inbuf.buf, - rctx->src_sg, 0, rctx->assoclen, 0); + rctx->src_sg, 0, rctx->assoclen, 0); rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) | SE_AES_KEY_INDEX(ctx->key_id); - size = tegra_gmac_prep_cmd(se, cpuvaddr, rctx); + cmdlen = tegra_gmac_prep_cmd(se, rctx); - return tegra_se_host1x_submit(se, size); + return tegra_se_host1x_submit(se, cmdlen); } static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) { struct tegra_se *se = ctx->se; - u32 *cpuvaddr = se->cmdbuf->addr; - int size, ret; + int cmdlen, ret; scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg, rctx->assoclen, rctx->cryptlen, 0); @@ -855,8 +728,8 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc SE_AES_KEY_INDEX(ctx->key_id); /* Prepare command and submit */ - size = tegra_gcm_crypt_prep_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(se, size); + cmdlen = tegra_gcm_crypt_prep_cmd(se, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); if (ret) return ret; @@ -871,23 +744,23 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx 
*ctx, struct tegra_aead_reqc { struct tegra_se *se = ctx->se; u32 *cpuvaddr = se->cmdbuf->addr; - int size, ret, off; + int cmdlen, ret, offset; rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) | SE_AES_KEY_INDEX(ctx->key_id); /* Prepare command and submit */ - size = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(se, size); + cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); if (ret) return ret; if (rctx->encrypt) { /* Copy the result */ - off = rctx->assoclen + rctx->cryptlen; + offset = rctx->assoclen + rctx->cryptlen; scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg, - off, rctx->authsize, 1); + offset, rctx->authsize, 1); } return 0; @@ -895,11 +768,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx) { - unsigned int off; + unsigned int offset; u8 mac[16]; - off = rctx->assoclen + rctx->cryptlen; - scatterwalk_map_and_copy(mac, rctx->src_sg, off, rctx->authsize, 0); + offset = rctx->assoclen + rctx->cryptlen; + scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0); if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize)) return -EBADMSG; @@ -914,23 +787,24 @@ static inline int tegra_ccm_check_iv(const u8 *iv) * 2 <= L <= 8, so 1 <= L' <= 7. as per rfc 3610 notation */ if (iv[0] < 1 || iv[0] > 7) { - pr_err("ccm_check_iv failed %d\n", iv[0]); + pr_debug("ccm_check_iv failed %d\n", iv[0]); return -EINVAL; } return 0; } -static int tegra_cbcmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct tegra_aead_reqctx *rctx) +static unsigned int tegra_cbcmac_prep_cmd(struct tegra_se *se, struct tegra_aead_reqctx *rctx) { - unsigned int i = 0, data_count; + unsigned int data_count, i = 0; + u32 *cpuvaddr = se->cmdbuf->addr; - data_count = (rctx->inbuf.size/AES_BLOCK_SIZE) - 1; + data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1; - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count); - cpuvaddr[i++] = host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); cpuvaddr[i++] = rctx->config; cpuvaddr[i++] = rctx->crypto_config; @@ -940,31 +814,32 @@ static int tegra_cbcmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct tegr cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr); cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | - SE_ADDR_HI_SZ(rctx->authsize); + SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */ - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF | SE_AES_OP_START; - cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); return i; } -static int tegra_ctr_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct tegra_aead_reqctx *rctx) +static unsigned int tegra_ctr_prep_cmd(struct tegra_se *se, struct tegra_aead_reqctx *rctx) { - int i = 0, j; + unsigned int i = 0, j; + u32 *cpuvaddr = se->cmdbuf->addr; cpuvaddr[i++] = 
host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->linear_ctr); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) cpuvaddr[i++] = rctx->iv[j]; - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1; - cpuvaddr[i++] = host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); cpuvaddr[i++] = rctx->config; cpuvaddr[i++] = rctx->crypto_config; @@ -978,16 +853,16 @@ static int tegra_ctr_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct tegra_a cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | SE_ADDR_HI_SZ(rctx->inbuf.size); - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF | SE_AES_OP_START; - cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", - rctx->config, rctx->crypto_config); + rctx->config, rctx->crypto_config); return i; } @@ -995,17 +870,17 @@ static int tegra_ctr_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct tegra_a static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) { struct tegra_se *se = ctx->se; - u32 *cpuvaddr = se->cmdbuf->addr; - int size; + int cmdlen; rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC, - rctx->encrypt) | SE_AES_KEY_INDEX(ctx->key_id); + rctx->encrypt) | + SE_AES_KEY_INDEX(ctx->key_id); /* Prepare command and submit */ - size = tegra_cbcmac_prep_cmd(se, cpuvaddr, rctx); + cmdlen = tegra_cbcmac_prep_cmd(se, rctx); - return tegra_se_host1x_submit(se, size); + return tegra_se_host1x_submit(se, cmdlen); } static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) @@ -1084,7 +959,7 @@ static int tegra_ccm_add_padding(u8 *buf, unsigned int len) static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx) { - unsigned int alen = 0, off = 0; + unsigned int alen = 0, offset = 0; u8 nonce[16], adata[16]; int ret; @@ -1093,22 +968,22 @@ static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx) return ret; memcpy(rctx->inbuf.buf, nonce, 16); - off = 16; + offset = 16; if (rctx->assoclen) { alen = tegra_ccm_format_adata(adata, rctx->assoclen); - memcpy(rctx->inbuf.buf + off, adata, alen); - off += alen; + memcpy(rctx->inbuf.buf + offset, adata, alen); + offset += alen; - scatterwalk_map_and_copy(rctx->inbuf.buf + off, - rctx->src_sg, 0, rctx->assoclen, 0); + scatterwalk_map_and_copy(rctx->inbuf.buf + offset, + rctx->src_sg, 0, rctx->assoclen, 0); - off += rctx->assoclen; - off += tegra_ccm_add_padding(rctx->inbuf.buf + off, + offset += rctx->assoclen; + offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->assoclen + alen); } - return off; + return offset; } static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx) @@ -1118,10 +993,10 @@ static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *r /* Read and clear Result */ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - 
result[i] = se_readl(se, se->hw->regs->result + (i * 4)); + result[i] = readl(se->base + se->hw->regs->result + (i * 4)); for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - se_writel(se, 0, se->hw->regs->result + (i * 4)); + writel(0, se->base + se->hw->regs->result + (i * 4)); if (rctx->encrypt) { memcpy(rctx->authdata, result, rctx->authsize); @@ -1154,22 +1029,22 @@ static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_ { struct tegra_se *se = ctx->se; struct scatterlist *sg; - int off, ret; + int offset, ret; - off = tegra_ccm_format_blocks(rctx); - if (off < 0) + offset = tegra_ccm_format_blocks(rctx); + if (offset < 0) return -EINVAL; /* Copy plain text to the buffer */ sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg; - scatterwalk_map_and_copy(rctx->inbuf.buf + off, + scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg, rctx->assoclen, rctx->cryptlen, 0); - off += rctx->cryptlen; - off += tegra_ccm_add_padding(rctx->inbuf.buf + off, rctx->cryptlen); + offset += rctx->cryptlen; + offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen); - rctx->inbuf.size = off; + rctx->inbuf.size = offset; ret = tegra_ccm_do_cbcmac(ctx, rctx); if (ret) @@ -1181,7 +1056,7 @@ static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) { struct tegra_se *se = ctx->se; - unsigned int size, off = 0; + unsigned int cmdlen, offset = 0; struct scatterlist *sg = rctx->src_sg; int ret; @@ -1197,22 +1072,22 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx rctx->assoclen + rctx->cryptlen, rctx->authsize, 0); - off += rctx->authsize; - off += tegra_ccm_add_padding(rctx->inbuf.buf + off, rctx->authsize); + offset += rctx->authsize; + offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize); /* If there is no cryptlen, proceed to submit the task */ if (rctx->cryptlen) { - scatterwalk_map_and_copy(rctx->inbuf.buf + off, sg, + scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg, rctx->assoclen, rctx->cryptlen, 0); - off += rctx->cryptlen; - off += tegra_ccm_add_padding(rctx->inbuf.buf + off, rctx->cryptlen); + offset += rctx->cryptlen; + offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen); } - rctx->inbuf.size = off; + rctx->inbuf.size = offset; /* Prepare command and submit */ - size = tegra_ctr_prep_cmd(se, se->cmdbuf->addr, rctx); - ret = tegra_se_host1x_submit(se, size); + cmdlen = tegra_ctr_prep_cmd(se, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); if (ret) return ret; @@ -1245,16 +1120,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se, /* Clear any previous result */ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - se_writel(se, 0, se->hw->regs->result + (i * 4)); + writel(0, se->base + se->hw->regs->result + (i * 4)); return 0; } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX -static int tegra_aead_prep_req(struct crypto_engine *engine, void *areq); -static int tegra_aead_unprep_req(struct crypto_engine *engine, void *areq); -#endif - static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) { struct aead_request *req = container_of(areq, struct aead_request, base); @@ -1264,11 +1134,22 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret; -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - ret = tegra_aead_prep_req(engine, areq); - if (ret != 0) - return ret; -#endif 
+	/* Allocate buffers required */
+	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+					     &rctx->inbuf.addr, GFP_KERNEL);
+	if (!rctx->inbuf.buf)
+		return -ENOMEM;
+
+	rctx->inbuf.size = SE_AES_BUFLEN;
+
+	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+					      &rctx->outbuf.addr, GFP_KERNEL);
+	if (!rctx->outbuf.buf) {
+		ret = -ENOMEM;
+		goto outbuf_err;
+	}
+
+	rctx->outbuf.size = SE_AES_BUFLEN;
 
 	ret = tegra_ccm_crypt_init(req, se, rctx);
 	if (ret)
@@ -1288,6 +1169,8 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 		goto out;
 	} else {
 		rctx->cryptlen = req->cryptlen - ctx->authsize;
+		if (ret)
+			goto out;
 
 		/* CTR operation */
 		ret = tegra_ccm_do_ctr(ctx, rctx);
@@ -1301,13 +1184,15 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 	}
 
 out:
-	crypto_finalize_aead_request(se->engine, req, ret);
+	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+			  rctx->outbuf.buf, rctx->outbuf.addr);
+
+outbuf_err:
+	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+			  rctx->inbuf.buf, rctx->inbuf.addr);
+
+	crypto_finalize_aead_request(ctx->se->engine, req, ret);
 
-#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
-	ret = tegra_aead_unprep_req(engine, areq);
-	if (ret != 0)
-		return ret;
-#endif
 	return 0;
 }
 
@@ -1319,13 +1204,22 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
 	int ret;
 
-#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
-	ret = tegra_aead_prep_req(engine, areq);
-	if (ret != 0)
-		return ret;
-#endif
+	/* Allocate buffers required */
+	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+					     &rctx->inbuf.addr, GFP_KERNEL);
+	if (!rctx->inbuf.buf)
+		return -ENOMEM;
 
-	req = container_of(areq, struct aead_request, base);
+	rctx->inbuf.size = SE_AES_BUFLEN;
+
+	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+					      &rctx->outbuf.addr, GFP_KERNEL);
+	if (!rctx->outbuf.buf) {
+		ret = -ENOMEM;
+		goto outbuf_err;
+	}
+
+	rctx->outbuf.size = SE_AES_BUFLEN;
 
 	rctx->src_sg = req->src;
 	rctx->dst_sg = req->dst;
@@ -1363,56 +1257,15 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 		ret = tegra_gcm_do_verify(ctx->se, rctx);
 
 out:
-	crypto_finalize_aead_request(ctx->se->engine, req, ret);
-#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
-	ret = tegra_aead_unprep_req(engine, areq);
-	if (ret != 0)
-		return ret;
-#endif
-	return 0;
-}
-
-static int tegra_aead_prep_req(struct crypto_engine *engine, void *areq)
-{
-	struct aead_request *req = container_of(areq, struct aead_request, base);
-	struct tegra_aead_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
-	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
-
-	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
-					     &rctx->inbuf.addr, GFP_KERNEL);
-	if (!rctx->inbuf.buf)
-		goto inbuf_err;
-
-	rctx->inbuf.size = SE_AES_BUFLEN;
-
-	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
-					      &rctx->outbuf.addr, GFP_KERNEL);
-	if (!rctx->outbuf.buf)
-		goto outbuf_err;
-
-	rctx->outbuf.size = SE_AES_BUFLEN;
-
-	return 0;
+	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+			  rctx->outbuf.buf, rctx->outbuf.addr);
 
 outbuf_err:
 	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
 			  rctx->inbuf.buf, rctx->inbuf.addr);
 
-inbuf_err:
-	return -ENOMEM;
-}
-
-static int tegra_aead_unprep_req(struct crypto_engine *engine, void *areq)
-{
-	struct aead_request *req = container_of(areq, struct aead_request, base);
-	struct tegra_aead_ctx *ctx = 
crypto_aead_ctx(crypto_aead_reqtfm(req));
-	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
-
-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
-			  rctx->outbuf.buf, rctx->outbuf.addr);
-
-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
-			  rctx->inbuf.buf, rctx->inbuf.addr);
+	/* Finalize the request with the result of the operation */
+	crypto_finalize_aead_request(ctx->se->engine, req, ret);
 
 	return 0;
 }
 
@@ -1423,26 +1276,60 @@ static int tegra_ccm_cra_init(struct crypto_aead *tfm)
 	struct aead_alg *alg = crypto_aead_alg(tfm);
 	struct tegra_se_alg *se_alg;
 	const char *algname;
+	int ret;
 
 	algname = crypto_tfm_alg_name(&tfm->base);
-#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
-	se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
-#else
 	se_alg = container_of(alg, struct tegra_se_alg, alg.aead);
-#endif
 
 	crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
 
 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
-	ctx->alg = se_algname_to_algid(algname);
 
-#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
-	ctx->enginectx.op.prepare_request = tegra_aead_prep_req;
+	ret = se_algname_to_algid(algname);
+	if (ret < 0) {
+		dev_err(ctx->se->dev, "invalid algorithm\n");
+		return ret;
+	}
+
+	ctx->alg = ret;
+
+	ctx->enginectx.op.prepare_request = NULL;
+	ctx->enginectx.op.unprepare_request = NULL;
 	ctx->enginectx.op.do_one_request = tegra_ccm_do_one_req;
-	ctx->enginectx.op.unprepare_request = tegra_aead_unprep_req;
-#endif
+
+	return 0;
+}
+
+static int tegra_gcm_cra_init(struct crypto_aead *tfm)
+{
+	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct tegra_se_alg *se_alg;
+	const char *algname;
+	int ret;
+
+	algname = crypto_tfm_alg_name(&tfm->base);
+
+	se_alg = container_of(alg, struct tegra_se_alg, alg.aead);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
+
+	ctx->se = se_alg->se_dev;
+	ctx->key_id = 0;
+
+	ret = se_algname_to_algid(algname);
+	if (ret < 0) {
+		dev_err(ctx->se->dev, "invalid algorithm\n");
+		return ret;
+	}
+
+	ctx->alg = ret;
+
+	ctx->enginectx.op.prepare_request = NULL;
+	ctx->enginectx.op.unprepare_request = NULL;
+	ctx->enginectx.op.do_one_request = tegra_gcm_do_one_req;
 
 	return 0;
 }
@@ -1469,35 +1356,6 @@ static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 	return 0;
 }
 
-static int tegra_gcm_cra_init(struct crypto_aead *tfm)
-{
-	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_alg *alg = crypto_aead_alg(tfm);
-	struct tegra_se_alg *se_alg;
-	const char *algname;
-
-	algname = crypto_tfm_alg_name(&tfm->base);
-#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
-	se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
-#else
-	se_alg = container_of(alg, struct tegra_se_alg, alg.aead);
-#endif
-
-	crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
-
-	ctx->se = se_alg->se_dev;
-	ctx->key_id = 0;
-	ctx->alg = se_algname_to_algid(algname);
-
-#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX
-	ctx->enginectx.op.prepare_request = tegra_aead_prep_req;
-	ctx->enginectx.op.do_one_request = tegra_gcm_do_one_req;
-	ctx->enginectx.op.unprepare_request = tegra_aead_unprep_req;
-#endif
-
-	return 0;
-}
-
 static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 {
 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1517,7 +1375,7 @@ static void tegra_aead_cra_exit(struct crypto_aead *tfm)
 {
 	struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);
 
 	if (ctx->key_id)
-		tegra_key_invalidate(ctx->se, ctx->key_id);
+		
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); } static int tegra_aead_crypt(struct aead_request *req, bool encrypt) @@ -1542,23 +1400,22 @@ static int tegra_aead_decrypt(struct aead_request *req) } static int tegra_aead_setkey(struct crypto_aead *tfm, - const u8 *key, u32 keylen) + const u8 *key, u32 keylen) { struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); if (aes_check_keylen(keylen)) { - dev_err(ctx->se->dev, "key length validation failed\n"); + dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; } return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); } -static int tegra_cmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct tegra_cmac_reqctx *rctx) +static unsigned int tegra_cmac_prep_cmd(struct tegra_se *se, struct tegra_cmac_reqctx *rctx) { - unsigned int data_count, res_bits = 0; - int i = 0, j; - u32 op; + unsigned int data_count, res_bits = 0, i = 0, j; + u32 *cpuvaddr = se->cmdbuf->addr, op; data_count = (rctx->datbuf.size / AES_BLOCK_SIZE); @@ -1576,17 +1433,17 @@ static int tegra_cmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct tegra_ rctx->task &= ~SHA_FIRST; cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->linear_ctr); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); /* Load 0 IV */ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) cpuvaddr[i++] = 0; } - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | SE_LAST_BLOCK_RES_BITS(res_bits); - cpuvaddr[i++] = host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); cpuvaddr[i++] = rctx->config; cpuvaddr[i++] = rctx->crypto_config; @@ -1597,23 +1454,41 @@ static int tegra_cmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct tegra_ cpuvaddr[i++] = 0; cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE); - cpuvaddr[i++] = host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); cpuvaddr[i++] = op; - cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); return i; } +static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx) +{ + int i; + + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4)); +} + +static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx) +{ + int i; + + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(rctx->result[i], + se->base + se->hw->regs->result + (i * 4)); +} + static int tegra_cmac_do_update(struct ahash_request *req) { struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - unsigned int nblks, nresidue, size; + unsigned int nblks, nresidue, cmdlen; + int ret; if (!req->nbytes) return 0; @@ -1641,7 +1516,7 @@ static int tegra_cmac_do_update(struct ahash_request *req) */ if (nblks < 1) { scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size, - rctx->src_sg, 0, req->nbytes, 0); + rctx->src_sg, 0, req->nbytes, 0); rctx->residue.size += req->nbytes; return 0; @@ 
-1652,17 +1527,34 @@ static int tegra_cmac_do_update(struct ahash_request *req) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size, - rctx->src_sg, 0, req->nbytes - nresidue, 0); + rctx->src_sg, 0, req->nbytes - nresidue, 0); scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg, - req->nbytes - nresidue, nresidue, 0); + req->nbytes - nresidue, nresidue, 0); /* Update residue value with the residue after current block */ rctx->residue.size = nresidue; - size = tegra_cmac_prep_cmd(se, se->cmdbuf->addr, rctx); + /* + * If this is not the first 'update' call, paste the previous copied + * intermediate results to the registers so that it gets picked up. + * This is to support the import/export functionality. + */ + if (!(rctx->task & SHA_FIRST)) + tegra_cmac_paste_result(ctx->se, rctx); - return tegra_se_host1x_submit(se, size); + cmdlen = tegra_cmac_prep_cmd(se, rctx); + + ret = tegra_se_host1x_submit(se, cmdlen); + /* + * If this is not the final update, copy the intermediate results + * from the registers so that it can be used in the next 'update' + * call. This is to support the import/export functionality. + */ + if (!(rctx->task & SHA_FINAL)) + tegra_cmac_copy_result(ctx->se, rctx); + + return ret; } static int tegra_cmac_do_final(struct ahash_request *req) @@ -1672,7 +1564,7 @@ static int tegra_cmac_do_final(struct ahash_request *req) struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; u32 *result = (u32 *)req->result; - int ret = 0, i, size; + int ret = 0, i, cmdlen; if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) { return crypto_shash_tfm_digest(ctx->fallback_tfm, @@ -1685,23 +1577,23 @@ static int tegra_cmac_do_final(struct ahash_request *req) rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); /* Prepare command and submit */ - size = tegra_cmac_prep_cmd(se, se->cmdbuf->addr, rctx); - ret = tegra_se_host1x_submit(se, size); + cmdlen = tegra_cmac_prep_cmd(se, rctx); + ret = tegra_se_host1x_submit(se, cmdlen); if (ret) goto out; /* Read and clear Result register */ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - result[i] = se_readl(ctx->se, se->hw->regs->result + (i * 4)); + result[i] = readl(se->base + se->hw->regs->result + (i * 4)); for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - se_writel(ctx->se, 0, se->hw->regs->result + (i * 4)); + writel(0, se->base + se->hw->regs->result + (i * 4)); out: dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + rctx->datbuf.buf, rctx->datbuf.addr); dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2, - rctx->residue.buf, rctx->residue.addr); + rctx->residue.buf, rctx->residue.addr); return ret; } @@ -1712,7 +1604,7 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - int ret = -EINVAL; + int ret; if (rctx->task & SHA_UPDATE) { ret = tegra_cmac_do_update(req); @@ -1726,42 +1618,59 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) crypto_finalize_hash_request(se->engine, req, ret); - return ret; + return 0; +} + +static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx, + const char *algname) +{ + unsigned int statesize; + + ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(ctx->fallback_tfm)) { + dev_warn(ctx->se->dev, "failed to 
allocate fallback for %s\n", algname); + ctx->fallback_tfm = NULL; + return; + } + + statesize = crypto_shash_statesize(ctx->fallback_tfm); + + if (statesize > sizeof(struct tegra_cmac_reqctx)) + crypto_hash_alg_common(tfm)->statesize = statesize; } static int tegra_cmac_cra_init(struct crypto_tfm *tfm) { struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm); struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); struct tegra_se_alg *se_alg; const char *algname; + int ret; algname = crypto_tfm_alg_name(tfm); -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base); -#else se_alg = container_of(alg, struct tegra_se_alg, alg.ahash); -#endif - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct tegra_cmac_reqctx)); + crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx)); ctx->se = se_alg->se_dev; ctx->key_id = 0; - ctx->alg = se_algname_to_algid(algname); -#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.do_one_request = tegra_cmac_do_one_req; - ctx->enginectx.op.unprepare_request = NULL; -#endif - ctx->fallback_tfm = crypto_alloc_shash(algname, 0, - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->fallback_tfm)) { - dev_warn(ctx->se->dev, "failed to allocate fallback for CMAC\n"); - ctx->fallback_tfm = NULL; + ret = se_algname_to_algid(algname); + if (ret < 0) { + dev_err(ctx->se->dev, "invalid algorithm\n"); + return ret; } + ctx->alg = ret; + + ctx->enginectx.op.prepare_request = NULL; + ctx->enginectx.op.unprepare_request = NULL; + ctx->enginectx.op.do_one_request = tegra_cmac_do_one_req; + + tegra_cmac_init_fallback(ahash_tfm, ctx, algname); + return 0; } @@ -1769,10 +1678,10 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm) { struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm); - tegra_key_invalidate(ctx->se, ctx->key_id); - if (ctx->fallback_tfm) crypto_free_shash(ctx->fallback_tfm); + + tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); } static int tegra_cmac_init(struct ahash_request *req) @@ -1790,14 +1699,14 @@ static int tegra_cmac_init(struct ahash_request *req) rctx->blk_size = crypto_ahash_blocksize(tfm); rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, - &rctx->residue.addr, GFP_KERNEL); + &rctx->residue.addr, GFP_KERNEL); if (!rctx->residue.buf) goto resbuf_fail; rctx->residue.size = 0; rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); + &rctx->datbuf.addr, GFP_KERNEL); if (!rctx->datbuf.buf) goto datbuf_fail; @@ -1805,13 +1714,13 @@ static int tegra_cmac_init(struct ahash_request *req) /* Clear any previous result */ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - se_writel(se, 0, se->hw->regs->result + (i * 4)); + writel(0, se->base + se->hw->regs->result + (i * 4)); return 0; datbuf_fail: dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); + rctx->residue.addr); resbuf_fail: return -ENOMEM; } @@ -1822,7 +1731,7 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); if (aes_check_keylen(keylen)) { - dev_err(ctx->se->dev, "key length validation failed\n"); + dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; } @@ -1898,58 +1807,44 @@ static int tegra_cmac_import(struct ahash_request *req, const void *in) static struct tegra_se_alg tegra_aead_algs[] = { { .alg.aead 
= { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .init = tegra_gcm_cra_init, + .exit = tegra_aead_cra_exit, + .setkey = tegra_aead_setkey, + .setauthsize = tegra_gcm_setauthsize, + .encrypt = tegra_aead_encrypt, + .decrypt = tegra_aead_decrypt, + .maxauthsize = AES_BLOCK_SIZE, + .ivsize = GCM_AES_IV_SIZE, .base = { -#endif - .init = tegra_gcm_cra_init, - .exit = tegra_aead_cra_exit, - .setkey = tegra_aead_setkey, - .setauthsize = tegra_gcm_setauthsize, - .encrypt = tegra_aead_encrypt, - .decrypt = tegra_aead_decrypt, - .maxauthsize = AES_BLOCK_SIZE, - .ivsize = GCM_AES_IV_SIZE, - .base = { - .cra_name = "gcm(aes)", - .cra_driver_name = "gcm-aes-tegra", - .cra_priority = 500, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_aead_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-tegra", + .cra_priority = 500, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct tegra_aead_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, }, -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_gcm_do_one_req, -#endif } }, { .alg.aead = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .init = tegra_ccm_cra_init, + .exit = tegra_aead_cra_exit, + .setkey = tegra_aead_setkey, + .setauthsize = tegra_ccm_setauthsize, + .encrypt = tegra_aead_encrypt, + .decrypt = tegra_aead_decrypt, + .maxauthsize = AES_BLOCK_SIZE, + .ivsize = AES_BLOCK_SIZE, + .chunksize = AES_BLOCK_SIZE, .base = { -#endif - .init = tegra_ccm_cra_init, - .exit = tegra_aead_cra_exit, - .setkey = tegra_aead_setkey, - .setauthsize = tegra_ccm_setauthsize, - .encrypt = tegra_aead_encrypt, - .decrypt = tegra_aead_decrypt, - .maxauthsize = AES_BLOCK_SIZE, - .ivsize = AES_BLOCK_SIZE, - .chunksize = AES_BLOCK_SIZE, - .base = { - .cra_name = "ccm(aes)", - .cra_driver_name = "ccm-aes-tegra", - .cra_priority = 500, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tegra_aead_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, + .cra_name = "ccm(aes)", + .cra_driver_name = "ccm-aes-tegra", + .cra_priority = 500, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct tegra_aead_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, }, -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_ccm_do_one_req, -#endif } } }; @@ -1957,9 +1852,6 @@ static struct tegra_se_alg tegra_aead_algs[] = { static struct tegra_se_alg tegra_cmac_algs[] = { { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_cmac_init, .setkey = tegra_cmac_setkey, .update = tegra_cmac_update, @@ -1970,7 +1862,6 @@ static struct tegra_se_alg tegra_cmac_algs[] = { .import = tegra_cmac_import, .halg.digestsize = AES_BLOCK_SIZE, .halg.statesize = sizeof(struct tegra_cmac_reqctx), - .halg.base = { .cra_name = "cmac(aes)", .cra_driver_name = "tegra-se-cmac", @@ -1982,98 +1873,85 @@ static struct tegra_se_alg tegra_cmac_algs[] = { .cra_module = THIS_MODULE, .cra_init = tegra_cmac_cra_init, .cra_exit = tegra_cmac_cra_exit, - }, -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_cmac_do_one_req, -#endif + } } } }; int tegra_init_aes(struct tegra_se *se) { + struct aead_alg *aead_alg; + struct ahash_alg *ahash_alg; + struct skcipher_alg *sk_alg; int i, ret; se->manifest = tegra_aes_kac_manifest; for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) { + sk_alg = &tegra_aes_algs[i].alg.skcipher; tegra_aes_algs[i].se_dev = se; - ret = 
CRYPTO_REGISTER(skcipher, &tegra_aes_algs[i].alg.skcipher); + + ret = crypto_register_skcipher(sk_alg); if (ret) { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX dev_err(se->dev, "failed to register %s\n", - tegra_aes_algs[i].alg.skcipher.base.base.cra_name); -#else - dev_err(se->dev, "failed to register %s\n", - tegra_aes_algs[i].alg.skcipher.base.cra_name); -#endif + sk_alg->base.cra_name); goto err_aes; } } for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) { + aead_alg = &tegra_aead_algs[i].alg.aead; tegra_aead_algs[i].se_dev = se; - ret = CRYPTO_REGISTER(aead, &tegra_aead_algs[i].alg.aead); + + ret = crypto_register_aead(aead_alg); if (ret) { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX dev_err(se->dev, "failed to register %s\n", - tegra_aead_algs[i].alg.aead.base.base.cra_name); -#else - dev_err(se->dev, "failed to register %s\n", - tegra_aead_algs[i].alg.aead.base.cra_name); -#endif + aead_alg->base.cra_name); goto err_aead; } } for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) { + ahash_alg = &tegra_cmac_algs[i].alg.ahash; tegra_cmac_algs[i].se_dev = se; - ret = CRYPTO_REGISTER(ahash, &tegra_cmac_algs[i].alg.ahash); + + ret = crypto_register_ahash(ahash_alg); if (ret) { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX dev_err(se->dev, "failed to register %s\n", - tegra_cmac_algs[i].alg.ahash.base.halg.base.cra_name); -#else - dev_err(se->dev, "failed to register %s\n", - tegra_cmac_algs[i].alg.ahash.halg.base.cra_name); -#endif + ahash_alg->halg.base.cra_name); goto err_cmac; } } - dev_info(se->dev, "registered AES algorithms\n"); - return 0; err_cmac: - for (--i; i >= 0; i--) - CRYPTO_UNREGISTER(ahash, &tegra_cmac_algs[i].alg.ahash); + while (i--) + crypto_unregister_ahash(&tegra_cmac_algs[i].alg.ahash); i = ARRAY_SIZE(tegra_aead_algs); err_aead: - for (--i; i >= 0; i--) - CRYPTO_UNREGISTER(aead, &tegra_aead_algs[i].alg.aead); + while (i--) + crypto_unregister_aead(&tegra_aead_algs[i].alg.aead); i = ARRAY_SIZE(tegra_aes_algs); err_aes: - for (--i; i >= 0; i--) - CRYPTO_UNREGISTER(skcipher, &tegra_aes_algs[i].alg.skcipher); + while (i--) + crypto_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher); return ret; } -void tegra_deinit_aes(void) +void tegra_deinit_aes(struct tegra_se *se) { int i; for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) - CRYPTO_UNREGISTER(skcipher, &tegra_aes_algs[i].alg.skcipher); + crypto_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher); for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) - CRYPTO_UNREGISTER(aead, &tegra_aead_algs[i].alg.aead); + crypto_unregister_aead(&tegra_aead_algs[i].alg.aead); for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) - CRYPTO_UNREGISTER(ahash, &tegra_cmac_algs[i].alg.ahash); - + crypto_unregister_ahash(&tegra_cmac_algs[i].alg.ahash); } diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 757ccf4a..e5c3533b 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -4,10 +4,8 @@ * Crypto driver to handle HASH algorithms using NVIDIA Security Engine. 
*/ -#include #include #include -#include #include #include #include @@ -24,18 +22,15 @@ #include "tegra-se.h" struct tegra_sha_ctx { -#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX struct crypto_engine_ctx enginectx; -#endif - struct crypto_ahash *fallback_tfm; struct tegra_se *se; unsigned int alg; bool fallback; u32 key_id; + struct crypto_ahash *fallback_tfm; }; struct tegra_sha_reqctx { - struct ahash_request fallback_req; struct scatterlist *src_sg; struct tegra_se_datbuf datbuf; struct tegra_se_datbuf residue; @@ -46,6 +41,8 @@ struct tegra_sha_reqctx { unsigned int blk_size; unsigned int task; u32 key_id; + u32 result[HASH_RESULT_REG_COUNT]; + struct ahash_request fallback_req; }; static int tegra_sha_get_config(u32 alg) @@ -216,13 +213,13 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out) } static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, - struct tegra_sha_reqctx *rctx) + struct tegra_sha_reqctx *rctx) { u64 msg_len, msg_left; int i = 0; - msg_len = (u64)rctx->total_len * 8; - msg_left = (u64)rctx->datbuf.size * 8; + msg_len = rctx->total_len * 8; + msg_left = rctx->datbuf.size * 8; /* * If IN_ADDR_HI_0.SZ > SHA_MSG_LEFT_[0-3] to the HASH engine, @@ -236,7 +233,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, } cpuvaddr[i++] = host1x_opcode_setpayload(8); - cpuvaddr[i++] = host1x_opcode_incr_w(SE_SHA_MSG_LENGTH); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_MSG_LENGTH); cpuvaddr[i++] = lower_32_bits(msg_len); cpuvaddr[i++] = upper_32_bits(msg_len); cpuvaddr[i++] = 0; @@ -246,14 +243,15 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = 0; cpuvaddr[i++] = 0; cpuvaddr[i++] = host1x_opcode_setpayload(6); - cpuvaddr[i++] = host1x_opcode_incr_w(SE_SHA_CFG); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG); cpuvaddr[i++] = rctx->config; if (rctx->task & SHA_FIRST) { cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT; rctx->task &= ~SHA_FIRST; - } else + } else { cpuvaddr[i++] = 0; + } cpuvaddr[i++] = rctx->datbuf.addr; cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) | @@ -263,30 +261,47 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, SE_ADDR_HI_SZ(rctx->digest.size)); if (rctx->key_id) { cpuvaddr[i++] = host1x_opcode_setpayload(1); - cpuvaddr[i++] = host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG); + cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG); cpuvaddr[i++] = SE_AES_KEY_INDEX(rctx->key_id); } cpuvaddr[i++] = host1x_opcode_setpayload(1); - cpuvaddr[i++] = host1x_opcode_nonincr_w(SE_SHA_OPERATION); + cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION); cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START | SE_SHA_OP_LASTBUF; - cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x", - msg_len, msg_left, rctx->config); + msg_len, msg_left, rctx->config); return i; } +static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) +{ + int i; + + for (i = 0; i < HASH_RESULT_REG_COUNT; i++) + rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4)); +} + +static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) +{ + int i; + + for (i = 0; i < HASH_RESULT_REG_COUNT; i++) + writel(rctx->result[i], + se->base + 
se->hw->regs->result + (i * 4)); +} + static int tegra_sha_do_update(struct ahash_request *req) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned int nblks, nresidue, size; + unsigned int nblks, nresidue, size, ret; u32 *cpuvaddr = ctx->se->cmdbuf->addr; nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size; @@ -311,7 +326,7 @@ static int tegra_sha_do_update(struct ahash_request *req) */ if (nblks < 1) { scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size, - rctx->src_sg, 0, req->nbytes, 0); + rctx->src_sg, 0, req->nbytes, 0); rctx->residue.size += req->nbytes; return 0; @@ -322,10 +337,10 @@ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size, - rctx->src_sg, 0, req->nbytes - nresidue, 0); + rctx->src_sg, 0, req->nbytes - nresidue, 0); scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg, - req->nbytes - nresidue, nresidue, 0); + req->nbytes - nresidue, nresidue, 0); /* Update residue value with the residue after current block */ rctx->residue.size = nresidue; @@ -333,9 +348,27 @@ rctx->config = tegra_sha_get_config(rctx->alg) | SE_SHA_DST_HASH_REG; + /* + * If this is not the first 'update' call, paste the previously copied + * intermediate results to the registers so that they get picked up. + * This is to support the import/export functionality. + */ + if (!(rctx->task & SHA_FIRST)) + tegra_sha_paste_hash_result(ctx->se, rctx); + size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx); - return tegra_se_host1x_submit(ctx->se, size); + ret = tegra_se_host1x_submit(ctx->se, size); + + /* + * If this is not the final update, copy the intermediate results + * from the registers so that they can be used in the next 'update' + * call. This is to support the import/export functionality.
+ */ + if (!(rctx->task & SHA_FINAL)) + tegra_sha_copy_hash_result(ctx->se, rctx); + + return ret; } static int tegra_sha_do_final(struct ahash_request *req) @@ -365,11 +398,11 @@ static int tegra_sha_do_final(struct ahash_request *req) out: dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + rctx->datbuf.buf, rctx->datbuf.addr); dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm), - rctx->residue.buf, rctx->residue.addr); + rctx->residue.buf, rctx->residue.addr); dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, - rctx->digest.addr); + rctx->digest.addr); return ret; } @@ -380,7 +413,7 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - int ret = -EINVAL; + int ret = 0; if (rctx->task & SHA_UPDATE) { ret = tegra_sha_do_update(req); @@ -394,50 +427,67 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) crypto_finalize_hash_request(se->engine, req, ret); - return ret; + return 0; } -static void tegra_sha_init_fallback(struct tegra_sha_ctx *ctx, const char *algname) +static void tegra_sha_init_fallback(struct crypto_ahash *tfm, struct tegra_sha_ctx *ctx, + const char *algname) { + unsigned int statesize; + ctx->fallback_tfm = crypto_alloc_ahash(algname, 0, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback_tfm)) { - dev_warn(ctx->se->dev, "failed to allocate fallback for %s %ld\n", - algname, PTR_ERR(ctx->fallback_tfm)); + dev_warn(ctx->se->dev, + "failed to allocate fallback for %s\n", algname); ctx->fallback_tfm = NULL; + return; } + + statesize = crypto_ahash_statesize(ctx->fallback_tfm); + + if (statesize > sizeof(struct tegra_sha_reqctx)) + crypto_hash_alg_common(tfm)->statesize = statesize; + + /* Update reqsize if fallback is added */ + crypto_ahash_set_reqsize(tfm, + sizeof(struct tegra_sha_reqctx) + + crypto_ahash_reqsize(ctx->fallback_tfm)); } static int tegra_sha_cra_init(struct crypto_tfm *tfm) { struct tegra_sha_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm); struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); struct tegra_se_alg *se_alg; const char *algname; + int ret; algname = crypto_tfm_alg_name(tfm); -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base); -#else se_alg = container_of(alg, struct tegra_se_alg, alg.ahash); -#endif - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct tegra_sha_reqctx)); + crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_sha_reqctx)); ctx->se = se_alg->se_dev; ctx->fallback = false; ctx->key_id = 0; - ctx->alg = se_algname_to_algid(algname); -#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.do_one_request = tegra_sha_do_one_req; - ctx->enginectx.op.unprepare_request = NULL; -#endif + + ret = se_algname_to_algid(algname); + if (ret < 0) { + dev_err(ctx->se->dev, "invalid algorithm\n"); + return ret; + } if (se_alg->alg_base) - tegra_sha_init_fallback(ctx, algname); + tegra_sha_init_fallback(ahash_tfm, ctx, algname); + + ctx->alg = ret; + + ctx->enginectx.op.prepare_request = NULL; + ctx->enginectx.op.unprepare_request = NULL; + ctx->enginectx.op.do_one_request = tegra_sha_do_one_req; return 0; } @@ -449,7 +499,7 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm) if (ctx->fallback_tfm) 
crypto_free_ahash(ctx->fallback_tfm); - tegra_key_invalidate(ctx->se, ctx->key_id); + tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); } static int tegra_sha_init(struct ahash_request *req) @@ -458,34 +508,31 @@ static int tegra_sha_init(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - const char *algname; if (ctx->fallback) return tegra_sha_fallback_init(req); - algname = crypto_tfm_alg_name(&tfm->base); - rctx->total_len = 0; rctx->datbuf.size = 0; rctx->residue.size = 0; rctx->key_id = ctx->key_id; rctx->task = SHA_FIRST; - rctx->alg = se_algname_to_algid(algname); + rctx->alg = ctx->alg; rctx->blk_size = crypto_ahash_blocksize(tfm); rctx->digest.size = crypto_ahash_digestsize(tfm); rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, - &rctx->digest.addr, GFP_KERNEL); + &rctx->digest.addr, GFP_KERNEL); if (!rctx->digest.buf) goto digbuf_fail; rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, - &rctx->residue.addr, GFP_KERNEL); + &rctx->residue.addr, GFP_KERNEL); if (!rctx->residue.buf) goto resbuf_fail; rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); + &rctx->datbuf.addr, GFP_KERNEL); if (!rctx->datbuf.buf) goto datbuf_fail; @@ -493,10 +540,10 @@ static int tegra_sha_init(struct ahash_request *req) datbuf_fail: dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); + rctx->residue.addr); resbuf_fail: dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf, - rctx->datbuf.addr); + rctx->datbuf.addr); digbuf_fail: return -ENOMEM; } @@ -505,7 +552,7 @@ static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key, unsigned int keylen) { if (!ctx->fallback_tfm) { - dev_err(ctx->se->dev, "invalid key length\n"); + dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; } @@ -593,9 +640,6 @@ static int tegra_sha_export(struct ahash_request *req, void *out) return tegra_sha_fallback_export(req, out); memcpy(out, rctx, sizeof(*rctx)); - /* - * TODO: Copy HASH_RESULT registers as well. 
- */ return 0; } @@ -617,9 +661,6 @@ static int tegra_sha_import(struct ahash_request *req, const void *in) static struct tegra_se_alg tegra_hash_algs[] = { { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -629,7 +670,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .import = tegra_sha_import, .halg.digestsize = SHA1_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "sha1", .cra_driver_name = "tegra-se-sha1", @@ -642,16 +682,9 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -661,7 +694,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .import = tegra_sha_import, .halg.digestsize = SHA224_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "sha224", .cra_driver_name = "tegra-se-sha224", @@ -674,16 +706,9 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -693,7 +718,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .import = tegra_sha_import, .halg.digestsize = SHA256_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "sha256", .cra_driver_name = "tegra-se-sha256", @@ -706,16 +730,9 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -725,7 +742,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .import = tegra_sha_import, .halg.digestsize = SHA384_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "sha384", .cra_driver_name = "tegra-se-sha384", @@ -738,16 +754,9 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -757,7 +766,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .import = tegra_sha_import, .halg.digestsize = SHA512_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "sha512", .cra_driver_name = "tegra-se-sha512", @@ -770,16 +778,9 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, 
-#endif } }, { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -789,7 +790,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .import = tegra_sha_import, .halg.digestsize = SHA3_224_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "sha3-224", .cra_driver_name = "tegra-se-sha3-224", @@ -802,16 +802,9 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -821,7 +814,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .import = tegra_sha_import, .halg.digestsize = SHA3_256_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "sha3-256", .cra_driver_name = "tegra-se-sha3-256", @@ -834,16 +826,9 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -853,7 +838,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .import = tegra_sha_import, .halg.digestsize = SHA3_384_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "sha3-384", .cra_driver_name = "tegra-se-sha3-384", @@ -866,16 +850,9 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -885,7 +862,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .import = tegra_sha_import, .halg.digestsize = SHA3_512_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "sha3-512", .cra_driver_name = "tegra-se-sha3-512", @@ -898,17 +874,10 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg_base = "sha224", .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -919,7 +888,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .setkey = tegra_hmac_setkey, .halg.digestsize = SHA224_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "hmac(sha224)", .cra_driver_name = "tegra-se-hmac-sha224", @@ -932,17 +900,10 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg_base = "sha256", .alg.ahash = { -#ifdef 
NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -953,7 +914,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .setkey = tegra_hmac_setkey, .halg.digestsize = SHA256_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "hmac(sha256)", .cra_driver_name = "tegra-se-hmac-sha256", @@ -966,17 +926,10 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg_base = "sha384", .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -987,7 +940,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .setkey = tegra_hmac_setkey, .halg.digestsize = SHA384_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "hmac(sha384)", .cra_driver_name = "tegra-se-hmac-sha384", @@ -1000,17 +952,10 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } }, { .alg_base = "sha512", .alg.ahash = { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - .base = { -#endif .init = tegra_sha_init, .update = tegra_sha_update, .final = tegra_sha_final, @@ -1021,7 +966,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .setkey = tegra_hmac_setkey, .halg.digestsize = SHA512_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_sha_reqctx), - .halg.base = { .cra_name = "hmac(sha512)", .cra_driver_name = "tegra-se-hmac-sha512", @@ -1034,10 +978,6 @@ static struct tegra_se_alg tegra_hash_algs[] = { .cra_init = tegra_sha_cra_init, .cra_exit = tegra_sha_cra_exit, } -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - }, - .op.do_one_request = tegra_sha_do_one_req, -#endif } } }; @@ -1077,42 +1017,36 @@ static int tegra_hash_kac_manifest(u32 user, u32 alg, u32 keylen) int tegra_init_hash(struct tegra_se *se) { + struct ahash_alg *alg; int i, ret; se->manifest = tegra_hash_kac_manifest; for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++) { + alg = &tegra_hash_algs[i].alg.ahash; tegra_hash_algs[i].se_dev = se; - ret = CRYPTO_REGISTER(ahash, &tegra_hash_algs[i].alg.ahash); + + ret = crypto_register_ahash(alg); if (ret) { -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX dev_err(se->dev, "failed to register %s\n", - tegra_hash_algs[i].alg.ahash.base.halg.base.cra_name); -#else - dev_err(se->dev, "failed to register %s\n", - tegra_hash_algs[i].alg.ahash.halg.base.cra_name); -#endif + alg->halg.base.cra_name); goto sha_err; } } - dev_info(se->dev, "registered HASH algorithms\n"); - return 0; sha_err: - for (--i; i >= 0; i--) - CRYPTO_UNREGISTER(ahash, &tegra_hash_algs[i].alg.ahash); + while (i--) + crypto_unregister_ahash(&tegra_hash_algs[i].alg.ahash); return ret; } - -void tegra_deinit_hash(void) +void tegra_deinit_hash(struct tegra_se *se) { int i; for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++) - CRYPTO_UNREGISTER(ahash, &tegra_hash_algs[i].alg.ahash); + crypto_unregister_ahash(&tegra_hash_algs[i].alg.ahash); } diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c index b15544c6..e3b97ec7 100644 --- a/drivers/crypto/tegra/tegra-se-key.c +++
b/drivers/crypto/tegra/tegra-se-key.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. /* * Crypto driver file to manage keys of NVIDIA Security Engine. */ @@ -16,66 +16,77 @@ #define SE_KEY_RSVD_MASK (BIT(0) | BIT(14) | BIT(15)) #define SE_KEY_VALID_MASK (SE_KEY_FULL_MASK & ~SE_KEY_RSVD_MASK) +/* Mutex lock to guard keyslots */ +static DEFINE_MUTEX(kslt_lock); + +/* Keyslot bitmask (0 = available, 1 = in use/not available) */ static u16 tegra_se_keyslots = SE_KEY_RSVD_MASK; static u16 tegra_keyslot_alloc(void) { u16 keyid; + mutex_lock(&kslt_lock); /* Check if all key slots are full */ - if (tegra_se_keyslots == GENMASK(SE_MAX_KEYSLOT, 0)) + if (tegra_se_keyslots == GENMASK(SE_MAX_KEYSLOT, 0)) { + mutex_unlock(&kslt_lock); return 0; + } keyid = ffz(tegra_se_keyslots); tegra_se_keyslots |= BIT(keyid); + mutex_unlock(&kslt_lock); + return keyid; } static void tegra_keyslot_free(u16 slot) { + mutex_lock(&kslt_lock); tegra_se_keyslots &= ~(BIT(slot)); + mutex_unlock(&kslt_lock); } static unsigned int tegra_key_prep_ins_cmd(struct tegra_se *se, u32 *cpuvaddr, - const u32 *key, u32 keylen, u16 slot, u32 alg) + const u32 *key, u32 keylen, u16 slot, u32 alg) { int i = 0, j; cpuvaddr[i++] = host1x_opcode_setpayload(1); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->op); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op); cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY; cpuvaddr[i++] = host1x_opcode_setpayload(1); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->manifest); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->manifest); cpuvaddr[i++] = se->manifest(se->owner, alg, keylen); cpuvaddr[i++] = host1x_opcode_setpayload(1); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->key_dst); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_dst); cpuvaddr[i++] = SE_AES_KEY_DST_INDEX(slot); for (j = 0; j < keylen / 4; j++) { /* Set key address */ cpuvaddr[i++] = host1x_opcode_setpayload(1); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->key_addr); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_addr); cpuvaddr[i++] = j; /* Set key data */ cpuvaddr[i++] = host1x_opcode_setpayload(1); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->key_data); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_data); cpuvaddr[i++] = key[j]; } cpuvaddr[i++] = host1x_opcode_setpayload(1); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->config); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->config); cpuvaddr[i++] = SE_CFG_INS; cpuvaddr[i++] = host1x_opcode_setpayload(1); - cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->op); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op); cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF; - cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); @@ -87,15 +98,21 @@ static unsigned int tegra_key_prep_ins_cmd(struct tegra_se *se, u32 *cpuvaddr, static bool tegra_key_in_kslt(u32 keyid) { + bool ret; + if (keyid > SE_MAX_KEYSLOT) return false; - return ((BIT(keyid) & SE_KEY_VALID_MASK) && + mutex_lock(&kslt_lock); + ret = ((BIT(keyid) & SE_KEY_VALID_MASK) && 
(BIT(keyid) & tegra_se_keyslots)); + mutex_unlock(&kslt_lock); + + return ret; } static int tegra_key_insert(struct tegra_se *se, const u8 *key, - u32 keylen, u16 slot, u32 alg) + u32 keylen, u16 slot, u32 alg) { const u32 *keyval = (u32 *)key; u32 *addr = se->cmdbuf->addr, size; @@ -105,11 +122,16 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key, return tegra_se_host1x_submit(se, size); } -void tegra_key_invalidate(struct tegra_se *se, u32 keyid) +void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) { + u8 zkey[AES_MAX_KEY_SIZE] = {0}; + if (!keyid) return; + /* Overwrite the key with 0s */ + tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg); + tegra_keyslot_free(keyid); } @@ -120,8 +142,10 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3 /* Use the existing slot if it is already allocated */ if (!tegra_key_in_kslt(*keyid)) { *keyid = tegra_keyslot_alloc(); - if (!(*keyid)) + if (!(*keyid)) { + dev_err(se->dev, "failed to allocate key slot\n"); return -ENOMEM; + } } ret = tegra_key_insert(se, key, keylen, *keyid, alg); diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c index 6bedb95e..c39dd66e 100644 --- a/drivers/crypto/tegra/tegra-se-main.c +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -1,12 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-only -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. /* * Crypto driver for NVIDIA Security Engine in Tegra Chips */ #include #include -#include #include #include #include @@ -124,7 +123,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi struct tegra_se_cmdbuf *cmdbuf; struct device *dev = se->dev->parent; - cmdbuf = kzalloc(sizeof(struct tegra_se_cmdbuf), GFP_KERNEL); + cmdbuf = kzalloc(sizeof(*cmdbuf), GFP_KERNEL); if (!cmdbuf) return NULL; @@ -153,7 +152,7 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size) return -ENOMEM; } - job->syncpt = host1x_syncpt_get(se->syncpt); + job->syncpt = host1x_syncpt_get(se->syncpt); job->syncpt_incrs = 1; job->client = &se->client; job->class = se->client.class; @@ -168,17 +167,17 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size) ret = host1x_job_pin(job, se->dev); if (ret) { dev_err(se->dev, "failed to pin host1x job\n"); - goto err_job_pin; + goto job_put; } ret = host1x_job_submit(job); if (ret) { dev_err(se->dev, "failed to submit host1x job\n"); - goto err_job_submit; + goto job_unpin; } ret = host1x_syncpt_wait(job->syncpt, job->syncpt_end, - MAX_SCHEDULE_TIMEOUT, NULL); + MAX_SCHEDULE_TIMEOUT, NULL); if (ret) { dev_err(se->dev, "host1x job timed out\n"); return ret; @@ -187,9 +186,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size) host1x_job_put(job); return 0; -err_job_submit: +job_unpin: host1x_job_unpin(job); -err_job_pin: +job_put: host1x_job_put(job); return ret; @@ -210,7 +209,7 @@ static int tegra_se_client_init(struct host1x_client *client) if (!se->syncpt) { dev_err(se->dev, "host1x syncpt allocation failed\n"); ret = -EINVAL; - goto err_syncpt; + goto channel_put; } se->syncpt_id = host1x_syncpt_id(se->syncpt); @@ -218,22 +217,22 @@ static int tegra_se_client_init(struct host1x_client *client) se->cmdbuf = tegra_se_host1x_bo_alloc(se, SZ_4K); if (!se->cmdbuf) { ret = -ENOMEM; - goto err_bo; + goto syncpt_put; } ret = se->hw->init_alg(se); if (ret) { dev_err(se->dev, 
"failed to register algorithms\n"); - goto err_alg_reg; + goto cmdbuf_put; } return 0; -err_alg_reg: +cmdbuf_put: tegra_se_cmdbuf_put(&se->cmdbuf->bo); -err_bo: +syncpt_put: host1x_syncpt_put(se->syncpt); -err_syncpt: +channel_put: host1x_channel_put(se->channel); return ret; @@ -243,7 +242,7 @@ static int tegra_se_client_deinit(struct host1x_client *client) { struct tegra_se *se = container_of(client, struct tegra_se, client); - se->hw->deinit_alg(); + se->hw->deinit_alg(se); tegra_se_cmdbuf_put(&se->cmdbuf->bo); host1x_syncpt_put(se->syncpt); host1x_channel_put(se->channel); @@ -256,7 +255,7 @@ static const struct host1x_client_ops tegra_se_client_ops = { .exit = tegra_se_client_deinit, }; -int tegra_se_host1x_register(struct tegra_se *se) +static int tegra_se_host1x_register(struct tegra_se *se) { INIT_LIST_HEAD(&se->client.list); se->client.dev = se->dev; @@ -269,38 +268,6 @@ int tegra_se_host1x_register(struct tegra_se *se) return 0; } -static int tegra_se_clk_init(struct tegra_se *se) -{ - int i, ret; - - se->num_clks = devm_clk_bulk_get_all(se->dev, &se->clks); - if (se->num_clks < 0) { - dev_err(se->dev, "failed to get clocks\n"); - return se->num_clks; - } - - for (i = 0; i < se->num_clks; i++) { - ret = clk_set_rate(se->clks[i].clk, ULONG_MAX); - if (ret) { - dev_err(se->dev, "failed to set %d clock rate", i); - return ret; - } - } - - ret = clk_bulk_prepare_enable(se->num_clks, se->clks); - if (ret) { - dev_err(se->dev, "failed to enable clocks\n"); - return ret; - } - - return 0; -} - -static void tegra_se_clk_deinit(struct tegra_se *se) -{ - clk_bulk_disable_unprepare(se->num_clks, se->clks); -} - static int tegra_se_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -312,61 +279,45 @@ static int tegra_se_probe(struct platform_device *pdev) return -ENOMEM; se->dev = dev; + se->owner = TEGRA_GPSE_ID; se->hw = device_get_match_data(&pdev->dev); se->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(se->base)) return PTR_ERR(se->base); - se->owner = TEGRA_GPSE_ID; - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39)); platform_set_drvdata(pdev, se); - ret = tegra_se_clk_init(se); - if (ret) { - dev_err(dev, "failed to init clocks\n"); - return ret; - } + se->clk = devm_clk_get_enabled(se->dev, NULL); + if (IS_ERR(se->clk)) + return dev_err_probe(dev, PTR_ERR(se->clk), + "failed to enable clocks\n"); - if (!tegra_dev_iommu_get_stream_id(dev, &se->stream_id)) { - dev_err(dev, "failed to get IOMMU stream ID\n"); - goto err_iommu_spec; - } + if (!tegra_dev_iommu_get_stream_id(dev, &se->stream_id)) + return dev_err_probe(dev, -ENODEV, + "failed to get IOMMU stream ID\n"); - se_writel(se, se->stream_id, SE_STREAM_ID); + writel(se->stream_id, se->base + SE_STREAM_ID); se->engine = crypto_engine_alloc_init(dev, 0); - if (!se->engine) { - dev_err(dev, "failed to init crypto engine\n"); - ret = -ENOMEM; - goto err_engine_alloc; - } + if (!se->engine) + return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n"); ret = crypto_engine_start(se->engine); if (ret) { - dev_err(dev, "failed to start crypto engine\n"); - goto err_engine_start; + crypto_engine_exit(se->engine); + return dev_err_probe(dev, ret, "failed to start crypto engine\n"); } ret = tegra_se_host1x_register(se); if (ret) { - dev_err(dev, "failed to init host1x params\n"); - goto err_host1x_init; + crypto_engine_stop(se->engine); + crypto_engine_exit(se->engine); + return dev_err_probe(dev, ret, "failed to init host1x params\n"); } return 0; - -err_host1x_init: - 
crypto_engine_stop(se->engine); -err_engine_start: - crypto_engine_exit(se->engine); -err_engine_alloc: - iommu_fwspec_free(se->dev); -err_iommu_spec: - tegra_se_clk_deinit(se); - - return ret; } static int tegra_se_remove(struct platform_device *pdev) @@ -377,7 +328,6 @@ static int tegra_se_remove(struct platform_device *pdev) crypto_engine_exit(se->engine); iommu_fwspec_free(se->dev); host1x_client_unregister(&se->client); - tegra_se_clk_deinit(se); return 0; } @@ -424,10 +374,10 @@ static const struct tegra_se_hw tegra234_hash_hw = { static const struct of_device_id tegra_se_of_match[] = { { - .compatible = "nvidia,tegra234-se2-aes", + .compatible = "nvidia,tegra234-se-aes", .data = &tegra234_aes_hw }, { - .compatible = "nvidia,tegra234-se4-hash", + .compatible = "nvidia,tegra234-se-hash", .data = &tegra234_hash_hw, }, { }, @@ -486,4 +436,4 @@ module_exit(tegra_se_module_exit); MODULE_DESCRIPTION("NVIDIA Tegra Security Engine Driver"); MODULE_AUTHOR("Akhil R "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index 15a34d19..cec31d3f 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -7,22 +7,18 @@ #ifndef _TEGRA_SE_H #define _TEGRA_SE_H -#include +#include + +#include #include #include -#include #include +#include #include #include #include -#include #include -#include -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX -#include -#endif -#define SE_MAX_INSTANCES 3 #define SE_OWNERSHIP 0x14 #define SE_OWNERSHIP_UID(x) FIELD_GET(GENMASK(7, 0), x) #define TEGRA_GPSE_ID 3 @@ -67,11 +63,8 @@ #define SE_SHA_ENC_ALG_HMAC SE_SHA_CFG_ENC_ALG(7) #define SE_SHA_ENC_ALG_KDF SE_SHA_CFG_ENC_ALG(8) #define SE_SHA_ENC_ALG_KEY_INVLD SE_SHA_CFG_ENC_ALG(10) -#define SE_SHA_ENC_ALG_KEY_MOV SE_SHA_CFG_ENC_ALG(11) #define SE_SHA_ENC_ALG_KEY_INQUIRE SE_SHA_CFG_ENC_ALG(12) #define SE_SHA_ENC_ALG_INS SE_SHA_CFG_ENC_ALG(13) -#define SE_SHA_ENC_ALG_CLONE SE_SHA_CFG_ENC_ALG(14) -#define SE_SHA_ENC_ALG_LOCK SE_SHA_CFG_ENC_ALG(15) #define SE_SHA_OP_LASTBUF FIELD_PREP(BIT(16), 1) #define SE_SHA_OP_WRSTALL FIELD_PREP(BIT(15), 1) @@ -311,12 +304,6 @@ SE_AES_CORE_SEL_DECRYPT | \ SE_AES_IV_SEL_REG) -#define SE_CRYPTO_CFG_OFB (SE_AES_INPUT_SEL_AESOUT | \ - SE_AES_VCTRAM_SEL_MEMORY | \ - SE_AES_XOR_POS_BOTTOM | \ - SE_AES_CORE_SEL_ENCRYPT | \ - SE_AES_IV_SEL_REG) - #define SE_CRYPTO_CFG_CTR (SE_AES_INPUT_SEL_LINEAR_CTR | \ SE_AES_VCTRAM_SEL_MEMORY | \ SE_AES_XOR_POS_BOTTOM | \ @@ -356,34 +343,17 @@ #define SE_MAX_KEYSLOT 15 #define SE_MAX_MEM_ALLOC SZ_4M #define SE_AES_BUFLEN 0x8000 -#define SE_SHA_BUFLEN SZ_4M +#define SE_SHA_BUFLEN 0x2000 #define SHA_FIRST BIT(0) #define SHA_UPDATE BIT(1) #define SHA_FINAL BIT(2) -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX -#define CRYPTO_REGISTER(alg, x) \ - crypto_engine_register_##alg(x) -#else -#define CRYPTO_REGISTER(alg, x) \ - crypto_register_##alg(x) -#endif - -#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX -#define CRYPTO_UNREGISTER(alg, x) \ - crypto_engine_unregister_##alg(x) -#else -#define CRYPTO_UNREGISTER(alg, x) \ - crypto_unregister_##alg(x) -#endif - /* Security Engine operation modes */ enum se_aes_alg { SE_ALG_CBC, /* Cipher Block Chaining (CBC) mode */ SE_ALG_ECB, /* Electronic Codebook (ECB) mode */ SE_ALG_CTR, /* Counter (CTR) mode */ - SE_ALG_OFB, /* Output feedback (CFB) mode */ SE_ALG_XTS, /* XTS mode */ SE_ALG_GMAC, /* GMAC mode */ SE_ALG_GCM, /* GCM mode */ @@ -416,15 +386,9 @@ struct tegra_se_alg { const char *alg_base; union { 
-#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX - struct skcipher_alg skcipher; - struct aead_alg aead; - struct ahash_alg ahash; -#else - struct skcipher_engine_alg skcipher; - struct aead_engine_alg aead; - struct ahash_engine_alg ahash; -#endif + struct skcipher_alg skcipher; + struct aead_alg aead; + struct ahash_alg ahash; } alg; }; @@ -446,7 +410,7 @@ struct tegra_se_regs { struct tegra_se_hw { const struct tegra_se_regs *regs; int (*init_alg)(struct tegra_se *se); - void (*deinit_alg)(void); + void (*deinit_alg)(struct tegra_se *se); bool support_sm_alg; u32 host1x_class; u32 kac_ver; @@ -455,18 +419,17 @@ struct tegra_se_hw { struct tegra_se { int (*manifest)(u32 user, u32 alg, u32 keylen); const struct tegra_se_hw *hw; - struct crypto_engine *engine; - struct host1x_channel *channel; struct host1x_client client; - struct host1x_syncpt *syncpt; - struct clk_bulk_data *clks; + struct host1x_channel *channel; struct tegra_se_cmdbuf *cmdbuf; + struct crypto_engine *engine; + struct host1x_syncpt *syncpt; struct device *dev; + struct clk *clk; unsigned int opcode_addr; unsigned int stream_id; unsigned int syncpt_id; void __iomem *base; - int num_clks; u32 owner; }; @@ -476,8 +439,8 @@ struct tegra_se_cmdbuf { struct device *dev; struct kref ref; struct host1x_bo bo; - u32 words; ssize_t size; + u32 words; }; struct tegra_se_datbuf { @@ -492,8 +455,6 @@ static inline int se_algname_to_algid(const char *name) return SE_ALG_CBC; else if (!strcmp(name, "ecb(aes)")) return SE_ALG_ECB; - else if (!strcmp(name, "ofb(aes)")) - return SE_ALG_OFB; else if (!strcmp(name, "ctr(aes)")) return SE_ALG_CTR; else if (!strcmp(name, "xts(aes)")) @@ -536,87 +497,61 @@ static inline int se_algname_to_algid(const char *name) } /* Functions */ -int tegra_init_aead(struct tegra_se *se); int tegra_init_aes(struct tegra_se *se); int tegra_init_hash(struct tegra_se *se); -void tegra_deinit_aead(void); -void tegra_deinit_aes(void); -void tegra_deinit_hash(void); - -int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid); -unsigned int tegra_key_get_idx(struct tegra_se *se, u32 keyid); -void tegra_key_invalidate(struct tegra_se *se, u32 keyid); - -int tegra_se_host1x_register(struct tegra_se *se); +void tegra_deinit_aes(struct tegra_se *se); +void tegra_deinit_hash(struct tegra_se *se); +int tegra_key_submit(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid); +void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); int tegra_se_host1x_submit(struct tegra_se *se, u32 size); -static inline void se_writel(struct tegra_se *se, unsigned int val, - unsigned int offset) -{ - writel_relaxed(val, se->base + offset); -} - -static inline u32 se_readl(struct tegra_se *se, unsigned int offset) -{ - return readl_relaxed(se->base + offset); -} - - -/**** - * - * HOST1x OPCODES - * - ****/ - +/* HOST1x OPCODES */ static inline u32 host1x_opcode_setpayload(unsigned int payload) { return (9 << 28) | payload; } -#define host1x_opcode_incr_w(x) __host1x_opcode_incr_w((x) / 4) -static inline u32 __host1x_opcode_incr_w(unsigned int offset) +static inline u32 host1x_opcode_incr_w(unsigned int offset) { /* 22-bit offset supported */ return (10 << 28) | offset; } -#define host1x_opcode_nonincr_w(x) __host1x_opcode_nonincr_w((x) / 4) -static inline u32 __host1x_opcode_nonincr_w(unsigned int offset) +static inline u32 host1x_opcode_nonincr_w(unsigned int offset) { /* 22-bit offset supported */ return (11 << 28) | offset; } -#define host1x_opcode_incr(x, y) 
__host1x_opcode_incr((x) / 4, y) -static inline u32 __host1x_opcode_incr(unsigned int offset, unsigned int count) +static inline u32 host1x_opcode_incr(unsigned int offset, unsigned int count) { - return (1 << 28) | (offset << 16) | count; + return (1 << 28) | (offset << 16) | count; } -#define host1x_opcode_nonincr(x, y) __host1x_opcode_nonincr((x) / 4, y) -static inline u32 __host1x_opcode_nonincr(unsigned int offset, unsigned int count) +static inline u32 host1x_opcode_nonincr(unsigned int offset, unsigned int count) { - return (2 << 28) | (offset << 16) | count; + return (2 << 28) | (offset << 16) | count; } static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v) { - return (v & 0xff) << 10; + return (v & 0xff) << 10; } static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v) { - return (v & 0x3ff) << 0; + return (v & 0x3ff) << 0; } static inline u32 host1x_uclass_wait_syncpt_r(void) { - return 0x8; + return 0x8; } static inline u32 host1x_uclass_incr_syncpt_r(void) { - return 0x0; + return 0x0; } #if !defined(NV_TEGRA_DEV_IOMMU_GET_STREAM_ID_PRESENT) @@ -635,4 +570,9 @@ static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream } #endif +#define se_host1x_opcode_incr_w(x) host1x_opcode_incr_w((x) / 4) +#define se_host1x_opcode_nonincr_w(x) host1x_opcode_nonincr_w((x) / 4) +#define se_host1x_opcode_incr(x, y) host1x_opcode_incr((x) / 4, y) +#define se_host1x_opcode_nonincr(x, y) host1x_opcode_nonincr((x) / 4, y) + #endif /*_TEGRA_SE_H*/
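A note on the host1x opcode helpers at the end of tegra-se.h: each command word packs a 4-bit opcode into bits 31:28 and a word-granular register offset below it. The se_host1x_opcode_* wrappers exist because the SE register defines are byte offsets while host1x expects word offsets, hence the division by 4. A standalone sketch of the packing (userspace C; the SE_SHA_CFG byte offset used here is hypothetical, the real value comes from the hardware headers):

#include <stdint.h>
#include <stdio.h>

/* Same packing as host1x_opcode_incr_w() in tegra-se.h */
static uint32_t host1x_opcode_incr_w(unsigned int offset)
{
	/* INCR_W opcode (10) in bits 31:28, 22-bit word offset below */
	return (10u << 28) | offset;
}

/* SE register defines are byte offsets; host1x takes word offsets */
#define se_host1x_opcode_incr_w(x) host1x_opcode_incr_w((x) / 4)

int main(void)
{
	unsigned int se_sha_cfg = 0x104;	/* hypothetical byte offset */

	/* 0x104 / 4 = 0x41, so this prints 0xa0000041 */
	printf("%#010x\n", se_host1x_opcode_incr_w(se_sha_cfg));
	return 0;
}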
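The keyslot handling in tegra-se-key.c is a 16-slot bitmask allocator: reserved slots are pre-set in the mask so they can never be handed out, ffz() returns the first zero (free) bit, and kslt_lock serializes concurrent setkey and invalidate callers. A minimal userspace model of that logic, with ffz() replaced by a portable loop and the locking omitted (constants copied from the header):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define SE_KEY_RSVD_MASK	(BIT(0) | BIT(14) | BIT(15))
#define SE_KEY_FULL_MASK	0xffffu	/* GENMASK(SE_MAX_KEYSLOT, 0) */

/* 0 = available, 1 = in use; reserved slots start out as "in use" */
static uint16_t keyslots = SE_KEY_RSVD_MASK;

/* Returns a free slot index, or 0 if none (slot 0 is reserved) */
static unsigned int keyslot_alloc(void)
{
	unsigned int id;

	if (keyslots == SE_KEY_FULL_MASK)
		return 0;

	for (id = 0; keyslots & BIT(id); id++)
		;	/* portable stand-in for ffz() */

	keyslots |= BIT(id);
	return id;
}

int main(void)
{
	/* Reserved slot 0 is skipped, so this prints "1 2" */
	printf("%u %u\n", keyslot_alloc(), keyslot_alloc());
	return 0;
}

Returning 0 doubles as the error value precisely because slot 0 is reserved, which is also why tegra_key_invalidate() treats keyid 0 as a no-op.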
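The registration error paths in tegra_init_aes() and tegra_init_hash() use the "while (i--)" unwind idiom: when registration fails at index i, the loop unregisters entries i-1 down to 0, and the fall-through labels reset i to the full size of the previous array so each earlier stage unwinds completely. A small sketch of the same shape (all names hypothetical):

#include <stdio.h>

#define N 3

static int register_one(int i)    { return i == 2 ? -1 : 0; } /* fail at 2 */
static void unregister_one(int i) { printf("unregister %d\n", i); }

static int init_all(void)
{
	int i, ret;

	for (i = 0; i < N; i++) {
		ret = register_one(i);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Unwind only the entries that actually registered: i-1 .. 0 */
	while (i--)
		unregister_one(i);

	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;	/* prints "unregister 1", then "unregister 0" */
}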
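Once the driver is bound, the registered algorithms can be exercised from userspace through the kernel's AF_ALG socket interface (this needs CONFIG_CRYPTO_USER_API_HASH); whether a given request is serviced by this driver or by a software implementation depends on which registration wins on cra_priority. A minimal one-shot hash example, error handling omitted for brevity:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",	/* "tegra-se-sha256" if it has priority */
	};
	unsigned char digest[32];	/* SHA256_DIGEST_SIZE */
	int tfm, op, i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, NULL);

	send(op, "abc", 3, 0);		/* whole message in one shot */
	read(op, digest, sizeof(digest));

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(op);
	close(tfm);
	return 0;
}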