From a29b51cd583078197c3cf8633122ffe9d8304fc6 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Wed, 17 Jul 2024 18:41:20 +0530 Subject: [PATCH] crypto: tegra: Add Tegra SE driver for T264 Add Tegra Security Engine driver which supports AES-ECB/CBC/CTR/XTS SHA1/SHA2/SHA3 AES-GCM, AES CCM, SM4, SM3 algorithms. Signed-off-by: Akhil R Change-Id: I86be2fcc485c31988496395183cb44a386907668 --- drivers/crypto/tegra/Makefile | 1 + drivers/crypto/tegra/tegra-se-aes.c | 466 +++++-- drivers/crypto/tegra/tegra-se-hash.c | 141 ++- drivers/crypto/tegra/tegra-se-kds.c | 236 +++- drivers/crypto/tegra/tegra-se-key.c | 166 ++- drivers/crypto/tegra/tegra-se-main.c | 98 ++ drivers/crypto/tegra/tegra-se-sm4.c | 1701 ++++++++++++++++++++++++++ drivers/crypto/tegra/tegra-se.h | 276 ++++- 8 files changed, 2966 insertions(+), 119 deletions(-) create mode 100644 drivers/crypto/tegra/tegra-se-sm4.c diff --git a/drivers/crypto/tegra/Makefile b/drivers/crypto/tegra/Makefile index cc8083e7..ae882816 100644 --- a/drivers/crypto/tegra/Makefile +++ b/drivers/crypto/tegra/Makefile @@ -7,6 +7,7 @@ tegra-se-objs := tegra-se-key.o tegra-se-main.o tegra-se-y += tegra-se-aes.o tegra-se-y += tegra-se-hash.o +tegra-se-y += tegra-se-sm4.o obj-m += tegra-se.o obj-m += tegra-se-kds.o diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index a7c972bd..58761d3a 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -42,6 +42,8 @@ struct tegra_aes_reqctx { u32 crypto_config; u32 len; u32 *iv; + u32 key1_id; + u32 key2_id; }; struct tegra_aead_ctx { @@ -51,6 +53,9 @@ struct tegra_aead_ctx { struct tegra_se *se; unsigned int authsize; u32 alg; + u32 mac_alg; + u32 final_alg; + u32 verify_alg; u32 keylen; u32 key_id; }; @@ -76,7 +81,8 @@ struct tegra_cmac_ctx { struct crypto_engine_ctx enginectx; #endif struct tegra_se *se; - unsigned int alg; + u32 alg; + u32 final_alg; u32 key_id; struct crypto_shash *fallback_tfm; }; @@ -84,6 +90,7 @@ struct tegra_cmac_ctx { struct tegra_cmac_reqctx { struct scatterlist *src_sg; struct tegra_se_datbuf datbuf; + struct tegra_se_datbuf digest; struct tegra_se_datbuf residue; unsigned int total_len; unsigned int blk_size; @@ -138,6 +145,7 @@ static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt) { switch (alg) { case SE_ALG_CMAC: + case SE_ALG_CMAC_FINAL: case SE_ALG_GMAC: case SE_ALG_GCM: case SE_ALG_GCM_FINAL: @@ -201,12 +209,16 @@ static int tegra234_aes_cfg(u32 alg, bool encrypt) return SE_CFG_GCM_FINAL_DECRYPT; case SE_ALG_CMAC: + return SE_CFG_CMAC | SE_AES_DST_HASH_REG; + + case SE_ALG_CMAC_FINAL: return SE_CFG_CMAC; case SE_ALG_CBC_MAC: return SE_AES_ENC_ALG_AES_ENC | SE_AES_DST_HASH_REG; } + return -EINVAL; } @@ -272,12 +284,32 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) unsigned int cmdlen; int ret; + /* Keys in ctx might be stored in KDS. 
Copy it to request ctx */ + rctx->key1_id = tegra_key_get_idx(ctx->se, ctx->key1_id); + if (!rctx->key1_id) { + ret = -ENOMEM; + goto out; + } + + rctx->key2_id = 0; + + /* If there are 2 keys stored (for XTS), retrieve them both */ + if (ctx->key2_id) { + rctx->key2_id = tegra_key_get_idx(ctx->se, ctx->key2_id); + if (!rctx->key2_id) { + ret = -ENOMEM; + goto key1_free; + } + } + /* Set buffer size as a multiple of AES_BLOCK_SIZE*/ rctx->datbuf.size = ((req->cryptlen / AES_BLOCK_SIZE) + 1) * AES_BLOCK_SIZE; rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - return -ENOMEM; + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto key2_free; + } rctx->iv = (u32 *)req->iv; rctx->len = req->cryptlen; @@ -290,6 +322,11 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0); + /* Update crypto_config with Local KSLT IDs */ + rctx->crypto_config |= SE_AES_KEY_INDEX(rctx->key1_id); + if (rctx->key2_id) + rctx->crypto_config |= SE_AES_KEY2_INDEX(rctx->key2_id); + /* Prepare the command and submit for execution */ cmdlen = tegra_aes_prep_cmd(se, rctx); ret = tegra_se_host1x_submit(se, cmdlen); @@ -302,6 +339,13 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) dma_free_coherent(ctx->se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr); +key2_free: + if (rctx->key2_id != ctx->key2_id) + tegra_key_invalidate(ctx->se, rctx->key2_id, ctx->alg); +key1_free: + if (rctx->key1_id != ctx->key1_id) + tegra_key_invalidate(ctx->se, rctx->key1_id, ctx->alg); +out: crypto_finalize_skcipher_request(se->engine, req, ret); return 0; @@ -439,12 +483,147 @@ static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen) return manifest; } -static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) +static int tegra_aes_kac2_manifest(u32 user, u32 alg, u32 keylen) +{ + int manifest; + manifest = SE_KAC2_USER(user) | SE_KAC2_ORIGIN_SW; + manifest |= SE_KAC2_DECRYPT_EN | SE_KAC2_ENCRYPT_EN; + manifest |= SE_KAC2_TYPE_SYM | SE_KAC2_SUBTYPE_AES; + + switch (alg) { + case SE_ALG_CBC: + case SE_ALG_ECB: + case SE_ALG_CTR: + manifest |= SE_KAC2_ENC; + break; + case SE_ALG_XTS: + manifest |= SE_KAC2_XTS; + break; + case SE_ALG_GCM: + manifest |= SE_KAC2_GCM; + break; + case SE_ALG_CMAC: + manifest |= SE_KAC2_CMAC; + break; + case SE_ALG_CBC_MAC: + manifest |= SE_KAC2_ENC; + break; + + default: + return -EINVAL; + } + + switch (keylen) { + case AES_KEYSIZE_128: + manifest |= SE_KAC2_SIZE_128; + break; + case AES_KEYSIZE_192: + manifest |= SE_KAC2_SIZE_192; + break; + case AES_KEYSIZE_256: + manifest |= SE_KAC2_SIZE_256; + break; + default: + return -EINVAL; + } + + return manifest; +} + +static inline int tegra264_aes_crypto_cfg(u32 alg, bool encrypt) +{ + u32 cfg = SE_AES_CRYPTO_CFG_SCC_DIS; + + switch (alg) { + case SE_ALG_ECB: + case SE_ALG_SM4_ECB: + case SE_ALG_CMAC: + case SE_ALG_GMAC: + break; + case SE_ALG_CTR: + cfg |= SE_AES_IV_SEL_REG | + SE_AES_CRYPTO_CFG_CTR_CNTN(1); + break; + case SE_ALG_CBC: + case SE_ALG_CBC_MAC: + case SE_ALG_XTS: + case SE_ALG_GCM: + case SE_ALG_GCM_FINAL: + case SE_ALG_GCM_VERIFY: + cfg |= SE_AES_IV_SEL_REG; + break; + default: + return -EINVAL; + } + + return cfg; +} + +static int tegra264_aes_cfg(u32 alg, bool encrypt) +{ + switch (alg) { + case SE_ALG_CBC: + if (encrypt) + return SE_CFG_CBC_ENCRYPT; + else + return SE_CFG_CBC_DECRYPT; + case SE_ALG_ECB: + if (encrypt) + 
return SE_CFG_ECB_ENCRYPT; + else + return SE_CFG_ECB_DECRYPT; + case SE_ALG_CTR: + if (encrypt) + return SE_CFG_CTR_ENCRYPT; + else + return SE_CFG_CTR_DECRYPT; + case SE_ALG_XTS: + if (encrypt) + return SE_CFG_XTS_ENCRYPT; + else + return SE_CFG_XTS_DECRYPT; + case SE_ALG_GMAC: + if (encrypt) + return SE_CFG_GMAC_ENCRYPT; + else + return SE_CFG_GMAC_DECRYPT; + + case SE_ALG_GCM: + if (encrypt) + return SE_CFG_GCM_ENCRYPT; + else + return SE_CFG_GCM_DECRYPT; + + case SE_ALG_GCM_FINAL: + if (encrypt) + return SE_CFG_GCM_FINAL_ENCRYPT; + else + return SE_CFG_GCM_FINAL_DECRYPT; + + case SE_ALG_GCM_VERIFY: + return SE_CFG_GCM_VERIFY; + + case SE_ALG_CMAC: + return SE_CFG_CMAC | SE_AES_DST_KEYTABLE; + + case SE_ALG_CMAC_FINAL: + return SE_CFG_CMAC; + + case SE_ALG_CBC_MAC: + return SE_CFG_CBC_MAC | SE_AES_DST_HASH_REG; + + } + + return -EINVAL; +} + +static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req); + struct tegra_se *se = ctx->se; if (ctx->alg != SE_ALG_XTS) { if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) { @@ -460,12 +639,8 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) return 0; rctx->encrypt = encrypt; - rctx->config = tegra234_aes_cfg(ctx->alg, encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt); - rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id); - - if (ctx->key2_id) - rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id); + rctx->config = se->regcfg->cfg(ctx->alg, encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->alg, encrypt); return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); } @@ -593,10 +768,29 @@ static struct tegra_se_alg tegra_aes_algs[] = { } }, }; +struct tegra_se_regcfg tegra234_aes_regcfg = { + .cfg = tegra234_aes_cfg, + .crypto_cfg = tegra234_aes_crypto_cfg, + .manifest = tegra_aes_kac_manifest, +}; + +struct tegra_se_regcfg tegra264_aes_regcfg = { + .cfg = tegra264_aes_cfg, + .crypto_cfg = tegra264_aes_crypto_cfg, + .manifest = tegra_aes_kac2_manifest +}; + +static void tegra_aes_set_regcfg(struct tegra_se *se) +{ + if (se->hw->kac_ver > 1) + se->regcfg = &tegra264_aes_regcfg; + else + se->regcfg = &tegra234_aes_regcfg; +} static unsigned int tegra_gmac_prep_cmd(struct tegra_se *se, struct tegra_aead_reqctx *rctx) { - unsigned int data_count, res_bits, i = 0; + unsigned int data_count, res_bits, i = 0, j; u32 *cpuvaddr = se->cmdbuf->addr; data_count = (rctx->assoclen / AES_BLOCK_SIZE); @@ -609,6 +803,11 @@ static unsigned int tegra_gmac_prep_cmd(struct tegra_se *se, struct tegra_aead_r if (!res_bits) data_count--; + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = rctx->iv[j]; + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | SE_LAST_BLOCK_RES_BITS(res_bits); @@ -616,6 +815,7 @@ static unsigned int tegra_gmac_prep_cmd(struct tegra_se *se, struct tegra_aead_r cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4); cpuvaddr[i++] = rctx->config; cpuvaddr[i++] = rctx->crypto_config; + cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) | SE_ADDR_HI_SZ(rctx->assoclen); @@ -629,6 +829,8 @@ 
static unsigned int tegra_gmac_prep_cmd(struct tegra_se *se, struct tegra_aead_r cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); + return i; } @@ -687,6 +889,7 @@ static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_se *se, struct tegra_a host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); + return i; } @@ -721,8 +924,9 @@ static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); cpuvaddr[i++] = rctx->config; cpuvaddr[i++] = rctx->crypto_config; - cpuvaddr[i++] = 0; - cpuvaddr[i++] = 0; + cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | + SE_ADDR_HI_SZ(rctx->authsize); /* Destination Address */ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr); @@ -749,9 +953,9 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg, 0, rctx->assoclen, 0); - rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + rctx->config = se->regcfg->cfg(ctx->mac_alg, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->mac_alg, rctx->encrypt) | + SE_AES_KEY_INDEX(rctx->key_id); cmdlen = tegra_gmac_prep_cmd(se, rctx); @@ -766,9 +970,9 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg, rctx->assoclen, rctx->cryptlen, 0); - rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + rctx->config = se->regcfg->cfg(ctx->alg, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->alg, rctx->encrypt) | + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_gcm_crypt_prep_cmd(se, rctx); @@ -789,9 +993,9 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc u32 *cpuvaddr = se->cmdbuf->addr; int cmdlen, ret, offset; - rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + rctx->config = se->regcfg->cfg(ctx->final_alg, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->final_alg, rctx->encrypt) | + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); @@ -809,15 +1013,48 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc return 0; } -static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx) +static int tegra_gcm_hw_verify(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx, u8 *mac) { - unsigned int offset; + struct tegra_se *se = ctx->se; + u32 result, *cpuvaddr = se->cmdbuf->addr; + int size, ret; + + memcpy(rctx->inbuf.buf, mac, rctx->authsize); + rctx->inbuf.size = rctx->authsize; + + rctx->config = se->regcfg->cfg(ctx->verify_alg, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->verify_alg, rctx->encrypt) | + SE_AES_KEY_INDEX(rctx->key_id); + + /* Prepare command and submit */ + size = 
tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, size); + if (ret) + return ret; + + memcpy(&result, rctx->outbuf.buf, 4); + + if (result != SE_GCM_VERIFY_OK) + return -EBADMSG; + + return 0; +} + +static int tegra_gcm_do_verify(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + int offset, ret; u8 mac[16]; offset = rctx->assoclen + rctx->cryptlen; scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0); - if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize)) + if (se->hw->support_aad_verify) + ret = tegra_gcm_hw_verify(ctx, rctx, mac); + else + ret = crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize); + + if (ret) return -EBADMSG; return 0; @@ -860,13 +1097,14 @@ static unsigned int tegra_cbcmac_prep_cmd(struct tegra_se *se, struct tegra_aead SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); - cpuvaddr[i++] = SE_AES_OP_WRSTALL | - SE_AES_OP_LASTBUF | SE_AES_OP_START; + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF | SE_AES_OP_START; cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); + return i; } @@ -910,15 +1148,13 @@ static unsigned int tegra_ctr_prep_cmd(struct tegra_se *se, struct tegra_aead_re return i; } -static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +static int tegra_ccm_do_cbcmac(struct tegra_se *se, struct tegra_aead_reqctx *rctx) { - struct tegra_se *se = ctx->se; int cmdlen; - rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC, - rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + rctx->config = se->regcfg->cfg(SE_ALG_CBC_MAC, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(SE_ALG_CBC_MAC, + rctx->encrypt) | SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_cbcmac_prep_cmd(se, rctx); @@ -1068,9 +1304,8 @@ static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *r return 0; } -static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +static int tegra_ccm_compute_auth(struct tegra_se *se, struct tegra_aead_reqctx *rctx) { - struct tegra_se *se = ctx->se; struct scatterlist *sg; int offset, ret; @@ -1089,23 +1324,22 @@ static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_ rctx->inbuf.size = offset; - ret = tegra_ccm_do_cbcmac(ctx, rctx); + ret = tegra_ccm_do_cbcmac(se, rctx); if (ret) return ret; return tegra_ccm_mac_result(se, rctx); } -static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) +static int tegra_ccm_do_ctr(struct tegra_se *se, struct tegra_aead_reqctx *rctx) { - struct tegra_se *se = ctx->se; unsigned int cmdlen, offset = 0; struct scatterlist *sg = rctx->src_sg; int ret; - rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + rctx->config = se->regcfg->cfg(SE_ALG_CTR, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(SE_ALG_CTR, rctx->encrypt) | + SE_AES_KEY_INDEX(rctx->key_id); /* Copy authdata in the top of buffer for encryption/decryption */ if (rctx->encrypt) @@ -1181,19 
+1415,26 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) else rctx->cryptlen = req->cryptlen - ctx->authsize; + /* Keys in ctx might be stored in KDS. Copy it to local keyslot */ + rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id); + if (!rctx->key_id) + goto out; + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; /* Allocate buffers required */ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, &rctx->inbuf.addr, GFP_KERNEL); - if (!rctx->inbuf.buf) - return -ENOMEM; + if (!rctx->inbuf.buf) { + ret = -ENOMEM; + goto key_free; + } rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, &rctx->outbuf.addr, GFP_KERNEL); if (!rctx->outbuf.buf) { ret = -ENOMEM; - goto outbuf_err; + goto inbuf_free; } ret = tegra_ccm_crypt_init(req, se, rctx); @@ -1203,34 +1444,38 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) if (rctx->encrypt) { /* CBC MAC Operation */ - ret = tegra_ccm_compute_auth(ctx, rctx); + ret = tegra_ccm_compute_auth(se, rctx); if (ret) - goto out; + goto outbuf_free; /* CTR operation */ - ret = tegra_ccm_do_ctr(ctx, rctx); + ret = tegra_ccm_do_ctr(se, rctx); if (ret) - goto out; + goto outbuf_free; } else { /* CTR operation */ - ret = tegra_ccm_do_ctr(ctx, rctx); + ret = tegra_ccm_do_ctr(se, rctx); if (ret) - goto out; + goto outbuf_free; /* CBC MAC Operation */ - ret = tegra_ccm_compute_auth(ctx, rctx); + ret = tegra_ccm_compute_auth(se, rctx); if (ret) - goto out; + goto outbuf_free; } -out: +outbuf_free: dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); -outbuf_err: +inbuf_free: dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); - +key_free: + /* Free the keyslot if it is cloned for this request */ + if (rctx->key_id != ctx->key_id) + tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg); +out: crypto_finalize_aead_request(ctx->se->engine, req, ret); return 0; @@ -1242,6 +1487,7 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); struct tegra_aead_reqctx *rctx = aead_request_ctx(req); + struct tegra_se *se = ctx->se; int ret; rctx->src_sg = req->src; @@ -1254,6 +1500,12 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) else rctx->cryptlen = req->cryptlen - ctx->authsize; + + /* Keys in ctx might be stored in KDS. 
Copy it to local keyslot */ + rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id); + if (!rctx->key_id) + goto key_err; + /* Allocate buffers required */ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, @@ -1288,17 +1540,22 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) } /* GCM_FINAL operation */ - ret = tegra_gcm_do_final(ctx, rctx); - if (ret) - goto out; + /* Need not do FINAL operation if hw supports MAC verification */ + if (rctx->encrypt || !se->hw->support_aad_verify) { + ret = tegra_gcm_do_final(ctx, rctx); + if (ret) + goto out; + } if (!rctx->encrypt) - ret = tegra_gcm_do_verify(ctx->se, rctx); + ret = tegra_gcm_do_verify(ctx, rctx); out: + if (rctx->key_id != ctx->key_id) + tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg); +key_err: dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); - outbuf_err: dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); @@ -1352,10 +1609,6 @@ static int tegra_gcm_cra_init(struct crypto_aead *tfm) struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_alg *alg = crypto_aead_alg(tfm); struct tegra_se_alg *se_alg; - const char *algname; - int ret; - - algname = crypto_tfm_alg_name(&tfm->base); #ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base); @@ -1368,13 +1621,10 @@ static int tegra_gcm_cra_init(struct crypto_aead *tfm) ctx->se = se_alg->se_dev; ctx->key_id = 0; - ret = se_algname_to_algid(algname); - if (ret < 0) { - dev_err(ctx->se->dev, "invalid algorithm\n"); - return ret; - } - - ctx->alg = ret; + ctx->alg = SE_ALG_GCM; + ctx->final_alg = SE_ALG_GCM_FINAL; + ctx->verify_alg = SE_ALG_GCM_VERIFY; + ctx->mac_alg = SE_ALG_GMAC; #ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX ctx->enginectx.op.prepare_request = NULL; @@ -1480,6 +1730,12 @@ static unsigned int tegra_cmac_prep_cmd(struct tegra_se *se, struct tegra_cmac_r data_count--; if (rctx->task & SHA_FIRST) { + /* T264 needs INIT to be set for first operation + * whereas T234 will return error if INIT is set + * Differentiate T264 and T234 based on CFG */ + if ((rctx->config & SE_AES_DST_KEYTABLE) == SE_AES_DST_KEYTABLE) + op |= SE_AES_OP_INIT; + rctx->task &= ~SHA_FIRST; cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); @@ -1501,8 +1757,11 @@ static unsigned int tegra_cmac_prep_cmd(struct tegra_se *se, struct tegra_cmac_r cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr); cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) | SE_ADDR_HI_SZ(rctx->datbuf.size); - cpuvaddr[i++] = 0; - cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE); + + /* Destination Address */ + cpuvaddr[i++] = rctx->digest.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | + SE_ADDR_HI_SZ(rctx->digest.size)); cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); cpuvaddr[i++] = op; @@ -1511,6 +1770,8 @@ static unsigned int tegra_cmac_prep_cmd(struct tegra_se *se, struct tegra_cmac_r cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + dev_dbg(se->dev, "cfg %#x\n", rctx->config); + return i; } @@ -1557,8 +1818,9 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->src_sg = req->src; rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; rctx->total_len += rctx->datbuf.size; - rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 
0); - rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id); + rctx->config = se->regcfg->cfg(ctx->alg, 0); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->alg, 0) | + SE_AES_KEY_INDEX(rctx->key_id); /* * Keep one block and residue bytes in residue and @@ -1618,7 +1880,6 @@ static int tegra_cmac_do_final(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - u32 *result = (u32 *)req->result; int ret = 0, i, cmdlen; if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) { @@ -1628,7 +1889,7 @@ static int tegra_cmac_do_final(struct ahash_request *req) rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; - rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); + rctx->config = se->regcfg->cfg(ctx->final_alg, 0); if (rctx->residue.size) { rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, @@ -1649,13 +1910,14 @@ static int tegra_cmac_do_final(struct ahash_request *req) if (ret) goto out; - /* Read and clear Result register */ - for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - result[i] = readl(se->base + se->hw->regs->result + (i * 4)); + memcpy(req->result, rctx->digest.buf, rctx->digest.size); for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) writel(0, se->base + se->hw->regs->result + (i * 4)); + if (rctx->key_id != ctx->key_id) + tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg); + out: if (rctx->residue.size) dma_free_coherent(se->dev, rctx->datbuf.size, @@ -1663,6 +1925,9 @@ out: out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2, rctx->residue.buf, rctx->residue.addr); + dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, + rctx->digest.addr); + return ret; } @@ -1716,7 +1981,6 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm) struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); struct tegra_se_alg *se_alg; const char *algname; - int ret; algname = crypto_tfm_alg_name(tfm); #ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX @@ -1729,14 +1993,9 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm) ctx->se = se_alg->se_dev; ctx->key_id = 0; + ctx->alg = SE_ALG_CMAC; + ctx->final_alg = SE_ALG_CMAC_FINAL; - ret = se_algname_to_algid(algname); - if (ret < 0) { - dev_err(ctx->se->dev, "invalid algorithm\n"); - return ret; - } - - ctx->alg = ret; #ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX ctx->enginectx.op.prepare_request = NULL; @@ -1770,13 +2029,27 @@ static int tegra_cmac_init(struct ahash_request *req) rctx->total_len = 0; rctx->datbuf.size = 0; rctx->residue.size = 0; + rctx->key_id = 0; rctx->task = SHA_FIRST; rctx->blk_size = crypto_ahash_blocksize(tfm); + rctx->digest.size = crypto_ahash_digestsize(tfm); + + /* Retrieve the key slot for CMAC */ + if (ctx->key_id) { + rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id); + if (!rctx->key_id) + return -ENOMEM; + } + + rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, + &rctx->digest.addr, GFP_KERNEL); + if (!rctx->digest.buf) + goto digbuf_fail; rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, &rctx->residue.addr, GFP_KERNEL); if (!rctx->residue.buf) - return -ENOMEM; + goto resbuf_fail; rctx->residue.size = 0; rctx->datbuf.size = 0; @@ -1786,6 +2059,15 @@ static int tegra_cmac_init(struct ahash_request *req) writel(0, se->base + se->hw->regs->result + (i * 4)); return 0; + +resbuf_fail: + dma_free_coherent(se->dev, rctx->blk_size, rctx->digest.buf, + rctx->digest.addr); +digbuf_fail: + if 
(rctx->key_id != ctx->key_id) + tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg); + + return -ENOMEM; } static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, @@ -1944,7 +2226,7 @@ static struct tegra_se_alg tegra_cmac_algs[] = { .halg.statesize = sizeof(struct tegra_cmac_reqctx), .halg.base = { .cra_name = "cmac(aes)", - .cra_driver_name = "tegra-se-cmac", + .cra_driver_name = "cmac-aes-tegra", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = AES_BLOCK_SIZE, @@ -1970,7 +2252,7 @@ int tegra_init_aes(struct tegra_se *se) struct skcipher_engine_alg *sk_alg; int i, ret; - se->manifest = tegra_aes_kac_manifest; + tegra_aes_set_regcfg(se); for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) { sk_alg = &tegra_aes_algs[i].alg.skcipher; @@ -2033,7 +2315,7 @@ int tegra_init_aes(struct tegra_se *se) struct skcipher_alg *sk_alg; int i, ret; - se->manifest = tegra_aes_kac_manifest; + tegra_aes_set_regcfg(se); for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) { sk_alg = &tegra_aes_algs[i].alg.skcipher; diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index bdcc3239..2ad081c1 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -106,6 +107,11 @@ static int tegra_sha_get_config(u32 alg) cfg |= SE_SHA_ENC_ALG_SHA; cfg |= SE_SHA_ENC_MODE_SHA3_512; break; + + case SE_ALG_SM3_256: + cfg |= SE_SHA_ENC_ALG_SM3; + cfg |= SE_SHA_ENC_MODE_SM3_256; + break; default: return -EINVAL; } @@ -445,6 +451,9 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) if (rctx->task & SHA_FINAL) { ret = tegra_sha_do_final(req); rctx->task &= ~SHA_FINAL; + + if (rctx->key_id) + tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg); } crypto_finalize_hash_request(se->engine, req, ret); @@ -543,12 +552,19 @@ static int tegra_sha_init(struct ahash_request *req) rctx->total_len = 0; rctx->datbuf.size = 0; rctx->residue.size = 0; - rctx->key_id = ctx->key_id; + rctx->key_id = 0; rctx->task = SHA_FIRST; rctx->alg = ctx->alg; rctx->blk_size = crypto_ahash_blocksize(tfm); rctx->digest.size = crypto_ahash_digestsize(tfm); + /* Retrieve the key slot for HMAC */ + if (ctx->key_id) { + rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id); + if (!rctx->key_id) + return -ENOMEM; + } + rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, &rctx->digest.addr, GFP_KERNEL); if (!rctx->digest.buf) @@ -565,6 +581,9 @@ resbuf_fail: dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, rctx->digest.addr); digbuf_fail: + if (rctx->key_id != ctx->key_id) + tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg); + return -ENOMEM; } @@ -1093,6 +1112,42 @@ static struct tegra_se_alg tegra_hash_algs[] = { } }; +static struct tegra_se_alg tegra_sm3_algs[] = { + { + .alg.ahash = { +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .base = { +#endif + .init = tegra_sha_init, + .update = tegra_sha_update, + .final = tegra_sha_final, + .finup = tegra_sha_finup, + .digest = tegra_sha_digest, + .export = tegra_sha_export, + .import = tegra_sha_import, + .halg.digestsize = SM3_DIGEST_SIZE, + .halg.statesize = sizeof(struct tegra_sha_reqctx), + + .halg.base = { + .cra_name = "sm3", + .cra_driver_name = "tegra-se-sm3", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = SM3_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + 
.cra_init = tegra_sha_cra_init, + .cra_exit = tegra_sha_cra_exit, + } +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + }, + .op.do_one_request = tegra_sha_do_one_req, +#endif + } + }, +}; + static int tegra_hash_kac_manifest(u32 user, u32 alg, u32 keylen) { int manifest; @@ -1126,6 +1181,57 @@ static int tegra_hash_kac_manifest(u32 user, u32 alg, u32 keylen) return manifest; } +static int tegra_hash_kac2_manifest(u32 user, u32 alg, u32 keylen) +{ + int manifest; + + manifest = SE_KAC2_USER(user) | SE_KAC2_ORIGIN_SW; + manifest |= SE_KAC2_DECRYPT_EN | SE_KAC2_ENCRYPT_EN; + manifest |= SE_KAC2_SUBTYPE_SHA | SE_KAC2_TYPE_SYM; + + switch (alg) { + case SE_ALG_HMAC_SHA224: + case SE_ALG_HMAC_SHA256: + case SE_ALG_HMAC_SHA384: + case SE_ALG_HMAC_SHA512: + manifest |= SE_KAC2_HMAC; + break; + default: + return -EINVAL; + } + + switch (keylen) { + case AES_KEYSIZE_128: + manifest |= SE_KAC2_SIZE_128; + break; + case AES_KEYSIZE_192: + manifest |= SE_KAC2_SIZE_192; + break; + case AES_KEYSIZE_256: + default: + manifest |= SE_KAC2_SIZE_256; + break; + } + + return manifest; +} + +struct tegra_se_regcfg tegra234_hash_regcfg = { + .manifest = tegra_hash_kac_manifest, +}; + +struct tegra_se_regcfg tegra264_hash_regcfg = { + .manifest = tegra_hash_kac2_manifest, +}; + +static void tegra_hash_set_regcfg(struct tegra_se *se) +{ + if (se->hw->kac_ver > 1) + se->regcfg = &tegra264_hash_regcfg; + else + se->regcfg = &tegra234_hash_regcfg; +} + int tegra_init_hash(struct tegra_se *se) { #ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX @@ -1135,7 +1241,7 @@ int tegra_init_hash(struct tegra_se *se) #endif int i, ret; - se->manifest = tegra_hash_kac_manifest; + tegra_hash_set_regcfg(se); for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++) { tegra_hash_algs[i].se_dev = se; @@ -1154,8 +1260,33 @@ int tegra_init_hash(struct tegra_se *se) } } + if (!se->hw->support_sm_alg) + return 0; + + for (i = 0; i < ARRAY_SIZE(tegra_sm3_algs); i++) { + tegra_sm3_algs[i].se_dev = se; + alg = &tegra_sm3_algs[i].alg.ahash; + ret = CRYPTO_REGISTER(ahash, alg); + if (ret) { +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + dev_err(se->dev, "failed to register %s\n", + alg->base.halg.base.cra_name); +#else + dev_err(se->dev, "failed to register %s\n", + alg->halg.base.cra_name); +#endif + goto sm3_err; + } + } + + dev_info(se->dev, "registered HASH algorithms\n"); + return 0; +sm3_err: + for (--i; i >= 0; i--) + CRYPTO_REGISTER(ahash, &tegra_sm3_algs[i].alg.ahash); + i = ARRAY_SIZE(tegra_hash_algs); sha_err: for (--i; i >= 0; i--) CRYPTO_UNREGISTER(ahash, &tegra_hash_algs[i].alg.ahash); @@ -1169,4 +1300,10 @@ void tegra_deinit_hash(struct tegra_se *se) for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++) CRYPTO_UNREGISTER(ahash, &tegra_hash_algs[i].alg.ahash); + + if (!se->hw->support_sm_alg) + return; + + for (i = 0; i < ARRAY_SIZE(tegra_sm3_algs); i++) + CRYPTO_UNREGISTER(ahash, &tegra_sm3_algs[i].alg.ahash); } diff --git a/drivers/crypto/tegra/tegra-se-kds.c b/drivers/crypto/tegra/tegra-se-kds.c index 1949060d..d524ea45 100644 --- a/drivers/crypto/tegra/tegra-se-kds.c +++ b/drivers/crypto/tegra/tegra-se-kds.c @@ -1,15 +1,239 @@ // SPDX-License-Identifier: GPL-2.0-only -// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +/* + * Crypto driver for NVIDIA Security Engine for block cipher operations. 
+ */ +#include +#include #include +#include +#include -/* Dummy implementation for module */ -static int __init tegra_se_kds_dummy_init(void) +#include "tegra-se.h" + +#define KDS_ALLOC_MUTEX 0 +#define KDS_ALLOC_RGN_ATTR0 0x4 +#define KDS_ALLOC_RGN_ATTR1 0x8 +#define KDS_ALLOC_OP_TRIG 0x18 +#define KDS_ALLOC_OP_STATUS 0x1c + +#define KDS_MUTEX_MST_ID(x) FIELD_PREP(GENMASK(13, 8), x) +#define KDS_MUTEX_BUSY BIT(4) +#define KDS_MUTEX_OP(x) FIELD_PREP(BIT(0), x) +#define KDS_MUTEX_REQ KDS_MUTEX_OP(1) +#define KDS_MUTEX_RELEASE KDS_MUTEX_OP(0) + +#define KDS_RGN_ATTR_OWNER(x) FIELD_PREP(GENMASK(31, 24), x) +#define KDS_RGN_ATTR_TYPE(x) FIELD_PREP(GENMASK(21, 20), x) +#define KDS_RGN_ATTR_TYPE_NORMAL KDS_RGN_ATTR_TYPE(0) + +#define KDS_RGN_ATTR_MAX_KSIZE_256 FIELD_PREP(GENMASK(17, 16), 1) +#define KDS_RGN_ATTR_NUM_KEYS(x) FIELD_PREP(GENMASK(15, 0), x) + +#define KDS_ALLOC_OP_STATUS_FIELD(x) FIELD_PREP(GENMASK(1, 0), x) +#define KDS_ALLOC_OP_IDLE KDS_ALLOC_OP_STATUS_FIELD(0) +#define KDS_ALLOC_OP_BUSY KDS_ALLOC_OP_STATUS_FIELD(1) +#define KDS_ALLOC_OP_PASS KDS_ALLOC_OP_STATUS_FIELD(2) +#define KDS_ALLOC_OP_FAIL KDS_ALLOC_OP_STATUS_FIELD(3) +#define KDS_ALLOC_RGN_ID_MASK GENMASK(14, 4) + +#define SE_KSLT_KEY_ID_MASK GENMASK(15, 0) +#define SE_KSLT_REGION_ID_MASK GENMASK(25, 16) + +#define SE_KSLT_TABLE_ID_MASK GENMASK(31, 26) +#define SE_KSLT_TABLE_ID(x) FIELD_PREP(SE_KSLT_TABLE_ID_MASK, x) +#define SE_KSLT_TABLE_ID_GLOBAL SE_KSLT_TABLE_ID(48) + +#define KDS_TIMEOUT 100000 /* 100 msec */ +#define KDS_MAX_KEYID 63 +#define KDS_ID_VALID_MASK GENMASK(KDS_MAX_KEYID, 0) +#define TEGRA_GPSE 3 + +static u32 kds_region_id; +static u64 kds_keyid = BIT(0); + +struct tegra_kds { + struct device *dev; + void __iomem *base; + u32 owner; + u32 id; +}; + +static u16 tegra_kds_keyid_alloc(void) +{ + u16 keyid; + + /* Check if all key slots are full */ + if (kds_keyid == GENMASK(KDS_MAX_KEYID, 0)) + return 0; + + keyid = ffz(kds_keyid); + kds_keyid |= BIT(keyid); + + return keyid; +} + +static void tegra_kds_keyid_free(u32 id) +{ + kds_keyid &= ~(BIT(id)); +} + +static inline void kds_writel(struct tegra_kds *kds, unsigned int offset, + unsigned int val) +{ + writel_relaxed(val, kds->base + offset); +} + +static inline u32 kds_readl(struct tegra_kds *kds, unsigned int offset) +{ + return readl_relaxed(kds->base + offset); +} + +static int kds_mutex_lock(struct tegra_kds *kds) +{ + u32 val; + int ret; + + ret = readl_relaxed_poll_timeout(kds->base + KDS_ALLOC_MUTEX, + val, !(val & KDS_MUTEX_BUSY), + 10, KDS_TIMEOUT); + + if (ret) + return ret; + + val = KDS_MUTEX_MST_ID(TEGRA_GPSE) | + KDS_MUTEX_REQ; + + kds_writel(kds, KDS_ALLOC_MUTEX, val); + + return 0; +} + +static void kds_mutex_unlock(struct tegra_kds *kds) +{ + u32 val; + + val = KDS_MUTEX_MST_ID(TEGRA_GPSE) | + KDS_MUTEX_RELEASE; + + kds_writel(kds, KDS_ALLOC_MUTEX, val); +} + +static int tegra_kds_region_setup(struct tegra_kds *kds) +{ + u32 val, region_attr; + int ret; + + region_attr = KDS_RGN_ATTR_OWNER(TEGRA_GPSE) | + KDS_RGN_ATTR_TYPE_NORMAL | + KDS_RGN_ATTR_MAX_KSIZE_256 | + KDS_RGN_ATTR_NUM_KEYS(64); + + ret = kds_mutex_lock(kds); + if (ret) + return ret; + + kds_writel(kds, KDS_ALLOC_RGN_ATTR0, region_attr); + kds_writel(kds, KDS_ALLOC_RGN_ATTR1, BIT(TEGRA_GPSE)); + kds_writel(kds, KDS_ALLOC_OP_TRIG, 1); + + ret = readl_relaxed_poll_timeout(kds->base + KDS_ALLOC_OP_STATUS, + val, !(val & KDS_ALLOC_OP_BUSY), 10, KDS_TIMEOUT); + + if (ret) { + dev_err(kds->dev, "Region allocation timed out val\n"); + goto out; + } + + if 
(KDS_ALLOC_OP_STATUS_FIELD(val) == KDS_ALLOC_OP_FAIL) { + dev_err(kds->dev, "Region allocation failed\n"); + ret = -EINVAL; + goto out; + } + + kds->id = FIELD_GET(KDS_ALLOC_RGN_ID_MASK, val); + kds_region_id = kds->id; + dev_info(kds->dev, "Allocated Global Key ID table with ID %#x\n", kds->id); + +out: + kds_mutex_unlock(kds); + + return ret; +} + +bool tegra_key_in_kds(u32 keyid) +{ + if (!((keyid & SE_KSLT_TABLE_ID_MASK) == SE_KSLT_TABLE_ID_GLOBAL)) + return false; + + return ((BIT(keyid & SE_KSLT_KEY_ID_MASK) & KDS_ID_VALID_MASK) && + (BIT(keyid & SE_KSLT_KEY_ID_MASK) & kds_keyid)); +} +EXPORT_SYMBOL(tegra_key_in_kds); + +u32 tegra_kds_get_id(void) +{ + u32 kds_id, keyid; + + keyid = tegra_kds_keyid_alloc(); + if (!keyid) + return -ENOMEM; + + kds_id = SE_KSLT_TABLE_ID_GLOBAL | + FIELD_PREP(SE_KSLT_REGION_ID_MASK, kds_region_id) | + FIELD_PREP(SE_KSLT_KEY_ID_MASK, keyid); + + return kds_id; +} +EXPORT_SYMBOL(tegra_kds_get_id); + +void tegra_kds_free_id(u32 keyid) +{ + tegra_kds_keyid_free(keyid & 0xff); +} +EXPORT_SYMBOL(tegra_kds_free_id); + +static int tegra_kds_probe(struct platform_device *pdev) +{ + struct tegra_kds *kds; + + kds = devm_kzalloc(&pdev->dev, sizeof(struct tegra_kds), GFP_KERNEL); + if (!kds) + return -ENOMEM; + + kds->dev = &pdev->dev; + kds->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(kds->base)) + return PTR_ERR(kds->base); + + return tegra_kds_region_setup(kds); +} + +static int tegra_kds_remove(struct platform_device *pdev) { return 0; } -device_initcall(tegra_se_kds_dummy_init); -MODULE_AUTHOR("Laxman Dewangan "); -MODULE_DESCRIPTION("Dummy Tegra SE KDS driver"); +static const struct of_device_id tegra_kds_of_match[] = { + { + .compatible = "nvidia,tegra264-kds", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_kds_of_match); + +static struct platform_driver tegra_kds_driver = { + .driver = { + .name = "tegra-kds", + .of_match_table = tegra_kds_of_match, + }, + .probe = tegra_kds_probe, + .remove = tegra_kds_remove, +}; + +module_platform_driver(tegra_kds_driver); + +MODULE_DESCRIPTION("NVIDIA Tegra Key Distribution System Driver"); +MODULE_AUTHOR("Akhil R "); MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c index e3b97ec7..91ea9e24 100644 --- a/drivers/crypto/tegra/tegra-se-key.c +++ b/drivers/crypto/tegra/tegra-se-key.c @@ -59,7 +59,7 @@ static unsigned int tegra_key_prep_ins_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->manifest); - cpuvaddr[i++] = se->manifest(se->owner, alg, keylen); + cpuvaddr[i++] = se->regcfg->manifest(se->owner, alg, keylen); cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_dst); @@ -91,7 +91,69 @@ static unsigned int tegra_key_prep_ins_cmd(struct tegra_se *se, u32 *cpuvaddr, host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); dev_dbg(se->dev, "key-slot %u key-manifest %#x\n", - slot, se->manifest(se->owner, alg, keylen)); + slot, se->regcfg->manifest(se->owner, alg, keylen)); + + return i; +} + +static unsigned int tegra_key_prep_mov_cmd(struct tegra_se *se, u32 *cpuvaddr, + u32 src_keyid, u32 tgt_keyid) +{ + int i = 0; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY; + + cpuvaddr[i++] = host1x_opcode_setpayload(2); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->src_kslt); + cpuvaddr[i++] = 
src_keyid; + cpuvaddr[i++] = tgt_keyid; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->config); + cpuvaddr[i++] = SE_CFG_MOV; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_START | + SE_AES_OP_LASTBUF; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "keymov: src keyid %u target keyid %u\n", src_keyid, tgt_keyid); + + return i; +} + +static unsigned int tegra_key_prep_invld_cmd(struct tegra_se *se, u32 *cpuvaddr, u32 keyid) +{ + int i = 0; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->tgt_kslt); + cpuvaddr[i++] = keyid; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->config); + cpuvaddr[i++] = SE_CFG_INVLD; + + cpuvaddr[i++] = host1x_opcode_setpayload(1); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_START | + SE_AES_OP_LASTBUF; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "invalidate keyid %u\n", keyid); return i; } @@ -122,6 +184,64 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key, return tegra_se_host1x_submit(se, size); } +static int tegra_key_move_to_kds(struct tegra_se *se, u32 slot, u32 kds_id) +{ + u32 src_keyid, size; + int ret; + + src_keyid = SE_KSLT_REGION_ID_SYM | slot; + size = tegra_key_prep_mov_cmd(se, se->cmdbuf->addr, src_keyid, kds_id); + + ret = tegra_se_host1x_submit(se, size); + if (ret) + return ret; + + return 0; +} + +static unsigned int tegra_kac_get_from_kds(struct tegra_se *se, u32 keyid, u16 slot) +{ + u32 tgt_keyid, size; + int ret; + + tgt_keyid = SE_KSLT_REGION_ID_SYM | slot; + size = tegra_key_prep_mov_cmd(se, se->cmdbuf->addr, keyid, tgt_keyid); + + ret = tegra_se_host1x_submit(se, size); + if (ret) + tegra_keyslot_free(slot); + + return ret; +} + +static void tegra_key_kds_invalidate(struct tegra_se *se, u32 keyid) +{ + unsigned int size; + + size = tegra_key_prep_invld_cmd(se, se->cmdbuf->addr, keyid); + tegra_se_host1x_submit(se, size); + tegra_kds_free_id(keyid); +} + +unsigned int tegra_key_get_idx(struct tegra_se *se, u32 keyid) +{ + u16 slot; + + if (tegra_key_in_kslt(keyid)) + return keyid; + + if (!tegra_key_in_kds(keyid)) + return 0; + + slot = tegra_keyslot_alloc(); + if (!slot) + return 0; + + tegra_kac_get_from_kds(se, keyid, slot); + + return slot; +} + void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) { u8 zkey[AES_MAX_KEY_SIZE] = {0}; @@ -129,14 +249,17 @@ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) if (!keyid) return; - /* Overwrite the key with 0s */ - tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg); - - tegra_keyslot_free(keyid); + if (tegra_key_in_kds(keyid)) { + tegra_key_kds_invalidate(se, keyid); + } else { + tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg); + tegra_keyslot_free(keyid); + } } int tegra_key_submit(struct tegra_se *se, const u8 
*key, u32 keylen, u32 alg, u32 *keyid) { + u32 kds_id, orig_id = *keyid; int ret; /* Use the existing slot if it is already allocated */ @@ -152,5 +275,36 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3 if (ret) return ret; + if (!se->hw->support_kds) + return 0; + + /* + * Move the key to KDS and free the slot if HW supports. + * The key will have to be brought back to local KSLT for any task. + */ + + /* If it is a valid key, invalidate it */ + if (tegra_key_in_kds(orig_id)) + tegra_key_kds_invalidate(se, orig_id); + + kds_id = tegra_kds_get_id(); + if (!kds_id) { + /* Not a fatal error. Key can still reside in KSLT */ + dev_err(se->dev, "Failed to get KDS slot.The key is in local key slot\n"); + return 0; + } + + ret = tegra_key_move_to_kds(se, *keyid, kds_id); + if (ret) { + /* Not a fatal error. Key can still reside in KSLT */ + dev_err(se->dev, "Failed to move key to KDS. The key is in local key slot\n"); + tegra_kds_free_id(kds_id); + return 0; + } + + /* Free the local keyslot. */ + tegra_key_invalidate(se, *keyid, alg); + *keyid = kds_id; + return 0; } diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c index 99415702..d4b80218 100644 --- a/drivers/crypto/tegra/tegra-se-main.c +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -334,6 +334,36 @@ static int tegra_se_remove(struct platform_device *pdev) return 0; } +static const struct tegra_se_regs tegra234_aes0_regs = { + .config = SE_AES0_CFG, + .op = SE_AES0_OPERATION, + .last_blk = SE_AES0_LAST_BLOCK, + .linear_ctr = SE_AES0_LINEAR_CTR, + .aad_len = SE_AES0_AAD_LEN, + .cryp_msg_len = SE_AES0_CRYPTO_MSG_LEN, + .manifest = SE_AES0_KEYMANIFEST, + .key_addr = SE_AES0_KEY_ADDR, + .key_data = SE_AES0_KEY_DATA, + .key_dst = SE_AES0_KEY_DST, + .result = SE_AES0_CMAC_RESULT, +}; + +static const struct tegra_se_regs tegra264_aes0_regs = { + .config = SE_AES0_CFG, + .op = SE_AES0_OPERATION, + .last_blk = SE_AES0_LAST_BLOCK, + .linear_ctr = SE_AES0_LINEAR_CTR, + .aad_len = SE_AES0_AAD_LEN, + .cryp_msg_len = SE_AES0_CRYPTO_MSG_LEN, + .manifest = SE_AES0_KAC2_KEYMANIFEST, + .key_addr = SE_AES0_KEY_ADDR, + .key_data = SE_AES0_KEY_DATA, + .key_dst = SE_AES0_KEY_DST, + .src_kslt = SE_AES0_SRC_KSLT, + .tgt_kslt = SE_AES0_TGT_KSLT, + .result = SE_AES0_CMAC_RESULT, +}; + static const struct tegra_se_regs tegra234_aes1_regs = { .config = SE_AES1_CFG, .op = SE_AES1_OPERATION, @@ -348,6 +378,22 @@ static const struct tegra_se_regs tegra234_aes1_regs = { .result = SE_AES1_CMAC_RESULT, }; +static const struct tegra_se_regs tegra264_aes1_regs = { + .config = SE_AES1_CFG, + .op = SE_AES1_OPERATION, + .last_blk = SE_AES1_LAST_BLOCK, + .linear_ctr = SE_AES1_LINEAR_CTR, + .aad_len = SE_AES1_AAD_LEN, + .cryp_msg_len = SE_AES1_CRYPTO_MSG_LEN, + .manifest = SE_AES1_KAC2_KEYMANIFEST, + .key_addr = SE_AES1_KEY_ADDR, + .key_data = SE_AES1_KEY_DATA, + .key_dst = SE_AES1_KEY_DST, + .src_kslt = SE_AES1_SRC_KSLT, + .tgt_kslt = SE_AES1_TGT_KSLT, + .result = SE_AES1_CMAC_RESULT, +}; + static const struct tegra_se_regs tegra234_hash_regs = { .config = SE_SHA_CFG, .op = SE_SHA_OPERATION, @@ -358,6 +404,18 @@ static const struct tegra_se_regs tegra234_hash_regs = { .result = SE_SHA_HASH_RESULT, }; +static const struct tegra_se_regs tegra264_hash_regs = { + .config = SE_SHA_CFG, + .op = SE_SHA_OPERATION, + .manifest = SE_SHA_KAC2_KEYMANIFEST, + .key_addr = SE_SHA_KEY_ADDR, + .key_data = SE_SHA_KEY_DATA, + .key_dst = SE_SHA_KEY_DST, + .src_kslt = SE_SHA_SRC_KSLT, + .tgt_kslt = SE_SHA_TGT_KSLT, + .result = 
SE_SHA_HASH_RESULT, +}; + static const struct tegra_se_hw tegra234_aes_hw = { .regs = &tegra234_aes1_regs, .kac_ver = 1, @@ -366,6 +424,16 @@ static const struct tegra_se_hw tegra234_aes_hw = { .deinit_alg = tegra_deinit_aes, }; +const struct tegra_se_hw tegra264_aes_hw = { + .regs = &tegra264_aes1_regs, + .kac_ver = 2, + .support_sm_alg = true, + .support_kds = false, // FIXME: Bug 4663009 + .host1x_class = 0x3b, + .init_alg = tegra_init_aes, + .deinit_alg = tegra_deinit_aes, +}; + static const struct tegra_se_hw tegra234_hash_hw = { .regs = &tegra234_hash_regs, .kac_ver = 1, @@ -374,6 +442,27 @@ static const struct tegra_se_hw tegra234_hash_hw = { .deinit_alg = tegra_deinit_hash, }; +static const struct tegra_se_hw tegra264_hash_hw = { + .regs = &tegra264_hash_regs, + .kac_ver = 2, + .support_sm_alg = true, + .support_kds = false, // FIXME: Bug 4663009 + .host1x_class = 0x3d, + .init_alg = tegra_init_hash, + .deinit_alg = tegra_deinit_hash, +}; + +static const struct tegra_se_hw tegra264_sm4_hw = { + .regs = &tegra264_aes0_regs, + .kac_ver = 2, + .host1x_class = 0x3a, + .support_kds = false, // FIXME: Bug 4663009 + .support_aad_verify = true, + .support_sm_alg = true, + .init_alg = tegra_init_sm4, + .deinit_alg = tegra_deinit_sm4, +}; + static const struct of_device_id tegra_se_of_match[] = { { .compatible = "nvidia,tegra234-se-aes", @@ -381,6 +470,15 @@ static const struct of_device_id tegra_se_of_match[] = { }, { .compatible = "nvidia,tegra234-se-hash", .data = &tegra234_hash_hw, + }, { + .compatible = "nvidia,tegra264-se-aes", + .data = &tegra264_aes_hw + }, { + .compatible = "nvidia,tegra264-se-hash", + .data = &tegra264_hash_hw, + }, { + .compatible = "nvidia,tegra264-se-sm4", + .data = &tegra264_sm4_hw }, { }, }; diff --git a/drivers/crypto/tegra/tegra-se-sm4.c b/drivers/crypto/tegra/tegra-se-sm4.c new file mode 100644 index 00000000..ca9f2afd --- /dev/null +++ b/drivers/crypto/tegra/tegra-se-sm4.c @@ -0,0 +1,1701 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Crypto driver for NVIDIA Security Engine for block cipher operations. + * + * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tegra-se.h" + +struct tegra_sm4_ctx { +#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + struct crypto_engine_ctx enginectx; +#endif + struct tegra_se *se; + u32 alg; + u32 keylen; + u32 ivsize; + u32 key1_id; + u32 key2_id; +}; + +struct tegra_sm4_reqctx { + struct tegra_se_datbuf datbuf; + struct tegra_se *se; + bool encrypt; + u32 cfg; + u32 crypto_cfg; + u32 key1_id; + u32 key2_id; +}; + +struct tegra_sm4_gcm_ctx { +#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + struct crypto_engine_ctx enginectx; +#endif + struct tegra_se *se; + unsigned int authsize; + u32 alg; + u32 mac_alg; + u32 final_alg; + u32 verify_alg; + u32 keylen; + u32 key_id; +}; + +struct tegra_sm4_gcm_reqctx { + struct tegra_se_datbuf inbuf; + struct tegra_se_datbuf outbuf; + struct scatterlist *src_sg; + struct scatterlist *dst_sg; + unsigned int assoclen; + unsigned int cryptlen; + unsigned int authsize; + bool encrypt; + u32 config; + u32 crypto_config; + u32 key_id; + u32 iv[4]; + u8 authdata[16]; +}; + +struct tegra_sm4_cmac_ctx { +#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + struct crypto_engine_ctx enginectx; +#endif + struct tegra_se *se; + u32 alg; + u32 final_alg; + u32 key_id; + struct crypto_shash *fallback_tfm; +}; + +struct tegra_sm4_cmac_reqctx { + struct scatterlist *src_sg; + struct tegra_se_datbuf datbuf; + struct tegra_se_datbuf digest; + struct tegra_se_datbuf residue; + unsigned int total_len; + unsigned int blk_size; + unsigned int task; + u32 config; + u32 crypto_config; + u32 key_id; + u32 result[CMAC_RESULT_REG_COUNT]; + u32 *iv; +}; + +/* increment counter (128-bit int) */ +static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums) +{ + do { + --bits; + nums += counter[bits]; + counter[bits] = nums & 0xff; + nums >>= 8; + } while (bits && nums); +} + +static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_sm4_ctx *ctx) +{ + struct tegra_sm4_reqctx *rctx = skcipher_request_ctx(req); + unsigned int off; + + off = req->cryptlen - ctx->ivsize; + + if (rctx->encrypt) + memcpy(req->iv, rctx->datbuf.buf + off, ctx->ivsize); + else + sg_pcopy_to_buffer(req->src, sg_nents(req->src), + req->iv, ctx->ivsize, off); +} + +static void tegra_sm4_update_iv(struct skcipher_request *req, struct tegra_sm4_ctx *ctx) +{ + int sz; + + if (ctx->alg == SE_ALG_SM4_CBC) { + tegra_cbc_iv_copyback(req, ctx); + } else if (ctx->alg == SE_ALG_SM4_CTR) { + sz = req->cryptlen / ctx->ivsize; + if (req->cryptlen % ctx->ivsize) + sz++; + + ctr_iv_inc(req->iv, ctx->ivsize, sz); + } +} + +static int tegra_sm4_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, u32 *iv, + int len, dma_addr_t addr, int cfg, int cryp_cfg) +{ + int i = 0, j; + + if (iv) { + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = iv[j]; + } + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = (len / AES_BLOCK_SIZE) - 1; + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = cfg; + cpuvaddr[i++] = cryp_cfg; + + /* Source address setting */ + cpuvaddr[i++] = lower_32_bits(addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(len); + + /* Destination address setting */ + cpuvaddr[i++] = lower_32_bits(addr); + 
cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | + SE_ADDR_HI_SZ(len); + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF | + SE_AES_OP_START; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", cfg, cryp_cfg); + + return i; +} + +static int tegra_sm4_do_one_req(struct crypto_engine *engine, void *areq) +{ + unsigned int len, src_nents, dst_nents, size; + u32 *cpuvaddr, *iv, config, crypto_config; + struct tegra_sm4_reqctx *rctx; + struct skcipher_request *req; + struct tegra_sm4_ctx *ctx; + struct tegra_se *se; + int ret; + + req = container_of(areq, struct skcipher_request, base); + rctx = skcipher_request_ctx(req); + ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + se = ctx->se; + iv = (u32 *)req->iv; + + /* Keys in ctx might be stored in KDS. Copy it to request ctx */ + rctx->key1_id = tegra_key_get_idx(ctx->se, ctx->key1_id); + if (!rctx->key1_id) { + ret = -ENOMEM; + goto out; + } + + rctx->key2_id = 0; + + /* If there are 2 keys stored (for XTS), retrieve them both */ + if (ctx->key2_id) { + rctx->key2_id = tegra_key_get_idx(ctx->se, ctx->key2_id); + if (!rctx->key2_id) { + ret = -ENOMEM; + goto key1_free; + } + } + + rctx->datbuf.size = req->cryptlen; + rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto key2_free; + } + + cpuvaddr = se->cmdbuf->addr; + len = req->cryptlen; + + /* Pad input to AES block size */ + if (len % AES_BLOCK_SIZE) + len += AES_BLOCK_SIZE - (len % AES_BLOCK_SIZE); + + src_nents = sg_nents(req->src); + sg_copy_to_buffer(req->src, src_nents, rctx->datbuf.buf, req->cryptlen); + + config = se->regcfg->cfg(ctx->alg, rctx->encrypt); + crypto_config = se->regcfg->crypto_cfg(ctx->alg, rctx->encrypt); + crypto_config |= SE_AES_KEY_INDEX(rctx->key1_id); + if (rctx->key2_id) + crypto_config |= SE_AES_KEY2_INDEX(rctx->key2_id); + + /* Prepare the command and submit */ + size = tegra_sm4_prep_cmd(se, cpuvaddr, iv, len, rctx->datbuf.addr, + config, crypto_config); + + ret = tegra_se_host1x_submit(se, size); + + /* Copy the result */ + dst_nents = sg_nents(req->dst); + tegra_sm4_update_iv(req, ctx); + sg_copy_from_buffer(req->dst, dst_nents, rctx->datbuf.buf, req->cryptlen); + + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); + +key2_free: + /* Free the keyslots if it is cloned for this request */ + if (rctx->key2_id != ctx->key2_id) + tegra_key_invalidate(ctx->se, rctx->key2_id, ctx->alg); +key1_free: + if (rctx->key1_id != ctx->key1_id) + tegra_key_invalidate(ctx->se, rctx->key1_id, ctx->alg); +out: + crypto_finalize_skcipher_request(se->engine, req, ret); + + return ret; +} + +static int tegra_sm4_cra_init(struct crypto_skcipher *tfm) +{ + struct tegra_sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct tegra_se_alg *se_alg; + const char *algname; + int ret; + +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base); +#else + se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher); +#endif + + crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_sm4_reqctx)); + + ctx->se = se_alg->se_dev; + ctx->key1_id = 0; + ctx->key2_id = 0; + 
ctx->ivsize = crypto_skcipher_ivsize(tfm); + + algname = crypto_tfm_alg_name(&tfm->base); + ret = se_algname_to_algid(algname); + if (ret < 0) { + dev_err(ctx->se->dev, "Invalid algorithm\n"); + return ret; + } + + ctx->alg = ret; + +#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + ctx->enginectx.op.do_one_request = tegra_sm4_do_one_req; +#endif + + return 0; +} + +static void tegra_sm4_cra_exit(struct crypto_skcipher *tfm) +{ + struct tegra_sm4_ctx *ctx = crypto_tfm_ctx(&tfm->base); + + if (ctx->key1_id) + tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg); + + if (ctx->key2_id) + tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg); +} + +static int tegra_sm4_setkey(struct crypto_skcipher *tfm, + const u8 *key, u32 keylen) +{ + struct tegra_sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (aes_check_keylen(keylen)) { + dev_err(ctx->se->dev, "key length validation failed\n"); + return -EINVAL; + } + + return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); +} + +static int tegra_sm4_xts_setkey(struct crypto_skcipher *tfm, + const u8 *key, u32 keylen) +{ + struct tegra_sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + u32 len; + + len = keylen / 2; + if (aes_check_keylen(len)) { + dev_err(ctx->se->dev, "key length validation failed\n"); + return -EINVAL; + } + + ret = tegra_key_submit(ctx->se, key, len, + ctx->alg, &ctx->key1_id); + if (ret) + return ret; + + ret = tegra_key_submit(ctx->se, key + len, len, + ctx->alg, &ctx->key2_id); + if (ret) + return ret; + + return 0; +} + +static int tegra_sm4_kac2_manifest(u32 user, u32 alg, u32 keylen) +{ + int manifest; + + manifest = SE_KAC2_USER(user) | SE_KAC2_ORIGIN_SW; + manifest |= SE_KAC2_DECRYPT_EN | SE_KAC2_ENCRYPT_EN; + manifest |= SE_KAC2_TYPE_SYM | SE_KAC2_SUBTYPE_SM4; + + switch (alg) { + case SE_ALG_SM4_CBC: + case SE_ALG_SM4_ECB: + case SE_ALG_SM4_CTR: + case SE_ALG_SM4_OFB: + manifest |= SE_KAC2_ENC; + break; + case SE_ALG_SM4_XTS: + manifest |= SE_KAC2_XTS; + break; + case SE_ALG_SM4_GCM: + manifest |= SE_KAC2_GCM; + break; + case SE_ALG_SM4_CMAC: + manifest |= SE_KAC2_CMAC; + break; + + default: + return -EINVAL; + } + + switch (keylen) { + case AES_KEYSIZE_128: + manifest |= SE_KAC2_SIZE_128; + break; + case AES_KEYSIZE_192: + manifest |= SE_KAC2_SIZE_192; + break; + case AES_KEYSIZE_256: + manifest |= SE_KAC2_SIZE_256; + break; + default: + return -EINVAL; + } + + return manifest; +} + +static inline int tegra264_sm4_crypto_cfg(u32 alg, bool encrypt) +{ + u32 cfg = SE_AES_CRYPTO_CFG_SCC_DIS; + + switch (alg) { + case SE_ALG_SM4_ECB: + break; + + case SE_ALG_SM4_CTR: + cfg |= SE_AES_IV_SEL_REG | + SE_AES_CRYPTO_CFG_CTR_CNTN(1); + break; + case SE_ALG_SM4_CBC: + case SE_ALG_SM4_OFB: + case SE_ALG_SM4_XTS: + cfg |= SE_AES_IV_SEL_REG; + break; + default: + return -EINVAL; + case SE_ALG_SM4_CMAC: + case SE_ALG_SM4_GMAC: + break; + case SE_ALG_SM4_GCM: + case SE_ALG_SM4_GCM_FINAL: + case SE_ALG_SM4_GCM_VERIFY: + cfg |= SE_AES_IV_SEL_REG; + break; + } + + return cfg; +} + +static int tegra264_sm4_cfg(u32 alg, bool encrypt) +{ + switch (alg) { + case SE_ALG_SM4_CBC: + if (encrypt) + return SE_CFG_SM4_CBC_ENCRYPT; + else + return SE_CFG_SM4_CBC_DECRYPT; + case SE_ALG_SM4_ECB: + if (encrypt) + return SE_CFG_SM4_ECB_ENCRYPT; + else + return SE_CFG_SM4_ECB_DECRYPT; + case SE_ALG_SM4_CTR: + if (encrypt) + return SE_CFG_SM4_CTR_ENCRYPT; + else + return SE_CFG_SM4_CTR_DECRYPT; + case SE_ALG_SM4_OFB: + if (encrypt) + return SE_CFG_SM4_OFB_ENCRYPT; + else + return SE_CFG_SM4_OFB_DECRYPT; + case SE_ALG_SM4_XTS: + if 
(encrypt) + return SE_CFG_SM4_XTS_ENCRYPT; + else + return SE_CFG_SM4_XTS_DECRYPT; + case SE_ALG_SM4_GMAC: + if (encrypt) + return SE_CFG_SM4_GMAC_ENCRYPT; + else + return SE_CFG_SM4_GMAC_DECRYPT; + + case SE_ALG_SM4_GCM: + if (encrypt) + return SE_CFG_SM4_GCM_ENCRYPT; + else + return SE_CFG_SM4_GCM_DECRYPT; + + case SE_ALG_SM4_GCM_FINAL: + if (encrypt) + return SE_CFG_SM4_GCM_FINAL_ENCRYPT; + else + return SE_CFG_SM4_GCM_FINAL_DECRYPT; + + case SE_ALG_SM4_GCM_VERIFY: + return SE_CFG_SM4_GCM_VERIFY; + + case SE_ALG_SM4_CMAC: + return SE_CFG_SM4_CMAC | SE_AES_DST_KEYTABLE; + + case SE_ALG_SM4_CMAC_FINAL: + return SE_CFG_SM4_CMAC; + } + + return -EINVAL; +} + +static int tegra_sm4_encrypt(struct skcipher_request *req) +{ + struct tegra_sm4_ctx *ctx; + struct tegra_sm4_reqctx *rctx; + + ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + rctx = skcipher_request_ctx(req); + rctx->encrypt = true; + + return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sm4_decrypt(struct skcipher_request *req) +{ + struct tegra_sm4_ctx *ctx; + struct tegra_sm4_reqctx *rctx; + + ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + rctx = skcipher_request_ctx(req); + rctx->encrypt = false; + + return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); +} + +static struct tegra_se_alg tegra_sm4_algs[] = { + { + .alg.skcipher = { +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .base = { +#endif + .init = tegra_sm4_cra_init, + .exit = tegra_sm4_cra_exit, + .setkey = tegra_sm4_xts_setkey, + .encrypt = tegra_sm4_encrypt, + .decrypt = tegra_sm4_decrypt, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "xts(sm4)", + .cra_driver_name = "xts-sm4-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sm4_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + }, + .op.do_one_request = tegra_sm4_do_one_req, +#endif + } + }, { + .alg.skcipher = { +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .base = { +#endif + .init = tegra_sm4_cra_init, + .exit = tegra_sm4_cra_exit, + .setkey = tegra_sm4_setkey, + .encrypt = tegra_sm4_encrypt, + .decrypt = tegra_sm4_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(sm4)", + .cra_driver_name = "cbc-sm4-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sm4_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + }, + .op.do_one_request = tegra_sm4_do_one_req, +#endif + } + }, { + .alg.skcipher = { +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .base = { +#endif + .init = tegra_sm4_cra_init, + .exit = tegra_sm4_cra_exit, + .setkey = tegra_sm4_setkey, + .encrypt = tegra_sm4_encrypt, + .decrypt = tegra_sm4_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ecb(sm4)", + .cra_driver_name = "ecb-sm4-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sm4_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + 
}, +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + }, + .op.do_one_request = tegra_sm4_do_one_req, +#endif + } + }, { + .alg.skcipher = { +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .base = { +#endif + .init = tegra_sm4_cra_init, + .exit = tegra_sm4_cra_exit, + .setkey = tegra_sm4_setkey, + .encrypt = tegra_sm4_encrypt, + .decrypt = tegra_sm4_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ctr(sm4)", + .cra_driver_name = "ctr-sm4-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sm4_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + }, + .op.do_one_request = tegra_sm4_do_one_req, +#endif + } + }, { + .alg.skcipher = { +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .base = { +#endif + .init = tegra_sm4_cra_init, + .exit = tegra_sm4_cra_exit, + .setkey = tegra_sm4_setkey, + .encrypt = tegra_sm4_encrypt, + .decrypt = tegra_sm4_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ofb(sm4)", + .cra_driver_name = "ofb-sm4-tegra", + .cra_priority = 500, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sm4_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + }, + .op.do_one_request = tegra_sm4_do_one_req, +#endif + } + }, +}; + +static int tegra_sm4_gmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, + struct tegra_sm4_gcm_reqctx *rctx) +{ + unsigned int i = 0, j; + unsigned int data_count, res_bits; + + data_count = (rctx->assoclen/AES_BLOCK_SIZE); + res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8; + + /* + * Hardware processes data_count + 1 blocks. 
+ * Reduce 1 block if there is no residue + */ + if (!res_bits) + data_count--; + + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = rctx->iv[j]; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | + SE_LAST_BLOCK_RES_BITS(res_bits); + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + + cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) | + SE_ADDR_HI_SZ(rctx->assoclen); + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL | + SE_AES_OP_INIT | SE_AES_OP_LASTBUF | + SE_AES_OP_START; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); + + return i; +} + +static int tegra_sm4_gcm_crypt_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, + struct tegra_sm4_gcm_reqctx *rctx) +{ + unsigned int i = 0, j; + unsigned int data_count, res_bits; + u32 op; + + data_count = (rctx->cryptlen/AES_BLOCK_SIZE); + res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8; + op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL | + SE_AES_OP_LASTBUF | SE_AES_OP_START; + + /* + * If there is no assoc data, + * this will be the init command + */ + if (!rctx->assoclen) + op |= SE_AES_OP_INIT; + + /* + * Hardware processes data_count + 1 blocks. + * Reduce 1 block if there is no residue + */ + if (!res_bits) + data_count--; + + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = rctx->iv[j]; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | + SE_LAST_BLOCK_RES_BITS(res_bits); + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + + /* Source Address */ + cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) | + SE_ADDR_HI_SZ(rctx->cryptlen); + + /* Destination Address */ + cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | + SE_ADDR_HI_SZ(rctx->cryptlen); + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = op; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); + + return i; +} + +static int tegra_sm4_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr, + struct tegra_sm4_gcm_reqctx *rctx) +{ + int i = 0, j; + u32 op; + + op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL | + SE_AES_OP_LASTBUF | SE_AES_OP_START; + + /* + * Set init for zero sized vector + */ + if (!rctx->assoclen && !rctx->cryptlen) + op |= SE_AES_OP_INIT; + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2); + cpuvaddr[i++] = 
rctx->assoclen * 8; + cpuvaddr[i++] = 0; + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2); + cpuvaddr[i++] = rctx->cryptlen * 8; + cpuvaddr[i++] = 0; + + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = rctx->iv[j]; + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | + SE_ADDR_HI_SZ(rctx->authsize); + + /* Destination Address */ + cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) | + SE_ADDR_HI_SZ(rctx->authsize); + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = op; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config); + + + return i; +} + +static int tegra_sm4_gcm_do_gmac(struct tegra_sm4_gcm_ctx *ctx, struct tegra_sm4_gcm_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr; + unsigned int nents, size; + + nents = sg_nents(rctx->src_sg); + scatterwalk_map_and_copy(rctx->inbuf.buf, + rctx->src_sg, 0, rctx->assoclen, 0); + + rctx->config = se->regcfg->cfg(ctx->mac_alg, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->mac_alg, rctx->encrypt) | + SE_AES_KEY_INDEX(rctx->key_id); + + size = tegra_sm4_gmac_prep_cmd(se, cpuvaddr, rctx); + + return tegra_se_host1x_submit(se, size); +} + +static int tegra_sm4_gcm_do_crypt(struct tegra_sm4_gcm_ctx *ctx, struct tegra_sm4_gcm_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr; + int size, ret; + + scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg, + rctx->assoclen, rctx->cryptlen, 0); + + rctx->config = se->regcfg->cfg(ctx->alg, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->alg, rctx->encrypt) | + SE_AES_KEY_INDEX(rctx->key_id); + + /* Prepare command and submit */ + size = tegra_sm4_gcm_crypt_prep_cmd(se, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, size); + if (ret) + return ret; + + /* Copy the result */ + scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg, + rctx->assoclen, rctx->cryptlen, 1); + + return 0; +} + +static int tegra_sm4_gcm_do_final(struct tegra_sm4_gcm_ctx *ctx, struct tegra_sm4_gcm_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + u32 *cpuvaddr = se->cmdbuf->addr; + int size, ret, off; + + rctx->config = se->regcfg->cfg(ctx->final_alg, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->final_alg, rctx->encrypt) | + SE_AES_KEY_INDEX(rctx->key_id); + + /* Prepare command and submit */ + size = tegra_sm4_gcm_prep_final_cmd(se, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, size); + if (ret) + return ret; + + if (rctx->encrypt) { + /* Copy the result */ + off = rctx->assoclen + rctx->cryptlen; + scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg, + off, rctx->authsize, 1); + } + + return 0; +} + +static int tegra_sm4_gcm_hw_verify(struct tegra_sm4_gcm_ctx *ctx, struct tegra_sm4_gcm_reqctx *rctx, u8 *mac) +{ + struct tegra_se *se = ctx->se; + u32 result, *cpuvaddr = se->cmdbuf->addr; + int 
size, ret; + + memcpy(rctx->inbuf.buf, mac, rctx->authsize); + rctx->inbuf.size = rctx->authsize; + + rctx->config = se->regcfg->cfg(ctx->verify_alg, rctx->encrypt); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->verify_alg, rctx->encrypt) | + SE_AES_KEY_INDEX(rctx->key_id); + + /* Prepare command and submit */ + size = tegra_sm4_gcm_prep_final_cmd(se, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, size); + if (ret) + return ret; + + memcpy(&result, rctx->outbuf.buf, 4); + + if (result != SE_GCM_VERIFY_OK) + return -EBADMSG; + + return 0; +} + +static int tegra_sm4_gcm_do_verify(struct tegra_sm4_gcm_ctx *ctx, struct tegra_sm4_gcm_reqctx *rctx) +{ + struct tegra_se *se = ctx->se; + int off, ret; + u8 mac[16]; + + off = rctx->assoclen + rctx->cryptlen; + scatterwalk_map_and_copy(mac, rctx->src_sg, off, rctx->authsize, 0); + + if (se->hw->support_aad_verify) + ret = tegra_sm4_gcm_hw_verify(ctx, rctx, mac); + else + ret = crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize); + + if (ret) + return -EBADMSG; + + return 0; +} + +static int tegra_sm4_gcm_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct aead_request *req = container_of(areq, struct aead_request, base); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct tegra_sm4_gcm_ctx *ctx = crypto_aead_ctx(tfm); + struct tegra_sm4_gcm_reqctx *rctx = aead_request_ctx(req); + struct tegra_se *se = ctx->se; + int ret, keyid; + + rctx->src_sg = req->src; + rctx->dst_sg = req->dst; + rctx->assoclen = req->assoclen; + rctx->authsize = crypto_aead_authsize(tfm); + + if (rctx->encrypt) + rctx->cryptlen = req->cryptlen; + else + rctx->cryptlen = req->cryptlen - ctx->authsize; + + /* Keys in ctx might be stored in KDS. Copy it to local keyslot */ + keyid = tegra_key_get_idx(ctx->se, ctx->key_id); + if (!keyid) + goto out; + + rctx->key_id = keyid; + + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, + &rctx->inbuf.addr, GFP_KERNEL); + if (!rctx->inbuf.buf) + goto key_free; + + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, + &rctx->outbuf.addr, GFP_KERNEL); + if (!rctx->outbuf.buf) + goto inbuf_free; + + memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); + rctx->iv[3] = (1 << 24); + + /* If there is associated data perform GMAC operation */ + if (rctx->assoclen) { + ret = tegra_sm4_gcm_do_gmac(ctx, rctx); + if (ret) + goto outbuf_free; + } + + /* GCM Encryption/Decryption operation */ + if (rctx->cryptlen) { + ret = tegra_sm4_gcm_do_crypt(ctx, rctx); + if (ret) + goto outbuf_free; + } + + /* GCM_FINAL operation */ + /* Need not do FINAL operation if hw supports MAC verification */ + if (rctx->encrypt || !se->hw->support_aad_verify) { + ret = tegra_sm4_gcm_do_final(ctx, rctx); + if (ret) + goto outbuf_free; + } + + if (!rctx->encrypt) + ret = tegra_sm4_gcm_do_verify(ctx, rctx); + +outbuf_free: + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, + rctx->outbuf.buf, rctx->outbuf.addr); +inbuf_free: + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, + rctx->inbuf.buf, rctx->inbuf.addr); +key_free: + /* Free the keyslot if it is cloned for this request */ + if (rctx->key_id != ctx->key_id) + tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg); + +out: + crypto_finalize_aead_request(se->engine, req, ret); + + return 0; +} + +static int tegra_sm4_gcm_cra_init(struct crypto_aead *tfm) +{ + struct tegra_sm4_gcm_ctx *ctx = crypto_aead_ctx(tfm); + 
struct aead_alg *alg = crypto_aead_alg(tfm); + struct tegra_se_alg *se_alg; + +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base); +#else + se_alg = container_of(alg, struct tegra_se_alg, alg.aead); +#endif + + crypto_aead_set_reqsize(tfm, sizeof(struct tegra_sm4_gcm_reqctx)); + + ctx->se = se_alg->se_dev; + ctx->key_id = 0; + ctx->alg = SE_ALG_SM4_GCM; + ctx->final_alg = SE_ALG_SM4_GCM_FINAL; + ctx->verify_alg = SE_ALG_SM4_GCM_VERIFY; + ctx->mac_alg = SE_ALG_SM4_GMAC; + +#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + ctx->enginectx.op.do_one_request = tegra_sm4_gcm_do_one_req; +#endif + + return 0; +} + +static int tegra_sm4_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + struct tegra_sm4_gcm_ctx *ctx = crypto_aead_ctx(tfm); + int ret; + + ret = crypto_gcm_check_authsize(authsize); + if (ret) + return ret; + + ctx->authsize = authsize; + + return 0; +} + +static void tegra_sm4_gcm_cra_exit(struct crypto_aead *tfm) +{ + struct tegra_sm4_gcm_ctx *ctx = crypto_tfm_ctx(&tfm->base); + + if (ctx->key_id) + tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); +} + +static int tegra_sm4_gcm_crypt(struct aead_request *req, bool encrypt) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct tegra_sm4_gcm_ctx *ctx = crypto_aead_ctx(tfm); + struct tegra_sm4_gcm_reqctx *rctx = aead_request_ctx(req); + + rctx->encrypt = encrypt; + + return crypto_transfer_aead_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sm4_gcm_encrypt(struct aead_request *req) +{ + return tegra_sm4_gcm_crypt(req, true); +} + +static int tegra_sm4_gcm_decrypt(struct aead_request *req) +{ + return tegra_sm4_gcm_crypt(req, false); +} + +static int tegra_sm4_gcm_setkey(struct crypto_aead *tfm, + const u8 *key, u32 keylen) +{ + struct tegra_sm4_gcm_ctx *ctx = crypto_aead_ctx(tfm); + + if (aes_check_keylen(keylen)) { + dev_err(ctx->se->dev, "key length validation failed\n"); + return -EINVAL; + } + + return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); +} + +static int tegra_sm4_cmac_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, struct tegra_sm4_cmac_reqctx *rctx) +{ + unsigned int data_count, res_bits = 0; + int i = 0, j; + u32 op; + + data_count = (rctx->datbuf.size / AES_BLOCK_SIZE); + + op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF; + + if (!(rctx->task & SHA_UPDATE)) { + op |= SE_AES_OP_FINAL; + res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8; + } + + if (!res_bits && data_count) + data_count--; + + if (rctx->task & SHA_FIRST) { + op |= SE_AES_OP_INIT; + rctx->task &= ~SHA_FIRST; + + cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr); + /* Load 0 IV */ + for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++) + cpuvaddr[i++] = 0; + } + + cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1); + cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) | + SE_LAST_BLOCK_RES_BITS(res_bits); + + cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6); + cpuvaddr[i++] = rctx->config; + cpuvaddr[i++] = rctx->crypto_config; + + /* Source Address */ + cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr); + cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) | + SE_ADDR_HI_SZ(rctx->datbuf.size); + + /* Destination Address */ + cpuvaddr[i++] = rctx->digest.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | + SE_ADDR_HI_SZ(rctx->digest.size)); + + cpuvaddr[i++] = 
se_host1x_opcode_nonincr(se->hw->regs->op, 1); + cpuvaddr[i++] = op; + + cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); + cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | + host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); + + dev_dbg(se->dev, "cfg %#x\n", rctx->config); + + return i; +} + +static int tegra_sm4_cmac_do_update(struct ahash_request *req) +{ + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + unsigned int nblks, nresidue, size; + + nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size; + nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size; + + /* + * Reserve the last block as residue during final() to process. + */ + if (!nresidue && nblks) { + nresidue += rctx->blk_size; + nblks--; + } + + rctx->src_sg = req->src; + rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; + rctx->total_len += rctx->datbuf.size; + rctx->config = se->regcfg->cfg(ctx->alg, 0); + rctx->crypto_config = se->regcfg->crypto_cfg(ctx->alg, 0) | + SE_AES_KEY_INDEX(rctx->key_id); + + /* + * Keep one block and residue bytes in residue and + * return. The bytes will be processed in final() + */ + if (nblks < 1) { + scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size, + rctx->src_sg, 0, req->nbytes, 0); + + rctx->residue.size += req->nbytes; + return 0; + } + + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + + /* Copy the previous residue first */ + if (rctx->residue.size) + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + + scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size, + rctx->src_sg, 0, req->nbytes - nresidue, 0); + + scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg, + req->nbytes - nresidue, nresidue, 0); + + /* Update residue value with the residue after current block */ + rctx->residue.size = nresidue; + + size = tegra_sm4_cmac_prep_cmd(se, se->cmdbuf->addr, rctx); + + return tegra_se_host1x_submit(se, size); +} + +static int tegra_sm4_cmac_do_final(struct ahash_request *req) +{ + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int ret = 0, i, size; + + rctx->datbuf.size = rctx->residue.size; + rctx->total_len += rctx->residue.size; + rctx->config = se->regcfg->cfg(ctx->final_alg, 0); + + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + } + + /* Prepare command and submit */ + size = tegra_sm4_cmac_prep_cmd(se, se->cmdbuf->addr, rctx); + ret = tegra_se_host1x_submit(se, size); + if (ret) + goto out; + + /* Read and clear Result register */ + memcpy(req->result, rctx->digest.buf, rctx->digest.size); + + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + + if (rctx->key_id != ctx->key_id) + tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg); + +out: + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: + dma_free_coherent(se->dev, 
crypto_ahash_blocksize(tfm) * 2, + rctx->residue.buf, rctx->residue.addr); + dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, + rctx->digest.addr); + + return ret; +} + +static int tegra_sm4_cmac_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct ahash_request *req = ahash_request_cast(areq); + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int ret = -EINVAL; + + if (rctx->task & SHA_UPDATE) { + ret = tegra_sm4_cmac_do_update(req); + rctx->task &= ~SHA_UPDATE; + } + + if (rctx->task & SHA_FINAL) { + ret = tegra_sm4_cmac_do_final(req); + rctx->task &= ~SHA_FINAL; + } + + crypto_finalize_hash_request(se->engine, req, ret); + + return ret; +} + +static int tegra_sm4_cmac_cra_init(struct crypto_tfm *tfm) +{ + struct tegra_sm4_cmac_ctx *ctx = crypto_tfm_ctx(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct tegra_se_alg *se_alg; + const char *algname; + + algname = crypto_tfm_alg_name(tfm); + +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base); +#else + se_alg = container_of(alg, struct tegra_se_alg, alg.ahash); +#endif + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct tegra_sm4_cmac_reqctx)); + + ctx->se = se_alg->se_dev; + ctx->key_id = 0; + ctx->alg = SE_ALG_SM4_CMAC; + ctx->final_alg = SE_ALG_SM4_CMAC_FINAL; + +#ifndef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + ctx->enginectx.op.do_one_request = tegra_sm4_cmac_do_one_req; +#endif + + return 0; +} + +static void tegra_sm4_cmac_cra_exit(struct crypto_tfm *tfm) +{ + struct tegra_sm4_cmac_ctx *ctx = crypto_tfm_ctx(tfm); + + tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); +} + +static int tegra_sm4_cmac_init(struct ahash_request *req) +{ + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int i; + + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->key_id = 0; + rctx->task = SHA_FIRST; + rctx->blk_size = crypto_ahash_blocksize(tfm); + rctx->digest.size = crypto_ahash_digestsize(tfm); + + /* Retrieve the key slot for CMAC */ + if (ctx->key_id) { + rctx->key_id = tegra_key_get_idx(ctx->se, ctx->key_id); + if (!rctx->key_id) + return -ENOMEM; + } + + rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, + &rctx->digest.addr, GFP_KERNEL); + if (!rctx->digest.buf) + goto digbuf_fail; + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + goto resbuf_fail; + + rctx->residue.size = 0; + rctx->datbuf.size = 0; + + /* Clear any previous result */ + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + + return 0; + +resbuf_fail: + dma_free_coherent(se->dev, rctx->blk_size, rctx->digest.buf, + rctx->digest.addr); +digbuf_fail: + if (rctx->key_id != ctx->key_id) + tegra_key_invalidate(ctx->se, rctx->key_id, ctx->alg); + + return -ENOMEM; +} + +static int tegra_sm4_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + + if (aes_check_keylen(keylen)) { + dev_err(ctx->se->dev, "key length validation failed\n"); + return -EINVAL; + } + + return 
tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); +} + +static int tegra_sm4_cmac_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task |= SHA_UPDATE; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sm4_cmac_final(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task |= SHA_FINAL; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sm4_cmac_finup(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task |= SHA_UPDATE | SHA_FINAL; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sm4_cmac_digest(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + + tegra_sm4_cmac_init(req); + rctx->task |= SHA_UPDATE | SHA_FINAL; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); +} + +static int tegra_sm4_cmac_export(struct ahash_request *req, void *out) +{ + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + u32 result_reg = ctx->se->hw->regs->result; + int i; + + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + rctx->result[i] = readl(ctx->se->base + result_reg + (i * 4)); + + memcpy(out, rctx, sizeof(*rctx)); + + return 0; +} + +static int tegra_sm4_cmac_import(struct ahash_request *req, const void *in) +{ + struct tegra_sm4_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sm4_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + u32 result_reg = ctx->se->hw->regs->result; + int i; + + memcpy(rctx, in, sizeof(*rctx)); + + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(rctx->result[i], ctx->se->base + result_reg + (i * 4)); + + return 0; +} + +static struct tegra_se_alg tegra_sm4_gcm_algs[] = { + { + .alg.aead = { +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .base = { +#endif + + .init = tegra_sm4_gcm_cra_init, + .exit = tegra_sm4_gcm_cra_exit, + .setkey = tegra_sm4_gcm_setkey, + .setauthsize = tegra_sm4_gcm_setauthsize, + .encrypt = tegra_sm4_gcm_encrypt, + .decrypt = tegra_sm4_gcm_decrypt, + .maxauthsize = AES_BLOCK_SIZE, + .ivsize = GCM_AES_IV_SIZE, + .base = { + .cra_name = "gcm(sm4)", + .cra_driver_name = "gcm-sm4-tegra", + .cra_priority = 500, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sm4_gcm_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + }, + .op.do_one_request = tegra_sm4_gcm_do_one_req, +#endif + } + } +}; + +static struct tegra_se_alg tegra_sm4_cmac_algs[] = { + { + .alg.ahash = { +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + .base = { +#endif + .init = tegra_sm4_cmac_init, + .setkey = tegra_sm4_cmac_setkey, + .update = tegra_sm4_cmac_update, + .final = tegra_sm4_cmac_final, + .finup = 
tegra_sm4_cmac_finup, + .digest = tegra_sm4_cmac_digest, + .export = tegra_sm4_cmac_export, + .import = tegra_sm4_cmac_import, + .halg.digestsize = AES_BLOCK_SIZE, + .halg.statesize = sizeof(struct tegra_sm4_cmac_reqctx), + + .halg.base = { + .cra_name = "cmac(sm4)", + .cra_driver_name = "cmac-sm4-tegra", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct tegra_sm4_cmac_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = tegra_sm4_cmac_cra_init, + .cra_exit = tegra_sm4_cmac_cra_exit, + }, +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX + }, + .op.do_one_request = tegra_sm4_cmac_do_one_req, +#endif + } + } +}; + +struct tegra_se_regcfg tegra264_sm4_regcfg = { + .cfg = tegra264_sm4_cfg, + .crypto_cfg = tegra264_sm4_crypto_cfg, + .manifest = tegra_sm4_kac2_manifest +}; + +#ifdef NV_CONFTEST_REMOVE_STRUCT_CRYPTO_ENGINE_CTX +int tegra_init_sm4(struct tegra_se *se) +{ + struct aead_engine_alg *aead_alg; + struct ahash_engine_alg *ahash_alg; + struct skcipher_engine_alg *sk_alg; + int i, ret; + + se->regcfg = &tegra264_sm4_regcfg; + + for (i = 0; i < ARRAY_SIZE(tegra_sm4_algs); i++) { + tegra_sm4_algs[i].se_dev = se; + sk_alg = &tegra_sm4_algs[i].alg.skcipher; + ret = CRYPTO_REGISTER(skcipher, sk_alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + sk_alg->base.base.cra_name); + goto sm4_err; + } + } + + for (i = 0; i < ARRAY_SIZE(tegra_sm4_gcm_algs); i++) { + tegra_sm4_gcm_algs[i].se_dev = se; + aead_alg = &tegra_sm4_gcm_algs[i].alg.aead; + ret = CRYPTO_REGISTER(aead, aead_alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + aead_alg->base.base.cra_name); + goto aead_err; + } + } + + for (i = 0; i < ARRAY_SIZE(tegra_sm4_cmac_algs); i++) { + tegra_sm4_cmac_algs[i].se_dev = se; + ahash_alg = &tegra_sm4_cmac_algs[i].alg.ahash; + ret = CRYPTO_REGISTER(ahash, ahash_alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + ahash_alg->base.halg.base.cra_name); + goto cmac_err; + } + } + + return 0; + + +cmac_err: + for (--i; i >= 0; i--) + CRYPTO_UNREGISTER(ahash, &tegra_sm4_cmac_algs[i].alg.ahash); + + i = ARRAY_SIZE(tegra_sm4_gcm_algs); +aead_err: + for (--i; i >= 0; i--) + CRYPTO_UNREGISTER(aead, &tegra_sm4_gcm_algs[i].alg.aead); + + i = ARRAY_SIZE(tegra_sm4_algs); +sm4_err: + for (--i; i >= 0; i--) + CRYPTO_UNREGISTER(skcipher, &tegra_sm4_algs[i].alg.skcipher); + + return ret; +} +#else +int tegra_init_sm4(struct tegra_se *se) +{ + struct aead_alg *aead_alg; + struct ahash_alg *ahash_alg; + struct skcipher_alg *sk_alg; + int i, ret; + + se->regcfg = &tegra264_sm4_regcfg; + + for (i = 0; i < ARRAY_SIZE(tegra_sm4_algs); i++) { + tegra_sm4_algs[i].se_dev = se; + sk_alg = &tegra_sm4_algs[i].alg.skcipher; + ret = CRYPTO_REGISTER(skcipher, sk_alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + sk_alg->base.cra_name); + goto sm4_err; + } + } + + for (i = 0; i < ARRAY_SIZE(tegra_sm4_gcm_algs); i++) { + tegra_sm4_gcm_algs[i].se_dev = se; + aead_alg = &tegra_sm4_gcm_algs[i].alg.aead; + ret = CRYPTO_REGISTER(aead, aead_alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + aead_alg->base.cra_name); + goto aead_err; + } + } + + for (i = 0; i < ARRAY_SIZE(tegra_sm4_cmac_algs); i++) { + tegra_sm4_cmac_algs[i].se_dev = se; + ahash_alg = &tegra_sm4_cmac_algs[i].alg.ahash; + ret = CRYPTO_REGISTER(ahash, ahash_alg); + if (ret) { + dev_err(se->dev, "failed to register %s\n", + ahash_alg->halg.base.cra_name); + goto cmac_err; + } + } + + return 0; + + 
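+	/* Unwind in reverse: unregister everything registered before the failure */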
+cmac_err: + for (--i; i >= 0; i--) + CRYPTO_UNREGISTER(ahash, &tegra_sm4_cmac_algs[i].alg.ahash); + + i = ARRAY_SIZE(tegra_sm4_gcm_algs); +aead_err: + for (--i; i >= 0; i--) + CRYPTO_UNREGISTER(aead, &tegra_sm4_gcm_algs[i].alg.aead); + + i = ARRAY_SIZE(tegra_sm4_algs); +sm4_err: + for (--i; i >= 0; i--) + CRYPTO_UNREGISTER(skcipher, &tegra_sm4_algs[i].alg.skcipher); + + return ret; +} +#endif + +void tegra_deinit_sm4(struct tegra_se *se) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tegra_sm4_gcm_algs); i++) + CRYPTO_UNREGISTER(aead, &tegra_sm4_gcm_algs[i].alg.aead); + + for (i = 0; i < ARRAY_SIZE(tegra_sm4_cmac_algs); i++) + CRYPTO_UNREGISTER(ahash, &tegra_sm4_cmac_algs[i].alg.ahash); + + for (i = 0; i < ARRAY_SIZE(tegra_sm4_algs); i++) + CRYPTO_UNREGISTER(skcipher, &tegra_sm4_algs[i].alg.skcipher); +} diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index 78521384..56b29495 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -29,6 +29,7 @@ #define SE_SHA_KEY_ADDR 0x4094 #define SE_SHA_KEY_DATA 0x4098 #define SE_SHA_KEYMANIFEST 0x409c +#define SE_SHA_KAC2_KEYMANIFEST 0x4178 #define SE_SHA_CRYPTO_CFG 0x40a4 #define SE_SHA_KEY_DST 0x40a8 #define SE_SHA_SRC_KSLT 0x4180 @@ -102,6 +103,7 @@ #define SE_AES0_CMAC_RESULT 0x10c4 #define SE_AES0_SRC_KSLT 0x1100 #define SE_AES0_TGT_KSLT 0x1104 +#define SE_AES0_KAC2_KEYMANIFEST 0x1108 #define SE_AES0_KEYMANIFEST 0x1114 #define SE_AES0_AAD_LEN 0x112c #define SE_AES0_CRYPTO_MSG_LEN 0x1134 @@ -117,35 +119,66 @@ #define SE_AES1_CMAC_RESULT 0x20c4 #define SE_AES1_SRC_KSLT 0x2100 #define SE_AES1_TGT_KSLT 0x2104 +#define SE_AES1_KAC2_KEYMANIFEST 0x2108 #define SE_AES1_KEYMANIFEST 0x2114 #define SE_AES1_AAD_LEN 0x212c #define SE_AES1_CRYPTO_MSG_LEN 0x2134 #define SE_AES_CFG_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x) +#define SE_AES_ENC_MODE_ECB SE_AES_CFG_ENC_MODE(0) +#define SE_AES_ENC_MODE_CBC SE_AES_CFG_ENC_MODE(1) +#define SE_AES_ENC_MODE_OFB SE_AES_CFG_ENC_MODE(2) #define SE_AES_ENC_MODE_GMAC SE_AES_CFG_ENC_MODE(3) #define SE_AES_ENC_MODE_GCM SE_AES_CFG_ENC_MODE(4) #define SE_AES_ENC_MODE_GCM_FINAL SE_AES_CFG_ENC_MODE(5) +#define SE_AES_ENC_MODE_KW SE_AES_CFG_ENC_MODE(6) #define SE_AES_ENC_MODE_CMAC SE_AES_CFG_ENC_MODE(7) +#define SE_AES_ENC_MODE_CTR SE_AES_CFG_ENC_MODE(10) +#define SE_AES_ENC_MODE_XTS SE_AES_CFG_ENC_MODE(11) #define SE_AES_ENC_MODE_CBC_MAC SE_AES_CFG_ENC_MODE(12) +#define SE_AES_ENC_MODE_AESKW_CRYPT SE_AES_CFG_ENC_MODE(13) +#define SE_AES_ENC_MODE_CTR_DRBG_INSTANTIATE_DF SE_AES_CFG_ENC_MODE(14) +#define SE_AES_ENC_MODE_CTR_DRBG_GENKEY SE_AES_CFG_ENC_MODE(15) +#define SE_AES_ENC_MODE_CTR_DRBG_GENRND SE_AES_CFG_ENC_MODE(16) +#define SE_AES_ENC_MODE_KDF_CMAC_AES SE_AES_CFG_ENC_MODE(20) +#define SE_AES_ENC_MODE_GCM2 SE_AES_CFG_ENC_MODE(21) #define SE_AES_CFG_DEC_MODE(x) FIELD_PREP(GENMASK(23, 16), x) +#define SE_AES_DEC_MODE_ECB SE_AES_CFG_DEC_MODE(0) +#define SE_AES_DEC_MODE_CBC SE_AES_CFG_DEC_MODE(1) +#define SE_AES_DEC_MODE_OFB SE_AES_CFG_DEC_MODE(2) #define SE_AES_DEC_MODE_GMAC SE_AES_CFG_DEC_MODE(3) #define SE_AES_DEC_MODE_GCM SE_AES_CFG_DEC_MODE(4) #define SE_AES_DEC_MODE_GCM_FINAL SE_AES_CFG_DEC_MODE(5) +#define SE_AES_DEC_MODE_KW SE_AES_CFG_DEC_MODE(6) +#define SE_AES_DEC_MODE_CMAC SE_AES_CFG_DEC_MODE(7) +#define SE_AES_DEC_MODE_CMAC_VERIFY SE_AES_CFG_DEC_MODE(8) +#define SE_AES_DEC_MODE_GCM_VERIFY SE_AES_CFG_DEC_MODE(9) +#define SE_AES_DEC_MODE_CTR SE_AES_CFG_DEC_MODE(10) +#define SE_AES_DEC_MODE_XTS SE_AES_CFG_DEC_MODE(11) #define SE_AES_DEC_MODE_CBC_MAC 
SE_AES_CFG_DEC_MODE(12) +#define SE_AES_DEC_MODE_AESKW_CRYPT SE_AES_CFG_DEC_MODE(13) +#define SE_AES_DEC_MODE_GCM2 SE_AES_CFG_DEC_MODE(21) #define SE_AES_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x) #define SE_AES_ENC_ALG_NOP SE_AES_CFG_ENC_ALG(0) #define SE_AES_ENC_ALG_AES_ENC SE_AES_CFG_ENC_ALG(1) #define SE_AES_ENC_ALG_RNG SE_AES_CFG_ENC_ALG(2) #define SE_AES_ENC_ALG_SHA SE_AES_CFG_ENC_ALG(3) +#define SE_AES_ENC_ALG_SM4_ENC SE_AES_CFG_ENC_ALG(5) #define SE_AES_ENC_ALG_HMAC SE_AES_CFG_ENC_ALG(7) #define SE_AES_ENC_ALG_KDF SE_AES_CFG_ENC_ALG(8) +#define SE_AES_ENC_ALG_KEY_INVLD SE_AES_CFG_ENC_ALG(10) +#define SE_AES_ENC_ALG_KEY_MOV SE_AES_CFG_ENC_ALG(11) +#define SE_AES_ENC_ALG_KEY_INQUIRE SE_AES_CFG_ENC_ALG(12) #define SE_AES_ENC_ALG_INS SE_AES_CFG_ENC_ALG(13) +#define SE_AES_ENC_ALG_CLONE SE_AES_CFG_ENC_ALG(14) +#define SE_AES_ENC_ALG_LOCK SE_AES_CFG_ENC_ALG(15) #define SE_AES_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x) #define SE_AES_DEC_ALG_NOP SE_AES_CFG_DEC_ALG(0) #define SE_AES_DEC_ALG_AES_DEC SE_AES_CFG_DEC_ALG(1) +#define SE_AES_DEC_ALG_SM4_DEC SE_AES_CFG_DEC_ALG(5) #define SE_AES_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x) #define SE_AES_DST_MEMORY SE_AES_CFG_DST(0) @@ -232,6 +265,56 @@ #define SE_KAC_USER_NS FIELD_PREP(GENMASK(6, 4), 3) +#define SE_KAC2_USER(x) FIELD_PREP(GENMASK(29, 22), x) +#define SE_KAC2_USER_GPSE SE_KAC2_USER(3) +#define SE_KAC2_USER_GCSE SE_KAC2_USER(5) + +#define SE_KAC2_CLONEABLE FIELD_PREP(BIT(21), 1) +#define SE_KAC2_EXPORTABLE FIELD_PREP(BIT(20), 1) +#define SE_KAC2_DECRYPT_EN FIELD_PREP(BIT(19), 1) +#define SE_KAC2_ENCRYPT_EN FIELD_PREP(BIT(18), 1) + +#define SE_KAC2_PURPOSE(x) FIELD_PREP(GENMASK(17, 12), x) +#define SE_KAC2_ENC SE_KAC2_PURPOSE(0) +#define SE_KAC2_CMAC SE_KAC2_PURPOSE(1) +#define SE_KAC2_HMAC SE_KAC2_PURPOSE(2) +#define SE_KAC2_GCM_KW SE_KAC2_PURPOSE(3) +#define SE_KAC2_HMAC_KDK SE_KAC2_PURPOSE(6) +#define SE_KAC2_HMAC_KDD SE_KAC2_PURPOSE(7) +#define SE_KAC2_HMAC_KDD_KUW SE_KAC2_PURPOSE(8) +#define SE_KAC2_XTS SE_KAC2_PURPOSE(9) +#define SE_KAC2_GCM SE_KAC2_PURPOSE(10) +#define SE_KAC2_CMAC_KDK SE_KAC2_PURPOSE(12) +#define SE_KAC2_AES_KW SE_KAC2_PURPOSE(13) +#define SE_KAC2_CTR_DRBG_ENT SE_KAC2_PURPOSE(14) +#define SE_KAC2_CTR_DRBG_KV SE_KAC2_PURPOSE(15) +#define SE_KAC2_GCM_HWIV SE_KAC2_PURPOSE(16) +#define SE_KAC2_HARDEN_HMAC_KDK SE_KAC2_PURPOSE(17) +#define SE_KAC2_HARDEN_HMAC_KDD SE_KAC2_PURPOSE(18) + +#define SE_KAC2_SIZE(x) FIELD_PREP(GENMASK(11, 8), x) +#define SE_KAC2_SIZE_128 SE_KAC2_SIZE(0) +#define SE_KAC2_SIZE_192 SE_KAC2_SIZE(1) +#define SE_KAC2_SIZE_256 SE_KAC2_SIZE(2) + +#define SE_KAC2_ORIGIN_SW FIELD_PREP(BIT(7), 1) + +#define SE_KAC2_SUBTYPE(x) FIELD_PREP(GENMASK(6, 3), x) +#define SE_KAC2_SUBTYPE_AES SE_KAC2_SUBTYPE(0) +#define SE_KAC2_SUBTYPE_SHA SE_KAC2_SUBTYPE(0) +#define SE_KAC2_SUBTYPE_SM4 SE_KAC2_SUBTYPE(1) +#define SE_KAC2_SUBTYPE_SM3 SE_KAC2_SUBTYPE(1) + +#define SE_KAC2_TYPE(x) FIELD_PREP(GENMASK(2, 0), x) +#define SE_KAC2_TYPE_SYM SE_KAC2_TYPE(2) + +#define SE_KSLT_TABLE_ID_MASK GENMASK(31, 26) +#define SE_KSLT_TABLE_ID(x) FIELD_PREP(SE_KSLT_TABLE_ID_MASK, x) +#define SE_KSLT_TABLE_ID_GLOBAL SE_KSLT_TABLE_ID(48) + +#define SE_KSLT_REGION_ID(x) FIELD_PREP(GENMASK(25, 16), x) +#define SE_KSLT_REGION_ID_SYM SE_KSLT_REGION_ID(2) + #define SE_AES_KEY_DST_INDEX(x) FIELD_PREP(GENMASK(11, 8), x) #define SE_ADDR_HI_MSB(x) FIELD_PREP(GENMASK(31, 24), x) #define SE_ADDR_HI_SZ(x) FIELD_PREP(GENMASK(23, 0), x) @@ -245,12 +328,10 @@ SE_AES_DST_MEMORY) #define SE_CFG_GMAC_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \ - 
SE_AES_DEC_ALG_NOP | \ SE_AES_ENC_MODE_GMAC | \ SE_AES_DST_MEMORY) -#define SE_CFG_GMAC_DECRYPT (SE_AES_ENC_ALG_NOP | \ - SE_AES_DEC_ALG_AES_DEC | \ +#define SE_CFG_GMAC_DECRYPT (SE_AES_DEC_ALG_AES_DEC | \ SE_AES_DEC_MODE_GMAC | \ SE_AES_DST_MEMORY) @@ -259,31 +340,136 @@ SE_AES_ENC_MODE_GCM | \ SE_AES_DST_MEMORY) -#define SE_CFG_GCM_DECRYPT (SE_AES_ENC_ALG_NOP | \ - SE_AES_DEC_ALG_AES_DEC | \ +#define SE_CFG_GCM_DECRYPT (SE_AES_DEC_ALG_AES_DEC | \ SE_AES_DEC_MODE_GCM | \ SE_AES_DST_MEMORY) #define SE_CFG_GCM_FINAL_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \ - SE_AES_DEC_ALG_NOP | \ SE_AES_ENC_MODE_GCM_FINAL | \ SE_AES_DST_MEMORY) -#define SE_CFG_GCM_FINAL_DECRYPT (SE_AES_ENC_ALG_NOP | \ - SE_AES_DEC_ALG_AES_DEC | \ +#define SE_CFG_GCM_FINAL_DECRYPT (SE_AES_DEC_ALG_AES_DEC | \ SE_AES_DEC_MODE_GCM_FINAL | \ SE_AES_DST_MEMORY) +#define SE_CFG_GCM_VERIFY (SE_AES_DEC_ALG_AES_DEC | \ + SE_AES_DEC_MODE_GCM_VERIFY | \ + SE_AES_DST_MEMORY) + #define SE_CFG_CMAC (SE_AES_ENC_ALG_AES_ENC | \ - SE_AES_ENC_MODE_CMAC | \ - SE_AES_DST_HASH_REG) + SE_AES_ENC_MODE_CMAC) #define SE_CFG_CBC_MAC (SE_AES_ENC_ALG_AES_ENC | \ SE_AES_ENC_MODE_CBC_MAC) +#define SE_CFG_SM4_ENCRYPT (SE_AES_ENC_ALG_SM4_ENC | \ + SE_AES_DEC_ALG_NOP | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_SM4_DECRYPT (SE_AES_ENC_ALG_NOP | \ + SE_AES_DEC_ALG_SM4_DEC | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_SM4_GMAC_ENCRYPT (SE_AES_ENC_ALG_SM4_ENC | \ + SE_AES_ENC_MODE_GMAC | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_SM4_GMAC_DECRYPT (SE_AES_DEC_ALG_SM4_DEC | \ + SE_AES_DEC_MODE_GMAC | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_SM4_GCM_ENCRYPT (SE_AES_ENC_ALG_SM4_ENC | \ + SE_AES_ENC_MODE_GCM | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_SM4_GCM_DECRYPT (SE_AES_DEC_ALG_SM4_DEC | \ + SE_AES_DEC_MODE_GCM | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_SM4_GCM_FINAL_ENCRYPT (SE_AES_ENC_ALG_SM4_ENC | \ + SE_AES_ENC_MODE_GCM_FINAL | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_SM4_GCM_FINAL_DECRYPT (SE_AES_DEC_ALG_SM4_DEC | \ + SE_AES_DEC_MODE_GCM_FINAL | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_SM4_GCM_VERIFY (SE_AES_DEC_ALG_SM4_DEC | \ + SE_AES_DEC_MODE_GCM_VERIFY | \ + SE_AES_DST_MEMORY) + +#define SE_CFG_SM4_CMAC (SE_AES_ENC_ALG_SM4_ENC | \ + SE_AES_ENC_MODE_CMAC) + #define SE_CFG_INS (SE_AES_ENC_ALG_INS | \ SE_AES_DEC_ALG_NOP) +#define SE_CFG_MOV (SE_AES_ENC_ALG_KEY_MOV | \ + SE_AES_DEC_ALG_NOP) + +#define SE_CFG_INVLD (SE_AES_ENC_ALG_KEY_INVLD | \ + SE_AES_DEC_ALG_NOP) + +#define SE_CFG_ECB_ENCRYPT (SE_AES_ENC_MODE_ECB | \ + SE_CFG_AES_ENCRYPT) + +#define SE_CFG_ECB_DECRYPT (SE_AES_DEC_MODE_ECB | \ + SE_CFG_AES_DECRYPT) + +#define SE_CFG_CBC_ENCRYPT (SE_AES_ENC_MODE_CBC | \ + SE_CFG_AES_ENCRYPT) + +#define SE_CFG_CBC_DECRYPT (SE_AES_DEC_MODE_CBC | \ + SE_CFG_AES_DECRYPT) + +#define SE_CFG_OFB_ENCRYPT (SE_AES_ENC_MODE_OFB | \ + SE_CFG_AES_ENCRYPT) + +#define SE_CFG_OFB_DECRYPT (SE_AES_DEC_MODE_OFB | \ + SE_CFG_AES_DECRYPT) + +#define SE_CFG_CTR_ENCRYPT (SE_AES_ENC_MODE_CTR | \ + SE_CFG_AES_ENCRYPT) + +#define SE_CFG_CTR_DECRYPT (SE_AES_DEC_MODE_CTR | \ + SE_CFG_AES_DECRYPT) + +#define SE_CFG_XTS_ENCRYPT (SE_AES_ENC_MODE_XTS | \ + SE_CFG_AES_ENCRYPT) + +#define SE_CFG_XTS_DECRYPT (SE_AES_DEC_MODE_XTS | \ + SE_CFG_AES_DECRYPT) + +#define SE_CFG_SM4_ECB_ENCRYPT (SE_AES_ENC_MODE_ECB | \ + SE_CFG_SM4_ENCRYPT) + +#define SE_CFG_SM4_ECB_DECRYPT (SE_AES_DEC_MODE_ECB | \ + SE_CFG_SM4_DECRYPT) + +#define SE_CFG_SM4_CBC_ENCRYPT (SE_AES_ENC_MODE_CBC | \ + SE_CFG_SM4_ENCRYPT) + +#define SE_CFG_SM4_CBC_DECRYPT (SE_AES_DEC_MODE_CBC | \ + SE_CFG_SM4_DECRYPT) + +#define 
SE_CFG_SM4_OFB_ENCRYPT (SE_AES_ENC_MODE_OFB | \ + SE_CFG_SM4_ENCRYPT) + +#define SE_CFG_SM4_OFB_DECRYPT (SE_AES_DEC_MODE_OFB | \ + SE_CFG_SM4_DECRYPT) + +#define SE_CFG_SM4_CTR_ENCRYPT (SE_AES_ENC_MODE_CTR | \ + SE_CFG_SM4_ENCRYPT) + +#define SE_CFG_SM4_CTR_DECRYPT (SE_AES_DEC_MODE_CTR | \ + SE_CFG_SM4_DECRYPT) + +#define SE_CFG_SM4_XTS_ENCRYPT (SE_AES_ENC_MODE_XTS | \ + SE_CFG_SM4_ENCRYPT) + +#define SE_CFG_SM4_XTS_DECRYPT (SE_AES_DEC_MODE_XTS | \ + SE_CFG_SM4_ENCRYPT) + #define SE_CRYPTO_CFG_ECB_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \ SE_AES_XOR_POS_BYPASS | \ SE_AES_CORE_SEL_ENCRYPT) @@ -343,6 +529,8 @@ #define SE_MAX_KEYSLOT 15 #define SE_MAX_MEM_ALLOC SZ_4M +#define SE_GCM_VERIFY_OK 0x5a5a5a5a + #define SHA_FIRST BIT(0) #define SHA_UPDATE BIT(1) #define SHA_FINAL BIT(2) @@ -372,8 +560,23 @@ enum se_aes_alg { SE_ALG_GMAC, /* GMAC mode */ SE_ALG_GCM, /* GCM mode */ SE_ALG_GCM_FINAL, /* GCM FINAL mode */ - SE_ALG_CMAC, /* Cipher-based MAC (CMAC) mode */ - SE_ALG_CBC_MAC, /* CBC MAC mode */ + SE_ALG_GCM_VERIFY, /* GCM Verify */ + SE_ALG_CMAC, /* Cipher-based MAC (CMAC) mode */ + SE_ALG_CMAC_FINAL, /* Cipher-based MAC (CMAC) mode final task */ + SE_ALG_CBC_MAC, /* CBC MAC mode */ + + /* ShāngMì 4 Algorithms */ + SE_ALG_SM4_CBC, + SE_ALG_SM4_ECB, + SE_ALG_SM4_CTR, + SE_ALG_SM4_OFB, + SE_ALG_SM4_XTS, + SE_ALG_SM4_GMAC, + SE_ALG_SM4_GCM, + SE_ALG_SM4_GCM_FINAL, + SE_ALG_SM4_GCM_VERIFY, + SE_ALG_SM4_CMAC, + SE_ALG_SM4_CMAC_FINAL, }; enum se_hash_alg { @@ -387,6 +590,7 @@ enum se_hash_alg { SE_ALG_SHA3_256, /* Secure Hash Algorithm3-256 (SHA3-256) mode */ SE_ALG_SHA3_384, /* Secure Hash Algorithm3-384 (SHA3-384) mode */ SE_ALG_SHA3_512, /* Secure Hash Algorithm3-512 (SHA3-512) mode */ + SE_ALG_SM3_256, /* ShangMi 3 - 256 */ SE_ALG_SHAKE128, /* Secure Hash Algorithm3 (SHAKE128) mode */ SE_ALG_SHAKE256, /* Secure Hash Algorithm3 (SHAKE256) mode */ SE_ALG_HMAC_SHA224, /* Hash based MAC (HMAC) - 224 */ @@ -424,23 +628,33 @@ struct tegra_se_regs { u32 key_addr; u32 key_data; u32 key_dst; + u32 src_kslt; + u32 tgt_kslt; u32 result; }; +struct tegra_se_regcfg { + int (*cfg)(u32 alg, bool encrypt); + int (*crypto_cfg)(u32 alg, bool encrypt); + int (*manifest)(u32 user, u32 alg, u32 keylen); +}; + struct tegra_se_hw { const struct tegra_se_regs *regs; int (*init_alg)(struct tegra_se *se); void (*deinit_alg)(struct tegra_se *se); + bool support_kds; bool support_sm_alg; + bool support_aad_verify; u32 host1x_class; u32 kac_ver; }; struct tegra_se { - int (*manifest)(u32 user, u32 alg, u32 keylen); const struct tegra_se_hw *hw; struct host1x_client client; struct host1x_channel *channel; + struct tegra_se_regcfg *regcfg; struct tegra_se_cmdbuf *cmdbuf; struct crypto_engine *engine; struct host1x_syncpt *syncpt; @@ -481,11 +695,38 @@ static inline int se_algname_to_algid(const char *name) return SE_ALG_XTS; else if (!strcmp(name, "cmac(aes)")) return SE_ALG_CMAC; + else if (!strcmp(name, "cmac(aes)-final")) + return SE_ALG_CMAC_FINAL; else if (!strcmp(name, "gcm(aes)")) return SE_ALG_GCM; + else if (!strcmp(name, "gcm(aes)-mac")) + return SE_ALG_GMAC; + else if (!strcmp(name, "gcm(aes)-final")) + return SE_ALG_GCM_FINAL; else if (!strcmp(name, "ccm(aes)")) return SE_ALG_CBC_MAC; + else if (!strcmp(name, "cbc(sm4)")) + return SE_ALG_SM4_CBC; + else if (!strcmp(name, "ecb(sm4)")) + return SE_ALG_SM4_ECB; + else if (!strcmp(name, "ofb(sm4)")) + return SE_ALG_SM4_OFB; + else if (!strcmp(name, "ctr(sm4)")) + return SE_ALG_SM4_CTR; + else if (!strcmp(name, "xts(sm4)")) + return SE_ALG_SM4_XTS; + else if 
(!strcmp(name, "cmac(sm4)")) + return SE_ALG_SM4_CMAC; + else if (!strcmp(name, "cmac(sm4)-final")) + return SE_ALG_SM4_CMAC_FINAL; + else if (!strcmp(name, "gcm(sm4)")) + return SE_ALG_SM4_GCM; + else if (!strcmp(name, "gcm(sm4)-mac")) + return SE_ALG_SM4_GMAC; + else if (!strcmp(name, "gcm(sm4)-final")) + return SE_ALG_SM4_GCM_FINAL; + else if (!strcmp(name, "sha1")) return SE_ALG_SHA1; else if (!strcmp(name, "sha224")) @@ -504,6 +745,8 @@ static inline int se_algname_to_algid(const char *name) return SE_ALG_SHA3_384; else if (!strcmp(name, "sha3-512")) return SE_ALG_SHA3_512; + else if (!strcmp(name, "sm3")) + return SE_ALG_SM3_256; else if (!strcmp(name, "hmac(sha224)")) return SE_ALG_HMAC_SHA224; else if (!strcmp(name, "hmac(sha256)")) @@ -519,13 +762,20 @@ static inline int se_algname_to_algid(const char *name) /* Functions */ int tegra_init_aes(struct tegra_se *se); int tegra_init_hash(struct tegra_se *se); +int tegra_init_sm4(struct tegra_se *se); void tegra_deinit_aes(struct tegra_se *se); void tegra_deinit_hash(struct tegra_se *se); +void tegra_deinit_sm4(struct tegra_se *se); int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid); void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); +unsigned int tegra_key_get_idx(struct tegra_se *se, u32 keyid); int tegra_se_host1x_submit(struct tegra_se *se, u32 size); +u32 tegra_kds_get_id(void); +void tegra_kds_free_id(u32 keyid); +bool tegra_key_in_kds(u32 keyid); + /* HOST1x OPCODES */ static inline u32 host1x_opcode_setpayload(unsigned int payload) {