mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 09:11:26 +03:00)
crypto: tegra: Reserve keyslots to allocate dynamically
The HW supports storing only 15 keys at a time. This limits the number of tfms that can work without failures. To solve this, reserve keyslots and use the reserved ones during the encryption/decryption operation. This allows users to have hardware-protected keys and faster operations when the number of tfms is limited, while not halting operations when there are more tfms.

Bug 4883011

Change-Id: I220f1e8205dde1f078be6ed4cb09b699b6d5dfa2
Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3263283
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Brad Griffis <bgriffis@nvidia.com>
committed by: mobile promotions
parent: 2f3c077115
commit: 15d7ca57b1
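The idea in the patch below, in short: setkey still calls tegra_key_submit() to claim one of the SE keyslots, but instead of failing when every slot is taken it now caches the key in the tfm context; the request path then programs the cached key into a reserved slot (14 for AES, 15 for the XTS second key) via tegra_key_submit_reserved_aes()/_xts(), runs the operation, and wipes the slot with tegra_key_invalidate_reserved(). The stand-alone C sketch below is only a user-space model of that policy for the AES slot; the slot number and the cache-then-borrow flow mirror the diff, while the model_* helpers, the slot arrays and main() are illustrative and not part of the driver.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SE_MAX_KEYSLOT          15
#define TEGRA_AES_RESERVED_KSLT 14
#define AES_MAX_KEY_SIZE        32

/* Slot 0 is unused so that key_id == 0 can mean "no slot held". */
static bool slot_busy[SE_MAX_KEYSLOT + 1];
static unsigned char slot_key[SE_MAX_KEYSLOT + 1][AES_MAX_KEY_SIZE];

struct tfm_ctx {
	unsigned int key_id;                 /* persistent slot, or 0 */
	unsigned int keylen;
	unsigned char key[AES_MAX_KEY_SIZE]; /* cached copy when no slot was free */
};

/* Models tegra_key_submit(): claim a persistent, non-reserved slot if any is free. */
static int model_key_submit(const unsigned char *key, unsigned int keylen,
			    unsigned int *keyid)
{
	for (unsigned int id = 1; id < TEGRA_AES_RESERVED_KSLT; id++) {
		if (!slot_busy[id]) {
			slot_busy[id] = true;
			memcpy(slot_key[id], key, keylen);
			*keyid = id;
			return 0;
		}
	}
	return -1; /* every non-reserved slot is already taken */
}

/* Models the patched setkey: cache the key in the ctx instead of failing. */
static int model_setkey(struct tfm_ctx *ctx, const unsigned char *key,
			unsigned int keylen)
{
	if (model_key_submit(key, keylen, &ctx->key_id)) {
		ctx->keylen = keylen;
		memcpy(ctx->key, key, keylen);
	}
	return 0;
}

/* Models the patched request path: borrow the reserved slot when no slot is held. */
static void model_do_one_req(struct tfm_ctx *ctx)
{
	unsigned int key_id = ctx->key_id;

	if (!key_id) {
		/* like tegra_key_submit_reserved_aes(): load the cached key for this request only */
		key_id = TEGRA_AES_RESERVED_KSLT;
		memcpy(slot_key[key_id], ctx->key, ctx->keylen);
	}

	printf("request served from keyslot %u%s\n", key_id,
	       key_id == TEGRA_AES_RESERVED_KSLT ? " (reserved)" : "");

	/* like tegra_key_invalidate_reserved(): wipe the reserved slot afterwards */
	if (key_id == TEGRA_AES_RESERVED_KSLT)
		memset(slot_key[key_id], 0, AES_MAX_KEY_SIZE);
}

int main(void)
{
	static struct tfm_ctx ctx[16];
	unsigned char key[AES_MAX_KEY_SIZE] = {0};

	/* With more tfms than free slots, the overflow tfms share the reserved slot. */
	for (int i = 0; i < 16; i++) {
		model_setkey(&ctx[i], key, 16);
		model_do_one_req(&ctx[i]);
	}
	return 0;
}

Compiled and run, the model gives the first 13 tfms their own slot and serves the remaining ones from the reserved slot, which is the "more tfms than keyslots" case the commit message describes.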
@@ -33,6 +33,9 @@ struct tegra_aes_ctx {
 	u32 ivsize;
 	u32 key1_id;
 	u32 key2_id;
+	u32 keylen;
+	u8 key1[AES_MAX_KEY_SIZE];
+	u8 key2[AES_MAX_KEY_SIZE];
 };
 
 struct tegra_aes_reqctx {
@@ -42,6 +45,8 @@ struct tegra_aes_reqctx {
 	u32 crypto_config;
 	u32 len;
 	u32 *iv;
+	u32 keylen;
+	u8 key[AES_MAX_KEY_SIZE];
 };
 
 struct tegra_aead_ctx {
@@ -53,6 +58,7 @@ struct tegra_aead_ctx {
 	u32 alg;
 	u32 keylen;
 	u32 key_id;
+	u8 key[AES_MAX_KEY_SIZE];
 };
 
 struct tegra_aead_reqctx {
@@ -64,8 +70,8 @@ struct tegra_aead_reqctx {
 	unsigned int cryptlen;
 	unsigned int authsize;
 	bool encrypt;
-	u32 config;
 	u32 crypto_config;
+	u32 config;
 	u32 key_id;
 	u32 iv[4];
 	u8 authdata[16];
@@ -78,6 +84,8 @@ struct tegra_cmac_ctx {
 	struct tegra_se *se;
 	unsigned int alg;
 	u32 key_id;
+	u32 keylen;
+	u8 key[AES_MAX_KEY_SIZE];
 	struct crypto_shash *fallback_tfm;
 };
 
@@ -92,6 +100,8 @@ struct tegra_cmac_reqctx {
 	u32 config;
 	u32 key_id;
 	u32 *iv;
+	u32 keylen;
+	u8 key[AES_MAX_KEY_SIZE];
 	u32 result[CMAC_RESULT_REG_COUNT];
 };
 
@@ -269,7 +279,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
 	struct tegra_se *se = ctx->se;
-	unsigned int cmdlen;
+	unsigned int cmdlen, key1_id, key2_id;
 	int ret;
 
 	/* Set buffer size as a multiple of AES_BLOCK_SIZE*/
@@ -281,6 +291,8 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 
 	rctx->iv = (u32 *)req->iv;
 	rctx->len = req->cryptlen;
+	key1_id = ctx->key1_id;
+	key2_id = ctx->key2_id;
 
 	/* Pad input to AES Block size */
 	if (ctx->alg != SE_ALG_XTS) {
@@ -290,6 +302,29 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 
 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
 
+	rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
+	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
+
+	if (!key1_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
+						    ctx->keylen, ctx->alg, &key1_id);
+		if (ret)
+			goto out;
+	}
+
+	rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
+
+	if (ctx->alg == SE_ALG_XTS) {
+		if (!key2_id) {
+			ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
+							    ctx->keylen, ctx->alg, &key2_id);
+			if (ret)
+				goto out;
+		}
+
+		rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
+	}
+
 	/* Prepare the command and submit for execution */
 	cmdlen = tegra_aes_prep_cmd(se, rctx);
 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
@@ -298,10 +333,17 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 	tegra_aes_update_iv(req, ctx);
 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
 
+out:
 	/* Free the buffer */
 	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
 			  rctx->datbuf.buf, rctx->datbuf.addr);
 
+	if (tegra_key_is_reserved(key1_id))
+		tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
+
+	if (tegra_key_is_reserved(key2_id))
+		tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
+
 	crypto_finalize_skcipher_request(se->engine, req, ret);
 
 	return 0;
@@ -327,6 +369,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
 	ctx->se = se_alg->se_dev;
 	ctx->key1_id = 0;
 	ctx->key2_id = 0;
+	ctx->keylen = 0;
 
 	algname = crypto_tfm_alg_name(&tfm->base);
 	ret = se_algname_to_algid(algname);
@@ -361,13 +404,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm,
 			    const u8 *key, u32 keylen)
 {
 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int ret;
 
 	if (aes_check_keylen(keylen)) {
 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
 		return -EINVAL;
 	}
 
-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+	if (ret) {
+		ctx->keylen = keylen;
+		memcpy(ctx->key1, key, keylen);
+	}
+
+	return 0;
 }
 
 static int tegra_xts_setkey(struct crypto_skcipher *tfm,
@@ -385,11 +435,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm,
 
 	ret = tegra_key_submit(ctx->se, key, len,
 			       ctx->alg, &ctx->key1_id);
-	if (ret)
-		return ret;
+	if (ret) {
+		ctx->keylen = len;
+		memcpy(ctx->key1, key, len);
+	}
 
-	return tegra_key_submit(ctx->se, key + len, len,
+	ret = tegra_key_submit(ctx->se, key + len, len,
 			       ctx->alg, &ctx->key2_id);
+	if (ret) {
+		ctx->keylen = len;
+		memcpy(ctx->key2, key + len, len);
+	}
+
+	return 0;
 }
@@ -460,12 +516,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
 		return 0;
 
 	rctx->encrypt = encrypt;
-	rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
-	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
-	rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
-
-	if (ctx->key2_id)
-		rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
 
 	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
 }
@@ -751,7 +801,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
 
 	rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
-			      SE_AES_KEY_INDEX(ctx->key_id);
+			      SE_AES_KEY_INDEX(rctx->key_id);
 
 	cmdlen = tegra_gmac_prep_cmd(se, rctx);
 
@@ -791,7 +841,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
 
 	rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
-			      SE_AES_KEY_INDEX(ctx->key_id);
+			      SE_AES_KEY_INDEX(rctx->key_id);
 
 	/* Prepare command and submit */
 	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
@@ -918,7 +968,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
 	rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
 						      rctx->encrypt) |
-						      SE_AES_KEY_INDEX(ctx->key_id);
+						      SE_AES_KEY_INDEX(rctx->key_id);
 
 	/* Prepare command and submit */
 	cmdlen = tegra_cbcmac_prep_cmd(se, rctx);
@@ -1105,7 +1155,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
 
 	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
-			      SE_AES_KEY_INDEX(ctx->key_id);
+			      SE_AES_KEY_INDEX(rctx->key_id);
 
 	/* Copy authdata in the top of buffer for encryption/decryption */
 	if (rctx->encrypt)
@@ -1175,6 +1225,7 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 	rctx->dst_sg = req->dst;
 	rctx->assoclen = req->assoclen;
 	rctx->authsize = crypto_aead_authsize(tfm);
+	rctx->key_id = ctx->key_id;
 
 	if (rctx->encrypt)
 		rctx->cryptlen = req->cryptlen;
@@ -1200,6 +1251,13 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 	if (ret)
 		goto out;
 
+	if (!ctx->key_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+						    ctx->keylen, ctx->alg, &rctx->key_id);
+		if (ret)
+			goto out;
+	}
+
 	if (rctx->encrypt) {
 
 		/* CBC MAC Operation */
@@ -1231,6 +1289,9 @@ outbuf_err:
 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
 			  rctx->inbuf.buf, rctx->inbuf.addr);
 
+	if (tegra_key_is_reserved(rctx->key_id))
+		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
 
 	return 0;
@@ -1248,6 +1309,7 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 	rctx->dst_sg = req->dst;
 	rctx->assoclen = req->assoclen;
 	rctx->authsize = crypto_aead_authsize(tfm);
+	rctx->key_id = ctx->key_id;
 
 	if (rctx->encrypt)
 		rctx->cryptlen = req->cryptlen;
@@ -1273,6 +1335,13 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
 	rctx->iv[3] = (1 << 24);
 
+	if (!ctx->key_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+						    ctx->keylen, ctx->alg, &rctx->key_id);
+		if (ret)
+			goto out;
+	}
+
 	/* If there is associated data perform GMAC operation */
 	if (rctx->assoclen) {
 		ret = tegra_gcm_do_gmac(ctx, rctx);
@@ -1303,7 +1372,9 @@ outbuf_err:
 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
 			  rctx->inbuf.buf, rctx->inbuf.addr);
 
-	/* Finalize the request if there are no errors */
+	if (tegra_key_is_reserved(rctx->key_id))
+		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
 
 	return 0;
@@ -1329,6 +1400,7 @@ static int tegra_ccm_cra_init(struct crypto_aead *tfm)
 
 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
+	ctx->keylen = 0;
 
 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
@@ -1367,6 +1439,7 @@ static int tegra_gcm_cra_init(struct crypto_aead *tfm)
 
 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
+	ctx->keylen = 0;
 
 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
@@ -1453,13 +1526,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm,
 			     const u8 *key, u32 keylen)
 {
 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	int ret;
 
 	if (aes_check_keylen(keylen)) {
 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
 		return -EINVAL;
 	}
 
-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret) {
+		ctx->keylen = keylen;
+		memcpy(ctx->key, key, keylen);
+	}
+
+	return 0;
 }
 
 static unsigned int tegra_cmac_prep_cmd(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
@@ -1558,7 +1638,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
 	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
 	rctx->total_len += rctx->datbuf.size;
 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
-	rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
+	rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);
 
 	/*
 	 * Keep one block and residue bytes in residue and
@@ -1673,7 +1753,14 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct tegra_se *se = ctx->se;
-	int ret;
+	int ret = -EINVAL;
+
+	if (!ctx->key_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+						    ctx->keylen, ctx->alg, &rctx->key_id);
+		if (ret)
+			goto out;
+	}
 
 	if (rctx->task & SHA_UPDATE) {
 		ret = tegra_cmac_do_update(req);
@@ -1684,6 +1771,9 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
 		ret = tegra_cmac_do_final(req);
 		rctx->task &= ~SHA_FINAL;
 	}
+out:
+	if (tegra_key_is_reserved(rctx->key_id))
+		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
 
 	crypto_finalize_hash_request(se->engine, req, ret);
 
@@ -1729,6 +1819,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
 
 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
+	ctx->keylen = 0;
 
 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
@@ -1780,6 +1871,7 @@ static int tegra_cmac_init(struct ahash_request *req)
 
 	rctx->residue.size = 0;
 	rctx->datbuf.size = 0;
+	rctx->key_id = ctx->key_id;
 
 	/* Clear any previous result */
 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
@@ -1792,6 +1884,7 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;
 
 	if (aes_check_keylen(keylen)) {
 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
@@ -1801,7 +1894,13 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 	if (ctx->fallback_tfm)
 		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
 
-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret) {
+		ctx->keylen = keylen;
+		memcpy(ctx->key, key, keylen);
+	}
+
+	return 0;
 }
 
 static int tegra_cmac_update(struct ahash_request *req)
@@ -584,13 +584,18 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;
 
 	if (aes_check_keylen(keylen))
 		return tegra_hmac_fallback_setkey(ctx, key, keylen);
 
 	ctx->fallback = false;
 
-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret)
+		return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
+	return 0;
 }
 
 static int tegra_sha_update(struct ahash_request *req)
@@ -147,7 +147,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3
 	if (!tegra_key_in_kslt(*keyid)) {
 		*keyid = tegra_keyslot_alloc();
 		if (!(*keyid)) {
-			dev_err(se->dev, "failed to allocate key slot\n");
+			dev_dbg(se->dev, "failed to allocate key slot\n");
 			return -ENOMEM;
 		}
 	}
@@ -158,3 +158,20 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3
 
 	return 0;
 }
+
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg)
+{
+	u8 zkey[AES_MAX_KEY_SIZE] = {0};
+
+	if (!keyid)
+		return;
+
+	/* Overwrite the key with 0s */
+	tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
+}
+
+inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+				     u32 keylen, u32 alg, u32 *keyid)
+{
+	return tegra_key_insert(se, key, keylen, *keyid, alg);
+}
@@ -343,6 +343,9 @@
 #define SE_MAX_KEYSLOT 15
 #define SE_MAX_MEM_ALLOC SZ_4M
 
+#define TEGRA_AES_RESERVED_KSLT 14
+#define TEGRA_XTS_RESERVED_KSLT 15
+
 #define SHA_FIRST BIT(0)
 #define SHA_UPDATE BIT(1)
 #define SHA_FINAL BIT(2)
@@ -525,8 +528,32 @@ void tegra_deinit_hash(struct tegra_se *se);
 int tegra_key_submit(struct tegra_se *se, const u8 *key,
 		     u32 keylen, u32 alg, u32 *keyid);
 void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
+int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+			      u32 keylen, u32 alg, u32 *keyid);
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg);
 int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
 
+
+static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key,
+						u32 keylen, u32 alg, u32 *keyid)
+{
+	*keyid = TEGRA_AES_RESERVED_KSLT;
+	return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key,
+						u32 keylen, u32 alg, u32 *keyid)
+{
+	*keyid = TEGRA_XTS_RESERVED_KSLT;
+	return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline bool tegra_key_is_reserved(u32 keyid)
+{
+	return ((keyid == TEGRA_AES_RESERVED_KSLT) ||
+		(keyid == TEGRA_XTS_RESERVED_KSLT));
+}
+
 /* HOST1x OPCODES */
 static inline u32 host1x_opcode_setpayload(unsigned int payload)
 {