nvvse: cryptodev: user nonce, TSEC alg, ivcDB

Update kernel 5.15 with recent Linux driver changes.

Jira ESSS-504
bug 3979224

Details of the merged changes are listed below:

NVVSE: Add support for TSEC algorithm.

- Add support for TSEC CMAC Sign/Verify commands
- Add support for command to check TSEC Keyload status
- Don't use skip_key parameter as keyslot id is always needed by VSE driver

Jira ESSS-267
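
The TSEC path is exposed through the kernel crypto API as the ahash transform
"cmac-tsec(aes)" (driver name "tegra-hv-vse-safety-tsec(aes)"); update and
final are rejected for TSEC, so callers go through init followed by finup in a
single shot of at most 8 KB. The sketch below is a minimal, hypothetical
kernel-side caller modelled on the tnvvtsec_crypto_aes_cmac_sign_verify() path
added here; the helper name, the crypto_wait_req()-based wait (the cryptodev
layer uses its own completion helper), the 32-byte key-name buffer and the
hard-coded AES-128 key length are assumptions for illustration only.

/* Needs <crypto/hash.h>, <linux/crypto.h>, <linux/scatterlist.h> and the
 * driver's tegra_hv_vse.h (for struct tnvvse_cmac_req_data / CMAC_SIGN).
 * Signs up to TEGRA_VIRTUAL_TSEC_MAX_SUPPORTED_BUFLEN (8 KB) of data with
 * the TSEC key in `keyslot` and writes the 16-byte CMAC into `mac`.
 */
static int tsec_cmac_sign_sketch(u8 *data, unsigned int len,
				 u8 mac[16], u32 keyslot)
{
	struct tnvvse_cmac_req_data priv_data = { .request_type = CMAC_SIGN };
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	char keyname[32] = { 0 };
	int ret;

	tfm = crypto_alloc_ahash("cmac-tsec(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* The cryptodev layer also sets node_id in the transform context
	 * (via crypto_ahash_ctx()) to select the IVC channel; omitted here.
	 */

	/* Keys are referenced by slot, encoded as "NVSEAES <slot>". */
	snprintf(keyname, sizeof(keyname), "NVSEAES %x", keyslot);
	ret = crypto_ahash_setkey(tfm, keyname, 16 /* nominal AES key length */);
	if (ret)
		goto out_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	req->priv = &priv_data;		/* SIGN vs VERIFY selector, as in the diff */

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, mac, len);

	/* update/final are rejected for TSEC, so use init + finup only. */
	ret = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (!ret)
		ret = crypto_wait_req(crypto_ahash_finup(req), &wait);

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return ret;
}

TSEC key-load status is queried separately through the exported
tegra_hv_vse_safety_tsec_get_keyload_status(node_id, &err_code), which maps the
server-side VSE_MSG_ERR_TSEC_KEYLOAD_* codes to NVVSE_STATUS_SE_SERVER_*
values; user space reaches it via the new NVVSE_IOCTL_CMDID_TSEC_GET_KEYLOAD_STATUS
ioctl.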

nvvse: cryptodev: Add support for user nonce

Changes:

   - Add support for userNonce and zero-copy flag for
     TLS and zero-copy features
   - Update encrypt/decrypt function calls to use the
     user nonce.

Jira ESSS-415
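
When user_nonce is set in the AES context, the driver skips random-IV
generation on encryption and no longer copies a driver-generated IV back to
the caller; the caller-supplied IV/nonce is used as-is (for GCM it is copied
from the request IV into the IVC message). The helper below is a condensed,
hypothetical paraphrase of the updated decision in
tegra_hv_vse_safety_process_aes_req(), not verbatim driver code:

/* Condensed, hypothetical paraphrase of the new rule; this helper does not
 * exist in the driver. Context types are the driver's own.
 */
static bool vse_needs_random_iv(const struct tegra_virtual_se_aes_context *aes_ctx,
				const struct tegra_virtual_se_req_context *req_ctx,
				const u8 *iv)
{
	if (!req_ctx->encrypt)
		return false;	/* decryption always uses the supplied IV */
	if (aes_ctx->user_nonce != 0U)
		return false;	/* caller provides the nonce, e.g. for TLS */
	if (req_ctx->op_mode != AES_CBC && req_ctx->op_mode != AES_CTR)
		return false;
	return iv[0] == 1;	/* first-request marker set by the cryptodev layer */
}

The GCM encrypt path applies the same rule, requesting a random IV only when
req->iv[0] == 1 and aes_ctx->user_nonce == 0U.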

nvvse: Update IVCCFG offsets and IVC DB parameters

- Read max buffer size and GCM decrypt buffer size from DT
- Update elements of IVC DB.

Jira ESSS-417
Jira ESSS-484
Jira ESSS-468
bug 3974121
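
Each channel's configuration is a fixed-size slice of the flattened
"nvidia,ivccfg" u32 array; this change grows the slice from 8 to 9 entries
(TEGRA_IVCCFG_ARRAY_LEN) and reads the maximum buffer size at index
TEGRA_MAX_BUFFER_SIZE (4) and the GCM decrypt buffer size at index
TEGRA_GCM_DEC_BUFFER_SIZE (8), both of which are then reported through the
IVC DB. A minimal sketch of the probe-time reads, assuming a hypothetical
helper name and with the surrounding error handling collapsed:

/* Minimal sketch (hypothetical helper) of the new per-channel reads done in
 * probe; needs <linux/of.h> and the macros defined in the VSE driver.
 */
static int read_ivccfg_sizes(struct device_node *np, u32 cnt,
			     struct crypto_dev_to_ivc_map *crypto_dev)
{
	u32 base = cnt * TEGRA_IVCCFG_ARRAY_LEN;	/* 9 u32s per channel now */
	int err;

	err = of_property_read_u32_index(np, "nvidia,ivccfg",
					 base + TEGRA_MAX_BUFFER_SIZE,
					 &crypto_dev->max_buffer_size);
	if (err)
		return err;

	/* Must be 0 unless the channel advertises GCM decrypt support. */
	return of_property_read_u32_index(np, "nvidia,ivccfg",
					  base + TEGRA_GCM_DEC_BUFFER_SIZE,
					  &crypto_dev->gcm_dec_buffer_size);
}

The IVC DB copy-out (tnvvse_crypto_get_ivc_db()) now also reports priority,
max_buffer_size, channel_grp_id, gcm_dec_supported and gcm_dec_buffer_size to
user space.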

Signed-off-by: Advaya Andhare <aandhare@nvidia.com>
Change-Id: Ic7c4580dc4f443db9f7e4fabfb7ec49de2973ed3
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2862329
Reviewed-by: Rakesh Babu Bodla <rbodla@nvidia.com>
Reviewed-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Reviewed-by: Nagaraj P N <nagarajp@nvidia.com>
Reviewed-by: Leo Chiu <lchiu@nvidia.com>
Reviewed-by: Vipin Kumar <vipink@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author: Advaya Andhare
Date: 2022-11-17 06:09:45 +00:00
Committed-by: mobile promotions
Parent: 38371d55ab
Commit: 9fdc09d25a
4 changed files with 660 additions and 75 deletions


@@ -4,7 +4,7 @@
*
* Support for Tegra Virtual Security Engine hardware crypto algorithms.
*
* Copyright (c) 2019-2022, NVIDIA Corporation. All Rights Reserved.
* Copyright (c) 2019-2023, NVIDIA Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -87,6 +87,11 @@
#define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_CMAC_VERIFY 0x36
#define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GMAC_VERIFY 0x37
#define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GCM_DEC 0x38
#define TEGRA_VIRTUAL_SE_CMD_TSEC_SIGN 0x40
#define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_TSEC_SIGN 0x41
#define TEGRA_VIRTUAL_SE_CMD_TSEC_VERIFY 0x42
#define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_TSEC_VERIFY 0x43
#define TEGRA_VIRTUAL_TSEC_CMD_GET_KEYLOAD_STATUS 0x44
#define TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_INIT (0x29U)
#define TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_SIGN (0x30U)
@@ -139,18 +144,28 @@
#define TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN ((1U << 24) - 1U)
#define TEGRA_VIRTUAL_SE_MAX_GCMDEC_BUFLEN (0x500000U) /* 5 MB */
#define TEGRA_VIRTUAL_TSEC_MAX_SUPPORTED_BUFLEN (8U * 1024U) /* 8 KB */
#define TEGRA_VIRTUAL_SE_ERR_MAC_INVALID 11
#define MAX_NUMBER_MISC_DEVICES 40U
#define MAX_NUMBER_MISC_DEVICES 46U
#define MAX_IVC_Q_PRIORITY 2U
#define TEGRA_IVC_ID_OFFSET 0U
#define TEGRA_SE_ENGINE_ID_OFFSET 1U
#define TEGRA_CRYPTO_DEV_ID_OFFSET 2U
#define TEGRA_IVC_PRIORITY_OFFSET 3U
#define TEGRA_CHANNEL_GROUPID_OFFSET 6U
#define TEGRA_MAX_BUFFER_SIZE 4U
#define TEGRA_CHANNEL_GROUPID_OFFSET 5U
#define TEGRA_GCM_SUPPORTED_FLAG_OFFSET 7U
#define TEGRA_IVCCFG_ARRAY_LEN 8U
#define TEGRA_GCM_DEC_BUFFER_SIZE 8U
#define TEGRA_IVCCFG_ARRAY_LEN 9U
#define VSE_MSG_ERR_TSEC_KEYLOAD_FAILED 21U
#define VSE_MSG_ERR_TSEC_KEYLOAD_STATUS_CHECK_TIMEOUT 20U
#define NVVSE_STATUS_SE_SERVER_TSEC_KEYLOAD_FAILED 76U
#define NVVSE_STATUS_SE_SERVER_TSEC_KEYLOAD_TIMEOUT 21U
#define NVVSE_STATUS_SE_SERVER_ERROR 102U
static struct crypto_dev_to_ivc_map g_crypto_to_ivc_map[MAX_NUMBER_MISC_DEVICES];
@@ -173,7 +188,7 @@ enum tegra_virtual_se_command {
VIRTUAL_SE_KEY_SLOT,
VIRTUAL_SE_PROCESS,
VIRTUAL_CMAC_PROCESS,
VIRTUAL_SE_AES_GCM_ENC_PROCESS,
VIRTUAL_SE_AES_GCM_ENC_PROCESS
};
/* CMAC response */
@@ -323,6 +338,32 @@ union tegra_virtual_se_sha_args {
} op_hash;
} __attribute__((__packed__));
struct tegra_virtual_tsec_args {
/**
* Keyslot index for keyslot containing TSEC key
*/
uint32_t keyslot;
/**
* Size of input buffer in bytes.
* The maximum size is given by the macro TEGRA_VIRTUAL_TSEC_MAX_SUPPORTED_BUFLEN
*/
uint32_t src_buf_size;
/**
* IOVA address of the input buffer.
* Although it is a 64-bit integer, only least significant 40 bits are
* used because only a 40-bit address space is supported.
*/
uint64_t src_addr;
/**
* For CMAC Verify, this array contains the value to be verified.
* Not used for CMAC Sign.
*/
uint8_t cmac_result[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE];
};
struct tegra_virtual_se_ivc_resp_msg_t {
u32 tag;
u32 cmd;
@@ -346,6 +387,7 @@ struct tegra_virtual_se_ivc_tx_msg_t {
union {
union tegra_virtual_se_aes_args aes;
union tegra_virtual_se_sha_args sha;
struct tegra_virtual_tsec_args tsec;
};
};
@@ -416,6 +458,7 @@ enum se_engine_id {
VIRTUAL_SE_AES0,
VIRTUAL_SE_AES1,
VIRTUAL_SE_SHA = 2,
VIRTUAL_SE_TSEC = 6,
VIRTUAL_MAX_SE_ENGINE_NUM = 7
};
@@ -1637,12 +1680,12 @@ static int tegra_hv_vse_safety_process_aes_req(struct tegra_virtual_se_dev *se_d
g_crypto_to_ivc_map[aes_ctx->node_id].vse_thread_start = true;
/*
* If first byte of iv is 1 and the request is for AES CBC/CTR encryption,
* If req->iv[0] is 1 and the request is for AES CBC/CTR encryption,
* it means that generation of random IV is required.
*/
if (req_ctx->encrypt &&
(req_ctx->op_mode == AES_CBC ||
req_ctx->op_mode == AES_CTR) &&
req_ctx->op_mode == AES_CTR) && (aes_ctx->user_nonce == 0U) &&
req->iv[0] == 1) {
//Random IV generation is required
err = tegra_hv_vse_safety_aes_gen_random_iv(se_dev, req,
@@ -1680,7 +1723,7 @@ static int tegra_hv_vse_safety_process_aes_req(struct tegra_virtual_se_dev *se_d
if (((req_ctx->op_mode == AES_CBC)
|| (req_ctx->op_mode == AES_CTR))
&& req_ctx->encrypt == true)
&& req_ctx->encrypt == true && aes_ctx->user_nonce == 0U)
memcpy(req->iv, priv->iv, TEGRA_VIRTUAL_SE_AES_IV_SIZE);
} else {
dev_err(se_dev->dev,
@@ -1959,7 +2002,6 @@ static int tegra_hv_vse_safety_cmac_op(struct ahash_request *req, bool is_last)
DMA_TO_DEVICE, &num_mapped_sgs);
if (err)
goto free_mem;
}
/* get the last block bytes from the sg_dma buffer using miter */
@@ -2049,6 +2091,145 @@ free_mem:
}
static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req)
{
struct tegra_virtual_se_aes_cmac_context *cmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_vse_cmac_req_data *cmac_req_data;
struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine];
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
struct tegra_virtual_se_ivc_msg_t *ivc_req_msg;
struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[cmac_ctx->node_id].ivck;
int err = 0;
struct tegra_vse_priv_data *priv = NULL;
struct tegra_vse_tag *priv_data_ptr;
dma_addr_t src_buf_addr;
void *src_buf = NULL;
if ((req->nbytes == 0) || (req->nbytes > TEGRA_VIRTUAL_TSEC_MAX_SUPPORTED_BUFLEN)) {
dev_err(se_dev->dev, "%s: input buffer size is invalid\n", __func__);
return -EINVAL;
}
ivc_req_msg = devm_kzalloc(se_dev->dev,
sizeof(*ivc_req_msg), GFP_KERNEL);
if (!ivc_req_msg)
return -ENOMEM;
priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
devm_kfree(se_dev->dev, ivc_req_msg);
return -ENOMEM;
}
cmac_req_data = (struct tegra_vse_cmac_req_data *) req->priv;
ivc_tx = &ivc_req_msg->tx[0];
ivc_hdr = &ivc_req_msg->ivc_hdr;
ivc_hdr->num_reqs = 1;
ivc_hdr->header_magic[0] = 'N';
ivc_hdr->header_magic[1] = 'V';
ivc_hdr->header_magic[2] = 'D';
ivc_hdr->header_magic[3] = 'A';
ivc_hdr->engine = g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine;
g_crypto_to_ivc_map[cmac_ctx->node_id].vse_thread_start = true;
src_buf = dma_alloc_coherent(se_dev->dev, req->nbytes,
&src_buf_addr, GFP_KERNEL);
if (!src_buf) {
err = -ENOMEM;
goto free_mem;
}
	/* copy input data from sgs to buffer */
sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src),
src_buf, req->nbytes, 0);
ivc_tx->tsec.src_addr = src_buf_addr;
ivc_tx->tsec.src_buf_size = req->nbytes;
ivc_tx->tsec.keyslot = cmac_ctx->aes_keyslot;
if (cmac_req_data->request_type == CMAC_SIGN) {
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_TSEC_SIGN;
} else {
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_TSEC_VERIFY;
memcpy(ivc_tx->tsec.cmac_result,
req->result,
TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE);
}
priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag;
priv_data_ptr->priv_data = (unsigned int *)priv;
priv->cmd = VIRTUAL_SE_PROCESS;
priv->se_dev = se_dev;
init_completion(&priv->alg_complete);
err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg,
sizeof(struct tegra_virtual_se_ivc_msg_t), cmac_ctx->node_id);
if (err) {
dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err);
goto unmap_exit;
}
if (priv->rx_status != 0) {
err = status_to_errno(priv->rx_status);
dev_err(se_dev->dev, "%s: SE server returned error %u\n",
__func__, priv->rx_status);
goto unmap_exit;
}
if (cmac_req_data->request_type == CMAC_SIGN)
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_TSEC_SIGN;
else
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_TSEC_VERIFY;
priv->cmd = VIRTUAL_CMAC_PROCESS;
init_completion(&priv->alg_complete);
err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg,
sizeof(struct tegra_virtual_se_ivc_msg_t), cmac_ctx->node_id);
if (err) {
dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err);
goto unmap_exit;
}
if (cmac_req_data->request_type == CMAC_SIGN) {
if (priv->rx_status == 0) {
memcpy(req->result,
priv->cmac.data,
TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE);
}
} else {
if (priv->rx_status == 0)
cmac_req_data->result = 0;
else
cmac_req_data->result = 1;
}
if ((priv->rx_status != 0) &&
(priv->rx_status != TEGRA_VIRTUAL_SE_ERR_MAC_INVALID)) {
err = status_to_errno(priv->rx_status);
dev_err(se_dev->dev, "%s: SE server returned error %u\n",
__func__, priv->rx_status);
}
unmap_exit:
if (src_buf)
dma_free_coherent(se_dev->dev, req->nbytes, src_buf, src_buf_addr);
free_mem:
devm_kfree(se_dev->dev, priv);
devm_kfree(se_dev->dev, ivc_req_msg);
return err;
}
static int tegra_hv_vse_safety_cmac_sv_op(struct ahash_request *req, bool is_last)
{
struct tegra_virtual_se_aes_cmac_context *cmac_ctx =
@@ -2270,7 +2451,7 @@ static int tegra_hv_vse_safety_cmac_init(struct ahash_request *req)
se_dev->dev, TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE,
&cmac_ctx->hash_result_addr, GFP_KERNEL);
if (!cmac_ctx->hash_result) {
dev_err(se_dev->dev, "Cannot allocate memory for cmac result\n");
dev_err(se_dev->dev, "Cannot allocate memory for CMAC result\n");
return -ENOMEM;
}
cmac_ctx->is_first = true;
@@ -2301,7 +2482,6 @@ static void tegra_hv_vse_safety_cmac_req_deinit(struct ahash_request *req)
static int tegra_hv_vse_safety_cmac_update(struct ahash_request *req)
{
struct tegra_virtual_se_aes_cmac_context *cmac_ctx = NULL;
struct tegra_virtual_se_dev *se_dev;
int ret = 0;
@@ -2332,24 +2512,32 @@ static int tegra_hv_vse_safety_cmac_update(struct ahash_request *req)
ret = tegra_hv_vse_safety_cmac_sv_op(req, false);
else
ret = tegra_hv_vse_safety_cmac_op(req, false);
if (ret)
dev_err(se_dev->dev, "tegra_se_cmac_update failed - %d\n", ret);
return ret;
}
static int tegra_hv_tsec_safety_cmac_update(struct ahash_request *req)
{
pr_err("%s cmac_update is not supported for tsec\n", __func__);
return -EINVAL;
}
static int tegra_hv_vse_safety_cmac_final(struct ahash_request *req)
{
struct tegra_virtual_se_aes_cmac_context *cmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine];
g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine];
/* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended))
return -ENODEV;
return 0;
pr_err("%s cmac_final is not supported\n", __func__);
return -EINVAL;
}
static int tegra_hv_vse_safety_cmac_finup(struct ahash_request *req)
@@ -2384,6 +2572,7 @@ static int tegra_hv_vse_safety_cmac_finup(struct ahash_request *req)
ret = tegra_hv_vse_safety_cmac_sv_op(req, true);
else
ret = tegra_hv_vse_safety_cmac_op(req, true);
if (ret)
dev_err(se_dev->dev, "tegra_se_cmac_finup failed - %d\n", ret);
@@ -2392,6 +2581,42 @@ static int tegra_hv_vse_safety_cmac_finup(struct ahash_request *req)
return ret;
}
static int tegra_hv_tsec_safety_cmac_finup(struct ahash_request *req)
{
struct tegra_virtual_se_aes_cmac_context *cmac_ctx = NULL;
struct tegra_virtual_se_dev *se_dev = NULL;
int ret = 0;
if (!req) {
pr_err("%s TSEC request not valid\n", __func__);
return -EINVAL;
}
cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
if (!cmac_ctx) {
pr_err("%s TSEC req_ctx not valid\n", __func__);
return -EINVAL;
}
if (!cmac_ctx->req_context_initialized) {
pr_err("%s Request ctx not initialized\n", __func__);
return -EINVAL;
}
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine];
/* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended))
return -ENODEV;
ret = tegra_hv_vse_safety_tsec_sv_op(req);
if (ret)
dev_err(se_dev->dev, "tegra_se_tsec_finup failed - %d\n", ret);
tegra_hv_vse_safety_cmac_req_deinit(req);
return ret;
}
static int tegra_hv_vse_safety_cmac_digest(struct ahash_request *req)
{
@@ -2407,6 +2632,82 @@ static int tegra_hv_vse_safety_cmac_digest(struct ahash_request *req)
return tegra_hv_vse_safety_cmac_init(req) ?: tegra_hv_vse_safety_cmac_final(req);
}
int tegra_hv_vse_safety_tsec_get_keyload_status(uint32_t node_id, uint32_t *err_code)
{
struct tegra_virtual_se_dev *se_dev = NULL;
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr = NULL;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL;
struct tegra_hv_ivc_cookie *pivck = NULL;
struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL;
struct tegra_vse_priv_data *priv = NULL;
struct tegra_vse_tag *priv_data_ptr = NULL;
int err = 0;
if (node_id >= MAX_NUMBER_MISC_DEVICES)
return -ENODEV;
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[node_id].se_engine];
ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg),
GFP_KERNEL);
if (!ivc_req_msg)
return -ENOMEM;
priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
devm_kfree(se_dev->dev, ivc_req_msg);
dev_err(se_dev->dev, "Priv Data allocation failed\n");
return -ENOMEM;
}
ivc_hdr = &ivc_req_msg->ivc_hdr;
ivc_tx = &ivc_req_msg->tx[0];
ivc_hdr->num_reqs = 1;
ivc_hdr->header_magic[0] = 'N';
ivc_hdr->header_magic[1] = 'V';
ivc_hdr->header_magic[2] = 'D';
ivc_hdr->header_magic[3] = 'A';
g_crypto_to_ivc_map[node_id].vse_thread_start = true;
ivc_hdr->engine = g_crypto_to_ivc_map[node_id].se_engine;
ivc_tx->cmd = TEGRA_VIRTUAL_TSEC_CMD_GET_KEYLOAD_STATUS;
priv_data_ptr =
(struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag;
priv_data_ptr->priv_data = (unsigned int *)priv;
priv->cmd = VIRTUAL_SE_PROCESS;
priv->se_dev = se_dev;
init_completion(&priv->alg_complete);
err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg,
sizeof(struct tegra_virtual_se_ivc_msg_t), node_id);
if (err) {
dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err);
goto free_exit;
}
if (priv->rx_status != 0U) {
err = -EINVAL;
if (priv->rx_status == VSE_MSG_ERR_TSEC_KEYLOAD_FAILED)
*err_code = NVVSE_STATUS_SE_SERVER_TSEC_KEYLOAD_FAILED;
else if (priv->rx_status == VSE_MSG_ERR_TSEC_KEYLOAD_STATUS_CHECK_TIMEOUT)
*err_code = NVVSE_STATUS_SE_SERVER_TSEC_KEYLOAD_TIMEOUT;
else
*err_code = NVVSE_STATUS_SE_SERVER_ERROR;
} else {
err = 0;
*err_code = 0U;
}
free_exit:
devm_kfree(se_dev->dev, priv);
devm_kfree(se_dev->dev, ivc_req_msg);
return err;
}
EXPORT_SYMBOL(tegra_hv_vse_safety_tsec_get_keyload_status);
static int tegra_hv_vse_safety_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -2912,10 +3213,11 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req, bool encrypt)
if (encrypt) {
/*
* If first byte of iv is 1 and the request is for AES CBC/CTR encryption,
* If req->iv[0] is 1 and the request is for AES CBC/CTR encryption,
* it means that generation of random IV is required.
* IV generation is not required if user nonce is provided.
*/
if (req->iv[0] == 1) {
if (req->iv[0] == 1 && aes_ctx->user_nonce == 0U) {
//Random IV generation is required
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT_INIT;
priv->cmd = VIRTUAL_SE_PROCESS;
@@ -2952,6 +3254,9 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req, bool encrypt)
sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src),
ivc_tx->aes.op_gcm.expected_tag, TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE,
req->assoclen + cryptlen);
} else {
if (aes_ctx->user_nonce != 0U)
memcpy(ivc_tx->aes.op_gcm.iv, req->iv, crypto_aead_ivsize(tfm));
}
ivc_tx->aes.op_gcm.src_addr_hi = cryptlen;
@@ -2992,9 +3297,10 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req, bool encrypt)
}
if (encrypt) {
/* copy iv to req for encryption*/
memcpy(req->iv, priv->iv, crypto_aead_ivsize(tfm));
if (aes_ctx->user_nonce == 0U) {
/* copy iv to req for encryption*/
memcpy(req->iv, priv->iv, crypto_aead_ivsize(tfm));
}
/* copy tag to req for encryption */
sg_pcopy_from_buffer(req->dst, sg_nents(req->dst),
tag_buf, aes_ctx->authsize,
@@ -3680,6 +3986,29 @@ static struct skcipher_alg aes_algs[] = {
},
};
static struct ahash_alg tsec_alg = {
.init = tegra_hv_vse_safety_cmac_init,
.update = tegra_hv_tsec_safety_cmac_update,
.final = tegra_hv_vse_safety_cmac_final,
.finup = tegra_hv_tsec_safety_cmac_finup,
.digest = tegra_hv_vse_safety_cmac_digest,
.setkey = tegra_hv_vse_safety_cmac_setkey,
.halg.digestsize = TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE,
.halg.statesize = TEGRA_VIRTUAL_SE_AES_CMAC_STATE_SIZE,
.halg.base = {
.cra_name = "cmac-tsec(aes)",
.cra_driver_name = "tegra-hv-vse-safety-tsec(aes)",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct tegra_virtual_se_aes_cmac_context),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_hv_vse_safety_cmac_cra_init,
.cra_exit = tegra_hv_vse_safety_cmac_cra_exit,
}
};
static struct ahash_alg cmac_alg = {
.init = tegra_hv_vse_safety_cmac_init,
.update = tegra_hv_vse_safety_cmac_update,
@@ -4341,6 +4670,14 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
goto exit;
}
err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN
+ TEGRA_MAX_BUFFER_SIZE, &crypto_dev->max_buffer_size);
if (err) {
pr_err("Error: invalid max buffer size. err %d\n", err);
err = -ENODEV;
goto exit;
}
err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN
+ TEGRA_CHANNEL_GROUPID_OFFSET, &crypto_dev->channel_grp_id);
if (err) {
@@ -4357,6 +4694,15 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
goto exit;
}
err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN
+ TEGRA_GCM_DEC_BUFFER_SIZE, &crypto_dev->gcm_dec_buffer_size);
if (err || (crypto_dev->gcm_dec_supported != GCM_DEC_OP_SUPPORTED &&
crypto_dev->gcm_dec_buffer_size != 0)) {
pr_err("Error: invalid gcm decrypt buffer size. err %d\n", err);
err = -ENODEV;
goto exit;
}
dev_info(se_dev->dev, "Virtual SE channel number: %d", ivc_id);
crypto_dev->ivck = tegra_hv_ivc_reserve(NULL, ivc_id, NULL);
@@ -4466,7 +4812,14 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
}
}
}
if (engine_id == VIRTUAL_SE_TSEC) {
err = crypto_register_ahash(&tsec_alg);
if (err) {
dev_err(&pdev->dev,
"Tsec alg register failed. Err %d\n", err);
goto exit;
}
}
se_dev->engine_id = engine_id;
/* Set Engine suspended state to false*/


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef __TEGRA_HV_VSE_H
@@ -22,8 +22,10 @@ struct crypto_dev_to_ivc_map {
uint32_t se_engine;
uint32_t node_id;
uint32_t priority;
uint32_t max_buffer_size;
uint32_t channel_grp_id;
enum tegra_gcm_dec_supported gcm_dec_supported;
uint32_t gcm_dec_buffer_size;
struct tegra_hv_ivc_cookie *ivck;
struct completion tegra_vse_complete;
struct task_struct *tegra_vse_task;
@@ -75,9 +77,11 @@ struct tegra_virtual_se_aes_context {
u32 authsize;
/*Crypto dev instance*/
uint32_t node_id;
/* Flag to indicate user nonce*/
uint8_t user_nonce;
};
/* Security Engine AES CMAC context */
/* Security Engine/TSEC AES CMAC context */
struct tegra_virtual_se_aes_cmac_context {
unsigned int digest_size;
u8 *hash_result; /* Intermediate hash result */
@@ -142,4 +146,7 @@ struct tegra_virtual_se_req_context {
/* API to get ivc db from hv_vse driver */
struct crypto_dev_to_ivc_map *tegra_hv_vse_get_db(void);
/* API to get tsec keyload status from vse driver */
int tegra_hv_vse_safety_tsec_get_keyload_status(uint32_t node_id, uint32_t *err_code);
#endif /*__TEGRA_HV_VSE_H*/


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All Rights Reserved.
* Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All Rights Reserved.
*
* Tegra NVVSE crypto device for crypto operation to NVVSE linux library.
*
@@ -52,6 +52,7 @@
#define GCM_PT_MAX_LEN (16*1024*1024 - 1) /* 16MB */
#define GCM_AAD_MAX_LEN (16*1024*1024 - 1) /* 16MB */
#define GMAC_MAX_LEN (16*1024*1024 - 1) /* 16MB */
#define TSEC_MAX_LEN (8U * 1024U) /* 8KB */
/** Defines the Maximum Random Number length supported */
#define NVVSE_MAX_RANDOM_NUMBER_LEN_SUPPORTED 512U
@@ -62,8 +63,6 @@
*/
#define NVVSE_MAX_ALLOCATED_SHA_RESULT_BUFF_SIZE 256U
#define MAX_NUMBER_MISC_DEVICES 40U
#define MISC_DEVICE_NAME_LEN 32U
static struct miscdevice *g_misc_devices[MAX_NUMBER_MISC_DEVICES];
@@ -419,6 +418,156 @@ stop_sha:
return ret;
}
static int tnvvse_crypto_tsec_get_keyload_status(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_tsec_get_keyload_status *tsec_keyload_status)
{
return tegra_hv_vse_safety_tsec_get_keyload_status(ctx->node_id,
&tsec_keyload_status->err_code);
}
static int tnvvtsec_crypto_aes_cmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_aes_cmac_sign_verify_ctl *aes_cmac_ctl)
{
struct crypto_ahash *tfm;
char *result, *src_buffer;
const char *driver_name;
struct ahash_request *req;
struct tnvvse_crypto_completion sha_complete;
struct tegra_virtual_se_aes_cmac_context *cmac_ctx;
char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,};
struct tnvvse_cmac_req_data priv_data;
int ret = -ENOMEM;
struct scatterlist sg[1];
uint32_t total = 0;
uint8_t *hash_buff;
result = kzalloc(64, GFP_KERNEL);
if (!result)
return -ENOMEM;
tfm = crypto_alloc_ahash("cmac-tsec(aes)", 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
pr_err("%s(): Failed to allocate ahash for cmac-tsec(aes): %d\n", __func__, ret);
goto free_result;
}
cmac_ctx = crypto_ahash_ctx(tfm);
cmac_ctx->node_id = ctx->node_id;
driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
if (driver_name == NULL) {
pr_err("%s(): get_driver_name for cmac-tsec(aes) returned NULL", __func__);
ret = -EINVAL;
goto free_tfm;
}
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("%s(): Failed to allocate request for cmac-tsec(aes)\n", __func__);
goto free_tfm;
}
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tnvvse_crypto_complete, &sha_complete);
init_completion(&sha_complete.restart);
sha_complete.req_err = 0;
crypto_ahash_clear_flags(tfm, ~0U);
if (aes_cmac_ctl->cmac_type == TEGRA_NVVSE_AES_CMAC_SIGN)
priv_data.request_type = CMAC_SIGN;
else
priv_data.request_type = CMAC_VERIFY;
ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES %x",
aes_cmac_ctl->key_slot);
if (ret >= AES_KEYSLOT_NAME_SIZE) {
pr_err("%s(): Buffer overflow while setting key for cmac-tsec(aes): %d\n",
__func__, ret);
ret = -EINVAL;
goto free_req;
}
req->priv = &priv_data;
priv_data.result = 0;
ret = crypto_ahash_setkey(tfm, key_as_keyslot, aes_cmac_ctl->key_length);
if (ret) {
pr_err("%s(): Failed to set keys for cmac-tsec(aes): %d\n", __func__, ret);
ret = -EINVAL;
goto free_req;
}
ret = wait_async_op(&sha_complete, crypto_ahash_init(req));
if (ret) {
pr_err("%s(): Failed to initialize ahash: %d\n", __func__, ret);
ret = -EINVAL;
goto free_req;
}
if (aes_cmac_ctl->cmac_type == TEGRA_NVVSE_AES_CMAC_VERIFY) {
/* Copy digest */
ret = copy_from_user((void *)result,
(void __user *)aes_cmac_ctl->cmac_buffer,
TEGRA_NVVSE_AES_CMAC_LEN);
if (ret) {
pr_err("%s(): Failed to copy_from_user: %d\n", __func__, ret);
ret = -EINVAL;
goto free_req;
}
}
total = aes_cmac_ctl->data_length;
src_buffer = aes_cmac_ctl->src_buffer;
if (total > TSEC_MAX_LEN) {
pr_err("%s(): Unsupported buffer size: %u\n", __func__, total);
ret = -EINVAL;
goto free_req;
}
hash_buff = kcalloc(total, sizeof(uint8_t), GFP_KERNEL);
if (hash_buff == NULL) {
ret = -ENOMEM;
goto free_req;
}
ret = copy_from_user((void *)hash_buff, (void __user *)src_buffer, total);
if (ret) {
pr_err("%s(): Failed to copy_from_user: %d\n", __func__, ret);
goto free_xbuf;
}
sg_init_one(&sg[0], hash_buff, total);
ahash_request_set_crypt(req, sg, result, total);
ret = wait_async_op(&sha_complete, crypto_ahash_finup(req));
if (ret) {
pr_err("%s(): Failed to ahash_finup: %d\n", __func__, ret);
goto free_xbuf;
}
if (aes_cmac_ctl->cmac_type == TEGRA_NVVSE_AES_CMAC_SIGN) {
ret = copy_to_user((void __user *)aes_cmac_ctl->cmac_buffer, (const void *)result,
crypto_ahash_digestsize(tfm));
if (ret)
pr_err("%s(): Failed to copy_to_user: %d\n", __func__, ret);
} else {
aes_cmac_ctl->result = priv_data.result;
}
free_xbuf:
kfree(hash_buff);
free_req:
ahash_request_free(req);
free_tfm:
crypto_free_ahash(tfm);
free_result:
kfree(result);
return ret;
}
static int tnvvse_crypto_aes_cmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_aes_cmac_sign_verify_ctl *aes_cmac_ctl)
{
@@ -455,7 +604,6 @@ static int tnvvse_crypto_aes_cmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
pr_err("%s(): Failed to get_driver_name for cmac-vse(aes) returned NULL", __func__);
goto free_tfm;
}
pr_debug("%s(): Algo name cmac(aes), driver name %s\n", __func__, driver_name);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -485,7 +633,7 @@ static int tnvvse_crypto_aes_cmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
aes_cmac_ctl->key_slot);
if (ret >= AES_KEYSLOT_NAME_SIZE) {
pr_err("%s(): Buffer overflow while setting key for cmac-vse(aes): %d\n",
__func__, ret);
__func__, ret);
ret = -EINVAL;
goto free_xbuf;
}
@@ -518,6 +666,7 @@ static int tnvvse_crypto_aes_cmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
hash_buff = xbuf[0];
total = aes_cmac_ctl->data_length;
src_buffer = aes_cmac_ctl->src_buffer;
while (true) {
size = (total < PAGE_SIZE) ? total : PAGE_SIZE;
ret = copy_from_user((void *)hash_buff, (void __user *)src_buffer, size);
@@ -915,6 +1064,7 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
aes_ctx = crypto_skcipher_ctx(tfm);
aes_ctx->node_id = ctx->node_id;
aes_ctx->user_nonce = aes_enc_dec_ctl->user_nonce;
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -943,34 +1093,33 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
crypto_skcipher_clear_flags(tfm, ~0);
if (!aes_enc_dec_ctl->skip_key) {
ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES %x",
aes_enc_dec_ctl->key_slot);
if (ret >= AES_KEYSLOT_NAME_SIZE) {
pr_err("%s(): Buffer overflow while preparing key for %s: %d\n",
__func__, aes_algo[aes_enc_dec_ctl->aes_mode], ret);
ret = -EINVAL;
goto free_req;
}
ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES %x",
aes_enc_dec_ctl->key_slot);
if (ret >= AES_KEYSLOT_NAME_SIZE) {
pr_err("%s(): Buffer overflow while preparing key for %s: %d\n",
__func__, aes_algo[aes_enc_dec_ctl->aes_mode], ret);
ret = -EINVAL;
goto free_req;
}
klen = strlen(key_as_keyslot);
if(klen != 16) {
pr_err("%s(): key length is invalid, length %d, key %s\n", __func__, klen, key_as_keyslot);
ret = -EINVAL;
goto free_req;
}
/* Null key is only allowed in SE driver */
if (!strstr(driver_name, "tegra")) {
ret = -EINVAL;
pr_err("%s(): Failed to identify as tegra se driver\n", __func__);
goto free_req;
}
klen = strlen(key_as_keyslot);
if (klen != 16) {
pr_err("%s(): key length is invalid, length %d, key %s\n", __func__, klen,
key_as_keyslot);
ret = -EINVAL;
goto free_req;
}
/* Null key is only allowed in SE driver */
if (!strstr(driver_name, "tegra")) {
ret = -EINVAL;
pr_err("%s(): Failed to identify as tegra se driver\n", __func__);
goto free_req;
}
ret = crypto_skcipher_setkey(tfm, key_as_keyslot, aes_enc_dec_ctl->key_length);
if (ret < 0) {
pr_err("%s(): Failed to set key: %d\n", __func__, ret);
goto free_req;
}
ret = crypto_skcipher_setkey(tfm, key_as_keyslot, aes_enc_dec_ctl->key_length);
if (ret < 0) {
pr_err("%s(): Failed to set key: %d\n", __func__, ret);
goto free_req;
}
ret = alloc_bufs(xbuf);
@@ -1009,10 +1158,10 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
tnvvse_crypto_complete, &tcrypt_complete);
tcrypt_complete.req_err = 0;
/* Set first byte of IV to 1 for first encryption request and 0 for other
/* Set first byte of next_block_iv to 1 for first encryption request and 0 for other
* encryption requests. This is used to invoke generation of random IV.
*/
if (aes_enc_dec_ctl->is_encryption) {
if (aes_enc_dec_ctl->is_encryption && (aes_enc_dec_ctl->user_nonce == 0U)) {
if (first_loop && !aes_enc_dec_ctl->is_non_first_call)
next_block_iv[0] = 1;
else
@@ -1046,7 +1195,8 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
goto free_xbuf;
}
if (first_loop && aes_enc_dec_ctl->is_encryption) {
if ((first_loop && aes_enc_dec_ctl->is_encryption) &&
(aes_enc_dec_ctl->user_nonce == 0U)) {
if (aes_enc_dec_ctl->aes_mode == TEGRA_NVVSE_AES_MODE_CBC)
memcpy(aes_enc_dec_ctl->initial_vector, req->iv, TEGRA_NVVSE_AES_IV_LEN);
else if (aes_enc_dec_ctl->aes_mode == TEGRA_NVVSE_AES_MODE_CTR)
@@ -1190,6 +1340,7 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
aes_ctx = crypto_aead_ctx(tfm);
aes_ctx->node_id = ctx->node_id;
aes_ctx->user_nonce = aes_enc_dec_ctl->user_nonce;
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -1222,21 +1373,19 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
crypto_aead_clear_flags(tfm, ~0);
if (!aes_enc_dec_ctl->skip_key) {
ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES %x",
aes_enc_dec_ctl->key_slot);
if (ret >= AES_KEYSLOT_NAME_SIZE) {
pr_err("%s(): Buffer overflow while preparing key for gcm(aes): %d\n",
__func__, ret);
ret = -EINVAL;
goto free_req;
}
ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES %x",
aes_enc_dec_ctl->key_slot);
if (ret >= AES_KEYSLOT_NAME_SIZE) {
pr_err("%s(): Buffer overflow while preparing key for gcm(aes): %d\n",
__func__, ret);
ret = -EINVAL;
goto free_req;
}
ret = crypto_aead_setkey(tfm, key_as_keyslot, aes_enc_dec_ctl->key_length);
if (ret < 0) {
pr_err("%s(): Failed to set key: %d\n", __func__, ret);
goto free_req;
}
ret = crypto_aead_setkey(tfm, key_as_keyslot, aes_enc_dec_ctl->key_length);
if (ret < 0) {
pr_err("%s(): Failed to set key: %d\n", __func__, ret);
goto free_req;
}
ret = crypto_aead_setauthsize(tfm, aes_enc_dec_ctl->tag_length);
@@ -1258,10 +1407,10 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
aead_request_set_ad(req, aad_length);
memset(iv, 0, TEGRA_NVVSE_AES_GCM_IV_LEN);
if (!enc)
if (!enc || aes_enc_dec_ctl->user_nonce != 0U)
memcpy(iv, aes_enc_dec_ctl->initial_vector, TEGRA_NVVSE_AES_GCM_IV_LEN);
else if (enc && !aes_enc_dec_ctl->is_non_first_call)
/* Set first byte of IV to 1 for first encryption request. This is used to invoke
/* Set first byte of iv to 1 for first encryption request. This is used to invoke
* generation of random IV.
*/
iv[0] = 1;
@@ -1501,8 +1650,9 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
goto free_buf;
}
}
memcpy(aes_enc_dec_ctl->initial_vector, req->iv, TEGRA_NVVSE_AES_GCM_IV_LEN);
if (aes_enc_dec_ctl->user_nonce == 0U)
memcpy(aes_enc_dec_ctl->initial_vector, req->iv,
TEGRA_NVVSE_AES_GCM_IV_LEN);
}
free_buf:
@@ -1569,6 +1719,11 @@ static int tnvvse_crypto_get_ivc_db(struct tegra_nvvse_get_ivc_db *get_ivc_db)
get_ivc_db->ivc_id[i] = hv_vse_db[i].ivc_id;
get_ivc_db->se_engine[i] = hv_vse_db[i].se_engine;
get_ivc_db->node_id[i] = hv_vse_db[i].node_id;
get_ivc_db->priority[i] = hv_vse_db[i].priority;
get_ivc_db->max_buffer_size[i] = hv_vse_db[i].max_buffer_size;
get_ivc_db->channel_grp_id[i] = hv_vse_db[i].channel_grp_id;
get_ivc_db->gcm_dec_supported[i] = hv_vse_db[i].gcm_dec_supported;
get_ivc_db->gcm_dec_buffer_size[i] = hv_vse_db[i].gcm_dec_buffer_size;
}
return ret;
@@ -1663,6 +1818,7 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
struct tegra_nvvse_aes_gmac_init_ctl aes_gmac_init_ctl;
struct tegra_nvvse_aes_gmac_sign_verify_ctl aes_gmac_sign_verify_ctl;
struct tegra_nvvse_get_ivc_db get_ivc_db;
struct tegra_nvvse_tsec_get_keyload_status tsec_keyload_status;
int ret = 0;
/*
@@ -1834,6 +1990,44 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
break;
case NVVSE_IOCTL_CMDID_TSEC_SIGN_VERIFY:
arg_aes_cmac_sign_verify_ctl = (void __user *)arg;
ret = copy_from_user(&aes_cmac_sign_verify_ctl, (void __user *)arg,
sizeof(aes_cmac_sign_verify_ctl));
if (ret) {
pr_err("%s(): Failed to copy_from_user tsec_sign_verify:%d\n",
__func__, ret);
goto out;
}
ret = tnvvtsec_crypto_aes_cmac_sign_verify(ctx, &aes_cmac_sign_verify_ctl);
if (ret)
goto out;
if (aes_cmac_sign_verify_ctl.cmac_type == TEGRA_NVVSE_AES_CMAC_VERIFY) {
ret = copy_to_user(&arg_aes_cmac_sign_verify_ctl->result,
&aes_cmac_sign_verify_ctl.result,
sizeof(uint8_t));
if (ret)
pr_err("%s(): Failed to copy_to_user:%d\n", __func__, ret);
}
break;
case NVVSE_IOCTL_CMDID_TSEC_GET_KEYLOAD_STATUS:
ret = tnvvse_crypto_tsec_get_keyload_status(ctx, &tsec_keyload_status);
if (ret) {
pr_err("%s(): Failed to get keyload status:%d\n", __func__, ret);
goto out;
}
ret = copy_to_user((void __user *)arg, &tsec_keyload_status,
sizeof(tsec_keyload_status));
if (ret) {
pr_err("%s(): Failed to copy_to_user tsec_keyload_status:%d\n",
__func__, ret);
goto out;
}
break;
default:
pr_err("%s(): invalid ioctl code(%d[0x%08x])", __func__, ioctl_num, ioctl_num);
ret = -EINVAL;