vse: Add keyslot management related changes

JIRA ESSS-1834

Signed-off-by: Khushi <khushi@nvidia.com>
Change-Id: I5276228f9e1f42c945336e641f5180777ccf9006
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3318026
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: Leo Chiu <lchiu@nvidia.com>
Reviewed-by: Sandeep Trasi <strasi@nvidia.com>
This commit is contained in:
Khushi
2025-03-12 09:28:52 +00:00
committed by Jon Hunter
parent 025c3b6b37
commit 3e98bab231
4 changed files with 599 additions and 134 deletions

View File

@@ -138,6 +138,10 @@
#define TEGRA_VIRTUAL_SE_CMD_AES_SET_KEY 0xF1 #define TEGRA_VIRTUAL_SE_CMD_AES_SET_KEY 0xF1
#define TEGRA_VIRTUAL_SE_CMD_AES_ALLOC_KEY 0xF0 #define TEGRA_VIRTUAL_SE_CMD_AES_ALLOC_KEY 0xF0
#define TEGRA_VIRTUAL_SE_CMD_ALLOC_KEY 0x00040001U
#define TEGRA_VIRTUAL_SE_CMD_RELEASE_KEY 0x00040002U
#define TEGRA_VIRTUAL_SE_CMD_RELEASE_KEY_USING_GRIP 0x00040003U
#define TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT_INIT (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ #define TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT_INIT (TEGRA_VIRTUAL_SE_CMD_ENG_AES \
| TEGRA_VIRTUAL_SE_CMD_CATEGORY_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_ENC_DEC \
| TEGRA_VIRTUAL_SE_CMD_SET_AES_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_SET_AES_ENC_DEC \
@@ -393,6 +397,13 @@ struct tegra_virtual_se_addr64_buf_size {
u32 buf_size; u32 buf_size;
}; };
struct key_args {
uint8_t keyslot[KEYSLOT_SIZE_BYTES];
uint32_t key_instance;
uint32_t key_grp_id;
uint32_t token_id;
};
union tegra_virtual_se_aes_args { union tegra_virtual_se_aes_args {
struct keyiv { struct keyiv {
u8 slot[KEYSLOT_SIZE_BYTES]; u8 slot[KEYSLOT_SIZE_BYTES];
@@ -404,6 +415,8 @@ union tegra_virtual_se_aes_args {
} key; } key;
struct aes_encdec { struct aes_encdec {
u8 keyslot[KEYSLOT_SIZE_BYTES]; u8 keyslot[KEYSLOT_SIZE_BYTES];
uint32_t key_instance;
uint32_t release_keyslot;
u32 mode; u32 mode;
u32 ivsel; u32 ivsel;
u8 lctr[TEGRA_VIRTUAL_SE_AES_LCTR_SIZE]; u8 lctr[TEGRA_VIRTUAL_SE_AES_LCTR_SIZE];
@@ -412,7 +425,6 @@ union tegra_virtual_se_aes_args {
u32 src_buf_size; u32 src_buf_size;
u64 dst_addr; u64 dst_addr;
u32 dst_buf_size; u32 dst_buf_size;
u32 key_length;
} op; } op;
struct aes_gcm { struct aes_gcm {
@@ -422,6 +434,9 @@ union tegra_virtual_se_aes_args {
*/ */
uint8_t keyslot[KEYSLOT_SIZE_BYTES]; uint8_t keyslot[KEYSLOT_SIZE_BYTES];
uint32_t key_instance;
uint32_t token_id;
uint32_t release_keyslot;
uint64_t dst_addr; uint64_t dst_addr;
uint32_t dst_buf_size; uint32_t dst_buf_size;
uint64_t src_addr; uint64_t src_addr;
@@ -435,12 +450,6 @@ union tegra_virtual_se_aes_args {
/* TODO: ESLC-6207: use lctr instead*/ /* TODO: ESLC-6207: use lctr instead*/
uint8_t iv[12]; uint8_t iv[12];
/**
* Key length in bytes.
*
* Supported key length is 16 bytes
*/
uint32_t key_length;
/* Config for AES-GMAC request */ /* Config for AES-GMAC request */
uint32_t config; uint32_t config;
u8 expected_tag[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE]; u8 expected_tag[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE];
@@ -449,12 +458,12 @@ union tegra_virtual_se_aes_args {
} op_gcm; } op_gcm;
struct aes_cmac_sv { struct aes_cmac_sv {
u8 keyslot[KEYSLOT_SIZE_BYTES]; u8 keyslot[KEYSLOT_SIZE_BYTES];
uint32_t token_id;
u32 config; u32 config;
u32 lastblock_len; u32 lastblock_len;
u8 lastblock[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE]; u8 lastblock[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE];
u64 src_addr; u64 src_addr;
u32 src_buf_size; u32 src_buf_size;
u32 key_length;
u8 cmac_result[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE]; u8 cmac_result[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE];
u64 mac_addr; u64 mac_addr;
u64 mac_comp_res_addr; u64 mac_comp_res_addr;
@@ -480,6 +489,7 @@ union tegra_virtual_se_sha_args {
struct tegra_virtual_se_hmac_sha_args { struct tegra_virtual_se_hmac_sha_args {
u8 keyslot[KEYSLOT_SIZE_BYTES]; u8 keyslot[KEYSLOT_SIZE_BYTES];
uint32_t token_id;
u32 mode; u32 mode;
u32 lastblock_len; u32 lastblock_len;
u8 lastblock[TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT]; u8 lastblock[TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT];
@@ -555,6 +565,7 @@ struct tegra_virtual_se_ivc_tx_msg_t {
u32 tag; u32 tag;
u32 cmd; u32 cmd;
union { union {
struct key_args keys;
union tegra_virtual_se_aes_args aes; union tegra_virtual_se_aes_args aes;
union tegra_virtual_se_sha_args sha; union tegra_virtual_se_sha_args sha;
struct tegra_virtual_tsec_args tsec[TEGRA_HV_VSE_MAX_TSEC_TASKS_PER_SUBMIT]; struct tegra_virtual_tsec_args tsec[TEGRA_HV_VSE_MAX_TSEC_TASKS_PER_SUBMIT];
@@ -1038,6 +1049,293 @@ static const struct tegra_vse_dma_buf *tegra_hv_vse_get_dma_buf(
return &g_node_dma[node_id].se_dma_buf[buf_idx]; return &g_node_dma[node_id].se_dma_buf[buf_idx];
} }
int tegra_hv_vse_allocate_keyslot(struct tegra_vse_key_slot_ctx *key_slot,
uint32_t node_id)
{
struct tegra_virtual_se_dev *se_dev = NULL;
struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL;
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
struct tegra_hv_ivc_cookie *pivck;
struct tegra_vse_priv_data *priv = NULL;
struct tegra_vse_tag *priv_data_ptr;
int err = 0;
if (node_id >= MAX_NUMBER_MISC_DEVICES) {
VSE_ERR("%s: node_id is invalid\n", __func__);
return -EINVAL;
}
se_dev = g_crypto_to_ivc_map[node_id].se_dev;
if (!se_dev) {
VSE_ERR("%s: se_dev is NULL\n", __func__);
return -EINVAL;
}
if (!se_dev->chipdata->allocate_key_slot_supported) {
dev_err(se_dev->dev, "%s: Allocate Keyslot is not supported\n", __func__);
return -EINVAL;
}
if (!key_slot) {
dev_err(se_dev->dev, "%s: key slot params is NULL\n", __func__);
return -EINVAL;
}
if (atomic_read(&se_dev->se_suspended)) {
dev_err(se_dev->dev, "Engine is in suspended state\n");
return -ENODEV;
}
pivck = g_crypto_to_ivc_map[node_id].ivck;
if (!pivck) {
dev_err(se_dev->dev, "No IVC channel\n");
return -ENODEV;
}
priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL);
if (!ivc_req_msg) {
err = -ENOMEM;
goto free_priv;
}
ivc_tx = &ivc_req_msg->tx[0];
ivc_hdr = &ivc_req_msg->ivc_hdr;
ivc_hdr->num_reqs = 1;
ivc_hdr->header_magic[0] = 'N';
ivc_hdr->header_magic[1] = 'V';
ivc_hdr->header_magic[2] = 'D';
ivc_hdr->header_magic[3] = 'A';
ivc_hdr->engine = g_crypto_to_ivc_map[node_id].engine_id;
priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag;
priv_data_ptr->priv_data = (unsigned int *)priv;
priv->cmd = VIRTUAL_SE_KEY_SLOT;
priv->se_dev = se_dev;
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_ALLOC_KEY;
memcpy(ivc_tx->keys.keyslot, key_slot->key_id, KEYSLOT_SIZE_BYTES);
ivc_tx->keys.key_grp_id = key_slot->key_grp_id;
ivc_tx->keys.token_id = key_slot->token_id;
g_crypto_to_ivc_map[node_id].vse_thread_start = true;
init_completion(&priv->alg_complete);
err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg,
sizeof(struct tegra_virtual_se_ivc_msg_t), node_id);
if (err) {
dev_err(se_dev->dev, "Failed to send IVC message: %d\n", err);
goto free_mem;
}
if (priv->rx_status) {
dev_err(se_dev->dev, "Key slot allocation failed with error: %d\n",
priv->rx_status);
err = -EINVAL;
goto free_mem;
}
key_slot->key_instance_idx = priv->slot_num;
free_mem:
devm_kfree(se_dev->dev, ivc_req_msg);
free_priv:
devm_kfree(se_dev->dev, priv);
return err;
}
EXPORT_SYMBOL(tegra_hv_vse_allocate_keyslot);
/* Release every keyslot belonging to a key group over IVC.
 *
 * Sends TEGRA_VIRTUAL_SE_CMD_RELEASE_KEY_USING_GRIP with the given group id
 * so the server frees all key instances allocated under that group.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int tegra_hv_vse_close_keyslot(uint32_t node_id, uint32_t key_grp_id)
{
	struct tegra_virtual_se_dev *se_dev = NULL;
	struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL;
	struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
	struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
	struct tegra_hv_ivc_cookie *pivck;
	struct tegra_vse_priv_data *priv = NULL;
	struct tegra_vse_tag *priv_data_ptr;
	int err = 0;

	if (node_id >= MAX_NUMBER_MISC_DEVICES) {
		VSE_ERR("%s: node_id is invalid\n", __func__);
		return -EINVAL;
	}

	se_dev = g_crypto_to_ivc_map[node_id].se_dev;
	if (!se_dev) {
		VSE_ERR("%s: se_dev is NULL\n", __func__);
		return -EINVAL;
	}

	if (!se_dev->chipdata->allocate_key_slot_supported) {
		dev_err(se_dev->dev, "%s: Keyslot release is not supported\n", __func__);
		return -EINVAL;
	}

	if (atomic_read(&se_dev->se_suspended)) {
		dev_err(se_dev->dev, "Engine is in suspended state\n");
		return -ENODEV;
	}

	pivck = g_crypto_to_ivc_map[node_id].ivck;
	if (!pivck) {
		dev_err(se_dev->dev, "No IVC channel\n");
		return -ENODEV;
	}

	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL);
	if (!ivc_req_msg) {
		err = -ENOMEM;
		goto free_priv;
	}

	ivc_tx = &ivc_req_msg->tx[0];
	ivc_hdr = &ivc_req_msg->ivc_hdr;
	ivc_hdr->num_reqs = 1;
	ivc_hdr->header_magic[0] = 'N';
	ivc_hdr->header_magic[1] = 'V';
	ivc_hdr->header_magic[2] = 'D';
	ivc_hdr->header_magic[3] = 'A';
	/* Engine id was missing here while the allocate/release paths set it;
	 * populate it so the server routes the request to the right engine. */
	ivc_hdr->engine = g_crypto_to_ivc_map[node_id].engine_id;

	ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_RELEASE_KEY_USING_GRIP;
	ivc_tx->keys.key_grp_id = key_grp_id;

	priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag;
	priv_data_ptr->priv_data = (unsigned int *)priv;
	priv->cmd = VIRTUAL_SE_KEY_SLOT;
	priv->se_dev = se_dev;

	g_crypto_to_ivc_map[node_id].vse_thread_start = true;
	init_completion(&priv->alg_complete);
	err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg,
			sizeof(struct tegra_virtual_se_ivc_msg_t), node_id);
	if (err) {
		dev_err(se_dev->dev, "Failed to send IVC message: %d\n", err);
		goto free_mem;
	}

	if (priv->rx_status) {
		dev_err(se_dev->dev, "Key slot release failed with error: %d\n",
			priv->rx_status);
		err = -EINVAL;
	}

free_mem:
	devm_kfree(se_dev->dev, ivc_req_msg);
free_priv:
	devm_kfree(se_dev->dev, priv);
	return err;
}
EXPORT_SYMBOL(tegra_hv_vse_close_keyslot);
/* Release a single keyslot on the virtual SE server over IVC.
 *
 * Sends TEGRA_VIRTUAL_SE_CMD_RELEASE_KEY carrying the key id, token id and
 * key instance index that were returned by tegra_hv_vse_allocate_keyslot().
 *
 * Returns 0 on success or a negative errno on failure.
 */
int tegra_hv_vse_release_keyslot(struct tegra_vse_key_slot_ctx *key_slot, uint32_t node_id)
{
	struct tegra_virtual_se_dev *se_dev = NULL;
	struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL;
	struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
	struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
	struct tegra_hv_ivc_cookie *pivck;
	struct tegra_vse_priv_data *priv = NULL;
	struct tegra_vse_tag *priv_data_ptr;
	int err = 0;

	if (node_id >= MAX_NUMBER_MISC_DEVICES) {
		VSE_ERR("%s: node_id is invalid\n", __func__);
		return -EINVAL;
	}

	se_dev = g_crypto_to_ivc_map[node_id].se_dev;
	if (!se_dev) {
		VSE_ERR("%s: se_dev is NULL\n", __func__);
		return -EINVAL;
	}

	if (!se_dev->chipdata->allocate_key_slot_supported) {
		/* Previous message named the chipdata flag instead of the
		 * operation, which read as gibberish in the log. */
		dev_err(se_dev->dev, "%s: Keyslot release is not supported\n", __func__);
		return -EINVAL;
	}

	if (!key_slot) {
		dev_err(se_dev->dev, "%s: key slot params is NULL\n", __func__);
		return -EINVAL;
	}

	/* Return error if engine is in suspended state */
	if (atomic_read(&se_dev->se_suspended)) {
		dev_err(se_dev->dev, "Engine is in suspended state\n");
		return -ENODEV;
	}

	pivck = g_crypto_to_ivc_map[node_id].ivck;
	if (!pivck) {
		dev_err(se_dev->dev, "No IVC channel\n");
		return -ENODEV;
	}

	priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL);
	if (!ivc_req_msg) {
		err = -ENOMEM;
		goto free_priv;
	}

	ivc_tx = &ivc_req_msg->tx[0];
	ivc_hdr = &ivc_req_msg->ivc_hdr;
	ivc_hdr->num_reqs = 1;
	ivc_hdr->header_magic[0] = 'N';
	ivc_hdr->header_magic[1] = 'V';
	ivc_hdr->header_magic[2] = 'D';
	ivc_hdr->header_magic[3] = 'A';
	ivc_hdr->engine = g_crypto_to_ivc_map[node_id].engine_id;

	priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag;
	priv_data_ptr->priv_data = (unsigned int *)priv;
	priv->cmd = VIRTUAL_SE_KEY_SLOT;
	priv->se_dev = se_dev;

	ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_RELEASE_KEY;
	memcpy(ivc_tx->keys.keyslot, key_slot->key_id, KEYSLOT_SIZE_BYTES);
	ivc_tx->keys.token_id = key_slot->token_id;
	ivc_tx->keys.key_instance = key_slot->key_instance_idx;

	g_crypto_to_ivc_map[node_id].vse_thread_start = true;
	init_completion(&priv->alg_complete);
	err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg,
			sizeof(struct tegra_virtual_se_ivc_msg_t), node_id);
	if (err) {
		dev_err(se_dev->dev, "Failed to send IVC message: %d\n", err);
		goto free_mem;
	}

	if (priv->rx_status) {
		dev_err(se_dev->dev, "Key slot release failed with error: %d\n",
			priv->rx_status);
		err = -EINVAL;
	}

free_mem:
	devm_kfree(se_dev->dev, ivc_req_msg);
free_priv:
	devm_kfree(se_dev->dev, priv);
	return err;
}
EXPORT_SYMBOL(tegra_hv_vse_release_keyslot);
static int tegra_vse_validate_hmac_sha_params(struct tegra_virtual_se_hmac_sha_context *hmac_ctx, static int tegra_vse_validate_hmac_sha_params(struct tegra_virtual_se_hmac_sha_context *hmac_ctx,
bool is_last) bool is_last)
{ {
@@ -1619,18 +1917,13 @@ static int tegra_hv_vse_safety_hmac_sha_setkey(struct crypto_ahash *tfm, const u
se_dev = g_crypto_to_ivc_map[hmac_ctx->node_id].se_dev; se_dev = g_crypto_to_ivc_map[hmac_ctx->node_id].se_dev;
if (keylen != 32) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
return -EINVAL;
}
/* format: 'NVSEAES 1234567\0' */ /* format: 'NVSEAES 1234567\0' */
is_keyslot_label = sscanf(key, "%s", label) == 1 && is_keyslot_label = sscanf(key, "%s", label) == 1 &&
!strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL); !strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL);
if (is_keyslot_label) { if (is_keyslot_label) {
hmac_ctx->keylen = keylen; memcpy(hmac_ctx->key_slot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
memcpy(hmac_ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
hmac_ctx->is_key_slot_allocated = true; hmac_ctx->is_key_slot_allocated = true;
} else { } else {
dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key); dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key);
@@ -1771,7 +2064,9 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req,
phmac->msg_total_length[3] = 0; phmac->msg_total_length[3] = 0;
phmac->msg_left_length[2] = 0; phmac->msg_left_length[2] = 0;
phmac->msg_left_length[3] = 0; phmac->msg_left_length[3] = 0;
memcpy(phmac->keyslot, hmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(phmac->keyslot, hmac_ctx->key_slot, KEYSLOT_SIZE_BYTES);
if (se_dev->chipdata->allocate_key_slot_supported)
phmac->token_id = hmac_ctx->token_id;
phmac->src_addr = src->buf_iova; phmac->src_addr = src->buf_iova;
if (hmac_ctx->request_type == TEGRA_HV_VSE_HMAC_SHA_SIGN) if (hmac_ctx->request_type == TEGRA_HV_VSE_HMAC_SHA_SIGN)
@@ -2054,6 +2349,7 @@ static int tegra_hv_vse_safety_hmac_sha_finup(struct ahash_request *req)
dev_err(se_dev->dev, "tegra_se_hmac_sha_finup failed - %d\n", ret); dev_err(se_dev->dev, "tegra_se_hmac_sha_finup failed - %d\n", ret);
hmac_ctx->is_key_slot_allocated = false; hmac_ctx->is_key_slot_allocated = false;
req_ctx->req_context_initialized = false; req_ctx->req_context_initialized = false;
return ret; return ret;
@@ -2115,8 +2411,9 @@ static void tegra_hv_vse_safety_prepare_cmd(struct tegra_virtual_se_dev *se_dev,
else else
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_DECRYPT; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_DECRYPT;
memcpy(aes->op.keyslot, aes_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(aes->op.keyslot, aes_ctx->key_slot, KEYSLOT_SIZE_BYTES);
aes->op.key_length = aes_ctx->keylen; aes->op.release_keyslot = aes_ctx->release_key_flag;
aes->op.key_instance = aes_ctx->key_instance_idx;
aes->op.mode = req_ctx->op_mode; aes->op.mode = req_ctx->op_mode;
aes->op.ivsel = AES_ORIGINAL_IV; aes->op.ivsel = AES_ORIGINAL_IV;
memcpy(aes->op.lctr, aes_ctx->iv, memcpy(aes->op.lctr, aes_ctx->iv,
@@ -2146,8 +2443,8 @@ static int tegra_hv_vse_safety_aes_gen_random_iv(
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT_INIT; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT_INIT;
priv->cmd = VIRTUAL_SE_PROCESS; priv->cmd = VIRTUAL_SE_PROCESS;
memcpy(aes->op.keyslot, aes_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(aes->op.keyslot, aes_ctx->key_slot, KEYSLOT_SIZE_BYTES);
aes->op.key_length = aes_ctx->keylen; aes->op.key_instance = aes_ctx->key_instance_idx;
pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck; pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck;
init_completion(&priv->alg_complete); init_completion(&priv->alg_complete);
@@ -2550,7 +2847,7 @@ static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req,
ivc_tx->tsec[0U].dst_addr = mac->buf_iova; ivc_tx->tsec[0U].dst_addr = mac->buf_iova;
ivc_tx->tsec[0U].fw_status_addr = fw_status->buf_iova; ivc_tx->tsec[0U].fw_status_addr = fw_status->buf_iova;
ivc_tx->tsec[0U].src_buf_size = cmac_ctx->user_src_buf_size; ivc_tx->tsec[0U].src_buf_size = cmac_ctx->user_src_buf_size;
ivc_tx->tsec[0U].keyslot = *((uint64_t *)cmac_ctx->aes_keyslot); ivc_tx->tsec[0U].keyslot = *((uint64_t *)cmac_ctx->key_slot);
if (cmac_ctx->request_type == TEGRA_HV_VSE_CMAC_SIGN) { if (cmac_ctx->request_type == TEGRA_HV_VSE_CMAC_SIGN) {
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_TSEC_SIGN; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_TSEC_SIGN;
@@ -2710,8 +3007,8 @@ static int tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMAC_VERIFY; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMAC_VERIFY;
ivc_tx->aes.op_cmac_sv.mac_comp_res_addr = comp->buf_iova; ivc_tx->aes.op_cmac_sv.mac_comp_res_addr = comp->buf_iova;
} }
memcpy(ivc_tx->aes.op_cmac_sv.keyslot, cmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(ivc_tx->aes.op_cmac_sv.keyslot, cmac_ctx->key_slot, KEYSLOT_SIZE_BYTES);
ivc_tx->aes.op_cmac_sv.key_length = cmac_ctx->keylen; ivc_tx->aes.op_cmac_sv.token_id = cmac_ctx->token_id;
ivc_tx->aes.op_cmac_sv.config = 0; ivc_tx->aes.op_cmac_sv.config = 0;
if (cmac_ctx->b_is_sm4 == 1U) if (cmac_ctx->b_is_sm4 == 1U)
@@ -2867,8 +3164,7 @@ static int tegra_hv_vse_safety_cmac_sv_op(struct ahash_request *req,
else else
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMAC_VERIFY; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMAC_VERIFY;
memcpy(ivc_tx->aes.op_cmac_sv.keyslot, cmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(ivc_tx->aes.op_cmac_sv.keyslot, cmac_ctx->key_slot, KEYSLOT_SIZE_BYTES);
ivc_tx->aes.op_cmac_sv.key_length = cmac_ctx->keylen;
ivc_tx->aes.op_cmac_sv.src_buf_size = blocks_to_process * TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE; ivc_tx->aes.op_cmac_sv.src_buf_size = blocks_to_process * TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE;
ivc_tx->aes.op_cmac_sv.config = 0; ivc_tx->aes.op_cmac_sv.config = 0;
if (is_last == true) if (is_last == true)
@@ -3423,18 +3719,12 @@ static int tegra_hv_vse_safety_cmac_setkey(struct crypto_ahash *tfm, const u8 *k
se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev; se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if ((keylen != 16) && (keylen != 32)) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
return -EINVAL;
}
/* format: 'NVSEAES 1234567\0' */ /* format: 'NVSEAES 1234567\0' */
is_keyslot_label = sscanf(key, "%s", label) == 1 && is_keyslot_label = sscanf(key, "%s", label) == 1 &&
!strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL); !strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL);
if (is_keyslot_label) { if (is_keyslot_label) {
ctx->keylen = keylen; memcpy(ctx->key_slot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
ctx->is_key_slot_allocated = true; ctx->is_key_slot_allocated = true;
} else { } else {
dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key); dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key);
@@ -3471,18 +3761,12 @@ static int tegra_hv_vse_safety_aes_setkey(struct crypto_skcipher *tfm,
se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev; se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if ((keylen != 16) && (keylen != 32)) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
return -EINVAL;
}
/* format: 'NVSEAES 1234567\0' */ /* format: 'NVSEAES 1234567\0' */
is_keyslot_label = sscanf(key, "%s", label) == 1 && is_keyslot_label = sscanf(key, "%s", label) == 1 &&
!strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL); !strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL);
if (is_keyslot_label) { if (is_keyslot_label) {
ctx->keylen = keylen; memcpy(ctx->key_slot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
ctx->is_key_slot_allocated = true; ctx->is_key_slot_allocated = true;
} else { } else {
dev_err(se_dev->dev, "%s: Invalid keyslot label %s", __func__, key); dev_err(se_dev->dev, "%s: Invalid keyslot label %s", __func__, key);
@@ -3629,18 +3913,12 @@ static int tegra_vse_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev; se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if ((keylen != 16) && (keylen != 32)) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
return -EINVAL;
}
/* format: 'NVSEAES 1234567\0' */ /* format: 'NVSEAES 1234567\0' */
is_keyslot_label = sscanf(key, "%s", label) == 1 && is_keyslot_label = sscanf(key, "%s", label) == 1 &&
!strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL); !strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL);
if (is_keyslot_label) { if (is_keyslot_label) {
ctx->keylen = keylen; memcpy(ctx->key_slot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
ctx->is_key_slot_allocated = true; ctx->is_key_slot_allocated = true;
} else { } else {
dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key); dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key);
@@ -3813,8 +4091,7 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req,
g_crypto_to_ivc_map[aes_ctx->node_id].vse_thread_start = true; g_crypto_to_ivc_map[aes_ctx->node_id].vse_thread_start = true;
memcpy(ivc_tx->aes.op_gcm.keyslot, aes_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(ivc_tx->aes.op_gcm.keyslot, aes_ctx->key_slot, KEYSLOT_SIZE_BYTES);
ivc_tx->aes.op_gcm.key_length = aes_ctx->keylen;
if (encrypt) { if (encrypt) {
/* /*
@@ -4036,8 +4313,8 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req,
g_crypto_to_ivc_map[aes_ctx->node_id].vse_thread_start = true; g_crypto_to_ivc_map[aes_ctx->node_id].vse_thread_start = true;
memcpy(ivc_tx->aes.op_gcm.keyslot, aes_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(ivc_tx->aes.op_gcm.keyslot, aes_ctx->key_slot, KEYSLOT_SIZE_BYTES);
ivc_tx->aes.op_gcm.key_length = aes_ctx->keylen; ivc_tx->aes.op_gcm.token_id = aes_ctx->token_id;
if (encrypt) { if (encrypt) {
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GCM_CMD_ENCRYPT; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GCM_CMD_ENCRYPT;
@@ -4288,19 +4565,12 @@ static int tegra_hv_vse_aes_gmac_setkey(struct crypto_ahash *tfm, const u8 *key,
se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev; se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if ((keylen != 16) && (keylen != 32)) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
err = -EINVAL;
goto exit;
}
/* format: 'NVSEAES 1234567\0' */ /* format: 'NVSEAES 1234567\0' */
is_keyslot_label = sscanf(key, "%s", label) == 1 && is_keyslot_label = sscanf(key, "%s", label) == 1 &&
(!strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL)); (!strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL));
if (is_keyslot_label) { if (is_keyslot_label) {
ctx->keylen = keylen; memcpy(ctx->key_slot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
ctx->is_key_slot_allocated = true; ctx->is_key_slot_allocated = true;
} else { } else {
dev_err(se_dev->dev, dev_err(se_dev->dev,
@@ -4393,8 +4663,8 @@ static int tegra_hv_vse_aes_gmac_sv_init(struct ahash_request *req)
priv->se_dev = se_dev; priv->se_dev = se_dev;
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_INIT; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_INIT;
memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->key_slot, KEYSLOT_SIZE_BYTES);
ivc_tx->aes.op_gcm.key_length = gmac_ctx->keylen; ivc_tx->aes.op_gcm.key_instance = gmac_ctx->key_instance_idx;
g_crypto_to_ivc_map[gmac_ctx->node_id].vse_thread_start = true; g_crypto_to_ivc_map[gmac_ctx->node_id].vse_thread_start = true;
pivck = g_crypto_to_ivc_map[gmac_ctx->node_id].ivck; pivck = g_crypto_to_ivc_map[gmac_ctx->node_id].ivck;
@@ -4415,6 +4685,7 @@ static int tegra_hv_vse_aes_gmac_sv_init(struct ahash_request *req)
} }
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GMAC_IV; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GMAC_IV;
ivc_tx->aes.op_gcm.key_instance = gmac_ctx->key_instance_idx;
priv->cmd = VIRTUAL_SE_AES_GCM_ENC_PROCESS; priv->cmd = VIRTUAL_SE_AES_GCM_ENC_PROCESS;
init_completion(&priv->alg_complete); init_completion(&priv->alg_complete);
err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg,
@@ -4519,8 +4790,7 @@ static int tegra_hv_vse_aes_gmac_sv_op(struct ahash_request *req,
else else
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_VERIFY; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_VERIFY;
memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->key_slot, KEYSLOT_SIZE_BYTES);
ivc_tx->aes.op_gcm.key_length = gmac_ctx->keylen;
ivc_tx->aes.op_gcm.aad_buf_size = gmac_ctx->user_aad_buf_size; ivc_tx->aes.op_gcm.aad_buf_size = gmac_ctx->user_aad_buf_size;
ivc_tx->aes.op_gcm.aad_addr = (u32)(aad->buf_iova & U32_MAX); ivc_tx->aes.op_gcm.aad_addr = (u32)(aad->buf_iova & U32_MAX);
@@ -4709,8 +4979,9 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req,
else else
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_VERIFY; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_VERIFY;
memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->key_slot, KEYSLOT_SIZE_BYTES);
ivc_tx->aes.op_gcm.key_length = gmac_ctx->keylen; ivc_tx->aes.op_gcm.key_instance = gmac_ctx->key_instance_idx;
ivc_tx->aes.op_gcm.release_keyslot = gmac_ctx->release_key_flag;
ivc_tx->aes.op_gcm.aad_buf_size = gmac_ctx->user_aad_buf_size; ivc_tx->aes.op_gcm.aad_buf_size = gmac_ctx->user_aad_buf_size;
ivc_tx->aes.op_gcm.aad_addr = aad_addr; ivc_tx->aes.op_gcm.aad_addr = aad_addr;
@@ -5289,6 +5560,7 @@ static const struct tegra_vse_soc_info t234_vse_sinfo = {
.gcm_hw_iv_supported = false, .gcm_hw_iv_supported = false,
.hmac_verify_hw_support = false, .hmac_verify_hw_support = false,
.zero_copy_supported = false, .zero_copy_supported = false,
.allocate_key_slot_supported = false,
}; };
static const struct tegra_vse_soc_info se_51_vse_sinfo = { static const struct tegra_vse_soc_info se_51_vse_sinfo = {
@@ -5297,6 +5569,7 @@ static const struct tegra_vse_soc_info se_51_vse_sinfo = {
.gcm_hw_iv_supported = true, .gcm_hw_iv_supported = true,
.hmac_verify_hw_support = true, .hmac_verify_hw_support = true,
.zero_copy_supported = true, .zero_copy_supported = true,
.allocate_key_slot_supported = true,
}; };
static const struct of_device_id tegra_hv_vse_safety_of_match[] = { static const struct of_device_id tegra_hv_vse_safety_of_match[] = {

View File

@@ -19,6 +19,7 @@ struct tegra_vse_soc_info {
bool gcm_hw_iv_supported; bool gcm_hw_iv_supported;
bool hmac_verify_hw_support; bool hmac_verify_hw_support;
bool zero_copy_supported; bool zero_copy_supported;
bool allocate_key_slot_supported;
}; };
/* GCM Operation Supported Flag */ /* GCM Operation Supported Flag */
@@ -45,6 +46,13 @@ struct tegra_vse_membuf_ctx {
struct dma_buf_attachment *attach; struct dma_buf_attachment *attach;
}; };
struct tegra_vse_key_slot_ctx {
uint8_t key_id[KEYSLOT_SIZE_BYTES];
uint8_t token_id;
uint32_t key_instance_idx;
uint32_t key_grp_id;
};
struct tegra_vse_node_dma { struct tegra_vse_node_dma {
struct device *se_dev; struct device *se_dev;
struct device *gpcdma_dev; struct device *gpcdma_dev;
@@ -118,10 +126,15 @@ struct tegra_virtual_se_aes_context {
/* Security Engine device */ /* Security Engine device */
struct tegra_virtual_se_dev *se_dev; struct tegra_virtual_se_dev *se_dev;
struct skcipher_request *req; struct skcipher_request *req;
/* Security Engine key slot */ /** [in] Holds the key id */
u8 aes_keyslot[KEYSLOT_SIZE_BYTES]; uint8_t key_slot[KEYSLOT_SIZE_BYTES];
/* key length in bytes */ /** [in] Holds the token id */
u32 keylen; uint8_t token_id;
/** [inout] Holds the Key instance index */
uint32_t key_instance_idx;
/** [in] Holds the release key flag */
uint32_t release_key_flag;
/* AES operation mode */ /* AES operation mode */
u32 op_mode; u32 op_mode;
/* Is key slot */ /* Is key slot */
@@ -154,9 +167,10 @@ struct tegra_virtual_se_aes_cmac_context {
unsigned int digest_size; unsigned int digest_size;
bool is_first; /* Represents first block */ bool is_first; /* Represents first block */
bool req_context_initialized; /* Mark initialization status */ bool req_context_initialized; /* Mark initialization status */
u8 aes_keyslot[KEYSLOT_SIZE_BYTES]; /** [in] Holds the key id */
/* key length in bits */ uint8_t key_slot[KEYSLOT_SIZE_BYTES];
u32 keylen; /** [in] Holds the token id */
uint8_t token_id;
bool is_key_slot_allocated; bool is_key_slot_allocated;
/*Crypto dev instance*/ /*Crypto dev instance*/
uint32_t node_id; uint32_t node_id;
@@ -182,9 +196,13 @@ struct tegra_virtual_se_aes_gmac_context {
u32 authsize; u32 authsize;
/* Mark initialization status */ /* Mark initialization status */
bool req_context_initialized; bool req_context_initialized;
u8 aes_keyslot[KEYSLOT_SIZE_BYTES]; /** [in] Holds the key id */
/* key length in bits */ uint8_t key_slot[KEYSLOT_SIZE_BYTES];
u32 keylen; /** [inout] Holds the Key instance index */
uint32_t key_instance_idx;
/** [in] Holds the release key flag */
uint32_t release_key_flag;
/* Flag to indicate if key slot is allocated*/
bool is_key_slot_allocated; bool is_key_slot_allocated;
/*Crypto dev instance*/ /*Crypto dev instance*/
uint32_t node_id; uint32_t node_id;
@@ -240,10 +258,10 @@ struct tegra_virtual_se_hmac_sha_context {
/* Represents first block */ /* Represents first block */
bool is_first; bool is_first;
bool is_key_slot_allocated; bool is_key_slot_allocated;
/* Keyslot for HMAC-SHA request */ /** [in] Holds the key id */
u8 aes_keyslot[KEYSLOT_SIZE_BYTES]; uint8_t key_slot[KEYSLOT_SIZE_BYTES];
/* key length in bits */ /** [in] Holds the token id */
u32 keylen; uint8_t token_id;
/*Crypto dev instance*/ /*Crypto dev instance*/
uint32_t node_id; uint32_t node_id;
uint8_t *user_src_buf; uint8_t *user_src_buf;
@@ -284,4 +302,10 @@ int tegra_hv_vse_safety_unmap_membuf(struct tegra_virtual_se_membuf_context *ctx
/* API to Unmap all memory buffers corresponding to a node id */ /* API to Unmap all memory buffers corresponding to a node id */
void tegra_hv_vse_safety_unmap_all_membufs(uint32_t node_id); void tegra_hv_vse_safety_unmap_all_membufs(uint32_t node_id);
int tegra_hv_vse_allocate_keyslot(struct tegra_vse_key_slot_ctx *key_slot_params, uint32_t node_id);
int tegra_hv_vse_release_keyslot(struct tegra_vse_key_slot_ctx *key_slot_params, uint32_t node_id);
int tegra_hv_vse_close_keyslot(uint32_t node_id, uint32_t key_grp_id);
#endif /*__TEGRA_HV_VSE_H*/ #endif /*__TEGRA_HV_VSE_H*/

View File

@@ -124,6 +124,8 @@ struct tnvvse_crypto_ctx {
char *sha_result; char *sha_result;
uint32_t node_id; uint32_t node_id;
bool is_zero_copy_node; bool is_zero_copy_node;
uint32_t allocated_key_slot_count;
uint32_t key_grp_id;
}; };
#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) #if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
@@ -193,6 +195,71 @@ fail:
return status; return status;
} }
/* ioctl helper: allocate a keyslot for the calling process.
 *
 * Keyslots are grouped by the caller's pid so they can all be released with
 * one group-release call later.  Context state (group id, slot count) is
 * committed only after the allocation succeeds, so a failed call leaves the
 * context untouched.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tnvvse_crypto_allocate_key_slot(struct tnvvse_crypto_ctx *ctx,
		struct tegra_nvvse_allocate_key_slot_ctl *key_slot_allocate_ctl)
{
	struct tegra_vse_key_slot_ctx key_slot_params;
	uint32_t grp_id;
	int err;

	if (!ctx || !key_slot_allocate_ctl) {
		CRYPTODEV_ERR("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	/* Group keyslots by the calling process. */
	grp_id = current->pid;

	memset(&key_slot_params, 0, sizeof(key_slot_params));
	memcpy(key_slot_params.key_id, key_slot_allocate_ctl->key_id, KEYSLOT_SIZE_BYTES);
	key_slot_params.token_id = key_slot_allocate_ctl->token_id;
	key_slot_params.key_grp_id = grp_id;

	err = tegra_hv_vse_allocate_keyslot(&key_slot_params, ctx->node_id);
	if (err) {
		CRYPTODEV_ERR("%s: Failed to allocate key slot, error: %d\n",
				__func__, err);
		return err;
	}

	/* Commit context state only after the allocation succeeded
	 * (previously ctx->key_grp_id was clobbered even on failure). */
	ctx->key_grp_id = grp_id;
	ctx->allocated_key_slot_count += 1U;
	key_slot_allocate_ctl->key_instance_idx = key_slot_params.key_instance_idx;

	return 0;
}
static int tnvvse_crypto_release_key_slot(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_release_key_slot_ctl *key_slot_release_ctl)
{
int err = 0;
struct tegra_vse_key_slot_ctx vse_key_slot;
if (!ctx) {
CRYPTODEV_ERR("%s: Invalid context\n", __func__);
return -EINVAL;
}
if (!key_slot_release_ctl) {
CRYPTODEV_ERR("Key slot release ctl is NULL\n");
return -EINVAL;
}
if (ctx->allocated_key_slot_count == 0) {
CRYPTODEV_ERR("No key slots allocated to release\n");
return -EINVAL;
}
memset(&vse_key_slot, 0, sizeof(vse_key_slot));
memcpy(vse_key_slot.key_id, key_slot_release_ctl->key_id, sizeof(vse_key_slot.key_id));
vse_key_slot.key_instance_idx = key_slot_release_ctl->key_instance_idx;
err = tegra_hv_vse_release_keyslot(&vse_key_slot, ctx->node_id);
if (err) {
CRYPTODEV_ERR("Failed to release key slot: %d\n", err);
return err;
}
ctx->allocated_key_slot_count -= 1U;
return 0;
}
static int tnvvse_crypto_validate_sha_update_req(struct tnvvse_crypto_ctx *ctx, static int tnvvse_crypto_validate_sha_update_req(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_sha_update_ctl *sha_update_ctl) struct tegra_nvvse_sha_update_ctl *sha_update_ctl)
{ {
@@ -514,8 +581,7 @@ static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx,
(void)snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES "); (void)snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES ");
memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, hmac_sha_ctl->key_slot, memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, hmac_sha_ctl->key_slot,
KEYSLOT_SIZE_BYTES); KEYSLOT_SIZE_BYTES);
ret = crypto_ahash_setkey(tfm, key_as_keyslot, KEYSLOT_SIZE_BYTES);
ret = crypto_ahash_setkey(tfm, key_as_keyslot, hmac_sha_ctl->key_length);
if (ret) { if (ret) {
CRYPTODEV_ERR("%s(): Failed to set keys for hmac: %d\n", __func__, ret); CRYPTODEV_ERR("%s(): Failed to set keys for hmac: %d\n", __func__, ret);
goto free_tfm; goto free_tfm;
@@ -535,6 +601,7 @@ static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx,
sha_state->hmac_sha_total_msg_length += hmac_sha_ctl->data_length; sha_state->hmac_sha_total_msg_length += hmac_sha_ctl->data_length;
sha_state->hmac_sha_init_done = 1; sha_state->hmac_sha_init_done = 1;
hmac_ctx->total_count = sha_state->hmac_sha_total_msg_length; hmac_ctx->total_count = sha_state->hmac_sha_total_msg_length;
hmac_ctx->token_id = hmac_sha_ctl->token_id;
if (hmac_sha_ctl->is_first == 1) if (hmac_sha_ctl->is_first == 1)
hmac_ctx->is_first = true; hmac_ctx->is_first = true;
@@ -656,7 +723,7 @@ static int tnvvtsec_crypto_aes_cmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, aes_cmac_ctl->key_slot, KEYSLOT_SIZE_BYTES); memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, aes_cmac_ctl->key_slot, KEYSLOT_SIZE_BYTES);
cmac_ctx->result = 0; cmac_ctx->result = 0;
ret = crypto_ahash_setkey(tfm, key_as_keyslot, aes_cmac_ctl->key_length); ret = crypto_ahash_setkey(tfm, key_as_keyslot, KEYSLOT_SIZE_BYTES);
if (ret) { if (ret) {
CRYPTODEV_ERR("%s(): Failed to set keys for cmac-tsec(aes): %d\n", __func__, ret); CRYPTODEV_ERR("%s(): Failed to set keys for cmac-tsec(aes): %d\n", __func__, ret);
ret = -EINVAL; ret = -EINVAL;
@@ -762,9 +829,9 @@ static int tnvvse_crypto_aes_cmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES "); ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES ");
memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, aes_cmac_ctl->key_slot, KEYSLOT_SIZE_BYTES); memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, aes_cmac_ctl->key_slot, KEYSLOT_SIZE_BYTES);
cmac_ctx->token_id = aes_cmac_ctl->token_id;
cmac_ctx->result = 0; cmac_ctx->result = 0;
ret = crypto_ahash_setkey(tfm, key_as_keyslot, aes_cmac_ctl->key_length); ret = crypto_ahash_setkey(tfm, key_as_keyslot, KEYSLOT_SIZE_BYTES);
if (ret) { if (ret) {
CRYPTODEV_ERR("%s(): Failed to set keys for cmac-vse(aes): %d\n", __func__, ret); CRYPTODEV_ERR("%s(): Failed to set keys for cmac-vse(aes): %d\n", __func__, ret);
goto free_req; goto free_req;
@@ -807,7 +874,7 @@ static int tnvvse_crypto_aes_gmac_init(struct tnvvse_crypto_ctx *ctx,
struct ahash_request *req; struct ahash_request *req;
const char *driver_name; const char *driver_name;
uint8_t iv[TEGRA_NVVSE_AES_GCM_IV_LEN]; uint8_t iv[TEGRA_NVVSE_AES_GCM_IV_LEN];
int ret = -ENOMEM, klen; int ret = -ENOMEM;
tfm = crypto_alloc_ahash("gmac-vse(aes)", 0, 0); tfm = crypto_alloc_ahash("gmac-vse(aes)", 0, 0);
if (IS_ERR(tfm)) { if (IS_ERR(tfm)) {
@@ -842,8 +909,7 @@ static int tnvvse_crypto_aes_gmac_init(struct tnvvse_crypto_ctx *ctx,
ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES "); ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES ");
memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, gmac_init_ctl->key_slot, KEYSLOT_SIZE_BYTES); memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, gmac_init_ctl->key_slot, KEYSLOT_SIZE_BYTES);
klen = gmac_init_ctl->key_length; ret = crypto_ahash_setkey(tfm, key_as_keyslot, KEYSLOT_SIZE_BYTES);
ret = crypto_ahash_setkey(tfm, key_as_keyslot, klen);
if (ret) { if (ret) {
CRYPTODEV_ERR("%s(): Failed to set keys for gmac-vse(aes): %d\n", __func__, ret); CRYPTODEV_ERR("%s(): Failed to set keys for gmac-vse(aes): %d\n", __func__, ret);
goto free_req; goto free_req;
@@ -851,6 +917,7 @@ static int tnvvse_crypto_aes_gmac_init(struct tnvvse_crypto_ctx *ctx,
memset(iv, 0, TEGRA_NVVSE_AES_GCM_IV_LEN); memset(iv, 0, TEGRA_NVVSE_AES_GCM_IV_LEN);
gmac_ctx->request_type = TEGRA_HV_VSE_GMAC_INIT; gmac_ctx->request_type = TEGRA_HV_VSE_GMAC_INIT;
gmac_ctx->key_instance_idx = gmac_init_ctl->key_instance_idx;
gmac_ctx->iv = iv; gmac_ctx->iv = iv;
ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_init(req)); ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_init(req));
@@ -878,7 +945,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify_init(struct tnvvse_crypto_ctx *ctx
char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,}; char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,};
struct crypto_ahash *tfm; struct crypto_ahash *tfm;
const char *driver_name; const char *driver_name;
int ret = -EINVAL, klen; int ret = -EINVAL;
if (!req) { if (!req) {
CRYPTODEV_ERR("%s AES-GMAC request not valid\n", __func__); CRYPTODEV_ERR("%s AES-GMAC request not valid\n", __func__);
@@ -894,6 +961,8 @@ static int tnvvse_crypto_aes_gmac_sign_verify_init(struct tnvvse_crypto_ctx *ctx
gmac_ctx = crypto_ahash_ctx(tfm); gmac_ctx = crypto_ahash_ctx(tfm);
gmac_ctx->node_id = ctx->node_id; gmac_ctx->node_id = ctx->node_id;
gmac_ctx->b_is_sm4 = gmac_sign_verify_ctl->b_is_sm4; gmac_ctx->b_is_sm4 = gmac_sign_verify_ctl->b_is_sm4;
gmac_ctx->release_key_flag = gmac_sign_verify_ctl->release_key_flag;
gmac_ctx->key_instance_idx = gmac_sign_verify_ctl->key_instance_idx;
driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
if (driver_name == NULL) { if (driver_name == NULL) {
@@ -911,9 +980,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify_init(struct tnvvse_crypto_ctx *ctx
ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES "); ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES ");
memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, gmac_sign_verify_ctl->key_slot, memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, gmac_sign_verify_ctl->key_slot,
KEYSLOT_SIZE_BYTES); KEYSLOT_SIZE_BYTES);
ret = crypto_ahash_setkey(tfm, key_as_keyslot, KEYSLOT_SIZE_BYTES);
klen = gmac_sign_verify_ctl->key_length;
ret = crypto_ahash_setkey(tfm, key_as_keyslot, klen);
if (ret) { if (ret) {
CRYPTODEV_ERR("%s(): Failed to set keys for gmac-vse(aes): %d\n", __func__, ret); CRYPTODEV_ERR("%s(): Failed to set keys for gmac-vse(aes): %d\n", __func__, ret);
goto out; goto out;
@@ -1007,6 +1074,9 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
goto free_req; goto free_req;
} }
gmac_ctx->release_key_flag = gmac_sign_verify_ctl->release_key_flag;
gmac_ctx->key_instance_idx = gmac_sign_verify_ctl->key_instance_idx;
if (gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_SIGN) if (gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_SIGN)
gmac_ctx->request_type = TEGRA_HV_VSE_GMAC_SIGN; gmac_ctx->request_type = TEGRA_HV_VSE_GMAC_SIGN;
else else
@@ -1044,6 +1114,9 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
} }
} }
if (gmac_sign_verify_ctl->release_key_flag)
ctx->allocated_key_slot_count -= 1;
if (gmac_sign_verify_ctl->is_last) { if (gmac_sign_verify_ctl->is_last) {
if (gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_VERIFY) if (gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_VERIFY)
gmac_sign_verify_ctl->result = gmac_ctx->result; gmac_sign_verify_ctl->result = gmac_ctx->result;
@@ -1135,6 +1208,8 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
aes_ctx = crypto_skcipher_ctx(tfm); aes_ctx = crypto_skcipher_ctx(tfm);
aes_ctx->node_id = ctx->node_id; aes_ctx->node_id = ctx->node_id;
aes_ctx->user_nonce = aes_enc_dec_ctl->user_nonce; aes_ctx->user_nonce = aes_enc_dec_ctl->user_nonce;
aes_ctx->release_key_flag = aes_enc_dec_ctl->release_key_flag;
if (aes_enc_dec_ctl->is_non_first_call != 0U) if (aes_enc_dec_ctl->is_non_first_call != 0U)
aes_ctx->b_is_first = 0U; aes_ctx->b_is_first = 0U;
else { else {
@@ -1158,16 +1233,6 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
pr_debug("%s(): The skcipher driver name is %s for %s\n", pr_debug("%s(): The skcipher driver name is %s for %s\n",
__func__, driver_name, aes_algo[aes_enc_dec_ctl->aes_mode]); __func__, driver_name, aes_algo[aes_enc_dec_ctl->aes_mode]);
if (((aes_enc_dec_ctl->key_length & CRYPTO_KEY_LEN_MASK) != TEGRA_CRYPTO_KEY_128_SIZE) &&
((aes_enc_dec_ctl->key_length & CRYPTO_KEY_LEN_MASK) != TEGRA_CRYPTO_KEY_192_SIZE) &&
((aes_enc_dec_ctl->key_length & CRYPTO_KEY_LEN_MASK) != TEGRA_CRYPTO_KEY_256_SIZE) &&
((aes_enc_dec_ctl->key_length & CRYPTO_KEY_LEN_MASK) != TEGRA_CRYPTO_KEY_512_SIZE)) {
ret = -EINVAL;
CRYPTODEV_ERR("%s(): crypt_req keylen(%d) invalid",
__func__, aes_enc_dec_ctl->key_length);
goto free_req;
}
crypto_skcipher_clear_flags(tfm, ~0); crypto_skcipher_clear_flags(tfm, ~0);
ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES "); ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES ");
@@ -1181,14 +1246,14 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
goto free_req; goto free_req;
} }
ret = crypto_skcipher_setkey(tfm, key_as_keyslot, aes_enc_dec_ctl->key_length); ret = crypto_skcipher_setkey(tfm, key_as_keyslot, KEYSLOT_SIZE_BYTES);
if (ret < 0) { if (ret < 0) {
CRYPTODEV_ERR("%s(): Failed to set key: %d\n", __func__, ret); CRYPTODEV_ERR("%s(): Failed to set key: %d\n", __func__, ret);
goto free_req; goto free_req;
} }
aes_ctx->user_src_buf_size = aes_enc_dec_ctl->data_length; aes_ctx->user_src_buf_size = aes_enc_dec_ctl->data_length;
aes_ctx->key_instance_idx = aes_enc_dec_ctl->key_instance_idx;
init_completion(&tcrypt_complete.restart); init_completion(&tcrypt_complete.restart);
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
@@ -1284,6 +1349,9 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
} }
} }
if (aes_enc_dec_ctl->release_key_flag)
ctx->allocated_key_slot_count -= 1;
free_req: free_req:
skcipher_request_free(req); skcipher_request_free(req);
@@ -1349,15 +1417,6 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
pr_debug("%s(): The aead driver name is %s for gcm-vse(aes)\n", pr_debug("%s(): The aead driver name is %s for gcm-vse(aes)\n",
__func__, driver_name); __func__, driver_name);
if ((aes_enc_dec_ctl->key_length != TEGRA_CRYPTO_KEY_128_SIZE) &&
(aes_enc_dec_ctl->key_length != TEGRA_CRYPTO_KEY_192_SIZE) &&
(aes_enc_dec_ctl->key_length != TEGRA_CRYPTO_KEY_256_SIZE)) {
ret = -EINVAL;
CRYPTODEV_ERR("%s(): crypt_req keylen(%d) invalid",
__func__, aes_enc_dec_ctl->key_length);
goto free_req;
}
if (aes_enc_dec_ctl->tag_length != TEGRA_NVVSE_AES_GCM_TAG_SIZE) { if (aes_enc_dec_ctl->tag_length != TEGRA_NVVSE_AES_GCM_TAG_SIZE) {
ret = -EINVAL; ret = -EINVAL;
CRYPTODEV_ERR("%s(): crypt_req taglen(%d) invalid", CRYPTODEV_ERR("%s(): crypt_req taglen(%d) invalid",
@@ -1371,7 +1430,7 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, aes_enc_dec_ctl->key_slot, memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, aes_enc_dec_ctl->key_slot,
KEYSLOT_SIZE_BYTES); KEYSLOT_SIZE_BYTES);
ret = crypto_aead_setkey(tfm, key_as_keyslot, aes_enc_dec_ctl->key_length); ret = crypto_aead_setkey(tfm, key_as_keyslot, KEYSLOT_SIZE_BYTES);
if (ret < 0) { if (ret < 0) {
CRYPTODEV_ERR("%s(): Failed to set key: %d\n", __func__, ret); CRYPTODEV_ERR("%s(): Failed to set key: %d\n", __func__, ret);
goto free_req; goto free_req;
@@ -1410,7 +1469,7 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
aes_ctx->user_src_buf = aes_enc_dec_ctl->src_buffer; aes_ctx->user_src_buf = aes_enc_dec_ctl->src_buffer;
aes_ctx->user_tag_buf = aes_enc_dec_ctl->tag_buffer; aes_ctx->user_tag_buf = aes_enc_dec_ctl->tag_buffer;
aes_ctx->user_dst_buf = aes_enc_dec_ctl->dest_buffer; aes_ctx->user_dst_buf = aes_enc_dec_ctl->dest_buffer;
aes_ctx->token_id = aes_enc_dec_ctl->token_id;
/* this field is unused by VSE driver and is being set only to pass the validation /* this field is unused by VSE driver and is being set only to pass the validation
* check in crypto_aead_decrypt. * check in crypto_aead_decrypt.
*/ */
@@ -1590,6 +1649,7 @@ static int tnvvse_crypto_dev_open(struct inode *inode, struct file *filp)
return -ENOMEM; return -ENOMEM;
} }
ctx->node_id = node_id; ctx->node_id = node_id;
ctx->allocated_key_slot_count = 0;
ctx->is_zero_copy_node = is_zero_copy_node; ctx->is_zero_copy_node = is_zero_copy_node;
ctx->rng_buff = kzalloc(NVVSE_MAX_RANDOM_NUMBER_LEN_SUPPORTED, GFP_KERNEL); ctx->rng_buff = kzalloc(NVVSE_MAX_RANDOM_NUMBER_LEN_SUPPORTED, GFP_KERNEL);
@@ -1638,6 +1698,9 @@ static int tnvvse_crypto_dev_release(struct inode *inode, struct file *filp)
nvvse_devnode[ctx->node_id].node_in_use = false; nvvse_devnode[ctx->node_id].node_in_use = false;
} }
if (ctx->allocated_key_slot_count > 0)
tegra_hv_vse_close_keyslot(ctx->node_id, ctx->key_grp_id);
kfree(ctx->sha_result); kfree(ctx->sha_result);
kfree(ctx->rng_buff); kfree(ctx->rng_buff);
kfree(ctx->sha_state.sha_intermediate_digest); kfree(ctx->sha_state.sha_intermediate_digest);
@@ -1669,6 +1732,10 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
struct tegra_nvvse_map_membuf_ctl *map_membuf_ctl; struct tegra_nvvse_map_membuf_ctl *map_membuf_ctl;
struct tegra_nvvse_unmap_membuf_ctl __user *arg_unmap_membuf_ctl; struct tegra_nvvse_unmap_membuf_ctl __user *arg_unmap_membuf_ctl;
struct tegra_nvvse_unmap_membuf_ctl *unmap_membuf_ctl; struct tegra_nvvse_unmap_membuf_ctl *unmap_membuf_ctl;
struct tegra_nvvse_allocate_key_slot_ctl __user *arg_key_slot_allocate_ctl;
struct tegra_nvvse_allocate_key_slot_ctl *key_slot_allocate_ctl;
struct tegra_nvvse_release_key_slot_ctl __user *arg_key_slot_release_ctl;
struct tegra_nvvse_release_key_slot_ctl *key_slot_release_ctl;
int ret = 0; int ret = 0;
/* /*
@@ -1689,6 +1756,8 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
case NVVSE_IOCTL_CMDID_AES_GMAC_SIGN_VERIFY: case NVVSE_IOCTL_CMDID_AES_GMAC_SIGN_VERIFY:
case NVVSE_IOCTL_CMDID_MAP_MEMBUF: case NVVSE_IOCTL_CMDID_MAP_MEMBUF:
case NVVSE_IOCTL_CMDID_UNMAP_MEMBUF: case NVVSE_IOCTL_CMDID_UNMAP_MEMBUF:
case NVVSE_IOCTL_CMDID_ALLOCATE_KEY_SLOT:
case NVVSE_IOCTL_CMDID_RELEASE_KEY_SLOT:
break; break;
default: default:
CRYPTODEV_ERR("%s(): unsupported zero copy node command(%08x)\n", __func__, CRYPTODEV_ERR("%s(): unsupported zero copy node command(%08x)\n", __func__,
@@ -2079,8 +2148,84 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
kfree(unmap_membuf_ctl); kfree(unmap_membuf_ctl);
break; break;
case NVVSE_IOCTL_CMDID_ALLOCATE_KEY_SLOT:
key_slot_allocate_ctl = kzalloc(sizeof(*key_slot_allocate_ctl), GFP_KERNEL);
if (!key_slot_allocate_ctl) {
CRYPTODEV_ERR("%s(): failed to allocate memory\n", __func__);
ret = -ENOMEM;
goto release_lock;
}
arg_key_slot_allocate_ctl = (void __user *)arg;
ret = copy_from_user(key_slot_allocate_ctl, arg_key_slot_allocate_ctl,
sizeof(*key_slot_allocate_ctl));
if (ret) {
CRYPTODEV_ERR("%s(): Failed to copy_from_user key_slot_allocate_ctl:%d\n",
__func__, ret);
kfree(key_slot_allocate_ctl);
goto release_lock;
}
ret = tnvvse_crypto_allocate_key_slot(ctx, key_slot_allocate_ctl);
if (ret) {
CRYPTODEV_ERR("%s(): Failed to allocate key slot:%d\n", __func__, ret);
kfree(key_slot_allocate_ctl);
goto release_lock;
}
ret = copy_to_user(arg_key_slot_allocate_ctl, key_slot_allocate_ctl,
sizeof(*key_slot_allocate_ctl));
if (ret) {
CRYPTODEV_ERR("%s(): Failed to copy_to_user key_slot_allocate_ctl:%d\n",
__func__, ret);
kfree(key_slot_allocate_ctl);
goto release_lock;
}
kfree(key_slot_allocate_ctl);
break;
case NVVSE_IOCTL_CMDID_RELEASE_KEY_SLOT:
key_slot_release_ctl = kzalloc(sizeof(*key_slot_release_ctl), GFP_KERNEL);
if (!key_slot_release_ctl) {
CRYPTODEV_ERR("%s(): failed to allocate memory\n", __func__);
ret = -ENOMEM;
goto release_lock;
}
arg_key_slot_release_ctl = (void __user *)arg;
ret = copy_from_user(key_slot_release_ctl, arg_key_slot_release_ctl,
sizeof(*key_slot_release_ctl));
if (ret) {
CRYPTODEV_ERR("%s(): Failed to copy_from_user key_slot_release_ctl:%d\n",
__func__, ret);
kfree(key_slot_release_ctl);
goto release_lock;
}
ret = tnvvse_crypto_release_key_slot(ctx, key_slot_release_ctl);
if (ret) {
CRYPTODEV_ERR("%s(): Failed to release key slot:%d\n", __func__, ret);
kfree(key_slot_release_ctl);
goto release_lock;
}
ret = copy_to_user(arg_key_slot_release_ctl, key_slot_release_ctl,
sizeof(*key_slot_release_ctl));
if (ret) {
CRYPTODEV_ERR("%s(): Failed to copy_to_user key_slot_release_ctl:%d\n",
__func__, ret);
kfree(key_slot_release_ctl);
goto release_lock;
}
kfree(key_slot_release_ctl);
break;
default: default:
CRYPTODEV_ERR("%s(): invalid ioctl code(%d[0x%08x])", __func__, ioctl_num, ioctl_num); CRYPTODEV_ERR("%s(): invalid ioctl code(%d[0x%08x])",
__func__, ioctl_num, ioctl_num);
ret = -EINVAL; ret = -EINVAL;
break; break;
} }

View File

@@ -32,6 +32,8 @@
#define TEGRA_NVVSE_CMDID_HMAC_SHA_SIGN_VERIFY 15 #define TEGRA_NVVSE_CMDID_HMAC_SHA_SIGN_VERIFY 15
#define TEGRA_NVVSE_CMDID_MAP_MEMBUF 17 #define TEGRA_NVVSE_CMDID_MAP_MEMBUF 17
#define TEGRA_NVVSE_CMDID_UNMAP_MEMBUF 18 #define TEGRA_NVVSE_CMDID_UNMAP_MEMBUF 18
#define TEGRA_NVVSE_CMDID_ALLOCATE_KEY_SLOT 19
#define TEGRA_NVVSE_CMDID_RELEASE_KEY_SLOT 20
/** Defines the length of the AES-CBC Initial Vector */ /** Defines the length of the AES-CBC Initial Vector */
#define TEGRA_NVVSE_AES_IV_LEN 16U #define TEGRA_NVVSE_AES_IV_LEN 16U
@@ -122,6 +124,28 @@ enum tegra_nvvse_cmac_type {
TEGRA_NVVSE_AES_CMAC_VERIFY, TEGRA_NVVSE_AES_CMAC_VERIFY,
}; };
/**
 * \brief Holds key slot allocation parameters exchanged with the
 * NVVSE_IOCTL_CMDID_ALLOCATE_KEY_SLOT ioctl. The key id and token id are
 * supplied by the caller; the key instance index is filled in by the
 * driver on successful allocation.
 */
struct tegra_nvvse_allocate_key_slot_ctl {
/** [in] Holds the key id */
uint8_t key_id[KEYSLOT_SIZE_BYTES];
/** [in] Holds the token id */
uint8_t token_id;
/** [out] Holds the Key instance index */
uint32_t key_instance_idx;
};
/** ioctl for allocating a key slot; read-write because key_instance_idx
 * is returned to user space. */
#define NVVSE_IOCTL_CMDID_ALLOCATE_KEY_SLOT _IOWR(TEGRA_NVVSE_IOC_MAGIC, \
TEGRA_NVVSE_CMDID_ALLOCATE_KEY_SLOT, \
struct tegra_nvvse_allocate_key_slot_ctl)
/**
 * \brief Holds key slot release parameters exchanged with the
 * NVVSE_IOCTL_CMDID_RELEASE_KEY_SLOT ioctl. Both fields identify the
 * slot to release and are supplied by the caller.
 */
struct tegra_nvvse_release_key_slot_ctl {
/** [in] Holds the key id */
uint8_t key_id[KEYSLOT_SIZE_BYTES];
/** [in] Holds the Key instance index */
uint32_t key_instance_idx;
};
/** ioctl for releasing a previously allocated key slot. */
#define NVVSE_IOCTL_CMDID_RELEASE_KEY_SLOT _IOWR(TEGRA_NVVSE_IOC_MAGIC, \
TEGRA_NVVSE_CMDID_RELEASE_KEY_SLOT, \
struct tegra_nvvse_release_key_slot_ctl)
/** /**
* \brief Holds SHA Update Header Params * \brief Holds SHA Update Header Params
*/ */
@@ -176,10 +200,8 @@ struct tegra_nvvse_hmac_sha_sv_ctl {
uint8_t is_last; uint8_t is_last;
/** [in] Holds a keyslot handle which is used for HMAC-SHA operation */ /** [in] Holds a keyslot handle which is used for HMAC-SHA operation */
uint8_t key_slot[KEYSLOT_SIZE_BYTES]; uint8_t key_slot[KEYSLOT_SIZE_BYTES];
/** [in] Holds the Key length /** [in] Holds the token id */
* Supported keylength is only 16 bytes and 32 bytes uint8_t token_id;
*/
uint8_t key_length;
/** [in] Holds a pointer to the input source buffer for which /** [in] Holds a pointer to the input source buffer for which
* HMAC-SHA is to be calculated/verified. * HMAC-SHA is to be calculated/verified.
*/ */
@@ -219,11 +241,15 @@ struct tegra_nvvse_aes_enc_dec_ctl {
uint8_t is_non_first_call; uint8_t is_non_first_call;
/** [in] Holds a keyslot number */ /** [in] Holds a keyslot number */
uint8_t key_slot[KEYSLOT_SIZE_BYTES]; uint8_t key_slot[KEYSLOT_SIZE_BYTES];
/** [in] Holds the Key length */ /** [in] Holds the token id */
/** Supported keylengths are 16 and 32 bytes */ uint8_t token_id;
uint8_t key_length; /** [inout] Holds the Key instance index
/** [in] Holds whether key configuration is required or not, 0 means do key configuration */ * This field is programmed by SE server and returned by NVVSE RM to client during
uint8_t skip_key; * key slot allocation call (NvVseAllocateKeySlot()).
*/
uint32_t key_instance_idx;
/** [in] Holds the release key flag */
uint32_t release_key_flag;
/** [in] Holds an AES Mode */ /** [in] Holds an AES Mode */
enum tegra_nvvse_aes_mode aes_mode; enum tegra_nvvse_aes_mode aes_mode;
/** [in] Holds a Boolean that specifies nonce is passed by user or not. /** [in] Holds a Boolean that specifies nonce is passed by user or not.
@@ -295,9 +321,8 @@ struct tegra_nvvse_aes_enc_dec_ctl {
struct tegra_nvvse_aes_gmac_init_ctl { struct tegra_nvvse_aes_gmac_init_ctl {
/** [in] Holds a keyslot number */ /** [in] Holds a keyslot number */
uint8_t key_slot[KEYSLOT_SIZE_BYTES]; uint8_t key_slot[KEYSLOT_SIZE_BYTES];
/** [in] Holds the Key length */ /** [in] Holds the key instance index */
/** Supported keylengths are 16 and 32 bytes */ uint32_t key_instance_idx;
uint8_t key_length;
/** [out] Initial Vector (IV) used for GMAC Sign and Verify */ /** [out] Initial Vector (IV) used for GMAC Sign and Verify */
uint8_t IV[TEGRA_NVVSE_AES_GCM_IV_LEN]; uint8_t IV[TEGRA_NVVSE_AES_GCM_IV_LEN];
}; };
@@ -325,10 +350,10 @@ struct tegra_nvvse_aes_gmac_sign_verify_ctl {
uint8_t is_last; uint8_t is_last;
/** [in] Holds a keyslot handle which is used for GMAC operation */ /** [in] Holds a keyslot handle which is used for GMAC operation */
uint8_t key_slot[KEYSLOT_SIZE_BYTES]; uint8_t key_slot[KEYSLOT_SIZE_BYTES];
/** [in] Holds the Key length /** [in] Holds the key instance index */
* Supported keylength is only 16 bytes and 32 bytes uint32_t key_instance_idx;
*/ /** [in] Holds the release key flag */
uint8_t key_length; uint32_t release_key_flag;
/** [in] Holds the Length of the input source buffer. /** [in] Holds the Length of the input source buffer.
* data_length shall not be "0" supported for single part sign and verify * data_length shall not be "0" supported for single part sign and verify
* data_length shall be multiple of 16 bytes if it is not the last chunk * data_length shall be multiple of 16 bytes if it is not the last chunk
@@ -407,10 +432,8 @@ struct tegra_nvvse_aes_cmac_sign_verify_ctl {
uint8_t is_last; uint8_t is_last;
/** [in] Holds a keyslot handle which is used for CMAC operation */ /** [in] Holds a keyslot handle which is used for CMAC operation */
uint8_t key_slot[KEYSLOT_SIZE_BYTES]; uint8_t key_slot[KEYSLOT_SIZE_BYTES];
/** [in] Holds the Key length /** [in] Holds the token id */
* Supported keylength is only 16 bytes and 32 bytes uint8_t token_id;
*/
uint8_t key_length;
/** [in] Holds the Length of the input source buffer. /** [in] Holds the Length of the input source buffer.
* data_length shall not be "0" supported for single part sign and verify * data_length shall not be "0" supported for single part sign and verify
* data_length shall be multiple of 16 bytes if it is not the last chunk * data_length shall be multiple of 16 bytes if it is not the last chunk