vse: Fix static violations in driver

Bug 5225204

JIRA ESSS-1846

Change-Id: I25268475765572b0cce18b78b7eda436e1c55d56
Signed-off-by: Khushi <khushi@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3339815
Reviewed-by: svcacv <svcacv@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Sandeep Trasi <strasi@nvidia.com>
Reviewed-by: Leo Chiu <lchiu@nvidia.com>
Authored by Khushi on 2025-04-14 06:22:31 +00:00, committed by Jon Hunter
parent bb2c8b1fc4
commit ae4f81c13d
3 changed files with 342 additions and 160 deletions
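The bulk of this change replaces signed `int err` captures of `copy_to_user()`/`copy_from_user()` results with a separate unsigned `uint64_t ret`, mapping any nonzero remainder to -EFAULT. Below is a minimal standalone sketch of that recurring pattern; the helper name and shape are illustrative, not part of this commit:

/*
 * Illustrative only -- not driver code. copy_to_user() returns the
 * number of bytes that could NOT be copied (an unsigned count), so the
 * patch keeps that count in its own unsigned variable and folds any
 * nonzero remainder into the signed errno-style result as -EFAULT.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static int copy_result_to_user(void __user *dst, const void *src, size_t len)
{
	uint64_t ret;	/* bytes not copied; never negative */
	int err = 0;	/* errno-style return value */

	ret = copy_to_user(dst, src, len);
	if (ret)
		err = -EFAULT;

	return err;
}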

View File

@@ -258,7 +258,7 @@
 #define TEGRA_VIRTUAL_SE_TIMEOUT_1S 1000000
-#define TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE 16
+#define TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE 16U
 #define TEGRA_VIRTUAL_SE_AES_CMAC_STATE_SIZE 16
@@ -321,6 +321,7 @@
 #define SHA_HASH_BUF_SIZE 1024U
 #define NVVSE_TSEC_CMD_STATUS_ERR_MASK ((uint32_t)0xFFFFFFU)
+#define UINT8_MAX (255)
 #define VSE_ERR(...) pr_err("tegra_hv_vse_safety " __VA_ARGS__)
@@ -352,7 +353,7 @@ enum rng_call {
 
 /* CMAC response */
 struct tegra_vse_cmac_data {
-	u8 status;
+	u32 status;
 	u8 data[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE];
 };
@@ -371,7 +372,7 @@ struct tegra_vse_priv_data {
 	struct tegra_virtual_se_dev *se_dev;
 	struct completion alg_complete;
 	int cmd;
-	int slot_num;
+	uint8_t slot_num;
 	struct scatterlist sg;
 	void *buf;
 	dma_addr_t buf_addr;
@@ -635,7 +636,7 @@ struct tegra_virtual_se_aes_req_context {
 	/* Operation type */
 	bool encrypt;
 	/* Engine id */
-	u8 engine_id;
+	uint32_t engine_id;
 };
 
 enum se_engine_id {
@@ -908,7 +909,8 @@ static int tegra_hv_vse_safety_send_ivc(
 		timeout--;
 	}
 
-	if (length > sizeof(struct tegra_virtual_se_ivc_msg_t)) {
+	if ((length <= 0) ||
+			length > sizeof(struct tegra_virtual_se_ivc_msg_t)) {
 		dev_err(se_dev->dev,
 			"Wrong write msg len %d\n", length);
 		return -E2BIG;
@@ -1019,7 +1021,7 @@ static int tegra_hv_vse_safety_send_ivc_wait(
 	}
 
 	err = host1x_syncpt_wait(sp, priv->syncpt_threshold,
-			(u32)SE_MAX_SCHEDULE_TIMEOUT, NULL);
+			SE_MAX_SCHEDULE_TIMEOUT, NULL);
 	if (err) {
 		dev_err(se_dev->dev, "timed out for syncpt %u threshold %u err %d\n",
 			priv->syncpt_id, priv->syncpt_threshold, err);
@@ -1302,6 +1304,11 @@ int tegra_hv_vse_release_keyslot(struct tegra_vse_key_slot_ctx *key_slot, uint32
 	ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_RELEASE_KEY;
 	memcpy(ivc_tx->keys.keyslot, key_slot->key_id, KEYSLOT_SIZE_BYTES);
 	ivc_tx->keys.token_id = key_slot->token_id;
+	if (key_slot->key_instance_idx > UINT8_MAX) {
+		dev_err(se_dev->dev, "Key instance index is greater than UINT8_MAX\n");
+		err = -EINVAL;
+		goto free_mem;
+	}
 	ivc_tx->keys.key_instance = key_slot->key_instance_idx;
 
 	priv->cmd = VIRTUAL_SE_KEY_SLOT;
@@ -1628,6 +1635,7 @@ static int tegra_hv_vse_safety_sha_op(struct tegra_virtual_se_sha_context *sha_c
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[sha_ctx->node_id].priv;
 	u64 msg_len = 0, temp_len = 0;
 	uint32_t engine_id;
+	uint64_t ret = 0;
 	int err = 0;
 	const struct tegra_vse_dma_buf *plaintext, *hash_result;
 	bool is_zero_copy;
@@ -1759,10 +1767,11 @@ static int tegra_hv_vse_safety_sha_op(struct tegra_virtual_se_sha_context *sha_c
 	}
 
 	if (is_last && sha_ctx->digest_size > 0) {
-		err = copy_to_user(sha_ctx->user_digest_buffer, hash_result->buf_ptr,
+		ret = copy_to_user(sha_ctx->user_digest_buffer, hash_result->buf_ptr,
 				sha_ctx->digest_size);
-		if (err) {
-			dev_err(se_dev->dev, "%s(): Failed to copy dst_buf: %d\n", __func__, err);
+		if (ret) {
+			dev_err(se_dev->dev, "%s(): Failed to copy dst_buf\n", __func__);
+			err = -EFAULT;
 			goto exit;
 		}
 	}
@@ -1996,6 +2005,7 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req,
 	struct tegra_virtual_se_hmac_sha_args *phmac;
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[hmac_ctx->node_id].ivck;
 	int err = 0;
+	uint64_t ret = 0;
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[hmac_ctx->node_id].priv;
 	const struct tegra_vse_dma_buf *src, *hash, *match;
@@ -2234,11 +2244,14 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req,
 				}
 			}
 		} else {
-			err = copy_to_user(hmac_ctx->user_digest_buffer,
+			ret = copy_to_user(hmac_ctx->user_digest_buffer,
 					hash->buf_ptr,
 					TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH);
-			if (err)
-				VSE_ERR("%s(): Failed to copy dst_buf: %d\n", __func__, err);
+			if (ret) {
+				VSE_ERR("%s(): Failed to copy dst_buf\n", __func__);
+				err = -EFAULT;
+				goto unmap_exit;
+			}
 		}
 	}
@@ -2469,6 +2482,7 @@ static int tegra_hv_vse_safety_process_aes_req(struct tegra_virtual_se_dev *se_d
 	struct tegra_virtual_se_ivc_hdr_t *ivc_hdr = NULL;
 	struct tegra_hv_ivc_cookie *pivck;
 	int err = 0;
+	uint64_t ret = 0;
 	struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL;
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[aes_ctx->node_id].priv;
 	union tegra_virtual_se_aes_args *aes;
@@ -2495,12 +2509,15 @@ static int tegra_hv_vse_safety_process_aes_req(struct tegra_virtual_se_dev *se_d
 		return -ENOMEM;
 	}
 
-	err = copy_from_user(src->buf_ptr, aes_ctx->user_src_buf,
-			aes_ctx->user_src_buf_size);
-	if (err) {
-		dev_err(req_ctx->se_dev->dev, "%s(): Failed to copy src_buf: %d\n", __func__, err);
-		goto exit;
+	if (aes_ctx->user_src_buf_size > 0) {
+		err = copy_from_user(src->buf_ptr, aes_ctx->user_src_buf,
+				aes_ctx->user_src_buf_size);
+		if (err) {
+			dev_err(req_ctx->se_dev->dev, "%s(): Failed to copy src_buf: %d\n",
+					__func__, err);
+			goto exit;
+		}
 	}
 
 	pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck;
 	ivc_hdr = &ivc_req_msg->ivc_hdr;
@@ -2549,12 +2566,15 @@ static int tegra_hv_vse_safety_process_aes_req(struct tegra_virtual_se_dev *se_d
 	}
 
 	if (priv->rx_status == 0U) {
-		err = copy_to_user(aes_ctx->user_dst_buf, src->buf_ptr,
-				aes_ctx->user_src_buf_size);
-		if (err) {
-			dev_err(se_dev->dev, "%s(): Failed to copy dst_buf: %d\n", __func__, err);
-			goto exit;
+		if (aes_ctx->user_src_buf_size > 0) {
+			ret = copy_to_user(aes_ctx->user_dst_buf, src->buf_ptr,
+					aes_ctx->user_src_buf_size);
+			if (ret) {
+				dev_err(se_dev->dev, "%s(): Failed to copy dst_buf\n", __func__);
+				err = -EFAULT;
+				goto exit;
+			}
 		}
 
 		if ((is_aes_mode_valid(req_ctx->op_mode) == 1)
 			&& (req_ctx->encrypt == true) && (aes_ctx->user_nonce == 0U))
 			memcpy(aes_ctx->iv, priv->iv, TEGRA_VIRTUAL_SE_AES_IV_SIZE);
@@ -2788,6 +2808,7 @@ static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req,
 	struct tegra_virtual_se_ivc_msg_t *ivc_req_msg;
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[cmac_ctx->node_id].ivck;
 	int err = 0;
+	uint64_t ret = 0;
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[cmac_ctx->node_id].priv;
 	uint32_t tsec_fw_err;
 	const struct tegra_vse_dma_buf *src, *mac, *fw_status;
@@ -2840,7 +2861,7 @@ static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req,
 	ivc_tx->tsec[0U].dst_addr = mac->buf_iova;
 	ivc_tx->tsec[0U].fw_status_addr = fw_status->buf_iova;
 	ivc_tx->tsec[0U].src_buf_size = cmac_ctx->user_src_buf_size;
-	ivc_tx->tsec[0U].keyslot = *((uint64_t *)cmac_ctx->key_slot);
+	memcpy(&ivc_tx->tsec[0U].keyslot, cmac_ctx->key_slot, sizeof(uint64_t));
 
 	if (cmac_ctx->request_type == TEGRA_HV_VSE_CMAC_SIGN) {
 		ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_TSEC_SIGN;
@@ -2892,10 +2913,11 @@ static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req,
 	if (cmac_ctx->request_type == TEGRA_HV_VSE_CMAC_SIGN) {
 		tsec_fw_err = (*((uint32_t *)fw_status->buf_ptr) & NVVSE_TSEC_CMD_STATUS_ERR_MASK);
 		if (tsec_fw_err == 0U) {
-			err = copy_to_user(cmac_ctx->user_mac_buf, mac->buf_ptr,
+			ret = copy_to_user(cmac_ctx->user_mac_buf, mac->buf_ptr,
 					TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE);
-			if (err) {
-				VSE_ERR("%s(): Failed to copy mac_buf: %d\n", __func__, err);
+			if (ret) {
+				VSE_ERR("%s(): Failed to copy mac_buf\n", __func__);
+				err = -EFAULT;
 				goto free_mem;
 			}
 		} else {
@@ -2936,6 +2958,7 @@ static int tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(
 	struct tegra_virtual_se_ivc_msg_t *ivc_req_msg;
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[cmac_ctx->node_id].ivck;
 	int err = 0;
+	uint64_t ret = 0;
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[cmac_ctx->node_id].priv;
 	u32 match_code = SE_HW_VALUE_MATCH_CODE;
 	u32 mac_buf_size = 16;
@@ -3040,11 +3063,12 @@ static int tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(
 	}
 
 	if (cmac_ctx->request_type == TEGRA_HV_VSE_CMAC_SIGN) {
 		if (priv->rx_status == 0) {
-			err = copy_to_user(cmac_ctx->user_mac_buf, (uint8_t *)mac->buf_ptr,
+			ret = copy_to_user(cmac_ctx->user_mac_buf, (uint8_t *)mac->buf_ptr,
 					TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE);
-			if (err) {
-				dev_err(se_dev->dev, "%s(): Failed to copy mac_buf: %d\n",
-					__func__, err);
+			if (ret) {
+				dev_err(se_dev->dev, "%s(): Failed to copy mac_buf\n",
+					__func__);
+				err = -EFAULT;
 				goto free_mem;
 			}
 		}
@@ -3073,6 +3097,7 @@ static int tegra_hv_vse_safety_cmac_sv_op(struct ahash_request *req,
 	u32 blocks_to_process, last_block_bytes = 0;
 	unsigned int total_len;
 	int err = 0;
+	uint64_t ret = 0;
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[cmac_ctx->node_id].priv;
 	const struct tegra_vse_dma_buf *src;
@@ -3209,11 +3234,12 @@ static int tegra_hv_vse_safety_cmac_sv_op(struct ahash_request *req,
 	if (cmac_ctx->request_type == TEGRA_HV_VSE_CMAC_SIGN) {
 		if (priv->rx_status == 0) {
-			err = copy_to_user(cmac_ctx->user_mac_buf, priv->cmac.data,
+			ret = copy_to_user(cmac_ctx->user_mac_buf, priv->cmac.data,
 					TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE);
-			if (err) {
-				dev_err(se_dev->dev, "%s(): Failed to copy mac_buf: %d\n",
-					__func__, err);
+			if (ret) {
+				dev_err(se_dev->dev, "%s(): Failed to copy mac_buf\n",
+					__func__);
+				err = -EFAULT;
 				goto free_mem;
 			}
 		}
@@ -3292,8 +3318,7 @@ static void tegra_hv_vse_safety_cmac_req_deinit(struct ahash_request *req)
 static int tegra_hv_vse_safety_cmac_update(struct ahash_request *req)
 {
-	struct tegra_virtual_se_aes_cmac_context *cmac_ctx =
-		crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct tegra_virtual_se_aes_cmac_context *cmac_ctx;
 	struct tegra_virtual_se_dev *se_dev;
 	int ret = 0;
@@ -3811,6 +3836,12 @@ static int tegra_hv_vse_safety_get_random(struct tegra_virtual_se_rng_context *r
 	/* calculate aligned size to the next multiple of TEGRA_VIRTUAL_SE_RNG_DT_SIZE */
 	src = tegra_hv_vse_get_dma_buf(rng_ctx->node_id, AES_SRC_BUF_IDX, aligned_size);
 	if (!src) {
+		if (aligned_size < TEGRA_VIRTUAL_SE_RNG_DT_SIZE) {
+			dev_err(se_dev->dev,
+				"%s: aligned_size %u is less than RNG_DT_SIZE %u\n",
+				__func__, aligned_size, TEGRA_VIRTUAL_SE_RNG_DT_SIZE);
+			return -EINVAL;
+		}
 		aligned_size -= TEGRA_VIRTUAL_SE_RNG_DT_SIZE;
 		/* If the aligned size is greater than the max dma_buf,
 		 * decrease the aligned size by one alignment unit.
@@ -3849,7 +3880,7 @@ static int tegra_hv_vse_safety_get_random(struct tegra_virtual_se_rng_context *r
 		else
 			chunk_size = min(bytes_remaining, rng_ctx->hwrng_dma_buf.buf_len);
 
-		aligned_size = chunk_size & ~(TEGRA_VIRTUAL_SE_RNG_DT_SIZE - 1);
+		aligned_size = chunk_size & (~(TEGRA_VIRTUAL_SE_RNG_DT_SIZE - 1U));
 		if (aligned_size < TEGRA_VIRTUAL_SE_RNG_DT_SIZE)
 			aligned_size = TEGRA_VIRTUAL_SE_RNG_DT_SIZE;
@@ -3870,6 +3901,11 @@ static int tegra_hv_vse_safety_get_random(struct tegra_virtual_se_rng_context *r
 		rdata_addr = (rdata + offset);
 		memcpy(rdata_addr, src->buf_ptr, copy_size);
 		bytes_remaining -= copy_size;
-		offset += copy_size;
+		if (offset > UINT_MAX - copy_size) {
+			dev_err(se_dev->dev, "%s: offset %u is greater than UINT_MAX\n",
+				__func__, offset);
+			goto exit;
+		} else
+			offset += copy_size;
 	}
@@ -4017,6 +4053,7 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req,
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck;
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[aes_ctx->node_id].priv;
 	int err = 0;
+	uint64_t ret = 0;
 	const struct tegra_vse_dma_buf *src, *aad, *tag;
 
 	if (aes_ctx->user_aad_buf_size > 0) {
@@ -4061,7 +4098,7 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req,
 	if (encrypt) {
 		tag = tegra_hv_vse_get_dma_buf(aes_ctx->node_id, AES_TAG_BUF_IDX,
 				TEGRA_VIRTUAL_SE_AES_GCM_TAG_IV_SIZE);
-		if (!tag->buf_ptr) {
+		if (!tag) {
 			dev_err(se_dev->dev, "%s tag_buf is NULL\n", __func__);
 			err = -ENOMEM;
 			goto free_exit;
@@ -4145,7 +4182,7 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req,
 	ivc_tx->aes.op_gcm.src_buf_size = aes_ctx->user_src_buf_size;
 	ivc_tx->aes.op_gcm.dst_buf_size = aes_ctx->user_src_buf_size;
 	if (aes_ctx->user_src_buf_size > 0) {
-		ivc_tx->aes.op_gcm.src_addr = (uint32_t)src->buf_iova;
+		ivc_tx->aes.op_gcm.src_addr = src->buf_iova;
 		/* same source buffer can be used for destination buffer */
 		ivc_tx->aes.op_gcm.dst_addr = ivc_tx->aes.op_gcm.src_addr;
 	}
@@ -4181,10 +4218,11 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req,
 		memcpy(aes_ctx->iv, priv->iv, crypto_aead_ivsize(tfm));
 	}
 	if (aes_ctx->user_tag_buf_size > 0) {
-		err = copy_to_user(aes_ctx->user_tag_buf, tag->buf_ptr,
+		ret = copy_to_user(aes_ctx->user_tag_buf, tag->buf_ptr,
 				aes_ctx->user_tag_buf_size);
-		if (err) {
-			dev_err(se_dev->dev, "%s(): Failed to copy tag_buf %d\n", __func__, err);
+		if (ret) {
+			dev_err(se_dev->dev, "%s(): Failed to copy tag_buf\n", __func__);
+			err = -EFAULT;
 			goto free_exit;
 		}
 	}
@@ -4209,10 +4247,11 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req,
 	}
 
 	if (aes_ctx->user_src_buf_size > 0) {
-		err = copy_to_user(aes_ctx->user_dst_buf, src->buf_ptr,
+		ret = copy_to_user(aes_ctx->user_dst_buf, src->buf_ptr,
 				aes_ctx->user_src_buf_size);
-		if (err) {
-			dev_err(se_dev->dev, "%s(): Failed to copy dst_buf %d\n", __func__, err);
+		if (ret) {
+			dev_err(se_dev->dev, "%s(): Failed to copy dst_buf\n", __func__);
+			err = -EFAULT;
 			goto free_exit;
 		}
 	}
@@ -4236,6 +4275,7 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req,
 	struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck;
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[aes_ctx->node_id].priv;
 	int err = 0;
+	uint64_t ret = 0;
 	u32 match_code = SE_HW_VALUE_MATCH_CODE;
 	u32 mismatch_code = SE_HW_VALUE_MISMATCH_CODE;
 	const struct tegra_vse_dma_buf *src, *aad, *tag, *comp;
@@ -4339,7 +4379,7 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req,
 	ivc_tx->aes.op_gcm.src_buf_size = aes_ctx->user_src_buf_size;
 	ivc_tx->aes.op_gcm.dst_buf_size = aes_ctx->user_src_buf_size;
 	if (aes_ctx->user_src_buf_size > 0) {
-		ivc_tx->aes.op_gcm.src_addr = (uint32_t)src->buf_iova;
+		ivc_tx->aes.op_gcm.src_addr = src->buf_iova;
 		ivc_tx->aes.op_gcm.src_buf_size |= (uint32_t)((src->buf_iova >> 8)
 				& ~((1U << 24) - 1U));
@@ -4387,10 +4427,11 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req,
 	}
 
 	/* copy tag to req for encryption */
 	if (aes_ctx->user_tag_buf_size > 0) {
-		err = copy_to_user(aes_ctx->user_tag_buf, tag->buf_ptr,
+		ret = copy_to_user(aes_ctx->user_tag_buf, tag->buf_ptr,
 				aes_ctx->user_tag_buf_size);
-		if (err) {
-			dev_err(se_dev->dev, "%s(): Failed to copy tag_buf %d\n", __func__, err);
+		if (ret) {
+			dev_err(se_dev->dev, "%s(): Failed to copy tag_buf\n", __func__);
+			err = -EFAULT;
 			goto free_exit;
 		}
 	}
@@ -4404,10 +4445,11 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req,
 	}
 
 	if (aes_ctx->user_src_buf_size > 0) {
-		err = copy_to_user(aes_ctx->user_dst_buf, src->buf_ptr,
+		ret = copy_to_user(aes_ctx->user_dst_buf, src->buf_ptr,
 				aes_ctx->user_src_buf_size);
-		if (err) {
-			dev_err(se_dev->dev, "%s(): Failed to copy dst_buf %d\n", __func__, err);
+		if (ret) {
+			dev_err(se_dev->dev, "%s(): Failed to copy dst_buf\n", __func__);
+			err = -EFAULT;
 			goto free_exit;
 		}
 	}
@@ -4502,9 +4544,11 @@ static int tegra_vse_aes_gmac_sv_check_params(struct ahash_request *req, bool is
 	if (gmac_ctx->node_id >= MAX_NUMBER_MISC_DEVICES) {
 		dev_err(se_dev->dev, "%s: Node id is not valid\n", __func__);
-		err = -EINVAL;
+		return -EINVAL;
 	}
 
+	is_zero_copy = g_crypto_to_ivc_map[gmac_ctx->node_id].is_zero_copy_node;
+
 	if (gmac_ctx->is_key_slot_allocated == false) {
 		dev_err(se_dev->dev, "%s: keyslot is not allocated\n", __func__);
 		err = -EINVAL;
@@ -4515,13 +4559,12 @@ static int tegra_vse_aes_gmac_sv_check_params(struct ahash_request *req, bool is
 		err = -EINVAL;
 	}
 
-	is_zero_copy = g_crypto_to_ivc_map[gmac_ctx->node_id].is_zero_copy_node;
 	if (!is_zero_copy) {
 		if (gmac_ctx->user_aad_buf == NULL) {
 			dev_err(se_dev->dev, "%s: aad buf is NULL\n", __func__);
 			err = -EINVAL;
 		}
-		if (is_last != 0U) {
+		if (is_last != 0) {
 			if (gmac_ctx->authsize > 0 && gmac_ctx->user_tag_buf == NULL) {
 				dev_err(se_dev->dev,
 					"%s: tag buf length exceeds max supported size\n", __func__);
@@ -4530,12 +4573,13 @@ static int tegra_vse_aes_gmac_sv_check_params(struct ahash_request *req, bool is
 		}
 	} else {
 		if (gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN) {
-			if (is_last == 1U && gmac_ctx->user_tag_iova == 0) {
+			if (is_last == 1 && gmac_ctx->user_tag_iova == 0) {
 				dev_err(se_dev->dev, "%s: user tag iova is invalid\n", __func__);
 				err = -EINVAL;
 			}
 		}
 	}
 
 	return err;
 }
@@ -4733,6 +4777,7 @@ static int tegra_hv_vse_aes_gmac_sv_op(struct ahash_request *req,
 	struct tegra_hv_ivc_cookie *pivck;
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[gmac_ctx->node_id].priv;
 	int err = 0;
+	uint64_t ret = 0;
 	const struct tegra_vse_dma_buf *aad, *tag;
 
 	se_dev = g_crypto_to_ivc_map[gmac_ctx->node_id].se_dev;
@@ -4840,11 +4885,12 @@ static int tegra_hv_vse_aes_gmac_sv_op(struct ahash_request *req,
 	if (is_last && gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN) {
 		/* copy tag to req for last GMAC_SIGN requests */
 		if (gmac_ctx->authsize > 0) {
-			err = copy_to_user(gmac_ctx->user_tag_buf, tag->buf_ptr,
+			ret = copy_to_user(gmac_ctx->user_tag_buf, tag->buf_ptr,
 					gmac_ctx->authsize);
-			if (err) {
-				dev_err(se_dev->dev, "%s(): Failed to copy mac_buf: %d\n",
-					__func__, err);
+			if (ret) {
+				dev_err(se_dev->dev, "%s(): Failed to copy mac_buf\n",
+					__func__);
+				err = -EFAULT;
 				goto free_exit;
 			}
 		}
@@ -4890,6 +4936,7 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req,
 	struct tegra_hv_ivc_cookie *pivck;
 	struct tegra_vse_priv_data *priv = g_crypto_to_ivc_map[gmac_ctx->node_id].priv;
 	int err = 0;
+	uint64_t ret = 0;
 	u32 match_code = SE_HW_VALUE_MATCH_CODE;
 	u32 mismatch_code = SE_HW_VALUE_MISMATCH_CODE;
 	const struct tegra_vse_dma_buf *aad, *tag, *comp;
@@ -5037,11 +5084,12 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req,
 	if (gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN) {
 		/* copy tag to req for last GMAC_SIGN requests */
 		if (!is_zero_copy && (gmac_ctx->authsize > 0)) {
-			err = copy_to_user(gmac_ctx->user_tag_buf, tag->buf_ptr,
+			ret = copy_to_user(gmac_ctx->user_tag_buf, tag->buf_ptr,
 					gmac_ctx->authsize);
-			if (err) {
-				dev_err(se_dev->dev, "%s(): Failed to copy mac_buf: %d\n",
-					__func__, err);
+			if (ret) {
+				dev_err(se_dev->dev, "%s(): Failed to copy mac_buf\n",
+					__func__);
+				err = -EFAULT;
 				goto free_exit;
 			}
 		}
@@ -5704,7 +5752,12 @@ static int tegra_hv_vse_safety_hwrng_read(struct hwrng *rng, void *buf, size_t s
 		return 0;
 
 	ctx = (struct tegra_virtual_se_rng_context *)rng->priv;
-	return tegra_hv_vse_safety_get_random(ctx, buf, size, HW_RNG);
+
+	if (size > UINT_MAX) {
+		VSE_ERR("%s: size %zu is greater than UINT_MAX\n", __func__, size);
+		return -EINVAL;
+	}
+
+	return tegra_hv_vse_safety_get_random(ctx, buf, (unsigned int)size, HW_RNG);
 }
 #endif /* CONFIG_HW_RANDOM */
@@ -5815,7 +5868,8 @@ static int se_get_nvhost_dev(struct tegra_virtual_se_dev *se_dev)
 	return 0;
 }
 
-static int tegra_vse_validate_ivc_node_id(uint32_t ivc_id, uint32_t instance_id, int32_t engine_id)
+static int tegra_vse_validate_ivc_node_id(uint32_t ivc_id, uint32_t instance_id,
+		unsigned int engine_id)
 {
 	uint32_t cnt;
@@ -6084,11 +6138,11 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
 	struct device_node *np;
 	int err = 0;
 	int i;
-	unsigned int ivc_id;
+	uint32_t ivc_id;
 	unsigned int mempool_id;
 	unsigned int engine_id;
 	const struct of_device_id *match;
-	struct tegra_vse_soc_info *pdata = NULL;
+	const struct tegra_vse_soc_info *pdata = NULL;
 	static uint32_t s_node_id;
 	uint32_t ivc_cnt, cnt, instance_id;
 	bool has_zero_copy_prop;
@@ -6197,7 +6251,7 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
 			dev_err(&pdev->dev, "Error: No device match found\n");
 			return -ENODEV;
 		}
-		pdata = (struct tegra_vse_soc_info *)match->data;
+		pdata = (const struct tegra_vse_soc_info *)match->data;
 	} else {
 		pdata =
 			(struct tegra_vse_soc_info *)pdev->id_entry->driver_data;
@@ -6337,12 +6391,18 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
 		dev_info(se_dev->dev, "Virtual SE channel number: %d", ivc_id);
 
-		crypto_dev->ivck = tegra_hv_ivc_reserve(NULL, ivc_id, NULL);
-		if (IS_ERR_OR_NULL(crypto_dev->ivck)) {
-			dev_err(&pdev->dev, "Failed reserve channel number\n");
-			err = -ENODEV;
-			goto exit;
+		if (ivc_id >= 0) {
+			crypto_dev->ivck = tegra_hv_ivc_reserve(NULL, ivc_id, NULL);
+			if (IS_ERR_OR_NULL(crypto_dev->ivck)) {
+				dev_err(&pdev->dev, "Failed reserve channel number\n");
+				err = -ENODEV;
+				goto exit;
+			}
+		} else {
+			dev_err(se_dev->dev, "Failed to get irq for node id\n");
+			err = -EINVAL;
+			goto exit;
 		}
 
 		tegra_hv_ivc_channel_reset(crypto_dev->ivck);
@@ -6400,13 +6460,19 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
 			goto exit;
 		}
 
-		if (request_irq(crypto_dev->ivck->irq,
-			tegra_vse_irq_handler, 0, "vse", &crypto_dev->node_id)) {
-			dev_err(se_dev->dev, "Failed to request irq %d for node id %u\n",
-				crypto_dev->ivck->irq, s_node_id);
-			err = -EINVAL;
-			goto exit;
+		if (crypto_dev->ivck->irq >= 0) {
+			if (request_irq((uint32_t)crypto_dev->ivck->irq,
+				tegra_vse_irq_handler, 0, "vse", &crypto_dev->node_id)) {
+				dev_err(se_dev->dev, "Failed to request irq %d for node id %u\n",
+					crypto_dev->ivck->irq, s_node_id);
+				err = -EINVAL;
+				goto exit;
+			}
+		} else {
+			dev_err(se_dev->dev, "Failed to get irq for node id\n");
+			err = -EINVAL;
+			goto exit;
 		}
 
 		crypto_dev->wait_interrupt = FIRST_REQ_INTERRUPT;
 		err = tegra_hv_vse_allocate_se_dma_bufs(&g_node_dma[s_node_id], se_dev->dev,
 				crypto_dev);
@@ -6503,9 +6569,8 @@ exit:
 	return err;
 }
 
-static void tegra_hv_vse_safety_shutdown(struct platform_device *pdev)
+static void tegra_hv_vse_safety_shutdown(struct tegra_virtual_se_dev *se_dev)
 {
-	struct tegra_virtual_se_dev *se_dev = platform_get_drvdata(pdev);
 	uint32_t cnt;
 
 	/* skip checking pending request for the node with "nvidia,gcm-dma-support"
@@ -6530,6 +6595,13 @@ static void tegra_hv_vse_safety_shutdown(struct platform_device *pdev)
 	}
 }
 
+static void tegra_hv_vse_safety_shutdown_wrapper(struct platform_device *pdev)
+{
+	struct tegra_virtual_se_dev *se_dev = platform_get_drvdata(pdev);
+
+	tegra_hv_vse_safety_shutdown(se_dev);
+}
+
 static int tegra_hv_vse_safety_remove(struct platform_device *pdev)
 {
 	int i;
@@ -6552,28 +6624,55 @@ static int tegra_hv_vse_safety_remove(struct platform_device *pdev)
 #if defined(CONFIG_PM)
 static int tegra_hv_vse_safety_suspend(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
+	int i;
+
+	if (gcm_supports_dma) {
+		if (gpcdma_dev == dev)
+			return 0;
+	}
+
+	for (i = 0U; i < MAX_NUMBER_MISC_DEVICES; i++) {
+		if ((g_crypto_to_ivc_map[i].node_in_use)
+			&& (g_crypto_to_ivc_map[i].se_dev->dev == dev))
+			break;
+	}
 
 	/* Keep engine in suspended state */
-	tegra_hv_vse_safety_shutdown(pdev);
+	if (i >= MAX_NUMBER_MISC_DEVICES) {
+		dev_err(dev, "Failed to find se_dev for dev %s\n", dev->kobj.name);
+		return -ENODEV;
+	}
+	tegra_hv_vse_safety_shutdown(g_crypto_to_ivc_map[i].se_dev);
 
 	return 0;
 }
 
 static int tegra_hv_vse_safety_resume(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct tegra_virtual_se_dev *se_dev = platform_get_drvdata(pdev);
+	int i;
 
 	/* skip checking pending request for the node with "nvidia,gcm-dma-support"
	 * which only used to allocate buffer for gpcdma
	 * for other vse nodes which doesn't have "nvidia,gcm-dma-support",
	 * it will still set engine suspend state to 1.
	 */
-	if (gcm_supports_dma)
-		return 0;
+	if (gcm_supports_dma) {
+		if (gpcdma_dev == dev)
+			return 0;
+	}
+
+	for (i = 0U; i < MAX_NUMBER_MISC_DEVICES; i++) {
+		if ((g_crypto_to_ivc_map[i].node_in_use)
+			&& (g_crypto_to_ivc_map[i].se_dev->dev == dev)) {
+			break;
+		}
+	}
 
 	/* Set engine to suspend state to 1 to make it as false */
-	atomic_set(&se_dev->se_suspended, 0);
+	if (i >= MAX_NUMBER_MISC_DEVICES) {
+		VSE_ERR("%s(): Failed to find se_dev for dev\n", __func__);
+		return -ENODEV;
+	}
+	atomic_set(&g_crypto_to_ivc_map[i].se_dev->se_suspended, 0);
 
 	return 0;
 }
@@ -6599,7 +6698,7 @@ static int tegra_hv_vse_safety_remove_wrapper(struct platform_device *pdev)
 static struct platform_driver tegra_hv_vse_safety_driver = {
 	.probe = tegra_hv_vse_safety_probe,
 	.remove = tegra_hv_vse_safety_remove_wrapper,
-	.shutdown = tegra_hv_vse_safety_shutdown,
+	.shutdown = tegra_hv_vse_safety_shutdown_wrapper,
 	.driver = {
 		.name = "tegra_hv_vse_safety",
 		.owner = THIS_MODULE,
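Several hunks in this file add the same defensive shape: verify that an unsigned value fits its destination, or that an addition cannot wrap, before performing it (the UINT8_MAX keyslot check, the size > UINT_MAX guard in hwrng_read, the offset overflow check in get_random). A small standalone C sketch of the wrap-around guard, with illustrative names only:

/*
 * Illustrative sketch of the overflow guard used before
 * "offset += copy_size" above: the comparison is rearranged so that
 * it cannot itself overflow.
 */
#include <limits.h>

static int advance_offset(unsigned int *offset, unsigned int copy_size)
{
	/* equivalent to "offset + copy_size > UINT_MAX", without wrapping */
	if (*offset > UINT_MAX - copy_size)
		return -1;

	*offset += copy_size;
	return 0;
}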

View File

@@ -49,7 +49,7 @@ struct tegra_vse_membuf_ctx {
 struct tegra_vse_key_slot_ctx {
 	uint8_t key_id[KEYSLOT_SIZE_BYTES];
 	uint8_t token_id;
-	uint32_t key_instance_idx;
+	uint8_t key_instance_idx;
 	uint32_t key_grp_id;
 };
@@ -102,7 +102,7 @@ struct tegra_virtual_se_dev {
 	unsigned int engine_id;
 	/* Engine suspend state */
 	atomic_t se_suspended;
-	struct tegra_vse_soc_info *chipdata;
+	const struct tegra_vse_soc_info *chipdata;
 #if defined(CONFIG_HW_RANDOM)
 	/* Integration with hwrng framework */
 	struct hwrng *hwrng;
@@ -273,7 +273,7 @@ struct tegra_virtual_se_hmac_sha_context {
 struct tegra_virtual_se_membuf_context {
 	int fd;
-	int64_t iova;
+	uint64_t iova;
 	uint32_t node_id;
 };

View File

@@ -206,7 +206,13 @@ static int tnvvse_crypto_allocate_key_slot(struct tnvvse_crypto_ctx *ctx,
 		return -EINVAL;
 	}
 
-	ctx->key_grp_id = current->pid;
+	if (current->pid >= 0) {
+		ctx->key_grp_id = (uint32_t)current->pid;
+	} else {
+		CRYPTODEV_ERR("%s(): Invalid PID\n", __func__);
+		return -EINVAL;
+	}
 
 	memset(&key_slot_params, 0, sizeof(key_slot_params));
 	memcpy(key_slot_params.key_id, key_slot_allocate_ctl->key_id, KEYSLOT_SIZE_BYTES);
 	key_slot_params.token_id = key_slot_allocate_ctl->token_id;
@@ -240,13 +246,19 @@ static int tnvvse_crypto_release_key_slot(struct tnvvse_crypto_ctx *ctx,
 		CRYPTODEV_ERR("Key slot release ctl is NULL\n");
 		return -EINVAL;
 	}
 
-	if (ctx->allocated_key_slot_count == 0) {
+	if (ctx->allocated_key_slot_count == 0U) {
 		CRYPTODEV_ERR("No key slots allocated to release\n");
 		return -EINVAL;
 	}
 
 	memset(&vse_key_slot, 0, sizeof(vse_key_slot));
 	memcpy(vse_key_slot.key_id, key_slot_release_ctl->key_id, sizeof(vse_key_slot.key_id));
+
+	if (key_slot_release_ctl->key_instance_idx > 255) {
+		CRYPTODEV_ERR("key_instance_idx value %u exceeds maximum allowed value 255\n",
+			key_slot_release_ctl->key_instance_idx);
+		return -EINVAL;
+	}
+
 	vse_key_slot.key_instance_idx = key_slot_release_ctl->key_instance_idx;
 
 	err = tegra_hv_vse_release_keyslot(&vse_key_slot, ctx->node_id);
@@ -282,7 +294,7 @@ static int tnvvse_crypto_validate_sha_update_req(struct tnvvse_crypto_ctx *ctx,
 	}
 
 	if (sha_update_ctl->init_only != 0U) {
-		if (sha_state->sha_init_done != 0U) {
+		if (sha_state->sha_init_done != 0) {
 			CRYPTODEV_INFO("%s(): SHA init is already done\n", __func__);
 			ret = -EAGAIN;
 			goto exit;
@@ -309,6 +321,20 @@
 		goto exit;
 	}
 
+	if ((sha_state->sha_init_done == 0) && (sha_update_ctl->is_first == 0U)) {
+		CRYPTODEV_ERR("%s(): SHA First req is not yet received\n", __func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (((sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE128) ||
+			(sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE256)) &&
+			sha_update_ctl->digest_size == 0) {
+		CRYPTODEV_ERR("%s: Digest Buffer Size is invalid\n", __func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+
 	if (sha_update_ctl->input_buffer_size == 0U) {
 		if (sha_update_ctl->is_last == 0U) {
 			CRYPTODEV_ERR("%s(): zero length non-last request is not supported\n",
@@ -407,8 +433,14 @@ static int tnvvse_crypto_sha_update(struct tnvvse_crypto_ctx *ctx,
 	if (sha_update_ctl->is_first != 0U)
 		sha_state->sha_init_done = 1U;
 
+	if ((sha_type < TEGRA_NVVSE_SHA_TYPE_SHA256) || (sha_type >= TEGRA_NVVSE_SHA_TYPE_MAX)) {
+		CRYPTODEV_ERR("%s(): SHA Type requested %d is not supported\n", __func__, sha_type);
+		ret = -EINVAL;
+		goto exit;
+	}
+
 	tfm = crypto_alloc_ahash(sha_alg_names[sha_type], 0, 0);
-	if (IS_ERR(tfm)) {
+	if (!tfm || IS_ERR(tfm)) {
 		CRYPTODEV_ERR("%s(): Failed to load transform for %s:%ld\n",
 			__func__, sha_alg_names[sha_type], PTR_ERR(tfm));
 		ret = PTR_ERR(tfm);
@@ -421,7 +453,7 @@
 		crypto_free_ahash(tfm);
 		CRYPTODEV_ERR("%s(): Failed to allocate request\n", __func__);
 		ret = -ENOMEM;
-		goto exit;
+		goto free_tfm;
 	}
 
 	sha_ctx = crypto_ahash_ctx(tfm);
@@ -432,6 +464,11 @@
 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 			tnvvse_crypto_complete, &sha_complete);
 
+	if (sha_state->sha_total_msg_length > (ULONG_MAX - sha_ctx->user_src_buf_size)) {
+		CRYPTODEV_ERR("%s(): Total message length overflow\n", __func__);
+		ret = -EINVAL;
+		goto free_req;
+	}
+
 	sha_ctx->user_src_buf_size = sha_update_ctl->input_buffer_size;
 	sha_state->sha_total_msg_length += sha_ctx->user_src_buf_size;
@@ -453,26 +490,26 @@
 	ret = wait_async_op(&sha_complete, crypto_ahash_init(req));
 	if (ret) {
 		CRYPTODEV_ERR("%s(): Failed to initialize ahash: %d\n", __func__, ret);
-		sha_state->sha_init_done = 0;
-		sha_state->sha_total_msg_length = 0;
-		goto free_tfm;
+		sha_state->sha_init_done = 0U;
+		sha_state->sha_total_msg_length = 0U;
+		goto free_req;
 	}
 
-	if (sha_update_ctl->is_last == 0) {
+	if (sha_update_ctl->is_last == 0U) {
 		ret = wait_async_op(&sha_complete, crypto_ahash_update(req));
 		if (ret) {
 			CRYPTODEV_ERR("%s(): Failed to ahash_update: %d\n", __func__, ret);
-			sha_state->sha_init_done = 0;
-			sha_state->sha_total_msg_length = 0;
-			goto free_tfm;
+			sha_state->sha_init_done = 0U;
+			sha_state->sha_total_msg_length = 0U;
+			goto free_req;
 		}
 	} else {
 		ret = wait_async_op(&sha_complete, crypto_ahash_finup(req));
 		if (ret) {
 			CRYPTODEV_ERR("%s(): Failed to ahash_finup: %d\n", __func__, ret);
-			sha_state->sha_init_done = 0;
-			sha_state->sha_total_msg_length = 0;
-			goto free_tfm;
+			sha_state->sha_init_done = 0U;
+			sha_state->sha_total_msg_length = 0U;
+			goto free_req;
 		}
 
 		if ((sha_type != TEGRA_NVVSE_SHA_TYPE_SHAKE128)
@@ -482,18 +519,19 @@
 				__func__, sha_alg_names[sha_type],
 				sha_update_ctl->digest_size);
 			ret = -EINVAL;
-			goto free_tfm;
+			goto free_req;
 		}
 	}
 
 	/* Reset sha state */
-	sha_state->sha_init_done = 0;
-	sha_state->sha_total_msg_length = 0;
+	sha_state->sha_init_done = 0U;
+	sha_state->sha_total_msg_length = 0U;
 	}
 
-free_tfm:
+free_req:
 	if (req)
 		ahash_request_free(req);
+
+free_tfm:
 	if (tfm)
 		crypto_free_ahash(tfm);
@@ -571,7 +609,7 @@ static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx,
 	}
 
 	tfm = crypto_alloc_ahash("hmac-sha256-vse", 0, 0);
-	if (IS_ERR(tfm)) {
+	if (!tfm || IS_ERR(tfm)) {
 		ret = PTR_ERR(tfm);
 		CRYPTODEV_ERR("%s(): Failed to allocate ahash for hmac-sha256-vse: %d\n",
 			__func__, ret);
@@ -587,7 +625,7 @@
 		crypto_free_ahash(tfm);
 		CRYPTODEV_ERR("%s(): Failed to allocate request for cmac-vse(aes)\n", __func__);
 		ret = -ENOMEM;
-		goto exit;
+		goto free_tfm;
 	}
 
 	init_completion(&hmac_sha_complete.restart);
ret = crypto_ahash_setkey(tfm, key_as_keyslot, KEYSLOT_SIZE_BYTES); ret = crypto_ahash_setkey(tfm, key_as_keyslot, KEYSLOT_SIZE_BYTES);
if (ret) { if (ret) {
CRYPTODEV_ERR("%s(): Failed to set keys for hmac: %d\n", __func__, ret); CRYPTODEV_ERR("%s(): Failed to set keys for hmac: %d\n", __func__, ret);
goto free_tfm; goto free_req;
} }
hmac_ctx->user_src_buf_size = hmac_sha_ctl->data_length; hmac_ctx->user_src_buf_size = hmac_sha_ctl->data_length;
@@ -615,6 +653,17 @@
 	hmac_ctx->result = 0;
 
+	if (sha_state->hmac_sha_total_msg_length > (ULONG_MAX - hmac_sha_ctl->data_length)) {
+		CRYPTODEV_ERR("%s(): Total message length would overflow\n", __func__);
+		ret = -EOVERFLOW;
+		goto free_req;
+	}
+
+	if (sha_state->hmac_sha_total_msg_length > (ULONG_MAX - hmac_sha_ctl->data_length)) {
+		CRYPTODEV_ERR("%s(): Total message length would overflow\n", __func__);
+		ret = -EOVERFLOW;
+		goto free_tfm;
+	}
+
 	sha_state->hmac_sha_total_msg_length += hmac_sha_ctl->data_length;
 	sha_state->hmac_sha_init_done = 1;
 	hmac_ctx->total_count = sha_state->hmac_sha_total_msg_length;
@@ -630,7 +679,7 @@
 		CRYPTODEV_ERR("%s(): Failed to initialize ahash: %d\n", __func__, ret);
 		sha_state->hmac_sha_init_done = 0;
 		sha_state->hmac_sha_total_msg_length = 0UL;
-		goto free_tfm;
+		goto free_req;
 	}
 
 	hmac_ctx->user_src_buf = hmac_sha_ctl->src_buffer;
@@ -642,7 +691,7 @@
 			CRYPTODEV_ERR("%s(): Failed to ahash_update: %d\n", __func__, ret);
 			sha_state->hmac_sha_init_done = 0;
 			sha_state->hmac_sha_total_msg_length = 0UL;
-			goto free_tfm;
+			goto free_req;
 		}
 	} else {
 		ret = wait_async_op(&hmac_sha_complete, crypto_ahash_finup(req));
@@ -650,7 +699,7 @@
 			CRYPTODEV_ERR("%s(): Failed to ahash_finup: %d\n", __func__, ret);
 			sha_state->hmac_sha_init_done = 0;
 			sha_state->hmac_sha_total_msg_length = 0UL;
-			goto free_tfm;
+			goto free_req;
 		}
 
 		if (hmac_sha_ctl->hmac_sha_type == TEGRA_NVVSE_HMAC_SHA_VERIFY)
@@ -660,9 +709,10 @@
 		sha_state->hmac_sha_total_msg_length = 0;
 	}
 
-free_tfm:
+free_req:
 	if (req)
 		ahash_request_free(req);
+
+free_tfm:
 	if (tfm)
 		crypto_free_ahash(tfm);
@@ -1139,7 +1189,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
 	}
 
 	if (gmac_sign_verify_ctl->release_key_flag)
-		ctx->allocated_key_slot_count -= 1;
+		ctx->allocated_key_slot_count -= 1U;
 
 	if (gmac_sign_verify_ctl->is_last) {
 		if (gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_VERIFY)
@@ -1207,6 +1257,7 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
 	struct crypto_skcipher *tfm;
 	struct skcipher_request *req = NULL;
 	int ret = 0;
+	uint64_t err = 0;
 	struct tnvvse_crypto_completion tcrypt_complete;
 	struct tegra_virtual_se_aes_context *aes_ctx;
 	char aes_algo[5][20] = {"cbc-vse(aes)", "ctr-vse(aes)", "gcm-vse(aes)", "cbc-vse(aes)",
@@ -1257,7 +1308,7 @@
 	pr_debug("%s(): The skcipher driver name is %s for %s\n",
 		__func__, driver_name, aes_algo[aes_enc_dec_ctl->aes_mode]);
 
-	crypto_skcipher_clear_flags(tfm, ~0);
+	crypto_skcipher_clear_flags(tfm, ~0U);
 	ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES ");
 	memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, aes_enc_dec_ctl->key_slot,
@@ -1332,10 +1383,12 @@
 		crypto_skcipher_decrypt(req);
 	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
 		/* crypto driver is asynchronous */
-		ret = wait_for_completion_timeout(&tcrypt_complete.restart,
+		err = wait_for_completion_timeout(&tcrypt_complete.restart,
 				msecs_to_jiffies(5000));
-		if (ret == 0)
+		if (err == 0) {
+			ret = -ETIMEDOUT;
 			goto free_req;
+		}
 
 		if (tcrypt_complete.req_err < 0) {
 			ret = tcrypt_complete.req_err;
@@ -1373,8 +1426,8 @@
 		}
 	}
 
-	if (aes_enc_dec_ctl->release_key_flag)
-		ctx->allocated_key_slot_count -= 1;
+	if (aes_enc_dec_ctl->release_key_flag && ctx->allocated_key_slot_count > 0U)
+		ctx->allocated_key_slot_count -= 1U;
 
 free_req:
 	skcipher_request_free(req);
@@ -1392,6 +1445,7 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
 	struct crypto_aead *tfm;
 	struct aead_request *req = NULL;
 	int32_t ret = 0;
+	uint64_t err = 0;
 	struct tnvvse_crypto_completion tcrypt_complete;
 	struct tegra_virtual_se_aes_context *aes_ctx;
 	const char *driver_name;
@@ -1448,7 +1502,7 @@
 		goto free_req;
 	}
 
-	crypto_aead_clear_flags(tfm, ~0);
+	crypto_aead_clear_flags(tfm, ~0U);
 	ret = snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES ");
 	memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, aes_enc_dec_ctl->key_slot,
@@ -1502,10 +1556,12 @@
 	ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
 	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
 		/* crypto driver is asynchronous */
-		ret = wait_for_completion_timeout(&tcrypt_complete.restart,
+		err = wait_for_completion_timeout(&tcrypt_complete.restart,
 				msecs_to_jiffies(5000));
-		if (ret == 0)
+		if (err == 0) {
+			ret = -ETIMEDOUT;
 			goto free_req;
+		}
 
 		if (tcrypt_complete.req_err < 0) {
 			ret = tcrypt_complete.req_err;
@@ -1537,6 +1593,7 @@ static int tnvvse_crypto_get_aes_drng(struct tnvvse_crypto_ctx *ctx,
 	struct tegra_virtual_se_rng_context *rng_ctx;
 	struct crypto_rng *rng;
 	int ret = -ENOMEM;
+	uint64_t err = 0;
 
 	if ((aes_drng_ctl->data_length > ctx->max_rng_buff) ||
 		(aes_drng_ctl->data_length == 0U)) {
@@ -1565,12 +1622,14 @@
 		goto free_rng;
 	}
 
-	ret = copy_to_user((void __user *)aes_drng_ctl->dest_buff,
+	err = copy_to_user((void __user *)aes_drng_ctl->dest_buff,
 			(const void *)ctx->rng_buff, aes_drng_ctl->data_length);
-	if (ret) {
-		CRYPTODEV_ERR("%s(): Failed to copy_to_user for length %d: %d\n",
-			__func__, aes_drng_ctl->data_length, ret);
-	}
+	if (err) {
+		CRYPTODEV_ERR("%s(): Failed to copy_to_user for length %d\n",
+			__func__, aes_drng_ctl->data_length);
+		ret = -EFAULT;
+	} else
+		ret = 0;
 
 free_rng:
 	crypto_free_rng(rng);
@@ -1673,7 +1732,7 @@ static int tnvvse_crypto_dev_open(struct inode *inode, struct file *filp)
 		return -ENOMEM;
 	}
 
 	ctx->node_id = node_id;
-	ctx->allocated_key_slot_count = 0;
+	ctx->allocated_key_slot_count = 0U;
 	ctx->is_zero_copy_node = is_zero_copy_node;
 
 	ctx->rng_buff = kzalloc(NVVSE_MAX_RANDOM_NUMBER_LEN_SUPPORTED, GFP_KERNEL);
@@ -1722,7 +1781,7 @@ static int tnvvse_crypto_dev_release(struct inode *inode, struct file *filp)
 		nvvse_devnode[ctx->node_id].node_in_use = false;
 	}
 
-	if (ctx->allocated_key_slot_count > 0)
+	if (ctx->allocated_key_slot_count > 0U)
 		tegra_hv_vse_close_keyslot(ctx->node_id, ctx->key_grp_id);
 
 	kfree(ctx->sha_result);
@@ -1761,6 +1820,7 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
 	struct tegra_nvvse_release_key_slot_ctl __user *arg_key_slot_release_ctl;
 	struct tegra_nvvse_release_key_slot_ctl *key_slot_release_ctl;
 	int ret = 0;
+	uint64_t err = 0;
 
 	/*
 	 * Avoid processing ioctl if the file has been closed.
@@ -1823,6 +1883,15 @@
 			goto release_lock;
 		}
 
+		if ((sha_update_ctl->sha_type < TEGRA_NVVSE_SHA_TYPE_SHA256) ||
+			(sha_update_ctl->sha_type > TEGRA_NVVSE_SHA_TYPE_MAX)) {
+			CRYPTODEV_ERR("%s(): Invalid sha_type value: %d\n", __func__,
+				sha_update_ctl->sha_type);
+			kfree(sha_update_ctl);
+			ret = -EINVAL;
+			goto release_lock;
+		}
+
 		ret = tnvvse_crypto_sha_update(ctx, sha_update_ctl);
 		kfree(sha_update_ctl);
@@ -1857,7 +1926,7 @@
 			ret = copy_to_user(&arg_hmac_sha_sv_ctl->result, &hmac_sha_sv_ctl->result,
 					sizeof(uint8_t));
 			if (ret)
-				CRYPTODEV_ERR("%s(): Failed to copy_to_user:%d\n", __func__, ret);
+				CRYPTODEV_ERR("%s(): Failed to copy_to_user\n", __func__);
 		}
 
 		kfree(hmac_sha_sv_ctl);
@@ -1903,7 +1972,7 @@
 				aes_enc_dec_ctl->initial_counter,
 				sizeof(aes_enc_dec_ctl->initial_counter));
 			if (ret) {
-				CRYPTODEV_ERR("%s(): Failed to copy_to_user:%d\n", __func__, ret);
+				CRYPTODEV_ERR("%s(): Failed to copy_to_user\n", __func__);
 				kfree(aes_enc_dec_ctl);
 				goto release_lock;
 			}
@@ -1936,10 +2005,11 @@
 		}
 
 		/* Copy IV returned by VSE */
-		ret = copy_to_user(arg_aes_gmac_init_ctl->IV, aes_gmac_init_ctl->IV,
+		err = copy_to_user(arg_aes_gmac_init_ctl->IV, aes_gmac_init_ctl->IV,
 				sizeof(aes_gmac_init_ctl->IV));
-		if (ret) {
-			CRYPTODEV_ERR("%s(): Failed to copy_to_user:%d\n", __func__, ret);
+		if (err) {
+			CRYPTODEV_ERR("%s(): Failed to copy_to_user\n", __func__);
+			ret = -EFAULT;
 			kfree(aes_gmac_init_ctl);
 			goto release_lock;
 		}
@@ -1972,11 +2042,15 @@
 		}
 
 		if (aes_gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_VERIFY) {
-			ret = copy_to_user(&arg_aes_gmac_sign_verify_ctl->result,
+			err = copy_to_user(&arg_aes_gmac_sign_verify_ctl->result,
 					&aes_gmac_sign_verify_ctl->result,
 					sizeof(uint8_t));
-			if (ret)
-				CRYPTODEV_ERR("%s(): Failed to copy_to_user:%d\n", __func__, ret);
+			if (err) {
+				CRYPTODEV_ERR("%s(): Failed to copy_to_user\n", __func__);
+				ret = -EFAULT;
+				kfree(aes_gmac_sign_verify_ctl);
+				goto release_lock;
+			}
 		}
 
 		kfree(aes_gmac_sign_verify_ctl);
@@ -2123,11 +2197,12 @@
 			goto release_lock;
 		}
 
-		ret = copy_to_user(arg_map_membuf_ctl, map_membuf_ctl,
+		err = copy_to_user(arg_map_membuf_ctl, map_membuf_ctl,
 				sizeof(*map_membuf_ctl));
-		if (ret) {
-			CRYPTODEV_ERR("%s(): Failed to copy_to_user map_membuf_ctl:%d\n",
-				__func__, ret);
+		if (err) {
+			CRYPTODEV_ERR("%s(): Failed to copy_to_user map_membuf_ctl\n",
+				__func__);
+			ret = -EFAULT;
 			kfree(map_membuf_ctl);
 			goto release_lock;
 		}
@@ -2160,11 +2235,12 @@
 			goto release_lock;
 		}
 
-		ret = copy_to_user(arg_unmap_membuf_ctl, unmap_membuf_ctl,
+		err = copy_to_user(arg_unmap_membuf_ctl, unmap_membuf_ctl,
 				sizeof(*unmap_membuf_ctl));
-		if (ret) {
-			CRYPTODEV_ERR("%s(): Failed to copy_to_user unmap_membuf_ctl:%d\n",
-				__func__, ret);
+		if (err) {
+			CRYPTODEV_ERR("%s(): Failed to copy_to_user unmap_membuf_ctl\n",
+				__func__);
+			ret = -EFAULT;
 			kfree(unmap_membuf_ctl);
 			goto release_lock;
 		}
@@ -2198,11 +2274,12 @@
 			goto release_lock;
 		}
 
-		ret = copy_to_user(arg_key_slot_allocate_ctl, key_slot_allocate_ctl,
+		err = copy_to_user(arg_key_slot_allocate_ctl, key_slot_allocate_ctl,
 				sizeof(*key_slot_allocate_ctl));
-		if (ret) {
-			CRYPTODEV_ERR("%s(): Failed to copy_to_user key_slot_allocate_ctl:%d\n",
-				__func__, ret);
+		if (err) {
+			CRYPTODEV_ERR("%s(): Failed to copy_to_user key_slot_allocate_ctl\n",
+				__func__);
+			ret = -EFAULT;
 			kfree(key_slot_allocate_ctl);
 			goto release_lock;
 		}
@@ -2235,11 +2312,12 @@
 			goto release_lock;
 		}
 
-		ret = copy_to_user(arg_key_slot_release_ctl, key_slot_release_ctl,
+		err = copy_to_user(arg_key_slot_release_ctl, key_slot_release_ctl,
 				sizeof(*key_slot_release_ctl));
-		if (ret) {
-			CRYPTODEV_ERR("%s(): Failed to copy_to_user key_slot_release_ctl:%d\n",
-				__func__, ret);
+		if (err) {
+			CRYPTODEV_ERR("%s(): Failed to copy_to_user key_slot_release_ctl\n",
+				__func__);
+			ret = -EFAULT;
 			kfree(key_slot_release_ctl);
 			goto release_lock;
 		}
@@ -2284,6 +2362,7 @@
 {
 	struct tegra_nvvse_get_ivc_db *get_ivc_db;
 	int ret = 0;
+	uint64_t err = 0;
 
 	if (ioctl_num == NVVSE_IOCTL_CMDID_GET_IVC_DB) {
 		get_ivc_db = kzalloc(sizeof(*get_ivc_db), GFP_KERNEL);
@@ -2300,10 +2379,11 @@
 			goto end;
 		}
 
-		ret = copy_to_user((void __user *)arg, &ivc_database, sizeof(ivc_database));
-		if (ret) {
-			CRYPTODEV_ERR("%s(): Failed to copy_to_user ivc_database:%d\n", __func__, ret);
+		err = copy_to_user((void __user *)arg, &ivc_database, sizeof(ivc_database));
+		if (err) {
+			CRYPTODEV_ERR("%s(): Failed to copy_to_user ivc_database\n", __func__);
 			kfree(get_ivc_db);
+			ret = -EFAULT;
 			goto end;
 		}
@@ -2348,7 +2428,7 @@
 	};
 	char const numbers[] = "0123456789";
 	char *node_name;
-	uint32_t str_len;
+	size_t str_len;
 
 	CRYPTODEV_INFO("%s(): init start\n", __func__);
@@ -2420,7 +2500,7 @@
 		}
 
 		str_len = strlen(node_prefix[ivc_db[cnt].engine_id]);
-		if (str_len > (MISC_DEVICE_NAME_LEN - 3U)) {
+		if (str_len > (MISC_DEVICE_NAME_LEN - 3)) {
 			CRYPTODEV_ERR("%s: buffer overflown for misc dev %u\n", __func__, cnt);
 			ret = -EINVAL;
 			goto fail;
@@ -2461,6 +2541,9 @@ fail:
 		nvvse_devnode[ctr].g_misc_devices = NULL;
 		mutex_destroy(&nvvse_devnode[ctr].lock);
 	}
+
+	kfree(misc);
+
 	return ret;
 }
 
 module_init(tnvvse_crypto_device_init);