diff --git a/drivers/crypto/tegra-hv-vse-safety.c b/drivers/crypto/tegra-hv-vse-safety.c
index 22df88c9..9405d064 100644
--- a/drivers/crypto/tegra-hv-vse-safety.c
+++ b/drivers/crypto/tegra-hv-vse-safety.c
@@ -939,6 +939,7 @@ static int tegra_hv_vse_safety_send_ivc_wait(
 
 	/* Return error if engine is in suspended state */
 	if (atomic_read(&se_dev->se_suspended)) {
+		dev_err(se_dev->dev, "Engine is in suspended state\n");
 		err = -ENODEV;
 		goto exit;
 	}
@@ -1149,8 +1150,10 @@ static int tegra_vse_validate_cmac_params(struct tegra_virtual_se_aes_cmac_conte
 
 static int tegra_vse_validate_aes_rng_param(struct tegra_virtual_se_rng_context *rng_ctx)
 {
-	if (rng_ctx == NULL)
+	if (rng_ctx == NULL) {
+		VSE_ERR("%s: rng_ctx is NULL\n", __func__);
 		return -EINVAL;
+	}
 
 	if (rng_ctx->node_id >= MAX_NUMBER_MISC_DEVICES) {
 		VSE_ERR("%s: Node id is not valid\n", __func__);
@@ -1511,8 +1514,10 @@ static int tegra_hv_vse_safety_sha_update(struct ahash_request *req)
 	se_dev = g_crypto_to_ivc_map[sha_ctx->node_id].se_dev;
 
 	/* Return error if engine is in suspended state */
-	if (atomic_read(&se_dev->se_suspended))
+	if (atomic_read(&se_dev->se_suspended)) {
+		VSE_ERR("%s: Engine is in suspended state\n", __func__);
 		return -ENODEV;
+	}
 
 	ret = tegra_hv_vse_safety_sha_op(sha_ctx, false);
 	if (ret)
@@ -1576,12 +1581,14 @@ static int tegra_hv_vse_safety_sha_finup(struct ahash_request *req)
 
 static int tegra_hv_vse_safety_sha_final(struct ahash_request *req)
 {
 	// Unsupported
+	VSE_ERR("%s: This callback is not supported\n", __func__);
 	return -EINVAL;
 }
 
 static int tegra_hv_vse_safety_sha_digest(struct ahash_request *req)
 {
 	// Unsupported
+	VSE_ERR("%s: This callback is not supported\n", __func__);
 	return -EINVAL;
 }
@@ -1600,8 +1607,10 @@ static int tegra_hv_vse_safety_hmac_sha_setkey(struct crypto_ahash *tfm, const u
 	}
 
 	hmac_ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-	if (!hmac_ctx)
+	if (!hmac_ctx) {
+		VSE_ERR("%s: HMAC SHA ctx not valid\n", __func__);
 		return -EINVAL;
+	}
 
 	if (hmac_ctx->node_id >= MAX_NUMBER_MISC_DEVICES) {
 		VSE_ERR("%s: Node id is not valid\n", __func__);
@@ -1910,7 +1919,7 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req,
 		if (priv->rx_status == 0) {
 			hmac_ctx->result = 0;
 		} else if (priv->rx_status == TEGRA_VIRTUAL_SE_ERR_MAC_INVALID) {
-			dev_dbg(se_dev->dev, "%s: tag mismatch", __func__);
+			dev_info(se_dev->dev, "%s: tag mismatch", __func__);
 			hmac_ctx->result = 1;
 		} else {
 			err = status_to_errno(priv->rx_status);
@@ -1921,7 +1930,7 @@
 		if (memcmp(match->buf_ptr, &matchcode, 4) == 0) {
 			hmac_ctx->result = 0;
 		} else if (memcmp(match->buf_ptr, &mismatch_code, 4) == 0) {
-			dev_dbg(se_dev->dev, "%s: tag mismatch", __func__);
+			dev_info(se_dev->dev, "%s: tag mismatch", __func__);
 			hmac_ctx->result = 1;
 		} else {
 			dev_err(se_dev->dev, "%s: invalid tag match code",
@@ -1980,8 +1989,10 @@ static int tegra_hv_vse_safety_hmac_sha_update(struct ahash_request *req)
 	se_dev = g_crypto_to_ivc_map[hmac_ctx->node_id].se_dev;
 
 	/* Return error if engine is in suspended state */
-	if (atomic_read(&se_dev->se_suspended))
+	if (atomic_read(&se_dev->se_suspended)) {
+		VSE_ERR("%s: Engine is in suspended state\n", __func__);
 		return -ENODEV;
+	}
 
 	ret = tegra_hv_vse_safety_hmac_sha_sv_op(req, hmac_ctx, false);
 	if (ret)
@@ -2033,8 +2044,10 @@ static int tegra_hv_vse_safety_hmac_sha_finup(struct ahash_request *req)
 	se_dev = g_crypto_to_ivc_map[hmac_ctx->node_id].se_dev;
 
 	/* Return error if engine is in suspended state */
-	if (atomic_read(&se_dev->se_suspended))
+	if (atomic_read(&se_dev->se_suspended)) {
+		VSE_ERR("%s: Engine is in suspended state\n", __func__);
 		return -ENODEV;
+	}
 
 	ret = tegra_hv_vse_safety_hmac_sha_sv_op(req, hmac_ctx, true);
 	if (ret)
@@ -2049,12 +2062,14 @@ static int tegra_hv_vse_safety_hmac_sha_finup(struct ahash_request *req)
 
 static int tegra_hv_vse_safety_hmac_sha_final(struct ahash_request *req)
 {
 	// Unsupported
+	VSE_ERR("%s: This callback is not supported\n", __func__);
 	return -EINVAL;
 }
 
 static int tegra_hv_vse_safety_hmac_sha_digest(struct ahash_request *req)
 {
 	// Unsupported
+	VSE_ERR("%s: This callback is not supported\n", __func__);
 	return -EINVAL;
 }
@@ -2968,8 +2983,10 @@ static int tegra_hv_vse_safety_cmac_init(struct ahash_request *req)
 	se_dev = g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
 
 	/* Return error if engine is in suspended state */
-	if (atomic_read(&se_dev->se_suspended))
+	if (atomic_read(&se_dev->se_suspended)) {
+		VSE_ERR("%s: Engine is in suspended state\n", __func__);
 		return -ENODEV;
+	}
 
 	cmac_ctx->digest_size = crypto_ahash_digestsize(tfm);
 	cmac_ctx->is_first = true;
@@ -3018,8 +3035,10 @@ static int tegra_hv_vse_safety_cmac_update(struct ahash_request *req)
 	se_dev = g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
 
 	/* Return error if engine is in suspended state */
-	if (atomic_read(&se_dev->se_suspended))
+	if (atomic_read(&se_dev->se_suspended)) {
+		VSE_ERR("%s: Engine is in suspended state\n", __func__);
 		return -ENODEV;
+	}
 	/* Do not process data in given request */
 	if (se_dev->chipdata->cmac_hw_verify_supported)
 		ret = tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(req, cmac_ctx, false);
@@ -3115,8 +3134,10 @@ static int tegra_hv_tsec_safety_cmac_finup(struct ahash_request *req)
 	se_dev = g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
 
 	/* Return error if engine is in suspended state */
-	if (atomic_read(&se_dev->se_suspended))
+	if (atomic_read(&se_dev->se_suspended)) {
+		VSE_ERR("%s: Engine is in suspended state\n", __func__);
 		return -ENODEV;
+	}
 
 	ret = tegra_hv_vse_safety_tsec_sv_op(req, cmac_ctx);
 	if (ret)
@@ -3135,8 +3156,10 @@ static int tegra_hv_vse_safety_cmac_digest(struct ahash_request *req)
 			g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
 
 	/* Return error if engine is in suspended state */
-	if (atomic_read(&se_dev->se_suspended))
+	if (atomic_read(&se_dev->se_suspended)) {
+		VSE_ERR("%s: Engine is in suspended state\n", __func__);
 		return -ENODEV;
+	}
 
 	return tegra_hv_vse_safety_cmac_init(req) ?: tegra_hv_vse_safety_cmac_final(req);
 }
@@ -3494,10 +3517,13 @@ static int tegra_hv_vse_safety_get_random(struct tegra_virtual_se_rng_context *r
 	struct tegra_vse_tag *priv_data_ptr;
 	const struct tegra_vse_dma_buf *src;
 
-	if (atomic_read(&se_dev->se_suspended))
+	if (atomic_read(&se_dev->se_suspended)) {
+		VSE_ERR("%s: Engine is in suspended state\n", __func__);
 		return -ENODEV;
+	}
 
 	if (dlen == 0) {
+		VSE_ERR("%s: Zero Data length is not supported\n", __func__);
 		return -EINVAL;
 	}
 
@@ -4098,7 +4124,7 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req,
 	} else {
 		if (memcmp(comp->buf_ptr, &match_code, 4) != 0) {
 			if (memcmp(comp->buf_ptr, &mismatch_code, 4) == 0)
-				dev_dbg(se_dev->dev, "%s: tag mismatch\n", __func__);
+				dev_info(se_dev->dev, "%s: tag mismatch\n", __func__);
 			err = -EINVAL;
 			goto free_exit;
 		}
@@ -4566,9 +4592,10 @@ static int tegra_hv_vse_aes_gmac_sv_op(struct ahash_request *req,
 	}
 	if (priv->rx_status != 0) {
-		if (priv->rx_status == 11U)
+		if (priv->rx_status == TEGRA_VIRTUAL_SE_ERR_MAC_INVALID) {
+			dev_info(se_dev->dev, "%s: tag mismatch", __func__);
 			gmac_ctx->result = 1;
-		else
+		} else
 			err = status_to_errno(priv->rx_status);
 	} else {
 		gmac_ctx->result = 0;
 	}
@@ -4752,10 +4779,13 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req,
 	} else {
 		if (memcmp(comp->buf_ptr, &match_code, 4) == 0)
 			gmac_ctx->result = 0;
-		else if (memcmp(comp->buf_ptr, &mismatch_code, 4) == 0)
+		else if (memcmp(comp->buf_ptr, &mismatch_code, 4) == 0) {
+			dev_info(se_dev->dev, "%s: tag mismatch", __func__);
 			gmac_ctx->result = 1;
-		else
+		} else {
+			dev_err(se_dev->dev, "%s: invalid tag match code", __func__);
 			err = -EINVAL;
+		}
 	}
 }
 
@@ -5787,6 +5817,8 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
 	static bool s_tsec_alg_register_done;
 	bool is_aes_alg, is_sha_alg, is_tsec_alg;
 
+	dev_info(&pdev->dev, "probe start\n");
+
 	gcm_supports_dma = of_property_read_bool(pdev->dev.of_node, "nvidia,gcm-dma-support");
 
 	if (gcm_supports_dma) {
@@ -6181,6 +6213,8 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
 	atomic_set(&se_dev->se_suspended, 0);
 	platform_set_drvdata(pdev, se_dev);
 
+	dev_info(&pdev->dev, "probe success\n");
+
 	return 0;
 release_bufs:
 	tegra_hv_vse_release_se_dma_bufs(se_dev->dev);
diff --git a/drivers/crypto/tegra-nvvse-cryptodev.c b/drivers/crypto/tegra-nvvse-cryptodev.c
index 5b37c832..2382042e 100644
--- a/drivers/crypto/tegra-nvvse-cryptodev.c
+++ b/drivers/crypto/tegra-nvvse-cryptodev.c
@@ -65,6 +65,7 @@
 
 #define MISC_DEVICE_NAME_LEN 33U
 #define CRYPTODEV_ERR(...) pr_err("tegra_nvvse_cryptodev " __VA_ARGS__)
+#define CRYPTODEV_INFO(...) pr_info("tegra_nvvse_cryptodev " __VA_ARGS__)
 
 struct nvvse_devnode {
 	struct miscdevice *g_misc_devices;
@@ -72,6 +73,9 @@ struct nvvse_devnode {
 	bool node_in_use;
 } nvvse_devnode[MAX_NUMBER_MISC_DEVICES];
 
+/* Info device node support */
+static struct miscdevice *nvvse_info_device;
+
 static struct tegra_nvvse_get_ivc_db ivc_database;
 
 /* SHA Algorithm Names */
@@ -198,7 +202,7 @@ static int tnvvse_crypto_validate_sha_update_req(struct tnvvse_crypto_ctx *ctx,
 
 	if (sha_update_ctl->init_only != 0U) {
 		if (sha_state->sha_init_done != 0U) {
-			CRYPTODEV_ERR("%s(): SHA init is already done\n", __func__);
+			CRYPTODEV_INFO("%s(): SHA init is already done\n", __func__);
 			ret = -EAGAIN;
 			goto exit;
 		} else {
@@ -1660,7 +1664,6 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
 	struct tegra_nvvse_aes_drng_ctl *aes_drng_ctl;
 	struct tegra_nvvse_aes_gmac_init_ctl *aes_gmac_init_ctl;
 	struct tegra_nvvse_aes_gmac_sign_verify_ctl *aes_gmac_sign_verify_ctl;
-	struct tegra_nvvse_get_ivc_db *get_ivc_db;
 	struct tegra_nvvse_tsec_get_keyload_status *tsec_keyload_status;
 	struct tegra_nvvse_map_membuf_ctl __user *arg_map_membuf_ctl;
 	struct tegra_nvvse_map_membuf_ctl *map_membuf_ctl;
@@ -1940,30 +1943,6 @@
 		kfree(aes_drng_ctl);
 		break;
 
-	case NVVSE_IOCTL_CMDID_GET_IVC_DB:
-		get_ivc_db = kzalloc(sizeof(*get_ivc_db), GFP_KERNEL);
-		if (!get_ivc_db) {
-			CRYPTODEV_ERR("%s(): failed to allocate memory\n", __func__);
-			ret = -ENOMEM;
-			goto release_lock;
-		}
-
-		ret = tnvvse_crypto_get_ivc_db(get_ivc_db);
-		if (ret) {
-			CRYPTODEV_ERR("%s(): Failed to get ivc database get_ivc_db:%d\n", __func__, ret);
-			kfree(get_ivc_db);
-			goto release_lock;
-		}
-
-		ret = copy_to_user((void __user *)arg, &ivc_database, sizeof(ivc_database));
-		if (ret) {
-			CRYPTODEV_ERR("%s(): Failed to copy_to_user ivc_database:%d\n", __func__, ret);
-			kfree(get_ivc_db);
-			goto release_lock;
-		}
-
-		kfree(get_ivc_db);
-		break;
 
 	case NVVSE_IOCTL_CMDID_TSEC_SIGN_VERIFY:
 		aes_cmac_sign_verify_ctl = kzalloc(sizeof(*aes_cmac_sign_verify_ctl), GFP_KERNEL);
@@ -2119,6 +2098,63 @@ static const struct file_operations tnvvse_crypto_fops = {
 	.unlocked_ioctl = tnvvse_crypto_dev_ioctl,
 };
 
+static int tnvvse_crypto_info_dev_open(struct inode *inode, struct file *filp)
+{
+	/* No context needed for the info device */
+	return 0;
+}
+
+static int tnvvse_crypto_info_dev_release(struct inode *inode, struct file *filp)
+{
+	/* No cleanup needed for the info device */
+	return 0;
+}
+
+static long tnvvse_crypto_info_dev_ioctl(struct file *filp,
+		unsigned int ioctl_num, unsigned long arg)
+{
+	struct tegra_nvvse_get_ivc_db *get_ivc_db;
+	int ret = 0;
+
+	if (ioctl_num == NVVSE_IOCTL_CMDID_GET_IVC_DB) {
+		get_ivc_db = kzalloc(sizeof(*get_ivc_db), GFP_KERNEL);
+		if (!get_ivc_db) {
+			CRYPTODEV_ERR("%s(): failed to allocate memory\n", __func__);
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		ret = tnvvse_crypto_get_ivc_db(get_ivc_db);
+		if (ret) {
+			CRYPTODEV_ERR("%s(): Failed to get ivc database get_ivc_db:%d\n", __func__, ret);
+			kfree(get_ivc_db);
+			goto end;
+		}
+
+		ret = copy_to_user((void __user *)arg, &ivc_database, sizeof(ivc_database));
+		if (ret) {
+			CRYPTODEV_ERR("%s(): Failed to copy_to_user ivc_database:%d\n", __func__, ret);
+			kfree(get_ivc_db);
+			goto end;
+		}
+
+		kfree(get_ivc_db);
+	} else {
+		CRYPTODEV_ERR("%s(): invalid ioctl code(%d[0x%08x])", __func__, ioctl_num, ioctl_num);
+		ret = -EINVAL;
+	}
+
+end:
+	return ret;
+}
+
+static const struct file_operations tnvvse_crypto_info_fops = {
+	.owner = THIS_MODULE,
+	.open = tnvvse_crypto_info_dev_open,
+	.release = tnvvse_crypto_info_dev_release,
+	.unlocked_ioctl = tnvvse_crypto_info_dev_ioctl,
+};
+
 static int __init tnvvse_crypto_device_init(void)
 {
 	uint32_t cnt, ctr;
@@ -2145,10 +2181,30 @@ static int __init tnvvse_crypto_device_init(void)
 	char *node_name;
 	uint32_t str_len;
 
+	CRYPTODEV_INFO("%s(): init start\n", __func__);
+
 	/* get ivc databse */
 	tnvvse_crypto_get_ivc_db(&ivc_database);
 	ivc_db = tegra_hv_vse_get_db();
 
+	/* Register the info device node */
+	nvvse_info_device = kzalloc(sizeof(struct miscdevice), GFP_KERNEL);
+	if (nvvse_info_device == NULL) {
+		CRYPTODEV_ERR("%s(): failed to allocate memory for info device\n", __func__);
+		return -ENOMEM;
+	}
+
+	nvvse_info_device->minor = MISC_DYNAMIC_MINOR;
+	nvvse_info_device->fops = &tnvvse_crypto_info_fops;
+	nvvse_info_device->name = "tegra-nvvse-crypto-info";
+
+	ret = misc_register(nvvse_info_device);
+	if (ret != 0) {
+		CRYPTODEV_ERR("%s: info device registration failed err %d\n", __func__, ret);
+		kfree(nvvse_info_device);
+		return ret;
+	}
+
 	for (cnt = 0; cnt < MAX_NUMBER_MISC_DEVICES; cnt++) {
 
 		if (ivc_db[cnt].node_in_use != true)
@@ -2157,12 +2213,14 @@
 			continue;
 		/* Dynamic initialisation of misc device */
 		misc = kzalloc(sizeof(struct miscdevice), GFP_KERNEL);
 		if (misc == NULL) {
+			CRYPTODEV_ERR("%s(): failed to allocate memory for misc device\n", __func__);
 			ret = -ENOMEM;
			goto fail;
 		}
 		node_name = kzalloc(MISC_DEVICE_NAME_LEN, GFP_KERNEL);
 		if (node_name == NULL) {
+			CRYPTODEV_ERR("%s(): failed to allocate memory for node name\n", __func__);
 			ret = -ENOMEM;
 			goto fail;
 		}
@@ -2215,14 +2273,24 @@
 		mutex_init(&nvvse_devnode[cnt].lock);
 	}
 
+	CRYPTODEV_INFO("%s(): init success\n", __func__);
+
 	return ret;
 
 fail:
+	/* Cleanup the info device if needed */
+	if (nvvse_info_device) {
+		misc_deregister(nvvse_info_device);
+		kfree(nvvse_info_device);
+		nvvse_info_device = NULL;
+	}
+
 	for (ctr = 0; ctr < cnt; ctr++) {
 		misc_deregister(nvvse_devnode[ctr].g_misc_devices);
 		kfree(nvvse_devnode[ctr].g_misc_devices->name);
 		kfree(nvvse_devnode[ctr].g_misc_devices);
 		nvvse_devnode[ctr].g_misc_devices = NULL;
+		mutex_destroy(&nvvse_devnode[ctr].lock);
 	}
 	return ret;
 }
@@ -2232,6 +2300,13 @@ static void __exit tnvvse_crypto_device_exit(void)
 {
 	uint32_t ctr;
 
+	/* Unregister the info device node */
+	if (nvvse_info_device != NULL) {
+		misc_deregister(nvvse_info_device);
+		kfree(nvvse_info_device);
+		nvvse_info_device = NULL;
+	}
+
 	for (ctr = 0; ctr < MAX_NUMBER_MISC_DEVICES; ctr++) {
 		if (nvvse_devnode[ctr].g_misc_devices != NULL) {
 			misc_deregister(nvvse_devnode[ctr].g_misc_devices);
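
Usage sketch for the new info node (not part of the patch above): a minimal userspace caller of NVVSE_IOCTL_CMDID_GET_IVC_DB. It assumes the misc device registered with .name = "tegra-nvvse-crypto-info" shows up as /dev/tegra-nvvse-crypto-info and that a uapi header exports the ioctl code and struct tegra_nvvse_get_ivc_db; the header name below is a placeholder.

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "tegra-nvvse-cryptodev.h"   /* assumed uapi header; adjust to the actual install path */

    int main(void)
    {
            struct tegra_nvvse_get_ivc_db db = {0};
            /* misc_register() exposes the node under /dev using the registered name */
            int fd = open("/dev/tegra-nvvse-crypto-info", O_RDWR);

            if (fd < 0) {
                    perror("open tegra-nvvse-crypto-info");
                    return 1;
            }

            /* Any other ioctl code on this node is rejected with -EINVAL by the new handler */
            if (ioctl(fd, NVVSE_IOCTL_CMDID_GET_IVC_DB, &db) != 0)
                    perror("NVVSE_IOCTL_CMDID_GET_IVC_DB");

            close(fd);
            return 0;
    }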