crypto: sync crypto driver from kernel/nvidia

This patch pulls in the missing crypto driver changes
from the kernel/nvidia directory.

JIRA ESLC-6885

Signed-off-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Change-Id: I9376e14971be0f4d40f759858fcc9db90a7b9d72
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2785534
Reviewed-by: Suresh Venkatachalam <skathirampat@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Manish Bhardwaj
2022-10-02 14:02:01 +05:30
committed by mobile promotions
parent aa0b664723
commit 27f17e56d1
3 changed files with 323 additions and 6 deletions

View File

@@ -0,0 +1,141 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
/*
 * tegra-hv-vse.h - shared context/state definitions for the Tegra
 * hypervisor-virtualized security engine (VSE) crypto driver.
 */
#ifndef __TEGRA_HV_VSE_H
#define __TEGRA_HV_VSE_H
/* Per-chip capability flags for the virtualized security engine */
struct tegra_vse_soc_info {
	bool cmac_hw_padding_supported;	/* CMAC padding performed by HW */
	bool gcm_decrypt_supported;	/* GCM decrypt offload available */
};
/* GCM Operation Supported Flag */
enum tegra_gcm_dec_supported {
	GCM_DEC_OP_NOT_SUPPORTED,
	GCM_DEC_OP_SUPPORTED,
};
/*
 * Mapping of one crypto dev instance to its IVC channel and the
 * per-channel bookkeeping (worker thread, completion, serialization).
 */
struct crypto_dev_to_ivc_map {
	uint32_t ivc_id;	/* IVC channel identifier */
	uint32_t se_engine;	/* SE engine serviced over this channel */
	uint32_t node_id;	/* crypto dev instance index */
	uint32_t priority;	/* NOTE(review): presumably request priority — confirm with server side */
	uint32_t channel_grp_id;
	/* whether GCM decryption may be issued on this channel */
	enum tegra_gcm_dec_supported gcm_dec_supported;
	struct tegra_hv_ivc_cookie *ivck;	/* IVC cookie for the channel */
	struct completion tegra_vse_complete;	/* signaled when a response arrives */
	struct task_struct *tegra_vse_task;	/* receive/worker thread */
	bool vse_thread_start;	/* worker thread started flag */
	struct mutex se_ivc_lock;	/* serializes traffic on this IVC channel */
};
/* Per-engine virtual SE device state */
struct tegra_virtual_se_dev {
	struct device *dev;
	/* Engine id */
	unsigned int engine_id;
	/* Engine suspend state */
	atomic_t se_suspended;
	struct tegra_vse_soc_info *chipdata;	/* chip capability flags */
#if defined(CONFIG_HW_RANDOM)
	/* Integration with hwrng framework */
	struct hwrng *hwrng;
#endif /* CONFIG_HW_RANDOM */
	struct platform_device *host1x_pdev;
	/* per-instance IVC mapping table, indexed by node_id */
	struct crypto_dev_to_ivc_map *crypto_to_ivc_map;
};
/* Security Engine random number generator context */
struct tegra_virtual_se_rng_context {
	/* Security Engine device */
	struct tegra_virtual_se_dev *se_dev;
	/* RNG buffer pointer */
	u32 *rng_buf;
	/* RNG buffer dma address */
	dma_addr_t rng_buf_adr;
	/*Crypto dev instance*/
	uint32_t node_id;
};
/* Security Engine AES context */
struct tegra_virtual_se_aes_context {
	/* Security Engine device */
	struct tegra_virtual_se_dev *se_dev;
	struct skcipher_request *req;
	/* Security Engine key slot */
	u32 aes_keyslot;
	/* key length in bytes */
	u32 keylen;
	/* AES operation mode */
	u32 op_mode;
	/* Is key slot */
	bool is_key_slot_allocated;
	/* size of GCM tag*/
	u32 authsize;
	/*Crypto dev instance*/
	uint32_t node_id;
};
/* Security Engine AES CMAC context */
struct tegra_virtual_se_aes_cmac_context {
	unsigned int digest_size;
	u8 *hash_result; /* Intermediate hash result */
	dma_addr_t hash_result_addr; /* Intermediate hash result dma addr */
	bool is_first; /* Represents first block */
	bool req_context_initialized; /* Mark initialization status */
	u32 aes_keyslot;
	/* key length in bits */
	u32 keylen;
	bool is_key_slot_allocated;
	/*Crypto dev instance*/
	uint32_t node_id;
};
/* Security Engine AES GMAC context */
struct tegra_virtual_se_aes_gmac_context {
	/* size of GCM tag*/
	u32 authsize;
	/* Mark initialization status */
	bool req_context_initialized;
	u32 aes_keyslot;
	/* key length in bits */
	u32 keylen;
	bool is_key_slot_allocated;
	/*Crypto dev instance*/
	uint32_t node_id;
};
/* Security Engine SHA context */
struct tegra_virtual_se_sha_context {
	/* Security Engine device */
	struct tegra_virtual_se_dev *se_dev;
	/* SHA operation mode */
	u32 op_mode;
	unsigned int digest_size;
	u8 mode;
	/*Crypto dev instance*/
	uint32_t node_id;
};
/* Security Engine request context */
struct tegra_virtual_se_req_context {
	/* Security Engine device */
	struct tegra_virtual_se_dev *se_dev;
	unsigned int digest_size;
	unsigned int intermediate_digest_size;
	u8 mode; /* SHA operation mode */
	u8 *sha_buf; /* Buffer to store residual data */
	dma_addr_t sha_buf_addr; /* DMA address to residual data */
	u8 *hash_result; /* Intermediate hash result */
	dma_addr_t hash_result_addr; /* Intermediate hash result dma addr */
	u64 total_count; /* Total bytes in all the requests */
	u32 residual_bytes; /* Residual byte count */
	u32 blk_size; /* SHA block size */
	bool is_first; /* Represents first block */
	bool req_context_initialized; /* Mark initialization status */
	bool force_align; /* Enforce buffer alignment */
	/*Crypto dev instance*/
	uint32_t node_id;
};
#endif /*__TEGRA_HV_VSE_H*/

View File

@@ -19,15 +19,24 @@
#include <linux/mutex.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/platform/tegra/common.h>
#include <soc/tegra/fuse.h>
#include <crypto/rng.h>
#include <crypto/hash.h>
#include <crypto/akcipher.h>
#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
#include <uapi/misc/tegra-nvvse-cryptodev.h>
#include <asm/barrier.h>
#include "tegra-hv-vse.h"
#define NBUFS 2
#define XBUFSIZE 8
#define AES_IV_SIZE 16
@@ -53,6 +62,11 @@
*/
#define NVVSE_MAX_ALLOCATED_SHA_RESULT_BUFF_SIZE 256U
#define MAX_NUMBER_MISC_DEVICES 40U
#define MISC_DEVICE_NAME_LEN 32U
static struct miscdevice *g_misc_devices[MAX_NUMBER_MISC_DEVICES];
/* SHA Algorithm Names */
static const char *sha_alg_names[] = {
"sha256-vse",
@@ -90,6 +104,7 @@ struct tnvvse_crypto_ctx {
char *rng_buff;
uint32_t max_rng_buff;
char *sha_result;
uint32_t node_id;
};
enum tnvvse_gmac_request_type {
@@ -174,6 +189,7 @@ static int tnvvse_crypto_sha_init(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_sha_init_ctl *init_ctl)
{
struct crypto_sha_state *sha_state = &ctx->sha_state;
struct tegra_virtual_se_sha_context *sha_ctx;
struct crypto_ahash *tfm;
struct ahash_request *req;
const char *driver_name;
@@ -195,6 +211,9 @@ static int tnvvse_crypto_sha_init(struct tnvvse_crypto_ctx *ctx,
goto out;
}
sha_ctx = crypto_ahash_ctx(tfm);
sha_ctx->node_id = ctx->node_id;
driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));;
if (driver_name == NULL) {
pr_err("%s(): Failed to get driver name\n", __func__);
@@ -408,6 +427,7 @@ static int tnvvse_crypto_aes_cmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
const char *driver_name;
struct ahash_request *req;
struct tnvvse_crypto_completion sha_complete;
struct tegra_virtual_se_aes_cmac_context *cmac_ctx;
unsigned long *xbuf[XBUFSIZE];
char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,};
struct tnvvse_cmac_req_data priv_data;
@@ -427,6 +447,9 @@ static int tnvvse_crypto_aes_cmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
goto free_result;
}
cmac_ctx = crypto_ahash_ctx(tfm);
cmac_ctx->node_id = ctx->node_id;
driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
if (driver_name == NULL) {
pr_err("%s(): Failed to get_driver_name for cmac-vse(aes) returned NULL", __func__);
@@ -550,6 +573,7 @@ static int tnvvse_crypto_aes_gmac_init(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_aes_gmac_init_ctl *gmac_init_ctl)
{
struct crypto_sha_state *sha_state = &ctx->sha_state;
struct tegra_virtual_se_aes_gmac_context *gmac_ctx;
char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,};
struct crypto_ahash *tfm;
struct ahash_request *req;
@@ -566,6 +590,9 @@ static int tnvvse_crypto_aes_gmac_init(struct tnvvse_crypto_ctx *ctx,
goto out;
}
gmac_ctx = crypto_ahash_ctx(tfm);
gmac_ctx->node_id = ctx->node_id;
driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
if (driver_name == NULL) {
pr_err("%s(): Failed to get driver name\n", __func__);
@@ -625,6 +652,7 @@ static int tnvvse_crypto_aes_gmac_sign_verify_init(struct tnvvse_crypto_ctx *ctx
struct tegra_nvvse_aes_gmac_sign_verify_ctl *gmac_sign_verify_ctl)
{
struct crypto_sha_state *sha_state = &ctx->sha_state;
struct tegra_virtual_se_aes_gmac_context *gmac_ctx;
char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,};
struct crypto_ahash *tfm;
struct ahash_request *req;
@@ -640,6 +668,9 @@ static int tnvvse_crypto_aes_gmac_sign_verify_init(struct tnvvse_crypto_ctx *ctx
goto out;
}
gmac_ctx = crypto_ahash_ctx(tfm);
gmac_ctx->node_id = ctx->node_id;
driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
if (driver_name == NULL) {
pr_err("%s(): Failed to get driver name\n", __func__);
@@ -856,6 +887,7 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
int ret = 0, size = 0;
unsigned long total = 0;
struct tnvvse_crypto_completion tcrypt_complete;
struct tegra_virtual_se_aes_context *aes_ctx;
char aes_algo[5][15] = {"cbc-vse(aes)", "ecb-vse(aes)", "ctr-vse(aes)"};
const char *driver_name;
char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,};
@@ -881,6 +913,9 @@ static int tnvvse_crypto_aes_enc_dec(struct tnvvse_crypto_ctx *ctx,
goto out;
}
aes_ctx = crypto_skcipher_ctx(tfm);
aes_ctx->node_id = ctx->node_id;
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("%s(): Failed to allocate skcipher request\n", __func__);
@@ -1123,6 +1158,7 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
uint32_t in_sz, out_sz, aad_length, data_length, tag_length;
uint32_t i, idx, offset, data_length_copied, data_length_remaining, tag_length_copied;
struct tnvvse_crypto_completion tcrypt_complete;
struct tegra_virtual_se_aes_context *aes_ctx;
const char *driver_name;
char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,};
uint8_t iv[TEGRA_NVVSE_AES_GCM_IV_LEN];
@@ -1152,6 +1188,9 @@ static int tnvvse_crypto_aes_enc_dec_gcm(struct tnvvse_crypto_ctx *ctx,
goto out;
}
aes_ctx = crypto_aead_ctx(tfm);
aes_ctx->node_id = ctx->node_id;
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("%s(): Failed to allocate skcipher request\n", __func__);
@@ -1481,6 +1520,7 @@ out:
static int tnvvse_crypto_get_aes_drng(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_aes_drng_ctl *aes_drng_ctl)
{
struct tegra_virtual_se_rng_context *rng_ctx;
struct crypto_rng *rng;
int ret = -ENOMEM;
@@ -1491,6 +1531,9 @@ static int tnvvse_crypto_get_aes_drng(struct tnvvse_crypto_ctx *ctx,
goto out;
}
rng_ctx = crypto_rng_ctx(rng);
rng_ctx->node_id = ctx->node_id;
memset(ctx->rng_buff, 0, ctx->max_rng_buff);
ret = crypto_rng_get_bytes(rng, ctx->rng_buff, aes_drng_ctl->data_length);
if (ret < 0) {
@@ -1515,6 +1558,8 @@ out:
static int tnvvse_crypto_dev_open(struct inode *inode, struct file *filp)
{
struct tnvvse_crypto_ctx *ctx;
char root_path_buf[512];
const char *root_path, *str;
int ret = 0;
ctx = kzalloc(sizeof(struct tnvvse_crypto_ctx), GFP_KERNEL);
@@ -1538,10 +1583,27 @@ static int tnvvse_crypto_dev_open(struct inode *inode, struct file *filp)
goto free_rng_buf;
}
/* get the node id from file name */
root_path = dentry_path_raw(filp->f_path.dentry, root_path_buf, sizeof(root_path_buf));
str = strrchr(root_path, '-');
if (str == NULL) {
pr_err("%s: invalid dev node name\n", __func__);
ret = -EINVAL;
goto free_sha_buf;
}
if (kstrtou32(str+1, 10, &ctx->node_id)) {
pr_err("%s: invalid crypto dev instance passed\n", __func__);
ret = -EINVAL;
goto free_sha_buf;
}
filp->private_data = ctx;
return ret;
free_sha_buf:
kfree(ctx->sha_result);
free_rng_buf:
kfree(ctx->rng_buff);
free_mutex:
@@ -1756,13 +1818,74 @@ static const struct file_operations tnvvse_crypto_fops = {
.unlocked_ioctl = tnvvse_crypto_dev_ioctl,
};
/*
 * Legacy single misc device node ("tegra-nvvse-crypto").
 * NOTE(review): module init now registers MAX_NUMBER_MISC_DEVICES
 * per-instance nodes named "tegra-nvvse-crypto-<n>"; confirm this
 * static device is still needed or can be removed.
 */
static struct miscdevice tnvvse_crypto_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "tegra-nvvse-crypto",
	.fops = &tnvvse_crypto_fops,
};
/*
 * tnvvse_crypto_device_init() - register one misc device per crypto
 * dev instance ("tegra-nvvse-crypto-<n>"), n in [0, MAX_NUMBER_MISC_DEVICES).
 *
 * The instance number embedded in the node name is parsed back out in
 * tnvvse_crypto_dev_open() to select the crypto dev instance (node_id).
 *
 * Return: 0 on success, negative errno on failure. On failure every
 * device registered so far, plus any partially-built one, is torn down.
 *
 * Fixes over the previous version:
 *  - removed the bogus module_misc_device() invocation: that macro expands
 *    to file-scope module_init()/module_exit() definitions and must not be
 *    called as a statement (module_init() is already declared below);
 *  - the in-flight 'misc' and 'misc->name' allocations are now freed when
 *    name allocation, snprintf, or misc_register fails (they previously
 *    leaked, since the cleanup loop only covered indices < cnt);
 *  - a negative snprintf return is treated as an error before the
 *    truncation check (avoids the signed/unsigned compare hiding it).
 */
static int __init tnvvse_crypto_device_init(void)
{
	uint32_t cnt, ctr;
	int ret = 0;
	struct miscdevice *misc;

	for (cnt = 0; cnt < MAX_NUMBER_MISC_DEVICES; cnt++) {
		/* Dynamic initialisation of misc device */
		misc = kzalloc(sizeof(struct miscdevice), GFP_KERNEL);
		if (misc == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		misc->minor = MISC_DYNAMIC_MINOR;
		misc->fops = &tnvvse_crypto_fops;
		misc->name = kzalloc(MISC_DEVICE_NAME_LEN, GFP_KERNEL);
		if (misc->name == NULL) {
			ret = -ENOMEM;
			goto free_misc;
		}

		ret = snprintf((char *)misc->name, MISC_DEVICE_NAME_LEN,
				"tegra-nvvse-crypto-%u", cnt);
		if (ret < 0 || ret >= MISC_DEVICE_NAME_LEN) {
			pr_err("%s: buffer overflown for misc dev %u\n", __func__, cnt);
			ret = -EINVAL;
			goto free_name;
		}

		ret = misc_register(misc);
		if (ret != 0) {
			pr_err("%s: misc dev %u registration failed err %d\n", __func__, cnt, ret);
			goto free_name;
		}

		g_misc_devices[cnt] = misc;
	}

	return 0;

free_name:
	kfree(misc->name);
free_misc:
	kfree(misc);
fail:
	/* Unwind all devices fully registered before the failure */
	for (ctr = 0; ctr < cnt; ctr++) {
		misc_deregister(g_misc_devices[ctr]);
		kfree(g_misc_devices[ctr]->name);
		kfree(g_misc_devices[ctr]);
		g_misc_devices[ctr] = NULL;
	}
	return ret;
}
module_init(tnvvse_crypto_device_init);
/*
 * tnvvse_crypto_device_exit() - tear down every misc device that was
 * registered by tnvvse_crypto_device_init(), freeing the name buffer
 * and the device structure, and clearing the global slot.
 */
static void __exit tnvvse_crypto_device_exit(void)
{
	struct miscdevice *misc;
	uint32_t idx;

	for (idx = 0; idx < MAX_NUMBER_MISC_DEVICES; idx++) {
		misc = g_misc_devices[idx];
		if (misc == NULL)
			continue;

		misc_deregister(misc);
		kfree(misc->name);
		kfree(misc);
		g_misc_devices[idx] = NULL;
	}
}
module_exit(tnvvse_crypto_device_exit);
MODULE_DESCRIPTION("Tegra NVVSE Crypto device driver.");
MODULE_AUTHOR("NVIDIA Corporation");

View File

@@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */
/*
 * common.h - legacy Tegra platform declarations (SMP ops, carveout
 * addresses, cache init, CMA devices, fuse/strap helpers) shared across
 * mach-tegra code. Definitions live in the platform .c files.
 */
#ifndef __MACH_TEGRA_COMMON_H
#define __MACH_TEGRA_COMMON_H
extern struct smp_operations tegra_smp_ops;
/* TSEC carveout base/size */
extern phys_addr_t tegra_tsec_start;
extern phys_addr_t tegra_tsec_size;
#ifdef CONFIG_CACHE_L2X0
void tegra_init_cache(bool init);
#else
/* No-op when the PL310/L2X0 outer cache is not configured */
static inline void tegra_init_cache(bool init) {}
#endif
/* CPU hotplug hooks */
extern void tegra_cpu_die(unsigned int cpu);
extern int tegra_cpu_kill(unsigned int cpu);
/* AVP firmware carveout base/size */
extern phys_addr_t tegra_avp_kernel_start;
extern phys_addr_t tegra_avp_kernel_size;
void ahb_gizmo_writel(unsigned long val, void __iomem *reg);
/* CMA devices for generic and VPR (video protected region) allocations */
extern struct device tegra_generic_cma_dev;
extern struct device tegra_vpr_cma_dev;
/* Non-zero when running on top of secure firmware (see tegra_cpu_is_secure) */
extern int tegra_with_secure_firmware;
extern struct device tegra_generic_dev;
extern struct device tegra_vpr_dev;
#ifdef CONFIG_TEGRA_VPR
extern struct dma_resize_notifier_ops vpr_dev_ops;
#endif
/* Fuse/strap query helpers */
u32 tegra_get_bct_strapping(void);
u32 tegra_get_fuse_opt_subrevision(void);
enum tegra_revision tegra_chip_get_revision(void);
void __init display_tegra_dt_info(void);
bool tegra_is_vpr_resize_enabled(void);
/* Register callbacks invoked around idle entry/exit */
void tegra_register_idle_unidle(int (*do_idle)(void *),
		int (*do_unidle)(void *),
		void *data);
void tegra_unregister_idle_unidle(int (*do_idle)(void *));
/* Returns non-zero when secure (trusted) firmware is present */
static inline int tegra_cpu_is_secure(void)
{
	return tegra_with_secure_firmware;
}
int tegra_state_idx_from_name(char *state_name);
#endif