// SPDX-License-Identifier: GPL-2.0-only /* * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. * All rights reserved. * * Cryptographic API. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "tegra-hv-vse.h" #define SE_MAX_SCHEDULE_TIMEOUT LONG_MAX #define TEGRA_HV_VSE_SHA_MAX_LL_NUM_1 1 #define TEGRA_HV_VSE_AES_CMAC_MAX_LL_NUM 1 #define TEGRA_HV_VSE_MAX_TASKS_PER_SUBMIT 1 #define TEGRA_HV_VSE_TIMEOUT (msecs_to_jiffies(10000)) #define TEGRA_HV_VSE_SHA_MAX_BLOCK_SIZE 128 #define TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE 16 #define TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE 16 #define TEGRA_VIRTUAL_SE_AES_GCM_TAG_IV_SIZE 32 #define TEGRA_VIRTUAL_SE_AES_MIN_KEY_SIZE 16 #define TEGRA_VIRTUAL_SE_AES_MAX_KEY_SIZE 32 #define TEGRA_VIRTUAL_SE_AES_IV_SIZE 16 #define TEGRA_VIRTUAL_SE_AES_GCM_IV_SIZE 12 #define TEGRA_VIRTUAL_SE_AES_MAX_IV_SIZE TEGRA_VIRTUAL_SE_AES_IV_SIZE /* Virtual Security Engines */ #define TEGRA_VIRTUAL_SE_CMD_ENG_AES 0x01000000U #define TEGRA_VIRTUAL_SE_CMD_ENG_SHA 0x02000000U #define TEGRA_VIRTUAL_SE_CMD_ENG_TSEC 0x03000000U /* Command categories for AES Engine */ #define TEGRA_VIRTUAL_SE_CMD_CATEGORY_ENC_DEC 0x00010000U #define TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH 0x00020000U #define TEGRA_VIRTUAL_SE_CMD_CATEGORY_RNG 0x00030000U /* Command categories for SHA Engine */ #define TEGRA_VIRTUAL_SE_CMD_CATEGORY_SHA 0x00010000U #define TEGRA_VIRTUAL_SE_CMD_CATEGORY_HMAC 0x00030000U /* Command categories for TSEC Engine */ #define TEGRA_VIRTUAL_SE_CMD_CATEGORY_TSEC_KEYS 0x00010000U #define TEGRA_VIRTUAL_SE_CMD_CATEGORY_TSEC_AUTH 0x00020000U /* Command sets for Encryption/Decryption with AES Engine */ #define TEGRA_VIRTUAL_SE_CMD_SET_AES_ENC_DEC 0x00001000U #define TEGRA_VIRTUAL_SE_CMD_SET_GCM_ENC_DEC 0x00002000U /* Command sets for Authentication using AES engine */ #define TEGRA_VIRTUAL_SE_CMD_SET_CMAC 0x00001000U #define TEGRA_VIRTUAL_SE_CMD_SET_GMAC 0x00002000U /* Commands in the AES Encryption/Decryption set */ #define TEGRA_VIRTUAL_SE_CMD_OP_AES_ENC_INIT 0x00000001U #define TEGRA_VIRTUAL_SE_CMD_OP_AES_ENC 0x00000002U #define TEGRA_VIRTUAL_SE_CMD_OP_AES_DEC 0x00000003U /* Commands in the GCM Encryption/Decryption set */ #define TEGRA_VIRTUAL_SE_CMD_OP_GCM_ENC 0x00000001U #define TEGRA_VIRTUAL_SE_CMD_OP_GCM_DEC 0x00000002U #define TEGRA_VIRTUAL_SE_CMD_OP_GCM_GET_DEC 0x00000003U /* Commands in the CMAC Authentication set*/ #define TEGRA_VIRTUAL_SE_CMD_OP_CMAC_SIGN 0x00000001U #define TEGRA_VIRTUAL_SE_CMD_OP_CMAC_VERIFY 0x00000002U #define TEGRA_VIRTUAL_SE_CMD_OP_CMAC_GET_SIGN 0x00000003U #define TEGRA_VIRTUAL_SE_CMD_OP_CMAC_GET_VERIFY 0x00000004U /* Commands in the GMAC Authentication set */ #define TEGRA_VIRTUAL_SE_CMD_OP_GMAC_INIT 0x00000001U #define TEGRA_VIRTUAL_SE_CMD_OP_GMAC_SIGN 0x00000002U #define TEGRA_VIRTUAL_SE_CMD_OP_GMAC_VERIFY 0x00000003U #define TEGRA_VIRTUAL_SE_CMD_OP_GMAC_GET_VERIFY 0x00000004U #define TEGRA_VIRTUAL_SE_CMD_OP_GMAC_GET_IV 0x00000005U /* Commands in the AES RNG Category*/ #define TEGRA_VIRTUAL_SE_CMD_OP_AES_RNG 0x00000001U /* Commands in the SHA Category */ #define TEGRA_VIRTUAL_SE_CMD_OP_SHA 0x00000001U /* Commands in the HMAC Category */ #define TEGRA_VIRTUAL_SE_CMD_OP_HMAC_SIGN 0x00000001U #define TEGRA_VIRTUAL_SE_CMD_OP_HMAC_VERIFY 0x00000002U #define 
TEGRA_VIRTUAL_SE_CMD_OP_HMAC_GET_VERIFY 0x00000004U /* Commands in the TSEC keys category */ #define TEGRA_VIRTUAL_SE_CMD_OP_TSEC_KEYLOAD_STATUS 0x00000001U /* Commands in the TSEC Authentication category */ #define TEGRA_VIRTUAL_SE_CMD_OP_TSEC_CMAC_SIGN \ TEGRA_VIRTUAL_SE_CMD_OP_CMAC_SIGN #define TEGRA_VIRTUAL_SE_CMD_OP_TSEC_GET_CMAC_SIGN \ TEGRA_VIRTUAL_SE_CMD_OP_CMAC_GET_SIGN #define TEGRA_VIRTUAL_SE_CMD_OP_TSEC_CMAC_VERIFY \ TEGRA_VIRTUAL_SE_CMD_OP_CMAC_VERIFY #define TEGRA_VIRTUAL_SE_CMD_OP_TSEC_GET_CMAC_VERIFY \ TEGRA_VIRTUAL_SE_CMD_OP_CMAC_GET_VERIFY #define TEGRA_VIRTUAL_SE_CMD_AES_SET_KEY 0xF1 #define TEGRA_VIRTUAL_SE_CMD_AES_ALLOC_KEY 0xF0 #define TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT_INIT (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_SET_AES_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_OP_AES_ENC_INIT) #define TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_SET_AES_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_OP_AES_ENC) #define TEGRA_VIRTUAL_SE_CMD_AES_DECRYPT (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_SET_AES_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_OP_AES_DEC) #define TEGRA_VIRTUAL_SE_CMD_AES_CMAC 0x23 #define TEGRA_VIRTUAL_SE_CMD_AES_CMAC_GEN_SUBKEY 0x24 #define TEGRA_VIRTUAL_SE_CMD_AES_RNG_DBRG (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_RNG \ | TEGRA_VIRTUAL_SE_CMD_OP_AES_RNG) #define TEGRA_VIRTUAL_SE_CMD_AES_GCM_CMD_ENCRYPT (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_SET_GCM_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_OP_GCM_ENC) #define TEGRA_VIRTUAL_SE_CMD_AES_GCM_CMD_DECRYPT (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_SET_GCM_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_OP_GCM_DEC) #define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GCM_DEC (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_SET_GCM_ENC_DEC \ | TEGRA_VIRTUAL_SE_CMD_OP_GCM_GET_DEC) #define TEGRA_VIRTUAL_SE_CMD_AES_CMAC_SIGN (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH \ | TEGRA_VIRTUAL_SE_CMD_SET_CMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_CMAC_SIGN) #define TEGRA_VIRTUAL_SE_CMD_AES_CMAC_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH \ | TEGRA_VIRTUAL_SE_CMD_SET_CMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_CMAC_VERIFY) #define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_CMAC_SIGN (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH \ | TEGRA_VIRTUAL_SE_CMD_SET_CMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_CMAC_GET_SIGN) #define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_CMAC_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH \ | TEGRA_VIRTUAL_SE_CMD_SET_CMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_CMAC_GET_VERIFY) #define TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_INIT (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH \ | TEGRA_VIRTUAL_SE_CMD_SET_GMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_GMAC_INIT) #define TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_SIGN (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH \ | TEGRA_VIRTUAL_SE_CMD_SET_GMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_GMAC_SIGN) #define TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH \ | TEGRA_VIRTUAL_SE_CMD_SET_GMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_GMAC_VERIFY) #define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GMAC_IV (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH \ | 
TEGRA_VIRTUAL_SE_CMD_SET_GMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_GMAC_GET_IV) #define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GMAC_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_AES \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_AUTH \ | TEGRA_VIRTUAL_SE_CMD_SET_GMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_GMAC_GET_VERIFY) #define TEGRA_VIRTUAL_TSEC_CMD_GET_KEYLOAD_STATUS (TEGRA_VIRTUAL_SE_CMD_ENG_TSEC \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_TSEC_KEYS \ | TEGRA_VIRTUAL_SE_CMD_OP_TSEC_KEYLOAD_STATUS) #define TEGRA_VIRTUAL_SE_CMD_TSEC_SIGN (TEGRA_VIRTUAL_SE_CMD_ENG_TSEC \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_TSEC_AUTH \ | TEGRA_VIRTUAL_SE_CMD_OP_TSEC_CMAC_SIGN) #define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_TSEC_SIGN (TEGRA_VIRTUAL_SE_CMD_ENG_TSEC \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_TSEC_AUTH \ | TEGRA_VIRTUAL_SE_CMD_OP_TSEC_GET_CMAC_SIGN) #define TEGRA_VIRTUAL_SE_CMD_TSEC_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_TSEC \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_TSEC_AUTH \ | TEGRA_VIRTUAL_SE_CMD_OP_TSEC_CMAC_VERIFY) #define TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_TSEC_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_TSEC \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_TSEC_AUTH \ | TEGRA_VIRTUAL_SE_CMD_OP_TSEC_GET_CMAC_VERIFY) #define TEGRA_VIRTUAL_SE_AES_GMAC_SV_CFG_FIRST_REQ_SHIFT (0x00U) #define TEGRA_VIRTUAL_SE_AES_GMAC_SV_CFG_LAST_REQ_SHIFT (0x01U) #define TEGRA_VIRTUAL_SE_CMD_SHA_HASH (TEGRA_VIRTUAL_SE_CMD_ENG_SHA \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_SHA \ | TEGRA_VIRTUAL_SE_CMD_OP_SHA) #define TEGRA_VIRTUAL_SE_CMD_HMAC_SIGN (TEGRA_VIRTUAL_SE_CMD_ENG_SHA \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_HMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_HMAC_SIGN) #define TEGRA_VIRTUAL_SE_CMD_HMAC_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_SHA \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_HMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_HMAC_VERIFY) #define TEGRA_VIRTUAL_SE_CMD_HMAC_GET_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_SHA \ | TEGRA_VIRTUAL_SE_CMD_CATEGORY_HMAC \ | TEGRA_VIRTUAL_SE_CMD_OP_HMAC_GET_VERIFY) #define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT (512 / 8) #define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_576BIT (576 / 8) #define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_832BIT (832 / 8) #define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1024BIT (1024 / 8) #define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1088BIT (1088 / 8) #define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1344BIT (1344 / 8) #define TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH (32U) #define SHA3_STATE_SIZE 200 #define TEGRA_VIRTUAL_SE_TIMEOUT_1S 1000000 #define TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE 16 #define TEGRA_VIRTUAL_SE_AES_CMAC_STATE_SIZE 16 #define TEGRA_VIRTUAL_SE_MAX_BUFFER_SIZE 0x1000000 #define TEGRA_VIRTUAL_SE_AES_KEYTBL_TYPE_KEY 1 #define TEGRA_VIRTUAL_SE_AES_KEYTBL_TYPE_OIV 2 #define TEGRA_VIRTUAL_SE_AES_KEYTBL_TYPE_UIV 4 #define TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL_SIZE 16 #define TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL "NVSEAES" #define TEGRA_VIRTUAL_SE_AES_LCTR_SIZE 16 #define TEGRA_VIRTUAL_SE_AES_LCTR_CNTN 1 #define TEGRA_VIRTUAL_SE_AES_CMAC_CONFIG_NONLASTBLK 0x00 #define TEGRA_VIRTUAL_SE_AES_CMAC_CONFIG_LASTBLK 0x01 #define TEGRA_VIRTUAL_SE_AES_CMAC_CONFIG_FINAL 0x02 #define TEGRA_VIRTUAL_SE_AES_CMAC_SV_CONFIG_FIRSTREQ 0x01 #define TEGRA_VIRTUAL_SE_AES_CMAC_SV_CONFIG_LASTREQ 0x02 #define TEGRA_VIRTUAL_SE_RNG_IV_SIZE 16 #define TEGRA_VIRTUAL_SE_RNG_DT_SIZE 16 #define TEGRA_VIRTUAL_SE_RNG_KEY_SIZE 16 #define TEGRA_VIRTUAL_SE_RNG_SEED_SIZE (TEGRA_VIRTUAL_SE_RNG_IV_SIZE + \ TEGRA_VIRTUAL_SE_RNG_KEY_SIZE + \ TEGRA_VIRTUAL_SE_RNG_DT_SIZE) #define TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN ((1U << 24) - 1U) #define TEGRA_VIRTUAL_SE_MAX_GCMDEC_BUFLEN (0x500000U) /* 5 MB */ #define TEGRA_VIRTUAL_TSEC_MAX_SUPPORTED_BUFLEN (8U * 1024U) /* 
8 KB */ #define TEGRA_VIRTUAL_SE_ERR_MAC_INVALID 11 #define MAX_NUMBER_MISC_DEVICES 46U #define MAX_IVC_Q_PRIORITY 2U #define TEGRA_IVC_ID_OFFSET 0U #define TEGRA_SE_ENGINE_ID_OFFSET 1U #define TEGRA_CRYPTO_DEV_ID_OFFSET 2U #define TEGRA_IVC_PRIORITY_OFFSET 3U #define TEGRA_MAX_BUFFER_SIZE 4U #define TEGRA_CHANNEL_GROUPID_OFFSET 5U #define TEGRA_GCM_SUPPORTED_FLAG_OFFSET 7U #define TEGRA_GCM_DEC_BUFFER_SIZE 8U #define TEGRA_GCM_DEC_MEMPOOL_ID 9U #define TEGRA_GCM_DEC_MEMPOOL_SIZE 10U #define TEGRA_IVCCFG_ARRAY_LEN 11U #define VSE_MSG_ERR_TSEC_KEYLOAD_FAILED 21U #define VSE_MSG_ERR_TSEC_KEYLOAD_STATUS_CHECK_TIMEOUT 20U #define NVVSE_STATUS_SE_SERVER_TSEC_KEYLOAD_FAILED 105U #define NVVSE_STATUS_SE_SERVER_TSEC_KEYLOAD_TIMEOUT 150U #define NVVSE_STATUS_SE_SERVER_ERROR 102U #define SE_HW_VALUE_MATCH_CODE 0x5A5A5A5A #define SE_HW_VALUE_MISMATCH_CODE 0xBDBDBDBD static struct crypto_dev_to_ivc_map g_crypto_to_ivc_map[MAX_NUMBER_MISC_DEVICES]; static struct tegra_vse_node_dma g_node_dma[MAX_NUMBER_MISC_DEVICES]; static bool gcm_supports_dma; static struct device *gpcdma_dev; static bool s_set_sha_algs; /* Security Engine Linked List */ struct tegra_virtual_se_ll { dma_addr_t addr; /* DMA buffer address */ u32 data_len; /* Data length in DMA buffer */ }; struct tegra_vse_tag { unsigned int *priv_data; }; /* Tegra Virtual Security Engine commands */ enum tegra_virtual_se_command { VIRTUAL_SE_AES_CRYPTO, VIRTUAL_SE_KEY_SLOT, VIRTUAL_SE_PROCESS, VIRTUAL_CMAC_PROCESS, VIRTUAL_SE_AES_GCM_ENC_PROCESS }; /* CMAC response */ struct tegra_vse_cmac_data { u8 status; u8 data[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE]; }; enum cmac_request_type { CMAC_SIGN = 0U, CMAC_VERIFY }; /* * @enum vse_sym_cipher_choice * @brief Symmetric cipher to be used for CMAC sign/verify * Currently two choices are supported - AES, SM4. */ enum vse_sym_cipher_choice { VSE_SYM_CIPH_AES = 0, VSE_SYM_CIPH_SM4 = 0xFFFFFFFF }; /* CMAC request data */ struct tegra_vse_cmac_req_data { enum cmac_request_type request_type; /* For CMAC_VERIFY tag comparison result */ uint8_t result; }; enum gmac_request_type { GMAC_INIT = 0U, GMAC_SIGN, GMAC_VERIFY }; /* GMAC request data */ struct tegra_vse_gmac_req_data { enum gmac_request_type request_type; /* Return IV after GMAC_INIT and pass IV during GMAC_VERIFY*/ unsigned char *iv; bool is_first; /* For GMAC_VERIFY tag comparison result */ uint8_t result; }; enum hmac_sha_request_type { HMAC_SHA_SIGN = 0U, HMAC_SHA_VERIFY }; /* HMAC-SHA request data */ struct tegra_vse_hmac_sha_req_data { /* Enum to specify HMAC-SHA request type i.e. 
SIGN/VERIFY */ enum hmac_sha_request_type request_type; /* Expected digest for HMAC_SHA_VERIFY request */ char *expected_digest; /* Hash comparison result for HMAC_SHA_VERIFY request */ uint8_t result; }; struct tegra_vse_priv_data { struct skcipher_request *req; struct tegra_virtual_se_dev *se_dev; struct completion alg_complete; int cmd; int slot_num; struct scatterlist sg; void *buf; dma_addr_t buf_addr; u32 rx_status; u8 iv[TEGRA_VIRTUAL_SE_AES_MAX_IV_SIZE]; struct tegra_vse_cmac_data cmac; uint32_t syncpt_id; uint32_t syncpt_threshold; uint32_t syncpt_id_valid; }; struct tegra_virtual_se_addr { u32 lo; u32 hi; }; struct tegra_virtual_se_addr64_buf_size { u64 addr; u32 buf_size; }; union tegra_virtual_se_aes_args { struct keyiv { u8 slot[KEYSLOT_SIZE_BYTES]; u32 length; u32 type; u8 data[32]; u8 oiv[TEGRA_VIRTUAL_SE_AES_IV_SIZE]; u8 uiv[TEGRA_VIRTUAL_SE_AES_IV_SIZE]; } key; struct aes_encdec { u8 keyslot[KEYSLOT_SIZE_BYTES]; u32 mode; u32 ivsel; u8 lctr[TEGRA_VIRTUAL_SE_AES_LCTR_SIZE]; u32 ctr_cntn; u64 src_addr; u32 src_buf_size; u64 dst_addr; u32 dst_buf_size; u32 key_length; } op; struct aes_gcm { /** * keyslot handle returned by TOS as part of load key operation. * It must be the first variable in the structure. */ uint8_t keyslot[KEYSLOT_SIZE_BYTES]; uint64_t dst_addr; uint32_t dst_buf_size; uint64_t src_addr; uint32_t src_buf_size; uint64_t aad_addr; uint32_t aad_buf_size; uint64_t tag_addr; uint32_t tag_buf_size; /* TODO: ESLC-6207: use lctr instead*/ uint8_t iv[12]; /** * Key length in bytes. * * Supported key length is 16 bytes */ uint32_t key_length; /* Config for AES-GMAC request */ uint32_t config; u8 expected_tag[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE]; uint64_t gcm_vrfy_res_addr; enum vse_sym_cipher_choice sym_ciph; } op_gcm; struct aes_cmac_sv { u8 keyslot[KEYSLOT_SIZE_BYTES]; u32 config; u32 lastblock_len; u8 lastblock[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE]; u64 src_addr; u32 src_buf_size; u32 key_length; u8 cmac_result[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE]; u64 mac_addr; u64 mac_comp_res_addr; enum vse_sym_cipher_choice sym_ciph; } op_cmac_sv; struct aes_rng { struct tegra_virtual_se_addr dst_addr; } op_rng; }; union tegra_virtual_se_sha_args { struct hash { u32 msg_total_length[4]; u32 msg_left_length[4]; u32 hash[50]; u64 dst; u64 src_addr; u32 src_buf_size; u32 mode; u32 hash_length; } op_hash; } __attribute__((__packed__)); struct tegra_virtual_se_hmac_sha_args { u8 keyslot[KEYSLOT_SIZE_BYTES]; u32 mode; u32 lastblock_len; u8 lastblock[TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT]; u32 msg_total_length[4]; u32 msg_left_length[4]; u64 dst_addr; u64 src_addr; u32 src_buf_size; u8 expected_hmac_sha[TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH]; uint64_t hmac_addr; }; struct tegra_virtual_tsec_args { /** * Keyslot index for keyslot containing TSEC key */ uint64_t keyslot; /** * Size of input buffer in bytes. * The maximum size is given by the macro TEGRA_VIRTUAL_TSEC_MAX_SUPPORTED_BUFLEN */ uint32_t src_buf_size; /** * IOVA address of the input buffer. * Although it is a 64-bit integer, only least significant 40 bits are * used because only a 40-bit address space is supported. */ uint64_t src_addr; /** * For CMAC Verify, this array contains the value to be verified. * Not used for CMAC Sign. 
*/ uint8_t cmac_result[TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE]; }; struct tegra_virtual_se_ivc_resp_msg_t { u32 tag; u32 cmd; u32 status; union { /** The init vector of AES-CBC encryption */ unsigned char iv[TEGRA_VIRTUAL_SE_AES_IV_SIZE]; /** Hash result for AES CMAC */ unsigned char cmac_result[TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE]; /** Keyslot for non */ unsigned char keyslot; }; uint32_t syncpt_id; uint32_t syncpt_threshold; uint32_t syncpt_id_valid; }; struct tegra_virtual_se_ivc_tx_msg_t { u32 tag; u32 cmd; union { union tegra_virtual_se_aes_args aes; union tegra_virtual_se_sha_args sha; struct tegra_virtual_tsec_args tsec; struct tegra_virtual_se_hmac_sha_args hmac; }; }; struct tegra_virtual_se_ivc_hdr_t { u8 header_magic[4]; u32 num_reqs; u32 engine; u8 tag[0x10]; u32 status; }; struct tegra_virtual_se_ivc_msg_t { struct tegra_virtual_se_ivc_hdr_t ivc_hdr; union { struct tegra_virtual_se_ivc_tx_msg_t tx[TEGRA_HV_VSE_MAX_TASKS_PER_SUBMIT]; struct tegra_virtual_se_ivc_resp_msg_t rx[TEGRA_HV_VSE_MAX_TASKS_PER_SUBMIT]; }; }; struct sha_zero_length_vector { unsigned int size; char *digest; }; /* Tegra Virtual Security Engine operation modes */ enum tegra_virtual_se_op_mode { /* (SM3-256) mode */ VIRTUAL_SE_OP_MODE_SM3 = 0, /* Secure Hash Algorithm-256 (SHA256) mode */ VIRTUAL_SE_OP_MODE_SHA256 = 5, /* Secure Hash Algorithm-384 (SHA384) mode */ VIRTUAL_SE_OP_MODE_SHA384, /* Secure Hash Algorithm-512 (SHA512) mode */ VIRTUAL_SE_OP_MODE_SHA512, /* Secure Hash Algorithm-3 (SHA3-256) mode */ VIRTUAL_SE_OP_MODE_SHA3_256 = 10, /* Secure Hash Algorithm-3 (SHA3-384) mode */ VIRTUAL_SE_OP_MODE_SHA3_384, /* Secure Hash Algorithm-3 (SHA3-512) mode */ VIRTUAL_SE_OP_MODE_SHA3_512, /* Secure Hash Algorithm-3 (SHAKE128) mode */ VIRTUAL_SE_OP_MODE_SHAKE128, /* Secure Hash Algorithm-3 (SHAKE256) mode */ VIRTUAL_SE_OP_MODE_SHAKE256, }; enum tegra_virtual_se_aes_op_mode { AES_CBC = 0U, AES_CTR = 2U, AES_SM4_CBC = 0x10000U, AES_SM4_CTR = 0x10002U, }; /* Security Engine request context */ struct tegra_virtual_se_aes_req_context { /* Security Engine device */ struct tegra_virtual_se_dev *se_dev; /* Security Engine operation mode */ enum tegra_virtual_se_aes_op_mode op_mode; /* Operation type */ bool encrypt; /* Engine id */ u8 engine_id; }; enum se_engine_id { VIRTUAL_SE_AES0, VIRTUAL_SE_AES1, VIRTUAL_SE_SHA = 2, VIRTUAL_SE_TSEC = 6, VIRTUAL_GCSE1_SHA = 7, VIRTUAL_GCSE2_SHA = 8, VIRTUAL_MAX_SE_ENGINE_NUM = 9 }; enum tegra_virtual_se_aes_iv_type { AES_ORIGINAL_IV, AES_UPDATED_IV, AES_IV_REG }; static struct tegra_virtual_se_dev *g_virtual_se_dev[VIRTUAL_MAX_SE_ENGINE_NUM]; struct crypto_dev_to_ivc_map *tegra_hv_vse_get_db(void) { return &g_crypto_to_ivc_map[0]; } EXPORT_SYMBOL(tegra_hv_vse_get_db); static int status_to_errno(u32 err) { switch (err) { case 1: /* VSE_MSG_ERR_INVALID_CMD */ case 3: /* VSE_MSG_ERR_INVALID_ARGS */ case 11: /* VSE_MSG_ERR_MAC_INVALID */ return -EINVAL; case 4: /* VSE_MSG_ERR_INVALID_KEY */ case 5: /* VSE_MSG_ERR_CTR_OVERFLOW */ case 6: /* VSE_MSG_ERR_INVALID_SUBKEY */ case 7: /* VSE_MSG_ERR_CTR_NONCE_INVALID */ case 8: /* VSE_MSG_ERR_GCM_IV_INVALID */ case 9: /* VSE_MSG_ERR_GCM_NONCE_INVALID */ case 10: /* VSE_MSG_ERR_GMAC_INVALID_PARAMS */ return -EPERM; } return err; } static int32_t validate_header( struct tegra_virtual_se_dev *se_dev, struct tegra_virtual_se_ivc_hdr_t *pivc_hdr, bool *is_dummy) { int32_t ret = -EIO; if ((pivc_hdr->header_magic[0] == (uint8_t)'N') && (pivc_hdr->header_magic[1] == (uint8_t)'V') && (pivc_hdr->header_magic[2] == (uint8_t)'D') && (pivc_hdr->header_magic[3] 
== (uint8_t)'A')) { pr_debug("Message header\n"); *is_dummy = false; ret = 0; } else if ((pivc_hdr->header_magic[0] == (uint8_t)'D') && (pivc_hdr->header_magic[1] == (uint8_t)'I') && (pivc_hdr->header_magic[2] == (uint8_t)'S') && (pivc_hdr->header_magic[3] == (uint8_t)'C')) { pr_debug("Filler\n"); *is_dummy = true; ret = 0; } else { dev_err(se_dev->dev, "Invalid message header value.\n"); } return ret; } static int is_aes_mode_valid(uint32_t opmode) { int ret = 0; if ((opmode == (uint32_t)AES_CBC) || (opmode == (uint32_t)AES_SM4_CBC) || (opmode == (uint32_t)AES_SM4_CTR) || (opmode == (uint32_t)AES_CTR)) { ret = 1; } return ret; } static int read_and_validate_dummy_msg( struct tegra_virtual_se_dev *se_dev, struct tegra_hv_ivc_cookie *pivck, uint32_t node_id, bool *is_dummy) { int err = 0, read_size = -1; struct tegra_virtual_se_ivc_msg_t *ivc_msg; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; size_t size_ivc_msg = sizeof(struct tegra_virtual_se_ivc_msg_t); ivc_msg = devm_kzalloc(se_dev->dev, size_ivc_msg, GFP_KERNEL); if (!ivc_msg) return -ENOMEM; read_size = tegra_hv_ivc_read(pivck, ivc_msg, size_ivc_msg); if (read_size > 0 && read_size < size_ivc_msg) { devm_kfree(se_dev->dev, ivc_msg); dev_err(se_dev->dev, "Wrong read msg len %d\n", read_size); return -EINVAL; } ivc_hdr = &(ivc_msg->ivc_hdr); err = validate_header(se_dev, ivc_hdr, is_dummy); devm_kfree(se_dev->dev, ivc_msg); return err; } static int read_and_validate_valid_msg( struct tegra_virtual_se_dev *se_dev, struct tegra_hv_ivc_cookie *pivck, uint32_t node_id, bool *is_dummy, bool waited) { struct tegra_vse_tag *p_dat; struct tegra_vse_priv_data *priv; struct tegra_virtual_se_ivc_msg_t *ivc_msg; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_aes_req_context *req_ctx; struct tegra_virtual_se_ivc_resp_msg_t *ivc_rx; enum ivc_irq_state *irq_state; int read_size = -1, err = 0; size_t size_ivc_msg = sizeof(struct tegra_virtual_se_ivc_msg_t); irq_state = &(g_crypto_to_ivc_map[node_id].wait_interrupt); if (!tegra_hv_ivc_can_read(pivck)) { *irq_state = INTERMEDIATE_REQ_INTERRUPT; dev_info(se_dev->dev, "%s(): no valid message, await interrupt.\n", __func__); return -EAGAIN; } ivc_msg = devm_kzalloc(se_dev->dev, size_ivc_msg, GFP_KERNEL); if (!ivc_msg) return -ENOMEM; read_size = tegra_hv_ivc_read(pivck, ivc_msg, size_ivc_msg); if (read_size > 0 && read_size < size_ivc_msg) { dev_err(se_dev->dev, "Wrong read msg len %d\n", read_size); err = -EINVAL; goto deinit; } ivc_hdr = &(ivc_msg->ivc_hdr); err = validate_header(se_dev, ivc_hdr, is_dummy); if (err != 0) goto deinit; if (*is_dummy) { dev_err(se_dev->dev, "%s(): Wrong response sequence\n", __func__); goto deinit; } p_dat = (struct tegra_vse_tag *)ivc_msg->ivc_hdr.tag; priv = (struct tegra_vse_priv_data *)p_dat->priv_data; if (!priv) { pr_err("%s no call back info\n", __func__); goto deinit; } priv->syncpt_id = ivc_msg->rx[0].syncpt_id; priv->syncpt_threshold = ivc_msg->rx[0].syncpt_threshold; priv->syncpt_id_valid = ivc_msg->rx[0].syncpt_id_valid; switch (priv->cmd) { case VIRTUAL_SE_AES_CRYPTO: priv->rx_status = ivc_msg->rx[0].status; req_ctx = skcipher_request_ctx(priv->req); if ((!priv->rx_status) && (req_ctx->encrypt == true) && (is_aes_mode_valid(req_ctx->op_mode) == 1)) { memcpy(priv->iv, ivc_msg->rx[0].iv, TEGRA_VIRTUAL_SE_AES_IV_SIZE); } break; case VIRTUAL_SE_KEY_SLOT: ivc_rx = &ivc_msg->rx[0]; priv->slot_num = ivc_rx->keyslot; break; case VIRTUAL_SE_PROCESS: ivc_rx = &ivc_msg->rx[0]; priv->rx_status = ivc_rx->status; break; case VIRTUAL_CMAC_PROCESS: ivc_rx = 
&ivc_msg->rx[0];
		priv->rx_status = ivc_rx->status;
		priv->cmac.status = ivc_rx->status;
		if (!ivc_rx->status) {
			memcpy(priv->cmac.data, ivc_rx->cmac_result,
				TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE);
		}
		break;
	case VIRTUAL_SE_AES_GCM_ENC_PROCESS:
		ivc_rx = &ivc_msg->rx[0];
		priv->rx_status = ivc_rx->status;
		if (!ivc_rx->status)
			memcpy(priv->iv, ivc_rx->iv, TEGRA_VIRTUAL_SE_AES_GCM_IV_SIZE);
		break;
	default:
		dev_err(se_dev->dev, "Unknown command\n");
		waited = false;
	}

	if (waited)
		complete(&priv->alg_complete);

deinit:
	devm_kfree(se_dev->dev, ivc_msg);
	return err;
}

/* Push one request into the IVC queue, polling (bounded by
 * TEGRA_VIRTUAL_SE_TIMEOUT_1S) for channel reset completion and write space.
 */
static int tegra_hv_vse_safety_send_ivc(
	struct tegra_virtual_se_dev *se_dev,
	struct tegra_hv_ivc_cookie *pivck,
	void *pbuf, int length)
{
	u32 timeout;
	int err = 0;

	timeout = TEGRA_VIRTUAL_SE_TIMEOUT_1S;
	while (tegra_hv_ivc_channel_notified(pivck) != 0) {
		if (!timeout) {
			dev_err(se_dev->dev, "ivc reset timeout\n");
			return -EINVAL;
		}
		udelay(1);
		timeout--;
	}

	timeout = TEGRA_VIRTUAL_SE_TIMEOUT_1S;
	while (tegra_hv_ivc_can_write(pivck) == 0) {
		if (!timeout) {
			dev_err(se_dev->dev, "ivc send message timeout\n");
			return -EINVAL;
		}
		udelay(1);
		timeout--;
	}

	if (length > sizeof(struct tegra_virtual_se_ivc_msg_t)) {
		dev_err(se_dev->dev, "Wrong write msg len %d\n", length);
		return -E2BIG;
	}

	err = tegra_hv_ivc_write(pivck, pbuf, length);
	if (err < 0) {
		dev_err(se_dev->dev, "ivc write error!!! error=%d\n", err);
		return err;
	}

	return 0;
}

/* Send an IVC request and wait for the SE server response; if the response
 * carries a valid syncpoint, the caller additionally waits on the syncpoint
 * threshold below.
 */
static int tegra_hv_vse_safety_send_ivc_wait(
	struct tegra_virtual_se_dev *se_dev,
	struct tegra_hv_ivc_cookie *pivck,
	struct tegra_vse_priv_data *priv,
	void *pbuf, int length, uint32_t node_id)
{
	struct host1x_syncpt *sp;
	struct host1x *host1x;
	int err;
	bool is_dummy = false;
	u64 time_left;
	enum ivc_irq_state *irq_state;

	mutex_lock(&g_crypto_to_ivc_map[node_id].se_ivc_lock);

	if (!se_dev->host1x_pdev) {
		dev_err(se_dev->dev, "host1x pdev not initialized\n");
		err = -ENODATA;
		goto exit;
	}

	host1x = platform_get_drvdata(se_dev->host1x_pdev);
	if (!host1x) {
		dev_err(se_dev->dev, "No platform data for host1x!\n");
		err = -ENODATA;
		goto exit;
	}

	/* Return error if engine is in suspended state */
	if (atomic_read(&se_dev->se_suspended)) {
		err = -ENODEV;
		goto exit;
	}

	err = tegra_hv_vse_safety_send_ivc(se_dev, pivck, pbuf, length);
	if (err) {
		dev_err(se_dev->dev, "\n %s send ivc failed %d\n", __func__, err);
		goto exit;
	}

	mutex_lock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));
	irq_state = &(se_dev->crypto_to_ivc_map[node_id].wait_interrupt);
	if (*irq_state == NO_INTERRUPT) {
		err = read_and_validate_dummy_msg(se_dev, pivck, node_id, &is_dummy);
		if (err != 0) {
			dev_err(se_dev->dev, "Failed to read and validate dummy message.\n");
			mutex_unlock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));
			goto exit;
		}

		if (is_dummy) {
			err = read_and_validate_valid_msg(se_dev, pivck, node_id,
					&is_dummy, false);
			if (err != 0 && err != -EAGAIN) {
				dev_err(se_dev->dev, "Failed to read & validate valid message.\n");
				mutex_unlock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));
				goto exit;
			}
			mutex_unlock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));

			if (err == -EAGAIN) {
				err = 0;
				pr_debug("%s(): wait_interrupt = %u", __func__, *irq_state);
				time_left = wait_for_completion_timeout(&priv->alg_complete,
						TEGRA_HV_VSE_TIMEOUT);
				if (time_left == 0) {
					dev_err(se_dev->dev, "%s timeout\n", __func__);
					err = -ETIMEDOUT;
					goto exit;
				}
			}
			pr_debug("%s(): wait_interrupt = %u", __func__, *irq_state);
		} else {
			dev_err(se_dev->dev,
				"%s(): Invalid response sequence, expected dummy message.\n",
				__func__);
			mutex_unlock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));
			goto exit;
		}
} else { mutex_unlock(&(g_crypto_to_ivc_map[node_id].irq_state_lock)); time_left = wait_for_completion_timeout(&priv->alg_complete, TEGRA_HV_VSE_TIMEOUT); if (time_left == 0) { dev_err(se_dev->dev, "%s timeout\n", __func__); err = -ETIMEDOUT; goto exit; } } /* If this is not last request then wait using nvhost API*/ if (priv->syncpt_id_valid) { sp = host1x_syncpt_get_by_id_noref(host1x, priv->syncpt_id); if (!sp) { dev_err(se_dev->dev, "No syncpt for syncpt id %d\n", priv->syncpt_id); err = -ENODATA; goto exit; } err = host1x_syncpt_wait(sp, priv->syncpt_threshold, (u32)SE_MAX_SCHEDULE_TIMEOUT, NULL); if (err) { dev_err(se_dev->dev, "timed out for syncpt %u threshold %u err %d\n", priv->syncpt_id, priv->syncpt_threshold, err); err = -ETIMEDOUT; goto exit; } } exit: mutex_unlock(&g_crypto_to_ivc_map[node_id].se_ivc_lock); return err; } static int tegra_hv_vse_safety_prepare_ivc_linked_list( struct tegra_virtual_se_dev *se_dev, struct scatterlist *sg, u32 total_len, int max_ll_len, int block_size, int *num_lists, enum dma_data_direction dir, unsigned int *num_mapped_sgs, struct tegra_virtual_se_addr64_buf_size *src_addr64) { struct scatterlist *src_sg; int err = 0; int sg_count = 0; int i = 0; int len, process_len; u32 addr, addr_offset; src_sg = sg; while (src_sg && total_len) { err = dma_map_sg(se_dev->dev, src_sg, 1, dir); if (!err) { dev_err(se_dev->dev, "dma_map_sg() error\n"); err = -EINVAL; goto exit; } sg_count++; len = min(src_sg->length, total_len); addr = sg_dma_address(src_sg); addr_offset = 0; while (len >= TEGRA_VIRTUAL_SE_MAX_BUFFER_SIZE) { process_len = TEGRA_VIRTUAL_SE_MAX_BUFFER_SIZE - block_size; if (i >= max_ll_len) { dev_err(se_dev->dev, "Unsupported no. of list %d\n", i); err = -EINVAL; goto exit; } src_addr64[i].addr = (uint64_t)(addr + addr_offset); src_addr64[i].buf_size = process_len; i++; addr_offset += process_len; total_len -= process_len; len -= process_len; } if (len) { if (i >= max_ll_len) { dev_err(se_dev->dev, "Unsupported no. 
of list %d\n", i); err = -EINVAL; goto exit; } src_addr64[i].addr = (uint64_t)(addr + addr_offset); src_addr64[i].buf_size = len; i++; } total_len -= len; src_sg = sg_next(src_sg); } *num_lists = (i + *num_lists); *num_mapped_sgs = sg_count; return 0; exit: src_sg = sg; while (src_sg && sg_count--) { dma_unmap_sg(se_dev->dev, src_sg, 1, dir); src_sg = sg_next(src_sg); } *num_mapped_sgs = 0; return err; } static int tegra_hv_vse_safety_send_sha_data(struct tegra_virtual_se_dev *se_dev, struct ahash_request *req, struct tegra_virtual_se_ivc_msg_t *ivc_req_msg, u32 count, bool islast) { struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr = NULL; struct tegra_virtual_se_sha_context *sha_ctx; struct tegra_hv_ivc_cookie *pivck; struct tegra_vse_priv_data *priv = NULL; struct tegra_virtual_se_req_context *req_ctx; struct tegra_vse_tag *priv_data_ptr; union tegra_virtual_se_sha_args *psha = NULL; int err = 0; u64 total_count = 0, msg_len = 0; if (!req) { pr_err("%s: SHA request not valid\n", __func__); return -EINVAL; } sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); pivck = g_crypto_to_ivc_map[sha_ctx->node_id].ivck; if (!ivc_req_msg) { dev_err(se_dev->dev, "%s Invalid ivc_req_msg\n", __func__); return -EINVAL; } priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; req_ctx = ahash_request_ctx(req); total_count = req_ctx->total_count; ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->engine = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_SHA_HASH; psha = &(ivc_tx->sha); psha->op_hash.mode = req_ctx->mode; psha->op_hash.msg_total_length[0] = count; psha->op_hash.msg_total_length[1] = 0; psha->op_hash.msg_total_length[2] = 0; psha->op_hash.msg_total_length[3] = 0; psha->op_hash.msg_left_length[0] = count; psha->op_hash.msg_left_length[1] = 0; psha->op_hash.msg_left_length[2] = 0; psha->op_hash.msg_left_length[3] = 0; psha->op_hash.hash_length = sha_ctx->digest_size; if (islast) { psha->op_hash.msg_total_length[0] = total_count & 0xFFFFFFFF; psha->op_hash.msg_total_length[1] = total_count >> 32; } else { msg_len = count + 8; psha->op_hash.msg_left_length[0] = msg_len & 0xFFFFFFFF; psha->op_hash.msg_left_length[1] = msg_len >> 32; if (req_ctx->is_first) { psha->op_hash.msg_total_length[0] = msg_len & 0xFFFFFFFF; psha->op_hash.msg_total_length[1] = msg_len >> 32; req_ctx->is_first = false; } else { msg_len += 8; psha->op_hash.msg_total_length[0] = msg_len & 0xFFFFFFFF; psha->op_hash.msg_total_length[1] = msg_len >> 32; } } ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->num_reqs = 1; priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->cmd = VIRTUAL_SE_PROCESS; priv->se_dev = se_dev; g_crypto_to_ivc_map[sha_ctx->node_id].vse_thread_start = true; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), sha_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto exit; } exit: devm_kfree(se_dev->dev, priv); return err; } static int tegra_hv_vse_safety_sha_send_one(struct ahash_request *req, u32 nbytes, bool islast) { struct tegra_virtual_se_dev *se_dev; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL; struct 
tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); struct tegra_virtual_se_sha_context *sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); uint8_t *residual_data_buf = sha_ctx->residual_plaintext->buf_ptr; int err = 0; uint32_t engine_id; sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; se_dev = g_virtual_se_dev[engine_id]; ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) return -ENOMEM; if (islast == true && (req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 || req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256)) { residual_data_buf[nbytes] = 0xff; nbytes++; req_ctx->total_count++; } ivc_tx = &ivc_req_msg->tx[0]; ivc_tx->sha.op_hash.src_addr = sha_ctx->residual_plaintext->buf_iova; ivc_tx->sha.op_hash.src_buf_size = nbytes; ivc_tx->sha.op_hash.dst = (u64)sha_ctx->hash_result->buf_iova; memcpy(ivc_tx->sha.op_hash.hash, sha_ctx->hash_result->buf_ptr, req_ctx->intermediate_digest_size); err = tegra_hv_vse_safety_send_sha_data(se_dev, req, ivc_req_msg, nbytes, islast); if (err) { dev_err(se_dev->dev, "%s error %d\n", __func__, err); goto exit; } exit: devm_kfree(se_dev->dev, ivc_req_msg); return err; } static int tegra_hv_vse_safety_sha_fast_path(struct ahash_request *req, bool is_last, bool process_cur_req) { struct tegra_virtual_se_dev *se_dev; u32 bytes_process_in_req = 0, num_blks; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL; struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); struct tegra_virtual_se_sha_context *sha_ctx; int err = 0; u32 nbytes_in_req = req->nbytes; uint32_t engine_id; sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; se_dev = g_virtual_se_dev[engine_id]; /* process_cur_req is_last : * false false : update() -> hash * true true : finup(), digest() -> hash * true : finup(), digest(), final() -> result */ ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) return -ENOMEM; if ((process_cur_req == false && is_last == false) || (process_cur_req == true && is_last == true)) { /* When calling update(), if req->nbytes is aligned with * req_ctx->blk_size, reduce req->nbytes with req_ctx->blk_size * to avoid hashing zero length input at the end. */ if (req_ctx->residual_bytes == req_ctx->blk_size) { err = tegra_hv_vse_safety_sha_send_one(req, req_ctx->residual_bytes, false); if (err) { dev_err(se_dev->dev, "%s: failed to send residual data %u\n", __func__, req_ctx->residual_bytes); goto free; } req_ctx->residual_bytes = 0; } num_blks = nbytes_in_req / req_ctx->blk_size; req_ctx->residual_bytes = nbytes_in_req - (num_blks * req_ctx->blk_size); if (num_blks > 0 && req_ctx->residual_bytes == 0) { /* blk_size aligned. reduce size with one blk and * handle it in the next call. 
			 */
			req_ctx->residual_bytes = req_ctx->blk_size;
			req_ctx->total_count += req_ctx->residual_bytes;
			num_blks--;
			sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src),
					sha_ctx->residual_plaintext->buf_ptr,
					req_ctx->residual_bytes,
					num_blks * req_ctx->blk_size);
		} else {
			/* not aligned at all */
			req_ctx->total_count += req_ctx->residual_bytes;
			sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src),
					sha_ctx->residual_plaintext->buf_ptr,
					req_ctx->residual_bytes,
					num_blks * req_ctx->blk_size);
		}

		nbytes_in_req -= req_ctx->residual_bytes;
		dev_dbg(se_dev->dev, "%s: req_ctx->residual_bytes %u\n",
			__func__, req_ctx->residual_bytes);

		if (num_blks > 0) {
			ivc_tx = &ivc_req_msg->tx[0];
			bytes_process_in_req = num_blks * req_ctx->blk_size;
			dev_dbg(se_dev->dev, "%s: bytes_process_in_req %u\n",
				__func__, bytes_process_in_req);

			sg_copy_to_buffer(req->src, sg_nents(req->src),
					sha_ctx->plaintext->buf_ptr,
					bytes_process_in_req);

			ivc_tx->sha.op_hash.src_addr = sha_ctx->plaintext->buf_iova;
			ivc_tx->sha.op_hash.src_buf_size = bytes_process_in_req;
			ivc_tx->sha.op_hash.dst = (u64)sha_ctx->hash_result->buf_iova;
			memcpy(ivc_tx->sha.op_hash.hash, sha_ctx->hash_result->buf_ptr,
				req_ctx->intermediate_digest_size);
			req_ctx->total_count += bytes_process_in_req;

			err = tegra_hv_vse_safety_send_sha_data(se_dev, req,
					ivc_req_msg, bytes_process_in_req, false);
			if (err) {
				dev_err(se_dev->dev, "%s error %d\n", __func__, err);
				goto free;
			}
		}

		if (req_ctx->residual_bytes > 0 &&
				req_ctx->residual_bytes < req_ctx->blk_size) {
			/* At this point, the buffer is not aligned with
			 * blk_size. Thus, buffer alignment needs to be done
			 * via the slow path.
			 */
			req_ctx->force_align = true;
		}
	}

	if (is_last) {
		/* handle the last data in finup(), digest() */
		if (req_ctx->residual_bytes > 0) {
			err = tegra_hv_vse_safety_sha_send_one(req,
					req_ctx->residual_bytes, true);
			if (err) {
				dev_err(se_dev->dev,
					"%s: failed to send last data %u\n",
					__func__, req_ctx->residual_bytes);
				goto free;
			}
			req_ctx->residual_bytes = 0;
		}

		if (req->result) {
			memcpy(req->result, sha_ctx->hash_result->buf_ptr,
				req_ctx->digest_size);
		} else {
			dev_err(se_dev->dev, "Invalid client result buffer\n");
		}
	}

free:
	devm_kfree(se_dev->dev, ivc_req_msg);
	return err;
}

/* Slow path: accumulate request data into the residual bounce buffer so that
 * every hash submission except the last one is block-size aligned.
 */
static int tegra_hv_vse_safety_sha_slow_path(struct ahash_request *req,
	bool is_last, bool process_cur_req)
{
	struct tegra_virtual_se_dev *se_dev;
	struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req);
	struct tegra_virtual_se_sha_context *sha_ctx;
	u32 nblk_bytes = 0, num_blks, buflen = SZ_4M;
	u32 length = 0, skip = 0, offset = 0;
	u64 total_bytes = 0, left_bytes = 0;
	int err = 0;
	bool data_processed;
	uint32_t engine_id;

	sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine;
	se_dev = g_virtual_se_dev[engine_id];

	if ((process_cur_req == false && is_last == false) ||
			(process_cur_req == true && is_last == true)) {
		total_bytes = req_ctx->residual_bytes + req->nbytes;
		num_blks = total_bytes / req_ctx->blk_size;
		nblk_bytes = num_blks * req_ctx->blk_size;
		offset = req_ctx->residual_bytes;

		/* if blk_size aligned, reduce 1 blk_size for the last hash */
		if ((total_bytes - nblk_bytes) == 0)
			total_bytes -= req_ctx->blk_size;

		left_bytes = req->nbytes;
		data_processed = false;

		while (total_bytes >= req_ctx->blk_size) {
			/* Copy to linear buffer */
			num_blks = total_bytes / req_ctx->blk_size;
			nblk_bytes = num_blks * req_ctx->blk_size;
			length = min(buflen, nblk_bytes) - offset;

			sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src),
					sha_ctx->residual_plaintext->buf_ptr + offset,
					length, skip);
			skip += length;
			req_ctx->total_count += length;

			/* Hash */
			err = tegra_hv_vse_safety_sha_send_one(req, length + offset, false);
			if (err) {
				dev_err(se_dev->dev, "%s: failed to send one %u\n",
					__func__, length + offset);
				return err;
			}
			total_bytes -= (length + offset);
			left_bytes -= length;
			offset = 0;
			data_processed = true;
		}

		if (data_processed == true) {
			/* Processed in while() loop */
			sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src),
					sha_ctx->residual_plaintext->buf_ptr,
					left_bytes, skip);
			req_ctx->total_count += left_bytes;
			req_ctx->residual_bytes = left_bytes;
		} else {
			/* Accumulate the request */
			sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src),
					sha_ctx->residual_plaintext->buf_ptr +
					req_ctx->residual_bytes,
					req->nbytes, skip);
			req_ctx->total_count += req->nbytes;
			req_ctx->residual_bytes += req->nbytes;
		}

		if (req_ctx->force_align == true &&
				req_ctx->residual_bytes == req_ctx->blk_size) {
			/* At this point, the buffer is aligned with blk_size.
			 * Thus, the next call can use fast path.
			 */
			req_ctx->force_align = false;
		}
	}

	if (is_last) {
		/* handle the last data in finup(), digest() */
		if (req_ctx->residual_bytes > 0) {
			err = tegra_hv_vse_safety_sha_send_one(req,
					req_ctx->residual_bytes, true);
			if (err) {
				dev_err(se_dev->dev,
					"%s: failed to send last data %u\n",
					__func__, req_ctx->residual_bytes);
				return err;
			}
			req_ctx->residual_bytes = 0;
		}

		if (req->result) {
			memcpy(req->result, sha_ctx->hash_result->buf_ptr,
				req_ctx->digest_size);
		} else {
			dev_err(se_dev->dev, "Invalid client result buffer\n");
		}
	}

	return err;
}

static int tegra_hv_vse_safety_sha_op(struct ahash_request *req, bool is_last,
	bool process_cur_req)
{
	struct tegra_virtual_se_dev *se_dev;
	struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req);
	struct tegra_virtual_se_sha_context *sha_ctx;
	u32 mode;
	u32 num_blks;
	int ret;
	uint32_t engine_id;
	struct sha_zero_length_vector zero_vec[] = {
		{
			.size = SM3_DIGEST_SIZE,
			.digest = "\x1a\xb2\x1d\x83\x55\xcf\xa1\x7f"
				  "\x8e\x61\x19\x48\x31\xe8\x1a\x8f"
				  "\x22\xbe\xc8\xc7\x28\xfe\xfb\x74"
				  "\x7e\xd0\x35\xeb\x50\x82\xaa\x2b",
		}, {
			.size = SHA256_DIGEST_SIZE,
			.digest = "\xe3\xb0\xc4\x42\x98\xfc\x1c\x14"
				  "\x9a\xfb\xf4\xc8\x99\x6f\xb9\x24"
				  "\x27\xae\x41\xe4\x64\x9b\x93\x4c"
				  "\xa4\x95\x99\x1b\x78\x52\xb8\x55",
		}, {
			.size = SHA384_DIGEST_SIZE,
			.digest = "\x38\xb0\x60\xa7\x51\xac\x96\x38"
				  "\x4c\xd9\x32\x7e\xb1\xb1\xe3\x6a"
				  "\x21\xfd\xb7\x11\x14\xbe\x07\x43"
				  "\x4c\x0c\xc7\xbf\x63\xf6\xe1\xda"
				  "\x27\x4e\xde\xbf\xe7\x6f\x65\xfb"
				  "\xd5\x1a\xd2\xf1\x48\x98\xb9\x5b",
		}, {
			.size = SHA512_DIGEST_SIZE,
			.digest = "\xcf\x83\xe1\x35\x7e\xef\xb8\xbd"
				  "\xf1\x54\x28\x50\xd6\x6d\x80\x07"
				  "\xd6\x20\xe4\x05\x0b\x57\x15\xdc"
				  "\x83\xf4\xa9\x21\xd3\x6c\xe9\xce"
				  "\x47\xd0\xd1\x3c\x5d\x85\xf2\xb0"
				  "\xff\x83\x18\xd2\x87\x7e\xec\x2f"
				  "\x63\xb9\x31\xbd\x47\x41\x7a\x81"
				  "\xa5\x38\x32\x7a\xf9\x27\xda\x3e",
		}, {
			.size = SHA3_256_DIGEST_SIZE,
			.digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
				  "\x51\xc1\x47\x56\xa0\x61\xd6\x62"
				  "\xf5\x80\xff\x4d\xe4\x3b\x49\xfa"
				  "\x82\xd8\x0a\x4b\x80\xf8\x43\x4a",
		}, {
			.size = SHA3_384_DIGEST_SIZE,
			.digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
				  "\x01\x10\x7d\x85\x2e\x4c\x24\x85"
				  "\xc5\x1a\x50\xaa\xaa\x94\xfc\x61"
				  "\x99\x5e\x71\xbb\xee\x98\x3a\x2a"
				  "\xc3\x71\x38\x31\x26\x4a\xdb\x47"
				  "\xfb\x6b\xd1\xe0\x58\xd5\xf0\x04",
		}, {
			.size = SHA3_512_DIGEST_SIZE,
			.digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
				  "\xc8\xb5\x67\xdc\x18\x5a\x75\x6e"
				  "\x97\xc9\x82\x16\x4f\xe2\x58\x59"
				  "\xe0\xd1\xdc\xc1\x47\x5c\x80\xa6"
				  "\x15\xb2\x12\x3a\xf1\xf5\xf9\x4c"
				  "\x11\xe3\xe9\x40\x2c\x3a\xc5\x58"
				  "\xf5\x00\x19\x9d\x95\xb6\xd3\xe3"
				  "\x01\x75\x85\x86\x28\x1d\xcd\x26",
		}
	};

	sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine;
	se_dev = g_virtual_se_dev[engine_id];

	if (req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 ||
			req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256) {
		if (req_ctx->digest_size == 0) {
			dev_info(se_dev->dev, "digest size is 0\n");
			return 0;
		}
	}

	if (req->nbytes == 0) {
		if (req_ctx->total_count > 0) {
			if (is_last == false) {
				dev_info(se_dev->dev, "empty packet\n");
				return 0;
			}
			if (req_ctx->residual_bytes > 0) { /* final() */
				ret = tegra_hv_vse_safety_sha_send_one(req,
						req_ctx->residual_bytes, is_last);
				if (ret) {
					dev_err(se_dev->dev,
						"%s: failed to send last data %u\n",
						__func__, req_ctx->residual_bytes);
					return ret;
				}
				req_ctx->residual_bytes = 0;
			}
			if (is_last) {
				if (req->result) {
					memcpy(req->result,
						sha_ctx->hash_result->buf_ptr,
						req_ctx->digest_size);
				} else {
					dev_err(se_dev->dev,
						"Invalid client result buffer\n");
				}
			}
			return 0;
		}

		if (req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 ||
				req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256) {
			ret = tegra_hv_vse_safety_sha_send_one(req, 0, is_last);
			if (ret) {
				dev_err(se_dev->dev,
					"%s: failed to send last data\n", __func__);
				return ret;
			}
			if (is_last) {
				if (req->result) {
					memcpy(req->result,
						sha_ctx->hash_result->buf_ptr,
						req_ctx->digest_size);
				} else {
					dev_err(se_dev->dev,
						"Invalid client result buffer\n");
				}
			}
			return 0;
		}

		/* If the request length is zero, SW WAR for zero length SHA
		 * operation since SE HW can't accept zero length SHA operation
		 */
		if (req_ctx->mode == VIRTUAL_SE_OP_MODE_SM3)
			mode = VIRTUAL_SE_OP_MODE_SM3;
		else if (req_ctx->mode < VIRTUAL_SE_OP_MODE_SHA3_256)
			mode = req_ctx->mode - VIRTUAL_SE_OP_MODE_SHA256 + 1;
		else
			mode = req_ctx->mode - VIRTUAL_SE_OP_MODE_SHA256 - 1;

		if (is_last) {
			if (req->result) {
				memcpy(req->result, zero_vec[mode].digest,
					zero_vec[mode].size);
			} else {
				dev_err(se_dev->dev, "Invalid client result buffer\n");
			}
		}
		return 0;
	}

	num_blks = req->nbytes / req_ctx->blk_size;

	if (sg_nents(req->src) > 1)
		req_ctx->force_align = true;

	if (req_ctx->force_align == false && num_blks > 0)
		ret = tegra_hv_vse_safety_sha_fast_path(req, is_last, process_cur_req);
	else
		ret = tegra_hv_vse_safety_sha_slow_path(req, is_last, process_cur_req);

	return ret;
}

static int tegra_hv_vse_safety_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm;
	struct tegra_virtual_se_req_context *req_ctx;
	struct tegra_virtual_se_sha_context *sha_ctx;
	struct tegra_virtual_se_dev *se_dev;
	uint32_t hash_result_len;
	uint32_t engine_id;

	sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine;
	se_dev = g_virtual_se_dev[engine_id];

	if (!req) {
		dev_err(se_dev->dev, "SHA request not valid\n");
		return -EINVAL;
	}

	/* Return error if engine is in suspended state */
	if (atomic_read(&se_dev->se_suspended))
		return -ENODEV;

	req_ctx = ahash_request_ctx(req);
	if (!req_ctx) {
		dev_err(se_dev->dev, "SHA req_ctx not valid\n");
		return -EINVAL;
	}

	tfm = crypto_ahash_reqtfm(req);
	if (!tfm) {
		dev_err(se_dev->dev, "SHA transform not valid\n");
		return -EINVAL;
	}
	sha_ctx = crypto_ahash_ctx(tfm);

	req_ctx->digest_size = crypto_ahash_digestsize(tfm);
	if (strcmp(crypto_ahash_alg_name(tfm), "sha256-vse") == 0) {
		req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA256;
		req_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT;
		req_ctx->intermediate_digest_size = SHA256_DIGEST_SIZE;
	} else if (strcmp(crypto_ahash_alg_name(tfm), "sha384-vse") == 0) {
		req_ctx->mode
= VIRTUAL_SE_OP_MODE_SHA384; req_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1024BIT; /* * The intermediate digest size of SHA384 is same as SHA512 */ req_ctx->intermediate_digest_size = SHA512_DIGEST_SIZE; } else if (strcmp(crypto_ahash_alg_name(tfm), "sha512-vse") == 0) { req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA512; req_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1024BIT; req_ctx->intermediate_digest_size = SHA512_DIGEST_SIZE; } else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-256-vse") == 0) { req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA3_256; req_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1088BIT; req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; } else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-384-vse") == 0) { req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA3_384; req_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_832BIT; req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; } else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-512-vse") == 0) { req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA3_512; req_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_576BIT; req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; } else if (strcmp(crypto_ahash_alg_name(tfm), "shake128-vse") == 0) { req_ctx->mode = VIRTUAL_SE_OP_MODE_SHAKE128; req_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1344BIT; req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; req_ctx->digest_size = sha_ctx->digest_size; } else if (strcmp(crypto_ahash_alg_name(tfm), "shake256-vse") == 0) { req_ctx->mode = VIRTUAL_SE_OP_MODE_SHAKE256; req_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1088BIT; req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; req_ctx->digest_size = sha_ctx->digest_size; } else if ((strcmp(crypto_ahash_alg_name(tfm), "sm3-vse") == 0) && (se_dev->chipdata->sm_supported)) { req_ctx->mode = VIRTUAL_SE_OP_MODE_SM3; req_ctx->blk_size = SM3_BLOCK_SIZE; req_ctx->intermediate_digest_size = SM3_DIGEST_SIZE; } else { dev_err(se_dev->dev, "Invalid SHA Mode\n"); return -EINVAL; } sha_ctx->residual_plaintext = &g_node_dma[sha_ctx->node_id].se_dma_buf[0]; if (!sha_ctx->residual_plaintext->buf_ptr) { dev_err(se_dev->dev, "%s res_buf is NULL\n", __func__); return -ENOMEM; } sha_ctx->plaintext = &g_node_dma[sha_ctx->node_id].se_dma_buf[1]; if (!sha_ctx->plaintext->buf_ptr) { dev_err(se_dev->dev, "%s src_buf is NULL\n", __func__); return -ENOMEM; } if ((req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128) || (req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256)) { hash_result_len = sha_ctx->digest_size; } else { hash_result_len = req_ctx->intermediate_digest_size; } sha_ctx->hash_result = &g_node_dma[sha_ctx->node_id].se_dma_buf[2]; if (!sha_ctx->hash_result->buf_ptr) { dev_err(se_dev->dev, "%s hash_result is NULL\n", __func__); return -ENOMEM; } if (hash_result_len > sha_ctx->hash_result->buf_len) { dev_err(se_dev->dev, "%s hash_result buffer size insufficient\n", __func__); return -ENOMEM; } req_ctx->total_count = 0; req_ctx->is_first = true; req_ctx->residual_bytes = 0; req_ctx->req_context_initialized = true; req_ctx->force_align = false; return 0; } static void tegra_hv_vse_safety_sha_req_deinit(struct ahash_request *req) { struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); struct tegra_virtual_se_sha_context *sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); sha_ctx->residual_plaintext = NULL; sha_ctx->plaintext = NULL; sha_ctx->hash_result = NULL; req_ctx->req_context_initialized = false; } static int tegra_hv_vse_safety_sha_update(struct ahash_request *req) { struct 
tegra_virtual_se_dev *se_dev; struct tegra_virtual_se_req_context *req_ctx; int ret = 0; uint32_t engine_id; struct tegra_virtual_se_sha_context *sha_ctx; sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); req_ctx = ahash_request_ctx(req); engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; se_dev = g_virtual_se_dev[engine_id]; if (!req) { dev_err(se_dev->dev, "SHA request not valid\n"); return -EINVAL; } /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; req_ctx = ahash_request_ctx(req); if (!req_ctx->req_context_initialized) { dev_err(se_dev->dev, "%s Request ctx not initialized\n", __func__); return -EINVAL; } ret = tegra_hv_vse_safety_sha_op(req, false, false); if (ret) dev_err(se_dev->dev, "tegra_se_sha_update failed - %d\n", ret); return ret; } static int tegra_hv_vse_safety_sha_finup(struct ahash_request *req) { struct tegra_virtual_se_dev *se_dev; struct tegra_virtual_se_req_context *req_ctx; int ret = 0; uint32_t engine_id; struct tegra_virtual_se_sha_context *sha_ctx; sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); req_ctx = ahash_request_ctx(req); engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; se_dev = g_virtual_se_dev[engine_id]; if (!req) { dev_err(se_dev->dev, "SHA request not valid\n"); return -EINVAL; } /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; req_ctx = ahash_request_ctx(req); if (!req_ctx->req_context_initialized) { dev_err(se_dev->dev, "%s Request ctx not initialized\n", __func__); return -EINVAL; } ret = tegra_hv_vse_safety_sha_op(req, true, true); if (ret) dev_err(se_dev->dev, "tegra_se_sha_finup failed - %d\n", ret); tegra_hv_vse_safety_sha_req_deinit(req); return ret; } static int tegra_hv_vse_safety_sha_final(struct ahash_request *req) { struct tegra_virtual_se_dev *se_dev; struct tegra_virtual_se_req_context *req_ctx; int ret = 0; uint32_t engine_id; struct tegra_virtual_se_sha_context *sha_ctx; sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); req_ctx = ahash_request_ctx(req); engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; se_dev = g_virtual_se_dev[engine_id]; if (!req) { dev_err(se_dev->dev, "SHA request not valid\n"); return -EINVAL; } /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; req_ctx = ahash_request_ctx(req); if (!req_ctx->req_context_initialized) { dev_err(se_dev->dev, "%s Request ctx not initialized\n", __func__); return -EINVAL; } /* Do not process data in given request */ ret = tegra_hv_vse_safety_sha_op(req, true, false); if (ret) dev_err(se_dev->dev, "tegra_se_sha_final failed - %d\n", ret); tegra_hv_vse_safety_sha_req_deinit(req); return ret; } static int tegra_hv_vse_safety_sha_digest(struct ahash_request *req) { struct tegra_virtual_se_dev *se_dev; int ret = 0; uint32_t engine_id; struct tegra_virtual_se_sha_context *sha_ctx; struct tegra_virtual_se_req_context *req_ctx; sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); req_ctx = ahash_request_ctx(req); engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; se_dev = g_virtual_se_dev[engine_id]; if (!req) { dev_err(se_dev->dev, "SHA request not valid\n"); return -EINVAL; } /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; ret = tegra_hv_vse_safety_sha_init(req); if (ret) { dev_err(se_dev->dev, "%s init failed - %d\n", __func__, ret); return ret; } ret = tegra_hv_vse_safety_sha_op(req, true, true); if (ret) 
dev_err(se_dev->dev, "tegra_se_sha_digest failed - %d\n", ret); tegra_hv_vse_safety_sha_req_deinit(req); return ret; } static int tegra_hv_vse_safety_hmac_sha_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_virtual_se_hmac_sha_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); struct tegra_virtual_se_dev *se_dev; int err = 0; s8 label[TEGRA_VIRTUAL_SE_AES_MAX_KEY_SIZE]; bool is_keyslot_label; if (!ctx) return -EINVAL; se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].se_engine]; if (keylen != 32) { dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); return -EINVAL; } /* format: 'NVSEAES 1234567\0' */ is_keyslot_label = sscanf(key, "%s", label) == 1 && !strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL); if (is_keyslot_label) { ctx->keylen = keylen; memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES); ctx->is_key_slot_allocated = true; } else { dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key); return -EINVAL; } return err; } static int tegra_hv_vse_safety_hmac_sha_init(struct ahash_request *req) { struct crypto_ahash *tfm; struct tegra_virtual_se_req_context *req_ctx; struct tegra_virtual_se_hmac_sha_context *hmac_ctx; struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[VIRTUAL_SE_SHA]; if (!req) { dev_err(se_dev->dev, "HMAC SHA request not valid\n"); return -EINVAL; } /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; req_ctx = ahash_request_ctx(req); if (!req_ctx) { dev_err(se_dev->dev, "HMAC SHA req_ctx not valid\n"); return -EINVAL; } tfm = crypto_ahash_reqtfm(req); if (!tfm) { dev_err(se_dev->dev, "HMAC SHA transform not valid\n"); return -EINVAL; } hmac_ctx = crypto_ahash_ctx(tfm); hmac_ctx->digest_size = crypto_ahash_digestsize(tfm); if (!hmac_ctx->is_key_slot_allocated) { pr_err("%s key is not allocated\n", __func__); return -EINVAL; } if (strcmp(crypto_ahash_alg_name(tfm), "hmac-sha256-vse") == 0) { hmac_ctx->mode = VIRTUAL_SE_OP_MODE_SHA256; hmac_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT; } else { dev_err(se_dev->dev, "Invalid HMAC-SHA Alg\n"); return -EINVAL; } req_ctx->total_count = 0; req_ctx->is_first = true; req_ctx->req_context_initialized = true; return 0; } static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req, bool is_last) { struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); struct tegra_virtual_se_hmac_sha_context *hmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_vse_hmac_sha_req_data *hmac_req_data; struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[hmac_ctx->node_id].se_engine]; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_msg_t ivc_req_msg = {0}; struct tegra_virtual_se_hmac_sha_args *phmac; struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[hmac_ctx->node_id].ivck; int err = 0; struct tegra_vse_priv_data priv = {0}; struct tegra_vse_tag *priv_data_ptr; u32 cmd = 0; void *src_buf = NULL; dma_addr_t src_buf_addr; void *hash_buf = NULL; dma_addr_t hash_buf_addr; void *verify_result_buf = NULL; dma_addr_t verify_result_addr; void *match_code_buf = NULL; dma_addr_t match_code_addr; u32 matchcode = SE_HW_VALUE_MATCH_CODE; u32 mismatch_code = SE_HW_VALUE_MISMATCH_CODE; u32 match_code_buf_size = 4; u32 blocks_to_process, last_block_bytes = 0; u64 msg_len = 0, temp_len = 0; if ((req->nbytes == 0) || (req->nbytes > 
TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN)) { dev_err(se_dev->dev, "%s: input buffer size is invalid\n", __func__); return -EINVAL; } if (!is_last) { if (req->nbytes % TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT != 0) { dev_err(se_dev->dev, "%s: non-last buffer size is invalid\n", __func__); return -EINVAL; } } hmac_req_data = (struct tegra_vse_hmac_sha_req_data *) req->priv; src_buf = dma_alloc_coherent( se_dev->dev, req->nbytes, &src_buf_addr, GFP_KERNEL); if (!src_buf) { dev_err(se_dev->dev, "Cannot allocate memory for source buffer\n"); return -ENOMEM; } if (hmac_req_data->request_type == HMAC_SHA_SIGN) { hash_buf = dma_alloc_coherent( se_dev->dev, hmac_ctx->digest_size, &hash_buf_addr, GFP_KERNEL); if (!hash_buf) { dev_err(se_dev->dev, "Cannot allocate memory for hash buffer\n"); err = -ENOMEM; goto unmap_exit; } memset(hash_buf, 0, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH); cmd = TEGRA_VIRTUAL_SE_CMD_HMAC_SIGN; } else { cmd = TEGRA_VIRTUAL_SE_CMD_HMAC_VERIFY; } if ((se_dev->chipdata->hmac_verify_hw_support == true) && (is_last && (hmac_req_data->request_type == HMAC_SHA_VERIFY))) { verify_result_buf = dma_alloc_coherent( se_dev->dev, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH, &verify_result_addr, GFP_KERNEL); if (!verify_result_buf) { dev_err(se_dev->dev, "Cannot allocate memory for verify_result_buf buffer\n"); err = -ENOMEM; goto unmap_exit; } match_code_buf = dma_alloc_coherent( se_dev->dev, match_code_buf_size, &match_code_addr, GFP_KERNEL); if (!match_code_buf) { dev_err(se_dev->dev, "Cannot allocate memory for match_code_buf buffer\n"); err = -ENOMEM; goto unmap_exit; } } g_crypto_to_ivc_map[hmac_ctx->node_id].vse_thread_start = true; msg_len = req->nbytes; temp_len = msg_len; req_ctx->total_count += msg_len; ivc_tx = &ivc_req_msg.tx[0]; ivc_hdr = &ivc_req_msg.ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->engine = VIRTUAL_SE_SHA; ivc_tx->cmd = cmd; phmac = &(ivc_tx->hmac); phmac->mode = hmac_ctx->mode; phmac->msg_total_length[2] = 0; phmac->msg_total_length[3] = 0; phmac->msg_left_length[2] = 0; phmac->msg_left_length[3] = 0; memcpy(phmac->keyslot, hmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); phmac->src_addr = src_buf_addr; if (hmac_req_data->request_type == HMAC_SHA_SIGN) phmac->dst_addr = hash_buf_addr; if (is_last) { /* Set msg left length equal to input buffer size */ phmac->msg_left_length[0] = msg_len & 0xFFFFFFFF; phmac->msg_left_length[1] = msg_len >> 32; /* Set msg total length equal to sum of all input buffer size */ phmac->msg_total_length[0] = req_ctx->total_count & 0xFFFFFFFF; phmac->msg_total_length[1] = req_ctx->total_count >> 32; } else { /* Set msg left length greater than input buffer size */ temp_len += 8; phmac->msg_left_length[0] = temp_len & 0xFFFFFFFF; phmac->msg_left_length[1] = temp_len >> 32; /* Set msg total length greater than msg left length for non-first request */ if (req_ctx->is_first) req_ctx->is_first = false; else temp_len += 8; phmac->msg_total_length[0] = temp_len & 0xFFFFFFFF; phmac->msg_total_length[1] = temp_len >> 32; } if (se_dev->chipdata->hmac_verify_hw_support == false) { if (is_last && (hmac_req_data->request_type == HMAC_SHA_VERIFY)) { blocks_to_process = msg_len / hmac_ctx->blk_size; /* num of bytes less than block size */ if ((req->nbytes % hmac_ctx->blk_size) || blocks_to_process == 0) { last_block_bytes = msg_len % hmac_ctx->blk_size; } else { /* decrement num of blocks */ blocks_to_process--; last_block_bytes = 
hmac_ctx->blk_size; } if (blocks_to_process > 0) sg_copy_to_buffer(req->src, (u32)sg_nents(req->src), src_buf, blocks_to_process * hmac_ctx->blk_size); phmac->src_buf_size = blocks_to_process * hmac_ctx->blk_size; phmac->lastblock_len = last_block_bytes; sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), phmac->lastblock, last_block_bytes, blocks_to_process * hmac_ctx->blk_size); memcpy(phmac->expected_hmac_sha, hmac_req_data->expected_digest, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH); } else { phmac->src_buf_size = msg_len; phmac->lastblock_len = 0; sg_copy_to_buffer(req->src, (u32)sg_nents(req->src), src_buf, msg_len); } } else { phmac->src_buf_size = msg_len; phmac->lastblock_len = 0; sg_copy_to_buffer(req->src, (u32)sg_nents(req->src), src_buf, msg_len); if (is_last && (hmac_req_data->request_type == HMAC_SHA_VERIFY)) { phmac->hmac_addr = verify_result_addr; memcpy(verify_result_buf, hmac_req_data->expected_digest, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH); phmac->dst_addr = match_code_addr; } } priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg.ivc_hdr.tag; priv_data_ptr->priv_data = (unsigned int *)&priv; priv.cmd = VIRTUAL_SE_PROCESS; priv.se_dev = se_dev; init_completion(&priv.alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, &priv, &ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), hmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto unmap_exit; } if (priv.rx_status != 0) { err = status_to_errno(priv.rx_status); dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv.rx_status); goto unmap_exit; } if (is_last) { if (hmac_req_data->request_type == HMAC_SHA_VERIFY) { if (se_dev->chipdata->hmac_verify_hw_support == false) { ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_HMAC_GET_VERIFY; priv.cmd = VIRTUAL_SE_PROCESS; init_completion(&priv.alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, &priv, &ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), hmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto unmap_exit; } if (priv.rx_status == 0) { hmac_req_data->result = 0; } else if (priv.rx_status == TEGRA_VIRTUAL_SE_ERR_MAC_INVALID) { dev_dbg(se_dev->dev, "%s: tag mismatch", __func__); hmac_req_data->result = 1; } else { err = status_to_errno(priv.rx_status); dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv.rx_status); } } else { if (memcmp(match_code_buf, &matchcode, 4) == 0) { hmac_req_data->result = 0; } else if (memcmp(match_code_buf, &mismatch_code, 4) == 0) { dev_dbg(se_dev->dev, "%s: tag mismatch", __func__); hmac_req_data->result = 1; } else { dev_err(se_dev->dev, "%s: invalid tag match code", __func__); err = -EINVAL; } } } else { memcpy(req->result, hash_buf, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH); } } unmap_exit: if (src_buf) dma_free_coherent(se_dev->dev, msg_len, src_buf, src_buf_addr); if (hash_buf) dma_free_coherent(se_dev->dev, hmac_ctx->digest_size, hash_buf, hash_buf_addr); if (verify_result_buf) dma_free_coherent(se_dev->dev, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH, verify_result_buf, verify_result_addr); if (match_code_buf) dma_free_coherent(se_dev->dev, match_code_buf_size, match_code_buf, match_code_addr); return err; } static int tegra_hv_vse_safety_hmac_sha_update(struct ahash_request *req) { struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); struct tegra_virtual_se_hmac_sha_context *hmac_ctx; struct tegra_virtual_se_dev *se_dev; int ret = 0; if (!req) { 
pr_err("%s HMAC SHA request not valid\n", __func__); return -EINVAL; } if (!req_ctx->req_context_initialized) { pr_err("%s Request ctx not initialized\n", __func__); return -EINVAL; } hmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!hmac_ctx) { pr_err("%s HMAC SHA req_ctx not valid\n", __func__); return -EINVAL; } if (!hmac_ctx->is_key_slot_allocated) { pr_err("%s key is not allocated\n", __func__); return -EINVAL; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[hmac_ctx->node_id].se_engine]; /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; ret = tegra_hv_vse_safety_hmac_sha_sv_op(req, false); if (ret) dev_err(se_dev->dev, "tegra_se_hmac_sha_update failed - %d\n", ret); return ret; } static int tegra_hv_vse_safety_hmac_sha_finup(struct ahash_request *req) { struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); struct tegra_virtual_se_hmac_sha_context *hmac_ctx = NULL; struct tegra_virtual_se_dev *se_dev; int ret = 0; if (!req) { pr_err("%s HMAC-SHA request not valid\n", __func__); return -EINVAL; } hmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!hmac_ctx) { pr_err("%s HMAC-SHA req_ctx not valid\n", __func__); return -EINVAL; } if (!hmac_ctx->is_key_slot_allocated) { pr_err("%s key is not allocated\n", __func__); return -EINVAL; } if (!req_ctx->req_context_initialized) { pr_err("%s Request ctx not initialized\n", __func__); return -EINVAL; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[hmac_ctx->node_id].se_engine]; /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; ret = tegra_hv_vse_safety_hmac_sha_sv_op(req, true); if (ret) dev_err(se_dev->dev, "tegra_se_hmac_sha_finup failed - %d\n", ret); hmac_ctx->is_key_slot_allocated = false; req_ctx->req_context_initialized = false; return ret; } static int tegra_hv_vse_safety_hmac_sha_final(struct ahash_request *req) { // Unsupported return -EINVAL; } static int tegra_hv_vse_safety_hmac_sha_digest(struct ahash_request *req) { // Unsupported return -EINVAL; } static int tegra_hv_vse_safety_sha_export(struct ahash_request *req, void *out) { struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); memcpy(out, req_ctx, sizeof(*req_ctx)); return 0; } static int tegra_hv_vse_safety_sha_import(struct ahash_request *req, const void *in) { struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); memcpy(req_ctx, in, sizeof(*req_ctx)); return 0; } static int tegra_hv_vse_safety_sha_cra_init(struct crypto_tfm *tfm) { crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct tegra_virtual_se_req_context)); return 0; } static void tegra_hv_vse_safety_sha_cra_exit(struct crypto_tfm *tfm) { } static void tegra_hv_vse_safety_prepare_cmd(struct tegra_virtual_se_dev *se_dev, struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx, struct tegra_virtual_se_aes_req_context *req_ctx, struct tegra_virtual_se_aes_context *aes_ctx, struct skcipher_request *req) { union tegra_virtual_se_aes_args *aes; aes = &ivc_tx->aes; if (req_ctx->encrypt == true) ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT; else ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_DECRYPT; memcpy(aes->op.keyslot, aes_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); aes->op.key_length = aes_ctx->keylen; aes->op.mode = req_ctx->op_mode; aes->op.ivsel = AES_ORIGINAL_IV; if (req->iv) { memcpy(aes->op.lctr, req->iv, TEGRA_VIRTUAL_SE_AES_LCTR_SIZE); if ((req_ctx->op_mode == AES_CTR) || (req_ctx->op_mode == AES_SM4_CTR)) aes->op.ctr_cntn = 
TEGRA_VIRTUAL_SE_AES_LCTR_CNTN; else if ((req_ctx->op_mode == AES_CBC) || (req_ctx->op_mode == AES_SM4_CBC)) { if (req_ctx->encrypt == true && aes_ctx->user_nonce == 1U && aes_ctx->b_is_first != 1U) aes->op.ivsel = AES_UPDATED_IV; else aes->op.ivsel = AES_IV_REG; } } } static int tegra_hv_vse_safety_aes_gen_random_iv( struct tegra_virtual_se_dev *se_dev, struct skcipher_request *req, struct tegra_vse_priv_data *priv, struct tegra_virtual_se_ivc_msg_t *ivc_req_msg) { struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = &ivc_req_msg->tx[0]; struct tegra_hv_ivc_cookie *pivck; union tegra_virtual_se_aes_args *aes = &ivc_tx->aes; struct tegra_virtual_se_aes_context *aes_ctx; int err = 0; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT_INIT; priv->cmd = VIRTUAL_SE_PROCESS; aes_ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); memcpy(aes->op.keyslot, aes_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); aes->op.key_length = aes_ctx->keylen; pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), aes_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); return err; } err = status_to_errno(priv->rx_status); if (err) { dev_err(se_dev->dev, "\n %s IV generation failed %d\n", __func__, err); } return err; } /* Stage the request into the per-node DMA buffer and submit a single AES encrypt/decrypt command to the SE server over IVC. */ static int tegra_hv_vse_safety_process_aes_req(struct tegra_virtual_se_dev *se_dev, struct skcipher_request *req) { struct tegra_virtual_se_aes_req_context *req_ctx; struct tegra_virtual_se_aes_context *aes_ctx; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr = NULL; struct tegra_hv_ivc_cookie *pivck; int err = 0; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr; union tegra_virtual_se_aes_args *aes; priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; goto exit; } priv->req = req; ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) { err = -ENOMEM; goto exit; } ivc_tx = &ivc_req_msg->tx[0]; aes = &ivc_tx->aes; req_ctx = skcipher_request_ctx(req); aes_ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); priv->buf = aes_ctx->src->buf_ptr; priv->buf_addr = aes_ctx->src->buf_iova; sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), priv->buf, req->cryptlen, 0); if (unlikely(!aes_ctx->is_key_slot_allocated)) { dev_err(se_dev->dev, "AES Key slot not allocated\n"); err = -EINVAL; goto exit; } pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck; ivc_hdr = &ivc_req_msg->ivc_hdr; //Currently we support only one request per IVC message ivc_hdr->num_reqs = 1U; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->engine = g_crypto_to_ivc_map[aes_ctx->node_id].se_engine; priv_data_ptr = (struct tegra_vse_tag *)ivc_hdr->tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->se_dev = se_dev; g_crypto_to_ivc_map[aes_ctx->node_id].vse_thread_start = true; /* * If req->iv[0] is 1 and the request is for AES CBC/CTR encryption, * it means that generation of random IV is required. * Random IV generation is needed only when the user has not provided a nonce. 
*/ if (req_ctx->encrypt && (is_aes_mode_valid(req_ctx->op_mode) == 1) && (aes_ctx->user_nonce == 0U) && (req->iv[0] == 1)) { //Random IV generation is required err = tegra_hv_vse_safety_aes_gen_random_iv(se_dev, req, priv, ivc_req_msg); if (err) goto exit; } priv->cmd = VIRTUAL_SE_AES_CRYPTO; tegra_hv_vse_safety_prepare_cmd(se_dev, ivc_tx, req_ctx, aes_ctx, req); aes->op.src_addr = (u64)priv->buf_addr; aes->op.src_buf_size = req->cryptlen; aes->op.dst_addr = (u64)priv->buf_addr; aes->op.dst_buf_size = req->cryptlen; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), aes_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto exit; } if (priv->rx_status == 0U) { sg_pcopy_from_buffer(req->dst, sg_nents(req->dst), priv->buf, req->cryptlen, 0); if ((is_aes_mode_valid(req_ctx->op_mode) == 1) && (req_ctx->encrypt == true) && (aes_ctx->user_nonce == 0U)) memcpy(req->iv, priv->iv, TEGRA_VIRTUAL_SE_AES_IV_SIZE); } else { dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); } err = status_to_errno(priv->rx_status); exit: if (priv) devm_kfree(se_dev->dev, priv); if (ivc_req_msg) devm_kfree(se_dev->dev, ivc_req_msg); return err; } static int tegra_hv_vse_safety_aes_cra_init(struct crypto_skcipher *tfm) { tfm->reqsize = sizeof(struct tegra_virtual_se_aes_req_context); return 0; } static void tegra_hv_vse_safety_aes_cra_exit(struct crypto_skcipher *tfm) { /* nothing to do as user releases the keyslot through tzvault TA */ } static int tegra_hv_vse_safety_aes_cbc_encrypt(struct skcipher_request *req) { int err = 0; struct tegra_virtual_se_aes_req_context *req_ctx = NULL; struct tegra_virtual_se_aes_context *aes_ctx; if (!req) { pr_err("NULL req received by %s", __func__); return -EINVAL; } if (unlikely(!req->iv)) { /* If IV is not set we cannot determine whether * random IV generation is required. 
*/ pr_err("%s: Unable to determine if random IV generation is needed\n", __func__); return -EINVAL; } aes_ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); req_ctx = skcipher_request_ctx(req); req_ctx->encrypt = true; req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].se_engine; req_ctx->se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) { pr_err("%s: SM4 CBC is not supported for selected platform\n", __func__); return -EINVAL; } if (aes_ctx->b_is_sm4 == 1U) req_ctx->op_mode = AES_SM4_CBC; else req_ctx->op_mode = AES_CBC; aes_ctx->src = &g_node_dma[aes_ctx->node_id].se_dma_buf[0]; if (!aes_ctx->src->buf_ptr) { dev_err(req_ctx->se_dev->dev, "%s src_buf is NULL\n", __func__); return -ENOMEM; } err = tegra_hv_vse_safety_process_aes_req(req_ctx->se_dev, req); if (err) dev_err(req_ctx->se_dev->dev, "%s failed with error %d\n", __func__, err); return err; } static int tegra_hv_vse_safety_aes_cbc_decrypt(struct skcipher_request *req) { int err = 0; struct tegra_virtual_se_aes_req_context *req_ctx = NULL; struct tegra_virtual_se_aes_context *aes_ctx; if (!req) { pr_err("NULL req received by %s", __func__); return -EINVAL; } aes_ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); req_ctx = skcipher_request_ctx(req); req_ctx->encrypt = false; req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].se_engine; req_ctx->se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) { pr_err("%s: SM4 CBC is not supported for selected platform\n", __func__); return -EINVAL; } if (aes_ctx->b_is_sm4 == 1U) req_ctx->op_mode = AES_SM4_CBC; else req_ctx->op_mode = AES_CBC; aes_ctx->src = &g_node_dma[aes_ctx->node_id].se_dma_buf[0]; if (!aes_ctx->src->buf_ptr) { dev_err(req_ctx->se_dev->dev, "%s src_buf is NULL\n", __func__); return -ENOMEM; } err = tegra_hv_vse_safety_process_aes_req(req_ctx->se_dev, req); if (err) dev_err(req_ctx->se_dev->dev, "%s failed with error %d\n", __func__, err); return err; } static int tegra_hv_vse_safety_aes_ctr_encrypt(struct skcipher_request *req) { int err = 0; struct tegra_virtual_se_aes_req_context *req_ctx = NULL; struct tegra_virtual_se_aes_context *aes_ctx; if (!req) { pr_err("NULL req received by %s", __func__); return -EINVAL; } if (unlikely(!req->iv)) { /* If IV is not set we cannot determine whether * random IV generation is required. 
*/ pr_err("%s: Unable to determine if random IV generation is needed\n", __func__); return -EINVAL; } aes_ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); req_ctx = skcipher_request_ctx(req); req_ctx->encrypt = true; req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].se_engine; req_ctx->se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) { pr_err("%s: SM4 CTR is not supported for selected platform\n", __func__); return -EINVAL; } if (aes_ctx->b_is_sm4 == 1U) req_ctx->op_mode = AES_SM4_CTR; else req_ctx->op_mode = AES_CTR; aes_ctx->src = &g_node_dma[aes_ctx->node_id].se_dma_buf[0]; if (!aes_ctx->src->buf_ptr) { dev_err(req_ctx->se_dev->dev, "%s src_buf is NULL\n", __func__); return -ENOMEM; } err = tegra_hv_vse_safety_process_aes_req(req_ctx->se_dev, req); if (err) dev_err(req_ctx->se_dev->dev, "%s failed with error %d\n", __func__, err); return err; } static int tegra_hv_vse_safety_aes_ctr_decrypt(struct skcipher_request *req) { int err = 0; struct tegra_virtual_se_aes_req_context *req_ctx = NULL; struct tegra_virtual_se_aes_context *aes_ctx; if (!req) { pr_err("NULL req received by %s", __func__); return -EINVAL; } aes_ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); req_ctx = skcipher_request_ctx(req); req_ctx->encrypt = false; req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].se_engine; req_ctx->se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) { pr_err("%s: SM4 CTR is not supported for selected platform\n", __func__); return -EINVAL; } if (aes_ctx->b_is_sm4 == 1U) req_ctx->op_mode = AES_SM4_CTR; else req_ctx->op_mode = AES_CTR; aes_ctx->src = &g_node_dma[aes_ctx->node_id].se_dma_buf[0]; if (!aes_ctx->src->buf_ptr) { dev_err(req_ctx->se_dev->dev, "%s src_buf is NULL\n", __func__); return -ENOMEM; } err = tegra_hv_vse_safety_process_aes_req(req_ctx->se_dev, req); if (err) dev_err(req_ctx->se_dev->dev, "%s failed with error %d\n", __func__, err); return err; } static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req) { struct tegra_virtual_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_vse_cmac_req_data *cmac_req_data; struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[cmac_ctx->node_id].ivck; int err = 0; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr; dma_addr_t src_buf_addr; void *src_buf = NULL; if ((req->nbytes == 0) || (req->nbytes > TEGRA_VIRTUAL_TSEC_MAX_SUPPORTED_BUFLEN)) { dev_err(se_dev->dev, "%s: input buffer size is invalid\n", __func__); return -EINVAL; } ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) return -ENOMEM; priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { devm_kfree(se_dev->dev, ivc_req_msg); return -ENOMEM; } cmac_req_data = (struct tegra_vse_cmac_req_data *) req->priv; ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->engine = 
g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine; g_crypto_to_ivc_map[cmac_ctx->node_id].vse_thread_start = true; src_buf = dma_alloc_coherent(se_dev->dev, req->nbytes, &src_buf_addr, GFP_KERNEL); if (!src_buf) { err = -ENOMEM; goto free_mem; } /* copy aad from sgs to buffer*/ sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), src_buf, req->nbytes, 0); ivc_tx->tsec.src_addr = src_buf_addr; ivc_tx->tsec.src_buf_size = req->nbytes; ivc_tx->tsec.keyslot = *((uint64_t *)cmac_ctx->aes_keyslot); if (cmac_req_data->request_type == CMAC_SIGN) { ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_TSEC_SIGN; } else { ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_TSEC_VERIFY; memcpy(ivc_tx->tsec.cmac_result, req->result, TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE); } priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->cmd = VIRTUAL_SE_PROCESS; priv->se_dev = se_dev; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), cmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto unmap_exit; } if (priv->rx_status != 0) { err = status_to_errno(priv->rx_status); dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); goto unmap_exit; } if (cmac_req_data->request_type == CMAC_SIGN) ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_TSEC_SIGN; else ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_TSEC_VERIFY; priv->cmd = VIRTUAL_CMAC_PROCESS; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), cmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto unmap_exit; } if (cmac_req_data->request_type == CMAC_SIGN) { if (priv->rx_status == 0) { memcpy(req->result, priv->cmac.data, TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE); } } else { if (priv->rx_status == 0) cmac_req_data->result = 0; else cmac_req_data->result = 1; } if ((priv->rx_status != 0) && (priv->rx_status != TEGRA_VIRTUAL_SE_ERR_MAC_INVALID)) { err = status_to_errno(priv->rx_status); dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); } unmap_exit: if (src_buf) dma_free_coherent(se_dev->dev, req->nbytes, src_buf, src_buf_addr); free_mem: devm_kfree(se_dev->dev, priv); devm_kfree(se_dev->dev, ivc_req_msg); return err; } static int tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported( struct ahash_request *req, bool is_last) { struct tegra_virtual_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_vse_cmac_req_data *cmac_req_data; struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[cmac_ctx->node_id].ivck; int err = 0; int num_lists = 0; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr; unsigned int num_mapped_sgs = 0; u8 *mac_buf = NULL; u8 *mac_comp = NULL; dma_addr_t mac_buf_addr; dma_addr_t mac_comp_buf_addr; u32 match_code = SE_HW_VALUE_MATCH_CODE; struct tegra_virtual_se_addr64_buf_size src_addr64; ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) return -ENOMEM; priv = devm_kzalloc(se_dev->dev, sizeof(*priv), 
GFP_KERNEL); if (!priv) { devm_kfree(se_dev->dev, ivc_req_msg); return -ENOMEM; } mac_buf = dma_alloc_coherent(se_dev->dev, 16, &mac_buf_addr, GFP_KERNEL); if (!mac_buf) { err = -ENOMEM; goto free_mem; } cmac_req_data = (struct tegra_vse_cmac_req_data *) req->priv; ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; if (cmac_req_data->request_type == CMAC_VERIFY) { mac_comp = dma_alloc_coherent(se_dev->dev, 4, &mac_comp_buf_addr, GFP_KERNEL); if (!mac_comp) { err = -ENOMEM; goto free_mem; } } g_crypto_to_ivc_map[cmac_ctx->node_id].vse_thread_start = true; err = tegra_hv_vse_safety_prepare_ivc_linked_list(se_dev, req->src, req->nbytes, TEGRA_HV_VSE_AES_CMAC_MAX_LL_NUM, TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE, &num_lists, DMA_TO_DEVICE, &num_mapped_sgs, &src_addr64); if (err) goto free_mem; ivc_tx->aes.op_cmac_sv.src_addr = src_addr64.addr; ivc_tx->aes.op_cmac_sv.src_buf_size = src_addr64.buf_size; ivc_hdr->engine = g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine; if (cmac_req_data->request_type == CMAC_SIGN) ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMAC_SIGN; else ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMAC_VERIFY; memcpy(ivc_tx->aes.op_cmac_sv.keyslot, cmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); ivc_tx->aes.op_cmac_sv.key_length = cmac_ctx->keylen; ivc_tx->aes.op_cmac_sv.config = 0; if (cmac_ctx->b_is_sm4 == 1U) ivc_tx->aes.op_cmac_sv.sym_ciph = VSE_SYM_CIPH_SM4; else ivc_tx->aes.op_cmac_sv.sym_ciph = VSE_SYM_CIPH_AES; if (is_last == true) ivc_tx->aes.op_cmac_sv.config |= TEGRA_VIRTUAL_SE_AES_CMAC_SV_CONFIG_LASTREQ; if (cmac_ctx->is_first) { ivc_tx->aes.op_cmac_sv.config |= TEGRA_VIRTUAL_SE_AES_CMAC_SV_CONFIG_FIRSTREQ; if (cmac_req_data->request_type == CMAC_VERIFY) { memcpy(mac_buf, req->result, TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE); } cmac_ctx->is_first = false; } ivc_tx->aes.op_cmac_sv.mac_addr = mac_buf_addr; ivc_tx->aes.op_cmac_sv.mac_comp_res_addr = mac_comp_buf_addr; priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->cmd = VIRTUAL_CMAC_PROCESS; priv->se_dev = se_dev; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), cmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_mem; } if (priv->rx_status != 0) { err = status_to_errno(priv->rx_status); dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); goto free_mem; } if (cmac_req_data->request_type == CMAC_SIGN) { if (priv->rx_status == 0) { memcpy(req->result, mac_buf, TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE); } } else { if (memcmp(mac_comp, &match_code, 4) == 0) cmac_req_data->result = 0; else cmac_req_data->result = 1; } free_mem: devm_kfree(se_dev->dev, priv); devm_kfree(se_dev->dev, ivc_req_msg); if (mac_buf) dma_free_coherent(se_dev->dev, 16, mac_buf, mac_buf_addr); if (mac_comp) dma_free_coherent(se_dev->dev, 4, mac_comp, mac_comp_buf_addr); return err; } static int tegra_hv_vse_safety_cmac_sv_op(struct ahash_request *req, bool is_last) { struct tegra_virtual_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_vse_cmac_req_data *cmac_req_data; struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; struct tegra_virtual_se_ivc_hdr_t 
*ivc_hdr; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[cmac_ctx->node_id].ivck; struct scatterlist *src_sg; u32 blocks_to_process, last_block_bytes = 0; unsigned int total_len; int err = 0; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr; if ((req->nbytes == 0) || (req->nbytes > TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN)) { dev_err(se_dev->dev, "%s: input buffer size is invalid\n", __func__); return -EINVAL; } blocks_to_process = req->nbytes / TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE; /* num of bytes less than block size */ if ((req->nbytes % TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE) || blocks_to_process == 0) { last_block_bytes = req->nbytes % TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE; } else { /* decrement num of blocks */ blocks_to_process--; last_block_bytes = TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE; } ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) return -ENOMEM; priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { devm_kfree(se_dev->dev, ivc_req_msg); return -ENOMEM; } cmac_req_data = (struct tegra_vse_cmac_req_data *) req->priv; ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; src_sg = req->src; g_crypto_to_ivc_map[cmac_ctx->node_id].vse_thread_start = true; /* first process all blocks except last block */ if (blocks_to_process) { total_len = blocks_to_process * TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE; sg_copy_to_buffer(req->src, sg_nents(req->src), cmac_ctx->src->buf_ptr, total_len); ivc_tx->aes.op_cmac_sv.src_addr = cmac_ctx->src->buf_iova; ivc_tx->aes.op_cmac_sv.src_buf_size = total_len; } ivc_tx->aes.op_cmac_sv.lastblock_len = last_block_bytes; if (cmac_ctx->b_is_sm4 == 1U) { ivc_tx->aes.op_cmac_sv.sym_ciph = VSE_SYM_CIPH_SM4; } else { ivc_tx->aes.op_cmac_sv.sym_ciph = VSE_SYM_CIPH_AES; } sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), ivc_tx->aes.op_cmac_sv.lastblock, last_block_bytes, blocks_to_process * TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE); ivc_hdr->engine = g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine; if (cmac_req_data->request_type == CMAC_SIGN) ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMAC_SIGN; else ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMAC_VERIFY; memcpy(ivc_tx->aes.op_cmac_sv.keyslot, cmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); ivc_tx->aes.op_cmac_sv.key_length = cmac_ctx->keylen; ivc_tx->aes.op_cmac_sv.src_buf_size = blocks_to_process * TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE; ivc_tx->aes.op_cmac_sv.config = 0; if (is_last == true) ivc_tx->aes.op_cmac_sv.config |= TEGRA_VIRTUAL_SE_AES_CMAC_SV_CONFIG_LASTREQ; if (cmac_ctx->is_first) { ivc_tx->aes.op_cmac_sv.config |= TEGRA_VIRTUAL_SE_AES_CMAC_SV_CONFIG_FIRSTREQ; if (cmac_req_data->request_type == CMAC_VERIFY) { memcpy(ivc_tx->aes.op_cmac_sv.cmac_result, req->result, TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE); } cmac_ctx->is_first = false; } priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->cmd = VIRTUAL_SE_PROCESS; priv->se_dev = se_dev; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), cmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_mem; } if (priv->rx_status != 0) { err = 
status_to_errno(priv->rx_status); dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); goto free_mem; } if (is_last) { if (cmac_req_data->request_type == CMAC_SIGN) ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_CMAC_SIGN; else ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_CMAC_VERIFY; priv->cmd = VIRTUAL_CMAC_PROCESS; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), cmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_mem; } if (cmac_req_data->request_type == CMAC_SIGN) { if (priv->rx_status == 0) { memcpy(req->result, priv->cmac.data, TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE); } } else { if (priv->rx_status == 0) cmac_req_data->result = 0; else cmac_req_data->result = 1; } if ((priv->rx_status != 0) && (priv->rx_status != TEGRA_VIRTUAL_SE_ERR_MAC_INVALID)) { err = status_to_errno(priv->rx_status); dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); } } free_mem: devm_kfree(se_dev->dev, priv); devm_kfree(se_dev->dev, ivc_req_msg); return err; } static int tegra_hv_vse_safety_cmac_init(struct ahash_request *req) { struct tegra_virtual_se_dev *se_dev; struct crypto_ahash *tfm; struct tegra_virtual_se_aes_cmac_context *cmac_ctx; if (!req) { pr_err("%s AES-CMAC request not valid\n", __func__); return -EINVAL; } tfm = crypto_ahash_reqtfm(req); if (!tfm) { pr_err("%s AES-CMAC transform not valid\n", __func__); return -EINVAL; } cmac_ctx = crypto_ahash_ctx(tfm); if (!cmac_ctx) { pr_err("%s AES-CMAC req_ctx not valid\n", __func__); return -EINVAL; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; cmac_ctx->digest_size = crypto_ahash_digestsize(tfm); cmac_ctx->hash_result = dma_alloc_coherent( se_dev->dev, TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE, &cmac_ctx->hash_result_addr, GFP_KERNEL); if (!cmac_ctx->hash_result) { dev_err(se_dev->dev, "Cannot allocate memory for CMAC result\n"); return -ENOMEM; } cmac_ctx->is_first = true; cmac_ctx->req_context_initialized = true; return 0; } static void tegra_hv_vse_safety_cmac_req_deinit(struct ahash_request *req) { struct tegra_virtual_se_dev *se_dev; struct tegra_virtual_se_aes_cmac_context *cmac_ctx; cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!cmac_ctx) { pr_err("%s AES-CMAC req_ctx not valid\n", __func__); return; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; dma_free_coherent( se_dev->dev, TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE, cmac_ctx->hash_result, cmac_ctx->hash_result_addr); cmac_ctx->hash_result = NULL; cmac_ctx->req_context_initialized = false; } static int tegra_hv_vse_safety_cmac_update(struct ahash_request *req) { struct tegra_virtual_se_aes_cmac_context *cmac_ctx = NULL; struct tegra_virtual_se_dev *se_dev; int ret = 0; if (!req) { pr_err("%s AES-CMAC request not valid\n", __func__); return -EINVAL; } cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!cmac_ctx) { pr_err("%s AES-CMAC req_ctx not valid\n", __func__); return -EINVAL; } if (!cmac_ctx->req_context_initialized) { pr_err("%s Request ctx not initialized\n", __func__); return -EINVAL; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; cmac_ctx->src = &g_node_dma[cmac_ctx->node_id].se_dma_buf[0]; if (!cmac_ctx->src->buf_ptr) { dev_err(se_dev->dev, 
"%s src_buf is NULL\n", __func__); return -ENOMEM; } /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; /* Do not process data in given request */ if (se_dev->chipdata->cmac_hw_verify_supported) ret = tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(req, false); else ret = tegra_hv_vse_safety_cmac_sv_op(req, false); if (ret) dev_err(se_dev->dev, "tegra_se_cmac_update failed - %d\n", ret); return ret; } static int tegra_hv_tsec_safety_cmac_update(struct ahash_request *req) { pr_err("%s cmac_update is not supported for tsec\n", __func__); return -EINVAL; } static int tegra_hv_vse_safety_cmac_final(struct ahash_request *req) { struct tegra_virtual_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; pr_err("%s cmac_final is not supported\n", __func__); return -EINVAL; } static int tegra_hv_vse_safety_cmac_finup(struct ahash_request *req) { struct tegra_virtual_se_aes_cmac_context *cmac_ctx = NULL; struct tegra_virtual_se_dev *se_dev; int ret = 0; if (!req) { pr_err("%s AES-CMAC request not valid\n", __func__); return -EINVAL; } cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!cmac_ctx) { pr_err("%s AES-CMAC req_ctx not valid\n", __func__); return -EINVAL; } if (!cmac_ctx->req_context_initialized) { pr_err("%s Request ctx not initialized\n", __func__); return -EINVAL; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; cmac_ctx->src = &g_node_dma[cmac_ctx->node_id].se_dma_buf[0]; if (!cmac_ctx->src->buf_ptr) { dev_err(se_dev->dev, "%s src_buf is NULL\n", __func__); return -ENOMEM; } /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; /* Do not process data in given request */ if (se_dev->chipdata->cmac_hw_verify_supported) ret = tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(req, true); else ret = tegra_hv_vse_safety_cmac_sv_op(req, true); if (ret) dev_err(se_dev->dev, "tegra_se_cmac_finup failed - %d\n", ret); tegra_hv_vse_safety_cmac_req_deinit(req); return ret; } static int tegra_hv_tsec_safety_cmac_finup(struct ahash_request *req) { struct tegra_virtual_se_aes_cmac_context *cmac_ctx = NULL; struct tegra_virtual_se_dev *se_dev = NULL; int ret = 0; if (!req) { pr_err("%s TSEC request not valid\n", __func__); return -EINVAL; } cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!cmac_ctx) { pr_err("%s TSEC req_ctx not valid\n", __func__); return -EINVAL; } if (!cmac_ctx->req_context_initialized) { pr_err("%s Request ctx not initialized\n", __func__); return -EINVAL; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; ret = tegra_hv_vse_safety_tsec_sv_op(req); if (ret) dev_err(se_dev->dev, "tegra_se_tsec_finup failed - %d\n", ret); tegra_hv_vse_safety_cmac_req_deinit(req); return ret; } static int tegra_hv_vse_safety_cmac_digest(struct ahash_request *req) { struct tegra_virtual_se_aes_cmac_context *cmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].se_engine]; /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; return 
tegra_hv_vse_safety_cmac_init(req) ?: tegra_hv_vse_safety_cmac_final(req); } int tegra_hv_vse_safety_tsec_get_keyload_status(uint32_t node_id, uint32_t *err_code) { struct tegra_virtual_se_dev *se_dev = NULL; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr = NULL; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL; struct tegra_hv_ivc_cookie *pivck = NULL; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr = NULL; int err = 0; if (node_id >= MAX_NUMBER_MISC_DEVICES) return -ENODEV; se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[node_id].se_engine]; pivck = g_crypto_to_ivc_map[node_id].ivck; ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) return -ENOMEM; priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { devm_kfree(se_dev->dev, ivc_req_msg); dev_err(se_dev->dev, "Priv Data allocation failed\n"); return -ENOMEM; } ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; g_crypto_to_ivc_map[node_id].vse_thread_start = true; ivc_hdr->engine = g_crypto_to_ivc_map[node_id].se_engine; ivc_tx->cmd = TEGRA_VIRTUAL_TSEC_CMD_GET_KEYLOAD_STATUS; priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->cmd = VIRTUAL_SE_PROCESS; priv->se_dev = se_dev; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } if (priv->rx_status != 0U) { err = -EINVAL; if (priv->rx_status == VSE_MSG_ERR_TSEC_KEYLOAD_FAILED) *err_code = NVVSE_STATUS_SE_SERVER_TSEC_KEYLOAD_FAILED; else if (priv->rx_status == VSE_MSG_ERR_TSEC_KEYLOAD_STATUS_CHECK_TIMEOUT) *err_code = NVVSE_STATUS_SE_SERVER_TSEC_KEYLOAD_TIMEOUT; else *err_code = NVVSE_STATUS_SE_SERVER_ERROR; } else { err = 0; *err_code = 0U; } free_exit: devm_kfree(se_dev->dev, priv); devm_kfree(se_dev->dev, ivc_req_msg); return err; } EXPORT_SYMBOL(tegra_hv_vse_safety_tsec_get_keyload_status); static int tegra_hv_vse_safety_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_virtual_se_aes_cmac_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); struct tegra_virtual_se_dev *se_dev; int err = 0; s8 label[TEGRA_VIRTUAL_SE_AES_MAX_KEY_SIZE]; bool is_keyslot_label; if (!ctx) return -EINVAL; se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].se_engine]; if ((keylen != 16) && (keylen != 32)) { dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); return -EINVAL; } /* format: 'NVSEAES 1234567\0' */ is_keyslot_label = sscanf(key, "%s", label) == 1 && !strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL); if (is_keyslot_label) { ctx->keylen = keylen; memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES); ctx->is_key_slot_allocated = true; } else { dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key); return -EINVAL; } return err; } static int tegra_hv_vse_safety_cmac_cra_init(struct crypto_tfm *tfm) { crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct tegra_virtual_se_aes_cmac_context)); return 0; } static void tegra_hv_vse_safety_cmac_cra_exit(struct crypto_tfm *tfm) { /* nothing to do as user releases the 
keyslot through tzvault TA */ } static int tegra_hv_vse_safety_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, u32 keylen) { struct tegra_virtual_se_aes_context *ctx = crypto_skcipher_ctx(tfm); struct tegra_virtual_se_dev *se_dev; s8 label[TEGRA_VIRTUAL_SE_AES_MAX_KEY_SIZE]; int err = 0; bool is_keyslot_label; if (!ctx) return -EINVAL; se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].se_engine]; if ((keylen != 16) && (keylen != 32)) { dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); return -EINVAL; } /* format: 'NVSEAES 1234567\0' */ is_keyslot_label = sscanf(key, "%s", label) == 1 && !strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL); if (is_keyslot_label) { ctx->keylen = keylen; memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES); ctx->is_key_slot_allocated = true; } else { dev_err(se_dev->dev, "%s: Invalid keyslot label %s", __func__, key); err = -EINVAL; } return err; } static int tegra_hv_vse_safety_rng_drbg_init(struct crypto_tfm *tfm) { return 0; } static void tegra_hv_vse_safety_rng_drbg_exit(struct crypto_tfm *tfm) { return; } static int tegra_hv_vse_safety_get_random(struct tegra_virtual_se_rng_context *rng_ctx, u8 *rdata, unsigned int dlen) { struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[rng_ctx->node_id].se_engine]; u8 *rdata_addr; int err = 0, j, num_blocks, data_len = 0; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[rng_ctx->node_id].ivck; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr = NULL; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr; if (atomic_read(&se_dev->se_suspended)) return -ENODEV; if (dlen == 0) { return -EINVAL; } num_blocks = (dlen / TEGRA_VIRTUAL_SE_RNG_DT_SIZE); data_len = (dlen % TEGRA_VIRTUAL_SE_RNG_DT_SIZE); if (data_len == 0) num_blocks = num_blocks - 1; ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) return -ENOMEM; priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(se_dev->dev, "Priv Data allocation failed\n"); devm_kfree(se_dev->dev, ivc_req_msg); return 0; } rng_ctx->rng_buf = dma_alloc_coherent(se_dev->dev, TEGRA_VIRTUAL_SE_RNG_DT_SIZE, &rng_ctx->rng_buf_adr, GFP_KERNEL); if (!rng_ctx->rng_buf) return -ENOMEM; ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->engine = g_crypto_to_ivc_map[rng_ctx->node_id].se_engine; priv_data_ptr = (struct tegra_vse_tag *)ivc_hdr->tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->cmd = VIRTUAL_SE_PROCESS; priv->se_dev = se_dev; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_RNG_DBRG; for (j = 0; j <= num_blocks; j++) { ivc_tx->aes.op_rng.dst_addr.lo = rng_ctx->rng_buf_adr & 0xFFFFFFFF; ivc_tx->aes.op_rng.dst_addr.hi = (rng_ctx->rng_buf_adr >> 32) | TEGRA_VIRTUAL_SE_RNG_DT_SIZE; init_completion(&priv->alg_complete); g_crypto_to_ivc_map[rng_ctx->node_id].vse_thread_start = true; err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), rng_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto exit; } rdata_addr = (rdata + (j * TEGRA_VIRTUAL_SE_RNG_DT_SIZE)); if (data_len && num_blocks == j) { memcpy(rdata_addr, rng_ctx->rng_buf, data_len); } else { 
memcpy(rdata_addr, rng_ctx->rng_buf, TEGRA_VIRTUAL_SE_RNG_DT_SIZE); } } exit: devm_kfree(se_dev->dev, priv); devm_kfree(se_dev->dev, ivc_req_msg); if (rng_ctx->rng_buf) dma_free_coherent(se_dev->dev, TEGRA_VIRTUAL_SE_RNG_DT_SIZE, rng_ctx->rng_buf, rng_ctx->rng_buf_adr); return dlen; } static int tegra_hv_vse_safety_rng_drbg_get_random(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *rdata, unsigned int dlen) { return tegra_hv_vse_safety_get_random(crypto_rng_ctx(tfm), rdata, dlen); } static int tegra_hv_vse_safety_rng_drbg_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { return 0; } static int tegra_vse_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, u32 keylen) { /* copied from normal aes keyset, will remove if no modification needed*/ struct tegra_virtual_se_aes_context *ctx = crypto_aead_ctx(tfm); struct tegra_virtual_se_dev *se_dev; s8 label[TEGRA_VIRTUAL_SE_AES_MAX_KEY_SIZE]; int err = 0; bool is_keyslot_label; if (!ctx) return -EINVAL; se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].se_engine]; if ((keylen != 16) && (keylen != 32)) { dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); return -EINVAL; } /* format: 'NVSEAES 1234567\0' */ is_keyslot_label = sscanf(key, "%s", label) == 1 && !strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL); if (is_keyslot_label) { ctx->keylen = keylen; memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES); ctx->is_key_slot_allocated = true; } else { dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key); err = -EINVAL; } return err; } static int tegra_vse_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct tegra_virtual_se_aes_context *ctx = crypto_aead_ctx(tfm); switch (authsize) { case 16: ctx->authsize = authsize; break; default: return -EINVAL; } return 0; } static int tegra_vse_aes_gcm_init(struct crypto_aead *tfm) { return 0; } static void tegra_vse_aes_gcm_exit(struct crypto_aead *tfm) { /* nothing to do as user unloads the key manually with tzvault*/ } static int tegra_vse_aes_gcm_check_params(struct aead_request *req, bool encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm); uint32_t cryptlen; struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; if (aes_ctx->authsize != TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE) { dev_err(se_dev->dev, "Wrong GCM authsize, expected: 0x%x received: 0x%x\n", TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE, aes_ctx->authsize); return -EINVAL; } if (!encrypt) { if (req->cryptlen < aes_ctx->authsize) { dev_err(se_dev->dev, "%s: gcm_dec cryptlen is invalid\n", __func__); return -EINVAL; } cryptlen = req->cryptlen - aes_ctx->authsize; if (cryptlen > TEGRA_VIRTUAL_SE_MAX_GCMDEC_BUFLEN) { dev_err(se_dev->dev, "%s: dec srcbuf len is invalid %d\n", __func__, cryptlen); return -EINVAL; } } else { cryptlen = req->cryptlen; if (cryptlen > TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN) { dev_err(se_dev->dev, "%s: enc srcbuf len is invalid %d\n", __func__, cryptlen); return -EINVAL; } } if (req->assoclen > TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN) { dev_err(se_dev->dev, "%s: assoclen is invalid\n", __func__); return -EINVAL; } if (unlikely(!aes_ctx->is_key_slot_allocated)) { dev_err(se_dev->dev, "%s: AES Key slot not allocated\n", __func__); return -EINVAL; } return 0; } static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req, bool encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); 
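	/*
	 * Flow of tegra_vse_aes_gcm_enc_dec(), summarized from the code below:
	 * validate the request parameters, stage AAD and payload from the
	 * request scatterlists into the pre-mapped per-node DMA buffers
	 * (aes_ctx->aad/src/tag), build a single IVC message for the SE
	 * server, optionally ask the server to generate a random IV first
	 * (encrypt path with req->iv[0] == 1 and no user-supplied nonce),
	 * wait for completion, and copy the tag/IV (encrypt) or the verified
	 * plaintext (decrypt, after the GET_GCM_DEC command) back into the
	 * destination scatterlist.
	 */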
struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm); struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr; int err = 0; uint32_t cryptlen = 0; void *aad_buf = NULL; void *src_buf = NULL; void *tag_buf = NULL; dma_addr_t aad_buf_addr; dma_addr_t src_buf_addr; dma_addr_t tag_buf_addr; err = tegra_vse_aes_gcm_check_params(req, encrypt); if (err != 0) goto free_exit; if (encrypt) cryptlen = req->cryptlen; else cryptlen = req->cryptlen - aes_ctx->authsize; if (req->assoclen > 0) { aad_buf = aes_ctx->aad->buf_ptr; aad_buf_addr = aes_ctx->aad->buf_iova; if (!aad_buf) { pr_err("%s aad_buf is NULL\n", __func__); err = -ENOMEM; goto free_exit; } /* copy aad from sgs to buffer*/ sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), aad_buf, req->assoclen, 0); } if (cryptlen > 0) { src_buf = aes_ctx->src->buf_ptr; src_buf_addr = aes_ctx->src->buf_iova; if (!src_buf) { pr_err("%s enc src_buf is NULL\n", __func__); err = -ENOMEM; goto free_exit; } /* copy src from sgs to buffer*/ sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), src_buf, cryptlen, req->assoclen); } if (encrypt) { tag_buf = aes_ctx->tag->buf_ptr; tag_buf_addr = aes_ctx->tag->buf_iova; if (!tag_buf) { pr_err("%s tag_buf is NULL\n", __func__); err = -ENOMEM; goto free_exit; } } priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; goto free_exit; } ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) { err = -ENOMEM; goto free_exit; } ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->engine = g_crypto_to_ivc_map[aes_ctx->node_id].se_engine; priv_data_ptr = (struct tegra_vse_tag *)ivc_hdr->tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->se_dev = se_dev; g_crypto_to_ivc_map[aes_ctx->node_id].vse_thread_start = true; memcpy(ivc_tx->aes.op_gcm.keyslot, aes_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); ivc_tx->aes.op_gcm.key_length = aes_ctx->keylen; if (encrypt) { /* * If req->iv[0] is 1 and the request is for AES CBC/CTR encryption, * it means that generation of random IV is required. * IV generation is not required if user nonce is provided. 
*/ if (req->iv[0] == 1 && aes_ctx->user_nonce == 0U) { //Random IV generation is required ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_ENCRYPT_INIT; priv->cmd = VIRTUAL_SE_PROCESS; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), aes_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } err = status_to_errno(priv->rx_status); if (err) { dev_err(se_dev->dev, "\n %s IV generation failed %d\n", __func__, err); goto free_exit; } priv->cmd = VIRTUAL_SE_AES_GCM_ENC_PROCESS; } else { priv->cmd = VIRTUAL_SE_PROCESS; } ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GCM_CMD_ENCRYPT; } else { priv->cmd = VIRTUAL_SE_PROCESS; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GCM_CMD_DECRYPT; } if (!encrypt) { /* copy iv for decryption*/ memcpy(ivc_tx->aes.op_gcm.iv, req->iv, crypto_aead_ivsize(tfm)); /* copy expected tag */ sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), ivc_tx->aes.op_gcm.expected_tag, TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE, req->assoclen + cryptlen); } else { if (aes_ctx->user_nonce != 0U) memcpy(ivc_tx->aes.op_gcm.iv, req->iv, crypto_aead_ivsize(tfm)); } ivc_tx->aes.op_gcm.src_buf_size = cryptlen; ivc_tx->aes.op_gcm.dst_buf_size = cryptlen; if (cryptlen > 0) { ivc_tx->aes.op_gcm.src_addr = (uint32_t)src_buf_addr; /* same source buffer can be used for destination buffer */ ivc_tx->aes.op_gcm.dst_addr = ivc_tx->aes.op_gcm.src_addr; } ivc_tx->aes.op_gcm.aad_buf_size = req->assoclen; if (req->assoclen > 0) ivc_tx->aes.op_gcm.aad_addr = aad_buf_addr; if (encrypt) { ivc_tx->aes.op_gcm.tag_buf_size = aes_ctx->authsize; ivc_tx->aes.op_gcm.tag_addr = tag_buf_addr; } init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), aes_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } if (priv->rx_status != 0) { dev_err(se_dev->dev, "%s: SE Server returned error %u\n", __func__, priv->rx_status); err = status_to_errno(priv->rx_status); goto free_exit; } if (encrypt) { if (aes_ctx->user_nonce == 0U) { /* copy iv to req for encryption*/ memcpy(req->iv, priv->iv, crypto_aead_ivsize(tfm)); } /* copy tag to req for encryption */ sg_pcopy_from_buffer(req->dst, sg_nents(req->dst), tag_buf, aes_ctx->authsize, req->assoclen + cryptlen); } else { priv->cmd = VIRTUAL_SE_PROCESS; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GCM_DEC; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), aes_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } if (priv->rx_status != 0) { dev_err(se_dev->dev, "%s: SE Server returned error %u\n", __func__, priv->rx_status); err = status_to_errno(priv->rx_status); goto free_exit; } } sg_pcopy_from_buffer(req->dst, sg_nents(req->dst), src_buf, cryptlen, req->assoclen); free_exit: if (ivc_req_msg) devm_kfree(se_dev->dev, ivc_req_msg); if (priv) devm_kfree(se_dev->dev, priv); return err; } static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req, bool encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm); struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; struct tegra_virtual_se_ivc_msg_t 
*ivc_req_msg = NULL; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[aes_ctx->node_id].ivck; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr; int err = 0; uint32_t cryptlen = 0; u32 match_code = SE_HW_VALUE_MATCH_CODE; u32 mismatch_code = SE_HW_VALUE_MISMATCH_CODE; void *aad_buf = NULL; void *src_buf = NULL; uint8_t *tag_buf = NULL; void *mac_buf = NULL; dma_addr_t aad_buf_addr; dma_addr_t src_buf_addr; dma_addr_t tag_buf_addr; dma_addr_t mac_buf_addr; err = tegra_vse_aes_gcm_check_params(req, encrypt); if (err != 0) goto free_exit; mac_buf = dma_alloc_coherent(se_dev->dev, 4, &mac_buf_addr, GFP_KERNEL); if (!mac_buf) { err = -ENOMEM; goto free_exit; } if (encrypt) cryptlen = req->cryptlen; else cryptlen = req->cryptlen - aes_ctx->authsize; if (req->assoclen > 0) { aad_buf = dma_alloc_coherent(se_dev->dev, req->assoclen, &aad_buf_addr, GFP_KERNEL); if (!aad_buf) { err = -ENOMEM; goto free_exit; } /* copy aad from sgs to buffer*/ sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), aad_buf, req->assoclen, 0); } if (cryptlen > 0) { if (encrypt) { src_buf = dma_alloc_coherent(se_dev->dev, cryptlen, &src_buf_addr, GFP_KERNEL); if (!src_buf) { err = -ENOMEM; goto free_exit; } } else { src_buf = dma_alloc_coherent(se_dev->dev, cryptlen, &src_buf_addr, GFP_KERNEL); if (!src_buf) { err = -ENOMEM; goto free_exit; } } /* copy src from sgs to buffer*/ sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), src_buf, cryptlen, req->assoclen); } if (encrypt) { if (aes_ctx->user_nonce == 0U) tag_buf = dma_alloc_coherent(se_dev->dev, TEGRA_VIRTUAL_SE_AES_GCM_TAG_IV_SIZE, &tag_buf_addr, GFP_KERNEL); else tag_buf = dma_alloc_coherent(se_dev->dev, aes_ctx->authsize, &tag_buf_addr, GFP_KERNEL); if (!tag_buf) { err = -ENOMEM; goto free_exit; } } priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; goto free_exit; } ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) { err = -ENOMEM; goto free_exit; } ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->engine = g_crypto_to_ivc_map[aes_ctx->node_id].se_engine; priv_data_ptr = (struct tegra_vse_tag *)ivc_hdr->tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->se_dev = se_dev; g_crypto_to_ivc_map[aes_ctx->node_id].vse_thread_start = true; memcpy(ivc_tx->aes.op_gcm.keyslot, aes_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); ivc_tx->aes.op_gcm.key_length = aes_ctx->keylen; if (encrypt) { ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GCM_CMD_ENCRYPT; priv->cmd = VIRTUAL_SE_PROCESS; } else { priv->cmd = VIRTUAL_SE_PROCESS; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GCM_CMD_DECRYPT; } if (!encrypt) { /* copy iv for decryption*/ memcpy(ivc_tx->aes.op_gcm.iv, req->iv, crypto_aead_ivsize(tfm)); /* copy expected tag */ sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), ivc_tx->aes.op_gcm.expected_tag, TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE, req->assoclen + cryptlen); } else { if (aes_ctx->user_nonce != 0U) memcpy(ivc_tx->aes.op_gcm.iv, req->iv, crypto_aead_ivsize(tfm)); } ivc_tx->aes.op_gcm.src_buf_size = cryptlen; ivc_tx->aes.op_gcm.dst_buf_size = cryptlen; if (cryptlen > 0) { ivc_tx->aes.op_gcm.src_addr = (uint32_t)src_buf_addr; ivc_tx->aes.op_gcm.src_buf_size |= (uint32_t)((src_buf_addr >> 8) & ~((1U << 24) - 1U)); 
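	/*
	 * src_addr holds only the low 32 bits of the source IOVA. The OR
	 * above appears to fold bits [39:32] of the DMA address into the
	 * top byte of the 32-bit src_buf_size word (the size itself is
	 * limited to 24 bits), presumably so the SE server can reconstruct
	 * a 40-bit address; this reading is inferred from the bit masking
	 * here, not from the server-side message definition.
	 */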
/* same source buffer can be used for destination buffer */ ivc_tx->aes.op_gcm.dst_addr = ivc_tx->aes.op_gcm.src_addr; ivc_tx->aes.op_gcm.dst_buf_size = ivc_tx->aes.op_gcm.src_buf_size; } ivc_tx->aes.op_gcm.aad_buf_size = req->assoclen; if (req->assoclen > 0) ivc_tx->aes.op_gcm.aad_addr = aad_buf_addr; if (encrypt) { if (aes_ctx->user_nonce == 0U) ivc_tx->aes.op_gcm.tag_buf_size = TEGRA_VIRTUAL_SE_AES_GCM_TAG_IV_SIZE; else ivc_tx->aes.op_gcm.tag_buf_size = aes_ctx->authsize; ivc_tx->aes.op_gcm.tag_addr = tag_buf_addr; } else { ivc_tx->aes.op_gcm.gcm_vrfy_res_addr = mac_buf_addr; } init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), aes_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } if (priv->rx_status != 0) { dev_err(se_dev->dev, "%s: SE Server returned error %u\n", __func__, priv->rx_status); err = status_to_errno(priv->rx_status); goto free_exit; } if (encrypt) { if (aes_ctx->user_nonce == 0U) { /* copy iv to req for encryption*/ memcpy(req->iv, &tag_buf[16], crypto_aead_ivsize(tfm)); } /* copy tag to req for encryption */ sg_pcopy_from_buffer(req->dst, sg_nents(req->dst), tag_buf, aes_ctx->authsize, req->assoclen + cryptlen); } else { if (memcmp(mac_buf, &match_code, 4) != 0) { if (memcmp(mac_buf, &mismatch_code, 4) == 0) dev_dbg(se_dev->dev, "%s: tag mismatch\n", __func__); err = -EINVAL; goto free_exit; } } sg_pcopy_from_buffer(req->dst, sg_nents(req->dst), src_buf, cryptlen, req->assoclen); free_exit: if (ivc_req_msg) devm_kfree(se_dev->dev, ivc_req_msg); if (priv) devm_kfree(se_dev->dev, priv); if (tag_buf) dma_free_coherent(se_dev->dev, ((encrypt) && (aes_ctx->user_nonce == 0U)) ? (TEGRA_VIRTUAL_SE_AES_GCM_TAG_IV_SIZE):(aes_ctx->authsize), tag_buf, tag_buf_addr); if (src_buf) dma_free_coherent(se_dev->dev, cryptlen, src_buf, src_buf_addr); if (aad_buf) dma_free_coherent(se_dev->dev, req->assoclen, aad_buf, aad_buf_addr); if (mac_buf) dma_free_coherent(se_dev->dev, 4, mac_buf, mac_buf_addr); return err; } static int tegra_vse_aes_gcm_encrypt(struct aead_request *req) { struct crypto_aead *tfm; struct tegra_virtual_se_aes_context *aes_ctx; struct tegra_virtual_se_dev *se_dev; int err = 0; if (!req) { pr_err("%s: req is invalid\n", __func__); return -EINVAL; } tfm = crypto_aead_reqtfm(req); aes_ctx = crypto_aead_ctx(tfm); se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; aes_ctx->src = &g_node_dma[aes_ctx->node_id].se_dma_buf[0]; if (!aes_ctx->src->buf_ptr) { dev_err(se_dev->dev, "%s src_buf is NULL\n", __func__); return -EINVAL; } aes_ctx->aad = &g_node_dma[aes_ctx->node_id].se_dma_buf[1]; if (!aes_ctx->aad->buf_ptr) { dev_err(se_dev->dev, "%s aad_buf is NULL\n", __func__); return -EINVAL; } aes_ctx->tag = &g_node_dma[aes_ctx->node_id].se_dma_buf[2]; if (!aes_ctx->tag->buf_ptr) { dev_err(se_dev->dev, "%s tag_buf is NULL\n", __func__); return -EINVAL; } if (unlikely(!req->iv)) { /* If IV is not set we cannot determine whether * random IV generation is required. 
*/ pr_err("%s: Unable to determine if random IV generation is needed\n", __func__); return -EINVAL; } if (se_dev->chipdata->gcm_hw_iv_supported) err = tegra_vse_aes_gcm_enc_dec_hw_support(req, true); else err = tegra_vse_aes_gcm_enc_dec(req, true); if (err) dev_err(se_dev->dev, "%s failed %d\n", __func__, err); return err; } static int tegra_vse_aes_gcm_decrypt(struct aead_request *req) { struct crypto_aead *tfm; struct tegra_virtual_se_aes_context *aes_ctx; struct tegra_virtual_se_dev *se_dev; int err = 0; if (!req) { pr_err("%s: req is invalid\n", __func__); return -EINVAL; } tfm = crypto_aead_reqtfm(req); aes_ctx = crypto_aead_ctx(tfm); se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; aes_ctx->aad = &g_node_dma[aes_ctx->node_id].se_dma_buf[0]; if (!aes_ctx->aad->buf_ptr) { dev_err(se_dev->dev, "%s aad_buf is NULL\n", __func__); return -EINVAL; } if (!se_dev->chipdata->gcm_hw_iv_supported) { if ((req->cryptlen - aes_ctx->authsize) > g_crypto_to_ivc_map[aes_ctx->node_id].mempool.buf_len) aes_ctx->src = &g_node_dma[aes_ctx->node_id].gpc_dma_buf; else aes_ctx->src = &g_crypto_to_ivc_map[aes_ctx->node_id].mempool; } else aes_ctx->src = &g_node_dma[aes_ctx->node_id].se_dma_buf[1]; if (!aes_ctx->src->buf_ptr) { dev_err(se_dev->dev, "%s src_buf is NULL\n", __func__); return -EINVAL; } if (g_crypto_to_ivc_map[aes_ctx->node_id].gcm_dec_supported == GCM_DEC_OP_SUPPORTED) { if (se_dev->chipdata->gcm_hw_iv_supported) err = tegra_vse_aes_gcm_enc_dec_hw_support(req, false); else err = tegra_vse_aes_gcm_enc_dec(req, false); if (err) dev_err(se_dev->dev, "%s failed %d\n", __func__, err); } else { err = -EACCES; dev_err(se_dev->dev, "%s failed for node_id %u\n", __func__, aes_ctx->node_id); } return err; } static int tegra_hv_vse_safety_gmac_cra_init(struct crypto_tfm *tfm) { crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct tegra_virtual_se_aes_gmac_context)); return 0; } static void tegra_hv_vse_safety_gmac_cra_exit(struct crypto_tfm *tfm) { /* nothing to do as user releases the keyslot through tzvault TA */ } static int tegra_hv_vse_aes_gmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_virtual_se_aes_gmac_context *ctx = crypto_ahash_ctx(tfm); struct tegra_virtual_se_dev *se_dev; s8 label[TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL_SIZE]; int err = 0; bool is_keyslot_label; if (!ctx) { pr_err("%s: gmac ctx invalid", __func__); err = -EINVAL; goto exit; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].se_engine]; if ((keylen != 16) && (keylen != 32)) { dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); err = -EINVAL; goto exit; } /* format: 'NVSEAES 1234567\0' */ is_keyslot_label = sscanf(key, "%s", label) == 1 && (!strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL)); if (is_keyslot_label) { ctx->keylen = keylen; memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES); ctx->is_key_slot_allocated = true; } else { dev_err(se_dev->dev, "\n %s: Invalid keyslot label: %s\n", __func__, key); err = -EINVAL; } exit: return err; } static int tegra_hv_vse_aes_gmac_sv_init(struct ahash_request *req) { struct tegra_virtual_se_dev *se_dev; struct crypto_ahash *tfm = NULL; struct tegra_virtual_se_aes_gmac_context *gmac_ctx = NULL; struct tegra_vse_gmac_req_data *gmac_req_data = NULL; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr = NULL; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL; struct tegra_hv_ivc_cookie *pivck; struct tegra_vse_tag 
*priv_data_ptr = NULL; struct tegra_vse_priv_data *priv = NULL; int err = 0; if (!req) { pr_err("%s: request invalid\n", __func__); err = -EINVAL; goto exit; } tfm = crypto_ahash_reqtfm(req); if (!tfm) { pr_err("%s: transform not valid\n", __func__); err = -EINVAL; goto exit; } gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!gmac_ctx) { pr_err("%s: req ctx invalid\n", __func__); err = -EINVAL; goto exit; } if (gmac_ctx->is_key_slot_allocated == false) { pr_err("%s: keyslot is not allocated\n", __func__); err = -EPERM; goto exit; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine]; /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) { dev_err(se_dev->dev, "%s: engine is in suspended state", __func__); err = -ENODEV; goto exit; } gmac_req_data = (struct tegra_vse_gmac_req_data *) req->priv; if ((gmac_req_data->request_type == GMAC_VERIFY) || (gmac_req_data->request_type == GMAC_SIGN)) { /* Initialize GMAC ctx */ gmac_ctx->authsize = crypto_ahash_digestsize(tfm); gmac_ctx->req_context_initialized = true; gmac_ctx->aad = &g_node_dma[gmac_ctx->node_id].se_dma_buf[0]; if (!gmac_ctx->aad->buf_ptr) { dev_err(se_dev->dev, "%s aad_buf is NULL\n", __func__); return -EINVAL; } gmac_ctx->tag = &g_node_dma[gmac_ctx->node_id].se_dma_buf[2]; if (!gmac_ctx->tag->buf_ptr) { dev_err(se_dev->dev, "%s tag_buf is NULL\n", __func__); return -EINVAL; } /* Exit as GMAC_INIT request need not be sent to SE Server for SIGN/VERIFY */ err = 0; goto exit; } priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; goto exit; } ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) { err = -ENOMEM; goto free_exit; } ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->engine = g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine; priv_data_ptr = (struct tegra_vse_tag *)ivc_hdr->tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->cmd = VIRTUAL_SE_PROCESS; priv->se_dev = se_dev; ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_INIT; memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); ivc_tx->aes.op_gcm.key_length = gmac_ctx->keylen; g_crypto_to_ivc_map[gmac_ctx->node_id].vse_thread_start = true; pivck = g_crypto_to_ivc_map[gmac_ctx->node_id].ivck; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), gmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } if (priv->rx_status != 0) { dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); err = status_to_errno(priv->rx_status); goto free_exit; } ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GMAC_IV; priv->cmd = VIRTUAL_SE_AES_GCM_ENC_PROCESS; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), gmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } if (priv->rx_status != 0) { dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); err = status_to_errno(priv->rx_status); goto free_exit; } memcpy(gmac_req_data->iv, priv->iv, 
TEGRA_VIRTUAL_SE_AES_GCM_IV_SIZE); free_exit: if (ivc_req_msg) devm_kfree(se_dev->dev, ivc_req_msg); if (priv) devm_kfree(se_dev->dev, priv); exit: return err; } static void tegra_hv_vse_aes_gmac_deinit(struct ahash_request *req) { struct tegra_virtual_se_aes_gmac_context *gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); gmac_ctx->aad = NULL; gmac_ctx->tag = NULL; gmac_ctx->is_key_slot_allocated = false; gmac_ctx->req_context_initialized = false; } static int tegra_vse_aes_gmac_sv_check_params(struct ahash_request *req) { struct tegra_virtual_se_aes_gmac_context *gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine]; int err = 0; /* Validate aad buf len */ if (req->nbytes > TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN) { dev_err(se_dev->dev, "%s: aad buf length exceeds max supported size\n", __func__); err = -EINVAL; } return err; } static int tegra_hv_vse_aes_gmac_sv_op(struct ahash_request *req, bool is_last) { struct tegra_virtual_se_aes_gmac_context *gmac_ctx = NULL; struct tegra_vse_gmac_req_data *gmac_req_data; struct tegra_virtual_se_dev *se_dev; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_hv_ivc_cookie *pivck; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr; void *aad_buf = NULL; void *tag_buf = NULL; dma_addr_t aad_buf_addr; dma_addr_t tag_buf_addr; int err = 0; gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!gmac_ctx) { pr_err("%s: transform not valid\n", __func__); err = -EINVAL; goto exit; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine]; pivck = g_crypto_to_ivc_map[gmac_ctx->node_id].ivck; gmac_req_data = (struct tegra_vse_gmac_req_data *) req->priv; err = tegra_vse_aes_gmac_sv_check_params(req); if (err != 0) goto exit; aad_buf = gmac_ctx->aad->buf_ptr; aad_buf_addr = gmac_ctx->aad->buf_iova; if (!aad_buf) { err = -ENOMEM; goto exit; } /* copy aad from sgs to buffer*/ sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), aad_buf, req->nbytes, 0); if (gmac_req_data->request_type == GMAC_SIGN) { tag_buf = gmac_ctx->tag->buf_ptr; tag_buf_addr = gmac_ctx->tag->buf_iova; if (!tag_buf) { err = -ENOMEM; goto free_exit; } } priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; goto free_exit; } ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) { err = -ENOMEM; goto free_exit; } ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->engine = g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine; priv_data_ptr = (struct tegra_vse_tag *)ivc_hdr->tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->cmd = VIRTUAL_SE_PROCESS; priv->se_dev = se_dev; if (gmac_req_data->request_type == GMAC_SIGN) ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_SIGN; else ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_VERIFY; memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); ivc_tx->aes.op_gcm.key_length = gmac_ctx->keylen; ivc_tx->aes.op_gcm.aad_buf_size = req->nbytes; ivc_tx->aes.op_gcm.aad_addr = (u32)(aad_buf_addr & U32_MAX); if (gmac_req_data->request_type == GMAC_SIGN) { ivc_tx->aes.op_gcm.tag_buf_size = gmac_ctx->authsize; 
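/* Only the low 32 bits of each IOVA are handed to the server here (see the U32_MAX masks); the pre-mapped se_dma_buf areas are presumably guaranteed to lie within a 32-bit IOVA range. */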
ivc_tx->aes.op_gcm.tag_addr = (u32)(tag_buf_addr & U32_MAX); } if (gmac_req_data->is_first) ivc_tx->aes.op_gcm.config |= (1 << TEGRA_VIRTUAL_SE_AES_GMAC_SV_CFG_FIRST_REQ_SHIFT); if (is_last == true) { ivc_tx->aes.op_gcm.config |= (1 << TEGRA_VIRTUAL_SE_AES_GMAC_SV_CFG_LAST_REQ_SHIFT); if (gmac_req_data->request_type == GMAC_VERIFY) { memcpy(ivc_tx->aes.op_gcm.iv, gmac_req_data->iv, TEGRA_VIRTUAL_SE_AES_GCM_IV_SIZE); memcpy(ivc_tx->aes.op_gcm.expected_tag, req->result, gmac_ctx->authsize); } } g_crypto_to_ivc_map[gmac_ctx->node_id].vse_thread_start = true; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), gmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } if (priv->rx_status != 0) { dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); err = status_to_errno(priv->rx_status); goto free_exit; } else { if (is_last && gmac_req_data->request_type == GMAC_SIGN) { /* copy tag to req for last GMAC_SIGN requests */ memcpy(req->result, tag_buf, gmac_ctx->authsize); } } if (is_last && gmac_req_data->request_type == GMAC_VERIFY) { ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_CMD_GET_GMAC_VERIFY; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), gmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } if (priv->rx_status != 0) { if (priv->rx_status == 11U) gmac_req_data->result = 1; else err = status_to_errno(priv->rx_status); } else { gmac_req_data->result = 0; } } free_exit: if (ivc_req_msg) devm_kfree(se_dev->dev, ivc_req_msg); if (priv) devm_kfree(se_dev->dev, priv); exit: return err; } static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req, bool is_last) { struct tegra_virtual_se_aes_gmac_context *gmac_ctx = NULL; struct tegra_vse_gmac_req_data *gmac_req_data; struct tegra_virtual_se_dev *se_dev; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_hv_ivc_cookie *pivck; struct tegra_vse_priv_data *priv = NULL; struct tegra_vse_tag *priv_data_ptr; void *aad_buf = NULL; void *tag_buf = NULL; u8 *mac_comp = NULL; dma_addr_t aad_buf_addr; dma_addr_t tag_buf_addr; int err = 0; dma_addr_t mac_comp_buf_addr; u32 match_code = SE_HW_VALUE_MATCH_CODE; u32 mismatch_code = SE_HW_VALUE_MISMATCH_CODE; gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!gmac_ctx) { pr_err("%s: transform not valid\n", __func__); err = -EINVAL; goto exit; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine]; pivck = g_crypto_to_ivc_map[gmac_ctx->node_id].ivck; gmac_req_data = (struct tegra_vse_gmac_req_data *) req->priv; err = tegra_vse_aes_gmac_sv_check_params(req); if (err != 0) goto exit; aad_buf = dma_alloc_coherent(se_dev->dev, req->nbytes, &aad_buf_addr, GFP_KERNEL); if (!aad_buf) { err = -ENOMEM; goto exit; } /* copy aad from sgs to buffer*/ sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), aad_buf, req->nbytes, 0); if (gmac_req_data->request_type == GMAC_SIGN) { tag_buf = dma_alloc_coherent(se_dev->dev, gmac_ctx->authsize, &tag_buf_addr, GFP_KERNEL); if (!tag_buf) { err = -ENOMEM; goto free_exit; } } priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; goto free_exit; } 
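/* For GMAC_VERIFY the server reports the tag comparison result by writing a 4-byte SE_HW_VALUE_MATCH_CODE or SE_HW_VALUE_MISMATCH_CODE into the mac_comp buffer allocated below; it is checked after the IVC exchange completes. */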
ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), GFP_KERNEL); if (!ivc_req_msg) { err = -ENOMEM; goto free_exit; } mac_comp = dma_alloc_coherent(se_dev->dev, 4, &mac_comp_buf_addr, GFP_KERNEL); if (!mac_comp) { err = -ENOMEM; goto free_exit; } ivc_tx = &ivc_req_msg->tx[0]; ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr->num_reqs = 1; ivc_hdr->header_magic[0] = 'N'; ivc_hdr->header_magic[1] = 'V'; ivc_hdr->header_magic[2] = 'D'; ivc_hdr->header_magic[3] = 'A'; ivc_hdr->engine = g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine; priv_data_ptr = (struct tegra_vse_tag *)ivc_hdr->tag; priv_data_ptr->priv_data = (unsigned int *)priv; priv->cmd = VIRTUAL_SE_PROCESS; priv->se_dev = se_dev; if (gmac_req_data->request_type == GMAC_SIGN) ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_SIGN; else ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_AES_GMAC_CMD_VERIFY; memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); ivc_tx->aes.op_gcm.key_length = gmac_ctx->keylen; ivc_tx->aes.op_gcm.aad_buf_size = req->nbytes; ivc_tx->aes.op_gcm.aad_addr = (u32)(aad_buf_addr & U32_MAX); if (gmac_req_data->request_type == GMAC_SIGN) { ivc_tx->aes.op_gcm.tag_buf_size = gmac_ctx->authsize; ivc_tx->aes.op_gcm.tag_addr = (u32)(tag_buf_addr & U32_MAX); } if (gmac_req_data->is_first) ivc_tx->aes.op_gcm.config |= (1 << TEGRA_VIRTUAL_SE_AES_GMAC_SV_CFG_FIRST_REQ_SHIFT); if (is_last == true) { ivc_tx->aes.op_gcm.config |= (1 << TEGRA_VIRTUAL_SE_AES_GMAC_SV_CFG_LAST_REQ_SHIFT); if (gmac_req_data->request_type == GMAC_VERIFY) { memcpy(ivc_tx->aes.op_gcm.iv, gmac_req_data->iv, TEGRA_VIRTUAL_SE_AES_GCM_IV_SIZE); memcpy(ivc_tx->aes.op_gcm.expected_tag, req->result, gmac_ctx->authsize); ivc_tx->aes.op_gcm.gcm_vrfy_res_addr = mac_comp_buf_addr; } } if (gmac_ctx->b_is_sm4 == 1U) ivc_tx->aes.op_gcm.sym_ciph = VSE_SYM_CIPH_SM4; else ivc_tx->aes.op_gcm.sym_ciph = VSE_SYM_CIPH_AES; g_crypto_to_ivc_map[gmac_ctx->node_id].vse_thread_start = true; init_completion(&priv->alg_complete); err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t), gmac_ctx->node_id); if (err) { dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); goto free_exit; } if (priv->rx_status != 0) { dev_err(se_dev->dev, "%s: SE server returned error %u\n", __func__, priv->rx_status); err = status_to_errno(priv->rx_status); goto free_exit; } else { if (is_last && gmac_req_data->request_type == GMAC_SIGN) /* copy tag to req for last GMAC_SIGN requests */ memcpy(req->result, tag_buf, gmac_ctx->authsize); } if (is_last && gmac_req_data->request_type == GMAC_VERIFY) { if (memcmp(mac_comp, &match_code, 4) == 0) gmac_req_data->result = 0; else if (memcmp(mac_comp, &mismatch_code, 4) == 0) gmac_req_data->result = 1; else err = -EINVAL; } free_exit: if (ivc_req_msg) devm_kfree(se_dev->dev, ivc_req_msg); if (priv) devm_kfree(se_dev->dev, priv); if (tag_buf) dma_free_coherent(se_dev->dev, gmac_ctx->authsize, tag_buf, tag_buf_addr); if (mac_comp) dma_free_coherent(se_dev->dev, 4, mac_comp, mac_comp_buf_addr); if (aad_buf) dma_free_coherent(se_dev->dev, req->nbytes, aad_buf, aad_buf_addr); exit: return err; } static int tegra_hv_vse_aes_gmac_sv_update(struct ahash_request *req) { struct tegra_virtual_se_aes_gmac_context *gmac_ctx = NULL; struct tegra_virtual_se_dev *se_dev; int ret = 0; if (!req) { pr_err("%s: request not valid\n", __func__); ret = -EINVAL; goto exit; } gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!gmac_ctx->req_context_initialized) { pr_err("%s: 
Request ctx not initialized\n", __func__); ret = -EPERM; goto exit; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine]; /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) { dev_err(se_dev->dev, "%s: engine is in suspended state\n", __func__); ret = -ENODEV; goto exit; } if (se_dev->chipdata->gcm_hw_iv_supported) ret = tegra_hv_vse_aes_gmac_sv_op_hw_support(req, false); else ret = tegra_hv_vse_aes_gmac_sv_op(req, false); if (ret) dev_err(se_dev->dev, "%s failed %d\n", __func__, ret); exit: return ret; } static int tegra_hv_vse_aes_gmac_sv_finup(struct ahash_request *req) { struct tegra_virtual_se_aes_gmac_context *gmac_ctx = NULL; struct tegra_virtual_se_dev *se_dev; int ret = 0; if (!req) { pr_err("%s: request not valid\n", __func__); ret = -EINVAL; goto exit; } gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); if (!gmac_ctx->req_context_initialized) { pr_err("%s: Request ctx not initialized\n", __func__); ret = -EPERM; goto exit; } se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine]; /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) { dev_err(se_dev->dev, "%s: engine is in suspended state\n", __func__); ret = -ENODEV; goto exit; } if (se_dev->chipdata->gcm_hw_iv_supported) ret = tegra_hv_vse_aes_gmac_sv_op_hw_support(req, true); else ret = tegra_hv_vse_aes_gmac_sv_op(req, true); if (ret) dev_err(se_dev->dev, "%s failed %d\n", __func__, ret); tegra_hv_vse_aes_gmac_deinit(req); exit: return ret; } static int tegra_hv_vse_aes_gmac_sv_final(struct ahash_request *req) { struct tegra_virtual_se_aes_gmac_context *gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].se_engine]; dev_err(se_dev->dev, "%s: final not supported", __func__); return -EPERM; } #define HV_SAFETY_AES_CTX_SIZE sizeof(struct tegra_virtual_se_aes_context) static struct rng_alg rng_alg = { .generate = tegra_hv_vse_safety_rng_drbg_get_random, .seed = tegra_hv_vse_safety_rng_drbg_reset, .seedsize = TEGRA_VIRTUAL_SE_RNG_SEED_SIZE, .base = { .cra_name = "rng_drbg", .cra_driver_name = "rng_drbg-aes-tegra", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_RNG, .cra_ctxsize = sizeof(struct tegra_virtual_se_rng_context), .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_rng_drbg_init, .cra_exit = tegra_hv_vse_safety_rng_drbg_exit, } }; static struct aead_alg aead_algs[] = { { .setkey = tegra_vse_aes_gcm_setkey, .setauthsize = tegra_vse_aes_gcm_setauthsize, .encrypt = tegra_vse_aes_gcm_encrypt, .decrypt = tegra_vse_aes_gcm_decrypt, .init = tegra_vse_aes_gcm_init, .exit = tegra_vse_aes_gcm_exit, .ivsize = TEGRA_VIRTUAL_SE_AES_GCM_IV_SIZE, .maxauthsize = TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE, .chunksize = TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE, .base = { .cra_name = "gcm-vse(aes)", .cra_driver_name = "gcm-aes-tegra-safety", .cra_priority = 1000, .cra_blocksize = TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE, .cra_ctxsize = HV_SAFETY_AES_CTX_SIZE, .cra_module = THIS_MODULE, } } }; static struct skcipher_alg aes_algs[] = { { .base.cra_name = "cbc-vse(aes)", .base.cra_driver_name = "cbc-aes-tegra", .base.cra_priority = 400, .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, .base.cra_blocksize = TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE, .base.cra_ctxsize = HV_SAFETY_AES_CTX_SIZE, .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = tegra_hv_vse_safety_aes_cra_init, .exit = tegra_hv_vse_safety_aes_cra_exit, 
.setkey = tegra_hv_vse_safety_aes_setkey, .encrypt = tegra_hv_vse_safety_aes_cbc_encrypt, .decrypt = tegra_hv_vse_safety_aes_cbc_decrypt, .min_keysize = TEGRA_VIRTUAL_SE_AES_MIN_KEY_SIZE, .max_keysize = TEGRA_VIRTUAL_SE_AES_MAX_KEY_SIZE, .ivsize = TEGRA_VIRTUAL_SE_AES_IV_SIZE, }, { .base.cra_name = "ctr-vse(aes)", .base.cra_driver_name = "ctr-aes-tegra-safety", .base.cra_priority = 400, .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, .base.cra_blocksize = TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE, .base.cra_ctxsize = HV_SAFETY_AES_CTX_SIZE, .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = tegra_hv_vse_safety_aes_cra_init, .exit = tegra_hv_vse_safety_aes_cra_exit, .setkey = tegra_hv_vse_safety_aes_setkey, .encrypt = tegra_hv_vse_safety_aes_ctr_encrypt, .decrypt = tegra_hv_vse_safety_aes_ctr_decrypt, .min_keysize = TEGRA_VIRTUAL_SE_AES_MIN_KEY_SIZE, .max_keysize = TEGRA_VIRTUAL_SE_AES_MAX_KEY_SIZE, .ivsize = TEGRA_VIRTUAL_SE_AES_IV_SIZE, }, }; static struct ahash_alg tsec_alg = { .init = tegra_hv_vse_safety_cmac_init, .update = tegra_hv_tsec_safety_cmac_update, .final = tegra_hv_vse_safety_cmac_final, .finup = tegra_hv_tsec_safety_cmac_finup, .digest = tegra_hv_vse_safety_cmac_digest, .setkey = tegra_hv_vse_safety_cmac_setkey, .halg.digestsize = TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE, .halg.statesize = TEGRA_VIRTUAL_SE_AES_CMAC_STATE_SIZE, .halg.base = { .cra_name = "cmac-tsec(aes)", .cra_driver_name = "tegra-hv-vse-safety-tsec(aes)", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_aes_cmac_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_cmac_cra_init, .cra_exit = tegra_hv_vse_safety_cmac_cra_exit, } }; static struct ahash_alg cmac_alg = { .init = tegra_hv_vse_safety_cmac_init, .update = tegra_hv_vse_safety_cmac_update, .final = tegra_hv_vse_safety_cmac_final, .finup = tegra_hv_vse_safety_cmac_finup, .digest = tegra_hv_vse_safety_cmac_digest, .setkey = tegra_hv_vse_safety_cmac_setkey, .halg.digestsize = TEGRA_VIRTUAL_SE_AES_CMAC_DIGEST_SIZE, .halg.statesize = TEGRA_VIRTUAL_SE_AES_CMAC_STATE_SIZE, .halg.base = { .cra_name = "cmac-vse(aes)", .cra_driver_name = "tegra-hv-vse-safety-cmac(aes)", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_aes_cmac_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_cmac_cra_init, .cra_exit = tegra_hv_vse_safety_cmac_cra_exit, } }; static struct ahash_alg gmac_alg = { .init = tegra_hv_vse_aes_gmac_sv_init, .update = tegra_hv_vse_aes_gmac_sv_update, .finup = tegra_hv_vse_aes_gmac_sv_finup, .final = tegra_hv_vse_aes_gmac_sv_final, .setkey = tegra_hv_vse_aes_gmac_setkey, .halg.digestsize = TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE, .halg.statesize = TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE, .halg.base = { .cra_name = "gmac-vse(aes)", .cra_driver_name = "tegra-hv-vse-gmac(aes)", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = TEGRA_VIRTUAL_SE_AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_aes_gmac_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_gmac_cra_init, .cra_exit = tegra_hv_vse_safety_gmac_cra_exit, } }; static struct ahash_alg sha_algs[] = { { .init = tegra_hv_vse_safety_sha_init, .update = tegra_hv_vse_safety_sha_update, .final = tegra_hv_vse_safety_sha_final, .finup = 
tegra_hv_vse_safety_sha_finup, .digest = tegra_hv_vse_safety_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .halg.digestsize = SHA256_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "sha256-vse", .cra_driver_name = "tegra-hv-vse-safety-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } }, { .init = tegra_hv_vse_safety_sha_init, .update = tegra_hv_vse_safety_sha_update, .final = tegra_hv_vse_safety_sha_final, .finup = tegra_hv_vse_safety_sha_finup, .digest = tegra_hv_vse_safety_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .halg.digestsize = SHA384_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "sha384-vse", .cra_driver_name = "tegra-hv-vse-safety-sha384", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } }, { .init = tegra_hv_vse_safety_sha_init, .update = tegra_hv_vse_safety_sha_update, .final = tegra_hv_vse_safety_sha_final, .finup = tegra_hv_vse_safety_sha_finup, .digest = tegra_hv_vse_safety_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .halg.digestsize = SHA512_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "sha512-vse", .cra_driver_name = "tegra-hv-vse-safety-sha512", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } }, { .init = tegra_hv_vse_safety_sha_init, .update = tegra_hv_vse_safety_sha_update, .final = tegra_hv_vse_safety_sha_final, .finup = tegra_hv_vse_safety_sha_finup, .digest = tegra_hv_vse_safety_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .halg.digestsize = SHA3_256_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "sha3-256-vse", .cra_driver_name = "tegra-hv-vse-safety-sha3-256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = SHA3_256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } }, { .init = tegra_hv_vse_safety_sha_init, .update = tegra_hv_vse_safety_sha_update, .final = tegra_hv_vse_safety_sha_final, .finup = tegra_hv_vse_safety_sha_finup, .digest = tegra_hv_vse_safety_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .halg.digestsize = SHA3_384_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "sha3-384-vse", .cra_driver_name = "tegra-hv-vse-safety-sha3-384", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = 
SHA3_384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } }, { .init = tegra_hv_vse_safety_sha_init, .update = tegra_hv_vse_safety_sha_update, .final = tegra_hv_vse_safety_sha_final, .finup = tegra_hv_vse_safety_sha_finup, .digest = tegra_hv_vse_safety_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .halg.digestsize = SHA3_512_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "sha3-512-vse", .cra_driver_name = "tegra-hv-vse-safety-sha3-512", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = SHA3_512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } }, { .init = tegra_hv_vse_safety_sha_init, .update = tegra_hv_vse_safety_sha_update, .final = tegra_hv_vse_safety_sha_final, .finup = tegra_hv_vse_safety_sha_finup, .digest = tegra_hv_vse_safety_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .halg.digestsize = SHA3_512_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "shake128-vse", .cra_driver_name = "tegra-hv-vse-safety-shake128", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = SHA3_512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } }, { .init = tegra_hv_vse_safety_sha_init, .update = tegra_hv_vse_safety_sha_update, .final = tegra_hv_vse_safety_sha_final, .finup = tegra_hv_vse_safety_sha_finup, .digest = tegra_hv_vse_safety_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .halg.digestsize = SHA3_512_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "shake256-vse", .cra_driver_name = "tegra-hv-vse-safety-shake256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = SHA3_512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } }, { .init = tegra_hv_vse_safety_sha_init, .update = tegra_hv_vse_safety_sha_update, .final = tegra_hv_vse_safety_sha_final, .finup = tegra_hv_vse_safety_sha_finup, .digest = tegra_hv_vse_safety_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .halg.digestsize = SM3_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "sm3-vse", .cra_driver_name = "tegra-hv-vse-safety-sm3", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = SM3_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } }, { .init = tegra_hv_vse_safety_hmac_sha_init, .update = tegra_hv_vse_safety_hmac_sha_update, .final = tegra_hv_vse_safety_hmac_sha_final, .finup = tegra_hv_vse_safety_hmac_sha_finup, .digest = 
tegra_hv_vse_safety_hmac_sha_digest, .export = tegra_hv_vse_safety_sha_export, .import = tegra_hv_vse_safety_sha_import, .setkey = tegra_hv_vse_safety_hmac_sha_setkey, .halg.digestsize = SHA256_DIGEST_SIZE, .halg.statesize = sizeof(struct tegra_virtual_se_req_context), .halg.base = { .cra_name = "hmac-sha256-vse", .cra_driver_name = "tegra-hv-vse-safety-hmac-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tegra_virtual_se_hmac_sha_context), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = tegra_hv_vse_safety_sha_cra_init, .cra_exit = tegra_hv_vse_safety_sha_cra_exit, } } }; static const struct tegra_vse_soc_info t194_vse_sinfo = { .gcm_decrypt_supported = false, .cmac_hw_verify_supported = false, .sm_supported = false, .gcm_hw_iv_supported = false, .hmac_verify_hw_support = false, }; static const struct tegra_vse_soc_info t234_vse_sinfo = { .gcm_decrypt_supported = true, .cmac_hw_verify_supported = false, .sm_supported = false, .gcm_hw_iv_supported = false, .hmac_verify_hw_support = false, }; static const struct tegra_vse_soc_info se_51_vse_sinfo = { .gcm_decrypt_supported = true, .cmac_hw_verify_supported = true, .sm_supported = true, .gcm_hw_iv_supported = true, .hmac_verify_hw_support = true, }; static const struct of_device_id tegra_hv_vse_safety_of_match[] = { { .compatible = "nvidia,tegra194-hv-vse-safety", .data = &t194_vse_sinfo, }, { .compatible = "nvidia,tegra234-hv-vse-safety", .data = &t234_vse_sinfo, }, { .compatible = "nvidia,tegra-se-5.1-hv-vse-safety", .data = &se_51_vse_sinfo, }, {}, }; MODULE_DEVICE_TABLE(of, tegra_hv_vse_safety_of_match); static irqreturn_t tegra_vse_irq_handler(int irq, void *data) { uint32_t node_id = *((uint32_t *)data); struct tegra_hv_ivc_cookie *ivck = g_crypto_to_ivc_map[node_id].ivck; if (tegra_hv_ivc_can_read(ivck)) complete(&g_crypto_to_ivc_map[node_id].tegra_vse_complete); return IRQ_HANDLED; } static int tegra_vse_kthread(void *data) { uint32_t node_id = *((uint32_t *)data); struct tegra_virtual_se_dev *se_dev = NULL; struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[node_id].ivck; struct tegra_virtual_se_ivc_msg_t *ivc_msg; int err = 0; int timeout; int ret; bool is_dummy = false; size_t size_ivc_msg = sizeof(struct tegra_virtual_se_ivc_msg_t); enum ivc_irq_state *irq_state; se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[node_id].se_engine]; ivc_msg = devm_kzalloc(se_dev->dev, size_ivc_msg, GFP_KERNEL); if (!ivc_msg) return -ENOMEM; while (!kthread_should_stop()) { err = 0; ret = wait_for_completion_interruptible( &g_crypto_to_ivc_map[node_id].tegra_vse_complete); if (ret < 0) { pr_err("%s completion err\n", __func__); reinit_completion(&g_crypto_to_ivc_map[node_id].tegra_vse_complete); continue; } if (!g_crypto_to_ivc_map[node_id].vse_thread_start) { reinit_completion(&g_crypto_to_ivc_map[node_id].tegra_vse_complete); continue; } timeout = TEGRA_VIRTUAL_SE_TIMEOUT_1S; while (tegra_hv_ivc_channel_notified(pivck) != 0) { if (!timeout) { reinit_completion( &g_crypto_to_ivc_map[node_id].tegra_vse_complete); pr_err("%s:%d ivc channel_notifier timeout\n", __func__, __LINE__); err = -EAGAIN; break; } udelay(1); timeout--; } if (err == -EAGAIN) { err = 0; continue; } mutex_lock(&(se_dev->crypto_to_ivc_map[node_id].irq_state_lock)); irq_state = &(se_dev->crypto_to_ivc_map[node_id].wait_interrupt); while (tegra_hv_ivc_can_read(pivck) && *irq_state != NO_INTERRUPT) { pr_debug("%s(): wait_interrupt = %u", __func__, *irq_state); if (*irq_state == 
INTERMEDIATE_REQ_INTERRUPT) { err = read_and_validate_valid_msg(se_dev, pivck, node_id, &is_dummy, true); if (err != 0) { dev_err(se_dev->dev, "%s(): Unable to read validate message", __func__); } *irq_state = NO_INTERRUPT; pr_debug("%s():%d wait_interrupt = %u\n", __func__, __LINE__, *irq_state); break; } else if (*irq_state == FIRST_REQ_INTERRUPT) { err = read_and_validate_dummy_msg(se_dev, pivck, node_id, &is_dummy); if (err != 0) { dev_err(se_dev->dev, "%s:%d Invalid response header\n", __func__, __LINE__); err = 0; continue; } if (is_dummy == true) { *irq_state = INTERMEDIATE_REQ_INTERRUPT; pr_debug("%s():%d Dummy message read. Read valid message.", __func__, __LINE__); continue; } else { dev_err(se_dev->dev, "Invalid response sequence"); break; } } else { dev_err(se_dev->dev, "Invalid irq state - %u", *irq_state); return -EINVAL; } } mutex_unlock(&(se_dev->crypto_to_ivc_map[node_id].irq_state_lock)); } devm_kfree(se_dev->dev, ivc_msg); return 0; } #if defined(CONFIG_HW_RANDOM) static int tegra_hv_vse_safety_hwrng_read(struct hwrng *rng, void *buf, size_t size, bool wait) { struct tegra_virtual_se_rng_context *ctx; if (!wait) return 0; ctx = (struct tegra_virtual_se_rng_context *)rng->priv; return tegra_hv_vse_safety_get_random(ctx, buf, size); } #endif /* CONFIG_HW_RANDOM */ static int tegra_hv_vse_safety_register_hwrng(struct tegra_virtual_se_dev *se_dev) { #if defined(CONFIG_HW_RANDOM) int ret; struct hwrng *vse_hwrng = NULL; struct tegra_virtual_se_rng_context *rng_ctx = NULL; vse_hwrng = devm_kzalloc(se_dev->dev, sizeof(*vse_hwrng), GFP_KERNEL); if (!vse_hwrng) { ret = -ENOMEM; goto out; } rng_ctx = devm_kzalloc(se_dev->dev, sizeof(*rng_ctx), GFP_KERNEL); if (!rng_ctx) { ret = -ENOMEM; goto out; } rng_ctx->se_dev = se_dev; vse_hwrng->name = "tegra_hv_vse_safety"; vse_hwrng->read = tegra_hv_vse_safety_hwrng_read; vse_hwrng->quality = 1024; vse_hwrng->priv = (unsigned long)rng_ctx; ret = devm_hwrng_register(se_dev->dev, vse_hwrng); out: if (ret) { if (rng_ctx) { if (rng_ctx->rng_buf) dma_free_coherent(se_dev->dev, TEGRA_VIRTUAL_SE_RNG_DT_SIZE, rng_ctx->rng_buf, rng_ctx->rng_buf_adr); devm_kfree(se_dev->dev, rng_ctx); } if (vse_hwrng) devm_kfree(se_dev->dev, vse_hwrng); } else { se_dev->hwrng = vse_hwrng; } return ret; #else return 0; #endif /* CONFIG_HW_RANDOM */ } static void tegra_hv_vse_safety_unregister_hwrng(struct tegra_virtual_se_dev *se_dev) { #if defined(CONFIG_HW_RANDOM) struct tegra_virtual_se_rng_context *rng_ctx; if (se_dev->hwrng) { devm_hwrng_unregister(se_dev->dev, se_dev->hwrng); rng_ctx = (struct tegra_virtual_se_rng_context *)se_dev->hwrng->priv; dma_free_coherent(se_dev->dev, TEGRA_VIRTUAL_SE_RNG_DT_SIZE, rng_ctx->rng_buf, rng_ctx->rng_buf_adr); devm_kfree(se_dev->dev, rng_ctx); devm_kfree(se_dev->dev, se_dev->hwrng); se_dev->hwrng = NULL; } #endif /* CONFIG_HW_RANDOM */ } static const struct of_device_id host1x_match[] = { { .compatible = "nvidia,tegra234-host1x", }, { .compatible = "nvidia,tegra264-host1x", }, {}, }; static int se_get_nvhost_dev(struct tegra_virtual_se_dev *se_dev) { struct platform_device *host1x_pdev; struct device_node *np; np = of_find_matching_node(NULL, host1x_match); if (!np) { dev_err(se_dev->dev, "Failed to find host1x, syncpt support disabled"); return -ENODATA; } host1x_pdev = of_find_device_by_node(np); if (!host1x_pdev) { dev_err(se_dev->dev, "host1x device not available"); return -EPROBE_DEFER; } se_dev->host1x_pdev = host1x_pdev; return 0; } static bool tegra_ivc_check_entry(struct tegra_virtual_se_dev *se_dev, uint32_t ivc_id) 
{ uint32_t cnt; for (cnt = 0; cnt < MAX_NUMBER_MISC_DEVICES; cnt++) { if (g_crypto_to_ivc_map[cnt].ivc_id == ivc_id) return true; } return false; } static bool tegra_mempool_check_entry(struct tegra_virtual_se_dev *se_dev, uint32_t mempool_id) { uint32_t cnt; for (cnt = 0; cnt < MAX_NUMBER_MISC_DEVICES; cnt++) { if (g_crypto_to_ivc_map[cnt].mempool.buf_len > 0) if (g_crypto_to_ivc_map[cnt].mempool_id == mempool_id) return true; } return false; } static int tegra_hv_vse_allocate_gpc_dma_bufs(struct tegra_vse_node_dma *node_dma, struct device *gpcdma_dev, struct crypto_dev_to_ivc_map *ivc_map) { int32_t err = -ENOMEM; if (!node_dma) { pr_err("%s node_dma is null\n", __func__); err = -EINVAL; goto exit; } if (!gpcdma_dev) { pr_err("%s gpcdma_dev is null\n", __func__); err = -EINVAL; goto exit; } if (!ivc_map) { pr_err("%s ivc_map is null\n", __func__); err = -EINVAL; goto exit; } if ((ivc_map->se_engine != VIRTUAL_SE_AES0) && (ivc_map->se_engine != VIRTUAL_SE_AES1)) { /* No GPCDMA buffer allocation is needed in case of non AES engines */ err = 0; goto exit; } if (ivc_map->gcm_dec_buffer_size > 0) { node_dma->gpc_dma_buf.buf_ptr = dma_alloc_coherent(gpcdma_dev, ALIGN(ivc_map->gcm_dec_buffer_size, 64U), &node_dma->gpc_dma_buf.buf_iova, GFP_KERNEL); if (!node_dma->gpc_dma_buf.buf_ptr) { dev_err(gpcdma_dev, "%s dma_alloc_coherent failed\n", __func__); err = -ENOMEM; goto exit; } node_dma->gpcdma_dev = gpcdma_dev; node_dma->gpc_dma_buf.buf_len = ivc_map->gcm_dec_buffer_size; } err = 0; exit: return err; } static void tegra_hv_vse_release_gpc_dma_bufs(struct device *gpcdma_dev) { uint32_t i; struct tegra_vse_dma_buf *dma_buf = NULL; if (!gpcdma_dev) { pr_err("%s gpcdma_dev is null\n", __func__); return; } for (i = 0; i < MAX_NUMBER_MISC_DEVICES; i++) { if (g_node_dma[i].gpcdma_dev == gpcdma_dev) { dma_buf = &g_node_dma[i].gpc_dma_buf; if ((dma_buf->buf_len > 0U) && (dma_buf->buf_ptr != NULL)) { dma_free_coherent(gpcdma_dev, dma_buf->buf_len, dma_buf->buf_ptr, dma_buf->buf_iova); dma_buf->buf_len = 0U; dma_buf->buf_ptr = NULL; } } } } static int tegra_hv_vse_allocate_se_dma_bufs(struct tegra_vse_node_dma *node_dma, struct device *se_dev, struct crypto_dev_to_ivc_map *ivc_map) { int32_t err = -ENOMEM; uint32_t buf_sizes[MAX_SE_DMA_BUFS] = {0U}; uint32_t i; if (!node_dma) { pr_err("%s node_dma is null\n", __func__); err = -EINVAL; goto exit; } if (!se_dev) { pr_err("%s se_dev is null\n", __func__); err = -EINVAL; goto exit; } if (!ivc_map) { pr_err("%s ivc_map is null\n", __func__); err = -EINVAL; goto exit; } switch (ivc_map->se_engine) { case VIRTUAL_SE_AES0: case VIRTUAL_SE_AES1: /* * For AES algs, the worst case requirement is for AES-GCM encryption: * 1. src buffer(requires up to max limit specified in DT) * 2. aad buffer(requires up to max limit specified in DT) * 3. mac/tag buffer(requires 16 bytes) */ buf_sizes[0] = ivc_map->max_buffer_size; buf_sizes[1] = ivc_map->max_buffer_size; buf_sizes[2] = 64U; break; case VIRTUAL_SE_SHA: /* * For SHA algs, the worst case requirement for SHAKE128/SHAKE256: * 1. plaintext buffer(requires up to max limit specified in DT) * 2. residual plaintext buffer(requires up to max limit specified in DT) * 3. digest buffer(support a maximum digest size of 1024 bytes) */ buf_sizes[0] = ivc_map->max_buffer_size; buf_sizes[1] = ivc_map->max_buffer_size; buf_sizes[2] = 1024U; break; case VIRTUAL_GCSE1_SHA: /* * For SHA algs, the worst case requirement for SHAKE128/SHAKE256: * 1. plaintext buffer(requires up to max limit specified in DT) * 2. 
residual plaintext buffer(requires up to max limit specified in DT) * 3. digest buffer(support a maximum digest size of 1024 bytes) */ buf_sizes[0] = ivc_map->max_buffer_size; buf_sizes[1] = ivc_map->max_buffer_size; buf_sizes[2] = 1024U; break; case VIRTUAL_GCSE2_SHA: /* * For SHA algs, the worst case requirement for SHAKE128/SHAKE256: * 1. plaintext buffer(requires up to max limit specified in DT) * 2. residual plaintext buffer(requires up to max limit specified in DT) * 3. digest buffer(support a maximum digest size of 1024 bytes) */ buf_sizes[0] = ivc_map->max_buffer_size; buf_sizes[1] = ivc_map->max_buffer_size; buf_sizes[2] = 1024U; break; default: err = 0; goto exit; } node_dma->se_dev = se_dev; for (i = 0; i < MAX_SE_DMA_BUFS; i++) { if (buf_sizes[i] == 0U) break; node_dma->se_dma_buf[i].buf_ptr = dma_alloc_coherent(se_dev, buf_sizes[i], &node_dma->se_dma_buf[i].buf_iova, GFP_KERNEL); if (!node_dma->se_dma_buf[i].buf_ptr) { dev_err(se_dev, "%s dma_alloc_coherent failed\n", __func__); err = -ENOMEM; goto exit; } node_dma->se_dma_buf[i].buf_len = buf_sizes[i]; } err = 0; exit: return err; } static void tegra_hv_vse_release_se_dma_bufs(struct device *se_dev) { uint32_t i, j; struct tegra_vse_dma_buf *dma_buf = NULL; if (!se_dev) { pr_err("%s se_dev is null\n", __func__); return; } for (i = 0; i < MAX_NUMBER_MISC_DEVICES; i++) { if (g_node_dma[i].se_dev == se_dev) { for (j = 0; j < MAX_SE_DMA_BUFS; j++) { dma_buf = &g_node_dma[i].se_dma_buf[j]; if ((dma_buf->buf_len > 0U) && (dma_buf->buf_ptr != NULL)) { dma_free_coherent(se_dev, dma_buf->buf_len, dma_buf->buf_ptr, dma_buf->buf_iova); dma_buf->buf_len = 0U; dma_buf->buf_ptr = NULL; } } } } } static void tegra_hv_vse_release_all_dma_bufs(void) { uint32_t i, j; struct tegra_vse_dma_buf *dma_buf = NULL; for (i = 0; i < MAX_NUMBER_MISC_DEVICES; i++) { if (g_node_dma[i].se_dev) { for (j = 0; j < MAX_SE_DMA_BUFS; j++) { dma_buf = &g_node_dma[i].se_dma_buf[j]; if ((dma_buf->buf_len > 0U) && (dma_buf->buf_ptr != NULL)) { dma_free_coherent(g_node_dma[i].se_dev, dma_buf->buf_len, dma_buf->buf_ptr, dma_buf->buf_iova); dma_buf->buf_len = 0U; dma_buf->buf_ptr = NULL; } } } if (g_node_dma[i].gpcdma_dev) { dma_buf = &g_node_dma[i].gpc_dma_buf; if ((dma_buf->buf_len > 0U) && (dma_buf->buf_ptr != NULL)) { dma_free_coherent(g_node_dma[i].gpcdma_dev, dma_buf->buf_len, dma_buf->buf_ptr, dma_buf->buf_iova); dma_buf->buf_len = 0U; dma_buf->buf_ptr = NULL; } } } } static int tegra_hv_vse_safety_probe(struct platform_device *pdev) { struct tegra_virtual_se_dev *se_dev = NULL; struct crypto_dev_to_ivc_map *crypto_dev = NULL; struct device_node *np; int err = 0; int i; unsigned int ivc_id; unsigned int mempool_id; unsigned int engine_id; const struct of_device_id *match; struct tegra_vse_soc_info *pdata = NULL; uint32_t ivc_cnt, cnt, node_id; gcm_supports_dma = of_property_read_bool(pdev->dev.of_node, "nvidia,gcm-dma-support"); if (gcm_supports_dma) { gpcdma_dev = &pdev->dev; for (i = 0; i < MAX_NUMBER_MISC_DEVICES; i++) { err = tegra_hv_vse_allocate_gpc_dma_bufs(&g_node_dma[i], gpcdma_dev, &g_crypto_to_ivc_map[i]); if (err) { dev_err(gpcdma_dev, "%s returned error %d for node id %d\n", __func__, err, i); tegra_hv_vse_release_gpc_dma_bufs(gpcdma_dev); goto exit; } } return 0; } se_dev = devm_kzalloc(&pdev->dev, sizeof(struct tegra_virtual_se_dev), GFP_KERNEL); if (!se_dev) { pr_err("%s devm_kzalloc failed\n", __func__); err = -ENOMEM; goto exit; } /* set host1x platform device */ err = se_get_nvhost_dev(se_dev); if (err) { dev_err(&pdev->dev, "Failed to get 
nvhost dev with err: %d\n", err); goto exit; } np = pdev->dev.of_node; se_dev->crypto_to_ivc_map = g_crypto_to_ivc_map; se_dev->dev = &pdev->dev; err = of_property_read_u32(np, "se-engine-id", &engine_id); if (err) { dev_err(&pdev->dev, "se-engine-id property not present\n"); err = -ENODEV; goto exit; } g_virtual_se_dev[engine_id] = se_dev; /* read ivccfg from dts */ err = of_property_read_u32_index(np, "nvidia,ivccfg_cnt", 0, &ivc_cnt); if (err) { pr_err("Error: failed to read ivc_cnt. err %d\n", err); err = -ENODEV; goto exit; } if (pdev->dev.of_node) { match = of_match_device(of_match_ptr(tegra_hv_vse_safety_of_match), &pdev->dev); if (!match) { dev_err(&pdev->dev, "Error: No device match found\n"); return -ENODEV; } pdata = (struct tegra_vse_soc_info *)match->data; } else { pdata = (struct tegra_vse_soc_info *)pdev->id_entry->driver_data; } se_dev->chipdata = pdata; for (cnt = 0; cnt < ivc_cnt; cnt++) { err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_CRYPTO_DEV_ID_OFFSET, &node_id); if (err || node_id >= MAX_NUMBER_MISC_DEVICES) { pr_err("Error: invalid node_id. err %d\n", err); err = -ENODEV; goto exit; } crypto_dev = &g_crypto_to_ivc_map[node_id]; if (crypto_dev->ivc_id == 0) { crypto_dev->node_id = node_id; } else { pr_err("Error: resource already allocated for node_id %u\n", node_id); err = -ENODEV; goto exit; } err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_IVC_ID_OFFSET, &ivc_id); if (err) { pr_err("Error: failed to read ivc_id. err %d\n", err); err = -ENODEV; goto exit; } if (tegra_ivc_check_entry(se_dev, ivc_id) == false) { crypto_dev->ivc_id = ivc_id; } else { pr_err("Error: array entry already exists for ivc_id %u\n", ivc_id); err = -ENODEV; goto exit; } err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_SE_ENGINE_ID_OFFSET, &crypto_dev->se_engine); if (err) { pr_err("Error: failed to read se_engine. err %d\n", err); err = -ENODEV; goto exit; } if (engine_id != crypto_dev->se_engine) { pr_err("Error: se engine mismatch for ivc_id %u\n", crypto_dev->ivc_id); err = -ENODEV; goto exit; } err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_IVC_PRIORITY_OFFSET, &crypto_dev->priority); if (err || crypto_dev->priority > MAX_IVC_Q_PRIORITY) { pr_err("Error: invalid queue priority. err %d\n", err); err = -ENODEV; goto exit; } err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_MAX_BUFFER_SIZE, &crypto_dev->max_buffer_size); if (err) { pr_err("Error: invalid max buffer size. err %d\n", err); err = -ENODEV; goto exit; } err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_CHANNEL_GROUPID_OFFSET, &crypto_dev->channel_grp_id); if (err) { pr_err("Error: invalid channel group id. err %d\n", err); err = -ENODEV; goto exit; } err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_GCM_SUPPORTED_FLAG_OFFSET, &crypto_dev->gcm_dec_supported); if (err || crypto_dev->gcm_dec_supported > GCM_DEC_OP_SUPPORTED) { pr_err("Error: invalid gcm decrypt supported flag. err %d\n", err); err = -ENODEV; goto exit; } err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_GCM_DEC_BUFFER_SIZE, &crypto_dev->gcm_dec_buffer_size); if (err || (crypto_dev->gcm_dec_supported != GCM_DEC_OP_SUPPORTED && crypto_dev->gcm_dec_buffer_size != 0)) { pr_err("Error: invalid gcm decrypt buffer size. 
err %d\n", err); err = -ENODEV; goto exit; } err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_GCM_DEC_MEMPOOL_ID, &mempool_id); if (err || ((crypto_dev->gcm_dec_supported != GCM_DEC_OP_SUPPORTED) && (mempool_id != 0))) { pr_err("Error: invalid mempool id. err %d\n", err); err = -ENODEV; goto exit; } err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN + TEGRA_GCM_DEC_MEMPOOL_SIZE, &crypto_dev->mempool.buf_len); if (err || ((crypto_dev->gcm_dec_supported == GCM_DEC_OP_SUPPORTED) && (crypto_dev->mempool.buf_len > crypto_dev->gcm_dec_buffer_size))) { pr_err("Error: invalid mempool size err %d\n", err); err = -ENODEV; goto exit; } dev_info(se_dev->dev, "Virtual SE channel number: %d", ivc_id); crypto_dev->ivck = tegra_hv_ivc_reserve(NULL, ivc_id, NULL); if (IS_ERR_OR_NULL(crypto_dev->ivck)) { dev_err(&pdev->dev, "Failed reserve channel number\n"); err = -ENODEV; goto exit; } tegra_hv_ivc_channel_reset(crypto_dev->ivck); if (!se_dev->chipdata->gcm_hw_iv_supported && (crypto_dev->mempool.buf_len > 0)) { dev_info(se_dev->dev, "Virtual SE mempool channel number: %d\n", mempool_id); if (tegra_mempool_check_entry(se_dev, mempool_id) == false) { crypto_dev->mempool_id = mempool_id; } else { pr_err("Error: mempool id %u is already used\n", mempool_id); err = -ENODEV; goto exit; } crypto_dev->ivmk = tegra_hv_mempool_reserve(crypto_dev->mempool_id); if (IS_ERR_OR_NULL(crypto_dev->ivmk)) { dev_err(&pdev->dev, "Failed to reserve mempool channel %d\n", crypto_dev->mempool_id); err = -ENODEV; goto exit; } if (crypto_dev->ivmk->size < crypto_dev->mempool.buf_len) { pr_err("Error: mempool %u size(%llu) is smaller than DT value(%u)", crypto_dev->mempool_id, crypto_dev->ivmk->size, crypto_dev->mempool.buf_len); err = -ENODEV; goto exit; } crypto_dev->mempool.buf_ptr = devm_memremap(&pdev->dev, crypto_dev->ivmk->ipa, crypto_dev->ivmk->size, MEMREMAP_WB); if (IS_ERR_OR_NULL(crypto_dev->mempool.buf_ptr)) { dev_err(&pdev->dev, "Failed to map mempool area %d\n", crypto_dev->mempool_id); err = -ENOMEM; goto exit; } /* For GCM decrypt buffer IOVA field represents offset */ crypto_dev->mempool.buf_iova = 0; } init_completion(&crypto_dev->tegra_vse_complete); mutex_init(&crypto_dev->se_ivc_lock); mutex_init(&crypto_dev->irq_state_lock); crypto_dev->tegra_vse_task = kthread_run(tegra_vse_kthread, &crypto_dev->node_id, "tegra_vse_kthread-%u", node_id); if (IS_ERR(crypto_dev->tegra_vse_task)) { dev_err(se_dev->dev, "Couldn't create kthread for vse with node id %u\n", node_id); err = PTR_ERR(crypto_dev->tegra_vse_task); goto exit; } if (request_irq(crypto_dev->ivck->irq, tegra_vse_irq_handler, 0, "vse", &crypto_dev->node_id)) { dev_err(se_dev->dev, "Failed to request irq %d for node id %u\n", crypto_dev->ivck->irq, node_id); err = -EINVAL; goto exit; } crypto_dev->wait_interrupt = FIRST_REQ_INTERRUPT; err = tegra_hv_vse_allocate_se_dma_bufs(&g_node_dma[node_id], se_dev->dev, crypto_dev); if (err) { dev_err(gpcdma_dev, "%s returned error %d for node id %d\n", __func__, err, node_id); goto exit; } } if (engine_id == VIRTUAL_SE_AES0) { err = crypto_register_ahash(&cmac_alg); if (err) { dev_err(&pdev->dev, "cmac alg register failed. Err %d\n", err); goto release_bufs; } err = crypto_register_ahash(&gmac_alg); if (err) { dev_err(&pdev->dev, "gmac alg register failed. Err %d\n", err); goto release_bufs; } err = crypto_register_rng(&rng_alg); if (err) { dev_err(&pdev->dev, "rng alg register failed. 
Err %d\n", err); goto release_bufs; } err = tegra_hv_vse_safety_register_hwrng(se_dev); if (err) { dev_err(&pdev->dev, "hwrng register failed. Err %d\n", err); goto release_bufs; } } if (engine_id == VIRTUAL_SE_AES1) { err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); if (err) { dev_err(&pdev->dev, "aes alg register failed: %d\n", err); goto release_bufs; } if (se_dev->chipdata->gcm_decrypt_supported) { err = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); if (err) { dev_err(&pdev->dev, "aead alg register failed: %d\n", err); goto release_bufs; } } } if ((engine_id == VIRTUAL_SE_SHA || engine_id == VIRTUAL_GCSE1_SHA || engine_id == VIRTUAL_GCSE2_SHA) && s_set_sha_algs == false) { for (i = 0; i < ARRAY_SIZE(sha_algs); i++) { err = crypto_register_ahash(&sha_algs[i]); if (err) { dev_err(&pdev->dev, "sha alg register failed idx[%d]\n", i); goto release_bufs; } } s_set_sha_algs = true; } if (engine_id == VIRTUAL_SE_TSEC) { err = crypto_register_ahash(&tsec_alg); if (err) { dev_err(&pdev->dev, "Tsec alg register failed. Err %d\n", err); goto release_bufs; } } se_dev->engine_id = engine_id; /* Set Engine suspended state to false*/ atomic_set(&se_dev->se_suspended, 0); platform_set_drvdata(pdev, se_dev); return 0; release_bufs: tegra_hv_vse_release_se_dma_bufs(se_dev->dev); exit: return err; } static void tegra_hv_vse_safety_shutdown(struct platform_device *pdev) { struct tegra_virtual_se_dev *se_dev = platform_get_drvdata(pdev); uint32_t cnt; /* skip checking pending request for the node with "nvidia,gcm-dma-support" * which only used to allocate buffer for gpcdma * for other vse nodes which doesn't have "nvidia,gcm-dma-support", * it will still check pending request. */ if (gcm_supports_dma) return; /* Set engine to suspend state */ atomic_set(&se_dev->se_suspended, 1); for (cnt = 0; cnt < MAX_NUMBER_MISC_DEVICES; cnt++) { if (g_crypto_to_ivc_map[cnt].se_engine == se_dev->engine_id && g_crypto_to_ivc_map[cnt].ivck != NULL) { /* Wait for SE server to be free*/ while (mutex_is_locked(&g_crypto_to_ivc_map[cnt].se_ivc_lock) || mutex_is_locked(&g_crypto_to_ivc_map[cnt].irq_state_lock)) usleep_range(8, 10); } } } static int tegra_hv_vse_safety_remove(struct platform_device *pdev) { int i; tegra_hv_vse_safety_unregister_hwrng(platform_get_drvdata(pdev)); for (i = 0; i < ARRAY_SIZE(sha_algs); i++) crypto_unregister_ahash(&sha_algs[i]); tegra_hv_vse_release_all_dma_bufs(); return 0; } #if defined(CONFIG_PM) static int tegra_hv_vse_safety_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); /* Keep engine in suspended state */ tegra_hv_vse_safety_shutdown(pdev); return 0; } static int tegra_hv_vse_safety_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct tegra_virtual_se_dev *se_dev = platform_get_drvdata(pdev); /* skip checking pending request for the node with "nvidia,gcm-dma-support" * which only used to allocate buffer for gpcdma * for other vse nodes which doesn't have "nvidia,gcm-dma-support", * it will still set engine suspend state to 1. 
*/ if (gcm_supports_dma) return 0; /* Clear the engine suspended state to mark it as resumed */ atomic_set(&se_dev->se_suspended, 0); return 0; } static const struct dev_pm_ops tegra_hv_pm_ops = { .suspend = tegra_hv_vse_safety_suspend, .resume = tegra_hv_vse_safety_resume, }; #endif /* CONFIG_PM */ #if defined(NV_PLATFORM_DRIVER_STRUCT_REMOVE_RETURNS_VOID) /* Linux v6.11 */ static void tegra_hv_vse_safety_remove_wrapper(struct platform_device *pdev) { tegra_hv_vse_safety_remove(pdev); } #else static int tegra_hv_vse_safety_remove_wrapper(struct platform_device *pdev) { return tegra_hv_vse_safety_remove(pdev); } #endif static struct platform_driver tegra_hv_vse_safety_driver = { .probe = tegra_hv_vse_safety_probe, .remove = tegra_hv_vse_safety_remove_wrapper, .shutdown = tegra_hv_vse_safety_shutdown, .driver = { .name = "tegra_hv_vse_safety", .owner = THIS_MODULE, .of_match_table = of_match_ptr(tegra_hv_vse_safety_of_match), #if defined(CONFIG_PM) .pm = &tegra_hv_pm_ops, #endif }, }; static int __init tegra_hv_vse_safety_module_init(void) { return platform_driver_register(&tegra_hv_vse_safety_driver); } static void __exit tegra_hv_vse_safety_module_exit(void) { platform_driver_unregister(&tegra_hv_vse_safety_driver); } module_init(tegra_hv_vse_safety_module_init); module_exit(tegra_hv_vse_safety_module_exit); MODULE_AUTHOR("Mallikarjun Kasoju "); MODULE_DESCRIPTION("Virtual Security Engine driver over Tegra Hypervisor IVC channel"); MODULE_LICENSE("GPL");
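/*
 * Illustrative sketch (not part of this driver, kept out of the build): a
 * minimal example of how a kernel consumer might exercise the "gcm-vse(aes)"
 * AEAD registered above through the standard crypto API. The function name
 * example_gcm_vse_encrypt, the buffer contents, and the assumption that the
 * key material is the keyslot-label string ("NVSEAES " followed by the
 * keyslot handle, as the setkey paths in this driver expect) are hypothetical
 * and for illustration only.
 */
#if 0
#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int example_gcm_vse_encrypt(const u8 *keyslot_label, unsigned int label_len)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	/* 12-byte GCM IV; 16 bytes of plaintext plus room for the 16-byte tag */
	static u8 iv[12];
	static u8 buf[16 + 16];
	int ret;

	tfm = crypto_alloc_aead("gcm-vse(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The "key" is the keyslot label understood by this driver. */
	ret = crypto_aead_setkey(tfm, keyslot_label, label_len);
	if (ret)
		goto free_tfm;

	ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);			/* no AAD */
	aead_request_set_crypt(req, &sg, &sg, 16, iv);	/* in-place */

	/* Wait synchronously; the tag lands after the ciphertext in buf. */
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return ret;
}
#endif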