vse: Add support for HMAC SHA Sign/Verify

Add support for both old and new chips.

Jira ESSS-1336
Jira ESSS-1371

Change-Id: Ie4010bc7f3d8f3b71e3f49197c21911cd11a6dea
Signed-off-by: Nagaraj P N <nagarajp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3144907
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Sandeep Trasi <strasi@nvidia.com>
Reviewed-by: Leo Chiu <lchiu@nvidia.com>
Author: Nagaraj P N
Date: 2024-05-21 09:38:08 -07:00
Committed by: mobile promotions
Parent: 9cdefe40e0
Commit: 0b77774c92
4 changed files with 868 additions and 20 deletions


@@ -70,6 +70,7 @@
/* Command categories for SHA Engine */
#define TEGRA_VIRTUAL_SE_CMD_CATEGORY_SHA 0x00010000U
#define TEGRA_VIRTUAL_SE_CMD_CATEGORY_HMAC 0x00030000U
/* Command categories for TSEC Engine */
#define TEGRA_VIRTUAL_SE_CMD_CATEGORY_TSEC_KEYS 0x00010000U
@@ -112,6 +113,11 @@
/* Commands in the SHA Category */
#define TEGRA_VIRTUAL_SE_CMD_OP_SHA 0x00000001U
/* Commands in the HMAC Category */
#define TEGRA_VIRTUAL_SE_CMD_OP_HMAC_SIGN 0x00000001U
#define TEGRA_VIRTUAL_SE_CMD_OP_HMAC_VERIFY 0x00000002U
#define TEGRA_VIRTUAL_SE_CMD_OP_HMAC_GET_VERIFY 0x00000004U
/* Commands in the TSEC keys category */
#define TEGRA_VIRTUAL_SE_CMD_OP_TSEC_KEYLOAD_STATUS 0x00000001U
@@ -221,6 +227,19 @@
#define TEGRA_VIRTUAL_SE_CMD_SHA_HASH (TEGRA_VIRTUAL_SE_CMD_ENG_SHA \
| TEGRA_VIRTUAL_SE_CMD_CATEGORY_SHA \
| TEGRA_VIRTUAL_SE_CMD_OP_SHA)
#define TEGRA_VIRTUAL_SE_CMD_HMAC_SIGN (TEGRA_VIRTUAL_SE_CMD_ENG_SHA \
| TEGRA_VIRTUAL_SE_CMD_CATEGORY_HMAC \
| TEGRA_VIRTUAL_SE_CMD_OP_HMAC_SIGN)
#define TEGRA_VIRTUAL_SE_CMD_HMAC_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_SHA \
| TEGRA_VIRTUAL_SE_CMD_CATEGORY_HMAC \
| TEGRA_VIRTUAL_SE_CMD_OP_HMAC_VERIFY)
#define TEGRA_VIRTUAL_SE_CMD_HMAC_GET_VERIFY (TEGRA_VIRTUAL_SE_CMD_ENG_SHA \
| TEGRA_VIRTUAL_SE_CMD_CATEGORY_HMAC \
| TEGRA_VIRTUAL_SE_CMD_OP_HMAC_GET_VERIFY)
#define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT (512 / 8)
#define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_576BIT (576 / 8)
#define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_832BIT (832 / 8)
@@ -228,6 +247,8 @@
#define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1088BIT (1088 / 8)
#define TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1344BIT (1344 / 8)
#define TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH (32U)
#define SHA3_STATE_SIZE 200
#define TEGRA_VIRTUAL_SE_TIMEOUT_1S 1000000
@@ -346,6 +367,21 @@ struct tegra_vse_gmac_req_data {
uint8_t result;
};
enum hmac_sha_request_type {
HMAC_SHA_SIGN = 0U,
HMAC_SHA_VERIFY
};
/* HMAC-SHA request data */
struct tegra_vse_hmac_sha_req_data {
/* Enum to specify HMAC-SHA request type i.e. SIGN/VERIFY */
enum hmac_sha_request_type request_type;
/* Expected digest for HMAC_SHA_VERIFY request */
char *expected_digest;
/* Hash comparison result for HMAC_SHA_VERIFY request */
uint8_t result;
};
struct tegra_vse_priv_data {
struct skcipher_request *req;
struct tegra_virtual_se_dev *se_dev;
@@ -456,6 +492,20 @@ union tegra_virtual_se_sha_args {
} op_hash;
} __attribute__((__packed__));
struct tegra_virtual_se_hmac_sha_args {
/* Keyslot containing the HMAC key */
u8 keyslot[KEYSLOT_SIZE_BYTES];
/* SHA operation mode */
u32 mode;
/* Length in bytes of the last message block (software-verify path) */
u32 lastblock_len;
/* Copy of the last message block (software-verify path) */
u8 lastblock[TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT];
/* Total message length in bytes across all chunks, as four 32-bit words */
u32 msg_total_length[4];
/* Remaining message length in bytes, as four 32-bit words */
u32 msg_left_length[4];
/* DMA address of the result buffer (digest for sign, match code for HW verify) */
u64 dst_addr;
/* DMA address of the input buffer */
u64 src_addr;
/* Input buffer size in bytes */
u32 src_buf_size;
/* Expected digest for software verify */
u8 expected_hmac_sha[TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH];
/* DMA address of the expected digest for hardware verify */
uint64_t hmac_addr;
};
struct tegra_virtual_tsec_args {
/**
* Keyslot index for keyslot containing TSEC key
@@ -506,6 +556,7 @@ struct tegra_virtual_se_ivc_tx_msg_t {
union tegra_virtual_se_aes_args aes;
union tegra_virtual_se_sha_args sha;
struct tegra_virtual_tsec_args tsec;
struct tegra_virtual_se_hmac_sha_args hmac;
};
};
@@ -591,6 +642,25 @@ struct crypto_dev_to_ivc_map *tegra_hv_vse_get_db(void)
}
EXPORT_SYMBOL(tegra_hv_vse_get_db);
static int status_to_errno(u32 err)
{
switch (err) {
case 1: /* VSE_MSG_ERR_INVALID_CMD */
case 3: /* VSE_MSG_ERR_INVALID_ARGS */
case 11: /* VSE_MSG_ERR_MAC_INVALID */
return -EINVAL;
case 4: /* VSE_MSG_ERR_INVALID_KEY */
case 5: /* VSE_MSG_ERR_CTR_OVERFLOW */
case 6: /* VSE_MSG_ERR_INVALID_SUBKEY */
case 7: /* VSE_MSG_ERR_CTR_NONCE_INVALID */
case 8: /* VSE_MSG_ERR_GCM_IV_INVALID */
case 9: /* VSE_MSG_ERR_GCM_NONCE_INVALID */
case 10: /* VSE_MSG_ERR_GMAC_INVALID_PARAMS */
return -EPERM;
}
return err;
}
static int32_t validate_header(
struct tegra_virtual_se_dev *se_dev,
struct tegra_virtual_se_ivc_hdr_t *pivc_hdr,
@@ -1807,6 +1877,463 @@ static int tegra_hv_vse_safety_sha_digest(struct ahash_request *req)
return ret;
}
static int tegra_hv_vse_safety_hmac_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_virtual_se_hmac_sha_context *ctx =
crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct tegra_virtual_se_dev *se_dev;
int err = 0;
s8 label[TEGRA_VIRTUAL_SE_AES_MAX_KEY_SIZE];
bool is_keyslot_label;
if (!ctx)
return -EINVAL;
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].se_engine];
if (keylen != 32) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
return -EINVAL;
}
/* format: 'NVSEAES 1234567\0' */
is_keyslot_label = sscanf(key, "%s", label) == 1 &&
!strcmp(label, TEGRA_VIRTUAL_SE_AES_KEYSLOT_LABEL);
if (is_keyslot_label) {
ctx->keylen = keylen;
memcpy(ctx->aes_keyslot, key + KEYSLOT_OFFSET_BYTES, KEYSLOT_SIZE_BYTES);
ctx->is_key_slot_allocated = true;
} else {
dev_err(se_dev->dev, "%s: Invalid keyslot label %s\n", __func__, key);
return -EINVAL;
}
return err;
}
static int tegra_hv_vse_safety_hmac_sha_init(struct ahash_request *req)
{
struct crypto_ahash *tfm;
struct tegra_virtual_se_req_context *req_ctx;
struct tegra_virtual_se_hmac_sha_context *hmac_ctx;
struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[VIRTUAL_SE_SHA];
if (!req) {
dev_err(se_dev->dev, "HMAC SHA request not valid\n");
return -EINVAL;
}
/* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended))
return -ENODEV;
req_ctx = ahash_request_ctx(req);
if (!req_ctx) {
dev_err(se_dev->dev, "HMAC SHA req_ctx not valid\n");
return -EINVAL;
}
tfm = crypto_ahash_reqtfm(req);
if (!tfm) {
dev_err(se_dev->dev, "HMAC SHA transform not valid\n");
return -EINVAL;
}
hmac_ctx = crypto_ahash_ctx(tfm);
hmac_ctx->digest_size = crypto_ahash_digestsize(tfm);
if (!hmac_ctx->is_key_slot_allocated) {
pr_err("%s key is not allocated\n", __func__);
return -EINVAL;
}
if (strcmp(crypto_ahash_alg_name(tfm), "hmac-sha256-vse") == 0) {
hmac_ctx->mode = VIRTUAL_SE_OP_MODE_SHA256;
hmac_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT;
} else {
dev_err(se_dev->dev, "Invalid HMAC-SHA Alg\n");
return -EINVAL;
}
req_ctx->total_count = 0;
req_ctx->is_first = true;
req_ctx->req_context_initialized = true;
return 0;
}
static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req, bool is_last)
{
struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req);
struct tegra_virtual_se_hmac_sha_context *hmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_vse_hmac_sha_req_data *hmac_req_data;
struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[hmac_ctx->node_id].se_engine];
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
struct tegra_virtual_se_ivc_msg_t ivc_req_msg = {0};
struct tegra_virtual_se_hmac_sha_args *phmac;
struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[hmac_ctx->node_id].ivck;
int err = 0;
struct tegra_vse_priv_data priv = {0};
struct tegra_vse_tag *priv_data_ptr;
u32 cmd = 0;
void *src_buf = NULL;
dma_addr_t src_buf_addr;
void *hash_buf = NULL;
dma_addr_t hash_buf_addr;
void *verify_result_buf = NULL;
dma_addr_t verify_result_addr;
void *match_code_buf = NULL;
dma_addr_t match_code_addr;
u32 matchcode = SE_HW_VALUE_MATCH_CODE;
u32 mismatch_code = SE_HW_VALUE_MISMATCH_CODE;
u32 match_code_buf_size = 4;
u32 blocks_to_process, last_block_bytes = 0;
u64 msg_len = 0, temp_len = 0;
if ((req->nbytes == 0) || (req->nbytes > TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN)) {
dev_err(se_dev->dev, "%s: input buffer size is invalid\n", __func__);
return -EINVAL;
}
if (!is_last) {
if (req->nbytes % TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT != 0) {
dev_err(se_dev->dev, "%s: non-last buffer size is invalid\n", __func__);
return -EINVAL;
}
}
hmac_req_data = (struct tegra_vse_hmac_sha_req_data *) req->priv;
src_buf = dma_alloc_coherent(
se_dev->dev, req->nbytes,
&src_buf_addr, GFP_KERNEL);
if (!src_buf) {
dev_err(se_dev->dev, "Cannot allocate memory for source buffer\n");
return -ENOMEM;
}
if (hmac_req_data->request_type == HMAC_SHA_SIGN) {
hash_buf = dma_alloc_coherent(
se_dev->dev, hmac_ctx->digest_size,
&hash_buf_addr, GFP_KERNEL);
if (!hash_buf) {
dev_err(se_dev->dev, "Cannot allocate memory for hash buffer\n");
err = -ENOMEM;
goto unmap_exit;
}
memset(hash_buf, 0, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH);
cmd = TEGRA_VIRTUAL_SE_CMD_HMAC_SIGN;
} else {
cmd = TEGRA_VIRTUAL_SE_CMD_HMAC_VERIFY;
}
if ((se_dev->chipdata->hmac_verify_hw_support == true)
&& (is_last && (hmac_req_data->request_type == HMAC_SHA_VERIFY))) {
verify_result_buf = dma_alloc_coherent(
se_dev->dev, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH,
&verify_result_addr, GFP_KERNEL);
if (!verify_result_buf) {
dev_err(se_dev->dev, "Cannot allocate memory for verify_result_buf buffer\n");
err = -ENOMEM;
goto unmap_exit;
}
match_code_buf = dma_alloc_coherent(
se_dev->dev, match_code_buf_size,
&match_code_addr, GFP_KERNEL);
if (!match_code_buf) {
dev_err(se_dev->dev, "Cannot allocate memory for match_code_buf buffer\n");
err = -ENOMEM;
goto unmap_exit;
}
}
g_crypto_to_ivc_map[hmac_ctx->node_id].vse_thread_start = true;
msg_len = req->nbytes;
temp_len = msg_len;
req_ctx->total_count += msg_len;
ivc_tx = &ivc_req_msg.tx[0];
ivc_hdr = &ivc_req_msg.ivc_hdr;
ivc_hdr->num_reqs = 1;
ivc_hdr->header_magic[0] = 'N';
ivc_hdr->header_magic[1] = 'V';
ivc_hdr->header_magic[2] = 'D';
ivc_hdr->header_magic[3] = 'A';
ivc_hdr->engine = VIRTUAL_SE_SHA;
ivc_tx->cmd = cmd;
phmac = &(ivc_tx->hmac);
phmac->mode = hmac_ctx->mode;
phmac->msg_total_length[2] = 0;
phmac->msg_total_length[3] = 0;
phmac->msg_left_length[2] = 0;
phmac->msg_left_length[3] = 0;
memcpy(phmac->keyslot, hmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES);
phmac->src_addr = src_buf_addr;
if (hmac_req_data->request_type == HMAC_SHA_SIGN)
phmac->dst_addr = hash_buf_addr;
if (is_last) {
/* Set msg left length equal to input buffer size */
phmac->msg_left_length[0] = msg_len & 0xFFFFFFFF;
phmac->msg_left_length[1] = msg_len >> 32;
/* Set msg total length equal to sum of all input buffer size */
phmac->msg_total_length[0] = req_ctx->total_count & 0xFFFFFFFF;
phmac->msg_total_length[1] = req_ctx->total_count >> 32;
} else {
/* Set msg left length greater than input buffer size */
temp_len += 8;
phmac->msg_left_length[0] = temp_len & 0xFFFFFFFF;
phmac->msg_left_length[1] = temp_len >> 32;
/* Set msg total length greater than msg left length for non-first request */
if (req_ctx->is_first)
req_ctx->is_first = false;
else
temp_len += 8;
phmac->msg_total_length[0] = temp_len & 0xFFFFFFFF;
phmac->msg_total_length[1] = temp_len >> 32;
}
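/*
 * Length-signalling convention used above (derived from the per-branch
 * comments): on the final chunk, msg_left_length equals the chunk size and
 * msg_total_length carries the running byte total, which lets the SE server
 * close the hash. On intermediate chunks, msg_left_length is padded 8 bytes
 * beyond the chunk size so the server keeps the context open, and
 * msg_total_length gets a further 8-byte pad on non-first chunks so it stays
 * strictly greater than msg_left_length.
 */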
if (se_dev->chipdata->hmac_verify_hw_support == false) {
if (is_last && (hmac_req_data->request_type == HMAC_SHA_VERIFY)) {
blocks_to_process = msg_len / hmac_ctx->blk_size;
/* num of bytes less than block size */
if ((req->nbytes % hmac_ctx->blk_size) ||
blocks_to_process == 0) {
last_block_bytes =
msg_len % hmac_ctx->blk_size;
} else {
/* decrement num of blocks */
blocks_to_process--;
last_block_bytes = hmac_ctx->blk_size;
}
if (blocks_to_process > 0)
sg_copy_to_buffer(req->src, (u32)sg_nents(req->src), src_buf,
blocks_to_process * hmac_ctx->blk_size);
phmac->src_buf_size = blocks_to_process * hmac_ctx->blk_size;
phmac->lastblock_len = last_block_bytes;
sg_pcopy_to_buffer(req->src,
(u32)sg_nents(req->src),
phmac->lastblock,
last_block_bytes,
blocks_to_process * hmac_ctx->blk_size);
memcpy(phmac->expected_hmac_sha, hmac_req_data->expected_digest,
TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH);
} else {
phmac->src_buf_size = msg_len;
phmac->lastblock_len = 0;
sg_copy_to_buffer(req->src, (u32)sg_nents(req->src),
src_buf, msg_len);
}
} else {
phmac->src_buf_size = msg_len;
phmac->lastblock_len = 0;
sg_copy_to_buffer(req->src, (u32)sg_nents(req->src),
src_buf, msg_len);
if (is_last && (hmac_req_data->request_type == HMAC_SHA_VERIFY)) {
phmac->hmac_addr = verify_result_addr;
memcpy(verify_result_buf, hmac_req_data->expected_digest,
TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH);
phmac->dst_addr = match_code_addr;
}
}
priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg.ivc_hdr.tag;
priv_data_ptr->priv_data = (unsigned int *)&priv;
priv.cmd = VIRTUAL_SE_PROCESS;
priv.se_dev = se_dev;
init_completion(&priv.alg_complete);
err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, &priv, &ivc_req_msg,
sizeof(struct tegra_virtual_se_ivc_msg_t), hmac_ctx->node_id);
if (err) {
dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err);
goto unmap_exit;
}
if (priv.rx_status != 0) {
err = status_to_errno(priv.rx_status);
dev_err(se_dev->dev, "%s: SE server returned error %u\n",
__func__, priv.rx_status);
goto unmap_exit;
}
if (is_last) {
if (hmac_req_data->request_type == HMAC_SHA_VERIFY) {
if (se_dev->chipdata->hmac_verify_hw_support == false) {
ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_HMAC_GET_VERIFY;
priv.cmd = VIRTUAL_SE_PROCESS;
init_completion(&priv.alg_complete);
err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, &priv,
&ivc_req_msg, sizeof(struct tegra_virtual_se_ivc_msg_t),
hmac_ctx->node_id);
if (err) {
dev_err(se_dev->dev,
"failed to send data over ivc err %d\n", err);
goto unmap_exit;
}
if (priv.rx_status == 0) {
hmac_req_data->result = 0;
} else if (priv.rx_status == TEGRA_VIRTUAL_SE_ERR_MAC_INVALID) {
dev_dbg(se_dev->dev, "%s: tag mismatch", __func__);
hmac_req_data->result = 1;
} else {
err = status_to_errno(priv.rx_status);
dev_err(se_dev->dev, "%s: SE server returned error %u\n",
__func__, priv.rx_status);
}
} else {
if (memcmp(match_code_buf, &matchcode, 4) == 0) {
hmac_req_data->result = 0;
} else if (memcmp(match_code_buf, &mismatch_code, 4) == 0) {
dev_dbg(se_dev->dev, "%s: tag mismatch", __func__);
hmac_req_data->result = 1;
} else {
dev_err(se_dev->dev, "%s: invalid tag match code",
__func__);
err = -EINVAL;
}
}
} else {
memcpy(req->result, hash_buf, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH);
}
}
unmap_exit:
if (src_buf)
dma_free_coherent(se_dev->dev, req->nbytes, src_buf, src_buf_addr);
if (hash_buf)
dma_free_coherent(se_dev->dev, hmac_ctx->digest_size, hash_buf, hash_buf_addr);
if (verify_result_buf)
dma_free_coherent(se_dev->dev, TEGRA_VIRTUAL_SE_SHA_MAX_HMAC_SHA_LENGTH,
verify_result_buf, verify_result_addr);
if (match_code_buf)
dma_free_coherent(se_dev->dev, match_code_buf_size, match_code_buf,
match_code_addr);
return err;
}
static int tegra_hv_vse_safety_hmac_sha_update(struct ahash_request *req)
{
struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req);
struct tegra_virtual_se_hmac_sha_context *hmac_ctx;
struct tegra_virtual_se_dev *se_dev;
int ret = 0;
if (!req) {
pr_err("%s HMAC SHA request not valid\n", __func__);
return -EINVAL;
}
if (!req_ctx->req_context_initialized) {
pr_err("%s Request ctx not initialized\n", __func__);
return -EINVAL;
}
hmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
if (!hmac_ctx) {
pr_err("%s HMAC SHA req_ctx not valid\n", __func__);
return -EINVAL;
}
if (!hmac_ctx->is_key_slot_allocated) {
pr_err("%s key is not allocated\n", __func__);
return -EINVAL;
}
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[hmac_ctx->node_id].se_engine];
/* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended))
return -ENODEV;
ret = tegra_hv_vse_safety_hmac_sha_sv_op(req, false);
if (ret)
dev_err(se_dev->dev, "tegra_se_hmac_sha_update failed - %d\n", ret);
return ret;
}
static int tegra_hv_vse_safety_hmac_sha_finup(struct ahash_request *req)
{
struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req);
struct tegra_virtual_se_hmac_sha_context *hmac_ctx = NULL;
struct tegra_virtual_se_dev *se_dev;
int ret = 0;
if (!req) {
pr_err("%s HMAC-SHA request not valid\n", __func__);
return -EINVAL;
}
hmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
if (!hmac_ctx) {
pr_err("%s HMAC-SHA req_ctx not valid\n", __func__);
return -EINVAL;
}
if (!hmac_ctx->is_key_slot_allocated) {
pr_err("%s key is not allocated\n", __func__);
return -EINVAL;
}
if (!req_ctx->req_context_initialized) {
pr_err("%s Request ctx not initialized\n", __func__);
return -EINVAL;
}
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[hmac_ctx->node_id].se_engine];
/* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended))
return -ENODEV;
ret = tegra_hv_vse_safety_hmac_sha_sv_op(req, true);
if (ret)
dev_err(se_dev->dev, "tegra_se_hmac_sha_finup failed - %d\n", ret);
hmac_ctx->is_key_slot_allocated = false;
req_ctx->req_context_initialized = false;
return ret;
}
static int tegra_hv_vse_safety_hmac_sha_final(struct ahash_request *req)
{
/* Unsupported */
return -EINVAL;
}
static int tegra_hv_vse_safety_hmac_sha_digest(struct ahash_request *req)
{
/* Unsupported */
return -EINVAL;
}
static int tegra_hv_vse_safety_sha_export(struct ahash_request *req, void *out)
{
struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req);
@@ -1868,25 +2395,6 @@ static void tegra_hv_vse_safety_prepare_cmd(struct tegra_virtual_se_dev *se_dev,
}
}
static int status_to_errno(u32 err)
{
switch (err) {
case 1: /* VSE_MSG_ERR_INVALID_CMD */
case 3: /* VSE_MSG_ERR_INVALID_ARGS */
case 11: /* VSE_MSG_ERR_MAC_INVALID */
return -EINVAL;
case 4: /* VSE_MSG_ERR_INVALID_KEY */
case 5: /* VSE_MSG_ERR_CTR_OVERFLOW */
case 6: /* VSE_MSG_ERR_INVALID_SUBKEY */
case 7: /* VSE_MSG_ERR_CTR_NONCE_INVALID */
case 8: /* VSE_MSG_ERR_GCM_IV_INVALID */
case 9: /* VSE_MSG_ERR_GCM_NONCE_INVALID */
case 10: /* VSE_MSG_ERR_GMAC_INVALID_PARAMS */
return -EPERM;
}
return err;
}
static int tegra_hv_vse_safety_aes_gen_random_iv(
struct tegra_virtual_se_dev *se_dev,
struct skcipher_request *req,
@@ -4822,7 +5330,31 @@ static struct ahash_alg sha_algs[] = {
.cra_init = tegra_hv_vse_safety_sha_cra_init,
.cra_exit = tegra_hv_vse_safety_sha_cra_exit,
}
}, {
.init = tegra_hv_vse_safety_hmac_sha_init,
.update = tegra_hv_vse_safety_hmac_sha_update,
.final = tegra_hv_vse_safety_hmac_sha_final,
.finup = tegra_hv_vse_safety_hmac_sha_finup,
.digest = tegra_hv_vse_safety_hmac_sha_digest,
.export = tegra_hv_vse_safety_sha_export,
.import = tegra_hv_vse_safety_sha_import,
.setkey = tegra_hv_vse_safety_hmac_sha_setkey,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.statesize = sizeof(struct tegra_virtual_se_req_context),
.halg.base = {
.cra_name = "hmac-sha256-vse",
.cra_driver_name = "tegra-hv-vse-safety-hmac-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize =
sizeof(struct tegra_virtual_se_hmac_sha_context),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = tegra_hv_vse_safety_sha_cra_init,
.cra_exit = tegra_hv_vse_safety_sha_cra_exit,
}
}
};
static const struct tegra_vse_soc_info t194_vse_sinfo = {
@@ -4830,6 +5362,7 @@ static const struct tegra_vse_soc_info t194_vse_sinfo = {
.cmac_hw_verify_supported = false,
.sm_supported = false,
.gcm_hw_iv_supported = false,
.hmac_verify_hw_support = false,
};
static const struct tegra_vse_soc_info t234_vse_sinfo = {
@@ -4837,6 +5370,7 @@ static const struct tegra_vse_soc_info t234_vse_sinfo = {
.cmac_hw_verify_supported = false,
.sm_supported = false,
.gcm_hw_iv_supported = false,
.hmac_verify_hw_support = false,
};
static const struct tegra_vse_soc_info se_51_vse_sinfo = {
@@ -4844,6 +5378,7 @@ static const struct tegra_vse_soc_info se_51_vse_sinfo = {
.cmac_hw_verify_supported = true,
.sm_supported = true,
.gcm_hw_iv_supported = true,
.hmac_verify_hw_support = true,
};
static const struct of_device_id tegra_hv_vse_safety_of_match[] = {


@@ -15,6 +15,7 @@ struct tegra_vse_soc_info {
bool cmac_hw_verify_supported;
bool sm_supported;
bool gcm_hw_iv_supported;
bool hmac_verify_hw_support;
};
/* GCM Operation Supported Flag */
@@ -143,6 +144,25 @@ struct tegra_virtual_se_sha_context {
uint32_t node_id;
};
struct tegra_virtual_se_hmac_sha_context {
/* Security Engine device */
struct tegra_virtual_se_dev *se_dev;
/* SHA operation mode */
u8 mode;
u32 blk_size;
unsigned int digest_size;
/* Total bytes in all the requests */
u64 total_count;
bool is_key_slot_allocated;
/* Keyslot for HMAC-SHA request */
u8 aes_keyslot[KEYSLOT_SIZE_BYTES];
/* key length in bits */
u32 keylen;
/* Crypto dev instance */
uint32_t node_id;
};
/* Security Engine request context */
struct tegra_virtual_se_req_context {
/* Security Engine device */


@@ -147,6 +147,21 @@ struct tnvvse_cmac_req_data {
uint8_t result;
};
enum tnvvse_hmac_sha_request_type {
HMAC_SHA_SIGN,
HMAC_SHA_VERIFY
};
/* HMAC SHA request data */
struct tnvvse_hmac_sha_req_data {
/* Enum to specify HMAC-SHA request type i.e. SIGN/VERIFY */
enum tnvvse_hmac_sha_request_type request_type;
/* Expected digest for HMAC_SHA_VERIFY request */
char *expected_digest;
/* Hash comparison result for HMAC_SHA_VERIFY request */
uint8_t result;
};
#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
static void tnvvse_crypto_complete(void *data, int err)
{
@@ -469,6 +484,193 @@ stop_sha:
return ret;
}
static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_hmac_sha_sv_ctl *hmac_sha_ctl)
{
struct crypto_sha_state *sha_state = &ctx->sha_state;
struct tegra_virtual_se_hmac_sha_context *hmac_ctx;
struct crypto_ahash *tfm = NULL;
struct ahash_request *req = NULL;
char *src_buffer;
const char *driver_name;
char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,};
struct tnvvse_hmac_sha_req_data priv_data;
struct scatterlist sg;
int ret = -ENOMEM;
uint32_t in_sz;
uint8_t *in_buf = NULL;
char *result = NULL;
if (hmac_sha_ctl->data_length > ivc_database.max_buffer_size[ctx->node_id]) {
pr_err("%s(): Input size is (data = %d) is not supported\n",
__func__, hmac_sha_ctl->data_length);
return -EINVAL;
}
if (sha_state->total_bytes == 0) {
if (hmac_sha_ctl->is_first != 1) {
pr_err("%s: HMAC-SHA first request is not yet received\n",
__func__);
ret = -EINVAL;
goto exit;
}
}
if (hmac_sha_ctl->is_first == 1) {
tfm = crypto_alloc_ahash("hmac-sha256-vse", 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
pr_err("%s(): Failed to allocate ahash for hmac-sha256-vse: %d\n",
__func__, ret);
goto exit;
}
hmac_ctx = crypto_ahash_ctx(tfm);
hmac_ctx->node_id = ctx->node_id;
driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
if (driver_name == NULL) {
crypto_free_ahash(tfm);
pr_err("%s(): Failed to get_driver_name for hmac-sha256-vse returned NULL",
__func__);
ret = -EINVAL;
goto exit;
}
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
crypto_free_ahash(tfm);
pr_err("%s(): Failed to allocate request for cmac-vse(aes)\n", __func__);
ret = -ENOMEM;
goto exit;
}
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tnvvse_crypto_complete, &sha_state->sha_complete);
sha_state->tfm = tfm;
sha_state->req = req;
(void)snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES ");
memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, hmac_sha_ctl->key_slot,
KEYSLOT_SIZE_BYTES);
ret = crypto_ahash_setkey(tfm, key_as_keyslot, hmac_sha_ctl->key_length);
if (ret) {
pr_err("%s(): Failed to set keys for hmac: %d\n", __func__, ret);
goto free_tfm;
}
} else {
tfm = sha_state->tfm;
req = sha_state->req;
}
init_completion(&sha_state->sha_complete.restart);
sha_state->sha_complete.req_err = 0;
in_sz = hmac_sha_ctl->data_length;
in_buf = kzalloc(in_sz, GFP_KERNEL);
if (in_buf == NULL) {
ret = -ENOMEM;
goto free_tfm;
}
result = kzalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
if (result == NULL) {
ret = -ENOMEM;
goto free_buf;
}
crypto_ahash_clear_flags(tfm, ~0U);
if (hmac_sha_ctl->hmac_sha_type == TEGRA_NVVSE_HMAC_SHA_SIGN)
priv_data.request_type = HMAC_SHA_SIGN;
else
priv_data.request_type = HMAC_SHA_VERIFY;
priv_data.result = 0;
req->priv = &priv_data;
if (hmac_sha_ctl->is_first == 1) {
ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_init(req));
if (ret) {
pr_err("%s(): Failed to initialize ahash: %d\n", __func__, ret);
goto free_buf;
}
}
src_buffer = hmac_sha_ctl->src_buffer;
/* copy input buffer */
ret = copy_from_user(in_buf, src_buffer, in_sz);
if (ret) {
pr_err("%s(): Failed to copy user input data: %d\n", __func__, ret);
goto free_buf;
}
sg_init_one(&sg, in_buf, in_sz);
ahash_request_set_crypt(req, &sg, result, in_sz);
sha_state->total_bytes += in_sz;
if (hmac_sha_ctl->is_last == 0) {
ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_update(req));
if (ret) {
pr_err("%s(): Failed to ahash_update: %d\n", __func__, ret);
goto free_buf;
}
} else {
if (hmac_sha_ctl->hmac_sha_type == TEGRA_NVVSE_HMAC_SHA_VERIFY) {
ret = copy_from_user((void *)result,
(void __user *)hmac_sha_ctl->digest_buffer,
crypto_ahash_digestsize(tfm));
if (ret) {
pr_err("%s(): Failed to copy_from_user: %d\n", __func__, ret);
goto free_buf;
}
priv_data.expected_digest = result;
}
ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_finup(req));
if (ret) {
pr_err("%s(): Failed to ahash_finup: %d\n", __func__, ret);
goto free_buf;
}
if (hmac_sha_ctl->hmac_sha_type == TEGRA_NVVSE_HMAC_SHA_SIGN) {
ret = copy_to_user((void __user *)hmac_sha_ctl->digest_buffer,
(const void *)result,
crypto_ahash_digestsize(tfm));
if (ret)
pr_err("%s(): Failed to copy_to_user: %d\n", __func__, ret);
} else {
hmac_sha_ctl->result = priv_data.result;
}
sha_state->total_bytes = 0;
ahash_request_free(sha_state->req);
sha_state->req = NULL;
crypto_free_ahash(sha_state->tfm);
sha_state->tfm = NULL;
}
free_buf:
/* kfree is a no-op when passed NULL */
kfree(result);
kfree(in_buf);
free_tfm:
if (ret != 0) {
if (sha_state->req) {
ahash_request_free(sha_state->req);
sha_state->req = NULL;
}
if (sha_state->tfm) {
crypto_free_ahash(sha_state->tfm);
sha_state->tfm = NULL;
}
}
exit:
return ret;
}
static int tnvvse_crypto_tsec_get_keyload_status(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_tsec_get_keyload_status *tsec_keyload_status)
{
@@ -1613,6 +1815,8 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
struct tegra_nvvse_sha_init_ctl *sha_init_ctl;
struct tegra_nvvse_sha_update_ctl *sha_update_ctl;
struct tegra_nvvse_sha_final_ctl *sha_final_ctl;
struct tegra_nvvse_hmac_sha_sv_ctl *hmac_sha_sv_ctl;
struct tegra_nvvse_hmac_sha_sv_ctl __user *arg_hmac_sha_sv_ctl;
struct tegra_nvvse_aes_enc_dec_ctl *aes_enc_dec_ctl;
struct tegra_nvvse_aes_cmac_sign_verify_ctl *aes_cmac_sign_verify_ctl;
struct tegra_nvvse_aes_drng_ctl *aes_drng_ctl;
@@ -1692,6 +1896,33 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
kfree(sha_final_ctl);
break;
case NVVSE_IOCTL_CMDID_HMAC_SHA_SIGN_VERIFY:
hmac_sha_sv_ctl = kzalloc(sizeof(*hmac_sha_sv_ctl), GFP_KERNEL);
if (!hmac_sha_sv_ctl)
return -ENOMEM;
arg_hmac_sha_sv_ctl = (void __user *)arg;
ret = copy_from_user(hmac_sha_sv_ctl, arg_hmac_sha_sv_ctl,
sizeof(*hmac_sha_sv_ctl));
if (ret) {
pr_err("%s(): Failed to copy_from_user hmac_sha_sv_ctl:%d\n", __func__,
ret);
kfree(hmac_sha_sv_ctl);
goto out;
}
ret = tnvvse_crypto_hmac_sha_sign_verify(ctx, hmac_sha_sv_ctl);
if (!ret && hmac_sha_sv_ctl->hmac_sha_type == TEGRA_NVVSE_HMAC_SHA_VERIFY) {
ret = copy_to_user(&arg_hmac_sha_sv_ctl->result, &hmac_sha_sv_ctl->result,
sizeof(uint8_t));
if (ret)
pr_err("%s(): Failed to copy_to_user:%d\n", __func__, ret);
}
kfree(hmac_sha_sv_ctl);
break;
case NVVSE_IOCTL_CMDID_AES_ENCDEC:
aes_enc_dec_ctl = kzalloc(sizeof(*aes_enc_dec_ctl), GFP_KERNEL);
if (!aes_enc_dec_ctl) {


@@ -29,6 +29,7 @@
#define TEGRA_NVVSE_CMDID_GET_IVC_DB 12
#define TEGRA_NVVSE_CMDID_TSEC_SIGN_VERIFY 13
#define TEGRA_NVVSE_CMDID_TSEC_GET_KEYLOAD_STATUS 14
#define TEGRA_NVVSE_CMDID_HMAC_SHA_SIGN_VERIFY 15
/** Defines the length of the AES-CBC Initial Vector */
#define TEGRA_NVVSE_AES_IV_LEN 16U
@@ -42,6 +43,8 @@
#define TEGRA_NVVSE_AES_CMAC_LEN 16U
/** Defines the counter offset byte in the AES Initial counter */
#define TEGRA_COUNTER_OFFSET 12U
/** Defines the length of the HMAC SHA Hash */
#define TEGRA_NVVSE_HMAC_SHA256_LEN 32U
/**
* @brief Defines SHA Types.
@@ -69,6 +72,16 @@ enum tegra_nvvse_sha_type {
TEGRA_NVVSE_SHA_TYPE_MAX,
};
/**
* \brief Defines HMAC SHA request type.
*/
enum tegra_nvvse_hmac_sha_sv_type {
/** Defines HMAC SHA Sign */
TEGRA_NVVSE_HMAC_SHA_SIGN = 0u,
/** Defines HMAC SHA Verify */
TEGRA_NVVSE_HMAC_SHA_VERIFY,
};
/**
* \brief Defines AES modes.
*/
@@ -142,6 +155,55 @@ struct tegra_nvvse_sha_final_ctl {
#define NVVSE_IOCTL_CMDID_FINAL_SHA _IOWR(TEGRA_NVVSE_IOC_MAGIC, TEGRA_NVVSE_CMDID_FINAL_SHA, \
struct tegra_nvvse_sha_final_ctl)
struct tegra_nvvse_hmac_sha_sv_ctl {
/** [in] Holds the enum which indicates SHA mode */
enum tegra_nvvse_sha_type hmac_sha_mode;
/** [in] Holds the enum which indicates HMAC SHA Sign or Verify */
enum tegra_nvvse_hmac_sha_sv_type hmac_sha_type;
/** [in] Holds a Boolean that specifies whether this is the first
* chunk of the message for HMAC-SHA Sign/Verify.
* '0' indicates it is not the first call and
* a non-zero value indicates it is the first call.
*/
uint8_t is_first;
/** [in] Holds a Boolean that specifies whether this is the last
* chunk of the message for HMAC-SHA Sign/Verify.
* '0' indicates it is not the last call and
* a non-zero value indicates it is the last call.
*/
uint8_t is_last;
/** [in] Holds a keyslot handle which is used for HMAC-SHA operation */
uint8_t key_slot[KEYSLOT_SIZE_BYTES];
/** [in] Holds the key length.
* Supported key lengths are 16 bytes and 32 bytes.
*/
uint8_t key_length;
/** [in] Holds a pointer to the input source buffer for which
* HMAC-SHA is to be calculated/verified.
*/
uint8_t *src_buffer;
/** [in] Holds the length of the input source buffer.
* data_length shall not be "0". Single-part sign and verify is supported.
* data_length shall be a multiple of the hash block size if it is not the
* last chunk, i.e. when is_last is "0".
*/
uint32_t data_length;
/** [in,out] Holds a pointer to the digest buffer: carries the expected
* digest for a verify request and receives the computed digest for sign.
*/
uint8_t *digest_buffer;
/** [in] Holds the digest buffer length */
uint32_t digest_length;
/** [out] Holds HMAC-SHA verification result, which the driver updates.
* Valid only when hmac_sha_type is TEGRA_NVVSE_HMAC_SHA_VERIFY.
* Result values are:
* - '0' indicates HMAC-SHA verification success.
* - Non-zero value indicates HMAC-SHA verification failure.
*/
uint8_t result;
};
#define NVVSE_IOCTL_CMDID_HMAC_SHA_SIGN_VERIFY _IOWR(TEGRA_NVVSE_IOC_MAGIC, \
TEGRA_NVVSE_CMDID_HMAC_SHA_SIGN_VERIFY, \
struct tegra_nvvse_hmac_sha_sv_ctl)
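For reference, below is a minimal user-space sketch of a single-shot HMAC-SHA256 sign through this ioctl. It is an illustration, not part of this change: the TEGRA_NVVSE_SHA_TYPE_SHA256 enumerator name is an assumption (the SHA-type enum values are not shown in this hunk), the uapi header include is implied, and fd is an already-open handle to the nvvse crypto device node (path varies by platform).

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
/* plus this uapi header for the tegra_nvvse_* definitions */

int hmac_sha256_sign(int fd, const uint8_t *msg, uint32_t len,
		const uint8_t keyslot[KEYSLOT_SIZE_BYTES],
		uint8_t digest[TEGRA_NVVSE_HMAC_SHA256_LEN])
{
	struct tegra_nvvse_hmac_sha_sv_ctl ctl = {0};

	ctl.hmac_sha_mode = TEGRA_NVVSE_SHA_TYPE_SHA256; /* assumed enumerator name */
	ctl.hmac_sha_type = TEGRA_NVVSE_HMAC_SHA_SIGN;
	ctl.is_first = 1; /* single chunk: both first and last */
	ctl.is_last = 1;
	memcpy(ctl.key_slot, keyslot, KEYSLOT_SIZE_BYTES);
	ctl.key_length = 32; /* the driver's setkey accepts 32-byte keys */
	ctl.src_buffer = (uint8_t *)msg;
	ctl.data_length = len; /* must be non-zero */
	ctl.digest_buffer = digest;
	ctl.digest_length = TEGRA_NVVSE_HMAC_SHA256_LEN;

	return ioctl(fd, NVVSE_IOCTL_CMDID_HMAC_SHA_SIGN_VERIFY, &ctl);
}

For a multi-part operation the same ioctl is issued repeatedly: is_first set only on the first chunk, is_last only on the final one, and every non-final chunk sized to a multiple of the 64-byte SHA-256 block.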
/**
* \brief Holds AES encrypt/decrypt parameters for IO Control.
*/