vse/cryptodev: add memory map/unmap interface

- add support for zero-copy SHA/GMAC operations
- add support for reading zero-copy nodes from the DT
- add memory buffer map/unmap ioctl interfaces (a sketch of the
  underlying dma-buf mapping sequence follows this list)
- unmap all memory buffers when the FD corresponding to the
  device node is closed
- allow only one open call at a time for zero-copy nodes
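
For reference, the map path follows the standard kernel dma-buf import sequence (dma_buf_get -> dma_buf_attach -> dma_buf_map_attachment) and hands the resulting IOVA back to the caller. Below is a minimal sketch of that sequence; it is not part of this patch, and the function name, the "dev"/"fd" parameters and the abbreviated error handling are illustrative only.

/*
 * Illustrative sketch only (not from this patch): the dma-buf import
 * sequence used by the map ioctl path, with error unwinding abbreviated.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_fd_to_iova(struct device *dev, int fd, dma_addr_t *iova)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);             /* take a reference on the fd's dma-buf */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev); /* attach the buffer to the SE device */
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return PTR_ERR(attach);
	}

	/* map into the device IOMMU and read back the device address */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return PTR_ERR(sgt);
	}

	*iova = sg_dma_address(sgt->sgl);     /* IOVA returned to the ioctl caller */
	return 0;
}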

Bug 4999798

Change-Id: If110108a73b24ca9f523a8c67a47c02b922c3fd8
Signed-off-by: Nagaraj P N <nagarajp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3292084
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Leo Chiu <lchiu@nvidia.com>
Reviewed-by: Sandeep Trasi <strasi@nvidia.com>
Author:    Nagaraj P N
Date:      2025-02-07 19:08:01 +05:30
Committer: Jon Hunter
Parent:    268a87ecfa
Commit:    965bd044c6
4 changed files with 715 additions and 147 deletions


@@ -42,6 +42,7 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/host1x.h> #include <linux/host1x.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/dma-buf.h>
#include "tegra-hv-vse.h" #include "tegra-hv-vse.h"
@@ -322,7 +323,6 @@ static struct tegra_vse_node_dma g_node_dma[MAX_NUMBER_MISC_DEVICES];
static bool gcm_supports_dma; static bool gcm_supports_dma;
static struct device *gpcdma_dev; static struct device *gpcdma_dev;
static bool s_set_sha_algs;
/* Security Engine Linked List */ /* Security Engine Linked List */
struct tegra_virtual_se_ll { struct tegra_virtual_se_ll {
@@ -661,8 +661,6 @@ enum tsec_buf_idx {
TSEC_FW_STATUS_BUF_IDX TSEC_FW_STATUS_BUF_IDX
}; };
static struct tegra_virtual_se_dev *g_virtual_se_dev[VIRTUAL_MAX_SE_ENGINE_NUM];
struct crypto_dev_to_ivc_map *tegra_hv_vse_get_db(void) struct crypto_dev_to_ivc_map *tegra_hv_vse_get_db(void)
{ {
return &g_crypto_to_ivc_map[0]; return &g_crypto_to_ivc_map[0];
@@ -1064,7 +1062,7 @@ static int tegra_hv_vse_safety_sha_init(struct ahash_request *req)
sha_ctx = crypto_ahash_ctx(tfm); sha_ctx = crypto_ahash_ctx(tfm);
engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].engine_id; engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].engine_id;
se_dev = g_virtual_se_dev[engine_id]; se_dev = g_crypto_to_ivc_map[sha_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -1144,9 +1142,11 @@ static int tegra_hv_vse_safety_sha_op(struct ahash_request *req, bool is_last)
uint32_t engine_id; uint32_t engine_id;
int err = 0; int err = 0;
const struct tegra_vse_dma_buf *plaintext, *hash_result; const struct tegra_vse_dma_buf *plaintext, *hash_result;
bool is_zero_copy;
engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].engine_id; engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].engine_id;
se_dev = g_virtual_se_dev[engine_id]; se_dev = g_crypto_to_ivc_map[sha_ctx->node_id].se_dev;
is_zero_copy = g_crypto_to_ivc_map[sha_ctx->node_id].is_zero_copy_node;
if (sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 || if (sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 ||
sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256) { sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256) {
@@ -1168,11 +1168,27 @@ static int tegra_hv_vse_safety_sha_op(struct ahash_request *req, bool is_last)
 	g_crypto_to_ivc_map[sha_ctx->node_id].vse_thread_start = true;
 
-	plaintext = tegra_hv_vse_get_dma_buf(sha_ctx->node_id, SHA_SRC_BUF_IDX,
-			sha_ctx->user_src_buf_size);
-	if (!plaintext) {
-		dev_err(se_dev->dev, "%s src_buf is NULL\n", __func__);
-		return -ENOMEM;
+	msg_len = sha_ctx->user_src_buf_size;
+	if (!is_zero_copy) {
+		plaintext = tegra_hv_vse_get_dma_buf(sha_ctx->node_id, SHA_SRC_BUF_IDX,
+				sha_ctx->user_src_buf_size);
+		if (!plaintext) {
+			dev_err(se_dev->dev, "%s src_buf is NULL\n", __func__);
+			return -ENOMEM;
+		}
+
+		if (msg_len > 0) {
+			err = copy_from_user(plaintext->buf_ptr, sha_ctx->user_src_buf, msg_len);
+			if (err) {
+				pr_err("%s(): Failed to copy plaintext: %d\n", __func__, err);
+				goto exit;
+			}
+		}
+	} else {
+		if (g_node_dma[sha_ctx->node_id].mapped_membuf_count == 0U) {
+			dev_err(se_dev->dev, "%s no mapped membuf found\n", __func__);
+			return -ENOMEM;
+		}
 	}
 
 	hash_result = tegra_hv_vse_get_dma_buf(sha_ctx->node_id, SHA_HASH_BUF_IDX,
@@ -1205,15 +1221,6 @@ static int tegra_hv_vse_safety_sha_op(struct ahash_request *req, bool is_last)
memcpy(psha->op_hash.hash, sha_ctx->intermediate_digest, memcpy(psha->op_hash.hash, sha_ctx->intermediate_digest,
sha_ctx->intermediate_digest_size); sha_ctx->intermediate_digest_size);
msg_len = sha_ctx->user_src_buf_size;
if (msg_len > 0) {
err = copy_from_user(plaintext->buf_ptr, sha_ctx->user_src_buf, msg_len);
if (err) {
pr_err("%s(): Failed to copy plaintext: %d\n", __func__, err);
goto exit;
}
}
if (is_last == true && if (is_last == true &&
(sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 || (sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 ||
sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256)) { sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256)) {
@@ -1246,7 +1253,11 @@ static int tegra_hv_vse_safety_sha_op(struct ahash_request *req, bool is_last)
 		psha->op_hash.msg_total_length[1] = temp_len >> 32;
 	}
 
-	psha->op_hash.src_addr = plaintext->buf_iova;
+	if (!is_zero_copy)
+		psha->op_hash.src_addr = plaintext->buf_iova;
+	else
+		psha->op_hash.src_addr = sha_ctx->user_src_iova;
 	psha->op_hash.src_buf_size = msg_len;
 
 	priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg.ivc_hdr.tag;
@@ -1310,7 +1321,7 @@ static int tegra_hv_vse_safety_sha_update(struct ahash_request *req)
} }
engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].engine_id; engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].engine_id;
se_dev = g_virtual_se_dev[engine_id]; se_dev = g_crypto_to_ivc_map[sha_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -1348,7 +1359,7 @@ static int tegra_hv_vse_safety_sha_finup(struct ahash_request *req)
} }
engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].engine_id; engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].engine_id;
se_dev = g_virtual_se_dev[engine_id]; se_dev = g_crypto_to_ivc_map[sha_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -1388,7 +1399,7 @@ static int tegra_hv_vse_safety_hmac_sha_setkey(struct crypto_ahash *tfm, const u
if (!ctx) if (!ctx)
return -EINVAL; return -EINVAL;
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if (keylen != 32) { if (keylen != 32) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
@@ -1416,34 +1427,35 @@ static int tegra_hv_vse_safety_hmac_sha_init(struct ahash_request *req)
 	struct crypto_ahash *tfm;
 	struct tegra_virtual_se_req_context *req_ctx;
 	struct tegra_virtual_se_hmac_sha_context *hmac_ctx;
-	struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[VIRTUAL_SE_SHA];
+	struct tegra_virtual_se_dev *se_dev;
 
 	if (!req) {
-		dev_err(se_dev->dev, "HMAC SHA request not valid\n");
+		pr_err("%s HMAC SHA request not valid\n", __func__);
 		return -EINVAL;
 	}
 
-	/* Return error if engine is in suspended state */
-	if (atomic_read(&se_dev->se_suspended))
-		return -ENODEV;
-
 	req_ctx = ahash_request_ctx(req);
 	if (!req_ctx) {
-		dev_err(se_dev->dev, "HMAC SHA req_ctx not valid\n");
+		pr_err("%s HMAC SHA req_ctx not valid\n", __func__);
 		return -EINVAL;
 	}
 
 	tfm = crypto_ahash_reqtfm(req);
 	if (!tfm) {
-		dev_err(se_dev->dev, "HMAC SHA transform not valid\n");
+		pr_err("%s HMAC SHA transform not valid\n", __func__);
 		return -EINVAL;
 	}
 
 	hmac_ctx = crypto_ahash_ctx(tfm);
 	hmac_ctx->digest_size = crypto_ahash_digestsize(tfm);
+	se_dev = g_crypto_to_ivc_map[hmac_ctx->node_id].se_dev;
+
+	/* Return error if engine is in suspended state */
+	if (atomic_read(&se_dev->se_suspended))
+		return -ENODEV;
+
 	if (!hmac_ctx->is_key_slot_allocated) {
-		pr_err("%s key is not allocated\n", __func__);
+		dev_err(se_dev->dev, "%s key is not allocated\n", __func__);
 		return -EINVAL;
 	}
@@ -1465,7 +1477,7 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req, bool is
struct tegra_virtual_se_hmac_sha_context *hmac_ctx = struct tegra_virtual_se_hmac_sha_context *hmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req)); crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[hmac_ctx->node_id].engine_id]; g_crypto_to_ivc_map[hmac_ctx->node_id].se_dev;
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
struct tegra_virtual_se_ivc_msg_t ivc_req_msg = {0}; struct tegra_virtual_se_ivc_msg_t ivc_req_msg = {0};
@@ -1764,7 +1776,7 @@ static int tegra_hv_vse_safety_hmac_sha_update(struct ahash_request *req)
return -EINVAL; return -EINVAL;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[hmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[hmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -1805,7 +1817,7 @@ static int tegra_hv_vse_safety_hmac_sha_finup(struct ahash_request *req)
return -EINVAL; return -EINVAL;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[hmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[hmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -2091,7 +2103,7 @@ static int tegra_hv_vse_safety_aes_cbc_encrypt(struct skcipher_request *req)
req_ctx->encrypt = true; req_ctx->encrypt = true;
req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].engine_id; req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].engine_id;
req_ctx->se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].engine_id]; req_ctx->se_dev = g_crypto_to_ivc_map[aes_ctx->node_id].se_dev;
if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) { if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) {
pr_err("%s: SM4 CBC is not supported for selected platform\n", __func__); pr_err("%s: SM4 CBC is not supported for selected platform\n", __func__);
return -EINVAL; return -EINVAL;
@@ -2124,7 +2136,7 @@ static int tegra_hv_vse_safety_aes_cbc_decrypt(struct skcipher_request *req)
req_ctx->encrypt = false; req_ctx->encrypt = false;
req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].engine_id; req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].engine_id;
req_ctx->se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].engine_id]; req_ctx->se_dev = g_crypto_to_ivc_map[aes_ctx->node_id].se_dev;
if ((req_ctx->se_dev->chipdata->sm_supported == false) && if ((req_ctx->se_dev->chipdata->sm_supported == false) &&
(aes_ctx->b_is_sm4 == 1U)) { (aes_ctx->b_is_sm4 == 1U)) {
@@ -2160,7 +2172,7 @@ static int tegra_hv_vse_safety_aes_ctr_encrypt(struct skcipher_request *req)
req_ctx->encrypt = true; req_ctx->encrypt = true;
req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].engine_id; req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].engine_id;
req_ctx->se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].engine_id]; req_ctx->se_dev = g_crypto_to_ivc_map[aes_ctx->node_id].se_dev;
if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) { if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) {
pr_err("%s: SM4 CTR is not supported for selected platform\n", __func__); pr_err("%s: SM4 CTR is not supported for selected platform\n", __func__);
return -EINVAL; return -EINVAL;
@@ -2193,7 +2205,7 @@ static int tegra_hv_vse_safety_aes_ctr_decrypt(struct skcipher_request *req)
req_ctx->encrypt = false; req_ctx->encrypt = false;
req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].engine_id; req_ctx->engine_id = g_crypto_to_ivc_map[aes_ctx->node_id].engine_id;
req_ctx->se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].engine_id]; req_ctx->se_dev = g_crypto_to_ivc_map[aes_ctx->node_id].se_dev;
if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) { if ((req_ctx->se_dev->chipdata->sm_supported == false) && (aes_ctx->b_is_sm4 == 1U)) {
pr_err("%s: SM4 CTR is not supported for selected platform\n", __func__); pr_err("%s: SM4 CTR is not supported for selected platform\n", __func__);
return -EINVAL; return -EINVAL;
@@ -2215,7 +2227,7 @@ static int tegra_hv_vse_safety_tsec_sv_op(struct ahash_request *req)
struct tegra_virtual_se_aes_cmac_context *cmac_ctx = struct tegra_virtual_se_aes_cmac_context *cmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req)); crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].engine_id]; g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg;
@@ -2380,7 +2392,7 @@ static int tegra_hv_vse_safety_cmac_sv_op_hw_verify_supported(
struct tegra_virtual_se_aes_cmac_context *cmac_ctx = struct tegra_virtual_se_aes_cmac_context *cmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req)); crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].engine_id]; g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg;
@@ -2524,7 +2536,7 @@ static int tegra_hv_vse_safety_cmac_sv_op(struct ahash_request *req, bool is_las
struct tegra_virtual_se_aes_cmac_context *cmac_ctx = struct tegra_virtual_se_aes_cmac_context *cmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req)); crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].engine_id]; g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg;
@@ -2732,7 +2744,7 @@ static int tegra_hv_vse_safety_cmac_init(struct ahash_request *req)
return -EINVAL; return -EINVAL;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -2781,7 +2793,7 @@ static int tegra_hv_vse_safety_cmac_update(struct ahash_request *req)
return -EINVAL; return -EINVAL;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -2809,7 +2821,7 @@ static int tegra_hv_vse_safety_cmac_final(struct ahash_request *req)
struct tegra_virtual_se_aes_cmac_context *cmac_ctx = struct tegra_virtual_se_aes_cmac_context *cmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req)); crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].engine_id]; g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -2841,7 +2853,7 @@ static int tegra_hv_vse_safety_cmac_finup(struct ahash_request *req)
return -EINVAL; return -EINVAL;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -2882,7 +2894,7 @@ static int tegra_hv_tsec_safety_cmac_finup(struct ahash_request *req)
return -EINVAL; return -EINVAL;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -2902,7 +2914,7 @@ static int tegra_hv_vse_safety_cmac_digest(struct ahash_request *req)
struct tegra_virtual_se_aes_cmac_context *cmac_ctx = struct tegra_virtual_se_aes_cmac_context *cmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req)); crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[cmac_ctx->node_id].engine_id]; g_crypto_to_ivc_map[cmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) if (atomic_read(&se_dev->se_suspended))
@@ -2925,7 +2937,7 @@ int tegra_hv_vse_safety_tsec_get_keyload_status(uint32_t node_id, uint32_t *err_
if (node_id >= MAX_NUMBER_MISC_DEVICES) if (node_id >= MAX_NUMBER_MISC_DEVICES)
return -ENODEV; return -ENODEV;
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[node_id].engine_id]; se_dev = g_crypto_to_ivc_map[node_id].se_dev;
pivck = g_crypto_to_ivc_map[node_id].ivck; pivck = g_crypto_to_ivc_map[node_id].ivck;
ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg),
@@ -2988,6 +3000,179 @@ free_exit:
} }
EXPORT_SYMBOL(tegra_hv_vse_safety_tsec_get_keyload_status); EXPORT_SYMBOL(tegra_hv_vse_safety_tsec_get_keyload_status);
static int tegra_hv_vse_safety_validate_membuf_common(struct tegra_virtual_se_membuf_context *ctx)
{
struct tegra_virtual_se_dev *se_dev = NULL;
int err = 0;
if (!ctx) {
pr_err("%s ctx is null\n", __func__);
err = -EINVAL;
goto exit;
}
if (ctx->node_id >= MAX_NUMBER_MISC_DEVICES) {
pr_err("%s node_id is invalid\n", __func__);
err = -ENODEV;
goto exit;
}
se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if (ctx->fd < 0) {
dev_err(se_dev->dev, "%s fd is invalid\n", __func__);
err = -EINVAL;
goto exit;
}
exit:
return err;
}
int tegra_hv_vse_safety_map_membuf(struct tegra_virtual_se_membuf_context *ctx)
{
struct tegra_virtual_se_dev *se_dev = NULL;
struct tegra_vse_membuf_ctx *membuf_ctx = NULL;
struct dma_buf *dmabuf;
struct dma_buf_attachment *attach;
struct sg_table *sgt;
dma_addr_t dma_addr;
dma_addr_t phys_addr;
uint32_t i;
int err = 0;
err = tegra_hv_vse_safety_validate_membuf_common(ctx);
if (err != 0)
return err;
se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if (g_node_dma[ctx->node_id].mapped_membuf_count >= MAX_ZERO_COPY_BUFS) {
dev_err(se_dev->dev, "%s no free membuf_ctx\n", __func__);
return -ENOMEM;
}
for (i = 0U; i < MAX_ZERO_COPY_BUFS; i++) {
membuf_ctx = &g_node_dma[ctx->node_id].membuf_ctx[i];
if (membuf_ctx->fd == -1)
break;
}
if (i == MAX_ZERO_COPY_BUFS) {
dev_err(se_dev->dev, "%s no free membuf_ctx\n", __func__);
return -ENOMEM;
}
dmabuf = dma_buf_get(ctx->fd);
if (IS_ERR_OR_NULL(dmabuf)) {
dev_err(se_dev->dev, "%s dma_buf_get failed\n", __func__);
return -EFAULT;
}
membuf_ctx->dmabuf = dmabuf;
attach = dma_buf_attach(dmabuf, se_dev->dev);
if (IS_ERR_OR_NULL(attach)) {
err = PTR_ERR(dmabuf);
dev_err(se_dev->dev, "%s dma_buf_attach failed\n", __func__);
goto buf_attach_err;
}
membuf_ctx->attach = attach;
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(sgt)) {
err = PTR_ERR(sgt);
dev_err(se_dev->dev, "%s dma_buf_map_attachment failed\n", __func__);
goto buf_map_err;
}
phys_addr = sg_phys(sgt->sgl);
dma_addr = sg_dma_address(sgt->sgl);
if (!dma_addr)
dma_addr = phys_addr;
ctx->iova = dma_addr;
membuf_ctx->fd = ctx->fd;
g_node_dma[ctx->node_id].mapped_membuf_count += 1U;
return err;
buf_map_err:
dma_buf_detach(dmabuf, attach);
buf_attach_err:
dma_buf_put(dmabuf);
membuf_ctx->fd = -1;
return err;
}
EXPORT_SYMBOL(tegra_hv_vse_safety_map_membuf);
void tegra_hv_vse_safety_unmap_all_membufs(uint32_t node_id)
{
struct tegra_vse_membuf_ctx *membuf_ctx = NULL;
uint32_t i;
if (node_id >= MAX_NUMBER_MISC_DEVICES) {
pr_err("%s node_id is invalid\n", __func__);
return;
}
for (i = 0U; i < MAX_ZERO_COPY_BUFS; i++) {
membuf_ctx = &g_node_dma[node_id].membuf_ctx[i];
if (membuf_ctx->fd == -1)
continue;
dma_buf_detach(membuf_ctx->dmabuf, membuf_ctx->attach);
dma_buf_put(membuf_ctx->dmabuf);
membuf_ctx->fd = -1;
}
g_node_dma[node_id].mapped_membuf_count = 0U;
}
EXPORT_SYMBOL(tegra_hv_vse_safety_unmap_all_membufs);
int tegra_hv_vse_safety_unmap_membuf(struct tegra_virtual_se_membuf_context *ctx)
{
struct tegra_virtual_se_dev *se_dev;
struct tegra_vse_membuf_ctx *membuf_ctx = NULL;
uint32_t i;
int err = 0;
err = tegra_hv_vse_safety_validate_membuf_common(ctx);
if (err != 0)
return err;
se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if (g_node_dma[ctx->node_id].mapped_membuf_count == 0U) {
dev_err(se_dev->dev, "%s no mapped membuf to free\n", __func__);
return -EINVAL;
}
for (i = 0U; i < MAX_ZERO_COPY_BUFS; i++) {
membuf_ctx = &g_node_dma[ctx->node_id].membuf_ctx[i];
if (membuf_ctx->fd == ctx->fd)
break;
}
if (i == MAX_ZERO_COPY_BUFS) {
dev_err(se_dev->dev, "%s fd not found\n", __func__);
return -EINVAL;
}
dma_buf_detach(membuf_ctx->dmabuf, membuf_ctx->attach);
dma_buf_put(membuf_ctx->dmabuf);
membuf_ctx->fd = -1;
g_node_dma[ctx->node_id].mapped_membuf_count -= 1U;
return 0;
}
EXPORT_SYMBOL(tegra_hv_vse_safety_unmap_membuf);
static int tegra_hv_vse_safety_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, static int tegra_hv_vse_safety_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen) unsigned int keylen)
{ {
@@ -3001,7 +3186,7 @@ static int tegra_hv_vse_safety_cmac_setkey(struct crypto_ahash *tfm, const u8 *k
if (!ctx) if (!ctx)
return -EINVAL; return -EINVAL;
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if ((keylen != 16) && (keylen != 32)) { if ((keylen != 16) && (keylen != 32)) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
@@ -3049,7 +3234,7 @@ static int tegra_hv_vse_safety_aes_setkey(struct crypto_skcipher *tfm,
if (!ctx) if (!ctx)
return -EINVAL; return -EINVAL;
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if ((keylen != 16) && (keylen != 32)) { if ((keylen != 16) && (keylen != 32)) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
@@ -3086,7 +3271,7 @@ static int tegra_hv_vse_safety_get_random(struct tegra_virtual_se_rng_context *r
u8 *rdata, unsigned int dlen, enum rng_call is_hw_req) u8 *rdata, unsigned int dlen, enum rng_call is_hw_req)
{ {
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[rng_ctx->node_id].engine_id]; g_crypto_to_ivc_map[rng_ctx->node_id].se_dev;
u8 *rdata_addr; u8 *rdata_addr;
int err = 0, j, num_blocks, data_len = 0; int err = 0, j, num_blocks, data_len = 0;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
@@ -3202,7 +3387,7 @@ static int tegra_vse_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
if (!ctx) if (!ctx)
return -EINVAL; return -EINVAL;
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if ((keylen != 16) && (keylen != 32)) { if ((keylen != 16) && (keylen != 32)) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
@@ -3256,7 +3441,7 @@ static int tegra_vse_aes_gcm_check_params(struct aead_request *req, bool encrypt
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm); struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm);
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].engine_id]; g_crypto_to_ivc_map[aes_ctx->node_id].se_dev;
if (aes_ctx->user_tag_buf_size != TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE) { if (aes_ctx->user_tag_buf_size != TEGRA_VIRTUAL_SE_AES_GCM_TAG_SIZE) {
dev_err(se_dev->dev, dev_err(se_dev->dev,
@@ -3298,7 +3483,7 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req, bool encrypt)
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm); struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm);
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].engine_id]; g_crypto_to_ivc_map[aes_ctx->node_id].se_dev;
struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL;
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
@@ -3533,7 +3718,7 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req, bool e
struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm); struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm);
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].engine_id]; g_crypto_to_ivc_map[aes_ctx->node_id].se_dev;
struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL; struct tegra_virtual_se_ivc_msg_t *ivc_req_msg = NULL;
struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; struct tegra_virtual_se_ivc_hdr_t *ivc_hdr;
struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx;
@@ -3751,7 +3936,7 @@ static int tegra_vse_aes_gcm_encrypt(struct aead_request *req)
tfm = crypto_aead_reqtfm(req); tfm = crypto_aead_reqtfm(req);
aes_ctx = crypto_aead_ctx(tfm); aes_ctx = crypto_aead_ctx(tfm);
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[aes_ctx->node_id].se_dev;
if (se_dev->chipdata->gcm_hw_iv_supported) if (se_dev->chipdata->gcm_hw_iv_supported)
err = tegra_vse_aes_gcm_enc_dec_hw_support(req, true); err = tegra_vse_aes_gcm_enc_dec_hw_support(req, true);
@@ -3777,7 +3962,7 @@ static int tegra_vse_aes_gcm_decrypt(struct aead_request *req)
tfm = crypto_aead_reqtfm(req); tfm = crypto_aead_reqtfm(req);
aes_ctx = crypto_aead_ctx(tfm); aes_ctx = crypto_aead_ctx(tfm);
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[aes_ctx->node_id].se_dev;
if (g_crypto_to_ivc_map[aes_ctx->node_id].gcm_dec_supported == GCM_DEC_OP_SUPPORTED) { if (g_crypto_to_ivc_map[aes_ctx->node_id].gcm_dec_supported == GCM_DEC_OP_SUPPORTED) {
if (se_dev->chipdata->gcm_hw_iv_supported) if (se_dev->chipdata->gcm_hw_iv_supported)
@@ -3822,7 +4007,7 @@ static int tegra_hv_vse_aes_gmac_setkey(struct crypto_ahash *tfm, const u8 *key,
goto exit; goto exit;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[ctx->node_id].se_dev;
if ((keylen != 16) && (keylen != 32)) { if ((keylen != 16) && (keylen != 32)) {
dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen); dev_err(se_dev->dev, "%s: Unsupported key length: %d", __func__, keylen);
@@ -3887,7 +4072,7 @@ static int tegra_hv_vse_aes_gmac_sv_init(struct ahash_request *req)
goto exit; goto exit;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[gmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) { if (atomic_read(&se_dev->se_suspended)) {
dev_err(se_dev->dev, "%s: engine is in suspended state", __func__); dev_err(se_dev->dev, "%s: engine is in suspended state", __func__);
@@ -3998,7 +4183,7 @@ static int tegra_vse_aes_gmac_sv_check_params(struct ahash_request *req)
struct tegra_virtual_se_aes_gmac_context *gmac_ctx = struct tegra_virtual_se_aes_gmac_context *gmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req)); crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].engine_id]; g_crypto_to_ivc_map[gmac_ctx->node_id].se_dev;
int err = 0; int err = 0;
/* Validate aad buf len */ /* Validate aad buf len */
@@ -4030,7 +4215,7 @@ static int tegra_hv_vse_aes_gmac_sv_op(struct ahash_request *req, bool is_last)
goto exit; goto exit;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[gmac_ctx->node_id].se_dev;
pivck = g_crypto_to_ivc_map[gmac_ctx->node_id].ivck; pivck = g_crypto_to_ivc_map[gmac_ctx->node_id].ivck;
err = tegra_vse_aes_gmac_sv_check_params(req); err = tegra_vse_aes_gmac_sv_check_params(req);
@@ -4202,6 +4387,9 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req, boo
u32 match_code = SE_HW_VALUE_MATCH_CODE; u32 match_code = SE_HW_VALUE_MATCH_CODE;
u32 mismatch_code = SE_HW_VALUE_MISMATCH_CODE; u32 mismatch_code = SE_HW_VALUE_MISMATCH_CODE;
const struct tegra_vse_dma_buf *aad, *tag, *comp; const struct tegra_vse_dma_buf *aad, *tag, *comp;
dma_addr_t aad_addr = 0UL;
dma_addr_t tag_addr = 0UL;
bool is_zero_copy;
gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); gmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
if (!gmac_ctx) { if (!gmac_ctx) {
@@ -4210,33 +4398,56 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req, boo
 		goto exit;
 	}
 
-	se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].engine_id];
+	se_dev = g_crypto_to_ivc_map[gmac_ctx->node_id].se_dev;
 	pivck = g_crypto_to_ivc_map[gmac_ctx->node_id].ivck;
+	is_zero_copy = g_crypto_to_ivc_map[gmac_ctx->node_id].is_zero_copy_node;
 
 	err = tegra_vse_aes_gmac_sv_check_params(req);
 	if (err != 0)
 		goto exit;
 
-	aad = tegra_hv_vse_get_dma_buf(gmac_ctx->node_id, AES_AAD_BUF_IDX,
-			gmac_ctx->user_aad_buf_size);
-	if (!aad) {
-		pr_err("%s aad buf is NULL\n", __func__);
-		return -ENOMEM;
-	}
-
-	if (gmac_ctx->user_aad_buf_size > 0) {
-		err = copy_from_user(aad->buf_ptr, gmac_ctx->user_aad_buf,
-				gmac_ctx->user_aad_buf_size);
-		if (err) {
-			pr_err("%s(): Failed to copy aad_buf: %d\n", __func__, err);
-			goto exit;
-		}
-	}
-
-	if (gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN) {
-		tag = tegra_hv_vse_get_dma_buf(gmac_ctx->node_id,
-				AES_TAG_BUF_IDX, gmac_ctx->authsize);
-		if (!tag) {
-			pr_err("%s tag buf is NULL\n", __func__);
-			return -ENOMEM;
-		}
-	}
+	if (!is_zero_copy) {
+		aad = tegra_hv_vse_get_dma_buf(gmac_ctx->node_id, AES_AAD_BUF_IDX,
+				gmac_ctx->user_aad_buf_size);
+		if (!aad) {
+			pr_err("%s aad buf is NULL\n", __func__);
+			return -ENOMEM;
+		}
+
+		if (gmac_ctx->user_aad_buf_size > 0) {
+			err = copy_from_user(aad->buf_ptr, gmac_ctx->user_aad_buf,
+					gmac_ctx->user_aad_buf_size);
+			if (err) {
+				pr_err("%s(): Failed to copy aad_buf: %d\n", __func__, err);
+				goto exit;
+			}
+		}
+		aad_addr = aad->buf_iova;
+
+		if (gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN) {
+			tag = tegra_hv_vse_get_dma_buf(gmac_ctx->node_id,
+					AES_TAG_BUF_IDX, gmac_ctx->authsize);
+			if (!tag) {
+				pr_err("%s tag buf is NULL\n", __func__);
+				return -ENOMEM;
+			}
+			tag_addr = tag->buf_iova;
+		}
+	} else {
+		if (g_node_dma[gmac_ctx->node_id].mapped_membuf_count == 0U) {
+			dev_err(se_dev->dev, "%s no mapped membuf found\n", __func__);
+			return -ENOMEM;
+		}
+		aad_addr = gmac_ctx->user_aad_iova;
+		if (gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN)
+			tag_addr = gmac_ctx->user_tag_iova;
+	}
+
+	if (gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_VERIFY) {
+		comp = tegra_hv_vse_get_dma_buf(gmac_ctx->node_id, AES_COMP_BUF_IDX,
+				RESULT_COMPARE_BUF_SIZE);
+		if (!comp) {
+			pr_err("%s mac comp buf is NULL\n", __func__);
+			return -ENOMEM;
+		}
+	}
@@ -4254,13 +4465,6 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req, boo
goto free_exit; goto free_exit;
} }
comp = tegra_hv_vse_get_dma_buf(gmac_ctx->node_id, AES_COMP_BUF_IDX,
RESULT_COMPARE_BUF_SIZE);
if (!comp) {
pr_err("%s mac comp buf is NULL\n", __func__);
return -ENOMEM;
}
ivc_tx = &ivc_req_msg->tx[0]; ivc_tx = &ivc_req_msg->tx[0];
ivc_hdr = &ivc_req_msg->ivc_hdr; ivc_hdr = &ivc_req_msg->ivc_hdr;
ivc_hdr->num_reqs = 1; ivc_hdr->num_reqs = 1;
@@ -4283,11 +4487,11 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req, boo
memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES); memcpy(ivc_tx->aes.op_gcm.keyslot, gmac_ctx->aes_keyslot, KEYSLOT_SIZE_BYTES);
ivc_tx->aes.op_gcm.key_length = gmac_ctx->keylen; ivc_tx->aes.op_gcm.key_length = gmac_ctx->keylen;
ivc_tx->aes.op_gcm.aad_buf_size = gmac_ctx->user_aad_buf_size; ivc_tx->aes.op_gcm.aad_buf_size = gmac_ctx->user_aad_buf_size;
ivc_tx->aes.op_gcm.aad_addr = (u32)(aad->buf_iova & U32_MAX); ivc_tx->aes.op_gcm.aad_addr = aad_addr;
if (gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN) { if (gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN) {
ivc_tx->aes.op_gcm.tag_buf_size = gmac_ctx->authsize; ivc_tx->aes.op_gcm.tag_buf_size = gmac_ctx->authsize;
ivc_tx->aes.op_gcm.tag_addr = (u32)(tag->buf_iova & U32_MAX); ivc_tx->aes.op_gcm.tag_addr = tag_addr;
} }
if (gmac_ctx->is_first) if (gmac_ctx->is_first)
@@ -4333,10 +4537,12 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req, boo
 				priv->rx_status);
 		err = status_to_errno(priv->rx_status);
 		goto free_exit;
-	} else {
-		if (is_last) {
-			if (gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN) {
-				/* copy tag to req for last GMAC_SIGN requests */
-				if (gmac_ctx->authsize > 0) {
+	}
+
+	if (is_last && gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_SIGN) {
+		/* copy tag to req for last GMAC_SIGN requests */
+		if (!is_zero_copy && (gmac_ctx->authsize > 0)) {
 			err = copy_to_user(gmac_ctx->user_tag_buf, tag->buf_ptr,
 					gmac_ctx->authsize);
 			if (err) {
@@ -4344,17 +4550,16 @@ static int tegra_hv_vse_aes_gmac_sv_op_hw_support(struct ahash_request *req, boo
 				goto free_exit;
 			}
 		}
-		} else {
-			if (memcmp(comp->buf_ptr, &match_code, 4) == 0)
-				gmac_ctx->result = 0;
-			else if (memcmp(comp->buf_ptr, &mismatch_code, 4) == 0)
-				gmac_ctx->result = 1;
-			else
-				err = -EINVAL;
-		}
-	}
 	}
+
+	if (is_last && gmac_ctx->request_type == TEGRA_HV_VSE_GMAC_VERIFY) {
+		if (memcmp(comp->buf_ptr, &match_code, 4) == 0)
+			gmac_ctx->result = 0;
+		else if (memcmp(comp->buf_ptr, &mismatch_code, 4) == 0)
+			gmac_ctx->result = 1;
+		else
+			err = -EINVAL;
+	}
 
 free_exit:
 	if (ivc_req_msg)
@@ -4386,7 +4591,7 @@ static int tegra_hv_vse_aes_gmac_sv_update(struct ahash_request *req)
goto exit; goto exit;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[gmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) { if (atomic_read(&se_dev->se_suspended)) {
@@ -4424,7 +4629,7 @@ static int tegra_hv_vse_aes_gmac_sv_finup(struct ahash_request *req)
goto exit; goto exit;
} }
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].engine_id]; se_dev = g_crypto_to_ivc_map[gmac_ctx->node_id].se_dev;
/* Return error if engine is in suspended state */ /* Return error if engine is in suspended state */
if (atomic_read(&se_dev->se_suspended)) { if (atomic_read(&se_dev->se_suspended)) {
@@ -4450,7 +4655,7 @@ static int tegra_hv_vse_aes_gmac_sv_final(struct ahash_request *req)
struct tegra_virtual_se_aes_gmac_context *gmac_ctx = struct tegra_virtual_se_aes_gmac_context *gmac_ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req)); crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_virtual_se_dev *se_dev = struct tegra_virtual_se_dev *se_dev =
g_virtual_se_dev[g_crypto_to_ivc_map[gmac_ctx->node_id].engine_id]; g_crypto_to_ivc_map[gmac_ctx->node_id].se_dev;
dev_err(se_dev->dev, "%s: final not supported", __func__); dev_err(se_dev->dev, "%s: final not supported", __func__);
return -EPERM; return -EPERM;
@@ -4846,6 +5051,7 @@ static const struct tegra_vse_soc_info t194_vse_sinfo = {
.sm_supported = false, .sm_supported = false,
.gcm_hw_iv_supported = false, .gcm_hw_iv_supported = false,
.hmac_verify_hw_support = false, .hmac_verify_hw_support = false,
.zero_copy_supported = false,
}; };
static const struct tegra_vse_soc_info t234_vse_sinfo = { static const struct tegra_vse_soc_info t234_vse_sinfo = {
@@ -4854,6 +5060,7 @@ static const struct tegra_vse_soc_info t234_vse_sinfo = {
.sm_supported = false, .sm_supported = false,
.gcm_hw_iv_supported = false, .gcm_hw_iv_supported = false,
.hmac_verify_hw_support = false, .hmac_verify_hw_support = false,
.zero_copy_supported = false,
}; };
static const struct tegra_vse_soc_info se_51_vse_sinfo = { static const struct tegra_vse_soc_info se_51_vse_sinfo = {
@@ -4862,6 +5069,7 @@ static const struct tegra_vse_soc_info se_51_vse_sinfo = {
.sm_supported = true, .sm_supported = true,
.gcm_hw_iv_supported = true, .gcm_hw_iv_supported = true,
.hmac_verify_hw_support = true, .hmac_verify_hw_support = true,
.zero_copy_supported = true,
}; };
static const struct of_device_id tegra_hv_vse_safety_of_match[] = { static const struct of_device_id tegra_hv_vse_safety_of_match[] = {
@@ -4895,7 +5103,7 @@ static int tegra_vse_kthread(void *data)
size_t size_ivc_msg = sizeof(struct tegra_virtual_se_ivc_msg_t); size_t size_ivc_msg = sizeof(struct tegra_virtual_se_ivc_msg_t);
enum ivc_irq_state *irq_state; enum ivc_irq_state *irq_state;
se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[node_id].engine_id]; se_dev = g_crypto_to_ivc_map[node_id].se_dev;
ivc_msg = devm_kzalloc(se_dev->dev, size_ivc_msg, GFP_KERNEL); ivc_msg = devm_kzalloc(se_dev->dev, size_ivc_msg, GFP_KERNEL);
if (!ivc_msg) if (!ivc_msg)
@@ -5276,7 +5484,7 @@ static int tegra_hv_vse_allocate_se_dma_bufs(struct tegra_vse_node_dma *node_dma
node_dma->se_dev = se_dev; node_dma->se_dev = se_dev;
for (i = 0; i < MAX_SE_DMA_BUFS; i++) { for (i = 0; i < MAX_SE_DMA_BUFS; i++) {
if (buf_sizes[i] == 0U) if (buf_sizes[i] == 0U)
break; continue;
node_dma->se_dma_buf[i].buf_ptr = dma_alloc_coherent(se_dev, node_dma->se_dma_buf[i].buf_ptr = dma_alloc_coherent(se_dev,
buf_sizes[i], buf_sizes[i],
@@ -5370,6 +5578,11 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
struct tegra_vse_soc_info *pdata = NULL; struct tegra_vse_soc_info *pdata = NULL;
static uint32_t s_node_id; static uint32_t s_node_id;
uint32_t ivc_cnt, cnt, instance_id; uint32_t ivc_cnt, cnt, instance_id;
bool has_zero_copy_prop;
static bool s_aes_alg_register_done;
static bool s_sha_alg_register_done;
static bool s_tsec_alg_register_done;
bool is_aes_alg, is_sha_alg, is_tsec_alg;
gcm_supports_dma = of_property_read_bool(pdev->dev.of_node, "nvidia,gcm-dma-support"); gcm_supports_dma = of_property_read_bool(pdev->dev.of_node, "nvidia,gcm-dma-support");
@@ -5388,6 +5601,8 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
return 0; return 0;
} }
has_zero_copy_prop = of_property_read_bool(pdev->dev.of_node, "#zero-copy");
se_dev = devm_kzalloc(&pdev->dev, se_dev = devm_kzalloc(&pdev->dev,
sizeof(struct tegra_virtual_se_dev), sizeof(struct tegra_virtual_se_dev),
GFP_KERNEL); GFP_KERNEL);
@@ -5415,7 +5630,28 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
 		goto exit;
 	}
 
-	g_virtual_se_dev[engine_id] = se_dev;
+	switch (engine_id) {
+	case VIRTUAL_SE_AES0:
+	case VIRTUAL_SE_AES1:
+	case VIRTUAL_GCSE1_AES0:
+	case VIRTUAL_GCSE1_AES1:
+	case VIRTUAL_GCSE2_AES0:
+	case VIRTUAL_GCSE2_AES1:
+		is_aes_alg = true;
+		break;
+	case VIRTUAL_SE_SHA:
+	case VIRTUAL_GCSE1_SHA:
+	case VIRTUAL_GCSE2_SHA:
+		is_sha_alg = true;
+		break;
+	case VIRTUAL_SE_TSEC:
+		is_tsec_alg = true;
+		break;
+	default:
+		dev_err(se_dev->dev, "%s unsupported engine id %u\n", __func__, engine_id);
+		err = -EINVAL;
+		goto exit;
+	}
 
 	/* read ivccfg from dts */
 	err = of_property_read_u32_index(np, "nvidia,ivccfg_cnt", 0, &ivc_cnt);
@@ -5464,12 +5700,6 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
goto exit; goto exit;
} }
if (instance_id >= ivc_cnt) {
pr_err("%s Error: invalid instance id %u\n", __func__, instance_id);
err = -EINVAL;
goto exit;
}
crypto_dev = &g_crypto_to_ivc_map[s_node_id]; crypto_dev = &g_crypto_to_ivc_map[s_node_id];
err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN
@@ -5488,6 +5718,7 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
crypto_dev->ivc_id = ivc_id; crypto_dev->ivc_id = ivc_id;
crypto_dev->node_id = s_node_id; crypto_dev->node_id = s_node_id;
crypto_dev->instance_id = instance_id; crypto_dev->instance_id = instance_id;
crypto_dev->se_dev = se_dev;
crypto_dev->node_in_use = true; crypto_dev->node_in_use = true;
err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN
@@ -5520,6 +5751,23 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
goto exit; goto exit;
} }
if (has_zero_copy_prop) {
if (!se_dev->chipdata->zero_copy_supported) {
pr_err("Error: zero copy is not supported on this platform\n");
err = -ENODEV;
goto exit;
}
if (crypto_dev->max_buffer_size > 0U) {
pr_err("Error: max buffer size must be 0 if 0-copy is supported\n");
err = -ENODEV;
goto exit;
}
crypto_dev->is_zero_copy_node = true;
} else {
crypto_dev->is_zero_copy_node = false;
}
err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN err = of_property_read_u32_index(np, "nvidia,ivccfg", cnt * TEGRA_IVCCFG_ARRAY_LEN
+ TEGRA_CHANNEL_GROUPID_OFFSET, &crypto_dev->channel_grp_id); + TEGRA_CHANNEL_GROUPID_OFFSET, &crypto_dev->channel_grp_id);
if (err) { if (err) {
@@ -5645,7 +5893,7 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
s_node_id++; s_node_id++;
} }
if (engine_id == VIRTUAL_SE_AES0) { if (is_aes_alg && !s_aes_alg_register_done) {
err = crypto_register_ahash(&cmac_alg); err = crypto_register_ahash(&cmac_alg);
if (err) { if (err) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
@@ -5674,10 +5922,6 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
goto release_bufs; goto release_bufs;
} }
}
if (engine_id == VIRTUAL_SE_AES1) {
err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
if (err) { if (err) {
dev_err(&pdev->dev, "aes alg register failed: %d\n", dev_err(&pdev->dev, "aes alg register failed: %d\n",
@@ -5685,7 +5929,6 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
goto release_bufs; goto release_bufs;
} }
if (se_dev->chipdata->gcm_decrypt_supported) { if (se_dev->chipdata->gcm_decrypt_supported) {
err = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); err = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
if (err) { if (err) {
@@ -5694,12 +5937,11 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
goto release_bufs; goto release_bufs;
} }
} }
s_aes_alg_register_done = true;
} }
if ((engine_id == VIRTUAL_SE_SHA || if (is_sha_alg && !s_sha_alg_register_done) {
engine_id == VIRTUAL_GCSE1_SHA ||
engine_id == VIRTUAL_GCSE2_SHA) &&
s_set_sha_algs == false) {
for (i = 0; i < ARRAY_SIZE(sha_algs); i++) { for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
err = crypto_register_ahash(&sha_algs[i]); err = crypto_register_ahash(&sha_algs[i]);
if (err) { if (err) {
@@ -5708,15 +5950,19 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
goto release_bufs; goto release_bufs;
} }
} }
s_set_sha_algs = true;
s_sha_alg_register_done = true;
} }
if (engine_id == VIRTUAL_SE_TSEC) {
if (is_tsec_alg && !s_tsec_alg_register_done) {
err = crypto_register_ahash(&tsec_alg); err = crypto_register_ahash(&tsec_alg);
if (err) { if (err) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
"Tsec alg register failed. Err %d\n", err); "Tsec alg register failed. Err %d\n", err);
goto release_bufs; goto release_bufs;
} }
s_tsec_alg_register_done = true;
} }
se_dev->engine_id = engine_id; se_dev->engine_id = engine_id;
@@ -5836,10 +6082,13 @@ static struct platform_driver tegra_hv_vse_safety_driver = {
 static int __init tegra_hv_vse_safety_module_init(void)
 {
-	uint32_t i;
+	uint32_t i, j;
 
-	for (i = 0U; i < MAX_NUMBER_MISC_DEVICES; i++)
+	for (i = 0U; i < MAX_NUMBER_MISC_DEVICES; i++) {
 		g_crypto_to_ivc_map[i].node_in_use = false;
+		for (j = 0; j < MAX_ZERO_COPY_BUFS; j++)
+			g_node_dma[i].membuf_ctx[j].fd = -1;
+	}
 
 	return platform_driver_register(&tegra_hv_vse_safety_driver);
 }
@@ -5852,6 +6101,12 @@ static void __exit tegra_hv_vse_safety_module_exit(void)
module_init(tegra_hv_vse_safety_module_init); module_init(tegra_hv_vse_safety_module_init);
module_exit(tegra_hv_vse_safety_module_exit); module_exit(tegra_hv_vse_safety_module_exit);
#if defined(NV_MODULE_IMPORT_NS_CALLS_STRINGIFY)
MODULE_IMPORT_NS(DMA_BUF);
#else
MODULE_IMPORT_NS("DMA_BUF");
#endif
MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>"); MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>");
MODULE_DESCRIPTION("Virtual Security Engine driver over Tegra Hypervisor IVC channel"); MODULE_DESCRIPTION("Virtual Security Engine driver over Tegra Hypervisor IVC channel");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");


@@ -11,6 +11,7 @@
#define KEYSLOT_OFFSET_BYTES 8 #define KEYSLOT_OFFSET_BYTES 8
#define MAX_SE_DMA_BUFS 4 #define MAX_SE_DMA_BUFS 4
#define TEGRA_HV_VSE_AES_IV_LEN 16U #define TEGRA_HV_VSE_AES_IV_LEN 16U
#define MAX_ZERO_COPY_BUFS 6U
struct tegra_vse_soc_info { struct tegra_vse_soc_info {
bool gcm_decrypt_supported; bool gcm_decrypt_supported;
@@ -18,6 +19,7 @@ struct tegra_vse_soc_info {
bool sm_supported; bool sm_supported;
bool gcm_hw_iv_supported; bool gcm_hw_iv_supported;
bool hmac_verify_hw_support; bool hmac_verify_hw_support;
bool zero_copy_supported;
}; };
/* GCM Operation Supported Flag */ /* GCM Operation Supported Flag */
@@ -38,11 +40,19 @@ struct tegra_vse_dma_buf {
uint32_t buf_len; uint32_t buf_len;
}; };
struct tegra_vse_membuf_ctx {
int fd;
struct dma_buf *dmabuf;
struct dma_buf_attachment *attach;
};
struct tegra_vse_node_dma { struct tegra_vse_node_dma {
struct device *se_dev; struct device *se_dev;
struct device *gpcdma_dev; struct device *gpcdma_dev;
struct tegra_vse_dma_buf se_dma_buf[MAX_SE_DMA_BUFS]; struct tegra_vse_dma_buf se_dma_buf[MAX_SE_DMA_BUFS];
struct tegra_vse_dma_buf gpc_dma_buf; struct tegra_vse_dma_buf gpc_dma_buf;
struct tegra_vse_membuf_ctx membuf_ctx[MAX_ZERO_COPY_BUFS];
uint32_t mapped_membuf_count;
}; };
struct crypto_dev_to_ivc_map { struct crypto_dev_to_ivc_map {
@@ -74,6 +84,8 @@ struct crypto_dev_to_ivc_map {
struct mutex irq_state_lock; struct mutex irq_state_lock;
struct tegra_vse_dma_buf mempool; struct tegra_vse_dma_buf mempool;
bool node_in_use; bool node_in_use;
bool is_zero_copy_node;
struct tegra_virtual_se_dev *se_dev;
}; };
struct tegra_virtual_se_dev { struct tegra_virtual_se_dev {
@@ -186,6 +198,8 @@ struct tegra_virtual_se_aes_gmac_context {
bool is_first; bool is_first;
/* For GMAC_VERIFY tag comparison result */ /* For GMAC_VERIFY tag comparison result */
uint8_t result; uint8_t result;
uint64_t user_aad_iova;
uint64_t user_tag_iova;
}; };
/* Security Engine SHA context */ /* Security Engine SHA context */
@@ -205,6 +219,7 @@ struct tegra_virtual_se_sha_context {
uint8_t *user_src_buf; uint8_t *user_src_buf;
uint8_t *user_digest_buffer; uint8_t *user_digest_buffer;
uint32_t user_src_buf_size; uint32_t user_src_buf_size;
uint64_t user_src_iova;
}; };
enum hmac_sha_request_type { enum hmac_sha_request_type {
@@ -237,6 +252,12 @@ struct tegra_virtual_se_hmac_sha_context {
uint8_t result; uint8_t result;
}; };
struct tegra_virtual_se_membuf_context {
int fd;
int64_t iova;
uint32_t node_id;
};
/* Security Engine request context */ /* Security Engine request context */
struct tegra_virtual_se_req_context { struct tegra_virtual_se_req_context {
@@ -253,4 +274,13 @@ struct crypto_dev_to_ivc_map *tegra_hv_vse_get_db(void);
/* API to get tsec keyload status from vse driver */ /* API to get tsec keyload status from vse driver */
int tegra_hv_vse_safety_tsec_get_keyload_status(uint32_t node_id, uint32_t *err_code); int tegra_hv_vse_safety_tsec_get_keyload_status(uint32_t node_id, uint32_t *err_code);
/* API to Map memory buffer corresponding to an FD and return IOVA */
int tegra_hv_vse_safety_map_membuf(struct tegra_virtual_se_membuf_context *ctx);
/* API to Unmap memory buffer corresponding to an FD */
int tegra_hv_vse_safety_unmap_membuf(struct tegra_virtual_se_membuf_context *ctx);
/* API to Unmap all memory buffers corresponding to a node id */
void tegra_hv_vse_safety_unmap_all_membufs(uint32_t node_id);
#endif /*__TEGRA_HV_VSE_H*/ #endif /*__TEGRA_HV_VSE_H*/
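
To make the new exports above concrete, here is a minimal usage sketch from the point of view of an in-kernel caller such as the cryptodev ioctl layer. It assumes this header is already included; the wrapper function names and the node_id/fd values are illustrative and not part of the patch.

/* Illustrative sketch only: exercising the map/unmap helpers declared above. */
static int example_membuf_map(uint32_t node_id, int fd, int64_t *iova_out)
{
	struct tegra_virtual_se_membuf_context membuf = {
		.fd      = fd,       /* dma-buf fd received from user space */
		.node_id = node_id,  /* crypto device node that will use the buffer */
	};
	int err;

	err = tegra_hv_vse_safety_map_membuf(&membuf);
	if (err)
		return err;

	/* IOVA to program into zero-copy SHA/GMAC requests */
	*iova_out = membuf.iova;
	return 0;
}

static void example_membuf_unmap(uint32_t node_id, int fd)
{
	struct tegra_virtual_se_membuf_context membuf = {
		.fd      = fd,
		.node_id = node_id,
	};

	/*
	 * Unmap a single buffer; when the device node is closed, the driver
	 * instead calls tegra_hv_vse_safety_unmap_all_membufs(node_id).
	 */
	tegra_hv_vse_safety_unmap_membuf(&membuf);
}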


@@ -67,6 +67,7 @@
struct nvvse_devnode { struct nvvse_devnode {
struct miscdevice *g_misc_devices; struct miscdevice *g_misc_devices;
struct mutex lock; struct mutex lock;
bool node_in_use;
} nvvse_devnode[MAX_NUMBER_MISC_DEVICES]; } nvvse_devnode[MAX_NUMBER_MISC_DEVICES];
static struct tegra_nvvse_get_ivc_db ivc_database; static struct tegra_nvvse_get_ivc_db ivc_database;
@@ -116,6 +117,7 @@ struct tnvvse_crypto_ctx {
uint32_t max_rng_buff; uint32_t max_rng_buff;
char *sha_result; char *sha_result;
uint32_t node_id; uint32_t node_id;
bool is_zero_copy_node;
}; };
#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE) #if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
@@ -240,11 +242,43 @@ static int tnvvse_crypto_validate_sha_update_req(struct tnvvse_crypto_ctx *ctx,
 		}
 	}
 
-	if (sha_update_ctl->input_buffer_size > ivc_database.max_buffer_size[ctx->node_id]) {
-		pr_err("%s(): Msg size is greater than supported size of %d Bytes\n", __func__,
-			ivc_database.max_buffer_size[ctx->node_id]);
-		ret = -EINVAL;
-		goto exit;
+	if (ctx->is_zero_copy_node) {
+		if (sha_update_ctl->b_is_zero_copy == 0U) {
+			pr_err("%s(): only zero copy operation is supported on this node\n",
+				__func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		if ((sha_state->sha_total_msg_length > 0U) && sha_update_ctl->is_last) {
+			pr_err("%s(): Multipart SHA is not supported for zero-copy\n", __func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		if ((sha_type != TEGRA_NVVSE_SHA_TYPE_SHA256)
+			&& (sha_type != TEGRA_NVVSE_SHA_TYPE_SHA384)
+			&& (sha_type != TEGRA_NVVSE_SHA_TYPE_SHA512)
+			&& (sha_type != TEGRA_NVVSE_SHA_TYPE_SHA3_256)
+			&& (sha_type != TEGRA_NVVSE_SHA_TYPE_SHA3_384)
+			&& (sha_type != TEGRA_NVVSE_SHA_TYPE_SHA3_512)) {
+			pr_err("%s(): unsupported SHA req type for zero-copy", __func__);
+			ret = -EINVAL;
+		}
+	} else {
+		if (sha_update_ctl->b_is_zero_copy != 0U) {
+			pr_err("%s(): zero copy operation is not supported on this node\n",
+				__func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		if (sha_update_ctl->input_buffer_size >
+				ivc_database.max_buffer_size[ctx->node_id]) {
+			pr_err("%s(): Msg size is greater than supported size of %d Bytes\n",
+				__func__, ivc_database.max_buffer_size[ctx->node_id]);
+			ret = -EINVAL;
+		}
 	}
 
 exit:
@@ -273,8 +307,14 @@ static int tnvvse_crypto_sha_update(struct tnvvse_crypto_ctx *ctx,
 	}
 
 	ret = tnvvse_crypto_validate_sha_update_req(ctx, sha_update_ctl);
-	if (ret != 0)
+	if (ret != 0) {
+		if (ret != -EAGAIN) {
+			/* Force reset SHA state and return */
+			sha_state->sha_init_done = 0U;
+			sha_state->sha_total_msg_length = 0U;
+		}
 		goto exit;
+	}
 
 	if (sha_update_ctl->init_only != 0U) {
 		/* Only set state as SHA init done and return */
@@ -317,10 +357,13 @@ static int tnvvse_crypto_sha_update(struct tnvvse_crypto_ctx *ctx,
sha_ctx->digest_size = sha_update_ctl->digest_size; sha_ctx->digest_size = sha_update_ctl->digest_size;
sha_ctx->total_count = sha_state->sha_total_msg_length; sha_ctx->total_count = sha_state->sha_total_msg_length;
sha_ctx->intermediate_digest = sha_state->sha_intermediate_digest; sha_ctx->intermediate_digest = sha_state->sha_intermediate_digest;
sha_ctx->user_src_buf = sha_update_ctl->in_buff;
sha_ctx->user_digest_buffer = sha_update_ctl->digest_buffer; sha_ctx->user_digest_buffer = sha_update_ctl->digest_buffer;
if (ctx->is_zero_copy_node)
sha_ctx->user_src_iova = sha_update_ctl->in_buff_iova;
else
sha_ctx->user_src_buf = sha_update_ctl->in_buff;
if (sha_state->sha_total_msg_length == sha_ctx->user_src_buf_size) if (sha_state->sha_total_msg_length == sha_ctx->user_src_buf_size)
sha_ctx->is_first = true; sha_ctx->is_first = true;
else else
@@ -871,6 +914,22 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
struct ahash_request *req; struct ahash_request *req;
int ret = -EINVAL; int ret = -EINVAL;
if (ctx->is_zero_copy_node) {
if (gmac_sign_verify_ctl->b_is_zero_copy == 0U) {
pr_err("%s(): only zero copy operation is supported on this node\n",
__func__);
ret = -EINVAL;
goto done;
}
} else {
if (gmac_sign_verify_ctl->b_is_zero_copy != 0U) {
pr_err("%s(): zero copy operation is not supported on this node\n",
__func__);
ret = -EINVAL;
goto done;
}
}
tfm = crypto_alloc_ahash("gmac-vse(aes)", 0, 0); tfm = crypto_alloc_ahash("gmac-vse(aes)", 0, 0);
if (IS_ERR(tfm)) { if (IS_ERR(tfm)) {
pr_err("%s(): Failed to load transform for gmac-vse(aes):%ld\n", __func__, pr_err("%s(): Failed to load transform for gmac-vse(aes):%ld\n", __func__,
@@ -889,18 +948,15 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
goto free_tfm;
}
-gmac_ctx->user_aad_buf = gmac_sign_verify_ctl->src_buffer;
-gmac_ctx->user_tag_buf = gmac_sign_verify_ctl->tag_buffer;
gmac_ctx->user_aad_buf_size = gmac_sign_verify_ctl->data_length;
-if (gmac_ctx->user_aad_buf_size > ivc_database.max_buffer_size[ctx->node_id] ||
-gmac_ctx->user_aad_buf_size == 0) {
-pr_err("%s(): Failed due to invalid aad buf size: %d\n", __func__, ret);
-goto done;
-}
if (ctx->is_zero_copy_node) {
gmac_ctx->user_aad_iova = gmac_sign_verify_ctl->src_buffer_iova;
} else {
gmac_ctx->user_aad_buf = gmac_sign_verify_ctl->src_buffer;
}
-if (gmac_sign_verify_ctl->is_last &&
-gmac_sign_verify_ctl->tag_length != TEGRA_NVVSE_AES_GCM_TAG_SIZE) {
if ((gmac_sign_verify_ctl->is_last) &&
(gmac_sign_verify_ctl->tag_length != TEGRA_NVVSE_AES_GCM_TAG_SIZE)) {
pr_err("%s(): Failed due to invalid tag length (%d) invalid", __func__,
gmac_sign_verify_ctl->tag_length);
goto done;
@@ -928,13 +984,18 @@ static int tnvvse_crypto_aes_gmac_sign_verify(struct tnvvse_crypto_ctx *ctx,
goto free_tfm;
}
} else {
-if (gmac_sign_verify_ctl->gmac_type ==
-TEGRA_NVVSE_AES_GMAC_VERIFY) {
if (gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_SIGN) {
if (ctx->is_zero_copy_node)
gmac_ctx->user_tag_iova = gmac_sign_verify_ctl->tag_buffer_iova;
else
gmac_ctx->user_tag_buf = gmac_sign_verify_ctl->tag_buffer;
} else {
gmac_ctx->user_tag_buf = gmac_sign_verify_ctl->tag_buffer;
memcpy(iv, gmac_sign_verify_ctl->initial_vector,
TEGRA_NVVSE_AES_GCM_IV_LEN);
gmac_ctx->iv = iv;
}
ret = wait_async_op(&sha_state->sha_complete,
crypto_ahash_finup(req));
if (ret) {
@@ -1373,6 +1434,46 @@ static int tnvvse_crypto_get_ivc_db(struct tegra_nvvse_get_ivc_db *get_ivc_db)
return ret;
}
static int tnvvse_crypto_map_membuf(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_map_membuf_ctl *map_membuf_ctl)
{
struct tegra_virtual_se_membuf_context membuf_ctx;
int err = 0;
membuf_ctx.node_id = ctx->node_id;
membuf_ctx.fd = map_membuf_ctl->fd;
err = tegra_hv_vse_safety_map_membuf(&membuf_ctx);
if (err) {
pr_err("%s(): map membuf failed %d\n", __func__, err);
goto exit;
}
map_membuf_ctl->iova = membuf_ctx.iova;
exit:
return err;
}
static int tnvvse_crypto_unmap_membuf(struct tnvvse_crypto_ctx *ctx,
struct tegra_nvvse_unmap_membuf_ctl *unmap_membuf_ctl)
{
struct tegra_virtual_se_membuf_context membuf_ctx;
int err = 0;
membuf_ctx.node_id = ctx->node_id;
membuf_ctx.fd = unmap_membuf_ctl->fd;
err = tegra_hv_vse_safety_unmap_membuf(&membuf_ctx);
if (err) {
pr_err("%s(): unmap membuf failed %d\n", __func__, err);
goto exit;
}
exit:
return err;
}
static int tnvvse_crypto_dev_open(struct inode *inode, struct file *filp)
{
struct tnvvse_crypto_ctx *ctx = NULL;
@@ -1380,15 +1481,31 @@ static int tnvvse_crypto_dev_open(struct inode *inode, struct file *filp)
int ret = 0;
uint32_t node_id;
struct miscdevice *misc;
bool is_zero_copy_node;
misc = filp->private_data;
node_id = misc->this_device->id;
is_zero_copy_node = tegra_hv_vse_get_db()[node_id].is_zero_copy_node;
if (is_zero_copy_node) {
mutex_lock(&nvvse_devnode[node_id].lock);
if (nvvse_devnode[node_id].node_in_use) {
mutex_unlock(&nvvse_devnode[node_id].lock);
pr_err("%s zero copy node is already opened by another process\n",
__func__);
return -EPERM;
}
nvvse_devnode[node_id].node_in_use = true;
mutex_unlock(&nvvse_devnode[node_id].lock);
}
ctx = kzalloc(sizeof(struct tnvvse_crypto_ctx), GFP_KERNEL);
if (!ctx) {
return -ENOMEM;
}
ctx->node_id = node_id;
ctx->is_zero_copy_node = is_zero_copy_node;
ctx->rng_buff = kzalloc(NVVSE_MAX_RANDOM_NUMBER_LEN_SUPPORTED, GFP_KERNEL);
if (!ctx->rng_buff) {
@@ -1431,6 +1548,11 @@ static int tnvvse_crypto_dev_release(struct inode *inode, struct file *filp)
{
struct tnvvse_crypto_ctx *ctx = filp->private_data;
if (ctx->is_zero_copy_node) {
tegra_hv_vse_safety_unmap_all_membufs(ctx->node_id);
nvvse_devnode[ctx->node_id].node_in_use = false;
}
kfree(ctx->sha_result);
kfree(ctx->rng_buff);
kfree(ctx->sha_state.sha_intermediate_digest);
@@ -1459,6 +1581,10 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
struct tegra_nvvse_aes_gmac_sign_verify_ctl *aes_gmac_sign_verify_ctl;
struct tegra_nvvse_get_ivc_db *get_ivc_db;
struct tegra_nvvse_tsec_get_keyload_status *tsec_keyload_status;
struct tegra_nvvse_map_membuf_ctl __user *arg_map_membuf_ctl;
struct tegra_nvvse_map_membuf_ctl *map_membuf_ctl;
struct tegra_nvvse_unmap_membuf_ctl __user *arg_unmap_membuf_ctl;
struct tegra_nvvse_unmap_membuf_ctl *unmap_membuf_ctl;
int ret = 0;
/*
@@ -1472,6 +1598,37 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
mutex_lock(&nvvse_devnode[ctx->node_id].lock);
if (ctx->is_zero_copy_node) {
switch (ioctl_num) {
case NVVSE_IOCTL_CMDID_UPDATE_SHA:
case NVVSE_IOCTL_CMDID_AES_GMAC_INIT:
case NVVSE_IOCTL_CMDID_AES_GMAC_SIGN_VERIFY:
case NVVSE_IOCTL_CMDID_MAP_MEMBUF:
case NVVSE_IOCTL_CMDID_UNMAP_MEMBUF:
break;
default:
pr_err("%s(): unsupported zero copy node command(%08x)\n", __func__,
ioctl_num);
ret = -EINVAL;
break;
};
} else {
switch (ioctl_num) {
case NVVSE_IOCTL_CMDID_MAP_MEMBUF:
case NVVSE_IOCTL_CMDID_UNMAP_MEMBUF:
pr_err("%s(): unsupported node command(%08x)\n", __func__,
ioctl_num);
ret = -EINVAL;
break;
default:
break;
};
}
if (ret != 0)
goto release_lock;
switch (ioctl_num) {
case NVVSE_IOCTL_CMDID_UPDATE_SHA:
sha_update_ctl = kzalloc(sizeof(*sha_update_ctl), GFP_KERNEL);
@@ -1780,6 +1937,80 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp,
kfree(tsec_keyload_status);
break;
case NVVSE_IOCTL_CMDID_MAP_MEMBUF:
map_membuf_ctl = kzalloc(sizeof(*map_membuf_ctl), GFP_KERNEL);
if (!map_membuf_ctl) {
ret = -ENOMEM;
goto release_lock;
}
arg_map_membuf_ctl = (void __user *)arg;
ret = copy_from_user(map_membuf_ctl, arg_map_membuf_ctl,
sizeof(*map_membuf_ctl));
if (ret) {
pr_err("%s(): Failed to copy_from_user map_membuf_ctl:%d\n",
__func__, ret);
kfree(map_membuf_ctl);
goto release_lock;
}
ret = tnvvse_crypto_map_membuf(ctx, map_membuf_ctl);
if (ret) {
pr_err("%s(): Failed to map membuf status:%d\n", __func__, ret);
kfree(map_membuf_ctl);
goto release_lock;
}
ret = copy_to_user(arg_map_membuf_ctl, map_membuf_ctl,
sizeof(*map_membuf_ctl));
if (ret) {
pr_err("%s(): Failed to copy_to_user map_membuf_ctl:%d\n",
__func__, ret);
kfree(map_membuf_ctl);
goto release_lock;
}
kfree(map_membuf_ctl);
break;
case NVVSE_IOCTL_CMDID_UNMAP_MEMBUF:
unmap_membuf_ctl = kzalloc(sizeof(*unmap_membuf_ctl), GFP_KERNEL);
if (!unmap_membuf_ctl) {
ret = -ENOMEM;
goto release_lock;
}
arg_unmap_membuf_ctl = (void __user *)arg;
ret = copy_from_user(unmap_membuf_ctl, arg_unmap_membuf_ctl,
sizeof(*unmap_membuf_ctl));
if (ret) {
pr_err("%s(): Failed to copy_from_user unmap_membuf_ctl:%d\n",
__func__, ret);
kfree(unmap_membuf_ctl);
goto release_lock;
}
ret = tnvvse_crypto_unmap_membuf(ctx, unmap_membuf_ctl);
if (ret) {
pr_err("%s(): Failed to unmap membuf status:%d\n", __func__, ret);
kfree(unmap_membuf_ctl);
goto release_lock;
}
ret = copy_to_user(arg_unmap_membuf_ctl, unmap_membuf_ctl,
sizeof(*unmap_membuf_ctl));
if (ret) {
pr_err("%s(): Failed to copy_to_user unmap_membuf_ctl:%d\n",
__func__, ret);
kfree(unmap_membuf_ctl);
goto release_lock;
}
kfree(unmap_membuf_ctl);
break;
default:
pr_err("%s(): invalid ioctl code(%d[0x%08x])", __func__, ioctl_num, ioctl_num);
ret = -EINVAL;


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
-* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES.
* SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES.
* All rights reserved.
*
* Cryptographic API.
@@ -30,6 +30,8 @@
#define TEGRA_NVVSE_CMDID_TSEC_SIGN_VERIFY 13
#define TEGRA_NVVSE_CMDID_TSEC_GET_KEYLOAD_STATUS 14
#define TEGRA_NVVSE_CMDID_HMAC_SHA_SIGN_VERIFY 15
#define TEGRA_NVVSE_CMDID_MAP_MEMBUF 17
#define TEGRA_NVVSE_CMDID_UNMAP_MEMBUF 18
/** Defines the length of the AES-CBC Initial Vector */
#define TEGRA_NVVSE_AES_IV_LEN 16U
@@ -142,6 +144,15 @@ struct tegra_nvvse_sha_update_ctl {
uint8_t *digest_buffer;
/** Holds the size of the digest buffer */
uint32_t digest_size;
/** [in] Flag to indicate Zero copy request.
* 0 indicates non-Zero Copy request
* non-zero indicates Zero copy request
*/
uint8_t b_is_zero_copy;
/** [in] Holds the Input buffer IOVA address
* Not used when b_is_zero_copy flag is 0.
*/
uint64_t in_buff_iova;
};
#define NVVSE_IOCTL_CMDID_UPDATE_SHA _IOW(TEGRA_NVVSE_IOC_MAGIC, TEGRA_NVVSE_CMDID_UPDATE_SHA, \
struct tegra_nvvse_sha_update_ctl)
@@ -353,6 +364,19 @@ struct tegra_nvvse_aes_gmac_sign_verify_ctl {
* - Non-zero value indicates GMAC verification failure.
*/
uint8_t result;
/** [in] Flag to indicate Zero copy request.
* 0 indicates non-Zero Copy request
* non-zero indicates Zero copy request
*/
uint8_t b_is_zero_copy;
/** [in] Holds the Source buffer IOVA address
* Not used when b_is_zero_copy flag is 0.
*/
uint64_t src_buffer_iova;
/** [in] Holds the Tag buffer IOVA address
* Not used when b_is_zero_copy flag is 0.
*/
uint64_t tag_buffer_iova;
/** [in] Flag to indicate SM4 request.
* 0 indicates non-SM4 request
* non-zero indicates SM4 request
@@ -441,6 +465,34 @@ struct tegra_nvvse_tsec_get_keyload_status {
TEGRA_NVVSE_CMDID_TSEC_GET_KEYLOAD_STATUS, \
struct tegra_nvvse_tsec_get_keyload_status)
/**
* \brief Holds Map Membuf request parameters
*/
struct tegra_nvvse_map_membuf_ctl {
/** [in] Holds File descriptor ID
* Needs to be non-negative/non-zero value.
*/
int32_t fd;
/** [out] Holds IOVA corresponding to mapped memory buffer */
uint64_t iova;
};
#define NVVSE_IOCTL_CMDID_MAP_MEMBUF _IOWR(TEGRA_NVVSE_IOC_MAGIC, \
TEGRA_NVVSE_CMDID_MAP_MEMBUF, \
struct tegra_nvvse_map_membuf_ctl)
/**
* \brief Holds the Unmap Membuf request parameters
*/
struct tegra_nvvse_unmap_membuf_ctl {
/** [in] Holds File descriptor ID
* Needs to be a value greater than 0.
*/
int32_t fd;
};
#define NVVSE_IOCTL_CMDID_UNMAP_MEMBUF _IOWR(TEGRA_NVVSE_IOC_MAGIC, \
TEGRA_NVVSE_CMDID_UNMAP_MEMBUF, \
struct tegra_nvvse_unmap_membuf_ctl)
/**
* brief Holds IVC databse
*/
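
For reference, below is a minimal user-space sketch of the zero-copy flow these ioctls enable. It is only an illustration under assumptions: the device node path, the dma-buf fd source, and the UAPI header name are placeholders, and algorithm selection plus the init/first/last handling of struct tegra_nvvse_sha_update_ctl are omitted. Only the ioctls and struct fields shown in the diff above are taken from this change.

/*
 * Hypothetical sketch: map a dma-buf on a zero-copy node, run a SHA update
 * against its IOVA, then unmap. Device path and header name are assumptions.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "tegra-nvvse-cryptodev.h"      /* assumed name for the UAPI header above */

static int sha_zero_copy(int dmabuf_fd, uint32_t msg_len,
                         uint8_t *digest, uint32_t digest_len)
{
	struct tegra_nvvse_map_membuf_ctl map = { .fd = dmabuf_fd };
	struct tegra_nvvse_unmap_membuf_ctl unmap = { .fd = dmabuf_fd };
	struct tegra_nvvse_sha_update_ctl upd;
	int fd, ret;

	/* Zero-copy nodes accept only one open() at a time. */
	fd = open("/dev/tegra-nvvse-sha-zc", O_RDWR);   /* placeholder node name */
	if (fd < 0)
		return -errno;

	/* MAP_MEMBUF maps the dma-buf and returns the IOVA to use in requests. */
	ret = ioctl(fd, NVVSE_IOCTL_CMDID_MAP_MEMBUF, &map);
	if (ret)
		goto out_close;

	memset(&upd, 0, sizeof(upd));
	upd.b_is_zero_copy = 1;             /* serve the request from the mapped buffer */
	upd.in_buff_iova = map.iova;        /* message bytes already live in the dma-buf */
	upd.input_buffer_size = msg_len;
	upd.digest_buffer = digest;         /* digest is still returned via user memory */
	upd.digest_size = digest_len;
	/* Init/first/last bookkeeping omitted; single-shot update assumed. */
	ret = ioctl(fd, NVVSE_IOCTL_CMDID_UPDATE_SHA, &upd);

	/* Unmap explicitly; closing the node also unmaps any remaining buffers. */
	ioctl(fd, NVVSE_IOCTL_CMDID_UNMAP_MEMBUF, &unmap);
out_close:
	close(fd);
	return ret;
}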