diff --git a/drivers/crypto/tegra-hv-vse-safety.c b/drivers/crypto/tegra-hv-vse-safety.c index 6719fdf2..f722cb8e 100644 --- a/drivers/crypto/tegra-hv-vse-safety.c +++ b/drivers/crypto/tegra-hv-vse-safety.c @@ -1079,735 +1079,286 @@ exit: return err; } -static int tegra_hv_vse_safety_send_sha_data(struct tegra_virtual_se_dev *se_dev, - struct ahash_request *req, - struct tegra_virtual_se_ivc_msg_t *ivc_req_msg, - u32 count, bool islast) -{ - struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL; - struct tegra_virtual_se_ivc_hdr_t *ivc_hdr = NULL; - struct tegra_virtual_se_sha_context *sha_ctx; - struct tegra_hv_ivc_cookie *pivck; - struct tegra_vse_priv_data *priv = NULL; - struct tegra_virtual_se_req_context *req_ctx; - struct tegra_vse_tag *priv_data_ptr; - union tegra_virtual_se_sha_args *psha = NULL; - int err = 0; - u64 total_count = 0, msg_len = 0; - - if (!req) { - pr_err("%s: SHA request not valid\n", __func__); - return -EINVAL; - } - - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - pivck = g_crypto_to_ivc_map[sha_ctx->node_id].ivck; - - if (!ivc_req_msg) { - dev_err(se_dev->dev, - "%s Invalid ivc_req_msg\n", __func__); - return -EINVAL; - } - - priv = devm_kzalloc(se_dev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - req_ctx = ahash_request_ctx(req); - total_count = req_ctx->total_count; - - ivc_tx = &ivc_req_msg->tx[0]; - ivc_hdr = &ivc_req_msg->ivc_hdr; - ivc_hdr->engine = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; - ivc_tx->cmd = TEGRA_VIRTUAL_SE_CMD_SHA_HASH; - - psha = &(ivc_tx->sha); - psha->op_hash.mode = req_ctx->mode; - psha->op_hash.msg_total_length[0] = count; - psha->op_hash.msg_total_length[1] = 0; - psha->op_hash.msg_total_length[2] = 0; - psha->op_hash.msg_total_length[3] = 0; - psha->op_hash.msg_left_length[0] = count; - psha->op_hash.msg_left_length[1] = 0; - psha->op_hash.msg_left_length[2] = 0; - psha->op_hash.msg_left_length[3] = 0; - psha->op_hash.hash_length = sha_ctx->digest_size; - if (islast) { - psha->op_hash.msg_total_length[0] = total_count & 0xFFFFFFFF; - psha->op_hash.msg_total_length[1] = total_count >> 32; - } else { - msg_len = count + 8; - psha->op_hash.msg_left_length[0] = msg_len & 0xFFFFFFFF; - psha->op_hash.msg_left_length[1] = msg_len >> 32; - - if (req_ctx->is_first) { - psha->op_hash.msg_total_length[0] = msg_len & 0xFFFFFFFF; - psha->op_hash.msg_total_length[1] = msg_len >> 32; - req_ctx->is_first = false; - } else { - msg_len += 8; - psha->op_hash.msg_total_length[0] = msg_len & 0xFFFFFFFF; - psha->op_hash.msg_total_length[1] = msg_len >> 32; - } - } - - ivc_hdr->header_magic[0] = 'N'; - ivc_hdr->header_magic[1] = 'V'; - ivc_hdr->header_magic[2] = 'D'; - ivc_hdr->header_magic[3] = 'A'; - ivc_hdr->num_reqs = 1; - priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg->ivc_hdr.tag; - priv_data_ptr->priv_data = (unsigned int *)priv; - priv->cmd = VIRTUAL_SE_PROCESS; - priv->se_dev = se_dev; - - g_crypto_to_ivc_map[sha_ctx->node_id].vse_thread_start = true; - init_completion(&priv->alg_complete); - - err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, priv, ivc_req_msg, - sizeof(struct tegra_virtual_se_ivc_msg_t), sha_ctx->node_id); - if (err) { - dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); - goto exit; - } - -exit: - devm_kfree(se_dev->dev, priv); - - return err; -} - -static int tegra_hv_vse_safety_sha_send_one(struct ahash_request *req, - u32 nbytes, bool islast) -{ - struct tegra_virtual_se_dev *se_dev; - struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; - struct 
tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL; - struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); - struct tegra_virtual_se_sha_context *sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - uint8_t *residual_data_buf = sha_ctx->residual_plaintext->buf_ptr; - int err = 0; - uint32_t engine_id; - - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; - se_dev = g_virtual_se_dev[engine_id]; - ivc_req_msg = devm_kzalloc(se_dev->dev, sizeof(*ivc_req_msg), - GFP_KERNEL); - if (!ivc_req_msg) - return -ENOMEM; - - if (islast == true && - (req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 || - req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256)) { - residual_data_buf[nbytes] = 0xff; - nbytes++; - req_ctx->total_count++; - } - ivc_tx = &ivc_req_msg->tx[0]; - - ivc_tx->sha.op_hash.src_addr = sha_ctx->residual_plaintext->buf_iova; - ivc_tx->sha.op_hash.src_buf_size = nbytes; - - ivc_tx->sha.op_hash.dst = (u64)sha_ctx->hash_result->buf_iova; - memcpy(ivc_tx->sha.op_hash.hash, sha_ctx->hash_result->buf_ptr, - req_ctx->intermediate_digest_size); - err = tegra_hv_vse_safety_send_sha_data(se_dev, req, ivc_req_msg, - nbytes, islast); - if (err) { - dev_err(se_dev->dev, "%s error %d\n", __func__, err); - goto exit; - } -exit: - devm_kfree(se_dev->dev, ivc_req_msg); - return err; -} - -static int tegra_hv_vse_safety_sha_fast_path(struct ahash_request *req, - bool is_last, bool process_cur_req) -{ - struct tegra_virtual_se_dev *se_dev; - u32 bytes_process_in_req = 0, num_blks; - struct tegra_virtual_se_ivc_msg_t *ivc_req_msg; - struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx = NULL; - struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); - struct tegra_virtual_se_sha_context *sha_ctx; - int err = 0; - u32 nbytes_in_req = req->nbytes; - uint32_t engine_id; - - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; - se_dev = g_virtual_se_dev[engine_id]; - /* process_cur_req is_last : - * false false : update() -> hash - * true true : finup(), digest() -> hash - * true : finup(), digest(), final() -> result - */ - ivc_req_msg = devm_kzalloc(se_dev->dev, - sizeof(*ivc_req_msg), GFP_KERNEL); - if (!ivc_req_msg) - return -ENOMEM; - - if ((process_cur_req == false && is_last == false) || - (process_cur_req == true && is_last == true)) { - /* When calling update(), if req->nbytes is aligned with - * req_ctx->blk_size, reduce req->nbytes with req_ctx->blk_size - * to avoid hashing zero length input at the end. - */ - if (req_ctx->residual_bytes == req_ctx->blk_size) { - err = tegra_hv_vse_safety_sha_send_one(req, - req_ctx->residual_bytes, false); - if (err) { - dev_err(se_dev->dev, - "%s: failed to send residual data %u\n", - __func__, req_ctx->residual_bytes); - goto free; - } - req_ctx->residual_bytes = 0; - } - - num_blks = nbytes_in_req / req_ctx->blk_size; - req_ctx->residual_bytes = - nbytes_in_req - (num_blks * req_ctx->blk_size); - - if (num_blks > 0 && req_ctx->residual_bytes == 0) { - /* blk_size aligned. reduce size with one blk and - * handle it in the next call. 
- */ - req_ctx->residual_bytes = req_ctx->blk_size; - req_ctx->total_count += req_ctx->residual_bytes; - num_blks--; - sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), - sha_ctx->residual_plaintext->buf_ptr, req_ctx->residual_bytes, - num_blks * req_ctx->blk_size); - } else { - /* not aligned at all */ - req_ctx->total_count += req_ctx->residual_bytes; - sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), - sha_ctx->residual_plaintext->buf_ptr, req_ctx->residual_bytes, - num_blks * req_ctx->blk_size); - } - nbytes_in_req -= req_ctx->residual_bytes; - - dev_dbg(se_dev->dev, "%s: req_ctx->residual_bytes %u\n", - __func__, req_ctx->residual_bytes); - - if (num_blks > 0) { - ivc_tx = &ivc_req_msg->tx[0]; - - bytes_process_in_req = num_blks * req_ctx->blk_size; - dev_dbg(se_dev->dev, "%s: bytes_process_in_req %u\n", - __func__, bytes_process_in_req); - - sg_copy_to_buffer(req->src, sg_nents(req->src), sha_ctx->plaintext->buf_ptr, - bytes_process_in_req); - ivc_tx->sha.op_hash.src_addr = sha_ctx->plaintext->buf_iova; - ivc_tx->sha.op_hash.src_buf_size = bytes_process_in_req; - ivc_tx->sha.op_hash.dst = (u64)sha_ctx->hash_result->buf_iova; - memcpy(ivc_tx->sha.op_hash.hash, sha_ctx->hash_result->buf_ptr, - req_ctx->intermediate_digest_size); - - req_ctx->total_count += bytes_process_in_req; - - err = tegra_hv_vse_safety_send_sha_data(se_dev, req, - ivc_req_msg, bytes_process_in_req, false); - if (err) { - dev_err(se_dev->dev, "%s error %d\n", - __func__, err); - goto free; - } - } - - if (req_ctx->residual_bytes > 0 && - req_ctx->residual_bytes < req_ctx->blk_size) { - /* At this point, the buffer is not aligned with - * blk_size. Thus, buffer alignment need to be done via - * slow path. - */ - req_ctx->force_align = true; - } - } - - if (is_last) { - /* handle the last data in finup() , digest() */ - if (req_ctx->residual_bytes > 0) { - err = tegra_hv_vse_safety_sha_send_one(req, - req_ctx->residual_bytes, true); - if (err) { - dev_err(se_dev->dev, - "%s: failed to send last data %u\n", - __func__, req_ctx->residual_bytes); - goto free; - } - req_ctx->residual_bytes = 0; - } - - if (req->result) { - memcpy(req->result, sha_ctx->hash_result->buf_ptr, req_ctx->digest_size); - } else { - dev_err(se_dev->dev, "Invalid clinet result buffer\n"); - } - } - -free: - devm_kfree(se_dev->dev, ivc_req_msg); - - return err; -} - -static int tegra_hv_vse_safety_sha_slow_path(struct ahash_request *req, - bool is_last, bool process_cur_req) -{ - struct tegra_virtual_se_dev *se_dev; - struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); - struct tegra_virtual_se_sha_context *sha_ctx; - u32 nblk_bytes = 0, num_blks, buflen = SZ_4M; - u32 length = 0, skip = 0, offset = 0; - u64 total_bytes = 0, left_bytes = 0; - int err = 0; - bool data_processed; - uint32_t engine_id; - - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; - se_dev = g_virtual_se_dev[engine_id]; - - if ((process_cur_req == false && is_last == false) || - (process_cur_req == true && is_last == true)) { - - total_bytes = req_ctx->residual_bytes + req->nbytes; - num_blks = total_bytes / req_ctx->blk_size; - nblk_bytes = num_blks * req_ctx->blk_size; - offset = req_ctx->residual_bytes; - - /* if blk_size aligned, reduce 1 blk_size for the last hash */ - if ((total_bytes - nblk_bytes) == 0) - total_bytes -= req_ctx->blk_size; - - left_bytes = req->nbytes; - - data_processed = false; - while (total_bytes >= req_ctx->blk_size) { - /* Copy to linear buffer */ - 
num_blks = total_bytes / req_ctx->blk_size; - nblk_bytes = num_blks * req_ctx->blk_size; - length = min(buflen, nblk_bytes) - offset; - - sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), - sha_ctx->residual_plaintext->buf_ptr + offset, length, skip); - skip += length; - req_ctx->total_count += length; - - /* Hash */ - err = tegra_hv_vse_safety_sha_send_one(req, - length + offset, false); - if (err) { - dev_err(se_dev->dev, - "%s: failed to send one %u\n", - __func__, length + offset); - return err; - } - total_bytes -= (length + offset); - left_bytes -= length; - offset = 0; - data_processed = true; - } - - if (data_processed == true) { - /* Processed in while() loop */ - sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), - sha_ctx->residual_plaintext->buf_ptr, left_bytes, skip); - req_ctx->total_count += left_bytes; - req_ctx->residual_bytes = left_bytes; - } else { - /* Accumulate the request */ - sg_pcopy_to_buffer(req->src, (u32)sg_nents(req->src), - sha_ctx->residual_plaintext->buf_ptr + req_ctx->residual_bytes, - req->nbytes, skip); - req_ctx->total_count += req->nbytes; - req_ctx->residual_bytes += req->nbytes; - } - - if (req_ctx->force_align == true && - req_ctx->residual_bytes == req_ctx->blk_size) { - /* At this point, the buffer is aligned with blk_size. - * Thus, the next call can use fast path. - */ - req_ctx->force_align = false; - } - } - - if (is_last) { - /* handle the last data in finup() , digest() */ - if (req_ctx->residual_bytes > 0) { - err = tegra_hv_vse_safety_sha_send_one(req, - req_ctx->residual_bytes, true); - if (err) { - dev_err(se_dev->dev, - "%s: failed to send last data%u\n", - __func__, req_ctx->residual_bytes); - return err; - } - req_ctx->residual_bytes = 0; - } - - if (req->result) { - memcpy(req->result, sha_ctx->hash_result->buf_ptr, - req_ctx->digest_size); - } else { - dev_err(se_dev->dev, "Invalid clinet result buffer\n"); - } - } - - return err; -} - -static int tegra_hv_vse_safety_sha_op(struct ahash_request *req, bool is_last, - bool process_cur_req) -{ - struct tegra_virtual_se_dev *se_dev; - struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); - struct tegra_virtual_se_sha_context *sha_ctx; - u32 mode; - u32 num_blks; - int ret; - uint32_t engine_id; - - struct sha_zero_length_vector zero_vec[] = { - { - .size = SM3_DIGEST_SIZE, - .digest = "\x1a\xb2\x1d\x83\x55\xcf\xa1\x7f" - "\x8e\x61\x19\x48\x31\xe8\x1a\x8f" - "\x22\xbe\xc8\xc7\x28\xfe\xfb\x74" - "\x7e\xd0\x35\xeb\x50\x82\xaa\x2b", - }, { - .size = SHA256_DIGEST_SIZE, - .digest = "\xe3\xb0\xc4\x42\x98\xfc\x1c\x14" - "\x9a\xfb\xf4\xc8\x99\x6f\xb9\x24" - "\x27\xae\x41\xe4\x64\x9b\x93\x4c" - "\xa4\x95\x99\x1b\x78\x52\xb8\x55", - }, { - .size = SHA384_DIGEST_SIZE, - .digest = "\x38\xb0\x60\xa7\x51\xac\x96\x38" - "\x4c\xd9\x32\x7e\xb1\xb1\xe3\x6a" - "\x21\xfd\xb7\x11\x14\xbe\x07\x43" - "\x4c\x0c\xc7\xbf\x63\xf6\xe1\xda" - "\x27\x4e\xde\xbf\xe7\x6f\x65\xfb" - "\xd5\x1a\xd2\xf1\x48\x98\xb9\x5b", - }, { - .size = SHA512_DIGEST_SIZE, - .digest = "\xcf\x83\xe1\x35\x7e\xef\xb8\xbd" - "\xf1\x54\x28\x50\xd6\x6d\x80\x07" - "\xd6\x20\xe4\x05\x0b\x57\x15\xdc" - "\x83\xf4\xa9\x21\xd3\x6c\xe9\xce" - "\x47\xd0\xd1\x3c\x5d\x85\xf2\xb0" - "\xff\x83\x18\xd2\x87\x7e\xec\x2f" - "\x63\xb9\x31\xbd\x47\x41\x7a\x81" - "\xa5\x38\x32\x7a\xf9\x27\xda\x3e", - }, { - .size = SHA3_256_DIGEST_SIZE, - .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66" - "\x51\xc1\x47\x56\xa0\x61\xd6\x62" - "\xf5\x80\xff\x4d\xe4\x3b\x49\xfa" - "\x82\xd8\x0a\x4b\x80\xf8\x43\x4a", - }, { - .size = SHA3_384_DIGEST_SIZE, - 
.digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d" - "\x01\x10\x7d\x85\x2e\x4c\x24\x85" - "\xc5\x1a\x50\xaa\xaa\x94\xfc\x61" - "\x99\x5e\x71\xbb\xee\x98\x3a\x2a" - "\xc3\x71\x38\x31\x26\x4a\xdb\x47" - "\xfb\x6b\xd1\xe0\x58\xd5\xf0\x04", - }, { - .size = SHA3_512_DIGEST_SIZE, - .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5" - "\xc8\xb5\x67\xdc\x18\x5a\x75\x6e" - "\x97\xc9\x82\x16\x4f\xe2\x58\x59" - "\xe0\xd1\xdc\xc1\x47\x5c\x80\xa6" - "\x15\xb2\x12\x3a\xf1\xf5\xf9\x4c" - "\x11\xe3\xe9\x40\x2c\x3a\xc5\x58" - "\xf5\x00\x19\x9d\x95\xb6\xd3\xe3" - "\x01\x75\x85\x86\x28\x1d\xcd\x26", - } - }; - - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; - se_dev = g_virtual_se_dev[engine_id]; - - if (req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 || - req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256) { - if (req_ctx->digest_size == 0) { - dev_info(se_dev->dev, "digest size is 0\n"); - return 0; - } - } - - if (req->nbytes == 0) { - if (req_ctx->total_count > 0) { - if (is_last == false) { - dev_info(se_dev->dev, "empty packet\n"); - return 0; - } - - if (req_ctx->residual_bytes > 0) { /*final() */ - ret = tegra_hv_vse_safety_sha_send_one(req, - req_ctx->residual_bytes, is_last); - if (ret) { - dev_err(se_dev->dev, - "%s: failed to send last data %u\n", - __func__, req_ctx->residual_bytes); - return ret; - } - req_ctx->residual_bytes = 0; - } - - if (is_last) { - if (req->result) { - memcpy(req->result, - sha_ctx->hash_result->buf_ptr, - req_ctx->digest_size); - } else { - dev_err(se_dev->dev, - "Invalid clinet result buffer\n"); - } - } - - return 0; - } - - if (req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 || - req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256) { - ret = tegra_hv_vse_safety_sha_send_one(req, - 0, is_last); - if (ret) { - dev_err(se_dev->dev, "%s: failed to send last data\n", - __func__); - return ret; - } - - if (is_last) { - if (req->result) { - memcpy(req->result, - sha_ctx->hash_result->buf_ptr, - req_ctx->digest_size); - } else { - dev_err(se_dev->dev, - "Invalid clinet result buffer\n"); - } - } - - return 0; - } - /* If the request length is zero, SW WAR for zero length SHA - * operation since SE HW can't accept zero length SHA operation - */ - if (req_ctx->mode == VIRTUAL_SE_OP_MODE_SM3) - mode = VIRTUAL_SE_OP_MODE_SM3; - else if (req_ctx->mode < VIRTUAL_SE_OP_MODE_SHA3_256) - mode = req_ctx->mode - VIRTUAL_SE_OP_MODE_SHA256 + 1; - else - mode = req_ctx->mode - VIRTUAL_SE_OP_MODE_SHA256 - 1; - - if (is_last) { - if (req->result) { - memcpy(req->result, - zero_vec[mode].digest, - zero_vec[mode].size); - } else { - dev_err(se_dev->dev, - "Invalid clinet result buffer\n"); - } - } - - return 0; - } - - num_blks = req->nbytes / req_ctx->blk_size; - - if (sg_nents(req->src) > 1) - req_ctx->force_align = true; - - if (req_ctx->force_align == false && num_blks > 0) - ret = tegra_hv_vse_safety_sha_fast_path(req, is_last, process_cur_req); - else - ret = tegra_hv_vse_safety_sha_slow_path(req, is_last, process_cur_req); - - return ret; -} - static int tegra_hv_vse_safety_sha_init(struct ahash_request *req) { struct crypto_ahash *tfm; struct tegra_virtual_se_req_context *req_ctx; struct tegra_virtual_se_sha_context *sha_ctx; struct tegra_virtual_se_dev *se_dev; - uint32_t hash_result_len; uint32_t engine_id; - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; - se_dev = g_virtual_se_dev[engine_id]; - if (!req) { - dev_err(se_dev->dev, "SHA request not valid\n"); + pr_err("%s: 
SHA request invalid\n", __func__); return -EINVAL; } - /* Return error if engine is in suspended state */ - if (atomic_read(&se_dev->se_suspended)) - return -ENODEV; - req_ctx = ahash_request_ctx(req); if (!req_ctx) { - dev_err(se_dev->dev, "SHA req_ctx not valid\n"); + pr_err("%s: SHA req_ctx not valid\n", __func__); return -EINVAL; } tfm = crypto_ahash_reqtfm(req); if (!tfm) { - dev_err(se_dev->dev, "SHA transform not valid\n"); + pr_err("%s: SHA transform not valid\n", __func__); return -EINVAL; } sha_ctx = crypto_ahash_ctx(tfm); - req_ctx->digest_size = crypto_ahash_digestsize(tfm); - if (strcmp(crypto_ahash_alg_name(tfm), "sha256-vse") == 0) { - req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA256; - req_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT; - req_ctx->intermediate_digest_size = SHA256_DIGEST_SIZE; - } else if (strcmp(crypto_ahash_alg_name(tfm), "sha384-vse") == 0) { - req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA384; - req_ctx->blk_size = - TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1024BIT; - /* - * The intermediate digest size of SHA384 is same as SHA512 - */ - req_ctx->intermediate_digest_size = SHA512_DIGEST_SIZE; - } else if (strcmp(crypto_ahash_alg_name(tfm), "sha512-vse") == 0) { - req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA512; - req_ctx->blk_size = - TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1024BIT; - req_ctx->intermediate_digest_size = SHA512_DIGEST_SIZE; - } else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-256-vse") == 0) { - req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA3_256; - req_ctx->blk_size = - TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1088BIT; - req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; - } else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-384-vse") == 0) { - req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA3_384; - req_ctx->blk_size = - TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_832BIT; - req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; - } else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-512-vse") == 0) { - req_ctx->mode = VIRTUAL_SE_OP_MODE_SHA3_512; - req_ctx->blk_size = - TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_576BIT; - req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; - } else if (strcmp(crypto_ahash_alg_name(tfm), "shake128-vse") == 0) { - req_ctx->mode = VIRTUAL_SE_OP_MODE_SHAKE128; - req_ctx->blk_size = - TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1344BIT; - req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; - req_ctx->digest_size = sha_ctx->digest_size; - } else if (strcmp(crypto_ahash_alg_name(tfm), "shake256-vse") == 0) { - req_ctx->mode = VIRTUAL_SE_OP_MODE_SHAKE256; - req_ctx->blk_size = - TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1088BIT; - req_ctx->intermediate_digest_size = SHA3_STATE_SIZE; - req_ctx->digest_size = sha_ctx->digest_size; - } else if ((strcmp(crypto_ahash_alg_name(tfm), "sm3-vse") == 0) && - (se_dev->chipdata->sm_supported)) { - req_ctx->mode = VIRTUAL_SE_OP_MODE_SM3; - req_ctx->blk_size = SM3_BLOCK_SIZE; - req_ctx->intermediate_digest_size = SM3_DIGEST_SIZE; - } else { - dev_err(se_dev->dev, "Invalid SHA Mode\n"); - return -EINVAL; - } - - sha_ctx->residual_plaintext = &g_node_dma[sha_ctx->node_id].se_dma_buf[0]; - if (!sha_ctx->residual_plaintext->buf_ptr) { - dev_err(se_dev->dev, "%s res_buf is NULL\n", __func__); - return -ENOMEM; - } - - sha_ctx->plaintext = &g_node_dma[sha_ctx->node_id].se_dma_buf[1]; - if (!sha_ctx->plaintext->buf_ptr) { - dev_err(se_dev->dev, "%s src_buf is NULL\n", __func__); - return -ENOMEM; - } - - if ((req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128) || - (req_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256)) { - hash_result_len = 
sha_ctx->digest_size; - } else { - hash_result_len = req_ctx->intermediate_digest_size; - } - - sha_ctx->hash_result = &g_node_dma[sha_ctx->node_id].se_dma_buf[2]; - if (!sha_ctx->hash_result->buf_ptr) { - dev_err(se_dev->dev, "%s hash_result is NULL\n", __func__); - return -ENOMEM; - } - - if (hash_result_len > sha_ctx->hash_result->buf_len) { - dev_err(se_dev->dev, "%s hash_result buffer size insufficient\n", __func__); - return -ENOMEM; - } - - req_ctx->total_count = 0; - req_ctx->is_first = true; - req_ctx->residual_bytes = 0; - req_ctx->req_context_initialized = true; - req_ctx->force_align = false; - - return 0; -} - -static void tegra_hv_vse_safety_sha_req_deinit(struct ahash_request *req) -{ - struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); - struct tegra_virtual_se_sha_context *sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - - sha_ctx->residual_plaintext = NULL; - sha_ctx->plaintext = NULL; - sha_ctx->hash_result = NULL; - - req_ctx->req_context_initialized = false; -} - -static int tegra_hv_vse_safety_sha_update(struct ahash_request *req) -{ - struct tegra_virtual_se_dev *se_dev; - struct tegra_virtual_se_req_context *req_ctx; - int ret = 0; - uint32_t engine_id; - struct tegra_virtual_se_sha_context *sha_ctx; - - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - req_ctx = ahash_request_ctx(req); engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; se_dev = g_virtual_se_dev[engine_id]; - if (!req) { - dev_err(se_dev->dev, "SHA request not valid\n"); - return -EINVAL; - } - /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; - req_ctx = ahash_request_ctx(req); - if (!req_ctx->req_context_initialized) { - dev_err(se_dev->dev, - "%s Request ctx not initialized\n", __func__); + if (strcmp(crypto_ahash_alg_name(tfm), "sha256-vse") == 0) { + sha_ctx->mode = VIRTUAL_SE_OP_MODE_SHA256; + sha_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_512BIT; + sha_ctx->intermediate_digest_size = SHA256_DIGEST_SIZE; + sha_ctx->digest_size = crypto_ahash_digestsize(tfm); + } else if (strcmp(crypto_ahash_alg_name(tfm), "sha384-vse") == 0) { + sha_ctx->mode = VIRTUAL_SE_OP_MODE_SHA384; + sha_ctx->blk_size = + TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1024BIT; + /* + * The intermediate digest size of SHA384 is same as SHA512 + */ + sha_ctx->intermediate_digest_size = SHA512_DIGEST_SIZE; + sha_ctx->digest_size = crypto_ahash_digestsize(tfm); + } else if (strcmp(crypto_ahash_alg_name(tfm), "sha512-vse") == 0) { + sha_ctx->mode = VIRTUAL_SE_OP_MODE_SHA512; + sha_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1024BIT; + sha_ctx->intermediate_digest_size = SHA512_DIGEST_SIZE; + sha_ctx->digest_size = crypto_ahash_digestsize(tfm); + } else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-256-vse") == 0) { + sha_ctx->mode = VIRTUAL_SE_OP_MODE_SHA3_256; + sha_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1088BIT; + sha_ctx->intermediate_digest_size = SHA3_STATE_SIZE; + sha_ctx->digest_size = crypto_ahash_digestsize(tfm); + } else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-384-vse") == 0) { + sha_ctx->mode = VIRTUAL_SE_OP_MODE_SHA3_384; + sha_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_832BIT; + sha_ctx->intermediate_digest_size = SHA3_STATE_SIZE; + sha_ctx->digest_size = crypto_ahash_digestsize(tfm); + } else if (strcmp(crypto_ahash_alg_name(tfm), "sha3-512-vse") == 0) { + sha_ctx->mode = VIRTUAL_SE_OP_MODE_SHA3_512; + sha_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_576BIT; + 
sha_ctx->intermediate_digest_size = SHA3_STATE_SIZE; + sha_ctx->digest_size = crypto_ahash_digestsize(tfm); + } else if (strcmp(crypto_ahash_alg_name(tfm), "shake128-vse") == 0) { + sha_ctx->mode = VIRTUAL_SE_OP_MODE_SHAKE128; + sha_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1344BIT; + sha_ctx->intermediate_digest_size = SHA3_STATE_SIZE; + } else if (strcmp(crypto_ahash_alg_name(tfm), "shake256-vse") == 0) { + sha_ctx->mode = VIRTUAL_SE_OP_MODE_SHAKE256; + sha_ctx->blk_size = TEGRA_VIRTUAL_SE_SHA_HASH_BLOCK_SIZE_1088BIT; + sha_ctx->intermediate_digest_size = SHA3_STATE_SIZE; + } else if ((strcmp(crypto_ahash_alg_name(tfm), "sm3-vse") == 0) && + (se_dev->chipdata->sm_supported)) { + sha_ctx->mode = VIRTUAL_SE_OP_MODE_SM3; + sha_ctx->blk_size = SM3_BLOCK_SIZE; + sha_ctx->intermediate_digest_size = SM3_DIGEST_SIZE; + sha_ctx->digest_size = crypto_ahash_digestsize(tfm); + } else { + dev_err(se_dev->dev, "Invalid SHA Mode\n"); return -EINVAL; } - ret = tegra_hv_vse_safety_sha_op(req, false, false); + sha_ctx->plaintext = &g_node_dma[sha_ctx->node_id].se_dma_buf[0]; + if (!sha_ctx->plaintext->buf_ptr) { + dev_err(se_dev->dev, "%s src_buf is NULL\n", __func__); + return -ENOMEM; + } + + sha_ctx->hash_result = &g_node_dma[sha_ctx->node_id].se_dma_buf[1]; + if (!sha_ctx->hash_result->buf_ptr) { + dev_err(se_dev->dev, "%s hash_result is NULL\n", __func__); + return -ENOMEM; + } + + if (sha_ctx->digest_size > sha_ctx->hash_result->buf_len) { + dev_err(se_dev->dev, "%s hash_result buffer size insufficient\n", __func__); + return -ENOMEM; + } + + req_ctx->req_context_initialized = true; + + return 0; +} + +static int tegra_hv_vse_safety_sha_op(struct ahash_request *req, bool is_last) +{ + struct tegra_virtual_se_sha_context *sha_ctx = + crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct tegra_virtual_se_dev *se_dev; + struct tegra_virtual_se_ivc_hdr_t *ivc_hdr; + struct tegra_virtual_se_ivc_tx_msg_t *ivc_tx; + struct tegra_virtual_se_ivc_msg_t ivc_req_msg = {0}; + union tegra_virtual_se_sha_args *psha; + struct tegra_hv_ivc_cookie *pivck = g_crypto_to_ivc_map[sha_ctx->node_id].ivck; + struct tegra_vse_priv_data priv = {0}; + struct tegra_vse_tag *priv_data_ptr; + u64 msg_len = 0, temp_len = 0; + uint32_t engine_id; + int err = 0; + + void *src_buf = NULL; + dma_addr_t src_buf_addr; + void *hash_buf = NULL; + dma_addr_t hash_buf_addr; + + engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; + se_dev = g_virtual_se_dev[engine_id]; + + if (sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 || + sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256) { + if (sha_ctx->digest_size == 0) { + dev_info(se_dev->dev, "digest size is 0\n"); + return 0; + } + } + + if (req->nbytes > TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN) { + dev_err(se_dev->dev, "%s: input buffer size is invalid\n", __func__); + return -EINVAL; + } + + if ((!is_last) && (req->nbytes % sha_ctx->blk_size != 0)) { + dev_err(se_dev->dev, "%s: non-last buffer size is invalid\n", __func__); + return -EINVAL; + } + + src_buf = sha_ctx->plaintext->buf_ptr; + src_buf_addr = sha_ctx->plaintext->buf_iova; + + hash_buf = sha_ctx->hash_result->buf_ptr; + hash_buf_addr = sha_ctx->hash_result->buf_iova; + + g_crypto_to_ivc_map[sha_ctx->node_id].vse_thread_start = true; + + ivc_tx = &ivc_req_msg.tx[0]; + ivc_hdr = &ivc_req_msg.ivc_hdr; + ivc_hdr->num_reqs = 1; + ivc_hdr->header_magic[0] = 'N'; + ivc_hdr->header_magic[1] = 'V'; + ivc_hdr->header_magic[2] = 'D'; + ivc_hdr->header_magic[3] = 'A'; + ivc_hdr->engine = VIRTUAL_SE_SHA; + ivc_tx->cmd = 
TEGRA_VIRTUAL_SE_CMD_SHA_HASH; + + psha = &(ivc_tx->sha); + psha->op_hash.mode = sha_ctx->mode; + psha->op_hash.msg_total_length[2] = 0; + psha->op_hash.msg_total_length[3] = 0; + psha->op_hash.msg_left_length[2] = 0; + psha->op_hash.msg_left_length[3] = 0; + psha->op_hash.hash_length = sha_ctx->digest_size; + psha->op_hash.dst = hash_buf_addr; + + if (!sha_ctx->is_first) + memcpy(psha->op_hash.hash, sha_ctx->intermediate_digest, + sha_ctx->intermediate_digest_size); + + msg_len = req->nbytes; + sg_copy_to_buffer(req->src, (u32)sg_nents(req->src), src_buf, msg_len); + + if (is_last == true && + (sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE128 || + sha_ctx->mode == VIRTUAL_SE_OP_MODE_SHAKE256)) { + ((uint8_t *)src_buf)[msg_len] = 0xff; + msg_len++; + sha_ctx->total_count++; + } + + temp_len = msg_len; + + if (is_last) { + /* Set msg left length equal to input buffer size */ + psha->op_hash.msg_left_length[0] = msg_len & 0xFFFFFFFF; + psha->op_hash.msg_left_length[1] = msg_len >> 32; + + /* Set msg total length equal to sum of all input buffer size */ + psha->op_hash.msg_total_length[0] = sha_ctx->total_count & 0xFFFFFFFF; + psha->op_hash.msg_total_length[1] = sha_ctx->total_count >> 32; + } else { + /* Set msg left length greater than input buffer size */ + temp_len += 8; + psha->op_hash.msg_left_length[0] = temp_len & 0xFFFFFFFF; + psha->op_hash.msg_left_length[1] = temp_len >> 32; + + /* Set msg total length greater than msg left length for non-first request */ + if (!sha_ctx->is_first) + temp_len += 8; + + psha->op_hash.msg_total_length[0] = temp_len & 0xFFFFFFFF; + psha->op_hash.msg_total_length[1] = temp_len >> 32; + } + + psha->op_hash.src_addr = src_buf_addr; + psha->op_hash.src_buf_size = msg_len; + + priv_data_ptr = (struct tegra_vse_tag *)ivc_req_msg.ivc_hdr.tag; + priv_data_ptr->priv_data = (unsigned int *)&priv; + priv.cmd = VIRTUAL_SE_PROCESS; + priv.se_dev = se_dev; + init_completion(&priv.alg_complete); + + err = tegra_hv_vse_safety_send_ivc_wait(se_dev, pivck, &priv, &ivc_req_msg, + sizeof(struct tegra_virtual_se_ivc_msg_t), sha_ctx->node_id); + if (err) { + dev_err(se_dev->dev, "failed to send data over ivc err %d\n", err); + goto exit; + } + + if (priv.rx_status != 0) { + err = status_to_errno(priv.rx_status); + dev_err(se_dev->dev, "%s: SE server returned error %u\n", + __func__, priv.rx_status); + goto exit; + } + + if (is_last) + memcpy(req->result, hash_buf, sha_ctx->digest_size); + else + memcpy(sha_ctx->intermediate_digest, hash_buf, sha_ctx->intermediate_digest_size); + +exit: + return err; +} + +static int tegra_hv_vse_safety_sha_update(struct ahash_request *req) +{ + struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); + struct tegra_virtual_se_sha_context *sha_ctx; + struct tegra_virtual_se_dev *se_dev; + uint32_t engine_id; + int ret = 0; + + if (!req) { + pr_err("%s SHA request not valid\n", __func__); + return -EINVAL; + } + + if (!req_ctx->req_context_initialized) { + pr_err("%s Request ctx not initialized\n", __func__); + return -EINVAL; + } + + sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + if (!sha_ctx) { + pr_err("%s SHA req_ctx not valid\n", __func__); + return -EINVAL; + } + + engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; + se_dev = g_virtual_se_dev[engine_id]; + + /* Return error if engine is in suspended state */ + if (atomic_read(&se_dev->se_suspended)) + return -ENODEV; + + ret = tegra_hv_vse_safety_sha_op(req, false); if (ret) dev_err(se_dev->dev, "tegra_se_sha_update failed - %d\n", ret); @@ -1816,116 
+1367,54 @@ static int tegra_hv_vse_safety_sha_update(struct ahash_request *req) static int tegra_hv_vse_safety_sha_finup(struct ahash_request *req) { + struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); + struct tegra_virtual_se_sha_context *sha_ctx = NULL; struct tegra_virtual_se_dev *se_dev; - struct tegra_virtual_se_req_context *req_ctx; - int ret = 0; uint32_t engine_id; - struct tegra_virtual_se_sha_context *sha_ctx; - - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - req_ctx = ahash_request_ctx(req); - engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; - se_dev = g_virtual_se_dev[engine_id]; + int ret = 0; if (!req) { - dev_err(se_dev->dev, "SHA request not valid\n"); + pr_err("%s SHA request not valid\n", __func__); return -EINVAL; } + sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + if (!sha_ctx) { + pr_err("%s SHA req_ctx not valid\n", __func__); + return -EINVAL; + } + + if (!req_ctx->req_context_initialized) { + pr_err("%s Request ctx not initialized\n", __func__); + return -EINVAL; + } + + engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; + se_dev = g_virtual_se_dev[engine_id]; + /* Return error if engine is in suspended state */ if (atomic_read(&se_dev->se_suspended)) return -ENODEV; - req_ctx = ahash_request_ctx(req); - if (!req_ctx->req_context_initialized) { - dev_err(se_dev->dev, - "%s Request ctx not initialized\n", __func__); - return -EINVAL; - } - - ret = tegra_hv_vse_safety_sha_op(req, true, true); + ret = tegra_hv_vse_safety_sha_op(req, true); if (ret) dev_err(se_dev->dev, "tegra_se_sha_finup failed - %d\n", ret); - tegra_hv_vse_safety_sha_req_deinit(req); + req_ctx->req_context_initialized = false; return ret; } static int tegra_hv_vse_safety_sha_final(struct ahash_request *req) { - struct tegra_virtual_se_dev *se_dev; - struct tegra_virtual_se_req_context *req_ctx; - int ret = 0; - uint32_t engine_id; - struct tegra_virtual_se_sha_context *sha_ctx; - - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - req_ctx = ahash_request_ctx(req); - engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; - se_dev = g_virtual_se_dev[engine_id]; - - if (!req) { - dev_err(se_dev->dev, "SHA request not valid\n"); - return -EINVAL; - } - - /* Return error if engine is in suspended state */ - if (atomic_read(&se_dev->se_suspended)) - return -ENODEV; - - req_ctx = ahash_request_ctx(req); - if (!req_ctx->req_context_initialized) { - dev_err(se_dev->dev, - "%s Request ctx not initialized\n", __func__); - return -EINVAL; - } - - /* Do not process data in given request */ - ret = tegra_hv_vse_safety_sha_op(req, true, false); - if (ret) - dev_err(se_dev->dev, "tegra_se_sha_final failed - %d\n", ret); - - tegra_hv_vse_safety_sha_req_deinit(req); - - return ret; + // Unsupported + return -EINVAL; } static int tegra_hv_vse_safety_sha_digest(struct ahash_request *req) { - struct tegra_virtual_se_dev *se_dev; - int ret = 0; - uint32_t engine_id; - struct tegra_virtual_se_sha_context *sha_ctx; - struct tegra_virtual_se_req_context *req_ctx; - - sha_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - req_ctx = ahash_request_ctx(req); - engine_id = g_crypto_to_ivc_map[sha_ctx->node_id].se_engine; - se_dev = g_virtual_se_dev[engine_id]; - - if (!req) { - dev_err(se_dev->dev, "SHA request not valid\n"); - return -EINVAL; - } - - /* Return error if engine is in suspended state */ - if (atomic_read(&se_dev->se_suspended)) - return -ENODEV; - - ret = tegra_hv_vse_safety_sha_init(req); - if (ret) { - dev_err(se_dev->dev, "%s init failed 
- %d\n", __func__, ret); - return ret; - } - - ret = tegra_hv_vse_safety_sha_op(req, true, true); - if (ret) - dev_err(se_dev->dev, "tegra_se_sha_digest failed - %d\n", ret); - - tegra_hv_vse_safety_sha_req_deinit(req); - - return ret; + // Unsupported + return -EINVAL; } static int tegra_hv_vse_safety_hmac_sha_setkey(struct crypto_ahash *tfm, const u8 *key, @@ -2008,8 +1497,6 @@ static int tegra_hv_vse_safety_hmac_sha_init(struct ahash_request *req) return -EINVAL; } - req_ctx->total_count = 0; - req_ctx->is_first = true; req_ctx->req_context_initialized = true; return 0; @@ -2017,7 +1504,6 @@ static int tegra_hv_vse_safety_hmac_sha_init(struct ahash_request *req) static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req, bool is_last) { - struct tegra_virtual_se_req_context *req_ctx = ahash_request_ctx(req); struct tegra_virtual_se_hmac_sha_context *hmac_ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_vse_hmac_sha_req_data *hmac_req_data; @@ -2109,7 +1595,6 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req, bool is msg_len = req->nbytes; temp_len = msg_len; - req_ctx->total_count += msg_len; ivc_tx = &ivc_req_msg.tx[0]; ivc_hdr = &ivc_req_msg.ivc_hdr; @@ -2139,8 +1624,8 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req, bool is phmac->msg_left_length[1] = msg_len >> 32; /* Set msg total length equal to sum of all input buffer size */ - phmac->msg_total_length[0] = req_ctx->total_count & 0xFFFFFFFF; - phmac->msg_total_length[1] = req_ctx->total_count >> 32; + phmac->msg_total_length[0] = hmac_ctx->total_count & 0xFFFFFFFF; + phmac->msg_total_length[1] = hmac_ctx->total_count >> 32; } else { /* Set msg left length greater than input buffer size */ @@ -2149,14 +1634,11 @@ static int tegra_hv_vse_safety_hmac_sha_sv_op(struct ahash_request *req, bool is phmac->msg_left_length[1] = temp_len >> 32; /* Set msg total length greater than msg left length for non-first request */ - if (req_ctx->is_first) - req_ctx->is_first = false; - else + if (!hmac_ctx->is_first) temp_len += 8; phmac->msg_total_length[0] = temp_len & 0xFFFFFFFF; phmac->msg_total_length[1] = temp_len >> 32; - } if (se_dev->chipdata->hmac_verify_hw_support == false) { @@ -3793,12 +3275,11 @@ static void tegra_vse_aes_gcm_exit(struct crypto_aead *tfm) /* nothing to do as user unloads the key manually with tzvault*/ } -static int tegra_vse_aes_gcm_check_params(struct aead_request *req, - bool encrypt) +static int tegra_vse_aes_gcm_check_params(struct aead_request *req, bool encrypt, bool is_hw_req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct tegra_virtual_se_aes_context *aes_ctx = crypto_aead_ctx(tfm); - uint32_t cryptlen; + uint32_t cryptlen = 0U; struct tegra_virtual_se_dev *se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[aes_ctx->node_id].se_engine]; @@ -3810,25 +3291,25 @@ static int tegra_vse_aes_gcm_check_params(struct aead_request *req, return -EINVAL; } - if (!encrypt) { if (req->cryptlen < aes_ctx->authsize) { dev_err(se_dev->dev, "%s: gcm_dec cryptlen is invalid\n", __func__); return -EINVAL; } - cryptlen = req->cryptlen - aes_ctx->authsize; - if (cryptlen > TEGRA_VIRTUAL_SE_MAX_GCMDEC_BUFLEN) { - dev_err(se_dev->dev, "%s: dec srcbuf len is invalid %d\n", __func__, - cryptlen); - return -EINVAL; - } - } else { cryptlen = req->cryptlen; - if (cryptlen > TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN) { - dev_err(se_dev->dev, "%s: enc srcbuf len is invalid %d\n", __func__, - cryptlen); + } + + if (cryptlen > 
TEGRA_VIRTUAL_SE_MAX_SUPPORTED_BUFLEN) { + dev_err(se_dev->dev, "%s: enc srcbuf len is invalid %d\n", __func__, cryptlen); + return -EINVAL; + } + + if ((!is_hw_req) && (!encrypt)) { + if (cryptlen > TEGRA_VIRTUAL_SE_MAX_GCMDEC_BUFLEN) { + dev_err(se_dev->dev, "%s: dec srcbuf len is invalid %d\n", __func__, + cryptlen); return -EINVAL; } } @@ -3869,7 +3350,7 @@ static int tegra_vse_aes_gcm_enc_dec(struct aead_request *req, bool encrypt) dma_addr_t src_buf_addr; dma_addr_t tag_buf_addr; - err = tegra_vse_aes_gcm_check_params(req, encrypt); + err = tegra_vse_aes_gcm_check_params(req, encrypt, false); if (err != 0) goto free_exit; @@ -4098,7 +3579,7 @@ static int tegra_vse_aes_gcm_enc_dec_hw_support(struct aead_request *req, bool e dma_addr_t tag_buf_addr; dma_addr_t mac_buf_addr; - err = tegra_vse_aes_gcm_check_params(req, encrypt); + err = tegra_vse_aes_gcm_check_params(req, encrypt, true); if (err != 0) goto free_exit; @@ -5837,34 +5318,28 @@ static int tegra_hv_vse_allocate_se_dma_bufs(struct tegra_vse_node_dma *node_dma /* * For SHA algs, the worst case requirement for SHAKE128/SHAKE256: * 1. plaintext buffer(requires up to max limit specified in DT) - * 2. residual plaintext buffer(requires up to max limit specified in DT) - * 3. digest buffer(support a maximum digest size of 1024 bytes) + * 2. digest buffer(support a maximum digest size of 1024 bytes for SHAKE) */ buf_sizes[0] = ivc_map->max_buffer_size; - buf_sizes[1] = ivc_map->max_buffer_size; - buf_sizes[2] = 1024U; + buf_sizes[1] = 1024U; break; case VIRTUAL_GCSE1_SHA: /* * For SHA algs, the worst case requirement for SHAKE128/SHAKE256: * 1. plaintext buffer(requires up to max limit specified in DT) - * 2. residual plaintext buffer(requires up to max limit specified in DT) - * 3. digest buffer(support a maximum digest size of 1024 bytes) + * 2. digest buffer(support a maximum digest size of 1024 bytes for SHAKE) */ buf_sizes[0] = ivc_map->max_buffer_size; - buf_sizes[1] = ivc_map->max_buffer_size; - buf_sizes[2] = 1024U; + buf_sizes[1] = 1024U; break; case VIRTUAL_GCSE2_SHA: /* * For SHA algs, the worst case requirement for SHAKE128/SHAKE256: * 1. plaintext buffer(requires up to max limit specified in DT) - * 2. residual plaintext buffer(requires up to max limit specified in DT) - * 3. digest buffer(support a maximum digest size of 1024 bytes) + * 2. 
digest buffer(support a maximum digest size of 1024 bytes for SHAKE) */ buf_sizes[0] = ivc_map->max_buffer_size; - buf_sizes[1] = ivc_map->max_buffer_size; - buf_sizes[2] = 1024U; + buf_sizes[1] = 1024U; break; default: err = 0; diff --git a/drivers/crypto/tegra-hv-vse.h b/drivers/crypto/tegra-hv-vse.h index bf40e5fb..4994af2d 100644 --- a/drivers/crypto/tegra-hv-vse.h +++ b/drivers/crypto/tegra-hv-vse.h @@ -166,13 +166,16 @@ struct tegra_virtual_se_sha_context { /* Security Engine device */ struct tegra_virtual_se_dev *se_dev; /* SHA operation mode */ - u32 op_mode; + uint32_t mode; + u32 blk_size; unsigned int digest_size; - u8 mode; + uint8_t *intermediate_digest; + unsigned int intermediate_digest_size; + u64 total_count; /* Total bytes in all the requests */ + bool is_first; /*Crypto dev instance*/ uint32_t node_id; const struct tegra_vse_dma_buf *plaintext; - const struct tegra_vse_dma_buf *residual_plaintext; const struct tegra_vse_dma_buf *hash_result; }; @@ -185,6 +188,8 @@ struct tegra_virtual_se_hmac_sha_context { unsigned int digest_size; /* Total bytes in all the requests */ u64 total_count; + /* Represents first block */ + bool is_first; bool is_key_slot_allocated; /* Keyslot for HMAC-SHA request */ u8 aes_keyslot[KEYSLOT_SIZE_BYTES]; @@ -199,15 +204,7 @@ struct tegra_virtual_se_hmac_sha_context { struct tegra_virtual_se_req_context { /* Security Engine device */ struct tegra_virtual_se_dev *se_dev; - unsigned int digest_size; - unsigned int intermediate_digest_size; - u8 mode; /* SHA operation mode */ - u64 total_count; /* Total bytes in all the requests */ - u32 residual_bytes; /* Residual byte count */ - u32 blk_size; /* SHA block size */ - bool is_first; /* Represents first block */ bool req_context_initialized; /* Mark initialization status */ - bool force_align; /* Enforce buffer alignment */ /*Crypto dev instance*/ uint32_t node_id; }; diff --git a/drivers/crypto/tegra-nvvse-cryptodev.c b/drivers/crypto/tegra-nvvse-cryptodev.c index 910c38f7..4610ca01 100644 --- a/drivers/crypto/tegra-nvvse-cryptodev.c +++ b/drivers/crypto/tegra-nvvse-cryptodev.c @@ -66,7 +66,7 @@ struct nvvse_devnode { struct miscdevice *g_misc_devices; - bool sha_init_done; + struct mutex lock; } nvvse_devnode[MAX_NUMBER_MISC_DEVICES]; static struct tegra_nvvse_get_ivc_db ivc_database; @@ -96,21 +96,20 @@ typedef enum { } sha_op_state; struct crypto_sha_state { - uint32_t sha_type; - uint32_t digest_size; - uint64_t total_bytes; - uint64_t remaining_bytes; uint8_t *in_buf; struct tnvvse_crypto_completion sha_complete; struct ahash_request *req; struct crypto_ahash *tfm; char *result_buff; - sha_op_state sha_done_success; + bool sha_init_done; + uint64_t sha_total_msg_length; + char *sha_intermediate_digest; + bool hmac_sha_init_done; + uint64_t hmac_sha_total_msg_length; }; /* Tegra NVVSE crypt context */ struct tnvvse_crypto_ctx { - struct mutex lock; struct crypto_sha_state sha_state; uint8_t intermediate_counter[TEGRA_NVVSE_AES_IV_LEN]; char *rng_buff; @@ -229,258 +228,281 @@ fail: return status; } -static int tnvvse_crypto_sha_init(struct tnvvse_crypto_ctx *ctx, - struct tegra_nvvse_sha_init_ctl *init_ctl) +static int tnvvse_crypto_validate_sha_update_req(struct tnvvse_crypto_ctx *ctx, + struct tegra_nvvse_sha_update_ctl *sha_update_ctl) { struct crypto_sha_state *sha_state = &ctx->sha_state; - struct tegra_virtual_se_sha_context *sha_ctx; - struct crypto_ahash *tfm; - struct ahash_request *req; - const char *driver_name; - int ret = -ENOMEM; - char *result_buff = NULL; + enum 
tegra_nvvse_sha_type sha_type = sha_update_ctl->sha_type;
+	int32_t ret = 0;
-	if (nvvse_devnode[ctx->node_id].sha_init_done) {
-		pr_err("%s: Sha init already done for this node_id %u\n", __func__, ctx->node_id);
-		return -EAGAIN;
-	}
-
-	if (init_ctl->sha_type < TEGRA_NVVSE_SHA_TYPE_SHA256 ||
-			init_ctl->sha_type >= TEGRA_NVVSE_SHA_TYPE_MAX) {
-		pr_err("%s(): SHA Type requested %d is not supported\n",
-				__func__, init_ctl->sha_type);
-		return -EINVAL;
-	}
-
-	tfm = crypto_alloc_ahash(sha_alg_names[init_ctl->sha_type], 0, 0);
-	if (IS_ERR(tfm)) {
-		pr_err("%s(): Failed to load transform for %s:%ld\n",
-				__func__, sha_alg_names[init_ctl->sha_type], PTR_ERR(tfm));
-		ret = PTR_ERR(tfm);
-		goto out;
-	}
-
-	sha_ctx = crypto_ahash_ctx(tfm);
-	sha_ctx->node_id = ctx->node_id;
-
-	driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));;
-	if (driver_name == NULL) {
-		pr_err("%s(): Failed to get driver name\n", __func__);
-		goto free_tfm;
-	}
-	pr_debug("%s(): Algo name %s, driver name %s\n",
-			__func__, sha_alg_names[init_ctl->sha_type], driver_name);
-
-	req = ahash_request_alloc(tfm, GFP_KERNEL);
-	if (!req) {
-		pr_err("%s(): Failed to allocate request for %s\n",
-				__func__, sha_alg_names[init_ctl->sha_type]);
-		goto free_tfm;
-	}
-
-	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-			tnvvse_crypto_complete, &sha_state->sha_complete);
-
-	init_completion(&sha_state->sha_complete.restart);
-	sha_state->sha_complete.req_err = 0;
-
-	/* Shake128/Shake256 have variable digest size */
-	if ((init_ctl->sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE128) ||
-			(init_ctl->sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE256)) {
-		sha_ctx->digest_size = init_ctl->digest_size;
-		if (init_ctl->digest_size > NVVSE_MAX_ALLOCATED_SHA_RESULT_BUFF_SIZE) {
-			result_buff = kzalloc(init_ctl->digest_size, GFP_KERNEL);
-			if (!result_buff) {
-				ret = -ENOMEM;
-				goto free_req;
-			}
+	if (sha_update_ctl->init_only != 0U) {
+		if (sha_state->sha_init_done != 0U) {
+			pr_err("%s(): SHA init is already done\n", __func__);
+			ret = -EAGAIN;
+			goto exit;
+		} else {
+			/*
+			 * Return success as other parameters need not be validated for
+			 * init only request.
+			 */
+			ret = 0;
+			goto exit;
 		}
 	}
-	ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_init(req));
-	if (ret) {
-		pr_err("%s(): Failed to ahash_init for %s: ret=%d\n",
-				__func__, sha_alg_names[init_ctl->sha_type], ret);
-		goto free_result_buf;
+	if ((sha_update_ctl->is_first != 0U) && (sha_state->sha_total_msg_length > 0U)) {
+		pr_err("%s(): SHA First request is already received\n", __func__);
+		ret = -EINVAL;
+		goto exit;
 	}
-	sha_state->req = req;
-	sha_state->tfm = tfm;
-	sha_state->result_buff = (result_buff) ?
result_buff : ctx->sha_result; - sha_state->sha_type = init_ctl->sha_type; - sha_state->total_bytes = init_ctl->total_msg_size; - sha_state->digest_size = init_ctl->digest_size; - sha_state->remaining_bytes = init_ctl->total_msg_size; - sha_state->sha_done_success = SHA_OP_INIT; - nvvse_devnode[ctx->node_id].sha_init_done = true; - memset(sha_state->result_buff , 0, 64); - ret = 0; - goto out; + if ((sha_state->sha_init_done == 0U) && (sha_update_ctl->is_first == 0U)) { + pr_err("%s(): SHA First req is not yet received\n", __func__); + ret = -EINVAL; + goto exit; + } -free_result_buf: - kfree(result_buff); -free_req: - ahash_request_free(req); -free_tfm: - crypto_free_ahash(tfm); -out: + if ((sha_type < TEGRA_NVVSE_SHA_TYPE_SHA256) || (sha_type >= TEGRA_NVVSE_SHA_TYPE_MAX)) { + pr_err("%s(): SHA Type requested %d is not supported\n", __func__, sha_type); + ret = -EINVAL; + goto exit; + } + + if (sha_update_ctl->input_buffer_size == 0U) { + if (sha_update_ctl->is_last == 0U) { + pr_err("%s(): zero length non-last request is not supported\n", __func__); + ret = -EINVAL; + goto exit; + } + } else { + if (sha_update_ctl->in_buff == NULL) { + pr_err("%s(): input buffer address is NULL for non-zero len req\n", + __func__); + ret = -EINVAL; + goto exit; + } + } + + if (sha_update_ctl->input_buffer_size > ivc_database.max_buffer_size[ctx->node_id]) { + pr_err("%s(): Msg size is greater than supported size of %d Bytes\n", __func__, + ivc_database.max_buffer_size[ctx->node_id]); + ret = -EINVAL; + goto exit; + } + +exit: return ret; } static int tnvvse_crypto_sha_update(struct tnvvse_crypto_ctx *ctx, - struct tegra_nvvse_sha_update_ctl *update_ctl) + struct tegra_nvvse_sha_update_ctl *sha_update_ctl) { struct crypto_sha_state *sha_state = &ctx->sha_state; - char *result_buff; - struct ahash_request *req; - char *input_buffer = update_ctl->in_buff; + struct tegra_virtual_se_sha_context *sha_ctx; + struct crypto_ahash *tfm = NULL; + struct ahash_request *req = NULL; + struct tnvvse_crypto_completion sha_complete; struct scatterlist sg; - int ret = 0, buffer_size; - - if (update_ctl->input_buffer_size < 0) { - pr_err("%s: Invalid Msg size of %d Bytes\n", __func__, update_ctl->input_buffer_size); - ret = -EINVAL; - goto stop_sha; - } - if (update_ctl->input_buffer_size > ivc_database.max_buffer_size[ctx->node_id]) { - pr_err("%s: Msg size is greater than supported size of %d Bytes\n", __func__, - ivc_database.max_buffer_size[ctx->node_id]); - ret = -EINVAL; - goto stop_sha; - } - - result_buff = sha_state->result_buff; - req = sha_state->req; - - /* allocate buffer size as 1 to perform SHA operation - * if SHA buffer size passed is zero - */ - if (update_ctl->input_buffer_size == 0) - buffer_size = 1; - else - buffer_size = update_ctl->input_buffer_size; - - sha_state->in_buf = krealloc(sha_state->in_buf, buffer_size, GFP_KERNEL); - - if (sha_state->in_buf == NULL) { - ret = -ENOMEM; - goto stop_sha; - } - memset(sha_state->in_buf, 0, buffer_size); - - /* copy input buffer */ - if (copy_from_user((void *)sha_state->in_buf, input_buffer, update_ctl->input_buffer_size)) { - pr_err("%s(): Failed to copy_from_user input data\n", __func__); - ret = -EFAULT; - goto stop_sha; - } - - sg_init_one(&sg, sha_state->in_buf, update_ctl->input_buffer_size); - ahash_request_set_crypt(req, &sg, result_buff, update_ctl->input_buffer_size); - ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_update(req)); - if (ret) { - pr_err("%s(): Failed to ahash_update for %s: %d\n", - __func__, 
sha_alg_names[sha_state->sha_type], ret); - goto stop_sha; - } - - if (update_ctl->last_buffer) { - ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_final(req)); - if (ret) { - pr_err("%s(): Failed to ahash_final for %s: %d\n", - __func__, sha_alg_names[sha_state->sha_type], ret); - goto stop_sha; - } - sha_state->sha_done_success = SHA_OP_SUCCESS; - } - - goto done; - -stop_sha: - sha_state->sha_done_success = SHA_OP_FAIL; - -done: - return ret; -} - -static int tnvvse_crypto_sha_final(struct tnvvse_crypto_ctx *ctx, - struct tegra_nvvse_sha_final_ctl *final_ctl) -{ - struct crypto_sha_state *sha_state = &ctx->sha_state; - struct crypto_ahash *tfm = sha_state->tfm; - struct scatterlist sg; - struct ahash_request *req; - unsigned long size = 0; - char *result_buff; + uint8_t *in_buf = NULL; + char *result_buff = NULL; + enum tegra_nvvse_sha_type sha_type; + uint32_t in_sz, in_buf_size; int ret = -ENOMEM; - if (sha_state->sha_done_success == SHA_OP_INIT) { - result_buff = sha_state->result_buff; - req = sha_state->req; + sha_type = sha_update_ctl->sha_type; + in_sz = sha_update_ctl->input_buffer_size; - sg_init_one(&sg, sha_state->in_buf, size); - ahash_request_set_crypt(req, &sg, result_buff, size); - - ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_final(req)); - if (ret) { - pr_err("%s(): Failed to ahash_final for %s: %d\n", - __func__, sha_alg_names[sha_state->sha_type], ret); - } - - pr_err("%s(): SHA is not completed successfully\n", __func__); - ret = -EFAULT; - goto stop_sha; + if (sha_update_ctl->do_reset != 0U) { + /* Force reset SHA state and return */ + sha_state->sha_init_done = 0U; + sha_state->sha_total_msg_length = 0U; + ret = 0; + goto exit; } - if (sha_state->sha_done_success == SHA_OP_FAIL) { - pr_err("%s(): SHA is either aborted or not initialized\n", __func__); - ret = -EFAULT; - goto stop_sha; + ret = tnvvse_crypto_validate_sha_update_req(ctx, sha_update_ctl); + if (ret != 0) + goto exit; + + if (sha_update_ctl->init_only != 0U) { + /* Only set state as SHA init done and return */ + sha_state->sha_init_done = 1U; + ret = 0; + goto exit; + } + + if (sha_update_ctl->is_first != 0U) + sha_state->sha_init_done = 1U; + + if (in_sz == 0U) + /** + * Need to pass non-zero buffer size to kzalloc so that a + * valid ptr is passed to sg_init_one API in debug build + */ + in_buf_size = 1U; + else + in_buf_size = in_sz; + + in_buf = kzalloc(in_buf_size, GFP_KERNEL); + if (in_buf == NULL) { + ret = -ENOMEM; + goto exit; } /* Shake128/Shake256 have variable digest size */ - if ((sha_state->sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE128) || - (sha_state->sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE256)) { - ret = copy_to_user((void __user *)final_ctl->digest_buffer, - (const void *)sha_state->result_buff, - final_ctl->digest_size); + if ((sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE128) + || (sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE256)) { + if (sha_update_ctl->digest_size > NVVSE_MAX_ALLOCATED_SHA_RESULT_BUFF_SIZE) { + result_buff = kzalloc(sha_update_ctl->digest_size, GFP_KERNEL); + if (!result_buff) { + ret = -ENOMEM; + goto free_buf; + } + } + } + + if (!result_buff) + result_buff = ctx->sha_result; + + tfm = crypto_alloc_ahash(sha_alg_names[sha_type], 0, 0); + if (IS_ERR(tfm)) { + pr_err("%s(): Failed to load transform for %s:%ld\n", + __func__, sha_alg_names[sha_type], PTR_ERR(tfm)); + ret = PTR_ERR(tfm); + goto free_buf; + } + crypto_ahash_clear_flags(tfm, ~0U); + + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) { + crypto_free_ahash(tfm); + pr_err("%s(): Failed to 
allocate request\n", __func__); + ret = -ENOMEM; + goto free_buf; + } + + init_completion(&sha_complete.restart); + sha_complete.req_err = 0; + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tnvvse_crypto_complete, &sha_complete); + + /* copy input buffer */ + ret = copy_from_user(in_buf, sha_update_ctl->in_buff, in_sz); + if (ret) { + pr_err("%s(): Failed to copy user input data: %d\n", __func__, ret); + goto free_tfm; + } + sha_state->sha_total_msg_length += in_sz; + + /* Initialize SHA ctx */ + sha_ctx = crypto_ahash_ctx(tfm); + sha_ctx->node_id = ctx->node_id; + sha_ctx->digest_size = sha_update_ctl->digest_size; + sha_ctx->total_count = sha_state->sha_total_msg_length; + sha_ctx->intermediate_digest = sha_state->sha_intermediate_digest; + + if (sha_state->sha_total_msg_length == in_sz) + sha_ctx->is_first = true; + else + sha_ctx->is_first = false; + + ret = wait_async_op(&sha_complete, crypto_ahash_init(req)); + if (ret) { + pr_err("%s(): Failed to initialize ahash: %d\n", __func__, ret); + goto free_tfm; + } + + sg_init_one(&sg, in_buf, in_sz); + ahash_request_set_crypt(req, &sg, result_buff, in_sz); + + if (sha_update_ctl->is_last == 0) { + ret = wait_async_op(&sha_complete, crypto_ahash_update(req)); + if (ret) { + pr_err("%s(): Failed to ahash_update: %d\n", __func__, ret); + goto free_tfm; + } } else { - if (final_ctl->digest_size != crypto_ahash_digestsize(tfm)) { - pr_err("%s(): digest size not matching req %d and calculated %d for %s\n", - __func__, final_ctl->digest_size, crypto_ahash_digestsize(tfm), - sha_alg_names[sha_state->sha_type]); - ret = -EINVAL; - goto stop_sha; + ret = wait_async_op(&sha_complete, crypto_ahash_finup(req)); + if (ret) { + pr_err("%s(): Failed to ahash_finup: %d\n", __func__, ret); + goto free_tfm; } - ret = copy_to_user((void __user *)final_ctl->digest_buffer, - (const void *)sha_state->result_buff, - crypto_ahash_digestsize(tfm)); - } - if (ret) { - pr_err("%s(): Failed to copy_to_user for %s: %d\n", - __func__, sha_alg_names[sha_state->sha_type], ret); + /* Shake128/Shake256 have variable digest size */ + if ((sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE128) + || (sha_type == TEGRA_NVVSE_SHA_TYPE_SHAKE256)) { + ret = copy_to_user((void __user *)sha_update_ctl->digest_buffer, + (const void *)result_buff, + sha_update_ctl->digest_size); + } else { + if (sha_update_ctl->digest_size != crypto_ahash_digestsize(tfm)) { + pr_err("%s(): %s: input digest size of %d is invalid\n", + __func__, sha_alg_names[sha_type], + sha_update_ctl->digest_size); + ret = -EINVAL; + goto free_tfm; + } + + ret = copy_to_user((void __user *)sha_update_ctl->digest_buffer, + (const void *)result_buff, + crypto_ahash_digestsize(tfm)); + } + if (ret) { + pr_err("%s(): Failed to copy_to_user for %s: %d\n", __func__, + sha_alg_names[sha_type], ret); + goto free_tfm; + } + + /* Reset sha state */ + sha_state->sha_init_done = 0; + sha_state->sha_total_msg_length = 0; } -stop_sha: - if (sha_state->in_buf) { - kfree(sha_state->in_buf); - sha_state->in_buf = NULL; - } - ahash_request_free(sha_state->req); - crypto_free_ahash(sha_state->tfm); +free_tfm: + if (req) + ahash_request_free(req); + if (tfm) + crypto_free_ahash(tfm); - sha_state->req = NULL; - sha_state->tfm = NULL; - if (sha_state->result_buff != ctx->sha_result) { - kfree(sha_state->result_buff); - sha_state->result_buff = NULL; - } - sha_state->total_bytes = 0; - sha_state->digest_size = 0; - sha_state->remaining_bytes = 0; - nvvse_devnode[ctx->node_id].sha_init_done = false; +free_buf: + //kfree won't fail even 
if input is NULL
+	if (result_buff != ctx->sha_result)
+		kfree(result_buff);
+	kfree(in_buf);
+exit:
+	return ret;
+}
+
+static int tnvvse_crypto_hmac_sha_validate_req(struct tnvvse_crypto_ctx *ctx,
+			struct tegra_nvvse_hmac_sha_sv_ctl *hmac_sha_ctl)
+{
+	struct crypto_sha_state *sha_state = &ctx->sha_state;
+	int32_t ret = 0;
+
+	if ((hmac_sha_ctl->is_first != 0)
+			&& (sha_state->hmac_sha_init_done != 0)) {
+		pr_err("%s: HMAC-SHA init already done for this node_id %u\n", __func__,
+				ctx->node_id);
+		ret = -EAGAIN;
+		goto exit;
+	}
+
+	if ((hmac_sha_ctl->is_first == 0)
+			&& (sha_state->hmac_sha_init_done == 0)) {
+		pr_err("%s: HMAC-SHA init not done for this node_id %u\n", __func__, ctx->node_id);
+		ret = -EAGAIN;
+		goto exit;
+	}
+
+	if (hmac_sha_ctl->data_length > ivc_database.max_buffer_size[ctx->node_id]) {
+		pr_err("%s(): Input data length %u is not supported\n",
+				__func__, hmac_sha_ctl->data_length);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+exit:
 	return ret;
 }
@@ -492,8 +514,7 @@ static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx,
 	struct crypto_ahash *tfm = NULL;
 	struct ahash_request *req = NULL;
 	char *src_buffer;
-	const char *driver_name;
-	struct tnvvse_crypto_completion sha_complete;
+	struct tnvvse_crypto_completion hmac_sha_complete;
 	char key_as_keyslot[AES_KEYSLOT_NAME_SIZE] = {0,};
 	struct tnvvse_hmac_sha_req_data priv_data;
 	struct scatterlist sg;
@@ -502,73 +523,45 @@ static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx,
 	uint8_t *in_buf = NULL;
 	char *result = NULL;

-	if (hmac_sha_ctl->data_length > ivc_database.max_buffer_size[ctx->node_id]) {
-		pr_err("%s(): Input size is (data = %d) is not supported\n",
-			__func__, hmac_sha_ctl->data_length);
-		return -EINVAL;
+	ret = tnvvse_crypto_hmac_sha_validate_req(ctx, hmac_sha_ctl);
+	if (ret != 0)
+		goto exit;
+
+	tfm = crypto_alloc_ahash("hmac-sha256-vse", 0, 0);
+	if (IS_ERR(tfm)) {
+		ret = PTR_ERR(tfm);
+		pr_err("%s(): Failed to allocate ahash for hmac-sha256-vse: %d\n",
+				__func__, ret);
+		ret = -ENOMEM;
+		goto exit;
 	}

-	if (sha_state->total_bytes == 0) {
-		if (hmac_sha_ctl->is_first != 1) {
-			pr_err("%s: HMAC-SHA first request is not yet received\n",
-				__func__);
-			return -EINVAL;
-			goto exit;
-		}
+	hmac_ctx = crypto_ahash_ctx(tfm);
+	hmac_ctx->node_id = ctx->node_id;
+
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		crypto_free_ahash(tfm);
+		pr_err("%s(): Failed to allocate request for hmac-sha256-vse\n", __func__);
+		ret = -ENOMEM;
+		goto exit;
 	}

-	if (hmac_sha_ctl->is_first == 1) {
-		tfm = crypto_alloc_ahash("hmac-sha256-vse", 0, 0);
-		if (IS_ERR(tfm)) {
-			ret = PTR_ERR(tfm);
-			pr_err("%s(): Failed to allocate ahash for hmac-sha256-vse: %d\n",
-				__func__, ret);
-			ret = -ENOMEM;
-			goto exit;
-		}
+	init_completion(&hmac_sha_complete.restart);
+	hmac_sha_complete.req_err = 0;
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					tnvvse_crypto_complete, &hmac_sha_complete);

-		hmac_ctx = crypto_ahash_ctx(tfm);
-		hmac_ctx->node_id = ctx->node_id;
+	(void)snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES ");
+	memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, hmac_sha_ctl->key_slot,
+			KEYSLOT_SIZE_BYTES);

-		driver_name = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
-		if (driver_name == NULL) {
-			crypto_free_ahash(tfm);
-			pr_err("%s(): Failed to get_driver_name for hmac-sha256-vse returned NULL",
-				__func__);
-			ret = -EINVAL;
-			goto exit;
-		}
-
-		req = ahash_request_alloc(tfm, GFP_KERNEL);
-		if (!req) {
-			crypto_free_ahash(tfm);
-			pr_err("%s(): Failed
to allocate request for cmac-vse(aes)\n", __func__); - ret = -ENOMEM; - goto exit; - } - - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - tnvvse_crypto_complete, &sha_complete); - sha_state->tfm = tfm; - sha_state->req = req; - - (void)snprintf(key_as_keyslot, AES_KEYSLOT_NAME_SIZE, "NVSEAES "); - memcpy(key_as_keyslot + KEYSLOT_OFFSET_BYTES, hmac_sha_ctl->key_slot, - KEYSLOT_SIZE_BYTES); - - ret = crypto_ahash_setkey(tfm, key_as_keyslot, hmac_sha_ctl->key_length); - if (ret) { - pr_err("%s(): Failed to set keys for hmac: %d\n", __func__, ret); - goto free_tfm; - } - } else { - tfm = sha_state->tfm; - req = sha_state->req; + ret = crypto_ahash_setkey(tfm, key_as_keyslot, hmac_sha_ctl->key_length); + if (ret) { + pr_err("%s(): Failed to set keys for hmac: %d\n", __func__, ret); + goto free_tfm; } - init_completion(&sha_state->sha_complete.restart); - sha_state->sha_complete.req_err = 0; - in_sz = hmac_sha_ctl->data_length; in_buf = kzalloc(in_sz, GFP_KERNEL); if (in_buf == NULL) { @@ -592,12 +585,19 @@ static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx, priv_data.result = 0; req->priv = &priv_data; - if (hmac_sha_ctl->is_first == 1) { - ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_init(req)); - if (ret) { - pr_err("%s(): Failed to initialize ahash: %d\n", __func__, ret); - goto free_buf; - } + sha_state->hmac_sha_total_msg_length += hmac_sha_ctl->data_length; + sha_state->hmac_sha_init_done = 1; + hmac_ctx->total_count = sha_state->hmac_sha_total_msg_length; + + if (hmac_sha_ctl->is_first == 1) + hmac_ctx->is_first = true; + else + hmac_ctx->is_first = false; + + ret = wait_async_op(&hmac_sha_complete, crypto_ahash_init(req)); + if (ret) { + pr_err("%s(): Failed to initialize ahash: %d\n", __func__, ret); + goto free_buf; } src_buffer = hmac_sha_ctl->src_buffer; @@ -611,10 +611,9 @@ static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx, sg_init_one(&sg, in_buf, in_sz); ahash_request_set_crypt(req, &sg, result, in_sz); - sha_state->total_bytes += in_sz; if (hmac_sha_ctl->is_last == 0) { - ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_update(req)); + ret = wait_async_op(&hmac_sha_complete, crypto_ahash_update(req)); if (ret) { pr_err("%s(): Failed to ahash_update: %d\n", __func__, ret); goto free_buf; @@ -631,7 +630,7 @@ static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx, priv_data.expected_digest = result; } - ret = wait_async_op(&sha_state->sha_complete, crypto_ahash_finup(req)); + ret = wait_async_op(&hmac_sha_complete, crypto_ahash_finup(req)); if (ret) { pr_err("%s(): Failed to ahash_finup: %d\n", __func__, ret); goto free_buf; @@ -647,11 +646,8 @@ static int tnvvse_crypto_hmac_sha_sign_verify(struct tnvvse_crypto_ctx *ctx, hmac_sha_ctl->result = priv_data.result; } - sha_state->total_bytes = 0; - ahash_request_free(sha_state->req); - sha_state->req = NULL; - crypto_free_ahash(sha_state->tfm); - sha_state->tfm = NULL; + sha_state->hmac_sha_init_done = 0; + sha_state->hmac_sha_total_msg_length = 0; } free_buf: @@ -660,12 +656,10 @@ free_buf: kfree(in_buf); free_tfm: - if (ret != 0) { - if (sha_state->req) - ahash_request_free(sha_state->req); - if (sha_state->tfm) - crypto_free_ahash(sha_state->tfm); - } + if (req) + ahash_request_free(req); + if (tfm) + crypto_free_ahash(tfm); exit: return ret; @@ -1694,6 +1688,12 @@ static int tnvvse_crypto_get_aes_drng(struct tnvvse_crypto_ctx *ctx, struct crypto_rng *rng; int ret = -ENOMEM; + if (aes_drng_ctl->data_length > 
ctx->max_rng_buff) { + pr_err("%s(): unsupported data length(%u)\n", __func__, aes_drng_ctl->data_length); + ret = -EINVAL; + goto out; + } + rng = crypto_alloc_rng("rng_drbg", 0, 0); if (IS_ERR(rng)) { ret = PTR_ERR(rng); @@ -1751,7 +1751,8 @@ static int tnvvse_crypto_get_ivc_db(struct tegra_nvvse_get_ivc_db *get_ivc_db) static int tnvvse_crypto_dev_open(struct inode *inode, struct file *filp) { - struct tnvvse_crypto_ctx *ctx; + struct tnvvse_crypto_ctx *ctx = NULL; + struct crypto_sha_state *p_sha_state = NULL; char root_path_buf[512]; const char *root_path, *str; int ret = 0; @@ -1776,12 +1777,10 @@ static int tnvvse_crypto_dev_open(struct inode *inode, struct file *filp) } ctx->node_id = node_id; - mutex_init(&ctx->lock); - ctx->rng_buff = kzalloc(NVVSE_MAX_RANDOM_NUMBER_LEN_SUPPORTED, GFP_KERNEL); if (!ctx->rng_buff) { ret = -ENOMEM; - goto free_mutex; + goto free_buf; } ctx->max_rng_buff = NVVSE_MAX_RANDOM_NUMBER_LEN_SUPPORTED; @@ -1789,17 +1788,28 @@ static int tnvvse_crypto_dev_open(struct inode *inode, struct file *filp) ctx->sha_result = kzalloc(NVVSE_MAX_ALLOCATED_SHA_RESULT_BUFF_SIZE, GFP_KERNEL); if (!ctx->sha_result) { ret = -ENOMEM; - goto free_rng_buf; + goto free_buf; + } + + p_sha_state = &ctx->sha_state; + p_sha_state->sha_intermediate_digest = kzalloc(NVVSE_MAX_ALLOCATED_SHA_RESULT_BUFF_SIZE, + GFP_KERNEL); + if (!p_sha_state->sha_intermediate_digest) { + ret = -ENOMEM; + goto free_buf; } filp->private_data = ctx; return ret; -free_rng_buf: - kfree(ctx->rng_buff); -free_mutex: - mutex_destroy(&ctx->lock); +free_buf: + if (ctx) { + kfree(ctx->rng_buff); + kfree(ctx->sha_result); + if (p_sha_state) + kfree(p_sha_state->sha_intermediate_digest); + } kfree(ctx); return ret; } @@ -1807,18 +1817,17 @@ free_mutex: static int tnvvse_crypto_dev_release(struct inode *inode, struct file *filp) { struct tnvvse_crypto_ctx *ctx = filp->private_data; - int ret = 0; - mutex_destroy(&ctx->lock); kfree(ctx->sha_result); kfree(ctx->rng_buff); + kfree(ctx->sha_state.sha_intermediate_digest); kfree(ctx); + filp->private_data = NULL; - return ret; + return 0; } - static long tnvvse_crypto_dev_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long arg) { @@ -1827,9 +1836,7 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, struct tegra_nvvse_aes_gmac_init_ctl __user *arg_aes_gmac_init_ctl = (void __user *)arg; struct tegra_nvvse_aes_gmac_sign_verify_ctl __user *arg_aes_gmac_sign_verify_ctl; struct tegra_nvvse_aes_cmac_sign_verify_ctl __user *arg_aes_cmac_sign_verify_ctl; - struct tegra_nvvse_sha_init_ctl *sha_init_ctl; struct tegra_nvvse_sha_update_ctl *sha_update_ctl; - struct tegra_nvvse_sha_final_ctl *sha_final_ctl; struct tegra_nvvse_hmac_sha_sv_ctl *hmac_sha_sv_ctl; struct tegra_nvvse_hmac_sha_sv_ctl __user *arg_hmac_sha_sv_ctl; struct tegra_nvvse_aes_enc_dec_ctl *aes_enc_dec_ctl; @@ -1850,71 +1857,34 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, return -EPERM; } - mutex_lock(&ctx->lock); + mutex_lock(&nvvse_devnode[ctx->node_id].lock); switch (ioctl_num) { - case NVVSE_IOCTL_CMDID_INIT_SHA: - sha_init_ctl = kzalloc(sizeof(*sha_init_ctl), GFP_KERNEL); - if (!sha_init_ctl) { - pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; - } - - ret = copy_from_user(sha_init_ctl, (void __user *)arg, sizeof(*sha_init_ctl)); - if (ret) { - pr_err("%s(): Failed to copy_from_user sha_init_ctl:%d\n", __func__, ret); - kfree(sha_init_ctl); - goto out; - } - - ret = tnvvse_crypto_sha_init(ctx, sha_init_ctl); - - kfree(sha_init_ctl); - break; - case 
NVVSE_IOCTL_CMDID_UPDATE_SHA: sha_update_ctl = kzalloc(sizeof(*sha_update_ctl), GFP_KERNEL); if (!sha_update_ctl) { pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto release_lock; } ret = copy_from_user(sha_update_ctl, (void __user *)arg, sizeof(*sha_update_ctl)); if (ret) { pr_err("%s(): Failed to copy_from_user sha_update_ctl:%d\n", __func__, ret); kfree(sha_update_ctl); - goto out; + goto release_lock; } ret = tnvvse_crypto_sha_update(ctx, sha_update_ctl); kfree(sha_update_ctl); break; - - - case NVVSE_IOCTL_CMDID_FINAL_SHA: - sha_final_ctl = kzalloc(sizeof(*sha_final_ctl), GFP_KERNEL); - if (!sha_final_ctl) { - pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; - } - - ret = copy_from_user(sha_final_ctl, (void __user *)arg, sizeof(*sha_final_ctl)); - if (ret) { - pr_err("%s(): Failed to copy_from_user sha_final_ctl:%d\n", __func__, ret); - kfree(sha_final_ctl); - goto out; - } - - ret = tnvvse_crypto_sha_final(ctx, sha_final_ctl); - - kfree(sha_final_ctl); - break; - case NVVSE_IOCTL_CMDID_HMAC_SHA_SIGN_VERIFY: hmac_sha_sv_ctl = kzalloc(sizeof(*hmac_sha_sv_ctl), GFP_KERNEL); - if (!hmac_sha_sv_ctl) - return -ENOMEM; + if (!hmac_sha_sv_ctl) { + ret = -ENOMEM; + goto release_lock; + } arg_hmac_sha_sv_ctl = (void __user *)arg; @@ -1923,7 +1893,7 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, if (ret) { pr_err("%s(): Failed to copy_from_user hmac_sha_sv_ctl:%d\n", __func__, ret); - goto out; + goto release_lock; } ret = tnvvse_crypto_hmac_sha_sign_verify(ctx, hmac_sha_sv_ctl); @@ -1942,14 +1912,15 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, aes_enc_dec_ctl = kzalloc(sizeof(*aes_enc_dec_ctl), GFP_KERNEL); if (!aes_enc_dec_ctl) { pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto release_lock; } ret = copy_from_user(aes_enc_dec_ctl, (void __user *)arg, sizeof(*aes_enc_dec_ctl)); if (ret) { pr_err("%s(): Failed to copy_from_user aes_enc_dec_ctl:%d\n", __func__, ret); kfree(aes_enc_dec_ctl); - goto out; + goto release_lock; } if (aes_enc_dec_ctl->aes_mode == TEGRA_NVVSE_AES_MODE_GCM) @@ -1959,7 +1930,7 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, if (ret) { kfree(aes_enc_dec_ctl); - goto out; + goto release_lock; } /* Copy IV returned by VSE */ @@ -1978,7 +1949,7 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, if (ret) { pr_err("%s(): Failed to copy_to_user:%d\n", __func__, ret); kfree(aes_enc_dec_ctl); - goto out; + goto release_lock; } } @@ -1989,7 +1960,8 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, aes_gmac_init_ctl = kzalloc(sizeof(*aes_gmac_init_ctl), GFP_KERNEL); if (!aes_gmac_init_ctl) { pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto release_lock; } ret = copy_from_user(aes_gmac_init_ctl, (void __user *)arg, @@ -1998,13 +1970,13 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, pr_err("%s(): Failed to copy_from_user aes_gmac_init_ctl:%d\n", __func__, ret); kfree(aes_gmac_init_ctl); - goto out; + goto release_lock; } ret = tnvvse_crypto_aes_gmac_init(ctx, aes_gmac_init_ctl); if (ret) { kfree(aes_gmac_init_ctl); - goto out; + goto release_lock; } /* Copy IV returned by VSE */ @@ -2013,7 +1985,7 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, if (ret) { pr_err("%s(): Failed to copy_to_user:%d\n", __func__, ret); kfree(aes_gmac_init_ctl); - goto out; + goto release_lock; } kfree(aes_gmac_init_ctl); @@ -2023,7 +1995,8 @@ static long 
tnvvse_crypto_dev_ioctl(struct file *filp, aes_gmac_sign_verify_ctl = kzalloc(sizeof(*aes_gmac_sign_verify_ctl), GFP_KERNEL); if (!aes_gmac_sign_verify_ctl) { pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto release_lock; } arg_aes_gmac_sign_verify_ctl = (void __user *)arg; @@ -2033,13 +2006,13 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, pr_err("%s(): Failed to copy_from_user aes_gmac_sign_verify_ctl:%d\n", __func__, ret); kfree(aes_gmac_sign_verify_ctl); - goto out; + goto release_lock; } ret = tnvvse_crypto_aes_gmac_sign_verify(ctx, aes_gmac_sign_verify_ctl); if (ret) { kfree(aes_gmac_sign_verify_ctl); - goto out; + goto release_lock; } if (aes_gmac_sign_verify_ctl->gmac_type == TEGRA_NVVSE_AES_GMAC_VERIFY) { @@ -2057,7 +2030,8 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, aes_cmac_sign_verify_ctl = kzalloc(sizeof(*aes_cmac_sign_verify_ctl), GFP_KERNEL); if (!aes_cmac_sign_verify_ctl) { pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto release_lock; } arg_aes_cmac_sign_verify_ctl = (void __user *)arg; @@ -2067,13 +2041,13 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, pr_err("%s(): Failed to copy_from_user aes_cmac_sign_verify:%d\n", __func__, ret); kfree(aes_cmac_sign_verify_ctl); - goto out; + goto release_lock; } ret = tnvvse_crypto_aes_cmac_sign_verify(ctx, aes_cmac_sign_verify_ctl); if (ret) { kfree(aes_cmac_sign_verify_ctl); - goto out; + goto release_lock; } if (aes_cmac_sign_verify_ctl->cmac_type == TEGRA_NVVSE_AES_CMAC_VERIFY) { @@ -2091,14 +2065,15 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, aes_drng_ctl = kzalloc(sizeof(*aes_drng_ctl), GFP_KERNEL); if (!aes_drng_ctl) { pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto release_lock; } ret = copy_from_user(aes_drng_ctl, (void __user *)arg, sizeof(*aes_drng_ctl)); if (ret) { pr_err("%s(): Failed to copy_from_user aes_drng_ctl:%d\n", __func__, ret); kfree(aes_drng_ctl); - goto out; + goto release_lock; } ret = tnvvse_crypto_get_aes_drng(ctx, aes_drng_ctl); @@ -2109,21 +2084,22 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, get_ivc_db = kzalloc(sizeof(*get_ivc_db), GFP_KERNEL); if (!get_ivc_db) { pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto release_lock; } ret = tnvvse_crypto_get_ivc_db(get_ivc_db); if (ret) { pr_err("%s(): Failed to get ivc database get_ivc_db:%d\n", __func__, ret); kfree(get_ivc_db); - goto out; + goto release_lock; } ret = copy_to_user((void __user *)arg, &ivc_database, sizeof(ivc_database)); if (ret) { pr_err("%s(): Failed to copy_to_user ivc_database:%d\n", __func__, ret); kfree(get_ivc_db); - goto out; + goto release_lock; } kfree(get_ivc_db); @@ -2133,7 +2109,8 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, aes_cmac_sign_verify_ctl = kzalloc(sizeof(*aes_cmac_sign_verify_ctl), GFP_KERNEL); if (!aes_cmac_sign_verify_ctl) { pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto release_lock; } arg_aes_cmac_sign_verify_ctl = (void __user *)arg; @@ -2143,12 +2120,12 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, pr_err("%s(): Failed to copy_from_user tsec_sign_verify:%d\n", __func__, ret); kfree(aes_cmac_sign_verify_ctl); - goto out; + goto release_lock; } ret = tnvvtsec_crypto_aes_cmac_sign_verify(ctx, aes_cmac_sign_verify_ctl); if (ret) { kfree(aes_cmac_sign_verify_ctl); - goto out; + goto 
release_lock; } if (aes_cmac_sign_verify_ctl->cmac_type == TEGRA_NVVSE_AES_CMAC_VERIFY) { @@ -2166,14 +2143,15 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, tsec_keyload_status = kzalloc(sizeof(*tsec_keyload_status), GFP_KERNEL); if (!tsec_keyload_status) { pr_err("%s(): failed to allocate memory\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto release_lock; } ret = tnvvse_crypto_tsec_get_keyload_status(ctx, tsec_keyload_status); if (ret) { pr_err("%s(): Failed to get keyload status:%d\n", __func__, ret); kfree(tsec_keyload_status); - goto out; + goto release_lock; } ret = copy_to_user((void __user *)arg, tsec_keyload_status, @@ -2182,7 +2160,7 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, pr_err("%s(): Failed to copy_to_user tsec_keyload_status:%d\n", __func__, ret); kfree(tsec_keyload_status); - goto out; + goto release_lock; } kfree(tsec_keyload_status); @@ -2194,8 +2172,8 @@ static long tnvvse_crypto_dev_ioctl(struct file *filp, break; } -out: - mutex_unlock(&ctx->lock); +release_lock: + mutex_unlock(&nvvse_devnode[ctx->node_id].lock); return ret; } @@ -2249,6 +2227,7 @@ static int __init tnvvse_crypto_device_init(void) goto fail; } nvvse_devnode[cnt].g_misc_devices = misc; + mutex_init(&nvvse_devnode[cnt].lock); } return ret; @@ -2274,6 +2253,7 @@ static void __exit tnvvse_crypto_device_exit(void) kfree(nvvse_devnode[ctr].g_misc_devices->name); kfree(nvvse_devnode[ctr].g_misc_devices); nvvse_devnode[ctr].g_misc_devices = NULL; + mutex_destroy(&nvvse_devnode[ctr].lock); } } } diff --git a/include/uapi/misc/tegra-nvvse-cryptodev.h b/include/uapi/misc/tegra-nvvse-cryptodev.h index f9841712..dcd33d0d 100644 --- a/include/uapi/misc/tegra-nvvse-cryptodev.h +++ b/include/uapi/misc/tegra-nvvse-cryptodev.h @@ -121,44 +121,31 @@ enum tegra_nvvse_cmac_type { }; /** - * \brief Holds SHA Init Header Params - */ -struct tegra_nvvse_sha_init_ctl { - enum tegra_nvvse_sha_type sha_type; - uint32_t digest_size; - uint64_t total_msg_size; -}; -#define NVVSE_IOCTL_CMDID_INIT_SHA _IOW(TEGRA_NVVSE_IOC_MAGIC, TEGRA_NVVSE_CMDID_INIT_SHA, \ - struct tegra_nvvse_sha_init_ctl) - -/** - * \brief Holds SHA Update Header Params - */ + * \brief Holds SHA Update Header Params + */ struct tegra_nvvse_sha_update_ctl { + /** Holds the SHA request type */ + enum tegra_nvvse_sha_type sha_type; + /** Specifies first request */ + uint8_t is_first; + /** Specifies last request */ + uint8_t is_last; + /** Specifies if only init is to be performed */ + uint8_t init_only; + /** Specifies if context is to be reinitialized */ + uint8_t do_reset; /** Holds the pointer of the input buffer */ - char *in_buff; + uint8_t *in_buff; /** Holds the size of the input buffer */ - uint32_t input_buffer_size; - /** Indicates the last chunk of the input message. 
1 means last buffer
-	 * else not the last buffer
-	 */
-	uint8_t last_buffer;
+	uint32_t input_buffer_size;
+	/** Holds the pointer of the digest buffer */
+	uint8_t *digest_buffer;
+	/** Holds the size of the digest buffer */
+	uint32_t digest_size;
 };
 #define NVVSE_IOCTL_CMDID_UPDATE_SHA	_IOW(TEGRA_NVVSE_IOC_MAGIC, TEGRA_NVVSE_CMDID_UPDATE_SHA, \
 		struct tegra_nvvse_sha_update_ctl)

-/**
- * \brief Holds SHA Final Header Params
- */
-struct tegra_nvvse_sha_final_ctl {
-	/** Holds the pointer of the digest buffer */
-	uint8_t *digest_buffer;
-	/** Holds the size of the digest buffer */
-	uint32_t digest_size;
-};
-#define NVVSE_IOCTL_CMDID_FINAL_SHA	_IOWR(TEGRA_NVVSE_IOC_MAGIC, TEGRA_NVVSE_CMDID_FINAL_SHA, \
-		struct tegra_nvvse_sha_final_ctl)
-
 struct tegra_nvvse_hmac_sha_sv_ctl {
	/** [in] Holds the enum which indicates SHA mode */
	enum tegra_nvvse_sha_type hmac_sha_mode;