crypto: tegra: Use separate buffer for setkey

The buffer used to send commands to host1x was shared by all tasks
in the engine. This is a problem for setkey(), which is called
asynchronously to the crypto engine queue: modifying the shared cmdbuf
from setkey() corrupts any ongoing host1x task and in turn breaks the
encryption/decryption operation. Hence, use a separate command buffer
for setkey().

Bug 4883011

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Change-Id: Ia6a5376e2c8dfc98e11414666ebf9ade41f10fee
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3328437
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: Laxman Dewangan <ldewangan@nvidia.com>
Author:    Akhil R <akhilrajeev@nvidia.com>
Date:      2024-12-09 12:43:47 +05:30
Committer: Jon Hunter
Parent:    a628affd69
Commit:    f725f36d1e

6 changed files with 60 additions and 46 deletions

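Before the per-file hunks, a minimal userspace model of the bug and of the fix may help. It is only a sketch: the cmdbuf/keybuf names mirror the driver's fields, but the pthread scaffolding, the checksum in submit(), and the buffer sizes are illustrative stand-ins, not driver code.

/*
 * Minimal model of the race this patch fixes: a worker thread (the
 * "engine queue") submits a shared command buffer while another thread
 * rewrites it, as setkey() previously did with se->cmdbuf. Giving the
 * key path its own buffer (the "keybuf") removes the corruption.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMDBUF_WORDS 8

static uint32_t cmdbuf[CMDBUF_WORDS];   /* shared with the "engine" */
static uint32_t keybuf[CMDBUF_WORDS];   /* dedicated buffer for key commands */
static pthread_mutex_t keybuf_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for tegra_se_host1x_submit(se, buf, size): consume 'size' words. */
static void submit(const uint32_t *buf, unsigned int size)
{
	uint32_t sum = 0;

	for (unsigned int i = 0; i < size; i++)
		sum += buf[i];
	printf("submitted %u words, checksum %#x\n", size, sum);
}

static void *engine_worker(void *arg)
{
	(void)arg;
	/* Engine-queued work keeps using the shared cmdbuf. */
	memset(cmdbuf, 0xa5, sizeof(cmdbuf));
	submit(cmdbuf, CMDBUF_WORDS);
	return NULL;
}

static void setkey_path(void)
{
	/*
	 * Before the fix this wrote into cmdbuf and could race with
	 * engine_worker(); now it builds its commands in keybuf,
	 * serialized against other setkey() callers.
	 */
	pthread_mutex_lock(&keybuf_lock);
	memset(keybuf, 0x5a, sizeof(keybuf));
	submit(keybuf, CMDBUF_WORDS);
	pthread_mutex_unlock(&keybuf_lock);
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, engine_worker, NULL);
	setkey_path();
	pthread_join(worker, NULL);
	return 0;
}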

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
*/
@@ -329,7 +329,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
/* Prepare the command and submit for execution */
cmdlen = tegra_aes_prep_cmd(se, rctx);
-ret = tegra_se_host1x_submit(se, cmdlen);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* Copy the result */
tegra_aes_update_iv(req, ctx);
@@ -959,7 +959,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
cmdlen = tegra_gmac_prep_cmd(se, rctx);
-return tegra_se_host1x_submit(se, cmdlen);
+return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -976,7 +976,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
/* Prepare command and submit */
cmdlen = tegra_gcm_crypt_prep_cmd(se, rctx);
-ret = tegra_se_host1x_submit(se, cmdlen);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -999,7 +999,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
/* Prepare command and submit */
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
-ret = tegra_se_host1x_submit(se, cmdlen);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -1017,7 +1017,7 @@ static int tegra_gcm_hw_verify(struct tegra_aead_ctx *ctx, struct tegra_aead_req
{
struct tegra_se *se = ctx->se;
u32 result, *cpuvaddr = se->cmdbuf->addr;
-int size, ret;
+int cmdlen, ret;
memcpy(rctx->inbuf.buf, mac, rctx->authsize);
rctx->inbuf.size = rctx->authsize;
@@ -1027,8 +1027,8 @@ static int tegra_gcm_hw_verify(struct tegra_aead_ctx *ctx, struct tegra_aead_req
SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
-size = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
-ret = tegra_se_host1x_submit(se, size);
+cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -1159,7 +1159,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_se *se, struct tegra_aead_reqctx *rc
/* Prepare command and submit */
cmdlen = tegra_cbcmac_prep_cmd(se, rctx);
-return tegra_se_host1x_submit(se, cmdlen);
+return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1364,7 +1364,7 @@ static int tegra_ccm_do_ctr(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
/* Prepare command and submit */
cmdlen = tegra_ctr_prep_cmd(se, rctx);
-ret = tegra_se_host1x_submit(se, cmdlen);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -1862,7 +1862,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
cmdlen = tegra_cmac_prep_cmd(se, rctx);
-ret = tegra_se_host1x_submit(se, cmdlen);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/*
* If this is not the final update, copy the intermediate results
* from the registers so that it can be used in the next 'update'
@@ -1906,7 +1906,7 @@ static int tegra_cmac_do_final(struct ahash_request *req)
/* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(se, rctx);
-ret = tegra_se_host1x_submit(se, cmdlen);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
goto out;


@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver to handle HASH algorithms using NVIDIA Security Engine.
*/
@@ -310,8 +310,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
-unsigned int nblks, nresidue, size, ret;
-u32 *cpuvaddr = ctx->se->cmdbuf->addr;
+struct tegra_se *se = ctx->se;
+unsigned int nblks, nresidue, cmdlen, ret;
+u32 *cpuvaddr = se->cmdbuf->addr;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
@@ -368,11 +369,10 @@ static int tegra_sha_do_update(struct ahash_request *req)
* This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FIRST))
-tegra_sha_paste_hash_result(ctx->se, rctx);
+tegra_sha_paste_hash_result(se, rctx);
-size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
-ret = tegra_se_host1x_submit(ctx->se, size);
+cmdlen = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/*
* If this is not the final update, copy the intermediate results
@@ -380,7 +380,7 @@ static int tegra_sha_do_update(struct ahash_request *req)
* call. This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FINAL))
-tegra_sha_copy_hash_result(ctx->se, rctx);
+tegra_sha_copy_hash_result(se, rctx);
dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
@@ -395,7 +395,7 @@ static int tegra_sha_do_final(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
u32 *cpuvaddr = se->cmdbuf->addr;
-int size, ret = 0;
+int cmdlen, ret = 0;
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
@@ -414,8 +414,8 @@ static int tegra_sha_do_final(struct ahash_request *req)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
-size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
-ret = tegra_se_host1x_submit(se, size);
+cmdlen = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
goto out;


@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver file to manage keys of NVIDIA Security Engine.
*/
@@ -177,11 +177,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
u32 keylen, u16 slot, u32 alg)
{
const u32 *keyval = (u32 *)key;
-u32 *addr = se->cmdbuf->addr, size;
+u32 *addr = se->keybuf->addr, size;
+int ret;
+
+mutex_lock(&kslt_lock);
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
-return tegra_se_host1x_submit(se, size);
+ret = tegra_se_host1x_submit(se, se->keybuf, size);
+mutex_unlock(&kslt_lock);
+
+return ret;
}
static int tegra_key_move_to_kds(struct tegra_se *se, u32 slot, u32 kds_id)
@@ -192,7 +198,7 @@ static int tegra_key_move_to_kds(struct tegra_se *se, u32 slot, u32 kds_id)
src_keyid = SE_KSLT_REGION_ID_SYM | slot;
size = tegra_key_prep_mov_cmd(se, se->cmdbuf->addr, src_keyid, kds_id);
-ret = tegra_se_host1x_submit(se, size);
+ret = tegra_se_host1x_submit(se, se->keybuf, size);
if (ret)
return ret;
@@ -207,7 +213,7 @@ static unsigned int tegra_kac_get_from_kds(struct tegra_se *se, u32 keyid, u16 s
tgt_keyid = SE_KSLT_REGION_ID_SYM | slot;
size = tegra_key_prep_mov_cmd(se, se->cmdbuf->addr, keyid, tgt_keyid);
-ret = tegra_se_host1x_submit(se, size);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
tegra_keyslot_free(slot);
@@ -219,7 +225,7 @@ static void tegra_key_kds_invalidate(struct tegra_se *se, u32 keyid)
unsigned int size;
size = tegra_key_prep_invld_cmd(se, se->cmdbuf->addr, keyid);
-tegra_se_host1x_submit(se, size);
+tegra_se_host1x_submit(se, se->keybuf, size);
tegra_kds_free_id(keyid);
}


@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver for NVIDIA Security Engine in Tegra Chips
*/
@@ -143,7 +143,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
return cmdbuf;
}
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
{
struct host1x_job *job;
int ret;
@@ -162,9 +162,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
job->engine_fallback_streamid = se->stream_id;
job->engine_streamid_offset = SE_STREAM_ID;
-se->cmdbuf->words = size;
+cmdbuf->words = size;
-host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
ret = host1x_job_pin(job, se->dev);
if (ret) {
@@ -222,14 +222,22 @@ static int tegra_se_client_init(struct host1x_client *client)
goto syncpt_put;
}
+se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+if (!se->keybuf) {
+	ret = -ENOMEM;
+	goto cmdbuf_put;
+}
ret = se->hw->init_alg(se);
if (ret) {
dev_err(se->dev, "failed to register algorithms\n");
-goto cmdbuf_put;
+goto keybuf_put;
}
return 0;
+keybuf_put:
+	tegra_se_cmdbuf_put(&se->keybuf->bo);
cmdbuf_put:
tegra_se_cmdbuf_put(&se->cmdbuf->bo);
syncpt_put:

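The tegra_se_client_init() hunk above extends the usual goto-based unwind: the new keybuf allocation gets its own keybuf_put label, which falls through to cmdbuf_put so later failures release resources in reverse allocation order. A self-contained sketch of the pattern, with alloc_buf()/free_buf()/init_alg() as hypothetical placeholders rather than driver functions:

#include <stdlib.h>

static void *alloc_buf(size_t n) { return malloc(n); }
static void free_buf(void *p) { free(p); }
static int init_alg(void) { return 0; }	/* pretend registration succeeds */

static int client_init(void **cmdbuf, void **keybuf)
{
	int ret;

	*cmdbuf = alloc_buf(4096);
	if (!*cmdbuf)
		return -1;

	*keybuf = alloc_buf(4096);	/* the buffer this patch adds */
	if (!*keybuf) {
		ret = -1;
		goto cmdbuf_put;	/* only cmdbuf exists so far */
	}

	ret = init_alg();
	if (ret)
		goto keybuf_put;	/* both buffers need releasing */

	return 0;

keybuf_put:
	free_buf(*keybuf);
cmdbuf_put:
	free_buf(*cmdbuf);
	return ret;
}

int main(void)
{
	void *c, *k;

	if (client_init(&c, &k))
		return 1;
	free_buf(k);
	free_buf(c);
	return 0;
}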

@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
- * Crypto driver for NVIDIA Security Engine for block cipher operations.
- *
- * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+ * Crypto driver to handle SM4 block cipher algorithms using NVIDIA Security Engine.
*/
#include <nvidia/conftest.h>
@@ -245,7 +244,7 @@ static int tegra_sm4_do_one_req(struct crypto_engine *engine, void *areq)
size = tegra_sm4_prep_cmd(se, cpuvaddr, iv, len, rctx->datbuf.addr,
config, crypto_config);
-ret = tegra_se_host1x_submit(se, size);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
/* Copy the result */
dst_nents = sg_nents(req->dst);
@@ -842,7 +841,7 @@ static int tegra_sm4_gcm_do_gmac(struct tegra_sm4_gcm_ctx *ctx, struct tegra_sm4
size = tegra_sm4_gmac_prep_cmd(se, cpuvaddr, rctx);
-return tegra_se_host1x_submit(se, size);
+return tegra_se_host1x_submit(se, se->cmdbuf, size);
}
static int tegra_sm4_gcm_do_crypt(struct tegra_sm4_gcm_ctx *ctx, struct tegra_sm4_gcm_reqctx *rctx)
@@ -860,7 +859,7 @@ static int tegra_sm4_gcm_do_crypt(struct tegra_sm4_gcm_ctx *ctx, struct tegra_sm
/* Prepare command and submit */
size = tegra_sm4_gcm_crypt_prep_cmd(se, cpuvaddr, rctx);
-ret = tegra_se_host1x_submit(se, size);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
return ret;
@@ -883,7 +882,7 @@ static int tegra_sm4_gcm_do_final(struct tegra_sm4_gcm_ctx *ctx, struct tegra_sm
/* Prepare command and submit */
size = tegra_sm4_gcm_prep_final_cmd(se, cpuvaddr, rctx);
-ret = tegra_se_host1x_submit(se, size);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
return ret;
@@ -912,7 +911,7 @@ static int tegra_sm4_gcm_hw_verify(struct tegra_sm4_gcm_ctx *ctx, struct tegra_s
/* Prepare command and submit */
size = tegra_sm4_gcm_prep_final_cmd(se, cpuvaddr, rctx);
-ret = tegra_se_host1x_submit(se, size);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
return ret;
@@ -1228,7 +1227,7 @@ static int tegra_sm4_cmac_do_update(struct ahash_request *req)
size = tegra_sm4_cmac_prep_cmd(se, se->cmdbuf->addr, rctx);
-return tegra_se_host1x_submit(se, size);
+return tegra_se_host1x_submit(se, se->cmdbuf, size);
}
static int tegra_sm4_cmac_do_final(struct ahash_request *req)
@@ -1256,7 +1255,7 @@ static int tegra_sm4_cmac_do_final(struct ahash_request *req)
/* Prepare command and submit */
size = tegra_sm4_cmac_prep_cmd(se, se->cmdbuf->addr, rctx);
-ret = tegra_se_host1x_submit(se, size);
+ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
goto out;


@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only
- * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Header file for NVIDIA Security Engine driver.
*/
@@ -656,6 +656,7 @@ struct tegra_se {
struct host1x_channel *channel;
struct tegra_se_regcfg *regcfg;
struct tegra_se_cmdbuf *cmdbuf;
+struct tegra_se_cmdbuf *keybuf;
struct crypto_engine *engine;
struct host1x_syncpt *syncpt;
struct device *dev;
@@ -770,7 +771,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid);
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
unsigned int tegra_key_get_idx(struct tegra_se *se, u32 keyid);
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
u32 tegra_kds_get_id(void);
void tegra_kds_free_id(u32 keyid);