crypto: tegra: Use separate buffer for setkey

Use a separate buffer for the setkey operation. setkey() is called
asynchronously to the crypto engine APIs. This causes concurrency issues
in the tegra engine operations.

Bug 4883011

Change-Id: I1ec7d0a041ee8a0a0bf350d2f3e9915091993034
Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3263282
Tested-by: Brad Griffis <bgriffis@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Brad Griffis <bgriffis@nvidia.com>
This commit is contained in:
Akhil R
2024-12-06 10:51:39 +05:30
committed by mobile promotions
parent efeb1061bb
commit 2f3c077115
5 changed files with 34 additions and 21 deletions

View File

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/* /*
* Crypto driver to handle block cipher algorithms using NVIDIA Security Engine. * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
*/ */
@@ -292,7 +292,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
/* Prepare the command and submit for execution */ /* Prepare the command and submit for execution */
cmdlen = tegra_aes_prep_cmd(se, rctx); cmdlen = tegra_aes_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, cmdlen); ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* Copy the result */ /* Copy the result */
tegra_aes_update_iv(req, ctx); tegra_aes_update_iv(req, ctx);
@@ -755,7 +755,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
cmdlen = tegra_gmac_prep_cmd(se, rctx); cmdlen = tegra_gmac_prep_cmd(se, rctx);
return tegra_se_host1x_submit(se, cmdlen); return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
} }
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -772,7 +772,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
/* Prepare command and submit */ /* Prepare command and submit */
cmdlen = tegra_gcm_crypt_prep_cmd(se, rctx); cmdlen = tegra_gcm_crypt_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, cmdlen); ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret) if (ret)
return ret; return ret;
@@ -795,7 +795,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
/* Prepare command and submit */ /* Prepare command and submit */
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, cmdlen); ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret) if (ret)
return ret; return ret;
@@ -923,7 +923,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
/* Prepare command and submit */ /* Prepare command and submit */
cmdlen = tegra_cbcmac_prep_cmd(se, rctx); cmdlen = tegra_cbcmac_prep_cmd(se, rctx);
return tegra_se_host1x_submit(se, cmdlen); return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
} }
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1130,7 +1130,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
/* Prepare command and submit */ /* Prepare command and submit */
cmdlen = tegra_ctr_prep_cmd(se, rctx); cmdlen = tegra_ctr_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, cmdlen); ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret) if (ret)
return ret; return ret;
@@ -1600,7 +1600,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
cmdlen = tegra_cmac_prep_cmd(se, rctx); cmdlen = tegra_cmac_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, cmdlen); ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* /*
* If this is not the final update, copy the intermediate results * If this is not the final update, copy the intermediate results
* from the registers so that it can be used in the next 'update' * from the registers so that it can be used in the next 'update'
@@ -1645,7 +1645,7 @@ static int tegra_cmac_do_final(struct ahash_request *req)
/* Prepare command and submit */ /* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(se, rctx); cmdlen = tegra_cmac_prep_cmd(se, rctx);
ret = tegra_se_host1x_submit(se, cmdlen); ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret) if (ret)
goto out; goto out;

View File

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/* /*
* Crypto driver to handle HASH algorithms using NVIDIA Security Engine. * Crypto driver to handle HASH algorithms using NVIDIA Security Engine.
*/ */
@@ -366,7 +366,7 @@ static int tegra_sha_do_update(struct ahash_request *req)
size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx); size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(ctx->se, size); ret = tegra_se_host1x_submit(ctx->se, ctx->se->cmdbuf, size);
/* /*
* If this is not the final update, copy the intermediate results * If this is not the final update, copy the intermediate results
@@ -409,7 +409,7 @@ static int tegra_sha_do_final(struct ahash_request *req)
} }
size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, size); ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret) if (ret)
goto out; goto out;

View File

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/* /*
* Crypto driver file to manage keys of NVIDIA Security Engine. * Crypto driver file to manage keys of NVIDIA Security Engine.
*/ */
@@ -115,11 +115,15 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
u32 keylen, u16 slot, u32 alg) u32 keylen, u16 slot, u32 alg)
{ {
const u32 *keyval = (u32 *)key; const u32 *keyval = (u32 *)key;
u32 *addr = se->cmdbuf->addr, size; u32 *addr = se->keybuf->addr, size;
int ret;
mutex_lock(&kslt_lock);
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg); size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
ret = tegra_se_host1x_submit(se, se->keybuf, size);
mutex_unlock(&kslt_lock);
return tegra_se_host1x_submit(se, size); return ret;
} }
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)

View File

@@ -143,7 +143,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
return cmdbuf; return cmdbuf;
} }
int tegra_se_host1x_submit(struct tegra_se *se, u32 size) int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
{ {
struct host1x_job *job; struct host1x_job *job;
int ret; int ret;
@@ -162,9 +162,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
job->engine_fallback_streamid = se->stream_id; job->engine_fallback_streamid = se->stream_id;
job->engine_streamid_offset = SE_STREAM_ID; job->engine_streamid_offset = SE_STREAM_ID;
se->cmdbuf->words = size; cmdbuf->words = size;
host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0); host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
ret = host1x_job_pin(job, se->dev); ret = host1x_job_pin(job, se->dev);
if (ret) { if (ret) {
@@ -222,14 +222,22 @@ static int tegra_se_client_init(struct host1x_client *client)
goto syncpt_put; goto syncpt_put;
} }
se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
if (!se->cmdbuf) {
ret = -ENOMEM;
goto cmdbuf_put;
}
ret = se->hw->init_alg(se); ret = se->hw->init_alg(se);
if (ret) { if (ret) {
dev_err(se->dev, "failed to register algorithms\n"); dev_err(se->dev, "failed to register algorithms\n");
goto cmdbuf_put; goto keybuf_put;
} }
return 0; return 0;
keybuf_put:
tegra_se_cmdbuf_put(&se->keybuf->bo);
cmdbuf_put: cmdbuf_put:
tegra_se_cmdbuf_put(&se->cmdbuf->bo); tegra_se_cmdbuf_put(&se->cmdbuf->bo);
syncpt_put: syncpt_put:

View File

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/* /*
* Header file for NVIDIA Security Engine driver. * Header file for NVIDIA Security Engine driver.
*/ */
@@ -442,6 +442,7 @@ struct tegra_se {
struct host1x_client client; struct host1x_client client;
struct host1x_channel *channel; struct host1x_channel *channel;
struct tegra_se_cmdbuf *cmdbuf; struct tegra_se_cmdbuf *cmdbuf;
struct tegra_se_cmdbuf *keybuf;
struct crypto_engine *engine; struct crypto_engine *engine;
struct host1x_syncpt *syncpt; struct host1x_syncpt *syncpt;
struct device *dev; struct device *dev;
@@ -524,7 +525,7 @@ void tegra_deinit_hash(struct tegra_se *se);
int tegra_key_submit(struct tegra_se *se, const u8 *key, int tegra_key_submit(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid); u32 keylen, u32 alg, u32 *keyid);
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
int tegra_se_host1x_submit(struct tegra_se *se, u32 size); int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
/* HOST1x OPCODES */ /* HOST1x OPCODES */
static inline u32 host1x_opcode_setpayload(unsigned int payload) static inline u32 host1x_opcode_setpayload(unsigned int payload)