mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
synced 2025-12-24 10:11:26 +03:00

Compare commits: jetson_36. ... jetson_36.

11 Commits
| SHA1 |
|---|
| b27a1a79ee |
| e488812038 |
| cfe6242c8c |
| efa698bed8 |
| e7bf6f1444 |
| e228deeef1 |
| 36a712b801 |
| 087418781c |
| fe0a030fee |
| 7ad4c09866 |
| f41b74b8c3 |
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
  */
@@ -33,6 +33,9 @@ struct tegra_aes_ctx {
 	u32 ivsize;
 	u32 key1_id;
 	u32 key2_id;
+	u32 keylen;
+	u8 key1[AES_MAX_KEY_SIZE];
+	u8 key2[AES_MAX_KEY_SIZE];
 };

 struct tegra_aes_reqctx {
@@ -42,6 +45,8 @@ struct tegra_aes_reqctx {
 	u32 crypto_config;
 	u32 len;
 	u32 *iv;
+	u32 keylen;
+	u8 key[AES_MAX_KEY_SIZE];
 };

 struct tegra_aead_ctx {
@@ -53,6 +58,7 @@ struct tegra_aead_ctx {
 	u32 alg;
 	u32 keylen;
 	u32 key_id;
+	u8 key[AES_MAX_KEY_SIZE];
 };

 struct tegra_aead_reqctx {
@@ -64,8 +70,8 @@ struct tegra_aead_reqctx {
 	unsigned int cryptlen;
 	unsigned int authsize;
 	bool encrypt;
-	u32 config;
 	u32 crypto_config;
+	u32 config;
 	u32 key_id;
 	u32 iv[4];
 	u8 authdata[16];
@@ -78,6 +84,8 @@ struct tegra_cmac_ctx {
 	struct tegra_se *se;
 	unsigned int alg;
 	u32 key_id;
+	u32 keylen;
+	u8 key[AES_MAX_KEY_SIZE];
 	struct crypto_shash *fallback_tfm;
 };

@@ -92,6 +100,8 @@ struct tegra_cmac_reqctx {
 	u32 config;
 	u32 key_id;
 	u32 *iv;
+	u32 keylen;
+	u8 key[AES_MAX_KEY_SIZE];
 	u32 result[CMAC_RESULT_REG_COUNT];
 };

@@ -269,7 +279,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
 	struct tegra_se *se = ctx->se;
-	unsigned int cmdlen;
+	unsigned int cmdlen, key1_id, key2_id;
 	int ret;

 	/* Set buffer size as a multiple of AES_BLOCK_SIZE*/
@@ -279,8 +289,10 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 	if (!rctx->datbuf.buf)
 		return -ENOMEM;

-	rctx->iv = (u32 *)req->iv;
+	rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
 	rctx->len = req->cryptlen;
+	key1_id = ctx->key1_id;
+	key2_id = ctx->key2_id;

 	/* Pad input to AES Block size */
 	if (ctx->alg != SE_ALG_XTS) {
@@ -290,18 +302,48 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)

 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);

+	rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
+	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
+
+	if (!key1_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
+						    ctx->keylen, ctx->alg, &key1_id);
+		if (ret)
+			goto out;
+	}
+
+	rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
+
+	if (ctx->alg == SE_ALG_XTS) {
+		if (!key2_id) {
+			ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
+							    ctx->keylen, ctx->alg, &key2_id);
+			if (ret)
+				goto out;
+		}
+
+		rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
+	}
+
 	/* Prepare the command and submit for execution */
 	cmdlen = tegra_aes_prep_cmd(se, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);

 	/* Copy the result */
 	tegra_aes_update_iv(req, ctx);
 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);

+out:
 	/* Free the buffer */
 	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
 			  rctx->datbuf.buf, rctx->datbuf.addr);

+	if (tegra_key_is_reserved(key1_id))
+		tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
+
+	if (tegra_key_is_reserved(key2_id))
+		tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
+
 	crypto_finalize_skcipher_request(se->engine, req, ret);

 	return 0;
@@ -327,6 +369,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
 	ctx->se = se_alg->se_dev;
 	ctx->key1_id = 0;
 	ctx->key2_id = 0;
+	ctx->keylen = 0;

 	algname = crypto_tfm_alg_name(&tfm->base);
 	ret = se_algname_to_algid(algname);
@@ -361,13 +404,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm,
 			    const u8 *key, u32 keylen)
 {
 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int ret;

 	if (aes_check_keylen(keylen)) {
 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
 		return -EINVAL;
 	}

-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+	if (ret) {
+		ctx->keylen = keylen;
+		memcpy(ctx->key1, key, keylen);
+	}
+
+	return 0;
 }

 static int tegra_xts_setkey(struct crypto_skcipher *tfm,
@@ -385,11 +435,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm,

 	ret = tegra_key_submit(ctx->se, key, len,
 			       ctx->alg, &ctx->key1_id);
-	if (ret)
-		return ret;
+	if (ret) {
+		ctx->keylen = len;
+		memcpy(ctx->key1, key, len);
+	}

-	return tegra_key_submit(ctx->se, key + len, len,
+	ret = tegra_key_submit(ctx->se, key + len, len,
 			       ctx->alg, &ctx->key2_id);
+	if (ret) {
+		ctx->keylen = len;
+		memcpy(ctx->key2, key + len, len);
+	}
+
+	return 0;
 }
@@ -460,12 +516,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
 		return 0;

 	rctx->encrypt = encrypt;
-	rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
-	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
-	rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
-
-	if (ctx->key2_id)
-		rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);

 	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
 }
@@ -751,11 +801,11 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct

 	rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
-			      SE_AES_KEY_INDEX(ctx->key_id);
+			      SE_AES_KEY_INDEX(rctx->key_id);

 	cmdlen = tegra_gmac_prep_cmd(se, rctx);

-	return tegra_se_host1x_submit(se, cmdlen);
+	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 }

 static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -772,7 +822,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc

 	/* Prepare command and submit */
 	cmdlen = tegra_gcm_crypt_prep_cmd(se, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		return ret;

@@ -791,11 +841,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc

 	rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
-			      SE_AES_KEY_INDEX(ctx->key_id);
+			      SE_AES_KEY_INDEX(rctx->key_id);

 	/* Prepare command and submit */
 	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		return ret;

@@ -918,12 +968,12 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
 	rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
 						      rctx->encrypt) |
-						      SE_AES_KEY_INDEX(ctx->key_id);
+						      SE_AES_KEY_INDEX(rctx->key_id);

 	/* Prepare command and submit */
 	cmdlen = tegra_cbcmac_prep_cmd(se, rctx);

-	return tegra_se_host1x_submit(se, cmdlen);
+	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 }

 static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1105,7 +1155,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx

 	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
-			      SE_AES_KEY_INDEX(ctx->key_id);
+			      SE_AES_KEY_INDEX(rctx->key_id);

 	/* Copy authdata in the top of buffer for encryption/decryption */
 	if (rctx->encrypt)
@@ -1130,7 +1180,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx

 	/* Prepare command and submit */
 	cmdlen = tegra_ctr_prep_cmd(se, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		return ret;

@@ -1175,6 +1225,7 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 	rctx->dst_sg = req->dst;
 	rctx->assoclen = req->assoclen;
 	rctx->authsize = crypto_aead_authsize(tfm);
+	rctx->key_id = ctx->key_id;

 	if (rctx->encrypt)
 		rctx->cryptlen = req->cryptlen;
@@ -1200,6 +1251,13 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 	if (ret)
 		goto out;

+	if (!ctx->key_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+						    ctx->keylen, ctx->alg, &rctx->key_id);
+		if (ret)
+			goto out;
+	}
+
 	if (rctx->encrypt) {

 		/* CBC MAC Operation */
@@ -1231,6 +1289,9 @@ outbuf_err:
 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
 			  rctx->inbuf.buf, rctx->inbuf.addr);

+	if (tegra_key_is_reserved(rctx->key_id))
+		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
 	crypto_finalize_aead_request(ctx->se->engine, req, ret);

 	return 0;
@@ -1248,6 +1309,7 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 	rctx->dst_sg = req->dst;
 	rctx->assoclen = req->assoclen;
 	rctx->authsize = crypto_aead_authsize(tfm);
+	rctx->key_id = ctx->key_id;

 	if (rctx->encrypt)
 		rctx->cryptlen = req->cryptlen;
@@ -1273,6 +1335,13 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
 	rctx->iv[3] = (1 << 24);

+	if (!ctx->key_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+						    ctx->keylen, ctx->alg, &rctx->key_id);
+		if (ret)
+			goto out;
+	}
+
 	/* If there is associated data perform GMAC operation */
 	if (rctx->assoclen) {
 		ret = tegra_gcm_do_gmac(ctx, rctx);
@@ -1303,7 +1372,9 @@ outbuf_err:
 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
 			  rctx->inbuf.buf, rctx->inbuf.addr);

 	/* Finalize the request if there are no errors */
+	if (tegra_key_is_reserved(rctx->key_id))
+		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
 	crypto_finalize_aead_request(ctx->se->engine, req, ret);

 	return 0;
@@ -1329,6 +1400,7 @@ static int tegra_ccm_cra_init(struct crypto_aead *tfm)

 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
+	ctx->keylen = 0;

 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
@@ -1367,6 +1439,7 @@ static int tegra_gcm_cra_init(struct crypto_aead *tfm)

 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
+	ctx->keylen = 0;

 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
@@ -1453,13 +1526,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm,
 			     const u8 *key, u32 keylen)
 {
 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	int ret;

 	if (aes_check_keylen(keylen)) {
 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
 		return -EINVAL;
 	}

-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret) {
+		ctx->keylen = keylen;
+		memcpy(ctx->key, key, keylen);
+	}
+
+	return 0;
 }

 static unsigned int tegra_cmac_prep_cmd(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
@@ -1558,7 +1638,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
 	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
 	rctx->total_len += rctx->datbuf.size;
 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
-	rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
+	rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);

 	/*
 	 * Keep one block and residue bytes in residue and
@@ -1572,6 +1652,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
 		return 0;
 	}

+	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+					      &rctx->datbuf.addr, GFP_KERNEL);
+	if (!rctx->datbuf.buf)
+		return -ENOMEM;
+
 	/* Copy the previous residue first */
 	if (rctx->residue.size)
 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -1595,7 +1680,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)

 	cmdlen = tegra_cmac_prep_cmd(se, rctx);

-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	/*
 	 * If this is not the final update, copy the intermediate results
 	 * from the registers so that it can be used in the next 'update'
@@ -1618,17 +1703,29 @@ static int tegra_cmac_do_final(struct ahash_request *req)

 	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
 		return crypto_shash_tfm_digest(ctx->fallback_tfm,
-					rctx->datbuf.buf, 0, req->result);
+					NULL, 0, req->result);
 	}

-	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
 	rctx->datbuf.size = rctx->residue.size;
 	rctx->total_len += rctx->residue.size;
 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);

+	if (rctx->residue.size) {
+		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+						      &rctx->datbuf.addr, GFP_KERNEL);
+		if (!rctx->datbuf.buf) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+	}
+
 	/* Prepare command and submit */
 	cmdlen = tegra_cmac_prep_cmd(se, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		goto out;

@@ -1640,8 +1737,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
 		writel(0, se->base + se->hw->regs->result + (i * 4));

 out:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
-			  rctx->datbuf.buf, rctx->datbuf.addr);
+	if (rctx->residue.size)
+		dma_free_coherent(se->dev, rctx->datbuf.size,
+				  rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
 			  rctx->residue.buf, rctx->residue.addr);
 	return ret;
@@ -1654,7 +1753,14 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct tegra_se *se = ctx->se;
-	int ret;
+	int ret = -EINVAL;
+
+	if (!ctx->key_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+						    ctx->keylen, ctx->alg, &rctx->key_id);
+		if (ret)
+			goto out;
+	}

 	if (rctx->task & SHA_UPDATE) {
 		ret = tegra_cmac_do_update(req);
@@ -1665,6 +1771,9 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
 		ret = tegra_cmac_do_final(req);
 		rctx->task &= ~SHA_FINAL;
 	}
+out:
+	if (tegra_key_is_reserved(rctx->key_id))
+		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);

 	crypto_finalize_hash_request(se->engine, req, ret);

@@ -1710,6 +1819,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm)

 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
+	ctx->keylen = 0;

 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
@@ -1757,34 +1867,24 @@ static int tegra_cmac_init(struct ahash_request *req)
 	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
 					       &rctx->residue.addr, GFP_KERNEL);
 	if (!rctx->residue.buf)
-		goto resbuf_fail;
+		return -ENOMEM;

 	rctx->residue.size = 0;

-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
-					      &rctx->datbuf.addr, GFP_KERNEL);
-	if (!rctx->datbuf.buf)
-		goto datbuf_fail;
-
 	rctx->datbuf.size = 0;
+	rctx->key_id = ctx->key_id;

 	/* Clear any previous result */
 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
 		writel(0, se->base + se->hw->regs->result + (i * 4));

 	return 0;
-
-datbuf_fail:
-	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
-			  rctx->residue.addr);
-resbuf_fail:
-	return -ENOMEM;
 }

 static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;

 	if (aes_check_keylen(keylen)) {
 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
@@ -1794,7 +1894,13 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 	if (ctx->fallback_tfm)
 		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);

-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret) {
+		ctx->keylen = keylen;
+		memcpy(ctx->key, key, keylen);
+	}
+
+	return 0;
 }

 static int tegra_cmac_update(struct ahash_request *req)
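Taken together, the AES/CMAC hunks above implement a deferred-keyslot scheme: setkey caches the raw key in the transform context when no hardware keyslot is free, and the request path borrows a reserved slot for one operation and scrubs it afterwards. The sketch below condenses that flow using only symbols that appear in the diff (`tegra_key_submit_reserved_aes`, `tegra_key_is_reserved`, `tegra_key_invalidate_reserved`, `SE_AES_KEY_INDEX`); the wrapper function name itself is hypothetical.

```c
/*
 * Minimal sketch of the fallback path added above. If no dedicated keyslot
 * was claimed at setkey time (key1_id == 0), the key bytes cached in ctx
 * are loaded into the shared reserved slot for this one request only.
 */
static int aes_submit_one(struct tegra_aes_ctx *ctx, struct tegra_aes_reqctx *rctx)
{
	u32 key1_id = ctx->key1_id;
	int ret = 0;

	if (!key1_id) {
		/* No dedicated slot: borrow the reserved AES slot */
		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
						    ctx->keylen, ctx->alg, &key1_id);
		if (ret)
			goto out;
	}

	rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
	/* ... build and submit the host1x command buffer here ... */

out:
	/* Reserved slots are shared, so zero them once the request is done */
	if (tegra_key_is_reserved(key1_id))
		tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);

	return ret;
}
```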
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Crypto driver to handle HASH algorithms using NVIDIA Security Engine.
  */
@@ -335,6 +335,11 @@ static int tegra_sha_do_update(struct ahash_request *req)
 		return 0;
 	}

+	rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size,
+					      &rctx->datbuf.addr, GFP_KERNEL);
+	if (!rctx->datbuf.buf)
+		return -ENOMEM;
+
 	/* Copy the previous residue first */
 	if (rctx->residue.size)
 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -361,7 +366,7 @@ static int tegra_sha_do_update(struct ahash_request *req)

 	size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);

-	ret = tegra_se_host1x_submit(ctx->se, size);
+	ret = tegra_se_host1x_submit(ctx->se, ctx->se->cmdbuf, size);

 	/*
 	 * If this is not the final update, copy the intermediate results
@@ -371,6 +376,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
 	if (!(rctx->task & SHA_FINAL))
 		tegra_sha_copy_hash_result(ctx->se, rctx);

+	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
+			  rctx->datbuf.buf, rctx->datbuf.addr);
+
 	return ret;
 }

@@ -383,16 +391,25 @@ static int tegra_sha_do_final(struct ahash_request *req)
 	u32 *cpuvaddr = se->cmdbuf->addr;
 	int size, ret = 0;

-	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
 	rctx->datbuf.size = rctx->residue.size;
 	rctx->total_len += rctx->residue.size;

 	rctx->config = tegra_sha_get_config(rctx->alg) |
 		       SE_SHA_DST_MEMORY;

-	size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+	if (rctx->residue.size) {
+		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+						      &rctx->datbuf.addr, GFP_KERNEL);
+		if (!rctx->datbuf.buf) {
+			ret = -ENOMEM;
+			goto out_free;
+		}

-	ret = tegra_se_host1x_submit(se, size);
+		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+	}
+
+	size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
 	if (ret)
 		goto out;

@@ -400,8 +417,10 @@ static int tegra_sha_do_final(struct ahash_request *req)
 	memcpy(req->result, rctx->digest.buf, rctx->digest.size);

 out:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
-			  rctx->datbuf.buf, rctx->datbuf.addr);
+	if (rctx->residue.size)
+		dma_free_coherent(se->dev, rctx->datbuf.size,
+				  rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
 			  rctx->residue.buf, rctx->residue.addr);
 	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
@@ -540,19 +559,11 @@ static int tegra_sha_init(struct ahash_request *req)
 	if (!rctx->residue.buf)
 		goto resbuf_fail;

-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
-					      &rctx->datbuf.addr, GFP_KERNEL);
-	if (!rctx->datbuf.buf)
-		goto datbuf_fail;
-
 	return 0;

-datbuf_fail:
-	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
-			  rctx->residue.addr);
 resbuf_fail:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf,
-			  rctx->datbuf.addr);
+	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+			  rctx->digest.addr);
 digbuf_fail:
 	return -ENOMEM;
 }
@@ -573,13 +584,18 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;

 	if (aes_check_keylen(keylen))
 		return tegra_hmac_fallback_setkey(ctx, key, keylen);

 	ctx->fallback = false;

-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret)
+		return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
+	return 0;
 }

 static int tegra_sha_update(struct ahash_request *req)
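The hash hunks above replace the fixed `SE_SHA_BUFLEN` bounce buffer, allocated once in init, with a DMA buffer sized per operation and freed in the same function that submits it. A minimal sketch of that lifetime pattern, assuming the `tegra_sha_reqctx` fields shown above (the wrapper name is hypothetical):

```c
/*
 * Sketch: allocate a DMA bounce buffer sized for this request only,
 * submit, then release it before returning. rctx->datbuf.size must be
 * set by the caller before this runs.
 */
static int sha_process_chunk(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
{
	u32 *cpuvaddr = se->cmdbuf->addr;
	int size, ret;

	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf)
		return -ENOMEM;

	/* ... copy residue plus new data in, then build the command ... */
	size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, size);

	/* Buffer lives exactly as long as the operation that needed it */
	dma_free_coherent(se->dev, rctx->datbuf.size,
			  rctx->datbuf.buf, rctx->datbuf.addr);
	return ret;
}
```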
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Crypto driver file to manage keys of NVIDIA Security Engine.
  */
@@ -115,11 +115,15 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
 			    u32 keylen, u16 slot, u32 alg)
 {
 	const u32 *keyval = (u32 *)key;
-	u32 *addr = se->cmdbuf->addr, size;
+	u32 *addr = se->keybuf->addr, size;
+	int ret;

+	mutex_lock(&kslt_lock);
 	size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
+	ret = tegra_se_host1x_submit(se, se->keybuf, size);
+	mutex_unlock(&kslt_lock);

-	return tegra_se_host1x_submit(se, size);
+	return ret;
 }

 void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
@@ -143,7 +147,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3
 	if (!tegra_key_in_kslt(*keyid)) {
 		*keyid = tegra_keyslot_alloc();
 		if (!(*keyid)) {
-			dev_err(se->dev, "failed to allocate key slot\n");
+			dev_dbg(se->dev, "failed to allocate key slot\n");
 			return -ENOMEM;
 		}
 	}
@@ -154,3 +158,20 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3

 	return 0;
 }
+
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg)
+{
+	u8 zkey[AES_MAX_KEY_SIZE] = {0};
+
+	if (!keyid)
+		return;
+
+	/* Overwrite the key with 0s */
+	tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
+}
+
+inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+				     u32 keylen, u32 alg, u32 *keyid)
+{
+	return tegra_key_insert(se, key, keylen, *keyid, alg);
+}
@@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
 	return cmdbuf;
 }

-int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
 {
 	struct host1x_job *job;
 	int ret;
@@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
 	job->engine_fallback_streamid = se->stream_id;
 	job->engine_streamid_offset = SE_STREAM_ID;

-	se->cmdbuf->words = size;
+	cmdbuf->words = size;

-	host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+	host1x_job_add_gather(job, &cmdbuf->bo, size, 0);

 	ret = host1x_job_pin(job, se->dev);
 	if (ret) {
@@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
 		goto syncpt_put;
 	}

+	se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+	if (!se->cmdbuf) {
+		ret = -ENOMEM;
+		goto cmdbuf_put;
+	}
+
 	ret = se->hw->init_alg(se);
 	if (ret) {
 		dev_err(se->dev, "failed to register algorithms\n");
-		goto cmdbuf_put;
+		goto keybuf_put;
 	}

 	return 0;

+keybuf_put:
+	tegra_se_cmdbuf_put(&se->keybuf->bo);
 cmdbuf_put:
 	tegra_se_cmdbuf_put(&se->cmdbuf->bo);
 syncpt_put:
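With `tegra_se_host1x_submit()` now taking the command buffer explicitly, callers can stage key-load commands in the dedicated `se->keybuf` while cipher and hash commands keep using `se->cmdbuf`. A sketch of that split, assuming the signatures shown above; the reading that this prevents a concurrent cipher submission from overwriting in-flight key-load command words (together with the `kslt_lock` serialization in the key file) is an inference from the diff, not a statement from the source:

```c
/* Hypothetical callers illustrating which buffer each path submits. */
static int submit_key_load(struct tegra_se *se, u32 size)
{
	/* key-load command words were staged in se->keybuf->addr */
	return tegra_se_host1x_submit(se, se->keybuf, size);
}

static int submit_cipher(struct tegra_se *se, u32 size)
{
	/* cipher command words were staged in se->cmdbuf->addr */
	return tegra_se_host1x_submit(se, se->cmdbuf, size);
}
```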
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Header file for NVIDIA Security Engine driver.
  */
@@ -342,7 +342,9 @@
 #define SE_CRYPTO_CTR_REG_COUNT		4
 #define SE_MAX_KEYSLOT			15
 #define SE_MAX_MEM_ALLOC		SZ_4M
-#define SE_SHA_BUFLEN			0x2000
+
+#define TEGRA_AES_RESERVED_KSLT		14
+#define TEGRA_XTS_RESERVED_KSLT		15

 #define SHA_FIRST	BIT(0)
 #define SHA_UPDATE	BIT(1)
@@ -443,6 +445,7 @@ struct tegra_se {
 	struct host1x_client client;
 	struct host1x_channel *channel;
 	struct tegra_se_cmdbuf *cmdbuf;
+	struct tegra_se_cmdbuf *keybuf;
 	struct crypto_engine *engine;
 	struct host1x_syncpt *syncpt;
 	struct device *dev;
@@ -525,7 +528,31 @@ void tegra_deinit_hash(struct tegra_se *se);
 int tegra_key_submit(struct tegra_se *se, const u8 *key,
 		     u32 keylen, u32 alg, u32 *keyid);
 void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+			      u32 keylen, u32 alg, u32 *keyid);
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg);
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
+
+static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key,
+						u32 keylen, u32 alg, u32 *keyid)
+{
+	*keyid = TEGRA_AES_RESERVED_KSLT;
+	return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key,
+						u32 keylen, u32 alg, u32 *keyid)
+{
+	*keyid = TEGRA_XTS_RESERVED_KSLT;
+	return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline bool tegra_key_is_reserved(u32 keyid)
+{
+	return ((keyid == TEGRA_AES_RESERVED_KSLT) ||
+		(keyid == TEGRA_XTS_RESERVED_KSLT));
+}

 /* HOST1x OPCODES */
 static inline u32 host1x_opcode_setpayload(unsigned int payload)
@@ -1,8 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * NVIDIA Tegra CSI Device
- *
- * Copyright (c) 2015-2024, NVIDIA CORPORATION. All rights reserved.
  */

 #include <nvidia/conftest.h>
@@ -687,7 +686,7 @@ static int tegra_csi_set_format(struct v4l2_subdev *subdev,
 }

 static int tegra_csi_g_frame_interval(struct v4l2_subdev *sd,
-#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_FRAME_INTERVAL)
+#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
 				      struct v4l2_subdev_state *sd_state,
 #endif
 				      struct v4l2_subdev_frame_interval *vfi)
@@ -721,13 +720,13 @@ static int tegra_csi_enum_mbus_code(struct v4l2_subdev *sd,
 static struct v4l2_subdev_video_ops tegra_csi_video_ops = {
 	.s_stream = tegra_csi_s_stream,
 	.g_input_status = tegra_csi_g_input_status,
-#if !defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_FRAME_INTERVAL)
+#if !defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
 	.g_frame_interval = tegra_csi_g_frame_interval,
 #endif
 };

 static struct v4l2_subdev_pad_ops tegra_csi_pad_ops = {
-#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_FRAME_INTERVAL)
+#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
 	.get_frame_interval = tegra_csi_g_frame_interval,
 #endif
 	.get_fmt = tegra_csi_get_format,
@@ -1,9 +1,11 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * tegracam_v4l2 - tegra camera framework for v4l2 support
- *
- * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
  */

 #include <nvidia/conftest.h>

 #include <linux/types.h>
 #include <media/tegra-v4l2-camera.h>
 #include <media/tegracam_core.h>
@@ -112,9 +114,30 @@ static int v4l2sd_g_input_status(struct v4l2_subdev *sd, u32 *status)
 	return 0;
 }

+static int cam_g_frame_interval(struct v4l2_subdev *sd,
+#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
+				struct v4l2_subdev_state *sd_state,
+#endif
+				struct v4l2_subdev_frame_interval *ival)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct camera_common_data *s_data = to_camera_common_data(&client->dev);
+
+	if (!s_data)
+		return -EINVAL;
+
+	ival->interval.denominator = s_data->frmfmt[s_data->mode_prop_idx].framerates[0];
+	ival->interval.numerator = 1;
+	return 0;
+}
+
 static struct v4l2_subdev_video_ops v4l2sd_video_ops = {
 	.s_stream = v4l2sd_stream,
 	.g_input_status = v4l2sd_g_input_status,
+#if !defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
+	.g_frame_interval = cam_g_frame_interval,
+	.s_frame_interval = cam_g_frame_interval,
+#endif
 };

 static struct v4l2_subdev_core_ops v4l2sd_core_ops = {
@@ -161,6 +184,10 @@ static int v4l2sd_set_fmt(struct v4l2_subdev *sd,
 }

 static struct v4l2_subdev_pad_ops v4l2sd_pad_ops = {
+#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
+	.get_frame_interval = cam_g_frame_interval,
+	.set_frame_interval = cam_g_frame_interval,
+#endif
 	.set_fmt = v4l2sd_set_fmt,
 	.get_fmt = v4l2sd_get_fmt,
 	.enum_mbus_code = camera_common_enum_mbus_code,
@@ -2281,10 +2281,32 @@ static long tegra_channel_default_ioctl(struct file *file, void *fh,
 	return ret;
 }

+/* Implemented vidioc_s_parm and vidioc_g_parm ioctl to support multiple frame
+ * rates */
+static int tegra_channel_s_parm(struct file *file, void *fh,
+				struct v4l2_streamparm *a)
+{
+	struct tegra_channel *chan = video_drvdata(file);
+	struct v4l2_subdev *sd = chan->subdev_on_csi;
+
+	return v4l2_s_parm_cap(chan->video, sd, a);
+}
+
+static int tegra_channel_g_parm(struct file *file, void *fh,
+				struct v4l2_streamparm *a)
+{
+	struct tegra_channel *chan = video_drvdata(file);
+	struct v4l2_subdev *sd = chan->subdev_on_csi;
+
+	return v4l2_g_parm_cap(chan->video, sd, a);
+}
+
 static const struct v4l2_ioctl_ops tegra_channel_ioctl_ops = {
 	.vidioc_querycap = tegra_channel_querycap,
 	.vidioc_enum_framesizes = tegra_channel_enum_framesizes,
 	.vidioc_enum_frameintervals = tegra_channel_enum_frameintervals,
+	.vidioc_s_parm = tegra_channel_s_parm,
+	.vidioc_g_parm = tegra_channel_g_parm,
 	.vidioc_enum_fmt_vid_cap = tegra_channel_enum_format,
 	.vidioc_g_fmt_vid_cap = tegra_channel_get_format,
 	.vidioc_s_fmt_vid_cap = tegra_channel_set_format,
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * Handle allocation and freeing routines for nvmap
  */
@@ -28,6 +28,7 @@
 #include <linux/libnvdimm.h>
 #endif /* NVMAP_UPSTREAM_KERNEL */
 #include "nvmap_priv.h"
+#include <linux/mm.h>

 bool nvmap_convert_carveout_to_iovmm;
 bool nvmap_convert_iovmm_to_carveout;
@@ -494,6 +495,8 @@ static int handle_page_alloc(struct nvmap_client *client,
 #else
 	static u8 chipid;
 #endif
+	struct mm_struct *mm = current->mm;
+	struct nvmap_handle_ref *ref;

 	if (!chipid) {
 #ifdef NVMAP_CONFIG_COLOR_PAGES
@@ -511,6 +514,13 @@ static int handle_page_alloc(struct nvmap_client *client,
 	if (!pages)
 		return -ENOMEM;

+	/*
+	 * Get refcount on mm_struct, so that it won't be freed until
+	 * nvmap reduces refcount after it reduces the RSS counter.
+	 */
+	if (!mmget_not_zero(mm))
+		goto page_free;
+
 	if (contiguous) {
 		struct page *page;
 		page = nvmap_alloc_pages_exact(gfp, size, true, h->numa_id);
@@ -582,6 +592,12 @@ static int handle_page_alloc(struct nvmap_client *client,
 		nvmap_total_page_allocs += nr_page;
 	}

+	/*
+	 * Increment the RSS counter of the allocating process by number of pages allocated.
+	 */
+	h->anon_count = nr_page;
+	nvmap_add_mm_counter(mm, MM_ANONPAGES, nr_page);
+
 	/*
 	 * Make sure any data in the caches is cleaned out before
 	 * passing these pages to userspace. Many nvmap clients assume that
@@ -595,11 +611,28 @@ static int handle_page_alloc(struct nvmap_client *client,
 	h->pgalloc.pages = pages;
 	h->pgalloc.contig = contiguous;
 	atomic_set(&h->pgalloc.ndirty, 0);

+	nvmap_ref_lock(client);
+	ref = __nvmap_validate_locked(client, h, false);
+	if (ref) {
+		ref->mm = mm;
+		ref->anon_count = h->anon_count;
+	} else {
+		nvmap_add_mm_counter(mm, MM_ANONPAGES, -nr_page);
+		mmput(mm);
+	}
+
+	nvmap_ref_unlock(client);
 	return 0;

 fail:
 	while (i--)
 		__free_page(pages[i]);
+
+	/* Incase of failure, release the reference on mm_struct. */
+	mmput(mm);
+
+page_free:
 	nvmap_altfree(pages, nr_page * sizeof(*pages));
 	wmb();
 	return -ENOMEM;
@@ -1072,9 +1105,18 @@ void _nvmap_handle_free(struct nvmap_handle *h)
 			h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);

 #ifdef NVMAP_CONFIG_PAGE_POOLS
-	if (!h->from_va && !h->is_subhandle)
-		page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
-				h->pgalloc.pages, nr_page);
+	if (!h->from_va && !h->is_subhandle) {
+		/*
+		 * When the process is exiting with kill signal pending, don't release the memory
+		 * back into page pool. So that memory would be released back to the kernel and OOM
+		 * killer would be able to actually free the memory.
+		 */
+		if (fatal_signal_pending(current) == 0 &&
+		    sigismember(&current->signal->shared_pending.signal, SIGKILL) == 0) {
+			page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
+					h->pgalloc.pages, nr_page);
+		}
+	}
 #endif

 	for (i = page_index; i < nr_page; i++) {
@@ -1129,6 +1171,17 @@ void nvmap_free_handle(struct nvmap_client *client,
 		if (h->owner == client)
 			h->owner = NULL;

+		/*
+		 * When a reference is freed, decrement rss counter of the process corresponding
+		 * to this ref and do mmput so that mm_struct can be freed, if required.
+		 */
+		if (ref->mm != NULL && ref->anon_count != 0) {
+			nvmap_add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
+			mmput(ref->mm);
+			ref->mm = NULL;
+			ref->anon_count = 0;
+		}
+
 		if (is_ro)
 			dma_buf_put(ref->handle->dmabuf_ro);
 		else
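The nvmap hunks above pair every RSS charge with a pinned `mm_struct`: the allocator takes a reference with `mmget_not_zero()` before bumping `MM_ANONPAGES`, and whoever drops the handle reference later reverses the counter and calls `mmput()`. A condensed sketch of that pairing, using only calls that appear in the diff (the helper names are hypothetical):

```c
/* Charge the allocating process's RSS and remember how to undo it. */
static int charge_rss(struct mm_struct *mm, struct nvmap_handle_ref *ref,
		      long nr_page)
{
	/* Pin mm so the counter can still be decremented at free time */
	if (!mmget_not_zero(mm))
		return -EINVAL;

	nvmap_add_mm_counter(mm, MM_ANONPAGES, nr_page);
	ref->mm = mm;
	ref->anon_count = nr_page;
	return 0;
}

/* Reverse of charge_rss(); safe to call more than once. */
static void uncharge_rss(struct nvmap_handle_ref *ref)
{
	if (ref->mm != NULL && ref->anon_count != 0) {
		nvmap_add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
		mmput(ref->mm);	/* drop the reference taken at charge time */
		ref->mm = NULL;
		ref->anon_count = 0;
	}
}
```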
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2011-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * User-space interface to nvmap
  */
@@ -286,6 +286,17 @@ static void destroy_client(struct nvmap_client *client)
 		if (ref->handle->owner == client)
 			ref->handle->owner = NULL;

+		/*
+		 * When a reference is freed, decrement rss counter of the process corresponding
+		 * to this ref and do mmput so that mm_struct can be freed, if required.
+		 */
+		if (ref->mm != NULL && ref->anon_count != 0) {
+			nvmap_add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
+			mmput(ref->mm);
+			ref->mm = NULL;
+			ref->anon_count = 0;
+		}
+
 		if (ref->is_ro)
 			dma_buf_put(ref->handle->dmabuf_ro);
 		else
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * dma_buf exporter for nvmap
  */
@@ -450,6 +450,7 @@ int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
 		nvmap_handle_put(h);
 		return -ENOMEM;
 	}
+	mutex_init(&priv->vma_lock);
 	priv->handle = h;

 #if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS) /* Linux v6.3 */
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2025, NVIDIA CORPORATION. All rights reserved.
  */

 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -147,6 +147,14 @@ static void nvmap_vma_close(struct vm_area_struct *vma)
 	BUG_ON(!vma_found);
 	nvmap_umaps_dec(h);

+	mutex_lock(&priv->vma_lock);
+	if (priv->mm != NULL && h->anon_count != 0) {
+		nvmap_add_mm_counter(priv->mm, MM_ANONPAGES, priv->map_rss_count);
+		priv->map_rss_count = 0;
+		priv->mm = NULL;
+	}
+	mutex_unlock(&priv->vma_lock);
+
 	if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
 		if (h->heap_pgalloc) {
 			for (i = 0; i < nr_page; i++) {
@@ -233,6 +241,14 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGSEGV;
 	}

+	mutex_lock(&priv->vma_lock);
+	if (priv->handle->anon_count != 0 && current->mm != NULL) {
+		nvmap_add_mm_counter(current->mm, MM_ANONPAGES, -1);
+		priv->map_rss_count++;
+		priv->mm = current->mm;
+	}
+	mutex_unlock(&priv->vma_lock);
+
 	if (!nvmap_handle_track_dirty(priv->handle))
 		goto finish;
 	mutex_lock(&priv->handle->lock);
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * Handle allocation and freeing routines for nvmap
  */
@@ -401,17 +401,31 @@ struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,

 	atomic_set(&ref->dupes, 1);
 	ref->handle = h;
+
+	/*
+	 * When a new reference is created to the handle, save mm, anon_count in ref and
+	 * increment ref count of mm.
+	 */
+	ref->mm = current->mm;
+	ref->anon_count = h->anon_count;
 	add_handle_ref(client, ref);

+	if (ref->anon_count != 0 && ref->mm != NULL) {
+		if (!mmget_not_zero(ref->mm))
+			goto exit;
+
+		nvmap_add_mm_counter(ref->mm, MM_ANONPAGES, ref->anon_count);
+	}
+
 	if (is_ro) {
 		ref->is_ro = true;
 		if (!h->dmabuf_ro)
-			goto exit;
+			goto exit_mm;
 		get_dma_buf(h->dmabuf_ro);
 	} else {
 		ref->is_ro = false;
 		if (!h->dmabuf)
-			goto exit;
+			goto exit_mm;
 		get_dma_buf(h->dmabuf);
 	}

@@ -420,6 +434,14 @@ out:
 			NVMAP_TP_ARGS_CHR(client, h, ref));
 	return ref;

+exit_mm:
+	if (ref->anon_count != 0 && ref->mm != NULL) {
+		nvmap_add_mm_counter(ref->mm, MM_ANONPAGES, -ref->anon_count);
+		mmput(ref->mm);
+		ref->mm = NULL;
+		ref->anon_count = 0;
+	}
+
 exit:
 	pr_err("dmabuf is NULL\n");
 	kfree(ref);
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2009-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2025, NVIDIA CORPORATION. All rights reserved.
  *
  * GPU memory management driver for Tegra
  */
@@ -93,7 +93,7 @@ do { \
 	} \
 } while (0)

-#define GFP_NVMAP	(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+#define GFP_NVMAP	(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN | __GFP_ACCOUNT | __GFP_NORETRY)

 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
@@ -271,6 +271,7 @@ struct nvmap_handle {
 	wait_queue_head_t waitq;
 	int numa_id;
 	u64 serial_id;
+	u64 anon_count;
 };

 struct nvmap_handle_info {
@@ -295,6 +296,8 @@ struct nvmap_handle_ref {
 	struct rb_node node;
 	atomic_t dupes;	/* number of times to free on file close */
 	bool is_ro;
+	struct mm_struct *mm;
+	u64 anon_count;
 };

 #if defined(NVMAP_CONFIG_PAGE_POOLS)
@@ -377,6 +380,9 @@ struct nvmap_vma_priv {
 	struct nvmap_handle *handle;
 	size_t offs;
 	atomic_t count;	/* number of processes cloning the VMA */
+	u64 map_rss_count;
+	struct mm_struct *mm;
+	struct mutex vma_lock;
 };

 struct nvmap_device {
@@ -913,6 +919,16 @@ static inline struct dma_buf *nvmap_id_array_id_release(struct xarray *xarr, u32
 	return NULL;
 }
 #endif

+static inline void nvmap_add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+#if defined(NV_MM_STRUCT_STRUCT_HAS_PERCPU_COUNTER_RSS_STAT) /* Linux v6.2 */
+	percpu_counter_add(&mm->rss_stat[member], value);
+#else
+	atomic_long_add_return(value, &mm->rss_stat.count[member]);
+#endif
+}
+
 void *nvmap_dmabuf_get_drv_data(struct dma_buf *dmabuf,
 				struct device *dev);
 bool is_nvmap_memory_available(size_t size, uint32_t heap);
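The `nvmap_add_mm_counter()` inline above compiles against both `rss_stat` layouts; the `NV_MM_STRUCT_STRUCT_HAS_PERCPU_COUNTER_RSS_STAT` macro it tests is emitted by the conftest entry added further below, so one source tree builds before and after the Linux v6.2 rss_stat rework. A small usage sketch (the caller name is hypothetical):

```c
/*
 * Resolves to percpu_counter_add() on v6.2+ kernels and to
 * atomic_long_add_return() on older ones, per the conftest probe below.
 */
static void account_pages(struct mm_struct *mm, long nr_page)
{
	nvmap_add_mm_counter(mm, MM_ANONPAGES, nr_page);
}
```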
@@ -134,6 +134,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_mux_add_adapter_has_no_class_argument
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += iio_dev_opaque_has_mlock
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_map_has_gfp_arg
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_complete_and_exit
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += mm_struct_struct_has_percpu_counter_rss_stat
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += mii_bus_struct_has_read_c45
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += mii_bus_struct_has_write_c45
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += netif_set_tso_max_size
@@ -181,7 +182,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_async_subdev_nf_init
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_async_notifier_init
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_async_nf_init_has_v4l2_dev_arg
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += __v4l2_async_nf_add_subdev
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_subdev_pad_ops_struct_has_get_frame_interval
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_subdev_pad_ops_struct_has_get_set_frame_interval
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_subdev_pad_ops_struct_has_dv_timings
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
 NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
@@ -7395,6 +7395,23 @@ compile_test() {
             compile_check_conftest "$CODE" "NV_MII_BUS_STRUCT_HAS_WRITE_C45" "" "types"
         ;;

+        mm_struct_struct_has_percpu_counter_rss_stat)
+            #
+            # Determine if the 'rss_stat' member of the 'mm_struct' structure is
+            # defined with 'percpu_counter'.
+            #
+            # This change was made in Linux v6.2 by commit f1a7941243c1 ("mm:
+            # convert mm's rss stats into percpu_counter").
+            #
+            CODE="
+            #include <linux/mm_types.h>
+            void conftest_mm_struct_struct_has_percpu_counter_rss_stat(struct mm_struct *mm) {
+                percpu_counter_add(&mm->rss_stat[0], 0);
+            }"
+
+            compile_check_conftest "$CODE" "NV_MM_STRUCT_STRUCT_HAS_PERCPU_COUNTER_RSS_STAT" "" "types"
+        ;;
+
         of_property_for_each_u32_removed_internal_args)
             #
             # Determine if the internal arguments for the macro
@@ -8082,10 +8099,12 @@ compile_test() {
             compile_check_conftest "$CODE" "NV_V4L2_ASYNC_NF_ADD_SUBDEV_PRESENT" "" "functions"
         ;;

-        v4l2_subdev_pad_ops_struct_has_get_frame_interval)
+        v4l2_subdev_pad_ops_struct_has_get_set_frame_interval)
             #
             # Determine if struct v4l2_subdev_pad_ops has the 'get_frame_interval'
-            # function pointer.
+            # and 'set_frame_interval' function pointers. Note that it is only
+            # necessary to check for the presence of one because both were added
+            # by the same commit.
             #
             # Added by commit 287fe160834a ("media: v4l2-subdev: Turn
             # .[gs]_frame_interval into pad operations") in Linux v6.8.
@@ -8098,7 +8117,7 @@ compile_test() {
             }
             "
             compile_check_conftest "$CODE" \
-                "NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_FRAME_INTERVAL" "" "types"
+                "NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL" "" "types"
         ;;

         v4l2_subdev_pad_ops_struct_has_dv_timings)