mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
synced 2025-12-24 10:11:26 +03:00

Compare commits
8 commits: jetson_36. ... jetson_36.
| Author | SHA1 | Date |
|---|---|---|
| | efa698bed8 | |
| | e7bf6f1444 | |
| | e228deeef1 | |
| | 36a712b801 | |
| | 087418781c | |
| | fe0a030fee | |
| | 7ad4c09866 | |
| | f41b74b8c3 | |
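The compared commits fall into two groups. The first reworks the Tegra Security Engine (SE) crypto driver: setkey paths cache the key and fall back to a reserved keyslot per request instead of failing when every hardware slot is busy, key programming moves to a dedicated keybuf command buffer serialized by a lock, and the fixed SE_SHA_BUFLEN DMA buffers become per-request allocations. The second adapts the CSI, tegracam, and VI-channel V4L2 drivers to Linux v6.8, which turned .g_frame_interval/.s_frame_interval into subdev pad operations, and adds s_parm/g_parm ioctls.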
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
  */
@@ -33,6 +33,9 @@ struct tegra_aes_ctx {
 	u32 ivsize;
 	u32 key1_id;
 	u32 key2_id;
+	u32 keylen;
+	u8 key1[AES_MAX_KEY_SIZE];
+	u8 key2[AES_MAX_KEY_SIZE];
 };
 
 struct tegra_aes_reqctx {
@@ -42,6 +45,8 @@ struct tegra_aes_reqctx {
 	u32 crypto_config;
 	u32 len;
 	u32 *iv;
+	u32 keylen;
+	u8 key[AES_MAX_KEY_SIZE];
 };
 
 struct tegra_aead_ctx {
@@ -53,6 +58,7 @@ struct tegra_aead_ctx {
 	u32 alg;
 	u32 keylen;
 	u32 key_id;
+	u8 key[AES_MAX_KEY_SIZE];
 };
 
 struct tegra_aead_reqctx {
@@ -64,8 +70,8 @@ struct tegra_aead_reqctx {
 	unsigned int cryptlen;
 	unsigned int authsize;
 	bool encrypt;
-	u32 config;
 	u32 crypto_config;
+	u32 config;
 	u32 key_id;
 	u32 iv[4];
 	u8 authdata[16];
@@ -78,6 +84,8 @@ struct tegra_cmac_ctx {
 	struct tegra_se *se;
 	unsigned int alg;
 	u32 key_id;
+	u32 keylen;
+	u8 key[AES_MAX_KEY_SIZE];
 	struct crypto_shash *fallback_tfm;
 };
 
@@ -92,6 +100,8 @@ struct tegra_cmac_reqctx {
 	u32 config;
 	u32 key_id;
 	u32 *iv;
+	u32 keylen;
+	u8 key[AES_MAX_KEY_SIZE];
 	u32 result[CMAC_RESULT_REG_COUNT];
 };
 
@@ -269,7 +279,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
 	struct tegra_se *se = ctx->se;
-	unsigned int cmdlen;
+	unsigned int cmdlen, key1_id, key2_id;
 	int ret;
 
 	/* Set buffer size as a multiple of AES_BLOCK_SIZE*/
@@ -279,8 +289,10 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 	if (!rctx->datbuf.buf)
 		return -ENOMEM;
 
-	rctx->iv = (u32 *)req->iv;
+	rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
 	rctx->len = req->cryptlen;
+	key1_id = ctx->key1_id;
+	key2_id = ctx->key2_id;
 
 	/* Pad input to AES Block size */
 	if (ctx->alg != SE_ALG_XTS) {
@@ -290,18 +302,48 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 
 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
 
+	rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
+	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
+
+	if (!key1_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
+						    ctx->keylen, ctx->alg, &key1_id);
+		if (ret)
+			goto out;
+	}
+
+	rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
+
+	if (ctx->alg == SE_ALG_XTS) {
+		if (!key2_id) {
+			ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
+							    ctx->keylen, ctx->alg, &key2_id);
+			if (ret)
+				goto out;
+		}
+
+		rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
+	}
+
 	/* Prepare the command and submit for execution */
 	cmdlen = tegra_aes_prep_cmd(se, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 
 	/* Copy the result */
 	tegra_aes_update_iv(req, ctx);
 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
 
+out:
 	/* Free the buffer */
 	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
 			  rctx->datbuf.buf, rctx->datbuf.addr);
 
+	if (tegra_key_is_reserved(key1_id))
+		tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
+
+	if (tegra_key_is_reserved(key2_id))
+		tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
+
 	crypto_finalize_skcipher_request(se->engine, req, ret);
 
 	return 0;
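Read together, the setkey and do_one_req hunks implement a fallback for keyslot exhaustion: setkey caches the key instead of failing, and the request path programs reserved slot 14 (15 for the XTS tweak key) only for the lifetime of one request. A condensed sketch of that flow, using the driver's own helpers and fields but hypothetical function names (sketch_setkey, sketch_do_one_req):

```c
/*
 * Condensed, illustrative sketch of the keyslot-exhaustion fallback;
 * not the verbatim patched code.
 */
static int sketch_setkey(struct tegra_aes_ctx *ctx, const u8 *key, u32 keylen)
{
	int ret;

	/* Try to claim one of the SE's regular hardware keyslots. */
	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
	if (ret) {
		/* All slots busy: cache the key bytes for request time. */
		ctx->keylen = keylen;
		memcpy(ctx->key1, key, keylen);
	}

	return 0;	/* setkey no longer fails on slot exhaustion */
}

static int sketch_do_one_req(struct tegra_aes_ctx *ctx, struct tegra_aes_reqctx *rctx)
{
	unsigned int key1_id = ctx->key1_id;
	int ret = 0;

	/* No permanent slot? Program the cached key into the reserved slot. */
	if (!key1_id) {
		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
						    ctx->keylen, ctx->alg,
						    &key1_id);
		if (ret)
			goto out;
	}

	rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
	/* ... prepare the command buffer and submit the job here ... */

out:
	/* A borrowed reserved slot is scrubbed after every request. */
	if (tegra_key_is_reserved(key1_id))
		tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);

	return ret;
}
```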
@@ -327,6 +369,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
 	ctx->se = se_alg->se_dev;
 	ctx->key1_id = 0;
 	ctx->key2_id = 0;
+	ctx->keylen = 0;
 
 	algname = crypto_tfm_alg_name(&tfm->base);
 	ret = se_algname_to_algid(algname);
@@ -361,13 +404,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm,
 			    const u8 *key, u32 keylen)
 {
 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int ret;
 
 	if (aes_check_keylen(keylen)) {
 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
 		return -EINVAL;
 	}
 
-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+	if (ret) {
+		ctx->keylen = keylen;
+		memcpy(ctx->key1, key, keylen);
+	}
+
+	return 0;
 }
 
 static int tegra_xts_setkey(struct crypto_skcipher *tfm,
@@ -385,11 +435,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm,
 
 	ret = tegra_key_submit(ctx->se, key, len,
 			       ctx->alg, &ctx->key1_id);
-	if (ret)
-		return ret;
+	if (ret) {
+		ctx->keylen = len;
+		memcpy(ctx->key1, key, len);
+	}
 
-	return tegra_key_submit(ctx->se, key + len, len,
+	ret = tegra_key_submit(ctx->se, key + len, len,
 			       ctx->alg, &ctx->key2_id);
+	if (ret) {
+		ctx->keylen = len;
+		memcpy(ctx->key2, key + len, len);
+	}
+
+	return 0;
 }
@@ -460,12 +516,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
 		return 0;
 
 	rctx->encrypt = encrypt;
-	rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
-	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
-	rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
-
-	if (ctx->key2_id)
-		rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
 
 	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
 }
@@ -751,11 +801,11 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
 
 	rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
-			      SE_AES_KEY_INDEX(ctx->key_id);
+			      SE_AES_KEY_INDEX(rctx->key_id);
 
 	cmdlen = tegra_gmac_prep_cmd(se, rctx);
 
-	return tegra_se_host1x_submit(se, cmdlen);
+	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 }
 
 static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -772,7 +822,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
 
 	/* Prepare command and submit */
 	cmdlen = tegra_gcm_crypt_prep_cmd(se, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		return ret;
 
@@ -791,11 +841,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
 
 	rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
-			      SE_AES_KEY_INDEX(ctx->key_id);
+			      SE_AES_KEY_INDEX(rctx->key_id);
 
 	/* Prepare command and submit */
 	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		return ret;
 
@@ -918,12 +968,12 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
 	rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
 						      rctx->encrypt) |
-						      SE_AES_KEY_INDEX(ctx->key_id);
+						      SE_AES_KEY_INDEX(rctx->key_id);
 
 	/* Prepare command and submit */
 	cmdlen = tegra_cbcmac_prep_cmd(se, rctx);
 
-	return tegra_se_host1x_submit(se, cmdlen);
+	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 }
 
 static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1105,7 +1155,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
 
 	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
-			      SE_AES_KEY_INDEX(ctx->key_id);
+			      SE_AES_KEY_INDEX(rctx->key_id);
 
 	/* Copy authdata in the top of buffer for encryption/decryption */
 	if (rctx->encrypt)
@@ -1130,7 +1180,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
 
 	/* Prepare command and submit */
 	cmdlen = tegra_ctr_prep_cmd(se, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		return ret;
 
@@ -1175,6 +1225,7 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 	rctx->dst_sg = req->dst;
 	rctx->assoclen = req->assoclen;
 	rctx->authsize = crypto_aead_authsize(tfm);
+	rctx->key_id = ctx->key_id;
 
 	if (rctx->encrypt)
 		rctx->cryptlen = req->cryptlen;
@@ -1200,6 +1251,13 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 	if (ret)
 		goto out;
 
+	if (!ctx->key_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+						    ctx->keylen, ctx->alg, &rctx->key_id);
+		if (ret)
+			goto out;
+	}
+
 	if (rctx->encrypt) {
 
 		/* CBC MAC Operation */
@@ -1231,6 +1289,9 @@ outbuf_err:
 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
 			  rctx->inbuf.buf, rctx->inbuf.addr);
 
+	if (tegra_key_is_reserved(rctx->key_id))
+		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
 
 	return 0;
@@ -1248,6 +1309,7 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 	rctx->dst_sg = req->dst;
 	rctx->assoclen = req->assoclen;
 	rctx->authsize = crypto_aead_authsize(tfm);
+	rctx->key_id = ctx->key_id;
 
 	if (rctx->encrypt)
 		rctx->cryptlen = req->cryptlen;
@@ -1273,6 +1335,13 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
 	rctx->iv[3] = (1 << 24);
 
+	if (!ctx->key_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+						    ctx->keylen, ctx->alg, &rctx->key_id);
+		if (ret)
+			goto out;
+	}
+
 	/* If there is associated data perform GMAC operation */
 	if (rctx->assoclen) {
 		ret = tegra_gcm_do_gmac(ctx, rctx);
@@ -1303,7 +1372,9 @@ outbuf_err:
 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
 			  rctx->inbuf.buf, rctx->inbuf.addr);
 
-	/* Finalize the request if there are no errors */
+	if (tegra_key_is_reserved(rctx->key_id))
+		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
 
 	return 0;
@@ -1329,6 +1400,7 @@ static int tegra_ccm_cra_init(struct crypto_aead *tfm)
 
 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
+	ctx->keylen = 0;
 
 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
@@ -1367,6 +1439,7 @@ static int tegra_gcm_cra_init(struct crypto_aead *tfm)
 
 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
+	ctx->keylen = 0;
 
 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
@@ -1453,13 +1526,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm,
 			     const u8 *key, u32 keylen)
 {
 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	int ret;
 
 	if (aes_check_keylen(keylen)) {
 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
 		return -EINVAL;
 	}
 
-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret) {
+		ctx->keylen = keylen;
+		memcpy(ctx->key, key, keylen);
+	}
+
+	return 0;
 }
 
 static unsigned int tegra_cmac_prep_cmd(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
@@ -1558,7 +1638,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
 	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
 	rctx->total_len += rctx->datbuf.size;
 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
-	rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
+	rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);
 
 	/*
 	 * Keep one block and residue bytes in residue and
@@ -1572,6 +1652,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
 		return 0;
 	}
 
+	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+					      &rctx->datbuf.addr, GFP_KERNEL);
+	if (!rctx->datbuf.buf)
+		return -ENOMEM;
+
 	/* Copy the previous residue first */
 	if (rctx->residue.size)
 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -1595,7 +1680,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
 
 	cmdlen = tegra_cmac_prep_cmd(se, rctx);
 
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	/*
 	 * If this is not the final update, copy the intermediate results
 	 * from the registers so that it can be used in the next 'update'
@@ -1618,17 +1703,29 @@ static int tegra_cmac_do_final(struct ahash_request *req)
 
 	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
 		return crypto_shash_tfm_digest(ctx->fallback_tfm,
-					       rctx->datbuf.buf, 0, req->result);
+					       NULL, 0, req->result);
 	}
 
-	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
 	rctx->datbuf.size = rctx->residue.size;
 	rctx->total_len += rctx->residue.size;
 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
 
+	if (rctx->residue.size) {
+		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+						      &rctx->datbuf.addr, GFP_KERNEL);
+		if (!rctx->datbuf.buf) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+	}
+
 	/* Prepare command and submit */
 	cmdlen = tegra_cmac_prep_cmd(se, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		goto out;
 
@@ -1640,8 +1737,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
 		writel(0, se->base + se->hw->regs->result + (i * 4));
 
 out:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
-			  rctx->datbuf.buf, rctx->datbuf.addr);
+	if (rctx->residue.size)
+		dma_free_coherent(se->dev, rctx->datbuf.size,
+				  rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
 			  rctx->residue.buf, rctx->residue.addr);
 	return ret;
@@ -1654,7 +1753,14 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct tegra_se *se = ctx->se;
-	int ret;
+	int ret = -EINVAL;
+
+	if (!ctx->key_id) {
+		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+						    ctx->keylen, ctx->alg, &rctx->key_id);
+		if (ret)
+			goto out;
+	}
 
 	if (rctx->task & SHA_UPDATE) {
 		ret = tegra_cmac_do_update(req);
@@ -1665,6 +1771,9 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
 		ret = tegra_cmac_do_final(req);
 		rctx->task &= ~SHA_FINAL;
 	}
+out:
+	if (tegra_key_is_reserved(rctx->key_id))
+		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
 
 	crypto_finalize_hash_request(se->engine, req, ret);
 
@@ -1710,6 +1819,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
 
 	ctx->se = se_alg->se_dev;
 	ctx->key_id = 0;
+	ctx->keylen = 0;
 
 	ret = se_algname_to_algid(algname);
 	if (ret < 0) {
@@ -1757,34 +1867,24 @@ static int tegra_cmac_init(struct ahash_request *req)
 	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
 					       &rctx->residue.addr, GFP_KERNEL);
 	if (!rctx->residue.buf)
-		goto resbuf_fail;
+		return -ENOMEM;
 
 	rctx->residue.size = 0;
 
-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
-					      &rctx->datbuf.addr, GFP_KERNEL);
-	if (!rctx->datbuf.buf)
-		goto datbuf_fail;
-
 	rctx->datbuf.size = 0;
+	rctx->key_id = ctx->key_id;
 
 	/* Clear any previous result */
 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
 		writel(0, se->base + se->hw->regs->result + (i * 4));
 
 	return 0;
-
-datbuf_fail:
-	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
-			  rctx->residue.addr);
-resbuf_fail:
-	return -ENOMEM;
 }
 
 static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;
 
 	if (aes_check_keylen(keylen)) {
 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
@@ -1794,7 +1894,13 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 	if (ctx->fallback_tfm)
 		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
 
-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret) {
+		ctx->keylen = keylen;
+		memcpy(ctx->key, key, keylen);
+	}
+
+	return 0;
 }
 
 static int tegra_cmac_update(struct ahash_request *req)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Crypto driver to handle HASH algorithms using NVIDIA Security Engine.
  */
@@ -335,6 +335,11 @@ static int tegra_sha_do_update(struct ahash_request *req)
 		return 0;
 	}
 
+	rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size,
+					      &rctx->datbuf.addr, GFP_KERNEL);
+	if (!rctx->datbuf.buf)
+		return -ENOMEM;
+
 	/* Copy the previous residue first */
 	if (rctx->residue.size)
 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -361,7 +366,7 @@ static int tegra_sha_do_update(struct ahash_request *req)
 
 	size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
 
-	ret = tegra_se_host1x_submit(ctx->se, size);
+	ret = tegra_se_host1x_submit(ctx->se, ctx->se->cmdbuf, size);
 
 	/*
 	 * If this is not the final update, copy the intermediate results
@@ -371,6 +376,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
 	if (!(rctx->task & SHA_FINAL))
 		tegra_sha_copy_hash_result(ctx->se, rctx);
 
+	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
+			  rctx->datbuf.buf, rctx->datbuf.addr);
+
 	return ret;
 }
 
@@ -383,16 +391,25 @@ static int tegra_sha_do_final(struct ahash_request *req)
 	u32 *cpuvaddr = se->cmdbuf->addr;
 	int size, ret = 0;
 
-	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
 	rctx->datbuf.size = rctx->residue.size;
 	rctx->total_len += rctx->residue.size;
 
 	rctx->config = tegra_sha_get_config(rctx->alg) |
 		       SE_SHA_DST_MEMORY;
 
-	size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+	if (rctx->residue.size) {
+		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+						      &rctx->datbuf.addr, GFP_KERNEL);
+		if (!rctx->datbuf.buf) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
 
-	ret = tegra_se_host1x_submit(se, size);
+		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+	}
+
+	size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
 	if (ret)
 		goto out;
 
@@ -400,8 +417,10 @@ static int tegra_sha_do_final(struct ahash_request *req)
 	memcpy(req->result, rctx->digest.buf, rctx->digest.size);
 
 out:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
-			  rctx->datbuf.buf, rctx->datbuf.addr);
+	if (rctx->residue.size)
+		dma_free_coherent(se->dev, rctx->datbuf.size,
+				  rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
 			  rctx->residue.buf, rctx->residue.addr);
 	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
@@ -540,19 +559,11 @@ static int tegra_sha_init(struct ahash_request *req)
 	if (!rctx->residue.buf)
 		goto resbuf_fail;
 
-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
-					      &rctx->datbuf.addr, GFP_KERNEL);
-	if (!rctx->datbuf.buf)
-		goto datbuf_fail;
-
 	return 0;
 
-datbuf_fail:
-	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
-			  rctx->residue.addr);
 resbuf_fail:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf,
-			  rctx->datbuf.addr);
 	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
 			  rctx->digest.addr);
 digbuf_fail:
 	return -ENOMEM;
 }
@@ -573,13 +584,18 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;
 
 	if (aes_check_keylen(keylen))
 		return tegra_hmac_fallback_setkey(ctx, key, keylen);
 
 	ctx->fallback = false;
 
-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret)
+		return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
+	return 0;
 }
 
 static int tegra_sha_update(struct ahash_request *req)
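These hash hunks, like the CMAC ones above, drop the SE_SHA_BUFLEN buffer that used to live for the whole transformation and instead size, allocate, and free the DMA buffer around each individual job. A minimal sketch of the new lifecycle, using the driver's own helpers but a hypothetical function name (sketch_do_update) and with data-copy details elided:

```c
/* Illustrative sketch of the per-request DMA buffer lifecycle. */
static int sketch_do_update(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
{
	int size, ret;

	/* Sized for this request only, instead of a fixed SE_SHA_BUFLEN. */
	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf)
		return -ENOMEM;

	/* ... copy residue and new data, then build and submit the command ... */
	size = tegra_sha_prep_cmd(se, se->cmdbuf->addr, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, size);

	/* Freed as soon as the job completes, not at final time. */
	dma_free_coherent(se->dev, rctx->datbuf.size,
			  rctx->datbuf.buf, rctx->datbuf.addr);
	return ret;
}
```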
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Crypto driver file to manage keys of NVIDIA Security Engine.
  */
@@ -115,11 +115,15 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
 			    u32 keylen, u16 slot, u32 alg)
 {
 	const u32 *keyval = (u32 *)key;
-	u32 *addr = se->cmdbuf->addr, size;
+	u32 *addr = se->keybuf->addr, size;
+	int ret;
 
+	mutex_lock(&kslt_lock);
 	size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
+	ret = tegra_se_host1x_submit(se, se->keybuf, size);
+	mutex_unlock(&kslt_lock);
 
-	return tegra_se_host1x_submit(se, size);
+	return ret;
 }
 
 void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
@@ -143,7 +147,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
 	if (!tegra_key_in_kslt(*keyid)) {
 		*keyid = tegra_keyslot_alloc();
 		if (!(*keyid)) {
-			dev_err(se->dev, "failed to allocate key slot\n");
+			dev_dbg(se->dev, "failed to allocate key slot\n");
 			return -ENOMEM;
 		}
 	}
@@ -154,3 +158,20 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
 
 	return 0;
 }
+
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg)
+{
+	u8 zkey[AES_MAX_KEY_SIZE] = {0};
+
+	if (!keyid)
+		return;
+
+	/* Overwrite the key with 0s */
+	tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
+}
+
+inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+				     u32 keylen, u32 alg, u32 *keyid)
+{
+	return tegra_key_insert(se, key, keylen, *keyid, alg);
+}
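Worth noting: a reserved slot is not returned through the regular keyslot allocator. tegra_key_invalidate_reserved() instead overwrites the slot with an all-zero key, which fits the design of slots 14 and 15 being permanently set aside; only the key material needs to be scrubbed between requests.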
@@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssize_t size)
 	return cmdbuf;
 }
 
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
 {
 	struct host1x_job *job;
 	int ret;
@@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
 	job->engine_fallback_streamid = se->stream_id;
 	job->engine_streamid_offset = SE_STREAM_ID;
 
-	se->cmdbuf->words = size;
+	cmdbuf->words = size;
 
-	host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+	host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
 
 	ret = host1x_job_pin(job, se->dev);
 	if (ret) {
@@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
 		goto syncpt_put;
 	}
 
+	se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+	if (!se->cmdbuf) {
+		ret = -ENOMEM;
+		goto cmdbuf_put;
+	}
+
 	ret = se->hw->init_alg(se);
 	if (ret) {
 		dev_err(se->dev, "failed to register algorithms\n");
-		goto cmdbuf_put;
+		goto keybuf_put;
 	}
 
 	return 0;
 
+keybuf_put:
+	tegra_se_cmdbuf_put(&se->keybuf->bo);
 cmdbuf_put:
 	tegra_se_cmdbuf_put(&se->cmdbuf->bo);
 syncpt_put:
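With this change, key programming goes through its own keybuf command buffer under kslt_lock instead of the shared cmdbuf, so inserting a key from a request's fallback path cannot clobber a cipher or hash command stream still being gathered from se->cmdbuf.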
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * Header file for NVIDIA Security Engine driver.
  */
@@ -342,7 +342,9 @@
 #define SE_CRYPTO_CTR_REG_COUNT	4
 #define SE_MAX_KEYSLOT		15
 #define SE_MAX_MEM_ALLOC	SZ_4M
-#define SE_SHA_BUFLEN		0x2000
+
+#define TEGRA_AES_RESERVED_KSLT	14
+#define TEGRA_XTS_RESERVED_KSLT	15
 
 #define SHA_FIRST	BIT(0)
 #define SHA_UPDATE	BIT(1)
@@ -443,6 +445,7 @@ struct tegra_se {
 	struct host1x_client client;
 	struct host1x_channel *channel;
 	struct tegra_se_cmdbuf *cmdbuf;
+	struct tegra_se_cmdbuf *keybuf;
 	struct crypto_engine *engine;
 	struct host1x_syncpt *syncpt;
 	struct device *dev;
@@ -525,7 +528,31 @@ void tegra_deinit_hash(struct tegra_se *se);
 int tegra_key_submit(struct tegra_se *se, const u8 *key,
 		     u32 keylen, u32 alg, u32 *keyid);
 void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+			      u32 keylen, u32 alg, u32 *keyid);
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg);
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
+
+static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key,
+						u32 keylen, u32 alg, u32 *keyid)
+{
+	*keyid = TEGRA_AES_RESERVED_KSLT;
+	return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key,
+						u32 keylen, u32 alg, u32 *keyid)
+{
+	*keyid = TEGRA_XTS_RESERVED_KSLT;
+	return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline bool tegra_key_is_reserved(u32 keyid)
+{
+	return ((keyid == TEGRA_AES_RESERVED_KSLT) ||
+		(keyid == TEGRA_XTS_RESERVED_KSLT));
+}
 
 /* HOST1x OPCODES */
 static inline u32 host1x_opcode_setpayload(unsigned int payload)
@@ -1,8 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * NVIDIA Tegra CSI Device
- *
- * Copyright (c) 2015-2024, NVIDIA CORPORATION. All rights reserved.
  */
 
 #include <nvidia/conftest.h>
@@ -687,7 +686,7 @@ static int tegra_csi_set_format(struct v4l2_subdev *subdev,
 }
 
 static int tegra_csi_g_frame_interval(struct v4l2_subdev *sd,
-#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_FRAME_INTERVAL)
+#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
 				      struct v4l2_subdev_state *sd_state,
 #endif
 				      struct v4l2_subdev_frame_interval *vfi)
@@ -721,13 +720,13 @@ static int tegra_csi_enum_mbus_code(struct v4l2_subdev *sd,
 static struct v4l2_subdev_video_ops tegra_csi_video_ops = {
 	.s_stream = tegra_csi_s_stream,
 	.g_input_status = tegra_csi_g_input_status,
-#if !defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_FRAME_INTERVAL)
+#if !defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
 	.g_frame_interval = tegra_csi_g_frame_interval,
 #endif
 };
 
 static struct v4l2_subdev_pad_ops tegra_csi_pad_ops = {
-#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_FRAME_INTERVAL)
+#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
 	.get_frame_interval = tegra_csi_g_frame_interval,
 #endif
 	.get_fmt = tegra_csi_get_format,
@@ -1,9 +1,11 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 /*
  * tegracam_v4l2 - tegra camera framework for v4l2 support
- *
- * Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved.
 */
 
 #include <nvidia/conftest.h>
 
+#include <linux/types.h>
 #include <media/tegra-v4l2-camera.h>
 #include <media/tegracam_core.h>
@@ -112,9 +114,30 @@ static int v4l2sd_g_input_status(struct v4l2_subdev *sd, u32 *status)
 	return 0;
 }
 
+static int cam_g_frame_interval(struct v4l2_subdev *sd,
+#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
+				struct v4l2_subdev_state *sd_state,
+#endif
+				struct v4l2_subdev_frame_interval *ival)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct camera_common_data *s_data = to_camera_common_data(&client->dev);
+
+	if (!s_data)
+		return -EINVAL;
+
+	ival->interval.denominator = s_data->frmfmt[s_data->mode_prop_idx].framerates[0];
+	ival->interval.numerator = 1;
+	return 0;
+}
+
 static struct v4l2_subdev_video_ops v4l2sd_video_ops = {
 	.s_stream = v4l2sd_stream,
 	.g_input_status = v4l2sd_g_input_status,
+#if !defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
+	.g_frame_interval = cam_g_frame_interval,
+	.s_frame_interval = cam_g_frame_interval,
+#endif
 };
 
 static struct v4l2_subdev_core_ops v4l2sd_core_ops = {
@@ -161,6 +184,10 @@ static int v4l2sd_set_fmt(struct v4l2_subdev *sd,
 }
 
 static struct v4l2_subdev_pad_ops v4l2sd_pad_ops = {
+#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
+	.get_frame_interval = cam_g_frame_interval,
+	.set_frame_interval = cam_g_frame_interval,
+#endif
 	.set_fmt = v4l2sd_set_fmt,
 	.get_fmt = v4l2sd_get_fmt,
 	.enum_mbus_code = camera_common_enum_mbus_code,
@@ -2281,10 +2281,32 @@ static long tegra_channel_default_ioctl(struct file *file, void *fh,
 	return ret;
 }
 
+/* Implemented vidioc_s_parm and vidioc_g_parm ioctl to support multiple frame
+ * rates */
+static int tegra_channel_s_parm(struct file *file, void *fh,
+				struct v4l2_streamparm *a)
+{
+	struct tegra_channel *chan = video_drvdata(file);
+	struct v4l2_subdev *sd = chan->subdev_on_csi;
+
+	return v4l2_s_parm_cap(chan->video, sd, a);
+}
+
+static int tegra_channel_g_parm(struct file *file, void *fh,
+				struct v4l2_streamparm *a)
+{
+	struct tegra_channel *chan = video_drvdata(file);
+	struct v4l2_subdev *sd = chan->subdev_on_csi;
+
+	return v4l2_g_parm_cap(chan->video, sd, a);
+}
+
 static const struct v4l2_ioctl_ops tegra_channel_ioctl_ops = {
 	.vidioc_querycap = tegra_channel_querycap,
 	.vidioc_enum_framesizes = tegra_channel_enum_framesizes,
 	.vidioc_enum_frameintervals = tegra_channel_enum_frameintervals,
+	.vidioc_s_parm = tegra_channel_s_parm,
+	.vidioc_g_parm = tegra_channel_g_parm,
 	.vidioc_enum_fmt_vid_cap = tegra_channel_enum_format,
 	.vidioc_g_fmt_vid_cap = tegra_channel_get_format,
 	.vidioc_s_fmt_vid_cap = tegra_channel_set_format,
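Both new ioctls delegate to the core v4l2_g_parm_cap()/v4l2_s_parm_cap() helpers, which call down into the subdev frame-interval ops wired up in the tegracam diff above; since .s_frame_interval/.set_frame_interval point at the same getter, S_PARM effectively reports the fixed per-mode frame rate rather than changing it.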
@@ -181,7 +181,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_async_subdev_nf_init
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_async_notifier_init
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_async_nf_init_has_v4l2_dev_arg
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += __v4l2_async_nf_add_subdev
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_subdev_pad_ops_struct_has_get_frame_interval
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_subdev_pad_ops_struct_has_get_set_frame_interval
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += v4l2_subdev_pad_ops_struct_has_dv_timings
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
 NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
@@ -8082,10 +8082,12 @@ compile_test() {
            compile_check_conftest "$CODE" "NV_V4L2_ASYNC_NF_ADD_SUBDEV_PRESENT" "" "functions"
        ;;
 
-       v4l2_subdev_pad_ops_struct_has_get_frame_interval)
+       v4l2_subdev_pad_ops_struct_has_get_set_frame_interval)
            #
            # Determine if struct v4l2_subdev_pad_ops has the 'get_frame_interval'
-           # function pointer.
+           # and 'set_frame_interval' function pointers. Note that it is only
+           # necessary to check for the presence of one because both were added
+           # by the same commit.
            #
            # Added by commit 287fe160834a ("media: v4l2-subdev: Turn
            # .[gs]_frame_interval into pad operations") in Linux v6.8.
@@ -8098,7 +8100,7 @@ compile_test() {
            }
            "
            compile_check_conftest "$CODE" \
-               "NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_FRAME_INTERVAL" "" "types"
+               "NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL" "" "types"
        ;;
 
        v4l2_subdev_pad_ops_struct_has_dv_timings)
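The renamed conftest macro tracks the Linux v6.8 change (commit 287fe160834a) that moved frame-interval callbacks from v4l2_subdev_video_ops to v4l2_subdev_pad_ops and gave them a state argument. A minimal sketch of the dual-signature pattern the drivers above use; my_g_ival, my_pad_ops, and my_video_ops are hypothetical names:

```c
#include <media/v4l2-subdev.h>

/* One callback body shared by both kernel generations (illustrative). */
static int my_g_ival(struct v4l2_subdev *sd,
#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
		     struct v4l2_subdev_state *sd_state,
#endif
		     struct v4l2_subdev_frame_interval *fi)
{
	fi->interval.numerator = 1;
	fi->interval.denominator = 30;	/* fixed 30 fps, for the sketch only */
	return 0;
}

static struct v4l2_subdev_pad_ops my_pad_ops = {
#if defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
	.get_frame_interval = my_g_ival,	/* Linux >= v6.8 */
	.set_frame_interval = my_g_ival,
#endif
};

static struct v4l2_subdev_video_ops my_video_ops = {
#if !defined(NV_V4L2_SUBDEV_PAD_OPS_STRUCT_HAS_GET_SET_FRAME_INTERVAL)
	.g_frame_interval = my_g_ival,		/* Linux < v6.8 */
	.s_frame_interval = my_g_ival,
#endif
};
```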