crypto: tegra: Add updated Tegra SE driver

Add the updated Tegra SE driver with the below improvements.
- Remove the dependency on nvhost APIs and use upstream Host1x APIs.
- Implement software fallbacks for keys/plaintexts unsupported by
hardware.
- Support only T234 and later. Prior chips will use tegra-se-nvhost
driver.

Bug 4221414
Bug 3579794

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Change-Id: I398a5b7cc3f752b44d01d6d1c81f813f862e4cd9
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2977810
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Akhil R
2023-04-14 22:09:49 +05:30
committed by mobile promotions
parent cbf177852b
commit 2659fcd46a
9 changed files with 4216 additions and 0 deletions

View File

@@ -5,5 +5,8 @@ ifdef CONFIG_TEGRA_HOST1X
obj-m += tegra-se-nvhost.o
obj-m += tegra-hv-vse-safety.o
obj-m += tegra-nvvse-cryptodev.o
ifdef CONFIG_CRYPTO_ENGINE
obj-m += tegra/
endif
endif
obj-m += tegra-se-nvrng.o

View File

@@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
ccflags-y += -I$(srctree.nvidia)/drivers/gpu/host1x/include
tegra-se-objs := tegra-se-key.o tegra-se-main.o
tegra-se-y += tegra-se-aes.o
tegra-se-y += tegra-se-hash.o
obj-m += tegra-se.o

View File

File diff suppressed because it is too large Load Diff

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,132 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver file to manage keys of NVIDIA Security Engine.
*/
#include <linux/bitops.h>
#include <linux/module.h>
#include <crypto/aes.h>
#include "tegra-se.h"
#define SE_KEY_FULL_MASK GENMASK(SE_MAX_KEYSLOT, 0)
/* Reserve keyslot 0, 14, 15 */
#define SE_KEY_RSVD_MASK (BIT(0) | BIT(14) | BIT(15))
#define SE_KEY_VALID_MASK (SE_KEY_FULL_MASK & ~SE_KEY_RSVD_MASK)
static u16 tegra_se_keyslots = SE_KEY_RSVD_MASK;
static u16 tegra_keyslot_alloc(void)
{
u16 keyid;
/* Check if all key slots are full */
if (tegra_se_keyslots == GENMASK(SE_MAX_KEYSLOT, 0))
return 0;
keyid = ffz(tegra_se_keyslots);
tegra_se_keyslots |= BIT(keyid);
return keyid;
}
/* Mark hardware keyslot @slot as free in the allocation bitmap. */
static void tegra_keyslot_free(u16 slot)
{
	tegra_se_keyslots &= ~(BIT(slot));
}
/*
 * tegra_key_prep_ins_cmd - build the Host1x command stream that programs
 * a key into an SE hardware keyslot.
 *
 * Emits into @cpuvaddr: a write-stall dummy op, the key manifest, the
 * destination slot index, the key words (one address/data register pair
 * per 32-bit word of key), the INS config, the START operation and a
 * final syncpoint increment so the submitter can wait for completion.
 *
 * Returns the number of 32-bit words written to @cpuvaddr.
 */
static unsigned int tegra_key_prep_ins_cmd(struct tegra_se *se, u32 *cpuvaddr,
					   const u32 *key, u32 keylen, u16 slot, u32 alg)
{
	int i = 0, j;

	/* Dummy operation with write-stall to quiesce the engine first. */
	cpuvaddr[i++] = host1x_opcode_setpayload(1);
	cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->op);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY;

	/* Key manifest describes owner, purpose (from @alg) and key size. */
	cpuvaddr[i++] = host1x_opcode_setpayload(1);
	cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->manifest);
	cpuvaddr[i++] = se->manifest(se->owner, alg, keylen);

	/* Select the destination keyslot. */
	cpuvaddr[i++] = host1x_opcode_setpayload(1);
	cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->key_dst);
	cpuvaddr[i++] = SE_AES_KEY_DST_INDEX(slot);

	for (j = 0; j < keylen / 4; j++) {
		/* Set key address */
		cpuvaddr[i++] = host1x_opcode_setpayload(1);
		cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->key_addr);
		cpuvaddr[i++] = j;

		/* Set key data */
		cpuvaddr[i++] = host1x_opcode_setpayload(1);
		cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->key_data);
		cpuvaddr[i++] = key[j];
	}

	/* Configure the key-insert operation and start it. */
	cpuvaddr[i++] = host1x_opcode_setpayload(1);
	cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->config);
	cpuvaddr[i++] = SE_CFG_INS;

	cpuvaddr[i++] = host1x_opcode_setpayload(1);
	cpuvaddr[i++] = host1x_opcode_incr_w(se->hw->regs->op);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_START |
			SE_AES_OP_LASTBUF;

	/* Increment the job syncpoint once the operation completes. */
	cpuvaddr[i++] = host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "key-slot %u key-manifest %#x\n",
		slot, se->manifest(se->owner, alg, keylen));

	return i;
}
/* Check whether @keyid names a valid, currently-allocated keyslot. */
static bool tegra_key_in_kslt(u32 keyid)
{
	u16 mask;

	if (keyid > SE_MAX_KEYSLOT)
		return false;

	mask = BIT(keyid);

	/* The slot must be outside the reserved set... */
	if (!(mask & SE_KEY_VALID_MASK))
		return false;

	/* ...and currently marked in-use in the allocation bitmap. */
	return !!(mask & tegra_se_keyslots);
}
/* Program @key into hardware keyslot @slot via a Host1x job. */
static int tegra_key_insert(struct tegra_se *se, const u8 *key,
			    u32 keylen, u16 slot, u32 alg)
{
	u32 *cmdbuf = se->cmdbuf->addr;
	u32 words;

	/* Build the key-insert command stream, then hand it to Host1x. */
	words = tegra_key_prep_ins_cmd(se, cmdbuf, (const u32 *)key,
				       keylen, slot, alg);

	return tegra_se_host1x_submit(se, words);
}
/*
 * Release the keyslot bookkeeping for @keyid; keyid 0 means "no slot".
 *
 * NOTE(review): only the software bitmap is cleared here - the key
 * material stays programmed in hardware until the slot is reused;
 * confirm whether an explicit hardware invalidate is required.
 */
void tegra_key_invalidate(struct tegra_se *se, u32 keyid)
{
	if (!keyid)
		return;

	tegra_keyslot_free(keyid);
}
/*
 * Program @key into a hardware keyslot, reusing the slot in *@keyid when
 * it is already allocated. On success *@keyid holds the slot in use.
 *
 * Returns 0 on success, -ENOMEM when no slot is free, or the submit
 * error from the hardware programming.
 */
int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
{
	bool new_slot = false;
	int ret;

	/* Use the existing slot if it is already allocated */
	if (!tegra_key_in_kslt(*keyid)) {
		*keyid = tegra_keyslot_alloc();
		if (!(*keyid))
			return -ENOMEM;
		new_slot = true;
	}

	ret = tegra_key_insert(se, key, keylen, *keyid, alg);
	if (ret && new_slot) {
		/* Don't leak a freshly reserved slot if programming failed. */
		tegra_keyslot_free(*keyid);
		*keyid = 0;
	}

	return ret;
}

View File

@@ -0,0 +1,489 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver for NVIDIA Security Engine in Tegra Chips
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/host1x-next.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <crypto/engine.h>
#include "tegra-se.h"
/* host1x_bo op: take an extra reference on the backing command buffer. */
static struct host1x_bo *tegra_se_cmdbuf_get(struct host1x_bo *host_bo)
{
	struct tegra_se_cmdbuf *cmdbuf;

	cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
	kref_get(&cmdbuf->ref);

	return host_bo;
}
/* kref release: free the DMA backing memory and the tracking struct. */
static void tegra_se_cmdbuf_release(struct kref *ref)
{
	struct tegra_se_cmdbuf *cmdbuf = container_of(ref, struct tegra_se_cmdbuf, ref);

	dma_free_attrs(cmdbuf->dev, cmdbuf->size, cmdbuf->addr,
		       cmdbuf->iova, 0);

	kfree(cmdbuf);
}
/* host1x_bo op: drop a reference; frees the buffer on the last put. */
static void tegra_se_cmdbuf_put(struct host1x_bo *host_bo)
{
	struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);

	kref_put(&cmdbuf->ref, tegra_se_cmdbuf_release);
}
/*
 * host1x_bo op: map the command buffer into @dev's DMA address space.
 * Returns a refcounted mapping, or an ERR_PTR on failure.
 */
static struct host1x_bo_mapping *
tegra_se_cmdbuf_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
{
	struct tegra_se_cmdbuf *cmdbuf = container_of(bo, struct tegra_se_cmdbuf, bo);
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	/* Build a scatterlist over the coherent allocation, then map it. */
	err = dma_get_sgtable(dev, map->sgt, cmdbuf->addr,
			      cmdbuf->iova, cmdbuf->words * 4);
	if (err)
		goto free_sgt;

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

	map->phys = sg_dma_address(map->sgt->sgl);
	map->size = cmdbuf->words * 4;
	/*
	 * NOTE(review): err is 0 here (dma_map_sgtable returns 0 on
	 * success), so chunks is recorded as 0 - same pattern as the
	 * tegra-drm gem pin; confirm nothing consumes map->chunks.
	 */
	map->chunks = err;

	return map;

free_sgt:
	sg_free_table(map->sgt);
	kfree(map->sgt);
free:
	kfree(map);
	return ERR_PTR(err);
}
/* host1x_bo op: undo tegra_se_cmdbuf_pin() - unmap, free, drop BO ref. */
static void tegra_se_cmdbuf_unpin(struct host1x_bo_mapping *map)
{
	if (!map)
		return;

	dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
	sg_free_table(map->sgt);
	kfree(map->sgt);
	host1x_bo_put(map->bo);

	kfree(map);
}
/* host1x_bo op: return the kernel VA of the (already mapped) buffer. */
static void *tegra_se_cmdbuf_mmap(struct host1x_bo *host_bo)
{
	struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);

	return cmdbuf->addr;
}

/* host1x_bo op: nothing to do - the CPU mapping is permanent. */
static void tegra_se_cmdbuf_munmap(struct host1x_bo *host_bo, void *addr)
{
}
/* host1x buffer-object callbacks for the DMA-backed SE command buffer. */
static const struct host1x_bo_ops tegra_se_cmdbuf_ops = {
	.get = tegra_se_cmdbuf_get,
	.put = tegra_se_cmdbuf_put,
	.pin = tegra_se_cmdbuf_pin,
	.unpin = tegra_se_cmdbuf_unpin,
	.mmap = tegra_se_cmdbuf_mmap,
	.munmap = tegra_se_cmdbuf_munmap,
};
/*
 * Allocate a DMA-coherent command buffer of @size bytes, wrapped in a
 * refcounted host1x BO. Allocation is done against the host1x parent
 * device so the buffer is visible to the channel. Returns NULL on OOM.
 */
static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssize_t size)
{
	struct tegra_se_cmdbuf *cmdbuf;
	struct device *dev = se->dev->parent;

	cmdbuf = kzalloc(sizeof(*cmdbuf), GFP_KERNEL);
	if (!cmdbuf)
		return NULL;

	cmdbuf->addr = dma_alloc_attrs(dev, size, &cmdbuf->iova,
				       GFP_KERNEL, 0);
	if (!cmdbuf->addr) {
		/* Fix: don't leak the tracking struct when the DMA alloc fails. */
		kfree(cmdbuf);
		return NULL;
	}

	cmdbuf->size = size;
	cmdbuf->dev = dev;

	host1x_bo_init(&cmdbuf->bo, &tegra_se_cmdbuf_ops);
	kref_init(&cmdbuf->ref);

	return cmdbuf;
}
/*
 * Submit @size words from the instance command buffer to the SE channel
 * and synchronously wait for the job's syncpoint to signal completion.
 *
 * Returns 0 on success or a negative errno from allocation, pinning,
 * submission or the syncpoint wait.
 */
int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
{
	struct host1x_job *job;
	int ret;

	job = host1x_job_alloc(se->channel, 1, 0, true);
	if (!job) {
		dev_err(se->dev, "failed to allocate host1x job\n");
		return -ENOMEM;
	}

	job->syncpt = host1x_syncpt_get(se->syncpt);
	job->syncpt_incrs = 1;
	job->client = &se->client;
	job->class = se->client.class;
	job->serialize = true;
	job->engine_fallback_streamid = se->stream_id;
	job->engine_streamid_offset = SE_STREAM_ID;

	se->cmdbuf->words = size;

	host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);

	ret = host1x_job_pin(job, se->dev);
	if (ret) {
		dev_err(se->dev, "failed to pin host1x job\n");
		goto err_job_pin;
	}

	ret = host1x_job_submit(job);
	if (ret) {
		dev_err(se->dev, "failed to submit host1x job\n");
		goto err_job_submit;
	}

	ret = host1x_syncpt_wait(job->syncpt, job->syncpt_end,
				 MAX_SCHEDULE_TIMEOUT, NULL);
	if (ret) {
		dev_err(se->dev, "host1x job timed out\n");
		/* Fix: drop the job reference on timeout instead of leaking it. */
		host1x_job_put(job);
		return ret;
	}

	host1x_job_put(job);

	return 0;

err_job_submit:
	host1x_job_unpin(job);
err_job_pin:
	host1x_job_put(job);

	return ret;
}
/*
 * host1x client init: acquire a channel and a syncpoint, allocate the
 * 4 KiB command buffer, and register this instance's crypto algorithms.
 * Unwinds in reverse order on failure.
 */
static int tegra_se_client_init(struct host1x_client *client)
{
	struct tegra_se *se = container_of(client, struct tegra_se, client);
	int ret;

	se->channel = host1x_channel_request(&se->client);
	if (!se->channel) {
		dev_err(se->dev, "host1x channel map failed\n");
		return -ENODEV;
	}

	se->syncpt = host1x_syncpt_request(&se->client, 0);
	if (!se->syncpt) {
		dev_err(se->dev, "host1x syncpt allocation failed\n");
		ret = -EINVAL;
		goto err_syncpt;
	}

	se->syncpt_id = host1x_syncpt_id(se->syncpt);

	se->cmdbuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
	if (!se->cmdbuf) {
		ret = -ENOMEM;
		goto err_bo;
	}

	/* Hardware-specific algorithm registration (AES or hash variant). */
	ret = se->hw->init_alg(se);
	if (ret) {
		dev_err(se->dev, "failed to register algorithms\n");
		goto err_alg_reg;
	}

	return 0;

err_alg_reg:
	tegra_se_cmdbuf_put(&se->cmdbuf->bo);
err_bo:
	host1x_syncpt_put(se->syncpt);
err_syncpt:
	host1x_channel_put(se->channel);

	return ret;
}
/* host1x client exit: release algorithms, cmdbuf, syncpoint, channel. */
static int tegra_se_client_deinit(struct host1x_client *client)
{
	struct tegra_se *se = container_of(client, struct tegra_se, client);

	se->hw->deinit_alg();
	tegra_se_cmdbuf_put(&se->cmdbuf->bo);
	host1x_syncpt_put(se->syncpt);
	host1x_channel_put(se->channel);

	return 0;
}
/* host1x client lifecycle callbacks. */
static const struct host1x_client_ops tegra_se_client_ops = {
	.init = tegra_se_client_init,
	.exit = tegra_se_client_deinit,
};
/*
 * Register this SE instance as a host1x client (one syncpoint).
 *
 * NOTE(review): host1x_client_register() can fail; its return value is
 * ignored here - consider propagating it.
 */
int tegra_se_host1x_register(struct tegra_se *se)
{
	INIT_LIST_HEAD(&se->client.list);
	se->client.dev = se->dev;
	se->client.ops = &tegra_se_client_ops;
	se->client.class = se->hw->host1x_class;
	se->client.num_syncpts = 1;

	host1x_client_register(&se->client);

	return 0;
}
/*
 * Acquire all clocks described for the device, raise each to its
 * maximum supported rate, and enable them. Clocks are devm-managed,
 * so only the enable needs explicit teardown (tegra_se_clk_deinit).
 */
static int tegra_se_clk_init(struct tegra_se *se)
{
	int i, ret;

	se->num_clks = devm_clk_bulk_get_all(se->dev, &se->clks);
	if (se->num_clks < 0) {
		dev_err(se->dev, "failed to get clocks\n");
		return se->num_clks;
	}

	for (i = 0; i < se->num_clks; i++) {
		/* ULONG_MAX asks the clk framework for the highest rate it supports. */
		ret = clk_set_rate(se->clks[i].clk, ULONG_MAX);
		if (ret) {
			/* Fix: message was missing the terminating newline. */
			dev_err(se->dev, "failed to set clock %d rate\n", i);
			return ret;
		}
	}

	ret = clk_bulk_prepare_enable(se->num_clks, se->clks);
	if (ret) {
		dev_err(se->dev, "failed to enable clocks\n");
		return ret;
	}

	return 0;
}
/* Disable and unprepare all clocks enabled by tegra_se_clk_init(). */
static void tegra_se_clk_deinit(struct tegra_se *se)
{
	clk_bulk_disable_unprepare(se->num_clks, se->clks);
}
static int tegra_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_se *se;
int ret;
se = devm_kzalloc(dev, sizeof(*se), GFP_KERNEL);
if (!se)
return -ENOMEM;
se->dev = dev;
se->hw = device_get_match_data(&pdev->dev);
se->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(se->base))
return PTR_ERR(se->base);
se->owner = TEGRA_GPSE_ID;
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
platform_set_drvdata(pdev, se);
ret = tegra_se_clk_init(se);
if (ret) {
dev_err(dev, "failed to init clocks\n");
return ret;
}
if (!tegra_dev_iommu_get_stream_id(dev, &se->stream_id)) {
dev_err(dev, "failed to get IOMMU stream ID\n");
goto err_iommu_spec;
}
se_writel(se, se->stream_id, SE_STREAM_ID);
se->engine = crypto_engine_alloc_init(dev, 0);
if (!se->engine) {
dev_err(dev, "failed to init crypto engine\n");
ret = -ENOMEM;
goto err_engine_alloc;
}
ret = crypto_engine_start(se->engine);
if (ret) {
dev_err(dev, "failed to start crypto engine\n");
goto err_engine_start;
}
ret = tegra_se_host1x_register(se);
if (ret) {
dev_err(dev, "failed to init host1x params\n");
goto err_host1x_init;
}
return 0;
err_host1x_init:
crypto_engine_stop(se->engine);
err_engine_start:
crypto_engine_exit(se->engine);
err_engine_alloc:
iommu_fwspec_free(se->dev);
err_iommu_spec:
tegra_se_clk_deinit(se);
return ret;
}
/* Tear down one SE instance (reverse order of probe). */
static int tegra_se_remove(struct platform_device *pdev)
{
	struct tegra_se *se = platform_get_drvdata(pdev);

	crypto_engine_stop(se->engine);
	crypto_engine_exit(se->engine);
	iommu_fwspec_free(se->dev);
	host1x_client_unregister(&se->client);
	tegra_se_clk_deinit(se);

	return 0;
}
/* Register offsets for the AES1 sub-engine on Tegra234. */
static const struct tegra_se_regs tegra234_aes1_regs = {
	.config = SE_AES1_CFG,
	.op = SE_AES1_OPERATION,
	.last_blk = SE_AES1_LAST_BLOCK,
	.linear_ctr = SE_AES1_LINEAR_CTR,
	.aad_len = SE_AES1_AAD_LEN,
	.cryp_msg_len = SE_AES1_CRYPTO_MSG_LEN,
	.manifest = SE_AES1_KEYMANIFEST,
	.key_addr = SE_AES1_KEY_ADDR,
	.key_data = SE_AES1_KEY_DATA,
	.key_dst = SE_AES1_KEY_DST,
	.result = SE_AES1_CMAC_RESULT,
};

/* Register offsets for the SHA/hash sub-engine on Tegra234. */
static const struct tegra_se_regs tegra234_hash_regs = {
	.config = SE_SHA_CFG,
	.op = SE_SHA_OPERATION,
	.manifest = SE_SHA_KEYMANIFEST,
	.key_addr = SE_SHA_KEY_ADDR,
	.key_data = SE_SHA_KEY_DATA,
	.key_dst = SE_SHA_KEY_DST,
	.result = SE_SHA_HASH_RESULT,
};
/* Tegra234 AES instance: host1x class 0x3b, key-access-control v1. */
static const struct tegra_se_hw tegra234_aes_hw = {
	.regs = &tegra234_aes1_regs,
	.kac_ver = 1,
	.host1x_class = 0x3b,
	.init_alg = tegra_init_aes,
	.deinit_alg = tegra_deinit_aes,
};

/* Tegra234 hash instance: host1x class 0x3d, key-access-control v1. */
static const struct tegra_se_hw tegra234_hash_hw = {
	.regs = &tegra234_hash_regs,
	.kac_ver = 1,
	.host1x_class = 0x3d,
	.init_alg = tegra_init_hash,
	.deinit_alg = tegra_deinit_hash,
};
/* Supported devicetree compatibles - T234 only (earlier chips use tegra-se-nvhost). */
static const struct of_device_id tegra_se_of_match[] = {
	{
		.compatible = "nvidia,tegra234-se2-aes",
		.data = &tegra234_aes_hw
	}, {
		.compatible = "nvidia,tegra234-se4-hash",
		.data = &tegra234_hash_hw,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_se_of_match);
/* Platform driver bound to the individual SE sub-engine nodes. */
static struct platform_driver tegra_se_driver = {
	.driver = {
		.name = "tegra-se",
		.of_match_table = tegra_se_of_match,
	},
	.probe = tegra_se_probe,
	.remove = tegra_se_remove,
};
/* host1x device probe: initialize all subdevices listed in the match table. */
static int tegra_se_host1x_probe(struct host1x_device *dev)
{
	return host1x_device_init(dev);
}

/* host1x device remove: tear down the subdevices. */
static int tegra_se_host1x_remove(struct host1x_device *dev)
{
	host1x_device_exit(dev);

	return 0;
}
/* host1x bus driver grouping the SE sub-engines as subdevices. */
static struct host1x_driver tegra_se_host1x_driver = {
	.driver = {
		.name = "tegra-se-host1x",
	},
	.probe = tegra_se_host1x_probe,
	.remove = tegra_se_host1x_remove,
	.subdevs = tegra_se_of_match,
};
/* Register the host1x driver first, then the platform driver. */
static int __init tegra_se_module_init(void)
{
	int ret;

	ret = host1x_driver_register(&tegra_se_host1x_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&tegra_se_driver);
	if (ret)
		/* Fix: don't leave the host1x driver registered on failure. */
		host1x_driver_unregister(&tegra_se_host1x_driver);

	return ret;
}
/* Unregister in reverse order of registration in tegra_se_module_init(). */
static void __exit tegra_se_module_exit(void)
{
	platform_driver_unregister(&tegra_se_driver);
	host1x_driver_unregister(&tegra_se_host1x_driver);
}
module_init(tegra_se_module_init);
module_exit(tegra_se_module_exit);
MODULE_DESCRIPTION("NVIDIA Tegra Security Engine Driver");
MODULE_AUTHOR("Akhil R <akhilrajeev@nvidia.com>");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,614 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Header file for NVIDIA Security Engine driver.
*/
#ifndef _TEGRA_SE_H
#define _TEGRA_SE_H
#include <linux/clk.h>
#include <linux/iommu.h>
#include <linux/host1x-next.h>
#include <linux/version.h>
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha3.h>
#include <crypto/sm3.h>
#include <crypto/skcipher.h>
#include <nvidia/conftest.h>
#define SE_MAX_INSTANCES 3
#define SE_OWNERSHIP 0x14
#define SE_OWNERSHIP_UID(x) FIELD_GET(GENMASK(7, 0), x)
#define TEGRA_GPSE_ID 3
#define SE_STREAM_ID 0x90
#define SE_SHA_CFG 0x4004
#define SE_SHA_KEY_ADDR 0x4094
#define SE_SHA_KEY_DATA 0x4098
#define SE_SHA_KEYMANIFEST 0x409c
#define SE_SHA_CRYPTO_CFG 0x40a4
#define SE_SHA_KEY_DST 0x40a8
#define SE_SHA_SRC_KSLT 0x4180
#define SE_SHA_TGT_KSLT 0x4184
#define SE_SHA_MSG_LENGTH 0x401c
#define SE_SHA_OPERATION 0x407c
#define SE_SHA_HASH_RESULT 0x40b0
#define SE_SHA_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x)
#define SE_SHA_ENC_MODE_SHA1 SE_SHA_ENC_MODE(0)
#define SE_SHA_ENC_MODE_SHA224 SE_SHA_ENC_MODE(4)
#define SE_SHA_ENC_MODE_SHA256 SE_SHA_ENC_MODE(5)
#define SE_SHA_ENC_MODE_SHA384 SE_SHA_ENC_MODE(6)
#define SE_SHA_ENC_MODE_SHA512 SE_SHA_ENC_MODE(7)
#define SE_SHA_ENC_MODE_SHA_CTX_INTEGRITY SE_SHA_ENC_MODE(8)
#define SE_SHA_ENC_MODE_SHA3_224 SE_SHA_ENC_MODE(9)
#define SE_SHA_ENC_MODE_SHA3_256 SE_SHA_ENC_MODE(10)
#define SE_SHA_ENC_MODE_SHA3_384 SE_SHA_ENC_MODE(11)
#define SE_SHA_ENC_MODE_SHA3_512 SE_SHA_ENC_MODE(12)
#define SE_SHA_ENC_MODE_SHAKE128 SE_SHA_ENC_MODE(13)
#define SE_SHA_ENC_MODE_SHAKE256 SE_SHA_ENC_MODE(14)
#define SE_SHA_ENC_MODE_HMAC_SHA256_1KEY SE_SHA_ENC_MODE(0)
#define SE_SHA_ENC_MODE_HMAC_SHA256_2KEY SE_SHA_ENC_MODE(1)
#define SE_SHA_ENC_MODE_SM3_256 SE_SHA_ENC_MODE(0)
#define SE_SHA_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x)
#define SE_SHA_ENC_ALG_NOP SE_SHA_CFG_ENC_ALG(0)
#define SE_SHA_ENC_ALG_SHA_ENC SE_SHA_CFG_ENC_ALG(1)
#define SE_SHA_ENC_ALG_RNG SE_SHA_CFG_ENC_ALG(2)
#define SE_SHA_ENC_ALG_SHA SE_SHA_CFG_ENC_ALG(3)
#define SE_SHA_ENC_ALG_SM3 SE_SHA_CFG_ENC_ALG(4)
#define SE_SHA_ENC_ALG_HMAC SE_SHA_CFG_ENC_ALG(7)
#define SE_SHA_ENC_ALG_KDF SE_SHA_CFG_ENC_ALG(8)
#define SE_SHA_ENC_ALG_KEY_INVLD SE_SHA_CFG_ENC_ALG(10)
#define SE_SHA_ENC_ALG_KEY_MOV SE_SHA_CFG_ENC_ALG(11)
#define SE_SHA_ENC_ALG_KEY_INQUIRE SE_SHA_CFG_ENC_ALG(12)
#define SE_SHA_ENC_ALG_INS SE_SHA_CFG_ENC_ALG(13)
#define SE_SHA_ENC_ALG_CLONE SE_SHA_CFG_ENC_ALG(14)
#define SE_SHA_ENC_ALG_LOCK SE_SHA_CFG_ENC_ALG(15)
#define SE_SHA_OP_LASTBUF FIELD_PREP(BIT(16), 1)
#define SE_SHA_OP_WRSTALL FIELD_PREP(BIT(15), 1)
#define SE_SHA_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x)
#define SE_SHA_OP_START SE_SHA_OP_OP(1)
#define SE_SHA_OP_RESTART_OUT SE_SHA_OP_OP(2)
#define SE_SHA_OP_RESTART_IN SE_SHA_OP_OP(4)
#define SE_SHA_OP_RESTART_INOUT SE_SHA_OP_OP(5)
#define SE_SHA_OP_DUMMY SE_SHA_OP_OP(6)
#define SE_SHA_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x)
#define SE_SHA_DEC_ALG_NOP SE_SHA_CFG_DEC_ALG(0)
#define SE_SHA_DEC_ALG_AES_DEC SE_SHA_CFG_DEC_ALG(1)
#define SE_SHA_DEC_ALG_HMAC SE_SHA_CFG_DEC_ALG(7)
#define SE_SHA_DEC_ALG_HMAC_VERIFY SE_SHA_CFG_DEC_ALG(9)
#define SE_SHA_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x)
#define SE_SHA_DST_MEMORY SE_SHA_CFG_DST(0)
#define SE_SHA_DST_HASH_REG SE_SHA_CFG_DST(1)
#define SE_SHA_DST_KEYTABLE SE_SHA_CFG_DST(2)
#define SE_SHA_DST_SRK SE_SHA_CFG_DST(3)
#define SE_SHA_TASK_HASH_INIT BIT(0)
/* AES Configuration */
#define SE_AES0_CFG 0x1004
#define SE_AES0_CRYPTO_CONFIG 0x1008
#define SE_AES0_KEY_DST 0x1030
#define SE_AES0_OPERATION 0x1038
#define SE_AES0_LINEAR_CTR 0x101c
#define SE_AES0_LAST_BLOCK 0x102c
#define SE_AES0_KEY_ADDR 0x10bc
#define SE_AES0_KEY_DATA 0x10c0
#define SE_AES0_CMAC_RESULT 0x10c4
#define SE_AES0_SRC_KSLT 0x1100
#define SE_AES0_TGT_KSLT 0x1104
#define SE_AES0_KEYMANIFEST 0x1114
#define SE_AES0_AAD_LEN 0x112c
#define SE_AES0_CRYPTO_MSG_LEN 0x1134
#define SE_AES1_CFG 0x2004
#define SE_AES1_CRYPTO_CONFIG 0x2008
#define SE_AES1_KEY_DST 0x2030
#define SE_AES1_OPERATION 0x2038
#define SE_AES1_LINEAR_CTR 0x201c
#define SE_AES1_LAST_BLOCK 0x202c
#define SE_AES1_KEY_ADDR 0x20bc
#define SE_AES1_KEY_DATA 0x20c0
#define SE_AES1_CMAC_RESULT 0x20c4
#define SE_AES1_SRC_KSLT 0x2100
#define SE_AES1_TGT_KSLT 0x2104
#define SE_AES1_KEYMANIFEST 0x2114
#define SE_AES1_AAD_LEN 0x212c
#define SE_AES1_CRYPTO_MSG_LEN 0x2134
#define SE_AES_CFG_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x)
#define SE_AES_ENC_MODE_GMAC SE_AES_CFG_ENC_MODE(3)
#define SE_AES_ENC_MODE_GCM SE_AES_CFG_ENC_MODE(4)
#define SE_AES_ENC_MODE_GCM_FINAL SE_AES_CFG_ENC_MODE(5)
#define SE_AES_ENC_MODE_CMAC SE_AES_CFG_ENC_MODE(7)
#define SE_AES_ENC_MODE_CBC_MAC SE_AES_CFG_ENC_MODE(12)
#define SE_AES_CFG_DEC_MODE(x) FIELD_PREP(GENMASK(23, 16), x)
#define SE_AES_DEC_MODE_GMAC SE_AES_CFG_DEC_MODE(3)
#define SE_AES_DEC_MODE_GCM SE_AES_CFG_DEC_MODE(4)
#define SE_AES_DEC_MODE_GCM_FINAL SE_AES_CFG_DEC_MODE(5)
#define SE_AES_DEC_MODE_CBC_MAC SE_AES_CFG_DEC_MODE(12)
#define SE_AES_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x)
#define SE_AES_ENC_ALG_NOP SE_AES_CFG_ENC_ALG(0)
#define SE_AES_ENC_ALG_AES_ENC SE_AES_CFG_ENC_ALG(1)
#define SE_AES_ENC_ALG_RNG SE_AES_CFG_ENC_ALG(2)
#define SE_AES_ENC_ALG_SHA SE_AES_CFG_ENC_ALG(3)
#define SE_AES_ENC_ALG_HMAC SE_AES_CFG_ENC_ALG(7)
#define SE_AES_ENC_ALG_KDF SE_AES_CFG_ENC_ALG(8)
#define SE_AES_ENC_ALG_INS SE_AES_CFG_ENC_ALG(13)
#define SE_AES_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x)
#define SE_AES_DEC_ALG_NOP SE_AES_CFG_DEC_ALG(0)
#define SE_AES_DEC_ALG_AES_DEC SE_AES_CFG_DEC_ALG(1)
#define SE_AES_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x)
#define SE_AES_DST_MEMORY SE_AES_CFG_DST(0)
#define SE_AES_DST_HASH_REG SE_AES_CFG_DST(1)
#define SE_AES_DST_KEYTABLE SE_AES_CFG_DST(2)
#define SE_AES_DST_SRK SE_AES_CFG_DST(3)
/* AES Crypto Configuration */
#define SE_AES_KEY2_INDEX(x) FIELD_PREP(GENMASK(31, 28), x)
#define SE_AES_KEY_INDEX(x) FIELD_PREP(GENMASK(27, 24), x)
#define SE_AES_CRYPTO_CFG_SCC_DIS FIELD_PREP(BIT(20), 1)
#define SE_AES_CRYPTO_CFG_CTR_CNTN(x) FIELD_PREP(GENMASK(18, 11), x)
#define SE_AES_CRYPTO_CFG_IV_MODE(x) FIELD_PREP(BIT(10), x)
#define SE_AES_IV_MODE_SWIV SE_AES_CRYPTO_CFG_IV_MODE(0)
#define SE_AES_IV_MODE_HWIV SE_AES_CRYPTO_CFG_IV_MODE(1)
#define SE_AES_CRYPTO_CFG_CORE_SEL(x) FIELD_PREP(BIT(9), x)
#define SE_AES_CORE_SEL_DECRYPT SE_AES_CRYPTO_CFG_CORE_SEL(0)
#define SE_AES_CORE_SEL_ENCRYPT SE_AES_CRYPTO_CFG_CORE_SEL(1)
#define SE_AES_CRYPTO_CFG_IV_SEL(x) FIELD_PREP(GENMASK(8, 7), x)
#define SE_AES_IV_SEL_UPDATED SE_AES_CRYPTO_CFG_IV_SEL(1)
#define SE_AES_IV_SEL_REG SE_AES_CRYPTO_CFG_IV_SEL(2)
#define SE_AES_IV_SEL_RANDOM SE_AES_CRYPTO_CFG_IV_SEL(3)
#define SE_AES_CRYPTO_CFG_VCTRAM_SEL(x) FIELD_PREP(GENMASK(6, 5), x)
#define SE_AES_VCTRAM_SEL_MEMORY SE_AES_CRYPTO_CFG_VCTRAM_SEL(0)
#define SE_AES_VCTRAM_SEL_TWEAK SE_AES_CRYPTO_CFG_VCTRAM_SEL(1)
#define SE_AES_VCTRAM_SEL_AESOUT SE_AES_CRYPTO_CFG_VCTRAM_SEL(2)
#define SE_AES_VCTRAM_SEL_PREV_MEM SE_AES_CRYPTO_CFG_VCTRAM_SEL(3)
#define SE_AES_CRYPTO_CFG_INPUT_SEL(x) FIELD_PREP(GENMASK(4, 3), x)
#define SE_AES_INPUT_SEL_MEMORY SE_AES_CRYPTO_CFG_INPUT_SEL(0)
#define SE_AES_INPUT_SEL_RANDOM SE_AES_CRYPTO_CFG_INPUT_SEL(1)
#define SE_AES_INPUT_SEL_AESOUT SE_AES_CRYPTO_CFG_INPUT_SEL(2)
#define SE_AES_INPUT_SEL_LINEAR_CTR SE_AES_CRYPTO_CFG_INPUT_SEL(3)
#define SE_AES_INPUT_SEL_REG SE_AES_CRYPTO_CFG_INPUT_SEL(1)
#define SE_AES_CRYPTO_CFG_XOR_POS(x) FIELD_PREP(GENMASK(2, 1), x)
#define SE_AES_XOR_POS_BYPASS SE_AES_CRYPTO_CFG_XOR_POS(0)
#define SE_AES_XOR_POS_BOTH SE_AES_CRYPTO_CFG_XOR_POS(1)
#define SE_AES_XOR_POS_TOP SE_AES_CRYPTO_CFG_XOR_POS(2)
#define SE_AES_XOR_POS_BOTTOM SE_AES_CRYPTO_CFG_XOR_POS(3)
#define SE_AES_CRYPTO_CFG_HASH_EN(x) FIELD_PREP(BIT(0), x)
#define SE_AES_HASH_DISABLE SE_AES_CRYPTO_CFG_HASH_EN(0)
#define SE_AES_HASH_ENABLE SE_AES_CRYPTO_CFG_HASH_EN(1)
#define SE_LAST_BLOCK_VAL(x) FIELD_PREP(GENMASK(19, 0), x)
#define SE_LAST_BLOCK_RES_BITS(x) FIELD_PREP(GENMASK(26, 20), x)
#define SE_AES_OP_LASTBUF FIELD_PREP(BIT(16), 1)
#define SE_AES_OP_WRSTALL FIELD_PREP(BIT(15), 1)
#define SE_AES_OP_FINAL FIELD_PREP(BIT(5), 1)
#define SE_AES_OP_INIT FIELD_PREP(BIT(4), 1)
#define SE_AES_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x)
#define SE_AES_OP_START SE_AES_OP_OP(1)
#define SE_AES_OP_RESTART_OUT SE_AES_OP_OP(2)
#define SE_AES_OP_RESTART_IN SE_AES_OP_OP(4)
#define SE_AES_OP_RESTART_INOUT SE_AES_OP_OP(5)
#define SE_AES_OP_DUMMY SE_AES_OP_OP(6)
#define SE_KAC_SIZE(x) FIELD_PREP(GENMASK(15, 14), x)
#define SE_KAC_SIZE_128 SE_KAC_SIZE(0)
#define SE_KAC_SIZE_192 SE_KAC_SIZE(1)
#define SE_KAC_SIZE_256 SE_KAC_SIZE(2)
#define SE_KAC_EXPORTABLE FIELD_PREP(BIT(12), 1)
#define SE_KAC_PURPOSE(x) FIELD_PREP(GENMASK(11, 8), x)
#define SE_KAC_ENC SE_KAC_PURPOSE(0)
#define SE_KAC_CMAC SE_KAC_PURPOSE(1)
#define SE_KAC_HMAC SE_KAC_PURPOSE(2)
#define SE_KAC_GCM_KW SE_KAC_PURPOSE(3)
#define SE_KAC_HMAC_KDK SE_KAC_PURPOSE(6)
#define SE_KAC_HMAC_KDD SE_KAC_PURPOSE(7)
#define SE_KAC_HMAC_KDD_KUW SE_KAC_PURPOSE(8)
#define SE_KAC_XTS SE_KAC_PURPOSE(9)
#define SE_KAC_GCM SE_KAC_PURPOSE(10)
#define SE_KAC_USER_NS FIELD_PREP(GENMASK(6, 4), 3)
#define SE_AES_KEY_DST_INDEX(x) FIELD_PREP(GENMASK(11, 8), x)
#define SE_ADDR_HI_MSB(x) FIELD_PREP(GENMASK(31, 24), x)
#define SE_ADDR_HI_SZ(x) FIELD_PREP(GENMASK(23, 0), x)
#define SE_CFG_AES_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_DEC_ALG_NOP | \
SE_AES_DST_MEMORY)
#define SE_CFG_AES_DECRYPT (SE_AES_ENC_ALG_NOP | \
SE_AES_DEC_ALG_AES_DEC | \
SE_AES_DST_MEMORY)
#define SE_CFG_GMAC_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_DEC_ALG_NOP | \
SE_AES_ENC_MODE_GMAC | \
SE_AES_DST_MEMORY)
#define SE_CFG_GMAC_DECRYPT (SE_AES_ENC_ALG_NOP | \
SE_AES_DEC_ALG_AES_DEC | \
SE_AES_DEC_MODE_GMAC | \
SE_AES_DST_MEMORY)
#define SE_CFG_GCM_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_DEC_ALG_NOP | \
SE_AES_ENC_MODE_GCM | \
SE_AES_DST_MEMORY)
#define SE_CFG_GCM_DECRYPT (SE_AES_ENC_ALG_NOP | \
SE_AES_DEC_ALG_AES_DEC | \
SE_AES_DEC_MODE_GCM | \
SE_AES_DST_MEMORY)
#define SE_CFG_GCM_FINAL_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_DEC_ALG_NOP | \
SE_AES_ENC_MODE_GCM_FINAL | \
SE_AES_DST_MEMORY)
#define SE_CFG_GCM_FINAL_DECRYPT (SE_AES_ENC_ALG_NOP | \
SE_AES_DEC_ALG_AES_DEC | \
SE_AES_DEC_MODE_GCM_FINAL | \
SE_AES_DST_MEMORY)
#define SE_CFG_CMAC (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_ENC_MODE_CMAC | \
SE_AES_DST_HASH_REG)
#define SE_CFG_CBC_MAC (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_ENC_MODE_CBC_MAC)
#define SE_CFG_INS (SE_AES_ENC_ALG_INS | \
SE_AES_DEC_ALG_NOP)
#define SE_CRYPTO_CFG_ECB_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_XOR_POS_BYPASS | \
SE_AES_CORE_SEL_ENCRYPT)
#define SE_CRYPTO_CFG_ECB_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_XOR_POS_BYPASS | \
SE_AES_CORE_SEL_DECRYPT)
#define SE_CRYPTO_CFG_CBC_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_VCTRAM_SEL_AESOUT | \
SE_AES_XOR_POS_TOP | \
SE_AES_CORE_SEL_ENCRYPT | \
SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_CBC_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_VCTRAM_SEL_PREV_MEM | \
SE_AES_XOR_POS_BOTTOM | \
SE_AES_CORE_SEL_DECRYPT | \
SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_OFB (SE_AES_INPUT_SEL_AESOUT | \
SE_AES_VCTRAM_SEL_MEMORY | \
SE_AES_XOR_POS_BOTTOM | \
SE_AES_CORE_SEL_ENCRYPT | \
SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_CTR (SE_AES_INPUT_SEL_LINEAR_CTR | \
SE_AES_VCTRAM_SEL_MEMORY | \
SE_AES_XOR_POS_BOTTOM | \
SE_AES_CORE_SEL_ENCRYPT | \
SE_AES_CRYPTO_CFG_CTR_CNTN(1) | \
SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_XTS_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_VCTRAM_SEL_TWEAK | \
SE_AES_XOR_POS_BOTH | \
SE_AES_CORE_SEL_ENCRYPT | \
SE_AES_IV_SEL_REG)
/* Fix: this macro was defined twice with identical bodies; keep one copy. */
#define SE_CRYPTO_CFG_XTS_DECRYPT	(SE_AES_INPUT_SEL_MEMORY | \
					 SE_AES_VCTRAM_SEL_TWEAK | \
					 SE_AES_XOR_POS_BOTH | \
					 SE_AES_CORE_SEL_DECRYPT | \
					 SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_CBC_MAC (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_VCTRAM_SEL_AESOUT | \
SE_AES_XOR_POS_TOP | \
SE_AES_CORE_SEL_ENCRYPT | \
SE_AES_HASH_ENABLE | \
SE_AES_IV_SEL_REG)
#define HASH_RESULT_REG_COUNT 50
#define CMAC_RESULT_REG_COUNT 4
#define SE_CRYPTO_CTR_REG_COUNT 4
#define SE_MAX_KEYSLOT 15
#define SE_MAX_MEM_ALLOC SZ_4M
#define SE_AES_BUFLEN 0x8000
#define SE_SHA_BUFLEN SZ_4M
#define SHA_FIRST BIT(0)
#define SHA_UPDATE BIT(1)
#define SHA_FINAL BIT(2)
/* Security Engine operation modes */
enum se_aes_alg {
	SE_ALG_CBC,		/* Cipher Block Chaining (CBC) mode */
	SE_ALG_ECB,		/* Electronic Codebook (ECB) mode */
	SE_ALG_CTR,		/* Counter (CTR) mode */
	SE_ALG_OFB,		/* Output Feedback (OFB) mode - comment previously said CFB */
	SE_ALG_XTS,		/* XTS mode */
	SE_ALG_GMAC,		/* GMAC mode */
	SE_ALG_GCM,		/* GCM mode */
	SE_ALG_GCM_FINAL,	/* GCM FINAL mode */
	SE_ALG_CMAC,		/* Cipher-based MAC (CMAC) mode */
	SE_ALG_CBC_MAC,		/* CBC MAC mode */
};
enum se_hash_alg {
SE_ALG_RNG_DRBG, /* Deterministic Random Bit Generator */
SE_ALG_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */
SE_ALG_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */
SE_ALG_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */
SE_ALG_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */
SE_ALG_SHA512, /* Secure Hash Algorithm-512 (SHA512) mode */
SE_ALG_SHA3_224, /* Secure Hash Algorithm3-224 (SHA3-224) mode */
SE_ALG_SHA3_256, /* Secure Hash Algorithm3-256 (SHA3-256) mode */
SE_ALG_SHA3_384, /* Secure Hash Algorithm3-384 (SHA3-384) mode */
SE_ALG_SHA3_512, /* Secure Hash Algorithm3-512 (SHA3-512) mode */
SE_ALG_SHAKE128, /* Secure Hash Algorithm3 (SHAKE128) mode */
SE_ALG_SHAKE256, /* Secure Hash Algorithm3 (SHAKE256) mode */
SE_ALG_HMAC_SHA224, /* Hash based MAC (HMAC) - 224 */
SE_ALG_HMAC_SHA256, /* Hash based MAC (HMAC) - 256 */
SE_ALG_HMAC_SHA384, /* Hash based MAC (HMAC) - 384 */
SE_ALG_HMAC_SHA512, /* Hash based MAC (HMAC) - 512 */
};
/* Wrapper tying one registered crypto algorithm back to its SE instance. */
struct tegra_se_alg {
	struct tegra_se *se_dev;
	const char *alg_base;	/* name of the software fallback algorithm */

	union {
		struct skcipher_alg skcipher;
		struct aead_alg aead;
		struct ahash_alg ahash;
	} alg;
};
/* Per-sub-engine register offsets (AES and hash engines differ). */
struct tegra_se_regs {
	u32 op;
	u32 config;
	u32 last_blk;
	u32 linear_ctr;
	u32 out_addr;
	u32 aad_len;
	u32 cryp_msg_len;
	u32 manifest;
	u32 key_addr;
	u32 key_data;
	u32 key_dst;
	u32 result;
};
/* Chip/instance description selected via the of_device_id match data. */
struct tegra_se_hw {
	const struct tegra_se_regs *regs;
	int (*init_alg)(struct tegra_se *se);	/* register crypto algorithms */
	void (*deinit_alg)(void);		/* unregister crypto algorithms */
	bool support_sm_alg;			/* SM3/SM4 support */
	u32 host1x_class;			/* host1x class ID of this engine */
	u32 kac_ver;				/* key access control version */
};
/* Per-instance driver state for one Security Engine. */
struct tegra_se {
	int (*manifest)(u32 user, u32 alg, u32 keylen);	/* builds the key-manifest word */
	const struct tegra_se_hw *hw;
	struct crypto_engine *engine;		/* request queueing engine */
	struct host1x_channel *channel;
	struct host1x_client client;
	struct host1x_syncpt *syncpt;		/* job-completion syncpoint */
	struct clk_bulk_data *clks;
	struct tegra_se_cmdbuf *cmdbuf;		/* shared command buffer for submissions */
	struct device *dev;
	unsigned int opcode_addr;
	unsigned int stream_id;			/* IOMMU stream ID programmed at SE_STREAM_ID */
	unsigned int num_clks;
	unsigned int syncpt_id;
	void __iomem *base;			/* MMIO register base */
	u32 owner;				/* key ownership ID (TEGRA_GPSE_ID) */
};
/* Refcounted, DMA-coherent host1x command buffer. */
struct tegra_se_cmdbuf {
	dma_addr_t iova;	/* device/IOVA address of the buffer */
	u32 *addr;		/* kernel virtual address */
	struct device *dev;	/* device the DMA memory was allocated from */
	struct kref ref;
	struct host1x_bo bo;
	u32 words;		/* number of opcode words currently in use */
	ssize_t size;		/* total allocation size in bytes */
};

/* Generic DMA data buffer used by the AES/hash paths. */
struct tegra_se_datbuf {
	u8 *buf;
	dma_addr_t addr;
	ssize_t size;
};
/*
 * Map a crypto algorithm name to its SE_ALG_* identifier.
 * Returns -EINVAL for names the driver does not recognize.
 */
static inline int se_algname_to_algid(const char *name)
{
	static const struct {
		const char *name;
		int alg;
	} alg_map[] = {
		{ "cbc(aes)",		SE_ALG_CBC },
		{ "ecb(aes)",		SE_ALG_ECB },
		{ "ofb(aes)",		SE_ALG_OFB },
		{ "ctr(aes)",		SE_ALG_CTR },
		{ "xts(aes)",		SE_ALG_XTS },
		{ "cmac(aes)",		SE_ALG_CMAC },
		{ "gcm(aes)",		SE_ALG_GCM },
		{ "ccm(aes)",		SE_ALG_CBC_MAC },
		{ "sha1",		SE_ALG_SHA1 },
		{ "sha224",		SE_ALG_SHA224 },
		{ "sha256",		SE_ALG_SHA256 },
		{ "sha384",		SE_ALG_SHA384 },
		{ "sha512",		SE_ALG_SHA512 },
		{ "sha3-224",		SE_ALG_SHA3_224 },
		{ "sha3-256",		SE_ALG_SHA3_256 },
		{ "sha3-384",		SE_ALG_SHA3_384 },
		{ "sha3-512",		SE_ALG_SHA3_512 },
		{ "hmac(sha224)",	SE_ALG_HMAC_SHA224 },
		{ "hmac(sha256)",	SE_ALG_HMAC_SHA256 },
		{ "hmac(sha384)",	SE_ALG_HMAC_SHA384 },
		{ "hmac(sha512)",	SE_ALG_HMAC_SHA512 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(alg_map) / sizeof(alg_map[0]); i++) {
		if (!strcmp(name, alg_map[i].name))
			return alg_map[i].alg;
	}

	return -EINVAL;
}
/* Functions */
int tegra_init_aead(struct tegra_se *se);
int tegra_init_aes(struct tegra_se *se);
int tegra_init_hash(struct tegra_se *se);
void tegra_deinit_aead(void);
void tegra_deinit_aes(void);
void tegra_deinit_hash(void);
int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid);
unsigned int tegra_key_get_idx(struct tegra_se *se, u32 keyid);
void tegra_key_invalidate(struct tegra_se *se, u32 keyid);
int tegra_se_host1x_register(struct tegra_se *se);
int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
/* MMIO write to an SE register (relaxed: no ordering barrier implied). */
static inline void se_writel(struct tegra_se *se, unsigned int val,
			     unsigned int offset)
{
	writel_relaxed(val, se->base + offset);
}

/* MMIO read of an SE register (relaxed: no ordering barrier implied). */
static inline u32 se_readl(struct tegra_se *se, unsigned int offset)
{
	return readl_relaxed(se->base + offset);
}
/*
 * Host1x opcode and uclass helpers.
 *
 * Local encoders for the Host1x opcodes used to build SE command
 * streams. The *_w forms use the wide 22-bit register-offset encoding;
 * the wrapper macros divide byte offsets by 4 to get word offsets.
 */
static inline u32 host1x_opcode_setpayload(unsigned int payload)
{
	/* opcode 9: SETPAYLOAD */
	return (9 << 28) | payload;
}

#define host1x_opcode_incr_w(x) __host1x_opcode_incr_w((x) / 4)
static inline u32 __host1x_opcode_incr_w(unsigned int offset)
{
	/* opcode 10, 22-bit offset supported */
	return (10 << 28) | offset;
}

#define host1x_opcode_nonincr_w(x) __host1x_opcode_nonincr_w((x) / 4)
static inline u32 __host1x_opcode_nonincr_w(unsigned int offset)
{
	/* opcode 11, 22-bit offset supported */
	return (11 << 28) | offset;
}

#define host1x_opcode_incr(x, y) __host1x_opcode_incr((x) / 4, y)
static inline u32 __host1x_opcode_incr(unsigned int offset, unsigned int count)
{
	/* opcode 1: INCR - @count words to consecutive registers */
	return (1 << 28) | (offset << 16) | count;
}

#define host1x_opcode_nonincr(x, y) __host1x_opcode_nonincr((x) / 4, y)
static inline u32 __host1x_opcode_nonincr(unsigned int offset, unsigned int count)
{
	/* opcode 2: NONINCR - @count words to a single register */
	return (2 << 28) | (offset << 16) | count;
}

/* Condition field of the INCR_SYNCPT uclass register (bits 17:10). */
static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
{
	return (v & 0xff) << 10;
}

/* Syncpoint index field of the INCR_SYNCPT uclass register (bits 9:0). */
static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
{
	return (v & 0x3ff) << 0;
}

/* Uclass register offset: WAIT_SYNCPT. */
static inline u32 host1x_uclass_wait_syncpt_r(void)
{
	return 0x8;
}

/* Uclass register offset: INCR_SYNCPT. */
static inline u32 host1x_uclass_incr_syncpt_r(void)
{
	return 0x0;
}
#if !defined(NV_TEGRA_DEV_IOMMU_GET_STREAM_ID_PRESENT)
/*
 * Fallback for kernels predating the upstream helper (added in v6.2,
 * per the conftest check): read the stream ID from the device's IOMMU
 * fwspec. Returns false when no unambiguous single-ID fwspec exists.
 */
static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	/* Only a single-ID fwspec maps to one stream ID. */
	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}
#endif
#endif /*_TEGRA_SE_H*/

View File

@@ -106,6 +106,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += netif_set_tso_max_size
NV_CONFTEST_FUNCTION_COMPILE_TESTS += netif_napi_add_weight
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
NV_CONFTEST_FUNCTION_COMPILE_TESTS += register_shrinker_has_fmt_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_dev_iommu_get_stream_id
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_ivc_struct_has_iosys_map
NV_CONFTEST_GENERIC_COMPILE_TESTS ?=
NV_CONFTEST_MACRO_COMPILE_TESTS ?=

View File

@@ -6646,6 +6646,23 @@ compile_test() {
compile_check_conftest "$CODE" "NV_TEGRA_IVC_STRUCT_HAS_IOSYS_MAP" "" "types"
;;
tegra_dev_iommu_get_stream_id)
#
# Determine if the function tegra_dev_iommu_get_stream_id is present.
#
# tegra_dev_iommu_get_stream_id was added in commit 493c9b68d1d8
# ("iommu/tegra: Add tegra_dev_iommu_get_stream_id() helper") in
# v6.2 (2022-12-07)
#
CODE="
#include <linux/iommu.h>
bool conftest_tegra_dev_iommu_get_stream_id(void) {
return tegra_dev_iommu_get_stream_id();
}"
compile_check_conftest "$CODE" "NV_TEGRA_DEV_IOMMU_GET_STREAM_ID_PRESENT" "" "functions"
;;
# When adding a new conftest entry, please use the correct format for
# specifying the relevant upstream Linux kernel commit.
#