Merge "tsec: Merge the tsec driver from kernel/nvidia to kernel/nvidia-oot" into dev-main

This commit is contained in:
Gerrit Code Review
2023-04-06 00:43:21 -07:00
13 changed files with 3401 additions and 8 deletions

View File

@@ -1,9 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Tsec Driver code.
#
# NOTE: Do not change or add anything in this makefile.
# The source code and makefile rules are copied from
# kernel/nvidia/drivers/video/tegra/tsec. This file is
# just a placeholder (an empty makefile) to avoid build
# issues when the copy is not done from the command line and
# the tree is built independently of the source copy.
GCOV_PROFILE := y
# Set config to build as module for OOT build
ifeq ($(CONFIG_TEGRA_OOT_MODULE),m)
CONFIG_TEGRA_TSEC := m
endif
obj-$(CONFIG_TEGRA_TSEC) += tsecriscv.o
tsecriscv-y := tsec_comms/tsec_comms.o tsec_boot.o tsec.o

View File

@@ -0,0 +1,322 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra TSEC Module Support
*
* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "tsec_linux.h"
#include "tsec.h"
#include "tsec_boot.h"
#include "tsec_regs.h"
/*
* TSEC Device Data
*/
static struct tsec_device_data t23x_tsec_data = {
.rate = {192000000, 0, 204000000},
.riscv_desc_bin = "tegra23x/nvhost_tsec_desc.fw",
.riscv_image_bin = "tegra23x/nvhost_tsec_riscv.fw",
};
static struct tsec_device_data t239_tsec_data = {
.rate = {192000000, 0, 204000000},
.riscv_desc_bin = "tegra239/nvhost_tsec_desc.fw",
.riscv_image_bin = "tegra239/nvhost_tsec_riscv.fw",
};
/*
* TSEC Register Access APIs
*/
void tsec_writel(struct tsec_device_data *pdata, u32 r, u32 v)
{
void __iomem *addr = pdata->reg_aperture + r;
writel(v, addr);
}
u32 tsec_readl(struct tsec_device_data *pdata, u32 r)
{
void __iomem *addr = pdata->reg_aperture + r;
return readl(addr);
}
/*
* TSEC helpers for clock, reset and register initialisation
*/
static int tsec_enable_clks(struct tsec_device_data *pdata)
{
int err = 0, index = 0;
for (index = 0; index < TSEC_NUM_OF_CLKS; index++) {
err = clk_prepare_enable(pdata->clk[index]);
if (err) {
err = -EINVAL;
goto out;
}
}
out:
return err;
}
static void tsec_disable_clk(struct tsec_device_data *pdata)
{
int index = 0;
for (index = 0; index < TSEC_NUM_OF_CLKS; index++)
clk_disable_unprepare(pdata->clk[index]);
}
static void tsec_deassert_reset(struct tsec_device_data *pdata)
{
reset_control_acquire(pdata->reset_control);
reset_control_reset(pdata->reset_control);
reset_control_release(pdata->reset_control);
}
static void tsec_set_streamid_regs(struct device *dev,
struct tsec_device_data *pdata)
{
struct iommu_fwspec *fwspec;
int streamid;
/* Get the StreamID value */
fwspec = dev_iommu_fwspec_get(dev);
if (fwspec && fwspec->num_ids)
streamid = fwspec->ids[0] & 0xffff;
else
streamid = 0x7F; /* bypass hwid */
/* Update the StreamID value */
tsec_writel(pdata, tsec_thi_streamid0_r(), streamid);
tsec_writel(pdata, tsec_thi_streamid1_r(), streamid);
}
static void tsec_set_cg_regs(struct tsec_device_data *pdata)
{
tsec_writel(pdata, tsec_priv_blocker_ctrl_cg1_r(), 0x0);
tsec_writel(pdata, tsec_riscv_cg_r(), 0x3);
}
/*
* TSEC Power Management Operations
*/
int tsec_poweron(struct device *dev)
{
struct tsec_device_data *pdata;
int err = 0, tsec_clks_enabled = 0;
pdata = dev_get_drvdata(dev);
err = tsec_enable_clks(pdata);
if (err) {
dev_err(dev, "Cannot enable tsec clocks %d\n", err);
goto out;
}
tsec_clks_enabled = 1;
tsec_deassert_reset(pdata);
tsec_set_cg_regs(pdata);
tsec_set_streamid_regs(dev, pdata);
err = tsec_finalize_poweron(to_platform_device(dev));
/* Failed to start the device */
if (err) {
dev_err(dev, "tsec_finalize_poweron error %d\n", err);
goto out;
}
pdata->power_on = true;
out:
if (err && tsec_clks_enabled)
tsec_disable_clk(pdata);
return err;
}
int tsec_poweroff(struct device *dev)
{
struct tsec_device_data *pdata;
pdata = dev_get_drvdata(dev);
if (pdata->power_on) {
tsec_prepare_poweroff(to_platform_device(dev));
tsec_disable_clk(pdata);
pdata->power_on = false;
}
return 0;
}
static int tsec_module_suspend(struct device *dev)
{
return tsec_poweroff(dev);
}
static int tsec_module_resume(struct device *dev)
{
return tsec_poweron(dev);
}
/*
* TSEC Probe/Remove and Module Init
*/
static int tsec_module_init(struct platform_device *dev)
{
struct tsec_device_data *pdata = platform_get_drvdata(dev);
struct resource *res = NULL;
void __iomem *regs = NULL;
/* Initialize dma parameters */
dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(39));
dev->dev.dma_parms = &pdata->dma_parms;
dma_set_max_seg_size(&dev->dev, UINT_MAX);
/* Get register aperture */
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
regs = devm_ioremap_resource(&dev->dev, res);
if (IS_ERR(regs)) {
int err = PTR_ERR(regs);
dev_err(&dev->dev, "failed to get register memory %d\n", err);
return err;
}
pdata->reg_aperture = regs;
/* Get interrupt */
pdata->irq = platform_get_irq(dev, 0);
if (pdata->irq < 0) {
dev_err(&dev->dev, "failed to get irq %d\n", -pdata->irq);
return -ENXIO;
}
/* get TSEC_CLK and enable it */
pdata->clk[TSEC_CLK_INDEX] = devm_clk_get(&dev->dev, TSEC_CLK_NAME);
if (IS_ERR(pdata->clk[TSEC_CLK_INDEX])) {
dev_err(&dev->dev, "failed to get %s clk", TSEC_CLK_NAME);
return -ENXIO;
}
clk_set_rate(pdata->clk[TSEC_CLK_INDEX],
clk_round_rate(pdata->clk[TSEC_CLK_INDEX],
pdata->rate[TSEC_CLK_INDEX]));
clk_prepare_enable(pdata->clk[TSEC_CLK_INDEX]);
/* get EFUSE_CLK and enable it */
pdata->clk[EFUSE_CLK_INDEX] = devm_clk_get(&dev->dev, EFUSE_CLK_NAME);
if (IS_ERR(pdata->clk[EFUSE_CLK_INDEX])) {
dev_err(&dev->dev, "failed to get %s clk", EFUSE_CLK_NAME);
clk_disable_unprepare(pdata->clk[TSEC_CLK_INDEX]);
return -ENXIO;
}
clk_set_rate(pdata->clk[EFUSE_CLK_INDEX],
clk_round_rate(pdata->clk[EFUSE_CLK_INDEX],
pdata->rate[EFUSE_CLK_INDEX]));
clk_prepare_enable(pdata->clk[EFUSE_CLK_INDEX]);
/* get TSEC_PKA_CLK and enable it */
pdata->clk[TSEC_PKA_CLK_INDEX] = devm_clk_get(&dev->dev, TSEC_PKA_CLK_NAME);
if (IS_ERR(pdata->clk[TSEC_PKA_CLK_INDEX])) {
dev_err(&dev->dev, "failed to get %s clk", TSEC_PKA_CLK_NAME);
clk_disable_unprepare(pdata->clk[EFUSE_CLK_INDEX]);
clk_disable_unprepare(pdata->clk[TSEC_CLK_INDEX]);
return -ENXIO;
}
clk_set_rate(pdata->clk[TSEC_PKA_CLK_INDEX],
clk_round_rate(pdata->clk[TSEC_PKA_CLK_INDEX],
pdata->rate[TSEC_PKA_CLK_INDEX]));
clk_prepare_enable(pdata->clk[TSEC_PKA_CLK_INDEX]);
/* get reset_control and reset the module */
pdata->reset_control = devm_reset_control_get_exclusive_released(
&dev->dev, NULL);
if (IS_ERR(pdata->reset_control))
pdata->reset_control = NULL;
tsec_deassert_reset(pdata);
/* disable the clocks after resetting the module */
tsec_disable_clk(pdata);
return 0;
}
static const struct dev_pm_ops tsec_module_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tsec_module_suspend, tsec_module_resume)
};
static const struct of_device_id tsec_of_match[] = {
{ .compatible = "nvidia,tegra234-tsec",
.data = (struct tsec_device_data *)&t23x_tsec_data },
{ .compatible = "nvidia,tegra239-tsec",
.data = (struct tsec_device_data *)&t239_tsec_data },
{ },
};
static int tsec_probe(struct platform_device *dev)
{
int err;
struct tsec_device_data *pdata = NULL;
/* Get device platform data */
if (dev->dev.of_node) {
const struct of_device_id *match;
match = of_match_device(tsec_of_match, &dev->dev);
if (match)
pdata = (struct tsec_device_data *)match->data;
} else {
pdata = (struct tsec_device_data *)dev->dev.platform_data;
}
if (!pdata) {
dev_err(&dev->dev, "no platform data\n");
return -ENODATA;
}
pdata->pdev = dev;
platform_set_drvdata(dev, pdata);
err = tsec_module_init(dev);
if (err) {
dev_err(&dev->dev, "error %d in tsec_module_init\n", err);
return err;
}
return tsec_kickoff_boot(dev);
}
static int tsec_remove(struct platform_device *dev)
{
return tsec_poweroff(&dev->dev);
}
static struct platform_driver tsec_driver = {
.probe = tsec_probe,
.remove = tsec_remove,
.driver = {
.owner = THIS_MODULE,
.name = "tsec",
.pm = &tsec_module_pm_ops,
.of_match_table = tsec_of_match,
}
};
module_platform_driver(tsec_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nikesh Oswal <noswal@nvidia.com>");
MODULE_DEVICE_TABLE(of, tsec_of_match);
MODULE_DESCRIPTION("TSEC Driver");
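
The three clock blocks in tsec_module_init() repeat one idiom: devm_clk_get(), clk_set_rate() on the result of clk_round_rate(), then clk_prepare_enable(). A minimal sketch of that idiom as a hypothetical helper is shown below; the helper name and error handling are illustrative only, and the driver itself keeps the three blocks inline so each failure path can unwind the previously enabled clocks:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper illustrating the clock bring-up idiom above. */
static struct clk *tsec_get_and_enable_clk(struct device *dev,
					   const char *name, long rate)
{
	struct clk *clk = devm_clk_get(dev, name);
	int err;

	if (IS_ERR(clk))
		return clk;
	/* clk_round_rate() reports the rate the clock would actually
	 * run at, so clk_set_rate() below never requests a frequency
	 * the clock cannot provide.
	 */
	clk_set_rate(clk, clk_round_rate(clk, rate));
	err = clk_prepare_enable(clk);
	return err ? ERR_PTR(err) : clk;
}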

View File

@@ -0,0 +1,80 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Tegra TSEC Module Support
*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TSEC_H
#define TSEC_H
/*
* TSEC Device Data Structure
*/
#define TSEC_CLK_NAME "tsec"
#define TSEC_CLK_INDEX (0)
#define EFUSE_CLK_NAME "efuse"
#define EFUSE_CLK_INDEX (1)
#define TSEC_PKA_CLK_NAME "tsec_pka"
#define TSEC_PKA_CLK_INDEX (2)
#define TSEC_NUM_OF_CLKS (3)
struct tsec_device_data {
void __iomem *reg_aperture;
struct device_dma_parameters dma_parms;
int irq;
/* spin lock for module irq */
spinlock_t mirq_lock;
/* If module is powered on */
bool power_on;
struct clk *clk[TSEC_NUM_OF_CLKS];
long rate[TSEC_NUM_OF_CLKS];
/* private platform data */
void *private_data;
/* owner platform_device */
struct platform_device *pdev;
/* reset control for this device */
struct reset_control *reset_control;
/* store the risc-v info */
void *riscv_data;
/* name of riscv descriptor binary */
char *riscv_desc_bin;
/* name of riscv image binary */
char *riscv_image_bin;
};
/*
* TSEC Register Access APIs
*/
void tsec_writel(struct tsec_device_data *pdata, u32 r, u32 v);
u32 tsec_readl(struct tsec_device_data *pdata, u32 r);
/*
* TSEC power on/off APIs
*/
int tsec_poweron(struct device *dev);
int tsec_poweroff(struct device *dev);
#endif /* TSEC_H */

View File

@@ -0,0 +1,723 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra TSEC Module Support
*
* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tsec_linux.h"
#include "tsec.h"
#include "tsec_boot.h"
#include "tsec_regs.h"
#include "tsec_cmds.h"
#include "tsec_comms/tsec_comms.h"
#include "tsec_comms/tsec_comms_plat.h"
#define CMD_INTERFACE_TEST 0
#if CMD_INTERFACE_TEST
#define NUM_OF_CMDS_TO_TEST (5)
#endif
#define TSEC_RISCV_INIT_SUCCESS (0xa5a5a5a5)
#define TSEC_RISCV_SMMU_STREAMID1 BIT_ULL(40)
/* Set this to 1 to force backdoor boot */
#define TSEC_FORCE_BACKDOOR_BOOT (0)
/* Pointer to the tsec platform device */
struct platform_device *g_tsec;
/* tsec device private data */
typedef void (*plat_work_cb_t)(void *);
struct tsec_device_priv_data {
struct platform_device *pdev;
struct delayed_work poweron_work;
u32 fwreq_retry_interval_ms;
u32 fwreq_duration_ms;
u32 fwreq_fail_threshold_ms;
struct work_struct plat_work;
plat_work_cb_t plat_cb;
void *plat_cb_ctx;
};
struct carveout_info {
u64 base;
u64 size;
};
/*
* Platform specific APIs to be used by platform independent comms library
*/
static DEFINE_MUTEX(s_plat_comms_mutex);
void tsec_plat_acquire_comms_mutex(void)
{
mutex_lock(&s_plat_comms_mutex);
}
void tsec_plat_release_comms_mutex(void)
{
mutex_unlock(&s_plat_comms_mutex);
}
static void tsec_plat_work_handler(struct work_struct *work)
{
struct tsec_device_priv_data *tsec_priv_data;
plat_work_cb_t cb;
void *cb_ctx;
tsec_priv_data = container_of(work, struct tsec_device_priv_data,
plat_work);
cb = tsec_priv_data->plat_cb;
cb_ctx = tsec_priv_data->plat_cb_ctx;
tsec_priv_data->plat_cb = NULL;
tsec_priv_data->plat_cb_ctx = NULL;
if (cb)
cb(cb_ctx);
}
void tsec_plat_queue_work(plat_work_cb_t cb, void *ctx)
{
struct tsec_device_data *pdata = platform_get_drvdata(g_tsec);
struct tsec_device_priv_data *tsec_priv_data =
(struct tsec_device_priv_data *)pdata->private_data;
tsec_priv_data->plat_cb = cb;
tsec_priv_data->plat_cb_ctx = ctx;
schedule_work(&tsec_priv_data->plat_work);
}
void tsec_plat_udelay(u64 usec)
{
udelay(usec);
}
void tsec_plat_reg_write(u32 r, u32 v)
{
tsec_writel(platform_get_drvdata(g_tsec), r, v);
}
u32 tsec_plat_reg_read(u32 r)
{
return tsec_readl(platform_get_drvdata(g_tsec), r);
}
/*
* Helpers to initialise riscv_data with image and descriptor info
*/
static int tsec_compute_ucode_offsets(struct platform_device *dev,
struct riscv_data *rv_data, const struct firmware *fw_desc)
{
struct RM_RISCV_UCODE_DESC *ucode_desc;
ucode_desc = (struct RM_RISCV_UCODE_DESC *)fw_desc->data;
rv_data->desc.manifest_offset = le32_to_cpu((__force __le32)ucode_desc->manifestOffset);
rv_data->desc.code_offset = le32_to_cpu((__force __le32)ucode_desc->monitorCodeOffset);
rv_data->desc.data_offset = le32_to_cpu((__force __le32)ucode_desc->monitorDataOffset);
return 0;
}
static int tsec_read_img_and_desc(struct platform_device *dev,
const char *desc_name, const char *image_name)
{
int err, w;
const struct firmware *fw_desc, *fw_image;
struct tsec_device_data *pdata = platform_get_drvdata(dev);
struct riscv_data *rv_data = (struct riscv_data *)pdata->riscv_data;
if (!rv_data) {
dev_err(&dev->dev, "riscv data is NULL\n");
return -ENODATA;
}
err = request_firmware(&fw_desc, desc_name, &dev->dev);
if (err) {
dev_err(&dev->dev, "failed to get tsec desc binary\n");
return -ENOENT;
}
err = request_firmware(&fw_image, image_name, &dev->dev);
if (err) {
dev_err(&dev->dev, "failed to get tsec image binary\n");
release_firmware(fw_desc);
return -ENOENT;
}
/* Allocate memory to copy image */
rv_data->backdoor_img_size = fw_image->size;
rv_data->backdoor_img_va = dma_alloc_attrs(&dev->dev,
rv_data->backdoor_img_size, &rv_data->backdoor_img_iova,
GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
if (!rv_data->backdoor_img_va) {
dev_err(&dev->dev, "dma memory allocation failed");
err = -ENOMEM;
goto clean_up;
}
/* Copy the whole image taking endianness into account */
for (w = 0; w < fw_image->size/sizeof(u32); w++)
rv_data->backdoor_img_va[w] = le32_to_cpu(((__le32 *)fw_image->data)[w]);
#if (KERNEL_VERSION(5, 14, 0) <= LINUX_VERSION_CODE)
arch_invalidate_pmem(rv_data->backdoor_img_va, rv_data->backdoor_img_size);
#else
__flush_dcache_area((void *)rv_data->backdoor_img_va, fw_image->size);
#endif
/* Read the offsets from desc binary */
err = tsec_compute_ucode_offsets(dev, rv_data, fw_desc);
if (err) {
dev_err(&dev->dev, "failed to parse desc binary\n");
goto clean_up;
}
rv_data->valid = true;
release_firmware(fw_desc);
release_firmware(fw_image);
return 0;
clean_up:
if (rv_data->backdoor_img_va) {
dma_free_attrs(&dev->dev, rv_data->backdoor_img_size,
rv_data->backdoor_img_va, rv_data->backdoor_img_iova,
DMA_ATTR_FORCE_CONTIGUOUS);
rv_data->backdoor_img_va = NULL;
rv_data->backdoor_img_iova = 0;
}
release_firmware(fw_desc);
release_firmware(fw_image);
return err;
}
static int tsec_riscv_data_init(struct platform_device *dev)
{
int err = 0;
struct tsec_device_data *pdata = platform_get_drvdata(dev);
struct riscv_data *rv_data = (struct riscv_data *)pdata->riscv_data;
if (rv_data)
return 0;
rv_data = kzalloc(sizeof(*rv_data), GFP_KERNEL);
if (!rv_data)
return -ENOMEM;
pdata->riscv_data = rv_data;
err = tsec_read_img_and_desc(dev, pdata->riscv_desc_bin,
pdata->riscv_image_bin);
if (err || !rv_data->valid) {
dev_err(&dev->dev, "ucode not valid");
goto clean_up;
}
return 0;
clean_up:
dev_err(&dev->dev, "RISC-V init sw failed: err=%d", err);
kfree(rv_data);
pdata->riscv_data = NULL;
return err;
}
static int tsec_riscv_data_deinit(struct platform_device *dev)
{
struct tsec_device_data *pdata = platform_get_drvdata(dev);
struct riscv_data *rv_data = (struct riscv_data *)pdata->riscv_data;
if (!rv_data)
return 0;
if (rv_data->backdoor_img_va) {
dma_free_attrs(&dev->dev, rv_data->backdoor_img_size,
rv_data->backdoor_img_va, rv_data->backdoor_img_iova,
DMA_ATTR_FORCE_CONTIGUOUS);
rv_data->backdoor_img_va = NULL;
rv_data->backdoor_img_iova = 0;
}
kfree(rv_data);
pdata->riscv_data = NULL;
return 0;
}
/*
* APIs to load firmware and boot tsec
*/
static int get_carveout_info_4(
struct platform_device *dev, struct carveout_info *co_info)
{
#if (KERNEL_VERSION(5, 14, 0) <= LINUX_VERSION_CODE)
int err;
phys_addr_t base;
u64 size;
struct tegra_mc *mc;
mc = devm_tegra_memory_controller_get(&dev->dev);
if (IS_ERR(mc))
return PTR_ERR(mc);
err = tegra_mc_get_carveout_info(mc, 4, &base, &size);
if (err)
return err;
co_info->base = (u64)base;
co_info->size = size;
return 0;
#else
int err;
struct mc_carveout_info mc_co_info;
err = mc_get_carveout_info(&mc_co_info, NULL, MC_SECURITY_CARVEOUT4);
if (err)
return err;
co_info->base = mc_co_info.base;
co_info->size = mc_co_info.size;
return 0;
#endif
}
static int get_carveout_info_lite42(
struct platform_device *dev, struct carveout_info *co_info)
{
#define LITE42_BASE (0x2c10000 + 0x7324)
#define LITE42_SIZE (12)
#define LITE42_BOM_OFFSET (0)
#define LITE42_BOM_HI_OFFSET (4)
#define LITE42_SIZE_128KB_OFFSET (8)
void __iomem *lit42_regs;
lit42_regs = ioremap(LITE42_BASE, LITE42_SIZE);
if (!lit42_regs) {
dev_err(&dev->dev, "lit42_regs VA mapping failed\n");
return -ENOMEM;
}
co_info->base = readl(lit42_regs + LITE42_BOM_OFFSET) |
((u64)readl(lit42_regs + LITE42_BOM_HI_OFFSET) & 0xFF) << 32;
co_info->size = readl(lit42_regs + LITE42_SIZE_128KB_OFFSET);
co_info->size <<= 17; /* size register counts 128 KiB units; convert to bytes */
iounmap(lit42_regs);
return 0;
#undef LITE42_BASE
#undef LITE42_SIZE
#undef LITE42_BOM_OFFSET
#undef LITE42_BOM_HI_OFFSET
#undef LITE42_SIZE_128KB_OFFSET
}
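/*
* Worked example of the decode above: if the BOM registers read
* 0x90000000 (lo) and 0x01 (hi), and the size register reads 2, then
*   base = 0x90000000 | ((u64)0x01 << 32) = 0x1_9000_0000
*   size = 2 << 17 = 256 KiB
* since the size register counts 128 KiB (1 << 17 byte) units.
*/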
int tsec_finalize_poweron(struct platform_device *dev)
{
#if CMD_INTERFACE_TEST
union RM_FLCN_CMD cmd;
struct RM_FLCN_HDCP22_CMD_MONITOR_OFF hdcp22Cmd;
u8 cmd_size = RM_FLCN_CMD_SIZE(HDCP22, MONITOR_OFF);
u32 cmdDataSize = RM_FLCN_CMD_BODY_SIZE(HDCP22, MONITOR_OFF);
int idx;
#endif //CMD_INTERFACE_TEST
int err = 0;
struct riscv_data *rv_data;
u32 val;
phys_addr_t img_pa, pa;
struct iommu_domain *domain;
void __iomem *cpuctl_addr, *retcode_addr, *mailbox0_addr;
struct carveout_info img_co_info;
unsigned int img_co_gscid = 0x0;
struct tsec_device_data *pdata = platform_get_drvdata(dev);
struct carveout_info ipc_co_info;
void __iomem *ipc_co_va = NULL;
dma_addr_t ipc_co_iova = 0;
dma_addr_t ipc_co_iova_with_streamid;
if (!pdata) {
dev_err(&dev->dev, "no platform data\n");
return -ENODATA;
}
/* Init rv_data with image and descriptor info */
err = tsec_riscv_data_init(dev);
if (err)
return err;
rv_data = (struct riscv_data *)pdata->riscv_data;
/* Get pa of memory having tsec fw image */
err = get_carveout_info_4(dev, &img_co_info);
if (err) {
dev_err(&dev->dev, "Carveout memory allocation failed");
err = -ENOMEM;
goto clean_up;
}
dev_dbg(&dev->dev, "CARVEOUT4 base=0x%llx size=0x%llx\n",
img_co_info.base, img_co_info.size);
/* Get iommu domain to convert iova to physical address for backdoor boot */
domain = iommu_get_domain_for_dev(&dev->dev);
if ((img_co_info.base) && !(TSEC_FORCE_BACKDOOR_BOOT)) {
img_pa = img_co_info.base;
img_co_gscid = 0x4;
dev_info(&dev->dev, "RISC-V booting from GSC\n");
} else {
/* For backdoor non-secure boot only. It can be deprecated later */
img_pa = iommu_iova_to_phys(domain, rv_data->backdoor_img_iova);
dev_info(&dev->dev, "RISC-V boot using kernel allocated Mem\n");
}
/* Get va and iova of the carveout used for ipc */
err = get_carveout_info_lite42(dev, &ipc_co_info);
if (err) {
dev_err(&dev->dev, "IPC Carveout memory allocation failed");
err = -ENOMEM;
goto clean_up;
}
dev_dbg(&dev->dev, "IPCCO base=0x%llx size=0x%llx\n", ipc_co_info.base, ipc_co_info.size);
ipc_co_va = ioremap(ipc_co_info.base, ipc_co_info.size);
if (!ipc_co_va) {
dev_err(&dev->dev, "IPC Carveout memory VA mapping failed");
err = -ENOMEM;
goto clean_up;
}
dev_dbg(&dev->dev, "IPCCO va=0x%llx pa=0x%llx\n",
(__force phys_addr_t)(ipc_co_va), page_to_phys(vmalloc_to_page(ipc_co_va)));
#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
ipc_co_iova = dma_map_page_attrs(&dev->dev, vmalloc_to_page(ipc_co_va),
offset_in_page(ipc_co_va), ipc_co_info.size, DMA_BIDIRECTIONAL, 0);
#else
ipc_co_iova = dma_map_page(&dev->dev, vmalloc_to_page(ipc_co_va),
offset_in_page(ipc_co_va), ipc_co_info.size, DMA_BIDIRECTIONAL);
#endif
err = dma_mapping_error(&dev->dev, ipc_co_iova);
if (err) {
dev_err(&dev->dev, "IPC Carveout memory IOVA mapping failed");
ipc_co_iova = 0;
err = -ENOMEM;
goto clean_up;
}
dev_dbg(&dev->dev, "IPCCO iova=0x%llx\n", ipc_co_iova);
/* Lock channel so that non-TZ channel request can't write non-THI region */
tsec_writel(pdata, tsec_thi_sec_r(), tsec_thi_sec_chlock_f());
/* Select RISC-V core */
tsec_writel(pdata, tsec_riscv_bcr_ctrl_r(),
tsec_riscv_bcr_ctrl_core_select_riscv_f());
/* Program manifest start address */
pa = (img_pa + rv_data->desc.manifest_offset) >> 8;
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_pkcparam_lo_r(),
lower_32_bits(pa));
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_pkcparam_hi_r(),
upper_32_bits(pa));
/* Program FMC code start address */
pa = (img_pa + rv_data->desc.code_offset) >> 8;
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_fmccode_lo_r(),
lower_32_bits(pa));
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_fmccode_hi_r(),
upper_32_bits(pa));
/* Program FMC data start address */
pa = (img_pa + rv_data->desc.data_offset) >> 8;
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_fmcdata_lo_r(),
lower_32_bits(pa));
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_fmcdata_hi_r(),
upper_32_bits(pa));
/* Program DMA config registers */
tsec_writel(pdata, tsec_riscv_bcr_dmacfg_sec_r(),
tsec_riscv_bcr_dmacfg_sec_gscid_f(img_co_gscid));
tsec_writel(pdata, tsec_riscv_bcr_dmacfg_r(),
tsec_riscv_bcr_dmacfg_target_local_fb_f() |
tsec_riscv_bcr_dmacfg_lock_locked_f());
/* Pass the address of ipc carveout via mailbox registers */
ipc_co_iova_with_streamid = (ipc_co_iova | TSEC_RISCV_SMMU_STREAMID1);
tsec_writel(pdata, tsec_falcon_mailbox0_r(),
lower_32_bits((unsigned long long)ipc_co_iova_with_streamid));
tsec_writel(pdata, tsec_falcon_mailbox1_r(),
upper_32_bits((unsigned long long)ipc_co_iova_with_streamid));
/* Kick start RISC-V and let BR take over */
tsec_writel(pdata, tsec_riscv_cpuctl_r(),
tsec_riscv_cpuctl_startcpu_true_f());
cpuctl_addr = pdata->reg_aperture + tsec_riscv_cpuctl_r();
retcode_addr = pdata->reg_aperture + tsec_riscv_br_retcode_r();
mailbox0_addr = pdata->reg_aperture + tsec_falcon_mailbox0_r();
/* Check BR return code */
err = readl_poll_timeout(retcode_addr, val,
(tsec_riscv_br_retcode_result_v(val) ==
tsec_riscv_br_retcode_result_pass_v()),
RISCV_IDLE_CHECK_PERIOD,
RISCV_IDLE_TIMEOUT_DEFAULT);
if (err) {
dev_err(&dev->dev, "BR return code timeout! val=0x%x\n", val);
goto clean_up;
}
/* Check cpuctl active state */
err = readl_poll_timeout(cpuctl_addr, val,
(tsec_riscv_cpuctl_active_stat_v(val) ==
tsec_riscv_cpuctl_active_stat_active_v()),
RISCV_IDLE_CHECK_PERIOD,
RISCV_IDLE_TIMEOUT_DEFAULT);
if (err) {
dev_err(&dev->dev, "cpuctl active state timeout! val=0x%x\n",
val);
goto clean_up;
}
/* Check tsec has reached a proper initialized state */
err = readl_poll_timeout(mailbox0_addr, val,
(val == TSEC_RISCV_INIT_SUCCESS),
RISCV_IDLE_CHECK_PERIOD_LONG,
RISCV_IDLE_TIMEOUT_LONG);
if (err) {
dev_err(&dev->dev,
"not reached initialized state, timeout! val=0x%x\n",
val);
goto clean_up;
}
/* Mask out the TSEC SWGEN1 interrupt.
* The host should not receive SWGEN1, as it uses only SWGEN0 for message
* communication with tsec. The RISCV FW generates SWGEN1 for debug
* purposes at the path below; we want to ensure that this doesn't
* interrupt the Arm driver code.
* nvriscv/drivers/src/debug/debug.c:164: irqFireSwGen(SYS_INTR_SWGEN1)
*/
tsec_writel(pdata, tsec_riscv_irqmclr_r(), tsec_riscv_irqmclr_swgen1_set_f());
/* initialise the comms library before enabling msg interrupt */
tsec_comms_initialize((__force u64)ipc_co_va, ipc_co_info.size);
/* enable message interrupt from tsec to ccplex */
enable_irq(pdata->irq);
/* Booted-up successfully */
dev_info(&dev->dev, "RISC-V boot success\n");
#if CMD_INTERFACE_TEST
pr_debug("cmd_size=%d, cmdDataSize=%d\n", cmd_size, cmdDataSize);
msleep(3000);
for (idx = 0; idx < NUM_OF_CMDS_TO_TEST; idx++) {
hdcp22Cmd.cmdType = RM_FLCN_HDCP22_CMD_ID_MONITOR_OFF;
hdcp22Cmd.sorNum = -1;
hdcp22Cmd.dfpSublinkMask = -1;
cmd.cmdGen.hdr.size = cmd_size;
cmd.cmdGen.hdr.unitId = RM_GSP_UNIT_HDCP22WIRED;
cmd.cmdGen.hdr.seqNumId = idx+1;
cmd.cmdGen.hdr.ctrlFlags = 0;
memcpy(&cmd.cmdGen.cmd, &hdcp22Cmd, cmdDataSize);
tsec_comms_send_cmd((void *)&cmd, 0, NULL, NULL);
msleep(200);
}
#endif //CMD_INTERFACE_TEST
return err;
clean_up:
if (ipc_co_iova) {
#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
dma_unmap_page_attrs(&dev->dev, ipc_co_iova,
ipc_co_info.size, DMA_BIDIRECTIONAL, 0);
#else
dma_unmap_page(&dev->dev, ipc_co_iova,
ipc_co_info.size, DMA_BIDIRECTIONAL);
#endif
}
if (ipc_co_va)
iounmap(ipc_co_va);
tsec_riscv_data_deinit(dev);
return err;
}
int tsec_prepare_poweroff(struct platform_device *dev)
{
struct tsec_device_data *pdata = platform_get_drvdata(dev);
if (!pdata) {
dev_err(&dev->dev, "no platform data\n");
return -ENODATA;
}
if (pdata->irq < 0) {
dev_err(&dev->dev, "found interrupt number to be negative\n");
return -ENODATA;
}
disable_irq((unsigned int) pdata->irq);
return 0;
}
/*
* Irq top and bottom half handling. On receiving a message interrupt,
* the bottom half calls the comms lib API to drain and handle that message.
*/
static irqreturn_t tsec_irq_top_half(int irq, void *dev_id)
{
unsigned long flags;
struct platform_device *pdev = (struct platform_device *)(dev_id);
struct tsec_device_data *pdata = platform_get_drvdata(pdev);
irqreturn_t irq_ret_val = IRQ_HANDLED;
u32 irq_status;
spin_lock_irqsave(&pdata->mirq_lock, flags);
/* Read the interrupt status */
irq_status = tsec_readl(pdata, tsec_irqstat_r());
/* Clear the interrupt */
tsec_writel(pdata, tsec_thi_int_status_r(),
tsec_thi_int_status_clr_f());
/* Wakeup threaded handler for SWGEN0 Irq */
if (irq_status & tsec_irqstat_swgen0()) {
/* Clear SWGEN0 Interrupt */
tsec_writel(pdata, tsec_irqsclr_r(),
tsec_irqsclr_swgen0_set_f());
/* Mask the interrupt.
* Clear the RISCV mask for SWGEN0 so that no more SWGEN0
* interrupts are routed to CCPLEX; it will be re-enabled
* by the bottom half
*/
tsec_writel(pdata, tsec_riscv_irqmclr_r(),
tsec_riscv_irqmclr_swgen0_set_f());
irq_ret_val = IRQ_WAKE_THREAD;
irq_status &= ~(tsec_irqstat_swgen0());
}
/* The RISCV FW generates SWGEN1 when it logs something
* to the print buffer at the path below:
* nvriscv/drivers/src/debug/debug.c:164: irqFireSwGen(SYS_INTR_SWGEN1)
* We don't want to pull the print buffer out from CCPLEX,
* hence we just mask out the SWGEN1 interrupt here so that it
* is not received any further
*/
if (irq_status & tsec_irqstat_swgen1()) {
tsec_writel(pdata, tsec_riscv_irqmclr_r(),
tsec_riscv_irqmclr_swgen1_set_f());
irq_status &= ~(tsec_irqstat_swgen1());
}
spin_unlock_irqrestore(&pdata->mirq_lock, flags);
return irq_ret_val;
}
static irqreturn_t tsec_irq_bottom_half(int irq, void *args)
{
/* Call into the comms lib API to drain the message */
tsec_comms_drain_msg(true);
/* Unmask the interrupt.
* Set the RISCV mask for SWGEN0 so that it is re-enabled;
* if it is pending, the CCPLEX will be interrupted again
* by the top half
*/
tsec_writel(platform_get_drvdata(g_tsec),
tsec_riscv_irqmset_r(), tsec_riscv_irqmset_swgen0_set_f());
return IRQ_HANDLED;
}
/*
* The tsec power-on handler attempts to boot tsec from a worker thread as
* soon as the fw descriptor image is available
*/
static void tsec_poweron_handler(struct work_struct *work)
{
struct tsec_device_priv_data *tsec_priv_data;
struct tsec_device_data *pdata;
const struct firmware *tsec_fw_desc;
int err;
tsec_priv_data = container_of(to_delayed_work(work), struct tsec_device_priv_data,
poweron_work);
pdata = platform_get_drvdata(tsec_priv_data->pdev);
err = firmware_request_nowarn(&tsec_fw_desc, pdata->riscv_desc_bin,
&(tsec_priv_data->pdev->dev));
tsec_priv_data->fwreq_duration_ms += tsec_priv_data->fwreq_retry_interval_ms;
if (!err) {
dev_info(&(tsec_priv_data->pdev->dev),
"tsec fw req success in %d ms\n",
tsec_priv_data->fwreq_duration_ms);
release_firmware(tsec_fw_desc);
err = tsec_poweron(&(tsec_priv_data->pdev->dev));
if (err)
dev_dbg(&(tsec_priv_data->pdev->dev),
"tsec_poweron returned with error: %d\n",
err);
} else if (tsec_priv_data->fwreq_duration_ms < tsec_priv_data->fwreq_fail_threshold_ms) {
dev_info(&(tsec_priv_data->pdev->dev),
"retry tsec fw req, total retry duration %d ms\n",
tsec_priv_data->fwreq_duration_ms);
schedule_delayed_work(&tsec_priv_data->poweron_work,
msecs_to_jiffies(tsec_priv_data->fwreq_retry_interval_ms));
} else {
dev_err(&(tsec_priv_data->pdev->dev),
"tsec boot failure, fw not available within %d ms\n",
tsec_priv_data->fwreq_fail_threshold_ms);
}
}
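/*
* Note on the retry policy: with the values programmed in
* tsec_kickoff_boot() below (fwreq_retry_interval_ms = 100,
* fwreq_fail_threshold_ms = 10 * 100), the handler above re-queues
* itself roughly every 100 ms and gives up once the accumulated
* duration reaches 1000 ms, i.e. after about ten firmware requests.
*/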
/*
* Register irq handlers and kick off tsec boot from a separate worker thread
*/
int tsec_kickoff_boot(struct platform_device *pdev)
{
int ret = 0;
struct tsec_device_data *pdata = platform_get_drvdata(pdev);
struct tsec_device_priv_data *tsec_priv_data = NULL;
tsec_priv_data = devm_kzalloc(&pdev->dev, sizeof(*tsec_priv_data), GFP_KERNEL);
if (!tsec_priv_data)
return -ENOMEM;
tsec_priv_data->pdev = pdev;
INIT_DELAYED_WORK(&tsec_priv_data->poweron_work, tsec_poweron_handler);
INIT_WORK(&tsec_priv_data->plat_work, tsec_plat_work_handler);
tsec_priv_data->fwreq_retry_interval_ms = 100;
tsec_priv_data->fwreq_duration_ms = 0;
tsec_priv_data->fwreq_fail_threshold_ms = tsec_priv_data->fwreq_retry_interval_ms * 10;
pdata->private_data = tsec_priv_data;
spin_lock_init(&pdata->mirq_lock);
ret = request_threaded_irq(pdata->irq, tsec_irq_top_half,
tsec_irq_bottom_half, 0, "tsec_riscv_irq", pdev);
if (ret) {
dev_err(&pdev->dev, "CMD: failed to request irq %d\n", ret);
devm_kfree(&pdev->dev, tsec_priv_data);
return ret;
}
/* keep irq disabled */
disable_irq(pdata->irq);
g_tsec = pdev;
/* schedule work item to turn on tsec */
schedule_delayed_work(&tsec_priv_data->poweron_work,
msecs_to_jiffies(tsec_priv_data->fwreq_retry_interval_ms));
return 0;
}
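
For reference, a client of the comms layer issues commands the way the CMD_INTERFACE_TEST block in tsec_finalize_poweron() does. A minimal hypothetical caller is sketched below; the function names are illustrative, and it assumes the two trailing arguments of tsec_comms_send_cmd() are a per-command callback and context (the test code passes NULL, NULL there), matching the (cb_func, cb_ctx) pairs that tsec_comms_drain_msg() invokes on the message path:

#include <linux/printk.h>
#include <linux/string.h>
#include "tsec_cmds.h"
#include "tsec_comms/tsec_comms.h"

/* Hypothetical reply handler; the comms layer hands back the raw
 * message buffer, which starts with an RM_FLCN_QUEUE_HDR.
 */
static void hdcp22_reply_cb(void *ctx, void *msg)
{
	struct RM_FLCN_QUEUE_HDR *hdr = msg;

	pr_info("hdcp22 reply: unitId=0x%x size=%u\n",
		hdr->unitId, hdr->size);
}

static void hdcp22_send_monitor_off(u8 sor_num)
{
	union RM_FLCN_CMD cmd;
	struct RM_FLCN_HDCP22_CMD_MONITOR_OFF off = {
		.cmdType = RM_FLCN_HDCP22_CMD_ID_MONITOR_OFF,
		.sorNum = sor_num,
		.dfpSublinkMask = 0,	/* illustrative value */
	};

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmdGen.hdr.size = RM_FLCN_CMD_SIZE(HDCP22, MONITOR_OFF);
	cmd.cmdGen.hdr.unitId = RM_GSP_UNIT_HDCP22WIRED;
	cmd.cmdGen.hdr.seqNumId = 1;
	cmd.cmdGen.hdr.ctrlFlags = 0;
	memcpy(&cmd.cmdGen.cmd, &off, sizeof(off));
	tsec_comms_send_cmd((void *)&cmd, 0, hdcp22_reply_cb, NULL);
}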

View File

@@ -0,0 +1,95 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Tegra TSEC Module Support
*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TSEC_BOOT_H
#define TSEC_BOOT_H
#define RISCV_IDLE_TIMEOUT_DEFAULT 100000 /* 100 milliseconds */
#define RISCV_IDLE_TIMEOUT_LONG 2000000 /* 2 seconds */
#define RISCV_IDLE_CHECK_PERIOD 10 /* 10 usec */
#define RISCV_IDLE_CHECK_PERIOD_LONG 1000 /* 1 millisecond */
/* Image descriptor format */
struct RM_RISCV_UCODE_DESC {
/*
* Version 1
* Version 2
* Version 3 = for Partition boot
* Version 4 = for eb riscv boot
*/
u32 version; /* structure version */
u32 bootloaderOffset;
u32 bootloaderSize;
u32 bootloaderParamOffset;
u32 bootloaderParamSize;
u32 riscvElfOffset;
u32 riscvElfSize;
u32 appVersion; /* Changelist number associated with the image */
/*
* Manifest contains information about Monitor and it is
* input to BR
*/
u32 manifestOffset;
u32 manifestSize;
/*
* Monitor Data offset within RISCV image and size
*/
u32 monitorDataOffset;
u32 monitorDataSize;
/*
* Monitor Code offset within RISCV image and size
*/
u32 monitorCodeOffset;
u32 monitorCodeSize;
u32 bIsMonitorEnabled;
/*
* Swbrom Code offset within RISCV image and size
*/
u32 swbromCodeOffset;
u32 swbromCodeSize;
/*
* Swbrom Data offset within RISCV image and size
*/
u32 swbromDataOffset;
u32 swbromDataSize;
};
struct riscv_image_desc {
u32 manifest_offset;
u32 manifest_size;
u32 data_offset;
u32 data_size;
u32 code_offset;
u32 code_size;
};
struct riscv_data {
bool valid;
struct riscv_image_desc desc;
dma_addr_t backdoor_img_iova;
u32 *backdoor_img_va;
size_t backdoor_img_size;
};
int tsec_kickoff_boot(struct platform_device *pdev);
int tsec_finalize_poweron(struct platform_device *dev);
int tsec_prepare_poweroff(struct platform_device *dev);
#endif /* TSEC_BOOT_H */

View File

@@ -0,0 +1,781 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra TSEC Module Support
*
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TSEC_CMDS_H
#define TSEC_CMDS_H
#include "tsec_comms/tsec_comms_cmds.h"
struct RM_FLCN_U64 {
u32 lo;
u32 hi;
};
struct RM_UPROC_TEST_CMD_WR_PRIV_PROTECTED_REG {
u8 cmdType;
u8 regType;
u8 pad[2];
u32 val;
};
struct RM_UPROC_TEST_CMD_RTTIMER_TEST {
u8 cmdType;
u8 bCheckTime;
u8 pad[2];
u32 count;
};
struct RM_UPROC_TEST_CMD_FAKEIDLE_TEST {
u8 cmdType;
u8 op;
};
struct RM_UPROC_TEST_CMD_RD_BLACKLISTED_REG {
u8 cmdType;
u8 pad[3];
};
struct RM_UPROC_TEST_CMD_MSCG_ISSUE_FB_ACCESS {
u8 cmdType;
u8 op;
u8 pad[2];
u32 fbOffsetLo32;
u32 fbOffsetHi32;
};
struct RM_UPROC_TEST_CMD_COMMON_TEST {
u8 cmdType;
u32 subCmdType;
u8 pad[3];
};
union RM_UPROC_TEST_CMD {
u8 cmdType;
struct RM_UPROC_TEST_CMD_WR_PRIV_PROTECTED_REG wrPrivProtectedReg;
struct RM_UPROC_TEST_CMD_RTTIMER_TEST rttimer;
struct RM_UPROC_TEST_CMD_FAKEIDLE_TEST fakeidle;
struct RM_UPROC_TEST_CMD_RD_BLACKLISTED_REG rdBlacklistedReg;
struct RM_UPROC_TEST_CMD_MSCG_ISSUE_FB_ACCESS mscgFbAccess;
struct RM_UPROC_TEST_CMD_COMMON_TEST commonTest;
};
struct RM_FLCN_HDCP_CMD_GENERIC {
u8 cmdType;
};
struct RM_FLCN_HDCP_CMD_INIT {
u8 cmdType;
u8 reserved[2];
u8 sorMask;
u32 chipId;
u32 options;
};
struct RM_FLCN_HDCP_CMD_SET_OPTIONS {
u8 cmdType;
u8 reserved[3];
u32 options;
};
struct RM_FLCN_MEM_DESC {
struct RM_FLCN_U64 address;
u32 params;
};
struct RM_FLCN_HDCP_CMD_VALIDATE_SRM {
u8 cmdType;
u8 reserved[3];
struct RM_FLCN_MEM_DESC srm;
u32 srmListSize;
};
struct RM_FLCN_HDCP_CMD_VALIDATE_KSV {
u8 cmdType;
u8 head;
u16 BInfo;
u32 sorIndex;
u32 flags;
u32 ksvNumEntries;
struct RM_FLCN_MEM_DESC ksvList;
struct RM_FLCN_MEM_DESC srm;
u32 srmListSize;
struct RM_FLCN_MEM_DESC vPrime;
};
struct RM_FLCN_HDCP_CMD_READ_SPRIME {
u8 cmdType;
};
union RM_FLCN_HDCP_CMD {
u8 cmdType;
struct RM_FLCN_HDCP_CMD_GENERIC gen;
struct RM_FLCN_HDCP_CMD_INIT init;
struct RM_FLCN_HDCP_CMD_SET_OPTIONS setOptions;
struct RM_FLCN_HDCP_CMD_VALIDATE_SRM valSrm;
struct RM_FLCN_HDCP_CMD_VALIDATE_KSV valKsv;
struct RM_FLCN_HDCP_CMD_READ_SPRIME readSprime;
};
#define HDCP22_NUM_STREAMS_MAX 4
#define HDCP22_NUM_DP_TYPE_MASK 2
enum {
RM_FLCN_HDCP22_CMD_ID_ENABLE_HDCP22 = 0,
RM_FLCN_HDCP22_CMD_ID_MONITOR_OFF,
RM_FLCN_HDCP22_CMD_ID_VALIDATE_SRM2,
RM_FLCN_HDCP22_CMD_ID_TEST_SE,
RM_FLCN_HDCP22_CMD_ID_WRITE_DP_ECF,
RM_FLCN_HDCP22_CMD_ID_VALIDATE_STREAM,
RM_FLCN_HDCP22_CMD_ID_FLUSH_TYPE,
};
struct HDCP22_STREAM {
u8 streamId;
u8 streamType;
};
struct RM_FLCN_HDCP22_CMD_ENABLE_HDCP22 {
u8 cmdType;
u8 sorNum;
u8 sorProtocol;
u8 ddcPortPrimary;
u8 ddcPortSecondary;
u8 bRxRestartRequest;
u8 bRxIDMsgPending;
u8 bHpdFromRM;
u8 bEnforceType0Hdcp1xDS;
u8 bCheckAutoDisableState;
u8 numStreams;
struct HDCP22_STREAM streamIdType[HDCP22_NUM_STREAMS_MAX];
u32 dpTypeMask[HDCP22_NUM_DP_TYPE_MASK];
u32 srmListSize;
struct RM_FLCN_MEM_DESC srm;
};
struct RM_FLCN_HDCP22_CMD_MONITOR_OFF {
u8 cmdType;
u8 sorNum;
u8 dfpSublinkMask;
};
struct RM_FLCN_HDCP22_CMD_VALIDATE_SRM2 {
u8 cmdType;
u32 srmListSize;
struct RM_FLCN_MEM_DESC srm;
};
struct RM_FLCN_HDCP22_CMD_TEST_SE {
u8 cmdType;
u8 reserved[3];
u32 options;
};
struct RM_FLCN_HDCP22_CMD_WRITE_DP_ECF {
u8 cmdType;
u8 sorNum;
u8 reserved[2];
u32 ecfTimeslot[2];
u8 bForceClearEcf;
u8 bAddStreamBack;
};
struct RM_FLCN_HDCP22_CMD_FLUSH_TYPE {
u8 cmdType;
u8 reserved[3];
};
union RM_FLCN_HDCP22_CMD {
u8 cmdType;
struct RM_FLCN_HDCP22_CMD_ENABLE_HDCP22 cmdHdcp22Enable;
struct RM_FLCN_HDCP22_CMD_MONITOR_OFF cmdHdcp22MonitorOff;
struct RM_FLCN_HDCP22_CMD_VALIDATE_SRM2 cmdValidateSrm2;
struct RM_FLCN_HDCP22_CMD_TEST_SE cmdTestSe;
struct RM_FLCN_HDCP22_CMD_WRITE_DP_ECF cmdWriteDpEcf;
struct RM_FLCN_HDCP22_CMD_FLUSH_TYPE cmdFlushType;
};
enum {
RM_GSP_SCHEDULER_CMD_ID_TEST = 0x1,
};
struct RM_GSP_SCHEDULER_CMD_TEST {
u8 cmdType;
u8 num;
};
union RM_GSP_SCHEDULER_CMD {
u8 cmdType;
struct RM_GSP_SCHEDULER_CMD_TEST test;
};
struct RM_GSP_SCHEDULER_MSG_TEST {
u8 msgType;
u8 pad;
u16 status;
};
union RM_GSP_SCHEDULER_MSG {
u8 msgType;
struct RM_GSP_SCHEDULER_MSG_TEST test;
};
struct RM_GSP_ACR_BOOTSTRAP_ENGINE_DETAILS1 {
u32 engineId;
u32 engineInstance;
};
struct RM_GSP_ACR_BOOTSTRAP_ENGINE_DETAILS2 {
u32 engineIndexMask;
u32 boot_flags;
};
struct RM_GSP_ACR_CMD_BOOTSTRAP_ENGINE {
u8 cmdType;
struct RM_GSP_ACR_BOOTSTRAP_ENGINE_DETAILS1 engineDetails1;
struct RM_GSP_ACR_BOOTSTRAP_ENGINE_DETAILS2 engineDetails2;
};
struct RM_GSP_ACR_CMD_LOCK_WPR {
u8 cmdType;
struct RM_FLCN_U64 wprAddressFb;
};
struct RM_GSP_ACR_CMD_UNLOCK_WPR {
u8 cmdType;
u8 unloadType;
};
union RM_GSP_ACR_CMD {
u8 cmdType;
struct RM_GSP_ACR_CMD_BOOTSTRAP_ENGINE bootstrapEngine;
struct RM_GSP_ACR_CMD_LOCK_WPR lockWprDetails;
struct RM_GSP_ACR_CMD_UNLOCK_WPR unlockWprDetails;
};
struct RM_GSP_RMPROXY_CMD {
u8 cmdType;
u32 addr;
u32 value;
};
struct RM_GSP_SPDM_CE_KEY_INFO {
u32 ceIndex;
u32 keyIndex;
u32 ivSlotIndex;
};
struct RM_GSP_SPDM_CMD_PROGRAM_CE_KEYS {
u8 cmdType;
struct RM_GSP_SPDM_CE_KEY_INFO ceKeyInfo;
};
union RM_GSP_SPDM_CMD {
u8 cmdType;
struct RM_GSP_SPDM_CMD_PROGRAM_CE_KEYS programCeKeys;
};
struct RM_FLCN_CMD_GSP {
struct RM_FLCN_QUEUE_HDR hdr;
union {
union RM_UPROC_TEST_CMD test;
union RM_FLCN_HDCP_CMD hdcp;
union RM_FLCN_HDCP22_CMD hdcp22wired;
union RM_GSP_SCHEDULER_CMD scheduler;
union RM_GSP_ACR_CMD acr;
struct RM_GSP_RMPROXY_CMD rmProxy;
union RM_GSP_SPDM_CMD spdm;
} cmd;
};
struct RM_FLCN_CMD_GEN {
struct RM_FLCN_QUEUE_HDR hdr;
u32 cmd;
};
struct RM_PMU_RPC_CMD {
u8 padding1;
u8 flags;
u16 padding2;
u32 rpcDmemPtr;
};
struct RM_FLCN_CMD_PMU {
struct RM_FLCN_QUEUE_HDR hdr;
union {
struct RM_PMU_RPC_CMD rpc;
} cmd;
};
struct RM_DPU_REGCACHE_CMD_CONFIG_SV {
u8 cmdType;
u8 dmaBufferIdx;
struct RM_FLCN_MEM_DESC dmaDesc;
u32 wborPresentMask;
};
union RM_DPU_REGCACHE_CMD {
u8 cmdType;
struct RM_DPU_REGCACHE_CMD_CONFIG_SV cmdConfigSv;
};
struct RM_DPU_VRR_CMD_ENABLE {
u8 cmdType;
u8 headIdx;
u8 bEnableVrrForceFrameRelease;
u32 forceReleaseThresholdUs;
};
union RM_DPU_VRR_CMD {
u8 cmdType;
struct RM_DPU_VRR_CMD_ENABLE cmdEnable;
};
struct RM_DPU_SCANOUTLOGGING_CMD_ENABLE {
u8 cmdType;
u8 scanoutFlag;
u32 rmBufTotalRecordCnt;
u32 head;
s32 timerOffsetLo;
s32 timerOffsetHi;
struct RM_FLCN_MEM_DESC dmaDesc;
};
struct RM_DPU_SCANOUTLOGGING_CMD_DISABLE {
u8 cmdType;
};
union RM_DPU_SCANOUTLOGGING_CMD {
u8 cmdType;
struct RM_DPU_SCANOUTLOGGING_CMD_ENABLE cmdEnable;
struct RM_DPU_SCANOUTLOGGING_CMD_DISABLE cmdDisable;
};
struct RM_DPU_MSCGWITHFRL_CMD_ENQUEUE {
u8 cmdType;
u8 flag;
u32 head;
u32 startTimeNsLo;
u32 startTimeNsHi;
u32 frlDelayNsLo;
u32 frlDelayNsHi;
};
union RM_DPU_MSCGWITHFRL_CMD {
u8 cmdType;
struct RM_DPU_MSCGWITHFRL_CMD_ENQUEUE cmdEnqueue;
};
struct RM_DPU_TIMER_CMD_UPDATE_FREQ {
u8 cmdType;
u8 reserved[3];
u32 freqKhz;
};
union RM_DPU_TIMER_CMD {
u8 cmdType;
struct RM_DPU_TIMER_CMD_UPDATE_FREQ cmdUpdateFreq;
};
struct RM_FLCN_CMD_DPU {
struct RM_FLCN_QUEUE_HDR hdr;
union {
union RM_DPU_REGCACHE_CMD regcache;
union RM_DPU_VRR_CMD vrr;
union RM_FLCN_HDCP_CMD hdcp;
union RM_FLCN_HDCP22_CMD hdcp22wired;
union RM_DPU_SCANOUTLOGGING_CMD scanoutLogging;
union RM_DPU_MSCGWITHFRL_CMD mscgWithFrl;
union RM_DPU_TIMER_CMD timer;
union RM_UPROC_TEST_CMD test;
} cmd;
};
struct RM_SEC2_TEST_CMD_WR_PRIV_PROTECTED_REG {
u8 cmdType;
u8 regType;
u8 pad[2];
u32 val;
};
struct RM_SEC2_TEST_CMD_RTTIMER_TEST {
u8 cmdType;
u8 bCheckTime;
u8 pad[2];
u32 count;
};
struct RM_SEC2_TEST_CMD_FAKEIDLE_TEST {
u8 cmdType;
u8 op;
};
struct RM_SEC2_TEST_CMD_RD_BLACKLISTED_REG {
u8 cmdType;
u8 pad[3];
};
struct RM_SEC2_TEST_CMD_MSCG_ISSUE_FB_ACCESS {
u8 cmdType;
u8 op;
u8 pad[2];
u32 fbOffsetLo32;
u32 fbOffsetHi32;
};
union RM_SEC2_TEST_CMD {
u8 cmdType;
struct RM_SEC2_TEST_CMD_WR_PRIV_PROTECTED_REG wrPrivProtectedReg;
struct RM_SEC2_TEST_CMD_RTTIMER_TEST rttimer;
struct RM_SEC2_TEST_CMD_FAKEIDLE_TEST fakeidle;
struct RM_SEC2_TEST_CMD_RD_BLACKLISTED_REG rdBlacklistedReg;
struct RM_SEC2_TEST_CMD_MSCG_ISSUE_FB_ACCESS mscgFbAccess;
};
struct RM_SEC2_CHNMGMT_CMD_ENGINE_RC_RECOVERY {
u8 cmdType;
u8 pad[3];
};
struct RM_SEC2_CHNMGMT_CMD_FINISH_RC_RECOVERY {
u8 cmdType;
u8 pad[3];
};
union RM_SEC2_CHNMGMT_CMD {
u8 cmdType;
struct RM_SEC2_CHNMGMT_CMD_ENGINE_RC_RECOVERY engineRcCmd;
struct RM_SEC2_CHNMGMT_CMD_FINISH_RC_RECOVERY finishRcCmd;
};
struct RM_SEC2_ACR_CMD_BOOTSTRAP_FALCON {
u8 cmdType;
u32 flags;
u32 falconId;
u32 falconInstance;
u32 falconIndexMask;
};
struct RM_SEC2_ACR_CMD_WRITE_CBC_BASE {
u8 cmdType;
u32 cbcBase;
};
union RM_SEC2_ACR_CMD {
u8 cmdType;
struct RM_SEC2_ACR_CMD_BOOTSTRAP_FALCON bootstrapFalcon;
struct RM_SEC2_ACR_CMD_WRITE_CBC_BASE writeCbcBase;
};
struct RM_SEC2_VPR_CMD_SETUP_VPR {
u8 cmdType;
u8 pad[3];
u32 startAddr;
u32 size;
};
union RM_SEC2_VPR_CMD {
u8 cmdType;
struct RM_SEC2_VPR_CMD_SETUP_VPR vprCmd;
};
struct RM_SEC2_SPDM_CMD_INIT {
u8 cmdType;
u8 pad[3];
};
enum SpdmPayloadType {
SpdmPayloadTypeNormalMessage = 0x0,
SpdmPayloadTypeSecuredMessage = 0x1,
SpdmPayloadTypeAppMessage = 0x2,
};
struct RM_SEC2_SPDM_CMD_REQUEST {
u8 cmdType;
u8 pad[3];
u32 reqPayloadEmemAddr;
u32 reqPayloadSize;
enum SpdmPayloadType reqPayloadType;
};
union RM_SEC2_SPDM_CMD {
u8 cmdType;
struct RM_SEC2_SPDM_CMD_INIT initCmd;
struct RM_SEC2_SPDM_CMD_REQUEST reqCmd;
};
struct RM_FLCN_CMD_SEC2 {
struct RM_FLCN_QUEUE_HDR hdr;
union {
union RM_SEC2_TEST_CMD sec2Test;
union RM_SEC2_CHNMGMT_CMD chnmgmt;
union RM_FLCN_HDCP22_CMD hdcp22;
union RM_SEC2_ACR_CMD acr;
union RM_SEC2_VPR_CMD vpr;
union RM_FLCN_HDCP_CMD hdcp1x;
union RM_SEC2_SPDM_CMD spdm;
union RM_UPROC_TEST_CMD test;
} cmd;
};
union RM_FLCN_CMD {
struct RM_FLCN_CMD_GEN cmdGen;
struct RM_FLCN_CMD_PMU cmdPmu;
struct RM_FLCN_CMD_DPU cmdDpu;
struct RM_FLCN_CMD_SEC2 cmdSec2;
struct RM_FLCN_CMD_GSP cmdGsp;
};
#define RM_GSP_UNIT_REWIND (0x00)
#define RM_GSP_UNIT_INIT (0x02)
#define RM_GSP_UNIT_HDCP22WIRED (0x06)
#define RM_GSP_UNIT_END (0x11)
struct RM_GSP_INIT_MSG_UNIT_READY {
u8 msgType;
u8 taskId;
u8 taskStatus;
};
union RM_GSP_INIT_MSG {
u8 msgType;
struct RM_GSP_INIT_MSG_GSP_INIT gspInit;
struct RM_GSP_INIT_MSG_UNIT_READY msgUnitState;
};
struct RM_UPROC_TEST_MSG_WR_PRIV_PROTECTED_REG {
u8 msgType;
u8 regType;
u8 status;
u8 pad[1];
u32 val;
};
struct RM_UPROC_TEST_MSG_RTTIMER_TEST {
u8 msgType;
u8 status;
u8 pad[2];
u32 oneShotNs;
u32 continuousNs;
};
struct RM_UPROC_TEST_MSG_FAKEIDLE_TEST {
u8 msgType;
u8 status;
};
struct RM_UPROC_TEST_MSG_RD_BLACKLISTED_REG {
u8 msgType;
u8 status;
u8 pad[2];
u32 val;
};
struct RM_UPROC_TEST_MSG_MSCG_ISSUE_FB_ACCESS {
u8 msgType;
u8 status;
u8 pad[2];
};
struct RM_UPROC_TEST_MSG_COMMON_TEST {
u8 msgType;
u8 status;
u8 pad[2];
};
union RM_UPROC_TEST_MSG {
u8 msgType;
struct RM_UPROC_TEST_MSG_WR_PRIV_PROTECTED_REG wrPrivProtectedReg;
struct RM_UPROC_TEST_MSG_RTTIMER_TEST rttimer;
struct RM_UPROC_TEST_MSG_FAKEIDLE_TEST fakeidle;
struct RM_UPROC_TEST_MSG_RD_BLACKLISTED_REG rdBlacklistedReg;
struct RM_UPROC_TEST_MSG_MSCG_ISSUE_FB_ACCESS mscgFbAccess;
struct RM_UPROC_TEST_MSG_COMMON_TEST commonTest;
};
struct RM_FLCN_HDCP_MSG_GENERIC {
u8 msgType;
u8 status;
u8 rsvd[2];
};
struct RM_FLCN_HDCP_MSG_VALIDATE_KSV {
u8 msgType;
u8 status;
u8 attachPoint;
u8 head;
};
struct RM_FLCN_HDCP_MSG_VALIDATE_LPRIME {
u8 msgType;
u8 status;
u8 rsvd[2];
u8 l[20];
};
struct RM_FLCN_HDCP_MSG_READ_SPRIME {
u8 msgType;
u8 status;
u8 sprime[9];
u8 rsvd;
};
union RM_FLCN_HDCP_MSG {
u8 msgType;
struct RM_FLCN_HDCP_MSG_GENERIC gen;
struct RM_FLCN_HDCP_MSG_VALIDATE_KSV ksv;
struct RM_FLCN_HDCP_MSG_VALIDATE_LPRIME lprimeValidateReply;
struct RM_FLCN_HDCP_MSG_READ_SPRIME readSprime;
};
enum RM_FLCN_HDCP22_STATUS {
RM_FLCN_HDCP22_STATUS_ERROR_NULL = 0,
RM_FLCN_HDCP22_STATUS_ERROR_ENC_ACTIVE,
RM_FLCN_HDCP22_STATUS_ERROR_FLCN_BUSY,
RM_FLCN_HDCP22_STATUS_ERROR_TYPE1_LOCK_ACTIVE,
RM_FLCN_HDCP22_STATUS_ERROR_INIT_SESSION_FAILED,
RM_FLCN_HDCP22_STATUS_ERROR_AKE_INIT,
RM_FLCN_HDCP22_STATUS_ERROR_CERT_RX,
RM_FLCN_HDCP22_STATUS_TIMEOUT_CERT_RX,
RM_FLCN_HDCP22_STATUS_ERROR_MASTER_KEY_EXCHANGE,
RM_FLCN_HDCP22_STATUS_ERROR_H_PRIME,
RM_FLCN_HDCP22_STATUS_TIMEOUT_H_PRIME,
RM_FLCN_HDCP22_STATUS_ERROR_PAIRING,
RM_FLCN_HDCP22_STATUS_TIMEOUT_PAIRING,
RM_FLCN_HDCP22_STATUS_ERROR_LC_INIT,
RM_FLCN_HDCP22_STATUS_ERROR_L_PRIME,
RM_FLCN_HDCP22_STATUS_TIMEOUT_L_PRIME,
RM_FLCN_HDCP22_STATUS_ERROR_SKE_INIT,
RM_FLCN_HDCP22_STATUS_ERROR_SET_STREAM_TYPE,
RM_FLCN_HDCP22_STATUS_ERROR_EN_ENC,
RM_FLCN_HDCP22_STATUS_ERROR_RPTR_INIT,
RM_FLCN_HDCP22_STATUS_ERROR_RPTR_STREAM_MNT,
RM_FLCN_HDCP22_STATUS_TIMEOUT_RXID_LIST,
RM_FLCN_HDCP22_STATUS_ERROR_RPTR_MPRIME,
RM_FLCN_HDCP22_STATUS_TIMEOUT_MPRIME,
RM_FLCN_HDCP22_STATUS_ENC_ENABLED,
RM_FLCN_HDCP22_STATUS_INIT_SECONDARY_LINK,
RM_FLCN_HDCP22_STATUS_RPTR_STARTED,
RM_FLCN_HDCP22_STATUS_RPTR_DONE,
RM_FLCN_HDCP22_STATUS_REAUTH_REQ,
RM_FLCN_HDCP22_STATUS_MONITOR_OFF_SUCCESS,
RM_FLCN_HDCP22_STATUS_VALID_SRM,
RM_FLCN_HDCP22_STATUS_ERROR_INVALID_SRM,
RM_FLCN_HDCP22_STATUS_TEST_SE_SUCCESS,
RM_FLCN_HDCP22_STATUS_TEST_SE_FAILURE,
RM_FLCN_HDCP22_STATUS_WRITE_DP_ECF_SUCCESS,
RM_FLCN_HDCP22_STATUS_WRITE_DP_ECF_FAILURE,
RM_FLCN_HDCP22_STATUS_ERROR_NOT_SUPPORTED,
RM_FLCN_HDCP22_STATUS_ERROR_HPD,
RM_FLCN_HDCP22_STATUS_VALIDATE_STREAM_SUCCESS,
RM_FLCN_HDCP22_STATUS_ERROR_VALIDATE_STREAM_FAILURE,
RM_FLCN_HDCP22_STATUS_ERROR_STREAM_INVALID,
RM_FLCN_HDCP22_STATUS_ERROR_ILLEGAL_TIMEREVENT,
RM_FLCN_HDCP22_STATUS_FLUSH_TYPE_SUCCESS,
RM_FLCN_HDCP22_STATUS_FLUSH_TYPE_FAILURE,
RM_FLCN_HDCP22_STATUS_FLUSH_TYPE_LOCK_ACTIVE,
RM_FLCN_HDCP22_STATUS_FLUSH_TYPE_IN_PROGRESS,
RM_FLCN_HDCP22_STATUS_ERROR_REGISTER_RW,
RM_FLCN_HDCP22_STATUS_INVALID_ARGUMENT,
RM_FLCN_HDCP22_STATUS_ERROR_INTEGRITY_CHECK_FAILURE,
RM_FLCN_HDCP22_STATUS_ERROR_INTEGRITY_UPDATE_FAILURE,
RM_FLCN_HDCP22_STATUS_ERROR_DISABLE_WITH_LANECNT0,
RM_FLCN_HDCP22_STATUS_ERROR_START_TIMER,
RM_FLCN_HDCP22_STATUS_ERROR_HWDRM_WAR_AUTH_FAILURE,
RM_FLCN_HDCP22_STATUS_ERROR_START_SESSION,
};
struct RM_FLCN_HDCP22_MSG_GENERIC {
u8 msgType;
enum RM_FLCN_HDCP22_STATUS flcnStatus;
u8 streamType;
};
union RM_FLCN_HDCP22_MSG {
u8 msgType;
struct RM_FLCN_HDCP22_MSG_GENERIC msgGeneric;
};
struct RM_GSP_ACR_MSG_BOOTSTRAP_ENGINE {
u8 msgType;
u32 errorCode;
struct RM_GSP_ACR_BOOTSTRAP_ENGINE_DETAILS1 engineDetails;
};
struct RM_GSP_ACR_MSG_LOCK_WPR {
u8 msgType;
u32 errorCode;
u32 errorInfo;
};
struct RM_GSP_ACR_MSG_UNLOCK_WPR {
u8 msgType;
u32 errorCode;
u32 errorInfo;
};
union RM_GSP_ACR_MSG {
u8 msgType;
struct RM_GSP_ACR_MSG_BOOTSTRAP_ENGINE msgEngine;
struct RM_GSP_ACR_MSG_LOCK_WPR msgLockWpr;
struct RM_GSP_ACR_MSG_UNLOCK_WPR msgUnlockWpr;
};
struct RM_GSP_RMPROXY_MSG {
u8 msgType;
u8 result;
u32 value;
};
struct RM_GSP_SPDM_MSG_PROGRAM_CE_KEYS {
u8 msgType;
u32 errorCode;
};
union RM_GSP_SPDM_MSG {
u8 msgType;
struct RM_GSP_SPDM_MSG_PROGRAM_CE_KEYS msgProgramCeKeys;
};
struct RM_FLCN_MSG_GSP {
struct RM_FLCN_QUEUE_HDR hdr;
union {
union RM_GSP_INIT_MSG init;
union RM_UPROC_TEST_MSG test;
union RM_FLCN_HDCP_MSG hdcp;
union RM_FLCN_HDCP22_MSG hdcp22wired;
union RM_GSP_SCHEDULER_MSG scheduler;
union RM_GSP_ACR_MSG acr;
struct RM_GSP_RMPROXY_MSG rmProxy;
union RM_GSP_SPDM_MSG spdm;
} msg;
};
/*!
* Convenience macro for determining the size of the body of a command or message:
*/
#define RM_FLCN_CMD_BODY_SIZE(u, t) sizeof(struct RM_FLCN_##u##_CMD_##t)
/*!
* Convenience macro for determining the total size of a command or message:
*/
#define RM_FLCN_CMD_SIZE(u, t) \
(RM_FLCN_QUEUE_HDR_SIZE + RM_FLCN_CMD_BODY_SIZE(u, t))
#endif /* TSEC_CMDS_H */
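
As a worked example of the two macros above (assuming no packing attributes beyond what is declared in this header), the MONITOR_OFF command built by the CMD_INTERFACE_TEST code in tsec_boot.c expands to:

/*
 * RM_FLCN_CMD_BODY_SIZE(HDCP22, MONITOR_OFF)
 *   == sizeof(struct RM_FLCN_HDCP22_CMD_MONITOR_OFF) == 3 bytes
 *      (three u8 fields: cmdType, sorNum, dfpSublinkMask)
 * RM_FLCN_CMD_SIZE(HDCP22, MONITOR_OFF)
 *   == RM_FLCN_QUEUE_HDR_SIZE + 3
 *      (RM_FLCN_QUEUE_HDR_SIZE is defined in tsec_comms_cmds.h)
 */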

View File

@@ -0,0 +1,686 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra TSEC Module Support
*
* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tsec_comms_plat.h"
#include "tsec_comms.h"
#include "tsec_comms_regs.h"
#include "tsec_comms_cmds.h"
#define TSEC_QUEUE_POLL_INTERVAL_US (50)
#define TSEC_QUEUE_POLL_COUNT (2000)
#define TSEC_CMD_QUEUE_PORT (0)
#define TSEC_MSG_QUEUE_PORT (0)
#define TSEC_EMEM_PORT (0)
#define TSEC_QUEUE_OFFSET_MAGIC (0x01000000)
#define TSEC_EMEM_SIZE (0x2000)
#define TSEC_MAX_MSG_SIZE (128)
#define DO_IPC_OVER_GSC_CO (1)
#ifdef DO_IPC_OVER_GSC_CO
#define TSEC_BOOT_POLL_TIME_US (100000)
#define TSEC_BOOT_POLL_INTERVAL_US (50)
#define TSEC_BOOT_POLL_COUNT (TSEC_BOOT_POLL_TIME_US / TSEC_BOOT_POLL_INTERVAL_US)
#define TSEC_BOOT_FLAG_MAGIC (0xA5A5A5A5)
static u64 s_ipc_gscco_base;
static u64 s_ipc_gscco_size;
static u64 s_ipc_gscco_page_base;
static u64 s_ipc_gscco_page_size;
static u64 s_ipc_gscco_page_count;
static u64 s_ipc_gscco_free_page_mask;
struct TSEC_BOOT_INFO {
u32 bootFlag;
};
#endif
/*
* Locally cache the init message so that the same can be conveyed
* to DisplayRM when it asks for it
*/
static bool s_init_msg_rcvd;
static u8 s_init_tsec_msg[TSEC_MAX_MSG_SIZE];
/*
* Array of structs to register client callback function
* for every sw unit/module within tsec
*/
struct callback_t {
callback_func_t cb_func;
void *cb_ctx;
};
static struct callback_t s_callbacks[RM_GSP_UNIT_END];
static int validate_cmd(struct RM_FLCN_QUEUE_HDR *cmd_hdr)
{
if (cmd_hdr == NULL)
return -TSEC_EINVAL;
if ((cmd_hdr->size < RM_FLCN_QUEUE_HDR_SIZE) ||
(cmd_hdr->unitId >= RM_GSP_UNIT_END)) {
return -TSEC_EINVAL;
}
return 0;
}
static int ipc_txfr(u32 offset, u8 *buff, u32 size, bool read_msg)
{
#ifdef DO_IPC_OVER_GSC_CO
u8 *gscCo;
u32 idx;
if (offset < TSEC_QUEUE_OFFSET_MAGIC) {
plat_print(LVL_ERR,
"Invalid Offset %x less than TSEC_QUEUE_OFFSET_MAGIC\n", offset);
return -TSEC_EINVAL;
}
offset -= TSEC_QUEUE_OFFSET_MAGIC;
if (!s_ipc_gscco_base || !s_ipc_gscco_size) {
plat_print(LVL_ERR, "Invalid IPC GSC-CO address/size\n");
return -TSEC_EINVAL;
}
if (!buff || !size) {
plat_print(LVL_ERR, "Invalid client buf/size\n");
return -TSEC_EINVAL;
}
if (offset > s_ipc_gscco_size || ((offset + size) > s_ipc_gscco_size)) {
plat_print(LVL_ERR, "Client buf beyond IPC GSC-CO limits\n");
return -TSEC_EINVAL;
}
gscCo = (u8 *)(s_ipc_gscco_base + offset);
if (read_msg) {
for (idx = 0; idx < size; idx++)
buff[idx] = gscCo[idx];
} else {
for (idx = 0; idx < size; idx++)
gscCo[idx] = buff[idx];
}
return 0;
#else
u32 *buff32 = (u32 *)buff;
u32 ememc_offset = tsec_ememc_r(TSEC_EMEM_PORT);
u32 ememd_offset = tsec_ememd_r(TSEC_EMEM_PORT);
u32 num_words, num_bytes, reg32, i;
if (offset < TSEC_QUEUE_OFFSET_MAGIC) {
plat_print(LVL_ERR,
"Invalid Offset %x less than TSEC_QUEUE_OFFSET_MAGIC\n", offset);
return -TSEC_EINVAL;
}
if (!buff || !size) {
plat_print(LVL_ERR, "Invalid client buf/size\n");
return -TSEC_EINVAL;
}
offset -= TSEC_QUEUE_OFFSET_MAGIC;
if (offset > TSEC_EMEM_SIZE || ((offset + size) > TSEC_EMEM_SIZE)) {
plat_print(LVL_ERR, "Client buf beyond EMEM limits\n");
return -TSEC_EINVAL;
}
/*
* Set offset within EMEM
* (DRF_SHIFTMASK(NV_PGSP_EMEMC_OFFS) |
* DRF_SHIFTMASK(NV_PGSP_EMEMC_BLK));
*/
reg32 = offset & 0x00007ffc;
if (read_msg) {
/*
* Enable Auto Increment on Read
* PSEC_EMEMC EMEMC_AINCR
*/
reg32 = reg32 | 0x02000000;
} else {
/*
* Enable Auto Increment on Write
* PSEC_EMEMC EMEMC_AINCW
*/
reg32 = reg32 | 0x01000000;
}
/* Set the number of 4-byte words and remaining residual bytes to transfer */
num_words = size >> 2;
num_bytes = size & 0x3;
/* Transfer 4 byte words */
tsec_plat_reg_write(ememc_offset, reg32);
for (i = 0; i < num_words; i++) {
if (read_msg)
buff32[i] = tsec_plat_reg_read(ememd_offset);
else
tsec_plat_reg_write(ememd_offset, buff32[i]);
}
/* Transfer residual bytes if any */
if (num_bytes > 0) {
u32 bytes_copied = num_words << 2;
/*
* Read the contents first. If we're copying to the EMEM,
* we've set autoincrement on write,
* so reading does not modify the pointer.
* We can, thus, do a read/modify/write without needing
* to worry about the pointer having moved forward.
* There is no special explanation needed
* if we're copying from the EMEM since this is the last
* access to HW in that case.
*/
reg32 = tsec_plat_reg_read(ememd_offset);
if (read_msg) {
for (i = 0; i < num_bytes; i++)
buff[bytes_copied + i] = ((u8 *)&reg32)[i];
} else {
for (i = 0; i < num_bytes; i++)
((u8 *)&reg32)[i] = buff[bytes_copied + i];
tsec_plat_reg_write(ememd_offset, reg32);
}
}
return 0;
#endif
}
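/*
* Worked example of the EMEM path above: a 10-byte write at
* offset = TSEC_QUEUE_OFFSET_MAGIC + 0x20 becomes
*   reg32     = 0x20 & 0x00007ffc  (EMEM offset)
*   reg32    |= 0x01000000         (auto-increment on write)
*   num_words = 10 >> 2 = 2        (two full-word EMEMD writes)
*   num_bytes = 10 & 0x3 = 2       (residual read/modify/write)
* i.e. two 4-byte writes followed by one read/modify/write of the
* final partial word.
*/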
static int ipc_write(u32 head, u8 *pSrc, u32 num_bytes)
{
return ipc_txfr(head, pSrc, num_bytes, false);
}
static int ipc_read(u32 tail, u8 *pdst, u32 num_bytes)
{
return ipc_txfr(tail, pdst, num_bytes, true);
}
#ifdef DO_IPC_OVER_GSC_CO
static u32 tsec_get_boot_flag(void)
{
struct TSEC_BOOT_INFO *bootInfo = (struct TSEC_BOOT_INFO *)(s_ipc_gscco_base);
if (!s_ipc_gscco_base || !s_ipc_gscco_size) {
plat_print(LVL_ERR, "%s: Invalid GSC-CO address/size\n", __func__);
return 0;
} else {
return bootInfo->bootFlag;
}
}
static void tsec_reset_boot_flag(void)
{
struct TSEC_BOOT_INFO *bootInfo = (struct TSEC_BOOT_INFO *)(s_ipc_gscco_base);
if (!s_ipc_gscco_base || !s_ipc_gscco_size)
plat_print(LVL_ERR, "%s: Invalid GSC-CO address/size\n", __func__);
else
bootInfo->bootFlag = 0;
}
#endif
static void invoke_init_cb(void *unused)
{
callback_func_t cb_func;
void *cb_ctx;
tsec_plat_acquire_comms_mutex();
cb_func = s_callbacks[RM_GSP_UNIT_INIT].cb_func;
cb_ctx = s_callbacks[RM_GSP_UNIT_INIT].cb_ctx;
s_callbacks[RM_GSP_UNIT_INIT].cb_func = NULL;
s_callbacks[RM_GSP_UNIT_INIT].cb_ctx = NULL;
tsec_plat_release_comms_mutex();
if (cb_func)
cb_func(cb_ctx, (void *)s_init_tsec_msg);
}
void tsec_comms_drain_msg(bool invoke_cb)
{
int i;
u32 tail = 0;
u32 head = 0;
u32 msgq_head_reg;
u32 msgq_tail_reg;
static u32 sMsgq_start;
struct RM_FLCN_QUEUE_HDR *msg_hdr;
struct RM_GSP_INIT_MSG_GSP_INIT *init_msg_body;
struct RM_FLCN_QUEUE_HDR *cached_init_msg_hdr;
struct RM_GSP_INIT_MSG_GSP_INIT *cached_init_msg_body;
callback_func_t cb_func = NULL;
void *cb_ctx = NULL;
u8 tsec_msg[TSEC_MAX_MSG_SIZE];
msgq_head_reg = tsec_msgq_head_r(TSEC_MSG_QUEUE_PORT);
msgq_tail_reg = tsec_msgq_tail_r(TSEC_MSG_QUEUE_PORT);
msg_hdr = (struct RM_FLCN_QUEUE_HDR *)(tsec_msg);
init_msg_body = (struct RM_GSP_INIT_MSG_GSP_INIT *)
(tsec_msg + RM_FLCN_QUEUE_HDR_SIZE);
cached_init_msg_hdr = (struct RM_FLCN_QUEUE_HDR *)(s_init_tsec_msg);
cached_init_msg_body = (struct RM_GSP_INIT_MSG_GSP_INIT *)
(s_init_tsec_msg + RM_FLCN_QUEUE_HDR_SIZE);
for (i = 0; !sMsgq_start && i < TSEC_QUEUE_POLL_COUNT; i++) {
sMsgq_start = tsec_plat_reg_read(msgq_tail_reg);
if (!sMsgq_start)
tsec_plat_udelay(TSEC_QUEUE_POLL_INTERVAL_US);
}
if (!sMsgq_start)
plat_print(LVL_WARN, "msgq_start=0x%x\n", sMsgq_start);
for (i = 0; i < TSEC_QUEUE_POLL_COUNT; i++) {
tail = tsec_plat_reg_read(msgq_tail_reg);
head = tsec_plat_reg_read(msgq_head_reg);
if (tail != head)
break;
tsec_plat_udelay(TSEC_QUEUE_POLL_INTERVAL_US);
}
if (head == 0 || tail == 0) {
plat_print(LVL_ERR, "Invalid MSGQ head=0x%x, tail=0x%x\n",
head, tail);
goto EXIT;
}
if (tail == head) {
plat_print(LVL_DBG, "Empty MSGQ tail = 0x%x head = 0x%x\n", tail, head);
goto EXIT;
}
while (tail != head) {
/* read header */
ipc_read(tail, tsec_msg, RM_FLCN_QUEUE_HDR_SIZE);
/* copy msg body */
if (msg_hdr->size > RM_FLCN_QUEUE_HDR_SIZE) {
ipc_read(tail + RM_FLCN_QUEUE_HDR_SIZE,
tsec_msg + RM_FLCN_QUEUE_HDR_SIZE,
msg_hdr->size - RM_FLCN_QUEUE_HDR_SIZE);
}
if (msg_hdr->unitId == RM_GSP_UNIT_INIT) {
plat_print(LVL_DBG, "init_msg received\n");
if (init_msg_body->numQueues < 2) {
plat_print(LVL_ERR, "init_msg less queues than expected %d\n",
init_msg_body->numQueues);
goto FAIL;
}
#ifdef DO_IPC_OVER_GSC_CO
/* Poll for the Tsec booted flag and also reset it */
for (i = 0; i < TSEC_BOOT_POLL_COUNT; i++) {
if (tsec_get_boot_flag() == TSEC_BOOT_FLAG_MAGIC)
break;
tsec_plat_udelay(TSEC_BOOT_POLL_INTERVAL_US);
}
if (i >= TSEC_BOOT_POLL_COUNT) {
plat_print(LVL_ERR, "Tsec GSC-CO Boot Flag not set\n");
goto FAIL;
} else {
tsec_reset_boot_flag();
plat_print(LVL_DBG, "Tsec GSC-CO Boot Flag reset done\n");
}
#endif
/* cache the init_msg */
memcpy(cached_init_msg_hdr, msg_hdr, RM_FLCN_QUEUE_HDR_SIZE);
memcpy(cached_init_msg_body, init_msg_body,
msg_hdr->size - RM_FLCN_QUEUE_HDR_SIZE);
/* Invoke the callback and clear it */
tsec_plat_acquire_comms_mutex();
s_init_msg_rcvd = true;
if (invoke_cb) {
cb_func = s_callbacks[msg_hdr->unitId].cb_func;
cb_ctx = s_callbacks[msg_hdr->unitId].cb_ctx;
s_callbacks[msg_hdr->unitId].cb_func = NULL;
s_callbacks[msg_hdr->unitId].cb_ctx = NULL;
}
tsec_plat_release_comms_mutex();
if (cb_func && invoke_cb)
cb_func(cb_ctx, (void *)tsec_msg);
} else if (msg_hdr->unitId < RM_GSP_UNIT_END) {
if (msg_hdr->unitId == RM_GSP_UNIT_HDCP22WIRED) {
plat_print(LVL_DBG, "msg received from hdcp22 unitId 0x%x\n",
msg_hdr->unitId);
} else if (msg_hdr->unitId == RM_GSP_UNIT_REWIND) {
tail = sMsgq_start;
tsec_plat_reg_write(msgq_tail_reg, tail);
head = tsec_plat_reg_read(msgq_head_reg);
plat_print(LVL_DBG, "MSGQ tail rewinded\n");
continue;
} else {
plat_print(LVL_DBG, "msg received from unknown unitId 0x%x\n",
msg_hdr->unitId);
}
/* Invoke the callback and clear it */
if (invoke_cb) {
tsec_plat_acquire_comms_mutex();
cb_func = s_callbacks[msg_hdr->unitId].cb_func;
cb_ctx = s_callbacks[msg_hdr->unitId].cb_ctx;
s_callbacks[msg_hdr->unitId].cb_func = NULL;
s_callbacks[msg_hdr->unitId].cb_ctx = NULL;
tsec_plat_release_comms_mutex();
if (cb_func)
cb_func(cb_ctx, (void *)tsec_msg);
}
} else {
plat_print(LVL_DBG,
"msg received from unknown unitId 0x%x >= RM_GSP_UNIT_END\n",
msg_hdr->unitId);
}
FAIL:
tail += ALIGN(msg_hdr->size, 4);
head = tsec_plat_reg_read(msgq_head_reg);
tsec_plat_reg_write(msgq_tail_reg, tail);
}
EXIT:
return;
}
void tsec_comms_initialize(u64 ipc_co_va, u64 ipc_co_va_size)
{
#ifdef DO_IPC_OVER_GSC_CO
/* Set IPC CO Info before enabling Msg Interrupts from TSEC to CCPLEX */
s_ipc_gscco_base = ipc_co_va;
s_ipc_gscco_size = ipc_co_va_size;
s_ipc_gscco_page_size = (64 * 1024);
/* First Page Reserved */
if (s_ipc_gscco_size > s_ipc_gscco_page_size) {
s_ipc_gscco_page_count = (s_ipc_gscco_size -
s_ipc_gscco_page_size) / s_ipc_gscco_page_size;
} else {
s_ipc_gscco_page_count = 0;
}
s_ipc_gscco_page_base = s_ipc_gscco_page_count ?
s_ipc_gscco_base + s_ipc_gscco_page_size : 0;
s_ipc_gscco_free_page_mask = ~((u64)0);
#else
(void)ipc_co_va;
(void)ipc_co_va_size;
#endif
}
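/*
 * Worked example of the layout above (sizes illustrative): with a 1 MiB
 * carveout and the fixed 64 KiB page size, the first 64 KiB page holds the
 * boot info and is reserved, leaving s_ipc_gscco_page_count = 15 allocatable
 * pages that start at s_ipc_gscco_base + 64 KiB.
 */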
void *tsec_comms_get_gscco_page(u32 page_number, u32 *gscco_offset)
{
#ifdef DO_IPC_OVER_GSC_CO
u8 *page_va;
if (!s_ipc_gscco_page_base || (page_number >= s_ipc_gscco_page_count)) {
plat_print(LVL_ERR,
"%s: No reserved memory for Page %d\n",
__func__, page_number);
return NULL;
}
page_va = (u8 *)s_ipc_gscco_page_base;
page_va += (page_number * s_ipc_gscco_page_size);
if (gscco_offset) {
*gscco_offset =
(u32)((s_ipc_gscco_page_base - s_ipc_gscco_base) +
(page_number * s_ipc_gscco_page_size));
}
return page_va;
#else
plat_print(LVL_ERR, "%s: IPC over GSC-CO not enabled\n", __func__);
return NULL;
#endif
}
EXPORT_SYMBOL_COMMS(tsec_comms_get_gscco_page);
void *tsec_comms_alloc_mem_from_gscco(u32 size_in_bytes, u32 *gscco_offset)
{
#ifdef DO_IPC_OVER_GSC_CO
void *page_va;
u32 page_number;
u64 mask;
/* memory allocated must fit within 1 page */
if (size_in_bytes > s_ipc_gscco_page_size) {
plat_print(LVL_ERR,
"%s: size %d is larger than page size\n",
__func__, size_in_bytes);
return NULL;
}
	/* there must be at least 1 page free */
if (s_ipc_gscco_free_page_mask == 0) {
plat_print(LVL_ERR,
"%s: No free page\n", __func__);
return NULL;
}
/* find a free page */
page_number = 0;
mask = 0x1;
while (!(s_ipc_gscco_free_page_mask & mask)) {
mask <<= 1;
page_number += 1;
}
/* allocate page */
page_va = tsec_comms_get_gscco_page(page_number, gscco_offset);
if (page_va)
s_ipc_gscco_free_page_mask &= ~(mask);
return page_va;
#else
plat_print(LVL_ERR, "%s: IPC over GSC-CO not enabled\n", __func__);
return NULL;
#endif
}
EXPORT_SYMBOL_COMMS(tsec_comms_alloc_mem_from_gscco);
void tsec_comms_free_gscco_mem(void *page_va)
{
#ifdef DO_IPC_OVER_GSC_CO
u64 page_addr = (u64)page_va;
u64 gscco_page_start = s_ipc_gscco_page_base;
u64 gscco_page_end = s_ipc_gscco_page_base +
(s_ipc_gscco_page_count * s_ipc_gscco_page_size);
u64 page_number = (page_addr - gscco_page_start) /
s_ipc_gscco_page_size;
if ((page_addr >= gscco_page_start) &&
(page_addr < gscco_page_end) &&
(!(page_addr % s_ipc_gscco_page_size)))
s_ipc_gscco_free_page_mask |= ((u64)0x1 << page_number);
#endif
}
EXPORT_SYMBOL_COMMS(tsec_comms_free_gscco_mem);
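/*
 * Illustrative (hypothetical) alloc/free pairing for the GSC-CO helpers
 * above; 'off' receives the carveout-relative offset that a command would
 * use to reference the buffer:
 *
 *	u32 off;
 *	void *buf = tsec_comms_alloc_mem_from_gscco(256, &off);
 *
 *	if (buf) {
 *		... fill buf, pass 'off' to TSEC in a command ...
 *		tsec_comms_free_gscco_mem(buf);
 *	}
 */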
int tsec_comms_send_cmd(void *cmd, u32 queue_id,
callback_func_t cb_func, void *cb_ctx)
{
int i;
int placeholder;
u32 head;
u32 tail;
u8 cmd_size;
u32 cmd_size_aligned;
u32 cmdq_head_reg;
u32 cmdq_tail_reg;
static u32 sCmdq_size = 0x80;
static u32 sCmdq_start;
struct RM_FLCN_QUEUE_HDR *cmd_hdr;
struct RM_FLCN_QUEUE_HDR hdr;
if (!s_init_msg_rcvd) {
plat_print(LVL_ERR, "TSEC RISCV hasn't booted successfully\n");
return -TSEC_ENODEV;
}
if (queue_id != TSEC_CMD_QUEUE_PORT)
return -TSEC_EINVAL;
cmdq_head_reg = tsec_cmdq_head_r(TSEC_CMD_QUEUE_PORT);
cmdq_tail_reg = tsec_cmdq_tail_r(TSEC_CMD_QUEUE_PORT);
for (i = 0; !sCmdq_start && i < TSEC_QUEUE_POLL_COUNT; i++) {
sCmdq_start = tsec_plat_reg_read(cmdq_tail_reg);
if (!sCmdq_start)
tsec_plat_udelay(TSEC_QUEUE_POLL_INTERVAL_US);
}
if (!sCmdq_start) {
plat_print(LVL_WARN, "cmdq_start=0x%x\n", sCmdq_start);
return -TSEC_ENODEV;
}
if (validate_cmd(cmd)) {
plat_print(LVL_DBG, "CMD: %s: %d Invalid command\n",
__func__, __LINE__);
return -TSEC_EINVAL;
}
cmd_hdr = (struct RM_FLCN_QUEUE_HDR *)cmd;
tsec_plat_acquire_comms_mutex();
if (s_callbacks[cmd_hdr->unitId].cb_func) {
tsec_plat_release_comms_mutex();
plat_print(LVL_ERR, "more than 1 outstanding cmd for unit 0x%x\n",
cmd_hdr->unitId);
return -TSEC_EINVAL;
}
tsec_plat_release_comms_mutex();
cmd_size = cmd_hdr->size;
placeholder = ALIGN(cmd_size, 4);
if (placeholder < 0) {
plat_print(LVL_ERR, "Alignment found to be negative\n");
return -TSEC_EINVAL;
}
cmd_size_aligned = (unsigned int) placeholder;
head = tsec_plat_reg_read(cmdq_head_reg);
check_space:
tail = tsec_plat_reg_read(cmdq_tail_reg);
if (head < sCmdq_start || tail < sCmdq_start)
plat_print(LVL_ERR, "head/tail less than sCmdq_start, h=0x%x,t=0x%x\n",
head, tail);
if (UINT_MAX - head < cmd_size_aligned) {
pr_err("addition of head and offset wraps\n");
return -EINVAL;
}
if (tail > head) {
if ((head + cmd_size_aligned) < tail)
goto enqueue;
tsec_plat_udelay(TSEC_QUEUE_POLL_INTERVAL_US);
goto check_space;
} else {
if ((head + cmd_size_aligned) < (sCmdq_start + sCmdq_size)) {
goto enqueue;
} else {
if ((sCmdq_start + cmd_size_aligned) < tail) {
goto rewind;
} else {
tsec_plat_udelay(TSEC_QUEUE_POLL_INTERVAL_US);
goto check_space;
}
}
}
rewind:
hdr.unitId = RM_GSP_UNIT_REWIND;
hdr.size = RM_FLCN_QUEUE_HDR_SIZE;
hdr.ctrlFlags = 0;
hdr.seqNumId = 0;
if (ipc_write(head, (u8 *)&hdr, hdr.size))
return -TSEC_EINVAL;
head = sCmdq_start;
tsec_plat_reg_write(cmdq_head_reg, head);
plat_print(LVL_DBG, "CMDQ: rewind h=%x,t=%x\n", head, tail);
enqueue:
tsec_plat_acquire_comms_mutex();
s_callbacks[cmd_hdr->unitId].cb_func = cb_func;
s_callbacks[cmd_hdr->unitId].cb_ctx = cb_ctx;
tsec_plat_release_comms_mutex();
if (ipc_write(head, (u8 *)cmd, cmd_size)) {
tsec_plat_acquire_comms_mutex();
s_callbacks[cmd_hdr->unitId].cb_func = NULL;
s_callbacks[cmd_hdr->unitId].cb_ctx = NULL;
tsec_plat_release_comms_mutex();
return -TSEC_EINVAL;
}
head += cmd_size_aligned;
tsec_plat_reg_write(cmdq_head_reg, head);
plat_print(LVL_DBG, "Cmd sent to unit 0x%x\n", cmd_hdr->unitId);
return 0;
}
EXPORT_SYMBOL_COMMS(tsec_comms_send_cmd);
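/*
 * Minimal sketch of a caller, assuming a header-only command and a
 * completion handler (names and the seqNumId value are hypothetical):
 *
 *	static void on_resp(void *ctx, void *msg)
 *	{
 *		struct RM_FLCN_QUEUE_HDR *hdr = msg;
 *
 *		plat_print(LVL_DBG, "resp from unit 0x%x\n", hdr->unitId);
 *	}
 *
 *	struct RM_FLCN_QUEUE_HDR cmd = {
 *		.unitId    = RM_GSP_UNIT_HDCP22WIRED,
 *		.size      = RM_FLCN_QUEUE_HDR_SIZE,
 *		.ctrlFlags = 0,
 *		.seqNumId  = 1,
 *	};
 *
 *	tsec_comms_send_cmd(&cmd, TSEC_CMD_QUEUE_PORT, on_resp, NULL);
 */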
int tsec_comms_set_init_cb(callback_func_t cb_func, void *cb_ctx)
{
int err = 0;
tsec_plat_acquire_comms_mutex();
if (s_callbacks[RM_GSP_UNIT_INIT].cb_func) {
plat_print(LVL_ERR, "%s: %d: INIT unit cb_func already set\n",
__func__, __LINE__);
err = -TSEC_EINVAL;
goto FAIL;
}
if (!cb_func) {
plat_print(LVL_ERR, "%s: %d: Init CallBack NULL\n",
__func__, __LINE__);
err = -TSEC_EINVAL;
goto FAIL;
}
s_callbacks[RM_GSP_UNIT_INIT].cb_func = cb_func;
s_callbacks[RM_GSP_UNIT_INIT].cb_ctx = cb_ctx;
if (s_init_msg_rcvd) {
plat_print(LVL_DBG, "Init msg already received invoking callback\n");
tsec_plat_queue_work(invoke_init_cb, NULL);
}
#ifdef DO_IPC_OVER_GSC_CO
else if (tsec_get_boot_flag() == TSEC_BOOT_FLAG_MAGIC) {
plat_print(LVL_DBG, "Doorbell missed tsec booted first, invoke init callback\n");
/* Interrupt missed as tsec booted first
* Explicitly call drain_msg
*/
tsec_plat_release_comms_mutex();
tsec_comms_drain_msg(false);
tsec_plat_acquire_comms_mutex();
		/* Init message is drained now, so queue the work item to invoke the init callback */
tsec_plat_queue_work(invoke_init_cb, NULL);
}
#endif
FAIL:
tsec_plat_release_comms_mutex();
return err;
}
EXPORT_SYMBOL_COMMS(tsec_comms_set_init_cb);
void tsec_comms_clear_init_cb(void)
{
tsec_plat_acquire_comms_mutex();
s_callbacks[RM_GSP_UNIT_INIT].cb_func = NULL;
s_callbacks[RM_GSP_UNIT_INIT].cb_ctx = NULL;
tsec_plat_release_comms_mutex();
}
EXPORT_SYMBOL_COMMS(tsec_comms_clear_init_cb);
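/*
 * Illustrative (hypothetical) client flow for the init callback: register it
 * once at probe time and clear it on teardown. If the init message has
 * already arrived, the callback is invoked from a queued work item rather
 * than synchronously.
 *
 *	static void on_init(void *ctx, void *init_msg)
 *	{
 *		... parse RM_GSP_INIT_MSG_GSP_INIT, start sending commands ...
 *	}
 *
 *	tsec_comms_set_init_cb(on_init, my_ctx);
 *	...
 *	tsec_comms_clear_init_cb();
 */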

View File

@@ -0,0 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra TSEC Module Support
*
* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TSEC_COMMS_H
#define TSEC_COMMS_H
typedef void (*callback_func_t)(void *, void *);
/* -------- Tsec driver internal functions to be called by platform-dependent code -------- */
/* @brief: Initialises the IPC carveout (CO) and reserves pages within it.
 *
 * usage: To be called when the tsec driver is initialised. Must be
 * called before any other API from the comms lib is used.
 *
 * params[in]: ipc_co_va carveout base virtual address
 * ipc_co_va_size carveout address space size
 */
void tsec_comms_initialize(u64 ipc_co_va, u64 ipc_co_va_size);
/* @brief: Drains all pending messages from the tsec message
 * queue. It is called when an interrupt is received from
 * tsec, and it must be called in threaded context, not
 * interrupt context.
 *
 * usage: To be called when an interrupt is received from tsec.
 *
 * params[in]: invoke_cb indicates whether to invoke the callback or not.
 */
void tsec_comms_drain_msg(bool invoke_cb);
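/* A minimal sketch of the intended call site, assuming the platform driver
 * requests a threaded IRQ (the handler name is hypothetical):
 *
 *	static irqreturn_t tsec_irq_thread(int irq, void *data)
 *	{
 *		tsec_comms_drain_msg(true);
 *		return IRQ_HANDLED;
 *	}
 */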
/* -------- END -------- */
/* -------- Exported functions which are invoked from DisplayRM. -------- */
/* @brief: Sets the callback for the init message
 *
 * usage: Called to register a callback for the init msg
 *
 * params[in]: cb_func function to be called after the init msg is
 * received
 * cb_ctx pointer to callback context
 *
 * params[out]: return value (0 for success).
 */
int tsec_comms_set_init_cb(callback_func_t cb_func, void *cb_ctx);
/* @brief: Clears the callback for the init message
 *
 * usage: When DisplayRM is unloaded it calls this API to
 * clear the init callback it previously set.
 *
 * params[in]: NONE
 * params[out]: NONE
 */
void tsec_comms_clear_init_cb(void);
/* @brief: Sends a command by writing it into the tsec command
 * queue. Also registers the callback to be invoked when the
 * response arrives.
 *
 * usage: Called when sending a command to tsec.
 *
 * params[in]: cmd pointer to the memory containing the command
 * queue_id Id of the queue being used.
 * cb_func callback function to be registered
 * cb_ctx pointer to the context of the callback function.
 *
 * params[out]: return value (0 for success)
 */
int tsec_comms_send_cmd(void *cmd, u32 queue_id,
callback_func_t cb_func, void *cb_ctx);
/* @brief: Retrieves a page from the carveout memory
 *
 * usage: Called to get a particular page from the carveout.
 *
 * params[in]: page_number page number
 * params[in/out]: gscco_offset filled with the offset of the requested co page
 * params[out]: return value - ccplex va for the co page, or NULL if
 * page_number exceeds the number of available pages
 */
void *tsec_comms_get_gscco_page(u32 page_number, u32 *gscco_offset);
/* @brief: Allocates memory from the carveout
 *
 * usage: Called to allocate memory from the carveout.
 *
 * params[in]: size_in_bytes required size (must be less
 * than the page size)
 * params[in/out]: gscco_offset filled with the offset of the allocated co memory
 * params[out]: return value - ccplex va for the co memory, or NULL on
 * allocation failure
 */
void *tsec_comms_alloc_mem_from_gscco(u32 size_in_bytes, u32 *gscco_offset);
/* @brief: Frees memory previously allocated using
 * tsec_comms_alloc_mem_from_gscco
 *
 * params[in]: page_va pointer previously returned by
 * tsec_comms_alloc_mem_from_gscco
 */
void tsec_comms_free_gscco_mem(void *page_va);
/* -------- END -------- */
#endif /* TSEC_COMMS_H */

View File

@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra TSEC Module Support
*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TSEC_COMMS_CMDS_H
#define TSEC_COMMS_CMDS_H
struct RM_FLCN_QUEUE_HDR {
u8 unitId;
u8 size;
u8 ctrlFlags;
u8 seqNumId;
};
#define RM_FLCN_QUEUE_HDR_SIZE sizeof(struct RM_FLCN_QUEUE_HDR)
#define RM_GSP_UNIT_REWIND (0x00)
#define RM_GSP_UNIT_INIT (0x02)
#define RM_GSP_UNIT_HDCP22WIRED (0x06)
#define RM_GSP_UNIT_END (0x11)
#define RM_GSP_LOG_QUEUE_NUM (2)
struct RM_GSP_INIT_MSG_GSP_INIT {
u8 msgType;
u8 numQueues;
u16 osDebugEntryPoint;
struct {
u32 queueOffset;
u16 queueSize;
u8 queuePhyId;
u8 queueLogId;
} qInfo[RM_GSP_LOG_QUEUE_NUM];
u32 rsvd1;
u8 rsvd2;
u8 status;
};
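/* Example (mirrors the queue-rewind path in tsec_comms.c): a rewind command
 * is a bare header with no body:
 *
 *	struct RM_FLCN_QUEUE_HDR hdr = {
 *		.unitId    = RM_GSP_UNIT_REWIND,
 *		.size      = RM_FLCN_QUEUE_HDR_SIZE,
 *		.ctrlFlags = 0,
 *		.seqNumId  = 0,
 *	};
 */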
#endif /* TSEC_COMMS_CMDS_H */

View File

@@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra TSEC Module Support
*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TSEC_COMMS_PLAT_H
#define TSEC_COMMS_PLAT_H
#define LVL_INFO (1)
#define LVL_DBG (2)
#define LVL_WARN (3)
#define LVL_ERR (4)
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/device.h>
#include <linux/platform_device.h>
extern struct platform_device *g_tsec;
#define EXPORT_SYMBOL_COMMS(sym) EXPORT_SYMBOL(sym)
#define TSEC_EINVAL EINVAL
#define TSEC_ENODEV ENODEV
#define plat_print(level, fmt, ...) \
do { \
if (level == LVL_INFO) \
dev_info(&g_tsec->dev, fmt, ##__VA_ARGS__); \
else if (level == LVL_DBG) \
dev_dbg(&g_tsec->dev, fmt, ##__VA_ARGS__); \
else if (level == LVL_WARN) \
dev_warn(&g_tsec->dev, fmt, ##__VA_ARGS__); \
else if (level == LVL_ERR) \
dev_err(&g_tsec->dev, fmt, ##__VA_ARGS__); \
} while (0)
#elif defined(__DCE_KERNEL__)
// Functions to be implemented by DCE
#else
// Platform not supported
#endif
typedef void (*tsec_plat_work_cb_t)(void *);
/* @brief: API to write a register r with the value specified by v.
*
* usage: Writes a register r with a value specified by v.
*
* params[in]: r register address to write to
* v value to write
*/
void tsec_plat_reg_write(u32 r, u32 v);
/* @brief: API to read a register specified by address r.
 *
 * usage: Reads the register specified by address r.
 *
 * params[in]: r register to read from
 *
 * params[out]: value that is read
 */
uint32_t tsec_plat_reg_read(u32 r);
/* @brief: Adds a delay of usec microseconds.
 *
 * usage: Called to add a delay.
 *
 * params[in]: usec delay specified in microseconds.
 */
void tsec_plat_udelay(u64 usec);
/* @brief: The tsec comms unit needs a mutex for its internal
 * synchronization, which the tsec driver provides. This API
 * acquires that mutex.
 *
 * usage: Called to acquire the mutex provided by the tsec driver.
 */
void tsec_plat_acquire_comms_mutex(void);
/* @brief: API to release the mutex provided by the tsec driver.
 *
 * usage: Called to release the mutex acquired via
 * tsec_plat_acquire_comms_mutex.
 */
void tsec_plat_release_comms_mutex(void);
/* @brief: A generic API to queue a work item. The work item
 * will later be scheduled in the work queue's thread/task context.
 *
 * usage: Used for queueing a work item.
 *
 * params[in]: cb callback
 * ctx context
 */
void tsec_plat_queue_work(tsec_plat_work_cb_t cb, void *ctx);
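/* Minimal sketch of a possible Linux-side implementation, assuming a wrapper
 * is allocated per request (all names below are hypothetical):
 *
 *	struct tsec_work {
 *		struct work_struct work;
 *		tsec_plat_work_cb_t cb;
 *		void *ctx;
 *	};
 *
 *	static void tsec_work_fn(struct work_struct *w)
 *	{
 *		struct tsec_work *tw = container_of(w, struct tsec_work, work);
 *
 *		tw->cb(tw->ctx);
 *		kfree(tw);
 *	}
 */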
#endif /* TSEC_COMMS_PLAT_H */

View File

@@ -0,0 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra TSEC Module Support
*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef TSEC_COMMS_REGS_H
#define TSEC_COMMS_REGS_H
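/*
 * For example, with the accessors below: tsec_cmdq_head_r(0) returns 0x1c00,
 * the offset of NV_PSEC_QUEUE_HEAD_0 for command queue 0, and
 * tsec_cmdq_head_r(1) returns 0x1c08 for queue 1.
 */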
static inline u32 tsec_cmdq_head_r(u32 r)
{
/* NV_PSEC_QUEUE_HEAD_0 */
return (0x1c00+(r)*8);
}
static inline u32 tsec_cmdq_tail_r(u32 r)
{
/* NV_PSEC_QUEUE_TAIL_0 */
return (0x1c04+(r)*8);
}
static inline u32 tsec_msgq_head_r(u32 r)
{
/* NV_PSEC_MSGQ_HEAD_0 */
return (0x1c80+(r)*8);
}
static inline u32 tsec_msgq_tail_r(u32 r)
{
/* NV_PSEC_MSGQ_TAIL_0 */
return (0x1c84+(r)*8);
}
static inline u32 tsec_ememc_r(u32 r)
{
/* NV_PSEC_EMEMC_0 */
return (0x1ac0+(r)*8);
}
static inline u32 tsec_ememd_r(u32 r)
{
/* NV_PSEC_EMEMD_0 */
return (0x1ac4+(r)*8);
}
#endif /* TSEC_COMMS_REGS_H */

View File

@@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Tegra TSEC Module Support
*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TSEC_LINUX_H
#define TSEC_LINUX_H
#include <linux/types.h> /* for types like u8, u32 etc */
#include <linux/platform_device.h> /* for platform_device */
#include <linux/of_platform.h> /* for of_match_device etc */
#include <linux/slab.h> /* for kzalloc */
#include <linux/delay.h> /* for udelay */
#include <linux/clk.h> /* for clk_prepare_enable */
#include <linux/reset.h> /* for reset_control_reset */
#include <linux/iommu.h> /* for dev_iommu_fwspec_get */
#include <linux/iopoll.h> /* for readl_poll_timeout */
#include <linux/dma-mapping.h> /* for dma_map_page_attrs */
#include <linux/pm.h> /* for dev_pm_ops */
#include <linux/version.h> /* for KERNEL_VERSION */
#include <linux/interrupt.h> /* for enable_irq */
#include <linux/firmware.h> /* for request_firmware */
#if (KERNEL_VERSION(5, 14, 0) <= LINUX_VERSION_CODE)
#include <soc/tegra/mc.h> /* for tegra_mc_get_carveout_info */
#include <linux/libnvdimm.h> /* for arch_invalidate_pmem */
#else
#include <linux/platform/tegra/tegra_mc.h> /* for mc_get_carveout_info */
#include <asm/cacheflush.h> /* for __flush_dcache_area */
#endif
#endif /* TSEC_LINUX_H */

View File

@@ -0,0 +1,266 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tegra TSEC Module Support
*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef TSEC_REGS_H
#define TSEC_REGS_H
#include "tsec_comms/tsec_comms_regs.h"
static inline u32 tsec_thi_int_status_r(void)
{
/* NV_PSEC_THI_INT_STATUS_0 */
return 0x78;
}
static inline u32 tsec_thi_int_status_clr_f(void)
{
return 0x1;
}
static inline u32 tsec_thi_streamid0_r(void)
{
/* NV_PSEC_THI_STREAMID0_0 */
return 0x30;
}
static inline u32 tsec_thi_streamid1_r(void)
{
/* NV_PSEC_THI_STREAMID1_0 */
return 0x34;
}
static inline u32 tsec_priv_blocker_ctrl_cg1_r(void)
{
/* NV_PSEC_PRIV_BLOCKER_CTRL_CG1 */
return 0x1e28;
}
static inline u32 tsec_riscv_cg_r(void)
{
/* NV_PSEC_RISCV_CG */
return 0x2398;
}
static inline u32 tsec_irqsclr_r(void)
{
/* NV_PSEC_FALCON_IRQSCLR_0 */
return 0x1004;
}
static inline u32 tsec_irqsclr_swgen0_set_f(void)
{
return 0x40;
}
static inline u32 tsec_irqstat_r(void)
{
/* NV_PSEC_FALCON_IRQSTAT_0 */
return 0x1008;
}
static inline u32 tsec_irqstat_swgen0(void)
{
return 0x40;
}
static inline u32 tsec_irqstat_swgen1(void)
{
return 0x80;
}
static inline u32 tsec_riscv_irqmset_r(void)
{
/* NV_PSEC_RISCV_IRQMSET_0 */
return 0x2520;
}
static inline u32 tsec_riscv_irqmset_swgen0_set_f(void)
{
return 0x40;
}
static inline u32 tsec_riscv_irqmclr_r(void)
{
/* NV_PSEC_RISCV_IRQMCLR_0 */
return 0x2524;
}
static inline u32 tsec_riscv_irqmclr_swgen0_set_f(void)
{
return 0x40;
}
static inline u32 tsec_riscv_irqmclr_swgen1_set_f(void)
{
return 0x80;
}
static inline u32 tsec_thi_sec_r(void)
{
/* NV_PSEC_THI_THI_SEC_0 */
return 0x38;
}
static inline u32 tsec_thi_sec_chlock_f(void)
{
return 0x100;
}
static inline u32 tsec_riscv_bcr_ctrl_r(void)
{
/* NV_PSEC_RISCV_BCR_CTRL */
return 0x2668;
}
static inline u32 tsec_riscv_bcr_ctrl_core_select_riscv_f(void)
{
return 0x10;
}
static inline u32 tsec_riscv_bcr_dmaaddr_pkcparam_lo_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_PKCPARAM_LO */
return 0x2670;
}
static inline u32 tsec_riscv_bcr_dmaaddr_pkcparam_hi_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_PKCPARAM_HI */
return 0x2674;
}
static inline u32 tsec_riscv_bcr_dmaaddr_fmccode_lo_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_FMCCODE_LO */
return 0x2678;
}
static inline u32 tsec_riscv_bcr_dmaaddr_fmccode_hi_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_FMCCODE_HI */
return 0x267c;
}
static inline u32 tsec_riscv_bcr_dmaaddr_fmcdata_lo_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_FMCDATA_LO */
return 0x2680;
}
static inline u32 tsec_riscv_bcr_dmaaddr_fmcdata_hi_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_FMCDATA_HI */
return 0x2684;
}
static inline u32 tsec_riscv_bcr_dmacfg_r(void)
{
/* NV_PSEC_RISCV_BCR_DMACFG */
return 0x266c;
}
static inline u32 tsec_riscv_bcr_dmacfg_target_local_fb_f(void)
{
return 0x0;
}
static inline u32 tsec_riscv_bcr_dmacfg_lock_locked_f(void)
{
return 0x80000000;
}
static inline u32 tsec_riscv_bcr_dmacfg_sec_r(void)
{
/* NV_PSEC_RISCV_BCR_DMACFG_SEC */
return 0x2694;
}
static inline u32 tsec_riscv_bcr_dmacfg_sec_gscid_f(u32 v)
{
return (v & 0x1f) << 16;
}
static inline u32 tsec_falcon_mailbox0_r(void)
{
/* NV_PSEC_FALCON_MAILBOX0 */
return 0x1040;
}
static inline u32 tsec_falcon_mailbox1_r(void)
{
/* NV_PSEC_FALCON_MAILBOX1 */
return 0x1044;
}
static inline u32 tsec_riscv_cpuctl_r(void)
{
/* NV_PSEC_RISCV_CPUCTL */
return 0x2388;
}
static inline u32 tsec_riscv_cpuctl_startcpu_true_f(void)
{
return 0x1;
}
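/*
 * For example, tsec_riscv_cpuctl_active_stat_v() below extracts bit 7 of
 * NV_PSEC_RISCV_CPUCTL, so a raw register value of 0x80 yields 0x1, which
 * equals tsec_riscv_cpuctl_active_stat_active_v(), i.e. the core is active.
 */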
static inline u32 tsec_riscv_cpuctl_active_stat_v(u32 r)
{
return (r >> 7) & 0x1;
}
static inline u32 tsec_riscv_cpuctl_active_stat_active_v(void)
{
return 0x00000001;
}
static inline u32 tsec_riscv_br_retcode_r(void)
{
/* NV_PSEC_RISCV_BR_RETCODE */
return 0x265c;
}
static inline u32 tsec_riscv_br_retcode_result_v(u32 r)
{
return (r >> 0) & 0x3;
}
static inline u32 tsec_riscv_br_retcode_result_pass_v(void)
{
return 0x00000003;
}
#endif /* TSEC_REGS_H */