tsec: Return SoC specific offsets

Since the offsets for the tsec engine registers have changed with t264,
this change adds support for using SoC-specific register offsets.

Jira TSEC-14

Change-Id: I37afc076809008b0948239f5e9555dfa5c763ba8
Signed-off-by: spatki <spatki@nvidia.com>
Signed-off-by: Mayuresh Kulkarni <mkulkarni@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3360321
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Nikesh Oswal <noswal@nvidia.com>
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
This commit is contained in:
Mayuresh Kulkarni
2023-03-27 16:03:09 +00:00
committed by Jon Hunter
parent 97f7875469
commit 2aa72a2701
11 changed files with 372 additions and 353 deletions

View File

@@ -6,4 +6,4 @@ GCOV_PROFILE := y
LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/video/tegra/tsec
obj-m += tsecriscv.o
tsecriscv-y := tsec_comms/tsec_comms.o tsec_boot.o tsec.o
tsecriscv-y := tsec_comms/tsec_comms.o tsec_t23x.o tsec_t264.o tsec_boot.o tsec.o

View File

@@ -12,6 +12,13 @@
#include "tsec_boot.h"
#include "tsec_regs.h"
/*
* TSEC register offsets
*/
extern struct tsec_reg_offsets_t t23x_reg_offsets;
extern struct tsec_reg_offsets_t t264_reg_offsets;
/*
* TSEC Device Data
*/
@@ -21,6 +28,8 @@ static struct tsec_device_data t23x_tsec_data = {
.riscv_desc_bin = "tegra23x/nvhost_tsec_desc.fw",
.riscv_image_bin = "tegra23x/nvhost_tsec_riscv.fw",
.dma_mask_bits = 39,
.soc = TSEC_ON_T23x,
.tsec_reg_offsets = &t23x_reg_offsets
};
MODULE_FIRMWARE("tegra23x/nvhost_tsec_riscv.fw");
MODULE_FIRMWARE("tegra23x/nvhost_tsec_desc.fw");
@@ -30,6 +39,8 @@ static struct tsec_device_data t239_tsec_data = {
.riscv_desc_bin = "tegra239/nvhost_tsec_desc.fw",
.riscv_image_bin = "tegra239/nvhost_tsec_riscv.fw",
.dma_mask_bits = 39,
.soc = TSEC_ON_T239,
.tsec_reg_offsets = &t23x_reg_offsets
};
static struct tsec_device_data t264_tsec_data = {
@@ -37,6 +48,8 @@ static struct tsec_device_data t264_tsec_data = {
.riscv_desc_bin = "tegra264/nvhost_tsec_desc.fw",
.riscv_image_bin = "tegra264/nvhost_tsec_riscv.fw",
.dma_mask_bits = 48,
.soc = TSEC_ON_T26x,
.tsec_reg_offsets = &t264_reg_offsets
};
/*
@@ -105,7 +118,7 @@ static void tsec_set_streamid_regs(struct device *dev,
{
struct iommu_fwspec *fwspec;
int streamid;
struct tsec_reg_offsets_t *reg_off = pdata->tsec_reg_offsets;
/* Get the StreamID value */
fwspec = dev_iommu_fwspec_get(dev);
if (fwspec && fwspec->num_ids)
@@ -114,14 +127,16 @@ static void tsec_set_streamid_regs(struct device *dev,
streamid = 0x7F; /* bypass hwid */
/* Update the StreamID value */
tsec_writel(pdata, tsec_thi_streamid0_r(), streamid);
tsec_writel(pdata, tsec_thi_streamid1_r(), streamid);
tsec_writel(pdata, reg_off->THI_STREAMID0_0, streamid);
tsec_writel(pdata, reg_off->THI_STREAMID1_0, streamid);
}
static void tsec_set_cg_regs(struct tsec_device_data *pdata)
{
tsec_writel(pdata, tsec_priv_blocker_ctrl_cg1_r(), 0x0);
tsec_writel(pdata, tsec_riscv_cg_r(), 0x3);
struct tsec_reg_offsets_t *reg_off = pdata->tsec_reg_offsets;
tsec_writel(pdata, reg_off->PRIV_BLOCKER_CTRL_CG1, 0x0);
tsec_writel(pdata, reg_off->RISCV_CG, 0x3);
}
#ifdef CONFIG_DEBUG_FS
@@ -146,14 +161,15 @@ static int tsec_debug_show(struct seq_file *s, void *unused)
{
int info_len = 0;
struct tsec_device_data *pdata = (struct tsec_device_data *)s->private;
struct tsec_reg_offsets_t *reg_off = pdata->tsec_reg_offsets;
/* Do not attempt to read DMEM if TSEC is power-down */
if (pdata->power_on) {
#define DMEM_PORT (0)
/* Offset of Log Buffer in DMEM */
u32 log_bug_off = tsec_dmem_logbuf_offset_f();
u32 dmemC = tsec_falcon_dmemc_r(DMEM_PORT);
u32 dmemD = tsec_falcon_dmemd_r(DMEM_PORT);
u32 log_bug_off = reg_off->DMEM_LOGBUF_OFFSET;
u32 dmemC = tsec_falcon_dmemc_r(DMEM_PORT, reg_off->FALCON_DMEMC_0);
u32 dmemD = tsec_falcon_dmemd_r(DMEM_PORT, reg_off->FALCON_DMEMD_0);
struct nvriscv_log_buffer log_buf_info;
/* Auto Increment Read */

View File

@@ -8,6 +8,7 @@
#ifndef TSEC_H
#define TSEC_H
#include "tsec_regs.h"
/*
* TSEC Device Data Structure
*/
@@ -20,6 +21,13 @@
#define TSEC_PKA_CLK_INDEX (2)
#define TSEC_NUM_OF_CLKS (3)
enum tsec_soc {
TSEC_ON_T23x = 0,
TSEC_ON_T239,
TSEC_ON_T26x,
TSEC_ON_INVALID,
};
struct tsec_device_data {
void __iomem *reg_aperture;
struct device_dma_parameters dma_parms;
@@ -54,6 +62,11 @@ struct tsec_device_data {
/* Number of bits for DMA mask - IOVA/PA number of bits */
u8 dma_mask_bits;
/* Which SOC Tsec is running on */
enum tsec_soc soc;
/* store the register offsets */
struct tsec_reg_offsets_t *tsec_reg_offsets;
};
/*

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Tegra TSEC Module Support
*/
@@ -334,6 +334,7 @@ int tsec_finalize_poweron(struct platform_device *dev)
void __iomem *ipc_co_va = NULL;
dma_addr_t ipc_co_iova = 0;
dma_addr_t ipc_co_iova_with_streamid;
struct tsec_reg_offsets_t *reg_off = pdata->tsec_reg_offsets;
if (!pdata) {
dev_err(&dev->dev, "no platform data\n");
@@ -409,59 +410,59 @@ int tsec_finalize_poweron(struct platform_device *dev)
}
/* Lock channel so that non-TZ channel request can't write non-THI region */
tsec_writel(pdata, tsec_thi_sec_r(), tsec_thi_sec_chlock_f());
tsec_writel(pdata, reg_off->THI_SEC_0, reg_off->THI_SEC_CHLOCK);
/* Select RISC-V core */
tsec_writel(pdata, tsec_riscv_bcr_ctrl_r(),
tsec_riscv_bcr_ctrl_core_select_riscv_f());
tsec_writel(pdata, reg_off->RISCV_BCR_CTRL,
reg_off->RISCV_BCR_CTRL_CORE_SELECT_RISCV);
/* Program manifest start address */
pa = (img_pa + rv_data->desc.manifest_offset) >> 8;
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_pkcparam_lo_r(),
tsec_writel(pdata, reg_off->RISCV_BCR_DMAADDR_PKCPARAM_LO,
lower_32_bits(pa));
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_pkcparam_hi_r(),
tsec_writel(pdata, reg_off->RISCV_BCR_DMAADDR_PKCPARAM_HI,
upper_32_bits(pa));
/* Program FMC code start address */
pa = (img_pa + rv_data->desc.code_offset) >> 8;
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_fmccode_lo_r(),
tsec_writel(pdata, reg_off->RISCV_BCR_DMAADDR_FMCCODE_LO,
lower_32_bits(pa));
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_fmccode_hi_r(),
tsec_writel(pdata, reg_off->RISCV_BCR_DMAADDR_FMCCODE_HI,
upper_32_bits(pa));
/* Program FMC data start address */
pa = (img_pa + rv_data->desc.data_offset) >> 8;
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_fmcdata_lo_r(),
tsec_writel(pdata, reg_off->RISCV_BCR_DMAADDR_FMCDATA_LO,
lower_32_bits(pa));
tsec_writel(pdata, tsec_riscv_bcr_dmaaddr_fmcdata_hi_r(),
tsec_writel(pdata, reg_off->RISCV_BCR_DMAADDR_FMCDATA_HI,
upper_32_bits(pa));
/* Program DMA config registers */
tsec_writel(pdata, tsec_riscv_bcr_dmacfg_sec_r(),
tsec_riscv_bcr_dmacfg_sec_gscid_f(img_co_gscid));
tsec_writel(pdata, tsec_riscv_bcr_dmacfg_r(),
tsec_riscv_bcr_dmacfg_target_local_fb_f() |
tsec_riscv_bcr_dmacfg_lock_locked_f());
tsec_writel(pdata, reg_off->RISCV_BCR_DMACFG_SEC,
tsec_riscv_bcr_dmacfg_sec_gscid_f(img_co_gscid, reg_off->RISCV_BCR_DMACFG_SEC));
tsec_writel(pdata, reg_off->RISCV_BCR_DMACFG,
reg_off->RISCV_BCR_DMACFG_TARGET_LOCAL_FB |
reg_off->RISCV_BCR_DMACFG_LOCK_LOCKED);
/* Pass the address of ipc carveout via mailbox registers */
ipc_co_iova_with_streamid = (ipc_co_iova | TSEC_RISCV_SMMU_STREAMID1);
tsec_writel(pdata, tsec_falcon_mailbox0_r(),
tsec_writel(pdata, reg_off->FALCON_MAILBOX0,
lower_32_bits((unsigned long long)ipc_co_iova_with_streamid));
tsec_writel(pdata, tsec_falcon_mailbox1_r(),
tsec_writel(pdata, reg_off->FALCON_MAILBOX1,
upper_32_bits((unsigned long long)ipc_co_iova_with_streamid));
/* Kick start RISC-V and let BR take over */
tsec_writel(pdata, tsec_riscv_cpuctl_r(),
tsec_riscv_cpuctl_startcpu_true_f());
tsec_writel(pdata, reg_off->RISCV_CPUCTL,
reg_off->RISCV_CPUCTL_STARTCPU_TRUE);
cpuctl_addr = pdata->reg_aperture + tsec_riscv_cpuctl_r();
retcode_addr = pdata->reg_aperture + tsec_riscv_br_retcode_r();
mailbox0_addr = pdata->reg_aperture + tsec_falcon_mailbox0_r();
cpuctl_addr = pdata->reg_aperture + reg_off->RISCV_CPUCTL;
retcode_addr = pdata->reg_aperture + reg_off->RISCV_BR_RETCODE;
mailbox0_addr = pdata->reg_aperture + reg_off->FALCON_MAILBOX0;
/* Check BR return code */
err = readl_poll_timeout(retcode_addr, val,
(tsec_riscv_br_retcode_result_v(val) ==
tsec_riscv_br_retcode_result_pass_v()),
(tsec_riscv_br_retcode_result_v(val, reg_off->RISCV_BR_RETCODE_RESULT) ==
reg_off->RISCV_BR_RETCODE_RESULT_PASS),
RISCV_IDLE_CHECK_PERIOD,
RISCV_IDLE_TIMEOUT_DEFAULT);
if (err) {
@@ -471,8 +472,8 @@ int tsec_finalize_poweron(struct platform_device *dev)
/* Check cpuctl active state */
err = readl_poll_timeout(cpuctl_addr, val,
(tsec_riscv_cpuctl_active_stat_v(val) ==
tsec_riscv_cpuctl_active_stat_active_v()),
(tsec_riscv_cpuctl_active_stat_v(val, reg_off->RISCV_CPUCTL_ACTIVE_STAT) ==
reg_off->RISCV_CPUCTL_ACTIVE_STAT_ACTIVE),
RISCV_IDLE_CHECK_PERIOD,
RISCV_IDLE_TIMEOUT_DEFAULT);
if (err) {
@@ -500,16 +501,18 @@ int tsec_finalize_poweron(struct platform_device *dev)
* Arm driver code.
* nvriscv/drivers/src/debug/debug.c:164: irqFireSwGen(SYS_INTR_SWGEN1)
*/
tsec_writel(pdata, tsec_riscv_irqmclr_r(), tsec_riscv_irqmclr_swgen1_set_f());
tsec_writel(pdata, reg_off->RISCV_IRQMCLR_0,
reg_off->RISCV_IRQMCLR_SWGEN1_SET);
/* initialise the comms library before enabling msg interrupt */
tsec_comms_initialize((__force u64)ipc_co_va, ipc_co_info.size);
/* enable message interrupt from tsec to ccplex */
enable_irq(pdata->irq);
/* Booted-up successfully */
dev_info(&dev->dev, "RISC-V boot success\n");
#if CMD_INTERFACE_TEST
pr_debug("cmd_size=%d, cmdDataSize=%d\n", cmd_size, cmdDataSize);
msleep(3000);
@@ -575,30 +578,31 @@ static irqreturn_t tsec_irq_top_half(int irq, void *dev_id)
struct tsec_device_data *pdata = platform_get_drvdata(pdev);
irqreturn_t irq_ret_val = IRQ_HANDLED;
u32 irq_status;
struct tsec_reg_offsets_t *reg_off = pdata->tsec_reg_offsets;
spin_lock_irqsave(&pdata->mirq_lock, flags);
/* Read the interrupt status */
irq_status = tsec_readl(pdata, tsec_irqstat_r());
irq_status = tsec_readl(pdata, reg_off->RISCV_IRQSTAT_0);
/* Clear the interrupt */
tsec_writel(pdata, tsec_thi_int_status_r(),
tsec_thi_int_status_clr_f());
tsec_writel(pdata, reg_off->THI_INT_STATUS_0,
reg_off->THI_INT_STATUS_CLR_0);
/* Wakeup threaded handler for SWGEN0 Irq */
if (irq_status & tsec_irqstat_swgen0()) {
if (irq_status & reg_off->RISCV_IRQSTAT_SWGEN0) {
/* Clear SWGEN0 Interrupt */
tsec_writel(pdata, tsec_irqsclr_r(),
tsec_irqsclr_swgen0_set_f());
tsec_writel(pdata, reg_off->RISCV_IRQSCLR_0,
reg_off->RISCV_IRQSCLR_SWGEN0_SET);
/* Mask the interrupt.
* Clear RISCV Mask for SWGEN0, so that no more SWGEN0
* interrupts will be routed to CCPLEX, it will be re-enabled
* by the bottom half
*/
tsec_writel(pdata, tsec_riscv_irqmclr_r(),
tsec_riscv_irqmclr_swgen0_set_f());
tsec_writel(pdata, reg_off->RISCV_IRQMCLR_0,
reg_off->RISCV_IRQMCLR_SWGEN0_SET);
irq_ret_val = IRQ_WAKE_THREAD;
irq_status &= ~(tsec_irqstat_swgen0());
irq_status &= ~(reg_off->RISCV_IRQSTAT_SWGEN0);
}
/* RISCV FW is generating SWGEN1 when it logs something
@@ -608,10 +612,10 @@ static irqreturn_t tsec_irq_top_half(int irq, void *dev_id)
* hence we just mask out SWGEN1 interrupt here so that it
* is not received any further
*/
if (irq_status & tsec_irqstat_swgen1()) {
tsec_writel(pdata, tsec_riscv_irqmclr_r(),
tsec_riscv_irqmclr_swgen1_set_f());
irq_status &= ~(tsec_irqstat_swgen1());
if (irq_status & reg_off->RISCV_IRQSTAT_SWGEN1) {
tsec_writel(pdata, reg_off->RISCV_IRQMCLR_0,
reg_off->RISCV_IRQMCLR_SWGEN1_SET);
irq_status &= ~(reg_off->RISCV_IRQSTAT_SWGEN1);
}
spin_unlock_irqrestore(&pdata->mirq_lock, flags);
@@ -621,6 +625,11 @@ static irqreturn_t tsec_irq_top_half(int irq, void *dev_id)
static irqreturn_t tsec_irq_bottom_half(int irq, void *args)
{
struct tsec_device_data *pdata;
struct tsec_reg_offsets_t *reg_off;
pdata = platform_get_drvdata(g_tsec);
reg_off = pdata->tsec_reg_offsets;
/* Call into the comms lib API to drain the message */
tsec_comms_drain_msg(true);
@@ -629,8 +638,8 @@ static irqreturn_t tsec_irq_bottom_half(int irq, void *args)
* and if it is pending the CCPLEX will be interrupted
* by this the top half
*/
tsec_writel(platform_get_drvdata(g_tsec),
tsec_riscv_irqmset_r(), tsec_riscv_irqmset_swgen0_set_f());
tsec_writel(pdata, reg_off->RISCV_IRQMSET_0,
reg_off->RISCV_IRQMSET_SWGEN0_SET);
return IRQ_HANDLED;
}
@@ -717,3 +726,81 @@ int tsec_kickoff_boot(struct platform_device *pdev)
return 0;
}
/*
 * Return the SoC-specific byte offset of the command queue HEAD register
 * for command queue port @r, or U32_MAX if the device data or the
 * per-SoC register offset table has not been populated yet.
 */
u32 tsec_plat_cmdq_head_r(u32 r)
{
    u32 offset = U32_MAX;
    struct tsec_device_data *pdata = platform_get_drvdata(g_tsec);
    /* pdata may be NULL before probe completes; guard before deref */
    struct tsec_reg_offsets_t *reg_off = pdata ? pdata->tsec_reg_offsets : NULL;

    if (reg_off != NULL) {
        /* consecutive queue ports are spaced 8 bytes apart */
        offset = reg_off->QUEUE_HEAD_0 + (r) * 8;
    }

    return offset;
}
/*
 * Return the SoC-specific byte offset of the command queue TAIL register
 * for command queue port @r, or U32_MAX if the device data or the
 * per-SoC register offset table has not been populated yet.
 */
u32 tsec_plat_cmdq_tail_r(u32 r)
{
    u32 offset = U32_MAX;
    struct tsec_device_data *pdata = platform_get_drvdata(g_tsec);
    /* pdata may be NULL before probe completes; guard before deref */
    struct tsec_reg_offsets_t *reg_off = pdata ? pdata->tsec_reg_offsets : NULL;

    if (reg_off != NULL) {
        /* consecutive queue ports are spaced 8 bytes apart */
        offset = reg_off->QUEUE_TAIL_0 + (r) * 8;
    }

    return offset;
}
/*
 * Return the SoC-specific byte offset of the message queue HEAD register
 * for message queue port @r, or U32_MAX if the device data or the
 * per-SoC register offset table has not been populated yet.
 */
u32 tsec_plat_msgq_head_r(u32 r)
{
    u32 offset = U32_MAX;
    struct tsec_device_data *pdata = platform_get_drvdata(g_tsec);
    /* pdata may be NULL before probe completes; guard before deref */
    struct tsec_reg_offsets_t *reg_off = pdata ? pdata->tsec_reg_offsets : NULL;

    if (reg_off != NULL) {
        /* consecutive queue ports are spaced 8 bytes apart */
        offset = reg_off->MSGQ_HEAD_0 + (r) * 8;
    }

    return offset;
}
/*
 * Return the SoC-specific byte offset of the message queue TAIL register
 * for message queue port @r, or U32_MAX if the device data or the
 * per-SoC register offset table has not been populated yet.
 */
u32 tsec_plat_msgq_tail_r(u32 r)
{
    u32 offset = U32_MAX;
    struct tsec_device_data *pdata = platform_get_drvdata(g_tsec);
    /* pdata may be NULL before probe completes; guard before deref */
    struct tsec_reg_offsets_t *reg_off = pdata ? pdata->tsec_reg_offsets : NULL;

    if (reg_off != NULL) {
        /* consecutive queue ports are spaced 8 bytes apart */
        offset = reg_off->MSGQ_TAIL_0 + (r) * 8;
    }

    return offset;
}
/*
 * Return the SoC-specific byte offset of the EMEM control (EMEMC) register
 * for EMEM port @r, or U32_MAX if the device data or the per-SoC register
 * offset table has not been populated yet.
 */
u32 tsec_plat_ememc_r(u32 r)
{
    u32 offset = U32_MAX;
    struct tsec_device_data *pdata = platform_get_drvdata(g_tsec);
    /* pdata may be NULL before probe completes; guard before deref */
    struct tsec_reg_offsets_t *reg_off = pdata ? pdata->tsec_reg_offsets : NULL;

    if (reg_off != NULL) {
        /* consecutive EMEM ports are spaced 8 bytes apart */
        offset = reg_off->EMEMC_0 + (r) * 8;
    }

    return offset;
}
/*
 * Return the SoC-specific byte offset of the EMEM data (EMEMD) register
 * for EMEM port @r, or U32_MAX if the device data or the per-SoC register
 * offset table has not been populated yet.
 */
u32 tsec_plat_ememd_r(u32 r)
{
    u32 offset = U32_MAX;
    struct tsec_device_data *pdata = platform_get_drvdata(g_tsec);
    /* pdata may be NULL before probe completes; guard before deref */
    struct tsec_reg_offsets_t *reg_off = pdata ? pdata->tsec_reg_offsets : NULL;

    if (reg_off != NULL) {
        /* consecutive EMEM ports are spaced 8 bytes apart */
        offset = reg_off->EMEMD_0 + (r) * 8;
    }

    return offset;
}

View File

@@ -7,7 +7,6 @@
#include "tsec_comms_plat.h"
#include "tsec_comms.h"
#include "tsec_comms_regs.h"
#include "tsec_comms_cmds.h"
#include "tsec_cmds.h"
@@ -222,8 +221,8 @@ static int ipc_txfr(u32 offset, u8 *buff, u32 size, bool read_msg)
return 0;
#else
u32 *buff32 = (u32 *)buff;
u32 ememc_offset = tsec_ememc_r(TSEC_EMEM_PORT);
u32 ememd_offset = tsec_ememd_r(TSEC_EMEM_PORT);
u32 ememc_offset = tsec_plat_ememc_r(TSEC_EMEM_PORT);
u32 ememd_offset = tsec_plat_ememd_r(TSEC_EMEM_PORT);
u32 num_words, num_bytes, reg32, i;
if (offset < TSEC_QUEUE_OFFSET_MAGIC) {
@@ -370,8 +369,8 @@ void tsec_comms_drain_msg(bool invoke_cb)
u8 tsec_msg[TSEC_MAX_MSG_SIZE];
bool shutdown_tsec = false;
msgq_head_reg = tsec_msgq_head_r(TSEC_MSG_QUEUE_PORT);
msgq_tail_reg = tsec_msgq_tail_r(TSEC_MSG_QUEUE_PORT);
msgq_head_reg = tsec_plat_msgq_head_r(TSEC_MSG_QUEUE_PORT);
msgq_tail_reg = tsec_plat_msgq_tail_r(TSEC_MSG_QUEUE_PORT);
msg_hdr = (struct RM_FLCN_QUEUE_HDR *)(tsec_msg);
init_msg_body = (struct RM_GSP_INIT_MSG_GSP_INIT *)
(tsec_msg + RM_FLCN_QUEUE_HDR_SIZE);
@@ -665,8 +664,8 @@ int tsec_comms_send_cmd(void *cmd, u32 queue_id,
sCmdq_start = 0x0;
}
cmdq_head_reg = tsec_cmdq_head_r(TSEC_CMD_QUEUE_PORT);
cmdq_tail_reg = tsec_cmdq_tail_r(TSEC_CMD_QUEUE_PORT);
cmdq_head_reg = tsec_plat_cmdq_head_r(TSEC_CMD_QUEUE_PORT);
cmdq_tail_reg = tsec_plat_cmdq_tail_r(TSEC_CMD_QUEUE_PORT);
for (i = 0; !sCmdq_start && i < TSEC_QUEUE_POLL_COUNT; i++) {
sCmdq_start = tsec_plat_reg_read(cmdq_tail_reg);

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Tegra TSEC Module Support
*/
@@ -130,4 +130,11 @@ static inline void tsec_plat_poweroff(void)
tsec_poweroff(&g_tsec->dev);
}
u32 tsec_plat_cmdq_head_r(u32 r);
u32 tsec_plat_cmdq_tail_r(u32 r);
u32 tsec_plat_msgq_head_r(u32 r);
u32 tsec_plat_msgq_tail_r(u32 r);
u32 tsec_plat_ememc_r(u32 r);
u32 tsec_plat_ememd_r(u32 r);
#endif /* TSEC_COMMS_PLAT_H */

View File

@@ -1,82 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
*
* Tegra TSEC Module Support
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
*
* <x>_o(void) : Returns the offset for element <x>.
*
* <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
*
* <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
*
* <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
* and masked to place it at field <y> of register <x>. This value
* can be |'d with others to produce a full register value for
* register <x>.
*
* <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
* value can be ~'d and then &'d to clear the value of field <y> for
* register <x>.
*
* <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
* to place it at field <y> of register <x>. This value can be |'d
* with others to produce a full register value for <x>.
*
* <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
* <x> value 'r' after being shifted to place its LSB at bit 0.
* This value is suitable for direct comparison with other unshifted
* values appropriate for use in field <y> of register <x>.
*
* <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
* field <y> of register <x>. This value is suitable for direct
* comparison with unshifted values appropriate for use in field <y>
* of register <x>.
*/
#ifndef TSEC_COMMS_REGS_H
#define TSEC_COMMS_REGS_H
static inline u32 tsec_cmdq_head_r(u32 r)
{
/* NV_PSEC_QUEUE_HEAD_0 */
return (0x1c00+(r)*8);
}
static inline u32 tsec_cmdq_tail_r(u32 r)
{
/* NV_PSEC_QUEUE_TAIL_0 */
return (0x1c04+(r)*8);
}
static inline u32 tsec_msgq_head_r(u32 r)
{
/* NV_PSEC_MSGQ_HEAD_0 */
return (0x1c80+(r)*8);
}
static inline u32 tsec_msgq_tail_r(u32 r)
{
/* NV_PSEC_MSGQ_TAIL_0 */
return (0x1c84+(r)*8);
}
static inline u32 tsec_ememc_r(u32 r)
{
/* NV_PSEC_EMEMC_0 */
return (0x1ac0+(r)*8);
}
static inline u32 tsec_ememd_r(u32 r)
{
/* NV_PSEC_EMEMD_0 */
return (0x1ac4+(r)*8);
}
#endif /* TSEC_COMMS_REGS_H */

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Tegra TSEC Module Support
*/
@@ -33,5 +33,6 @@
#include <linux/debugfs.h> /* for debugfs APIs */
#endif
#include <linux/sizes.h> /* for SZ_* size macros */
#include <vdso/bits.h> /* for BIT(x) macro */
#endif /* TSEC_LINUX_H */

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Tegra TSEC Module Support
*/
@@ -43,229 +43,85 @@
#ifndef TSEC_REGS_H
#define TSEC_REGS_H
#include "tsec_comms/tsec_comms_regs.h"
#include <linux/types.h>
static inline u32 tsec_thi_int_status_r(void)
/*
 * Per-SoC TSEC register offsets and field values.
 *
 * One instance is defined per SoC family (t23x_reg_offsets,
 * t264_reg_offsets) and attached to tsec_device_data so the common
 * driver code can address registers whose locations differ across chips.
 *
 * NOTE(review): despite the name, fields are a mix of byte offsets into
 * the register aperture (e.g. QUEUE_HEAD_0), register field masks/values
 * to be OR'd into writes (e.g. THI_SEC_CHLOCK, RISCV_BCR_DMACFG_LOCK_LOCKED)
 * and bit positions used as shift amounts (e.g. RISCV_CPUCTL_ACTIVE_STAT,
 * RISCV_BR_RETCODE_RESULT). The naming does not distinguish them; consult
 * the accessor helpers below for how each field is used.
 */
struct tsec_reg_offsets_t {
/* Command/message queue and EMEM port register base offsets
 * (port N register = base + N * 8, see tsec_plat_*_r helpers) */
u32 QUEUE_HEAD_0;
u32 QUEUE_TAIL_0;
u32 MSGQ_HEAD_0;
u32 MSGQ_TAIL_0;
u32 EMEMC_0;
u32 EMEMD_0;
/* THI interrupt status register and its clear value */
u32 THI_INT_STATUS_0;
u32 THI_INT_STATUS_CLR_0;
/* THI StreamID registers */
u32 THI_STREAMID0_0;
u32 THI_STREAMID1_0;
/* Clock-gating control registers */
u32 PRIV_BLOCKER_CTRL_CG1;
u32 RISCV_CG;
/* Interrupt set/clear/status/mask registers and their SWGEN0/1 bits */
u32 RISCV_IRQSCLR_0;
u32 RISCV_IRQSTAT_0;
u32 RISCV_IRQMSET_0;
u32 RISCV_IRQMCLR_0;
u32 RISCV_IRQSCLR_SWGEN0_SET;
u32 RISCV_IRQMCLR_SWGEN0_SET;
u32 RISCV_IRQMCLR_SWGEN1_SET;
u32 RISCV_IRQSTAT_SWGEN0;
u32 RISCV_IRQSTAT_SWGEN1;
u32 RISCV_IRQMSET_SWGEN0_SET;
/* THI security register and its channel-lock value */
u32 THI_SEC_0;
u32 THI_SEC_CHLOCK;
/* RISC-V boot control registers and field values */
u32 RISCV_BCR_CTRL;
u32 RISCV_BCR_CTRL_CORE_SELECT_RISCV;
u32 RISCV_BCR_DMAADDR_PKCPARAM_LO;
u32 RISCV_BCR_DMAADDR_PKCPARAM_HI;
u32 RISCV_BCR_DMAADDR_FMCCODE_LO;
u32 RISCV_BCR_DMAADDR_FMCCODE_HI;
u32 RISCV_BCR_DMAADDR_FMCDATA_LO;
u32 RISCV_BCR_DMAADDR_FMCDATA_HI;
u32 RISCV_BCR_DMACFG;
u32 RISCV_BCR_DMACFG_TARGET_LOCAL_FB;
u32 RISCV_BCR_DMACFG_LOCK_LOCKED;
u32 RISCV_BCR_DMACFG_SEC;
/* GSCID field mask (see tsec_riscv_bcr_dmacfg_sec_gscid_f) */
u32 RISCV_BCR_DMACFG_SEC_GSCID;
/* Mailbox registers used to pass the IPC carveout address */
u32 FALCON_MAILBOX0;
u32 FALCON_MAILBOX1;
/* CPU control register, start value, and active-state bit position/value */
u32 RISCV_CPUCTL;
u32 RISCV_CPUCTL_STARTCPU_TRUE;
u32 RISCV_CPUCTL_ACTIVE_STAT;
u32 RISCV_CPUCTL_ACTIVE_STAT_ACTIVE;
/* Boot ROM return-code register, result bit position, and pass value */
u32 RISCV_BR_RETCODE;
u32 RISCV_BR_RETCODE_RESULT;
u32 RISCV_BR_RETCODE_RESULT_PASS;
/* DMEM access port base offsets and log-buffer location in DMEM */
u32 FALCON_DMEMC_0;
u32 FALCON_DMEMD_0;
u32 DMEM_LOGBUF_OFFSET;
};
static inline u32 tsec_riscv_bcr_dmacfg_sec_gscid_f(u32 v, u32 offset)
{
/* NV_PSEC_THI_INT_STATUS_0 */
return 0x78;
}
static inline u32 tsec_thi_int_status_clr_f(void)
{
return 0x1;
return ((v & offset) << 16);
}
static inline u32 tsec_thi_streamid0_r(void)
static inline u32 tsec_riscv_cpuctl_active_stat_v(u32 r, u32 offset)
{
/* NV_PSEC_THI_STREAMID0_0 */
return 0x30;
return ((r >> offset) & 0x1);
}
static inline u32 tsec_thi_streamid1_r(void)
static inline u32 tsec_riscv_br_retcode_result_v(u32 r, u32 offset)
{
/* NV_PSEC_THI_STREAMID1_0 */
return 0x34;
return ((r >> offset) & 0x3);
}
static inline u32 tsec_priv_blocker_ctrl_cg1_r(void)
static inline u32 tsec_falcon_dmemc_r(u32 r, u32 offset)
{
/* NV_PSEC_PRIV_BLOCKER_CTRL_CG1 */
return 0x1e28;
return (offset + (r) * 8);
}
static inline u32 tsec_riscv_cg_r(void)
static inline u32 tsec_falcon_dmemd_r(u32 r, u32 offset)
{
/* NV_PSEC_RISCV_CG */
return 0x2398;
}
static inline u32 tsec_irqsclr_r(void)
{
/* NV_PSEC_FALCON_IRQSCLR_0 */
return 0x1004;
}
static inline u32 tsec_irqsclr_swgen0_set_f(void)
{
return 0x40;
}
static inline u32 tsec_irqstat_r(void)
{
/* NV_PSEC_FALCON_IRQSTAT_0 */
return 0x1008;
}
static inline u32 tsec_irqstat_swgen0(void)
{
return 0x40;
}
static inline u32 tsec_irqstat_swgen1(void)
{
return 0x80;
}
static inline u32 tsec_riscv_irqmset_r(void)
{
/* NV_PSEC_RISCV_IRQMSET_0 */
return 0x2520;
}
static inline u32 tsec_riscv_irqmset_swgen0_set_f(void)
{
return 0x40;
}
static inline u32 tsec_riscv_irqmclr_r(void)
{
/* NV_PSEC_RISCV_IRQMCLR_0 */
return 0x2524;
}
static inline u32 tsec_riscv_irqmclr_swgen0_set_f(void)
{
return 0x40;
}
static inline u32 tsec_riscv_irqmclr_swgen1_set_f(void)
{
return 0x80;
}
static inline u32 tsec_thi_sec_r(void)
{
/* NV_PSEC_THI_THI_SEC_0 */
return 0x38;
}
static inline u32 tsec_thi_sec_chlock_f(void)
{
return 0x100;
}
static inline u32 tsec_riscv_bcr_ctrl_r(void)
{
/* NV_PSEC_RISCV_BCR_CTRL */
return 0x2668;
}
static inline u32 tsec_riscv_bcr_ctrl_core_select_riscv_f(void)
{
return 0x10;
}
static inline u32 tsec_riscv_bcr_dmaaddr_pkcparam_lo_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_PKCPARAM_LO */
return 0x2670;
}
static inline u32 tsec_riscv_bcr_dmaaddr_pkcparam_hi_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_PKCPARAM_HI */
return 0x2674;
}
static inline u32 tsec_riscv_bcr_dmaaddr_fmccode_lo_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_FMCCODE_LO */
return 0x2678;
}
static inline u32 tsec_riscv_bcr_dmaaddr_fmccode_hi_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_FMCCODE_HI */
return 0x267c;
}
static inline u32 tsec_riscv_bcr_dmaaddr_fmcdata_lo_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_FMCDATA_LO */
return 0x2680;
}
static inline u32 tsec_riscv_bcr_dmaaddr_fmcdata_hi_r(void)
{
/* NV_PSEC_RISCV_BCR_DMAADDR_FMCDATA_HI */
return 0x2684;
}
static inline u32 tsec_riscv_bcr_dmacfg_r(void)
{
/* NV_PSEC_RISCV_BCR_DMACFG */
return 0x266c;
}
static inline u32 tsec_riscv_bcr_dmacfg_target_local_fb_f(void)
{
return 0x0;
}
static inline u32 tsec_riscv_bcr_dmacfg_lock_locked_f(void)
{
return 0x80000000;
}
static inline u32 tsec_riscv_bcr_dmacfg_sec_r(void)
{
/* NV_PSEC_RISCV_BCR_DMACFG_SEC */
return 0x2694;
}
static inline u32 tsec_riscv_bcr_dmacfg_sec_gscid_f(u32 v)
{
return (v & 0x1f) << 16;
}
static inline u32 tsec_falcon_mailbox0_r(void)
{
/* NV_PSEC_FALCON_MAILBOX0 */
return 0x1040;
}
static inline u32 tsec_falcon_mailbox1_r(void)
{
/* NV_PSEC_FALCON_MAILBOX1 */
return 0x1044;
}
static inline u32 tsec_riscv_cpuctl_r(void)
{
/* NV_PSEC_RISCV_CPUCTL */
return 0x2388;
}
static inline u32 tsec_riscv_cpuctl_startcpu_true_f(void)
{
return 0x1;
}
static inline u32 tsec_riscv_cpuctl_active_stat_v(u32 r)
{
return (r >> 7) & 0x1;
}
static inline u32 tsec_riscv_cpuctl_active_stat_active_v(void)
{
return 0x00000001;
}
static inline u32 tsec_riscv_br_retcode_r(void)
{
/* NV_PSEC_RISCV_BR_RETCODE */
return 0x265c;
}
static inline u32 tsec_riscv_br_retcode_result_v(u32 r)
{
return (r >> 0) & 0x3;
}
static inline u32 tsec_riscv_br_retcode_result_pass_v(void)
{
return 0x00000003;
}
static inline u32 tsec_falcon_dmemc_r(u32 r)
{
/* NV_PSEC_FALCON_DMEMC_0 */
return (0x11c0 + (r) * 8);
}
static inline u32 tsec_falcon_dmemd_r(u32 r)
{
/* NV_PSEC_FALCON_DMEMD_0 */
return (0x11c4 + (r) * 8);
}
static inline u32 tsec_dmem_logbuf_offset_f(void)
{
return 0x14000;
return (offset + (r) * 8);
}
#endif /* TSEC_REGS_H */

View File

@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Tegra TSEC Module Support
*/
#include <vdso/bits.h> /* for BIT(x) macro */
#include "tsec_regs.h"
/*
 * TSEC register offsets and field values for T23x-class SoCs.
 * Also reused by T239 (t239_tsec_data points at this table).
 */
struct tsec_reg_offsets_t t23x_reg_offsets = {
.QUEUE_HEAD_0 = 0x1c00,
.QUEUE_TAIL_0 = 0x1c04,
.MSGQ_HEAD_0 = 0x1c80,
.MSGQ_TAIL_0 = 0x1c84,
.EMEMC_0 = 0x1ac0,
.EMEMD_0 = 0x1ac4,
.THI_INT_STATUS_0 = 0x78,
.THI_INT_STATUS_CLR_0 = BIT(0),
.THI_STREAMID0_0 = 0x30,
.THI_STREAMID1_0 = 0x34,
.PRIV_BLOCKER_CTRL_CG1 = 0x1e28,
.RISCV_CG = 0x2398,
.RISCV_IRQSCLR_0 = 0x1004,
.RISCV_IRQSTAT_0 = 0x1008,
.RISCV_IRQMSET_0 = 0x2520,
.RISCV_IRQMCLR_0 = 0x2524,
.RISCV_IRQSCLR_SWGEN0_SET = BIT(6),
.RISCV_IRQSTAT_SWGEN0 = BIT(6),
.RISCV_IRQSTAT_SWGEN1 = BIT(7),
.RISCV_IRQMCLR_SWGEN0_SET = BIT(6),
.RISCV_IRQMCLR_SWGEN1_SET = BIT(7),
.RISCV_IRQMSET_SWGEN0_SET = BIT(6),
.THI_SEC_0 = 0x38,
.THI_SEC_CHLOCK = BIT(8),
.RISCV_BCR_CTRL = 0x2668,
.RISCV_BCR_CTRL_CORE_SELECT_RISCV = BIT(4),
.RISCV_BCR_DMAADDR_PKCPARAM_LO = 0x2670,
.RISCV_BCR_DMAADDR_PKCPARAM_HI = 0x2674,
.RISCV_BCR_DMAADDR_FMCCODE_LO = 0x2678,
.RISCV_BCR_DMAADDR_FMCCODE_HI = 0x267c,
.RISCV_BCR_DMAADDR_FMCDATA_LO = 0x2680,
.RISCV_BCR_DMAADDR_FMCDATA_HI = 0x2684,
.RISCV_BCR_DMACFG = 0x266c,
.RISCV_BCR_DMACFG_TARGET_LOCAL_FB = 0x0,
.RISCV_BCR_DMACFG_LOCK_LOCKED = BIT(31),
.RISCV_BCR_DMACFG_SEC = 0x2694, /* GSCID field mask, not an offset */
.RISCV_BCR_DMACFG_SEC_GSCID = 0x1f,
.FALCON_MAILBOX0 = 0x1040,
.FALCON_MAILBOX1 = 0x1044,
.RISCV_CPUCTL = 0x2388, /* bit position (shift amount), not an offset */
.RISCV_CPUCTL_STARTCPU_TRUE = BIT(0),
.RISCV_CPUCTL_ACTIVE_STAT = 7,
.RISCV_CPUCTL_ACTIVE_STAT_ACTIVE = 1, /* bit position (shift amount), not an offset */
.RISCV_BR_RETCODE = 0x265c,
.RISCV_BR_RETCODE_RESULT = 0,
.RISCV_BR_RETCODE_RESULT_PASS = 0x3,
.FALCON_DMEMC_0 = 0x11c0,
.FALCON_DMEMD_0 = 0x11c4,
.DMEM_LOGBUF_OFFSET = 0x14000,
};

View File

@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Tegra TSEC Module Support
*/
#include <vdso/bits.h> /* for BIT(x) macro */
#include "tsec_regs.h"
/*
 * TSEC register offsets and field values for T264.
 * Differs from t23x_reg_offsets only in the queue/EMEM register bases
 * (QUEUE_*, MSGQ_*, EMEM*), which moved from the 0x1xxx to the 0x4xxx
 * range; all other offsets and field values are identical.
 */
struct tsec_reg_offsets_t t264_reg_offsets = {
.QUEUE_HEAD_0 = 0x4c00,
.QUEUE_TAIL_0 = 0x4c04,
.MSGQ_HEAD_0 = 0x4c80,
.MSGQ_TAIL_0 = 0x4c84,
.EMEMC_0 = 0x4ac0,
.EMEMD_0 = 0x4ac4,
.THI_INT_STATUS_0 = 0x78,
.THI_INT_STATUS_CLR_0 = BIT(0),
.THI_STREAMID0_0 = 0x30,
.THI_STREAMID1_0 = 0x34,
.PRIV_BLOCKER_CTRL_CG1 = 0x1e28,
.RISCV_CG = 0x2398,
.RISCV_IRQSCLR_0 = 0x1004,
.RISCV_IRQSTAT_0 = 0x1008,
.RISCV_IRQMSET_0 = 0x2520,
.RISCV_IRQMCLR_0 = 0x2524,
.RISCV_IRQSCLR_SWGEN0_SET = BIT(6),
.RISCV_IRQSTAT_SWGEN0 = BIT(6),
.RISCV_IRQSTAT_SWGEN1 = BIT(7),
.RISCV_IRQMCLR_SWGEN0_SET = BIT(6),
.RISCV_IRQMCLR_SWGEN1_SET = BIT(7),
.RISCV_IRQMSET_SWGEN0_SET = BIT(6),
.THI_SEC_0 = 0x38,
.THI_SEC_CHLOCK = BIT(8),
.RISCV_BCR_CTRL = 0x2668,
.RISCV_BCR_CTRL_CORE_SELECT_RISCV = BIT(4),
.RISCV_BCR_DMAADDR_PKCPARAM_LO = 0x2670,
.RISCV_BCR_DMAADDR_PKCPARAM_HI = 0x2674,
.RISCV_BCR_DMAADDR_FMCCODE_LO = 0x2678,
.RISCV_BCR_DMAADDR_FMCCODE_HI = 0x267c,
.RISCV_BCR_DMAADDR_FMCDATA_LO = 0x2680,
.RISCV_BCR_DMAADDR_FMCDATA_HI = 0x2684,
.RISCV_BCR_DMACFG = 0x266c,
.RISCV_BCR_DMACFG_TARGET_LOCAL_FB = 0x0,
.RISCV_BCR_DMACFG_LOCK_LOCKED = BIT(31),
.RISCV_BCR_DMACFG_SEC = 0x2694, /* GSCID field mask, not an offset */
.RISCV_BCR_DMACFG_SEC_GSCID = 0x1f,
.FALCON_MAILBOX0 = 0x1040,
.FALCON_MAILBOX1 = 0x1044,
.RISCV_CPUCTL = 0x2388, /* bit position (shift amount), not an offset */
.RISCV_CPUCTL_STARTCPU_TRUE = BIT(0),
.RISCV_CPUCTL_ACTIVE_STAT = 7,
.RISCV_CPUCTL_ACTIVE_STAT_ACTIVE = 1, /* bit position (shift amount), not an offset */
.RISCV_BR_RETCODE = 0x265c,
.RISCV_BR_RETCODE_RESULT = 0,
.RISCV_BR_RETCODE_RESULT_PASS = 0x3,
.FALCON_DMEMC_0 = 0x11c0,
.FALCON_DMEMD_0 = 0x11c4,
.DMEM_LOGBUF_OFFSET = 0x14000,
};