diff --git a/drivers/video/tegra/tsec/tsec.c b/drivers/video/tegra/tsec/tsec.c
index 1f1218c5..21532d4a 100644
--- a/drivers/video/tegra/tsec/tsec.c
+++ b/drivers/video/tegra/tsec/tsec.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Tegra TSEC Module Support
  */
@@ -75,10 +75,18 @@ static void tsec_disable_clk(struct tsec_device_data *pdata)
 static void tsec_deassert_reset(struct tsec_device_data *pdata)
 {
 	reset_control_acquire(pdata->reset_control);
+	/* Does assert and then deassert */
 	reset_control_reset(pdata->reset_control);
 	reset_control_release(pdata->reset_control);
 }
 
+static void tsec_assert_reset(struct tsec_device_data *pdata)
+{
+	reset_control_acquire(pdata->reset_control);
+	reset_control_assert(pdata->reset_control);
+	reset_control_release(pdata->reset_control);
+}
+
 static void tsec_set_streamid_regs(struct device *dev,
 	struct tsec_device_data *pdata)
 {
@@ -275,8 +283,9 @@ int tsec_poweroff(struct device *dev)
 	pdata = dev_get_drvdata(dev);
 
 	if (pdata->power_on) {
-		tsec_prepare_poweroff(to_platform_device(dev));
+		tsec_assert_reset(pdata);
 		tsec_disable_clk(pdata);
+		tsec_prepare_poweroff(to_platform_device(dev));
 		pdata->power_on = false;
 	}
 
diff --git a/drivers/video/tegra/tsec/tsec_boot.c b/drivers/video/tegra/tsec/tsec_boot.c
index ff5d708d..1171aebf 100644
--- a/drivers/video/tegra/tsec/tsec_boot.c
+++ b/drivers/video/tegra/tsec/tsec_boot.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Tegra TSEC Module Support
  */
@@ -375,29 +375,38 @@ int tsec_finalize_poweron(struct platform_device *dev)
 		goto clean_up;
 	}
 	dev_dbg(&dev->dev, "IPCCO base=0x%llx size=0x%llx\n",
 		ipc_co_info.base, ipc_co_info.size);
-	ipc_co_va = ioremap(ipc_co_info.base, ipc_co_info.size);
-	if (!ipc_co_va) {
-		dev_err(&dev->dev, "IPC Carveout memory VA mapping failed");
-		err = -ENOMEM;
-		goto clean_up;
-	}
-	dev_dbg(&dev->dev, "IPCCO va=0x%llx pa=0x%llx\n",
-		(__force phys_addr_t)(ipc_co_va), page_to_phys(vmalloc_to_page(ipc_co_va)));
+
+	if (!(rv_data->ipc_mem_initialised)) {
+		ipc_co_va = ioremap(ipc_co_info.base, ipc_co_info.size);
+		if (!ipc_co_va) {
+			dev_err(&dev->dev, "IPC Carveout memory VA mapping failed");
+			err = -ENOMEM;
+			goto clean_up;
+		}
+		dev_dbg(&dev->dev, "IPCCO va=0x%llx pa=0x%llx\n",
+			(__force phys_addr_t)(ipc_co_va), page_to_phys(vmalloc_to_page(ipc_co_va)));
 #if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
-	ipc_co_iova = dma_map_page_attrs(&dev->dev, vmalloc_to_page(ipc_co_va),
-		offset_in_page(ipc_co_va), ipc_co_info.size, DMA_BIDIRECTIONAL, 0);
+		ipc_co_iova = dma_map_page_attrs(&dev->dev, vmalloc_to_page(ipc_co_va),
+			offset_in_page(ipc_co_va), ipc_co_info.size, DMA_BIDIRECTIONAL, 0);
#else
-	ipc_co_iova = dma_map_page(&dev->dev, vmalloc_to_page(ipc_co_va),
-		offset_in_page(ipc_co_va), ipc_co_info.size, DMA_BIDIRECTIONAL);
+		ipc_co_iova = dma_map_page(&dev->dev, vmalloc_to_page(ipc_co_va),
+			offset_in_page(ipc_co_va), ipc_co_info.size, DMA_BIDIRECTIONAL);
 #endif
-	err = dma_mapping_error(&dev->dev, ipc_co_iova);
-	if (err) {
-		dev_err(&dev->dev, "IPC Carveout memory IOVA mapping failed");
-		ipc_co_iova = 0;
-		err = -ENOMEM;
-		goto clean_up;
+		err = dma_mapping_error(&dev->dev, ipc_co_iova);
+		if (err) {
+			dev_err(&dev->dev, "IPC Carveout memory IOVA mapping failed");
+			ipc_co_iova = 0;
+			err = -ENOMEM;
+			goto clean_up;
+		}
+		dev_dbg(&dev->dev, "IPCCO iova=0x%llx\n", ipc_co_iova);
+		rv_data->ipc_co_va = ipc_co_va;
+		rv_data->ipc_co_iova = ipc_co_iova;
+		rv_data->ipc_mem_initialised = true;
+	} else {
+		ipc_co_va = rv_data->ipc_co_va;
+		ipc_co_iova = rv_data->ipc_co_iova;
 	}
-	dev_dbg(&dev->dev, "IPCCO iova=0x%llx\n", ipc_co_iova);
 
 	/* Lock channel so that non-TZ channel request can't write non-THI region */
 	tsec_writel(pdata, tsec_thi_sec_r(), tsec_thi_sec_chlock_f());
@@ -549,7 +558,7 @@ int tsec_prepare_poweroff(struct platform_device *dev)
 		dev_err(&dev->dev, "found interrupt number to be negative\n");
 		return -ENODATA;
 	}
-	disable_irq((unsigned int) pdata->irq);
+	disable_irq_nosync((unsigned int) pdata->irq);
 
 	return 0;
 }
diff --git a/drivers/video/tegra/tsec/tsec_boot.h b/drivers/video/tegra/tsec/tsec_boot.h
index e09598fe..07d162f4 100644
--- a/drivers/video/tegra/tsec/tsec_boot.h
+++ b/drivers/video/tegra/tsec/tsec_boot.h
@@ -1,6 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Tegra TSEC Module Support
  */
@@ -73,6 +73,9 @@ struct riscv_data {
 	dma_addr_t backdoor_img_iova;
 	u32 *backdoor_img_va;
 	size_t backdoor_img_size;
+	bool ipc_mem_initialised;
+	void __iomem *ipc_co_va;
+	dma_addr_t ipc_co_iova;
 };
 
 int tsec_kickoff_boot(struct platform_device *pdev);
diff --git a/drivers/video/tegra/tsec/tsec_cmds.h b/drivers/video/tegra/tsec/tsec_cmds.h
index 1357d363..9e6d012b 100644
--- a/drivers/video/tegra/tsec/tsec_cmds.h
+++ b/drivers/video/tegra/tsec/tsec_cmds.h
@@ -1,6 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Tegra TSEC Module Support
  */
@@ -10,11 +10,6 @@
 
 #include "tsec_comms/tsec_comms_cmds.h"
 
-struct RM_FLCN_U64 {
-	u32 lo;
-	u32 hi;
-};
-
 struct RM_UPROC_TEST_CMD_WR_PRIV_PROTECTED_REG {
 	u8 cmdType;
 	u8 regType;
@@ -81,11 +76,6 @@ struct RM_FLCN_HDCP_CMD_SET_OPTIONS {
 	u32 options;
 };
 
-struct RM_FLCN_MEM_DESC {
-	struct RM_FLCN_U64 address;
-	u32 params;
-};
-
 struct RM_FLCN_HDCP_CMD_VALIDATE_SRM {
 	u8 cmdType;
 	u8 reserved[3];
@@ -632,69 +622,6 @@ union RM_FLCN_HDCP_MSG {
 	struct RM_FLCN_HDCP_MSG_READ_SPRIME readSprime;
 };
 
-enum RM_FLCN_HDCP22_STATUS {
-	RM_FLCN_HDCP22_STATUS_ERROR_NULL = 0,
-	RM_FLCN_HDCP22_STATUS_ERROR_ENC_ACTIVE,
-	RM_FLCN_HDCP22_STATUS_ERROR_FLCN_BUSY,
-	RM_FLCN_HDCP22_STATUS_ERROR_TYPE1_LOCK_ACTIVE,
-	RM_FLCN_HDCP22_STATUS_ERROR_INIT_SESSION_FAILED,
-	RM_FLCN_HDCP22_STATUS_ERROR_AKE_INIT,
-	RM_FLCN_HDCP22_STATUS_ERROR_CERT_RX,
-	RM_FLCN_HDCP22_STATUS_TIMEOUT_CERT_RX,
-	RM_FLCN_HDCP22_STATUS_ERROR_MASTER_KEY_EXCHANGE,
-	RM_FLCN_HDCP22_STATUS_ERROR_H_PRIME,
-	RM_FLCN_HDCP22_STATUS_TIMEOUT_H_PRIME,
-	RM_FLCN_HDCP22_STATUS_ERROR_PAIRING,
-	RM_FLCN_HDCP22_STATUS_TIMEOUT_PAIRING,
-	RM_FLCN_HDCP22_STATUS_ERROR_LC_INIT,
-	RM_FLCN_HDCP22_STATUS_ERROR_L_PRIME,
-	RM_FLCN_HDCP22_STATUS_TIMEOUT_L_PRIME,
-	RM_FLCN_HDCP22_STATUS_ERROR_SKE_INIT,
-	RM_FLCN_HDCP22_STATUS_ERROR_SET_STREAM_TYPE,
-	RM_FLCN_HDCP22_STATUS_ERROR_EN_ENC,
-	RM_FLCN_HDCP22_STATUS_ERROR_RPTR_INIT,
-	RM_FLCN_HDCP22_STATUS_ERROR_RPTR_STREAM_MNT,
-	RM_FLCN_HDCP22_STATUS_TIMEOUT_RXID_LIST,
-	RM_FLCN_HDCP22_STATUS_ERROR_RPTR_MPRIME,
-	RM_FLCN_HDCP22_STATUS_TIMEOUT_MPRIME,
-	RM_FLCN_HDCP22_STATUS_ENC_ENABLED,
-	RM_FLCN_HDCP22_STATUS_INIT_SECONDARY_LINK,
-	RM_FLCN_HDCP22_STATUS_RPTR_STARTED,
-	RM_FLCN_HDCP22_STATUS_RPTR_DONE,
-	RM_FLCN_HDCP22_STATUS_REAUTH_REQ,
-	RM_FLCN_HDCP22_STATUS_MONITOR_OFF_SUCCESS,
-	RM_FLCN_HDCP22_STATUS_VALID_SRM,
-	RM_FLCN_HDCP22_STATUS_ERROR_INVALID_SRM,
-	RM_FLCN_HDCP22_STATUS_TEST_SE_SUCCESS,
-	RM_FLCN_HDCP22_STATUS_TEST_SE_FAILURE,
-	RM_FLCN_HDCP22_STATUS_WRITE_DP_ECF_SUCCESS,
-	RM_FLCN_HDCP22_STATUS_WRITE_DP_ECF_FAILURE,
-	RM_FLCN_HDCP22_STATUS_ERROR_NOT_SUPPORTED,
-	RM_FLCN_HDCP22_STATUS_ERROR_HPD,
-	RM_FLCN_HDCP22_STATUS_VALIDATE_STREAM_SUCCESS,
-	RM_FLCN_HDCP22_STATUS_ERROR_VALIDATE_STREAM_FAILURE,
-	RM_FLCN_HDCP22_STATUS_ERROR_STREAM_INVALID,
-	RM_FLCN_HDCP22_STATUS_ERROR_ILLEGAL_TIMEREVENT,
-	RM_FLCN_HDCP22_STATUS_FLUSH_TYPE_SUCCESS,
-	RM_FLCN_HDCP22_STATUS_FLUSH_TYPE_FAILURE,
-	RM_FLCN_HDCP22_STATUS_FLUSH_TYPE_LOCK_ACTIVE,
-	RM_FLCN_HDCP22_STATUS_FLUSH_TYPE_IN_PROGRESS,
-	RM_FLCN_HDCP22_STATUS_ERROR_REGISTER_RW,
-	RM_FLCN_HDCP22_STATUS_INVALID_ARGUMENT,
-	RM_FLCN_HDCP22_STATUS_ERROR_INTEGRITY_CHECK_FAILURE,
-	RM_FLCN_HDCP22_STATUS_ERROR_INTEGRITY_UPDATE_FAILURE,
-	RM_FLCN_HDCP22_STATUS_ERROR_DISABLE_WITH_LANECNT0,
-	RM_FLCN_HDCP22_STATUS_ERROR_START_TIMER,
-	RM_FLCN_HDCP22_STATUS_ERROR_HWDRM_WAR_AUTH_FAILURE,
-	RM_FLCN_HDCP22_STATUS_ERROR_START_SESSION,
-};
-
-struct RM_FLCN_HDCP22_MSG_GENERIC {
-	u8 msgType;
-	enum RM_FLCN_HDCP22_STATUS flcnStatus;
-	u8 streamType;
-};
-
 union RM_FLCN_HDCP22_MSG {
 	u8 msgType;
 	struct RM_FLCN_HDCP22_MSG_GENERIC msgGeneric;
diff --git a/drivers/video/tegra/tsec/tsec_comms/tsec_comms.c b/drivers/video/tegra/tsec/tsec_comms/tsec_comms.c
index e0f8acb3..ff42e225 100644
--- a/drivers/video/tegra/tsec/tsec_comms/tsec_comms.c
+++ b/drivers/video/tegra/tsec/tsec_comms/tsec_comms.c
@@ -1,11 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Tegra TSEC Module Support
  */
-
 #include "tsec_comms_plat.h"
 #include "tsec_comms.h"
 #include "tsec_comms_regs.h"
@@ -20,9 +19,9 @@
 #define TSEC_EMEM_SIZE (0x2000)
 #define TSEC_MAX_MSG_SIZE (128)
 
+/* If enabled, IPC cmd-msg exchange is done over the GSC carveout, else over EMEM */
 #define DO_IPC_OVER_GSC_CO (1)
 
-#ifdef DO_IPC_OVER_GSC_CO
 #define TSEC_BOOT_POLL_TIME_US (100000)
 #define TSEC_BOOT_POLL_INTERVAL_US (50)
 #define TSEC_BOOT_POLL_COUNT (TSEC_BOOT_POLL_TIME_US / TSEC_BOOT_POLL_INTERVAL_US)
@@ -33,10 +32,25 @@
 static u64 s_ipc_gscco_page_base;
 static u64 s_ipc_gscco_page_size;
 static u64 s_ipc_gscco_page_count;
 static u64 s_ipc_gscco_free_page_mask;
+static void *s_tsec_context_va;
+static u32 s_tsec_context_gscco_offset;
 
 struct TSEC_BOOT_INFO {
 	u32 bootFlag;
+	u32 bootWithContextFlag;
+	u32 bootContextOffset;
 };
-#endif
+
+enum TSEC_STATE {
+	TSEC_STATE_BOOTED = 0,
+	TSEC_STATE_SHUTDOWN,
+	TSEC_STATE_BOOTED_WITH_CONTEXT,
+};
+
+/*
+ * Current TSEC state
+ */
+enum TSEC_STATE s_tsec_state;
 
 /*
  * Locally cache init message so that same can be conveyed
@@ -45,6 +59,11 @@ struct TSEC_BOOT_INFO {
 static bool s_init_msg_rcvd;
 static u8 s_init_tsec_msg[TSEC_MAX_MSG_SIZE];
 
+/*
+ * Callback function to shut down external firmware
+ */
+shutdown_callback_func_t s_ext_fw_shutdown_cb;
+
 /*
  * Array of structs to register client callback function
  * for every sw unit/module within tsec
@@ -68,6 +87,100 @@ static int validate_cmd(struct RM_FLCN_QUEUE_HDR *cmd_hdr)
 	return 0;
 }
 
+static void tsec_comms_alloc_ctxt_memory(void)
+{
+	s_tsec_context_va = tsec_comms_alloc_mem_from_gscco(s_ipc_gscco_page_size,
+		&s_tsec_context_gscco_offset);
+}
+
+
+/* Sends a get-context command to retrieve the context before shutting down tsec */
+static int tsec_comms_send_getctxt_cmd(callback_func_t cb_func, void *cb_ctx)
+{
+	struct RM_FLCN_HDCP22_CMD_GET_CONTEXT cmd;
+
+	cmd.hdr.unitId = RM_GSP_UNIT_HDCP22WIRED;
+	cmd.hdr.size = sizeof(struct RM_FLCN_HDCP22_CMD_GET_CONTEXT);
+	cmd.cmdType = RM_FLCN_HDCP22_CMD_ID_GET_CONTEXT;
+	cmd.context.address.hi = 0U;
+	cmd.context.address.lo = s_tsec_context_gscco_offset;
+	cmd.context.params = s_ipc_gscco_page_size;
+	return tsec_comms_send_cmd(&cmd, 0, cb_func, cb_ctx);
+}
+
+/* Context retrieved, shutdown tsec now */
+static void tsec_comms_process_getctxt_msg(void)
+{
+	tsec_plat_poweroff();
+	s_tsec_state = TSEC_STATE_SHUTDOWN;
+}
+
+/* Power on tsec with context information */
+static void tsec_comms_poweron_with_context(void)
+{
+	struct TSEC_BOOT_INFO *bootInfo = (struct TSEC_BOOT_INFO *)(s_ipc_gscco_base);
+
+	bootInfo->bootWithContextFlag = 1;
+	bootInfo->bootContextOffset = s_tsec_context_gscco_offset;
+	tsec_plat_poweron();
+	s_tsec_state = TSEC_STATE_BOOTED_WITH_CONTEXT;
+}
+
+/* Clear flags after power on with context done */
+static void tsec_comms_poweron_with_context_done(void)
+{
+	struct TSEC_BOOT_INFO *bootInfo = (struct TSEC_BOOT_INFO *)(s_ipc_gscco_base);
+
+	bootInfo->bootWithContextFlag = 0;
+	plat_print(LVL_DBG, "%s: reset boot with context flag in GSCCO\n", __func__);
+	bootInfo->bootContextOffset = 0;
+}
+
+static PLAT_DEFINE_SEMA(s_shutdown_sema);
+
+static void signal_shutdown_done(void *ctx, void *msg)
+{
+	PLAT_UP_SEMA(&s_shutdown_sema);
+}
+
+/* No command should be pending when shutting down tsec */
+static bool is_cmd_pending(void)
+{
+	u8 unit_id;
+	bool cmd_pending = false;
+
+	tsec_plat_acquire_comms_mutex();
+	for (unit_id = 0; unit_id < RM_GSP_UNIT_END; unit_id++) {
+		if (s_callbacks[unit_id].cb_func) {
+			cmd_pending = true;
+			break;
+		}
+	}
+	tsec_plat_release_comms_mutex();
+	return cmd_pending;
+}
+
+/* Synchronously retrieves the tsec context and then shuts it down */
+int tsec_comms_shutdown(void)
+{
+	int status = -TSEC_EINVAL;
+
+	if (!is_cmd_pending()) {
+		status = tsec_comms_send_getctxt_cmd(signal_shutdown_done, NULL);
+		if (PLAT_DOWN_SEMA(&s_shutdown_sema))
+			return -EINTR;
+	}
+	return status;
+}
+EXPORT_SYMBOL_COMMS(tsec_comms_shutdown);
+
+/* External fw shutdown needed before we reboot tsec hdcp fw */
+void tsec_comms_register_shutdown_callback(shutdown_callback_func_t func)
+{
+	s_ext_fw_shutdown_cb = func;
+}
+EXPORT_SYMBOL_COMMS(tsec_comms_register_shutdown_callback);
+
 static int ipc_txfr(u32 offset, u8 *buff, u32 size, bool read_msg)
 {
 #ifdef DO_IPC_OVER_GSC_CO
@@ -200,7 +313,6 @@ static int ipc_read(u32 tail, u8 *pdst, u32 num_bytes)
 	return ipc_txfr(tail, pdst, num_bytes, true);
 }
 
-#ifdef DO_IPC_OVER_GSC_CO
 static u32 tsec_get_boot_flag(void)
 {
 	struct TSEC_BOOT_INFO *bootInfo = (struct TSEC_BOOT_INFO *)(s_ipc_gscco_base);
@@ -222,7 +334,6 @@ static void tsec_reset_boot_flag(void)
 	else
 		bootInfo->bootFlag = 0;
 }
-#endif
 
 static void invoke_init_cb(void *unused)
 {
@@ -255,6 +366,7 @@ void tsec_comms_drain_msg(bool invoke_cb)
 	callback_func_t cb_func = NULL;
 	void *cb_ctx = NULL;
 	u8 tsec_msg[TSEC_MAX_MSG_SIZE];
+	bool shutdown_tsec = false;
 
 	msgq_head_reg = tsec_msgq_head_r(TSEC_MSG_QUEUE_PORT);
 	msgq_tail_reg = tsec_msgq_tail_r(TSEC_MSG_QUEUE_PORT);
@@ -265,6 +377,10 @@ void tsec_comms_drain_msg(bool invoke_cb)
 	cached_init_msg_body = (struct RM_GSP_INIT_MSG_GSP_INIT *)
 		(s_init_tsec_msg + RM_FLCN_QUEUE_HDR_SIZE);
 
+	/* tsec reboot so read sMsgq_start again */
+	if (s_tsec_state == TSEC_STATE_BOOTED_WITH_CONTEXT)
+		sMsgq_start = 0x0;
+
 	for (i = 0; !sMsgq_start && i < TSEC_QUEUE_POLL_COUNT; i++) {
 		sMsgq_start = tsec_plat_reg_read(msgq_tail_reg);
 		if (!sMsgq_start)
@@ -310,7 +426,6 @@
 				init_msg_body->numQueues);
 			goto FAIL;
 		}
-#ifdef DO_IPC_OVER_GSC_CO
 		/* Poll for the Tsec booted flag and also reset it */
 		for (i = 0; i < TSEC_BOOT_POLL_COUNT; i++) {
 			if (tsec_get_boot_flag() == TSEC_BOOT_FLAG_MAGIC)
@@ -324,7 +439,7 @@
 			tsec_reset_boot_flag();
 			plat_print(LVL_DBG, "Tsec GSC-CO Boot Flag reset done\n");
 		}
-#endif
+
 		/* cache the init_msg */
 		memcpy(cached_init_msg_hdr, msg_hdr, RM_FLCN_QUEUE_HDR_SIZE);
 		memcpy(cached_init_msg_body, init_msg_body,
@@ -333,7 +448,8 @@
 		/* Invoke the callback and clear it */
 		tsec_plat_acquire_comms_mutex();
 		s_init_msg_rcvd = true;
-		if (invoke_cb) {
+		/* Don't invoke init cb for tsec reboots */
+		if (invoke_cb && (s_tsec_state == TSEC_STATE_BOOTED)) {
 			cb_func = s_callbacks[msg_hdr->unitId].cb_func;
 			cb_ctx = s_callbacks[msg_hdr->unitId].cb_ctx;
 			s_callbacks[msg_hdr->unitId].cb_func = NULL;
@@ -342,10 +458,21 @@
 		tsec_plat_release_comms_mutex();
 		if (cb_func && invoke_cb)
 			cb_func(cb_ctx, (void *)tsec_msg);
+		if (s_tsec_state == TSEC_STATE_BOOTED_WITH_CONTEXT)
+			tsec_comms_poweron_with_context_done();
 	} else if (msg_hdr->unitId < RM_GSP_UNIT_END) {
 		if (msg_hdr->unitId == RM_GSP_UNIT_HDCP22WIRED) {
-			plat_print(LVL_DBG, "msg received from hdcp22 unitId 0x%x\n",
-				msg_hdr->unitId);
+			struct RM_FLCN_HDCP22_MSG_GENERIC *hdcp22Msg =
+				(struct RM_FLCN_HDCP22_MSG_GENERIC *)
+				(tsec_msg + RM_FLCN_QUEUE_HDR_SIZE);
+			plat_print(LVL_DBG,
+				"msg received from hdcp22 unitId 0x%x msgType 0x%x\n",
+				msg_hdr->unitId, hdcp22Msg->msgType);
+			/* tsec context retrieved so can shutdown now */
+			if (hdcp22Msg->msgType == RM_FLCN_HDCP22_MSG_ID_GET_CONTEXT) {
+				shutdown_tsec = true;
+			}
+
 		} else if (msg_hdr->unitId == RM_GSP_UNIT_REWIND) {
 			tail = sMsgq_start;
 			tsec_plat_reg_write(msgq_tail_reg, tail);
@@ -365,7 +492,8 @@
 			s_callbacks[msg_hdr->unitId].cb_func = NULL;
 			s_callbacks[msg_hdr->unitId].cb_ctx = NULL;
 			tsec_plat_release_comms_mutex();
-			if (cb_func)
+			/* shutdown must be done first and then invoke the cb */
+			if (cb_func && !shutdown_tsec)
 				cb_func(cb_ctx, (void *)tsec_msg);
 		}
 	} else {
@@ -378,6 +506,15 @@ FAIL:
 	tail += ALIGN(msg_hdr->size, 4);
 	head = tsec_plat_reg_read(msgq_head_reg);
 	tsec_plat_reg_write(msgq_tail_reg, tail);
+
+	if (shutdown_tsec) {
+		/* First shutdown then invoke the callback */
+		tsec_comms_process_getctxt_msg();
+		if (cb_func)
+			cb_func(cb_ctx, (void *)tsec_msg);
+		/* tsec is shutdown, break out of loop, no more messages to process */
+		break;
+	}
 }
 
 EXIT:
@@ -386,32 +523,35 @@
 
 void tsec_comms_initialize(u64 ipc_co_va, u64 ipc_co_va_size)
 {
-#ifdef DO_IPC_OVER_GSC_CO
-	/* Set IPC CO Info before enabling Msg Interrupts from TSEC to CCPLEX */
-	s_ipc_gscco_base = ipc_co_va;
-	s_ipc_gscco_size = ipc_co_va_size;
+	static bool s_tsec_comms_initialised;
 
-	s_ipc_gscco_page_size = (64 * 1024);
+	if (!s_tsec_comms_initialised) {
+		/* Set IPC CO Info before enabling Msg Interrupts from TSEC to CCPLEX */
+		s_ipc_gscco_base = ipc_co_va;
+		s_ipc_gscco_size = ipc_co_va_size;
 
-	/* First Page Reserved */
-	if (s_ipc_gscco_size > s_ipc_gscco_page_size) {
-		s_ipc_gscco_page_count = (s_ipc_gscco_size -
-			s_ipc_gscco_page_size) / s_ipc_gscco_page_size;
-	} else {
-		s_ipc_gscco_page_count = 0;
-	}
-	s_ipc_gscco_page_base = s_ipc_gscco_page_count ?
+		s_ipc_gscco_page_size = (64 * 1024);
+
+		/* First Page Reserved */
+		if (s_ipc_gscco_size > s_ipc_gscco_page_size) {
+			s_ipc_gscco_page_count = (s_ipc_gscco_size -
+				s_ipc_gscco_page_size) / s_ipc_gscco_page_size;
+		} else {
+			s_ipc_gscco_page_count = 0;
+		}
+		s_ipc_gscco_page_base = s_ipc_gscco_page_count ?
 			s_ipc_gscco_base + s_ipc_gscco_page_size : 0;
-	s_ipc_gscco_free_page_mask = ~((u64)0);
-#else
-	(void)ipc_co_va;
-	(void)ipc_co_va_size;
-#endif
+		s_ipc_gscco_free_page_mask = ~((u64)0);
+		/* Allocate memory in GSCCO to save tsec context */
+		tsec_comms_alloc_ctxt_memory();
+		PLAT_INIT_SEMA(&s_shutdown_sema, 0);
+		s_tsec_state = TSEC_STATE_BOOTED;
+		s_tsec_comms_initialised = true;
+	}
 }
 
 void *tsec_comms_get_gscco_page(u32 page_number, u32 *gscco_offset)
 {
-#ifdef DO_IPC_OVER_GSC_CO
 	u8 *page_va;
 
 	if (!s_ipc_gscco_page_base || (page_number >= s_ipc_gscco_page_count)) {
@@ -429,16 +569,11 @@ void *tsec_comms_get_gscco_page(u32 page_number, u32 *gscco_offset)
 			(page_number * s_ipc_gscco_page_size));
 	}
 	return page_va;
-#else
-	plat_print(LVL_ERR, "%s: IPC over GSC-CO not enabled\n", __func__);
-	return NULL;
-#endif
 }
 EXPORT_SYMBOL_COMMS(tsec_comms_get_gscco_page);
 
 void *tsec_comms_alloc_mem_from_gscco(u32 size_in_bytes, u32 *gscco_offset)
 {
-#ifdef DO_IPC_OVER_GSC_CO
 	void *page_va;
 	u32 page_number;
 	u64 mask;
@@ -471,16 +606,11 @@ void *tsec_comms_alloc_mem_from_gscco(u32 size_in_bytes, u32 *gscco_offset)
 	s_ipc_gscco_free_page_mask &= ~(mask);
 
 	return page_va;
-#else
-	plat_print(LVL_ERR, "%s: IPC over GSC-CO not enabled\n", __func__);
-	return NULL;
-#endif
 }
 EXPORT_SYMBOL_COMMS(tsec_comms_alloc_mem_from_gscco);
 
 void tsec_comms_free_gscco_mem(void *page_va)
 {
-#ifdef DO_IPC_OVER_GSC_CO
 	u64 page_addr = (u64)page_va;
 	u64 gscco_page_start = s_ipc_gscco_page_base;
 	u64 gscco_page_end = s_ipc_gscco_page_base +
@@ -492,7 +622,6 @@
 	    (page_addr < gscco_page_end) &&
 	    (!(page_addr % s_ipc_gscco_page_size)))
 		s_ipc_gscco_free_page_mask |= ((u64)0x1 << page_number);
-#endif
 }
 EXPORT_SYMBOL_COMMS(tsec_comms_free_gscco_mem);
 
@@ -521,6 +650,18 @@ int tsec_comms_send_cmd(void *cmd, u32 queue_id,
 	if (queue_id != TSEC_CMD_QUEUE_PORT)
 		return -TSEC_EINVAL;
 
+	/* First shutdown external fw then restart tsec with hdcp fw */
+	if (s_tsec_state == TSEC_STATE_SHUTDOWN) {
+		if (s_ext_fw_shutdown_cb) {
+			int status = s_ext_fw_shutdown_cb();
+
+			if (status)
+				return status;
+		}
+		tsec_comms_poweron_with_context();
+		sCmdq_start = 0x0;
+	}
+
 	cmdq_head_reg = tsec_cmdq_head_r(TSEC_CMD_QUEUE_PORT);
 	cmdq_tail_reg = tsec_cmdq_tail_r(TSEC_CMD_QUEUE_PORT);
 
@@ -644,7 +785,6 @@ int tsec_comms_set_init_cb(callback_func_t cb_func, void *cb_ctx)
 		plat_print(LVL_DBG, "Init msg already received invoking callback\n");
 		tsec_plat_queue_work(invoke_init_cb, NULL);
 	}
-#ifdef DO_IPC_OVER_GSC_CO
 	else if (tsec_get_boot_flag() == TSEC_BOOT_FLAG_MAGIC) {
 		plat_print(LVL_DBG, "Doorbell missed tsec booted first, invoke init callback\n");
 		/* Interrupt missed as tsec booted first
@@ -656,7 +796,6 @@
 		/* Init message is drained now, hence queue the work item to invoke init callback*/
 		tsec_plat_queue_work(invoke_init_cb, NULL);
 	}
-#endif
 
 FAIL:
 	tsec_plat_release_comms_mutex();
diff --git a/drivers/video/tegra/tsec/tsec_comms/tsec_comms.h b/drivers/video/tegra/tsec/tsec_comms/tsec_comms.h
index 4db5eeed..dedab5ba 100644
--- a/drivers/video/tegra/tsec/tsec_comms/tsec_comms.h
+++ b/drivers/video/tegra/tsec/tsec_comms/tsec_comms.h
@@ -1,6 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Tegra TSEC Module Support
  */
@@ -107,6 +107,24 @@ void *tsec_comms_alloc_mem_from_gscco(u32 size_in_bytes, u32 *gscco_offset);
  */
 void tsec_comms_free_gscco_mem(void *page_va);
 
+
+/* @brief: Gets and saves the TSEC context and then shuts down TSEC
+ *
+ * params[out]: return value (0 for success)
+ */
+int tsec_comms_shutdown(void);
+
+
+typedef int (*shutdown_callback_func_t)(void);
+
+/* @brief: Registers a callback function to cleanly shut down external
+ * firmware running on tsec
+ *
+ * params[in]: func is the callback function to shut down external
+ * firmware running on tsec
+ */
+void tsec_comms_register_shutdown_callback(shutdown_callback_func_t func);
+
 /* -------- END -------- */
 
 #endif /* TSEC_COMMS_H */
diff --git a/drivers/video/tegra/tsec/tsec_comms/tsec_comms_cmds.h b/drivers/video/tegra/tsec/tsec_comms/tsec_comms_cmds.h
index 01b900fd..29b0fbf7 100644
--- a/drivers/video/tegra/tsec/tsec_comms/tsec_comms_cmds.h
+++ b/drivers/video/tegra/tsec/tsec_comms/tsec_comms_cmds.h
@@ -1,6 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Tegra TSEC Module Support
  */
@@ -25,6 +25,9 @@ struct RM_FLCN_QUEUE_HDR {
 
 #define RM_GSP_LOG_QUEUE_NUM (2)
 
+#define RM_FLCN_HDCP22_CMD_ID_GET_CONTEXT (8)
+#define RM_FLCN_HDCP22_MSG_ID_GET_CONTEXT (8)
+
 struct RM_GSP_INIT_MSG_GSP_INIT {
 	u8 msgType;
 	u8 numQueues;
@@ -40,4 +43,86 @@ struct RM_GSP_INIT_MSG_GSP_INIT {
 	u8 status;
 };
 
+struct RM_FLCN_U64 {
+	u32 lo;
+	u32 hi;
+};
+
+struct RM_FLCN_MEM_DESC {
+	struct RM_FLCN_U64 address;
+	u32 params;
+};
+
+
+struct RM_FLCN_HDCP22_CMD_GET_CONTEXT {
+	struct RM_FLCN_QUEUE_HDR hdr; //
diff --git a/drivers/video/tegra/tsec/tsec_comms/tsec_comms_plat.h b/drivers/video/tegra/tsec/tsec_comms/tsec_comms_plat.h
 #include
 #include
+#include
+#include
+#include "../tsec.h"
 
 extern struct platform_device *g_tsec;
 
@@ -38,6 +41,15 @@
 do { \
 	dev_err(&g_tsec->dev, fmt, ##__VA_ARGS__); \
 } while (0)
 
+#if defined(NV_DEFINE_SEMAPHORE_HAS_NUMBER_ARG)
+#define PLAT_DEFINE_SEMA(s) DEFINE_SEMAPHORE(s, 0)
+#else
+#define PLAT_DEFINE_SEMA(s) DEFINE_SEMAPHORE(s)
+#endif
+#define PLAT_INIT_SEMA(s, v) sema_init(s, v)
+#define PLAT_UP_SEMA(s) up(s)
+#define PLAT_DOWN_SEMA(s) down_interruptible(s)
+
 #elif __DCE_KERNEL__
 
 // Functions to be implemented by DCE
@@ -102,4 +114,20 @@ void tsec_plat_release_comms_mutex(void);
  */
 void tsec_plat_queue_work(tsec_plat_work_cb_t cb, void *ctx);
 
+/*
+ * @brief: Power On Tsec
+ */
+static inline void tsec_plat_poweron(void)
+{
+	tsec_poweron(&g_tsec->dev);
+}
+
+/*
+ * @brief: Power Off Tsec
+ */
+static inline void tsec_plat_poweroff(void)
+{
+	tsec_poweroff(&g_tsec->dev);
+}
+
 #endif /* TSEC_COMMS_PLAT_H */
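The context save/restore flow introduced by this patch is driven entirely through the two newly exported entry points in tsec_comms.h. A minimal usage sketch follows; it is hypothetical client code, not part of this patch, and the hdcp_client_* names and the include path are placeholders:

```c
/*
 * Hypothetical client of the new TSEC comms API (not part of this patch).
 * Illustrates the intended call order: register a callback that can cleanly
 * stop any external firmware running on TSEC, then call tsec_comms_shutdown()
 * so the GET_CONTEXT command is issued and TSEC is powered off. The next
 * tsec_comms_send_cmd() from this client transparently powers TSEC back on
 * with the saved context before the command is queued.
 */
#include "tsec_comms/tsec_comms.h"	/* placeholder include path */

/* Placeholder hook: quiesce external firmware loaded on TSEC */
static int hdcp_client_stop_external_fw(void)
{
	/* return 0 once the external firmware has been stopped */
	return 0;
}

static int hdcp_client_suspend(void)
{
	int err;

	/* Must be registered before the first shutdown request */
	tsec_comms_register_shutdown_callback(hdcp_client_stop_external_fw);

	/* Saves the HDCP context into the GSC carveout and powers TSEC off */
	err = tsec_comms_shutdown();
	if (err)
		return err;

	/*
	 * No explicit resume call is needed: the next tsec_comms_send_cmd()
	 * reboots TSEC with the saved context (TSEC_STATE_BOOTED_WITH_CONTEXT).
	 */
	return 0;
}
```

Note that tsec_comms_shutdown() refuses to run while any unit still has a pending command callback, so a client should only call it once its own command/response exchanges have drained.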