From a2163680aae7a9af3a40c31ff99c917f9f55b03d Mon Sep 17 00:00:00 2001 From: Akhilesh Reddy Khumbum Date: Tue, 14 Mar 2023 09:44:22 -0700 Subject: [PATCH] nvidia-oot: Add aon kmd module - This patch includes AON-KMD module as part of OOT kernel. Bug 3583580 Change-Id: I531731136189d76ebb4d3f2880e8f46913f390f4 Signed-off-by: Akhilesh Khumbum Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2870990 Reviewed-by: Robert Collins GVS: Gerrit_Virtual_Submit --- drivers/platform/tegra/Makefile | 3 +- drivers/platform/tegra/aon/Makefile | 22 + .../platform/tegra/aon/aon-hsp-mbox-client.c | 219 ++++ .../platform/tegra/aon/aon-ivc-dbg-messages.h | 160 +++ .../tegra/aon/include/aon-hsp-combo.h | 20 + drivers/platform/tegra/aon/include/aon-regs.h | 11 + drivers/platform/tegra/aon/include/aon.h | 164 +++ .../platform/tegra/aon/include/hw/hw_aon.h | 81 ++ drivers/platform/tegra/aon/tegra-aon-debug.c | 888 +++++++++++++++ drivers/platform/tegra/aon/tegra-aon-hsp.c | 66 ++ drivers/platform/tegra/aon/tegra-aon-mail.c | 423 +++++++ drivers/platform/tegra/aon/tegra-aon-module.c | 176 +++ drivers/platform/tegra/tegra-ivc.c | 1009 +++++++++++++++++ include/linux/tegra-aon.h | 14 + include/linux/tegra-ivc-instance.h | 51 + include/linux/tegra-ivc.h | 475 ++++++++ 16 files changed, 3781 insertions(+), 1 deletion(-) create mode 100644 drivers/platform/tegra/aon/Makefile create mode 100644 drivers/platform/tegra/aon/aon-hsp-mbox-client.c create mode 100644 drivers/platform/tegra/aon/aon-ivc-dbg-messages.h create mode 100644 drivers/platform/tegra/aon/include/aon-hsp-combo.h create mode 100644 drivers/platform/tegra/aon/include/aon-regs.h create mode 100644 drivers/platform/tegra/aon/include/aon.h create mode 100644 drivers/platform/tegra/aon/include/hw/hw_aon.h create mode 100644 drivers/platform/tegra/aon/tegra-aon-debug.c create mode 100644 drivers/platform/tegra/aon/tegra-aon-hsp.c create mode 100644 drivers/platform/tegra/aon/tegra-aon-mail.c create mode 100644 drivers/platform/tegra/aon/tegra-aon-module.c create mode 100644 drivers/platform/tegra/tegra-ivc.c create mode 100644 include/linux/tegra-aon.h create mode 100644 include/linux/tegra-ivc-instance.h create mode 100644 include/linux/tegra-ivc.h diff --git a/drivers/platform/tegra/Makefile b/drivers/platform/tegra/Makefile index 4eead8b0..59e08553 100644 --- a/drivers/platform/tegra/Makefile +++ b/drivers/platform/tegra/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. LINUXINCLUDE += -I$(srctree.nvidia-oot) @@ -31,3 +31,4 @@ obj-m += mc-utils/ obj-m += dce/ obj-m += psc/ obj-m += rtcpu/ +obj-m += aon/ diff --git a/drivers/platform/tegra/aon/Makefile b/drivers/platform/tegra/aon/Makefile new file mode 100644 index 00000000..580429aa --- /dev/null +++ b/drivers/platform/tegra/aon/Makefile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Always On Sensor Processing Engine code. 
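+# Builds the tegra234-aon loadable module for the Always On (AON/SPE) cluster from the objects listed below.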
+# +GCOV_PROFILE := y + +LINUXINCLUDE += -I$(srctree.nvidia-oot)/include +LINUXINCLUDE += -I$(srctree)/include +LINUXINCLUDE += -I$(srctree.nvidia-oot)/drivers/platform/tegra/aon/include + +ccflags-y += -Werror + +obj-m += tegra234-aon.o + +tegra234-aon-objs += \ + tegra-aon-hsp.o \ + ../tegra-ivc.o \ + tegra-aon-mail.o \ + tegra-aon-module.o \ + aon-hsp-mbox-client.o \ + tegra-aon-debug.o diff --git a/drivers/platform/tegra/aon/aon-hsp-mbox-client.c b/drivers/platform/tegra/aon/aon-hsp-mbox-client.c new file mode 100644 index 00000000..d4243fee --- /dev/null +++ b/drivers/platform/tegra/aon/aon-hsp-mbox-client.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define TX_BLOCK_PERIOD 20 + +struct aon_hsp_sm { + struct mbox_client client; + struct mbox_chan *chan; +}; + +struct aon_hsp { + struct aon_hsp_sm rx; + struct aon_hsp_sm tx; + struct device dev; + struct completion emptied; + void (*full_notify)(void *data, u32 value); + void *pdata; +}; + +static void aon_hsp_rx_full_notify(struct mbox_client *cl, void *data) +{ + struct aon_hsp *aonhsp = dev_get_drvdata(cl->dev); + u32 msg = (u32) (unsigned long) data; + + aonhsp->full_notify(aonhsp->pdata, msg); +} + +static void aon_hsp_tx_empty_notify(struct mbox_client *cl, void *data, + int empty_value) +{ + struct aon_hsp *aonhsp = dev_get_drvdata(cl->dev); + + (void)empty_value; /* ignored */ + + complete(&aonhsp->emptied); +} + +static int aon_hsp_probe(struct aon_hsp *aonhsp) +{ + struct device_node *np = aonhsp->dev.parent->of_node; + int err = -ENODEV; + + np = of_get_compatible_child(np, "nvidia,tegra-aon-hsp"); + if (np == NULL || !of_device_is_available(np)) { + of_node_put(np); + dev_err(&aonhsp->dev, "no hsp protocol \"%s\"\n", + "nvidia,tegra-aon-hsp"); + return -ENODEV; + } + + aonhsp->dev.of_node = np; + + aonhsp->rx.chan = mbox_request_channel_byname(&aonhsp->rx.client, + "ivc-rx"); + if (IS_ERR(aonhsp->rx.chan)) { + err = PTR_ERR(aonhsp->rx.chan); + goto fail; + } + + aonhsp->tx.chan = mbox_request_channel_byname(&aonhsp->tx.client, + "ivc-tx"); + if (IS_ERR(aonhsp->tx.chan)) { + err = PTR_ERR(aonhsp->tx.chan); + goto fail; + } + + dev_set_name(&aonhsp->dev, "%s:%s", dev_name(aonhsp->dev.parent), + aonhsp->dev.of_node->name); + dev_info(&aonhsp->dev, "probed\n"); + + return 0; + +fail: + if (err != -EPROBE_DEFER) { + dev_err(&aonhsp->dev, "%s: failed to obtain : %d\n", + np->name, err); + } + of_node_put(np); + return err; +} + +static const struct device_type aon_hsp_combo_dev_type = { + .name = "aon-hsp-protocol", +}; + +static void aon_hsp_combo_dev_release(struct device *dev) +{ + struct aon_hsp *aonhsp = container_of(dev, struct aon_hsp, dev); + + if (!IS_ERR_OR_NULL(aonhsp->rx.chan)) + mbox_free_channel(aonhsp->rx.chan); + if (!IS_ERR_OR_NULL(aonhsp->tx.chan)) + mbox_free_channel(aonhsp->tx.chan); + + of_node_put(dev->of_node); + kfree(aonhsp); +} + +static void aon_hsp_free(struct aon_hsp *aonhsp) +{ + if (IS_ERR_OR_NULL(aonhsp)) + return; + + if (dev_get_drvdata(&aonhsp->dev) != NULL) + device_unregister(&aonhsp->dev); + else + put_device(&aonhsp->dev); +} + +static struct aon_hsp *aon_hsp_create(struct device *dev, + void (*full_notify)(void *data, u32 value), + void *pdata) +{ + struct aon_hsp *aonhsp; + int ret = -EINVAL; + + aonhsp = kzalloc(sizeof(*aonhsp), 
GFP_KERNEL); + if (aonhsp == NULL) + return ERR_PTR(-ENOMEM); + + aonhsp->dev.parent = dev; + aonhsp->full_notify = full_notify; + aonhsp->pdata = pdata; + + init_completion(&aonhsp->emptied); + + aonhsp->dev.type = &aon_hsp_combo_dev_type; + aonhsp->dev.release = aon_hsp_combo_dev_release; + device_initialize(&aonhsp->dev); + + dev_set_name(&aonhsp->dev, "%s:%s", dev_name(dev), "hsp"); + + aonhsp->tx.client.tx_block = false; + aonhsp->rx.client.rx_callback = aon_hsp_rx_full_notify; + aonhsp->tx.client.tx_done = aon_hsp_tx_empty_notify; + aonhsp->rx.client.dev = aonhsp->tx.client.dev = &(aonhsp->dev); + + ret = aon_hsp_probe(aonhsp); + if (ret < 0) + goto fail; + + ret = device_add(&aonhsp->dev); + if (ret < 0) + goto fail; + + dev_set_drvdata(&aonhsp->dev, aonhsp); + + return aonhsp; + +fail: + aon_hsp_free(aonhsp); + return ERR_PTR(ret); +} + +bool tegra_aon_hsp_sm_tx_is_empty(struct tegra_aon *aon) +{ + struct aon_hsp *aonhsp = aon->hsp; + + return try_wait_for_completion(&aonhsp->emptied); +} + +int tegra_aon_hsp_sm_tx_write(struct tegra_aon *aon, u32 value) +{ + struct aon_hsp *aonhsp = aon->hsp; + + return mbox_send_message(aonhsp->tx.chan, + (void *) (unsigned long) value); +} + +int tegra_aon_hsp_sm_pair_request(struct tegra_aon *aon, + void (*full_notify)(void *data, u32 value), + void *pdata) +{ + struct device_node *hsp_node; + struct device *dev = aon->dev; + struct device_node *dn = dev->of_node; + + hsp_node = of_get_child_by_name(dn, "hsp"); + if (hsp_node == NULL) { + dev_err(dev, "No hsp child node for AON\n"); + return -ENODEV; + } + + aon->hsp = aon_hsp_create(dev, full_notify, pdata); + if (IS_ERR(aon->hsp)) { + aon->hsp = NULL; + return PTR_ERR(aon->hsp); + } + + return 0; +} + +void tegra_aon_hsp_sm_pair_free(struct tegra_aon *aon) +{ + if (!IS_ERR_OR_NULL(aon) && !IS_ERR_OR_NULL(aon->hsp)) { + aon_hsp_free(aon->hsp); + aon->hsp = NULL; + } +} diff --git a/drivers/platform/tegra/aon/aon-ivc-dbg-messages.h b/drivers/platform/tegra/aon/aon-ivc-dbg-messages.h new file mode 100644 index 00000000..669d69ca --- /dev/null +++ b/drivers/platform/tegra/aon/aon-ivc-dbg-messages.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef AON_IVC_DBG_MESSAGES_H +#define AON_IVC_DBG_MESSAGES_H + +#define AON_BOOT 0 +#define AON_PING 1 +#define AON_QUERY_TAG 2 +#define AON_MODS_CASE 3 +#define AON_MODS_RESULT 4 +#define AON_MODS_CRC 5 +#define AON_REQUEST_TYPE_MAX 5 + +#define AON_DBG_STATUS_OK 0 +#define AON_DBG_STATUS_ERROR 1 + +#define ADCC_NCHANS 6 + +/** + * @brief ping message request + * + * Structure that describes the ping message request. + * + * @challenge Arbitrarily chosen value. Response to ping is + * computed based on this value. + */ +struct aon_ping_req { + u32 challenge; +}; + +/** + * @brief response to the ping request + * + * Structure that describes the response to ping request. + * + * @reply Response to ping request with challenge left-shifted + * by 1 with carry-bit dropped. + */ +struct aon_ping_resp { + u32 reply; +}; + +/** + * @brief response to the query tag request + * + * This struct is used to extract the tag/firmware version of the AON. + * + * @tag array to store tag information. + */ +struct aon_query_tag_resp { + u8 tag[32]; +}; + +/** + * @brief mods adcc test request + * + * This struct is used to send the adcc configuration to perform the mods + * adcc test on the target. 
+ * + * @chans adcc channels bit mask for the mods adcc tests + * @sampling_dur sampling duration + * @avg_window averaging window duration + * @mode single shot or continuous mode + * @clk_src ADCC clock source + * @lb_data ADCC channels loopback data + */ +struct aon_mods_adcc_req { + u32 chans; + u32 sampling_dur; + u32 avg_window; + u32 mode; + u32 clk_src; + u64 lb_data; +}; + +/** + * @brief mods test request + * + * This struct is used to send the loop count to perform the mods test + * on the target. + * + * @mods_case mods test type: basic, mem2mem dma, io2mem dma + * @loops number of times mods test should be run + * @dma_chans dma channels bit mask for the mods dma tests + * @adccs mods adcc req config data + */ +struct aon_mods_req { + u32 mods_case; + u32 loops; + u32 dma_chans; + struct aon_mods_adcc_req adcc; +}; + +/** + * @brief mods test adcc response + * + * This struct is used to fetch the adcc channels data. + * Fields: + * + * @ch_data array containing all the channels samples + */ +struct aon_mods_adcc_resp { + uint32_t ch_data[ADCC_NCHANS]; +}; + +/** + * @brief mods test crc response + * + * This struct is used to send the CRC32 of the AON text section to the target. + * Fields: + * + * @crc CRC32 of the text section. + */ +struct aon_mods_crc_resp { + u32 crc; +}; + +/** + * @brief aon dbg request + * + * This struct encapsulates the type of the request and the reaonctive + * data associated with that request. + * + * @req_type indicates the type of the request + * @data data needed to send for the request + */ +struct aon_dbg_request { + u32 req_type; + union { + struct aon_ping_req ping_req; + struct aon_mods_req mods_req; + } data; +}; + +/** + * @brief aon dbg response + * + * This struct encapsulates the type of the response and the reaonctive + * data associated with that response. + * + * @resp_type indicates the type of the response. + * @status response in regard to the request i.e success/failure. + * In case of mods, this field is the result. + * @data data associated with the response to a request. + */ +struct aon_dbg_response { + u32 resp_type; + u32 status; + union { + struct aon_ping_resp ping_resp; + struct aon_query_tag_resp tag_resp; + struct aon_mods_crc_resp crc_resp; + struct aon_mods_adcc_resp adcc_resp; + } data; +}; + +#endif diff --git a/drivers/platform/tegra/aon/include/aon-hsp-combo.h b/drivers/platform/tegra/aon/include/aon-hsp-combo.h new file mode 100644 index 00000000..e704b36f --- /dev/null +++ b/drivers/platform/tegra/aon/include/aon-hsp-combo.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef INCLUDE_AON_HSP_COMBO_H +#define INCLUDE_AON_HSP_COMBO_H + +#include + +struct tegra_aon; + +int tegra_aon_hsp_sm_tx_write(struct tegra_aon *aon, u32 value); +int tegra_aon_hsp_sm_pair_request(struct tegra_aon *aon, + void (*full_notify)(void *data, u32 value), + void *pdata); +void tegra_aon_hsp_sm_pair_free(struct tegra_aon *aon); +bool tegra_aon_hsp_sm_tx_is_empty(struct tegra_aon *aon); + +#endif diff --git a/drivers/platform/tegra/aon/include/aon-regs.h b/drivers/platform/tegra/aon/include/aon-regs.h new file mode 100644 index 00000000..74889518 --- /dev/null +++ b/drivers/platform/tegra/aon/include/aon-regs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#ifndef AON_REGS_H +#define AON_REGS_H + +#include + +#endif /* AON_REGS_H */ diff --git a/drivers/platform/tegra/aon/include/aon.h b/drivers/platform/tegra/aon/include/aon.h new file mode 100644 index 00000000..b41a5daa --- /dev/null +++ b/drivers/platform/tegra/aon/include/aon.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef TEGRA_AON_H +#define TEGRA_AON_H + +#include +#include + +#include + +struct tegra_aon; + +#define NV(p) "nvidia," p + +enum smbox_msgs { + SMBOX_IVC_READY_MSG = 0xAAAA5555, + SMBOX_IVC_DBG_ENABLE = 0xAAAA6666, + SMBOX_IVC_NOTIFY = 0x0000AABB, +}; + +/** + * Declaration for struct aon_hsp that allows other structs to have a pointer + * to it without having to define it + */ +struct aon_hsp; + +/** + * struct tegra_aon - Primary OS independent tegra aon structure to hold aon + * cluster's and it's element's runtime info. + * Also encapsulates linux device aoncific info. + */ +struct tegra_aon { + /** + * @dev : Pointer to AON Cluster's Linux device struct. + */ + struct device *dev; + /** + * @hsp : Pointer to HSP instance used for communication with AON FW. + */ + struct aon_hsp *hsp; + /** + * @regs : Stores the cpu-mapped base address of AON Cluster. Will be + * used for MMIO transactions to AON elements. + */ + void __iomem *regs; + /** + * @ipcbuf : Pointer to the ipc buffer. + */ + void *ipcbuf; + /** + * @ipcbuf_size : Stores the ipc buffer size. + */ + size_t ipcbuf_size; + /** + * @boot_status - u32 variable to store aon's boot status. + */ + u32 boot_status; + /** + * @ivc_carveout_base_ss : Stores the shared semaphore index that holds + * the ipc carveout base address that AON uses to configure the AST. + */ + u32 ivc_carveout_base_ss; + /** + * @ivc_carveout_base_ss : Stores the shared semaphore index that holds + * the ipc carveout size that AON uses to configure the AST. + */ + u32 ivc_carveout_size_ss; + u32 ivc_tx_ss; + u32 ivc_rx_ss; + /** + * @ipcbuf_dma : DMA handle of the ipc buffer. + */ + dma_addr_t ipcbuf_dma; + /** + * @ast_config_complete - Boolean variable to store aon's ast + * configuration status. + */ + bool ast_config_complete; + /** + * @reset_complete - Boolean variable to store aon's reset status. + */ + bool reset_complete; + /** + * @load_complete - Boolean variable to store aon's fw load status. + */ + bool load_complete; + /** + * @log_level - Stores the log level for aon cpu prints. + */ + u32 log_level; +}; + +/** + * aon_set_ast_config_status - updates the current status of ast configuration. + * + * @aon : Pointer to tegra_aon struct. + * @val : true or false. + * + * Return : void + */ +static inline void aon_set_ast_config_status(struct tegra_aon *aon, bool val) +{ + aon->ast_config_complete = val; +} + +/** + * aon_set_aon_reset_status - updates the current status of aon reset. + * + * @aon : Pointer to tegra_aon struct. + * @val : true or false. + * + * Return : void + */ +static inline void aon_set_aon_reset_status(struct tegra_aon *aon, bool val) +{ + aon->reset_complete = val; +} + +/** + * aon_set_load_fw_status - updates the current status of fw loading. + * + * @aon : Pointer to tegra_aon struct. + * @val : true or false stating fw load is complete or incomplete reaonctiveely. 
+ * + * Return : void + */ +static inline void aon_set_load_fw_status(struct tegra_aon *aon, bool val) +{ + aon->load_complete = val; +} + +static inline void __iomem *aon_reg(const struct tegra_aon *aon, u32 reg) +{ + if (unlikely(aon->regs == NULL)) { + dev_err(aon->dev, "AON register space not IOMapped"); + return NULL; + } + + return (aon->regs + reg); +} + +#if defined(CONFIG_DEBUG_FS) +int tegra_aon_debugfs_create(struct tegra_aon *aon); +void tegra_aon_debugfs_remove(struct tegra_aon *aon); +#else +static inline int tegra_aon_debugfs_create(struct tegra_aon *aon) { return 0; } +static inline void tegra_aon_debugfs_remove(struct tegra_aon *aon) { return; } +#endif + +int tegra_aon_reset(struct tegra_aon *aon); +int tegra_aon_mail_init(struct tegra_aon *aon); +int tegra_aon_ipc_init(struct tegra_aon *aon); +void tegra_aon_mail_deinit(struct tegra_aon *aon); +int tegra_aon_ast_config(struct tegra_aon *aon); + +u32 tegra_aon_hsp_ss_status(const struct tegra_aon *aon, u32 ss); +void tegra_aon_hsp_ss_set(const struct tegra_aon *aon, u32 ss, u32 bits); +void tegra_aon_hsp_ss_clr(const struct tegra_aon *aon, u32 ss, u32 bits); +void tegra_aon_hsp_sm_write(const struct tegra_aon *aon, u32 sm, u32 value); + +#endif diff --git a/drivers/platform/tegra/aon/include/hw/hw_aon.h b/drivers/platform/tegra/aon/include/hw/hw_aon.h new file mode 100644 index 00000000..2ef450b9 --- /dev/null +++ b/drivers/platform/tegra/aon/include/hw/hw_aon.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef HW_AON_H +#define HW_AON_H + +static inline u32 pm_r5_ctrl_r(void) +{ + return 0x1f0040U; +} +static inline u32 pm_r5_ctrl_fwloaddone_halted_f(void) +{ + return 0x0U; +} +static inline u32 pm_r5_ctrl_fwloaddone_done_f(void) +{ + return 0x2U; +} + +static inline u32 evp_reset_addr_r(void) +{ + return 0x20U; +} + +static inline u32 hsp_ss_base_r(void) +{ + return 0x1a0000U; +} + +static inline u32 hsp_sm_base_r(void) +{ + return 0x160000U; +} + +static inline u32 ast_ast0_base_r(void) +{ + return 0x40000U; +} + +static inline u32 ast_ast1_base_r(void) +{ + return 0x50000U; +} + +#endif diff --git a/drivers/platform/tegra/aon/tegra-aon-debug.c b/drivers/platform/tegra/aon/tegra-aon-debug.c new file mode 100644 index 00000000..7cfb0d9f --- /dev/null +++ b/drivers/platform/tegra/aon/tegra-aon-debug.c @@ -0,0 +1,888 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "aon-ivc-dbg-messages.h" + +#define AON_REQUEST_MASK 0xF +#define AON_REQUESTS_TOTAL (AON_REQUEST_TYPE_MAX + 1) + +#define TX_BLOCK_PERIOD 20 + +#define AON_ROOT 0 +#define AON_MODS 1 +#define AON_ADCC 2 + +#define IVC_DBG_CH_FRAME_SIZE 64 +#define MODS_DEFAULT_VAL 0xFFFF +#define MODS_DEFAULT_LOOPS 10 +#define MODS_DEFAULT_CHANS 0x1 +#define MODS_BASIC_TEST 0x0 +#define MODS_DMA_MEM2MEM 0x1 +#define MODS_DMA_IO2MEM 0x2 +#define MODS_ADCC_SINGLE 0x3 +#define MODS_ADCC_CONT 0x4 + +#define ADCC_MODE_SINGLE_SHOT 1 +#define ADCC_MODE_CONT 0 +#define ADCC_CLK_SRC_OSC 0 +#define ADCC_CLK_SRC_PLLP 1 + +#define AONFW_BOOT 1 + +struct tegra_aondbg { + struct device *dev; + struct tegra_aon *aon; + struct mbox_client cl; + struct mbox_chan *mbox; + struct dentry *aon_root; + bool supports_adcc; +}; + +static struct tegra_aondbg aondbg_dev; + +struct aon_dbgfs_node { + char *name; + u32 id; + u8 pdr_id; + mode_t mode; + struct completion *wait_on; + const struct file_operations *fops; + char data[IVC_DBG_CH_FRAME_SIZE]; +}; + +struct dbgfs_dir { + const char *name; + struct dentry *dir; + struct dbgfs_dir *parent; +}; + +static struct dbgfs_dir aon_dbgfs_dirs[] = { + {.name = "aon", .parent = NULL}, + {.name = "aon_mods", .parent = &aon_dbgfs_dirs[AON_ROOT]}, + {.name = "adcc", .parent = &aon_dbgfs_dirs[AON_MODS]} +}; + +static u32 mods_result = MODS_DEFAULT_VAL; +static u32 mods_dma_chans = MODS_DEFAULT_CHANS; +static u32 mods_adcc_chans = MODS_DEFAULT_CHANS; +static u32 mods_case = MODS_BASIC_TEST; +static u32 mods_loops = MODS_DEFAULT_LOOPS; +static u32 mods_adcc_smpl_dur = 16; +static u32 mods_adcc_avg_window = 1024; +static u32 mods_adcc_clk_src = ADCC_CLK_SRC_OSC; +static u64 mods_adcc_chans_data; +static u64 mods_adcc_dac_lb_data; + +static unsigned int completion_timeout = 50; + +static DEFINE_MUTEX(aon_mutex); +static DEFINE_SPINLOCK(mods); +static DEFINE_SPINLOCK(completion); +static DEFINE_SPINLOCK(loops); +static DEFINE_SPINLOCK(mods_dma); +static DEFINE_SPINLOCK(mods_adcc); + +static struct aon_dbgfs_node aon_nodes[]; + +static void set_mods_result(u32 result) +{ + spin_lock(&mods); + mods_result = result; + spin_unlock(&mods); +} + +static unsigned int get_mods_result(void) +{ + u32 val; + + spin_lock(&mods); + val = mods_result; + spin_unlock(&mods); + + return val; +} + +static void set_completion_timeout(unsigned int timeout) +{ + spin_lock(&completion); + 
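+	/* timeout (ms) used by aon_create_ivc_dbg_req() while waiting for an IVC debug response */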
completion_timeout = timeout; + spin_unlock(&completion); +} + +static unsigned int get_completion_timeout(void) +{ + unsigned int val; + + spin_lock(&completion); + val = completion_timeout; + spin_unlock(&completion); + + return val; +} + +static void set_mods_loops(u32 count) +{ + spin_lock(&loops); + mods_loops = count; + spin_unlock(&loops); +} + +static unsigned int get_mods_loops(void) +{ + unsigned int val; + + spin_lock(&loops); + val = mods_loops; + spin_unlock(&loops); + + return val; +} + +static void set_mods_dma_chans(u32 dma_chans) +{ + spin_lock(&mods_dma); + mods_dma_chans = dma_chans; + spin_unlock(&mods_dma); +} + +static unsigned int get_mods_dma_chans(void) +{ + unsigned int val; + + spin_lock(&mods_dma); + val = mods_dma_chans; + spin_unlock(&mods_dma); + + return val; +} + +static void set_mods_adcc_chans(u64 chans) +{ + spin_lock(&mods_adcc); + mods_adcc_chans = (u32) (chans & 0xFFFFFFFFUL); + spin_unlock(&mods_adcc); +} + +static unsigned int get_mods_adcc_chans(void) +{ + unsigned int val; + + spin_lock(&mods_adcc); + val = mods_adcc_chans; + spin_unlock(&mods_adcc); + + return val; +} + +static void set_mods_adcc_smpl_dur(u64 dur) +{ + spin_lock(&mods_adcc); + mods_adcc_smpl_dur = (u32)(dur & 0xFFFFFFFFUL); + spin_unlock(&mods_adcc); +} + +static unsigned int get_mods_adcc_smpl_dur(void) +{ + unsigned int val; + + spin_lock(&mods_adcc); + val = mods_adcc_smpl_dur; + spin_unlock(&mods_adcc); + + return val; +} + +static void set_mods_adcc_avg_window(u64 avg) +{ + spin_lock(&mods_adcc); + mods_adcc_avg_window = (u32) (avg & 0xFFFFFFFFUL); + spin_unlock(&mods_adcc); +} + +static unsigned int get_mods_adcc_avg_window(void) +{ + unsigned int val; + + spin_lock(&mods_adcc); + val = mods_adcc_avg_window; + spin_unlock(&mods_adcc); + + return val; +} + +static void set_mods_adcc_clk_src(u64 src) +{ + spin_lock(&mods_adcc); + mods_adcc_clk_src = (u32)(src & 0xFFFFFFFFUL); + spin_unlock(&mods_adcc); +} + +static u64 get_mods_adcc_chans_data(void) +{ + u32 val; + + spin_lock(&mods_adcc); + val = (u32)(mods_adcc_chans_data & 0xFFFFFFFFUL); + spin_unlock(&mods_adcc); + + return val; +} + +static void set_mods_adcc_chans_data(u64 adcc_data) +{ + spin_lock(&mods_adcc); + mods_adcc_chans_data = adcc_data; + spin_unlock(&mods_adcc); +} + +static u64 get_mods_adcc_dac_lb_data(void) +{ + u64 val; + + spin_lock(&mods_adcc); + val = mods_adcc_dac_lb_data; + spin_unlock(&mods_adcc); + + return val; +} + +static void set_mods_adcc_dac_lb_data(u64 lb_data) +{ + spin_lock(&mods_adcc); + mods_adcc_dac_lb_data = lb_data; + spin_unlock(&mods_adcc); +} + +static unsigned int get_mods_adcc_clk_src(void) +{ + unsigned int val; + + spin_lock(&mods_adcc); + val = mods_adcc_clk_src; + spin_unlock(&mods_adcc); + + return val; +} + +static void aon_create_mods_req(struct aon_dbg_request *req, u32 data) +{ + switch (data) { + case MODS_BASIC_TEST: + break; + case MODS_DMA_MEM2MEM: + case MODS_DMA_IO2MEM: + req->data.mods_req.dma_chans = get_mods_dma_chans(); + break; + case MODS_ADCC_SINGLE: + req->data.mods_req.adcc.chans = get_mods_adcc_chans(); + req->data.mods_req.adcc.mode = ADCC_MODE_SINGLE_SHOT; + req->data.mods_req.adcc.sampling_dur = get_mods_adcc_smpl_dur(); + req->data.mods_req.adcc.avg_window = get_mods_adcc_avg_window(); + req->data.mods_req.adcc.clk_src = get_mods_adcc_clk_src(); + req->data.mods_req.adcc.lb_data = get_mods_adcc_dac_lb_data(); + break; + case MODS_ADCC_CONT: + req->data.mods_req.adcc.chans = get_mods_adcc_chans(); + req->data.mods_req.adcc.mode = ADCC_MODE_CONT; + 
req->data.mods_req.adcc.sampling_dur = get_mods_adcc_smpl_dur(); + req->data.mods_req.adcc.avg_window = get_mods_adcc_avg_window(); + req->data.mods_req.adcc.clk_src = get_mods_adcc_clk_src(); + req->data.mods_req.adcc.lb_data = get_mods_adcc_dac_lb_data(); + break; + } +} + +static struct aon_dbg_response *aon_create_ivc_dbg_req(u32 request, + u32 flag, + u32 data) +{ + struct tegra_aondbg *aondbg = &aondbg_dev; + struct aon_dbg_request req; + struct aon_dbg_response *resp; + struct tegra_aon_mbox_msg msg; + int ret = 0; + unsigned int timeout = 0; + + if (aondbg->dev == NULL) + return (void *)-EPROBE_DEFER; + + req.req_type = request & AON_REQUEST_MASK; + switch (req.req_type) { + case AON_MODS_CASE: + req.data.mods_req.loops = get_mods_loops(); + req.data.mods_req.mods_case = data; + aon_create_mods_req(&req, data); + break; + case AON_MODS_CRC: + case AON_PING: + break; + case AON_QUERY_TAG: + break; + default: + dev_err(aondbg->dev, "Invalid aon dbg request\n"); + return ERR_PTR(-EINVAL); + } + + msg.length = sizeof(struct aon_dbg_request); + msg.data = (void *)&req; + ret = mbox_send_message(aondbg->mbox, (void *)&msg); + if (ret < 0) { + dev_err(aondbg->dev, "mbox_send_message failed\n"); + return ERR_PTR(ret); + } + timeout = get_completion_timeout(); + ret = wait_for_completion_timeout(aon_nodes[req.req_type].wait_on, + msecs_to_jiffies(timeout)); + if (!ret) { + dev_err(aondbg->dev, "No response\n"); + return ERR_PTR(-ETIMEDOUT); + } + resp = (void *)aon_nodes[req.req_type].data; + if (resp->resp_type > AON_REQUEST_TYPE_MAX) { + dev_err(aondbg->dev, "Invalid aon dbg response\n"); + return ERR_PTR(-EIO); + } + if (resp->status != AON_DBG_STATUS_OK) { + dev_err(aondbg->dev, "Request failed\n"); + return ERR_PTR(-EIO); + } + + return resp; +} + +static int aon_boot_show(void *data, u64 *val) +{ + struct tegra_aon *aon = aondbg_dev.aon; + + if (aon->ast_config_complete && + aon->reset_complete && aon->load_complete) + *val = 1; + + return 0; +} + +static int aon_boot_store(void *data, u64 val) +{ + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_boot_fops, aon_boot_show, + aon_boot_store, "%lld\n"); + +static ssize_t aon_get_fwtag(u32 context, char **data) +{ + struct aon_dbg_response *resp; + int ret = 0; + + *data = NULL; + resp = aon_create_ivc_dbg_req(context, READ, 0); + if (IS_ERR(resp)) + ret = PTR_ERR(resp); + else + *data = resp->data.tag_resp.tag; + + return ret; +} + +static int aon_tag_show(struct seq_file *file, void *param) +{ + char *data; + int ret; + + mutex_lock(&aon_mutex); + ret = aon_get_fwtag(*(u32 *)file->private, &data); + if (ret >= 0) + seq_printf(file, "%s\n", data); + mutex_unlock(&aon_mutex); + + return ret; +} + +static ssize_t aon_version_show(struct device *dev, char *buf, size_t size) +{ + char *data; + int ret = 0; + + mutex_lock(&aon_mutex); + ret = aon_get_fwtag(AON_QUERY_TAG, &data); + if (ret < 0) + ret = snprintf(buf, size, "error retrieving version: %d", ret); + else + ret = snprintf(buf, size, "%s", data ? 
data : "unavailable"); + mutex_unlock(&aon_mutex); + + return ret; +} + + +static int aon_tag_open(struct inode *inode, struct file *file) +{ + return single_open(file, aon_tag_show, inode->i_private); +} + +static const struct file_operations aon_tag_fops = { + .open = aon_tag_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +static int __aon_do_ping(void) +{ + struct aon_dbg_response *resp; + int challenge = 8; + int ret = 0; + + resp = aon_create_ivc_dbg_req(AON_PING, READ, challenge); + if (IS_ERR(resp)) { + ret = PTR_ERR(resp); + goto exit; + } + + ret = resp->data.ping_resp.reply; + if (ret != challenge * 2) + ret = -EINVAL; + +exit: + return ret; +} + +static int aon_ping_show(void *data, u64 *val) +{ + ktime_t tm; + int ret = 0; + + mutex_lock(&aon_mutex); + tm = ktime_get(); + ret = __aon_do_ping(); + tm = ktime_sub(ktime_get(), tm); + *val = (ret < 0) ? tm : ret; + mutex_unlock(&aon_mutex); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_ping_fops, aon_ping_show, + NULL, "%lld\n"); + +static int aon_mods_loops_show(void *data, u64 *val) +{ + *val = get_mods_loops(); + + return 0; +} + +static int aon_mods_loops_store(void *data, u64 val) +{ + set_mods_loops(val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_loops_fops, aon_mods_loops_show, + aon_mods_loops_store, "%lld\n"); + +static int aon_mods_case_show(void *data, u64 *val) +{ + *val = mods_case; + + return 0; +} + +static int aon_mods_case_store(void *data, u64 val) +{ + struct aon_dbg_response *resp; + int ret = 0; + int i; + u64 adcc_data = 0U; + u64 ch_data = 0U; + + if (val > MODS_ADCC_CONT) { + ret = -1; + dev_err(aondbg_dev.dev, "Invalid mods case\n"); + goto out; + } + + if (val > MODS_DMA_IO2MEM && val <= MODS_ADCC_CONT) { + if (!aondbg_dev.supports_adcc) { + ret = -1; + dev_err(aondbg_dev.dev, "no adcc on this platform\n"); + goto out; + } + } + + mutex_lock(&aon_mutex); + set_mods_result(MODS_DEFAULT_VAL); + resp = aon_create_ivc_dbg_req(*(u32 *)data, WRITE, val); + if (IS_ERR(resp)) { + ret = PTR_ERR(resp); + } else { + set_mods_result(resp->status); + if (val == MODS_ADCC_SINGLE || val == MODS_ADCC_CONT) { + adcc_data = 0U; + for (i = 0; i < ADCC_NCHANS; i++) { + ch_data = resp->data.adcc_resp.ch_data[i]; + adcc_data |= (ch_data & 0x3FFU) << (i * 10); + } + set_mods_adcc_chans_data(adcc_data); + } + } + mutex_unlock(&aon_mutex); + +out: + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_case_fops, aon_mods_case_show, + aon_mods_case_store, "%lld\n"); + +static int aon_mods_result_show(void *data, u64 *val) +{ + *val = get_mods_result(); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_result_fops, aon_mods_result_show, + NULL, "%lld\n"); + +static int aon_mods_crc_show(void *data, u64 *val) +{ + struct aon_dbg_response *resp; + int ret = 0; + + mutex_lock(&aon_mutex); + resp = aon_create_ivc_dbg_req(*(u32 *)data, READ, 0); + if (IS_ERR(resp)) + ret = PTR_ERR(resp); + else + *val = resp->data.crc_resp.crc; + mutex_unlock(&aon_mutex); + + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_crc_fops, aon_mods_crc_show, + NULL, "%llx\n"); + +static int aon_mods_dma_show(void *data, u64 *val) +{ + *val = get_mods_dma_chans(); + + return 0; +} + +static int aon_mods_dma_store(void *data, u64 val) +{ + set_mods_dma_chans(val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_dma_fops, aon_mods_dma_show, + aon_mods_dma_store, "%lld\n"); + +static int aon_mods_adcc_chans_show(void *data, u64 *val) +{ + *val = get_mods_adcc_chans(); + + return 0; +} + +static int 
aon_mods_adcc_chans_store(void *data, u64 val) +{ + set_mods_adcc_chans(val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_adcc_chans_fops, aon_mods_adcc_chans_show, + aon_mods_adcc_chans_store, "%lld\n"); + +static int aon_mods_adcc_smpl_show(void *data, u64 *val) +{ + *val = get_mods_adcc_smpl_dur(); + + return 0; +} + +static int aon_mods_adcc_smpl_store(void *data, u64 val) +{ + set_mods_adcc_smpl_dur(val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_adcc_smpl_fops, aon_mods_adcc_smpl_show, + aon_mods_adcc_smpl_store, "%lld\n"); + +static int aon_mods_adcc_avg_show(void *data, u64 *val) +{ + *val = get_mods_adcc_avg_window(); + + return 0; +} + +static int aon_mods_adcc_avg_store(void *data, u64 val) +{ + set_mods_adcc_avg_window(val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_adcc_avg_fops, aon_mods_adcc_avg_show, + aon_mods_adcc_avg_store, "%lld\n"); + +static int aon_mods_adcc_clk_show(void *data, u64 *val) +{ + *val = get_mods_adcc_clk_src(); + + return 0; +} + +static int aon_mods_adcc_clk_store(void *data, u64 val) +{ + set_mods_adcc_clk_src(val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_adcc_clk_fops, aon_mods_adcc_clk_show, + aon_mods_adcc_clk_store, "%lld\n"); + +static int aon_mods_adcc_data_show(void *data, u64 *val) +{ + *val = get_mods_adcc_chans_data(); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_adcc_data_fops, aon_mods_adcc_data_show, + NULL, "%lld\n"); + +static int aon_mods_adcc_dac_show(void *data, u64 *val) +{ + *val = get_mods_adcc_dac_lb_data(); + + return 0; +} + +static int aon_mods_adcc_dac_store(void *data, u64 val) +{ + set_mods_adcc_dac_lb_data(val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_mods_adcc_dac_fops, aon_mods_adcc_dac_show, + aon_mods_adcc_dac_store, "%lld\n"); + +static int aon_timeout_show(void *data, u64 *val) +{ + *val = get_completion_timeout(); + + return 0; +} + +static int aon_timeout_store(void *data, u64 val) +{ + set_completion_timeout(val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(aon_timeout_fops, aon_timeout_show, + aon_timeout_store, "%lld\n"); + +static struct aon_dbgfs_node aon_nodes[] = { + {.name = "boot", .id = AON_BOOT, .pdr_id = AON_ROOT, + .mode = 0644, .fops = &aon_boot_fops, }, + {.name = "loops", .pdr_id = AON_MODS, + .mode = 0644, .fops = &aon_mods_loops_fops, }, + {.name = "result", .id = AON_MODS_RESULT, .pdr_id = AON_MODS, + .mode = 0444, .fops = &aon_mods_result_fops,}, + {.name = "crc", .id = AON_MODS_CRC, .pdr_id = AON_MODS, + .mode = 0444, .fops = &aon_mods_crc_fops,}, + {.name = "case", .id = AON_MODS_CASE, .pdr_id = AON_MODS, + .mode = 0644, .fops = &aon_mods_case_fops,}, + {.name = "dma_channels", .pdr_id = AON_MODS, + .mode = 0644, .fops = &aon_mods_dma_fops,}, + {.name = "adcc_chans", .pdr_id = AON_ADCC, + .mode = 0644, .fops = &aon_mods_adcc_chans_fops,}, + {.name = "sampling_dur", .pdr_id = AON_ADCC, + .mode = 0644, .fops = &aon_mods_adcc_smpl_fops,}, + {.name = "avg_window", .pdr_id = AON_ADCC, + .mode = 0644, .fops = &aon_mods_adcc_avg_fops,}, + {.name = "clk_src", .pdr_id = AON_ADCC, + .mode = 0644, .fops = &aon_mods_adcc_clk_fops,}, + {.name = "adcc_data", .pdr_id = AON_ADCC, + .mode = 0644, .fops = &aon_mods_adcc_data_fops,}, + {.name = "dac", .pdr_id = AON_ADCC, + .mode = 0644, .fops = &aon_mods_adcc_dac_fops,}, + {.name = "ping", .id = AON_PING, .pdr_id = AON_ROOT, + .mode = 0644, .fops = &aon_ping_fops,}, + {.name = "tag", .id = AON_QUERY_TAG, .pdr_id = AON_ROOT, + .mode = 0644, .fops = &aon_tag_fops,}, + {.name = "completion_timeout", 
.pdr_id = AON_ROOT, + .mode = 0644, .fops = &aon_timeout_fops,}, +}; + +static void tegra_aondbg_recv_msg(struct mbox_client *cl, void *rx_msg) +{ + struct tegra_aon_mbox_msg *msg; + struct aon_dbg_response *resp; + + msg = (struct tegra_aon_mbox_msg *)rx_msg; + resp = (void *)msg->data; + if (resp->resp_type > AON_REQUEST_TYPE_MAX) { + dev_err(aondbg_dev.dev, + "Multiple request types in 1 response\n"); + return; + } + memcpy(aon_nodes[resp->resp_type].data, msg->data, + IVC_DBG_CH_FRAME_SIZE); + complete(aon_nodes[resp->resp_type].wait_on); +} + +static int aon_dbg_init(struct tegra_aondbg *aon) +{ + struct dentry *d; + struct dentry *parent_dir; + struct dbgfs_dir *dbgdir; + int i; + + d = debugfs_create_dir(aon_dbgfs_dirs[AON_ROOT].name, NULL); + if (IS_ERR_OR_NULL(d)) + goto clean; + + aon_dbgfs_dirs[AON_ROOT].dir = d; + aon->aon_root = d; + + for (i = 1; i < ARRAY_SIZE(aon_dbgfs_dirs); i++) { + dbgdir = &aon_dbgfs_dirs[i]; + d = debugfs_create_dir(dbgdir->name, dbgdir->parent->dir); + if (IS_ERR_OR_NULL(d)) + goto clean; + + dbgdir->dir = d; + } + + for (i = 0; i < ARRAY_SIZE(aon_nodes); i++) { + parent_dir = aon_dbgfs_dirs[aon_nodes[i].pdr_id].dir; + d = debugfs_create_file(aon_nodes[i].name, + aon_nodes[i].mode, + parent_dir, + &aon_nodes[i].id, + aon_nodes[i].fops); + if (IS_ERR_OR_NULL(d)) + goto clean; + } + + return 0; + +clean: + debugfs_remove_recursive(aon->aon_root); + return PTR_ERR(d); +} + +int tegra_aon_debugfs_create(struct tegra_aon *aon) +{ + struct tegra_aondbg *aondbg = &aondbg_dev; + struct device *dev = aon->dev; + struct device_node *np = dev->of_node; + int count; + int ret = 0; + int i; + + if (!debugfs_initialized()) { + ret = -ENODEV; + goto exit; + } + + if (np == NULL) { + dev_err(dev, "tegra-aondbg: DT data required.\n"); + ret = -EINVAL; + goto exit; + } + + count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); + if (count != 1) { + dev_err(dev, "incorrect mboxes property in '%pOF'\n", np); + ret = -EINVAL; + goto exit; + } + + if (of_property_read_bool(np, NV("adcc"))) + aondbg->supports_adcc = true; + else + aondbg->supports_adcc = false; + + aondbg->dev = aon->dev; + aondbg->aon = aon; + aondbg->cl.dev = aon->dev; + aondbg->cl.tx_block = true; + aondbg->cl.tx_tout = TX_BLOCK_PERIOD; + aondbg->cl.knows_txdone = false; + aondbg->cl.rx_callback = tegra_aondbg_recv_msg; + aondbg->mbox = mbox_request_channel(&aondbg->cl, 0); + if (IS_ERR(aondbg->mbox)) { + ret = PTR_ERR(aondbg->mbox); + if (ret != -EPROBE_DEFER) { + dev_warn(dev, + "can't get mailbox channel (%d)\n", ret); + } + goto exit; + } + dev_dbg(dev, "aondbg->mbox = %p\n", aondbg->mbox); + + for (i = 0; i < ARRAY_SIZE(aon_nodes); i++) { + aon_nodes[i].wait_on = devm_kzalloc(aon->dev, + sizeof(struct completion), + GFP_KERNEL); + if (!aon_nodes[i].wait_on) { + dev_err(dev, "out of memory.\n"); + ret = -ENOMEM; + goto exit; + } + init_completion(aon_nodes[i].wait_on); + } + + ret = aon_dbg_init(aondbg); + if (ret) { + dev_err(dev, "failed to create debugfs nodes.\n"); + goto exit; + } + + devm_tegrafw_register(dev, "aon", TFW_NORMAL, aon_version_show, NULL); + +exit: + return ret; +} + +void tegra_aon_debugfs_remove(struct tegra_aon *aon) +{ + struct tegra_aondbg *aondbg = &aondbg_dev; + + mbox_free_channel(aondbg->mbox); + debugfs_remove_recursive(aondbg->aon_root); +} diff --git a/drivers/platform/tegra/aon/tegra-aon-hsp.c b/drivers/platform/tegra/aon/tegra-aon-hsp.c new file mode 100644 index 00000000..6d04526c --- /dev/null +++ b/drivers/platform/tegra/aon/tegra-aon-hsp.c @@ -0,0 +1,66 
@@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include + +#include + +#define SHRD_MBOX_OFFSET 0x8000 +#define SHRD_SEM_OFFSET 0x10000 +#define SHRD_SEM_SET 0x4u +#define SHRD_SEM_CLR 0x8u +#define AON_SS_MAX 4 +#define AON_SM_MAX 8 +#define MBOX_TAG BIT(32) + +static void __iomem *tegra_aon_hsp_sm_reg(const struct tegra_aon *aon, u32 sm) +{ + return aon_reg(aon, hsp_sm_base_r()) + (SHRD_MBOX_OFFSET * sm); +} + +void tegra_aon_hsp_sm_write(const struct tegra_aon *aon, u32 sm, u32 value) +{ + void __iomem *reg; + + WARN_ON(sm >= AON_SM_MAX); + reg = tegra_aon_hsp_sm_reg(aon, sm); + + writel(MBOX_TAG | value, reg); +} + +static void __iomem *tegra_aon_hsp_ss_reg(const struct tegra_aon *aon, u32 ss) +{ + return aon_reg(aon, hsp_ss_base_r()) + (SHRD_SEM_OFFSET * ss); +} + +u32 tegra_aon_hsp_ss_status(const struct tegra_aon *aon, u32 ss) +{ + void __iomem *reg; + + WARN_ON(ss >= AON_SS_MAX); + reg = tegra_aon_hsp_ss_reg(aon, ss); + + return readl(reg); +} + +void tegra_aon_hsp_ss_set(const struct tegra_aon *aon, u32 ss, u32 bits) +{ + void __iomem *reg; + + WARN_ON(ss >= AON_SS_MAX); + reg = tegra_aon_hsp_ss_reg(aon, ss); + + writel(bits, reg + SHRD_SEM_SET); +} + +void tegra_aon_hsp_ss_clr(const struct tegra_aon *aon, u32 ss, u32 bits) +{ + void __iomem *reg; + + WARN_ON(ss >= AON_SS_MAX); + reg = tegra_aon_hsp_ss_reg(aon, ss); + + writel(bits, reg + SHRD_SEM_CLR); +} diff --git a/drivers/platform/tegra/aon/tegra-aon-mail.c b/drivers/platform/tegra/aon/tegra-aon-mail.c new file mode 100644 index 00000000..405a2d70 --- /dev/null +++ b/drivers/platform/tegra/aon/tegra-aon-mail.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define IVC_INIT_TIMEOUT_US (200000) + +struct tegra_aon_ivc { + struct mbox_controller mbox; +}; + +struct tegra_aon_ivc_chan { + struct ivc ivc; + char *name; + int chan_id; + struct tegra_aon *aon; + bool last_tx_done; +}; + +static struct tegra_aon_ivc aon_ivc; + +/* This has to be a multiple of the cache line size */ +static inline int ivc_min_frame_size(void) +{ + return cache_line_size(); +} + +int tegra_aon_ipc_init(struct tegra_aon *aon) +{ + ktime_t tstart; + int ret = 0; + + tegra_aon_hsp_ss_set(aon, aon->ivc_carveout_base_ss, + (u32)aon->ipcbuf_dma); + tegra_aon_hsp_ss_set(aon, aon->ivc_carveout_size_ss, + (u32)aon->ipcbuf_size); + ret = tegra_aon_hsp_sm_tx_write(aon, SMBOX_IVC_READY_MSG); + if (ret) { + dev_err(aon->dev, "aon hsp sm tx write failed: %d\n", ret); + return ret; + } + + tstart = ktime_get(); + while (!tegra_aon_hsp_sm_tx_is_empty(aon)) { + if (ktime_us_delta(ktime_get(), tstart) > IVC_INIT_TIMEOUT_US) { + tegra_aon_hsp_sm_pair_free(aon); + ret = -ETIMEDOUT; + break; + } + } + + return ret; +} + +static int tegra_aon_mbox_send_data(struct mbox_chan *mbox_chan, void *data) +{ + struct tegra_aon_ivc_chan *ivc_chan; + struct tegra_aon_mbox_msg *msg; + int bytes; + int ret; + + msg = (struct tegra_aon_mbox_msg *)data; + ivc_chan = (struct tegra_aon_ivc_chan *)mbox_chan->con_priv; + bytes = tegra_ivc_write(&ivc_chan->ivc, msg->data, msg->length); + ret = (bytes != msg->length) ? 
-EBUSY : 0; + if (bytes < 0) { + pr_err("%s mbox send failed with error %d\n", __func__, bytes); + ret = bytes; + } + ivc_chan->last_tx_done = (ret == 0); + + return ret; +} + +static int tegra_aon_mbox_startup(struct mbox_chan *mbox_chan) +{ + return 0; +} + +static void tegra_aon_mbox_shutdown(struct mbox_chan *mbox_chan) +{ + struct tegra_aon_ivc_chan *ivc_chan; + + ivc_chan = (struct tegra_aon_ivc_chan *)mbox_chan->con_priv; + ivc_chan->chan_id = -1; +} + +static bool tegra_aon_mbox_last_tx_done(struct mbox_chan *mbox_chan) +{ + struct tegra_aon_ivc_chan *ivc_chan; + + ivc_chan = (struct tegra_aon_ivc_chan *)mbox_chan->con_priv; + + return ivc_chan->last_tx_done; +} + +static struct mbox_chan_ops tegra_aon_mbox_chan_ops = { + .send_data = tegra_aon_mbox_send_data, + .startup = tegra_aon_mbox_startup, + .shutdown = tegra_aon_mbox_shutdown, + .last_tx_done = tegra_aon_mbox_last_tx_done, +}; + +static void tegra_aon_notify_remote(struct ivc *ivc) +{ + struct tegra_aon_ivc_chan *ivc_chan; + + ivc_chan = container_of(ivc, struct tegra_aon_ivc_chan, ivc); + tegra_aon_hsp_ss_set(ivc_chan->aon, ivc_chan->aon->ivc_tx_ss, + BIT(ivc_chan->chan_id)); + tegra_aon_hsp_sm_tx_write(ivc_chan->aon, SMBOX_IVC_NOTIFY); +} + +static void tegra_aon_rx_handler(u32 ivc_chans) +{ + struct mbox_chan *mbox_chan; + struct ivc *ivc; + struct tegra_aon_ivc_chan *ivc_chan; + struct tegra_aon_mbox_msg msg; + int i; + + ivc_chans &= BIT(aon_ivc.mbox.num_chans) - 1; + while (ivc_chans) { + i = __builtin_ctz(ivc_chans); + ivc_chans &= ~BIT(i); + mbox_chan = &aon_ivc.mbox.chans[i]; + ivc_chan = (struct tegra_aon_ivc_chan *)mbox_chan->con_priv; + /* check if mailbox client exists */ + if (ivc_chan->chan_id == -1) + continue; + ivc = &ivc_chan->ivc; + while (tegra_ivc_can_read(ivc)) { + msg.data = tegra_ivc_read_get_next_frame(ivc); + msg.length = ivc->frame_size; + mbox_chan_received_data(mbox_chan, &msg); + tegra_ivc_read_advance(ivc); + } + } +} + +static void tegra_aon_hsp_sm_full_notify(void *data, u32 value) +{ + struct tegra_aon *aon = data; + u32 ss_val; + + if (value != SMBOX_IVC_NOTIFY) { + dev_err(aon->dev, "Invalid IVC notification\n"); + return; + } + + ss_val = tegra_aon_hsp_ss_status(aon, aon->ivc_rx_ss); + tegra_aon_hsp_ss_clr(aon, aon->ivc_rx_ss, ss_val); + tegra_aon_rx_handler(ss_val); +} + +static int tegra_aon_parse_channel(struct tegra_aon *aon, + struct mbox_chan *mbox_chan, + struct device_node *ch_node, + int chan_id) +{ + struct device *dev; + struct tegra_aon_ivc_chan *ivc_chan; + struct { + u32 rx, tx; + } start, end; + u32 nframes, frame_size; + int ret = 0; + + /* Sanity check */ + if (!mbox_chan || !ch_node || !aon) + return -EINVAL; + + dev = aon->dev; + + ret = of_property_read_u32_array(ch_node, "reg", &start.rx, 2); + if (ret) { + dev_err(dev, "missing <%s> property\n", "reg"); + return ret; + } + ret = of_property_read_u32(ch_node, NV("frame-count"), &nframes); + if (ret) { + dev_err(dev, "missing <%s> property\n", NV("frame-count")); + return ret; + } + ret = of_property_read_u32(ch_node, NV("frame-size"), &frame_size); + if (ret) { + dev_err(dev, "missing <%s> property\n", NV("frame-size")); + return ret; + } + + if (!nframes) { + dev_err(dev, "Invalid property\n"); + return -EINVAL; + } + + if (frame_size < ivc_min_frame_size()) { + dev_err(dev, "Invalid property\n"); + return -EINVAL; + } + + end.rx = start.rx + tegra_ivc_total_queue_size(nframes * frame_size); + end.tx = start.tx + tegra_ivc_total_queue_size(nframes * frame_size); + + if (end.rx > aon->ipcbuf_size) { + dev_err(dev, 
"%s buffer exceeds ivc size\n", "rx"); + return -EINVAL; + } + if (end.tx > aon->ipcbuf_size) { + dev_err(dev, "%s buffer exceeds ivc size\n", "tx"); + return -EINVAL; + } + + if (start.tx < start.rx ? end.tx > start.rx : end.rx > start.tx) { + dev_err(dev, "rx and tx buffers overlap on channel %s\n", + ch_node->name); + return -EINVAL; + } + + ivc_chan = devm_kzalloc(dev, sizeof(*ivc_chan), GFP_KERNEL); + if (!ivc_chan) + return -ENOMEM; + + ivc_chan->name = devm_kstrdup(dev, ch_node->name, GFP_KERNEL); + if (!ivc_chan->name) + return -ENOMEM; + + ivc_chan->chan_id = chan_id; + + /* Allocate the IVC links */ + ret = tegra_ivc_init(&ivc_chan->ivc, + (unsigned long)aon->ipcbuf + start.rx, + (unsigned long)aon->ipcbuf + start.tx, + nframes, frame_size, NULL, + tegra_aon_notify_remote); + if (ret) { + dev_err(dev, "failed to instantiate IVC.\n"); + return ret; + } + + ivc_chan->aon = aon; + mbox_chan->con_priv = ivc_chan; + + dev_dbg(dev, "%s: RX: 0x%x-0x%x TX: 0x%x-0x%x\n", + ivc_chan->name, start.rx, end.rx, start.tx, end.tx); + + return ret; +} + +static int tegra_aon_check_channels_overlap(struct device *dev, + struct tegra_aon_ivc_chan *ch0, + struct tegra_aon_ivc_chan *ch1) +{ + unsigned int s0, s1; + unsigned long tx0, rx0, tx1, rx1; + + if (ch0 == NULL || ch1 == NULL) + return -EINVAL; + + tx0 = (unsigned long)ch0->ivc.tx_channel; + rx0 = (unsigned long)ch0->ivc.rx_channel; + s0 = ch0->ivc.nframes * ch0->ivc.frame_size; + s0 = tegra_ivc_total_queue_size(s0); + + tx1 = (unsigned long)ch1->ivc.tx_channel; + rx1 = (unsigned long)ch1->ivc.rx_channel; + s1 = ch1->ivc.nframes * ch1->ivc.frame_size; + s1 = tegra_ivc_total_queue_size(s1); + + if ((tx0 < tx1 ? tx0 + s0 > tx1 : tx1 + s1 > tx0) || + (rx0 < tx1 ? rx0 + s0 > tx1 : tx1 + s1 > rx0) || + (rx0 < rx1 ? rx0 + s0 > rx1 : rx1 + s1 > rx0) || + (tx0 < rx1 ? 
tx0 + s0 > rx1 : rx1 + s1 > tx0)) { + dev_err(dev, "ivc buffers overlap on channels %s and %s\n", + ch0->name, ch1->name); + return -EINVAL; + } + + return 0; +} + +static int tegra_aon_validate_channels(struct device *dev) +{ + struct tegra_aon *aon; + struct tegra_aon_ivc_chan *i_chan, *j_chan; + int i, j; + int ret; + + aon = dev_get_drvdata(dev); + + for (i = 0; i < aon_ivc.mbox.num_chans; i++) { + i_chan = aon_ivc.mbox.chans[i].con_priv; + + for (j = i + 1; j < aon_ivc.mbox.num_chans; j++) { + j_chan = aon_ivc.mbox.chans[j].con_priv; + + ret = tegra_aon_check_channels_overlap(aon->dev, + i_chan, j_chan); + if (ret) + return ret; + } + } + + return 0; +} + +static int tegra_aon_parse_channels(struct tegra_aon *aon) +{ + struct tegra_aon_ivc *aonivc = &aon_ivc; + struct device *dev = aon->dev; + struct device_node *reg_node, *ch_node; + int ret, i; + + i = 0; + + for_each_child_of_node(dev->of_node, reg_node) { + if (strcmp(reg_node->name, "ivc-channels")) + continue; + + for_each_child_of_node(reg_node, ch_node) { + ret = tegra_aon_parse_channel(aon, + &aonivc->mbox.chans[i], + ch_node, i); + i++; + if (ret) { + dev_err(dev, "failed to parse a channel\n"); + return ret; + } + } + break; + } + + return tegra_aon_validate_channels(dev); +} + +static int tegra_aon_count_ivc_channels(struct device_node *dev_node) +{ + int num = 0; + struct device_node *child_node; + + for_each_child_of_node(dev_node, child_node) { + if (strcmp(child_node->name, "ivc-channels")) + continue; + num = of_get_child_count(child_node); + break; + } + + return num; +} + +int tegra_aon_mail_init(struct tegra_aon *aon) +{ + struct tegra_aon_ivc *aonivc = &aon_ivc; + struct device *dev = aon->dev; + struct device_node *dn = dev->of_node; + int num_chans; + int ret; + + num_chans = tegra_aon_count_ivc_channels(dn); + if (num_chans <= 0) { + dev_err(dev, "no ivc channels\n"); + ret = -EINVAL; + goto exit; + } + + aonivc->mbox.dev = aon->dev; + aonivc->mbox.chans = devm_kzalloc(aon->dev, + num_chans * sizeof(*aonivc->mbox.chans), + GFP_KERNEL); + if (!aonivc->mbox.chans) { + ret = -ENOMEM; + goto exit; + } + + aonivc->mbox.num_chans = num_chans; + aonivc->mbox.ops = &tegra_aon_mbox_chan_ops; + aonivc->mbox.txdone_poll = true; + aonivc->mbox.txpoll_period = 1; + + /* Parse out all channels from DT */ + ret = tegra_aon_parse_channels(aon); + if (ret) { + dev_err(dev, "ivc-channels set up failed: %d\n", ret); + goto exit; + } + + /* Fetch the shared mailbox pair associated with IVC tx and rx */ + ret = tegra_aon_hsp_sm_pair_request(aon, tegra_aon_hsp_sm_full_notify, + aon); + if (ret) { + dev_err(dev, "aon hsp sm pair request failed: %d\n", ret); + goto exit; + } + + ret = mbox_controller_register(&aonivc->mbox); + if (ret) { + dev_err(dev, "failed to register mailbox: %d\n", ret); + tegra_aon_hsp_sm_pair_free(aon); + goto exit; + } + +exit: + return ret; +} + +void tegra_aon_mail_deinit(struct tegra_aon *aon) +{ + mbox_controller_unregister(&aon_ivc.mbox); + tegra_aon_hsp_sm_pair_free(aon); +} diff --git a/drivers/platform/tegra/aon/tegra-aon-module.c b/drivers/platform/tegra/aon/tegra-aon-module.c new file mode 100644 index 00000000..e44d16d1 --- /dev/null +++ b/drivers/platform/tegra/aon/tegra-aon-module.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define IPCBUF_SIZE 2097152 + +static int tegra_aon_init_dev_data(struct platform_device *pdev) +{ + struct tegra_aon *aon; + struct device *dev = &pdev->dev; + struct device_node *dn = dev->of_node; + int ret = 0; + + aon = devm_kzalloc(dev, sizeof(*aon), GFP_KERNEL); + if (!aon) { + ret = -ENOMEM; + goto exit; + } + platform_set_drvdata(pdev, aon); + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + dev_err(&pdev->dev, "setting DMA MASK failed!\n"); + } + aon->dev = dev; + + aon->regs = of_iomap(dn, 0); + if (!aon->regs) { + dev_err(&pdev->dev, "Cannot map AON register space\n"); + ret = -ENOMEM; + goto exit; + } + +exit: + return ret; +} + +static int tegra_aon_setup_ipc_carveout(struct tegra_aon *aon) +{ + struct device_node *dn = aon->dev->of_node; + int ret = 0; + + aon->ipcbuf = dmam_alloc_coherent(aon->dev, + IPCBUF_SIZE, + &aon->ipcbuf_dma, + GFP_KERNEL | __GFP_ZERO); + if (!aon->ipcbuf) { + dev_err(aon->dev, "failed to allocate IPC memory\n"); + ret = -ENOMEM; + goto exit; + + } + aon->ipcbuf_size = IPCBUF_SIZE; + + ret = of_property_read_u32(dn, NV("ivc-rx-ss"), &aon->ivc_rx_ss); + if (ret) { + dev_err(aon->dev, "missing <%s> property\n", NV("ivc-rx-ss")); + goto exit; + } + + ret = of_property_read_u32(dn, NV("ivc-tx-ss"), &aon->ivc_tx_ss); + if (ret) { + dev_err(aon->dev, "missing <%s> property\n", NV("ivc-tx-ss")); + goto exit; + } + + ret = of_property_read_u32(dn, NV("ivc-carveout-base-ss"), + &aon->ivc_carveout_base_ss); + if (ret) { + dev_err(aon->dev, "missing <%s> property\n", + NV("ivc-carveout-base-ss")); + goto exit; + } + + ret = of_property_read_u32(dn, NV("ivc-carveout-size-ss"), + &aon->ivc_carveout_size_ss); + if (ret) { + dev_err(aon->dev, "missing <%s> property\n", + NV("ivc-carveout-size-ss")); + goto exit; + } + +exit: + return ret; +} + +static int tegra_aon_probe(struct platform_device *pdev) +{ + struct tegra_aon *aon = NULL; + struct device *dev = &pdev->dev; + int ret = 0; + + ret = tegra_aon_init_dev_data(pdev); + if (ret) { + dev_err(dev, "failed to init device data err = %d\n", ret); + goto exit; + } + + aon = platform_get_drvdata(pdev); + ret = tegra_aon_setup_ipc_carveout(aon); + if (ret) { + dev_err(dev, "failed to setup ipc carveout err = %d\n", ret); + goto exit; + } + + ret = tegra_aon_mail_init(aon); + if (ret) { + dev_err(dev, "failed to init mail err = %d\n", ret); + goto exit; + } + + ret = tegra_aon_debugfs_create(aon); + if (ret) { + dev_err(dev, "failed to create debugfs err = %d\n", ret); + goto exit; + } + + ret = tegra_aon_ipc_init(aon); + if (ret) { + dev_err(dev, "failed to init ipc err = %d\n", ret); + goto exit; + } + + dev_info(aon->dev, "init done\n"); + +exit: + return ret; +} + +static int tegra_aon_remove(struct platform_device *pdev) +{ + struct tegra_aon *aon = platform_get_drvdata(pdev); + + if (!IS_ERR_OR_NULL(aon)) { + tegra_aon_debugfs_remove(aon); + tegra_aon_mail_deinit(aon); + } + + return 0; +} + +static const struct of_device_id tegra_aon_of_match[] = { + { + .compatible = NV("tegra234-aon"), + }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_aon_of_match); + +static struct platform_driver tegra234_aon_driver = { + .driver = { + .name = "tegra234-aon", + .of_match_table = tegra_aon_of_match, + }, + .probe = tegra_aon_probe, + .remove = tegra_aon_remove, +}; +module_platform_driver(tegra234_aon_driver); + +MODULE_DESCRIPTION("Tegra SPE driver"); 
+MODULE_AUTHOR("akhumbum@nvidia.com"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/tegra/tegra-ivc.c b/drivers/platform/tegra/tegra-ivc.c new file mode 100644 index 00000000..e7e89408 --- /dev/null +++ b/drivers/platform/tegra/tegra-ivc.c @@ -0,0 +1,1009 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Inter-VM Communication + * + * Copyright (c) 2014-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SMP + +static inline void ivc_rmb(void) +{ + smp_rmb(); +} + +static inline void ivc_wmb(void) +{ + smp_wmb(); +} + +static inline void ivc_mb(void) +{ + smp_mb(); +} + +#else + +static inline void ivc_rmb(void) +{ + rmb(); +} + +static inline void ivc_wmb(void) +{ + wmb(); +} + +static inline void ivc_mb(void) +{ + mb(); +} + +#endif + +/* + * IVC channel reset protocol. + * + * Each end uses its tx_channel.state to indicate its synchronization state. + */ +enum ivc_state { + /* + * This value is zero for backwards compatibility with services that + * assume channels to be initially zeroed. Such channels are in an + * initially valid state, but cannot be asynchronously reset, and must + * maintain a valid state at all times. + * + * The transmitting end can enter the established state from the sync or + * ack state when it observes the receiving endpoint in the ack or + * established state, indicating that has cleared the counters in our + * rx_channel. + */ + ivc_state_established = 0, + + /* + * If an endpoint is observed in the sync state, the remote endpoint is + * allowed to clear the counters it owns asynchronously with respect to + * the current endpoint. Therefore, the current endpoint is no longer + * allowed to communicate. + */ + ivc_state_sync, + + /* + * When the transmitting end observes the receiving end in the sync + * state, it can clear the w_count and r_count and transition to the ack + * state. If the remote endpoint observes us in the ack state, it can + * return to the established state once it has cleared its counters. + */ + ivc_state_ack +}; + +/* + * This structure is divided into two-cache aligned parts, the first is only + * written through the tx_channel pointer, while the second is only written + * through the rx_channel pointer. This delineates ownership of the cache lines, + * which is critical to performance and necessary in non-cache coherent + * implementations. 
+ */ +struct ivc_channel_header { + union { + struct { + /* fields owned by the transmitting end */ + uint32_t w_count; + uint32_t state; + }; + uint8_t w_align[IVC_ALIGN]; + }; + union { + /* fields owned by the receiving end */ + uint32_t r_count; + uint8_t r_align[IVC_ALIGN]; + }; +}; + +static inline bool is_u32_subtraction_safe(uint32_t a, uint32_t b) +{ + return (a >= b); +} + +static inline uint32_t safe_subtract_u32(uint32_t a, uint32_t b) +{ + if (!is_u32_subtraction_safe(a, b)) + BUG(); + else + return (a - b); +} + +static inline bool is_u64_addition_safe(uint64_t a, uint64_t b) +{ + return (ULLONG_MAX - a >= b); +} + +static inline uint64_t safe_add_u64(uint64_t a, uint64_t b) +{ + if (!is_u64_addition_safe(a, b)) + BUG(); + else + return (a + b); +} + +static inline bool is_u32_addition_safe(uint32_t a, uint32_t b) +{ + return (UINT_MAX - a >= b); +} + +static inline uint32_t safe_add_u32(uint32_t a, uint32_t b) +{ + if (!is_u32_addition_safe(a, b)) + BUG(); + else + return (a + b); +} + +static inline bool is_u32_u32__u32_multiplication_safe(uint32_t a, uint32_t b) +{ + if (a == 0U || b == 0U) + return true; + return ((UINT_MAX / b) >= a); +} + +static inline bool is_u32_u32__u64_multiplication_safe(uint32_t a, uint32_t b) +{ + if (a == 0U || b == 0U) + return true; + return ((ULLONG_MAX / b) >= (a * 1ULL)); +} + +static inline uint64_t safe_mult_u32_u32__u64(uint32_t a, uint32_t b) +{ + if (!is_u32_u32__u64_multiplication_safe(a, b)) + BUG(); + else + return ((uint64_t)a * (uint64_t)b); +} + +static inline void ivc_invalidate_counter(struct ivc *ivc, + dma_addr_t handle) +{ + if (!ivc->peer_device) + return; + dma_sync_single_for_cpu(ivc->peer_device, handle, IVC_ALIGN, + DMA_FROM_DEVICE); +} + +static inline void ivc_flush_counter(struct ivc *ivc, dma_addr_t handle) +{ + if (!ivc->peer_device) + return; + dma_sync_single_for_device(ivc->peer_device, handle, IVC_ALIGN, + DMA_TO_DEVICE); +} + +static inline int ivc_channel_empty(struct ivc *ivc, + struct ivc_channel_header *ch) +{ + /* + * This function performs multiple checks on the same values with + * security implications, so create snapshots with READ_ONCE() to + * ensure that these checks use the same values. + */ + uint32_t w_count = READ_ONCE(ch->w_count); + uint32_t r_count = READ_ONCE(ch->r_count); + + /* + * Perform an over-full check to prevent denial of service attacks where + * a server could be easily fooled into believing that there's an + * extremely large number of frames ready, since receivers are not + * expected to check for full or over-full conditions. + * + * Although the channel isn't empty, this is an invalid case caused by + * a potentially malicious peer, so returning empty is safer, because it + * gives the impression that the channel has gone silent. + */ + if (w_count - r_count > ivc->nframes) + return 1; + + return w_count == r_count; +} + +static inline int ivc_channel_full(struct ivc *ivc, + struct ivc_channel_header *ch) +{ + /* + * Invalid cases where the counters indicate that the queue is over + * capacity also appear full. + */ + return READ_ONCE(ch->w_count) - READ_ONCE(ch->r_count) + >= ivc->nframes; +} + +static inline uint32_t ivc_channel_avail_count(struct ivc *ivc, + struct ivc_channel_header *ch) +{ + /* + * This function isn't expected to be used in scenarios where an + * over-full situation can lead to denial of service attacks. See the + * comment in ivc_channel_empty() for an explanation about special + * over-full considerations. 
+ */ + return READ_ONCE(ch->w_count) - READ_ONCE(ch->r_count); +} + +static inline void ivc_advance_tx(struct ivc *ivc) +{ + WRITE_ONCE(ivc->tx_channel->w_count, (READ_ONCE(ivc->tx_channel->w_count) + 1)); + + if (ivc->w_pos == ivc->nframes - 1) + ivc->w_pos = 0; + else + ivc->w_pos++; +} + +static inline void ivc_advance_rx(struct ivc *ivc) +{ + WRITE_ONCE(ivc->rx_channel->r_count, (READ_ONCE(ivc->rx_channel->r_count) + 1)); + + if (ivc->r_pos == ivc->nframes - 1) + ivc->r_pos = 0; + else + ivc->r_pos++; +} + +static inline int ivc_check_read(struct ivc *ivc) +{ + /* + * tx_channel->state is set locally, so it is not synchronized with + * state from the remote peer. The remote peer cannot reset its + * transmit counters until we've acknowledged its synchronization + * request, so no additional synchronization is required because an + * asynchronous transition of rx_channel->state to ivc_state_ack is not + * allowed. + */ + if (ivc->tx_channel->state != ivc_state_established) + return -ECONNRESET; + + /* + * Avoid unnecessary invalidations when performing repeated accesses to + * an IVC channel by checking the old queue pointers first. + * Synchronization is only necessary when these pointers indicate empty + * or full. + */ + if (!ivc_channel_empty(ivc, ivc->rx_channel)) + return 0; + + ivc_invalidate_counter(ivc, ivc->rx_handle + + offsetof(struct ivc_channel_header, w_count)); + return ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0; +} + +static inline int ivc_check_write(struct ivc *ivc) +{ + if (ivc->tx_channel->state != ivc_state_established) + return -ECONNRESET; + + if (!ivc_channel_full(ivc, ivc->tx_channel)) + return 0; + + ivc_invalidate_counter(ivc, ivc->tx_handle + + offsetof(struct ivc_channel_header, r_count)); + return ivc_channel_full(ivc, ivc->tx_channel) ? 
-ENOMEM : 0; +} + +int tegra_ivc_can_read(struct ivc *ivc) +{ + return ivc_check_read(ivc) == 0; +} +EXPORT_SYMBOL(tegra_ivc_can_read); + +int tegra_ivc_can_write(struct ivc *ivc) +{ + return ivc_check_write(ivc) == 0; +} +EXPORT_SYMBOL(tegra_ivc_can_write); + +int tegra_ivc_tx_empty(struct ivc *ivc) +{ + BUILD_BUG_ON(offsetof(struct ivc_channel_header, r_count) > (UINT_MAX * 1ULL)); + ivc_invalidate_counter(ivc, safe_add_u32(ivc->tx_handle, + offsetof(struct ivc_channel_header, r_count))); + return ivc_channel_empty(ivc, ivc->tx_channel); +} +EXPORT_SYMBOL(tegra_ivc_tx_empty); + +uint32_t tegra_ivc_tx_frames_available(struct ivc *ivc) +{ + BUILD_BUG_ON(offsetof(struct ivc_channel_header, r_count) > (UINT_MAX * 1ULL)); + ivc_invalidate_counter(ivc, safe_add_u32(ivc->tx_handle, + offsetof(struct ivc_channel_header, r_count))); + return safe_subtract_u32(ivc->nframes, + safe_subtract_u32(READ_ONCE(ivc->tx_channel->w_count), + READ_ONCE(ivc->tx_channel->r_count))); +} +EXPORT_SYMBOL(tegra_ivc_tx_frames_available); + +static void *ivc_frame_pointer(struct ivc *ivc, struct ivc_channel_header *ch, + uint32_t frame) +{ + BUG_ON(frame >= ivc->nframes); + return (void *)((uintptr_t)(ch + 1) + ivc->frame_size * frame); +} + +static inline dma_addr_t ivc_frame_handle(struct ivc *ivc, + dma_addr_t channel_handle, uint32_t frame) +{ + BUG_ON(!ivc->peer_device); + BUG_ON(frame >= ivc->nframes); + + return safe_add_u64(safe_add_u64(channel_handle, sizeof(struct ivc_channel_header)), + safe_mult_u32_u32__u64(ivc->frame_size, frame)); +} + +static inline void ivc_invalidate_frame(struct ivc *ivc, + dma_addr_t channel_handle, unsigned int frame, uint64_t offset, size_t len) +{ + if (!ivc->peer_device) + return; + dma_sync_single_for_cpu(ivc->peer_device, + safe_add_u64(ivc_frame_handle(ivc, channel_handle, frame), offset), + len, DMA_FROM_DEVICE); +} + +static inline void ivc_flush_frame(struct ivc *ivc, dma_addr_t channel_handle, + unsigned int frame, uint64_t offset, size_t len) +{ + if (!ivc->peer_device) + return; + + dma_sync_single_for_device(ivc->peer_device, + safe_add_u64(ivc_frame_handle(ivc, channel_handle, frame), offset), + len, DMA_TO_DEVICE); +} + +static int ivc_read_frame(struct ivc *ivc, void *buf, void __user *user_buf, + size_t max_read) +{ + const void *src; + int result; + + BUG_ON(buf && user_buf); + + if (max_read > ivc->frame_size) + return -E2BIG; + + result = ivc_check_read(ivc); + if (result) + return result; + + /* + * Order observation of w_pos potentially indicating new data before + * data read. + */ + ivc_rmb(); + + ivc_invalidate_frame(ivc, ivc->rx_handle, ivc->r_pos, 0, max_read); + src = ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos); + + /* + * When compiled with optimizations, different versions of this + * function should be inlined into tegra_ivc_read_frame() or + * tegra_ivc_read_frame_user(). This should ensure that the user + * version does not add overhead to the kernel version. + */ + if (buf) { + memcpy(buf, src, max_read); + } else if (user_buf) { + if (copy_to_user(user_buf, src, max_read)) + return -EFAULT; + } else + BUG(); + + ivc_advance_rx(ivc); + ivc_flush_counter(ivc, ivc->rx_handle + + offsetof(struct ivc_channel_header, r_count)); + + /* + * Ensure our write to r_pos occurs before our read from w_pos. + */ + ivc_mb(); + + /* + * Notify only upon transition from full to non-full. + * The available count can only asynchronously increase, so the + * worst possible side-effect will be a spurious notification. 
+ */ + ivc_invalidate_counter(ivc, ivc->rx_handle + + offsetof(struct ivc_channel_header, w_count)); + + if (ivc_channel_avail_count(ivc, ivc->rx_channel) == ivc->nframes - 1) + ivc->notify(ivc); + + return (int)max_read; +} + +int tegra_ivc_read(struct ivc *ivc, void *buf, size_t max_read) +{ + return ivc_read_frame(ivc, buf, NULL, max_read); +} +EXPORT_SYMBOL(tegra_ivc_read); + +int tegra_ivc_read_user(struct ivc *ivc, void __user *buf, size_t max_read) +{ + return ivc_read_frame(ivc, NULL, buf, max_read); +} +EXPORT_SYMBOL(tegra_ivc_read_user); + +/* peek in the next rx buffer at offset off, the count bytes */ +int tegra_ivc_read_peek(struct ivc *ivc, void *buf, size_t off, size_t count) +{ + const void *src; + int result; + + if (!is_u64_addition_safe(off, count)) + return -EFAULT; + + if (off > ivc->frame_size || off + count > ivc->frame_size) + return -E2BIG; + + result = ivc_check_read(ivc); + if (result) + return result; + + /* + * Order observation of w_pos potentially indicating new data before + * data read. + */ + ivc_rmb(); + + ivc_invalidate_frame(ivc, ivc->rx_handle, ivc->r_pos, off, count); + src = ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos); + + memcpy(buf, (void *)((uintptr_t)src + off), count); + + /* note, no interrupt is generated */ + + return (int)count; +} +EXPORT_SYMBOL(tegra_ivc_read_peek); + +/* directly peek at the next frame rx'ed */ +void *tegra_ivc_read_get_next_frame(struct ivc *ivc) +{ + int result = ivc_check_read(ivc); + if (result) + return ERR_PTR(result); + + /* + * Order observation of w_pos potentially indicating new data before + * data read. + */ + ivc_rmb(); + + ivc_invalidate_frame(ivc, ivc->rx_handle, ivc->r_pos, 0, + ivc->frame_size); + return ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos); +} +EXPORT_SYMBOL(tegra_ivc_read_get_next_frame); + +int tegra_ivc_read_advance(struct ivc *ivc) +{ + /* + * No read barriers or synchronization here: the caller is expected to + * have already observed the channel non-empty. This check is just to + * catch programming errors. + */ + int result = ivc_check_read(ivc); + if (result) + return result; + + if (!is_u64_addition_safe(ivc->rx_handle, offsetof(struct ivc_channel_header, r_count))) + return -EFAULT; + if (!is_u64_addition_safe(ivc->rx_handle, offsetof(struct ivc_channel_header, w_count))) + return -EFAULT; + if (!is_u32_subtraction_safe(ivc->nframes, 1)) + return -EFAULT; + + ivc_advance_rx(ivc); + + ivc_flush_counter(ivc, ivc->rx_handle + + offsetof(struct ivc_channel_header, r_count)); + + /* + * Ensure our write to r_pos occurs before our read from w_pos. + */ + ivc_mb(); + + /* + * Notify only upon transition from full to non-full. + * The available count can only asynchronously increase, so the + * worst possible side-effect will be a spurious notification. 
+ */ + ivc_invalidate_counter(ivc, ivc->rx_handle + + offsetof(struct ivc_channel_header, w_count)); + + if (ivc_channel_avail_count(ivc, ivc->rx_channel) == ivc->nframes - 1) + ivc->notify(ivc); + + return 0; +} +EXPORT_SYMBOL(tegra_ivc_read_advance); + +static int ivc_write_frame(struct ivc *ivc, const void *buf, + const void __user *user_buf, size_t size) +{ + void *p; + int result; + + BUG_ON(buf && user_buf); + + if (size > ivc->frame_size) + return -E2BIG; + + result = ivc_check_write(ivc); + if (result) + return result; + + p = ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos); + + /* + * When compiled with optimizations, different versions of this + * function should be inlined into tegra_ivc_write_frame() or + * tegra_ivc_write_frame_user(). This should ensure that the user + * version does not add overhead to the kernel version. + */ + if (buf) { + memcpy(p, buf, size); + } else if (user_buf) { + if (copy_from_user(p, user_buf, size)) + return -EFAULT; + } else + BUG(); + + memset(p + size, 0, ivc->frame_size - size); + ivc_flush_frame(ivc, ivc->tx_handle, ivc->w_pos, 0, size); + + /* + * Ensure that updated data is visible before the w_pos counter + * indicates that it is ready. + */ + ivc_wmb(); + + ivc_advance_tx(ivc); + ivc_flush_counter(ivc, ivc->tx_handle + + offsetof(struct ivc_channel_header, w_count)); + + /* + * Ensure our write to w_pos occurs before our read from r_pos. + */ + ivc_mb(); + + /* + * Notify only upon transition from empty to non-empty. + * The available count can only asynchronously decrease, so the + * worst possible side-effect will be a spurious notification. + */ + ivc_invalidate_counter(ivc, ivc->tx_handle + + offsetof(struct ivc_channel_header, r_count)); + + if (ivc_channel_avail_count(ivc, ivc->tx_channel) == 1) + ivc->notify(ivc); + + return (int)size; +} + +int tegra_ivc_write(struct ivc *ivc, const void *buf, size_t size) +{ + return ivc_write_frame(ivc, buf, NULL, size); +} +EXPORT_SYMBOL(tegra_ivc_write); + +int tegra_ivc_write_user(struct ivc *ivc, const void __user *user_buf, + size_t size) +{ + return ivc_write_frame(ivc, NULL, user_buf, size); +} +EXPORT_SYMBOL(tegra_ivc_write_user); + +/* poke in the next tx buffer at offset off, the count bytes */ +int tegra_ivc_write_poke(struct ivc *ivc, const void *buf, size_t off, + size_t count) +{ + void *dest; + int result; + + if (!is_u64_addition_safe(off, count)) + return -EFAULT; + + if ((off > UINT_MAX) || (count > INT_MAX) + || (off + count) > UINT_MAX) { + return -EFAULT; + } + + if (off > ivc->frame_size || off + count > ivc->frame_size) + return -E2BIG; + + result = ivc_check_write(ivc); + if (result) + return result; + + dest = ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos); + memcpy(dest + off, buf, count); + + return (int)count; +} +EXPORT_SYMBOL(tegra_ivc_write_poke); + +/* directly poke at the next frame to be tx'ed */ +void *tegra_ivc_write_get_next_frame(struct ivc *ivc) +{ + int result = ivc_check_write(ivc); + if (result) + return ERR_PTR(result); + + return ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos); +} +EXPORT_SYMBOL(tegra_ivc_write_get_next_frame); + +/* advance the tx buffer */ +int tegra_ivc_write_advance(struct ivc *ivc) +{ + int result = ivc_check_write(ivc); + if (result) + return result; + + ivc_flush_frame(ivc, ivc->tx_handle, ivc->w_pos, 0, ivc->frame_size); + + /* + * Order any possible stores to the frame before update of w_pos. 
+ */ + ivc_wmb(); + + ivc_advance_tx(ivc); + ivc_flush_counter(ivc, ivc->tx_handle + + offsetof(struct ivc_channel_header, w_count)); + + /* + * Ensure our write to w_pos occurs before our read from r_pos. + */ + ivc_mb(); + + /* + * Notify only upon transition from empty to non-empty. + * The available count can only asynchronously decrease, so the + * worst possible side-effect will be a spurious notification. + */ + BUILD_BUG_ON(offsetof(struct ivc_channel_header, + r_count) > (UINT_MAX * 1ULL)); + if (!is_u32_addition_safe(ivc->tx_handle, + offsetof(struct ivc_channel_header, r_count))) + return -EFAULT; + + ivc_invalidate_counter(ivc, ivc->tx_handle + + offsetof(struct ivc_channel_header, r_count)); + + if (ivc_channel_avail_count(ivc, ivc->tx_channel) == 1) + ivc->notify(ivc); + + return 0; +} +EXPORT_SYMBOL(tegra_ivc_write_advance); + +void tegra_ivc_channel_reset(struct ivc *ivc) +{ + ivc->tx_channel->state = ivc_state_sync; + ivc_flush_counter(ivc, ivc->tx_handle + + offsetof(struct ivc_channel_header, w_count)); + ivc->notify(ivc); +} +EXPORT_SYMBOL(tegra_ivc_channel_reset); + +/* + * =============================================================== + * IVC State Transition Table - see tegra_ivc_channel_notified() + * =============================================================== + * + * local remote action + * ----- ------ ----------------------------------- + * SYNC EST + * SYNC ACK reset counters; move to EST; notify + * SYNC SYNC reset counters; move to ACK; notify + * ACK EST move to EST; notify + * ACK ACK move to EST; notify + * ACK SYNC reset counters; move to ACK; notify + * EST EST + * EST ACK + * EST SYNC reset counters; move to ACK; notify + * + * =============================================================== + */ + +int tegra_ivc_channel_notified(struct ivc *ivc) +{ + enum ivc_state peer_state; + + /* Copy the receiver's state out of shared memory. */ + ivc_invalidate_counter(ivc, ivc->rx_handle + + offsetof(struct ivc_channel_header, w_count)); + peer_state = READ_ONCE(ivc->rx_channel->state); + + if (peer_state == ivc_state_sync) { + /* + * Order observation of ivc_state_sync before stores clearing + * tx_channel. + */ + ivc_rmb(); + + /* + * Reset tx_channel counters. The remote end is in the SYNC + * state and won't make progress until we change our state, + * so the counters are not in use at this time. + */ + ivc->tx_channel->w_count = 0; + ivc->rx_channel->r_count = 0; + + ivc->w_pos = 0; + ivc->r_pos = 0; + + /* + * Ensure that counters appear cleared before new state can be + * observed. + */ + ivc_wmb(); + + /* + * Move to ACK state. We have just cleared our counters, so it + * is now safe for the remote end to start using these values. + */ + ivc->tx_channel->state = ivc_state_ack; + ivc_flush_counter(ivc, ivc->tx_handle + + offsetof(struct ivc_channel_header, w_count)); + + /* + * Notify remote end to observe state transition. + */ + ivc->notify(ivc); + + } else if (ivc->tx_channel->state == ivc_state_sync && + peer_state == ivc_state_ack) { + /* + * Order observation of ivc_state_sync before stores clearing + * tx_channel. + */ + ivc_rmb(); + + /* + * Reset tx_channel counters. The remote end is in the ACK + * state and won't make progress until we change our state, + * so the counters are not in use at this time. + */ + ivc->tx_channel->w_count = 0; + ivc->rx_channel->r_count = 0; + + ivc->w_pos = 0; + ivc->r_pos = 0; + + /* + * Ensure that counters appear cleared before new state can be + * observed. 
+ */ + ivc_wmb(); + + /* + * Move to ESTABLISHED state. We know that the remote end has + * already cleared its counters, so it is safe to start + * writing/reading on this channel. + */ + ivc->tx_channel->state = ivc_state_established; + ivc_flush_counter(ivc, ivc->tx_handle + + offsetof(struct ivc_channel_header, w_count)); + + /* + * Notify remote end to observe state transition. + */ + ivc->notify(ivc); + + } else if (ivc->tx_channel->state == ivc_state_ack) { + /* + * At this point, we have observed the peer to be in either + * the ACK or ESTABLISHED state. Next, order observation of + * peer state before storing to tx_channel. + */ + ivc_rmb(); + + /* + * Move to ESTABLISHED state. We know that we have previously + * cleared our counters, and we know that the remote end has + * cleared its counters, so it is safe to start writing/reading + * on this channel. + */ + ivc->tx_channel->state = ivc_state_established; + ivc_flush_counter(ivc, ivc->tx_handle + + offsetof(struct ivc_channel_header, w_count)); + + /* + * Notify remote end to observe state transition. + */ + ivc->notify(ivc); + + } else { + /* + * There is no need to handle any further action. Either the + * channel is already fully established, or we are waiting for + * the remote end to catch up with our current state. Refer + * to the diagram in "IVC State Transition Table" above. + */ + } + + return ivc->tx_channel->state == ivc_state_established ? 0 : -EAGAIN; +} +EXPORT_SYMBOL(tegra_ivc_channel_notified); + +/* + * Temporary routine for re-synchronizing the channel across a reboot. + */ +int tegra_ivc_channel_sync(struct ivc *ivc) +{ + if ((ivc == NULL) || (ivc->nframes == 0)) { + return -EINVAL; + } else { + ivc->w_pos = ivc->tx_channel->w_count % ivc->nframes; + ivc->r_pos = ivc->rx_channel->r_count % ivc->nframes; + } + return 0; +} +EXPORT_SYMBOL(tegra_ivc_channel_sync); + +size_t tegra_ivc_align(size_t size) +{ + return (safe_add_u64(size, IVC_ALIGN - 1ULL)) & ~(IVC_ALIGN - 1ULL); +} +EXPORT_SYMBOL(tegra_ivc_align); + +unsigned int tegra_ivc_total_queue_size(unsigned int queue_size) +{ + BUILD_BUG_ON(sizeof(struct ivc_channel_header) > (UINT_MAX * 1ULL)); + if (queue_size & (IVC_ALIGN - 1)) { + pr_err("%s: queue_size (%u) must be %u-byte aligned\n", + __func__, queue_size, IVC_ALIGN); + return 0; + } + return safe_add_u32(queue_size, sizeof(struct ivc_channel_header)); +} +EXPORT_SYMBOL(tegra_ivc_total_queue_size); + +static int check_ivc_params(uintptr_t queue_base1, uintptr_t queue_base2, + unsigned int nframes, unsigned int frame_size) +{ + BUG_ON(offsetof(struct ivc_channel_header, w_count) & (IVC_ALIGN - 1)); + BUG_ON(offsetof(struct ivc_channel_header, r_count) & (IVC_ALIGN - 1)); + BUG_ON(sizeof(struct ivc_channel_header) & (IVC_ALIGN - 1)); + + if ((uint64_t)nframes * (uint64_t)frame_size >= 0x100000000) { + pr_err("nframes * frame_size overflows\n"); + return -EINVAL; + } + + /* + * The headers must at least be aligned enough for counters + * to be accessed atomically. 
+ */ + if (queue_base1 & (IVC_ALIGN - 1)) { + pr_err("ivc channel start not aligned: %lx\n", queue_base1); + return -EINVAL; + } + if (queue_base2 & (IVC_ALIGN - 1)) { + pr_err("ivc channel start not aligned: %lx\n", queue_base2); + return -EINVAL; + } + + if (frame_size & (IVC_ALIGN - 1)) { + pr_err("frame size not adequately aligned: %u\n", frame_size); + return -EINVAL; + } + + if (queue_base1 < queue_base2) { + if (queue_base1 + frame_size * nframes > queue_base2) { + pr_err("queue regions overlap: %lx + %x, %x\n", + queue_base1, frame_size, + frame_size * nframes); + return -EINVAL; + } + } else { + if (queue_base2 + frame_size * nframes > queue_base1) { + pr_err("queue regions overlap: %lx + %x, %x\n", + queue_base2, frame_size, + frame_size * nframes); + return -EINVAL; + } + } + + return 0; +} + +static int tegra_ivc_init_body(struct ivc *ivc, uintptr_t rx_base, + dma_addr_t rx_handle, uintptr_t tx_base, dma_addr_t tx_handle, + unsigned int nframes, unsigned int frame_size, + struct device *peer_device, void (*notify)(struct ivc *)) +{ + size_t queue_size; + + int result = check_ivc_params(rx_base, tx_base, nframes, frame_size); + if (result) + return result; + + BUG_ON(!ivc); + BUG_ON(!notify); + + if (!is_u32_u32__u32_multiplication_safe(nframes, frame_size)) + return -EFAULT; + + queue_size = tegra_ivc_total_queue_size(nframes * frame_size); + + /* + * All sizes that can be returned by communication functions should + * fit in an int. + */ + if (frame_size > INT_MAX) + return -E2BIG; + + ivc->rx_channel = (struct ivc_channel_header *)rx_base; + ivc->tx_channel = (struct ivc_channel_header *)tx_base; + + if (peer_device) { + if (rx_handle && !dma_mapping_error(peer_device, rx_handle)) { + ivc->rx_handle = rx_handle; + ivc->tx_handle = tx_handle; + } else { + ivc->rx_handle = dma_map_single(peer_device, + ivc->rx_channel, queue_size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(peer_device, ivc->rx_handle)) + return -ENOMEM; + + ivc->tx_handle = dma_map_single(peer_device, + ivc->tx_channel, queue_size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(peer_device, ivc->tx_handle)) { + dma_unmap_single(peer_device, ivc->rx_handle, + queue_size, DMA_BIDIRECTIONAL); + return -ENOMEM; + } + } + } + + ivc->notify = notify; + ivc->frame_size = frame_size; + ivc->nframes = nframes; + ivc->peer_device = peer_device; + + /* + * These values aren't necessarily correct until the channel has been + * reset. + */ + ivc->w_pos = 0; + ivc->r_pos = 0; + + return 0; +} + +int tegra_ivc_init(struct ivc *ivc, uintptr_t rx_base, uintptr_t tx_base, + unsigned int nframes, unsigned int frame_size, + struct device *peer_device, void (*notify)(struct ivc *)) +{ + return tegra_ivc_init_body(ivc, rx_base, 0, tx_base, + 0, nframes, frame_size, peer_device, notify); +} +EXPORT_SYMBOL(tegra_ivc_init); + +int tegra_ivc_init_with_dma_handle(struct ivc *ivc, uintptr_t rx_base, + dma_addr_t rx_handle, uintptr_t tx_base, dma_addr_t tx_handle, + unsigned int nframes, unsigned int frame_size, + struct device *peer_device, void (*notify)(struct ivc *)) +{ + return tegra_ivc_init_body(ivc, rx_base, rx_handle, tx_base, + tx_handle, nframes, frame_size, peer_device, notify); +} +EXPORT_SYMBOL(tegra_ivc_init_with_dma_handle); diff --git a/include/linux/tegra-aon.h b/include/linux/tegra-aon.h new file mode 100644 index 00000000..5ec9b0d9 --- /dev/null +++ b/include/linux/tegra-aon.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2023, NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + */ + +#ifndef _LINUX_TEGRA_AON_H +#define _LINUX_TEGRA_AON_H + +struct tegra_aon_mbox_msg { + int length; + void *data; +}; + +#endif diff --git a/include/linux/tegra-ivc-instance.h b/include/linux/tegra-ivc-instance.h new file mode 100644 index 00000000..9bb6c5d3 --- /dev/null +++ b/include/linux/tegra-ivc-instance.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + */ + +#ifndef __TEGRA_IVC_INSTANCE_H__ +#define __TEGRA_IVC_INSTANCE_H__ + +#include +#include + +#define IVC_ALIGN 64 + +struct ivc_channel_header; + +struct ivc { + struct ivc_channel_header *rx_channel, *tx_channel; + uint32_t w_pos, r_pos; + + void (*notify)(struct ivc *); + uint32_t nframes, frame_size; + + struct device *peer_device; + dma_addr_t rx_handle, tx_handle; +}; + +/* FIXME: overrides to avoid conflict with upstreamed ivc APIs. */ +#define tegra_ivc_init nv_tegra_ivc_init +#define tegra_ivc_init_with_dma_handle nv_tegra_ivc_init_with_dma_handle +#define tegra_ivc_total_queue_size nv_tegra_ivc_total_queue_size +#define tegra_ivc_write_user nv_tegra_ivc_write_user +#define tegra_ivc_read_user nv_tegra_ivc_read_user +#define tegra_ivc_align nv_tegra_ivc_align +#define tegra_ivc_channel_sync nv_tegra_ivc_channel_sync + + +int tegra_ivc_init(struct ivc *ivc, uintptr_t rx_base, uintptr_t tx_base, + unsigned nframes, unsigned frame_size, + struct device *peer_device, void (*notify)(struct ivc *)); +int tegra_ivc_init_with_dma_handle(struct ivc *ivc, uintptr_t rx_base, + dma_addr_t rx_handle, uintptr_t tx_base, dma_addr_t tx_handle, + unsigned nframes, unsigned frame_size, + struct device *peer_device, void (*notify)(struct ivc *)); +unsigned tegra_ivc_total_queue_size(unsigned queue_size); +int tegra_ivc_write_user(struct ivc *ivc, const void __user *user_buf, + size_t size); +int tegra_ivc_read_user(struct ivc *ivc, void __user *buf, size_t max_read); +size_t tegra_ivc_align(size_t size); +int tegra_ivc_channel_sync(struct ivc *ivc); + +#endif /* __TEGRA_IVC_INSTANCE_H__ */ diff --git a/include/linux/tegra-ivc.h b/include/linux/tegra-ivc.h new file mode 100644 index 00000000..d8f7a6f3 --- /dev/null +++ b/include/linux/tegra-ivc.h @@ -0,0 +1,475 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ */ + +#ifndef __TEGRA_IVC_H +#define __TEGRA_IVC_H + +#include +#include + +struct device_node; + +/* in kernel interfaces */ + +struct tegra_hv_ivc_ops; + +struct tegra_hv_ivc_cookie { + /* some fields that might be useful */ + uint32_t irq; + int peer_vmid; + int nframes; + int frame_size; + uint32_t *notify_va; /* address used to notify end-point */ +}; + +struct tegra_hv_ivc_ops { + /* called when data are received */ + void (*rx_rdy)(struct tegra_hv_ivc_cookie *ivck); + /* called when space is available to write data */ + void (*tx_rdy)(struct tegra_hv_ivc_cookie *ivck); +}; + +struct ivc; + +struct tegra_hv_ivm_cookie { + uint64_t ipa; + uint64_t size; + unsigned peer_vmid; + void *reserved; +}; + +/* FIXME: overrides to avoid conflict with upstreamed ivc APIs */ +#define tegra_ivc_can_read nv_tegra_ivc_can_read +#define tegra_ivc_can_write nv_tegra_ivc_can_write +#define tegra_ivc_tx_empty nv_tegra_ivc_tx_empty +#define tegra_ivc_tx_frames_available nv_tegra_ivc_tx_frames_available +#define tegra_ivc_read nv_tegra_ivc_read +#define tegra_ivc_read_peek nv_tegra_ivc_read_peek +#define tegra_ivc_read_get_next_frame nv_tegra_ivc_read_get_next_frame +#define tegra_ivc_read_advance nv_tegra_ivc_read_advance +#define tegra_ivc_write nv_tegra_ivc_write +#define tegra_ivc_write_poke nv_tegra_ivc_write_poke +#define tegra_ivc_write_get_next_frame nv_tegra_ivc_write_get_next_frame +#define tegra_ivc_write_advance nv_tegra_ivc_write_advance +#define tegra_ivc_channel_reset nv_tegra_ivc_channel_reset +#define tegra_ivc_channel_notified nv_tegra_ivc_channel_notified + +int tegra_ivc_write(struct ivc *ivc, const void *buf, size_t size); +int tegra_ivc_read(struct ivc *ivc, void *buf, size_t size); +int tegra_ivc_can_read(struct ivc *ivc); +int tegra_ivc_can_write(struct ivc *ivc); +uint32_t tegra_ivc_tx_frames_available(struct ivc *ivc); +int tegra_ivc_tx_empty(struct ivc *ivc); +int tegra_ivc_read_peek(struct ivc *ivc, void *buf, size_t off, size_t count); +void *tegra_ivc_read_get_next_frame(struct ivc *ivc); +int tegra_ivc_read_advance(struct ivc *ivc); +int tegra_ivc_write_poke(struct ivc *ivc, const void *buf, size_t off, + size_t count); +void *tegra_ivc_write_get_next_frame(struct ivc *ivc); +int tegra_ivc_write_advance(struct ivc *ivc); +int tegra_ivc_channel_notified(struct ivc *ivc); +void tegra_ivc_channel_reset(struct ivc *ivc); + +#ifdef CONFIG_TEGRA_HV_MANAGER +/** + * tegra_hv_ivc_reserve - Reserve an IVC queue for use + * @dn: Device node pointer to the queue in the DT + * If NULL, then operate on first HV device + * @queue_id Id number of the queue to use. + * @ops Ops structure or NULL (deprecated) + * + * Reserves the queue for use + * + * Returns a pointer to the ivc_dev to use or an ERR_PTR. + * Note that returning EPROBE_DEFER means that the ivc driver + * hasn't loaded yet and you should try again later in the + * boot sequence. + * + * Note that @ops must be NULL for channels that handle reset. 
+ */
+struct tegra_hv_ivc_cookie *tegra_hv_ivc_reserve(
+		struct device_node *dn, uint32_t id,
+		const struct tegra_hv_ivc_ops *ops);
+
+/**
+ * tegra_hv_ivc_unreserve - Unreserve an IVC queue
+ * @ivck	IVC cookie
+ *
+ * Unreserves the IVC channel
+ *
+ * Returns 0 on success and an error code otherwise
+ */
+int tegra_hv_ivc_unreserve(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_write - Writes a frame to the IVC queue
+ * @ivck	IVC cookie of the queue
+ * @buf		Pointer to the data to write
+ * @size	Size of the data to write
+ *
+ * Writes a number of bytes (as a single frame) to the queue.
+ *
+ * Returns size on success and an error code otherwise
+ */
+int tegra_hv_ivc_write(struct tegra_hv_ivc_cookie *ivck, const void *buf,
+		       size_t size);
+
+/**
+ * tegra_hv_ivc_write_user - Writes a frame to the IVC queue
+ * @ivck	IVC cookie of the queue
+ * @buf		Pointer to the userspace data to write
+ * @size	Size of the data to write
+ *
+ * Writes a number of bytes (as a single frame) to the queue.
+ *
+ * Returns size on success and an error code otherwise
+ */
+int tegra_hv_ivc_write_user(struct tegra_hv_ivc_cookie *ivck, const void __user *buf,
+		       size_t size);
+
+/**
+ * tegra_hv_ivc_read - Reads a frame from the IVC queue
+ * @ivck	IVC cookie of the queue
+ * @buf		Pointer to the data to read
+ * @size	max size of the data to read
+ *
+ * Reads a number of bytes (as a single frame) from the queue.
+ *
+ * Returns size on success and an error code otherwise
+ */
+int tegra_hv_ivc_read(struct tegra_hv_ivc_cookie *ivck, void *buf, size_t size);
+
+/**
+ * tegra_hv_ivc_read_user - Reads a frame from the IVC queue
+ * @ivck	IVC cookie of the queue
+ * @buf		Pointer to the userspace data to read
+ * @size	max size of the data to read
+ *
+ * Reads a number of bytes (as a single frame) from the queue.
+ *
+ * Returns size on success and an error code otherwise
+ */
+int tegra_hv_ivc_read_user(struct tegra_hv_ivc_cookie *ivck, void __user *buf, size_t size);
+
+/**
+ * tegra_hv_ivc_can_read - Test whether data is available
+ * @ivck	IVC cookie of the queue
+ *
+ * Test whether data is available to read
+ *
+ * Returns 1 if data is available in the rx queue, 0 if not
+ */
+int tegra_hv_ivc_can_read(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_can_write - Test whether data can be written
+ * @ivck	IVC cookie of the queue
+ *
+ * Test whether data can be written
+ *
+ * Returns 1 if data can be written to the tx queue, 0 if not
+ */
+int tegra_hv_ivc_can_write(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_tx_frames_available - Get the number of free entries in the tx queue
+ * @ivck	IVC cookie of the queue
+ *
+ * Returns the number of unused entries in the tx queue. Assuming the caller
+ * does not write any additional frames, this number may increase from the
+ * value returned as the receiver consumes frames.
+ *
+ */
+uint32_t tegra_hv_ivc_tx_frames_available(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_tx_empty - Test whether the tx queue is empty
+ * @ivck	IVC cookie of the queue
+ *
+ * Test whether the tx queue is completely empty
+ *
+ * Returns 1 if the queue is empty, zero otherwise
+ */
+int tegra_hv_ivc_tx_empty(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_set_loopback - Sets (or clears) loopback mode
+ * @ivck	IVC cookie of the queue
+ * @mode	Set loopback on/off (1 = on, 0 = off)
+ *
+ * Sets or clears loopback mode accordingly.
+ *
+ * When loopback is active any writes are ignored, while
+ * reads do not return data.
+ * Incoming data are copied immediately to the tx queue.
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int tegra_hv_ivc_set_loopback(struct tegra_hv_ivc_cookie *ivck, int mode);
+
+/* debugging aid */
+int tegra_hv_ivc_dump(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_read_peek - Peek (copying) data from a received frame
+ * @ivck	IVC cookie of the queue
+ * @buf		Buffer to receive the data
+ * @off		Offset in the frame
+ * @count	Count of bytes to copy
+ *
+ * Peek data from a received frame, copying to buf, without removing
+ * the frame from the queue.
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int tegra_hv_ivc_read_peek(struct tegra_hv_ivc_cookie *ivck,
+		void *buf, int off, int count);
+
+/**
+ * tegra_hv_ivc_read_get_next_frame - Peek at the next frame to receive
+ * @ivck	IVC cookie of the queue
+ *
+ * Peek at the next frame to be received, without removing it from
+ * the queue.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_hv_ivc_read_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_read_advance - Advance the read queue
+ * @ivck	IVC cookie of the queue
+ *
+ * Advance the read queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_hv_ivc_read_advance(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_write_poke - Poke data to a frame to be transmitted
+ * @ivck	IVC cookie of the queue
+ * @buf		Buffer to the data
+ * @off		Offset in the frame
+ * @count	Count of bytes to copy
+ *
+ * Copy data to a transmit frame, copying from buf, without advancing
+ * the transmit queue.
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int tegra_hv_ivc_write_poke(struct tegra_hv_ivc_cookie *ivck,
+		const void *buf, int off, int count);
+
+/**
+ * tegra_hv_ivc_write_get_next_frame - Poke at the next frame to transmit
+ * @ivck	IVC cookie of the queue
+ *
+ * Get access to the next frame.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_hv_ivc_write_get_next_frame(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_write_advance - Advance the write queue
+ * @ivck	IVC cookie of the queue
+ *
+ * Advance the write queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_hv_ivc_write_advance(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_mempool_reserve - reserve a mempool for use
+ * @id		Id of the requested mempool.
+ *
+ * Returns a cookie representing the mempool on success, otherwise an ERR_PTR.
+ */
+struct tegra_hv_ivm_cookie *tegra_hv_mempool_reserve(unsigned id);
+
+/**
+ * tegra_hv_mempool_unreserve - release a reserved mempool
+ * @ck		Cookie returned by tegra_hv_mempool_reserve().
+ *
+ * Returns 0 on success or a negative error code otherwise.
+ */
+int tegra_hv_mempool_unreserve(struct tegra_hv_ivm_cookie *ck);
+
+/**
+ * tegra_hv_ivc_channel_notified - handle internal messages
+ * @ivck	IVC cookie of the queue
+ *
+ * This function must be called following every notification (interrupt or
+ * callback invocation).
+ *
+ * Returns 0 if the channel is ready for communication, or -EAGAIN if a channel
+ * reset is in progress.
+ */
+int tegra_hv_ivc_channel_notified(struct tegra_hv_ivc_cookie *ivck);
+
+/**
+ * tegra_hv_ivc_channel_reset - initiates a reset of the shared memory state
+ * @ivck	IVC cookie of the queue
+ *
+ * This function must be called after a channel is reserved before it is used
+ * for communication.
The channel will be ready for use when a subsequent call + * to ivc_channel_notified() returns 0. + */ +void tegra_hv_ivc_channel_reset(struct tegra_hv_ivc_cookie *ivck); + +/** + * tegra_hv_ivc_get_info - Get info of Guest shared area + * @ivck IVC cookie of the queue + * @pa IPA of shared area + * @size Size of the shared area + * + * Get info (IPA and size) of Guest shared area + * + * Returns size on success and an error code otherwise + */ +int tegra_hv_ivc_get_info(struct tegra_hv_ivc_cookie *ivck, uint64_t *pa, + uint64_t *size); + +/** + * tegra_hv_ivc_notify - Notify remote guest + * @ivck IVC cookie of the queue + * + * Notify remote guest + * + */ +void tegra_hv_ivc_notify(struct tegra_hv_ivc_cookie *ivck); + +struct ivc *tegra_hv_ivc_convert_cookie(struct tegra_hv_ivc_cookie *ivck); +#else +static inline struct tegra_hv_ivc_cookie *tegra_hv_ivc_reserve( + struct device_node *dn, int id, + const struct tegra_hv_ivc_ops *ops) +{ + return ERR_PTR(-ENODEV); +} + +static inline int tegra_hv_ivc_unreserve(struct tegra_hv_ivc_cookie *ivck) +{ + return -ENODEV; +} + +static inline int tegra_hv_ivc_write(struct tegra_hv_ivc_cookie *ivck, + const void *buf, size_t size) +{ + return -ENODEV; +} + +static inline int tegra_hv_ivc_read(struct tegra_hv_ivc_cookie *ivck, + void *buf, size_t size) +{ + return -ENODEV; +} + +static inline int tegra_hv_ivc_can_read(struct tegra_hv_ivc_cookie *ivck) +{ + return 0; +} + +static inline int tegra_hv_ivc_can_write(struct tegra_hv_ivc_cookie *ivck) +{ + return 0; +} + +static inline uint32_t tegra_hv_ivc_tx_frames_available( + struct tegra_hv_ivc_cookie *ivck) +{ + return 0; +} + +static inline int tegra_hv_ivc_tx_empty(struct tegra_hv_ivc_cookie *ivck) +{ + return 0; +} + +static inline int tegra_hv_ivc_set_loopback(struct tegra_hv_ivc_cookie *ivck, + int mode) +{ + return -ENODEV; +} + +static inline int tegra_hv_ivc_dump(struct tegra_hv_ivc_cookie *ivck) +{ + return -ENODEV; +} + +static inline int tegra_hv_ivc_read_peek(struct tegra_hv_ivc_cookie *ivck, + void *buf, int off, int count) +{ + return -ENODEV; +} + +static inline void *tegra_hv_ivc_read_get_next_frame( + struct tegra_hv_ivc_cookie *ivck) +{ + return ERR_PTR(-ENODEV); +} + +static inline int tegra_hv_ivc_read_advance(struct tegra_hv_ivc_cookie *ivck) +{ + return -ENODEV; +} + +static inline int tegra_hv_ivc_write_poke(struct tegra_hv_ivc_cookie *ivck, + const void *buf, int off, int count) +{ + return -ENODEV; +} + +static inline void *tegra_hv_ivc_write_get_next_frame( + struct tegra_hv_ivc_cookie *ivck) +{ + return ERR_PTR(-ENODEV); +} + +static inline int tegra_hv_ivc_write_advance(struct tegra_hv_ivc_cookie *ivck) +{ + return -ENODEV; +} + +static inline struct tegra_hv_ivm_cookie *tegra_hv_mempool_reserve(unsigned id) +{ + return ERR_PTR(-ENODEV); +} + +static inline int tegra_hv_mempool_unreserve(struct tegra_hv_ivm_cookie *ck) +{ + return -ENODEV; +} + +static inline int tegra_hv_ivc_channel_notified( + struct tegra_hv_ivc_cookie *ivck) +{ + return -ENODEV; +} + +static inline void tegra_hv_ivc_channel_reset(struct tegra_hv_ivc_cookie *ivck) +{ +} + +static inline int tegra_hv_ivc_get_info(struct tegra_hv_ivc_cookie *ivck, + uint64_t *pa, uint64_t *size) +{ + return -ENODEV; +} + +static inline void tegra_hv_ivc_notify(struct tegra_hv_ivc_cookie *ivck) +{ +} + +static inline struct ivc *tegra_hv_ivc_convert_cookie( + struct tegra_hv_ivc_cookie *ivck) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#endif
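
Reviewer note (not part of the patch): a minimal sketch of how a kernel client
could bring up one IVC channel with the non-hypervisor API added above. The
names my_notify() and my_channel_setup(), the queue base addresses and the
16 x 64-byte frame geometry are made-up placeholders; in this series the AON
mail code is the intended user, with the IPC carveout providing the queue
memory and the HSP mailboxes providing the doorbell.

/* Illustrative only -- not part of this patch. */
#include <linux/tegra-ivc.h>
#include <linux/tegra-ivc-instance.h>

/* Doorbell stub: a real client would ring the peer here, e.g. via HSP. */
static void my_notify(struct ivc *ivc)
{
}

static int my_channel_setup(struct ivc *ivc, uintptr_t rx_base,
			    uintptr_t tx_base, struct device *peer_dev)
{
	/* 16 frames of 64 bytes each; both bases must be IVC_ALIGN-aligned. */
	int ret = tegra_ivc_init(ivc, rx_base, tx_base, 16, 64,
				 peer_dev, my_notify);
	if (ret)
		return ret;

	/* Start the reset handshake; this advertises our SYNC state. */
	tegra_ivc_channel_reset(ivc);

	/*
	 * On every notification from the peer, call
	 * tegra_ivc_channel_notified() until it returns 0; only then is the
	 * channel established and tegra_ivc_write()/tegra_ivc_read() may be
	 * used on it.
	 */
	return 0;
}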