diff --git a/drivers/media/platform/tegra/camera/Makefile b/drivers/media/platform/tegra/camera/Makefile index 45e4a713..876107c6 100644 --- a/drivers/media/platform/tegra/camera/Makefile +++ b/drivers/media/platform/tegra/camera/Makefile @@ -51,5 +51,8 @@ obj-m += tests/ tegra-capture-isp-objs += fusa-capture/capture-isp-channel.o tegra-capture-isp-objs += fusa-capture/capture-isp.o obj-m += tegra-capture-isp.o + +tegra-capture-coe-objs += coe/rtcpu-coe.o +obj-m += tegra-capture-coe.o endif endif diff --git a/drivers/media/platform/tegra/camera/coe/rtcpu-coe.c b/drivers/media/platform/tegra/camera/coe/rtcpu-coe.c new file mode 100644 index 00000000..1dfc1483 --- /dev/null +++ b/drivers/media/platform/tegra/camera/coe/rtcpu-coe.c @@ -0,0 +1,1936 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. + * All rights reserved. + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#define ETHER_PACKET_HDR_SIZE 64U + + +/** Helper macros to get the lower and higher 32bits of 64bit address */ +#define L32(data) ((uint32_t)((data) & 0xFFFFFFFFU)) +#define H32(data) ((uint32_t)(((data) & 0xFFFFFFFF00000000UL) >> 32UL)) + +/** HW OWN bit for the Rx desciptor in MGBE */ +#define RDES3_OWN BIT(31) + +/** Corresponds to max number of Virtual DMA channels in MGBE device HW */ +#define MAX_HW_CHANS_PER_DEVICE 48U +/** How many capture channels can actually be opened on each MGBE device */ +#define MAX_ACTIVE_CHANS_PER_DEVICE 8U +#define MAX_NUM_COE_DEVICES 4U +#define MAX_ACTIVE_COE_CHANNELS (MAX_ACTIVE_CHANS_PER_DEVICE * MAX_NUM_COE_DEVICES) + +/** Size of a single Rx descriptor */ +#define MGBE_RXDESC_SIZE 16U +/** Size of a Packet Info descriptor */ +#define MGBE_PKTINFO_DESC_SIZE 16U +/** Max size of a buffer to be used to store Ethernet packet header (stripped from data payload) */ +#define COE_MAX_PKT_HEADER_SIZE 64U + +/** Maximum number of Rx descriptors in a Rx ring for a single channel */ +#define COE_MGBE_MAX_RXDESC_NUM 16384U +/** Maximum number of descriptors in a Rx packet info ring for a single channel */ +#define COE_MGBE_MAX_PKTINFO_NUM 4096U + +/** + * @brief Invalid CoE channel ID; the channel is not initialized. 
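+ * A channel's rce_chan_id holds this value until CAPTURE_COE_CHANNEL_SETUP_REQ
+ * succeeds, and it is restored when the channel is closed.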
+ */ +#define CAPTURE_COE_CHANNEL_INVALID_ID U32_C(0xFFFFFFFF) + +#define CAPTURE_COE_CHAN_INVALID_HW_ID U8_C(0xFF) + +#define COE_CHAN_CAPTURE_QUEUE_LEN 32U +/** Max number of physical DMA channel for each Eth controller */ +#define COE_MGBE_MAX_NUM_PDMA_CHANS 10U +#define COE_MGBE_PDMA_CHAN_INVALID COE_MGBE_MAX_NUM_PDMA_CHANS + +/** Total max size of all Rx descriptors rings for all possible channels */ +#define COE_TOTAL_RXDESCR_MEM_SIZE roundup_pow_of_two( \ + (COE_MGBE_MAX_RXDESC_NUM * MAX_ACTIVE_COE_CHANNELS * MGBE_RXDESC_SIZE) + \ + (COE_MGBE_MAX_PKTINFO_NUM * COE_MGBE_MAX_NUM_PDMA_CHANS * MAX_NUM_COE_DEVICES * \ + MGBE_PKTINFO_DESC_SIZE)) + +/** State associated with a physical DMA channel of an Eth controller */ +struct coe_pdma_state { + /* Rx packet info memory DMA address for MGBE engine */ + dma_addr_t rx_pktinfo_dma_mgbe; + /* Rx packet info memory DMA address for RCE engine */ + dma_addr_t rx_pktinfo_dma_rce; +}; + +/** Rx descriptor shadow ring for MGBE */ +struct mgbe_rx_desc { + uint32_t rdes0; + uint32_t rdes1; + uint32_t rdes2; + uint32_t rdes3; +}; + +struct coe_state { + struct platform_device *pdev; + + struct device *rtcpu_dev; + /* Platform device object for MGBE controller (not a netdevice) */ + struct device *mgbe_dev; + /** An ID of a corresponding mgbe_dev */ + u32 mgbe_id; + + struct notifier_block netdev_nb; + + struct list_head channels; + /* Number of Rx descriptors in a descriptors ring for each channel */ + u16 rx_ring_size; + /* Number of Rx Packet Info descriptors */ + u16 rx_pktinfo_ring_size; + + /** MGBE DMA mapping of a memory area for Rx descriptors */ + struct sg_table rx_pktinfo_mgbe_sgt; + + /* Bitmap indicating which DMA channels of the device are used for camera */ + DECLARE_BITMAP(dmachans_map, MAX_HW_CHANS_PER_DEVICE); + /** Track how VDMAs map to physical DMA (PDMA) */ + u8 vdma2pdma_map[MAX_HW_CHANS_PER_DEVICE]; + /* List entry in a global list of probed devices */ + struct list_head device_entry; + + /** State of PDMA channels */ + struct coe_pdma_state pdmas[COE_MGBE_MAX_NUM_PDMA_CHANS]; + + /** Protect access to the state object */ + struct mutex access_lock; + + /** MGBE IRQ ID which must be handled by camera CPU */ + u8 mgbe_irq_id; +}; + +struct coe_capreq_state_inhw { + struct capture_common_unpins unpins; + /**< Capture number passed with coe_ioctl_data_capture_req, assigned by a user + * to track the capture number in userspace. + * Valid range: [0, UINT32_MAX]. 
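+ * The same value is reported back as capture_number by COE_IOCTL_CAPTURE_STATUS,
+ * which lets userspace match completions to requests.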
+ */
+	u32 user_capture_id;
+};
+
+struct coe_capreq_state_unreported {
+	u32 capture_status;
+	u32 user_capture_id;
+	u64 eofTimestamp;
+	u64 sofTimestamp;
+	u32 errData;
+};
+
+/* State of a single CoE (Camera Over Ethernet) capture channel */
+struct coe_channel_state {
+	/* Device object for the channel, from device_create */
+	struct device *dev;
+	/* Pointer to a parent platform device */
+	struct coe_state *parent;
+	/* List entry to allow parent platform device to keep track of its channels */
+	struct list_head list_entry;
+	/* Network device servicing the channel (child of a &parent) */
+	struct net_device *netdev;
+
+	/* Serialize operations on the channel */
+	struct mutex channel_lock;
+
+	/* Ethernet engine HW DMA channel ID which services memory accesses for
+	 * this CoE channel */
+	u8 dma_chan;
+	/* Minor device ID, as registered with kernel (under /dev/ path) */
+	dev_t devt;
+	/* Channel ID assigned by RCE */
+	u32 rce_chan_id;
+
+	u8 sensor_mac_addr[ETH_ALEN];
+
+	/* Flag indicating whether the channel has been open()'ed by userspace */
+	bool opened;
+
+	/* Scratch space to store a response from RCE */
+	struct CAPTURE_CONTROL_MSG rce_resp_msg;
+	/* Serialize accessing RCE response rce_resp_msg */
+	struct mutex rce_msg_lock;
+	/* Indication that RCE has responded to a command and response data
+	 * is available in rce_resp_msg
+	 */
+	struct completion rce_resp_ready;
+	/**< Signalled for each capture status indication on the capture IVC channel */
+	struct completion capture_resp_ready;
+
+	/* MGBE-side DMA mapping of the Rx descriptor ring memory */
+	struct sg_table rx_desc_mgbe_sgt;
+	/* Rx descriptor ring memory DMA address for RCE engine */
+	dma_addr_t rx_desc_dma_rce;
+
+	/* Virtual pointer to Eth packet header memory, for each Rx descriptor */
+	void *rx_pkt_hdrs;
+	/* Rx packet headers memory DMA address for MGBE engine */
+	dma_addr_t rx_pkt_hdrs_dma_mgbe;
+
+	/* Virtual pointer to 'prefilled' Eth shadow Rx ring memory */
+	void *rx_desc_shdw;
+	/* Rx desc shadow ring address for RCE engine */
+	dma_addr_t rx_desc_shdw_dma_rce;
+
+	/** "dummy" buffer which RCE can use as a scratch space */
+	struct capture_common_buf rx_dummy_buf;
+
+	/** A PDMA channel which services this CoE channel */
+	u8 pdma_id;
+
+	/**< Surface buffer management table */
+	struct capture_buffer_table *buf_ctx;
+
+	/**< Queue of capture requests waiting for capture completion from RCE */
+	struct coe_capreq_state_inhw capq_inhw[COE_CHAN_CAPTURE_QUEUE_LEN];
+	/**< Protect capq_inhw access */
+	struct mutex capq_inhw_lock;
+	/**< Number of elements in capq_inhw */
+	u16 capq_inhw_pending;
+	/**< Next write index in capq_inhw */
+	u16 capq_inhw_wr;
+	/**< Next read index in capq_inhw */
+	u16 capq_inhw_rd;
+
+	/**< Captures reported by RCE, waiting to be reported to an app */
+	struct coe_capreq_state_unreported capq_appreport[COE_CHAN_CAPTURE_QUEUE_LEN];
+	/**< Protect capq_appreport access */
+	struct mutex capq_appreport_lock;
+	/**< Number of elements in capq_appreport */
+	u16 capq_appreport_pending;
+	/**< Next write index in capq_appreport */
+	u16 capq_appreport_wr;
+	/**< Next read index in capq_appreport */
+	u16 capq_appreport_rd;
+};
+
+/**
+ * @brief Set up CoE channel resources and request FW channel allocation in RCE.
+ *
+ * @param[in] ptr Pointer to a struct @ref coe_ioctl_data_capture_setup
+ *
+ * @returns 0 (success), neg. errno (failure)
+ */
+#define COE_IOCTL_CAPTURE_SETUP \
+	_IOW('I', 1, struct coe_ioctl_data_capture_setup)
+
+/**
+ * @brief Perform an operation on the buffer as specified in the IOCTL
+ * payload.
+ *
+ * @param[in] ptr Pointer to a struct @ref coe_ioctl_data_buffer_op
+ * @returns 0 (success), neg. errno (failure)
+ */
+#define COE_IOCTL_BUFFER_OP \
+	_IOW('I', 2, struct coe_ioctl_data_buffer_op)
+
+/**
+ * @brief Enqueue a capture request.
+ *
+ * @param[in] ptr Pointer to a struct @ref coe_ioctl_data_capture_req
+ * @returns 0 (success), neg. errno (failure)
+ */
+#define COE_IOCTL_CAPTURE_REQ \
+	_IOW('I', 3, struct coe_ioctl_data_capture_req)
+
+/**
+ * Wait on the next completion of an enqueued frame, signalled by RCE.
+ *
+ * @note This call completes for the frame at the head of the FIFO queue, and is
+ * not necessarily for the most recently enqueued capture request.
+ *
+ * @param[in,out] ptr Pointer to a struct @ref coe_ioctl_data_capture_status
+ *
+ * @returns 0 (success), neg. errno (failure)
+ */
+#define COE_IOCTL_CAPTURE_STATUS \
+	_IOWR('I', 4, struct coe_ioctl_data_capture_status)
+
+/**
+ * @brief Get information about an open channel.
+ *
+ * @param[out] ptr Pointer to a struct @ref coe_ioctl_data_get_info
+ * @returns 0 (success), neg. errno (failure)
+ */
+#define COE_IOCTL_GET_INFO \
+	_IOR('I', 5, struct coe_ioctl_data_get_info)
+
+/* List of all CoE platform devices which were successfully probed */
+static LIST_HEAD(coe_device_list);
+/* Lock to protect the list of CoE platform devices */
+static DEFINE_MUTEX(coe_device_list_lock);
+
+static struct class *coe_channel_class;
+static int coe_channel_major;
+static struct coe_channel_state coe_channels_arr[MAX_ACTIVE_COE_CHANNELS];
+static DEFINE_MUTEX(coe_channels_arr_lock);
+
+/* The RCE CPU manages the Rx descriptor ring. RCE has a D-cache, so every
+ * access to the Rx descriptors would require cache management on the RCE side.
+ * Since a single descriptor may not fill an entire CACHELINE_SIZE, descriptors
+ * can be unintentionally corrupted when RCE handles other descriptors on the
+ * same cache line.
+ * To avoid that, RCE uses uncached access to the descriptors. However, an
+ * uncached mapping region can be configured only in power-of-two sized chunks,
+ * and the number of such mapping regions is very limited.
+ * Therefore, allocate a single large buffer to contain the descriptor rings
+ * for all possible channels.
+ */
+static struct device *g_rtcpu_dev = NULL;
+static void *g_rx_descr_mem_area = NULL;
+static dma_addr_t g_rxdesc_mem_dma_rce;
+static struct sg_table g_rxdesc_rce_sgt;
+
+static inline struct coe_channel_state *coe_channel_arr_find_free(u32 * const arr_idx)
+{
+	u32 i;
+
+	for (i = 0U; i < ARRAY_SIZE(coe_channels_arr); i++) {
+		if (coe_channels_arr[i].dev == NULL) {
+			*arr_idx = i;
+			return &coe_channels_arr[i];
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * A callback to process RCE responses to commands issued through the
+ * capture-control IVC channel (struct CAPTURE_CONTROL_MSG).
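+ * The response is copied into the per-channel rce_resp_msg scratch buffer and
+ * rce_resp_ready is completed; the waiting sender validates msg_id (and the
+ * transaction for setup) under rce_msg_lock before consuming it.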
+ */ +static void coe_rce_cmd_control_response_cb(const void *ivc_resp, const void *pcontext) +{ + const struct CAPTURE_CONTROL_MSG *r = ivc_resp; + struct coe_channel_state * const ch = (struct coe_channel_state *)pcontext; + + switch (r->header.msg_id) { + case CAPTURE_CHANNEL_SETUP_RESP: + case CAPTURE_COE_CHANNEL_RESET_RESP: + case CAPTURE_COE_CHANNEL_RELEASE_RESP: + ch->rce_resp_msg = *r; + complete(&ch->rce_resp_ready); + break; + default: + dev_err(ch->dev, "unknown RCE control resp 0x%x", + r->header.msg_id); + break; + } +} + +static inline void coe_chan_buf_release(struct capture_buffer_table * const buf_ctx, + struct coe_capreq_state_inhw * const buf) +{ + struct capture_common_unpins * const unpins = &buf->unpins; + + for (u32 i = 0U; i < unpins->num_unpins; i++) { + if (buf_ctx != NULL && unpins->data[i] != NULL) + put_mapping(buf_ctx, unpins->data[i]); + unpins->data[i] = NULL; + } + unpins->num_unpins = 0U; +} + +/* + * A callback to process RCE responses to commands issued through capture + * IVC channel (struct CAPTURE_MSG). + */ +static void coe_rce_cmd_capture_response_cb(const void *ivc_resp, + const void *pcontext) +{ + struct CAPTURE_MSG *msg = (struct CAPTURE_MSG *)ivc_resp; + struct coe_channel_state * const ch = (struct coe_channel_state *)pcontext; + + if (ch == NULL || msg == NULL) { + pr_err_ratelimited("Invalid RCE msg\n"); + return; + } + + switch (msg->header.msg_id) { + case CAPTURE_COE_STATUS_IND: + { + struct coe_capreq_state_unreported *unrep; + u32 buf_idx; + u32 capture_status; + u32 user_capture_id; + + buf_idx = msg->capture_coe_status_ind.buffer_index; + capture_status = msg->capture_coe_status_ind.capture_status; + + mutex_lock(&ch->capq_inhw_lock); + + if (ch->capq_inhw_pending == 0U) { + mutex_unlock(&ch->capq_inhw_lock); + return; + } + + if (ch->capq_inhw_rd != buf_idx) { + dev_warn_ratelimited(ch->dev, "Unexpected capture buf %u (expected %u)", + buf_idx, ch->capq_inhw_rd); + mutex_unlock(&ch->capq_inhw_lock); + return; + } + + user_capture_id = ch->capq_inhw[buf_idx].user_capture_id; + coe_chan_buf_release(ch->buf_ctx, &ch->capq_inhw[buf_idx]); + + ch->capq_inhw_pending--; + ch->capq_inhw_rd = (ch->capq_inhw_rd + 1U) % ARRAY_SIZE(ch->capq_inhw); + + mutex_unlock(&ch->capq_inhw_lock); + + mutex_lock(&ch->capq_appreport_lock); + + if (ch->rce_chan_id == CAPTURE_COE_CHANNEL_INVALID_ID) { + /* Channel was closed */ + mutex_unlock(&ch->capq_appreport_lock); + return; + } + + if (ch->capq_appreport_pending >= ARRAY_SIZE(ch->capq_appreport)) { + dev_warn_ratelimited(ch->dev, "No space to report capture %u", + buf_idx); + mutex_unlock(&ch->capq_appreport_lock); + return; + } + + unrep = &ch->capq_appreport[ch->capq_appreport_wr]; + unrep->capture_status = capture_status; + unrep->user_capture_id = user_capture_id; + unrep->eofTimestamp = msg->capture_coe_status_ind.timestamp_eof_ns; + unrep->sofTimestamp = msg->capture_coe_status_ind.timestamp_sof_ns; + unrep->errData = 0U; + + ch->capq_appreport_pending++; + ch->capq_appreport_wr = + (ch->capq_appreport_wr + 1U) % ARRAY_SIZE(ch->capq_appreport); + + mutex_unlock(&ch->capq_appreport_lock); + + complete(&ch->capture_resp_ready); + + break; + } + default: + dev_err_ratelimited(ch->dev, "unknown RCE msg %u", msg->header.msg_id); + break; + } +} + +static int coe_channel_open_on_rce(struct coe_channel_state *ch, + uint8_t sensor_mac_addr[ETH_ALEN], + uint8_t vlan_enable) +{ + struct CAPTURE_CONTROL_MSG control_desc; + struct capture_coe_channel_config *config = + 
&control_desc.channel_coe_setup_req.channel_config; + struct CAPTURE_CONTROL_MSG const * const resp = &ch->rce_resp_msg; + int ret; + u32 transaction; + unsigned long timeout = HZ; + u32 rce_chan_id = CAPTURE_COE_CHANNEL_INVALID_ID; + + ret = tegra_capture_ivc_register_control_cb(&coe_rce_cmd_control_response_cb, + &transaction, ch); + if (ret < 0) { + dev_err(ch->dev, "failed to register control callback: %d\n", ret); + return ret; + } + + memset(&control_desc, 0, sizeof(control_desc)); + control_desc.header.msg_id = CAPTURE_COE_CHANNEL_SETUP_REQ; + control_desc.header.transaction = transaction; + + config->mgbe_instance_id = ch->parent->mgbe_id; + config->mgbe_irq_num = ch->parent->mgbe_irq_id; + config->dma_chan = ch->dma_chan; + config->pdma_chan_id = ch->pdma_id; + memcpy(config->mac_addr, sensor_mac_addr, ETH_ALEN); + + config->rx_desc_ring_iova_mgbe = sg_dma_address(ch->rx_desc_mgbe_sgt.sgl); + config->rx_desc_ring_iova_rce = ch->rx_desc_dma_rce; + config->rx_desc_ring_mem_size = ch->parent->rx_ring_size * MGBE_RXDESC_SIZE; + + config->rx_desc_shdw_iova_rce = ch->rx_desc_shdw_dma_rce; + + config->rx_pkthdr_iova_mgbe = ch->rx_pkt_hdrs_dma_mgbe; + config->rx_pkthdr_mem_size = ch->parent->rx_ring_size * COE_MAX_PKT_HEADER_SIZE; + + config->rx_pktinfo_iova_mgbe = ch->parent->pdmas[ch->pdma_id].rx_pktinfo_dma_mgbe; + config->rx_pktinfo_iova_rce = ch->parent->pdmas[ch->pdma_id].rx_pktinfo_dma_rce; + config->rx_pktinfo_mem_size = ch->parent->rx_pktinfo_ring_size * MGBE_PKTINFO_DESC_SIZE; + + config->dummy_buf_dma = ch->rx_dummy_buf.iova; + config->dummy_buf_dma_size = ch->rx_dummy_buf.buf->size; + + config->rxmem_base = g_rxdesc_mem_dma_rce; + config->rxmem_size = COE_TOTAL_RXDESCR_MEM_SIZE; + + config->vlan_enable = vlan_enable; + + mutex_lock(&ch->rce_msg_lock); + + ret = tegra_capture_ivc_control_submit(&control_desc, sizeof(control_desc)); + if (ret < 0) { + dev_err(ch->dev, "IVC control submit failed\n"); + goto err; + } + + timeout = wait_for_completion_timeout(&ch->rce_resp_ready, timeout); + if (timeout <= 0) { + dev_err(ch->dev, "capture control message timed out\n"); + ret = -ETIMEDOUT; + + goto err; + } + + if (resp->header.msg_id != CAPTURE_CHANNEL_SETUP_RESP || + resp->header.transaction != transaction) { + dev_err(ch->dev, "%s: wrong msg id 0x%x transaction %u!\n", __func__, + resp->header.msg_id, resp->header.transaction); + ret = -EINVAL; + goto err; + }; + + if (resp->channel_setup_resp.result != CAPTURE_OK) { + dev_err(ch->dev, "%s: control failed, errno %d", __func__, + resp->channel_setup_resp.result); + ret = -EINVAL; + goto err; + } + + rce_chan_id = resp->channel_setup_resp.channel_id; + + mutex_unlock(&ch->rce_msg_lock); + + ret = tegra_capture_ivc_notify_chan_id(rce_chan_id, transaction); + if (ret != 0) { + dev_err(ch->dev, "failed to update control callback\n"); + tegra_capture_ivc_unregister_control_cb(transaction); + return ret; + } + + ret = tegra_capture_ivc_register_capture_cb( + &coe_rce_cmd_capture_response_cb, + rce_chan_id, ch); + if (ret != 0) { + dev_err(ch->dev, "failed to register capture callback\n"); + tegra_capture_ivc_unregister_control_cb(rce_chan_id); + return ret; + } + + ch->rce_chan_id = rce_chan_id; + + return 0; + +err: + mutex_unlock(&ch->rce_msg_lock); + + tegra_capture_ivc_unregister_control_cb(transaction); + + return ret; +} + +static int coe_chan_rce_capture_req(struct coe_channel_state * const ch, + u16 const buf_idx, + u64 const buf_mgbe_iova, + u32 const buf_len) +{ + struct CAPTURE_MSG rce_desc = {0U}; + int ret; + + 
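+	/*
+	 * RCE acknowledges this request with CAPTURE_COE_STATUS_IND carrying the
+	 * same buffer_index, so the index doubles as the capq_inhw FIFO slot.
+	 */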
rce_desc.header.msg_id = CAPTURE_COE_REQUEST; + rce_desc.header.channel_id = ch->rce_chan_id; + rce_desc.capture_coe_req.buffer_index = buf_idx; + rce_desc.capture_coe_req.buf_mgbe_iova = buf_mgbe_iova; + rce_desc.capture_coe_req.buf_len = buf_len; + + ret = tegra_capture_ivc_capture_submit(&rce_desc, sizeof(rce_desc)); + if (ret < 0) { + dev_err(ch->dev, "IVC capture submit failed\n"); + return ret; + } + + return 0; +} + +static int coe_ioctl_handle_capture_req(struct coe_channel_state * const ch, + const struct coe_ioctl_data_capture_req * const req) +{ + uint64_t mgbe_iova; + uint64_t buf_max_size; + int ret; + struct capture_common_unpins *unpins = NULL; + + mutex_lock(&ch->capq_inhw_lock); + + if (ch->capq_inhw_pending >= ARRAY_SIZE(ch->capq_inhw)) { + dev_err(ch->dev, "CAPTURE_REQ: Rx queue is full\n"); + ret = -EAGAIN; + goto error; + } + + if (ch->rce_chan_id == CAPTURE_COE_CHANNEL_INVALID_ID) { + dev_err(ch->dev, "CAPTURE_REQ: chan not opened\n"); + ret = -ENOTCONN; + goto error; + } + + unpins = &ch->capq_inhw[ch->capq_inhw_wr].unpins; + ret = capture_common_pin_and_get_iova(ch->buf_ctx, + req->mem_fd, + req->mem_fd_offset, + &mgbe_iova, + &buf_max_size, + unpins); + + if (ret) { + dev_err(ch->dev, "get buf iova failed: %d\n", ret); + goto error; + } + + if (req->buf_size > buf_max_size) { + dev_err(ch->dev, "CAPTURE_REQ: capture too long %u\n", req->buf_size); + ret = -ENOSPC; + goto error; + } + + if (req->buf_size > ch->rx_dummy_buf.buf->size) { + dev_err(ch->dev, "CAPTURE_REQ: buf size > scratch buf %u\n", req->buf_size); + ret = -ENOSPC; + goto error; + } + + if ((mgbe_iova >> 32U) != (ch->rx_dummy_buf.iova >> 32U)) { + dev_err(ch->dev, "Capture buf IOVA MSB 32 bits != scratch buf IOVA\n" + "0x%x != 0x%x\n", + (uint32_t)(mgbe_iova >> 32U), + (uint32_t)(ch->rx_dummy_buf.iova >> 32U)); + ret = -EIO; + goto error; + } + + ret = coe_chan_rce_capture_req(ch, ch->capq_inhw_wr, mgbe_iova, req->buf_size); + if (ret) + goto error; + + ch->capq_inhw[ch->capq_inhw_wr].user_capture_id = req->capture_number; + ch->capq_inhw_pending++; + ch->capq_inhw_wr = (ch->capq_inhw_wr + 1U) % ARRAY_SIZE(ch->capq_inhw); + + mutex_unlock(&ch->capq_inhw_lock); + + return 0; + +error: + if (unpins && unpins->num_unpins != 0) { + u32 i; + + for (i = 0U; i < unpins->num_unpins; i++) { + if (ch->buf_ctx != NULL && unpins->data[i] != NULL) + put_mapping(ch->buf_ctx, unpins->data[i]); + } + (void)memset(unpins, 0U, sizeof(*unpins)); + } + + mutex_unlock(&ch->capq_inhw_lock); + + return ret; +} + +static int coe_ioctl_handle_capture_status(struct coe_channel_state * const ch, + struct coe_ioctl_data_capture_status * const req) +{ + int ret; + const s32 timeout_ms = (s32)req->timeout_ms; + + if (ch->rce_chan_id == CAPTURE_COE_CHANNEL_INVALID_ID) { + dev_err(ch->dev, "CAPTURE_STATUS: chan not opened\n"); + return -ENOTCONN; + } + + dev_dbg_ratelimited(ch->dev, "CAPTURE_STATUS num=%u timeout:%d ms\n", + req->capture_number, timeout_ms); + + /* negative timeout means wait forever */ + if (timeout_ms < 0) { + ret = wait_for_completion_interruptible(&ch->capture_resp_ready); + if (ret == -ERESTARTSYS) { + dev_dbg_ratelimited(ch->dev, "capture status interrupted\n"); + return -ETIMEDOUT; + } + } else { + ret = wait_for_completion_timeout( + &ch->capture_resp_ready, + msecs_to_jiffies(timeout_ms)); + if (ret == 0) { + dev_dbg_ratelimited(ch->dev, "capture status timed out\n"); + return -ETIMEDOUT; + } + } + + if (ret < 0) { + dev_err_ratelimited(ch->dev, "wait for capture status failed\n"); + return ret; + } + + 
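+	/* A completion fired: report the oldest entry of the appreport FIFO. */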
mutex_lock(&ch->capq_appreport_lock); + + if (ch->capq_appreport_pending == 0) { + dev_warn_ratelimited(ch->dev, "No captures pending\n"); + mutex_unlock(&ch->capq_appreport_lock); + return -ENODATA; + } + + req->capture_status = ch->capq_appreport[ch->capq_appreport_rd].capture_status; + req->capture_number = ch->capq_appreport[ch->capq_appreport_rd].user_capture_id; + req->eofTimestamp = ch->capq_appreport[ch->capq_appreport_rd].eofTimestamp; + req->sofTimestamp = ch->capq_appreport[ch->capq_appreport_rd].sofTimestamp; + req->errData = ch->capq_appreport[ch->capq_appreport_rd].errData; + ch->capq_appreport_pending--; + ch->capq_appreport_rd = (ch->capq_appreport_rd + 1U) % ARRAY_SIZE(ch->capq_appreport); + + mutex_unlock(&ch->capq_appreport_lock); + + return 0; +} + +static int coe_helper_map_rcebuf_to_dev(struct device * const dev, + struct sg_table * const sgt, + const size_t map_offset, + const size_t map_size) +{ + struct scatterlist *sg; + u32 i; + int ret; + size_t remaining; + + ret = sg_alloc_table(sgt, 1U, GFP_KERNEL); + if (ret) { + dev_err(dev, "sg_alloc_table failed ret=%d\n", ret); + return ret; + } + + remaining = map_offset; + for_each_sg(g_rxdesc_rce_sgt.sgl, sg, g_rxdesc_rce_sgt.orig_nents, i) { + if (sg->length > remaining) { + const size_t start = remaining; + const size_t sg_size = sg->length - start; + + /* For now support only the case when entire per-MGBE pktinfo + SG is located in one sg entry */ + if (sg_size < map_size) { + dev_err(dev, + "Not enough space for mapping len=%zu\n", sg_size); + sg_free_table(sgt); + return -ENOSPC; + } + + sg_set_page(&sgt->sgl[0], sg_page(sg), map_size, sg->offset + start); + break; + } + + remaining -= sg->length; + } + + ret = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL); + if (ret <= 0) { + dev_err(dev, "dma_map_sg failed ret=%d\n", ret); + sg_free_table(sgt); + return -ENOEXEC; + } + + sgt->nents = ret; + + dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + + return 0; +} + +static int coe_chan_map_descr_to_mgbe(struct coe_channel_state * const ch) +{ + const size_t rxring_size_per_chan = COE_MGBE_MAX_RXDESC_NUM * MGBE_RXDESC_SIZE; + const size_t rxring_start_offset = MINOR(ch->devt) * rxring_size_per_chan; + int ret; + + ret = coe_helper_map_rcebuf_to_dev(ch->parent->mgbe_dev, &ch->rx_desc_mgbe_sgt, + rxring_start_offset, rxring_size_per_chan); + if (ret) { + dev_err(ch->dev, "Failed to map Rx descr ret=%d\n", ret); + return ret; + } + + dev_info(ch->dev, "Rx descr MGBE addr=0x%llx nentr=%u\n", + sg_dma_address(ch->rx_desc_mgbe_sgt.sgl), ch->rx_desc_mgbe_sgt.nents); + + return 0; +} + +static int +coe_ioctl_handle_setup_channel(struct coe_channel_state * const ch, + struct coe_ioctl_data_capture_setup *setup) +{ + struct nvether_coe_cfg g_coe_cfg; + struct nvether_per_coe_cfg per_coe_cfg; + struct net_device *ndev; + struct mgbe_rx_desc *rx_desc_shdw_ring; + struct coe_state *parent; + struct device *find_dev = NULL; + uint32_t dma_chan; + u8 pdma_chan; + int ret; + + if (ch->rce_chan_id != CAPTURE_COE_CHANNEL_INVALID_ID || + ch->buf_ctx != NULL) { + dev_err(ch->dev, "Chan already opened\n"); + return -EBUSY; + } + + if (MINOR(ch->devt) >= ARRAY_SIZE(coe_channels_arr)) { + dev_err(ch->dev, "Bad chan Minor\n"); + return -EFAULT; + } + + mutex_lock(&coe_device_list_lock); + list_for_each_entry(parent, &coe_device_list, device_entry) { + find_dev = device_find_child_by_name(parent->mgbe_dev, + setup->if_name); + if (find_dev != NULL) + break; + + } + mutex_unlock(&coe_device_list_lock); + + 
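+	/*
+	 * device_find_child_by_name() takes a reference on the matched device;
+	 * it is dropped via put_device() on the error paths below or in
+	 * coe_channel_close() once the channel is torn down.
+	 */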
if (find_dev == NULL) { + dev_err(ch->dev, "Can't find netdev %s\n", setup->if_name); + return -ENODEV; + } + + ndev = to_net_dev(find_dev); + + /* Check if the network interface is UP */ + if (!netif_running(ndev)) { + dev_err(ch->dev, "Network interface %s is not UP\n", + netdev_name(ndev)); + put_device(find_dev); + return -ENETDOWN; + } + + dma_chan = find_first_bit(parent->dmachans_map, MAX_HW_CHANS_PER_DEVICE); + if (dma_chan >= MAX_HW_CHANS_PER_DEVICE) { + dev_err(&parent->pdev->dev, + "No DMA chans left %s\n", setup->if_name); + put_device(find_dev); + return -ENOENT; + } + + pdma_chan = parent->vdma2pdma_map[dma_chan]; + if (pdma_chan >= ARRAY_SIZE(parent->pdmas)) { + dev_err(&parent->pdev->dev, "Bad PDMA chan %u\n", pdma_chan); + put_device(find_dev); + return -EFAULT; + } + + ret = device_move(ch->dev, &parent->pdev->dev, DPM_ORDER_NONE); + if (ret) { + dev_err(ch->dev, "Can't move state\n"); + put_device(find_dev); + return ret; + } + + ch->parent = parent; + /* Store netdev reference - it will be released in coe_channel_close() */ + ch->netdev = ndev; + ch->dma_chan = dma_chan; + ch->pdma_id = pdma_chan; + clear_bit(dma_chan, parent->dmachans_map); + list_add(&ch->list_entry, &parent->channels); + reinit_completion(&ch->capture_resp_ready); + + ret = coe_chan_map_descr_to_mgbe(ch); + if (ret != 0) + return ret; + + ch->buf_ctx = create_buffer_table(ch->parent->mgbe_dev); + if (ch->buf_ctx == NULL) { + dev_err(ch->dev, "Failed to alloc buffers table\n"); + return -ENOMEM; + } + + g_coe_cfg.coe_enable = COE_ENABLE; + + if (setup->vlan_enable == COE_VLAN_ENABLE) { + g_coe_cfg.vlan_enable = COE_VLAN_ENABLE; + g_coe_cfg.coe_hdr_offset = COE_MACSEC_HDR_OFFSET; + } else { + g_coe_cfg.vlan_enable = COE_VLAN_DISABLE; + g_coe_cfg.coe_hdr_offset = COE_MACSEC_HDR_VLAN_DISABLE_OFFSET; + } + + ret = nvether_coe_config(ndev, &g_coe_cfg); + if (ret != 0) { + dev_err(ch->dev, "COE config failed for ch %u\n", ch->dma_chan); + return ret; + } + + per_coe_cfg.lc1 = COE_MACSEC_SFT_LC1; + per_coe_cfg.lc2 = COE_MACSEC_SFT_LC2; + ret = nvether_coe_chan_config(ndev, ch->dma_chan, &per_coe_cfg); + if (ret != 0) { + dev_err(ch->dev, "Failed to setup line counters %u\n", ch->dma_chan); + return ret; + } + + ether_addr_copy(ch->sensor_mac_addr, setup->sensor_mac_addr); + + ch->rx_pkt_hdrs = dma_alloc_coherent(ch->parent->mgbe_dev, + ch->parent->rx_ring_size * COE_MAX_PKT_HEADER_SIZE, + &ch->rx_pkt_hdrs_dma_mgbe, + GFP_KERNEL | __GFP_ZERO); + if (ch->rx_pkt_hdrs == NULL) { + dev_err(ch->dev, "Rx pkt headers alloc failed\n"); + return -ENOMEM; + } + ch->rx_desc_shdw = dma_alloc_coherent(ch->parent->rtcpu_dev, + COE_MGBE_MAX_RXDESC_NUM * MGBE_RXDESC_SIZE, + &ch->rx_desc_shdw_dma_rce, + GFP_KERNEL); + if (ch->rx_desc_shdw == NULL) { + dev_err(ch->dev, "Rx desc shadow ring alloc failed\n"); + return -ENOMEM; + } + + /* Pre-fill the shadow Rx desc ring with the header buffers */ + rx_desc_shdw_ring = (struct mgbe_rx_desc *) ch->rx_desc_shdw; + for (uint32_t i = 0; i < COE_MGBE_MAX_RXDESC_NUM; i++) { + rx_desc_shdw_ring[i].rdes0 = L32(ch->rx_pkt_hdrs_dma_mgbe + (i * ETHER_PACKET_HDR_SIZE)); + rx_desc_shdw_ring[i].rdes1 = H32(ch->rx_pkt_hdrs_dma_mgbe + (i * ETHER_PACKET_HDR_SIZE)); + rx_desc_shdw_ring[i].rdes2 = 0U; + rx_desc_shdw_ring[i].rdes3 = 0U; + rx_desc_shdw_ring[i].rdes3 |= RDES3_OWN; + + } + + /* pin the capture descriptor ring buffer */ + ret = capture_common_pin_memory(ch->parent->mgbe_dev, + setup->scratchBufMem, + &ch->rx_dummy_buf); + if (ret < 0) { + dev_err(ch->dev, "Rx dummy buf map failed: 
%d\n", ret); + return ret; + } + + ret = coe_channel_open_on_rce(ch, setup->sensor_mac_addr, setup->vlan_enable); + if (ret) + return ret; + + dev_info(&parent->pdev->dev, "CoE chan added %s dmachan=%u num_desc=%u\n", + netdev_name(ndev), ch->dma_chan, ch->parent->rx_ring_size); + + return 0; +} + +static long coe_fop_channel_ioctl( + struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct coe_channel_state *ch = file->private_data; + void __user *ptr = (void __user *)arg; + long ret; + + if (ch == NULL || ch->dev == NULL) { + pr_err("CoE IOCTL invalid channel\n"); + return -EINVAL; + } + + if (_IOC_NR(cmd) != _IOC_NR(COE_IOCTL_CAPTURE_SETUP)) { + if (ch->parent == NULL || ch->netdev == NULL) { + dev_err(ch->dev, "CoE channel is not set up\n"); + return -ENOTCONN; + } + } + + switch (_IOC_NR(cmd)) { + case _IOC_NR(COE_IOCTL_CAPTURE_SETUP): + { + struct coe_ioctl_data_capture_setup setup; + + if (copy_from_user(&setup, ptr, sizeof(setup))) { + return -EFAULT; + } + + ret = coe_ioctl_handle_setup_channel(ch, &setup); + if (ret != 0) + return ret; + break; + } + case _IOC_NR(COE_IOCTL_BUFFER_OP): + { + struct coe_ioctl_data_buffer_op req; + + ret = copy_from_user(&req, ptr, sizeof(req)); + if (ret != 0) + return ret; + + mutex_lock(&ch->channel_lock); + if (ch->rce_chan_id == CAPTURE_COE_CHANNEL_INVALID_ID) { + dev_err(ch->dev, "BUFFER_OP: chan not opened\n"); + mutex_unlock(&ch->channel_lock); + return -ENOTCONN; + } + + ret = capture_buffer_request(ch->buf_ctx, req.mem, req.flag); + if (ret < 0) { + dev_err(ch->dev, "CoE buffer op failed flag=0x%x: %ld\n", + req.flag, ret); + mutex_unlock(&ch->channel_lock); + return ret; + } + + mutex_unlock(&ch->channel_lock); + + dev_dbg(ch->dev, "CoE buffer op OK flag=0x%x\n", req.flag); + break; + } + case _IOC_NR(COE_IOCTL_CAPTURE_REQ): + { + struct coe_ioctl_data_capture_req req; + + ret = copy_from_user(&req, ptr, sizeof(req)); + if (ret != 0) + return ret; + + ret = coe_ioctl_handle_capture_req(ch, &req); + break; + } + case _IOC_NR(COE_IOCTL_CAPTURE_STATUS): + { + struct coe_ioctl_data_capture_status req; + + ret = copy_from_user(&req, ptr, sizeof(req)); + if (ret != 0) + return ret; + + ret = coe_ioctl_handle_capture_status(ch, &req); + if (ret < 0) { + dev_err(ch->dev, "CoE capture status failed: %ld\n", + ret); + return ret; + } + + ret = copy_to_user(ptr, &req, sizeof(req)); + if (ret != 0) + return ret; + + break; + } + case _IOC_NR(COE_IOCTL_GET_INFO): + { + struct coe_ioctl_data_get_info ret_info = {0U}; + + if (ch->dma_chan == CAPTURE_COE_CHAN_INVALID_HW_ID) { + dev_err(ch->dev, "CoE chan HW ID not set yet\n"); + return -EAGAIN; + } + + ret_info.channel_number = ch->dma_chan; + ret = copy_to_user(ptr, &ret_info, sizeof(ret_info)); + if (ret != 0) + return ret; + break; + } + default: + dev_err(ch->dev, "Unknown IOCTL 0x%x\n", _IOC_NR(cmd)); + ret = -EIO; + break; + } + + return ret; +} + +static int coe_fop_channel_open( + struct inode *inode, + struct file *file) +{ + struct coe_channel_state *ch; + unsigned int chan_id = iminor(inode); + int ret; + + if (chan_id >= ARRAY_SIZE(coe_channels_arr)) { + pr_err("CoE: open chan invalid minor %u\n", chan_id); + return -ENXIO; + } + + if (mutex_lock_interruptible(&coe_channels_arr_lock)) + return -ERESTARTSYS; + + ch = &coe_channels_arr[chan_id]; + + if (ch->devt != inode->i_rdev) { + pr_err("CoE: open chan mismatch devt %u!=%u\n", + ch->devt, inode->i_rdev); + ret = -ENXIO; + goto mutex_unlock; + } + + if (ch->dev == NULL) { + pr_err("CoE: open chan bad state\n"); + ret = 
-EFAULT; + goto mutex_unlock; + } + + if (ch->opened) { + dev_dbg(ch->dev, "CoE channel is busy\n"); + ret = -EBUSY; + goto mutex_unlock; + } + + file->private_data = ch; + ch->opened = true; + + ret = nonseekable_open(inode, file); + +mutex_unlock: + mutex_unlock(&coe_channels_arr_lock); + return ret; +} + +static int coe_channel_reset_rce(struct coe_channel_state *ch) +{ + struct CAPTURE_CONTROL_MSG control_desc; + struct CAPTURE_CONTROL_MSG const * const resp = &ch->rce_resp_msg; + ///@todo A capture reset barrier ind message is also needed + /// This would be similar to how both VI and ISP handle reset + int ret; + unsigned long timeout = HZ; + + if (ch->rce_chan_id == CAPTURE_COE_CHANNEL_INVALID_ID) { + dev_dbg(ch->dev, "%s: CoE channel not set up\n", __func__); + return 0; + } + + dev_info(ch->dev, "Reset CoE chan rce %u, rce_chan_id %u\n", + MINOR(ch->devt), ch->rce_chan_id); + + memset(&control_desc, 0, sizeof(control_desc)); + control_desc.header.msg_id = CAPTURE_COE_CHANNEL_RESET_REQ; + control_desc.header.channel_id = ch->rce_chan_id; + + mutex_lock(&ch->rce_msg_lock); + + ret = tegra_capture_ivc_control_submit(&control_desc, sizeof(control_desc)); + if (ret < 0) { + dev_info(ch->dev, "IVC control submit failed\n"); + goto mutex_unlock; + } + + timeout = wait_for_completion_timeout(&ch->rce_resp_ready, timeout); + if (timeout <= 0) { + dev_info(ch->dev, "capture control message timed out\n"); + ret = -ETIMEDOUT; + goto mutex_unlock; + } + + if (resp->header.msg_id != CAPTURE_COE_CHANNEL_RESET_RESP) { + dev_info(ch->dev, "%s: wrong msg id 0x%x\n", __func__, resp->header.msg_id); + ret = -EINVAL; + goto mutex_unlock; + }; + + if (resp->channel_coe_reset_resp.result != CAPTURE_OK) { + dev_info(ch->dev, "%s: control failed, errno %d", __func__, + resp->channel_coe_reset_resp.result); + ret = -EINVAL; + goto mutex_unlock; + } + +mutex_unlock: + mutex_unlock(&ch->rce_msg_lock); + + return ret; +} + +///@todo refactor reset and release to use common code to send IVC +static int coe_channel_release_rce(struct coe_channel_state *ch) +{ + struct CAPTURE_CONTROL_MSG control_desc; + struct CAPTURE_CONTROL_MSG const * const resp = &ch->rce_resp_msg; + int ret; + unsigned long timeout = HZ; + + if (ch->rce_chan_id == CAPTURE_COE_CHANNEL_INVALID_ID) { + dev_dbg(ch->dev, "%s: CoE channel not set up\n", __func__); + return 0; + } + + dev_info(ch->dev, "Release CoE chan rce %u, rce_chan_id %u\n", + MINOR(ch->devt), ch->rce_chan_id); + + memset(&control_desc, 0, sizeof(control_desc)); + control_desc.header.msg_id = CAPTURE_COE_CHANNEL_RELEASE_REQ; + control_desc.header.channel_id = ch->rce_chan_id; + + mutex_lock(&ch->rce_msg_lock); + + ret = tegra_capture_ivc_control_submit(&control_desc, sizeof(control_desc)); + if (ret < 0) { + dev_info(ch->dev, "IVC control submit failed\n"); + goto mutex_unlock; + } + + timeout = wait_for_completion_timeout(&ch->rce_resp_ready, timeout); + if (timeout <= 0) { + dev_info(ch->dev, "capture control message timed out\n"); + ret = -ETIMEDOUT; + goto mutex_unlock; + } + + if (resp->header.msg_id != CAPTURE_COE_CHANNEL_RELEASE_RESP) { + dev_info(ch->dev, "%s: wrong msg id 0x%x\n", __func__, resp->header.msg_id); + ret = -EINVAL; + goto mutex_unlock; + }; + + if (resp->channel_coe_release_resp.result != CAPTURE_OK) { + dev_info(ch->dev, "%s: control failed, errno %d", __func__, + resp->channel_coe_reset_resp.result); + ret = -EINVAL; + goto mutex_unlock; + } + +mutex_unlock: + mutex_unlock(&ch->rce_msg_lock); + + return ret; +} + +static int coe_channel_close(struct 
coe_channel_state *ch) +{ + if (!ch->opened) + return 0; + + dev_info(ch->dev, "Closing CoE chan %u\n", MINOR(ch->devt)); + + mutex_lock(&ch->channel_lock); + mutex_lock(&ch->capq_inhw_lock); + + coe_channel_reset_rce(ch); + + for (u32 buf_idx = 0U; buf_idx < ARRAY_SIZE(ch->capq_inhw); buf_idx++) { + coe_chan_buf_release(ch->buf_ctx, &ch->capq_inhw[buf_idx]); + } + + ch->capq_inhw_pending = 0U; + ch->capq_inhw_wr = 0U; + ch->capq_inhw_rd = 0U; + + coe_channel_release_rce(ch); + + if (ch->rce_chan_id != CAPTURE_COE_CHANNEL_INVALID_ID) { + tegra_capture_ivc_unregister_capture_cb(ch->rce_chan_id); + tegra_capture_ivc_unregister_control_cb(ch->rce_chan_id); + + ch->rce_chan_id = CAPTURE_COE_CHANNEL_INVALID_ID; + } + + mutex_unlock(&ch->capq_inhw_lock); + + mutex_lock(&ch->capq_appreport_lock); + + for (u32 buf_idx = 0U; buf_idx < ARRAY_SIZE(ch->capq_appreport); buf_idx++) { + ch->capq_appreport[buf_idx].capture_status = CAPTURE_STATUS_UNKNOWN; + } + + ch->capq_appreport_pending = 0U; + ch->capq_appreport_rd = 0U; + ch->capq_appreport_wr = 0U; + complete_all(&ch->capture_resp_ready); + + mutex_unlock(&ch->capq_appreport_lock); + + if (ch->buf_ctx != NULL) { + destroy_buffer_table(ch->buf_ctx); + ch->buf_ctx = NULL; + } + + capture_common_unpin_memory(&ch->rx_dummy_buf); + + if (ch->rx_pkt_hdrs != NULL) { + dma_free_coherent(ch->parent->mgbe_dev, + ch->parent->rx_ring_size * COE_MAX_PKT_HEADER_SIZE, + ch->rx_pkt_hdrs, ch->rx_pkt_hdrs_dma_mgbe); + ch->rx_pkt_hdrs = NULL; + } + + if (ch->rx_desc_shdw != NULL) { + dma_free_coherent(ch->parent->rtcpu_dev, + COE_MGBE_MAX_RXDESC_NUM * MGBE_RXDESC_SIZE, + ch->rx_desc_shdw, ch->rx_desc_shdw_dma_rce); + } + + if (ch->netdev) { + put_device(&ch->netdev->dev); + ch->netdev = NULL; + } + + if (ch->rx_desc_mgbe_sgt.sgl != NULL) { + dma_unmap_sg(ch->parent->mgbe_dev, ch->rx_desc_mgbe_sgt.sgl, + ch->rx_desc_mgbe_sgt.orig_nents, DMA_BIDIRECTIONAL); + sg_free_table(&ch->rx_desc_mgbe_sgt); + } + + if (ch->parent) { + device_move(ch->dev, NULL, DPM_ORDER_NONE); + set_bit(ch->dma_chan, ch->parent->dmachans_map); + list_del(&ch->list_entry); + ch->parent = NULL; + ch->dma_chan = CAPTURE_COE_CHAN_INVALID_HW_ID; + } + + ch->opened = false; + mutex_unlock(&ch->channel_lock); + + return 0; +} + +static int coe_fop_channel_release( + struct inode *inode, + struct file *file) +{ + struct coe_channel_state *ch = file->private_data; + + file->private_data = NULL; + + if (ch == NULL || ch->dev == NULL) { + return 0; + } + + dev_info(ch->dev, "%s\n", __func__); + + return coe_channel_close(ch); +} + +static const struct file_operations coe_channel_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = coe_fop_channel_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = coe_fop_channel_ioctl, +#endif + .open = coe_fop_channel_open, + .release = coe_fop_channel_release, +}; + +static void coe_netdev_event_handle(struct coe_state * const s, + unsigned long event, struct net_device *event_dev) +{ + dev_info(&s->pdev->dev, "netdev event %lu dev %s\n", + event, netdev_name(event_dev)); + + switch (event) { + case NETDEV_UP: + /* TODO can do sensor discovery here */ + break; + case NETDEV_DOWN: + break; + default: + break; + } +} + +static int rtcpu_coe_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *event_dev; + struct coe_state * const s = + container_of(this, struct coe_state, netdev_nb); + + if (ptr == NULL) + return NOTIFY_DONE; + + event_dev = netdev_notifier_info_to_dev(ptr); + if (event_dev == NULL) + return 
NOTIFY_DONE;
+
+	if (s->mgbe_dev == event_dev->dev.parent)
+		coe_netdev_event_handle(s, event, event_dev);
+
+	return NOTIFY_DONE;
+}
+
+static struct device *camrtc_coe_get_linked_device(
+	const struct device *dev, char const *name, int index)
+{
+	struct device_node *np;
+	struct platform_device *pdev;
+
+	np = of_parse_phandle(dev->of_node, name, index);
+	if (np == NULL)
+		return NULL;
+
+	pdev = of_find_device_by_node(np);
+	of_node_put(np);
+
+	if (pdev == NULL) {
+		dev_warn(dev, "%s[%u] node has no device\n", name, index);
+		return NULL;
+	}
+
+	return &pdev->dev;
+}
+
+static int coe_mgbe_init_pdmas(struct coe_state * const s)
+{
+	struct device_node *vm_node;
+	struct device_node *temp;
+	u32 num_of_pdma;
+	int ret;
+	unsigned int node = 0;
+
+	vm_node = of_parse_phandle(s->mgbe_dev->of_node,
+				   "nvidia,vm-vdma-config", 0);
+	if (vm_node == NULL) {
+		dev_err(&s->pdev->dev, "failed to find VDMA configuration\n");
+		return -ENOENT;
+	}
+
+	ret = of_property_read_u32(vm_node, "nvidia,pdma-num", &num_of_pdma);
+	if (ret != 0) {
+		dev_err(&s->pdev->dev, "failed to get number of PDMA (%d)\n",
+			ret);
+		dev_info(&s->pdev->dev, "Using number of PDMA as 1\n");
+		num_of_pdma = 1U;
+	}
+
+	if (num_of_pdma > COE_MGBE_MAX_NUM_PDMA_CHANS) {
+		dev_err(&s->pdev->dev, "Invalid number of PDMAs %u\n", num_of_pdma);
+		return -EINVAL;
+	}
+
+	ret = of_get_child_count(vm_node);
+	if (ret != (int)num_of_pdma) {
+		dev_err(&s->pdev->dev,
+			"Mismatch in num_of_pdma and VDMA config DT nodes\n");
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(vm_node, temp) {
+		u32 pdma_chan;
+		u32 num_vdma_chans;
+		u32 vdma_chans[MAX_HW_CHANS_PER_DEVICE];
+
+		if (node == num_of_pdma)
+			break;
+
+		ret = of_property_read_u32(temp, "nvidia,pdma-chan", &pdma_chan);
+		if (ret != 0) {
+			dev_err(&s->pdev->dev, "failed to read PDMA ID\n");
+			return ret;
+		}
+
+		if (pdma_chan >= ARRAY_SIZE(s->pdmas)) {
+			dev_err(&s->pdev->dev, "Invalid PDMA ID %u\n", pdma_chan);
+			return -EINVAL;
+		}
+
+		ret = of_property_read_u32(temp, "nvidia,num-vdma-channels", &num_vdma_chans);
+		if (ret != 0) {
+			dev_err(&s->pdev->dev,
+				"failed to read number of VDMA channels\n");
+			return ret;
+		}
+
+		if (num_vdma_chans >= ARRAY_SIZE(vdma_chans)) {
+			dev_err(&s->pdev->dev, "Invalid num of VDMAs %u\n", num_vdma_chans);
+			return -EINVAL;
+		}
+
+		ret = of_property_read_u32_array(temp, "nvidia,vdma-channels",
+						 vdma_chans, num_vdma_chans);
+		if (ret != 0) {
+			dev_err(&s->pdev->dev, "failed to get VDMA channels\n");
+			return ret;
+		}
+
+		for (u32 i = 0U; i < num_vdma_chans; i++) {
+			if (vdma_chans[i] >= ARRAY_SIZE(s->vdma2pdma_map)) {
+				dev_err(&s->pdev->dev, "Bad VDMA ID %u\n", vdma_chans[i]);
+				return -EINVAL;
+			}
+
+			s->vdma2pdma_map[vdma_chans[i]] = pdma_chan;
+		}
+	}
+
+	return 0;
+}
+
+static int coe_alloc_rx_descr_mem_area(struct coe_state * const s)
+{
+	const size_t alloc_size = COE_TOTAL_RXDESCR_MEM_SIZE;
+	int ret;
+	dma_addr_t mgbe_addr;
+	dma_addr_t rce_addr;
+	dma_addr_t alloc_align_offset;
+	dma_addr_t pib_base_offset;
+	size_t pktinfo_size_per_mgbe;
+	dma_addr_t pib_start_offset;
+
+	mutex_lock(&coe_device_list_lock);
+
+	if (g_rx_descr_mem_area != NULL) {
+		if (g_rtcpu_dev != s->rtcpu_dev) {
+			mutex_unlock(&coe_device_list_lock);
+			dev_err(&s->pdev->dev, "Multiple RCE CPUs not supported\n");
+			return -ENOTSUPP;
+		}
+	} else {
+		g_rx_descr_mem_area = dma_alloc_coherent(s->rtcpu_dev,
+							 alloc_size,
+							 &g_rxdesc_mem_dma_rce,
+							 GFP_KERNEL);
+		if (g_rx_descr_mem_area == NULL) {
+			mutex_unlock(&coe_device_list_lock);
+			dev_err(&s->pdev->dev, "Failed to allocate RX descriptor memory\n");
+			return -ENOMEM;
+		}
+
+		/* The allocation must be aligned to the region size, which must be a
+		 * power of two.
+		 * TODO in case this check fails - need to allocate twice as much
+		 * memory (alloc_size + alloc_size - 1) and then align the base RCE
+		 * address to the required boundary.
+		 */
+		if (g_rxdesc_mem_dma_rce != ALIGN(g_rxdesc_mem_dma_rce, alloc_size)) {
+			dma_free_coherent(s->rtcpu_dev, alloc_size,
+					  g_rx_descr_mem_area,
+					  g_rxdesc_mem_dma_rce);
+			g_rx_descr_mem_area = NULL;
+			mutex_unlock(&coe_device_list_lock);
+			dev_err(&s->pdev->dev,
+				"Wrong RCE Rx desc addr alignment 0x%llx size=%lu\n",
+				g_rxdesc_mem_dma_rce, alloc_size);
+			return -EFAULT;
+		}
+
+		ret = dma_get_sgtable(s->rtcpu_dev, &g_rxdesc_rce_sgt,
+				      g_rx_descr_mem_area,
+				      g_rxdesc_mem_dma_rce,
+				      alloc_size);
+		if (ret < 0) {
+			dma_free_coherent(s->rtcpu_dev, alloc_size,
+					  g_rx_descr_mem_area,
+					  g_rxdesc_mem_dma_rce);
+			g_rx_descr_mem_area = NULL;
+			mutex_unlock(&coe_device_list_lock);
+			dev_err(&s->pdev->dev, "dma_get_sgtable for RCE failed ret=%d\n", ret);
+			return ret;
+		}
+
+		g_rtcpu_dev = s->rtcpu_dev;
+
+		dev_info(&s->pdev->dev, "Rx descr RCE addr=0x%llx len=%lu\n",
+			 g_rxdesc_mem_dma_rce, alloc_size);
+	}
+
+	mutex_unlock(&coe_device_list_lock);
+
+	/* Offset from the beginning of the allocated Rx descr area where the RCE MPU region starts */
+	alloc_align_offset = ALIGN(g_rxdesc_mem_dma_rce, alloc_size) - g_rxdesc_mem_dma_rce;
+	/* Offset from start of Rx mem area where the PktInfo bufs portion begins */
+	pib_base_offset = alloc_align_offset +
+		(MAX_ACTIVE_COE_CHANNELS * COE_MGBE_MAX_RXDESC_NUM * MGBE_RXDESC_SIZE);
+	pktinfo_size_per_mgbe = COE_MGBE_MAX_PKTINFO_NUM *
+		COE_MGBE_MAX_NUM_PDMA_CHANS * MGBE_PKTINFO_DESC_SIZE;
+	/* Offset to the Rx mem area dedicated to Rx PacketInfo bufs for the specific MGBE ID */
+	pib_start_offset = pib_base_offset + s->mgbe_id * pktinfo_size_per_mgbe;
+
+	ret = coe_helper_map_rcebuf_to_dev(s->mgbe_dev, &s->rx_pktinfo_mgbe_sgt,
+					   pib_start_offset, pktinfo_size_per_mgbe);
+	if (ret) {
+		dev_err(&s->pdev->dev, "Failed to map Pktinfo ret=%d\n", ret);
+		return ret;
+	}
+
+	mgbe_addr = sg_dma_address(s->rx_pktinfo_mgbe_sgt.sgl);
+	rce_addr = g_rxdesc_mem_dma_rce + pib_start_offset;
+
+	dev_info(&s->pdev->dev, "Rx pktinfo MGBE addr=0x%llx nentr=%u\n",
+		 mgbe_addr, s->rx_pktinfo_mgbe_sgt.nents);
+
+	/* Initialize addresses for all physical DMA channels */
+	for (u32 pdma_id = 0U; pdma_id < ARRAY_SIZE(s->pdmas); pdma_id++) {
+		struct coe_pdma_state * const pdma = &s->pdmas[pdma_id];
+
+		pdma->rx_pktinfo_dma_rce = rce_addr;
+		pdma->rx_pktinfo_dma_mgbe = mgbe_addr;
+		rce_addr += COE_MGBE_MAX_PKTINFO_NUM * MGBE_PKTINFO_DESC_SIZE;
+		mgbe_addr += COE_MGBE_MAX_PKTINFO_NUM * MGBE_PKTINFO_DESC_SIZE;
+	}
+
+	return 0;
+}
+
+static int32_t coe_mgbe_parse_dt_dmachans(struct coe_state * const s,
+					  u32 * const vm_chans,
+					  size_t max_num_chans)
+{
+	struct device_node *vm_node;
+	struct device_node *temp;
+	u32 vm_irq_id = 0U;
+	int ret = 0;
+	u32 num_vm_chans;
+
+	vm_node = of_parse_phandle(s->mgbe_dev->of_node,
+				   "nvidia,vm-irq-config", 0);
+	if (vm_node == NULL) {
+		dev_err(&s->pdev->dev, "failed to find VM IRQ config\n");
+		return -ENOENT;
+	}
+
+	for_each_child_of_node(vm_node, temp) {
+		bool isCoE;
+
+		isCoE = of_property_read_bool(temp, "nvidia,camera-over-eth");
+		if (!isCoE) {
+			continue;
+		}
+
+		ret = of_property_read_u32(temp, "nvidia,vm-num", &vm_irq_id);
+		if (ret != 0) {
+			dev_err(&s->pdev->dev, "failed to read VM Number\n");
+			break;
+		}
+
+		ret = of_property_read_u32(temp, 
"nvidia,num-vm-channels", + &num_vm_chans); + if (ret != 0) { + dev_err(&s->pdev->dev, + "failed to read number of VM channels\n"); + break; + } + + if (num_vm_chans > max_num_chans) { + dev_warn(&s->pdev->dev, "Too many CoE channels\n"); + ret = -E2BIG; + break; + } + + ret = of_property_read_u32_array(temp, "nvidia,vm-channels", + vm_chans, num_vm_chans); + if (ret != 0) { + dev_err(&s->pdev->dev, "failed to get VM channels\n"); + break; + } + + s->mgbe_irq_id = vm_irq_id; + ret = num_vm_chans; + break; + } + + return ret; +} + +static int camrtc_coe_probe(struct platform_device *pdev) +{ + struct coe_state *s; + struct device *dev = &pdev->dev; + int ret; + u32 dma_chans_arr[MAX_HW_CHANS_PER_DEVICE]; + int num_coe_channels; + const struct coe_state *check_state; + + dev_dbg(dev, "tegra-camrtc-capture-coe probe\n"); + + s = devm_kzalloc(dev, sizeof(*s), GFP_KERNEL); + if (s == NULL) + return -ENOMEM; + + s->rtcpu_dev = camrtc_coe_get_linked_device(dev, + "nvidia,cam_controller", 0U); + if (s->rtcpu_dev == NULL) { + dev_err(dev, "No CoE controller found\n"); + return -ENOENT; + } + + s->mgbe_dev = camrtc_coe_get_linked_device(dev, + "nvidia,eth_controller", 0U); + if (s->mgbe_dev == NULL) { + return -ENOENT; + } + + num_coe_channels = coe_mgbe_parse_dt_dmachans(s, dma_chans_arr, + ARRAY_SIZE(dma_chans_arr)); + if (num_coe_channels < 0) { + return num_coe_channels; + } + + platform_set_drvdata(pdev, s); + INIT_LIST_HEAD(&s->channels); + INIT_LIST_HEAD(&s->device_entry); + mutex_init(&s->access_lock); + s->pdev = pdev; + + s->netdev_nb.notifier_call = rtcpu_coe_netdev_event; + ret = register_netdevice_notifier(&s->netdev_nb); + if (ret != 0) { + dev_err(dev, "CoE failed to register notifier\n"); + return ret; + } + + /* TODO take from DT? */ + s->rx_ring_size = 16384U; + s->rx_pktinfo_ring_size = 4096U; /* Can only be 256, 512, 2048 or 4096 */ + + ret = of_property_read_u32(s->mgbe_dev->of_node, + "nvidia,instance_id", &s->mgbe_id); + if (ret != 0) { + dev_info(dev, + "DT instance_id missing, setting default to MGBE0\n"); + s->mgbe_id = 0U; + } + + if (s->rx_ring_size > COE_MGBE_MAX_RXDESC_NUM) { + dev_err(dev, "Invalid Rx ring size %u\n", s->rx_ring_size); + return -ENOSPC; + } + + if (s->rx_pktinfo_ring_size != 256U && + s->rx_pktinfo_ring_size != 512U && + s->rx_pktinfo_ring_size != 2048U && + s->rx_pktinfo_ring_size != 4096U) { + dev_err(dev, "Invalid pktinfo ring size %u\n", s->rx_pktinfo_ring_size); + return -ENOSPC; + } + + if (s->mgbe_id >= MAX_NUM_COE_DEVICES) { + dev_err(dev, "Invalid MGBE ID %u\n", s->mgbe_id); + return -EBADFD; + } + + ret = coe_mgbe_init_pdmas(s); + if (ret) + return ret; + + ret = coe_alloc_rx_descr_mem_area(s); + if (ret) + return ret; + + for (u32 ch = 0U; ch < num_coe_channels; ch++) { + u32 arr_idx; + size_t offset; + struct coe_channel_state *chan; + + mutex_lock(&coe_channels_arr_lock); + + chan = coe_channel_arr_find_free(&arr_idx); + if (chan == NULL) { + dev_err(dev, "No free channel slots ch=%u\n", ch); + mutex_unlock(&coe_channels_arr_lock); + return -ENOMEM; + } + + chan->devt = MKDEV(coe_channel_major, arr_idx); + chan->dev = device_create(coe_channel_class, NULL, chan->devt, NULL, + "coe-chan-%u", arr_idx); + if (IS_ERR(chan->dev)) { + ret = PTR_ERR(chan->dev); + chan->dev = NULL; + mutex_unlock(&coe_channels_arr_lock); + return ret; + } + + mutex_unlock(&coe_channels_arr_lock); + + INIT_LIST_HEAD(&chan->list_entry); + mutex_init(&chan->rce_msg_lock); + mutex_init(&chan->channel_lock); + mutex_init(&chan->capq_inhw_lock); + 
mutex_init(&chan->capq_appreport_lock); + init_completion(&chan->rce_resp_ready); + init_completion(&chan->capture_resp_ready); + chan->rce_chan_id = CAPTURE_COE_CHANNEL_INVALID_ID; + chan->pdma_id = COE_MGBE_PDMA_CHAN_INVALID; + chan->dma_chan = CAPTURE_COE_CHAN_INVALID_HW_ID; + + offset = arr_idx * COE_MGBE_MAX_RXDESC_NUM * MGBE_RXDESC_SIZE; + chan->rx_desc_dma_rce = g_rxdesc_mem_dma_rce + offset; + + set_bit(dma_chans_arr[ch], s->dmachans_map); + + dev_info(&s->pdev->dev, "Ch%u->PDMA%u\n", + dma_chans_arr[ch], s->vdma2pdma_map[dma_chans_arr[ch]]); + } + + mutex_lock(&coe_device_list_lock); + list_for_each_entry(check_state, &coe_device_list, device_entry) { + if (s->mgbe_id == check_state->mgbe_id) { + mutex_unlock(&coe_device_list_lock); + dev_err(dev, "Device already exists for mgbe_id=%u\n", + s->mgbe_id); + return -EEXIST; + } + } + + list_add(&s->device_entry, &coe_device_list); + mutex_unlock(&coe_device_list_lock); + + dev_info(dev, "Camera Over Eth controller %s num_chans=%u IRQ=%u\n", + dev_name(s->mgbe_dev), num_coe_channels, s->mgbe_irq_id); + + return 0; +} + +static int camrtc_coe_remove(struct platform_device *pdev) +{ + struct coe_state * const s = platform_get_drvdata(pdev); + struct coe_channel_state *ch; + struct coe_channel_state *ch_tmp; + + dev_dbg(&pdev->dev, "tegra-camrtc-capture-coe remove\n"); + + unregister_netdevice_notifier(&s->netdev_nb); + + mutex_lock(&coe_channels_arr_lock); + + list_for_each_entry_safe(ch, ch_tmp, &s->channels, list_entry) { + coe_channel_close(ch); + if (ch->dev != NULL) + device_destroy(coe_channel_class, ch->devt); + ch->dev = NULL; + ch->devt = 0U; + } + + mutex_unlock(&coe_channels_arr_lock); + + if (s->rx_pktinfo_mgbe_sgt.sgl != NULL) { + dma_unmap_sg(s->mgbe_dev, s->rx_pktinfo_mgbe_sgt.sgl, + s->rx_pktinfo_mgbe_sgt.orig_nents, DMA_BIDIRECTIONAL); + sg_free_table(&s->rx_pktinfo_mgbe_sgt); + } + + if (s->mgbe_dev != NULL) { + put_device(s->mgbe_dev); + s->mgbe_dev = NULL; + } + + if (s->rtcpu_dev != NULL) { + put_device(s->rtcpu_dev); + s->rtcpu_dev = NULL; + } + + return 0; +} + +static const struct of_device_id camrtc_coe_of_match[] = { + { .compatible = "nvidia,tegra-camrtc-capture-coe" }, + {}, +}; +MODULE_DEVICE_TABLE(of, camrtc_coe_of_match); + +static struct platform_driver capture_coe_driver = { + .probe = camrtc_coe_probe, + .remove = camrtc_coe_remove, + .driver = { + .name = "camrtc-coe", + .owner = THIS_MODULE, + .of_match_table = camrtc_coe_of_match, + }, +}; + +static int __init capture_coe_init(void) +{ + int err; + +#if defined(NV_CLASS_CREATE_HAS_NO_OWNER_ARG) /* Linux v6.4 */ + coe_channel_class = class_create("capture-coe-channel"); +#else + coe_channel_class = class_create(THIS_MODULE, "capture-coe-channel"); +#endif + if (IS_ERR(coe_channel_class)) + return PTR_ERR(coe_channel_class); + + coe_channel_major = register_chrdev(0, "capture-coe-channel", + &coe_channel_fops); + if (coe_channel_major < 0) { + class_destroy(coe_channel_class); + return coe_channel_major; + } + + err = platform_driver_register(&capture_coe_driver); + if (err) { + unregister_chrdev(coe_channel_major, "capture-coe-channel"); + class_destroy(coe_channel_class); + return err; + } + + return 0; +} + +static void __exit capture_coe_exit(void) +{ + if (g_rx_descr_mem_area != NULL) { + const size_t alloc_size = COE_TOTAL_RXDESCR_MEM_SIZE; + + sg_free_table(&g_rxdesc_rce_sgt); + dma_free_coherent(g_rtcpu_dev, alloc_size, + g_rx_descr_mem_area, g_rxdesc_mem_dma_rce); + g_rx_descr_mem_area = NULL; + g_rtcpu_dev = NULL; + } + + for (u32 ch_id = 0U; 
ch_id < ARRAY_SIZE(coe_channels_arr); ch_id++) {
+		struct coe_channel_state * const ch = &coe_channels_arr[ch_id];
+
+		if (ch->dev != NULL)
+			device_destroy(coe_channel_class, ch->devt);
+		ch->dev = NULL;
+		ch->devt = 0U;
+	}
+
+	platform_driver_unregister(&capture_coe_driver);
+	unregister_chrdev(coe_channel_major, "capture-coe-channel");
+	class_destroy(coe_channel_class);
+}
+
+module_init(capture_coe_init);
+module_exit(capture_coe_exit);
+
+MODULE_AUTHOR("Igor Mitsyanko ");
+MODULE_DESCRIPTION("NVIDIA Tegra Camera Over Ethernet driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/nvidia/nvethernet/Makefile b/drivers/net/ethernet/nvidia/nvethernet/Makefile
index f6438b63..4217bfd7 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/Makefile
+++ b/drivers/net/ethernet/nvidia/nvethernet/Makefile
@@ -19,11 +19,7 @@ endif
 ccflags-y += -DLINUX_OS -DNET30 -DNVPKCS_MACSEC -DLINUX_IVC -mno-outline-atomics -Werror \
 	-I$(srctree.nvidia-oot)/drivers/net/ethernet/nvidia/nvethernet/nvethernetrm/include
-ifdef CONFIG_KASAN
 ccflags-y += -Wframe-larger-than=4096
-else
-ccflags-y += -Wframe-larger-than=2048
-endif
 #ccflags-y += -DOSI_DEBUG -DMACSEC_SUPPORT -DDEBUG_MACSEC -DMACSEC_KEY_PROGRAM
 ccflags-y += -DMACSEC_SUPPORT
diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
index 014dc31a..c64c9f60 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
+++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 #include
 #ifdef CONFIG_DEBUG_FS
@@ -2053,6 +2054,11 @@ static int ether_request_irqs(struct ether_priv_data *pdata)
 				"unexpected irq name index received (%d)\n", j);
 			goto err_chan_irq;
 		}
+
+		if (osi_core->irq_data[i].is_coe == 1U) {
+			continue;
+		}
+
 		snprintf(pdata->irq_names[j], ETHER_IRQ_NAME_SZ, "%s.vm%d",
 			 netdev_name(pdata->ndev), i);
 		ret = devm_request_irq(pdata->dev, pdata->vm_irqs[i],
@@ -2169,6 +2175,41 @@ static void ether_napi_enable(struct ether_priv_data *pdata)
 	}
 }
+static void ether_free_coe_resource(struct ether_priv_data *pdata)
+{
+	int i;
+
+	for (i = 0; i < OSI_MGBE_COE_NUM_RX_FRAMES; i++) {
+		dma_unmap_single(pdata->dev, pdata->mgbe_coe.rx_fb_addr_phys[i],
+				 PAGE_SIZE, DMA_FROM_DEVICE);
+		free_page(pdata->mgbe_coe.rx_fb_addr[i]);
+	}
+	dma_free_coherent(pdata->dev,
+			  sizeof(struct osi_mgbe_coe_pib) * pdata->mgbe_coe.rx_pib_sz,
+			  (void *)pdata->mgbe_coe.rx_pib_addr,
+			  pdata->mgbe_coe.rx_pib_addr_phys);
+}
+
+static int ether_allocate_coe_resource(struct ether_priv_data *pdata)
+{
+	int i;
+
+	for (i = 0; i < OSI_MGBE_COE_NUM_RX_FRAMES; i++) {
+		pdata->mgbe_coe.rx_fb_addr[i] = __get_free_page(GFP_DMA);
+		if (pdata->mgbe_coe.rx_fb_addr[i] == 0UL)
+			goto err_unwind;
+		pdata->mgbe_coe.rx_fb_addr_phys[i] = dma_map_single(pdata->dev,
+				(void *)pdata->mgbe_coe.rx_fb_addr[i],
+				PAGE_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(pdata->dev, pdata->mgbe_coe.rx_fb_addr_phys[i])) {
+			free_page(pdata->mgbe_coe.rx_fb_addr[i]);
+			goto err_unwind;
+		}
+	}
+	pdata->mgbe_coe.rx_pib_addr = (u64) dma_alloc_coherent(pdata->dev,
+			sizeof(struct osi_mgbe_coe_pib) * pdata->mgbe_coe.rx_pib_sz,
+			(dma_addr_t *) &pdata->mgbe_coe.rx_pib_addr_phys,
+			GFP_KERNEL | __GFP_ZERO);
+	if (pdata->mgbe_coe.rx_pib_addr == 0ULL)
+		goto err_unwind;
+	dev_dbg(pdata->dev, "%s: rx_pib_addr: virt: %#llx phys: %#llx\n", __func__,
+		pdata->mgbe_coe.rx_pib_addr, pdata->mgbe_coe.rx_pib_addr_phys);
+
+	return 0;
+
+err_unwind:
+	while (--i >= 0) {
+		dma_unmap_single(pdata->dev, pdata->mgbe_coe.rx_fb_addr_phys[i],
+				 PAGE_SIZE, DMA_FROM_DEVICE);
+		free_page(pdata->mgbe_coe.rx_fb_addr[i]);
+	}
+	return -ENOMEM;
+}
+
 /**
  * @brief Free receive skbs
  *
@@ -2257,6 +2298,10 @@ static void free_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
 			pdata->page_pool[chan] = NULL;
 		}
 #endif
+		if (i == pdata->mgbe_coe.vdma && pdata->coe_enable) {
+			dev_dbg(pdata->dev, "%s: Freeing COE DMA resources for vdma %d\n",
+				__func__, i);
+			ether_free_coe_resource(pdata);
+		}
 	}
 }
@@ -2428,6 +2473,7 @@ static int ether_page_pool_create_per_chan(struct ether_priv_data *pdata,
 }
 #endif
+
 /**
  * @brief Allocate Receive DMA channel ring resources.
  *
@@ -2448,6 +2494,7 @@ static int ether_allocate_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
 	unsigned int chan;
 	unsigned int i;
 	int ret = 0;
+	struct osi_ioctl ioctl_data = {};
 	const nveu32_t max_dma_chan[OSI_MAX_MAC_IP_TYPES] = {
 		OSI_EQOS_MAX_NUM_CHANS,
 		OSI_MGBE_T23X_MAX_NUM_CHANS,
@@ -2478,6 +2525,22 @@ static int ether_allocate_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
 			if (ret < 0) {
 				goto exit;
 			}
+			if (pdata->coe_enable && chan == pdata->mgbe_coe.vdma) {
+				dev_dbg(pdata->dev, "%s: Allocating COE DMA resources for vdma %d\n",
+					__func__, chan);
+				ret = ether_allocate_coe_resource(pdata);
+				if (ret < 0) {
+					goto exit;
+				}
+				/* Program the buffers in HW */
+				memcpy(&ioctl_data.data.mgbe_coe, &pdata->mgbe_coe,
+				       sizeof(struct osi_mgbe_coe));
+				ioctl_data.cmd = OSI_CMD_GMSL_COE_CONFIG;
+				ret = osi_handle_ioctl(pdata->osi_core, &ioctl_data);
+				if (ret < 0) {
+					dev_err(pdata->dev, "Enabling MAC COE in HW failed\n");
+				} else {
+					dev_info(pdata->dev, "MAC COE enabled in HW\n");
+				}
+			}
 		}
 	}
@@ -2666,6 +2729,10 @@ static void ether_init_invalid_chan_ring(struct osi_dma_priv_data *osi_dma)
 	for (i = osi_dma->num_dma_chans; i < max_dma_chan[osi_dma->mac]; i++) {
 		osi_dma->dma_chans[i] = ETHER_INVALID_CHAN_NUM;
 	}
+
+	for (i = osi_dma->num_dma_chans_coe; i < max_dma_chan[osi_dma->mac]; i++) {
+		osi_dma->dma_chans_coe[i] = ETHER_INVALID_CHAN_NUM;
+	}
 }
 /**
@@ -3126,6 +3193,14 @@ int ether_open(struct net_device *dev)
 	}
 	/* initialize MAC/MTL/DMA Common registers */
+	/* If COE is enabled, disable RIWT so that IOC is set for all descriptors. */
+	if (pdata->coe_enable) {
+		pdata->osi_dma->use_riwt = OSI_DISABLE;
+		pdata->osi_dma->coe_enable = pdata->coe_enable;
+		pdata->osi_dma->mgbe_coe = pdata->mgbe_coe;
+		pdata->osi_core->coe_enable = pdata->coe_enable;
+		pdata->osi_core->mgbe_coe = pdata->mgbe_coe;
+	}
 	ret = osi_hw_core_init(pdata->osi_core);
 	if (ret < 0) {
 		dev_err(pdata->dev,
@@ -3901,8 +3976,8 @@ unsigned short ether_select_queue(struct net_device *dev,
 	if ((osi_core->pre_sil == OSI_ENABLE) && (pdata->tx_queue_select != 0U)) {
 		txqueue_select = pdata->tx_queue_select;
 	} else {
-		for (i = 0; i < osi_core->num_mtl_queues; i++) {
-			mtlq = osi_core->mtl_queues[i];
+		for (i = 0; i < osi_core->num_dma_chans; i++) {
+			mtlq = osi_core->dma_chans[i];
 			if (pdata->txq_prio[mtlq] == priority) {
 				txqueue_select = (unsigned short)i;
 				break;
@@ -4981,6 +5056,26 @@ static void ether_set_vm_irq_chan_mask(struct ether_vm_irq_data *vm_irq_data,
 	}
 }
+/**
+ * @brief ether_set_coe_chan_mask - Set CoE channels bitmap
+ *
+ * @param[in] osi_dma: DMA data
+ * @param[in] num_vm_chan: Number of VM DMA channels
+ * @param[in] vm_chans: Pointer to list of VM DMA channels
+ *
+ * @retval None.
+ */
+static void ether_set_coe_chan_mask(struct osi_dma_priv_data *osi_dma,
+				    unsigned int num_vm_chan,
+				    unsigned int *vm_chans)
+{
+	osi_dma->num_dma_chans_coe = num_vm_chan;
+
+	for (u32 i = 0; i < num_vm_chan; i++) {
+		osi_dma->dma_chans_coe[i] = vm_chans[i];
+	}
+}
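A minimal sketch of how the dma_chans_coe bookkeeping above can be consumed;
ether_is_coe_chan() is a hypothetical helper, not part of this patch, shown
only to make the intent of the mapping explicit:

static inline bool ether_is_coe_chan(const struct osi_dma_priv_data *osi_dma,
				     unsigned int chan)
{
	unsigned int i;

	/* dma_chans_coe[] lists the VM DMA channels claimed for CoE */
	for (i = 0; i < osi_dma->num_dma_chans_coe; i++) {
		if (osi_dma->dma_chans_coe[i] == chan)
			return true;
	}
	return false;
}

The suspend path later in this patch walks the same array to disable Tx/Rx
interrupts on the CoE channels.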
+
 /**
  * @brief ether_get_rx_riit - Get the rx_riit value for speed.
  *
@@ -5205,6 +5300,8 @@ static int ether_get_vm_irq_data(struct platform_device *pdev,
 	child_id = 0;
 	for_each_child_of_node(vm_node, temp) {
+		bool is_coe;
+
 		ret = of_property_read_u32(temp, "nvidia,vm-irq-id", &vm_irq_id);
 		if (ret != 0) {
 			vm_irq_id = child_id;
@@ -5246,15 +5343,35 @@ static int ether_get_vm_irq_data(struct platform_device *pdev,
 			}
 		}
+		is_coe = of_property_read_bool(temp, "nvidia,camera-over-eth");
+		if (is_coe) {
+			osi_core->irq_data[node].is_coe = 1U;
+			dev_info(&pdev->dev, "VM IRQ is handled by Camera CPU: %u\n",
+				 node);
+		} else {
+			osi_core->irq_data[node].is_coe = 0U;
+		}
+
 		/* Assuming there would not be more than 0xFFFF nodes */
 		child_id &= MAX_CHILD_NODES;
 		child_id++;
 	}
 	for (node = 0; node < osi_core->num_vm_irqs; node++) {
-		ether_set_vm_irq_chan_mask(&pdata->vm_irq_data[node],
+		if (osi_core->irq_data[node].is_coe) {
+			if (pdata->osi_dma->num_dma_chans_coe != 0) {
+				dev_err(&pdev->dev, "Only one CoE IRQ allowed\n");
+				return -EINVAL;
+			}
+			/* CoE channels IRQ will be handled by camera CPU */
+			ether_set_coe_chan_mask(pdata->osi_dma,
+						osi_core->irq_data[node].num_vm_chans,
+						osi_core->irq_data[node].vm_chans);
+		} else {
+			ether_set_vm_irq_chan_mask(&pdata->vm_irq_data[node],
 					   osi_core->irq_data[node].num_vm_chans,
 					   osi_core->irq_data[node].vm_chans);
+		}
 		pdata->vm_irq_data[node].pdata = pdata;
 	}
@@ -7690,6 +7807,103 @@ void ether_shutdown(struct platform_device *pdev)
 		dev_err(pdata->dev, "Failure in ether_close");
 }
+int nvether_coe_config(struct net_device *ndev,
+		       struct nvether_coe_cfg *ether_coe_cfg)
+{
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct osi_core_priv_data *osi_core = pdata->osi_core;
+	struct macsec_priv_data *macsec_pdata = pdata->macsec_pdata;
+	struct osi_macsec_lut_config lut_config;
+	int ret = -ENOENT;
+
+	/* If MACsec is not enabled yet, enable it.
+	 * FIXME: needs a lock to protect against concurrent MACsec
+	 * configuration outside this API (from the supplicant, for example).
+	 */
+	if (macsec_pdata == NULL) {
+		dev_err(pdata->dev, "macsec is not supported in platform, COE config failed\n");
+		return ret;
+	}
+	if (macsec_pdata->enabled != OSI_ENABLE) {
+		ret = macsec_open(macsec_pdata, NULL);
+		if (ret < 0) {
+			dev_err(pdata->dev, "macsec_open failure, COE config failed\n");
+			return ret;
+		}
+	}
+	if (macsec_pdata->coe.enable == OSI_ENABLE) {
+		return 0;
+	}
+
+	/* Program the COE LUT classifier for AVTP COE packets (EtherType 0x22F0) */
+	memset(&lut_config, 0, sizeof(lut_config));
+	lut_config.table_config.ctlr_sel = OSI_CTLR_SEL_RX;
+	lut_config.table_config.index = 0U;
+	if (ether_coe_cfg->vlan_enable == COE_VLAN_ENABLE) {
+		/* 16B offset for AV ethtype with VLAN,
+		 * divided by 2 as HW expects offset in multiple of 2.
+		 */
+		lut_config.coe_lut_inout.offset = 8U;
+	} else if (ether_coe_cfg->vlan_enable == COE_VLAN_DISABLE) {
+		/* 12B offset for AV ethtype without VLAN,
+		 * divided by 2 as HW expects offset in multiple of 2.
+		 */
+		lut_config.coe_lut_inout.offset = 6U;
+	} else {
+		dev_err(pdata->dev, "Invalid VLAN enable value\n");
+		return -EINVAL;
+	}
+	lut_config.coe_lut_inout.byte_pattern_mask = 0xF;
+	lut_config.coe_lut_inout.byte_pattern[1] = (unsigned char) 0x22;
+	lut_config.coe_lut_inout.byte_pattern[0] = (unsigned char) 0xF0;
+	lut_config.lut_sel = OSI_LUT_SEL_COE;
+	lut_config.table_config.rw = OSI_LUT_WRITE;
+	ret = osi_macsec_config_lut(osi_core, &lut_config);
+	if (ret < 0) {
+		dev_err(pdata->dev, "%s: Failed to config COE LUT\n", __func__);
+		return ret;
+	}
+
+	/* Program the COE header offset and enable COE engine */
+	ret = macsec_coe_config(macsec_pdata, ether_coe_cfg->coe_enable,
+				ether_coe_cfg->coe_hdr_offset);
+	if (ret < 0) {
+		dev_err(pdata->dev, "COE config in macsec controller failed\n");
+		return ret;
+	} else {
+		macsec_pdata->coe.enable = ether_coe_cfg->coe_enable;
+		macsec_pdata->coe.hdr_offset = ether_coe_cfg->coe_hdr_offset;
+		dev_info(pdata->dev, "COE config success\n");
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvether_coe_config);
+
+int nvether_coe_chan_config(struct net_device *ndev,
+			    u32 dmachan,
+			    struct nvether_per_coe_cfg *p_coe_cfg)
+{
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct macsec_priv_data *macsec_pdata = pdata->macsec_pdata;
+	int ret;
+
+	if (macsec_pdata == NULL) {
+		dev_err(pdata->dev, "macsec is not supported in platform, COE config failed\n");
+		return -ENOENT;
+	}
+
+	if (macsec_pdata->coe.enable != OSI_ENABLE) {
+		dev_err(pdata->dev, "COE not enabled\n");
+		return -EIO;
+	}
+
+	ret = macsec_coe_lc(macsec_pdata, dmachan,
+			    p_coe_cfg->lc1,
+			    p_coe_cfg->lc2);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvether_coe_chan_config);
+
 #ifdef CONFIG_PM
 static s32 ether_handle_rx_buffers(struct ether_priv_data *pdata,
 				   uint32_t suspend)
@@ -7893,6 +8107,16 @@ int ether_suspend_noirq(struct device *dev)
 				    OSI_DMA_INTR_DISABLE);
 	}
+	for (i = 0; i < osi_dma->num_dma_chans_coe; i++) {
+		chan = osi_dma->dma_chans_coe[i];
+		osi_handle_dma_intr(osi_dma, chan,
+				    OSI_DMA_CH_TX_INTR,
+				    OSI_DMA_INTR_DISABLE);
+		osi_handle_dma_intr(osi_dma, chan,
+				    OSI_DMA_CH_RX_INTR,
+				    OSI_DMA_INTR_DISABLE);
+	}
+
 	if (ether_handle_rx_buffers(pdata, OSI_ENABLE) != 0)
 		dev_err(dev, "Failed to free the Rx buffers\n");
diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h
index 2bbcdea3..41b6b584 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h
+++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.h
@@ -755,6 +755,10 @@ struct ether_priv_data {
 	/** tx bandwidth pkt work queue */
 	struct workqueue_struct *tx_bw_wq;
 #endif
+	/** COE mode enabled */
+	u32 coe_enable;
+	/** OSI instance of COE */
+	struct osi_mgbe_coe mgbe_coe;
 };
 /**
diff --git a/drivers/net/ethernet/nvidia/nvethernet/macsec.c b/drivers/net/ethernet/nvidia/nvethernet/macsec.c
index 9f7f7e9b..4ffb2fef 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/macsec.c
+++ b/drivers/net/ethernet/nvidia/nvethernet/macsec.c
@@ -211,6 +211,38 @@ int macsec_close(struct macsec_priv_data *macsec_pdata)
 	return ret;
 }
+int macsec_coe_config(struct macsec_priv_data *macsec_pdata,
+		      uint32_t coe_enable, uint32_t coe_hdr_offset)
+{
+	int ret = 0;
+	struct ether_priv_data *pdata = macsec_pdata->ether_pdata;
+	struct device *dev = pdata->dev;
+
+	/* Input is already validated */
+	ret = osi_macsec_coe_config(pdata->osi_core, coe_enable,
+				    coe_hdr_offset);
+	if (ret < 0) {
+		dev_err(dev, "osi_macsec_coe_config failed, %d\n", ret);
+	}
+	return ret;
+}
+
+int macsec_coe_lc(struct macsec_priv_data *macsec_pdata,
+		  uint32_t ch, uint32_t lc1, uint32_t lc2)
+{
+	int ret = 0;
+	struct ether_priv_data *pdata = macsec_pdata->ether_pdata;
+	struct device *dev = pdata->dev;
+
+	/* Input is already validated */
+	ret = osi_macsec_coe_lc(pdata->osi_core, ch, lc1, lc2);
+	if (ret < 0) {
+		dev_err(dev, "osi_macsec_coe_lc failed, %d\n", ret);
+	}
+	return ret;
+}
+
 int macsec_open(struct macsec_priv_data *macsec_pdata,
 		void *const genl_info)
 {
diff --git a/drivers/net/ethernet/nvidia/nvethernet/macsec.h b/drivers/net/ethernet/nvidia/nvethernet/macsec.h
index cd1ea494..6f09284e 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/macsec.h
+++ b/drivers/net/ethernet/nvidia/nvethernet/macsec.h
@@ -205,6 +205,20 @@ struct nvpkcs_data {
 	u64 nv_kek;
 };
+/**
+ * @brief MACsec COE private data structure
+ */
+struct macsec_coe {
+	/** Macsec COE state */
+	unsigned int enable;
+	/** Macsec COE hdr offset */
+	unsigned int hdr_offset;
+	/** Macsec COE line counter threshold 1 */
+	unsigned int lc1_threshold[OSI_MGBE_MAX_NUM_CHANS];
+	/** Macsec COE line counter threshold 2 */
+	unsigned int lc2_threshold[OSI_MGBE_MAX_NUM_CHANS];
+};
+
 /**
  * @brief MACsec private data structure
  */
@@ -259,9 +273,15 @@ struct macsec_priv_data {
 	unsigned int macsec_tx_an_map;
 	/** Macsec RX currently enabled AN */
 	unsigned int macsec_rx_an_map;
+	/** Macsec COE instance */
+	struct macsec_coe coe;
 };
 int macsec_probe(struct ether_priv_data *pdata);
+int macsec_coe_config(struct macsec_priv_data *macsec_pdata,
+		      uint32_t coe_enable, uint32_t coe_hdr_offset);
+int macsec_coe_lc(struct macsec_priv_data *macsec_pdata,
+		  uint32_t ch, uint32_t lc1, uint32_t lc2);
 void macsec_remove(struct ether_priv_data *pdata);
 int macsec_open(struct macsec_priv_data *macsec_pdata,
 		void *const genl_info);
diff --git a/drivers/net/ethernet/nvidia/nvethernet/sysfs.c b/drivers/net/ethernet/nvidia/nvethernet/sysfs.c
index ea511cf4..c3bac056 100644
--- a/drivers/net/ethernet/nvidia/nvethernet/sysfs.c
+++ b/drivers/net/ethernet/nvidia/nvethernet/sysfs.c
@@ -533,6 +533,336 @@ static DEVICE_ATTR(macsec_enable, (S_IRUGO | S_IWUSR),
 		   macsec_enable_show,
 		   macsec_enable_store);
+#define MACSEC_COE_LC_INPUT_LEN 3
+extern int macsec_coe_config(struct macsec_priv_data *macsec_pdata,
+			     uint32_t coe_enable, uint32_t coe_hdr_offset);
+/**
+ * @brief Shows the current COE settings of the MACsec controller
+ *
+ * Algorithm: Display the current COE settings for the MACsec controller.
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer to store the current COE setting
+ */
+static ssize_t macsec_coe_enable_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct macsec_priv_data *macsec_pdata = pdata->macsec_pdata;
+	unsigned int coe_enable;
+	unsigned int coe_hdr_offset;
+
+	if (!netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n");
+		return 0;
+	}
+	if (macsec_pdata == NULL) {
+		dev_err(pdata->dev, "Not Allowed. MACSec is not supported on this platform\n");
+		return 0;
+	}
+	if (macsec_pdata->enabled != OSI_ENABLE) {
+		dev_err(pdata->dev, "Not Allowed. MACSec is not enabled on the controller\n");
+		return 0;
+	}
+
+	coe_enable = macsec_pdata->coe.enable;
+	coe_hdr_offset = macsec_pdata->coe.hdr_offset;
+
+	if (OSI_ENABLE == coe_enable) {
+		return scnprintf(buf, PAGE_SIZE, "COE enabled, COE hdr offset: %u\n",
+				 coe_hdr_offset);
+	} else {
+		return scnprintf(buf, PAGE_SIZE, "COE disabled\n");
+	}
+}
+
+/**
+ * @brief Enable or disable the MACsec controller COE logic (Rx only)
+ *
+ * Algorithm: Enables or disables the Rx MACsec controller COE logic.
+ * Expected input: "<coe_enable> <coe_hdr_offset>", e.g. "1 42".
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer which contains the user settings of MACsec COE enable
+ * @param[in] size: size of buffer
+ *
+ * @return size of buffer.
+ */
+static ssize_t macsec_coe_enable_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t size)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct macsec_priv_data *macsec_pdata = pdata->macsec_pdata;
+	uint32_t coe_enable, coe_hdr_offset;
+	int ret = 0, i;
+
+	if (!netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n");
+		return size;
+	}
+	if (macsec_pdata == NULL) {
+		dev_err(pdata->dev, "Not Allowed. MACSec is not supported on this platform\n");
+		return size;
+	}
+	if (macsec_pdata->enabled != OSI_ENABLE) {
+		dev_err(pdata->dev, "Not Allowed. MACSec is not enabled on the controller\n");
+		return size;
+	}
+
+	/* Read user inputs and validate them */
+	i = sscanf(buf, "%u %u", &coe_enable, &coe_hdr_offset);
+	if (i != 2) {
+		dev_err(pdata->dev, "Expected \"<coe_enable> <coe_hdr_offset>\"\n");
+		return size;
+	}
+	if ((coe_enable != OSI_ENABLE) && (coe_enable != OSI_DISABLE)) {
+		dev_err(pdata->dev, "Invalid value %u for coe_enable\n", coe_enable);
+		return size;
+	}
+	if ((coe_hdr_offset < 16) || (coe_hdr_offset > 56)) {
+		dev_err(pdata->dev, "Invalid value %u for coe_hdr_offset\n", coe_hdr_offset);
+		return size;
+	}
+
+	/* Configure COE */
+	if (coe_enable == OSI_ENABLE) {
+		if (macsec_pdata->coe.enable == OSI_ENABLE) {
+			dev_err(pdata->dev, "COE already enabled\n");
+			return size;
+		}
+	} else {
+		if (macsec_pdata->coe.enable == OSI_DISABLE) {
+			dev_err(pdata->dev, "COE already disabled\n");
+			return size;
+		}
+	}
+	ret = macsec_coe_config(macsec_pdata, coe_enable, coe_hdr_offset);
+	if (0 == ret) {
+		macsec_pdata->coe.enable = coe_enable;
+		macsec_pdata->coe.hdr_offset = coe_hdr_offset;
+	}
+	return size;
+}
+
+/**
+ * @brief Sysfs attribute for MACsec COE enable
+ *
+ */
+static DEVICE_ATTR(macsec_coe_enable, (S_IRUGO | S_IWUSR),
+		   macsec_coe_enable_show,
+		   macsec_coe_enable_store);
+
+
+/**
+ * @brief Shows the current COE settings of the MAC controller
+ *
+ * Algorithm: Display the current COE settings for the MAC controller.
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer to store the current COE setting for the MAC
+ */
+static ssize_t mac_coe_enable_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	char *p = buf;
+	int i, j;
+
+	if (!netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n");
+		return 0;
+	}
+
+	if (OSI_ENABLE == pdata->coe_enable) {
+		p += scnprintf(p, PAGE_SIZE - (p - buf), "MGBE COE mode enabled\n");
+		p += scnprintf(p, PAGE_SIZE - (p - buf), "\t vdma: %d\n", pdata->mgbe_coe.vdma);
+		p += scnprintf(p, PAGE_SIZE - (p - buf), "\t pdma: %d\n", pdata->mgbe_coe.pdma);
+		p += scnprintf(p, PAGE_SIZE - (p - buf), "\t PIB size: %d\n",
+			       pdata->mgbe_coe.rx_pib_sz);
+		for (j = 0; j < OSI_MGBE_COE_NUM_RX_FRAMES; j++) {
+			p += scnprintf(p, PAGE_SIZE - (p - buf), "\t FB[%d]:\n", j);
+			for (i = 0; i < 200; i++) {
+				p += scnprintf(p, PAGE_SIZE - (p - buf), "%02x",
+					       *(((unsigned char *)(pdata->mgbe_coe.rx_fb_addr[j])) + i));
+			}
+			p += scnprintf(p, PAGE_SIZE - (p - buf), "\n");
+		}
+		p += scnprintf(p, PAGE_SIZE - (p - buf), "\t PIB:\n");
+		for (i = 0; i < 200; i++) {
+			p += scnprintf(p, PAGE_SIZE - (p - buf), "%02x",
+				       *(((unsigned char *)pdata->mgbe_coe.rx_pib_addr) + i));
+		}
+		return p - buf;
+	} else {
+		return scnprintf(buf, PAGE_SIZE, "MGBE COE mode disabled\n");
+	}
+}
+
+/**
+ * @brief Enable the MAC controller COE logic (Rx only)
+ *
+ * Algorithm: This is used to set the Rx MAC controller COE logic enabled.
+ * Expected input: "<coe_enable> <vdma> <pdma> <rx_pib_sz>". The interface
+ * must be down; the values are latched into OSI at the next interface open.
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer which contains the user settings of MAC COE enable
+ * @param[in] size: size of buffer
+ *
+ * @return size of buffer.
+ */
+static ssize_t mac_coe_enable_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	int32_t coe_enable, vdma, pdma, rx_pib_sz;
+
+	if (netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface must be down\n");
+		return size;
+	}
+
+	/* Read user inputs and validate them */
+	if (sscanf(buf, "%d %d %d %d", &coe_enable, &vdma, &pdma, &rx_pib_sz) != 4) {
+		dev_err(pdata->dev, "Expected \"<coe_enable> <vdma> <pdma> <rx_pib_sz>\"\n");
+		return size;
+	}
+	if ((coe_enable != OSI_ENABLE) && (coe_enable != OSI_DISABLE)) {
+		dev_err(pdata->dev, "Invalid value %d for coe_enable\n", coe_enable);
+		return size;
+	}
+	if ((rx_pib_sz != 256) &&
+	    (rx_pib_sz != 512) &&
+	    (rx_pib_sz != 2048) &&
+	    (rx_pib_sz != 4096)) {
+		dev_err(pdata->dev, "Invalid value %d for rx_pib_sz\n", rx_pib_sz);
+		return size;
+	}
+
+	/* Configure COE */
+	pdata->coe_enable = coe_enable;
+	pdata->mgbe_coe.vdma = vdma;
+	pdata->mgbe_coe.pdma = pdma;
+	pdata->mgbe_coe.rx_pib_sz = rx_pib_sz;
+
+	return size;
+}
+
+/**
+ * @brief Sysfs attribute for MAC COE enable
+ *
+ */
+static DEVICE_ATTR(mac_coe_enable, (S_IRUGO | S_IWUSR),
+		   mac_coe_enable_show,
+		   mac_coe_enable_store);
+
+extern int macsec_coe_lc(struct macsec_priv_data *macsec_pdata,
+			 uint32_t ch, uint32_t lc1, uint32_t lc2);
+/**
+ * @brief Shows the current COE Line counter thresholds.
+ *
+ * Algorithm: Loop through the current COE Line counter thresholds per VDMA
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer to store the current LC thresholds
+ */
+static ssize_t macsec_coe_lc_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct macsec_priv_data *macsec_pdata = pdata->macsec_pdata;
+	unsigned int i;
+	char *start = buf;
+
+	if (!netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n");
+		return 0;
+	}
+	if (macsec_pdata == NULL) {
+		dev_err(pdata->dev, "Not Allowed. MACSec is not supported on this platform\n");
+		return 0;
+	}
+	if (macsec_pdata->enabled != OSI_ENABLE) {
+		dev_err(pdata->dev, "Not Allowed. MACSec is not enabled on the controller\n");
+		return 0;
+	}
+
+	for (i = 0U; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
+		buf += scnprintf(buf, PAGE_SIZE - (buf - start), "ch: %u lc1: %u lc2: %u\n", i,
+				 macsec_pdata->coe.lc1_threshold[i],
+				 macsec_pdata->coe.lc2_threshold[i]);
+	}
+	return (buf - start);
+}
+
+/**
+ * @brief Set the MACsec controller COE line counter thresholds
+ *
+ * Algorithm: This is used to set the threshold per VDMA Rx MACsec COE line counter.
+ * Expected input: "<ch> <lc1> <lc2>".
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer which contains the user settings of MACsec COE LC
+ * @param[in] size: size of buffer
+ *
+ * @return size of buffer.
+ */
+static ssize_t macsec_coe_lc_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct macsec_priv_data *macsec_pdata = pdata->macsec_pdata;
+	uint32_t ch, lc1, lc2;
+	int ret = 0, i;
+
+	if (!netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n");
+		return size;
+	}
+	if (macsec_pdata == NULL) {
+		dev_err(pdata->dev, "Not Allowed. MACSec is not supported on this platform\n");
+		return size;
+	}
+	if (macsec_pdata->enabled != OSI_ENABLE) {
+		dev_err(pdata->dev, "Not Allowed. MACSec is not enabled on the controller\n");
+		return size;
+	}
+
+	/* Read user inputs and validate them */
+	i = sscanf(buf, "%u %u %u", &ch, &lc1, &lc2);
+	if (i != MACSEC_COE_LC_INPUT_LEN) {
+		dev_err(pdata->dev, "%s: Invalid COE LC inputs (read %d)\n", __func__, i);
+		return size;
+	}
+	if (ch >= OSI_MGBE_MAX_NUM_CHANS) {
+		dev_err(pdata->dev, "Invalid value %u for channel\n", ch);
+		return size;
+	}
+	if ((lc1 > OSI_MACSEC_COE_MAX_LC) || (lc2 > OSI_MACSEC_COE_MAX_LC)) {
+		dev_err(pdata->dev, "Line counter threshold has to be <= %#x\n",
+			OSI_MACSEC_COE_MAX_LC);
+		return size;
+	}
+
+	/* Configure COE LC threshold */
+	ret = macsec_coe_lc(macsec_pdata, ch, lc1, lc2);
+	if (ret != 0) {
+		dev_err(pdata->dev, "Line counter threshold config failed\n");
+	} else {
+		macsec_pdata->coe.lc1_threshold[ch] = lc1;
+		macsec_pdata->coe.lc2_threshold[ch] = lc2;
+	}
+	return size;
+}
+
+/**
+ * @brief Sysfs attribute for MACsec COE LC threshold
+ *
+ */
+static DEVICE_ATTR(macsec_coe_lc, (S_IRUGO | S_IWUSR),
+		   macsec_coe_lc_show,
+		   macsec_coe_lc_store);
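A hedged illustration of the line-counter semantics (assuming, per the
descriptions in nvethernet-public.h, that lc1 is the number of lines in the
first subframe and lc2 the number of lines in each subsequent subframe;
coe_num_subframes() is a hypothetical helper, not part of this patch):

static inline u32 coe_num_subframes(u32 frame_lines, u32 lc1, u32 lc2)
{
	/* One subframe of lc1 lines, then lc2-line subframes for the rest */
	if (frame_lines <= lc1 || lc2 == 0U)
		return 1U;
	return 1U + DIV_ROUND_UP(frame_lines - lc1, lc2);
}

With the defaults COE_MACSEC_SFT_LC1 = COE_MACSEC_SFT_LC2 = 1024, a 1080-line
frame would span two subframes under this reading.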
+
 /**
  * @brief Shows the current setting of MACsec cipther set
  *
@@ -1726,6 +2056,162 @@ static DEVICE_ATTR(macsec_sci_lut, (S_IRUGO | S_IWUSR), NULL,
 		   macsec_sci_lut_store);
+/**
+ * @brief Shows the current COE LUT configuration
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer to print the current COE LUT configuration
+ */
+static ssize_t macsec_coe_lut_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct macsec_priv_data *macsec_pdata = pdata->macsec_pdata;
+	struct osi_core_priv_data *osi_core = pdata->osi_core;
+	struct osi_macsec_lut_config lut_config = {0};
+	struct osi_coe_lut_inout coe_lut_inout = {0};
+	int i;
+	char *start = buf;
+
+	if (!netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n");
+		return 0;
+	}
+	if (!macsec_pdata) {
+		dev_err(pdata->dev, "Not Allowed. MACsec is not supported in platform\n");
+		return 0;
+	}
+
+	for (i = 0; i < OSI_COE_LUT_MAX_INDEX; i++) {
+		memset(&lut_config, OSI_NONE, sizeof(lut_config));
+		lut_config.table_config.ctlr_sel = OSI_CTLR_SEL_RX;
+		lut_config.lut_sel = OSI_LUT_SEL_COE;
+		lut_config.table_config.rw = OSI_LUT_READ;
+		lut_config.table_config.index = i;
+		if (osi_macsec_config_lut(osi_core, &lut_config) < 0) {
+			dev_err(dev, "%s: Failed to read COE LUT\n", __func__);
+			goto exit;
+		} else {
+			buf += scnprintf(buf, PAGE_SIZE - (buf - start), "%d.\t", i);
+			if (lut_config.coe_lut_inout.valid != OSI_COE_LUT_ENTRY_VALID) {
+				buf += scnprintf(buf, PAGE_SIZE - (buf - start), "Invalid\n");
+				continue;
+			}
+			coe_lut_inout = lut_config.coe_lut_inout;
+			/* HW design expects offset to be divided by 2.
+			 * So multiply for the actual byte offset.
+			 */
+			buf += scnprintf(buf, PAGE_SIZE - (buf - start), "Offset: %u ",
+					 coe_lut_inout.offset * 2);
+			buf += scnprintf(buf, PAGE_SIZE - (buf - start), "Mask: %#x ",
+					 coe_lut_inout.byte_pattern_mask);
+			buf += scnprintf(buf, PAGE_SIZE - (buf - start), "Pattern: 0x%02x%02x\n",
+					 coe_lut_inout.byte_pattern[1],
+					 coe_lut_inout.byte_pattern[0]);
+		}
+	}
+
+exit:
+	return (buf - start);
+}
+
+#define COE_LUT_INPUTS 5
+
+/**
+ * @brief Set the COE LUT configuration
+ *
+ * @param[in] dev: Device data.
+ * @param[in] attr: Device attribute
+ * @param[in] buf: Buffer which contains the desired LUT configuration
+ * @param[in] size: size of buffer
+ *
+ * @return size of buffer.
+ */
+static ssize_t macsec_coe_lut_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct net_device *ndev = (struct net_device *)dev_get_drvdata(dev);
+	struct ether_priv_data *pdata = netdev_priv(ndev);
+	struct macsec_priv_data *macsec_pdata = pdata->macsec_pdata;
+	struct osi_core_priv_data *osi_core = pdata->osi_core;
+	struct osi_macsec_lut_config lut_config;
+	int ret, temp[OSI_COE_LUT_BYTE_PATTERN_MAX];
+	int i;
+
+	if (!netif_running(ndev)) {
+		dev_err(pdata->dev, "Not Allowed. Ether interface is not up\n");
+		return size;
+	}
+	if (!macsec_pdata) {
+		dev_err(pdata->dev, "Not Allowed. MACsec is not supported on platform\n");
+		return size;
+	}
+
+	memset(&lut_config, 0, sizeof(lut_config));
+	lut_config.table_config.ctlr_sel = OSI_CTLR_SEL_RX;
+
+	ret = sscanf(buf, "%hu %u %x %x%x",
+		     &lut_config.table_config.index,
+		     &lut_config.coe_lut_inout.offset,
+		     &lut_config.coe_lut_inout.byte_pattern_mask,
+		     &temp[1],
+		     &temp[0]);
+	if (ret != COE_LUT_INPUTS) {
+		dev_err(pdata->dev, "Failed to parse COE LUT arguments");
+		goto exit;
+	}
+
+	if (lut_config.coe_lut_inout.offset % 2 != 0) {
+		dev_err(pdata->dev, "Offset field is not a multiple of 2");
+		goto exit;
+	} else {
+		/* HW is expecting offset to be divided by 2 */
+		lut_config.coe_lut_inout.offset /= 2;
+	}
+
+	for (i = OSI_COE_LUT_BYTE_PATTERN_MAX - 1; i >= 0; i--) {
+		if (temp[i] > 0xFF) {
+			dev_err(pdata->dev, "Invalid byte pattern %d\n", temp[i]);
+			goto exit;
+		}
+		lut_config.coe_lut_inout.byte_pattern[i] = (unsigned char)temp[i];
+	}
+	lut_config.lut_sel = OSI_LUT_SEL_COE;
+	lut_config.table_config.rw = OSI_LUT_WRITE;
+	/* Remaining LUT attributes are left zeroed */
+	if (lut_config.table_config.index >= OSI_COE_LUT_MAX_INDEX) {
+		dev_err(dev, "%s: Index can't be >= %d\n", __func__,
+			OSI_COE_LUT_MAX_INDEX);
+		goto exit;
+	}
+	if (lut_config.coe_lut_inout.offset > OSI_COE_LUT_OFFSET_MAX) {
+		dev_err(dev, "%s: COE offset can't be > %d\n", __func__,
+			OSI_COE_LUT_OFFSET_MAX);
+		goto exit;
+	}
+
+	if (osi_macsec_config_lut(osi_core, &lut_config) < 0) {
+		dev_err(dev, "%s: Failed to config COE LUT\n", __func__);
+		goto exit;
+	} else {
+		dev_info(dev, "%s: Added COE LUT idx: %d\n", __func__,
+			 lut_config.table_config.index);
+	}
+
+exit:
+	return size;
+}
+
+/**
+ * @brief Sysfs attribute for MACsec COE LUT config
+ *
+ */
+static DEVICE_ATTR(macsec_coe_lut, (S_IRUGO | S_IWUSR),
+		   macsec_coe_lut_show,
+		   macsec_coe_lut_store);
+
 #ifdef MACSEC_KEY_PROGRAM
 static void dump_kt(char **buf_p, unsigned short ctlr_sel,
 		    struct osi_core_priv_data *osi_core,
@@ -3487,6 +3973,7 @@ static DEVICE_ATTR(hsi_enable, 0644,
 #endif
 #endif /* OSI_STRIPPED_LIB */
+
 /**
  * @brief Attributes for nvethernet sysfs
  */
@@ -3529,6 +4016,10 @@ static struct attribute *ether_sysfs_attrs[] = {
 	&dev_attr_macsec_sc_param_rx_lut.attr,
 	&dev_attr_macsec_cipher.attr,
 	&dev_attr_macsec_enable.attr,
+	&dev_attr_macsec_coe_enable.attr,
+	&dev_attr_macsec_coe_lc.attr,
+	&dev_attr_macsec_coe_lut.attr,
+	&dev_attr_mac_coe_enable.attr,
 	&dev_attr_macsec_an_status.attr,
 	&dev_attr_macsec_mmc_counters_tx.attr,
 	&dev_attr_macsec_mmc_counters_rx.attr,
diff --git a/include/media/fusa-capture/capture-coe.h b/include/media/fusa-capture/capture-coe.h
new file mode 100644
index 00000000..fa291d2a
--- /dev/null
+++ b/include/media/fusa-capture/capture-coe.h
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */ + +#ifndef __FUSA_CAPTURE_COE_H__ +#define __FUSA_CAPTURE_COE_H__ + +#if defined(__KERNEL__) +#include +#include +#else +#include +#endif +#include +#include +#include + +#define __COE_CAPTURE_ALIGN __aligned(8) + +/** + * @brief CoE channel setup config (COE_IOCTL_CAPTURE_SETUP payload). + * + */ +struct coe_ioctl_data_capture_setup { + char if_name[IFNAMSIZ]; /**< Net interface through which the camera is accessible */ + int32_t scratchBufMem; /**< Memory handle of a scratch buffer allocated by a user */ + uint8_t sensor_mac_addr[ETH_ALEN]; /**< Ethernet MAC address of a camera */ + uint8_t vlan_enable; /**< VLAN enable value. 1 - VLAN enabled, 0 - VLAN disabled */ + uint8_t reserved[1U]; +} __COE_CAPTURE_ALIGN; + +/** + * @brief CoE channel buffer operation (COE_IOCTL_BUFFER_OP payload). + */ +struct coe_ioctl_data_buffer_op { + uint32_t mem; /**< handle to a buffer. */ + uint32_t flag; /**< Buffer @ref CAPTURE_BUFFER_OPS bitmask. */ +} __COE_CAPTURE_ALIGN; + +/** + * @brief Enqueue CoE capture request (COE_IOCTL_CAPTURE_REQ payload). + * + * Issue a capture request using a specified buffer mem_fd. + * A buffer must previously have been registered with COE_IOCTL_BUFFER_OP. + * mem_fd_offset specifies the offset from the beginning of the buffer memory from which + * data should be received into. It can be used if an application makes a single large + * allocation for all image memory, and then specifies separate offset within it for each + * capture. + * capture_number is used to track the capture number in userspace. The same capture_number + * is returned by the driver in coe_ioctl_data_capture_status when capture is completed. + */ +struct coe_ioctl_data_capture_req { + uint32_t mem_fd; /**< handle to a buffer. */ + uint32_t buf_size; /**< capture image size in bytes */ + uint32_t mem_fd_offset; /**< offset from the beginning of a mem_fd */ + uint32_t capture_number; /**< capture number for a tracking by userspace */ +} __COE_CAPTURE_ALIGN; + +/** + * @brief Wait on the next completion of an enqueued frame (COE_IOCTL_CAPTURE_STATUS payload). + * + * Wait for the next capture completion with the specified timeout. + * The status of the capture will be returned via capture_status. + * + * @param[in] timeout_ms uint32_t timeout in [ms], 0 for indefinite wait. + * @param[out] capture_number uint32_t capture number for which the status is returned. + * @param[out] capture_status uint32_t capture status, Valid range: [ @ref CAPTURE_STATUS_UNKNOWN, + * @ref CAPTURE_STATUS_INVALID_CAP_SETTINGS] + * @param[out] errData uint32_t extended error data. + * @param[out] sofTimestamp uint64_t start-of-frame time stamp in nanoseconds. + * @param[out] eofTimestamp uint64_t end-of-frame time stamp in nanoseconds. + */ +struct coe_ioctl_data_capture_status { + uint32_t timeout_ms; /**< Capture timeout in milliseconds. */ + uint32_t capture_number; /**< capture number passed with coe_ioctl_data_capture_req */ + uint32_t capture_status; /**< Capture status returned by the driver. */ + uint32_t errData; /**< Extended error data. */ + uint64_t sofTimestamp; /**< Start-of-frame time stamp in nanoseconds. */ + uint64_t eofTimestamp; /**< End-of-frame time stamp in nanoseconds. */ +} __COE_CAPTURE_ALIGN; + +/** + * @brief Get info on CoE channel (COE_IOCTL_GET_INFO payload). 
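+ *
+ * A minimal usage sketch; chan_fd and the COE_IOCTL_GET_INFO request macro
+ * are assumed to come from the driver's UAPI and are shown here only for
+ * illustration:
+ *
+ * @code
+ * struct coe_ioctl_data_get_info info = { 0 };
+ *
+ * if (ioctl(chan_fd, COE_IOCTL_GET_INFO, &info) == 0)
+ *         printf("CoE channel number: %u\n", info.channel_number);
+ * @endcode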
+ */ +struct coe_ioctl_data_get_info { + uint8_t channel_number; /**< channel number value assigned by a driver */ + uint8_t reserved[7U]; +} __COE_CAPTURE_ALIGN; + +#endif /* __FUSA_CAPTURE_COE_H__ */ diff --git a/include/soc/tegra/camrtc-capture-messages.h b/include/soc/tegra/camrtc-capture-messages.h index d260ced4..f24bf4c7 100644 --- a/include/soc/tegra/camrtc-capture-messages.h +++ b/include/soc/tegra/camrtc-capture-messages.h @@ -1,6 +1,16 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2016-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: GPL-2.0-only +/* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. + * All rights reserved. + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. */ /** @@ -121,12 +131,14 @@ struct CAPTURE_MSG_HEADER { #define CAPTURE_CHANNEL_SETUP_REQ MK_U32(0x1E) /** - * @brief VI capture channel setup response. + * @brief VI or CoE capture channel setup response. * * This is a @ref CapCtrlMsgType "capture control message" received in - * response to a @ref CAPTURE_CHANNEL_SETUP_REQ message. + * response to a @ref CAPTURE_CHANNEL_SETUP_REQ or @ref CAPTURE_COE_CHANNEL_SETUP_REQ + * message. * - * @pre A @ref CAPTURE_CHANNEL_SETUP_REQ message has been sent. + * @pre A @ref CAPTURE_CHANNEL_SETUP_REQ or @ref CAPTURE_COE_CHANNEL_SETUP_REQ message + * has been sent. * * @par Header * - @ref CAPTURE_CONTROL_MSG@b::@ref CAPTURE_MSG_HEADER "header" @@ -252,6 +264,130 @@ struct CAPTURE_MSG_HEADER { #define CAPTURE_SYNCGEN_DISABLE_RESP MK_U32(0x1D) /** @} */ +/** + * @defgroup CoeCapCtrlMsgType Message types for CoE capture channel control messages + * + * Capture channel control messages are used to set up, reset, and release + * channels for capturing images from an Ethernet imaging stream source, as well as + * execute other control operations on the CoE capture channel. + * + * @{ + */ + +/** + * @brief CoE capture channel setup request. + * + * This is a "capture control message" to allocate a CoE capture channel and + * associated resources. + * + * @pre The capture-control IVC channel has been set up during + * boot using the @ref CAMRTC_HSP_CH_SETUP command. + * + * @par Header + * - @ref CAPTURE_CONTROL_MSG@b::@ref CAPTURE_MSG_HEADER "header" + * - @ref CAPTURE_MSG_HEADER::msg_id "msg_id" = @ref CAPTURE_COE_CHANNEL_SETUP_REQ + * - @ref CAPTURE_MSG_HEADER::transaction "transaction" = unique ID + * + * @par Payload + * - @ref CAPTURE_COE_CHANNEL_SETUP_REQ_MSG + * + * @par Response + * - @ref CAPTURE_CHANNEL_SETUP_RESP + */ +#define CAPTURE_COE_CHANNEL_SETUP_REQ MK_U32(0x1F) + +/** + * @brief CoE capture channel reset request. + * + * This is a @ref CapCtrlMsgType "capture control message" to + * reset a CoE capture channel. + * + * When RCE FW receives the @ref CAPTURE_COE_CHANNEL_RESET_REQ message, it + * will cancel all capture requests in the channel queue. The response is sent + * after the RCE side channel cleanup is complete. + * + * @pre A CoE capture channel has been set up with + * @ref CAPTURE_COE_CHANNEL_SETUP_REQ. 
+ * + * @par Header + * - @ref CAPTURE_CONTROL_MSG@b::@ref CAPTURE_MSG_HEADER "header" + * - @ref CAPTURE_MSG_HEADER::msg_id "msg_id" = @ref CAPTURE_COE_CHANNEL_RESET_REQ + * - @ref CAPTURE_MSG_HEADER::channel_id "channel_id" = + * @ref CAPTURE_COE_CHANNEL_SETUP_RESP_MSG@b::@ref CAPTURE_MSG_HEADER "header"@b::@ref CAPTURE_MSG_HEADER::channel_id "channel_id" + * + * @par Payload + * - @ref CAPTURE_COE_CHANNEL_RESET_REQ_MSG + * + * @par Response + * - @ref CAPTURE_COE_CHANNEL_RESET_RESP + */ +#define CAPTURE_COE_CHANNEL_RESET_REQ MK_U32(0x26) + +/** + * @brief CoE capture channel reset response. + * + * This is a @ref CapCtrlMsgType "capture control message" received in + * response to a @ref CAPTURE_COE_CHANNEL_RESET_REQ message. + * + * @pre A @ref CAPTURE_COE_CHANNEL_RESET_REQ message has been sent to the + * logical channel. + * + * @par Header + * - @ref CAPTURE_CONTROL_MSG@b::@ref CAPTURE_MSG_HEADER "header" + * - @ref CAPTURE_MSG_HEADER::msg_id "msg_id" = @ref CAPTURE_COE_CHANNEL_RESET_RESP + * - @ref CAPTURE_MSG_HEADER::channel_id "channel_id" = + * @ref CAPTURE_COE_CHANNEL_RESET_REQ_MSG@b::@ref CAPTURE_MSG_HEADER "header"@b::@ref CAPTURE_MSG_HEADER::channel_id "channel_id" + * + * @par Payload + * - @ref CAPTURE_COE_CHANNEL_RESET_RESP_MSG + */ +#define CAPTURE_COE_CHANNEL_RESET_RESP MK_U32(0x27) + +/** + * @brief CoE capture channel release request. + * + * This is a @ref CapCtrlMsgType "capture control message" to + * release a CoE capture channel. Cancels all pending capture + * requests. + * + * @pre A CoE capture channel has been set up with + * @ref CAPTURE_COE_CHANNEL_SETUP_REQ. + * + * @par Header + * - @ref CAPTURE_CONTROL_MSG@b::@ref CAPTURE_MSG_HEADER "header" + * - @ref CAPTURE_MSG_HEADER::msg_id "msg_id" = @ref CAPTURE_COE_CHANNEL_RELEASE_REQ + * - @ref CAPTURE_MSG_HEADER::channel_id "channel_id" = + * @ref CAPTURE_COE_CHANNEL_SETUP_RESP_MSG@b::@ref CAPTURE_MSG_HEADER "header"@b::@ref CAPTURE_MSG_HEADER::channel_id "channel_id" + * + * @par Payload + * - @ref CAPTURE_COE_CHANNEL_RELEASE_REQ_MSG + * + * @par Response + * - @ref CAPTURE_COE_CHANNEL_RELEASE_RESP + */ +#define CAPTURE_COE_CHANNEL_RELEASE_REQ MK_U32(0x28) + +/** + * @brief CoE capture channel release response. + * + * This is a @ref CapCtrlMsgType "capture control message" received in + * response to a @ref CAPTURE_COE_CHANNEL_RELEASE_REQ message. + * + * @pre A @ref CAPTURE_COE_CHANNEL_RELEASE_REQ message has been sent to the + * logical channel. + * + * @par Header + * - @ref CAPTURE_CONTROL_MSG@b::@ref CAPTURE_MSG_HEADER "header" + * - @ref CAPTURE_MSG_HEADER::msg_id "msg_id" = @ref CAPTURE_COE_CHANNEL_RELEASE_RESP + * - @ref CAPTURE_MSG_HEADER::channel_id "channel_id" = + * @ref CAPTURE_COE_CHANNEL_RELEASE_REQ_MSG@b::@ref CAPTURE_MSG_HEADER "header"@b::@ref CAPTURE_MSG_HEADER::channel_id "channel_id" + * + * @par Payload + * - @ref CAPTURE_COE_CHANNEL_RELEASE_RESP_MSG + */ +#define CAPTURE_COE_CHANNEL_RELEASE_RESP MK_U32(0x29) +/** @} */ + /** * @defgroup IspCapCtrlMsgType Message types for ISP capture-control IVC channel messages. * @{ @@ -760,6 +896,66 @@ struct CAPTURE_MSG_HEADER { /** @} */ /** @} */ +/** + * @defgroup CoeCapMsgType Message types for CoE capture request messages and indications. + * + * Capture channel messages are used to submit capture requests and to + * receive status indications pertaining to submitted requests. + * + * @{ + */ + +/** + * @brief Submit a new capture request on a CoE capture channel. 
+ *
+ * This is a @ref CapMsgType "capture channel message" to
+ * submit a CoE capture request. The capture request provides all the information
+ * needed to submit a capture request to the Ethernet engine, such as the DMA
+ * address of the buffer, the buffer size, etc.
+ *
+ * The capture request is sent asynchronously and is queued by RCE for execution.
+ * Status of the request is indicated with a @ref CAPTURE_COE_STATUS_IND message.
+ *
+ * @pre A CoE capture channel has been set up with
+ *      @ref CAPTURE_COE_CHANNEL_SETUP_REQ.
+ *
+ * @par Header
+ * - @ref CAPTURE_MSG@b::@ref CAPTURE_MSG_HEADER "header"
+ *   - @ref CAPTURE_MSG_HEADER::msg_id "msg_id" = @ref CAPTURE_COE_REQUEST
+ *   - @ref CAPTURE_MSG_HEADER::channel_id "channel_id" =
+ *     @ref CAPTURE_CHANNEL_SETUP_RESP_MSG@b::@ref CAPTURE_MSG_HEADER
+ *     "header"@b::@ref CAPTURE_MSG_HEADER::channel_id "channel_id"
+ *
+ * @par Payload
+ * - @ref CAPTURE_COE_REQUEST_MSG
+ *
+ * @par Response
+ * - @ref CAPTURE_COE_STATUS_IND (asynchronous)
+ */
+#define CAPTURE_COE_REQUEST MK_U32(0x0A)
+
+/**
+ * @brief Capture status indication for CoE capture channel.
+ *
+ * This is a @ref CapMsgType "capture channel message"
+ * received in response to a @ref CAPTURE_COE_REQUEST message
+ * when the capture request has been completed.
+ * It is sent asynchronously whenever capture request completion is signalled by HW.
+ *
+ * @pre A @ref CAPTURE_COE_REQUEST has been sent.
+ *
+ * @par Header
+ * - @ref CAPTURE_MSG@b::@ref CAPTURE_MSG_HEADER "header"
+ *   - @ref CAPTURE_MSG_HEADER::msg_id "msg_id" = @ref CAPTURE_COE_STATUS_IND
+ *   - @ref CAPTURE_MSG_HEADER::channel_id "channel_id" =
+ *     @ref CAPTURE_COE_REQUEST_MSG@b::@ref CAPTURE_MSG_HEADER "header"@b::@ref CAPTURE_MSG_HEADER::channel_id "channel_id"
+ *
+ * @par Payload
+ * - @ref CAPTURE_COE_STATUS_IND_MSG
+ */
+#define CAPTURE_COE_STATUS_IND MK_U32(0x0B)
+/** @} */
+
 /**
  * @brief Invalid message type. This can be used to respond to an invalid request.
  */
@@ -818,6 +1014,22 @@ struct CAPTURE_CHANNEL_SETUP_RESP_MSG {
 	uint64_t vi_channel_mask;
 } CAPTURE_IVC_ALIGN;
+/** @brief Message data for @ref CAPTURE_COE_CHANNEL_SETUP_REQ message */
+struct CAPTURE_COE_CHANNEL_SETUP_REQ_MSG {
+	/** Capture channel configuration. */
+	struct capture_coe_channel_config channel_config;
+} CAPTURE_IVC_ALIGN;
+
+/** @brief Message data for @ref CAPTURE_COE_REQUEST message. */
+struct CAPTURE_COE_REQUEST_MSG {
+	/** Index of the buffer to be captured, for tracking by KMD. */
+	uint32_t buffer_index;
+	/** Length of the buffer to be captured. */
+	uint32_t buf_len;
+	/** DMA address of the buffer to be captured. */
+	iova_t buf_mgbe_iova;
+} CAPTURE_IVC_ALIGN;
+
 /**
  * @defgroup CapResetFlags VI Capture channel reset flags
  * @{
@@ -844,6 +1056,42 @@ struct CAPTURE_CHANNEL_RESET_RESP_MSG {
 	uint32_t pad__;
 } CAPTURE_IVC_ALIGN;
+/** @brief Message data for @ref CAPTURE_COE_CHANNEL_RESET_REQ message */
+struct CAPTURE_COE_CHANNEL_RESET_REQ_MSG {
+	/** Placeholder. Unused */
+	uint32_t reset_flags;
+
+	/** Reserved */
+	uint32_t pad__;
+} CAPTURE_IVC_ALIGN;
+
+/** @brief Message data for @ref CAPTURE_COE_CHANNEL_RESET_RESP message */
+struct CAPTURE_COE_CHANNEL_RESET_RESP_MSG {
+	/** Request result code. See @ref CapErrorCodes "result codes". */
+	capture_result result;
+
+	/** Reserved */
+	uint32_t pad__;
+} CAPTURE_IVC_ALIGN;
+
+/** @brief Message data for @ref CAPTURE_COE_CHANNEL_RELEASE_REQ message */
+struct CAPTURE_COE_CHANNEL_RELEASE_REQ_MSG {
+	/** Placeholder. Unused */
+	uint32_t reset_flags;
+
+	/** Reserved */
+	uint32_t pad__;
+} CAPTURE_IVC_ALIGN;
+
+/** @brief Message data for @ref CAPTURE_COE_CHANNEL_RELEASE_RESP message */
+struct CAPTURE_COE_CHANNEL_RELEASE_RESP_MSG {
+	/** Request result code. See @ref CapErrorCodes "result codes". */
+	capture_result result;
+
+	/** Reserved */
+	uint32_t pad__;
+} CAPTURE_IVC_ALIGN;
+
 /** @brief Message data for @ref CAPTURE_CHANNEL_RELEASE_REQ message */
 struct CAPTURE_CHANNEL_RELEASE_REQ_MSG {
 	/** Unused */
@@ -1740,12 +1988,27 @@ struct CAPTURE_CONTROL_MSG {
 	/** Message data for @ref CAPTURE_CHANNEL_SETUP_RESP message */
 	struct CAPTURE_CHANNEL_SETUP_RESP_MSG channel_setup_resp;
 	/** @anon_union_member */
+	/** Message data for @ref CAPTURE_COE_CHANNEL_SETUP_REQ message */
+	struct CAPTURE_COE_CHANNEL_SETUP_REQ_MSG channel_coe_setup_req;
+	/** @anon_union_member */
 	/** Message data for @ref CAPTURE_CHANNEL_RESET_REQ message */
 	struct CAPTURE_CHANNEL_RESET_REQ_MSG channel_reset_req;
 	/** @anon_union_member */
 	/** Message data for @ref CAPTURE_CHANNEL_RESET_RESP message */
 	struct CAPTURE_CHANNEL_RESET_RESP_MSG channel_reset_resp;
 	/** @anon_union_member */
+	/** Message data for @ref CAPTURE_COE_CHANNEL_RESET_REQ message */
+	struct CAPTURE_COE_CHANNEL_RESET_REQ_MSG channel_coe_reset_req;
+	/** @anon_union_member */
+	/** Message data for @ref CAPTURE_COE_CHANNEL_RESET_RESP message */
+	struct CAPTURE_COE_CHANNEL_RESET_RESP_MSG channel_coe_reset_resp;
+	/** @anon_union_member */
+	/** Message data for @ref CAPTURE_COE_CHANNEL_RELEASE_REQ message */
+	struct CAPTURE_COE_CHANNEL_RELEASE_REQ_MSG channel_coe_release_req;
+	/** @anon_union_member */
+	/** Message data for @ref CAPTURE_COE_CHANNEL_RELEASE_RESP message */
+	struct CAPTURE_COE_CHANNEL_RELEASE_RESP_MSG channel_coe_release_resp;
+	/** @anon_union_member */
 	/** Message data for @ref CAPTURE_CHANNEL_RELEASE_REQ message */
 	struct CAPTURE_CHANNEL_RELEASE_REQ_MSG channel_release_req;
 	/** @anon_union_member */
@@ -1923,6 +2186,54 @@ struct CAPTURE_STATUS_IND_MSG {
 	uint32_t pad__;
 } CAPTURE_IVC_ALIGN;
+/** @brief Message data for @ref CAPTURE_COE_STATUS_IND message. */
+struct CAPTURE_COE_STATUS_IND_MSG {
+	/**
+	 * Buffer index to match against the capture request to which
+	 * this completion corresponds.
+	 */
+	uint32_t buffer_index;
+/**
+ * CoE capture error indicating some Ethernet packets carrying image data were lost.
+ */
+#define CAPTURE_STATUS_COE_PACKET_LOSS MK_U32(2)
+
+/**
+ * CoE capture error indicating the Start Of Frame packet was never received.
+ */
+#define CAPTURE_STATUS_COE_SOF_MISSED MK_U32(3)
+
+/**
+ * CoE capture error indicating a frame with an unexpected sequence number was received.
+ * This can be caused, for example, by a lost EOF packet.
+ */
+#define CAPTURE_STATUS_COE_DISCONTINUITY MK_U32(4)
+
+/**
+ * CoE capture error indicating SW has aborted the capture (did not wait for all
+ * network packets carrying the frame data to be received).
+ */
+#define CAPTURE_STATUS_COE_ABORTED MK_U32(5)
+
+	/** Capture status to indicate to the host
+	 * Valid range: [ @ref CAPTURE_STATUS_UNKNOWN,
+	 * @ref CAPTURE_STATUS_SUCCESS,
+	 * @ref CAPTURE_STATUS_COE_*]
+	 */
+	uint32_t capture_status;
+
+	/**
+	 * Timestamp of the SOF event in nanoseconds.
+	 * Valid range: [0, UINT64_MAX].
+	 */
+	uint64_t timestamp_sof_ns;
+
+	/**
+	 * Timestamp of the EOF event in nanoseconds.
+	 * Valid range: [0, UINT64_MAX].
+	 */
+	uint64_t timestamp_eof_ns;
+} CAPTURE_IVC_ALIGN;
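A sketch of how a sender could populate a CoE capture request using the
definitions above (channel_id, buf_len and buf_iova are placeholders for
values obtained from the setup response and the caller's buffer mapping;
the IVC transport itself is out of scope here):

	struct CAPTURE_MSG msg = { 0 };

	msg.header.msg_id = CAPTURE_COE_REQUEST;
	msg.header.channel_id = channel_id;
	msg.capture_coe_req.buffer_index = 0U;  /* matched in the status indication */
	msg.capture_coe_req.buf_len = buf_len;
	msg.capture_coe_req.buf_mgbe_iova = buf_iova;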
 /** @brief Message data for @ref CAPTURE_ISP_REQUEST_REQ message. */
 struct CAPTURE_ISP_REQUEST_REQ_MSG {
@@ -2020,7 +2331,11 @@ struct CAPTURE_MSG {
 	/** @anon_union_member */
 	struct CAPTURE_REQUEST_REQ_MSG capture_request_req;
 	/** @anon_union_member */
+	struct CAPTURE_COE_REQUEST_MSG capture_coe_req;
+	/** @anon_union_member */
 	struct CAPTURE_STATUS_IND_MSG capture_status_ind;
+	/** @anon_union_member */
+	struct CAPTURE_COE_STATUS_IND_MSG capture_coe_status_ind;
 	/** @anon_union_member */
 	CAPTURE_ISP_REQUEST_REQ_MSG capture_isp_request_req;
diff --git a/include/soc/tegra/camrtc-capture.h b/include/soc/tegra/camrtc-capture.h
index c763d66c..57af28db 100644
--- a/include/soc/tegra/camrtc-capture.h
+++ b/include/soc/tegra/camrtc-capture.h
@@ -1,6 +1,16 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2016-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: GPL-2.0-only
+/* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
  */
 /**
@@ -852,6 +862,95 @@ struct capture_channel_config {
 } CAPTURE_IVC_ALIGN;
+/** Length of Ethernet MAC address */
+#define COE_ETH_ALEN 6U
+
+/**
+ * @brief Describes RTCPU CoE resources for a capture pipe-line.
+ */
+struct capture_coe_channel_config {
+	/**
+	 * Numerical instance ID of an ethernet controller for the channel
+	 */
+	uint32_t mgbe_instance_id;
+	/**
+	 * Virtual DMA channel number for this capture channel
+	 */
+	uint16_t dma_chan;
+	/**
+	 * Physical DMA which services the CoE channel
+	 */
+	uint16_t pdma_chan_id;
+	/**
+	 * Hardware IRQ ID which will be asserted for events on that DMA channel
	 */
+	uint16_t mgbe_irq_num;
+	/**
+	 * Ethernet address of a camera module which will use the channel
+	 */
+	uint8_t mac_addr[COE_ETH_ALEN];
+	/**
+	 * IOVA for RX descriptors for MGBE access
+	 */
+	iova_t rx_desc_ring_iova_mgbe;
+	/**
+	 * IOVA for RX descriptors for RCE access
+	 */
+	iova_t rx_desc_ring_iova_rce;
+	/**
+	 * IOVA for RX descriptors shadow ring for RCE access
+	 */
+	iova_t rx_desc_shdw_iova_rce;
+	/**
+	 * Size of Rx descriptor ring buffer memory area
+	 */
+	uint64_t rx_desc_ring_mem_size;
+	/**
+	 * IOVA for RX packet headers memory area for MGBE access
+	 */
+	iova_t rx_pkthdr_iova_mgbe;
+	/**
+	 * Size of Rx packet headers memory area
+	 */
+	uint64_t rx_pkthdr_mem_size;
+	/**
+	 * IOVA for RX Packet Info descriptors memory area for MGBE access
+	 */
+	iova_t rx_pktinfo_iova_mgbe;
+	/**
+	 * IOVA for RX Packet Info descriptors memory area for RCE access
+	 */
+	iova_t rx_pktinfo_iova_rce;
+	/**
+	 * Size of RX Packet Info descriptors memory area
+	 */
+	uint64_t rx_pktinfo_mem_size;
+	/**
+	 * IOVA for RX scratch buffer memory area for MGBE access
+	 */
+	iova_t dummy_buf_dma;
+	/**
+	 * Size of RX scratch buffer memory area
+	 */
+	uint64_t dummy_buf_dma_size;
+	/**
+	 * IOVA for Rx descriptors memory area base address
+	 */
+	iova_t rxmem_base;
+	/**
+	 * Size of RX descriptors memory area. Must be a power of two.
+	 */
+	uint64_t rxmem_size;
+	/**
+	 * VLAN enable value. 1 - VLAN enabled, 0 - VLAN disabled
+	 */
+	uint8_t vlan_enable;
+	/**
+	 * Padding to make the structure size a multiple of 8 bytes
+	 */
+	uint8_t _padding[7];
+} CAPTURE_IVC_ALIGN;
+
 /**
  * @defgroup ViDpcmModes VI DPCM Modes (non-safety)
  */
diff --git a/include/soc/tegra/nvethernet-public.h b/include/soc/tegra/nvethernet-public.h
new file mode 100644
index 00000000..5ede3129
--- /dev/null
+++ b/include/soc/tegra/nvethernet-public.h
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
+ * All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef INCLUDE_NVETHERNET_PUBLIC_H
+#define INCLUDE_NVETHERNET_PUBLIC_H
+
+#include
+#include
+
+#define COE_ENABLE 1U
+#define COE_MACSEC_HDR_OFFSET 42U
+#define COE_VLAN_ENABLE 1U
+#define COE_MACSEC_HDR_VLAN_DISABLE_OFFSET 38U
+#define COE_VLAN_DISABLE 0U
+#define COE_MACSEC_SFT_LC1 1024U
+#define COE_MACSEC_SFT_LC2 1024U
+
+/* These need to be configured only once during the COE platform driver init */
+struct nvether_coe_cfg {
+	/* Flag to track if COE is already configured */
+	u32 coe_enable;
+	/* System wide COE header offset to be used */
+	u32 coe_hdr_offset;
+	/* System wide config whether COE packets are VLAN tagged */
+	u32 vlan_enable;
+};
+
+/* These have to be provided for every COE stream added */
+struct nvether_per_coe_cfg {
+	/* Number of lines in the first subframe */
+	u32 lc1;
+	/* Number of lines in each subsequent subframe */
+	u32 lc2;
+};
+
+/**
+ * @brief Configure COE in the macsec controller
+ *
+ * Configure the COE engine in the MACsec controller with the
+ * system-wide COE settings.
+ *
+ * @param[in] ndev: Network device instance to be used for COE
+ * @param[in] ether_coe_cfg: Struct that defines the system wide COE config.
+ *
+ * @retval value >=0 on success.
+ * @retval "negative value" on failure.
+ */
+int nvether_coe_config(struct net_device *ndev,
+		       struct nvether_coe_cfg *ether_coe_cfg);
+
+/**
+ * @brief Configure CoE on a channel
+ *
+ * @param[in] ndev: Network device to operate on.
+ * @param[in] dmachan: The DMA channel to take ownership of.
+ *
+ * @retval value >=0 on success.
+ * @retval "negative value" on failure.
+ */
+int nvether_coe_chan_config(struct net_device *ndev, u32 dmachan,
+			    struct nvether_per_coe_cfg *p_coe_cfg);
+
+#endif /* INCLUDE_NVETHERNET_PUBLIC_H */
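A sketch of how a kernel-side consumer such as the CoE capture driver might
drive this API after resolving the MGBE net_device; example_enable_coe() is a
hypothetical helper, and the configuration values are simply the defaults
defined in this header:

static int example_enable_coe(struct net_device *ndev, u32 dmachan)
{
	struct nvether_coe_cfg cfg = {
		.coe_enable = COE_ENABLE,
		.coe_hdr_offset = COE_MACSEC_HDR_OFFSET,
		.vlan_enable = COE_VLAN_ENABLE,
	};
	struct nvether_per_coe_cfg chan_cfg = {
		.lc1 = COE_MACSEC_SFT_LC1,
		.lc2 = COE_MACSEC_SFT_LC2,
	};
	int ret;

	/* System-wide MACsec COE classifier and header offset, done once */
	ret = nvether_coe_config(ndev, &cfg);
	if (ret < 0)
		return ret;

	/* Per-stream line-counter thresholds for this DMA channel */
	return nvether_coe_chan_config(ndev, dmachan, &chan_cfg);
}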