coe: make CoE capture more robust

Modify CoE capture logic a bit to make it more robust and error-proof:
- RCE Rx queue limit size is 16; there is no point in having a 32-element
  queue in the kernel.
- Pass kernel's queue length to RCE when opening a channel so it can be
  validated (to not exceed RCE max depth)
- validate image buffers IOVA addresses and buffer length before queuing
  to RCE

Jira CT26X-1892

Change-Id: I199143fe726ebab05a1236d4b14b59f0528d65a8
Signed-off-by: Igor Mitsyanko <imitsyanko@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3419638
Reviewed-by: svcacv <svcacv@nvidia.com>
Tested-by: Raki Hassan <rakibulh@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Narendra Kondapalli <nkondapalli@nvidia.com>
This commit is contained in:
Igor Mitsyanko
2025-06-26 06:59:47 +00:00
committed by mobile promotions
parent f374450381
commit 344aa664d2
2 changed files with 43 additions and 3 deletions

View File

@@ -66,6 +66,12 @@
/** Maximum number of descriptors in a Rx packet info ring for a single channel */ /** Maximum number of descriptors in a Rx packet info ring for a single channel */
#define COE_MGBE_MAX_PKTINFO_NUM 4096U #define COE_MGBE_MAX_PKTINFO_NUM 4096U
/** Buffer offset field in CoE header is 28 bits wide (bits 0-27) */
#define COE_MGBE_MAX_BUF_SIZE (1U << 28U)
/** Mask for the Rx frame buffer address. Must be 4K aligned. */
#define COE_MGBE_RXFRAMEBUF_MASK 0x0000FFFFFFFFF000ULL
/** /**
* @brief Invalid CoE channel ID; the channel is not initialized. * @brief Invalid CoE channel ID; the channel is not initialized.
*/ */
@@ -73,7 +79,7 @@
#define CAPTURE_COE_CHAN_INVALID_HW_ID U8_C(0xFF) #define CAPTURE_COE_CHAN_INVALID_HW_ID U8_C(0xFF)
#define COE_CHAN_CAPTURE_QUEUE_LEN 32U #define COE_CHAN_CAPTURE_QUEUE_LEN 16U
/** Max number of physical DMA channel for each Eth controller */ /** Max number of physical DMA channel for each Eth controller */
#define COE_MGBE_MAX_NUM_PDMA_CHANS 10U #define COE_MGBE_MAX_NUM_PDMA_CHANS 10U
#define COE_MGBE_PDMA_CHAN_INVALID COE_MGBE_MAX_NUM_PDMA_CHANS #define COE_MGBE_PDMA_CHAN_INVALID COE_MGBE_MAX_NUM_PDMA_CHANS
@@ -502,6 +508,7 @@ static int coe_channel_open_on_rce(struct coe_channel_state *ch,
config->rxmem_size = COE_TOTAL_RXDESCR_MEM_SIZE; config->rxmem_size = COE_TOTAL_RXDESCR_MEM_SIZE;
config->vlan_enable = vlan_enable; config->vlan_enable = vlan_enable;
config->rx_queue_depth = ARRAY_SIZE(ch->capq_inhw);
mutex_lock(&ch->rce_msg_lock); mutex_lock(&ch->rce_msg_lock);
@@ -594,9 +601,15 @@ static int coe_ioctl_handle_capture_req(struct coe_channel_state * const ch,
{ {
uint64_t mgbe_iova; uint64_t mgbe_iova;
uint64_t buf_max_size; uint64_t buf_max_size;
uint32_t alloc_size_min;
int ret; int ret;
struct capture_common_unpins *unpins = NULL; struct capture_common_unpins *unpins = NULL;
if (req->buf_size == 0U || req->buf_size >= COE_MGBE_MAX_BUF_SIZE) {
dev_err(ch->dev, "CAPTURE_REQ: bad buf size %u\n", req->buf_size);
return -EINVAL;
}
mutex_lock(&ch->capq_inhw_lock); mutex_lock(&ch->capq_inhw_lock);
if (ch->capq_inhw_pending >= ARRAY_SIZE(ch->capq_inhw)) { if (ch->capq_inhw_pending >= ARRAY_SIZE(ch->capq_inhw)) {
@@ -624,18 +637,41 @@ static int coe_ioctl_handle_capture_req(struct coe_channel_state * const ch,
goto error; goto error;
} }
if (req->buf_size > buf_max_size) { if ((mgbe_iova & ~COE_MGBE_RXFRAMEBUF_MASK) != 0U) {
dev_err(ch->dev, "CAPTURE_REQ: bad buf iova 0x%llx\n", mgbe_iova);
ret = -ERANGE;
goto error;
}
/* Hardware can limit memory access within a range of powers of two only.
* Make sure DMA buffer allocation is large enough to at least cover the memory
* up to the next closest power of two boundary to eliminate a risk of a malformed
 * incoming network packet triggering invalid memory access.
*/
alloc_size_min = roundup_pow_of_two(req->buf_size);
if (alloc_size_min > buf_max_size) {
dev_err(ch->dev, "CAPTURE_REQ: capture too long %u\n", req->buf_size); dev_err(ch->dev, "CAPTURE_REQ: capture too long %u\n", req->buf_size);
ret = -ENOSPC; ret = -ENOSPC;
goto error; goto error;
} }
/* Scratch buffer is used as a scratch space to receive incoming images into buffer
* slots which were not yet initialized with an application image buffer pointers.
* There is no way of knowing which buffer slots will be used first as it is
* controlled by an external sender. Make sure scratch space is large enough to fit
* an image of expected size, if needed.
*/
if (req->buf_size > ch->rx_dummy_buf.buf->size) { if (req->buf_size > ch->rx_dummy_buf.buf->size) {
dev_err(ch->dev, "CAPTURE_REQ: buf size > scratch buf %u\n", req->buf_size); dev_err(ch->dev, "CAPTURE_REQ: buf size > scratch buf %u\n", req->buf_size);
ret = -ENOSPC; ret = -ENOSPC;
goto error; goto error;
} }
/* All buffer pointer slots in CoE hardware share the same highest 32 bits of IOVA
* address register.
* Make sure all buffers IOVA registered by application have the same MSB 32 bits.
*/
if ((mgbe_iova >> 32U) != (ch->rx_dummy_buf.iova >> 32U)) { if ((mgbe_iova >> 32U) != (ch->rx_dummy_buf.iova >> 32U)) {
dev_err(ch->dev, "Capture buf IOVA MSB 32 bits != scratch buf IOVA\n" dev_err(ch->dev, "Capture buf IOVA MSB 32 bits != scratch buf IOVA\n"
"0x%x != 0x%x\n", "0x%x != 0x%x\n",

View File

@@ -872,7 +872,7 @@ struct capture_coe_channel_config {
/** /**
* Numerical instance ID of an ethernet controller for the channel * Numerical instance ID of an ethernet controller for the channel
*/ */
uint32_t mgbe_instance_id; uint16_t mgbe_instance_id;
/** /**
* Virtual DMA channel number for this capture channel * Virtual DMA channel number for this capture channel
*/ */
@@ -885,6 +885,10 @@ struct capture_coe_channel_config {
* Hardware IRQ ID which will be asserted for events on that DMA channel * Hardware IRQ ID which will be asserted for events on that DMA channel
*/ */
uint16_t mgbe_irq_num; uint16_t mgbe_irq_num;
/**
* Maximum number of capture requests in the requests queue [1, 16].
*/
uint16_t rx_queue_depth;
/** /**
* Ethernet address of a camera module which will use the channel * Ethernet address of a camera module which will use the channel
*/ */