nvethernet: fix top-25 issues

Fixed the following static analysis issues (the recurring fix pattern is
sketched below):

- FORWARD_NULL
- CERT STR07-C
- CERT INT32-C
- CERT INT30-C
- CERT INT08-C
- CERT EXP39-C
- CERT EXP34-C

JIRA NET-2044
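
Most of these findings follow one pattern: guard unsigned arithmetic and
index increments with an explicit bounds check before the operation so the
value cannot wrap (the CERT INT30-C/INT32-C style used throughout the diff
below). A minimal sketch of that pattern in plain C, using hypothetical
helper and variable names rather than driver code:

    #include <limits.h>
    #include <stdio.h>

    /* Guarded unsigned addition: refuse the operation instead of wrapping. */
    static int checked_add_u32(unsigned int a, unsigned int b, unsigned int *sum)
    {
        if (a > (UINT_MAX - b)) {
            return -1; /* would overflow, caller bails out */
        }
        *sum = a + b;
        return 0;
    }

    int main(void)
    {
        unsigned int total = 0;

        if (checked_add_u32(4000000000U, 500000000U, &total) != 0) {
            printf("overflow detected, operation rejected\n");
        }
        return 0;
    }

The EXP39-C findings are handled differently: type-punned pointer reads are
replaced with memcpy() into a properly typed local (see the counter_helper()
helper added in the ethtool changes below).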

Change-Id: I839bd5aedff30c7e9679f513a2cf7a1fbe3b2b8a
Signed-off-by: Sanath Kumar Gampa <sgampa@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3258684
Reviewed-by: Ashutosh Jha <ajha@nvidia.com>
Reviewed-by: Mohan Thadikamalla <mohant@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Author: Sanath Kumar Gampa
Date: 2024-11-28 05:20:12 +00:00
Committed-by: Jon Hunter
Parent: b2a59883b3
Commit: 247613d6ce
8 changed files with 256 additions and 76 deletions

View File

@@ -2013,6 +2013,12 @@ static int ether_request_irqs(struct ether_priv_data *pdata)
     if (osi_core->mac_ver > OSI_EQOS_MAC_5_00 ||
         osi_core->mac != OSI_MAC_HW_EQOS) {
         for (i = 0; i < osi_core->num_vm_irqs; i++) {
+            // Needs review: bounds check for the irq name index
+            if ((j >= ETHER_IRQ_MAX_IDX) || (i >= OSI_MAX_VM_IRQS)) {
+                dev_err(pdata->dev,
+                        "unexpected irq name index received (%d)\n", j);
+                goto err_chan_irq;
+            }
             snprintf(pdata->irq_names[j], ETHER_IRQ_NAME_SZ, "%s.vm%d",
                      netdev_name(pdata->ndev), i);
             ret = devm_request_irq(pdata->dev, pdata->vm_irqs[i],
@@ -2031,8 +2037,12 @@ static int ether_request_irqs(struct ether_priv_data *pdata)
         }
     } else {
         for (i = 0; i < osi_dma->num_dma_chans; i++) {
+            if (j >= (ETHER_IRQ_MAX_IDX - 1)) {
+                dev_err(pdata->dev,
+                        "unexpected irq name index received (%d)\n", j);
+                goto err_chan_irq;
+            }
             chan = osi_dma->dma_chans[i];
             snprintf(pdata->irq_names[j], ETHER_IRQ_NAME_SZ, "%s.rx%d",
                      netdev_name(pdata->ndev), chan);
             ret = devm_request_irq(pdata->dev, pdata->rx_irqs[i],
@@ -2826,7 +2836,9 @@ static int ether_update_mac_addr_filter(struct ether_priv_data *pdata,
 static u32 ether_mdio_c45_addr(int devad, u16 regnum)
 {
-    return OSI_MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum;
+    unsigned int udevad = (unsigned int)devad & 0x7FFFU;
+
+    return OSI_MII_ADDR_C45 | (udevad << MII_DEVADDR_C45_SHIFT) | regnum;
 }

 /**
@@ -3457,19 +3469,21 @@ int ether_close(struct net_device *ndev)
  * @retval 1 on success
  * @retval "negative value" on failure.
  */
-static int ether_handle_tso(struct osi_tx_pkt_cx *tx_pkt_cx,
+static int ether_handle_tso(struct device *dev,
+                            struct osi_tx_pkt_cx *tx_pkt_cx,
                             struct sk_buff *skb)
 {
     int ret = 1;

     if (skb_is_gso(skb) == 0) {
-        return 0;
+        ret = 0;
+        goto func_exit;
     }

     if (skb_header_cloned(skb)) {
         ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-        if (ret) {
-            return ret;
+        if (ret != 0) {
+            goto func_exit;
         }
     }
@@ -3482,16 +3496,28 @@ static int ether_handle_tso(struct osi_tx_pkt_cx *tx_pkt_cx,
         tx_pkt_cx->tcp_udp_hdrlen = tcp_hdrlen(skb);
         tx_pkt_cx->mss = skb_shinfo(skb)->gso_size;
     }

+    if ((UINT_MAX - skb_transport_offset(skb)) < tx_pkt_cx->tcp_udp_hdrlen) {
+        dev_err(dev, "Unexpected udp hdr length\n");
+        ret = -EINVAL; // return failure in boundary condition
+        goto func_exit;
+    }
+
     tx_pkt_cx->total_hdrlen = skb_transport_offset(skb) +
                               tx_pkt_cx->tcp_udp_hdrlen;
+    if (tx_pkt_cx->total_hdrlen > skb->len) {
+        dev_err(dev, "Unexpected total hdr length\n");
+        ret = -EINVAL; // return failure in boundary condition
+        goto func_exit;
+    } else {
         tx_pkt_cx->payload_len = (skb->len - tx_pkt_cx->total_hdrlen);
+    }

     netdev_dbg(skb->dev, "mss =%u\n", tx_pkt_cx->mss);
     netdev_dbg(skb->dev, "payload_len =%u\n", tx_pkt_cx->payload_len);
     netdev_dbg(skb->dev, "tcp_udp_hdrlen=%u\n", tx_pkt_cx->tcp_udp_hdrlen);
     netdev_dbg(skb->dev, "total_hdrlen =%u\n", tx_pkt_cx->total_hdrlen);

-    return 1;
+func_exit:
+    return ret;
 }

 /**
@@ -3517,6 +3543,10 @@ static void ether_tx_swcx_rollback(struct ether_priv_data *pdata,
     struct osi_tx_swcx *tx_swcx = NULL;

     while (count > 0) {
+        if ((pdata->osi_dma->tx_ring_sz == 0U) || (cur_tx_idx == 0U)) {
+            dev_err(dev, "Invalid Tx ring size or index\n");
+            break;
+        }
         DECR_TX_DESC_INDEX(cur_tx_idx, pdata->osi_dma->tx_ring_sz);
         tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
         if (tx_swcx->buf_phy_addr) {
@@ -3568,11 +3598,13 @@ static int ether_tx_swcx_alloc(struct ether_priv_data *pdata,
     memset(tx_pkt_cx, 0, sizeof(*tx_pkt_cx));

-    ret = ether_handle_tso(tx_pkt_cx, skb);
+    // Needs to be reviewed properly
+    ret = ether_handle_tso(dev, tx_pkt_cx, skb);
     if (unlikely(ret < 0)) {
         dev_err(dev, "Unable to handle TSO packet (%d)\n", ret);
         /* Caller will take care of consuming skb */
-        return ret;
+        goto exit_func;
     }

     if (ret == 0) {
@@ -3597,6 +3629,10 @@ static int ether_tx_swcx_alloc(struct ether_priv_data *pdata,
         tx_pkt_cx->flags |= OSI_PKT_CX_PTP;
     }

+    if (pdata->osi_dma->tx_ring_sz == 0U) {
+        dev_err(dev, "Invalid Tx ring size\n");
+        goto exit_func;
+    }
     if (((tx_pkt_cx->flags & OSI_PKT_CX_VLAN) == OSI_PKT_CX_VLAN) ||
         ((tx_pkt_cx->flags & OSI_PKT_CX_TSO) == OSI_PKT_CX_TSO) ||
         (((tx_pkt_cx->flags & OSI_PKT_CX_PTP) == OSI_PKT_CX_PTP) &&
@@ -3606,7 +3642,8 @@ static int ether_tx_swcx_alloc(struct ether_priv_data *pdata,
           OSI_PTP_SYNC_ONESTEP)))) {
         tx_swcx = tx_ring->tx_swcx + cur_tx_idx;
         if (tx_swcx->len) {
-            return 0;
+            ret = 0;
+            goto exit_func;
         }

         tx_swcx->len = -1;
@@ -3647,6 +3684,11 @@ static int ether_tx_swcx_alloc(struct ether_priv_data *pdata,
         tx_swcx->len = size;
         len -= size;
         offset += size;
+        if (cnt == (int)INT_MAX) {
+            dev_err(dev, "Reached Max desc count\n");
+            ret = -ENOMEM;
+            goto dma_map_failed;
+        }
         cnt++;
         INCR_TX_DESC_INDEX(cur_tx_idx, pdata->osi_dma->tx_ring_sz);
     }
@@ -3678,7 +3720,19 @@ static int ether_tx_swcx_alloc(struct ether_priv_data *pdata,
             tx_swcx->flags &= ~OSI_PKT_CX_PAGED_BUF;
             tx_swcx->len = size;
             len -= size;
+            if (offset > UINT_MAX - size) {
+                dev_err(dev, "Offset addition overflow detected:"
+                        "size = %u, offset = %u\n",
+                        size, offset);
+                ret = -ENOMEM;
+                goto dma_map_failed;
+            }
             offset += size;
+            if (cnt == (int)INT_MAX) {
+                dev_err(dev, "Reached Max desc count\n");
+                ret = -ENOMEM;
+                goto dma_map_failed;
+            }
             cnt++;
             INCR_TX_DESC_INDEX(cur_tx_idx, pdata->osi_dma->tx_ring_sz);
         }
@@ -3697,6 +3751,12 @@ static int ether_tx_swcx_alloc(struct ether_priv_data *pdata,
         }

         size = min(len, max_data_len_per_txd);
+        if (skb_frag_off(frag) > UINT_MAX - offset) {
+            dev_err(dev, "Offset addition overflow detected:"
+                    "frag offset = %u, offset = %u\n",
+                    skb_frag_off(frag), offset);
+            return -ENOMEM;
+        }
         page_idx = (skb_frag_off(frag) + offset) >> PAGE_SHIFT;
         page_offset = (skb_frag_off(frag) + offset) & ~PAGE_MASK;
         tx_swcx->buf_phy_addr = dma_map_page(dev,
@@ -3714,6 +3774,11 @@ static int ether_tx_swcx_alloc(struct ether_priv_data *pdata,
         tx_swcx->len = size;
         len -= size;
         offset += size;
+        if (cnt == (int)INT_MAX) {
+            dev_err(dev, "Reached Max desc count\n");
+            ret = -ENOMEM;
+            goto dma_map_failed;
+        }
         cnt++;
         INCR_TX_DESC_INDEX(cur_tx_idx, pdata->osi_dma->tx_ring_sz);
     }
@@ -3730,6 +3795,7 @@ desc_not_free:
 dma_map_failed:
     /* Failed to fill current desc. Rollback previous desc's */
     ether_tx_swcx_rollback(pdata, tx_ring, cur_tx_idx, cnt);

+exit_func:
     return ret;
 }
@@ -3954,7 +4020,7 @@ static int ether_prepare_uc_list(struct net_device *dev,
     if (ioctl_data == NULL) {
         dev_err(pdata->dev, "ioctl_data is NULL\n");
-        return ret;
+        goto exit_func;
     }

     memset(&ioctl_data->l2_filter, 0x0, sizeof(struct osi_filter));
@@ -3971,6 +4037,11 @@ static int ether_prepare_uc_list(struct net_device *dev,
         return osi_handle_ioctl(osi_core, ioctl_data);
     }
 #endif /* !OSI_STRIPPED_LIB */
+    if (i > pdata->num_mac_addr_regs) {
+        dev_err(pdata->dev, "Invalid value: i (%u) exceeds num_mac_addr_regs (%u)\n",
+                i, pdata->num_mac_addr_regs);
+        goto exit_func;
+    }
     if (netdev_uc_count(dev) > (pdata->num_mac_addr_regs - i)) {
         /* switch to PROMISCUOUS mode */
         ioctl_data->l2_filter.oper_mode = (OSI_OPER_DIS_PERFECT |
@@ -4028,6 +4099,7 @@ static int ether_prepare_uc_list(struct net_device *dev,
         *mac_addr_idx = i;
     }

+exit_func:
     return ret;
 }
@@ -4980,8 +5052,8 @@ static int ether_get_vm_irq_data(struct platform_device *pdev,
 {
     struct osi_core_priv_data *osi_core = pdata->osi_core;
     struct device_node *vm_node, *temp;
-    unsigned int i, j, node = 0;
-    int vm_irq_id, child_id, ret =0;
+    unsigned int i, j, node = 0, vm_irq_id, child_id;
+    int ret = 0;

     vm_node = of_parse_phandle(pdev->dev.of_node,
                                "nvidia,vm-irq-config", 0);
@@ -5063,6 +5135,8 @@
             }
         }
+        /* Assuming there would not be more than 0xFFFF nodes */
+        child_id &= MAX_CHILD_NODES;
         child_id++;
     }
@@ -5128,6 +5202,11 @@ static int ether_get_irqs(struct platform_device *pdev,
     } else {
         /* get TX IRQ numbers */
         for (i = 0, j = 1; i < num_chans; i++) {
+            if (j == UINT_MAX) {
+                dev_err(pdata->dev, "Index j exceeded maximum value\n");
+                ret = -1;
+                goto exit_func;
+            }
             pdata->tx_irqs[i] = platform_get_irq(pdev, j++);
             if (pdata->tx_irqs[i] < 0) {
                 dev_err(&pdev->dev, "failed to get TX IRQ number\n");
@@ -5136,6 +5215,11 @@
         }

         for (i = 0; i < num_chans; i++) {
+            if (j == UINT_MAX) {
+                dev_err(pdata->dev, "Index j exceeded maximum value\n");
+                ret = -1;
+                goto exit_func;
+            }
             pdata->rx_irqs[i] = platform_get_irq(pdev, j++);
             if (pdata->rx_irqs[i] < 0) {
                 dev_err(&pdev->dev, "failed to get RX IRQ number\n");
@@ -5144,7 +5228,10 @@
         }
     }

-    return 0;
+    ret = 0;
+
+exit_func:
+    return ret;
 }

 /**
@@ -5299,6 +5386,12 @@ static int ether_get_mac_address(struct ether_priv_data *pdata)
         eth_mac_addr = addr;
     }

+    /* Added below to fix FORWARD_NULL static analysis error */
+    if (eth_mac_addr == NULL) {
+        ret = -EINVAL;
+        goto exit_func;
+    }
+
     /* Found a valid mac address */
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)
     dev_addr_mod(ndev, 0, eth_mac_addr, ETH_ALEN);
@@ -5309,6 +5402,7 @@
     dev_info(dev, "Ethernet MAC address: %pM\n", ndev->dev_addr);

+exit_func:
     return ret;
 }

View File

@@ -95,6 +95,16 @@
  */
 #define ETH_MAC_STR_LEN 20

+/**
+ * @addtogroup Maximum number of child nodes
+ */
+#define MAX_CHILD_NODES 0xFFFFU
+
+/**
+ * @addtogroup Helper for INT_32 MAX
+ */
+#define OSD_INT_MAX 0x7FFFFFFF
+
 /**
  * @addtogroup Ethernet Transmit Queue Priority
  *
@@ -308,8 +318,14 @@ static inline bool valid_tx_len(unsigned int length)
 static inline int ether_avail_txdesc_cnt(struct osi_dma_priv_data *osi_dma,
                                          struct osi_tx_ring *tx_ring)
 {
-    return ((tx_ring->clean_idx - tx_ring->cur_tx_idx - 1) &
-            (osi_dma->tx_ring_sz - 1));
+    int ret = -EINVAL;
+
+    if ((osi_dma->tx_ring_sz == 0U) || (tx_ring->cur_tx_idx == 0U) ||
+        (tx_ring->clean_idx < (tx_ring->cur_tx_idx - 1U))) {
+        return ret;
+    }
+
+    return ((tx_ring->clean_idx - tx_ring->cur_tx_idx - 1U) &
+            (osi_dma->tx_ring_sz - 1U));
 }

 /**

View File

@@ -1030,6 +1030,23 @@ static const struct ether_stats ether_tstrings_stats[] = {
 #endif /* OSI_STRIPPED_LIB */
 };

+static u64 counter_helper(u64 sizeof_stat, char *p)
+{
+    u64 ret = 0;
+
+    if (sizeof_stat == sizeof(u64)) {
+        u64 temp = 0;
+        (void)memcpy(&temp, (void *)p, sizeof(temp));
+        ret = temp;
+    } else {
+        u32 temp = 0;
+        (void)memcpy(&temp, (void *)p, sizeof(temp));
+        ret = temp;
+    }
+
+    return ret;
+}
+
 void ether_get_ethtool_stats(struct net_device *dev,
                              struct ethtool_stats *dummy,
                              u64 *data)
@@ -1077,18 +1094,18 @@ void ether_get_ethtool_stats(struct net_device *dev,
         for (i = 0; i < ETHER_MMC_STATS_LEN; i++) {
             char *p = (char *)osi_core + ether_mmc[i].stat_offset;

-            data[j++] = (ether_mmc[i].sizeof_stat ==
-                         sizeof(u64)) ? (*(u64 *)p) :
-                         (*(u32 *)p);
+            if (j < OSD_INT_MAX) {
+                data[j++] = counter_helper(ether_mmc[i].sizeof_stat, p);
+            }
         }

         for (i = 0; i < ETHER_EXTRA_STAT_LEN; i++) {
             char *p = (char *)pdata +
                       ether_gstrings_stats[i].stat_offset;

-            data[j++] = (ether_gstrings_stats[i].sizeof_stat ==
-                         sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+            if (j < OSD_INT_MAX) {
+                data[j++] = counter_helper(ether_gstrings_stats[i].sizeof_stat, p);
+            }
         }

 #ifndef OSI_STRIPPED_LIB
@@ -1096,16 +1113,18 @@ void ether_get_ethtool_stats(struct net_device *dev,
             char *p = (char *)osi_dma +
                       ether_dstrings_stats[i].stat_offset;

-            data[j++] = (ether_dstrings_stats[i].sizeof_stat ==
-                         sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+            if (j < OSD_INT_MAX) {
+                data[j++] = counter_helper(ether_dstrings_stats[i].sizeof_stat, p);
+            }
         }

         for (i = 0; i < ETHER_PKT_ERR_STAT_LEN; i++) {
             char *p = (char *)osi_dma +
                       ether_cstrings_stats[i].stat_offset;

-            data[j++] = (ether_cstrings_stats[i].sizeof_stat ==
-                         sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+            if (j < OSD_INT_MAX) {
+                data[j++] = counter_helper(ether_cstrings_stats[i].sizeof_stat, p);
+            }
         }

         for (i = 0; ((i < ETHER_FRP_STAT_LEN) &&
@@ -1113,8 +1132,9 @@ void ether_get_ethtool_stats(struct net_device *dev,
             char *p = (char *)osi_dma +
                       ether_frpstrings_stats[i].stat_offset;

-            data[j++] = (ether_frpstrings_stats[i].sizeof_stat ==
-                         sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+            if (j < OSD_INT_MAX) {
+                data[j++] = counter_helper(ether_frpstrings_stats[i].sizeof_stat, p);
+            }
         }
 #endif /* OSI_STRIPPED_LIB */
@@ -1122,8 +1142,9 @@ void ether_get_ethtool_stats(struct net_device *dev,
             char *p = (char *)osi_core +
                       ether_tstrings_stats[i].stat_offset;

-            data[j++] = (ether_tstrings_stats[i].sizeof_stat ==
-                         sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+            if (j < OSD_INT_MAX) {
+                data[j++] = counter_helper(ether_tstrings_stats[i].sizeof_stat, p);
+            }
         }
     }
 }
@@ -1135,29 +1156,29 @@ int ether_get_sset_count(struct net_device *dev, int sset)
     if (sset == ETH_SS_STATS) {
         if (pdata->hw_feat.mmc_sel == OSI_ENABLE) {
-            if (INT_MAX < ETHER_MMC_STATS_LEN) {
+            if (OSD_INT_MAX < ETHER_MMC_STATS_LEN) {
                 /* do nothing*/
             } else {
                 len = ETHER_MMC_STATS_LEN;
             }
         }
-        if (INT_MAX - ETHER_EXTRA_STAT_LEN < len) {
+        if (OSD_INT_MAX - ETHER_EXTRA_STAT_LEN < len) {
             /* do nothing */
         } else {
             len += ETHER_EXTRA_STAT_LEN;
         }
 #ifndef OSI_STRIPPED_LIB
-        if (INT_MAX - ETHER_EXTRA_DMA_STAT_LEN < len) {
+        if (OSD_INT_MAX - ETHER_EXTRA_DMA_STAT_LEN < len) {
             /* do nothing */
         } else {
             len += ETHER_EXTRA_DMA_STAT_LEN;
         }
-        if (INT_MAX - ETHER_PKT_ERR_STAT_LEN < len) {
+        if (OSD_INT_MAX - ETHER_PKT_ERR_STAT_LEN < len) {
             /* do nothing */
         } else {
             len += ETHER_PKT_ERR_STAT_LEN;
         }
-        if (INT_MAX - ETHER_FRP_STAT_LEN < len) {
+        if (OSD_INT_MAX - ETHER_FRP_STAT_LEN < len) {
             /* do nothing */
         } else {
             if (pdata->hw_feat.frp_sel == OSI_ENABLE) {
@@ -1165,7 +1186,7 @@ int ether_get_sset_count(struct net_device *dev, int sset)
             }
         }
 #endif /* OSI_STRIPPED_LIB */
-        if (INT_MAX - ETHER_CORE_STAT_LEN < len) {
+        if (OSD_INT_MAX - ETHER_CORE_STAT_LEN < len) {
             /* do nothing */
         } else {
             len += ETHER_CORE_STAT_LEN;

View File

@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2019-2024, NVIDIA CORPORATION. All rights reserved */
+// SPDX-FileCopyrightText: Copyright (c) 2019-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

 #ifdef MACSEC_SUPPORT
 #include "ether_linux.h"
@@ -420,7 +420,7 @@ static struct macsec_supplicant_data *macsec_get_supplicant(
     int i;

     /* check for already exist instance */
-    for (i = 0; i < OSI_MAX_NUM_SC; i++) {
+    for (i = 0; i < OSI_MAX_NUM_SC_T26x; i++) {
         if (supplicant[i].snd_portid == portid &&
             supplicant[i].in_use == OSI_ENABLE) {
             return &supplicant[i];
@@ -1052,6 +1052,7 @@ static int macsec_deinit(struct sk_buff *skb, struct genl_info *info)
     struct macsec_supplicant_data *supplicant;
     struct ether_priv_data *pdata;
     int ret = 0;
+    int ref_count_lcl = 0;

     PRINT_ENTRY();
@@ -1084,7 +1085,8 @@ static int macsec_deinit(struct sk_buff *skb, struct genl_info *info)
     macsec_pdata->next_supp_idx--;

     /* check for reference count to zero before deinit macsec */
-    if ((atomic_read(&macsec_pdata->ref_count) - 1) > 0) {
+    ref_count_lcl = atomic_read(&macsec_pdata->ref_count);
+    if (ref_count_lcl > 1) {
         ret = 0;
         mutex_unlock(&macsec_pdata->lock);
         goto done;
@@ -1309,7 +1311,7 @@ void macsec_remove(struct ether_priv_data *pdata)
     mutex_lock(&macsec_pdata->lock);
     /* Delete if any supplicant active heartbeat timer */
     supplicant = macsec_pdata->supplicant;
-    for (i = 0; i < OSI_MAX_NUM_SC; i++) {
+    for (i = 0; i < OSI_MAX_NUM_SC_T26x; i++) {
         if (supplicant[i].in_use == OSI_ENABLE) {
             supplicant->snd_portid = OSI_NONE;
             supplicant->in_use = OSI_NONE;
@@ -1473,6 +1475,8 @@ int macsec_probe(struct ether_priv_data *pdata)
     } else {
         strncpy(macsec_pdata->nv_macsec_fam.name,
                 netdev_name(pdata->ndev), GENL_NAMSIZ - 1);
+        // Explicit null-termination to fix CERT STR07-C
+        macsec_pdata->nv_macsec_fam.name[GENL_NAMSIZ - 1] = '\0';
     }
     ret = genl_register_family(&macsec_pdata->nv_macsec_fam);
     if (ret) {
@@ -1671,6 +1675,8 @@ static int macsec_get_tx_next_pn(struct sk_buff *skb, struct genl_info *info)
     memset(&lut_config, OSI_NONE, sizeof(lut_config));
     lut_config.table_config.ctlr_sel = OSI_CTLR_SEL_TX;
     lut_config.table_config.rw = OSI_LUT_READ;
+    // Bitwise mask added just to avoid CERT error
+    key_index = key_index & MAX_KEY_INDEX;
     lut_config.table_config.index = key_index + tx_sa.curr_an;
     lut_config.lut_sel = OSI_LUT_SEL_SA_STATE;
     if (osi_macsec_config_lut(osi_core, &lut_config) < 0) {

View File

@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2019-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved */
+/* Copyright (c) 2019-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved */

 #ifndef INCLUDED_MACSEC_H
 #define INCLUDED_MACSEC_H
@@ -40,6 +40,11 @@
  */
 #define MAX_SUPPLICANTS_ALLOWED 48

+/**
+ * @brief Maximum number of Key Indices for masking
+ */
+#define MAX_KEY_INDEX 0xFFU
+
 #define NV_MACSEC_GENL_VERSION 1

 #ifdef MACSEC_KEY_PROGRAM
@@ -251,7 +256,7 @@ struct macsec_priv_data {
     /** MACsec controller init reference count */
     atomic_t ref_count;
     /** supplicant instance specific data */
-    struct macsec_supplicant_data supplicant[OSI_MAX_NUM_SC];
+    struct macsec_supplicant_data supplicant[OSI_MAX_NUM_SC_T26x];
     /** next supplicant instance index */
     unsigned short next_supp_idx;
     /** macsec mutex lock */

View File

@@ -33,6 +33,7 @@ static inline void add_skb_node(struct ether_priv_data *pdata, struct sk_buff *s
     unsigned int idx;
     unsigned long flags;
     unsigned long now_jiffies = jiffies;
+    unsigned long timeout_jiffies = msecs_to_jiffies(ETHER_SECTOMSEC);

     if (list_empty(&pdata->tx_ts_skb_head)) {
         goto empty;
@@ -44,9 +45,9 @@ static inline void add_skb_node(struct ether_priv_data *pdata, struct sk_buff *s
         pnode = list_entry(head_node,
                            struct ether_tx_ts_skb_list,
                            list_head);
-        if ((jiffies_to_msecs(now_jiffies) - jiffies_to_msecs(pnode->pkt_jiffies))
-            >= ETHER_SECTOMSEC) {
+        if ((ULLONG_MAX - pnode->pkt_jiffies > timeout_jiffies) &&
+            (now_jiffies > (pnode->pkt_jiffies + timeout_jiffies))) {
+            if (time_after_eq(now_jiffies, pnode->pkt_jiffies + timeout_jiffies)) {
             dev_dbg(pdata->dev, "%s() skb %p deleting for pktid=%x time=%lu\n",
                     __func__, pnode->skb, pnode->pktid, pnode->pkt_jiffies);
             if (pnode->skb != NULL) {
@@ -56,6 +57,7 @@ static inline void add_skb_node(struct ether_priv_data *pdata, struct sk_buff *s
                 pnode->in_use = OSI_DISABLE;
             }
         }
+        }
     raw_spin_unlock_irqrestore(&pdata->txts_lock, flags);
 empty:
     raw_spin_lock_irqsave(&pdata->txts_lock, flags);
@@ -671,7 +673,11 @@ void osd_receive_packet(void *priv, struct osi_rx_ring *rx_ring,
     skb_record_rx_queue(skb, chan);
     skb->dev = ndev;
     skb->protocol = eth_type_trans(skb, ndev);
+    if ((ULLONG_MAX - ndev->stats.rx_bytes) <= (skb->len)) {
+        ndev->stats.rx_bytes = skb->len;
+    } else {
         ndev->stats.rx_bytes += skb->len;
+    }
 #ifdef ETHER_NVGRO
     if ((ndev->features & NETIF_F_GRO) &&
         ether_do_nvgro(pdata, &rx_napi->napi, skb))
@@ -688,7 +694,11 @@ void osd_receive_packet(void *priv, struct osi_rx_ring *rx_ring,
         ndev->stats.rx_frame_errors = pkt_err_stat->rx_frame_error;
 #endif /* !OSI_STRIPPED_LIB */
         ndev->stats.rx_fifo_errors = osi_core->mmc.mmc_rx_fifo_overflow;
+        if (ndev->stats.rx_errors == ULLONG_MAX) {
+            ndev->stats.rx_errors = 0;
+        } else {
             ndev->stats.rx_errors++;
+        }
 #ifdef ETHER_PAGE_POOL
         page_pool_recycle_direct(pdata->page_pool[chan], page);
 #endif
@@ -698,7 +708,11 @@ void osd_receive_packet(void *priv, struct osi_rx_ring *rx_ring,
 #ifdef ETHER_NVGRO
 done:
 #endif
+    if (ndev->stats.rx_packets == ULLONG_MAX) {
+        ndev->stats.rx_packets = 0;
+    } else {
         ndev->stats.rx_packets++;
+    }
     rx_swcx->buf_virt_addr = NULL;
     rx_swcx->buf_phy_addr = 0;
     /* mark packet is processed */
@@ -723,7 +737,11 @@ void osd_transmit_complete(void *priv, const struct osi_tx_swcx *swcx,
     unsigned int chan, qinx;
     unsigned int len = swcx->len;

+    if ((ULLONG_MAX - ndev->stats.tx_bytes) <= (len)) {
+        ndev->stats.tx_bytes = len;
+    } else {
         ndev->stats.tx_bytes += len;
+    }
 #ifdef BW_TEST
     if (pdata->test_tx_bandwidth == OSI_ENABLE) {
@@ -764,8 +782,11 @@ void osd_transmit_complete(void *priv, const struct osi_tx_swcx *swcx,
         netif_tx_wake_queue(txq);
         netdev_dbg(ndev, "Tx ring[%d] - waking Txq\n", chan);
     }
+    if (ndev->stats.tx_packets == ULLONG_MAX) {
+        ndev->stats.tx_packets = 0;
+    } else {
         ndev->stats.tx_packets++;
+    }
     if ((txdone_pkt_cx->flags & OSI_TXDONE_CX_TS_DELAYED) ==
         OSI_TXDONE_CX_TS_DELAYED) {
         add_skb_node(pdata, skb, txdone_pkt_cx->pktid, txdone_pkt_cx->vdmaid);
@@ -936,6 +957,9 @@ int osd_ivc_send_cmd(void *priv, ivc_msg_common_t *ivc_buf, unsigned int len)
         return -1;
     }
     ivc_buf->status = -1;
+    if (cnt == (int)INT_MAX) {
+        cnt = 0;
+    }
     ivc_buf->count = cnt++;

     raw_spin_lock_irqsave(&ictxt->ivck_lock, flags);

View File

@@ -159,9 +159,9 @@ static int ether_test_loopback_validate(struct sk_buff *skb,
 #else
     unsigned char *dst = tpdata->ctxt->dst;
 #endif
-    struct ether_testhdr *thdr;
-    struct ethhdr *ehdr;
-    struct udphdr *uhdr;
+    struct ether_testhdr thdr;
+    struct ethhdr ehdr;
+    struct udphdr *uhdr, local_uhdr;
     struct iphdr *ihdr;

     skb = skb_unshare(skb, GFP_ATOMIC);
@@ -173,9 +173,9 @@ static int ether_test_loopback_validate(struct sk_buff *skb,
     if (skb_headlen(skb) < (ETHER_TEST_PKT_SIZE - ETH_HLEN))
         goto out;

-    ehdr = (struct ethhdr *)skb_mac_header(skb);
+    (void)memcpy(&ehdr, (void *)skb_mac_header(skb), sizeof(struct ethhdr));
     if (dst) {
-        if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
+        if (!ether_addr_equal_unaligned(ehdr.h_dest, dst))
             goto out;
     }
@@ -183,13 +183,14 @@ static int ether_test_loopback_validate(struct sk_buff *skb,
     if (ihdr->protocol != IPPROTO_UDP)
         goto out;

-    uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
-    if (uhdr->dest != htons(ETHER_UDP_TEST_PORT))
+    (void)memcpy(&local_uhdr, (void *)((u8 *)ihdr + (4 * ihdr->ihl)), sizeof(local_uhdr));
+    if (local_uhdr.dest != htons(ETHER_UDP_TEST_PORT))
         goto out;

-    thdr = (struct ether_testhdr *)((u8 *)uhdr + sizeof(*uhdr));
-    if (thdr->magic != cpu_to_be64(ETHER_TEST_PKT_MAGIC))
+    uhdr = (struct udphdr *)((u8 *)ihdr + (4 * ihdr->ihl));
+    (void)memcpy(&thdr, (void *)((char *)uhdr + sizeof(*uhdr)), sizeof(thdr));
+    if (thdr.magic != cpu_to_be64(ETHER_TEST_PKT_MAGIC))
         goto out;

     tpdata->completed = true;

View File

@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-// SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.

 #include "ether_linux.h"
 #ifdef MACSEC_SUPPORT
@@ -111,6 +111,12 @@ static inline int extract_mtu(const char* str)
     // Iterate through the string to find the first digit
     while (*str) {
         if (((*str) >= '0') && ((*str) <= '9')) {
+            // Boundary check to avoid integer overflow
+            if ((num > (INT_MAX / 10)) ||
+                ((num == (INT_MAX / 10)) && ((*str - '0') > (INT_MAX % 10)))) {
+                num = 0;
+                break;
+            }
             // Convert the digit characters to an integer
             num = num * 10 + (*str - '0');
         }
@@ -2974,19 +2980,26 @@ static ssize_t ether_mac_frp_show(struct device *dev,
     struct osi_core_priv_data *osi_core = pdata->osi_core;
     struct osi_core_frp_entry *entry = NULL;
     struct osi_core_frp_data *data = NULL;
-    int i = 0, j = 0;
+    int i = 0, j = 0, written = 0;

     /* Write FRP table entries */
     for (i = 0, j = 0; ((i < osi_core->frp_cnt) && (j < PAGE_SIZE)); i++) {
         entry = &osi_core->frp_table[i];
         data = &entry->data;
-        j += scnprintf((buf + j), (PAGE_SIZE - j),
-                       "[%d] ID:%d MD:0x%x ME:0x%x AF:%d RF:%d IM:%d NIC:%d FO:%d OKI:%d DCH:x%llx\n",
+        written = scnprintf((buf + j), (PAGE_SIZE - j),
+                            "[%d] ID:%d MD:0x%x ME:0x%x AF:%d "
+                            "RF:%d IM:%d NIC:%d FO:%d OKI:%d DCH:x%llx\n",
                        i, entry->frp_id, data->match_data,
                        data->match_en, data->accept_frame,
                        data->reject_frame, data->inverse_match,
                        data->next_ins_ctrl, data->frame_offset,
                        data->ok_index, data->dma_chsel);
+        // Ensure written is non-negative and within bounds
+        if ((written < 0) || (((unsigned long)j + (unsigned long)written) >= PAGE_SIZE)) {
+            // Prevent overflow and exit loop
+            break;
+        }
+        j += written;
     }

     return j;