diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c index 3cc344d0..027e2dce 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_linux.c @@ -72,6 +72,7 @@ int ether_get_tx_ts(struct ether_priv_data *pdata) struct list_head *tstamp_head, *temp_tstamp_head; struct skb_shared_hwtstamps shhwtstamp; struct osi_ioctl ioctl_data = {}; + struct osi_core_tx_ts *tx_ts = (struct osi_core_tx_ts *)&ioctl_data.data.tx_ts; unsigned long long nsec = 0x0; struct ether_tx_ts_skb_list *pnode; struct ether_timestamp_skb_list *tnode; @@ -98,15 +99,15 @@ int ether_get_tx_ts(struct ether_priv_data *pdata) list_head); ioctl_data.cmd = OSI_CMD_GET_TX_TS; - ioctl_data.tx_ts.pkt_id = pnode->pktid; - ioctl_data.tx_ts.vdma_id = pnode->vdmaid; + tx_ts->pkt_id = pnode->pktid; + tx_ts->vdma_id = pnode->vdmaid; ret = osi_handle_ioctl(pdata->osi_core, &ioctl_data); if (ret == 0) { /* get time stamp form ethernet server */ dev_dbg(pdata->dev,"%s() pktid = %x, skb = %p\n, vdmaid=%x", __func__, pnode->pktid, pnode->skb, pnode->vdmaid); - if ((ioctl_data.tx_ts.nsec & OSI_MAC_TCR_TXTSSMIS) == + if ((tx_ts->nsec & OSI_MAC_TCR_TXTSSMIS) == OSI_MAC_TCR_TXTSSMIS) { dev_warn(pdata->dev, "No valid time for skb, removed\n"); @@ -117,8 +118,7 @@ int ether_get_tx_ts(struct ether_priv_data *pdata) goto update_skb; } - nsec = ioctl_data.tx_ts.sec * ETHER_ONESEC_NENOSEC + - ioctl_data.tx_ts.nsec; + nsec = tx_ts->sec * ETHER_ONESEC_NENOSEC + tx_ts->nsec; if (pnode->skb != NULL) { idx = ether_get_free_timestamp_node(pdata); @@ -2811,6 +2811,7 @@ static int ether_update_mac_addr_filter(struct ether_priv_data *pdata, struct osi_dma_priv_data *osi_dma = pdata->osi_dma; nveu32_t dma_channel = osi_dma->dma_chans[0]; unsigned char bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct osi_filter *l2_filter = (struct osi_filter *)&ioctl_data->data.l2_filter; unsigned int 
MAC_index[OSI_MAX_MAC_IP_TYPES] = { ETHER_MAC_ADDRESS_INDEX, ETHER_MAC_ADDRESS_INDEX, @@ -2829,38 +2830,37 @@ static int ether_update_mac_addr_filter(struct ether_priv_data *pdata, return -1; } - memset(&ioctl_data->l2_filter, 0x0, sizeof(struct osi_filter)); + memset(l2_filter, 0x0, sizeof(struct osi_filter)); /* Set MAC address with DCS set to route all legacy Rx * packets from RxQ0 to default DMA at index 0. */ - ioctl_data->l2_filter.oper_mode = (OSI_OPER_EN_PERFECT | - OSI_OPER_DIS_PROMISC | - OSI_OPER_DIS_ALLMULTI); + l2_filter->oper_mode = (OSI_OPER_EN_PERFECT | + OSI_OPER_DIS_PROMISC | + OSI_OPER_DIS_ALLMULTI); if (en_dis == OSI_ENABLE) { - ioctl_data->l2_filter.oper_mode |= OSI_OPER_ADDR_UPDATE; + l2_filter->oper_mode |= OSI_OPER_ADDR_UPDATE; } else { - ioctl_data->l2_filter.oper_mode |= OSI_OPER_ADDR_DEL; + l2_filter->oper_mode |= OSI_OPER_ADDR_DEL; } if (uc_bc == ETHER_ADDRESS_MAC) { - ioctl_data->l2_filter.index = MAC_index[osi_core->mac]; - memcpy(ioctl_data->l2_filter.mac_addr, osi_core->mac_addr, - ETH_ALEN); + l2_filter->index = MAC_index[osi_core->mac]; + memcpy(l2_filter->mac_addr, osi_core->mac_addr, ETH_ALEN); } else { if (osi_dma->num_dma_chans > 1) { dma_channel = osi_dma->dma_chans[1]; } else { dma_channel = osi_dma->dma_chans[0]; } - ioctl_data->l2_filter.index = BC_index[osi_core->mac]; - memcpy(ioctl_data->l2_filter.mac_addr, bc_addr, ETH_ALEN); - ioctl_data->l2_filter.pkt_dup = OSI_ENABLE; - ioctl_data->l2_filter.dma_chansel = OSI_BIT_64(dma_channel); + l2_filter->index = BC_index[osi_core->mac]; + memcpy(l2_filter->mac_addr, bc_addr, ETH_ALEN); + l2_filter->pkt_dup = OSI_ENABLE; + l2_filter->dma_chansel = OSI_BIT_64(dma_channel); } - ioctl_data->l2_filter.dma_routing = OSI_ENABLE; - ioctl_data->l2_filter.dma_chan = dma_channel; - ioctl_data->l2_filter.addr_mask = OSI_AMASK_DISABLE; - ioctl_data->l2_filter.src_dest = OSI_DA_MATCH; + l2_filter->dma_routing = OSI_ENABLE; + l2_filter->dma_chan = dma_channel; + l2_filter->addr_mask = 
OSI_AMASK_DISABLE; + l2_filter->src_dest = OSI_DA_MATCH; ioctl_data->cmd = OSI_CMD_L2_FILTER; return osi_handle_ioctl(osi_core, ioctl_data); @@ -3028,6 +3028,49 @@ exit: return ret; } +#ifndef OSI_STRIPPED_LIB +/** + * @brief ether_init_rss - Init OSI RSS structure + * + * Algorithm: Populates RSS hash key and table in OSI core structure. + * + * @param[in] pdata: Ethernet private data + * @param[in] features: Netdev features + */ +static void ether_init_rss(struct ether_priv_data *pdata, + netdev_features_t features) +{ + struct osi_core_priv_data *osi_core = pdata->osi_core; + struct osi_ioctl ioctl_data = {}; + struct osi_core_rss *rss = (struct osi_core_rss *)&ioctl_data.data.rss; + unsigned int num_q = osi_core->num_mtl_queues; + unsigned int i = 0; + + if ((features & NETIF_F_RXHASH) == NETIF_F_RXHASH) { + rss->enable = 1; + } else { + rss->enable = 0; + return; + } + + /* generate random key */ + netdev_rss_key_fill(rss->key, sizeof(rss->key)); + + /* In T26x mgbe default 8 VDMA channels enabled */ + if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) { + num_q = pdata->osi_dma->num_dma_chans; + } + /* initialize hash table */ + for (i = 0; i < OSI_RSS_MAX_TABLE_SIZE; i++) + rss->table[i] = ethtool_rxfh_indir_default(i, num_q); + + ioctl_data.cmd = OSI_CMD_CONFIG_RSS; + if (osi_handle_ioctl(osi_core, &ioctl_data)) { + pr_err("Failed to configure RSS\n"); + } +} +#endif /* !OSI_STRIPPED_LIB */ + int ether_open(struct net_device *dev) { struct ether_priv_data *pdata = netdev_priv(dev); @@ -3109,6 +3152,11 @@ int ether_open(struct net_device *dev) goto err_hw_init; } +#ifndef OSI_STRIPPED_LIB + /* RSS init */ + ether_init_rss(pdata, pdata->ndev->features); +#endif /* !OSI_STRIPPED_LIB */ + ret = ether_update_mac_addr_filter(pdata, &ioctl_data, OSI_ENABLE, ETHER_ADDRESS_MAC); if (ret < 0) { @@ -3311,16 +3359,17 @@ static inline void ether_delete_l2_filter(struct ether_priv_data *pdata) { struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_ioctl ioctl_data = 
{}; + struct osi_filter *l2_filter = (struct osi_filter *)&ioctl_data.data.l2_filter; int ret, i; - memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter)); + memset(l2_filter, 0x0, sizeof(struct osi_filter)); ret = ether_update_mac_addr_filter(pdata, &ioctl_data, OSI_DISABLE, ETHER_ADDRESS_MAC); if (ret < 0) { dev_err(pdata->dev, "issue in deleting MAC address\n"); } - memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter)); + memset(l2_filter, 0x0, sizeof(struct osi_filter)); ret = ether_update_mac_addr_filter(pdata, &ioctl_data, OSI_DISABLE, ETHER_ADDRESS_BC); if (ret < 0) { @@ -3331,15 +3380,15 @@ static inline void ether_delete_l2_filter(struct ether_priv_data *pdata) for (i = ETHER_MAC_ADDRESS_INDEX + 1; i < pdata->last_filter_index; i++) { /* Reset the filter structure to avoid any old value */ - memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter)); - ioctl_data.l2_filter.oper_mode = OSI_OPER_ADDR_DEL; - ioctl_data.l2_filter.index = i; - ioctl_data.l2_filter.dma_routing = OSI_ENABLE; - memcpy(ioctl_data.l2_filter.mac_addr, + memset(l2_filter, 0x0, sizeof(struct osi_filter)); + l2_filter->oper_mode = OSI_OPER_ADDR_DEL; + l2_filter->index = i; + l2_filter->dma_routing = OSI_ENABLE; + memcpy(l2_filter->mac_addr, pdata->mac_addr[i].addr, ETH_ALEN); - ioctl_data.l2_filter.dma_chan = pdata->mac_addr[i].dma_chan; - ioctl_data.l2_filter.addr_mask = OSI_AMASK_DISABLE; - ioctl_data.l2_filter.src_dest = OSI_DA_MATCH; + l2_filter->dma_chan = pdata->mac_addr[i].dma_chan; + l2_filter->addr_mask = OSI_AMASK_DISABLE; + l2_filter->src_dest = OSI_DA_MATCH; ioctl_data.cmd = OSI_CMD_L2_FILTER; ret = osi_handle_ioctl(osi_core, &ioctl_data); @@ -3957,6 +4006,7 @@ static int ether_prepare_mc_list(struct net_device *dev, struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_dma_priv_data *osi_dma = pdata->osi_dma; + struct osi_filter *l2_filter = (struct osi_filter *)&ioctl_data->data.l2_filter; 
struct netdev_hw_addr *ha; unsigned int i = *mac_addr_idx; int ret = -1; @@ -3966,15 +4016,15 @@ static int ether_prepare_mc_list(struct net_device *dev, return ret; } - memset(&ioctl_data->l2_filter, 0x0, sizeof(struct osi_filter)); + memset(l2_filter, 0x0, sizeof(struct osi_filter)); #ifndef OSI_STRIPPED_LIB if (pdata->l2_filtering_mode == OSI_HASH_FILTER_MODE) { dev_err(pdata->dev, "HASH FILTERING for mc addresses not Supported in SW\n"); - ioctl_data->l2_filter.oper_mode = (OSI_OPER_EN_PERFECT | - OSI_OPER_DIS_PROMISC | - OSI_OPER_DIS_ALLMULTI); + l2_filter->oper_mode = (OSI_OPER_EN_PERFECT | + OSI_OPER_DIS_PROMISC | + OSI_OPER_DIS_ALLMULTI); ioctl_data->cmd = OSI_CMD_L2_FILTER; return osi_handle_ioctl(osi_core, ioctl_data); /* address 0 is used for DUT DA so compare with @@ -3984,9 +4034,9 @@ static int ether_prepare_mc_list(struct net_device *dev, #endif /* !OSI_STRIPPED_LIB */ if (netdev_mc_count(dev) > (pdata->num_mac_addr_regs - 1)) { /* switch to PROMISCUOUS mode */ - ioctl_data->l2_filter.oper_mode = (OSI_OPER_DIS_PERFECT | - OSI_OPER_EN_PROMISC | - OSI_OPER_DIS_ALLMULTI); + l2_filter->oper_mode = (OSI_OPER_DIS_PERFECT | + OSI_OPER_EN_PROMISC | + OSI_OPER_DIS_ALLMULTI); dev_dbg(pdata->dev, "enabling Promiscuous mode\n"); ioctl_data->cmd = OSI_CMD_L2_FILTER; @@ -3996,29 +4046,27 @@ static int ether_prepare_mc_list(struct net_device *dev, "select PERFECT FILTERING for mc addresses, mc_count = %d, num_mac_addr_regs = %d\n", netdev_mc_count(dev), pdata->num_mac_addr_regs); - ioctl_data->l2_filter.oper_mode = (OSI_OPER_EN_PERFECT | - OSI_OPER_ADDR_UPDATE | - OSI_OPER_DIS_PROMISC | - OSI_OPER_DIS_ALLMULTI); + l2_filter->oper_mode = (OSI_OPER_EN_PERFECT | + OSI_OPER_ADDR_UPDATE | + OSI_OPER_DIS_PROMISC | + OSI_OPER_DIS_ALLMULTI); netdev_for_each_mc_addr(ha, dev) { dev_dbg(pdata->dev, "mc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n", i, ha->addr[0], ha->addr[1], ha->addr[2], ha->addr[3], ha->addr[4], ha->addr[5]); - ioctl_data->l2_filter.index = i; - 
memcpy(ioctl_data->l2_filter.mac_addr, ha->addr, + l2_filter->index = i; + memcpy(l2_filter->mac_addr, ha->addr, ETH_ALEN); - ioctl_data->l2_filter.dma_routing = OSI_ENABLE; + l2_filter->dma_routing = OSI_ENABLE; if (osi_dma->num_dma_chans > 1) { - ioctl_data->l2_filter.dma_chan = - osi_dma->dma_chans[1]; + l2_filter->dma_chan = osi_dma->dma_chans[1]; } else { - ioctl_data->l2_filter.dma_chan = - osi_dma->dma_chans[0]; + l2_filter->dma_chan = osi_dma->dma_chans[0]; } - ioctl_data->l2_filter.addr_mask = OSI_AMASK_DISABLE; - ioctl_data->l2_filter.src_dest = OSI_DA_MATCH; - ioctl_data->l2_filter.pkt_dup = OSI_ENABLE; + l2_filter->addr_mask = OSI_AMASK_DISABLE; + l2_filter->src_dest = OSI_DA_MATCH; + l2_filter->pkt_dup = OSI_ENABLE; ioctl_data->cmd = OSI_CMD_L2_FILTER; ret = osi_handle_ioctl(pdata->osi_core, ioctl_data); if (ret < 0) { @@ -4028,7 +4076,7 @@ static int ether_prepare_mc_list(struct net_device *dev, } memcpy(pdata->mac_addr[i].addr, ha->addr, ETH_ALEN); - pdata->mac_addr[i].dma_chan = ioctl_data->l2_filter.dma_chan; + pdata->mac_addr[i].dma_chan = l2_filter->dma_chan; if (i == EQOS_MAX_MAC_ADDRESS_FILTER - 1) { dev_err(pdata->dev, "Configured max number of supported MAC, ignoring it\n"); @@ -4063,6 +4111,7 @@ static int ether_prepare_uc_list(struct net_device *dev, struct ether_priv_data *pdata = netdev_priv(dev); struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_dma_priv_data *osi_dma = pdata->osi_dma; + struct osi_filter *l2_filter = (struct osi_filter *)&ioctl_data->data.l2_filter; /* last valid MC/MAC DA + 1 should be start of UC addresses */ unsigned int i = *mac_addr_idx; struct netdev_hw_addr *ha; @@ -4073,16 +4122,16 @@ static int ether_prepare_uc_list(struct net_device *dev, goto exit_func; } - memset(&ioctl_data->l2_filter, 0x0, sizeof(struct osi_filter)); + memset(l2_filter, 0x0, sizeof(struct osi_filter)); #ifndef OSI_STRIPPED_LIB if (pdata->l2_filtering_mode == OSI_HASH_FILTER_MODE) { dev_err(pdata->dev, "HASH FILTERING for uc 
addresses not Supported in SW\n"); /* Perfect filtering for multicast */ - ioctl_data->l2_filter.oper_mode = (OSI_OPER_EN_PERFECT | - OSI_OPER_DIS_PROMISC | - OSI_OPER_DIS_ALLMULTI); + l2_filter->oper_mode = (OSI_OPER_EN_PERFECT | + OSI_OPER_DIS_PROMISC | + OSI_OPER_DIS_ALLMULTI); ioctl_data->cmd = OSI_CMD_L2_FILTER; return osi_handle_ioctl(osi_core, ioctl_data); } @@ -4094,9 +4143,9 @@ static int ether_prepare_uc_list(struct net_device *dev, } if (netdev_uc_count(dev) > (pdata->num_mac_addr_regs - i)) { /* switch to PROMISCUOUS mode */ - ioctl_data->l2_filter.oper_mode = (OSI_OPER_DIS_PERFECT | - OSI_OPER_EN_PROMISC | - OSI_OPER_DIS_ALLMULTI); + l2_filter->oper_mode = (OSI_OPER_DIS_PERFECT | + OSI_OPER_EN_PROMISC | + OSI_OPER_DIS_ALLMULTI); dev_dbg(pdata->dev, "enabling Promiscuous mode\n"); ioctl_data->cmd = OSI_CMD_L2_FILTER; return osi_handle_ioctl(osi_core, ioctl_data); @@ -4105,28 +4154,25 @@ static int ether_prepare_uc_list(struct net_device *dev, "select PERFECT FILTERING for uc addresses: uc_count = %d\n", netdev_uc_count(dev)); - ioctl_data->l2_filter.oper_mode = (OSI_OPER_EN_PERFECT | - OSI_OPER_ADDR_UPDATE | - OSI_OPER_DIS_PROMISC | - OSI_OPER_DIS_ALLMULTI); + l2_filter->oper_mode = (OSI_OPER_EN_PERFECT | + OSI_OPER_ADDR_UPDATE | + OSI_OPER_DIS_PROMISC | + OSI_OPER_DIS_ALLMULTI); netdev_for_each_uc_addr(ha, dev) { dev_dbg(pdata->dev, "uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n", i, ha->addr[0], ha->addr[1], ha->addr[2], ha->addr[3], ha->addr[4], ha->addr[5]); - ioctl_data->l2_filter.index = i; - memcpy(ioctl_data->l2_filter.mac_addr, ha->addr, - ETH_ALEN); - ioctl_data->l2_filter.dma_routing = OSI_ENABLE; + l2_filter->index = i; + memcpy(l2_filter->mac_addr, ha->addr, ETH_ALEN); + l2_filter->dma_routing = OSI_ENABLE; if (osi_dma->num_dma_chans > 1) { - ioctl_data->l2_filter.dma_chan = - osi_dma->dma_chans[1]; + l2_filter->dma_chan = osi_dma->dma_chans[1]; } else { - ioctl_data->l2_filter.dma_chan = - osi_dma->dma_chans[0]; + l2_filter->dma_chan = 
osi_dma->dma_chans[0]; } - ioctl_data->l2_filter.addr_mask = OSI_AMASK_DISABLE; - ioctl_data->l2_filter.src_dest = OSI_DA_MATCH; + l2_filter->addr_mask = OSI_AMASK_DISABLE; + l2_filter->src_dest = OSI_DA_MATCH; ioctl_data->cmd = OSI_CMD_L2_FILTER; ret = osi_handle_ioctl(pdata->osi_core, ioctl_data); @@ -4138,7 +4184,7 @@ static int ether_prepare_uc_list(struct net_device *dev, } memcpy(pdata->mac_addr[i].addr, ha->addr, ETH_ALEN); - pdata->mac_addr[i].dma_chan = ioctl_data->l2_filter.dma_chan; + pdata->mac_addr[i].dma_chan = l2_filter->dma_chan; if (i == EQOS_MAX_MAC_ADDRESS_FILTER - 1) { dev_err(pdata->dev, "Already MAX MAC added\n"); @@ -4169,16 +4215,16 @@ void ether_set_rx_mode(struct net_device *dev) struct osi_core_priv_data *osi_core = pdata->osi_core; /* store last call last_uc_filter_index in temporary variable */ struct osi_ioctl ioctl_data = {}; + struct osi_filter *l2_filter = (struct osi_filter *)&ioctl_data.data.l2_filter; unsigned int mac_addr_idx = ETHER_MAC_ADDRESS_INDEX + 1U, i; int ret = -1; - memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter)); + memset(l2_filter, 0x0, sizeof(struct osi_filter)); if ((dev->flags & IFF_PROMISC) == IFF_PROMISC) { if (pdata->promisc_mode == OSI_ENABLE) { - ioctl_data.l2_filter.oper_mode = - (OSI_OPER_DIS_PERFECT | - OSI_OPER_EN_PROMISC | - OSI_OPER_DIS_ALLMULTI); + l2_filter->oper_mode = (OSI_OPER_DIS_PERFECT | + OSI_OPER_EN_PROMISC | + OSI_OPER_DIS_ALLMULTI); dev_dbg(pdata->dev, "enabling Promiscuous mode\n"); ioctl_data.cmd = OSI_CMD_L2_FILTER; ret = osi_handle_ioctl(osi_core, &ioctl_data); @@ -4192,9 +4238,9 @@ void ether_set_rx_mode(struct net_device *dev) } return; } else if ((dev->flags & IFF_ALLMULTI) == IFF_ALLMULTI) { - ioctl_data.l2_filter.oper_mode = (OSI_OPER_EN_ALLMULTI | - OSI_OPER_DIS_PERFECT | - OSI_OPER_DIS_PROMISC); + l2_filter->oper_mode = (OSI_OPER_EN_ALLMULTI | + OSI_OPER_DIS_PERFECT | + OSI_OPER_DIS_PROMISC); dev_dbg(pdata->dev, "pass all multicast pkt\n"); ioctl_data.cmd = 
OSI_CMD_L2_FILTER; ret = osi_handle_ioctl(osi_core, &ioctl_data); @@ -4220,15 +4266,14 @@ void ether_set_rx_mode(struct net_device *dev) if (pdata->last_filter_index > mac_addr_idx) { for (i = mac_addr_idx; i < pdata->last_filter_index; i++) { /* Reset the filter structure to avoid any old value */ - memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter)); - ioctl_data.l2_filter.oper_mode = OSI_OPER_ADDR_DEL; - ioctl_data.l2_filter.index = i; - ioctl_data.l2_filter.dma_routing = OSI_ENABLE; - memcpy(ioctl_data.l2_filter.mac_addr, - pdata->mac_addr[i].addr, ETH_ALEN); - ioctl_data.l2_filter.dma_chan = pdata->mac_addr[i].dma_chan; - ioctl_data.l2_filter.addr_mask = OSI_AMASK_DISABLE; - ioctl_data.l2_filter.src_dest = OSI_DA_MATCH; + memset(l2_filter, 0x0, sizeof(struct osi_filter)); + l2_filter->oper_mode = OSI_OPER_ADDR_DEL; + l2_filter->index = i; + l2_filter->dma_routing = OSI_ENABLE; + memcpy(l2_filter->mac_addr, pdata->mac_addr[i].addr, ETH_ALEN); + l2_filter->dma_chan = pdata->mac_addr[i].dma_chan; + l2_filter->addr_mask = OSI_AMASK_DISABLE; + l2_filter->src_dest = OSI_DA_MATCH; ioctl_data.cmd = OSI_CMD_L2_FILTER; ret = osi_handle_ioctl(osi_core, &ioctl_data); @@ -4244,10 +4289,10 @@ void ether_set_rx_mode(struct net_device *dev) /* Set default MAC configuration because if this path is called * only when flag for promiscuous or all_multi is not set. 
*/ - memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter)); - ioctl_data.l2_filter.oper_mode = (OSI_OPER_EN_PERFECT | - OSI_OPER_DIS_PROMISC | - OSI_OPER_DIS_ALLMULTI); + memset(l2_filter, 0x0, sizeof(struct osi_filter)); + l2_filter->oper_mode = (OSI_OPER_EN_PERFECT | + OSI_OPER_DIS_PROMISC | + OSI_OPER_DIS_ALLMULTI); ioctl_data.cmd = OSI_CMD_L2_FILTER; ret = osi_handle_ioctl(osi_core, &ioctl_data); @@ -7295,42 +7340,6 @@ static void init_filter_values(struct ether_priv_data *pdata) } } -#ifndef OSI_STRIPPED_LIB -/** - * @brief ether_init_rss - Init OSI RSS structure - * - * Algorithm: Populates RSS hash key and table in OSI core structure. - * - * @param[in] pdata: Ethernet private data - * @param[in] features: Netdev features - */ -static void ether_init_rss(struct ether_priv_data *pdata, - netdev_features_t features) -{ - struct osi_core_priv_data *osi_core = pdata->osi_core; - unsigned int num_q = osi_core->num_mtl_queues; - unsigned int i = 0; - - if ((features & NETIF_F_RXHASH) == NETIF_F_RXHASH) { - osi_core->rss.enable = 1; - } else { - osi_core->rss.enable = 0; - return; - } - - /* generate random key */ - netdev_rss_key_fill(osi_core->rss.key, sizeof(osi_core->rss.key)); - - /* In T26x mgbe default 8 VDMA channels enabled */ - if (osi_core->mac == OSI_MAC_HW_MGBE_T26X) { - num_q = pdata->osi_dma->num_dma_chans; - } - /* initialize hash table */ - for (i = 0; i < OSI_RSS_MAX_TABLE_SIZE; i++) - osi_core->rss.table[i] = ethtool_rxfh_indir_default(i, num_q); -} -#endif /* !OSI_STRIPPED_LIB */ - int ether_probe(struct platform_device *pdev) { struct ether_priv_data *pdata; @@ -7455,7 +7464,7 @@ int ether_probe(struct platform_device *pdev) goto err_dma_mask; } osi_core->mac_ver = ioctl_data->arg1_u32; - memcpy(&pdata->hw_feat, &ioctl_data->hw_feat, + memcpy(&pdata->hw_feat, &ioctl_data->data.hw_feat, sizeof(struct osi_hw_features)); ret = ether_get_mac_address(pdata); @@ -7485,11 +7494,6 @@ int ether_probe(struct platform_device *pdev) /* Set netdev 
features based on hw features */ ether_set_ndev_features(ndev, pdata); -#ifndef OSI_STRIPPED_LIB - /* RSS init */ - ether_init_rss(pdata, ndev->features); -#endif /* !OSI_STRIPPED_LIB */ - ret = ether_get_irqs(pdev, pdata, num_dma_chans); if (ret < 0) { dev_err(&pdev->dev, "failed to get IRQ's\n"); diff --git a/drivers/net/ethernet/nvidia/nvethernet/ether_tc.c b/drivers/net/ethernet/nvidia/nvethernet/ether_tc.c index 6dc41089..1cb7c52c 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ether_tc.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ether_tc.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2019-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved */ +/* Copyright (c) 2019-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved */ #include #include "ether_linux.h" @@ -10,6 +10,8 @@ int ether_tc_setup_taprio(struct ether_priv_data *pdata, struct osi_core_priv_data *osi_core = pdata->osi_core; unsigned int fpe_required = OSI_DISABLE; struct osi_ioctl tc_ioctl_data = {}; + struct osi_est_config *est = (struct osi_est_config *)&tc_ioctl_data.data.est; + struct osi_fpe_config *fpe = (struct osi_fpe_config *)&tc_ioctl_data.data.fpe; unsigned long cycle_time = 0x0U; /* Hardcode width base on current HW config, input parameter validation * will be done by OSI code any way @@ -53,8 +55,8 @@ int ether_tc_setup_taprio(struct ether_priv_data *pdata, goto done; } - memset(&tc_ioctl_data.est, 0x0, sizeof(struct osi_est_config)); - memset(&tc_ioctl_data.fpe, 0x0, sizeof(struct osi_fpe_config)); + memset(est, 0x0, sizeof(struct osi_est_config)); + memset(fpe, 0x0, sizeof(struct osi_fpe_config)); /* This code is to disable TSN, User space is asking to disable */ @@ -66,23 +68,23 @@ int ether_tc_setup_taprio(struct ether_priv_data *pdata, goto disable; } - tc_ioctl_data.est.llr = qopt->num_entries; + est->llr = qopt->num_entries; #if defined(NV_TC_TAPRIO_QOPT_OFFLOAD_STRUCT_HAS_CMD) /* Linux v6.4.5 */ switch (qopt->cmd) { case 
TAPRIO_CMD_REPLACE: - tc_ioctl_data.est.en_dis = true; + est->en_dis = true; break; case TAPRIO_CMD_DESTROY: - tc_ioctl_data.est.en_dis = false; + est->en_dis = false; break; default: return -EOPNOTSUPP; } #else - tc_ioctl_data.est.en_dis = qopt->enable; + est->en_dis = qopt->enable; #endif //NV_TC_TAPRIO_QOPT_OFFLOAD_STRUCT_HAS_CMD - for (i = 0U; i < tc_ioctl_data.est.llr; i++) { + for (i = 0U; i < est->llr; i++) { cycle_time = qopt->entries[i].interval; gates = qopt->entries[i].gate_mask; @@ -109,8 +111,8 @@ int ether_tc_setup_taprio(struct ether_priv_data *pdata, goto done; } - tc_ioctl_data.est.gcl[i] = cycle_time | (gates << wid); - if (tc_ioctl_data.est.gcl[i] > wid_val) { + est->gcl[i] = cycle_time | (gates << wid); + if (est->gcl[i] > wid_val) { netdev_err(pdata->ndev, "invalid GCL creation\n"); ret = -EINVAL; goto done; @@ -121,14 +123,14 @@ int ether_tc_setup_taprio(struct ether_priv_data *pdata, * some offset to avoid BTRE */ time = ktime_to_timespec64(qopt->base_time); - tc_ioctl_data.est.btr[0] = (unsigned int)time.tv_nsec; - tc_ioctl_data.est.btr[1] = (unsigned int)time.tv_sec; - tc_ioctl_data.est.btr_offset[0] = 0; - tc_ioctl_data.est.btr_offset[1] = 0; + est->btr[0] = (unsigned int)time.tv_nsec; + est->btr[1] = (unsigned int)time.tv_sec; + est->btr_offset[0] = 0; + est->btr_offset[1] = 0; ctr = qopt->cycle_time; - tc_ioctl_data.est.ctr[0] = do_div(ctr, NSEC_PER_SEC); - tc_ioctl_data.est.ctr[1] = (unsigned int)ctr; + est->ctr[0] = do_div(ctr, NSEC_PER_SEC); + est->ctr[1] = (unsigned int)ctr; if ((!pdata->hw_feat.fpe_sel) && (fpe_required == OSI_ENABLE)) { netdev_err(pdata->ndev, "FPE not supported in HW\n"); @@ -137,8 +139,8 @@ int ether_tc_setup_taprio(struct ether_priv_data *pdata, } if (fpe_required == OSI_ENABLE) { - tc_ioctl_data.fpe.rq = osi_core->residual_queue; - tc_ioctl_data.fpe.tx_queue_preemption_enable = 0x1; + fpe->rq = osi_core->residual_queue; + fpe->tx_queue_preemption_enable = 0x1; tc_ioctl_data.cmd = OSI_CMD_CONFIG_FPE; ret = 
osi_handle_ioctl(osi_core, &tc_ioctl_data); if (ret < 0) { @@ -161,11 +163,11 @@ int ether_tc_setup_taprio(struct ether_priv_data *pdata, return 0; disable: - tc_ioctl_data.est.en_dis = false; + est->en_dis = false; tc_ioctl_data.cmd = OSI_CMD_CONFIG_EST; ret = osi_handle_ioctl(osi_core, &tc_ioctl_data); if ((ret >= 0) && (fpe_required == OSI_ENABLE)) { - tc_ioctl_data.fpe.tx_queue_preemption_enable = 0x0; + fpe->tx_queue_preemption_enable = 0x0; tc_ioctl_data.cmd = OSI_CMD_CONFIG_FPE; ret = osi_handle_ioctl(osi_core, &tc_ioctl_data); } @@ -180,6 +182,7 @@ int ether_tc_setup_cbs(struct ether_priv_data *pdata, struct osi_core_priv_data *osi_core = pdata->osi_core; struct phy_device *phydev = pdata->phydev; struct osi_ioctl ioctl_data = {}; + struct osi_core_avb_algorithm *avb = (struct osi_core_avb_algorithm *)&ioctl_data.data.avb; int queue = qopt->queue; unsigned int multiplier, speed_div; unsigned long value; @@ -223,34 +226,34 @@ int ether_tc_setup_cbs(struct ether_priv_data *pdata, return -EINVAL; } - ioctl_data.avb.qindex = (unsigned int)queue; - ioctl_data.avb.tcindex = (unsigned int)queue; + avb->qindex = (unsigned int)queue; + avb->tcindex = (unsigned int)queue; if (qopt->enable) { - ioctl_data.avb.algo = OSI_MTL_TXQ_AVALG_CBS; - ioctl_data.avb.oper_mode = OSI_MTL_QUEUE_AVB; - ioctl_data.avb.credit_control = OSI_ENABLE; + avb->algo = OSI_MTL_TXQ_AVALG_CBS; + avb->oper_mode = OSI_MTL_QUEUE_AVB; + avb->credit_control = OSI_ENABLE; } else { /* For EQOS harware library code use internally SP(0) and For MGBE harware library code use internally ETS(2) if algo != CBS. 
*/ - ioctl_data.avb.algo = OSI_MTL_TXQ_AVALG_SP; - ioctl_data.avb.oper_mode = OSI_MTL_QUEUE_ENABLE; - ioctl_data.avb.credit_control = OSI_DISABLE; + avb->algo = OSI_MTL_TXQ_AVALG_SP; + avb->oper_mode = OSI_MTL_QUEUE_ENABLE; + avb->credit_control = OSI_DISABLE; } /* Final adjustments for HW */ value = div_s64(qopt->idleslope * 1024ll * multiplier, speed_div); - ioctl_data.avb.idle_slope = (unsigned long)value; + avb->idle_slope = (unsigned long)value; value = div_s64(-qopt->sendslope * 1024ll * multiplier, speed_div); - ioctl_data.avb.send_slope = (unsigned long)value; + avb->send_slope = (unsigned long)value; value = qopt->hicredit * 1024ll * 8; - ioctl_data.avb.hi_credit = (unsigned long)value; + avb->hi_credit = (unsigned long)value; value = qopt->locredit * 1024ll * 8; - ioctl_data.avb.low_credit = (unsigned long)value; + avb->low_credit = (unsigned long)value; ioctl_data.cmd = OSI_CMD_SET_AVB; diff --git a/drivers/net/ethernet/nvidia/nvethernet/ethtool.c b/drivers/net/ethernet/nvidia/nvethernet/ethtool.c index cf0f07e5..78b6d0ba 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ethtool.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ethtool.c @@ -1941,18 +1941,16 @@ int ether_get_rxnfc(struct net_device *ndev, u32 ether_get_rxfh_key_size(struct net_device *ndev) { - struct ether_priv_data *pdata = netdev_priv(ndev); - struct osi_core_priv_data *osi_core = pdata->osi_core; + struct osi_ioctl ioctl_data = {}; - return sizeof(osi_core->rss.key); + return sizeof(ioctl_data.data.rss.key); } u32 ether_get_rxfh_indir_size(struct net_device *ndev) { - struct ether_priv_data *pdata = netdev_priv(ndev); - struct osi_core_priv_data *osi_core = pdata->osi_core; + struct osi_ioctl ioctl_data = {}; - return ARRAY_SIZE(osi_core->rss.table); + return ARRAY_SIZE(ioctl_data.data.rss.table); } /** @@ -1976,28 +1974,40 @@ static int ether_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key, { struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core 
= pdata->osi_core; + struct osi_ioctl ioctl_data = {}; + struct osi_core_rss *rss = (struct osi_core_rss *)&ioctl_data.data.rss; #if defined(NV_ETHTOOL_OPS_GET_SET_RXFH_HAS_RXFH_PARAM_ARGS) u32 *indir = rxfh->indir; u8 *hfunc = &rxfh->hfunc; u8 *key = rxfh->key; #endif int i; + int ret = 0; - if (indir) { - for (i = 0; i < ARRAY_SIZE(osi_core->rss.table); i++) - indir[i] = osi_core->rss.table[i]; + ioctl_data.cmd = OSI_CMD_GET_RSS; + ret = osi_handle_ioctl(osi_core, &ioctl_data); + if (ret != 0) { + dev_err(pdata->dev, "Failed to get RSS info from registers\n"); + return ret; + } + + if (indir) { + for (i = 0; i < ARRAY_SIZE(rss->table); i++) + indir[i] = rss->table[i]; + } + + if (key) { + memcpy(key, rss->key, sizeof(rss->key)); } - if (key) - memcpy(key, osi_core->rss.key, sizeof(osi_core->rss.key)); if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - return 0; + return ret; } /** - * @brief Set the contents of the RX flow hash indirection table, hash key + * @brief Set the contents of the RX flow hash indirection table, hash key * and/or hash function * * param[in] ndev: Pointer to net device structure. 
@@ -2018,14 +2028,15 @@ static int ether_set_rxfh(struct net_device *ndev, const u32 *indir, #endif { struct ether_priv_data *pdata = netdev_priv(ndev); - struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_ioctl ioctl_data = {}; + struct osi_core_rss *rss = (struct osi_core_rss *)&ioctl_data.data.rss; #if defined(NV_ETHTOOL_OPS_GET_SET_RXFH_HAS_RXFH_PARAM_ARGS) u32 *indir = rxfh->indir; u8 hfunc = rxfh->hfunc; u8 *key = rxfh->key; #endif int i; + int ret = 0; if (!netif_running(ndev)) { netdev_err(pdata->ndev, "interface must be up\n"); @@ -2035,17 +2046,27 @@ static int ether_set_rxfh(struct net_device *ndev, const u32 *indir, if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) return -EOPNOTSUPP; - if (indir) { - for (i = 0; i < ARRAY_SIZE(osi_core->rss.table); i++) - osi_core->rss.table[i] = indir[i]; + /* First get current RSS configuration and update what ever required */ + ioctl_data.cmd = OSI_CMD_GET_RSS; + ret = osi_handle_ioctl(pdata->osi_core, &ioctl_data); + if (ret != 0) { + dev_err(pdata->dev, "Failed to get current RSS configuration\n"); + return ret; } - if (key) - memcpy(osi_core->rss.key, key, sizeof(osi_core->rss.key)); + if (indir) { + for (i = 0; i < ARRAY_SIZE(rss->table); i++) + rss->table[i] = indir[i]; + } + + if (key) { + memcpy(rss->key, key, sizeof(rss->key)); + } + /* RSS need to be enabled for applying the settings */ + rss->enable = 1; ioctl_data.cmd = OSI_CMD_CONFIG_RSS; return osi_handle_ioctl(pdata->osi_core, &ioctl_data); - } #if defined(NV_ETHTOOL_OPS_GET_SET_RINGPARAM_HAS_RINGPARAM_AND_EXTACT_ARGS) /* Linux v5.17 */ diff --git a/drivers/net/ethernet/nvidia/nvethernet/ioctl.c b/drivers/net/ethernet/nvidia/nvethernet/ioctl.c index 4ba3fab5..a4cef008 100644 --- a/drivers/net/ethernet/nvidia/nvethernet/ioctl.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ioctl.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2019-2024, NVIDIA CORPORATION & AFFILIATES. 
All rights reserved */ +/* Copyright (c) 2019-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved */ #include "ether_linux.h" @@ -26,6 +26,7 @@ static int ether_set_avb_algo(struct net_device *ndev, struct osi_tx_ring *tx_ring = NULL; #endif /* !OSI_STRIPPED_LIB */ struct osi_ioctl ioctl_data = {}; + struct osi_core_avb_algorithm *avb = (struct osi_core_avb_algorithm *)&ioctl_data.data.avb; int ret = -1; if (ifdata->ptr == NULL) { @@ -34,7 +35,7 @@ static int ether_set_avb_algo(struct net_device *ndev, return ret; } - if (copy_from_user(&ioctl_data.avb, + if (copy_from_user(avb, (void __user *)ifdata->ptr, sizeof(struct osi_core_avb_algorithm)) != 0U) { dev_err(pdata->dev, @@ -42,19 +43,19 @@ static int ether_set_avb_algo(struct net_device *ndev, return ret; } - if (ioctl_data.avb.qindex >= OSI_MGBE_MAX_NUM_QUEUES) { + if (avb->qindex >= OSI_MGBE_MAX_NUM_QUEUES) { dev_err(pdata->dev, "Invalid queue index from user\n"); return -EINVAL; } #ifndef OSI_STRIPPED_LIB /* Check AVB mode disable on slot function enable */ - tx_ring = osi_dma->tx_ring[ioctl_data.avb.qindex]; + tx_ring = osi_dma->tx_ring[avb->qindex]; if (tx_ring && tx_ring->slot_check == OSI_ENABLE && - ioctl_data.avb.oper_mode == OSI_MTL_QUEUE_ENABLE) { + avb->oper_mode == OSI_MTL_QUEUE_ENABLE) { dev_err(pdata->dev, "Can't disable queue:%d AVB mode when slot is enabled", - ioctl_data.avb.qindex); + avb->qindex); return -EINVAL; } #endif /* !OSI_STRIPPED_LIB */ @@ -130,7 +131,7 @@ static int ether_get_tsc_ptp_cap(struct net_device *ndev, return -EINVAL; } - if (copy_from_user(&ioctl_data.ptp_tsc, + if (copy_from_user(&ioctl_data.data.ptp_tsc, (void __user *)ifdata->ptr, sizeof(struct osi_core_ptp_tsc_data)) != 0U) { dev_err(pdata->dev, @@ -145,7 +146,7 @@ static int ether_get_tsc_ptp_cap(struct net_device *ndev, "Failed to get TSC Struct info from registers\n"); return ret; } - if (copy_to_user(ifdata->ptr, &ioctl_data.ptp_tsc, + if (copy_to_user(ifdata->ptr, &ioctl_data.data.ptp_tsc, sizeof(struct 
osi_core_ptp_tsc_data)) != 0U) { dev_err(pdata->dev, "%s: copy_to_user failed\n", __func__); return -EFAULT; @@ -182,7 +183,7 @@ static int ether_get_avb_algo(struct net_device *ndev, return -EINVAL; } - if (copy_from_user(&ioctl_data.avb, + if (copy_from_user(&ioctl_data.data.avb, (void __user *)ifdata->ptr, sizeof(struct osi_core_avb_algorithm)) != 0U) { dev_err(pdata->dev, @@ -197,7 +198,7 @@ static int ether_get_avb_algo(struct net_device *ndev, "Failed to get AVB Struct info from registers\n"); return ret; } - if (copy_to_user(ifdata->ptr, &ioctl_data.avb, + if (copy_to_user(ifdata->ptr, &ioctl_data.data.avb, sizeof(struct osi_core_avb_algorithm)) != 0U) { dev_err(pdata->dev, "%s: copy_to_user failed\n", __func__); return -EFAULT; @@ -226,6 +227,7 @@ static int ether_config_ptp_offload(struct ether_priv_data *pdata, unsigned int snap_type = 0x0; unsigned int master = 0x0; struct osi_ioctl ioctl_data = {}; + struct osi_pto_config *pto = (struct osi_pto_config *)&ioctl_data.data.pto_config; struct timespec64 now; if (!ifrd_p->ptr) { @@ -277,13 +279,13 @@ static int ether_config_ptp_offload(struct ether_priv_data *pdata, master = OSI_DISABLE; } - ioctl_data.pto_config.en_dis = param.en_dis; - ioctl_data.pto_config.snap_type = snap_type; - ioctl_data.pto_config.master = master; - ioctl_data.pto_config.domain_num = param.domain_num; - ioctl_data.pto_config.mc_uc = param.mc_uc; + pto->en_dis = param.en_dis; + pto->snap_type = snap_type; + pto->master = master; + pto->domain_num = param.domain_num; + pto->mc_uc = param.mc_uc; /* PTP port ID hard code to port 1 for POC */ - ioctl_data.pto_config.portid = 0x1U; + pto->portid = 0x1U; ioctl_data.cmd = OSI_CMD_CONFIG_PTP_OFFLOAD; ret = osi_handle_ioctl(pdata->osi_core, &ioctl_data); if (ret < 0) { @@ -468,7 +470,7 @@ static int ether_config_frp_cmd(struct net_device *dev, return ret; } - if (copy_from_user(&ioctl_data.frp_cmd, + if (copy_from_user(&ioctl_data.data.frp_cmd, (void __user *)ifdata->ptr, sizeof(struct 
osi_core_frp_cmd)) != 0U) { dev_err(pdata->dev, "%s copy from user failed\n", __func__); @@ -518,7 +520,7 @@ static int ether_config_l3_l4_filtering(struct net_device *dev, u_l3_filter = (struct osi_l3_l4_filter *)ifdata->ptr; - if (copy_from_user(&ioctl_data.l3l4_filter, (void __user *)u_l3_filter, + if (copy_from_user(&ioctl_data.data.l3l4_filter, (void __user *)u_l3_filter, sizeof(struct osi_l3_l4_filter)) != 0U) { dev_err(pdata->dev, "%s copy from user failed\n", __func__); return -EFAULT; @@ -554,6 +556,7 @@ static int ether_config_l2_filters(struct net_device *dev, struct osi_dma_priv_data *osi_dma = pdata->osi_dma; struct ether_l2_filter u_l2_filter; struct osi_ioctl ioctl_data = {}; + struct osi_filter *l2_filter = (struct osi_filter *)&ioctl_data.data.l2_filter; int ret = -1; if (ifdata->ptr == NULL) { @@ -574,30 +577,29 @@ static int ether_config_l2_filters(struct net_device *dev, return ret; } - ioctl_data.l2_filter.index = u_l2_filter.index; - ioctl_data.l2_filter.src_dest = OSI_DA_MATCH; + l2_filter->index = u_l2_filter.index; + l2_filter->src_dest = OSI_DA_MATCH; - ioctl_data.l2_filter.oper_mode = (OSI_OPER_EN_PERFECT | - OSI_OPER_DIS_PROMISC | - OSI_OPER_DIS_ALLMULTI); + l2_filter->oper_mode = (OSI_OPER_EN_PERFECT | + OSI_OPER_DIS_PROMISC | + OSI_OPER_DIS_ALLMULTI); if (u_l2_filter.en_dis == OSI_ENABLE) { - ioctl_data.l2_filter.oper_mode |= OSI_OPER_ADDR_UPDATE; + l2_filter->oper_mode |= OSI_OPER_ADDR_UPDATE; } else { - ioctl_data.l2_filter.oper_mode |= OSI_OPER_ADDR_DEL; + l2_filter->oper_mode |= OSI_OPER_ADDR_DEL; } - memcpy(ioctl_data.l2_filter.mac_addr, - u_l2_filter.mac_addr, ETH_ALEN); - ioctl_data.l2_filter.dma_routing = OSI_ENABLE; - ioctl_data.l2_filter.addr_mask = OSI_DISABLE; - ioctl_data.l2_filter.pkt_dup = u_l2_filter.pkt_dup; - if (ioctl_data.l2_filter.pkt_dup) { - ioctl_data.l2_filter.dma_chan = u_l2_filter.dma_chan; + memcpy(l2_filter->mac_addr, u_l2_filter.mac_addr, ETH_ALEN); + l2_filter->dma_routing = OSI_ENABLE; + 
l2_filter->addr_mask = OSI_DISABLE; + l2_filter->pkt_dup = u_l2_filter.pkt_dup; + if (l2_filter->pkt_dup) { + l2_filter->dma_chan = u_l2_filter.dma_chan; } else { - ioctl_data.l2_filter.dma_chan = osi_dma->dma_chans[0]; + l2_filter->dma_chan = osi_dma->dma_chans[0]; } - ioctl_data.l2_filter.dma_chansel = OSI_BIT_64(ioctl_data.l2_filter.dma_chan); + l2_filter->dma_chansel = OSI_BIT_64(l2_filter->dma_chan); ioctl_data.cmd = OSI_CMD_L2_FILTER; return osi_handle_ioctl(osi_core, &ioctl_data); } @@ -627,6 +629,7 @@ static int ether_config_vlan_filter(struct net_device *dev, struct osi_vlan_filter *u_vlan_filter = (struct osi_vlan_filter *)ifdata->ptr; struct osi_ioctl ioctl_data = {}; + struct osi_vlan_filter *vlan = (struct osi_vlan_filter *)&ioctl_data.data.vlan_filter; int ret = -EINVAL; if (ifdata->ptr == NULL) { @@ -635,14 +638,14 @@ static int ether_config_vlan_filter(struct net_device *dev, return ret; } - if (copy_from_user(&ioctl_data.vlan_filter, (void __user *)u_vlan_filter, + if (copy_from_user(vlan, (void __user *)u_vlan_filter, sizeof(struct osi_vlan_filter)) != 0U) { dev_err(pdata->dev, "%s copy from user failed", __func__); return -EFAULT; } /*0 - perfect and 1 - hash filtering */ - if (ioctl_data.vlan_filter.perfect_hash == OSI_HASH_FILTER_MODE) { + if (vlan->perfect_hash == OSI_HASH_FILTER_MODE) { dev_err(pdata->dev, "VLAN HASH filtering is not supported\n"); return ret; } @@ -650,8 +653,7 @@ static int ether_config_vlan_filter(struct net_device *dev, ioctl_data.cmd = OSI_CMD_VLAN_FILTER; ret = osi_handle_ioctl(osi_core, &ioctl_data); if (ret == 0) { - pdata->vlan_hash_filtering = - ioctl_data.vlan_filter.perfect_hash; + pdata->vlan_hash_filtering = vlan->perfect_hash; } return ret; @@ -730,7 +732,7 @@ static int ether_config_l2_da_filter(struct net_device *dev, struct osi_ioctl ioctl_data = {}; int ret = -EINVAL; - memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter)); + memset(&ioctl_data.data.l2_filter, 0x0, sizeof(struct osi_filter)); if 
(ifdata->ptr == NULL) { dev_err(pdata->dev, "%s: Invalid data for priv ioctl %d\n", @@ -756,9 +758,9 @@ static int ether_config_l2_da_filter(struct net_device *dev, /* configure L2 DA perfect/inverse_matching */ if (l_l2_da_filter.perfect_inverse_match == OSI_ENABLE) { - ioctl_data.l2_filter.oper_mode |= OSI_OPER_EN_L2_DA_INV; + ioctl_data.data.l2_filter.oper_mode |= OSI_OPER_EN_L2_DA_INV; } else { - ioctl_data.l2_filter.oper_mode |= OSI_OPER_DIS_L2_DA_INV; + ioctl_data.data.l2_filter.oper_mode |= OSI_OPER_DIS_L2_DA_INV; } ioctl_data.cmd = OSI_CMD_L2_FILTER; @@ -902,11 +904,12 @@ static int ether_config_ptp_rxq(struct net_device *ndev, struct ether_priv_data *pdata = netdev_priv(ndev); struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_ioctl ioctl_data = {}; + struct osi_rxq_route *rxq = (struct osi_rxq_route *)&ioctl_data.data.rxq_route; /* Fill PTP RX queue route values and call osi_rxq_route */ - ioctl_data.rxq_route.route_type = OSI_RXQ_ROUTE_PTP; - ioctl_data.rxq_route.enable = OSI_ENABLE; - ioctl_data.rxq_route.idx = flags; + rxq->route_type = OSI_RXQ_ROUTE_PTP; + rxq->enable = OSI_ENABLE; + rxq->idx = flags; ioctl_data.cmd = OSI_CMD_PTP_RXQ_ROUTE; return osi_handle_ioctl(osi_core, &ioctl_data); } @@ -943,7 +946,7 @@ static int ether_config_est(struct net_device *dev, return ret; } - if (copy_from_user(&ioctl_data.est, (void __user *)u_est_cfg, + if (copy_from_user(&ioctl_data.data.est, (void __user *)u_est_cfg, sizeof(struct osi_est_config)) != 0U) { return -EFAULT; } @@ -990,7 +993,7 @@ static int ether_config_fpe(struct net_device *dev, return ret; } - if (copy_from_user(&ioctl_data.fpe, (void __user *)u_fpe_cfg, + if (copy_from_user(&ioctl_data.data.fpe, (void __user *)u_fpe_cfg, sizeof(struct osi_fpe_config)) != 0U) { dev_err(pdata->dev, "%s: copy_from_user error\n", __func__); return -EFAULT; diff --git a/drivers/net/ethernet/nvidia/nvethernet/ptp.c b/drivers/net/ethernet/nvidia/nvethernet/ptp.c index 366bf9c6..354a14db 100644 --- 
a/drivers/net/ethernet/nvidia/nvethernet/ptp.c +++ b/drivers/net/ethernet/nvidia/nvethernet/ptp.c @@ -59,7 +59,7 @@ static inline int ether_get_hw_time(struct net_device *dev, raw_spin_unlock_irqrestore(&pdata->ptp_lock, flags); return ret; } - memcpy(&local_ts, &ioctl_data.ptp_tsc, + memcpy(&local_ts, &ioctl_data.data.ptp_tsc, sizeof(struct osi_core_ptp_tsc_data)); ((struct ptp_tsc_data *)ts)->ptp_ts = local_ts.ptp_low_bits + @@ -294,23 +294,24 @@ static void ether_config_slot_function(struct ether_priv_data *pdata, u32 set) struct osi_core_priv_data *osi_core = pdata->osi_core; unsigned int ret, i, chan, qinx; struct osi_ioctl ioctl_data = {}; + struct osi_core_avb_algorithm *avb = (struct osi_core_avb_algorithm *)&ioctl_data.data.avb; /* Configure TXQ AVB mode */ for (i = 0; i < osi_dma->num_dma_chans; i++) { chan = osi_dma->dma_chans[i]; if (osi_dma->slot_enabled[chan] == OSI_ENABLE) { /* Set TXQ AVB info */ - memset(&ioctl_data.avb, 0, + memset(avb, 0, sizeof(struct osi_core_avb_algorithm)); qinx = osi_core->mtl_queues[i]; - ioctl_data.avb.qindex = qinx; + avb->qindex = qinx; /* For EQOS harware library code use internally SP(0) and For MGBE harware library code use internally ETS(2) if algo != CBS. */ - ioctl_data.avb.algo = OSI_MTL_TXQ_AVALG_SP; - ioctl_data.avb.oper_mode = (set == OSI_ENABLE) ? - OSI_MTL_QUEUE_AVB : - OSI_MTL_QUEUE_ENABLE; + avb->algo = OSI_MTL_TXQ_AVALG_SP; + avb->oper_mode = (set == OSI_ENABLE) ? + OSI_MTL_QUEUE_AVB : + OSI_MTL_QUEUE_ENABLE; ioctl_data.cmd = OSI_CMD_SET_AVB; ret = osi_handle_ioctl(osi_core, &ioctl_data);