nvethernet: Add Linux safety support

Issue:
Observed compilation issues in the nvethernet driver for DRIVE
Linux safety builds.

Fix:
Add OSI_STRIPPED_LIB support to the nvethernet driver: guard the
optional, non-safety features (EEE, pause frames, VLAN filtering,
RSS, extra statistics) so they are compiled out of safety builds.
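
The change follows a single pattern: both the definition and every call
site of a non-safety feature are wrapped in the same preprocessor guard.
A minimal sketch of that pattern is shown below; the example_* names and
the simplified signatures are illustrative only, not the actual driver
symbols.

    struct example_priv;

    #ifndef OSI_STRIPPED_LIB
    /* Feature code (e.g. EEE configuration) exists only in non-safety builds. */
    static int example_conf_eee(struct example_priv *priv, unsigned int enable)
    {
            (void)priv;
            (void)enable;
            /* ... program the LPI/EEE registers here ... */
            return 0;
    }
    #endif /* !OSI_STRIPPED_LIB */

    static void example_link_up(struct example_priv *priv)
    {
    #ifndef OSI_STRIPPED_LIB
            /* Every call site carries the same guard, so a safety build
             * (OSI_STRIPPED_LIB defined) has no reference to the stripped code.
             */
            (void)example_conf_eee(priv, 1U);
    #endif /* !OSI_STRIPPED_LIB */
            /* Common (safety and non-safety) link-up handling continues here. */
    }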

Bug 3939603

Change-Id: Id4213c1cf33dc0e925df3e627072b727ae44fa71
Signed-off-by: Mohan Thadikamalla <mohant@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2842470
(cherry picked from commit ece0eb57a89396ea8bdc412beeaa7dfabbda49b1)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2879701
Tested-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
@@ -725,6 +725,7 @@ static int ether_enable_clks(struct ether_priv_data *pdata)
return 0;
}
#ifndef OSI_STRIPPED_LIB
/**
* @brief ether_conf_eee - Init and configure EEE LPI in the MAC
*
@@ -785,6 +786,7 @@ int ether_conf_eee(struct ether_priv_data *pdata, unsigned int tx_lpi_enable)
return ret;
}
#endif /* !OSI_STRIPPED_LIB */
/**
* @brief Set MGBE MAC_DIV/TX clk rate
@@ -860,7 +862,9 @@ static inline void set_speed_work_func(struct work_struct *work)
struct net_device *dev = pdata->ndev;
struct phy_device *phydev = pdata->phydev;
nveu32_t iface_mode = pdata->osi_core->phy_iface_mode;
#ifndef OSI_STRIPPED_LIB
unsigned int eee_enable = OSI_DISABLE;
#endif /* !OSI_STRIPPED_LIB */
int speed;
int ret = 0;
@@ -910,11 +914,13 @@ static inline void set_speed_work_func(struct work_struct *work)
ether_set_mgbe_mac_div_rate(pdata->mac_div_clk,
pdata->speed);
#ifndef OSI_STRIPPED_LIB
if (pdata->eee_enabled && pdata->tx_lpi_enabled) {
/* Configure EEE if it is enabled */
eee_enable = OSI_ENABLE;
}
pdata->eee_active = ether_conf_eee(pdata, eee_enable);
#endif /* !OSI_STRIPPED_LIB */
netif_carrier_on(dev);
atomic_set(&pdata->set_speed_ref_cnt, OSI_DISABLE);
@@ -972,7 +978,9 @@ static void ether_adjust_link(struct net_device *dev)
struct phy_device *phydev = pdata->phydev;
int new_state = 0, speed_changed = 0, speed;
unsigned long val;
#ifndef OSI_STRIPPED_LIB
unsigned int eee_enable = OSI_DISABLE;
#endif /* !OSI_STRIPPED_LIB */
struct osi_ioctl ioctl_data = {};
int ret = 0;
@@ -982,6 +990,7 @@ static void ether_adjust_link(struct net_device *dev)
cancel_delayed_work_sync(&pdata->set_speed_work);
if (phydev->link) {
#ifndef OSI_STRIPPED_LIB
if ((pdata->osi_core->pause_frames == OSI_PAUSE_FRAMES_ENABLE)
&& (phydev->pause || phydev->asym_pause)) {
ioctl_data.cmd = OSI_CMD_FLOW_CTRL;
@@ -992,6 +1001,7 @@ static void ether_adjust_link(struct net_device *dev)
return;
}
}
#endif /* !OSI_STRIPPED_LIB */
if (pdata->fixed_link == OSI_ENABLE) {
if (pdata->osi_core->mac == OSI_MAC_HW_MGBE) {
@@ -1003,6 +1013,7 @@ static void ether_adjust_link(struct net_device *dev)
phydev->duplex = OSI_FULL_DUPLEX;
}
}
#ifndef OSI_STRIPPED_LIB
if (phydev->duplex != pdata->oldduplex) {
new_state = 1;
ioctl_data.cmd = OSI_CMD_SET_MODE;
@@ -1014,6 +1025,7 @@ static void ether_adjust_link(struct net_device *dev)
}
pdata->oldduplex = phydev->duplex;
}
#endif /* !OSI_STRIPPED_LIB */
if (phydev->speed != pdata->speed) {
new_state = 1;
@@ -1115,12 +1127,14 @@ static void ether_adjust_link(struct net_device *dev)
}
}
#ifndef OSI_STRIPPED_LIB
/* Configure EEE if it is enabled */
if (pdata->eee_enabled && pdata->tx_lpi_enabled) {
eee_enable = OSI_ENABLE;
}
pdata->eee_active = ether_conf_eee(pdata, eee_enable);
#endif /* !OSI_STRIPPED_LIB */
}
/**
@@ -1744,7 +1758,7 @@ static void free_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
if (rx_ring->rx_swcx != NULL) {
ether_free_rx_skbs(rx_ring->rx_swcx, pdata,
osi_dma->rx_buf_len,
osi_dma->resv_buf_virt_addr);
pdata->resv_buf_virt_addr);
kfree(rx_ring->rx_swcx);
}
@@ -1960,7 +1974,7 @@ static int ether_allocate_rx_dma_resources(struct osi_dma_priv_data *osi_dma,
for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
chan = osi_dma->dma_chans[i];
if (chan != OSI_INVALID_CHAN_NUM) {
if (chan != ETHER_INVALID_CHAN_NUM) {
ret = allocate_rx_dma_resource(osi_dma, pdata->dev,
chan);
if (ret != 0) {
@@ -2103,7 +2117,7 @@ static int ether_allocate_tx_dma_resources(struct osi_dma_priv_data *osi_dma,
for (i = 0; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
chan = osi_dma->dma_chans[i];
if (chan != OSI_INVALID_CHAN_NUM) {
if (chan != ETHER_INVALID_CHAN_NUM) {
ret = allocate_tx_dma_resource(osi_dma, dev, chan);
if (ret != 0) {
goto exit;
@@ -2143,7 +2157,7 @@ static void ether_init_invalid_chan_ring(struct osi_dma_priv_data *osi_dma)
}
for (i = osi_dma->num_dma_chans; i < OSI_MGBE_MAX_NUM_CHANS; i++) {
osi_dma->dma_chans[i] = OSI_INVALID_CHAN_NUM;
osi_dma->dma_chans[i] = ETHER_INVALID_CHAN_NUM;
}
}
@@ -2165,17 +2179,17 @@ static void free_dma_resources(struct ether_priv_data *pdata)
free_rx_dma_resources(osi_dma, pdata);
/* unmap reserved DMA*/
if (osi_dma->resv_buf_phy_addr) {
dma_unmap_single(dev, osi_dma->resv_buf_phy_addr,
if (pdata->resv_buf_phy_addr) {
dma_unmap_single(dev, pdata->resv_buf_phy_addr,
osi_dma->rx_buf_len,
DMA_FROM_DEVICE);
osi_dma->resv_buf_phy_addr = 0;
pdata->resv_buf_phy_addr = 0;
}
/* free reserve buffer */
if (osi_dma->resv_buf_virt_addr) {
dev_kfree_skb_any(osi_dma->resv_buf_virt_addr);
osi_dma->resv_buf_virt_addr = NULL;
if (pdata->resv_buf_virt_addr) {
dev_kfree_skb_any(pdata->resv_buf_virt_addr);
pdata->resv_buf_virt_addr = NULL;
}
}
@@ -2208,11 +2222,11 @@ static int ether_allocate_dma_resources(struct ether_priv_data *pdata)
goto error_alloc;
}
osi_dma->resv_buf_phy_addr = dma_map_single(pdata->dev,
skb->data,
osi_dma->rx_buf_len,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdata->dev, osi_dma->resv_buf_phy_addr)
pdata->resv_buf_phy_addr = dma_map_single(pdata->dev,
skb->data,
osi_dma->rx_buf_len,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdata->dev, pdata->resv_buf_phy_addr)
!= 0)) {
dev_err(pdata->dev, "Reserve RX skb dma map failed\n");
ret = -ENOMEM;
@@ -2230,7 +2244,7 @@ static int ether_allocate_dma_resources(struct ether_priv_data *pdata)
goto error_alloc;
}
osi_dma->resv_buf_virt_addr = (void *)skb;
pdata->resv_buf_virt_addr = (void *)skb;
return ret;
@@ -2238,12 +2252,13 @@ error_alloc:
if (skb != NULL) {
dev_kfree_skb_any(skb);
}
osi_dma->resv_buf_virt_addr = NULL;
osi_dma->resv_buf_phy_addr = 0;
pdata->resv_buf_virt_addr = NULL;
pdata->resv_buf_phy_addr = 0;
return ret;
}
#ifndef OSI_STRIPPED_LIB
/**
* @brief Initialize default EEE LPI configurations
*
@@ -2264,6 +2279,7 @@ static inline void ether_init_eee_params(struct ether_priv_data *pdata)
pdata->eee_active = OSI_DISABLE;
pdata->tx_lpi_timer = OSI_DEFAULT_TX_LPI_TIMER;
}
#endif /* !OSI_STRIPPED_LIB */
/**
* @brief function to set unicast/Broadcast MAC address filter
@@ -2515,13 +2531,6 @@ static int ether_open(struct net_device *dev)
}
}
ioctl_data.cmd = OSI_CMD_POLL_FOR_MAC_RST;
ret = osi_handle_ioctl(osi_core, &ioctl_data);
if (ret < 0) {
dev_err(&dev->dev, "failed to poll MAC Software reset\n");
goto err_poll_swr;
}
ret = ether_mdio_register(pdata);
if (ret < 0) {
dev_err(&dev->dev, "failed to register MDIO bus\n");
@@ -2602,10 +2611,12 @@ static int ether_open(struct net_device *dev)
goto err_hw_init;
}
#ifndef OSI_STRIPPED_LIB
/* As all registers reset as part of ether_close(), reset private
* structure variable as well */
pdata->vlan_hash_filtering = OSI_PERFECT_FILTER_MODE;
pdata->l2_filtering_mode = OSI_PERFECT_FILTER_MODE;
#endif /* !OSI_STRIPPED_LIB */
/* Initialize PTP */
ret = ether_ptp_init(pdata);
@@ -2628,8 +2639,10 @@ static int ether_open(struct net_device *dev)
goto err_r_irq;
}
#ifndef OSI_STRIPPED_LIB
/* Init EEE configuration */
ether_init_eee_params(pdata);
#endif /* !OSI_STRIPPED_LIB */
/* start PHY */
phy_start(pdata->phydev);
@@ -2672,7 +2685,6 @@ err_phy_init:
if (pdata->mii != NULL) {
mdiobus_unregister(pdata->mii);
}
err_poll_swr:
err_mdio_reg:
if (pdata->xpcs_rst) {
reset_control_assert(pdata->xpcs_rst);
@@ -2710,14 +2722,18 @@ err_get_sync:
static inline void ether_reset_stats(struct ether_priv_data *pdata)
{
struct osi_core_priv_data *osi_core = pdata->osi_core;
#ifndef OSI_STRIPPED_LIB
struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
#endif /* !OSI_STRIPPED_LIB */
memset(&osi_core->mmc, 0U, sizeof(struct osi_mmc_counters));
memset(&pdata->xstats, 0U,
sizeof(struct ether_xtra_stat_counters));
#ifndef OSI_STRIPPED_LIB
memset(&osi_dma->dstats, 0U,
sizeof(struct osi_xtra_dma_stat_counters));
memset(&osi_dma->pkt_err_stats, 0U, sizeof(struct osi_pkt_err_stats));
#endif /* !OSI_STRIPPED_LIB */
}
/**
@@ -3367,6 +3383,7 @@ static int ether_prepare_mc_list(struct net_device *dev,
memset(&ioctl_data->l2_filter, 0x0, sizeof(struct osi_filter));
#ifndef OSI_STRIPPED_LIB
if (pdata->l2_filtering_mode == OSI_HASH_FILTER_MODE) {
dev_err(pdata->dev,
"HASH FILTERING for mc addresses not Supported in SW\n");
@@ -3378,7 +3395,9 @@ static int ether_prepare_mc_list(struct net_device *dev,
/* address 0 is used for DUT DA so compare with
* pdata->num_mac_addr_regs - 1
*/
} else if (netdev_mc_count(dev) > (pdata->num_mac_addr_regs - 1)) {
}
#endif /* !OSI_STRIPPED_LIB */
if (netdev_mc_count(dev) > (pdata->num_mac_addr_regs - 1)) {
/* switch to PROMISCUOUS mode */
ioctl_data->l2_filter.oper_mode = (OSI_OPER_DIS_PERFECT |
OSI_OPER_EN_PROMISC |
@@ -3470,6 +3489,7 @@ static int ether_prepare_uc_list(struct net_device *dev,
memset(&ioctl_data->l2_filter, 0x0, sizeof(struct osi_filter));
#ifndef OSI_STRIPPED_LIB
if (pdata->l2_filtering_mode == OSI_HASH_FILTER_MODE) {
dev_err(pdata->dev,
"HASH FILTERING for uc addresses not Supported in SW\n");
@@ -3479,7 +3499,9 @@ static int ether_prepare_uc_list(struct net_device *dev,
OSI_OPER_DIS_ALLMULTI);
ioctl_data->cmd = OSI_CMD_L2_FILTER;
return osi_handle_ioctl(osi_core, ioctl_data);
} else if (netdev_uc_count(dev) > (pdata->num_mac_addr_regs - i)) {
}
#endif /* !OSI_STRIPPED_LIB */
if (netdev_uc_count(dev) > (pdata->num_mac_addr_regs - i)) {
/* switch to PROMISCUOUS mode */
ioctl_data->l2_filter.oper_mode = (OSI_OPER_DIS_PERFECT |
OSI_OPER_EN_PROMISC |
@@ -4016,6 +4038,7 @@ static int ether_set_features(struct net_device *ndev, netdev_features_t feat)
return ret;
}
#ifndef OSI_STRIPPED_LIB
/**
* @brief Adds VLAN ID. This function is invoked by upper
* layer when a new VLAN id is registered. This function updates the HW
@@ -4103,6 +4126,7 @@ static int ether_vlan_rx_kill_vid(struct net_device *ndev, __be16 vlan_proto,
return ret;
}
#endif /* !OSI_STRIPPED_LIB */
/**
* @brief ether_setup_tc - TC HW offload support
@@ -4157,8 +4181,10 @@ static const struct net_device_ops ether_netdev_ops = {
.ndo_select_queue = ether_select_queue,
.ndo_set_features = ether_set_features,
.ndo_set_rx_mode = ether_set_rx_mode,
#ifndef OSI_STRIPPED_LIB
.ndo_vlan_rx_add_vid = ether_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ether_vlan_rx_kill_vid,
#endif /* !OSI_STRIPPED_LIB */
.ndo_setup_tc = ether_setup_tc,
};
@@ -4631,7 +4657,7 @@ static int ether_get_mac_address(struct ether_priv_data *pdata)
* if there is MGBE controller DT node with index 8 MGBE,
* MAC address is at /chosen/nvidia,ether-mac8
*/
if ((pdata->osi_core->mac_ver > OSI_EQOS_MAC_5_10) ||
if ((pdata->osi_core->mac_ver > OSI_EQOS_MAC_5_00) ||
(pdata->osi_core->mac == OSI_MAC_HW_MGBE)) {
ret = of_property_read_u32(np, "nvidia,mac-addr-idx",
&mac_addr_idx);
@@ -5107,8 +5133,10 @@ static int ether_configure_car(struct platform_device *pdev,
struct device *dev = pdata->dev;
struct device_node *np = dev->of_node;
struct osi_core_priv_data *osi_core = pdata->osi_core;
#ifndef OSI_STRIPPED_LIB
unsigned long csr_clk_rate = 0;
struct osi_ioctl ioctl_data = {};
#endif /* !OSI_STRIPPED_LIB */
int ret = 0;
@@ -5188,6 +5216,7 @@ static int ether_configure_car(struct platform_device *pdev,
}
}
#ifndef OSI_STRIPPED_LIB
csr_clk_rate = clk_get_rate(pdata->axi_cbb_clk);
ioctl_data.cmd = OSI_CMD_MDC_CONFIG;
ioctl_data.arg5_u64 = csr_clk_rate;
@@ -5196,12 +5225,15 @@ static int ether_configure_car(struct platform_device *pdev,
dev_err(&pdev->dev, "failed to configure MDC\n");
goto err_mdc;
}
#endif /* !OSI_STRIPPED_LIB */
return ret;
#ifndef OSI_STRIPPED_LIB
err_mdc:
if (pdata->mac_rst) {
reset_control_assert(pdata->mac_rst);
}
#endif /* !OSI_STRIPPED_LIB */
err_rst:
ether_disable_clks(pdata);
err_enable_clks:
@@ -5229,7 +5261,9 @@ exit:
static int ether_init_plat_resources(struct platform_device *pdev,
struct ether_priv_data *pdata)
{
#ifndef OSI_STRIPPED_LIB
bool tegra_hypervisor_mode = is_tegra_hypervisor_mode();
#endif /* !OSI_STRIPPED_LIB */
struct osi_core_priv_data *osi_core = pdata->osi_core;
struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
struct resource *res;
@@ -5243,6 +5277,7 @@ static int ether_init_plat_resources(struct platform_device *pdev,
return PTR_ERR(osi_core->base);
}
#ifndef OSI_STRIPPED_LIB
if (!tegra_hypervisor_mode) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"hypervisor");
@@ -5262,6 +5297,7 @@ static int ether_init_plat_resources(struct platform_device *pdev,
osi_core->hv_base = NULL;
dev_dbg(&pdev->dev, "Hypervisor mode enabled\n");
}
#endif /* !OSI_STRIPPED_LIB */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"dma_base");
@@ -5564,6 +5600,7 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
dev_info(dev, "Invalid promiscuous mode - setting supported\n");
pdata->promisc_mode = OSI_ENABLE;
}
#ifndef OSI_STRIPPED_LIB
/* Read Pause frame feature support */
ret = of_property_read_u32(np, "nvidia,pause_frames",
&pdata->osi_core->pause_frames);
@@ -5572,6 +5609,7 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
" setting to default support as disable\n");
pdata->osi_core->pause_frames = OSI_PAUSE_FRAMES_DISABLE;
}
#endif /* !OSI_STRIPPED_LIB */
/* Check if IOMMU is enabled */
if (iommu_get_domain_for_dev(&pdev->dev) != NULL) {
@@ -5655,7 +5693,6 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
/* Allow to set non zero DMA channel for virtualization */
if (!ether_init_ivc(pdata)) {
osi_dma->use_virtualization = OSI_ENABLE;
osi_core->use_virtualization = OSI_ENABLE;
dev_info(dev, "Virtualization is enabled\n");
} else {
@@ -5697,6 +5734,7 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
ETHER_QUEUE_PRIO_DEFAULT, ETHER_QUEUE_PRIO_MAX,
osi_core->num_mtl_queues);
#ifndef OSI_STRIPPED_LIB
/* Read TX slot enable check array DT node */
ret = of_property_read_u32_array(np, "nvidia,slot_num_check",
tmp_value,
@@ -5731,6 +5769,7 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
}
}
}
#endif /* !OSI_STRIPPED_LIB */
/* Read Rx Queue - User priority mapping for tagged packets */
ret = of_property_read_u32_array(np, "nvidia,rx-queue-prio",
@@ -5804,12 +5843,12 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
if (ret < 0) {
osi_dma->use_tx_usecs = OSI_DISABLE;
} else {
if (osi_dma->tx_usecs > OSI_MAX_TX_COALESCE_USEC ||
osi_dma->tx_usecs < OSI_MIN_TX_COALESCE_USEC) {
if (osi_dma->tx_usecs > ETHER_MAX_TX_COALESCE_USEC ||
osi_dma->tx_usecs < ETHER_MIN_TX_COALESCE_USEC) {
dev_err(dev,
"invalid tx_riwt, must be inrange %d to %d\n",
OSI_MIN_TX_COALESCE_USEC,
OSI_MAX_TX_COALESCE_USEC);
ETHER_MIN_TX_COALESCE_USEC,
ETHER_MAX_TX_COALESCE_USEC);
return -EINVAL;
}
osi_dma->use_tx_usecs = OSI_ENABLE;
@@ -5821,10 +5860,10 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
osi_dma->use_tx_frames = OSI_DISABLE;
} else {
if (osi_dma->tx_frames > ETHER_TX_MAX_FRAME(osi_dma->tx_ring_sz) ||
osi_dma->tx_frames < OSI_MIN_TX_COALESCE_FRAMES) {
osi_dma->tx_frames < ETHER_MIN_TX_COALESCE_FRAMES) {
dev_err(dev,
"invalid tx-frames, must be inrange %d to %ld",
OSI_MIN_TX_COALESCE_FRAMES,
ETHER_MIN_TX_COALESCE_FRAMES,
ETHER_TX_MAX_FRAME(osi_dma->tx_ring_sz));
return -EINVAL;
}
@@ -5844,21 +5883,21 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
osi_dma->use_riwt = OSI_DISABLE;
} else {
if (osi_dma->mac == OSI_MAC_HW_MGBE &&
(osi_dma->rx_riwt > OSI_MAX_RX_COALESCE_USEC ||
osi_dma->rx_riwt < OSI_MGBE_MIN_RX_COALESCE_USEC)) {
(osi_dma->rx_riwt > ETHER_MAX_RX_COALESCE_USEC ||
osi_dma->rx_riwt < ETHER_MGBE_MIN_RX_COALESCE_USEC)) {
dev_err(dev,
"invalid rx_riwt, must be inrange %d to %d\n",
OSI_MGBE_MIN_RX_COALESCE_USEC,
OSI_MAX_RX_COALESCE_USEC);
ETHER_MGBE_MIN_RX_COALESCE_USEC,
ETHER_MAX_RX_COALESCE_USEC);
return -EINVAL;
} else if (osi_dma->mac == OSI_MAC_HW_EQOS &&
(osi_dma->rx_riwt > OSI_MAX_RX_COALESCE_USEC ||
(osi_dma->rx_riwt > ETHER_MAX_RX_COALESCE_USEC ||
osi_dma->rx_riwt <
OSI_EQOS_MIN_RX_COALESCE_USEC)) {
ETHER_EQOS_MIN_RX_COALESCE_USEC)) {
dev_err(dev,
"invalid rx_riwt, must be inrange %d to %d\n",
OSI_EQOS_MIN_RX_COALESCE_USEC,
OSI_MAX_RX_COALESCE_USEC);
ETHER_EQOS_MIN_RX_COALESCE_USEC,
ETHER_MAX_RX_COALESCE_USEC);
return -EINVAL;
}
@@ -5871,10 +5910,10 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
osi_dma->use_rx_frames = OSI_DISABLE;
} else {
if (osi_dma->rx_frames > osi_dma->rx_ring_sz ||
osi_dma->rx_frames < OSI_MIN_RX_COALESCE_FRAMES) {
osi_dma->rx_frames < ETHER_MIN_RX_COALESCE_FRAMES) {
dev_err(dev,
"invalid rx-frames, must be inrange %d to %d",
OSI_MIN_RX_COALESCE_FRAMES, osi_dma->rx_ring_sz);
ETHER_MIN_RX_COALESCE_FRAMES, osi_dma->rx_ring_sz);
return -EINVAL;
}
osi_dma->use_rx_frames = OSI_ENABLE;
@@ -5940,8 +5979,10 @@ static int ether_parse_dt(struct ether_priv_data *pdata)
}
}
#ifndef OSI_STRIPPED_LIB
/* Enable VLAN strip by default */
osi_core->strip_vlan_tag = OSI_ENABLE;
#endif
ret = ether_parse_phy_dt(pdata, np);
if (ret < 0) {
@@ -6122,13 +6163,13 @@ static int ether_set_dma_mask(struct ether_priv_data *pdata)
*/
if (pdata->dma_mask == DMA_MASK_NONE) {
switch (pdata->hw_feat.addr_64) {
case OSI_ADDRESS_32BIT:
case ETHER_ADDRESS_32BIT:
pdata->dma_mask = DMA_BIT_MASK(32);
break;
case OSI_ADDRESS_40BIT:
case ETHER_ADDRESS_40BIT:
pdata->dma_mask = DMA_BIT_MASK(40);
break;
case OSI_ADDRESS_48BIT:
case ETHER_ADDRESS_48BIT:
pdata->dma_mask = DMA_BIT_MASK(48);
break;
default:
@@ -6190,9 +6231,11 @@ static void ether_set_ndev_features(struct net_device *ndev,
features |= NETIF_F_HW_VLAN_CTAG_TX;
}
#ifndef OSI_STRIPPED_LIB
/* Rx VLAN tag stripping/filtering enabled by default */
features |= NETIF_F_HW_VLAN_CTAG_RX;
features |= NETIF_F_HW_VLAN_CTAG_FILTER;
#endif /* !OSI_STRIPPED_LIB */
/* Receive Hashing offload */
if (pdata->hw_feat.rss_en) {
@@ -6234,6 +6277,7 @@ static void init_filter_values(struct ether_priv_data *pdata)
}
}
#ifndef OSI_STRIPPED_LIB
/**
* @brief ether_init_rss - Init OSI RSS structure
*
@@ -6263,6 +6307,7 @@ static void ether_init_rss(struct ether_priv_data *pdata,
for (i = 0; i < OSI_RSS_MAX_TABLE_SIZE; i++)
osi_core->rss.table[i] = ethtool_rxfh_indir_default(i, num_q);
}
#endif /* !OSI_STRIPPED_LIB */
/**
* @brief Ethernet platform driver probe.
@@ -6388,30 +6433,22 @@ static int ether_probe(struct platform_device *pdev)
goto err_init_res;
}
ioctl_data.cmd = OSI_CMD_GET_MAC_VER;
ret = osi_handle_ioctl(osi_core, &ioctl_data);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get MAC version (%u)\n",
osi_core->mac_ver);
goto err_dma_mask;
}
osi_core->mac_ver = ioctl_data.arg1_u32;
ret = ether_get_mac_address(pdata);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get MAC address\n");
goto err_dma_mask;
}
ioctl_data.cmd = OSI_CMD_GET_HW_FEAT;
ret = osi_handle_ioctl(osi_core, &ioctl_data);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get HW features\n");
goto err_dma_mask;
}
osi_core->mac_ver = ioctl_data.arg1_u32;
memcpy(&pdata->hw_feat, &ioctl_data.hw_feat,
sizeof(struct osi_hw_features));
ret = ether_get_mac_address(pdata);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get MAC address\n");
goto err_dma_mask;
}
ret = ether_set_dma_mask(pdata);
if (ret < 0) {
dev_err(&pdev->dev, "failed to set dma mask\n");
@@ -6430,8 +6467,10 @@ static int ether_probe(struct platform_device *pdev)
/* Set netdev features based on hw features */
ether_set_ndev_features(ndev, pdata);
#ifndef OSI_STRIPPED_LIB
/* RSS init */
ether_init_rss(pdata, ndev->features);
#endif /* !OSI_STRIPPED_LIB */
ret = ether_get_irqs(pdev, pdata, num_dma_chans);
if (ret < 0) {
@@ -6659,13 +6698,6 @@ static int ether_resume(struct ether_priv_data *pdata)
}
}
ioctl_data.cmd = OSI_CMD_POLL_FOR_MAC_RST;
ret = osi_handle_ioctl(osi_core, &ioctl_data);
if (ret < 0) {
dev_err(dev, "failed to poll mac software reset\n");
return ret;
}
if (pdata->xpcs_rst) {
ret = reset_control_reset(pdata->xpcs_rst);
if (ret < 0) {