nvethernet: Add atomic-context support for IVC

Issue: wait_for_completion_timeout() cannot be used from atomic context.

Fix: Remove the mutex and wait-for-completion event and switch to atomic polling (readx_poll_timeout_atomic) under a raw spinlock.

Bug 3733795

Change-Id: Ieb26623ae6191d442bb2f7e2b8c6925660882cfe
Signed-off-by: Nagaraj Annaiah <nannaiah@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2759245
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Mohan Thadikamalla <mohant@nvidia.com>
Reviewed-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Ashutosh Jha <ajha@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Nagaraj Annaiah
2022-08-10 20:21:47 +00:00
committed by Revanth Kumar Uppala
parent 1cb18af637
commit 1ffcf6d286
3 changed files with 32 additions and 137 deletions

View File

@@ -1437,32 +1437,6 @@ static void ether_free_irqs(struct ether_priv_data *pdata)
} }
} }
/**
* @brief IVC ISR Routine
*
* Algorithm: IVC routine to handle common interrupt.
* 1) Verify if IVC channel is readable
* 2) Read IVC msg
* 3) Schedule ivc_work
*
* @param[in] irq: IRQ number.
* @param[in] data: Private data from ISR.
*
* @note MAC and PHY need to be initialized.
*
* @retval IRQ_HANDLED on success
* @retval IRQ_NONE on failure.
*/
static irqreturn_t ether_ivc_irq(int irq, void *data)
{
struct ether_priv_data *pdata = (struct ether_priv_data *)data;
struct ether_ivc_ctxt *ictxt = &pdata->ictxt;
complete(&ictxt->msg_complete);
return IRQ_HANDLED;
}
/** /**
* @brief Start IVC, initializes IVC. * @brief Start IVC, initializes IVC.
* *
@@ -1473,23 +1447,11 @@ static irqreturn_t ether_ivc_irq(int irq, void *data)
static void ether_start_ivc(struct ether_priv_data *pdata) static void ether_start_ivc(struct ether_priv_data *pdata)
{ {
int ret;
struct ether_ivc_ctxt *ictxt = &pdata->ictxt; struct ether_ivc_ctxt *ictxt = &pdata->ictxt;
if (ictxt->ivck != NULL && !ictxt->ivc_state) { if (ictxt->ivck != NULL && !ictxt->ivc_state) {
tegra_hv_ivc_channel_reset(ictxt->ivck); tegra_hv_ivc_channel_reset(ictxt->ivck);
ret = devm_request_irq(pdata->dev, ictxt->ivck->irq,
ether_ivc_irq,
0, dev_name(pdata->dev), pdata);
if (ret) {
dev_err(pdata->dev,
"Unable to request irq(%d)\n", ictxt->ivck->irq);
tegra_hv_ivc_unreserve(ictxt->ivck);
return;
}
ictxt->ivc_state = 1; ictxt->ivc_state = 1;
// initialize raw_spin_lock_init(&ictxt->ivck_lock);
mutex_init(&ictxt->ivck_lock);
} }
} }
@@ -1506,7 +1468,6 @@ static void ether_stop_ivc(struct ether_priv_data *pdata)
struct ether_ivc_ctxt *ictxt = &pdata->ictxt; struct ether_ivc_ctxt *ictxt = &pdata->ictxt;
if (ictxt->ivck != NULL) { if (ictxt->ivck != NULL) {
tegra_hv_ivc_unreserve(ictxt->ivck); tegra_hv_ivc_unreserve(ictxt->ivck);
devm_free_irq(pdata->dev, ictxt->ivck->irq, pdata);
ictxt->ivc_state = 0; ictxt->ivc_state = 0;
} }
} }
@@ -1562,7 +1523,6 @@ static int ether_init_ivc(struct ether_priv_data *pdata)
dev_info(dev, "Reserved IVC channel #%u - frame_size=%d irq %d\n", dev_info(dev, "Reserved IVC channel #%u - frame_size=%d irq %d\n",
id, ictxt->ivck->frame_size, ictxt->ivck->irq); id, ictxt->ivck->frame_size, ictxt->ivck->irq);
osi_core->osd_ops.ivc_send = osd_ivc_send_cmd; osi_core->osd_ops.ivc_send = osd_ivc_send_cmd;
init_completion(&ictxt->msg_complete);
ether_start_ivc(pdata); ether_start_ivc(pdata);
return 0; return 0;
} }
@@ -2836,7 +2796,6 @@ static inline void ether_delete_l2_filter(struct ether_priv_data *pdata)
if (ret < 0) { if (ret < 0) {
dev_err(pdata->dev, dev_err(pdata->dev,
"failed to delete L2 filter index = %d\n", i); "failed to delete L2 filter index = %d\n", i);
mutex_unlock(&pdata->rx_mode_lock);
return; return;
} }
} }
@@ -2970,8 +2929,6 @@ static int ether_close(struct net_device *ndev)
/* stop tx ts pending SKB workqueue and remove skb nodes */ /* stop tx ts pending SKB workqueue and remove skb nodes */
ether_flush_tx_ts_skb_list(pdata); ether_flush_tx_ts_skb_list(pdata);
cancel_work_sync(&pdata->set_rx_mode_work);
ether_stop_ivc(pdata); ether_stop_ivc(pdata);
if (pdata->xpcs_rst) { if (pdata->xpcs_rst) {
@@ -3628,24 +3585,24 @@ static int ether_prepare_uc_list(struct net_device *dev,
} }
/** /**
* @brief Work Queue function to call rx mode. * @brief This function is used to set RX mode.
* *
* @param[in] work: work structure * Algorithm: Based on Network interface flag, MAC registers are programmed to
* set mode.
*
* @param[in] dev - pointer to net_device structure.
* *
* @note MAC and PHY need to be initialized. * @note MAC and PHY need to be initialized.
*/ */
static inline void set_rx_mode_work_func(struct work_struct *work) void ether_set_rx_mode(struct net_device *dev)
{ {
struct ether_priv_data *pdata = container_of(work, struct ether_priv_data *pdata = netdev_priv(dev);
struct ether_priv_data, set_rx_mode_work);
struct osi_core_priv_data *osi_core = pdata->osi_core; struct osi_core_priv_data *osi_core = pdata->osi_core;
/* store last call last_uc_filter_index in temporary variable */ /* store last call last_uc_filter_index in temporary variable */
struct osi_ioctl ioctl_data = {}; struct osi_ioctl ioctl_data = {};
struct net_device *dev = pdata->ndev;
unsigned int mac_addr_idx = ETHER_MAC_ADDRESS_INDEX + 1U, i; unsigned int mac_addr_idx = ETHER_MAC_ADDRESS_INDEX + 1U, i;
int ret = -1; int ret = -1;
mutex_lock(&pdata->rx_mode_lock);
memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter)); memset(&ioctl_data.l2_filter, 0x0, sizeof(struct osi_filter));
if ((dev->flags & IFF_PROMISC) == IFF_PROMISC) { if ((dev->flags & IFF_PROMISC) == IFF_PROMISC) {
if (pdata->promisc_mode == OSI_ENABLE) { if (pdata->promisc_mode == OSI_ENABLE) {
@@ -3664,8 +3621,6 @@ static inline void set_rx_mode_work_func(struct work_struct *work)
dev_warn(pdata->dev, dev_warn(pdata->dev,
"Promiscuous mode not supported\n"); "Promiscuous mode not supported\n");
} }
mutex_unlock(&pdata->rx_mode_lock);
return; return;
} else if ((dev->flags & IFF_ALLMULTI) == IFF_ALLMULTI) { } else if ((dev->flags & IFF_ALLMULTI) == IFF_ALLMULTI) {
ioctl_data.l2_filter.oper_mode = (OSI_OPER_EN_ALLMULTI | ioctl_data.l2_filter.oper_mode = (OSI_OPER_EN_ALLMULTI |
@@ -3677,8 +3632,6 @@ static inline void set_rx_mode_work_func(struct work_struct *work)
if (ret < 0) { if (ret < 0) {
dev_err(pdata->dev, "Setting All Multicast allow mode failed\n"); dev_err(pdata->dev, "Setting All Multicast allow mode failed\n");
} }
mutex_unlock(&pdata->rx_mode_lock);
return; return;
} else if (!netdev_mc_empty(dev)) { } else if (!netdev_mc_empty(dev)) {
if (ether_prepare_mc_list(dev, &ioctl_data, &mac_addr_idx) != 0) { if (ether_prepare_mc_list(dev, &ioctl_data, &mac_addr_idx) != 0) {
@@ -3713,7 +3666,6 @@ static inline void set_rx_mode_work_func(struct work_struct *work)
if (ret < 0) { if (ret < 0) {
dev_err(pdata->dev, dev_err(pdata->dev,
"failed to delete L2 filter index = %d\n", i); "failed to delete L2 filter index = %d\n", i);
mutex_unlock(&pdata->rx_mode_lock);
return; return;
} }
} }
@@ -3733,28 +3685,9 @@ static inline void set_rx_mode_work_func(struct work_struct *work)
if (ret < 0) { if (ret < 0) {
dev_err(pdata->dev, "failed to set operation mode\n"); dev_err(pdata->dev, "failed to set operation mode\n");
} }
mutex_unlock(&pdata->rx_mode_lock);
return; return;
} }
/**
* @brief This function is used to set RX mode.
*
* Algorithm: Based on Network interface flag, MAC registers are programmed to
* set mode.
*
* @param[in] dev - pointer to net_device structure.
*
* @note MAC and PHY need to be initialized.
*/
void ether_set_rx_mode(struct net_device *dev)
{
struct ether_priv_data *pdata = netdev_priv(dev);
schedule_work(&pdata->set_rx_mode_work);
}
/** /**
* @brief Function to handle PHY read private IOCTL * @brief Function to handle PHY read private IOCTL
* *
@@ -6566,9 +6499,6 @@ static int ether_probe(struct platform_device *pdev)
/* Initialization of delayed workqueue for HSI error reporting */ /* Initialization of delayed workqueue for HSI error reporting */
INIT_DELAYED_WORK(&pdata->ether_hsi_work, ether_hsi_work_func); INIT_DELAYED_WORK(&pdata->ether_hsi_work, ether_hsi_work_func);
#endif #endif
mutex_init(&pdata->rx_mode_lock);
/* Initialization of delayed workqueue */
INIT_WORK(&pdata->set_rx_mode_work, set_rx_mode_work_func);
/* Initialization of set speed workqueue */ /* Initialization of set speed workqueue */
INIT_DELAYED_WORK(&pdata->set_speed_work, set_speed_work_func); INIT_DELAYED_WORK(&pdata->set_speed_work, set_speed_work_func);
osi_core->hw_feature = &pdata->hw_feat; osi_core->hw_feature = &pdata->hw_feat;

View File

@@ -206,21 +206,9 @@
#define ETHER_TX_MAX_FRAME_SIZE GSO_MAX_SIZE #define ETHER_TX_MAX_FRAME_SIZE GSO_MAX_SIZE
/** /**
* @brief IVC wait timeout. * @brief IVC wait timeout cnt in micro seconds.
*/ */
#define IVC_WAIT_TIMEOUT (msecs_to_jiffies(100)) #define IVC_WAIT_TIMEOUT_CNT 200000
/**
* @brief IVC read timeout cnt.
* used as 20*IVC_WAIT_TIMEOUT hence Max is 2 sec timeout.
*/
#define IVC_READ_TIMEOUT_CNT 20
/**
* @brief IVC channel timeout.
* Used with 1 millisec so max timeout is 50 ms.
*/
#define IVC_CHANNEL_TIMEOUT_CNT 50
/** /**
* @brief Broadcast and MAC address macros * @brief Broadcast and MAC address macros
@@ -357,11 +345,7 @@ struct ether_ivc_ctxt {
/** ivc cookie */ /** ivc cookie */
struct tegra_hv_ivc_cookie *ivck; struct tegra_hv_ivc_cookie *ivck;
/** ivc lock */ /** ivc lock */
struct mutex ivck_lock; raw_spinlock_t ivck_lock;
/** ivc work */
struct work_struct ivc_work;
/** wait for event */
struct completion msg_complete;
/** Flag to indicate ivc started or stopped */ /** Flag to indicate ivc started or stopped */
unsigned int ivc_state; unsigned int ivc_state;
}; };
@@ -525,10 +509,6 @@ struct ether_priv_data {
unsigned int promisc_mode; unsigned int promisc_mode;
/** Delayed work queue to read RMON counters periodically */ /** Delayed work queue to read RMON counters periodically */
struct delayed_work ether_stats_work; struct delayed_work ether_stats_work;
/** process rx work */
struct work_struct set_rx_mode_work;
/** rx lock */
struct mutex rx_mode_lock;
/** set speed work */ /** set speed work */
struct delayed_work set_speed_work; struct delayed_work set_speed_work;
/** Flag to check if EEE LPI is enabled for the MAC */ /** Flag to check if EEE LPI is enabled for the MAC */

View File

@@ -946,64 +946,49 @@ int osd_ivc_send_cmd(void *priv, ivc_msg_common_t *ivc_buf, unsigned int len)
struct ether_ivc_ctxt *ictxt = &pdata->ictxt; struct ether_ivc_ctxt *ictxt = &pdata->ictxt;
struct tegra_hv_ivc_cookie *ivck = struct tegra_hv_ivc_cookie *ivck =
(struct tegra_hv_ivc_cookie *) ictxt->ivck; (struct tegra_hv_ivc_cookie *) ictxt->ivck;
int dcnt = IVC_CHANNEL_TIMEOUT_CNT; int status = -1;
int is_atomic = 0; unsigned long flags = 0;
if (len > ETHER_MAX_IVC_BUF) { if (len > ETHER_MAX_IVC_BUF) {
dev_err(pdata->dev, "Invalid IVC len\n"); dev_err(pdata->dev, "Invalid IVC len\n");
return -1; return -1;
} }
ivc_buf->status = -1; ivc_buf->status = -1;
if (in_atomic()) {
preempt_enable();
is_atomic = 1;
}
mutex_lock(&ictxt->ivck_lock);
ivc_buf->count = cnt++; ivc_buf->count = cnt++;
raw_spin_lock_irqsave(&ictxt->ivck_lock, flags);
/* Waiting for the channel to be ready */ /* Waiting for the channel to be ready */
while (tegra_hv_ivc_channel_notified(ivck) != 0){ ret = readx_poll_timeout_atomic(tegra_hv_ivc_channel_notified, ivck,
osd_msleep(1); status, status == 0, 10, IVC_WAIT_TIMEOUT_CNT);
dcnt--; if (ret == -ETIMEDOUT) {
if (!dcnt) { dev_err(pdata->dev, "IVC channel timeout\n");
dev_err(pdata->dev, "IVC channel timeout\n"); goto fail;
goto fail;
}
} }
/* Write the current message for the ethernet server */ /* Write the current message for the ethernet server */
ret = tegra_hv_ivc_write(ivck, ivc_buf, len); ret = tegra_hv_ivc_write(ivck, ivc_buf, len);
if (ret != len) { if (ret != len) {
dev_err(pdata->dev, "IVC write len %d ret %d cmd %d failed\n", dev_err(pdata->dev, "IVC write with len %d ret %d cmd %d ioctlcmd %d failed\n",
len, ret, ivc_buf->cmd); len, ret, ivc_buf->cmd, ivc_buf->data.ioctl_data.cmd);
goto fail; goto fail;
} }
dcnt = IVC_READ_TIMEOUT_CNT; ret = readx_poll_timeout_atomic(tegra_hv_ivc_can_read, ictxt->ivck,
while ((!tegra_hv_ivc_can_read(ictxt->ivck))) { status, status, 10, IVC_WAIT_TIMEOUT_CNT);
if (!wait_for_completion_timeout(&ictxt->msg_complete, if (ret == -ETIMEDOUT) {
IVC_WAIT_TIMEOUT)) { dev_err(pdata->dev, "IVC read timeout status %d\n", status);
ret = -ETIMEDOUT; goto fail;
goto fail;
}
dcnt--;
if (!dcnt) {
dev_err(pdata->dev, "IVC read timeout\n");
break;
}
} }
ret = tegra_hv_ivc_read(ivck, ivc_buf, len); ret = tegra_hv_ivc_read(ivck, ivc_buf, len);
if (ret < 0) { if (ret < 0) {
dev_err(pdata->dev, "IVC read failed: %d\n", ret); dev_err(pdata->dev, "IVC read failed: %d cmd %d ioctlcmd %d\n",
ret, ivc_buf->cmd, ivc_buf->data.ioctl_data.cmd);
} }
ret = ivc_buf->status; ret = ivc_buf->status;
fail: fail:
mutex_unlock(&ictxt->ivck_lock); raw_spin_unlock_irqrestore(&ictxt->ivck_lock, flags);
if (is_atomic) {
preempt_disable();
}
return ret; return ret;
} }