mirror of git://nv-tegra.nvidia.com/kernel/nvethernetrm.git
osi: Use osd_usleep vs osd_udelay appropriately
Issue: osd_udelay is used irrespective of the duration of the delay/sleep.
In certain environments like HVRTOS, udelay is a busy loop and it starves
other tasks on the CPU.

Fix: Use udelay only for small tight checks, and use usleep for larger delays.
Bug 4676601
Change-Id: I59d9a403f34d46c6e2d17ca6f7e8a277d5283db5
Signed-off-by: Srinivas Ramachandran <srinivasra@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/3176350
(cherry picked from commit c2abe16a34af853f86fcaa4bb91b7036e2a8eb79)
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/3250083
Reviewed-by: Hareesh Kesireddy <hkesireddy@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Tested-by: Hareesh Kesireddy <hkesireddy@nvidia.com>
committed by mobile promotions
parent 8b85eca5f2
commit 1e8dfafa84
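
The pattern applied throughout the diff below: read the status register, busy-wait once optimistically, then fall back to sleeping retries. A minimal sketch of that pattern, using the macros and callbacks this change introduces (poll_bit_clear is an illustrative name, not a function in this driver):

    static nve32_t poll_bit_clear(struct osi_core_priv_data *osi_core,
                                  nveu8_t *addr, nveu32_t bit_mask,
                                  nveu32_t retries)
    {
        nveu32_t once = 0U;
        nveu32_t count = 0U;

        while (count < retries) {
            /* condition met: the polled bit cleared */
            if ((osi_readl(addr) & bit_mask) == OSI_NONE) {
                return 0;
            }
            if (once == 0U) {
                /* one optimistic busy-wait; cheap when HW responds fast */
                osi_core->osd_ops.udelay(OSI_DELAY_1US);
                once = 1U;
            } else {
                /* later retries sleep so other tasks can run; on HVRTOS
                 * udelay is a busy loop and would starve the CPU
                 */
                osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
                                               MIN_USLEEP_10US + MIN_USLEEP_10US);
            }
            count++;
        }

        return -1; /* timed out */
    }

Each poll site below is a variation of this shape; only the register, mask, delay constants, and retry budget differ.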
@@ -514,6 +514,7 @@ typedef my_lint_64 nvel64_t;
 #define OSI_DELAY_1000US 1000U
 #define OSI_DELAY_1US 1U
 #define RCHLIST_SIZE 48U
+
 /**
  * @addtogroup PTP PTP related information
  *
@@ -1260,9 +1261,9 @@ struct osd_core_ops {
     void (*ops_log)(void *priv, const nve8_t *func, nveu32_t line,
                     nveu32_t level, nveu32_t type, const nve8_t *err,
                     nveul64_t loga);
-    /** udelay callback */
+    /** udelay callback for sleep < 7usec as this is busy wait in most OSes */
     void (*udelay)(nveu64_t usec);
-    /** usleep range callback */
+    /** usleep range callback for longer sleep duration */
     void (*usleep_range)(nveu64_t umin, nveu64_t umax);
     /** ivcsend callback*/
     nve32_t (*ivc_send)(void *priv, struct ivc_msg_common *ivc,
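
On Linux, the two callbacks map naturally onto udelay() and usleep_range(); the "< 7usec" guidance in the new comment matches the usual kernel rule of thumb that sleeping primitives only pay off above roughly 10 usec. A hypothetical OSD-side wiring (the real OSD implementation lives outside this repository):

    #include <linux/delay.h>

    static void osd_udelay(nveu64_t usec)
    {
        /* busy-waits the CPU for the whole duration */
        udelay((unsigned long)usec);
    }

    static void osd_usleep_range(nveu64_t umin, nveu64_t umax)
    {
        /* sleeps; the min/max range lets the kernel coalesce wakeups */
        usleep_range((unsigned long)umin, (unsigned long)umax);
    }
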
@@ -25,47 +25,6 @@
 #include <nvethernet_type.h>
 #include <osi_common.h>
 
-/**
- * @addtogroup Generic helper MACROS
- *
- * @brief These are Generic helper macros used at various places.
- * @{
- */
-#define RETRY_COUNT 1000U
-#define COND_MET 0
-#define COND_NOT_MET 1
-#define RETRY_DELAY 1U
-/** @} */
-
-/**
- * @brief osi_readl_poll_timeout - Periodically poll an address until
- * a condition is met or a timeout occurs
- *
- * @param[in] addr: Memory mapped address.
- * @param[in] fn: function to be used.
- * @param[in] val: Variable to read the value.
- * @param[in] cond: Break condition.
- * @param[in] delay_us: Maximum time to sleep between reads in us.
- * @param[in] retry: Retry count.
-
- * @note Physical address has to be memmory mapped.
- *
- * @retval 0 on success
- * @retval -1 on failure.
- */
-#define osi_readl_poll_timeout(addr, fn, val, cond, delay_us, retry) \
-    ({ \
-        nveu32_t count = 0; \
-        while (count++ < retry) { \
-            val = osi_readl((nveu8_t *)addr); \
-            if ((cond)) { \
-                break; \
-            } \
-            fn(delay_us); \
-        } \
-        (cond) ? 0 : -1; \
-    })
-
 struct osi_core_priv_data;
 
 /**

@@ -51,7 +51,8 @@ nve32_t poll_check(struct osi_core_priv_data *const osi_core, nveu8_t *addr,
         if ((*value & bit_check) == OSI_NONE) {
             cond = COND_MET;
         } else {
-            osi_core->osd_ops.udelay(OSI_DELAY_1000US);
+            osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
+                                           OSI_DELAY_1000US + MIN_USLEEP_10US);
         }
     }
 fail:
@@ -790,7 +791,9 @@ static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core,
                                   OSI_UNUSED nveu32_t gcla, nveu32_t bunk,
                                   nveu32_t mac)
 {
-    nve32_t retry = 1000;
+    /* 1 busy wait, and the remaining retries are sleeps of granularity MIN_USLEEP_10US */
+    nveu32_t retry = (RETRY_COUNT / MIN_USLEEP_10US) + 1U;
+    nveu32_t once = 0U;
     nveu32_t val = 0U;
     nve32_t ret;
     const nveu32_t MTL_EST_GCL_CONTROL[OSI_MAX_MAC_IP_TYPES] = {
@@ -812,14 +815,24 @@ static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core,
     osi_writela(osi_core, val, (nveu8_t *)osi_core->base +
                 MTL_EST_GCL_CONTROL[mac]);
 
-    while (--retry > 0) {
+    while (retry > 0U) {
+        retry--;
         val = osi_readla(osi_core, (nveu8_t *)osi_core->base +
                          MTL_EST_GCL_CONTROL[mac]);
         if ((val & MTL_EST_SRWO) == MTL_EST_SRWO) {
+            if (once == 0U) {
+                osi_core->osd_ops.udelay(OSI_DELAY_1US);
+                /* udelay is a busy wait, so don't call it too frequently.
+                 * call it once to be optimistic, and then use usleep
+                 * with a longer timeout to yield to other CPU users.
+                 */
+                once = 1U;
+            } else {
+                osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
+                                               MIN_USLEEP_10US + MIN_USLEEP_10US);
+            }
             continue;
         }
-        osi_core->osd_ops.udelay(OSI_DELAY_1US);
-
         break;
     }
 
@@ -1086,7 +1099,9 @@ static nve32_t hw_est_write(struct osi_core_priv_data *osi_core,
                             nveu32_t addr_val, nveu32_t data,
                             nveu32_t gcla)
 {
-    nve32_t retry = 1000;
+    /* 1 busy wait, and the remaining retries are sleeps of granularity MIN_USLEEP_10US */
+    nveu32_t retry = (RETRY_COUNT / MIN_USLEEP_10US) + 1U;
+    nveu32_t once = 0U;
     nveu32_t val = 0x0;
     nve32_t ret = 0;
     const nveu32_t MTL_EST_DATA[OSI_MAX_MAC_IP_TYPES] = {
@@ -1110,11 +1125,22 @@ static nve32_t hw_est_write(struct osi_core_priv_data *osi_core,
     osi_writela(osi_core, val, (nveu8_t *)osi_core->base +
                 MTL_EST_GCL_CONTROL[osi_core->mac]);
 
-    while (--retry > 0) {
+    while (retry > 0U) {
+        retry--;
         val = osi_readla(osi_core, (nveu8_t *)osi_core->base +
                          MTL_EST_GCL_CONTROL[osi_core->mac]);
         if ((val & MTL_EST_SRWO) == MTL_EST_SRWO) {
+            if (once == 0U) {
                 osi_core->osd_ops.udelay(OSI_DELAY_1US);
+                /* udelay is a busy wait, so don't call it too frequently.
+                 * call it once to be optimistic, and then use usleep
+                 * with a longer timeout to yield to other CPU users.
+                 */
+                once = 1U;
+            } else {
+                osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
+                                               MIN_USLEEP_10US + MIN_USLEEP_10US);
+            }
             continue;
         }
 
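Worked out with the macros added in core_local.h below: RETRY_COUNT is 1000U and MIN_USLEEP_10US is 10U, so retry = (RETRY_COUNT / MIN_USLEEP_10US) + 1U = 101 iterations. One 1 usec busy-wait plus up to 100 sleeps of 10 to 20 usec keeps the worst-case wait in roughly the same 1 msec range as the old 1000-iteration udelay(1) loop, without monopolizing the CPU.
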
@@ -27,6 +27,7 @@
 #ifdef MACSEC_SUPPORT
 #include <osi_macsec.h>
 #endif /* MACSEC_SUPPORT */
+#include "common.h"
 
 /**
  * @brief Maximum number of OSI core instances.
@@ -567,4 +568,87 @@ static inline nveu64_t osi_update_stats_counter(nveu64_t last_value,
 {
     return ((last_value & (nveu64_t)OSI_LLONG_MAX) + (incr & (nveu64_t)OSI_LLONG_MAX));
 }
+
+/**
+ * @addtogroup Generic helper MACROS
+ *
+ * @brief These are Generic helper macros used at various places.
+ * @{
+ */
+/* RETRY_COUNT should be atleast MIN_USLEEP_10US
+ * so that RETRY_COUNT/MIN_USLEEP_10US will result in
+ * atleast 1 iteration.
+ */
+#define RETRY_COUNT 1000U
+#define RETRY_ONCE 1U
+#define COND_MET 0
+#define COND_NOT_MET 1
+#define RETRY_DELAY 1U
+#define OSI_DELAY_4US 4U
+#define OSI_DELAY_10US 10U
+#ifndef OSI_STRIPPED_LIB
+#define OSI_DELAY_100US 100U
+#endif
+#define OSI_DELAY_200US 200U
+#define OSI_DELAY_1000US 1000U
+#define OSI_DELAY_10000US 10000U
+#define OSI_DELAY_30000US 30000U
+/* 7usec is minimum to use usleep, anything less should use udelay, set to 10us */
+#define MIN_USLEEP_10US 10U
+
+/** @} */
+
+/** \cond DO_NOT_DOCUMENT */
+/**
+ * @brief osi_readl_poll_timeout - Periodically poll an address until
+ * a condition is met or a timeout occurs
+ *
+ * @param[in] addr: Memory mapped address.
+ * @param[in] osi_core: OSI core private data structure.
+ * @param[in] lmask: input mask to be masked against register value for poll condition.
+ * @param[in] rmask: expected output value to be compared against masked register value
+ * with lmask for poll condition.
+ * @param[in] delay_us: Maximum time to sleep between reads in us.
+ * @param[in] retry: Retry count.
+
+ * @note Physical address has to be memmory mapped.
+ *
+ * @retval 0 on success
+ * @retval -1 on failure.
+ */
+/* note: all users of osi_readl_poll_timeout are calling delay_us with 1us.
+ * if delay_us > MIN_USLEEP_10US, then min_delay can be adjusted to input param instead.
+ * currently adding this check to avoid logical dead code
+ */
+static inline nve32_t osi_readl_poll_timeout(void *addr, struct osi_core_priv_data *osi_core,
+                                             nveu32_t lmask, nveu32_t rmask, nveu32_t delay_us,
+                                             nveu32_t retry)
+{
+    nveu32_t once = 0;
+    nveu32_t total_delay = (delay_us) * (retry);
+    nveu16_t min_delay = MIN_USLEEP_10US;
+    nveu32_t elapsed_delay = 0;
+    nve32_t ret = -1;
+    nveu32_t val;
+
+    while (elapsed_delay < total_delay) {
+        val = osi_readl((nveu8_t *)addr);
+        if ((val & lmask) == rmask) {
+            ret = 0;
+            break;
+        }
+        if (once == 0U) {
+            osi_core->osd_ops.udelay(OSI_DELAY_1US);
+            once = 1U;
+            elapsed_delay += 1U;
+        } else {
+            osi_core->osd_ops.usleep_range(min_delay, min_delay + MIN_USLEEP_10US);
+            elapsed_delay &= (nveu32_t)INT_MAX;
+            elapsed_delay += min_delay;
+        }
+    }
+
+    return ret;
+}
+/** \endcond */
+
 #endif /* INCLUDED_CORE_LOCAL_H */
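
A call-site migration side by side, with the shape taken from the EQOS FRP hunk that follows; the old macro took a delay callback and a full condition expression, while the new inline helper takes the core handle plus an lmask/rmask pair:

    /* before: every retry busy-waited via the caller-supplied udelay */
    ret = osi_readl_poll_timeout((base + EQOS_MTL_RXP_CS),
                                 (osi_core->osd_ops.udelay),
                                 (val),
                                 ((val & EQOS_MTL_RXP_CS_RXPI) ==
                                  EQOS_MTL_RXP_CS_RXPI),
                                 (EQOS_MTL_FRP_READ_UDELAY),
                                 (EQOS_MTL_FRP_READ_RETRY));

    /* after: the helper busy-waits once, then sleeps between reads */
    ret = osi_readl_poll_timeout((base + EQOS_MTL_RXP_CS),
                                 osi_core,
                                 EQOS_MTL_RXP_CS_RXPI, EQOS_MTL_RXP_CS_RXPI,
                                 (EQOS_MTL_FRP_READ_UDELAY),
                                 (EQOS_MTL_FRP_READ_RETRY));
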
@@ -389,10 +389,8 @@ static nve32_t eqos_config_frp(struct osi_core_priv_data *const osi_core,
 
     /* Verify RXPI bit set in MTL_RXP_Control_Status */
     ret = osi_readl_poll_timeout((base + EQOS_MTL_RXP_CS),
-                                 (osi_core->osd_ops.udelay),
-                                 (val),
-                                 ((val & EQOS_MTL_RXP_CS_RXPI) ==
-                                  EQOS_MTL_RXP_CS_RXPI),
+                                 osi_core,
+                                 EQOS_MTL_RXP_CS_RXPI, EQOS_MTL_RXP_CS_RXPI,
                                  (EQOS_MTL_FRP_READ_UDELAY),
                                  (EQOS_MTL_FRP_READ_RETRY));
     if (ret < 0) {
@@ -481,10 +479,8 @@ static nve32_t eqos_frp_write(struct osi_core_priv_data *osi_core,
 
     /* Wait for ready */
     ret = osi_readl_poll_timeout((base + EQOS_MTL_RXP_IND_CS),
-                                 (osi_core->osd_ops.udelay),
-                                 (val),
-                                 ((val & EQOS_MTL_RXP_IND_CS_BUSY) ==
-                                  OSI_NONE),
+                                 osi_core,
+                                 EQOS_MTL_RXP_IND_CS_BUSY, OSI_NONE,
                                  (EQOS_MTL_FRP_READ_UDELAY),
                                  (EQOS_MTL_FRP_READ_RETRY));
     if (ret < 0) {
@@ -511,10 +507,8 @@ static nve32_t eqos_frp_write(struct osi_core_priv_data *osi_core,
 
     /* Wait for complete */
     ret = osi_readl_poll_timeout((base + EQOS_MTL_RXP_IND_CS),
-                                 (osi_core->osd_ops.udelay),
-                                 (val),
-                                 ((val & EQOS_MTL_RXP_IND_CS_BUSY) ==
-                                  OSI_NONE),
+                                 osi_core,
+                                 EQOS_MTL_RXP_IND_CS_BUSY, OSI_NONE,
                                  (EQOS_MTL_FRP_READ_UDELAY),
                                  (EQOS_MTL_FRP_READ_RETRY));
     if (ret < 0) {
@@ -2364,7 +2358,7 @@ static inline nve32_t eqos_poll_for_update_ts_complete(
         }
 
         count++;
-        osi_core->osd_ops.udelay(OSI_DELAY_1000US);
+        osi_core->osd_ops.usleep_range(OSI_DELAY_1000US, OSI_DELAY_1000US + MIN_USLEEP_10US);
     }
 fail:
     return ret;
@@ -2599,7 +2593,7 @@ static inline nve32_t poll_for_mii_idle(struct osi_core_priv_data *osi_core)
             cond = COND_MET;
         } else {
             /* wait on GMII Busy set */
-            osi_core->osd_ops.udelay(10U);
+            osi_core->osd_ops.usleep_range(OSI_DELAY_10US, OSI_DELAY_10US + MIN_USLEEP_10US);
         }
     }
 fail:
@@ -3895,7 +3889,7 @@ static inline nve32_t poll_for_mac_tx_rx_idle(struct osi_core_priv_data *osi_cor
             break;
         }
         /* wait */
-        osi_core->osd_ops.udelay(OSI_DELAY_COUNT);
+        osi_core->osd_ops.usleep_range(OSI_DELAY_COUNT, OSI_DELAY_COUNT + MIN_USLEEP_10US);
         retry++;
     }
     if (retry >= OSI_TXRX_IDLE_RETRY) {

@@ -61,7 +61,8 @@ static nve32_t mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core)
         }
 
         /* wait for 10 usec for OB clear and retry */
-        osi_core->osd_ops.udelay(MGBE_MAC_INDIR_AC_OB_WAIT);
+        osi_core->osd_ops.usleep_range(MGBE_MAC_INDIR_AC_OB_WAIT,
+                                       MGBE_MAC_INDIR_AC_OB_WAIT + MIN_USLEEP_10US);
         count++;
     }
 
@@ -431,12 +432,10 @@ static nve32_t mgbe_rchlist_write(struct osi_core_priv_data *osi_core,
 
     /* Wait for ready */
     ret = osi_readl_poll_timeout((base + MGBE_MTL_RXP_IND_CS),
-                                 (osi_core->osd_ops.udelay),
-                                 (val),
-                                 ((val & MGBE_MTL_RXP_IND_CS_BUSY) ==
-                                  OSI_NONE),
-                                 (MGBE_MTL_RCHlist_READ_UDELAY),
-                                 (MGBE_MTL_RCHlist_READ_RETRY));
+                                 osi_core,
+                                 MGBE_MTL_RXP_IND_CS_BUSY, OSI_NONE,
+                                 MGBE_MTL_RCHlist_READ_UDELAY,
+                                 MGBE_MTL_RCHlist_READ_RETRY);
     if (ret < 0) {
         OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
                      "Fail to read/write\n",
@@ -477,12 +476,10 @@ static nve32_t mgbe_rchlist_write(struct osi_core_priv_data *osi_core,
 
     /* Wait for complete */
     ret = osi_readl_poll_timeout((base + MGBE_MTL_RXP_IND_CS),
-                                 (osi_core->osd_ops.udelay),
-                                 (val),
-                                 ((val & MGBE_MTL_RXP_IND_CS_BUSY) ==
-                                  OSI_NONE),
-                                 (MGBE_MTL_RCHlist_READ_UDELAY),
-                                 (MGBE_MTL_RCHlist_READ_RETRY));
+                                 osi_core,
+                                 MGBE_MTL_RXP_IND_CS_BUSY, OSI_NONE,
+                                 MGBE_MTL_RCHlist_READ_UDELAY,
+                                 MGBE_MTL_RCHlist_READ_RETRY);
     if (ret < 0) {
         OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
                      "Fail to write\n",
@@ -856,7 +853,8 @@ static nve32_t mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core)
             cond = 0;
         } else {
             /* wait for 10 usec for XB clear */
-            osi_core->osd_ops.udelay(MGBE_MAC_XB_WAIT);
+            osi_core->osd_ops.usleep_range(MGBE_MAC_XB_WAIT,
+                                           MGBE_MAC_XB_WAIT + MIN_USLEEP_10US);
         }
     }
 fail:
@@ -1321,12 +1319,10 @@ static nve32_t mgbe_config_frp(struct osi_core_priv_data *const osi_core,
 
     /* Verify RXPI bit set in MTL_RXP_Control_Status */
     ret = osi_readl_poll_timeout((base + MGBE_MTL_RXP_CS),
-                                 (osi_core->osd_ops.udelay),
-                                 (val),
-                                 ((val & MGBE_MTL_RXP_CS_RXPI) ==
-                                  MGBE_MTL_RXP_CS_RXPI),
+                                 osi_core,
+                                 MGBE_MTL_RXP_CS_RXPI, MGBE_MTL_RXP_CS_RXPI,
                                  (MGBE_MTL_FRP_READ_UDELAY),
-                                 (MGBE_MTL_FRP_READ_RETRY));
+                                 MGBE_MTL_FRP_READ_RETRY);
     if (ret < 0) {
         OSI_CORE_ERR(osi_core->osd, OSI_LOG_ARG_HW_FAIL,
                      "Fail to enable FRP\n",
@@ -1349,10 +1345,8 @@ static nve32_t mgbe_config_frp(struct osi_core_priv_data *const osi_core,
 
     /* Verify RXPI bit reset in MTL_RXP_Control_Status */
     ret = osi_readl_poll_timeout((base + MGBE_MTL_RXP_CS),
-                                 (osi_core->osd_ops.udelay),
-                                 (val),
-                                 ((val & MGBE_MTL_RXP_CS_RXPI) ==
-                                  OSI_NONE),
+                                 osi_core,
+                                 MGBE_MTL_RXP_CS_RXPI, OSI_NONE,
                                  (MGBE_MTL_FRP_READ_UDELAY),
                                  (MGBE_MTL_FRP_READ_RETRY));
     if (ret < 0) {
@@ -1405,10 +1399,8 @@ static nve32_t mgbe_frp_write(struct osi_core_priv_data *osi_core,
 
     /* Wait for ready */
     ret = osi_readl_poll_timeout((base + MGBE_MTL_RXP_IND_CS),
-                                 (osi_core->osd_ops.udelay),
-                                 (val),
-                                 ((val & MGBE_MTL_RXP_IND_CS_BUSY) ==
-                                  OSI_NONE),
+                                 osi_core,
+                                 MGBE_MTL_RXP_IND_CS_BUSY, OSI_NONE,
                                  (MGBE_MTL_FRP_READ_UDELAY),
                                  (MGBE_MTL_FRP_READ_RETRY));
     if (ret < 0) {
@@ -1443,10 +1435,8 @@ static nve32_t mgbe_frp_write(struct osi_core_priv_data *osi_core,
 
     /* Wait for complete */
     ret = osi_readl_poll_timeout((base + MGBE_MTL_RXP_IND_CS),
-                                 (osi_core->osd_ops.udelay),
-                                 (val),
-                                 ((val & MGBE_MTL_RXP_IND_CS_BUSY) ==
-                                  OSI_NONE),
+                                 osi_core,
+                                 MGBE_MTL_RXP_IND_CS_BUSY, OSI_NONE,
                                  (MGBE_MTL_FRP_READ_UDELAY),
                                  (MGBE_MTL_FRP_READ_RETRY));
     if (ret < 0) {
@@ -1856,7 +1846,8 @@ static nve32_t mgbe_rss_write_reg(struct osi_core_priv_data *osi_core,
         if ((value & MGBE_MAC_RSS_ADDR_OB) == OSI_NONE) {
             cond = 0;
         } else {
-            osi_core->osd_ops.udelay(100);
+            osi_core->osd_ops.usleep_range(OSI_DELAY_100US,
+                                           OSI_DELAY_100US + MIN_USLEEP_10US);
         }
     }
 
@@ -4053,7 +4044,7 @@ static nve32_t mgbe_mdio_busy_wait(struct osi_core_priv_data *const osi_core)
         if ((mac_gmiiar & MGBE_MDIO_SCCD_SBUSY) == 0U) {
             cond = 0;
         } else {
-            osi_core->osd_ops.udelay(10U);
+            osi_core->osd_ops.usleep_range(OSI_DELAY_10US, OSI_DELAY_10US + MIN_USLEEP_10US);
         }
     }
 fail:
@@ -4572,7 +4563,8 @@ static inline nve32_t mgbe_poll_for_update_ts_complete(
         }
 
         retry++;
-        osi_core->osd_ops.udelay(OSI_DELAY_1000US);
+        osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
+                                       OSI_DELAY_1000US + MIN_USLEEP_10US);
     }
 
     return ret;

@@ -202,8 +202,12 @@ static inline nve32_t poll_for_vlan_filter_reg_rw(
             /* Set cond to 0 to exit loop */
             cond = 0;
         } else {
-            /* wait for 10 usec for XB clear */
-            osi_core->osd_ops.udelay(10U);
+            /* wait for 10 usec for XB clear.
+             * Use usleep instead of udelay to
+             * yield to other CPU users.
+             */
+            osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
+                                           MIN_USLEEP_10US + MIN_USLEEP_10US);
         }
     }
 
osi/core/xpcs.c (112 lines changed)
@@ -74,7 +74,8 @@ static inline nve32_t xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_c
         status = xpcs_read(xpcs_base, XPCS_VR_MII_AN_INTR_STS);
         if ((status & XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR) == 0U) {
             /* autoneg not completed - poll */
-            osi_core->osd_ops.udelay(1000U);
+            osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
+                                           OSI_DELAY_1000US + MIN_USLEEP_10US);
         } else {
             /* 15. clear interrupt */
             status &= ~XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR;
@@ -207,30 +208,31 @@ static nve32_t xpcs_poll_flt_rx_link(struct osi_core_priv_data *osi_core)
 {
     void *xpcs_base = osi_core->xpcs_base;
     nve32_t cond = COND_NOT_MET;
-    nveu32_t retry = RETRY_COUNT;
+    nveu32_t retry = RETRY_ONCE;
     nveu32_t count = 0;
+    nveu32_t once = 0;
     nve32_t ret = 0;
     nveu32_t ctrl = 0;
 
     /* poll for Rx link up */
     while (cond == COND_NOT_MET) {
-        if (count > retry) {
-            ret = -1;
-            goto fail;
-        }
-
-        count++;
-
         ctrl = xpcs_read(xpcs_base, XPCS_SR_XS_PCS_STS1);
         if ((ctrl & XPCS_SR_XS_PCS_STS1_RLU) == XPCS_SR_XS_PCS_STS1_RLU) {
             cond = COND_MET;
         } else {
-            /* Maximum wait delay as per HW team is 1msec.
-             * So add a loop for 1000 iterations with 1usec delay,
-             * so that if check get satisfies before 1msec will come
-             * out of loop and it can save some boot time
-             */
-            osi_core->osd_ops.udelay(1U);
+            /* Maximum wait delay as per HW team is 1msec */
+            if (count > retry) {
+                ret = -1;
+                goto fail;
+            }
+            count++;
+            if (once == 0U) {
+                osi_core->osd_ops.udelay(OSI_DELAY_1US);
+                once = 1U;
+            } else {
+                osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
+                                               OSI_DELAY_1000US + MIN_USLEEP_10US);
+            }
         }
     }
 
@@ -239,6 +241,7 @@ static nve32_t xpcs_poll_flt_rx_link(struct osi_core_priv_data *osi_core)
     /* poll for FLT bit to 0 */
     cond = COND_NOT_MET;
     count = 0;
+    retry = RETRY_COUNT;
     while (cond == COND_NOT_MET) {
         if (count > retry) {
             ret = -1;
@@ -252,12 +255,14 @@ static nve32_t xpcs_poll_flt_rx_link(struct osi_core_priv_data *osi_core)
             cond = COND_MET;
         } else {
             /* Maximum wait delay as 1s */
-            osi_core->osd_ops.udelay(1000U);
+            osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
+                                           OSI_DELAY_1000US + MIN_USLEEP_10US);
         }
     }
     }
     /* delay 10ms to wait the staus propagate to MAC block */
-    osi_core->osd_ops.udelay(10000U);
+    osi_core->osd_ops.usleep_range(OSI_DELAY_10000US, OSI_DELAY_10000US + MIN_USLEEP_10US);
+
 fail:
     return ret;
 }
@@ -359,7 +364,8 @@ nve32_t xpcs_start(struct osi_core_priv_data *osi_core)
             if ((ctrl & XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST) == 0U) {
                 cond = COND_MET;
             } else {
-                osi_core->osd_ops.udelay(1000U);
+                osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
+                                               OSI_DELAY_1000US + MIN_USLEEP_10US);
             }
         }
     }
@@ -472,11 +478,12 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
                                        nveu32_t lane_init_en)
 {
     void *xpcs_base = osi_core->xpcs_base;
-    nveu32_t retry = 5U;
+    nveu32_t retry = RETRY_ONCE;
     nve32_t cond = COND_NOT_MET;
     nveu32_t val = 0;
     nveu32_t count;
     nve32_t ret = 0;
+    nveu32_t once = 0;
     nveu64_t retry_delay = 1U;
     const nveu32_t uphy_status_reg[OSI_MAX_MAC_IP_TYPES] = {
         EQOS_XPCS_WRAP_UPHY_STATUS,
@@ -513,12 +520,6 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
 
     count = 0;
     while (cond == COND_NOT_MET) {
-        if (count > retry) {
-            ret = -1;
-            goto done;
-        }
-        count++;
-
         val = osi_readla(osi_core,
                          (nveu8_t *)xpcs_base +
                          uphy_init_ctrl_reg[osi_core->mac]);
@@ -526,11 +527,21 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
             /* exit loop */
             cond = COND_MET;
         } else {
+            if (count > retry) {
+                ret = -1;
+                goto done;
+            }
+            count++;
             /* Max wait time is 1usec.
              * Most of the time loop got exited in first iteration.
              * but added an extra count of 4 for safer side
              */
-            osi_core->osd_ops.udelay(retry_delay);
+            if (once == 0U) {
+                osi_core->osd_ops.udelay(OSI_DELAY_1US);
+                once = 1U;
+            } else {
+                osi_core->osd_ops.udelay(OSI_DELAY_4US);
+            }
         }
     }
 }
@@ -552,7 +563,7 @@ done:
 static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
 {
     void *xpcs_base = osi_core->xpcs_base;
-    nveu32_t retry = RETRY_COUNT;
+    nveu32_t retry = RETRY_ONCE;
     nve32_t cond = COND_NOT_MET;
     nveu32_t val = 0;
     nveu32_t count;
@@ -565,12 +576,6 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
 
     count = 0;
     while (cond == COND_NOT_MET) {
-        if (count > retry) {
-            ret = -1;
-            goto fail;
-        }
-        count++;
-
         val = osi_readla(osi_core,
                          (nveu8_t *)xpcs_base +
                          uphy_irq_sts_reg[osi_core->mac]);
@@ -579,12 +584,15 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
             /* exit loop */
             cond = COND_MET;
         } else {
-            /* Maximum wait delay as per HW team is 1msec.
-             * So add a loop for 1000 iterations with 1usec delay,
-             * so that if check get satisfies before 1msec will come
-             * out of loop and it can save some boot time
-             */
-            osi_core->osd_ops.udelay(1U);
+            if (count >= retry) {
+                ret = -1;
+                goto fail;
+            }
+            count++;
+
+            /* Maximum wait delay as per HW team is 1msec. */
+            osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
+                                           OSI_DELAY_1000US + MIN_USLEEP_10US);
         }
     }
 
@@ -693,25 +701,24 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
         cond = COND_NOT_MET;
         count = 0;
         while (cond == COND_NOT_MET) {
-            if (count > retry) {
-                ret = -1;
-                goto fail;
-            }
-            count++;
             val = osi_readla(osi_core,
                              (nveu8_t *)osi_core->xpcs_base +
                              XPCS_WRAP_UPHY_RX_CONTROL_0_0);
             if ((val & XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN) == 0U) {
                 cond = COND_MET;
             } else {
+                if (count > retry) {
+                    ret = -1;
+                    goto fail;
+                }
+                count++;
+
                 /* Maximum wait delay as per HW team is 100 usec.
                  * But most of the time as per experiments it takes
-                 * around 14usec to satisy the condition, so add a
-                 * minimum delay of 14usec and loop it for 7times.
-                 * With this 14usec delay condition gets satifies
-                 * in first iteration itself.
+                 * around 14usec to satisy the condition.
+                 * Use 200US to yield CPU for other users.
                  */
-                osi_core->osd_ops.udelay(200U);
+                osi_core->osd_ops.usleep_range(OSI_DELAY_200US, OSI_DELAY_200US + MIN_USLEEP_10US);
             }
         }
 
@@ -758,7 +765,7 @@ step10:
     osi_writela(osi_core, val, (nveu8_t *)osi_core->xpcs_base +
                 XPCS_WRAP_UPHY_RX_CONTROL_0_0);
     /* Step14: wait for 30ms */
-    osi_core->osd_ops.udelay(30000U);
+    osi_core->osd_ops.usleep_range(OSI_DELAY_30000US, OSI_DELAY_30000US + MIN_USLEEP_10US);
 
     /* Step15 RX_CDR_RESET */
     val = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base +
@@ -768,7 +775,7 @@ step10:
                 XPCS_WRAP_UPHY_RX_CONTROL_0_0);
 
     /* Step16: wait for 30ms */
-    osi_core->osd_ops.udelay(30000U);
+    osi_core->osd_ops.usleep_range(OSI_DELAY_30000US, OSI_DELAY_30000US + MIN_USLEEP_10US);
     }
 
     if (xpcs_check_pcs_lock_status(osi_core) < 0) {
@@ -945,7 +952,8 @@ static nve32_t vendor_specifc_sw_rst_usxgmii_an_en(struct osi_core_priv_data *os
         if ((ctrl & XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST) == 0U) {
             cond = 0;
         } else {
-            osi_core->osd_ops.udelay(1000U);
+            osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
+                                           OSI_DELAY_1000US + MIN_USLEEP_10US);
         }
     }
 
|
|||||||
#ifndef INCLUDED_XPCS_H_
|
#ifndef INCLUDED_XPCS_H_
|
||||||
#define INCLUDED_XPCS_H_
|
#define INCLUDED_XPCS_H_
|
||||||
|
|
||||||
#include "common.h"
|
#include "core_local.h"
|
||||||
#include <osi_core.h>
|
#include <osi_core.h>
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -294,17 +294,36 @@ static inline nve32_t xpcs_write_safety(struct osi_core_priv_data *osi_core,
|
|||||||
{
|
{
|
||||||
void *xpcs_base = osi_core->xpcs_base;
|
void *xpcs_base = osi_core->xpcs_base;
|
||||||
nveu32_t read_val;
|
nveu32_t read_val;
|
||||||
nve32_t retry = 10;
|
/* 1 busy wait, and the remaining retries are sleeps of granularity MIN_USLEEP_10US */
|
||||||
|
nveu32_t retry = RETRY_ONCE;
|
||||||
|
nveu32_t count = 0;
|
||||||
|
nveu32_t once = 0U;
|
||||||
nve32_t ret = XPCS_WRITE_FAIL_CODE;
|
nve32_t ret = XPCS_WRITE_FAIL_CODE;
|
||||||
|
nve32_t cond = COND_NOT_MET;
|
||||||
|
|
||||||
while (--retry > 0) {
|
while (cond == COND_NOT_MET) {
|
||||||
xpcs_write(xpcs_base, reg_addr, val);
|
xpcs_write(xpcs_base, reg_addr, val);
|
||||||
read_val = xpcs_read(xpcs_base, reg_addr);
|
read_val = xpcs_read(xpcs_base, reg_addr);
|
||||||
if (val == read_val) {
|
if (val == read_val) {
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
cond = COND_MET;
|
||||||
|
} else {
|
||||||
|
if (count > retry) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
count++;
|
||||||
|
if (once == 0U) {
|
||||||
osi_core->osd_ops.udelay(OSI_DELAY_1US);
|
osi_core->osd_ops.udelay(OSI_DELAY_1US);
|
||||||
|
/* udelay is a busy wait, so don't call it too frequently.
|
||||||
|
* call it once to be optimistic, and then use usleep with
|
||||||
|
* a longer timeout to yield to other CPU users.
|
||||||
|
*/
|
||||||
|
once = 1U;
|
||||||
|
} else {
|
||||||
|
osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
|
||||||
|
MIN_USLEEP_10US + MIN_USLEEP_10US);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef OSI_STRIPPED_LIB
|
#ifndef OSI_STRIPPED_LIB
|
||||||
|
|||||||
@@ -71,7 +71,8 @@ static nve32_t add_dummy_sc(struct osi_core_priv_data *const osi_core,
  */
 static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core)
 {
-    nveu32_t retry = RETRY_COUNT;
+    /* 1 busy wait, and the remaining retries are sleeps of granularity MIN_USLEEP_10US */
+    nveu32_t retry = (RETRY_COUNT / MIN_USLEEP_10US) + 1U;
     nveu32_t dbg_buf_config;
     nve32_t cond = COND_NOT_MET;
     nve32_t ret = 0;
@@ -80,6 +81,7 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core
         MACSEC_DEBUG_BUF_CONFIG_0,
         MACSEC_DEBUG_BUF_CONFIG_0_T26X
     };
+    nveu32_t once = 0U;
 
     count = 0;
     while (cond == COND_NOT_MET) {
@@ -99,7 +101,17 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core
 
         count++;
         /* wait on UPDATE bit to reset */
+        if (once == 0U) {
             osi_core->osd_ops.udelay(RETRY_DELAY);
+            /* udelay is a busy wait, so don't call it too frequently.
+             * call it once to be optimistic, and then use usleep
+             * with a longer timeout to yield to other CPU users.
+             */
+            once = 1U;
+        } else {
+            osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
+                                           MIN_USLEEP_10US + MIN_USLEEP_10US);
+        }
     }
 err:
     return ret;
@@ -934,10 +946,12 @@ exit:
 static inline nve32_t poll_for_kt_update(struct osi_core_priv_data *osi_core)
 {
     /* half sec timeout */
-    nveu32_t retry = RETRY_COUNT;
+    /* 1 busy wait, and the remaining retries are sleeps of granularity MIN_USLEEP_10US */
+    nveu32_t retry = (RETRY_COUNT / MIN_USLEEP_10US) + 1U;
     nveu32_t kt_config;
     nveu32_t count;
     nve32_t cond = 1;
+    nveu32_t once = 0U;
 
     count = 0;
     while (cond == 1) {
@@ -959,7 +973,17 @@ static inline nve32_t poll_for_kt_update(struct osi_core_priv_data *osi_core)
             cond = 0;
         } else {
             /* wait on UPDATE bit to reset */
+            if (once == 0U) {
                 osi_core->osd_ops.udelay(RETRY_DELAY);
+                /* udelay is a busy wait, so don't call it too frequently.
+                 * call it once to be optimistic, and then use usleep
+                 * with a longer timeout to yield to other CPU users.
+                 */
+                once = 1U;
+            } else {
+                osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
+                                               MIN_USLEEP_10US + MIN_USLEEP_10US);
+            }
         }
     }
 
@@ -1139,11 +1163,13 @@ err:
 static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core)
 {
     /* half sec timeout */
-    nveu32_t retry = RETRY_COUNT;
+    /* 1 busy wait, and the remaining retries are sleeps of granularity MIN_USLEEP_10US */
+    nveu32_t retry = (RETRY_COUNT / MIN_USLEEP_10US) + 1U;
     nveu32_t lut_config;
     nveu32_t count;
     nve32_t cond = 1;
    nve32_t ret = 0;
+    nveu32_t once = 0U;
 
     count = 0;
     while (cond == 1) {
@@ -1166,7 +1192,17 @@ static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core)
             cond = 0;
         } else {
             /* wait on UPDATE bit to reset */
+            if (once == 0U) {
                 osi_core->osd_ops.udelay(RETRY_DELAY);
+                /* udelay is a busy wait, so don't call it too frequently.
+                 * call it once to be optimistic, and then use usleep
+                 * with a longer timeout to yield to other CPU users.
+                 */
+                once = 1U;
+            } else {
+                osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
+                                               MIN_USLEEP_10US + MIN_USLEEP_10US);
+            }
         }
     }
 exit:

@@ -547,6 +547,7 @@
 #define COND_MET 0
 #define COND_NOT_MET 1
 #define RETRY_DELAY 1U
+#define MIN_USLEEP_10US 10U
 /** @} */
 
 /**