osi: xpcs: add delays as per HW team suggestion

Issue: Random delays were added in xpcs_start and xpcs_init,
which can lead to an increase in boot time

Fix: Instead of adding random delays and retries, add the
exact delays suggested by the HW team

Bug 3806700

Change-Id: If6f781d86c7de4019883e4b02dc89b2d04ecc768
Signed-off-by: Narayan Reddy <narayanr@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/2826256
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-by: Zuyu Liao <zuyul@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Srinivas Ramachandran <srinivasra@nvidia.com>
Tested-by: Zuyu Liao <zuyul@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Narayan Reddy
2022-12-12 13:23:51 +00:00
committed by mobile promotions
parent a114ece241
commit ddd0ab4e61
2 changed files with 28 additions and 16 deletions

View File

@@ -216,7 +216,12 @@ nve32_t xpcs_start(struct osi_core_priv_data *osi_core)
XPCS_SR_XS_PCS_STS1_RLU) {
cond = COND_MET;
} else {
osi_core->osd_ops.udelay(1000U);
/* Maximum wait delay as per the HW team is 1msec.
 * So loop for 1000 iterations with a 1usec delay each,
 * so that if the condition is satisfied before 1msec the
 * loop exits early and some boot time is saved.
 */
osi_core->osd_ops.udelay(1U);
}
}
fail:
@@ -239,7 +244,7 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
nveu32_t lane_init_en)
{
void *xpcs_base = osi_core->xpcs_base;
nveu32_t retry = XPCS_RETRY_COUNT;
nveu32_t retry = 5U;
nve32_t cond = COND_NOT_MET;
nveu32_t val = 0;
nveu32_t count;
@@ -269,7 +274,11 @@ static nve32_t xpcs_uphy_lane_bring_up(struct osi_core_priv_data *osi_core,
/* exit loop */
cond = COND_MET;
} else {
osi_core->osd_ops.udelay(5U);
/* Max wait time is 1usec.
 * Most of the time the loop exits in the first iteration,
 * but an extra count of 4 is added to be on the safer side.
 */
osi_core->osd_ops.udelay(1U);
}
}
}
@@ -291,7 +300,7 @@ fail:
static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
{
void *xpcs_base = osi_core->xpcs_base;
nveu32_t retry = XPCS_RETRY_COUNT;
nveu32_t retry = RETRY_COUNT;
nve32_t cond = COND_NOT_MET;
nveu32_t val = 0;
nveu32_t count;
@@ -312,7 +321,12 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
/* exit loop */
cond = COND_MET;
} else {
osi_core->osd_ops.udelay(5U);
/* Maximum wait delay as per the HW team is 1msec.
 * So loop for 1000 iterations with a 1usec delay each,
 * so that if the condition is satisfied before 1msec the
 * loop exits early and some boot time is saved.
 */
osi_core->osd_ops.udelay(1U);
}
}
@@ -336,7 +350,7 @@ fail:
static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
{
struct core_local *l_core = (struct core_local *)(void *)osi_core;
nveu32_t retry = 1000;
nveu32_t retry = 7U;
nveu32_t count;
nveu32_t val = 0;
nve32_t cond;
@@ -412,7 +426,14 @@ static nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
if ((val & XPCS_WRAP_UPHY_RX_CONTROL_0_0_RX_CAL_EN) == 0U) {
cond = COND_MET;
} else {
osi_core->osd_ops.udelay(1000U);
/* Maximum wait delay as per the HW team is 100 usec.
 * But as per experiments it typically takes around 14usec
 * for the condition to be satisfied, so use a minimum
 * delay of 14usec and loop 7 times. With this 14usec
 * delay the condition is usually satisfied in the first
 * iteration itself.
 */
osi_core->osd_ops.udelay(14U);
}
}

View File

@@ -26,15 +26,6 @@
#include "../osi/common/common.h"
#include <osi_core.h>
/**
* @addtogroup XPCS helper macros
*
* @brief XPCS helper macros.
* @{
*/
#define XPCS_RETRY_COUNT (RETRY_COUNT * (2U))
/** @} */
/**
* @addtogroup XPCS Register offsets
*