osi: core: Replace osd_usleep_range with osd_usleep

Bug 4921002

Change-Id: Ia12aa1fb94a2b1fbe1afd0e7da3190857479c4f9
Signed-off-by: Harsukhwinder Singh <harsukhwinde@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/3268811
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: Sanath Kumar Gampa <sgampa@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Ashutosh Jha <ajha@nvidia.com>
commit 2078f0d51e (parent 877664c2ec)
Harsukhwinder Singh, 2024-12-13 11:08:14 +00:00, committed by mobile promotions
11 changed files with 37 additions and 54 deletions

@@ -1297,8 +1297,8 @@ struct osd_core_ops {
nveul64_t loga);
/** udelay callback for sleep < 7usec as this is busy wait in most OSes */
void (*udelay)(nveu64_t usec);
-/** usleep range callback for longer sleep duration */
-void (*usleep_range)(nveu64_t umin, nveu64_t umax);
+/** usleep callback for longer sleep duration */
+void (*usleep)(nveu64_t usec);
/** ivcsend callback*/
nve32_t (*ivc_send)(void *priv, struct ivc_msg_common *ivc,
nveu32_t len);
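
The hunk above narrows the OSD sleep hook from a (min, max) range to a single duration. A minimal sketch of how a Linux OSD layer might supply the new callback is shown below; the osd_usleep wrapper body, the ether_assign_osd_ops helper, and the 10 us slack handed to the kernel's usleep_range() are illustrative assumptions, not part of this change.

#include <linux/delay.h>	/* usleep_range() */

/*
 * Illustrative OSD hook: sleep for at least 'usec' microseconds while
 * giving the scheduler a small slack window. The 10 us upper-bound
 * padding is an assumption, not taken from this commit. nveu64_t and
 * struct osi_core_priv_data come from the OSI core headers.
 */
static void osd_usleep(nveu64_t usec)
{
	usleep_range((unsigned long)usec, (unsigned long)usec + 10UL);
}

/* Hypothetical registration during driver init. */
static void ether_assign_osd_ops(struct osi_core_priv_data *osi_core)
{
	osi_core->osd_ops.usleep = osd_usleep;	/* single-duration hook declared above */
}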

@@ -932,8 +932,7 @@ static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core,
*/
once = 1U;
} else {
-osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
-MIN_USLEEP_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MIN_USLEEP_10US);
}
continue;
}
@@ -1242,8 +1241,7 @@ static nve32_t hw_est_write(struct osi_core_priv_data *osi_core,
*/
once = 1U;
} else {
-osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
-MIN_USLEEP_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MIN_USLEEP_10US);
}
continue;
}

@@ -636,7 +636,7 @@ static inline nve32_t osi_readl_poll_timeout(void *addr, struct osi_core_priv_da
once = 1U;
elapsed_delay += 1U;
} else {
-osi_core->osd_ops.usleep_range(min_delay, min_delay + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(min_delay);
elapsed_delay &= (nveu32_t)INT_MAX;
elapsed_delay += min_delay;
}

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
-/* SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION. All rights reserved.
+/* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -138,7 +138,7 @@ static nve32_t eqos_config_flow_control(
* - Refer to EQOS column of <<RM_13, (sequence diagram)>> for API details.
* - TraceID:ETHERNET_NVETHERNETRM_013
*
-* @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.usleep_range.
+* @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.usleep.
*
* @pre
* - MAC should out of reset and clocks enabled.
@@ -180,8 +180,8 @@ static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core)
value |= EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD;
osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_CRTL);
-/* 2. delay for 1 to 3 usec */
-osi_core->osd_ops.usleep_range(1, 3);
+/* 2. delay for 1 usec */
+osi_core->osd_ops.udelay(OSI_DELAY_1US);
/* 3. Set AUTO_CAL_ENABLE and AUTO_CAL_START in
* reg ETHER_QOS_AUTO_CAL_CONFIG_0.
@@ -208,7 +208,7 @@ static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core)
goto calibration_failed;
}
count++;
-osi_core->osd_ops.usleep_range(10, 12);
+osi_core->osd_ops.usleep(OSI_DELAY_10US);
value = osi_readla(osi_core, (nveu8_t *)ioaddr +
EQOS_PAD_AUTO_CAL_STAT);
/* calibration done when CAL_STAT_ACTIVE is zero */
@@ -2593,7 +2593,7 @@ static inline nve32_t poll_for_mii_idle(struct osi_core_priv_data *osi_core)
cond = COND_MET;
} else {
/* wait on GMII Busy set */
-osi_core->osd_ops.usleep_range(OSI_DELAY_10US, OSI_DELAY_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_10US);
}
}
fail:
@@ -3889,7 +3889,7 @@ static inline nve32_t poll_for_mac_tx_rx_idle(struct osi_core_priv_data *osi_cor
break;
}
/* wait */
-osi_core->osd_ops.usleep_range(OSI_DELAY_COUNT, OSI_DELAY_COUNT + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_COUNT);
retry++;
}
if (retry >= OSI_TXRX_IDLE_RETRY) {

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
-/* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION. All rights reserved.
+/* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -61,8 +61,7 @@ static nve32_t mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core)
}
/* wait for 10 usec for OB clear and retry */
-osi_core->osd_ops.usleep_range(MGBE_MAC_INDIR_AC_OB_WAIT,
-MGBE_MAC_INDIR_AC_OB_WAIT + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MGBE_MAC_INDIR_AC_OB_WAIT);
count++;
}
@@ -853,8 +852,7 @@ static nve32_t mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core)
cond = 0;
} else {
/* wait for 10 usec for XB clear */
-osi_core->osd_ops.usleep_range(MGBE_MAC_XB_WAIT,
-MGBE_MAC_XB_WAIT + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MGBE_MAC_XB_WAIT);
}
}
fail:
@@ -1846,8 +1844,7 @@ static nve32_t mgbe_rss_write_reg(struct osi_core_priv_data *osi_core,
if ((value & MGBE_MAC_RSS_ADDR_OB) == OSI_NONE) {
cond = 0;
} else {
-osi_core->osd_ops.usleep_range(OSI_DELAY_100US,
-OSI_DELAY_100US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_100US);
}
}
@@ -4044,7 +4041,7 @@ static nve32_t mgbe_mdio_busy_wait(struct osi_core_priv_data *const osi_core)
if ((mac_gmiiar & MGBE_MDIO_SCCD_SBUSY) == 0U) {
cond = 0;
} else {
-osi_core->osd_ops.usleep_range(OSI_DELAY_10US, OSI_DELAY_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_10US);
}
}
fail:

@@ -270,7 +270,7 @@ static nve32_t osi_hal_init_core_ops(struct osi_core_priv_data *const osi_core)
#ifdef OSI_DEBUG
(osi_core->osd_ops.printf == OSI_NULL) ||
#endif /* OSI_DEBUG */
-(osi_core->osd_ops.usleep_range == OSI_NULL)) {
+(osi_core->osd_ops.usleep == OSI_NULL)) {
goto exit;
}

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
-/* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION. All rights reserved.
+/* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -206,8 +206,7 @@ static inline nve32_t poll_for_vlan_filter_reg_rw(
* Use usleep instead of udelay to
* yield to other CPU users.
*/
-osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
-MIN_USLEEP_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MIN_USLEEP_10US);
}
}

@@ -74,8 +74,7 @@ static inline nve32_t xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_c
status = xpcs_read(xpcs_base, XPCS_VR_MII_AN_INTR_STS);
if ((status & XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR) == 0U) {
/* autoneg not completed - poll */
-osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
-OSI_DELAY_1000US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_1000US);
} else {
/* 15. clear interrupt */
status &= ~XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR;
@@ -167,8 +166,7 @@ static nve32_t xpcs_poll_flt_rx_link(struct osi_core_priv_data *osi_core)
osi_core->osd_ops.udelay(OSI_DELAY_1US);
once = 1U;
} else {
-osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
-OSI_DELAY_1000US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_1000US);
}
}
}
@@ -191,14 +189,13 @@ static nve32_t xpcs_poll_flt_rx_link(struct osi_core_priv_data *osi_core)
if ((ctrl & XPCS_SR_XS_PCS_STS1_FLT) == 0U) {
cond = COND_MET;
} else {
-/* Maximum wait delay as 1s */
-osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
-OSI_DELAY_1000US + MIN_USLEEP_10US);
+/* Maximum wait delay as 1ms */
+osi_core->osd_ops.usleep(OSI_DELAY_1000US);
}
}
}
/* delay 10ms to wait the staus propagate to MAC block */
-osi_core->osd_ops.usleep_range(OSI_DELAY_10000US, OSI_DELAY_10000US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_10000US);
fail:
return ret;
@@ -301,8 +298,7 @@ nve32_t xpcs_start(struct osi_core_priv_data *osi_core)
if ((ctrl & XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST) == 0U) {
cond = COND_MET;
} else {
-osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
-OSI_DELAY_1000US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_1000US);
}
}
}
@@ -524,8 +520,7 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
count++;
/* Maximum wait delay as per HW team is 1msec. */
-osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
-OSI_DELAY_1000US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_1000US);
}
}
@@ -659,7 +654,7 @@ nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
* around 14usec to satisy the condition.
* Use 200US to yield CPU for other users.
*/
-osi_core->osd_ops.usleep_range(OSI_DELAY_200US, OSI_DELAY_200US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_200US);
}
}
@@ -706,7 +701,7 @@ step10:
osi_writela(osi_core, val, (nveu8_t *)osi_core->xpcs_base +
XPCS_WRAP_UPHY_RX_CONTROL_0_0);
/* Step14: wait for 30ms */
-osi_core->osd_ops.usleep_range(OSI_DELAY_30000US, OSI_DELAY_30000US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_30000US);
/* Step15 RX_CDR_RESET */
val = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base +
@@ -716,7 +711,7 @@ step10:
XPCS_WRAP_UPHY_RX_CONTROL_0_0);
/* Step16: wait for 30ms */
-osi_core->osd_ops.usleep_range(OSI_DELAY_30000US, OSI_DELAY_30000US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_30000US);
}
if (xpcs_check_pcs_lock_status(osi_core) < 0) {
@@ -773,8 +768,7 @@ static nve32_t vendor_specifc_sw_rst_usxgmii_an_en(struct osi_core_priv_data *os
if ((ctrl & XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST) == 0U) {
cond = 0;
} else {
-osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
-OSI_DELAY_1000US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(OSI_DELAY_1000US);
}
}

@@ -297,8 +297,7 @@ static inline nve32_t xpcs_write_safety(struct osi_core_priv_data *osi_core,
*/
once = 1U;
} else {
-osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
-MIN_USLEEP_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MIN_USLEEP_10US);
}
}
}

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
-/* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION. All rights reserved.
+/* SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -109,8 +109,7 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core
*/
once = 1U;
} else {
-osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
-MIN_USLEEP_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MIN_USLEEP_10US);
}
}
err:
@@ -981,8 +980,7 @@ static inline nve32_t poll_for_kt_update(struct osi_core_priv_data *osi_core)
*/
once = 1U;
} else {
-osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
-MIN_USLEEP_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MIN_USLEEP_10US);
}
}
}
@@ -1200,8 +1198,7 @@ static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core)
*/
once = 1U;
} else {
-osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
-MIN_USLEEP_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MIN_USLEEP_10US);
}
}
}

@@ -150,8 +150,7 @@ static inline nve32_t nv_xpcs_write_safety(struct osi_core_priv_data *osi_core,
*/
once = 1U;
} else {
-osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
-MIN_USLEEP_10US + MIN_USLEEP_10US);
+osi_core->osd_ops.usleep(MIN_USLEEP_10US);
}
}
}