mirror of
git://nv-tegra.nvidia.com/kernel/nvethernetrm.git
synced 2025-12-24 10:34:24 +03:00
osi: core: Replace osd_usleep_range with osd_usleep
Bug 4921002 Change-Id: Ia12aa1fb94a2b1fbe1afd0e7da3190857479c4f9 Signed-off-by: Harsukhwinder Singh <harsukhwinde@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/c/kernel/nvethernetrm/+/3268811 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Reviewed-by: svcacv <svcacv@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com> Reviewed-by: Sanath Kumar Gampa <sgampa@nvidia.com> GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com> Reviewed-by: Ashutosh Jha <ajha@nvidia.com>
This commit is contained in:
committed by
mobile promotions
parent
877664c2ec
commit
2078f0d51e
@@ -1297,8 +1297,8 @@ struct osd_core_ops {
|
|||||||
nveul64_t loga);
|
nveul64_t loga);
|
||||||
/** udelay callback for sleep < 7usec as this is busy wait in most OSes */
|
/** udelay callback for sleep < 7usec as this is busy wait in most OSes */
|
||||||
void (*udelay)(nveu64_t usec);
|
void (*udelay)(nveu64_t usec);
|
||||||
/** usleep range callback for longer sleep duration */
|
/** usleep callback for longer sleep duration */
|
||||||
void (*usleep_range)(nveu64_t umin, nveu64_t umax);
|
void (*usleep)(nveu64_t usec);
|
||||||
/** ivcsend callback*/
|
/** ivcsend callback*/
|
||||||
nve32_t (*ivc_send)(void *priv, struct ivc_msg_common *ivc,
|
nve32_t (*ivc_send)(void *priv, struct ivc_msg_common *ivc,
|
||||||
nveu32_t len);
|
nveu32_t len);
|
||||||
|
|||||||
@@ -932,8 +932,7 @@ static inline nve32_t hw_est_read(struct osi_core_priv_data *osi_core,
|
|||||||
*/
|
*/
|
||||||
once = 1U;
|
once = 1U;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
|
osi_core->osd_ops.usleep(MIN_USLEEP_10US);
|
||||||
MIN_USLEEP_10US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
@@ -1242,8 +1241,7 @@ static nve32_t hw_est_write(struct osi_core_priv_data *osi_core,
|
|||||||
*/
|
*/
|
||||||
once = 1U;
|
once = 1U;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
|
osi_core->osd_ops.usleep(MIN_USLEEP_10US);
|
||||||
MIN_USLEEP_10US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -636,7 +636,7 @@ static inline nve32_t osi_readl_poll_timeout(void *addr, struct osi_core_priv_da
|
|||||||
once = 1U;
|
once = 1U;
|
||||||
elapsed_delay += 1U;
|
elapsed_delay += 1U;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(min_delay, min_delay + MIN_USLEEP_10US);
|
osi_core->osd_ops.usleep(min_delay);
|
||||||
elapsed_delay &= (nveu32_t)INT_MAX;
|
elapsed_delay &= (nveu32_t)INT_MAX;
|
||||||
elapsed_delay += min_delay;
|
elapsed_delay += min_delay;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
|
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
|
||||||
/* SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION. All rights reserved.
|
/* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -138,7 +138,7 @@ static nve32_t eqos_config_flow_control(
|
|||||||
* - Refer to EQOS column of <<RM_13, (sequence diagram)>> for API details.
|
* - Refer to EQOS column of <<RM_13, (sequence diagram)>> for API details.
|
||||||
* - TraceID:ETHERNET_NVETHERNETRM_013
|
* - TraceID:ETHERNET_NVETHERNETRM_013
|
||||||
*
|
*
|
||||||
* @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.usleep_range.
|
* @param[in] osi_core: OSI core private data structure. Used param is base, osd_ops.usleep.
|
||||||
*
|
*
|
||||||
* @pre
|
* @pre
|
||||||
* - MAC should be out of reset and clocks enabled.
|
* - MAC should be out of reset and clocks enabled.
|
||||||
@@ -180,8 +180,8 @@ static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core)
|
|||||||
value |= EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD;
|
value |= EQOS_PAD_CRTL_E_INPUT_OR_E_PWRD;
|
||||||
osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_CRTL);
|
osi_writela(osi_core, value, (nveu8_t *)ioaddr + EQOS_PAD_CRTL);
|
||||||
|
|
||||||
/* 2. delay for 1 to 3 usec */
|
/* 2. delay for 1 usec */
|
||||||
osi_core->osd_ops.usleep_range(1, 3);
|
osi_core->osd_ops.udelay(OSI_DELAY_1US);
|
||||||
|
|
||||||
/* 3. Set AUTO_CAL_ENABLE and AUTO_CAL_START in
|
/* 3. Set AUTO_CAL_ENABLE and AUTO_CAL_START in
|
||||||
* reg ETHER_QOS_AUTO_CAL_CONFIG_0.
|
* reg ETHER_QOS_AUTO_CAL_CONFIG_0.
|
||||||
@@ -208,7 +208,7 @@ static nve32_t eqos_pad_calibrate(struct osi_core_priv_data *const osi_core)
|
|||||||
goto calibration_failed;
|
goto calibration_failed;
|
||||||
}
|
}
|
||||||
count++;
|
count++;
|
||||||
osi_core->osd_ops.usleep_range(10, 12);
|
osi_core->osd_ops.usleep(OSI_DELAY_10US);
|
||||||
value = osi_readla(osi_core, (nveu8_t *)ioaddr +
|
value = osi_readla(osi_core, (nveu8_t *)ioaddr +
|
||||||
EQOS_PAD_AUTO_CAL_STAT);
|
EQOS_PAD_AUTO_CAL_STAT);
|
||||||
/* calibration done when CAL_STAT_ACTIVE is zero */
|
/* calibration done when CAL_STAT_ACTIVE is zero */
|
||||||
@@ -2593,7 +2593,7 @@ static inline nve32_t poll_for_mii_idle(struct osi_core_priv_data *osi_core)
|
|||||||
cond = COND_MET;
|
cond = COND_MET;
|
||||||
} else {
|
} else {
|
||||||
/* wait on GMII Busy set */
|
/* wait on GMII Busy set */
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_10US, OSI_DELAY_10US + MIN_USLEEP_10US);
|
osi_core->osd_ops.usleep(OSI_DELAY_10US);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fail:
|
fail:
|
||||||
@@ -3889,7 +3889,7 @@ static inline nve32_t poll_for_mac_tx_rx_idle(struct osi_core_priv_data *osi_cor
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
/* wait */
|
/* wait */
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_COUNT, OSI_DELAY_COUNT + MIN_USLEEP_10US);
|
osi_core->osd_ops.usleep(OSI_DELAY_COUNT);
|
||||||
retry++;
|
retry++;
|
||||||
}
|
}
|
||||||
if (retry >= OSI_TXRX_IDLE_RETRY) {
|
if (retry >= OSI_TXRX_IDLE_RETRY) {
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
|
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
|
||||||
/* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION. All rights reserved.
|
/* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -61,8 +61,7 @@ static nve32_t mgbe_poll_for_mac_acrtl(struct osi_core_priv_data *osi_core)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* wait for 10 usec for OB clear and retry */
|
/* wait for 10 usec for OB clear and retry */
|
||||||
osi_core->osd_ops.usleep_range(MGBE_MAC_INDIR_AC_OB_WAIT,
|
osi_core->osd_ops.usleep(MGBE_MAC_INDIR_AC_OB_WAIT);
|
||||||
MGBE_MAC_INDIR_AC_OB_WAIT + MIN_USLEEP_10US);
|
|
||||||
count++;
|
count++;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -853,8 +852,7 @@ static nve32_t mgbe_poll_for_l3l4crtl(struct osi_core_priv_data *osi_core)
|
|||||||
cond = 0;
|
cond = 0;
|
||||||
} else {
|
} else {
|
||||||
/* wait for 10 usec for XB clear */
|
/* wait for 10 usec for XB clear */
|
||||||
osi_core->osd_ops.usleep_range(MGBE_MAC_XB_WAIT,
|
osi_core->osd_ops.usleep(MGBE_MAC_XB_WAIT);
|
||||||
MGBE_MAC_XB_WAIT + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fail:
|
fail:
|
||||||
@@ -1846,8 +1844,7 @@ static nve32_t mgbe_rss_write_reg(struct osi_core_priv_data *osi_core,
|
|||||||
if ((value & MGBE_MAC_RSS_ADDR_OB) == OSI_NONE) {
|
if ((value & MGBE_MAC_RSS_ADDR_OB) == OSI_NONE) {
|
||||||
cond = 0;
|
cond = 0;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_100US,
|
osi_core->osd_ops.usleep(OSI_DELAY_100US);
|
||||||
OSI_DELAY_100US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -4044,7 +4041,7 @@ static nve32_t mgbe_mdio_busy_wait(struct osi_core_priv_data *const osi_core)
|
|||||||
if ((mac_gmiiar & MGBE_MDIO_SCCD_SBUSY) == 0U) {
|
if ((mac_gmiiar & MGBE_MDIO_SCCD_SBUSY) == 0U) {
|
||||||
cond = 0;
|
cond = 0;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_10US, OSI_DELAY_10US + MIN_USLEEP_10US);
|
osi_core->osd_ops.usleep(OSI_DELAY_10US);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fail:
|
fail:
|
||||||
|
|||||||
@@ -270,7 +270,7 @@ static nve32_t osi_hal_init_core_ops(struct osi_core_priv_data *const osi_core)
|
|||||||
#ifdef OSI_DEBUG
|
#ifdef OSI_DEBUG
|
||||||
(osi_core->osd_ops.printf == OSI_NULL) ||
|
(osi_core->osd_ops.printf == OSI_NULL) ||
|
||||||
#endif /* OSI_DEBUG */
|
#endif /* OSI_DEBUG */
|
||||||
(osi_core->osd_ops.usleep_range == OSI_NULL)) {
|
(osi_core->osd_ops.usleep == OSI_NULL)) {
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
|
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
|
||||||
/* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION. All rights reserved.
|
/* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -206,8 +206,7 @@ static inline nve32_t poll_for_vlan_filter_reg_rw(
|
|||||||
* Use usleep instead of udelay to
|
* Use usleep instead of udelay to
|
||||||
* yield to other CPU users.
|
* yield to other CPU users.
|
||||||
*/
|
*/
|
||||||
osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
|
osi_core->osd_ops.usleep(MIN_USLEEP_10US);
|
||||||
MIN_USLEEP_10US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -74,8 +74,7 @@ static inline nve32_t xpcs_poll_for_an_complete(struct osi_core_priv_data *osi_c
|
|||||||
status = xpcs_read(xpcs_base, XPCS_VR_MII_AN_INTR_STS);
|
status = xpcs_read(xpcs_base, XPCS_VR_MII_AN_INTR_STS);
|
||||||
if ((status & XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR) == 0U) {
|
if ((status & XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR) == 0U) {
|
||||||
/* autoneg not completed - poll */
|
/* autoneg not completed - poll */
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
|
osi_core->osd_ops.usleep(OSI_DELAY_1000US);
|
||||||
OSI_DELAY_1000US + MIN_USLEEP_10US);
|
|
||||||
} else {
|
} else {
|
||||||
/* 15. clear interrupt */
|
/* 15. clear interrupt */
|
||||||
status &= ~XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR;
|
status &= ~XPCS_VR_MII_AN_INTR_STS_CL37_ANCMPLT_INTR;
|
||||||
@@ -167,8 +166,7 @@ static nve32_t xpcs_poll_flt_rx_link(struct osi_core_priv_data *osi_core)
|
|||||||
osi_core->osd_ops.udelay(OSI_DELAY_1US);
|
osi_core->osd_ops.udelay(OSI_DELAY_1US);
|
||||||
once = 1U;
|
once = 1U;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
|
osi_core->osd_ops.usleep(OSI_DELAY_1000US);
|
||||||
OSI_DELAY_1000US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -191,14 +189,13 @@ static nve32_t xpcs_poll_flt_rx_link(struct osi_core_priv_data *osi_core)
|
|||||||
if ((ctrl & XPCS_SR_XS_PCS_STS1_FLT) == 0U) {
|
if ((ctrl & XPCS_SR_XS_PCS_STS1_FLT) == 0U) {
|
||||||
cond = COND_MET;
|
cond = COND_MET;
|
||||||
} else {
|
} else {
|
||||||
/* Maximum wait delay as 1s */
|
/* Maximum wait delay as 1ms */
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
|
osi_core->osd_ops.usleep(OSI_DELAY_1000US);
|
||||||
OSI_DELAY_1000US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
/* delay 10ms to wait for the status to propagate to the MAC block */
|
/* delay 10ms to wait for the status to propagate to the MAC block */
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_10000US, OSI_DELAY_10000US + MIN_USLEEP_10US);
|
osi_core->osd_ops.usleep(OSI_DELAY_10000US);
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
return ret;
|
return ret;
|
||||||
@@ -301,8 +298,7 @@ nve32_t xpcs_start(struct osi_core_priv_data *osi_core)
|
|||||||
if ((ctrl & XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST) == 0U) {
|
if ((ctrl & XPCS_VR_XS_PCS_DIG_CTRL1_USRA_RST) == 0U) {
|
||||||
cond = COND_MET;
|
cond = COND_MET;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
|
osi_core->osd_ops.usleep(OSI_DELAY_1000US);
|
||||||
OSI_DELAY_1000US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -524,8 +520,7 @@ static nve32_t xpcs_check_pcs_lock_status(struct osi_core_priv_data *osi_core)
|
|||||||
count++;
|
count++;
|
||||||
|
|
||||||
/* Maximum wait delay as per HW team is 1msec. */
|
/* Maximum wait delay as per HW team is 1msec. */
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
|
osi_core->osd_ops.usleep(OSI_DELAY_1000US);
|
||||||
OSI_DELAY_1000US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -659,7 +654,7 @@ nve32_t xpcs_lane_bring_up(struct osi_core_priv_data *osi_core)
|
|||||||
* around 14usec to satisfy the condition.
|
* around 14usec to satisfy the condition.
|
||||||
* Use 200US to yield CPU for other users.
|
* Use 200US to yield CPU for other users.
|
||||||
*/
|
*/
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_200US, OSI_DELAY_200US + MIN_USLEEP_10US);
|
osi_core->osd_ops.usleep(OSI_DELAY_200US);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -706,7 +701,7 @@ step10:
|
|||||||
osi_writela(osi_core, val, (nveu8_t *)osi_core->xpcs_base +
|
osi_writela(osi_core, val, (nveu8_t *)osi_core->xpcs_base +
|
||||||
XPCS_WRAP_UPHY_RX_CONTROL_0_0);
|
XPCS_WRAP_UPHY_RX_CONTROL_0_0);
|
||||||
/* Step14: wait for 30ms */
|
/* Step14: wait for 30ms */
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_30000US, OSI_DELAY_30000US + MIN_USLEEP_10US);
|
osi_core->osd_ops.usleep(OSI_DELAY_30000US);
|
||||||
|
|
||||||
/* Step15 RX_CDR_RESET */
|
/* Step15 RX_CDR_RESET */
|
||||||
val = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base +
|
val = osi_readla(osi_core, (nveu8_t *)osi_core->xpcs_base +
|
||||||
@@ -716,7 +711,7 @@ step10:
|
|||||||
XPCS_WRAP_UPHY_RX_CONTROL_0_0);
|
XPCS_WRAP_UPHY_RX_CONTROL_0_0);
|
||||||
|
|
||||||
/* Step16: wait for 30ms */
|
/* Step16: wait for 30ms */
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_30000US, OSI_DELAY_30000US + MIN_USLEEP_10US);
|
osi_core->osd_ops.usleep(OSI_DELAY_30000US);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (xpcs_check_pcs_lock_status(osi_core) < 0) {
|
if (xpcs_check_pcs_lock_status(osi_core) < 0) {
|
||||||
@@ -773,8 +768,7 @@ static nve32_t vendor_specifc_sw_rst_usxgmii_an_en(struct osi_core_priv_data *os
|
|||||||
if ((ctrl & XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST) == 0U) {
|
if ((ctrl & XPCS_VR_XS_PCS_DIG_CTRL1_VR_RST) == 0U) {
|
||||||
cond = 0;
|
cond = 0;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(OSI_DELAY_1000US,
|
osi_core->osd_ops.usleep(OSI_DELAY_1000US);
|
||||||
OSI_DELAY_1000US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -297,8 +297,7 @@ static inline nve32_t xpcs_write_safety(struct osi_core_priv_data *osi_core,
|
|||||||
*/
|
*/
|
||||||
once = 1U;
|
once = 1U;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
|
osi_core->osd_ops.usleep(MIN_USLEEP_10US);
|
||||||
MIN_USLEEP_10US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
|
// SPDX-License-Identifier: LicenseRef-NvidiaProprietary
|
||||||
/* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION. All rights reserved.
|
/* SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -109,8 +109,7 @@ static nve32_t poll_for_dbg_buf_update(struct osi_core_priv_data *const osi_core
|
|||||||
*/
|
*/
|
||||||
once = 1U;
|
once = 1U;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
|
osi_core->osd_ops.usleep(MIN_USLEEP_10US);
|
||||||
MIN_USLEEP_10US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err:
|
err:
|
||||||
@@ -981,8 +980,7 @@ static inline nve32_t poll_for_kt_update(struct osi_core_priv_data *osi_core)
|
|||||||
*/
|
*/
|
||||||
once = 1U;
|
once = 1U;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
|
osi_core->osd_ops.usleep(MIN_USLEEP_10US);
|
||||||
MIN_USLEEP_10US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1200,8 +1198,7 @@ static inline nve32_t poll_for_lut_update(struct osi_core_priv_data *osi_core)
|
|||||||
*/
|
*/
|
||||||
once = 1U;
|
once = 1U;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
|
osi_core->osd_ops.usleep(MIN_USLEEP_10US);
|
||||||
MIN_USLEEP_10US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -150,8 +150,7 @@ static inline nve32_t nv_xpcs_write_safety(struct osi_core_priv_data *osi_core,
|
|||||||
*/
|
*/
|
||||||
once = 1U;
|
once = 1U;
|
||||||
} else {
|
} else {
|
||||||
osi_core->osd_ops.usleep_range(MIN_USLEEP_10US,
|
osi_core->osd_ops.usleep(MIN_USLEEP_10US);
|
||||||
MIN_USLEEP_10US + MIN_USLEEP_10US);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user