nvethernet: Update status counters using ioctls

Issue:
The TSN, extra, and packet error stats counters
are not updated during virtualization,
because NVEQOS reads them from the VF OSI core structure.

Fix:
Add new ioctls that copy these counters from
the ethernet server's OSI core structure into
the VF OSI core structure.

Bug 3763499

Change-Id: I6e82f9756afe0c2f1adefb5a2449fad479864ea6
Signed-off-by: Mohan Thadikamalla <mohant@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2809358
(cherry picked from commit ab043869fb9b1feb197496d4695b333845689e5e)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2815774
Reviewed-by: Bhadram Varka <vbhadram@nvidia.com>
Reviewed-by: Revanth Kumar Uppala <ruppala@nvidia.com>
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
Tested-by: Revanth Kumar Uppala <ruppala@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Mohan Thadikamalla
2022-11-15 13:02:41 +05:30
committed by mobile promotions
parent 61f908bce9
commit d0ac6d4396
4 changed files with 138 additions and 134 deletions

View File

@@ -1083,8 +1083,8 @@ static void ether_adjust_link(struct net_device *dev)
if (!pdata->oldlink) {
new_state = 1;
pdata->oldlink = 1;
val = pdata->osi_core->xstats.link_connect_count;
pdata->osi_core->xstats.link_connect_count =
val = pdata->xstats.link_connect_count;
pdata->xstats.link_connect_count =
osi_update_stats_counter(val, 1UL);
}
} else if (pdata->oldlink) {
@@ -1092,8 +1092,8 @@ static void ether_adjust_link(struct net_device *dev)
pdata->oldlink = 0;
pdata->speed = 0;
pdata->oldduplex = -1;
val = pdata->osi_core->xstats.link_disconnect_count;
pdata->osi_core->xstats.link_disconnect_count =
val = pdata->xstats.link_disconnect_count;
pdata->xstats.link_disconnect_count =
osi_update_stats_counter(val, 1UL);
ether_en_dis_monitor_clks(pdata, OSI_DISABLE);
} else {
@@ -1276,7 +1276,6 @@ static irqreturn_t ether_tx_chan_isr(int irq, void *data)
struct ether_tx_napi *tx_napi = (struct ether_tx_napi *)data;
struct ether_priv_data *pdata = tx_napi->pdata;
struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
struct osi_core_priv_data *osi_core = pdata->osi_core;
unsigned int chan = tx_napi->chan;
unsigned long flags;
unsigned long val;
@@ -1287,8 +1286,8 @@ static irqreturn_t ether_tx_chan_isr(int irq, void *data)
OSI_DMA_INTR_DISABLE);
raw_spin_unlock_irqrestore(&pdata->rlock, flags);
val = osi_core->xstats.tx_normal_irq_n[chan];
osi_core->xstats.tx_normal_irq_n[chan] =
val = pdata->xstats.tx_normal_irq_n[chan];
pdata->xstats.tx_normal_irq_n[chan] =
osi_update_stats_counter(val, 1U);
if (likely(napi_schedule_prep(&tx_napi->napi))) {
@@ -1325,7 +1324,6 @@ static irqreturn_t ether_rx_chan_isr(int irq, void *data)
struct ether_rx_napi *rx_napi = (struct ether_rx_napi *)data;
struct ether_priv_data *pdata = rx_napi->pdata;
struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
struct osi_core_priv_data *osi_core = pdata->osi_core;
unsigned int chan = rx_napi->chan;
unsigned long val, flags;
@@ -1335,8 +1333,8 @@ static irqreturn_t ether_rx_chan_isr(int irq, void *data)
OSI_DMA_INTR_DISABLE);
raw_spin_unlock_irqrestore(&pdata->rlock, flags);
val = osi_core->xstats.rx_normal_irq_n[chan];
osi_core->xstats.rx_normal_irq_n[chan] =
val = pdata->xstats.rx_normal_irq_n[chan];
pdata->xstats.rx_normal_irq_n[chan] =
osi_update_stats_counter(val, 1U);
if (likely(napi_schedule_prep(&rx_napi->napi))) {
@@ -2722,7 +2720,7 @@ err_get_sync:
*
* Algorithm: This routine clears the following sw stats structures.
* 1) struct osi_mmc_counters
* 2) struct osi_xtra_stat_counters
* 2) struct ether_xtra_stat_counters
* 3) struct osi_xtra_dma_stat_counters
* 4) struct osi_pkt_err_stats
*
@@ -2736,8 +2734,8 @@ static inline void ether_reset_stats(struct ether_priv_data *pdata)
struct osi_dma_priv_data *osi_dma = pdata->osi_dma;
memset(&osi_core->mmc, 0U, sizeof(struct osi_mmc_counters));
memset(&osi_core->xstats, 0U,
sizeof(struct osi_xtra_stat_counters));
memset(&pdata->xstats, 0U,
sizeof(struct ether_xtra_stat_counters));
memset(&osi_dma->dstats, 0U,
sizeof(struct osi_xtra_dma_stat_counters));
memset(&osi_dma->pkt_err_stats, 0U, sizeof(struct osi_pkt_err_stats));
@@ -4256,11 +4254,10 @@ static enum hrtimer_restart ether_tx_usecs_hrtimer(struct hrtimer *data)
struct ether_tx_napi *tx_napi = container_of(data, struct ether_tx_napi,
tx_usecs_timer);
struct ether_priv_data *pdata = tx_napi->pdata;
struct osi_core_priv_data *osi_core = pdata->osi_core;
unsigned long val;
val = osi_core->xstats.tx_usecs_swtimer_n[tx_napi->chan];
osi_core->xstats.tx_usecs_swtimer_n[tx_napi->chan] =
val = pdata->xstats.tx_usecs_swtimer_n[tx_napi->chan];
pdata->xstats.tx_usecs_swtimer_n[tx_napi->chan] =
osi_update_stats_counter(val, 1U);
atomic_set(&pdata->tx_napi[tx_napi->chan]->tx_usecs_timer_armed,

View File

@@ -376,6 +376,24 @@ struct ether_tx_ts_skb_list {
unsigned long pkt_jiffies;
};
/**
 * @brief ether_xtra_stat_counters - Ethernet driver extra stat counters.
 *
 * Software-maintained counters kept in the driver's private data
 * (struct ether_priv_data) rather than in the OSI core structure, so the
 * driver updates them locally via osi_update_stats_counter() in the ISR,
 * SKB allocation, and link-change paths.
 */
struct ether_xtra_stat_counters {
	/** rx skb allocation failure count, per RX channel */
	nveu64_t re_alloc_rxbuf_failed[OSI_MGBE_MAX_NUM_QUEUES];
	/** TX per channel interrupt count */
	nveu64_t tx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** TX per channel SW timer (tx_usecs hrtimer) callback count */
	nveu64_t tx_usecs_swtimer_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** RX per channel interrupt count */
	nveu64_t rx_normal_irq_n[OSI_MGBE_MAX_NUM_QUEUES];
	/** link connect (link-up transition) count */
	nveu64_t link_connect_count;
	/** link disconnect (link-down transition) count */
	nveu64_t link_disconnect_count;
};
/**
* @brief Ethernet driver private data
*/
@@ -611,6 +629,8 @@ struct ether_priv_data {
unsigned int tx_start_stop;
/** Tasklet for restarting UPHY lanes */
struct tasklet_struct lane_restart_task;
/** xtra sw error counters */
struct ether_xtra_stat_counters xstats;
};
/**

View File

@@ -82,16 +82,6 @@ static const struct ether_stats ether_frpstrings_stats[] = {
offsetof(struct osi_dma_priv_data, pkt_err_stats.y)}
#endif
#if KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE
#define ETHER_CORE_PKT_ERR_STAT(z) \
{ (#z), FIELD_SIZEOF(struct osi_core_pkt_err_stats, z), \
offsetof(struct osi_core_priv_data, pkt_err_stats.z)}
#else
#define ETHER_CORE_PKT_ERR_STAT(z) \
{ (#z), sizeof_field(struct osi_core_pkt_err_stats, z), \
offsetof(struct osi_core_priv_data, pkt_err_stats.z)}
#endif
/**
* @brief ETHER pkt_err statistics
*/
@@ -185,12 +175,12 @@ static const struct ether_stats ether_dstrings_stats[] = {
*/
#if KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE
#define ETHER_EXTRA_STAT(b) \
{ #b, FIELD_SIZEOF(struct osi_xtra_stat_counters, b), \
offsetof(struct osi_core_priv_data, xstats.b)}
{ #b, FIELD_SIZEOF(struct ether_xtra_stat_counters, b), \
offsetof(struct ether_priv_data, xstats.b)}
#else
#define ETHER_EXTRA_STAT(b) \
{ #b, sizeof_field(struct osi_xtra_stat_counters, b), \
offsetof(struct osi_core_priv_data, xstats.b)}
{ #b, sizeof_field(struct ether_xtra_stat_counters, b), \
offsetof(struct ether_priv_data, xstats.b)}
#endif
/**
* @brief Ethernet extra statistics
@@ -207,49 +197,6 @@ static const struct ether_stats ether_gstrings_stats[] = {
ETHER_EXTRA_STAT(re_alloc_rxbuf_failed[8]),
ETHER_EXTRA_STAT(re_alloc_rxbuf_failed[9]),
/* Tx/Rx IRQ error info */
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[0]),
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[1]),
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[2]),
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[3]),
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[4]),
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[5]),
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[6]),
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[7]),
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[8]),
ETHER_EXTRA_STAT(tx_proc_stopped_irq_n[9]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[0]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[1]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[2]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[3]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[4]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[5]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[6]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[7]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[8]),
ETHER_EXTRA_STAT(rx_proc_stopped_irq_n[9]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[0]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[1]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[2]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[3]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[4]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[5]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[6]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[7]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[8]),
ETHER_EXTRA_STAT(tx_buf_unavail_irq_n[9]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[0]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[1]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[2]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[3]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[4]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[5]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[6]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[7]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[8]),
ETHER_EXTRA_STAT(rx_buf_unavail_irq_n[9]),
ETHER_EXTRA_STAT(rx_watchdog_irq_n),
ETHER_EXTRA_STAT(fatal_bus_error_irq_n),
/* Tx/Rx IRQ Events */
ETHER_EXTRA_STAT(tx_normal_irq_n[0]),
@@ -284,14 +231,6 @@ static const struct ether_stats ether_gstrings_stats[] = {
ETHER_EXTRA_STAT(rx_normal_irq_n[9]),
ETHER_EXTRA_STAT(link_disconnect_count),
ETHER_EXTRA_STAT(link_connect_count),
ETHER_EXTRA_STAT(ts_lock_add_fail),
ETHER_EXTRA_STAT(ts_lock_del_fail),
/* Packet error stats */
ETHER_CORE_PKT_ERR_STAT(mgbe_ip_header_err),
ETHER_CORE_PKT_ERR_STAT(mgbe_jabber_timeout_err),
ETHER_CORE_PKT_ERR_STAT(mgbe_payload_cs_err),
ETHER_CORE_PKT_ERR_STAT(mgbe_tx_underflow_err),
};
/**
@@ -493,58 +432,101 @@ static const struct ether_stats ether_mmc[] = {
};
/**
* @brief Ethernet extra TSN statistics array length
* @brief Ethernet extra statistics array length
*/
#define ETHER_EXTRA_TSN_STAT_LEN OSI_ARRAY_SIZE(ether_tstrings_stats)
#define ETHER_CORE_STAT_LEN OSI_ARRAY_SIZE(ether_tstrings_stats)
/**
* @brief Name of extra Ethernet stats, with length of name not more than
* ETH_GSTRING_LEN MAC
*/
#if KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE
#define ETHER_MMC_STAT(c) \
{ #c, FIELD_SIZEOF(struct osi_mmc_counters, c), \
offsetof(struct osi_core_priv_data, mmc.c)}
#define ETHER_CORE_STATS(r) \
{ (#r), FIELD_SIZEOF(struct osi_stats, r), \
offsetof(struct osi_core_priv_data, stats.r)}
#else
#define ETHER_MMC_STAT(c) \
{ #c, sizeof_field(struct osi_mmc_counters, c), \
offsetof(struct osi_core_priv_data, mmc.c)}
#define ETHER_CORE_STATS(r) \
{ (#r), sizeof_field(struct osi_stats, r), \
offsetof(struct osi_core_priv_data, stats.r)}
#endif
#if KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE
#define ETHER_TEXTRA_STAT(r) \
{ (#r), FIELD_SIZEOF(struct osi_tsn_stats, r), \
offsetof(struct osi_core_priv_data, tsn_stats.r)}
#else
#define ETHER_TEXTRA_STAT(r) \
{ (#r), sizeof_field(struct osi_tsn_stats, r), \
offsetof(struct osi_core_priv_data, tsn_stats.r)}
#endif
/**
* @brief Ethernet extra statistics
*/
static const struct ether_stats ether_tstrings_stats[] = {
ETHER_TEXTRA_STAT(const_gate_ctr_err),
ETHER_TEXTRA_STAT(head_of_line_blk_sch),
ETHER_TEXTRA_STAT(hlbs_q[0]),
ETHER_TEXTRA_STAT(hlbs_q[1]),
ETHER_TEXTRA_STAT(hlbs_q[2]),
ETHER_TEXTRA_STAT(hlbs_q[3]),
ETHER_TEXTRA_STAT(hlbs_q[4]),
ETHER_TEXTRA_STAT(hlbs_q[5]),
ETHER_TEXTRA_STAT(hlbs_q[6]),
ETHER_TEXTRA_STAT(hlbs_q[7]),
ETHER_TEXTRA_STAT(head_of_line_blk_frm),
ETHER_TEXTRA_STAT(hlbf_q[0]),
ETHER_TEXTRA_STAT(hlbf_q[1]),
ETHER_TEXTRA_STAT(hlbf_q[2]),
ETHER_TEXTRA_STAT(hlbf_q[3]),
ETHER_TEXTRA_STAT(hlbf_q[4]),
ETHER_TEXTRA_STAT(hlbf_q[5]),
ETHER_TEXTRA_STAT(hlbf_q[6]),
ETHER_TEXTRA_STAT(hlbf_q[7]),
ETHER_TEXTRA_STAT(base_time_reg_err),
ETHER_TEXTRA_STAT(sw_own_list_complete),
ETHER_CORE_STATS(const_gate_ctr_err),
ETHER_CORE_STATS(head_of_line_blk_sch),
ETHER_CORE_STATS(hlbs_q[0]),
ETHER_CORE_STATS(hlbs_q[1]),
ETHER_CORE_STATS(hlbs_q[2]),
ETHER_CORE_STATS(hlbs_q[3]),
ETHER_CORE_STATS(hlbs_q[4]),
ETHER_CORE_STATS(hlbs_q[5]),
ETHER_CORE_STATS(hlbs_q[6]),
ETHER_CORE_STATS(hlbs_q[7]),
ETHER_CORE_STATS(head_of_line_blk_frm),
ETHER_CORE_STATS(hlbf_q[0]),
ETHER_CORE_STATS(hlbf_q[1]),
ETHER_CORE_STATS(hlbf_q[2]),
ETHER_CORE_STATS(hlbf_q[3]),
ETHER_CORE_STATS(hlbf_q[4]),
ETHER_CORE_STATS(hlbf_q[5]),
ETHER_CORE_STATS(hlbf_q[6]),
ETHER_CORE_STATS(hlbf_q[7]),
ETHER_CORE_STATS(base_time_reg_err),
ETHER_CORE_STATS(sw_own_list_complete),
/* Tx/Rx IRQ error info */
ETHER_CORE_STATS(tx_proc_stopped_irq_n[0]),
ETHER_CORE_STATS(tx_proc_stopped_irq_n[1]),
ETHER_CORE_STATS(tx_proc_stopped_irq_n[2]),
ETHER_CORE_STATS(tx_proc_stopped_irq_n[3]),
ETHER_CORE_STATS(tx_proc_stopped_irq_n[4]),
ETHER_CORE_STATS(tx_proc_stopped_irq_n[5]),
ETHER_CORE_STATS(tx_proc_stopped_irq_n[6]),
ETHER_CORE_STATS(tx_proc_stopped_irq_n[7]),
ETHER_CORE_STATS(tx_proc_stopped_irq_n[8]),
ETHER_CORE_STATS(tx_proc_stopped_irq_n[9]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[0]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[1]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[2]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[3]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[4]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[5]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[6]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[7]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[8]),
ETHER_CORE_STATS(rx_proc_stopped_irq_n[9]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[0]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[1]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[2]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[3]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[4]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[5]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[6]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[7]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[8]),
ETHER_CORE_STATS(tx_buf_unavail_irq_n[9]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[0]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[1]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[2]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[3]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[4]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[5]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[6]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[7]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[8]),
ETHER_CORE_STATS(rx_buf_unavail_irq_n[9]),
ETHER_CORE_STATS(rx_watchdog_irq_n),
ETHER_CORE_STATS(fatal_bus_error_irq_n),
ETHER_CORE_STATS(ts_lock_add_fail),
ETHER_CORE_STATS(ts_lock_del_fail),
/* Packet error stats */
ETHER_CORE_STATS(mgbe_ip_header_err),
ETHER_CORE_STATS(mgbe_jabber_timeout_err),
ETHER_CORE_STATS(mgbe_payload_cs_err),
ETHER_CORE_STATS(mgbe_tx_underflow_err),
};
/**
@@ -583,6 +565,16 @@ static void ether_get_ethtool_stats(struct net_device *dev,
return;
}
if (osi_core->use_virtualization == OSI_ENABLE) {
ioctl_data.cmd = OSI_CMD_READ_STATS;
ret = osi_handle_ioctl(osi_core, &ioctl_data);
if (ret == -1) {
dev_err(pdata->dev,
"Fail to read core stats\n");
return;
}
}
for (i = 0; i < ETHER_MMC_STATS_LEN; i++) {
char *p = (char *)osi_core + ether_mmc[i].stat_offset;
@@ -592,7 +584,7 @@ static void ether_get_ethtool_stats(struct net_device *dev,
}
for (i = 0; i < ETHER_EXTRA_STAT_LEN; i++) {
char *p = (char *)osi_core +
char *p = (char *)pdata +
ether_gstrings_stats[i].stat_offset;
data[j++] = (ether_gstrings_stats[i].sizeof_stat ==
@@ -615,8 +607,7 @@ static void ether_get_ethtool_stats(struct net_device *dev,
sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
}
for (i = 0; ((i < ETHER_EXTRA_TSN_STAT_LEN) &&
(pdata->hw_feat.est_sel == OSI_ENABLE)); i++) {
for (i = 0; i < ETHER_CORE_STAT_LEN; i++) {
char *p = (char *)osi_core +
ether_tstrings_stats[i].stat_offset;
@@ -626,7 +617,7 @@ static void ether_get_ethtool_stats(struct net_device *dev,
for (i = 0; ((i < ETHER_FRP_STAT_LEN) &&
(pdata->hw_feat.frp_sel == OSI_ENABLE)); i++) {
char *p = (char *)osi_core +
char *p = (char *)osi_dma +
ether_frpstrings_stats[i].stat_offset;
data[j++] = (ether_frpstrings_stats[i].sizeof_stat ==
@@ -675,12 +666,10 @@ static int ether_get_sset_count(struct net_device *dev, int sset)
} else {
len += ETHER_PKT_ERR_STAT_LEN;
}
if (INT_MAX - ETHER_EXTRA_TSN_STAT_LEN < len) {
if (INT_MAX - ETHER_CORE_STAT_LEN < len) {
/* do nothing */
} else {
if (pdata->hw_feat.est_sel == OSI_ENABLE) {
len += ETHER_EXTRA_TSN_STAT_LEN;
}
len += ETHER_CORE_STAT_LEN;
}
if (INT_MAX - ETHER_FRP_STAT_LEN < len) {
/* do nothing */
@@ -752,9 +741,7 @@ static void ether_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
p += ETH_GSTRING_LEN;
}
for (i = 0; ((i < ETHER_EXTRA_TSN_STAT_LEN) &&
(pdata->hw_feat.est_sel == OSI_ENABLE));
i++) {
for (i = 0; i < ETHER_CORE_STAT_LEN; i++) {
str = (u8 *)ether_tstrings_stats[i].stat_string;
if (memcpy(p, str, ETH_GSTRING_LEN) ==
OSI_NULL) {

View File

@@ -234,8 +234,8 @@ static inline int ether_alloc_skb(struct ether_priv_data *pdata,
rx_swcx->buf_virt_addr = pdata->osi_dma->resv_buf_virt_addr;
rx_swcx->buf_phy_addr = pdata->osi_dma->resv_buf_phy_addr;
rx_swcx->flags |= OSI_RX_SWCX_BUF_VALID;
val = pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan];
pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan] =
val = pdata->xstats.re_alloc_rxbuf_failed[chan];
pdata->xstats.re_alloc_rxbuf_failed[chan] =
osi_update_stats_counter(val, 1UL);
return 0;
}
@@ -256,8 +256,8 @@ static inline int ether_alloc_skb(struct ether_priv_data *pdata,
rx_swcx->buf_virt_addr = pdata->osi_dma->resv_buf_virt_addr;
rx_swcx->buf_phy_addr = pdata->osi_dma->resv_buf_phy_addr;
rx_swcx->flags |= OSI_RX_SWCX_BUF_VALID;
val = pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan];
pdata->osi_core->xstats.re_alloc_rxbuf_failed[chan] =
val = pdata->xstats.re_alloc_rxbuf_failed[chan];
pdata->xstats.re_alloc_rxbuf_failed[chan] =
osi_update_stats_counter(val, 1UL);
return 0;
}