Merge branch 'net-wangxun-more-ethtool'

Jiawen Wu says:

====================
Implement more ethtool_ops for Wangxun

Provide ethtool functions to operate pause param, ring param, coalesce
channel number and msglevel, for driver txgbe/ngbe.

v6 -> v7:
- Rebase on net-next.

v5 -> v6:
- Minor fixes addressing Jakub Kicinski's comments.

v4 -> v5:
- Fix build error reported by kernel test robot.

v3 -> v4:
- Repartition the patches of phylink.
- Handle failure to allocate memory while changing ring parameters.
- Minor fixes about formatting.

v2 -> v3:
- Address comments:
  https://lore.kernel.org/all/ZW2loxTO6oKNYLew@shell.armlinux.org.uk/

v1 -> v2:
- Add phylink support for ngbe.
- Fix issue on interrupts when queue number is changed.
- Add more macro defines.
- Fix return codes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2024-01-04 10:49:36 +00:00
commit 168882d440
16 changed files with 1112 additions and 187 deletions

View file

@ -8,6 +8,7 @@
#include "wx_type.h"
#include "wx_ethtool.h"
#include "wx_hw.h"
#include "wx_lib.h"
struct wx_stats {
char stat_string[ETH_GSTRING_LEN];
@ -185,3 +186,238 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
}
}
EXPORT_SYMBOL(wx_get_drvinfo);
/* ethtool .nway_reset: restart link autonegotiation via phylink */
int wx_nway_reset(struct net_device *netdev)
{
	struct wx *priv = netdev_priv(netdev);

	return phylink_ethtool_nway_reset(priv->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);
/* ethtool .get_link_ksettings: link state is owned by phylink */
int wx_get_link_ksettings(struct net_device *netdev,
			  struct ethtool_link_ksettings *cmd)
{
	struct wx *priv = netdev_priv(netdev);

	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);
/* ethtool .set_link_ksettings: delegate link configuration to phylink */
int wx_set_link_ksettings(struct net_device *netdev,
			  const struct ethtool_link_ksettings *cmd)
{
	struct wx *priv = netdev_priv(netdev);

	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);
/* ethtool .get_pauseparam: pause configuration is managed by phylink */
void wx_get_pauseparam(struct net_device *netdev,
		       struct ethtool_pauseparam *pause)
{
	struct wx *priv = netdev_priv(netdev);

	phylink_ethtool_get_pauseparam(priv->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);
/* ethtool .set_pauseparam: delegate pause configuration to phylink */
int wx_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct wx *priv = netdev_priv(netdev);

	return phylink_ethtool_set_pauseparam(priv->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);
/**
 * wx_get_ringparam - report descriptor ring sizes to ethtool
 * @netdev: network interface device structure
 * @ring: ethtool ring parameters to fill out
 * @kernel_ring: extended kernel ring parameters (not used here)
 * @extack: netlink extended ack (not used here)
 *
 * Reports the hardware maximum and the currently configured Tx/Rx
 * descriptor counts.  Mini and jumbo rings are not supported, so their
 * fields are reported as zero.
 */
void wx_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *ring,
		      struct kernel_ethtool_ringparam *kernel_ring,
		      struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ring->rx_max_pending = WX_MAX_RXD;
	ring->tx_max_pending = WX_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = wx->rx_ring_count;
	ring->tx_pending = wx->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
EXPORT_SYMBOL(wx_get_ringparam);
/**
 * wx_get_coalesce - report interrupt coalescing settings to ethtool
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure to fill out
 * @kernel_coal: extended kernel coalesce parameters (not used here)
 * @extack: netlink extended ack (not used here)
 *
 * ITR settings > 1 are stored left-shifted by 2 (see wx_set_coalesce),
 * so they are shifted back down before being reported in usecs.  Values
 * 0 and 1 are passed through as-is.
 *
 * Return: 0 (always succeeds).
 */
int wx_get_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
	/* only valid if in constant ITR mode */
	if (wx->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = wx->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (wx->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = wx->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;

	return 0;
}
EXPORT_SYMBOL(wx_get_coalesce);
/**
 * wx_set_coalesce - set interrupt coalescing parameters from ethtool
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure with the requested settings
 * @kernel_coal: extended kernel coalesce parameters (not used here)
 * @extack: netlink extended ack (not used here)
 *
 * Validates the requested usec values against the MAC-specific EITR
 * maximum, stores them (left-shifted by 2 for values > 1 — the encoding
 * wx_get_coalesce reverses), and reprograms the EITR register of every
 * queue vector.
 *
 * Return: 0 on success, -EOPNOTSUPP if Tx-specific settings are given
 * while vectors carry mixed Rx/Tx queues, -EINVAL if a value exceeds
 * the hardware maximum.
 */
int wx_set_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u16 tx_itr_param, rx_itr_param;
	struct wx_q_vector *q_vector;
	u16 max_eitr;
	int i;

	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EOPNOTSUPP;
	}

	if (ec->tx_max_coalesced_frames_irq)
		wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	/* EITR range differs between sp and em MAC types */
	if (wx->mac.type == wx_mac_sp)
		max_eitr = WX_SP_MAX_EITR;
	else
		max_eitr = WX_EM_MAX_EITR;

	if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
	    (ec->tx_coalesce_usecs > (max_eitr >> 2)))
		return -EINVAL;

	/* store usecs << 2; 0 and 1 are kept verbatim as mode flags */
	if (ec->rx_coalesce_usecs > 1)
		wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		wx->rx_itr_setting = ec->rx_coalesce_usecs;

	if (wx->rx_itr_setting == 1)
		rx_itr_param = WX_20K_ITR;
	else
		rx_itr_param = wx->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		wx->tx_itr_setting = ec->tx_coalesce_usecs;

	if (wx->tx_itr_setting == 1) {
		/* default Tx rate depends on the MAC generation */
		if (wx->mac.type == wx_mac_sp)
			tx_itr_param = WX_12K_ITR;
		else
			tx_itr_param = WX_20K_ITR;
	} else {
		tx_itr_param = wx->tx_itr_setting;
	}

	/* mixed Rx/Tx */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		wx->tx_itr_setting = wx->rx_itr_setting;

	/* push the new ITR value into every queue vector's EITR register */
	for (i = 0; i < wx->num_q_vectors; i++) {
		q_vector = wx->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		wx_write_eitr(q_vector);
	}

	return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);
/* Maximum number of combined channels supported by the device:
 * a single q_vector without MSI-X, otherwise the RSS queue limit
 * of the MAC generation (63 for sp, 8 for em).
 */
static unsigned int wx_max_channels(struct wx *wx)
{
	if (!wx->msix_q_entries)
		/* We only support one q_vector without MSI-X */
		return 1;

	/* support up to max allowed queues with RSS */
	return (wx->mac.type == wx_mac_sp) ? 63 : 8;
}
/* ethtool .get_channels: report channel limits and the current RSS
 * queue count; the single "other" (misc) vector exists only in MSI-X
 * mode.
 */
void wx_get_channels(struct net_device *dev,
		     struct ethtool_channels *ch)
{
	struct wx *wx = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = wx_max_channels(wx);

	/* record RSS queues */
	ch->combined_count = wx->ring_feature[RING_F_RSS].indices;

	/* report info for other vector */
	if (wx->msix_q_entries) {
		ch->max_other = 1;
		ch->other_count = 1;
	}
}
EXPORT_SYMBOL(wx_get_channels);
/* ethtool .set_channels: validate and record the requested RSS queue
 * limit; the caller (driver-specific wrapper) re-applies the queue
 * layout afterwards.
 */
int wx_set_channels(struct net_device *dev,
		    struct ethtool_channels *ch)
{
	struct wx *wx = netdev_priv(dev);
	unsigned int queues = ch->combined_count;

	/* the single misc "other" vector is fixed and may not change */
	if (ch->other_count != 1)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (queues > wx_max_channels(wx))
		return -EINVAL;

	wx->ring_feature[RING_F_RSS].limit = queues;

	return 0;
}
EXPORT_SYMBOL(wx_set_channels);
/* ethtool .get_msglevel: return the driver message-enable bitmap */
u32 wx_get_msglevel(struct net_device *netdev)
{
	struct wx *priv = netdev_priv(netdev);

	return priv->msg_enable;
}
EXPORT_SYMBOL(wx_get_msglevel);
/* ethtool .set_msglevel: store the driver message-enable bitmap */
void wx_set_msglevel(struct net_device *netdev, u32 data)
{
	struct wx *priv = netdev_priv(netdev);

	priv->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);

View file

@ -13,4 +13,31 @@ void wx_get_mac_stats(struct net_device *netdev,
void wx_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *stats);
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info);
int wx_nway_reset(struct net_device *netdev);
int wx_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd);
int wx_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd);
void wx_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause);
int wx_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause);
void wx_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack);
int wx_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack);
int wx_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack);
void wx_get_channels(struct net_device *dev,
struct ethtool_channels *ch);
int wx_set_channels(struct net_device *dev,
struct ethtool_channels *ch);
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
#endif /* _WX_ETHTOOL_H_ */

View file

@ -149,9 +149,9 @@ void wx_irq_disable(struct wx *wx)
int vector;
for (vector = 0; vector < wx->num_q_vectors; vector++)
synchronize_irq(wx->msix_entries[vector].vector);
synchronize_irq(wx->msix_q_entries[vector].vector);
synchronize_irq(wx->msix_entries[vector].vector);
synchronize_irq(wx->msix_entry->vector);
} else {
synchronize_irq(pdev->irq);
}
@ -1158,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx)
wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
}
#define WX_ETH_FRAMING 20
/**
 * wx_hpbthresh - calculate high water mark for flow control
 *
 * @wx: board private structure to calculate for
 *
 * Return: high water mark in kilobytes, or a minimal fallback headroom
 * when the Rx packet buffer is too small to provide the required delay.
 **/
static int wx_hpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING;
	tc = link;

	/* Calculate delay value for device */
	dv_id = WX_DV(link, tc);

	/* Delay value is calculated in bit times convert to KB */
	kb = WX_BT2KB(dv_id);
	rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to user and do the best we can.
	 */
	if (marker < 0) {
		dev_warn(&wx->pdev->dev,
			 "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n");
		marker = tc + 1;
	}

	return marker;
}
/**
 * wx_lpbthresh - calculate low water mark for flow control
 *
 * @wx: board private structure to calculate for
 *
 * Return: low water mark in kilobytes.
 **/
static int wx_lpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	int max_frame;
	u32 delay;

	/* Calculate max LAN frame size */
	max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Calculate delay value for device */
	delay = WX_LOW_DV(max_frame);

	/* Delay value is calculated in bit times convert to KB */
	return WX_BT2KB(delay);
}
/**
 * wx_pbthresh_setup - calculate and setup high low water marks
 *
 * @wx: board private structure to calculate for
 *
 * Computes both water marks and stores them in wx->fc, forcing the low
 * water mark to zero if it would not be below the high water mark.
 **/
static void wx_pbthresh_setup(struct wx *wx)
{
	int high = wx_hpbthresh(wx);
	int low = wx_lpbthresh(wx);

	/* Low water marks must not be larger than high water marks */
	if (low > high)
		low = 0;

	wx->fc.high_water = high;
	wx->fc.low_water = low;
}
static void wx_configure_port(struct wx *wx)
{
u32 value, i;
@ -1522,6 +1597,72 @@ static void wx_restore_vlan(struct wx *wx)
wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}
/* Write the software RSS indirection table (WX_MAX_RETA_ENTRIES bytes)
 * into the hardware redirection registers, packing four 8-bit entries
 * into each 32-bit WX_RDB_RSSTBL register.
 */
static void wx_store_reta(struct wx *wx)
{
	u8 *indir_tbl = wx->rss_indir_tbl;
	u32 reta = 0;
	u32 i;

	/* Fill out the redirection table as follows:
	 * - 8 bit wide entries containing 4 bit RSS index
	 */
	for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
		/* place entry i into byte (i % 4) of the accumulator */
		reta |= indir_tbl[i] << (i & 0x3) * 8;
		/* every fourth entry, flush the packed word to hardware */
		if ((i & 3) == 3) {
			wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
			reta = 0;
		}
	}
}
/* Program the RSS hash key registers and build the indirection table,
 * cycling queue indices 0..indices-1 across all entries, then push the
 * table to hardware via wx_store_reta().
 */
static void wx_setup_reta(struct wx *wx)
{
	u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
	u32 random_key_size = WX_RSS_KEY_SIZE / 4;
	u32 i, j;

	/* Fill out hash function seeds */
	for (i = 0; i < random_key_size; i++)
		wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);

	/* Fill out redirection table */
	memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));

	/* j cycles through the RSS queue indices, wrapping at rss_i */
	for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
		if (j == rss_i)
			j = 0;

		wx->rss_indir_tbl[i] = j;
	}

	wx_store_reta(wx);
}
/* Configure RSS: refresh the hash key, rebuild the indirection table,
 * select the packet types to hash on, and enable RSS in WX_RDB_RA_CTL
 * only when wx->rss_enabled is set.
 */
static void wx_setup_mrqc(struct wx *wx)
{
	u32 rss_field = 0;

	/* Disable indicating checksum in descriptor, enables RSS hash */
	wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);

	/* Perform hash on these packet types */
	rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
		    WX_RDB_RA_CTL_RSS_IPV4_TCP |
		    WX_RDB_RA_CTL_RSS_IPV4_UDP |
		    WX_RDB_RA_CTL_RSS_IPV6 |
		    WX_RDB_RA_CTL_RSS_IPV6_TCP |
		    WX_RDB_RA_CTL_RSS_IPV6_UDP;

	netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));

	wx_setup_reta(wx);

	if (wx->rss_enabled)
		rss_field |= WX_RDB_RA_CTL_RSS_EN;

	wr32(wx, WX_RDB_RA_CTL, rss_field);
}
/**
* wx_configure_rx - Configure Receive Unit after Reset
* @wx: pointer to private structure
@ -1554,6 +1695,8 @@ void wx_configure_rx(struct wx *wx)
wr32(wx, WX_PSR_CTL, psrctl);
}
wx_setup_mrqc(wx);
/* set_rx_buffer_len must be called before ring initialization */
wx_set_rx_buffer_len(wx);
@ -1584,6 +1727,7 @@ static void wx_configure_isb(struct wx *wx)
void wx_configure(struct wx *wx)
{
wx_set_rxpba(wx);
wx_pbthresh_setup(wx);
wx_configure_port(wx);
wx_set_rx_mode(wx->netdev);
@ -1750,6 +1894,28 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
}
EXPORT_SYMBOL(wx_get_pcie_msix_counts);
/**
 * wx_init_rss_key - Initialize wx RSS key
 * @wx: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 *
 * Return: 0 on success (or if already allocated), -ENOMEM on allocation
 * failure.
 **/
static int wx_init_rss_key(struct wx *wx)
{
	u32 *key;

	if (wx->rss_key)
		return 0;

	key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
	if (unlikely(!key))
		return -ENOMEM;

	netdev_rss_key_fill(key, WX_RSS_KEY_SIZE);
	wx->rss_key = key;

	return 0;
}
int wx_sw_init(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
@ -1777,14 +1943,23 @@ int wx_sw_init(struct wx *wx)
wx->subsystem_device_id = swab16((u16)ssid);
}
err = wx_init_rss_key(wx);
if (err < 0) {
wx_err(wx, "rss key allocation failed\n");
return err;
}
wx->mac_table = kcalloc(wx->mac.num_rar_entries,
sizeof(struct wx_mac_addr),
GFP_KERNEL);
if (!wx->mac_table) {
wx_err(wx, "mac_table allocation failed\n");
kfree(wx->rss_key);
return -ENOMEM;
}
wx->msix_in_use = false;
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
@ -2003,6 +2178,102 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
}
EXPORT_SYMBOL(wx_vlan_rx_kill_vid);
/* Set the per-ring drop-enable bit in the ring's WX_PX_RR_CFG register */
static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring)
{
	u16 idx = ring->reg_idx;
	u32 cfg = rd32(wx, WX_PX_RR_CFG(idx));

	wr32(wx, WX_PX_RR_CFG(idx), cfg | WX_PX_RR_CFG_DROP_EN);
}
/* Clear the per-ring drop-enable bit in the ring's WX_PX_RR_CFG register */
static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring)
{
	u16 idx = ring->reg_idx;
	u32 cfg = rd32(wx, WX_PX_RR_CFG(idx));

	wr32(wx, WX_PX_RR_CFG(idx), cfg & ~WX_PX_RR_CFG_DROP_EN);
}
/**
 * wx_fc_enable - program 802.3x flow control in hardware
 * @wx: board private structure
 * @tx_pause: enable transmission of pause frames (Tx flow control)
 * @rx_pause: honor received pause frames (Rx flow control)
 *
 * Programs the MAC Rx flow control enable, the 802.3x Tx flow control
 * enable, the XON/XOFF water mark thresholds, the pause time and the
 * refresh threshold, and toggles per-ring Rx drop to avoid head-of-line
 * blocking when flow control is off with multiple Rx queues.
 *
 * Return: 0 on success, -EINVAL if the configured water marks are
 * invalid while Tx flow control is requested.
 */
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause)
{
	u16 pause_time = WX_DEFAULT_FCPAUSE;
	u32 mflcn_reg, fccfg_reg, reg;
	u32 fcrtl, fcrth;
	int i;

	/* Low water mark of zero causes XOFF floods */
	if (tx_pause && wx->fc.high_water) {
		if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) {
			wx_err(wx, "Invalid water mark configuration\n");
			return -EINVAL;
		}
	}

	/* Disable any previous flow control settings */
	mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL);
	mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE;

	fccfg_reg = rd32(wx, WX_RDB_RFCC);
	fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X;

	if (rx_pause)
		mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE;
	if (tx_pause)
		fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X;

	/* Set 802.3x based flow control settings. */
	wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg);
	wr32(wx, WX_RDB_RFCC, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if (tx_pause && wx->fc.high_water) {
		/* water marks are stored in KB units; registers take <<10 */
		fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE;
		wr32(wx, WX_RDB_RFCL, fcrtl);
		fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE;
	} else {
		wr32(wx, WX_RDB_RFCL, 0);
		/* In order to prevent Tx hangs when the internal Tx
		 * switch is enabled we must set the high water mark
		 * to the Rx packet buffer size - 24KB. This allows
		 * the Tx switch to function even under heavy Rx
		 * workloads.
		 */
		fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576;
	}

	wr32(wx, WX_RDB_RFCH, fcrth);

	/* Configure pause time */
	reg = pause_time * 0x00010001;
	wr32(wx, WX_RDB_RFCV, reg);

	/* Configure flow control refresh threshold value */
	wr32(wx, WX_RDB_RFCRT, pause_time / 2);

	/* We should set the drop enable bit if:
	 * Number of Rx queues > 1 and flow control is disabled
	 *
	 * This allows us to avoid head of line blocking for security
	 * and performance reasons.
	 */
	if (wx->num_rx_queues > 1 && !tx_pause) {
		for (i = 0; i < wx->num_rx_queues; i++)
			wx_enable_rx_drop(wx, wx->rx_ring[i]);
	} else {
		for (i = 0; i < wx->num_rx_queues; i++)
			wx_disable_rx_drop(wx, wx->rx_ring[i]);
	}

	return 0;
}
EXPORT_SYMBOL(wx_fc_enable);
/**
* wx_update_stats - Update the board statistics counters.
* @wx: board private structure

View file

@ -41,6 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
void wx_update_stats(struct wx *wx);
void wx_clear_hw_cntrs(struct wx *wx);

View file

@ -1568,8 +1568,14 @@ EXPORT_SYMBOL(wx_napi_disable_all);
**/
static void wx_set_rss_queues(struct wx *wx)
{
wx->num_rx_queues = wx->mac.max_rx_queues;
wx->num_tx_queues = wx->mac.max_tx_queues;
struct wx_ring_feature *f;
/* set mask for 16 queue limit of RSS */
f = &wx->ring_feature[RING_F_RSS];
f->indices = f->limit;
wx->num_rx_queues = f->limit;
wx->num_tx_queues = f->limit;
}
static void wx_set_num_queues(struct wx *wx)
@ -1595,35 +1601,51 @@ static int wx_acquire_msix_vectors(struct wx *wx)
struct irq_affinity affd = {0, };
int nvecs, i;
nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);
/* We start by asking for one vector per queue pair */
nvecs = max(wx->num_rx_queues, wx->num_tx_queues);
nvecs = min_t(int, nvecs, num_online_cpus());
nvecs = min_t(int, nvecs, wx->mac.max_msix_vectors);
wx->msix_entries = kcalloc(nvecs,
sizeof(struct msix_entry),
GFP_KERNEL);
if (!wx->msix_entries)
wx->msix_q_entries = kcalloc(nvecs, sizeof(struct msix_entry),
GFP_KERNEL);
if (!wx->msix_q_entries)
return -ENOMEM;
/* One for non-queue interrupts */
nvecs += 1;
if (!wx->msix_in_use) {
wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
GFP_KERNEL);
if (!wx->msix_entry) {
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
return -ENOMEM;
}
}
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
nvecs,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&affd);
if (nvecs < 0) {
wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
kfree(wx->msix_entries);
wx->msix_entries = NULL;
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
kfree(wx->msix_entry);
wx->msix_entry = NULL;
return nvecs;
}
wx->msix_entry->entry = 0;
wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
nvecs -= 1;
for (i = 0; i < nvecs; i++) {
wx->msix_entries[i].entry = i;
wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i);
wx->msix_q_entries[i].entry = i;
wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
}
/* one for msix_other */
nvecs -= 1;
wx->num_q_vectors = nvecs;
wx->num_rx_queues = nvecs;
wx->num_tx_queues = nvecs;
return 0;
}
@ -1645,9 +1667,11 @@ static int wx_set_interrupt_capability(struct wx *wx)
if (ret == 0 || (ret == -ENOMEM))
return ret;
wx->num_rx_queues = 1;
wx->num_tx_queues = 1;
wx->num_q_vectors = 1;
/* Disable RSS */
dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
wx->ring_feature[RING_F_RSS].limit = 1;
wx_set_num_queues(wx);
/* minmum one for queue, one for misc*/
nvecs = 1;
@ -1905,8 +1929,12 @@ void wx_reset_interrupt_capability(struct wx *wx)
return;
if (pdev->msix_enabled) {
kfree(wx->msix_entries);
wx->msix_entries = NULL;
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
if (!wx->msix_in_use) {
kfree(wx->msix_entry);
wx->msix_entry = NULL;
}
}
pci_free_irq_vectors(wx->pdev);
}
@ -1978,7 +2006,7 @@ void wx_free_irq(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
struct msix_entry *entry = &wx->msix_entries[vector];
struct msix_entry *entry = &wx->msix_q_entries[vector];
/* free only the irqs that were actually requested */
if (!q_vector->rx.ring && !q_vector->tx.ring)
@ -1988,7 +2016,7 @@ void wx_free_irq(struct wx *wx)
}
if (wx->mac.type == wx_mac_em)
free_irq(wx->msix_entries[vector].vector, wx);
free_irq(wx->msix_entry->vector, wx);
}
EXPORT_SYMBOL(wx_free_irq);
@ -2065,6 +2093,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@ -2082,7 +2111,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
* when it needs to update EITR registers at runtime. Hardware
* specific quirks/differences are taken care of here.
*/
static void wx_write_eitr(struct wx_q_vector *q_vector)
void wx_write_eitr(struct wx_q_vector *q_vector)
{
struct wx *wx = q_vector->wx;
int v_idx = q_vector->v_idx;
@ -2095,7 +2124,7 @@ static void wx_write_eitr(struct wx_q_vector *q_vector)
itr_reg |= WX_PX_ITR_CNT_WDIS;
wr32(wx, WX_PX_ITR(v_idx), itr_reg);
wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
}
/**
@ -2141,9 +2170,9 @@ void wx_configure_vectors(struct wx *wx)
wx_write_eitr(q_vector);
}
wx_set_ivar(wx, -1, 0, v_idx);
wx_set_ivar(wx, -1, 0, 0);
if (pdev->msix_enabled)
wr32(wx, WX_PX_ITR(v_idx), 1950);
wr32(wx, WX_PX_ITR(0), 1950);
}
EXPORT_SYMBOL(wx_configure_vectors);
@ -2656,11 +2685,14 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
netdev_features_t changed = netdev->features ^ features;
struct wx *wx = netdev_priv(netdev);
if (changed & NETIF_F_RXHASH)
if (features & NETIF_F_RXHASH) {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
WX_RDB_RA_CTL_RSS_EN);
else
wx->rss_enabled = true;
} else {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
wx->rss_enabled = false;
}
if (changed &
(NETIF_F_HW_VLAN_CTAG_RX |
@ -2671,4 +2703,70 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
}
EXPORT_SYMBOL(wx_set_features);
/**
 * wx_set_ring - resize the Tx/Rx descriptor rings
 * @wx: board private structure
 * @new_tx_count: requested number of Tx descriptors per ring
 * @new_rx_count: requested number of Rx descriptors per ring
 * @temp_ring: caller-allocated scratch array, at least
 *	       max(num_tx_queues, num_rx_queues) entries
 *
 * Allocates new ring resources into @temp_ring first and only frees the
 * old ones once all allocations succeed, so a mid-way allocation
 * failure leaves the previous configuration fully intact (the failure
 * is logged and the function returns silently).
 */
void wx_set_ring(struct wx *wx, u32 new_tx_count,
		 u32 new_rx_count, struct wx_ring *temp_ring)
{
	int i, err = 0;

	/* Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != wx->tx_ring_count) {
		for (i = 0; i < wx->num_tx_queues; i++) {
			memcpy(&temp_ring[i], wx->tx_ring[i],
			       sizeof(struct wx_ring));

			temp_ring[i].count = new_tx_count;
			err = wx_setup_tx_resources(&temp_ring[i]);
			if (err) {
				wx_err(wx, "setup new tx resources failed, keep using the old config\n");
				/* unwind: free what was allocated so far */
				while (i) {
					i--;
					wx_free_tx_resources(&temp_ring[i]);
				}
				return;
			}
		}

		for (i = 0; i < wx->num_tx_queues; i++) {
			wx_free_tx_resources(wx->tx_ring[i]);

			memcpy(wx->tx_ring[i], &temp_ring[i],
			       sizeof(struct wx_ring));
		}

		wx->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != wx->rx_ring_count) {
		for (i = 0; i < wx->num_rx_queues; i++) {
			memcpy(&temp_ring[i], wx->rx_ring[i],
			       sizeof(struct wx_ring));

			temp_ring[i].count = new_rx_count;
			err = wx_setup_rx_resources(&temp_ring[i]);
			if (err) {
				wx_err(wx, "setup new rx resources failed, keep using the old config\n");
				/* unwind: free what was allocated so far */
				while (i) {
					i--;
					wx_free_rx_resources(&temp_ring[i]);
				}
				return;
			}
		}

		for (i = 0; i < wx->num_rx_queues; i++) {
			wx_free_rx_resources(wx->rx_ring[i]);
			memcpy(wx->rx_ring[i], &temp_ring[i],
			       sizeof(struct wx_ring));
		}

		wx->rx_ring_count = new_rx_count;
	}
}
EXPORT_SYMBOL(wx_set_ring);
MODULE_LICENSE("GPL");

View file

@ -21,6 +21,7 @@ void wx_free_irq(struct wx *wx);
int wx_setup_isb_resources(struct wx *wx);
void wx_free_isb_resources(struct wx *wx);
u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx);
void wx_write_eitr(struct wx_q_vector *q_vector);
void wx_configure_vectors(struct wx *wx);
void wx_clean_all_rx_rings(struct wx *wx);
void wx_clean_all_tx_rings(struct wx *wx);
@ -29,5 +30,7 @@ int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
int wx_set_features(struct net_device *netdev, netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
#endif /* _NGBE_LIB_H_ */

View file

@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <net/ip.h>
#define WX_NCSI_SUP 0x8000
@ -130,6 +131,15 @@
#define WX_RDB_PFCMACDAH 0x19214
#define WX_RDB_LXOFFTXC 0x19218
#define WX_RDB_LXONTXC 0x1921C
/* Flow Control Registers */
#define WX_RDB_RFCV 0x19200
#define WX_RDB_RFCL 0x19220
#define WX_RDB_RFCL_XONE BIT(31)
#define WX_RDB_RFCH 0x19260
#define WX_RDB_RFCH_XOFFE BIT(31)
#define WX_RDB_RFCRT 0x192A0
#define WX_RDB_RFCC 0x192A4
#define WX_RDB_RFCC_RFCE_802_3X BIT(3)
/* ring assignment */
#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4))
#define WX_RDB_PL_CFG_L4HDR BIT(1)
@ -137,8 +147,16 @@
#define WX_RDB_PL_CFG_L2HDR BIT(3)
#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4))
#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4))
#define WX_RDB_RA_CTL 0x194F4
#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */
#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16)
#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17)
#define WX_RDB_RA_CTL_RSS_IPV6 BIT(20)
#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21)
#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22)
#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23)
/******************************* PSR Registers *******************************/
/* psr control */
@ -305,6 +323,7 @@ enum WX_MSCA_CMD_value {
#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
#define WX_7K_ITR 595
#define WX_12K_ITR 336
#define WX_20K_ITR 200
#define WX_SP_MAX_EITR 0x00000FF8U
#define WX_EM_MAX_EITR 0x00007FFCU
@ -330,6 +349,7 @@ enum WX_MSCA_CMD_value {
#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */
#define WX_PX_RR_CFG_VLAN BIT(31)
#define WX_PX_RR_CFG_DROP_EN BIT(30)
#define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
#define WX_PX_RR_CFG_RR_THER_SHIFT 16
#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12)
@ -367,8 +387,46 @@ enum WX_MSCA_CMD_value {
#define WX_MAC_STATE_MODIFIED 0x2
#define WX_MAC_STATE_IN_USE 0x4
/* BitTimes (BT) conversion */
#define WX_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
#define WX_B2BT(BT) ((BT) * 8)
/* Calculate Delay to respond to PFC */
#define WX_PFC_D 672
/* Calculate Cable Delay */
#define WX_CABLE_DC 5556 /* Delay Copper */
/* Calculate Delay incurred from higher layer */
#define WX_HD 6144
/* Calculate Interface Delay */
#define WX_PHY_D 12800
#define WX_MAC_D 4096
#define WX_XAUI_D (2 * 1024)
#define WX_ID (WX_MAC_D + WX_XAUI_D + WX_PHY_D)
/* Calculate PCI Bus delay for low thresholds */
#define WX_PCI_DELAY 10000
/* Calculate delay value in bit times */
#define WX_DV(_max_frame_link, _max_frame_tc) \
((36 * (WX_B2BT(_max_frame_link) + WX_PFC_D + \
(2 * WX_CABLE_DC) + (2 * WX_ID) + WX_HD) / 25 + 1) + \
2 * WX_B2BT(_max_frame_tc))
/* Calculate low threshold delay values */
#define WX_LOW_DV(_max_frame_tc) \
(2 * (2 * WX_B2BT(_max_frame_tc) + (36 * WX_PCI_DELAY / 25) + 1))
/* flow control */
#define WX_DEFAULT_FCPAUSE 0xFFFF
#define WX_MAX_RXD 8192
#define WX_MAX_TXD 8192
#define WX_MIN_RXD 128
#define WX_MIN_TXD 128
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8
#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
#define VMDQ_P(p) p
@ -871,6 +929,19 @@ struct wx_q_vector {
struct wx_ring ring[] ____cacheline_internodealigned_in_smp;
};
struct wx_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
u16 mask; /* Mask used for feature to ring mapping */
u16 offset; /* offset to start of feature */
};
enum wx_ring_f_enum {
RING_F_NONE = 0,
RING_F_RSS,
RING_F_ARRAY_SIZE /* must be last in enum set */
};
enum wx_isb_idx {
WX_ISB_HEADER,
WX_ISB_MISC,
@ -879,6 +950,11 @@ enum wx_isb_idx {
WX_ISB_MAX
};
struct wx_fc_info {
u32 high_water; /* Flow Ctrl High-water */
u32 low_water; /* Flow Ctrl Low-water */
};
/* Statistics counters collected by the MAC */
struct wx_hw_stats {
u64 gprc;
@ -919,6 +995,7 @@ struct wx {
enum sp_media_type media_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
struct wx_fc_info fc;
struct wx_mac_addr *mac_table;
u16 device_id;
u16 vendor_id;
@ -939,6 +1016,8 @@ struct wx {
int speed;
int duplex;
struct phy_device *phydev;
struct phylink *phylink;
struct phylink_config phylink_config;
bool wol_hw_supported;
bool ncsi_enabled;
@ -966,7 +1045,10 @@ struct wx {
struct wx_q_vector *q_vector[64];
unsigned int queues_per_pool;
struct msix_entry *msix_entries;
struct msix_entry *msix_q_entries;
struct msix_entry *msix_entry;
bool msix_in_use;
struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* misc interrupt status block */
dma_addr_t isb_dma;
@ -974,8 +1056,9 @@ struct wx {
u32 isb_tag[WX_ISB_MAX];
#define WX_MAX_RETA_ENTRIES 128
#define WX_RSS_INDIR_TBL_MAX 64
u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
bool rss_enabled;
#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
u32 wol;
@ -992,7 +1075,7 @@ struct wx {
};
#define WX_INTR_ALL (~0ULL)
#define WX_INTR_Q(i) BIT(i)
#define WX_INTR_Q(i) BIT((i) + 1)
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@ -1044,4 +1127,9 @@ rd64(struct wx *wx, u32 reg)
#define wx_dbg(wx, fmt, arg...) \
dev_dbg(&(wx)->pdev->dev, fmt, ##arg)
static inline struct wx *phylink_to_wx(struct phylink_config *config)
{
return container_of(config, struct wx, phylink_config);
}
#endif /* _WX_TYPE_H_ */

View file

@ -7,7 +7,10 @@
#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
#include "ngbe_ethtool.h"
#include "ngbe_type.h"
static void ngbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
@ -41,12 +44,75 @@ static int ngbe_set_wol(struct net_device *netdev,
return 0;
}
/**
 * ngbe_set_ringparam - ethtool .set_ringparam for ngbe
 * @netdev: network interface device structure
 * @ring: requested ring parameters
 * @kernel_ring: extended kernel ring parameters (not used here)
 * @extack: netlink extended ack (not used here)
 *
 * Clamps and aligns the requested descriptor counts, then either
 * records them directly (interface down) or takes the interface down,
 * swaps ring resources via wx_set_ring(), and brings it back up.
 *
 * Return: 0 on success, -ENOMEM if the temporary ring array cannot be
 * allocated.
 */
static int ngbe_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u32 new_rx_count, new_tx_count;
	struct wx_ring *temp_ring;
	int i;

	/* clamp to hardware limits and round up to the descriptor multiple */
	new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);

	if (new_tx_count == wx->tx_ring_count &&
	    new_rx_count == wx->rx_ring_count)
		return 0;

	/* interface down: just record the counts for the next open */
	if (!netif_running(wx->netdev)) {
		for (i = 0; i < wx->num_tx_queues; i++)
			wx->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < wx->num_rx_queues; i++)
			wx->rx_ring[i]->count = new_rx_count;
		wx->tx_ring_count = new_tx_count;
		wx->rx_ring_count = new_rx_count;

		return 0;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
	temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
	if (!temp_ring)
		return -ENOMEM;

	ngbe_down(wx);

	wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
	kvfree(temp_ring);

	wx_configure(wx);
	ngbe_up(wx);

	return 0;
}
/* ethtool .set_channels for ngbe: record the new RSS limit via the
 * common helper, then re-apply the queue layout through setup TC.
 */
static int ngbe_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch)
{
	int ret = wx_set_channels(dev, ch);

	if (ret < 0)
		return ret;

	/* use setup TC to update any traffic class queue mapping */
	return ngbe_setup_tc(dev, netdev_get_num_tc(dev));
}
static const struct ethtool_ops ngbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
.get_drvinfo = wx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.nway_reset = phy_ethtool_nway_reset,
.get_link_ksettings = wx_get_link_ksettings,
.set_link_ksettings = wx_set_link_ksettings,
.nway_reset = wx_nway_reset,
.get_wol = ngbe_get_wol,
.set_wol = ngbe_set_wol,
.get_sset_count = wx_get_sset_count,
@ -54,6 +120,16 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.get_ethtool_stats = wx_get_ethtool_stats,
.get_eth_mac_stats = wx_get_mac_stats,
.get_pause_stats = wx_get_pause_stats,
.get_pauseparam = wx_get_pauseparam,
.set_pauseparam = wx_set_pauseparam,
.get_ringparam = wx_get_ringparam,
.set_ringparam = ngbe_set_ringparam,
.get_coalesce = wx_get_coalesce,
.set_coalesce = wx_set_coalesce,
.get_channels = wx_get_channels,
.set_channels = ngbe_set_channels,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)

View file

@ -79,28 +79,6 @@ static void ngbe_init_type_code(struct wx *wx)
}
}
/**
* ngbe_init_rss_key - Initialize wx RSS key
* @wx: device handle
*
* Allocates and initializes the RSS key if it is not allocated.
**/
static inline int ngbe_init_rss_key(struct wx *wx)
{
u32 *rss_key;
if (!wx->rss_key) {
rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
if (unlikely(!rss_key))
return -ENOMEM;
netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
wx->rss_key = rss_key;
}
return 0;
}
/**
* ngbe_sw_init - Initialize general software structures
* @wx: board private structure to initialize
@ -134,8 +112,9 @@ static int ngbe_sw_init(struct wx *wx)
dev_err(&pdev->dev, "Do not support MSI-X\n");
wx->mac.max_msix_vectors = msix_count;
if (ngbe_init_rss_key(wx))
return -ENOMEM;
wx->ring_feature[RING_F_RSS].limit = min_t(int, NGBE_MAX_RSS_INDICES,
num_online_cpus());
wx->rss_enabled = true;
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
@ -175,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues)
if (queues)
wx_intr_enable(wx, NGBE_INTR_ALL);
else
wx_intr_enable(wx, NGBE_INTR_MISC(wx));
wx_intr_enable(wx, NGBE_INTR_MISC);
}
/**
@ -241,7 +220,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
struct msix_entry *entry = &wx->msix_entries[vector];
struct msix_entry *entry = &wx->msix_q_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring)
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@ -259,7 +238,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
}
}
err = request_irq(wx->msix_entries[vector].vector,
err = request_irq(wx->msix_entry->vector,
ngbe_msix_other, 0, netdev->name, wx);
if (err) {
@ -272,7 +251,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
free_queue_irqs:
while (vector) {
vector--;
free_irq(wx->msix_entries[vector].vector,
free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]);
}
wx_reset_interrupt_capability(wx);
@ -334,15 +313,15 @@ static void ngbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
static void ngbe_down(struct wx *wx)
void ngbe_down(struct wx *wx)
{
phy_stop(wx->phydev);
phylink_stop(wx->phylink);
ngbe_disable_device(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
static void ngbe_up(struct wx *wx)
void ngbe_up(struct wx *wx)
{
wx_configure_vectors(wx);
@ -359,7 +338,7 @@ static void ngbe_up(struct wx *wx)
if (wx->gpio_ctrl)
ngbe_sfp_modules_txrx_powerctl(wx, true);
phy_start(wx->phydev);
phylink_start(wx->phylink);
}
/**
@ -388,7 +367,7 @@ static int ngbe_open(struct net_device *netdev)
if (err)
goto err_free_resources;
err = ngbe_phy_connect(wx);
err = phylink_connect_phy(wx->phylink, wx->phydev);
if (err)
goto err_free_irq;
@ -404,7 +383,7 @@ static int ngbe_open(struct net_device *netdev)
return 0;
err_dis_phy:
phy_disconnect(wx->phydev);
phylink_disconnect_phy(wx->phylink);
err_free_irq:
wx_free_irq(wx);
err_free_resources:
@ -430,7 +409,7 @@ static int ngbe_close(struct net_device *netdev)
ngbe_down(wx);
wx_free_irq(wx);
wx_free_resources(wx);
phy_disconnect(wx->phydev);
phylink_disconnect_phy(wx->phylink);
wx_control_hw(wx, false);
return 0;
@ -480,6 +459,39 @@ static void ngbe_shutdown(struct pci_dev *pdev)
}
}
/**
 * ngbe_setup_tc - routine to configure net_device for multiple traffic
 * classes.
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable; 0 removes all traffic
 *      class configuration
 *
 * Return: 0 (the reinitialization sequence below does not report errors).
 */
int ngbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct wx *wx = netdev_priv(dev);

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ngbe_close(dev);

	/* Tear down the old vector/queue layout before changing the
	 * traffic-class count, then rebuild it to match.
	 */
	wx_clear_interrupt_scheme(wx);

	if (tc)
		netdev_set_num_tc(dev, tc);
	else
		netdev_reset_tc(dev);

	wx_init_interrupt_scheme(wx);

	/* Bring the interface back up only if it was up on entry. */
	if (netif_running(dev))
		ngbe_open(dev);

	return 0;
}
static const struct net_device_ops ngbe_netdev_ops = {
.ndo_open = ngbe_open,
.ndo_stop = ngbe_close,
@ -681,6 +693,7 @@ static int ngbe_probe(struct pci_dev *pdev,
return 0;
err_register:
phylink_destroy(wx->phylink);
wx_control_hw(wx, false);
err_clear_interrupt_scheme:
wx_clear_interrupt_scheme(wx);
@ -710,9 +723,11 @@ static void ngbe_remove(struct pci_dev *pdev)
netdev = wx->netdev;
unregister_netdev(netdev);
phylink_destroy(wx->phylink);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
kfree(wx->rss_key);
kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx);

View file

@ -56,22 +56,28 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr,
return ret;
}
static void ngbe_handle_link_change(struct net_device *dev)
static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
struct wx *wx = netdev_priv(dev);
struct phy_device *phydev;
}
static void ngbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
}
static void ngbe_mac_link_up(struct phylink_config *config,
struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct wx *wx = phylink_to_wx(config);
u32 lan_speed, reg;
phydev = wx->phydev;
if (!(wx->link != phydev->link ||
wx->speed != phydev->speed ||
wx->duplex != phydev->duplex))
return;
wx_fc_enable(wx, tx_pause, rx_pause);
wx->link = phydev->link;
wx->speed = phydev->speed;
wx->duplex = phydev->duplex;
switch (phydev->speed) {
switch (speed) {
case SPEED_10:
lan_speed = 0;
break;
@ -83,56 +89,53 @@ static void ngbe_handle_link_change(struct net_device *dev)
lan_speed = 2;
break;
}
wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed);
if (phydev->link) {
reg = rd32(wx, WX_MAC_TX_CFG);
reg &= ~WX_MAC_TX_CFG_SPEED_MASK;
reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE;
wr32(wx, WX_MAC_TX_CFG, reg);
/* Re configure MAC RX */
reg = rd32(wx, WX_MAC_RX_CFG);
wr32(wx, WX_MAC_RX_CFG, reg);
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
}
phy_print_status(phydev);
reg = rd32(wx, WX_MAC_TX_CFG);
reg &= ~WX_MAC_TX_CFG_SPEED_MASK;
reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE;
wr32(wx, WX_MAC_TX_CFG, reg);
/* Reconfigure MAC Rx */
reg = rd32(wx, WX_MAC_RX_CFG);
wr32(wx, WX_MAC_RX_CFG, reg);
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
}
int ngbe_phy_connect(struct wx *wx)
{
int ret;
static const struct phylink_mac_ops ngbe_mac_ops = {
.mac_config = ngbe_mac_config,
.mac_link_down = ngbe_mac_link_down,
.mac_link_up = ngbe_mac_link_up,
};
ret = phy_connect_direct(wx->netdev,
wx->phydev,
ngbe_handle_link_change,
PHY_INTERFACE_MODE_RGMII_ID);
if (ret) {
wx_err(wx, "PHY connect failed.\n");
return ret;
}
/**
 * ngbe_phylink_init - create the phylink instance for an ngbe device
 * @wx: board private structure
 *
 * Uses the phylink_config embedded in struct wx.  The MAC advertises
 * 10/100/1000 full duplex with symmetric/asymmetric pause, over an
 * RGMII-ID interface only.
 *
 * Return: 0 on success, or the PTR_ERR code from phylink_create().
 */
static int ngbe_phylink_init(struct wx *wx)
{
	struct phylink_config *config;
	phy_interface_t phy_mode;
	struct phylink *phylink;

	config = &wx->phylink_config;
	config->dev = &wx->netdev->dev;
	config->type = PHYLINK_NETDEV;
	config->mac_capabilities = MAC_1000FD | MAC_100FD | MAC_10FD |
				   MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
	/* NOTE(review): mac_managed_pm lets the MAC driver own PHY power
	 * management across suspend/resume — confirm against phylink docs.
	 */
	config->mac_managed_pm = true;

	/* The only supported interface mode is RGMII with internal delay. */
	phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
	__set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces);

	phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	wx->phylink = phylink;

	return 0;
}
static void ngbe_phy_fixup(struct wx *wx)
{
struct phy_device *phydev = wx->phydev;
struct ethtool_eee eee;
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phydev->mac_managed_pm = true;
if (wx->mac_type != em_mac_type_mdi)
return;
/* disable EEE, internal phy does not support eee */
memset(&eee, 0, sizeof(eee));
phy_ethtool_set_eee(phydev, &eee);
}
int ngbe_mdio_init(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
@ -165,11 +168,16 @@ int ngbe_mdio_init(struct wx *wx)
return -ENODEV;
phy_attached_info(wx->phydev);
ngbe_phy_fixup(wx);
wx->link = 0;
wx->speed = 0;
wx->duplex = 0;
ret = ngbe_phylink_init(wx);
if (ret) {
wx_err(wx, "failed to init phylink: %d\n", ret);
return ret;
}
return 0;
}

View file

@ -7,6 +7,5 @@
#ifndef _NGBE_MDIO_H_
#define _NGBE_MDIO_H_
int ngbe_phy_connect(struct wx *wx);
int ngbe_mdio_init(struct wx *wx);
#endif /* _NGBE_MDIO_H_ */

View file

@ -80,7 +80,7 @@
NGBE_PX_MISC_IEN_GPIO)
#define NGBE_INTR_ALL 0x1FF
#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
#define NGBE_INTR_MISC BIT(0)
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
#define NGBE_CFG_LAN_SPEED 0x14440
@ -105,6 +105,7 @@
#define NGBE_FW_CMD_ST_FAIL 0x70657376
#define NGBE_MAX_FDIR_INDICES 7
#define NGBE_MAX_RSS_INDICES 8
#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
@ -130,4 +131,8 @@
extern char ngbe_driver_name[];
void ngbe_down(struct wx *wx);
void ngbe_up(struct wx *wx);
int ngbe_setup_tc(struct net_device *dev, u8 tc);
#endif /* _NGBE_TYPE_H_ */

View file

@ -7,43 +7,93 @@
#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_ethtool.h"
static int txgbe_nway_reset(struct net_device *netdev)
static int txgbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct txgbe *txgbe = netdev_to_txgbe(netdev);
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
int i;
return phylink_ethtool_nway_reset(txgbe->phylink);
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);
if (new_tx_count == wx->tx_ring_count &&
new_rx_count == wx->rx_ring_count)
return 0;
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
for (i = 0; i < wx->num_rx_queues; i++)
wx->rx_ring[i]->count = new_rx_count;
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
return 0;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
if (!temp_ring)
return -ENOMEM;
txgbe_down(wx);
wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
kvfree(temp_ring);
txgbe_up(wx);
return 0;
}
static int txgbe_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
static int txgbe_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct txgbe *txgbe = netdev_to_txgbe(netdev);
int err;
return phylink_ethtool_ksettings_get(txgbe->phylink, cmd);
}
err = wx_set_channels(dev, ch);
if (err < 0)
return err;
static int txgbe_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct txgbe *txgbe = netdev_to_txgbe(netdev);
return phylink_ethtool_ksettings_set(txgbe->phylink, cmd);
/* use setup TC to update any traffic class queue mapping */
return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
static const struct ethtool_ops txgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
.get_drvinfo = wx_get_drvinfo,
.nway_reset = txgbe_nway_reset,
.nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link,
.get_link_ksettings = txgbe_get_link_ksettings,
.set_link_ksettings = txgbe_set_link_ksettings,
.get_link_ksettings = wx_get_link_ksettings,
.set_link_ksettings = wx_set_link_ksettings,
.get_sset_count = wx_get_sset_count,
.get_strings = wx_get_strings,
.get_ethtool_stats = wx_get_ethtool_stats,
.get_eth_mac_stats = wx_get_mac_stats,
.get_pause_stats = wx_get_pause_stats,
.get_pauseparam = wx_get_pauseparam,
.set_pauseparam = wx_set_pauseparam,
.get_ringparam = wx_get_ringparam,
.set_ringparam = txgbe_set_ringparam,
.get_coalesce = wx_get_coalesce,
.set_coalesce = wx_set_coalesce,
.get_channels = wx_get_channels,
.set_channels = txgbe_set_channels,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)

View file

@ -86,7 +86,7 @@ static void txgbe_irq_enable(struct wx *wx, bool queues)
wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
/* unmask interrupt */
wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
wx_intr_enable(wx, TXGBE_INTR_MISC);
if (queues)
wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}
@ -145,7 +145,7 @@ static int txgbe_request_msix_irqs(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
struct msix_entry *entry = &wx->msix_entries[vector];
struct msix_entry *entry = &wx->msix_q_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring)
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@ -168,7 +168,7 @@ static int txgbe_request_msix_irqs(struct wx *wx)
free_queue_irqs:
while (vector) {
vector--;
free_irq(wx->msix_entries[vector].vector,
free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]);
}
wx_reset_interrupt_capability(wx);
@ -206,7 +206,6 @@ static int txgbe_request_irq(struct wx *wx)
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
struct txgbe *txgbe;
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@ -215,8 +214,7 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
txgbe = netdev_to_txgbe(netdev);
phylink_start(txgbe->phylink);
phylink_start(wx->phylink);
/* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC(0));
@ -290,18 +288,22 @@ static void txgbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
static void txgbe_down(struct wx *wx)
void txgbe_down(struct wx *wx)
{
struct txgbe *txgbe = netdev_to_txgbe(wx->netdev);
txgbe_disable_device(wx);
txgbe_reset(wx);
phylink_stop(txgbe->phylink);
phylink_stop(wx->phylink);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
void txgbe_up(struct wx *wx)
{
wx_configure(wx);
txgbe_up_complete(wx);
}
/**
* txgbe_init_type_code - Initialize the shared code
* @wx: pointer to hardware structure
@ -376,6 +378,10 @@ static int txgbe_sw_init(struct wx *wx)
wx_err(wx, "Do not support MSI-X\n");
wx->mac.max_msix_vectors = msix_count;
wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES,
num_online_cpus());
wx->rss_enabled = true;
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
@ -502,6 +508,41 @@ static void txgbe_shutdown(struct pci_dev *pdev)
}
}
/**
 * txgbe_setup_tc - routine to configure net_device for multiple traffic
 * classes.
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable; 0 removes all traffic
 *      class configuration
 *
 * Return: 0 (the reinitialization sequence below does not report errors).
 */
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct wx *wx = netdev_priv(dev);

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		txgbe_close(dev);
	else
		txgbe_reset(wx);

	/* Tear down the old vector/queue layout before changing the
	 * traffic-class count, then rebuild it to match.
	 */
	wx_clear_interrupt_scheme(wx);

	if (tc)
		netdev_set_num_tc(dev, tc);
	else
		netdev_reset_tc(dev);

	wx_init_interrupt_scheme(wx);

	/* Bring the interface back up only if it was up on entry. */
	if (netif_running(dev))
		txgbe_open(dev);

	return 0;
}
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@ -776,6 +817,7 @@ static void txgbe_remove(struct pci_dev *pdev)
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
kfree(wx->rss_key);
kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx);

View file

@ -159,7 +159,8 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config,
phy_interface_t interface)
{
struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev));
struct wx *wx = phylink_to_wx(config);
struct txgbe *txgbe = wx->priv;
if (interface == PHY_INTERFACE_MODE_10GBASER)
return &txgbe->xpcs->pcs;
@ -175,7 +176,7 @@ static void txgbe_mac_config(struct phylink_config *config, unsigned int mode,
static void txgbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
struct wx *wx = netdev_priv(to_net_dev(config->dev));
struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
}
@ -186,9 +187,11 @@ static void txgbe_mac_link_up(struct phylink_config *config,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct wx *wx = netdev_priv(to_net_dev(config->dev));
struct wx *wx = phylink_to_wx(config);
u32 txcfg, wdg;
wx_fc_enable(wx, tx_pause, rx_pause);
txcfg = rd32(wx, WX_MAC_TX_CFG);
txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK;
@ -217,7 +220,7 @@ static void txgbe_mac_link_up(struct phylink_config *config,
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
struct wx *wx = netdev_priv(to_net_dev(config->dev));
struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0);
@ -228,7 +231,7 @@ static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
struct wx *wx = netdev_priv(to_net_dev(config->dev));
struct wx *wx = phylink_to_wx(config);
txgbe_enable_sec_tx_path(wx);
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
@ -253,10 +256,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
phy_interface_t phy_mode;
struct phylink *phylink;
config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
config = &wx->phylink_config;
config->dev = &wx->netdev->dev;
config->type = PHYLINK_NETDEV;
config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
@ -287,7 +287,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
}
}
txgbe->phylink = phylink;
wx->phylink = phylink;
return 0;
}
@ -483,11 +483,11 @@ static void txgbe_irq_handler(struct irq_desc *desc)
TXGBE_PX_MISC_ETH_AN)) {
u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
}
/* unmask interrupt */
wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
wx_intr_enable(wx, TXGBE_INTR_MISC);
}
static int txgbe_gpio_init(struct txgbe *txgbe)
@ -531,7 +531,12 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
girq->parents[0] = wx->msix_entries[wx->num_q_vectors].vector;
/* currently only supported on MSI-X interrupts */
if (!wx->msix_entry)
return -EPERM;
girq->parents[0] = wx->msix_entry->vector;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@ -701,6 +706,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
int txgbe_init_phy(struct txgbe *txgbe)
{
struct wx *wx = txgbe->wx;
int ret;
if (txgbe->wx->media_type == sp_media_copper)
@ -708,46 +714,48 @@ int txgbe_init_phy(struct txgbe *txgbe)
ret = txgbe_swnodes_register(txgbe);
if (ret) {
wx_err(txgbe->wx, "failed to register software nodes\n");
wx_err(wx, "failed to register software nodes\n");
return ret;
}
ret = txgbe_mdio_pcs_init(txgbe);
if (ret) {
wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret);
wx_err(wx, "failed to init mdio pcs: %d\n", ret);
goto err_unregister_swnode;
}
ret = txgbe_phylink_init(txgbe);
if (ret) {
wx_err(txgbe->wx, "failed to init phylink\n");
wx_err(wx, "failed to init phylink\n");
goto err_destroy_xpcs;
}
ret = txgbe_gpio_init(txgbe);
if (ret) {
wx_err(txgbe->wx, "failed to init gpio\n");
wx_err(wx, "failed to init gpio\n");
goto err_destroy_phylink;
}
ret = txgbe_clock_register(txgbe);
if (ret) {
wx_err(txgbe->wx, "failed to register clock: %d\n", ret);
wx_err(wx, "failed to register clock: %d\n", ret);
goto err_destroy_phylink;
}
ret = txgbe_i2c_register(txgbe);
if (ret) {
wx_err(txgbe->wx, "failed to init i2c interface: %d\n", ret);
wx_err(wx, "failed to init i2c interface: %d\n", ret);
goto err_unregister_clk;
}
ret = txgbe_sfp_register(txgbe);
if (ret) {
wx_err(txgbe->wx, "failed to register sfp\n");
wx_err(wx, "failed to register sfp\n");
goto err_unregister_i2c;
}
wx->msix_in_use = true;
return 0;
err_unregister_i2c:
@ -756,7 +764,7 @@ int txgbe_init_phy(struct txgbe *txgbe)
clkdev_drop(txgbe->clock);
clk_unregister(txgbe->clk);
err_destroy_phylink:
phylink_destroy(txgbe->phylink);
phylink_destroy(wx->phylink);
err_destroy_xpcs:
xpcs_destroy(txgbe->xpcs);
err_unregister_swnode:
@ -768,8 +776,8 @@ int txgbe_init_phy(struct txgbe *txgbe)
void txgbe_remove_phy(struct txgbe *txgbe)
{
if (txgbe->wx->media_type == sp_media_copper) {
phylink_disconnect_phy(txgbe->phylink);
phylink_destroy(txgbe->phylink);
phylink_disconnect_phy(txgbe->wx->phylink);
phylink_destroy(txgbe->wx->phylink);
return;
}
@ -777,7 +785,8 @@ void txgbe_remove_phy(struct txgbe *txgbe)
platform_device_unregister(txgbe->i2c_dev);
clkdev_drop(txgbe->clock);
clk_unregister(txgbe->clk);
phylink_destroy(txgbe->phylink);
phylink_destroy(txgbe->wx->phylink);
xpcs_destroy(txgbe->xpcs);
software_node_unregister_node_group(txgbe->nodes.group);
txgbe->wx->msix_in_use = false;
}

View file

@ -98,6 +98,7 @@
#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63
#define TXGBE_MAX_RSS_INDICES 63
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
@ -122,19 +123,16 @@
#define TXGBE_DEFAULT_RX_WORK 128
#endif
#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
#define TXGBE_INTR_MISC BIT(0)
#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1)
#define TXGBE_MAX_EITR GENMASK(11, 3)
extern char txgbe_driver_name[];
static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
return wx->priv;
}
void txgbe_down(struct wx *wx);
void txgbe_up(struct wx *wx);
int txgbe_setup_tc(struct net_device *dev, u8 tc);
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
@ -175,7 +173,6 @@ struct txgbe {
struct wx *wx;
struct txgbe_nodes nodes;
struct dw_xpcs *xpcs;
struct phylink *phylink;
struct platform_device *sfp_dev;
struct platform_device *i2c_dev;
struct clk_lookup *clock;