Merge branch 'ethernet-use-core-min-max-mtu'

Jarod Wilson says:

====================
ethernet: use core min/max MTU checking

Now that the network stack core min/max MTU checking infrastructure is in
place, time to start making drivers use it. We'll start with the easiest
ones, the ethernet drivers, split roughly by vendor, with a catch-all
patch at the end.

For the most part, every patch does the same essential thing: removes the
MTU range checking from the drivers' ndo_change_mtu function, puts those
ranges into the core net_device min_mtu and max_mtu fields, and where
possible, removes ndo_change_mtu functions entirely.

These patches have all been built through the 0-day build infrastructure
provided by Intel, on top of net-next as of October 17.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2016-10-18 11:34:22 -04:00
commit 1441dc99fa
102 changed files with 479 additions and 743 deletions
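The per-driver hunks below all follow the same shape. As a rough illustration of the conversion pattern (a sketch only — the foo_* names, FOO_MAX_JUMBO_MTU, and foo_reconfigure_rx() are hypothetical and not taken from any patch in this series):

	/* Before: every driver bounds-checked the new MTU itself. */
	static int foo_change_mtu(struct net_device *netdev, int new_mtu)
	{
		if (new_mtu < FOO_MIN_MTU || new_mtu > FOO_MAX_JUMBO_MTU)
			return -EINVAL;

		netdev->mtu = new_mtu;
		return foo_reconfigure_rx(netdev);	/* hypothetical reinit helper */
	}

	/* After: the limits are advertised once at probe time, and the core
	 * MTU-changing path (dev_set_mtu()) rejects out-of-range values before
	 * ndo_change_mtu() is ever called.
	 */
	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		/* ... allocate and set up netdev ... */
		netdev->min_mtu = ETH_MIN_MTU;		/* 68 */
		netdev->max_mtu = FOO_MAX_JUMBO_MTU;	/* whatever the hardware supports */
		/* ... */
		return register_netdev(netdev);
	}

	static int foo_change_mtu(struct net_device *netdev, int new_mtu)
	{
		/* Range already validated by the core against min_mtu/max_mtu. */
		netdev->mtu = new_mtu;
		return foo_reconfigure_rx(netdev);
	}

Drivers whose callback did nothing beyond the bounds check and the assignment (e100, dl2k, ehea, fm10k, and others below) drop ndo_change_mtu entirely and rely on the core's default handling.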

View file

@ -176,6 +176,8 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere S
#define NUM_FBRS 2
#define MAX_PACKETS_HANDLED 256
#define ET131X_MIN_MTU 64
#define ET131X_MAX_MTU 9216
#define ALCATEL_MULTICAST_PKT 0x01000000
#define ALCATEL_BROADCAST_PKT 0x02000000
@ -3869,9 +3871,6 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
int result = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
if (new_mtu < 64 || new_mtu > 9216)
return -EINVAL;
et131x_disable_txrx(netdev);
netdev->mtu = new_mtu;
@ -3958,6 +3957,8 @@ static int et131x_pci_setup(struct pci_dev *pdev,
netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
netdev->netdev_ops = &et131x_netdev_ops;
netdev->min_mtu = ET131X_MIN_MTU;
netdev->max_mtu = ET131X_MAX_MTU;
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->ethtool_ops = &et131x_ethtool_ops;

View file

@ -443,7 +443,6 @@ struct altera_tse_private {
/* RX/TX MAC FIFO configs */
u32 tx_fifo_depth;
u32 rx_fifo_depth;
u32 max_mtu;
/* Hash filter settings */
u32 hash_filter;

View file

@ -994,20 +994,11 @@ static void tse_set_mac(struct altera_tse_private *priv, bool enable)
*/
static int tse_change_mtu(struct net_device *dev, int new_mtu)
{
struct altera_tse_private *priv = netdev_priv(dev);
unsigned int max_mtu = priv->max_mtu;
unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
if (netif_running(dev)) {
netdev_err(dev, "must be stopped to change its MTU\n");
return -EBUSY;
}
if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
return -EINVAL;
}
dev->mtu = new_mtu;
netdev_update_features(dev);
@ -1446,15 +1437,16 @@ static int altera_tse_probe(struct platform_device *pdev)
of_property_read_bool(pdev->dev.of_node,
"altr,has-supplementary-unicast");
priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
/* Max MTU is 1500, ETH_DATA_LEN */
priv->max_mtu = ETH_DATA_LEN;
priv->dev->max_mtu = ETH_DATA_LEN;
/* Get the max mtu from the device tree. Note that the
* "max-frame-size" parameter is actually max mtu. Definition
* in the ePAPR v1.1 spec and usage differ, so go with usage.
*/
of_property_read_u32(pdev->dev.of_node, "max-frame-size",
&priv->max_mtu);
&priv->dev->max_mtu);
/* The DMA buffer size already accounts for an alignment bias
* to avoid unaligned access exceptions for the NIOS processor,

View file

@ -1556,9 +1556,6 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
struct amd8111e_priv *lp = netdev_priv(dev);
int err;
if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
return -EINVAL;
if (!netif_running(dev)) {
/* new_mtu will be used
* when device starts next time
@ -1874,6 +1871,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
dev->ethtool_ops = &ops;
dev->irq =pdev->irq;
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
dev->min_mtu = AMD8111E_MIN_MTU;
dev->max_mtu = AMD8111E_MAX_MTU;
netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
#if AMD8111E_VLAN_TAG_USED

View file

@ -351,7 +351,6 @@ struct alx_rrd {
#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
#define ALX_MAX_TSO_PKT_SIZE (7*1024)
#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
#define ALX_MIN_FRAME_SIZE (ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN)
#define ALX_MAX_RX_QUEUES 8
#define ALX_MAX_TX_QUEUES 4

View file

@ -892,6 +892,9 @@ static int alx_init_sw(struct alx_priv *alx)
hw->smb_timer = 400;
hw->mtu = alx->dev->mtu;
alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
/* MTU range: 34 - 9256 */
alx->dev->min_mtu = 34;
alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE);
alx->tx_ringsz = 256;
alx->rx_ringsz = 512;
hw->imt = 200;
@ -994,13 +997,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
if ((max_frame < ALX_MIN_FRAME_SIZE) ||
(max_frame > ALX_MAX_FRAME_SIZE))
return -EINVAL;
if (netdev->mtu == mtu)
return 0;
netdev->mtu = mtu;
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);

View file

@ -519,6 +519,26 @@ static int atl1c_set_features(struct net_device *netdev,
return 0;
}
static void atl1c_set_max_mtu(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
switch (hw->nic_type) {
/* These (GbE) devices support jumbo packets, max_mtu 6122 */
case athr_l1c:
case athr_l1d:
case athr_l1d_2:
netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
/* The 10/100 devices don't support jumbo packets, max_mtu 1500 */
default:
netdev->max_mtu = ETH_DATA_LEN;
break;
}
}
/**
* atl1c_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
@ -529,22 +549,9 @@ static int atl1c_set_features(struct net_device *netdev,
static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
int old_mtu = netdev->mtu;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
/* Fast Ethernet controller doesn't support jumbo packet */
if (((hw->nic_type == athr_l2c ||
hw->nic_type == athr_l2c_b ||
hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) ||
max_frame < ETH_ZLEN + ETH_FCS_LEN ||
max_frame > MAX_JUMBO_FRAME_SIZE) {
if (netif_msg_link(adapter))
dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
return -EINVAL;
}
/* set MTU */
if (old_mtu != new_mtu && netif_running(netdev)) {
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
netdev->mtu = new_mtu;
@ -2511,6 +2518,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
netdev->netdev_ops = &atl1c_netdev_ops;
netdev->watchdog_timeo = AT_TX_WATCHDOG;
netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
atl1c_set_ethtool_ops(netdev);
/* TODO: add when ready */
@ -2613,6 +2621,9 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "net device private data init failed\n");
goto err_sw_init;
}
/* set max MTU */
atl1c_set_max_mtu(netdev);
atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
/* Init GPHY as early as possible due to power saving issue */

View file

@ -439,16 +439,10 @@ static int atl1e_set_features(struct net_device *netdev,
static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
int old_mtu = netdev->mtu;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
netdev_warn(adapter->netdev, "invalid MTU setting\n");
return -EINVAL;
}
/* set MTU */
if (old_mtu != new_mtu && netif_running(netdev)) {
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
netdev->mtu = new_mtu;
@ -2272,6 +2266,10 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
netdev->netdev_ops = &atl1e_netdev_ops;
netdev->watchdog_timeo = AT_TX_WATCHDOG;
/* MTU range: 42 - 8170 */
netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
atl1e_set_ethtool_ops(netdev);
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |

View file

@ -2701,23 +2701,15 @@ static void atl1_reset_dev_task(struct work_struct *work)
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
int old_mtu = netdev->mtu;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
if (netif_msg_link(adapter))
dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
return -EINVAL;
}
adapter->hw.max_frame_size = max_frame;
adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
adapter->rx_buffer_len = (max_frame + 7) & ~7;
adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
netdev->mtu = new_mtu;
if ((old_mtu != new_mtu) && netif_running(netdev)) {
if (netif_running(netdev)) {
atl1_down(adapter);
atl1_up(adapter);
}
@ -3031,6 +3023,11 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* is this valid? see atl1_setup_mac_ctrl() */
netdev->features |= NETIF_F_RXCSUM;
/* MTU range: 42 - 10218 */
netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
/*
* patch for some L1 of old version,
* the final version of L1 may not need these

View file

@ -253,7 +253,7 @@ static int atl2_configure(struct atl2_adapter *adapter)
/* set MTU */
ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);
ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
/* 1590 */
ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
@ -925,15 +925,11 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
return -EINVAL;
/* set MTU */
if (hw->max_frame_size != new_mtu) {
netdev->mtu = new_mtu;
ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
VLAN_SIZE + ETHERNET_FCS_SIZE);
}
netdev->mtu = new_mtu;
hw->max_frame_size = new_mtu;
ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN +
VLAN_HLEN + ETH_FCS_LEN);
return 0;
}
@ -1398,6 +1394,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &atl2_netdev_ops;
netdev->ethtool_ops = &atl2_ethtool_ops;
netdev->watchdog_timeo = 5 * HZ;
netdev->min_mtu = 40;
netdev->max_mtu = ETH_DATA_LEN + VLAN_HLEN;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
netdev->mem_start = mmio_start;

View file

@ -228,12 +228,9 @@ static void atl2_force_ps(struct atl2_hw *hw);
#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */
/* The size (in bytes) of an Ethernet packet */
#define ENET_HEADER_SIZE 14
#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
#define ETHERNET_FCS_SIZE 4
#define MAX_JUMBO_FRAME_SIZE 0x2000
#define VLAN_SIZE 4
struct tx_pkt_header {
unsigned pkt_size:11;

View file

@ -59,8 +59,8 @@
#define B44_TX_TIMEOUT (5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU 60
#define B44_MAX_MTU 1500
#define B44_MIN_MTU ETH_ZLEN
#define B44_MAX_MTU ETH_DATA_LEN
#define B44_RX_RING_SIZE 512
#define B44_DEF_RX_RING_PENDING 200
@ -1064,9 +1064,6 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
struct b44 *bp = netdev_priv(dev);
if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
return -EINVAL;
if (!netif_running(dev)) {
/* We'll just catch it later when the
* device is up'd.
@ -2377,6 +2374,8 @@ static int b44_init_one(struct ssb_device *sdev,
dev->netdev_ops = &b44_netdev_ops;
netif_napi_add(dev, &bp->napi, b44_poll, 64);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->min_mtu = B44_MIN_MTU;
dev->max_mtu = B44_MAX_MTU;
dev->irq = sdev->irq;
dev->ethtool_ops = &b44_ethtool_ops;

View file

@ -1622,20 +1622,19 @@ static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
/*
* calculate actual hardware mtu
* adjust mtu, can't be called while device is running
*/
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
int actual_mtu;
struct bcm_enet_priv *priv = netdev_priv(dev);
int actual_mtu = new_mtu;
actual_mtu = mtu;
if (netif_running(dev))
return -EBUSY;
/* add ethernet header + vlan tag size */
actual_mtu += VLAN_ETH_HLEN;
if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
return -EINVAL;
/*
* setup maximum size before we get overflow mark in
* descriptor, note that this will not prevent reception of
@ -1650,22 +1649,7 @@ static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
*/
priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
priv->dma_maxburst * 4);
return 0;
}
/*
* adjust mtu, can't be called while device is running
*/
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
int ret;
if (netif_running(dev))
return -EBUSY;
ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
if (ret)
return ret;
dev->mtu = new_mtu;
return 0;
}
@ -1755,7 +1739,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->enet_is_sw = false;
priv->dma_maxburst = BCMENET_DMA_MAXBURST;
ret = compute_hw_mtu(priv, dev->mtu);
ret = bcm_enet_change_mtu(dev, dev->mtu);
if (ret)
goto out;
@ -1888,6 +1872,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
dev->ethtool_ops = &bcm_enet_ethtool_ops;
/* MTU range: 46 - 2028 */
dev->min_mtu = ETH_ZLEN - ETH_HLEN;
dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
SET_NETDEV_DEV(dev, &pdev->dev);
ret = register_netdev(dev);
@ -2742,7 +2729,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->dma_chan_width = pd->dma_chan_width;
}
ret = compute_hw_mtu(priv, dev->mtu);
ret = bcm_enet_change_mtu(dev, dev->mtu);
if (ret)
goto out;

View file

@ -2298,7 +2298,7 @@ bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
if (bp->dev->mtu > 1500) {
if (bp->dev->mtu > ETH_DATA_LEN) {
u32 val;
/* Set extended packet length bit */
@ -2352,7 +2352,7 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
}
if (bp->dev->mtu > 1500) {
if (bp->dev->mtu > ETH_DATA_LEN) {
/* Set extended packet length bit */
bnx2_write_phy(bp, 0x18, 0x7);
bnx2_read_phy(bp, 0x18, &val);
@ -4985,12 +4985,12 @@ bnx2_init_chip(struct bnx2 *bp)
/* Program the MTU. Also include 4 bytes for CRC32. */
mtu = bp->dev->mtu;
val = mtu + ETH_HLEN + ETH_FCS_LEN;
if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
if (mtu < 1500)
mtu = 1500;
if (mtu < ETH_DATA_LEN)
mtu = ETH_DATA_LEN;
bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
@ -7896,10 +7896,6 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnx2 *bp = netdev_priv(dev);
if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
return -EINVAL;
dev->mtu = new_mtu;
return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
false);
@ -8589,6 +8585,8 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= dev->hw_features;
dev->priv_flags |= IFF_UNICAST_FLT;
dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

View file

@ -6530,9 +6530,9 @@ struct l2_fhdr {
#define MII_BNX2_AER_AER_AN_MMD 0x3800
#define MII_BNX2_BLK_ADDR_COMBO_IEEEB0 0xffe0
#define MIN_ETHERNET_PACKET_SIZE 60
#define MAX_ETHERNET_PACKET_SIZE 1514
#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014
#define MIN_ETHERNET_PACKET_SIZE (ETH_ZLEN - ETH_HLEN)
#define MAX_ETHERNET_PACKET_SIZE ETH_DATA_LEN
#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9000
#define BNX2_RX_COPY_THRESH 128

View file

@ -1396,9 +1396,9 @@ struct bnx2x {
int tx_ring_size;
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE (ETH_ZLEN - ETH_HLEN)
#define ETH_MAX_PACKET_SIZE ETH_DATA_LEN
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
/* TCP with Timestamp Option (32) + IPv6 (40) */
#define ETH_MAX_TPA_HEADER_SIZE 72

View file

@ -2023,7 +2023,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
mtu = bp->dev->mtu;
fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
IP_HEADER_ALIGNMENT_PADDING +
ETH_OVREHEAD +
ETH_OVERHEAD +
mtu +
BNX2X_FW_RX_ALIGN_END;
/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
@ -4855,12 +4855,6 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
return -EAGAIN;
}
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
BNX2X_ERR("Can't support requested MTU size\n");
return -EINVAL;
}
/* This does not race with packet allocation
* because the actual alloc size is
* only updated as part of load

View file

@ -34,12 +34,6 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
u8 dev_addr, u16 addr, u8 byte_cnt,
u8 *o_buf, u8);
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
#define WC_LANE_MAX 4
#define I2C_SWITCH_WIDTH 2
@ -1917,7 +1911,7 @@ static int bnx2x_emac_enable(struct link_params *params,
/* Enable emac for jumbo packets */
EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD)));
/* Strip CRC */
REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
@ -2314,19 +2308,19 @@ static int bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
/* Set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
/* Set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
/* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
@ -2384,18 +2378,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
udelay(30);
/* Set RX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
/* Set TX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
/* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVERHEAD - 2;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
udelay(30);
@ -2516,7 +2510,7 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
ETH_OVREHEAD)/16;
ETH_OVERHEAD)/16;
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
/* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);

View file

@ -12080,8 +12080,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
mtu_size, mtu);
/* if valid: update device mtu */
if (((mtu_size + ETH_HLEN) >=
ETH_MIN_PACKET_SIZE) &&
if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
(mtu_size <=
ETH_MAX_JUMBO_PACKET_SIZE))
bp->dev->mtu = mtu_size;
@ -13315,6 +13314,10 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif
/* MTU range, 46 - 9600 */
dev->min_mtu = ETH_MIN_PACKET_SIZE;
dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
/* get_port_hwinfo() will set prtad and mmds properly */
bp->mdio.prtad = MDIO_PRTAD_NONE;
bp->mdio.mmds = 0;

View file

@ -6290,9 +6290,6 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
if (new_mtu < 60 || new_mtu > 9500)
return -EINVAL;
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
@ -6870,6 +6867,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 60 - 9500 */
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = 9500;
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif

View file

@ -124,7 +124,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_TX_TIMEOUT (5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU 60
#define TG3_MIN_MTU ETH_ZLEN
#define TG3_MAX_MTU(tp) \
(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
@ -14199,9 +14199,6 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
int err;
bool reset_phy = false;
if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
return -EINVAL;
if (!netif_running(dev)) {
/* We'll just catch it later when the
* device is up'd.
@ -17799,6 +17796,10 @@ static int tg3_init_one(struct pci_dev *pdev,
dev->hw_features |= features;
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 60 - 9000 or 1500, depending on hardware */
dev->min_mtu = TG3_MIN_MTU;
dev->max_mtu = TG3_MAX_MTU(tp);
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {

View file

@ -3296,9 +3296,6 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
struct bnad *bnad = netdev_priv(netdev);
u32 rx_count = 0, frame, new_frame;
if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
return -EINVAL;
mutex_lock(&bnad->conf_mutex);
mtu = netdev->mtu;
@ -3465,6 +3462,10 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
netdev->mem_start = bnad->mmio_start;
netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
/* MTU range: 46 - 9000 */
netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
netdev->max_mtu = BNAD_JUMBO_MTU;
netdev->netdev_ops = &bnad_netdev_ops;
bnad_set_ethtool_ops(netdev);
}

View file

@ -56,7 +56,7 @@
#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
#define GEM_MTU_MIN_SIZE 68
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)
@ -1986,19 +1986,9 @@ static int macb_close(struct net_device *dev)
static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
struct macb *bp = netdev_priv(dev);
u32 max_mtu;
if (netif_running(dev))
return -EBUSY;
max_mtu = ETH_DATA_LEN;
if (bp->caps & MACB_CAPS_JUMBO)
max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
@ -3027,6 +3017,13 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_free_netdev;
}
/* MTU range: 68 - 1500 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
if (bp->caps & MACB_CAPS_JUMBO)
dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
else
dev->max_mtu = ETH_DATA_LEN;
mac = of_get_mac_address(np);
if (mac)
ether_addr_copy(bp->dev->dev_addr, mac);

View file

@ -394,7 +394,7 @@ struct xgmac_priv {
};
/* XGMAC Configuration Settings */
#define MAX_MTU 9000
#define XGMAC_MAX_MTU 9000
#define PAUSE_TIME 0x400
#define DMA_RX_RING_SZ 256
@ -1360,20 +1360,6 @@ static void xgmac_set_rx_mode(struct net_device *dev)
*/
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct xgmac_priv *priv = netdev_priv(dev);
int old_mtu;
if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
return -EINVAL;
}
old_mtu = dev->mtu;
/* return early if the buffer sizes will not change */
if (old_mtu == new_mtu)
return 0;
/* Stop everything, get ready to change the MTU */
if (!netif_running(dev))
return 0;
@ -1804,6 +1790,10 @@ static int xgmac_probe(struct platform_device *pdev)
ndev->features |= ndev->hw_features;
ndev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 46 - 9000 */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
ndev->max_mtu = XGMAC_MAX_MTU;
/* Get the MAC address */
xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
if (!is_valid_ether_addr(ndev->dev_addr))

View file

@ -2868,17 +2868,6 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
struct octnic_ctrl_pkt nctrl;
int ret = 0;
/* Limit the MTU to make sure the ethernet packets are between 68 bytes
* and 16000 bytes
*/
if ((new_mtu < LIO_MIN_MTU_SIZE) ||
(new_mtu > LIO_MAX_MTU_SIZE)) {
dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
return -EINVAL;
}
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
@ -3891,6 +3880,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
netdev->hw_features = netdev->hw_features &
~NETIF_F_HW_VLAN_CTAG_RX;
/* MTU range: 68 - 16000 */
netdev->min_mtu = LIO_MIN_MTU_SIZE;
netdev->max_mtu = LIO_MAX_MTU_SIZE;
/* Point to the properties for octeon device to which this
* interface belongs.
*/

View file

@ -29,7 +29,7 @@
#include <linux/ptp_clock_kernel.h>
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE 68
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
struct oct_nic_stats_resp {
u64 rh;

View file

@ -645,16 +645,6 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
struct octeon_mgmt *p = netdev_priv(netdev);
int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
/* Limit the MTU to make sure the ethernet packets are between
* 64 bytes and 16383 bytes.
*/
if (size_without_fcs < 64 || size_without_fcs > 16383) {
dev_warn(p->dev, "MTU must be between %d and %d.\n",
64 - OCTEON_MGMT_RX_HEADROOM,
16383 - OCTEON_MGMT_RX_HEADROOM);
return -EINVAL;
}
netdev->mtu = new_mtu;
cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
@ -1491,6 +1481,9 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
netdev->netdev_ops = &octeon_mgmt_ops;
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;
mac = of_get_mac_address(pdev->dev.of_node);
if (mac)

View file

@ -1312,12 +1312,6 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
if (new_mtu > NIC_HW_MAX_FRS)
return -EINVAL;
if (new_mtu < NIC_HW_MIN_FRS)
return -EINVAL;
if (nicvf_update_hw_max_frs(nic, new_mtu))
return -EINVAL;
netdev->mtu = new_mtu;
@ -1630,6 +1624,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
/* MTU range: 64 - 9200 */
netdev->min_mtu = NIC_HW_MIN_FRS;
netdev->max_mtu = NIC_HW_MAX_FRS;
INIT_WORK(&nic->reset_task, nicvf_reset_task);
err = register_netdev(netdev);

View file

@ -85,6 +85,11 @@ struct t1_rx_mode {
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
#define PM3393_MAX_FRAME_SIZE 9600
#define VSC7326_MAX_MTU 9600
enum {
CHBT_BOARD_N110,
CHBT_BOARD_N210,

View file

@ -825,8 +825,6 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
if (!mac->ops->set_mtu)
return -EOPNOTSUPP;
if (new_mtu < 68)
return -EINVAL;
if ((ret = mac->ops->set_mtu(mac, new_mtu)))
return ret;
dev->mtu = new_mtu;
@ -1101,6 +1099,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
netdev->ethtool_ops = &t1_ethtool_ops;
switch (bi->board) {
case CHBT_BOARD_CHT110:
case CHBT_BOARD_N110:
case CHBT_BOARD_N210:
case CHBT_BOARD_CHT210:
netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
case CHBT_BOARD_CHN204:
netdev->max_mtu = VSC7326_MAX_MTU;
break;
default:
netdev->max_mtu = ETH_DATA_LEN;
break;
}
}
if (t1_init_sw_modules(adapter, bi) < 0) {

View file

@ -47,9 +47,6 @@
#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
#define MAX_FRAME_SIZE 9600
#define IPG 12
#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
@ -331,10 +328,7 @@ static int pm3393_set_mtu(struct cmac *cmac, int mtu)
{
int enabled = cmac->instance->enabled;
/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
mtu += 14 + 4;
if (mtu > MAX_FRAME_SIZE)
return -EINVAL;
mtu += ETH_HLEN + ETH_FCS_LEN;
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)

View file

@ -11,8 +11,6 @@
/* 30 minutes for full statistics update */
#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
#define MAX_MTU 9600
/* The egress WM value 0x01a01fff should be used only when the
* interface is down (MAC port disabled). This is a workaround
* for disabling the T2/MAC flow-control. When the interface is
@ -452,9 +450,6 @@ static int mac_set_mtu(struct cmac *mac, int mtu)
{
int port = mac->instance->index;
if (mtu > MAX_MTU)
return -EINVAL;
/* max_len includes header and FCS */
vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4);
return 0;

View file

@ -1843,9 +1843,6 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
struct enic *enic = netdev_priv(netdev);
int running = netif_running(netdev);
if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
return -EINVAL;
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
@ -2751,6 +2748,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9000 */
netdev->min_mtu = ENIC_MIN_MTU;
netdev->max_mtu = ENIC_MAX_MTU;
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Cannot register net device, aborting\n");

View file

@ -30,7 +30,7 @@
#define ENIC_MIN_RQ_DESCS 64
#define ENIC_MAX_RQ_DESCS 4096
#define ENIC_MIN_MTU 68
#define ENIC_MIN_MTU ETH_MIN_MTU
#define ENIC_MAX_MTU 9000
#define ENIC_MULTICAST_PERFECT_FILTERS 32

View file

@ -76,7 +76,6 @@ static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
@ -106,7 +105,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_set_rx_mode = set_multicast,
.ndo_do_ioctl = rio_ioctl,
.ndo_tx_timeout = rio_tx_timeout,
.ndo_change_mtu = change_mtu,
};
static int
@ -230,6 +228,10 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
#if 0
dev->features = NETIF_F_IP_CSUM;
#endif
/* MTU range: 68 - 1536 or 8000 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE;
pci_set_drvdata (pdev, dev);
ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
@ -1198,22 +1200,6 @@ clear_stats (struct net_device *dev)
return 0;
}
static int
change_mtu (struct net_device *dev, int new_mtu)
{
struct netdev_private *np = netdev_priv(dev);
int max = (np->jumbo) ? MAX_JUMBO : 1536;
if ((new_mtu < 68) || (new_mtu > max)) {
return -EINVAL;
}
dev->mtu = new_mtu;
return 0;
}
static void
set_multicast (struct net_device *dev)
{

View file

@ -580,6 +580,10 @@ static int sundance_probe1(struct pci_dev *pdev,
dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
/* MTU range: 68 - 8191 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = 8191;
pci_set_drvdata(pdev, dev);
i = register_netdev(dev);
@ -713,8 +717,6 @@ static int sundance_probe1(struct pci_dev *pdev,
static int change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
return -EINVAL;
if (netif_running(dev))
return -EBUSY;
dev->mtu = new_mtu;

View file

@ -1338,7 +1338,10 @@ static int gfar_probe(struct platform_device *ofdev)
/* Fill in the dev structure */
dev->watchdog_timeo = TX_TIMEOUT;
/* MTU range: 50 - 9586 */
dev->mtu = 1500;
dev->min_mtu = 50;
dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
dev->netdev_ops = &gfar_netdev_ops;
dev->ethtool_ops = &gfar_ethtool_ops;
@ -2592,12 +2595,6 @@ static int gfar_set_mac_address(struct net_device *dev)
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
struct gfar_private *priv = netdev_priv(dev);
int frame_size = new_mtu + ETH_HLEN;
if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
netif_err(priv, drv, dev, "Invalid MTU setting\n");
return -EINVAL;
}
while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
cpu_relax();

View file

@ -446,8 +446,7 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
if (mac_cb->mac_type == HNAE_PORT_DEBUG)
max_frm = MAC_MAX_MTU_DBG;
if ((new_mtu < MAC_MIN_MTU) || (new_frm > max_frm) ||
(new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size))
if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)
return -EINVAL;
if (!drv->config_max_frame_length)

View file

@ -22,6 +22,7 @@
#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"
#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)
@ -1405,10 +1406,6 @@ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
struct hnae_handle *h = priv->ae_handle;
int ret;
/* MTU < 68 is an error and causes problems on some kernels */
if (new_mtu < 68)
return -EINVAL;
if (!h->dev->ops->set_mtu)
return -ENOTSUPP;
@ -1953,14 +1950,20 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
/* MTU range: 68 - 9578 (v1) or 9706 (v2) */
ndev->min_mtu = MAC_MIN_MTU;
switch (priv->enet_ver) {
case AE_VERSION_2:
ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
ndev->max_mtu = MAC_MAX_MTU_V2 -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
default:
ndev->max_mtu = MAC_MAX_MTU -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
}

View file

@ -1981,14 +1981,6 @@ static void ehea_set_multicast_list(struct net_device *dev)
ehea_update_bcmc_registrations();
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
@ -2968,7 +2960,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ehea_set_multicast_list,
.ndo_change_mtu = ehea_change_mtu,
.ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
.ndo_tx_timeout = ehea_tx_watchdog,
@ -3041,6 +3032,10 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
NETIF_F_IP_CSUM;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
/* MTU range: 68 - 9022 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = EHEA_MAX_PACKET_SIZE;
INIT_WORK(&port->reset_task, ehea_reset_port);
INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

View file

@ -1099,9 +1099,6 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu)
struct emac_instance *dev = netdev_priv(ndev);
int ret = 0;
if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
return -EINVAL;
DBG(dev, "change_mtu(%d)" NL, new_mtu);
if (netif_running(ndev)) {
@ -2564,7 +2561,7 @@ static int emac_init_config(struct emac_instance *dev)
if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
return -ENXIO;
if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
dev->max_mtu = 1500;
dev->max_mtu = ETH_DATA_LEN;
if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
dev->rx_fifo_size = 2048;
if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
@ -2890,6 +2887,10 @@ static int emac_probe(struct platform_device *ofdev)
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &emac_ethtool_ops;
/* MTU range: 46 - 1500 or whatever is in OF */
ndev->min_mtu = EMAC_MIN_MTU;
ndev->max_mtu = dev->max_mtu;
netif_carrier_off(ndev);
err = register_netdev(ndev);

View file

@ -2286,14 +2286,6 @@ static int e100_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
return -EINVAL;
netdev->mtu = new_mtu;
return 0;
}
static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
@ -2834,7 +2826,6 @@ static const struct net_device_ops e100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = e100_set_multicast_list,
.ndo_set_mac_address = e100_set_mac_address,
.ndo_change_mtu = e100_change_mtu,
.ndo_do_ioctl = e100_do_ioctl,
.ndo_tx_timeout = e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER

View file

@ -1085,6 +1085,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
netdev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 46 - 16110 */
netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
/* initialize eeprom parameters */
@ -3549,13 +3553,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
e_err(probe, "Invalid MTU setting\n");
return -EINVAL;
}
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Adapter-specific max frame size limits. */
switch (hw->mac_type) {

View file

@ -5974,19 +5974,12 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
if ((new_mtu > ETH_DATA_LEN) &&
!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
/* Supported frame sizes */
if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) ||
(max_frame > adapter->max_hw_frame_size)) {
e_err("Unsupported MTU setting\n");
return -EINVAL;
}
/* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
if ((adapter->hw.mac.type >= e1000_pch2lan) &&
!(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
@ -7187,6 +7180,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
/* MTU range: 68 - max_hw_frame_size */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = adapter->max_hw_frame_size -
(VLAN_ETH_HLEN + ETH_FCS_LEN);
if (e1000e_enable_mng_pass_thru(&adapter->hw))
adapter->flags |= FLAG_MNG_PT_ENABLED;

View file

@ -706,16 +706,6 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
return err;
}
static int fm10k_change_mtu(struct net_device *dev, int new_mtu)
{
if (new_mtu < 68 || new_mtu > FM10K_MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
/**
* fm10k_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
@ -1405,7 +1395,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_start_xmit = fm10k_xmit_frame,
.ndo_set_mac_address = fm10k_set_mac,
.ndo_change_mtu = fm10k_change_mtu,
.ndo_tx_timeout = fm10k_tx_timeout,
.ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid,
@ -1490,5 +1479,9 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
dev->hw_features |= hw_features;
/* MTU range: 68 - 15342 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = FM10K_MAX_JUMBO_FRAME_SIZE;
return dev;
}

View file

@ -2239,13 +2239,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct i40e_vsi *vsi = np->vsi;
/* MTU < 68 is an error and causes problems on some kernels */
if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
return -EINVAL;
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
@ -9220,6 +9215,11 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
i40e_fcoe_config_netdev(netdev, vsi);
#endif
/* MTU range: 68 - 9706 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = I40E_MAX_RXBUFFER -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
return 0;
}

View file

@ -2133,10 +2133,6 @@ static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
return -EINVAL;
netdev->mtu = new_mtu;
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
@ -2424,6 +2420,10 @@ static void i40evf_init_task(struct work_struct *work)
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
/* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN);
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
adapter->hw.mac.addr);

View file

@ -357,7 +357,8 @@
#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
#define MAX_JUMBO_FRAME_SIZE 0x2600
#define MAX_STD_JUMBO_FRAME_SIZE 9216
/* PBA constants */
#define E1000_PBA_34K 0x0022

View file

@ -2468,6 +2468,10 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
/* before reading the NVM, reset the controller to put the device in a
@ -5408,17 +5412,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct pci_dev *pdev = adapter->pdev;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
dev_err(&pdev->dev, "Invalid MTU setting\n");
return -EINVAL;
}
#define MAX_STD_JUMBO_FRAME_SIZE 9238
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
return -EINVAL;
}
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

View file

@ -85,7 +85,8 @@
#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
#define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */
#define MAX_JUMBO_FRAME_SIZE 0x3F00
#define MAX_JUMBO_FRAME_SIZE 0x3F00
#define MAX_STD_JUMBO_FRAME_SIZE 9216
/* 802.1q VLAN Packet Size */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */

View file

@ -2356,16 +2356,6 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
struct igbvf_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
max_frame > MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
return -EINVAL;
}
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
usleep_range(1000, 2000);
/* igbvf_down has a dependency on max_frame_size */
@ -2786,6 +2776,10 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
/*reset the controller to put the device in a known good state */
err = hw->mac.ops.reset_hw(hw);
if (err) {

View file

@ -487,6 +487,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
/* MTU range: 68 - 16114 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
/* make sure the EEPROM is good */
if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
@ -1619,18 +1623,6 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
/* MTU < 68 is an error for IPv4 traffic, just don't allow it */
if ((new_mtu < 68) ||
(max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
netif_err(adapter, probe, adapter->netdev,
"Invalid MTU setting %d\n", new_mtu);
return -EINVAL;
}
if (old_max_frame == max_frame)
return 0;
if (netif_running(netdev))
ixgb_down(adapter, true);

View file

@ -6049,11 +6049,6 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* MTU < 68 is an error and causes problems on some kernels */
if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
return -EINVAL;
/*
* For 82599EB we cannot allow legacy VFs to enable their receive
@ -6062,7 +6057,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
(new_mtu > ETH_DATA_LEN))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@ -9608,6 +9603,10 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
/* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
netdev->dcbnl_ops = &dcbnl_ops;

View file

@ -3742,24 +3742,8 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
int ret;
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
break;
default:
if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
break;
}
/* MTU < 68 is an error and causes problems on some kernels */
if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, max_frame);
@ -4104,6 +4088,23 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 1504 or 9710 */
netdev->min_mtu = ETH_MIN_MTU;
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
default:
if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
else
netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
break;
}
if (IXGBE_REMOVED(hw->hw_addr)) {
err = -EIO;
goto err_sw_init;

View file

@ -3024,29 +3024,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
mvneta_rx_reset(pp);
}
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
if (mtu < 68) {
netdev_err(dev, "cannot change mtu to less than 68\n");
return -EINVAL;
}
/* 9676 == 9700 - 20 and rounding to 8 */
if (mtu > 9676) {
netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
mtu = 9676;
}
if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
}
return mtu;
}
static void mvneta_percpu_enable(void *arg)
{
struct mvneta_port *pp = arg;
@ -3067,9 +3044,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
struct mvneta_port *pp = netdev_priv(dev);
int ret;
mtu = mvneta_check_mtu_valid(dev, mtu);
if (mtu < 0)
return -EINVAL;
if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
}
dev->mtu = mtu;
@ -4154,6 +4133,11 @@ static int mvneta_probe(struct platform_device *pdev)
dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
/* MTU range: 68 - 9676 */
dev->min_mtu = ETH_MIN_MTU;
/* 9676 == 9700 - 20 and rounding to 8 */
dev->max_mtu = 9676;
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");

View file

@ -5453,29 +5453,6 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
phy_stop(ndev->phydev);
}
/* Return positive if MTU is valid */
static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
{
if (mtu < 68) {
netdev_err(dev, "cannot change mtu to less than 68\n");
return -EINVAL;
}
/* 9676 == 9700 - 20 and rounding to 8 */
if (mtu > 9676) {
netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
mtu = 9676;
}
if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}
return mtu;
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
struct ethtool_ringparam *ring)
{
@ -5717,10 +5694,10 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
struct mvpp2_port *port = netdev_priv(dev);
int err;
mtu = mvpp2_check_mtu_valid(dev, mtu);
if (mtu < 0) {
err = mtu;
goto error;
if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}
if (!netif_running(dev)) {
@ -6212,6 +6189,11 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
dev->vlan_features |= features;
/* MTU range: 68 - 9676 */
dev->min_mtu = ETH_MIN_MTU;
/* 9676 == 9700 - 20 and rounding to 8 */
dev->max_mtu = 9676;
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");

View file

@ -1209,9 +1209,6 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
int retval;
struct pxa168_eth_private *pep = netdev_priv(dev);
if ((mtu > 9500) || (mtu < 68))
return -EINVAL;
dev->mtu = mtu;
retval = set_port_config_ext(pep);
@ -1459,6 +1456,10 @@ static int pxa168_eth_probe(struct platform_device *pdev)
dev->base_addr = 0;
dev->ethtool_ops = &pxa168_ethtool_ops;
/* MTU range: 68 - 9500 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = 9500;
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
if (pdev->dev.of_node)

View file

@ -2900,9 +2900,6 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu)
{
int err;
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
if (!netif_running(dev)) {
dev->mtu = new_mtu;
return 0;
@ -3857,6 +3854,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->watchdog_timeo = TX_WATCHDOG;
dev->irq = hw->pdev->irq;
/* MTU range: 60 - 9000 */
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = ETH_JUMBO_MTU;
if (highmem)
dev->features |= NETIF_F_HIGHDMA;

View file

@ -2398,16 +2398,6 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
u16 ctl, mode;
u32 imask;
/* MTU size outside the spec */
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
/* MTU > 1500 on yukon FE and FE+ not allowed */
if (new_mtu > ETH_DATA_LEN &&
(hw->chip_id == CHIP_ID_YUKON_FE ||
hw->chip_id == CHIP_ID_YUKON_FE_P))
return -EINVAL;
if (!netif_running(dev)) {
dev->mtu = new_mtu;
netdev_update_features(dev);
@ -4808,6 +4798,14 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
dev->features |= dev->hw_features;
/* MTU range: 60 - 1500 or 9000 */
dev->min_mtu = ETH_ZLEN;
if (hw->chip_id == CHIP_ID_YUKON_FE ||
hw->chip_id == CHIP_ID_YUKON_FE_P)
dev->max_mtu = ETH_DATA_LEN;
else
dev->max_mtu = ETH_JUMBO_MTU;
/* try to get mac address in the following order:
* 1) from device tree data
* 2) from internal registers set by bootloader

View file

@ -2205,10 +2205,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
dev->mtu, new_mtu);
if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
en_err(priv, "Bad MTU size:%d.\n", new_mtu);
return -EPERM;
}
if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
en_err(priv, "MTU size:%d requires frags but XDP running\n",
new_mtu);
@ -3288,6 +3284,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
/* MTU range: 46 - hw-specific max */
dev->min_mtu = MLX4_EN_MIN_MTU;
dev->max_mtu = priv->max_mtu;
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;

View file

@ -2851,31 +2851,13 @@ static int mlx5e_set_features(struct net_device *netdev,
return err ? -EINVAL : 0;
}
#define MXL5_HW_MIN_MTU 64
#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool was_opened;
u16 max_mtu;
u16 min_mtu;
int err = 0;
bool reset;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
max_mtu = MLX5E_HW2SW_MTU(max_mtu);
min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
if (new_mtu > max_mtu || new_mtu < min_mtu) {
netdev_err(netdev,
"%s: Bad MTU (%d), valid range is: [%d..%d]\n",
__func__, new_mtu, min_mtu, max_mtu);
return -EINVAL;
}
mutex_lock(&priv->state_lock);
reset = !priv->params.lro_en &&
@ -3835,6 +3817,7 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
const struct mlx5e_profile *profile;
struct mlx5e_priv *priv;
u16 max_mtu;
int err;
priv = netdev_priv(netdev);
@ -3865,6 +3848,11 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
mlx5e_init_l2_addr(priv);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
mlx5e_set_dev_port_mtu(netdev);
if (profile->enable)

View file

@ -5807,24 +5807,19 @@ static int netdev_change_mtu(struct net_device *dev, int new_mtu)
if (hw->dev_count > 1)
if (dev != hw_priv->dev)
return 0;
if (new_mtu < 60)
return -EINVAL;
if (dev->mtu != new_mtu) {
hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
if (hw_mtu > MAX_RX_BUF_SIZE)
return -EINVAL;
if (hw_mtu > REGULAR_RX_BUF_SIZE) {
hw->features |= RX_HUGE_FRAME;
hw_mtu = MAX_RX_BUF_SIZE;
} else {
hw->features &= ~RX_HUGE_FRAME;
hw_mtu = REGULAR_RX_BUF_SIZE;
}
hw_mtu = (hw_mtu + 3) & ~3;
hw_priv->mtu = hw_mtu;
dev->mtu = new_mtu;
hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
if (hw_mtu > REGULAR_RX_BUF_SIZE) {
hw->features |= RX_HUGE_FRAME;
hw_mtu = MAX_RX_BUF_SIZE;
} else {
hw->features &= ~RX_HUGE_FRAME;
hw_mtu = REGULAR_RX_BUF_SIZE;
}
hw_mtu = (hw_mtu + 3) & ~3;
hw_priv->mtu = hw_mtu;
dev->mtu = new_mtu;
return 0;
}
@ -7099,6 +7094,12 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
dev->netdev_ops = &netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
/* MTU range: 60 - 1894 */
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = MAX_RX_BUF_SIZE -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
if (register_netdev(dev))
goto pcidev_init_reg_err;
port_set_power_saving(port, true);
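
The ksz884x change keeps its receive-buffer sizing logic but drops the range test; the advertised ceiling is simply the largest MTU whose frame still fits the RX buffer. A small worked example of that budget follows, where RX_BUF_SIZE is set to 1916 only so the result lines up with the 1894 ceiling quoted in the comment above (the driver's real MAX_RX_BUF_SIZE lives in its header and is not shown in this diff).

/* Sketch of the buffer-budget arithmetic; RX_BUF_SIZE is assumed. */
#include <stdio.h>

#define ETH_HLEN        14
#define ETH_FCS_LEN     4
#define VLAN_HLEN       4
#define RX_BUF_SIZE     1916    /* assumption chosen to match the 1894 ceiling */

int main(void)
{
        int max_mtu = RX_BUF_SIZE - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
        int hw_mtu  = max_mtu + ETH_HLEN + ETH_FCS_LEN; /* on-the-wire frame */

        hw_mtu = (hw_mtu + 3) & ~3;     /* round up to a 4-byte multiple */
        printf("max_mtu=%d hw_mtu=%d\n", max_mtu, hw_mtu);     /* 1894, 1912 */
        return 0;
}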

View file

@ -3232,10 +3232,6 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
struct myri10ge_priv *mgp = netdev_priv(dev);
int error = 0;
if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
return -EINVAL;
}
netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
if (mgp->running) {
/* if we change the mtu on an active device, we must
@ -4086,13 +4082,19 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
myri10ge_setup_dca(mgp);
#endif
pci_set_drvdata(pdev, mgp);
if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
myri10ge_initial_mtu = 68;
/* MTU range: 68 - 9000 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
if (myri10ge_initial_mtu > netdev->max_mtu)
myri10ge_initial_mtu = netdev->max_mtu;
if (myri10ge_initial_mtu < netdev->min_mtu)
myri10ge_initial_mtu = netdev->min_mtu;
netdev->mtu = myri10ge_initial_mtu;
netdev->netdev_ops = &myri10ge_netdev_ops;
netdev->mtu = myri10ge_initial_mtu;
netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
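
myri10ge is one of the few drivers here that keeps extra logic: its myri10ge_initial_mtu module parameter is now clamped into the advertised window rather than validated against hand-written constants. A tiny sketch of that clamp, using the 68/9000 window from the comment above; the kernel's clamp_val() macro expresses the same thing in one line.

/* Sketch of clamping a requested initial MTU into [min_mtu, max_mtu]. */
#include <stdio.h>

static int clamp_mtu(int mtu, int min_mtu, int max_mtu)
{
        if (mtu < min_mtu)
                return min_mtu;
        if (mtu > max_mtu)
                return max_mtu;
        return mtu;
}

int main(void)
{
        printf("%d\n", clamp_mtu(16000, 68, 9000));     /* 9000 */
        printf("%d\n", clamp_mtu(40, 68, 9000));        /* 68 */
        printf("%d\n", clamp_mtu(1500, 68, 9000));      /* 1500 */
        return 0;
}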

View file

@ -929,6 +929,10 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &ethtool_ops;
/* MTU range: 64 - 2024 */
dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS;
if (mtu)
dev->mtu = mtu;
@ -2526,9 +2530,6 @@ static void __set_rx_mode(struct net_device *dev)
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
return -EINVAL;
dev->mtu = new_mtu;
/* synchronized against open : rtnl_lock() held by caller */

View file

@ -6678,11 +6678,6 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
struct s2io_nic *sp = netdev_priv(dev);
int ret = 0;
if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
return -EPERM;
}
dev->mtu = new_mtu;
if (netif_running(dev)) {
s2io_stop_all_tx_queue(sp);
@ -8019,6 +8014,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
}
/* MTU range: 46 - 9600 */
dev->min_mtu = MIN_MTU;
dev->max_mtu = S2IO_JUMBO_SIZE;
/* store mac addresses from CAM to s2io_nic structure */
do_s2io_store_unicast_mc(sp);

View file

@ -27,7 +27,7 @@
(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
#endif
#define VXGE_HW_MIN_MTU 68
#define VXGE_HW_MIN_MTU ETH_MIN_MTU
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500

View file

@ -3074,11 +3074,6 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
vxge_debug_entryexit(vdev->level_trace,
"%s:%d", __func__, __LINE__);
if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
vxge_debug_init(vdev->level_err,
"%s: mtu size is invalid", dev->name);
return -EPERM;
}
/* check if device is down already */
if (unlikely(!is_vxge_card_up(vdev))) {
@ -3462,6 +3457,10 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
"%s : using High DMA", __func__);
}
/* MTU range: 68 - 9600 */
ndev->min_mtu = VXGE_HW_MIN_MTU;
ndev->max_mtu = VXGE_HW_MAX_MTU;
ret = register_netdev(ndev);
if (ret) {
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),

View file

@ -2278,11 +2278,6 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
struct nfp_net_rx_ring *tmp_rings;
int err;
if (new_mtu < 68 || new_mtu > nn->max_mtu) {
nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
return -EINVAL;
}
old_mtu = netdev->mtu;
old_fl_bufsz = nn->fl_bufsz;
new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
@ -2930,6 +2925,11 @@ int nfp_net_netdev_init(struct net_device *netdev)
ether_setup(netdev);
netdev->netdev_ops = &nfp_net_netdev_ops;
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
netif_carrier_off(netdev);
nfp_net_set_ethtool_ops(netdev);

View file

@ -3008,17 +3008,12 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
struct fe_priv *np = netdev_priv(dev);
int old_mtu;
if (new_mtu < 64 || new_mtu > np->pkt_limit)
return -EINVAL;
old_mtu = dev->mtu;
dev->mtu = new_mtu;
/* return early if the buffer sizes will not change */
if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
return 0;
if (old_mtu == new_mtu)
return 0;
/* synchronized against open : rtnl_lock() held by caller */
if (netif_running(dev)) {
@ -5719,6 +5714,10 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
/* Add loopback capability to the device. */
dev->hw_features |= NETIF_F_LOOPBACK;
/* MTU range: 64 - 1500 or 9100 */
dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
dev->max_mtu = np->pkt_limit;
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||

View file

@ -2260,16 +2260,10 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
int max_frame;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
int err;
max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
netdev_err(netdev, "Invalid MTU setting\n");
return -EINVAL;
}
if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
@ -2633,6 +2627,11 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->features = netdev->hw_features;
pch_gbe_set_ethtool_ops(netdev);
/* MTU range: 46 - 10300 */
netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
netdev->max_mtu = PCH_GBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
pch_gbe_mac_load_mac_addr(&adapter->hw);
pch_gbe_mac_reset_hw(&adapter->hw);

View file

@ -53,7 +53,7 @@
* - Multiqueue RX/TX
*/
#define PE_MIN_MTU 64
#define PE_MIN_MTU (ETH_ZLEN + ETH_HLEN)
#define PE_MAX_MTU 9000
#define PE_DEF_MTU ETH_DATA_LEN
@ -1611,9 +1611,6 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
int running;
int ret = 0;
if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
return -EINVAL;
running = netif_running(dev);
if (running) {
@ -1635,7 +1632,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
}
/* Setup checksum channels if large MTU and none already allocated */
if (new_mtu > 1500 && !mac->num_cs) {
if (new_mtu > PE_DEF_MTU && !mac->num_cs) {
pasemi_mac_setup_csrings(mac);
if (!mac->num_cs) {
ret = -ENOMEM;
@ -1757,6 +1754,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &pasemi_netdev_ops;
dev->mtu = PE_DEF_MTU;
/* MTU range: 64 - 9000 */
dev->min_mtu = PE_MIN_MTU;
dev->max_mtu = PE_MAX_MTU;
/* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

View file

@ -362,8 +362,9 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
#define QEDE_MAX_JUMBO_PACKET_SIZE 9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#endif /* _QEDE_H_ */

View file

@ -723,19 +723,11 @@ static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
}
/* Netdevice NDOs */
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define ETH_MIN_PACKET_SIZE 60
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
struct qede_dev *edev = netdev_priv(ndev);
union qede_reload_args args;
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
DP_ERR(edev, "Can't support requested MTU size\n");
return -EINVAL;
}
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Configuring MTU size of %d\n", new_mtu);

View file

@ -2391,6 +2391,10 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->hw_features = hw_features;
/* MTU range: 46 - 9600 */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
/* Set network device HW mac */
ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
}
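
Several hunks in this series (qede and pch_gbe above, and the QCA framing and stmmac changes further down) spell the 46-byte floor as ETH_ZLEN - ETH_HLEN rather than a bare constant: the shortest legal Ethernet frame is 60 bytes before the FCS, 14 of those are header, and the remaining 46 bytes are payload. A two-line check of that arithmetic:

/* Minimum-payload arithmetic behind the "46" in the comments above. */
#include <assert.h>

#define ETH_ZLEN 60     /* minimum Ethernet frame length, FCS not counted */
#define ETH_HLEN 14     /* destination MAC + source MAC + EtherType */

int main(void)
{
        assert(ETH_ZLEN - ETH_HLEN == 46);      /* smallest payload == smallest MTU */
        return 0;
}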

View file

@ -1024,12 +1024,6 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int rc = 0;
if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
" not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
return -EINVAL;
}
rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
if (!rc)

View file

@ -2342,6 +2342,10 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
/* MTU range: 68 - 9600 */
netdev->min_mtu = P3P_MIN_MTU;
netdev->max_mtu = P3P_MAX_MTU;
err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
adapter->drv_sds_rings);
if (err)

View file

@ -43,9 +43,9 @@
/* Frame length is invalid */
#define QCAFRM_INVFRAME (QCAFRM_ERR_BASE - 4)
/* Min/Max Ethernet MTU */
#define QCAFRM_ETHMINMTU 46
#define QCAFRM_ETHMAXMTU 1500
/* Min/Max Ethernet MTU: 46/1500 */
#define QCAFRM_ETHMINMTU (ETH_ZLEN - ETH_HLEN)
#define QCAFRM_ETHMAXMTU ETH_DATA_LEN
/* Min/Max frame lengths */
#define QCAFRM_ETHMINLEN (QCAFRM_ETHMINMTU + ETH_HLEN)

View file

@ -780,24 +780,12 @@ qcaspi_netdev_uninit(struct net_device *dev)
dev_kfree_skb(qca->rx_skb);
}
static int
qcaspi_netdev_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < QCAFRM_ETHMINMTU) || (new_mtu > QCAFRM_ETHMAXMTU))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static const struct net_device_ops qcaspi_netdev_ops = {
.ndo_init = qcaspi_netdev_init,
.ndo_uninit = qcaspi_netdev_uninit,
.ndo_open = qcaspi_netdev_open,
.ndo_stop = qcaspi_netdev_close,
.ndo_start_xmit = qcaspi_netdev_xmit,
.ndo_change_mtu = qcaspi_netdev_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = qcaspi_netdev_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
@ -814,6 +802,10 @@ qcaspi_netdev_setup(struct net_device *dev)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->tx_queue_len = 100;
/* MTU range: 46 - 1500 */
dev->min_mtu = QCAFRM_ETHMINMTU;
dev->max_mtu = QCAFRM_ETHMAXMTU;
qca = netdev_priv(dev);
memset(qca, 0, sizeof(struct qcaspi));

View file

@ -1277,10 +1277,6 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
struct cp_private *cp = netdev_priv(dev);
/* check for invalid MTU, according to hardware limits */
if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
return -EINVAL;
/* if network interface not up, no need for complexity */
if (!netif_running(dev)) {
dev->mtu = new_mtu;
@ -2010,6 +2006,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
/* MTU range: 60 - 4096 */
dev->min_mtu = CP_MIN_MTU;
dev->max_mtu = CP_MAX_MTU;
rc = register_netdev(dev);
if (rc)
goto err_out_iomap;

View file

@ -924,19 +924,10 @@ static int rtl8139_set_features(struct net_device *dev, netdev_features_t featur
return 0;
}
static int rtl8139_change_mtu(struct net_device *dev, int new_mtu)
{
if (new_mtu < 68 || new_mtu > MAX_ETH_DATA_SIZE)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_open = rtl8139_open,
.ndo_stop = rtl8139_close,
.ndo_get_stats64 = rtl8139_get_stats64,
.ndo_change_mtu = rtl8139_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = rtl8139_set_mac_address,
.ndo_start_xmit = rtl8139_start_xmit,
@ -1022,6 +1013,10 @@ static int rtl8139_init_one(struct pci_dev *pdev,
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
/* MTU range: 68 - 1770 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = MAX_ETH_DATA_SIZE;
/* tp zeroed and aligned in alloc_etherdev */
tp = netdev_priv(dev);

View file

@ -6673,10 +6673,6 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
if (new_mtu < ETH_ZLEN ||
new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
return -EINVAL;
if (new_mtu > ETH_DATA_LEN)
rtl_hw_jumbo_enable(tp);
else
@ -8430,6 +8426,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
/* MTU range: 60 - hw-specific max */
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = rtl_chip_infos[chipset].jumbo_max;
tp->hw_start = cfg->hw_start;
tp->event_slow = cfg->event_slow;

View file

@ -1953,12 +1953,6 @@ static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
int running = netif_running(dev);
int err;
#define ROCKER_PORT_MIN_MTU 68
#define ROCKER_PORT_MAX_MTU 9000
if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
return -EINVAL;
if (running)
rocker_port_stop(dev);
@ -2536,6 +2530,8 @@ static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
}
}
#define ROCKER_PORT_MIN_MTU ETH_MIN_MTU
#define ROCKER_PORT_MAX_MTU 9000
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
const struct pci_dev *pdev = rocker->pdev;
@ -2570,6 +2566,10 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
/* MTU range: 68 - 9000 */
dev->min_mtu = ROCKER_PORT_MIN_MTU;
dev->max_mtu = ROCKER_PORT_MAX_MTU;
err = rocker_world_port_pre_init(rocker_port);
if (err) {
dev_err(&pdev->dev, "port world pre-init failed\n");

View file

@ -1820,19 +1820,6 @@ static int sxgbe_set_features(struct net_device *dev,
*/
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
/* RFC 791, page 25, "Every internet module must be able to forward
* a datagram of 68 octets without further fragmentation."
*/
if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
MIN_MTU, MAX_MTU);
return -EINVAL;
}
/* Return if the buffer sizes will not change */
if (dev->mtu == new_mtu)
return 0;
dev->mtu = new_mtu;
if (!netif_running(dev))
@ -2144,6 +2131,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
/* assign filtering support */
ndev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9000 */
ndev->min_mtu = MIN_MTU;
ndev->max_mtu = MAX_MTU;
priv->msg_enable = netif_msg_init(debug, default_msg_level);
/* Enable TCP segmentation offload for all DMA channels */

View file

@ -2715,27 +2715,11 @@ static void stmmac_set_rx_mode(struct net_device *dev)
*/
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int max_mtu;
if (netif_running(dev)) {
pr_err("%s: must be stopped to change its MTU\n", dev->name);
return -EBUSY;
}
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
max_mtu = JUMBO_LEN;
else
max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
if (priv->plat->maxmtu < max_mtu)
max_mtu = priv->plat->maxmtu;
if ((new_mtu < 46) || (new_mtu > max_mtu)) {
pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
return -EINVAL;
}
dev->mtu = new_mtu;
netdev_update_features(dev);
@ -3317,6 +3301,15 @@ int stmmac_dvr_probe(struct device *device,
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
ndev->max_mtu = JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
if (priv->plat->maxmtu < ndev->max_mtu)
ndev->max_mtu = priv->plat->maxmtu;
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
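
stmmac is one of the more involved conversions in this group: the ceiling depends on whether the MAC has enhanced descriptors or a >= 4.00 Synopsys core, and is then further capped by the platform's maxmtu. The probe-time selection above can be read as the small decision function sketched below; JUMBO_LEN and the SKB head limit are replaced by assumed placeholder values, so take the numbers as illustrative only.

/* Sketch of stmmac's max_mtu selection: capability ceiling, then platform cap. */
#include <stdio.h>
#include <stdbool.h>

#define ASSUMED_JUMBO_LEN       9000    /* placeholder for JUMBO_LEN */
#define ASSUMED_SKB_HEAD_MAX    1500    /* placeholder for SKB_MAX_HEAD(...) */

static int pick_max_mtu(bool enh_desc, bool core_ge_4_00, int plat_maxmtu)
{
        int max_mtu = (enh_desc || core_ge_4_00) ? ASSUMED_JUMBO_LEN
                                                 : ASSUMED_SKB_HEAD_MAX;

        if (plat_maxmtu < max_mtu)      /* platform data may lower the ceiling */
                max_mtu = plat_maxmtu;
        return max_mtu;
}

int main(void)
{
        printf("%d\n", pick_max_mtu(true, false, 3800));        /* 3800 */
        printf("%d\n", pick_max_mtu(false, false, 16000));      /* 1500 */
        return 0;
}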

View file

@ -3863,9 +3863,6 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
struct cas *cp = netdev_priv(dev);
if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
return -EINVAL;
dev->mtu = new_mtu;
if (!netif_running(dev) || !netif_device_present(dev))
return 0;
@ -5115,6 +5112,10 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
/* MTU range: 60 - varies or 9000 */
dev->min_mtu = CAS_MIN_MTU;
dev->max_mtu = CAS_MAX_MTU;
if (register_netdev(dev)) {
dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_free_consistent;

View file

@ -139,7 +139,6 @@ static const struct net_device_ops vsw_ops = {
.ndo_set_mac_address = sunvnet_set_mac_addr_common,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = sunvnet_tx_timeout_common,
.ndo_change_mtu = sunvnet_change_mtu_common,
.ndo_start_xmit = vsw_start_xmit,
.ndo_select_queue = vsw_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
@ -239,6 +238,10 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
NETIF_F_HW_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
/* MTU range: 68 - 65535 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = VNET_MAX_MTU;
SET_NETDEV_DEV(dev, &vdev->dev);
return dev;

View file

@ -6754,9 +6754,6 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
struct niu *np = netdev_priv(dev);
int err, orig_jumbo, new_jumbo;
if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
return -EINVAL;
orig_jumbo = (dev->mtu > ETH_DATA_LEN);
new_jumbo = (new_mtu > ETH_DATA_LEN);
@ -9823,6 +9820,10 @@ static int niu_pci_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
/* MTU range: 68 - 9216 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = NIU_MAX_MTU;
niu_assign_netdev_ops(dev);
err = niu_get_invariants(np);

View file

@ -2476,9 +2476,9 @@ static void gem_set_multicast(struct net_device *dev)
}
/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU 68
#define GEM_MIN_MTU ETH_MIN_MTU
#if 1
#define GEM_MAX_MTU 1500
#define GEM_MAX_MTU ETH_DATA_LEN
#else
#define GEM_MAX_MTU 9000
#endif
@ -2487,9 +2487,6 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
struct gem *gp = netdev_priv(dev);
if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
return -EINVAL;
dev->mtu = new_mtu;
/* We'll just catch it later when the device is up'd or resumed */
@ -2977,6 +2974,10 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - 1500 (Jumbo mode is broken) */
dev->min_mtu = GEM_MIN_MTU;
dev->max_mtu = GEM_MAX_MTU;
/* Register with kernel */
if (register_netdev(dev)) {
pr_err("Cannot register net device, aborting\n");

View file

@ -159,7 +159,6 @@ static const struct net_device_ops vnet_ops = {
.ndo_set_mac_address = sunvnet_set_mac_addr_common,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = sunvnet_tx_timeout_common,
.ndo_change_mtu = sunvnet_change_mtu_common,
.ndo_start_xmit = vnet_start_xmit,
.ndo_select_queue = vnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
@ -202,6 +201,10 @@ static struct vnet *vnet_new(const u64 *local_mac,
NETIF_F_HW_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
/* MTU range: 68 - 65535 */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = VNET_MAX_MTU;
SET_NETDEV_DEV(dev, &vdev->dev);
err = register_netdev(dev);

View file

@ -1583,16 +1583,6 @@ void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
}
EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);
int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu)
{
if (new_mtu < 68 || new_mtu > 65535)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
{
return -EINVAL;

View file

@ -15,6 +15,8 @@
#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */
#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */
#define VNET_MAX_MTU 65535
/* VNET packets are sent in buffers with the first 6 bytes skipped
* so that after the ethernet header the IPv4/IPv6 headers are aligned
* properly.
@ -125,7 +127,6 @@ int sunvnet_close_common(struct net_device *dev);
void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
void sunvnet_tx_timeout_common(struct net_device *dev);
int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu);
int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
struct vnet_port *(*vnet_tx_port)
(struct sk_buff *, struct net_device *));

View file

@ -761,16 +761,6 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
ENTER;
if (new_mtu == ndev->mtu)
RET(0);
/* enforce minimum frame size */
if (new_mtu < ETH_ZLEN) {
netdev_err(ndev, "mtu %d is less then minimal %d\n",
new_mtu, ETH_ZLEN);
RET(-EINVAL);
}
ndev->mtu = new_mtu;
if (netif_running(ndev)) {
bdx_close(ndev);
@ -2057,6 +2047,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef BDX_LLTX
ndev->features |= NETIF_F_LLTX;
#endif
/* MTU range: 60 - 16384 */
ndev->min_mtu = ETH_ZLEN;
ndev->max_mtu = BDX_MAX_MTU;
spin_lock_init(&priv->tx_lock);
/*bdx_hw_reset(priv); */

View file

@ -74,6 +74,9 @@
* ifcontig eth1 txqueuelen 3000 - to change it at runtime */
#define BDX_NDEV_TXQ_LEN 3000
/* Max MTU for Jumbo Frame mode, per tehutinetworks.net Features FAQ is 16k */
#define BDX_MAX_MTU (16 * 1024)
#define FIFO_SIZE 4096
#define FIFO_EXTRA_SPACE 1024

View file

@ -1766,21 +1766,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
return (ret == 0) ? 0 : err;
}
static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
{
struct netcp_intf *netcp = netdev_priv(ndev);
/* MTU < 68 is an error for IPv4 traffic */
if ((new_mtu < 68) ||
(new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
return -EINVAL;
}
ndev->mtu = new_mtu;
return 0;
}
static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
struct netcp_intf *netcp = netdev_priv(ndev);
@ -1886,7 +1871,6 @@ static const struct net_device_ops netcp_netdev_ops = {
.ndo_start_xmit = netcp_ndo_start_xmit,
.ndo_set_rx_mode = netcp_set_rx_mode,
.ndo_do_ioctl = netcp_ndo_ioctl,
.ndo_change_mtu = netcp_ndo_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
@ -1923,6 +1907,10 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
ndev->hw_features = ndev->features;
ndev->vlan_features |= NETIF_F_SG;
/* MTU range: 68 - 9486 */
ndev->min_mtu = ETH_MIN_MTU;
ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
netcp = netdev_priv(ndev);
spin_lock_init(&netcp->lock);
INIT_LIST_HEAD(&netcp->module_head);

View file

@ -59,6 +59,9 @@
/* Maximum number of packets to handle per "poll". */
#define TILE_NET_WEIGHT 64
/* Maximum Jumbo Packet MTU */
#define TILE_JUMBO_MAX_MTU 9000
/* Number of entries in each iqueue. */
#define IQUEUE_ENTRIES 512
@ -2101,17 +2104,6 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EOPNOTSUPP;
}
/* Change the MTU. */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
if (new_mtu < 68)
return -EINVAL;
if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
/* Change the Ethernet address of the NIC.
*
* The hypervisor driver does not support changing MAC address. However,
@ -2154,7 +2146,6 @@ static const struct net_device_ops tile_net_ops = {
.ndo_start_xmit = tile_net_tx,
.ndo_select_queue = tile_net_select_queue,
.ndo_do_ioctl = tile_net_ioctl,
.ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@ -2174,7 +2165,11 @@ static void tile_net_setup(struct net_device *dev)
ether_setup(dev);
dev->netdev_ops = &tile_net_ops;
dev->watchdog_timeo = TILE_NET_TIMEOUT;
dev->mtu = 1500;
/* MTU range: 68 - 1500 or 9000 */
dev->mtu = ETH_DATA_LEN;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = jumbo_num ? TILE_JUMBO_MAX_MTU : ETH_DATA_LEN;
features |= NETIF_F_HW_CSUM;
features |= NETIF_F_SG;

View file

@ -87,7 +87,7 @@
/* This should be 1500 if "jumbo" is not set in LIPP. */
/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
/* ISSUE: This has not been thoroughly tested (except at 1500). */
#define TILE_NET_MTU 1500
#define TILE_NET_MTU ETH_DATA_LEN
/* HACK: Define this to verify incoming packets. */
/* #define TILE_NET_VERIFY_INGRESS */
@ -2095,26 +2095,6 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
}
/*
* Change the "mtu".
*
* The "change_mtu" method is usually not needed.
* If you need it, it must be like this.
*/
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
PDEBUG("tile_net_change_mtu()\n");
/* Check ranges. */
if ((new_mtu < 68) || (new_mtu > 1500))
return -EINVAL;
/* Accept the value. */
dev->mtu = new_mtu;
return 0;
}
/*
* Change the Ethernet Address of the NIC.
@ -2229,7 +2209,6 @@ static const struct net_device_ops tile_net_ops = {
.ndo_start_xmit = tile_net_tx,
.ndo_do_ioctl = tile_net_ioctl,
.ndo_get_stats64 = tile_net_get_stats64,
.ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
@ -2252,7 +2231,11 @@ static void tile_net_setup(struct net_device *dev)
dev->netdev_ops = &tile_net_ops;
dev->watchdog_timeo = TILE_NET_TIMEOUT;
dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
/* MTU range: 68 - 1500 */
dev->mtu = TILE_NET_MTU;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = TILE_NET_MTU;
features |= NETIF_F_HW_CSUM;
features |= NETIF_F_SG;

View file

@ -1114,24 +1114,6 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
}
return packets_done;
}
/**
* gelic_net_change_mtu - changes the MTU of an interface
* @netdev: interface device structure
* @new_mtu: new MTU value
*
* returns 0 on success, <0 on failure
*/
int gelic_net_change_mtu(struct net_device *netdev, int new_mtu)
{
/* no need to re-alloc skbs or so -- the max mtu is about 2.3k
* and mtu is outbound only anyway */
if ((new_mtu < GELIC_NET_MIN_MTU) ||
(new_mtu > GELIC_NET_MAX_MTU)) {
return -EINVAL;
}
netdev->mtu = new_mtu;
return 0;
}
/**
* gelic_card_interrupt - event handler for gelic_net
@ -1446,7 +1428,6 @@ static const struct net_device_ops gelic_netdevice_ops = {
.ndo_stop = gelic_net_stop,
.ndo_start_xmit = gelic_net_xmit,
.ndo_set_rx_mode = gelic_net_set_multi,
.ndo_change_mtu = gelic_net_change_mtu,
.ndo_tx_timeout = gelic_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@ -1513,6 +1494,10 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
netdev->features |= NETIF_F_VLAN_CHALLENGED;
}
/* MTU range: 64 - 1518 */
netdev->min_mtu = GELIC_NET_MIN_MTU;
netdev->max_mtu = GELIC_NET_MAX_MTU;
status = register_netdev(netdev);
if (status) {
dev_err(ctodev(card), "%s:Couldn't register %s %d\n",

View file

@ -373,7 +373,6 @@ int gelic_net_stop(struct net_device *netdev);
int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
void gelic_net_set_multi(struct net_device *netdev);
void gelic_net_tx_timeout(struct net_device *netdev);
int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
/* shared ethtool ops */

View file

@ -2558,7 +2558,6 @@ static const struct net_device_ops gelic_wl_netdevice_ops = {
.ndo_stop = gelic_wl_stop,
.ndo_start_xmit = gelic_net_xmit,
.ndo_set_rx_mode = gelic_net_set_multi,
.ndo_change_mtu = gelic_net_change_mtu,
.ndo_tx_timeout = gelic_net_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,

View file

@ -1278,25 +1278,6 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
return packets_done;
}
/**
* spider_net_change_mtu - changes the MTU of an interface
* @netdev: interface device structure
* @new_mtu: new MTU value
*
* returns 0 on success, <0 on failure
*/
static int
spider_net_change_mtu(struct net_device *netdev, int new_mtu)
{
/* no need to re-alloc skbs or so -- the max mtu is about 2.3k
* and mtu is outbound only anyway */
if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
(new_mtu > SPIDER_NET_MAX_MTU) )
return -EINVAL;
netdev->mtu = new_mtu;
return 0;
}
/**
* spider_net_set_mac - sets the MAC of an interface
* @netdev: interface device structure
@ -2229,7 +2210,6 @@ static const struct net_device_ops spider_net_ops = {
.ndo_start_xmit = spider_net_xmit,
.ndo_set_rx_mode = spider_net_set_multi,
.ndo_set_mac_address = spider_net_set_mac,
.ndo_change_mtu = spider_net_change_mtu,
.ndo_do_ioctl = spider_net_do_ioctl,
.ndo_tx_timeout = spider_net_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
@ -2299,6 +2279,10 @@ spider_net_setup_netdev(struct spider_net_card *card)
/* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
* NETIF_F_HW_VLAN_CTAG_FILTER */
/* MTU range: 64 - 2294 */
netdev->min_mtu = SPIDER_NET_MIN_MTU;
netdev->max_mtu = SPIDER_NET_MAX_MTU;
netdev->irq = card->pdev->irq;
card->num_rx_ints = 0;
card->ignore_rx_ramfull = 0;

Some files were not shown because too many files have changed in this diff