[NET]: Fix TX timeout regression in Intel drivers.
This fixes a regression added by changeset 53e52c729c
("[NET]: Make ->poll() breakout consistent in Intel ethernet drivers.")
As pointed out by Jesse Brandeburg, for three of the drivers edited
above there is breakout logic in the *_clean_tx_irq() code to prevent
running TX reclaim forever. If this occurs, we have to elide NAPI
poll completion or else those TX events will never be serviced.
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
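
The fix has the same shape in all three drivers. Below is a minimal sketch of the resulting ->poll() routine, not the actual e1000/e1000e/ixgbe code: struct my_adapter, my_clean_tx_irq() and my_clean_rx_irq() are made-up stand-ins for the per-driver pieces, and the 2.6.24-era netif_rx_complete() API used by this patch is assumed. The key line is the tx_cleaned check: if TX reclaim hit its internal breakout limit, the poll routine claims the full budget so NAPI polling is not completed and the leftover TX work is revisited on the next pass.

#include <linux/netdevice.h>

/* Made-up per-driver state; only the members the sketch touches. */
struct my_adapter {
        struct napi_struct napi;
        struct net_device *netdev;
};

/* Hypothetical helpers standing in for the per-driver clean_tx/clean_rx
 * routines: TX reclaim returns nonzero when it stopped early because its
 * breakout limit was hit (so TX work may remain); RX cleaning reports
 * progress via *work_done, bounded by budget. */
int my_clean_tx_irq(struct my_adapter *adapter);
void my_clean_rx_irq(struct my_adapter *adapter, int *work_done, int budget);

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_adapter *adapter = container_of(napi, struct my_adapter, napi);
        int tx_cleaned, work_done = 0;

        tx_cleaned = my_clean_tx_irq(adapter);
        my_clean_rx_irq(adapter, &work_done, budget);

        /* TX reclaim bailed out early: pretend the budget was consumed
         * so we do not complete the poll and get scheduled again. */
        if (tx_cleaned)
                work_done = budget;

        /* Budget not fully consumed: exit polling mode (a real driver
         * also re-enables its interrupts here). */
        if (work_done < budget)
                netif_rx_complete(adapter->netdev, napi);

        return work_done;
}

Reporting work_done = budget back to net_rx_action(), rather than completing the poll, is what keeps the NAPI instance on the poll list until a later pass finds the TX ring fully reclaimed.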
parent d8c89eb3a1
commit d2c7ddd626

3 changed files with 16 additions and 7 deletions
@@ -3919,7 +3919,7 @@ e1000_clean(struct napi_struct *napi, int budget)
 {
         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
         struct net_device *poll_dev = adapter->netdev;
-        int work_done = 0;
+        int tx_cleaned = 0, work_done = 0;
 
         /* Must NOT use netdev_priv macro here. */
         adapter = poll_dev->priv;
@@ -3929,14 +3929,17 @@ e1000_clean(struct napi_struct *napi, int budget)
          * simultaneously. A failure obtaining the lock means
          * tx_ring[0] is currently being cleaned anyway. */
         if (spin_trylock(&adapter->tx_queue_lock)) {
-                e1000_clean_tx_irq(adapter,
-                                   &adapter->tx_ring[0]);
+                tx_cleaned = e1000_clean_tx_irq(adapter,
+                                                &adapter->tx_ring[0]);
                 spin_unlock(&adapter->tx_queue_lock);
         }
 
         adapter->clean_rx(adapter, &adapter->rx_ring[0],
                           &work_done, budget);
 
+        if (tx_cleaned)
+                work_done = budget;
+
         /* If budget not fully consumed, exit the polling mode */
         if (work_done < budget) {
                 if (likely(adapter->itr_setting & 3))

@@ -1384,7 +1384,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
 {
         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
         struct net_device *poll_dev = adapter->netdev;
-        int work_done = 0;
+        int tx_cleaned = 0, work_done = 0;
 
         /* Must NOT use netdev_priv macro here. */
         adapter = poll_dev->priv;
@@ -1394,12 +1394,15 @@ static int e1000_clean(struct napi_struct *napi, int budget)
          * simultaneously. A failure obtaining the lock means
          * tx_ring is currently being cleaned anyway. */
         if (spin_trylock(&adapter->tx_queue_lock)) {
-                e1000_clean_tx_irq(adapter);
+                tx_cleaned = e1000_clean_tx_irq(adapter);
                 spin_unlock(&adapter->tx_queue_lock);
         }
 
         adapter->clean_rx(adapter, &work_done, budget);
 
+        if (tx_cleaned)
+                work_done = budget;
+
         /* If budget not fully consumed, exit the polling mode */
         if (work_done < budget) {
                 if (adapter->itr_setting & 3)

@@ -1468,13 +1468,16 @@ static int ixgbe_clean(struct napi_struct *napi, int budget)
         struct ixgbe_adapter *adapter = container_of(napi,
                                                      struct ixgbe_adapter, napi);
         struct net_device *netdev = adapter->netdev;
-        int work_done = 0;
+        int tx_cleaned = 0, work_done = 0;
 
         /* In non-MSIX case, there is no multi-Tx/Rx queue */
-        ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+        tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
         ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
                            budget);
 
+        if (tx_cleaned)
+                work_done = budget;
+
         /* If budget not fully consumed, exit the polling mode */
         if (work_done < budget) {
                 netif_rx_complete(netdev, napi);