net: Split core bits of netdev_pick_tx into __netdev_pick_tx
This change splits the core bits of netdev_pick_tx out into a separate function, __netdev_pick_tx. The main idea behind this is to make that code accessible to drivers' select queue functions when they decide to take the standard path instead of their own custom path in their ndo_select_queue routine.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 416186fbf8
parent c10d73671a

2 changed files with 34 additions and 26 deletions
include/linux/netdevice.h
@@ -1403,6 +1403,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 
 extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                             struct sk_buff *skb);
+extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
net/core/dev.c
@@ -2495,37 +2495,44 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+        struct sock *sk = skb->sk;
+        int queue_index = sk_tx_queue_get(sk);
+
+        if (queue_index < 0 || skb->ooo_okay ||
+            queue_index >= dev->real_num_tx_queues) {
+                int new_index = get_xps_queue(dev, skb);
+                if (new_index < 0)
+                        new_index = skb_tx_hash(dev, skb);
+
+                if (queue_index != new_index && sk) {
+                        struct dst_entry *dst =
+                                    rcu_dereference_check(sk->sk_dst_cache, 1);
+
+                        if (dst && skb_dst(skb) == dst)
+                                sk_tx_queue_set(sk, queue_index);
+
+                }
+
+                queue_index = new_index;
+        }
+
+        return queue_index;
+}
+
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                     struct sk_buff *skb)
 {
-        int queue_index;
-        const struct net_device_ops *ops = dev->netdev_ops;
+        int queue_index = 0;
 
-        if (dev->real_num_tx_queues == 1)
-                queue_index = 0;
-        else if (ops->ndo_select_queue) {
-                queue_index = ops->ndo_select_queue(dev, skb);
+        if (dev->real_num_tx_queues != 1) {
+                const struct net_device_ops *ops = dev->netdev_ops;
+                if (ops->ndo_select_queue)
+                        queue_index = ops->ndo_select_queue(dev, skb);
+                else
+                        queue_index = __netdev_pick_tx(dev, skb);
                 queue_index = dev_cap_txqueue(dev, queue_index);
-        } else {
-                struct sock *sk = skb->sk;
-                queue_index = sk_tx_queue_get(sk);
-
-                if (queue_index < 0 || skb->ooo_okay ||
-                    queue_index >= dev->real_num_tx_queues) {
-                        int old_index = queue_index;
-
-                        queue_index = get_xps_queue(dev, skb);
-                        if (queue_index < 0)
-                                queue_index = skb_tx_hash(dev, skb);
-
-                        if (queue_index != old_index && sk) {
-                                struct dst_entry *dst =
-                                    rcu_dereference_check(sk->sk_dst_cache, 1);
-
-                                if (dst && skb_dst(skb) == dst)
-                                        sk_tx_queue_set(sk, queue_index);
-                        }
-                }
         }
 
         skb_set_queue_mapping(skb, queue_index);
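To illustrate the use case described in the commit message, the sketch below shows a hypothetical driver-side select queue routine that pins one traffic class to a reserved queue and defers everything else to the shared logic via __netdev_pick_tx. It is not part of this commit: foo_select_queue and FOO_CTRL_TXQ are made-up names, the TC_PRIO_CONTROL test is only an illustrative classifier, and the sketch assumes __netdev_pick_tx is reachable from the driver (for example a built-in driver, or once the symbol is exported) and uses the two-argument ndo_select_queue prototype of this kernel era.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>

/* Hypothetical queue this imaginary driver reserves for control traffic. */
#define FOO_CTRL_TXQ 0

/*
 * Hypothetical .ndo_select_queue: steer control-priority frames to the
 * reserved queue and let the core XPS/hash logic in __netdev_pick_tx()
 * choose a queue for all other traffic.
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        if (skb->priority == TC_PRIO_CONTROL)
                return FOO_CTRL_TXQ;

        return __netdev_pick_tx(dev, skb);
}

Note that netdev_pick_tx still runs dev_cap_txqueue on whatever the driver returns, so an out-of-range index from a routine like this is clamped exactly as before.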