linux/net/core/link_watch.c
Eric Dumazet 123e495ecc net: linkwatch: be more careful about dev->linkwatch_dev_tracker
Apparently a concurrent linkwatch_add_event() could
run while we are in __linkwatch_run_queue().

We need to release dev->linkwatch_dev_tracker under
lweventlist_lock protection to avoid this race.
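
To make the race concrete, here is a trimmed sketch of the resulting pattern in
__linkwatch_run_queue() (simplified from the full function in the file below,
which also applies the urgent-only filter and a per-run budget): the tracker is
freed while lweventlist_lock is still held, before linkwatch_do_dev() runs.

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &wrk);
	while (!list_empty(&wrk)) {
		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);
		/* Free the tracker before dropping the lock, so that a
		 * concurrent linkwatch_add_event() re-adding the device
		 * allocates a fresh tracker instead of racing with this one.
		 */
		netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);	/* ends with a plain dev_put() */
		spin_lock_irq(&lweventlist_lock);
	}
	spin_unlock_irq(&lweventlist_lock);
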

syzbot report:
[   77.935949][ T3661] reference already released.
[   77.941015][ T3661] allocated in:
[   77.944482][ T3661]  linkwatch_fire_event+0x202/0x260
[   77.950318][ T3661]  netif_carrier_on+0x9c/0x100
[   77.955120][ T3661]  __ieee80211_sta_join_ibss+0xc52/0x1590
[   77.960888][ T3661]  ieee80211_sta_create_ibss.cold+0xd2/0x11f
[   77.966908][ T3661]  ieee80211_ibss_work.cold+0x30e/0x60f
[   77.972483][ T3661]  ieee80211_iface_work+0xb70/0xd00
[   77.977715][ T3661]  process_one_work+0x9ac/0x1680
[   77.982671][ T3661]  worker_thread+0x652/0x11c0
[   77.987371][ T3661]  kthread+0x405/0x4f0
[   77.991465][ T3661]  ret_from_fork+0x1f/0x30
[   77.995895][ T3661] freed in:
[   77.999006][ T3661]  linkwatch_do_dev+0x96/0x160
[   78.004014][ T3661]  __linkwatch_run_queue+0x233/0x6a0
[   78.009496][ T3661]  linkwatch_event+0x4a/0x60
[   78.014099][ T3661]  process_one_work+0x9ac/0x1680
[   78.019034][ T3661]  worker_thread+0x652/0x11c0
[   78.023719][ T3661]  kthread+0x405/0x4f0
[   78.027810][ T3661]  ret_from_fork+0x1f/0x30
[   78.042541][ T3661] ------------[ cut here ]------------
[   78.048253][ T3661] WARNING: CPU: 0 PID: 3661 at lib/ref_tracker.c:120 ref_tracker_free.cold+0x110/0x14e
[   78.062364][ T3661] Modules linked in:
[   78.066424][ T3661] CPU: 0 PID: 3661 Comm: kworker/0:5 Not tainted 5.16.0-rc4-next-20211210-syzkaller #0
[   78.076075][ T3661] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
[   78.090648][ T3661] Workqueue: events linkwatch_event
[   78.095890][ T3661] RIP: 0010:ref_tracker_free.cold+0x110/0x14e
[   78.102191][ T3661] Code: ea 03 48 c1 e0 2a 0f b6 04 02 84 c0 74 04 3c 03 7e 4c 8b 7b 18 e8 6b 54 e9 fa e8 26 4d 57 f8 4c 89 ee 48 89 ef e8 fb 33 36 00 <0f> 0b 41 bd ea ff ff ff e9 bd 60 e9 fa 4c 89 f7 e8 16 45 a2 f8 e9
[   78.127211][ T3661] RSP: 0018:ffffc90002b5fb18 EFLAGS: 00010246
[   78.133684][ T3661] RAX: 0000000000000000 RBX: ffff88807467f700 RCX: 0000000000000000
[   78.141928][ T3661] RDX: 0000000000000001 RSI: 0000000000000001 RDI: 0000000000000001
[   78.150087][ T3661] RBP: ffff888057e105b8 R08: 0000000000000001 R09: ffffffff8ffa1967
[   78.158211][ T3661] R10: 0000000000000001 R11: 0000000000000000 R12: 1ffff9200056bf65
[   78.166204][ T3661] R13: 0000000000000292 R14: ffff88807467f718 R15: 00000000c0e0008c
[   78.174321][ T3661] FS:  0000000000000000(0000) GS:ffff8880b9c00000(0000) knlGS:0000000000000000
[   78.183310][ T3661] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   78.190156][ T3661] CR2: 000000c000208800 CR3: 000000007f7b5000 CR4: 00000000003506f0
[   78.198235][ T3661] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[   78.206214][ T3661] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[   78.214328][ T3661] Call Trace:
[   78.217679][ T3661]  <TASK>
[   78.220621][ T3661]  ? __sanitizer_cov_trace_const_cmp4+0x1c/0x70
[   78.226981][ T3661]  ? nlmsg_notify+0xbe/0x280
[   78.231607][ T3661]  ? ref_tracker_dir_exit+0x330/0x330
[   78.237654][ T3661]  ? linkwatch_do_dev+0x96/0x160
[   78.242628][ T3661]  ? __linkwatch_run_queue+0x233/0x6a0
[   78.248170][ T3661]  ? linkwatch_event+0x4a/0x60
[   78.252946][ T3661]  ? process_one_work+0x9ac/0x1680
[   78.258136][ T3661]  ? worker_thread+0x853/0x11c0
[   78.263020][ T3661]  ? kthread+0x405/0x4f0
[   78.267905][ T3661]  ? ret_from_fork+0x1f/0x30
[   78.272670][ T3661]  ? netdev_state_change+0xa1/0x130
[   78.278019][ T3661]  ? netdev_exit+0xd0/0xd0
[   78.282466][ T3661]  ? dev_activate+0x420/0xa60
[   78.287261][ T3661]  linkwatch_do_dev+0x96/0x160
[   78.292043][ T3661]  __linkwatch_run_queue+0x233/0x6a0
[   78.297505][ T3661]  ? linkwatch_do_dev+0x160/0x160
[   78.302561][ T3661]  linkwatch_event+0x4a/0x60
[   78.307225][ T3661]  process_one_work+0x9ac/0x1680
[   78.312292][ T3661]  ? pwq_dec_nr_in_flight+0x2a0/0x2a0
[   78.317757][ T3661]  ? rwlock_bug.part.0+0x90/0x90
[   78.322726][ T3661]  ? _raw_spin_lock_irq+0x41/0x50
[   78.327844][ T3661]  worker_thread+0x853/0x11c0
[   78.332543][ T3661]  ? process_one_work+0x1680/0x1680
[   78.338500][ T3661]  kthread+0x405/0x4f0
[   78.342610][ T3661]  ? set_kthread_struct+0x130/0x130

Fixes: 63f13937cb ("net: linkwatch: add net device refcount tracker")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: syzbot <syzkaller@googlegroups.com>
Link: https://lore.kernel.org/r/20211214051955.3569843-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2021-12-14 18:45:58 -08:00


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux network device link state notification
 *
 * Author:
 *	Stefan Rompf <sux@loplof.de>
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/types.h>

enum lw_bits {
	LW_URGENT = 0,
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);

static unsigned char default_operstate(const struct net_device *dev)
{
	if (netif_testing(dev))
		return IF_OPER_TESTING;

	if (!netif_carrier_ok(dev))
		return (dev->ifindex != dev_get_iflink(dev) ?
			IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);

	if (netif_dormant(dev))
		return IF_OPER_DORMANT;

	return IF_OPER_UP;
}

static void rfc2863_policy(struct net_device *dev)
{
	unsigned char operstate = default_operstate(dev);

	if (operstate == dev->operstate)
		return;

	write_lock(&dev_base_lock);

	switch(dev->link_mode) {
	case IF_LINK_MODE_TESTING:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_TESTING;
		break;

	case IF_LINK_MODE_DORMANT:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_DORMANT;
		break;
	case IF_LINK_MODE_DEFAULT:
	default:
		break;
	}

	dev->operstate = operstate;

	write_unlock(&dev_base_lock);
}

void linkwatch_init_dev(struct net_device *dev)
{
	/* Handle pre-registration link state changes */
	if (!netif_carrier_ok(dev) || netif_dormant(dev) ||
	    netif_testing(dev))
		rfc2863_policy(dev);
}

static bool linkwatch_urgent_event(struct net_device *dev)
{
	if (!netif_running(dev))
		return false;

	if (dev->ifindex != dev_get_iflink(dev))
		return true;

	if (netif_is_lag_port(dev) || netif_is_lag_master(dev))
		return true;

	return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
}

static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		dev_hold_track(dev, &dev->linkwatch_dev_tracker, GFP_ATOMIC);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}

static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;

	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* Minimise down-time: drop delay for up event. */
	if (urgent) {
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}

	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;

	/*
	 * If urgent, schedule immediate execution; otherwise, don't
	 * override the existing timer.
	 */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		mod_delayed_work(system_wq, &linkwatch_work, 0);
	else
		schedule_delayed_work(&linkwatch_work, delay);
}

static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the above read is complete since it can be
	 * rewritten as soon as we clear the bit below.
	 */
	smp_mb__before_atomic();

	/* We are about to handle this device,
	 * so new events can be accepted
	 */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		netdev_state_change(dev);
	}
	/* Note: our callers are responsible for
	 * calling netdev_tracker_free().
	 */
	dev_put(dev);
}

static void __linkwatch_run_queue(int urgent_only)
{
#define MAX_DO_DEV_PER_LOOP	100

	int do_dev = MAX_DO_DEV_PER_LOOP;
	struct net_device *dev;
	LIST_HEAD(wrk);

	/* Give urgent case more budget */
	if (urgent_only)
		do_dev += MAX_DO_DEV_PER_LOOP;

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket. This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk) && do_dev > 0) {
		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		if (!netif_device_present(dev) ||
		    (urgent_only && !linkwatch_urgent_event(dev))) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		/* We must free netdev tracker under
		 * the spinlock protection.
		 */
		netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		do_dev--;
		spin_lock_irq(&lweventlist_lock);
	}

	/* Add the remaining work back to lweventlist */
	list_splice_init(&wrk, &lweventlist);

	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}

void linkwatch_forget_dev(struct net_device *dev)
{
	unsigned long flags;
	int clean = 0;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (!list_empty(&dev->link_watch_list)) {
		list_del_init(&dev->link_watch_list);
		clean = 1;
		/* We must release netdev tracker under
		 * the spinlock protection.
		 */
		netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
	if (clean)
		linkwatch_do_dev(dev);
}

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	__linkwatch_run_queue(0);
}

static void linkwatch_event(struct work_struct *dummy)
{
	rtnl_lock();
	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
	rtnl_unlock();
}

void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);

	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		linkwatch_add_event(dev);
	} else if (!urgent)
		return;

	linkwatch_schedule_work(urgent);
}
EXPORT_SYMBOL(linkwatch_fire_event);