Mirror of https://github.com/torvalds/linux (synced 2024-11-05 18:23:50 +00:00).
net: Fix soft lockups/OOM issues w/ unix garbage collector
This is an implementation of David Miller's suggested fix in: https://bugzilla.redhat.com/show_bug.cgi?id=470201. It has been updated to use wait_event() instead of wait_event_interruptible(). Paraphrasing the description from the above report: it makes sendmsg() block while UNIX garbage collection is in progress. This avoids a situation where child processes continue to queue new FDs over an AF_UNIX socket to a parent which is in the exit path and running garbage collection on these FDs. This contention can result in soft lockups and the oom-killing of unrelated processes. Signed-off-by: dann frazier <dannf@hp.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
efbbced361
commit
5f23b73496
3 changed files with 13 additions and 3 deletions
|
@ -9,6 +9,7 @@
|
|||
extern void unix_inflight(struct file *fp);
|
||||
extern void unix_notinflight(struct file *fp);
|
||||
extern void unix_gc(void);
|
||||
extern void wait_for_unix_gc(void);
|
||||
|
||||
#define UNIX_HASH_SIZE 256
|
||||
|
||||
|
|
|
@ -1343,6 +1343,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
|
|||
|
||||
if (NULL == siocb->scm)
|
||||
siocb->scm = &tmp_scm;
|
||||
wait_for_unix_gc();
|
||||
err = scm_send(sock, msg, siocb->scm);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
@ -1493,6 +1494,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
|
|||
|
||||
if (NULL == siocb->scm)
|
||||
siocb->scm = &tmp_scm;
|
||||
wait_for_unix_gc();
|
||||
err = scm_send(sock, msg, siocb->scm);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
|
|
@ -80,6 +80,7 @@
|
|||
#include <linux/file.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
#include <net/sock.h>
|
||||
#include <net/af_unix.h>
|
||||
|
@ -91,6 +92,7 @@
|
|||
static LIST_HEAD(gc_inflight_list);
|
||||
static LIST_HEAD(gc_candidates);
|
||||
static DEFINE_SPINLOCK(unix_gc_lock);
|
||||
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
|
||||
|
||||
unsigned int unix_tot_inflight;
|
||||
|
||||
|
@ -266,12 +268,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
|
|||
list_move_tail(&u->link, &gc_candidates);
|
||||
}
|
||||
|
||||
/* True while a garbage-collection pass is running; see unix_gc() below. */
|
||||
static bool gc_in_progress = false;
|
||||
|
||||
void wait_for_unix_gc(void)
|
||||
{
|
||||
wait_event(unix_gc_wait, gc_in_progress == false);
|
||||
}
|
||||
|
||||
/* The external entry point: unix_gc() */
|
||||
void unix_gc(void)
|
||||
{
|
||||
static bool gc_in_progress = false;
|
||||
|
||||
struct unix_sock *u;
|
||||
struct unix_sock *next;
|
||||
struct sk_buff_head hitlist;
|
||||
|
@ -376,6 +382,7 @@ void unix_gc(void)
|
|||
/* All candidates should have been detached by now. */
|
||||
BUG_ON(!list_empty(&gc_candidates));
|
||||
gc_in_progress = false;
|
||||
wake_up(&unix_gc_wait);
|
||||
|
||||
out:
|
||||
spin_unlock(&unix_gc_lock);
|
||||
|
|
Loading…
Reference in a new issue