mirror of
https://gitlab.com/qemu-project/qemu
synced 2024-11-02 20:37:48 +00:00
slirp: Fix intermittent send queue hangs on a socket
if_output() originally sent one mbuf per call and used the slirp->next_m variable to keep track of where it left off. But nowadays it tries to send all of the mbufs from the fastq, and one mbuf from each session on the batchq. The next_m variable is both redundant and harmful: there is a case[0] involving delayed packets in which next_m ends up pointing to &slirp->if_batchq when an active session still exists, and this blocks all traffic for that session until qemu is restarted. The test case was created to reproduce a problem that was seen on long-running Chromium OS VM tests[1] which rapidly create and destroy ssh connections through hostfwd. [0] https://pastebin.com/NNy6LreF [1] https://bugs.chromium.org/p/chromium/issues/detail?id=766323 Signed-off-by: Kevin Cernekee <cernekee@chromium.org> Signed-off-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
This commit is contained in:
parent
0e7e4fb0a6
commit
e2aad34d73
2 changed files with 17 additions and 35 deletions
51
slirp/if.c
51
slirp/if.c
|
@@ -30,7 +30,6 @@ if_init(Slirp *slirp)
|
||||||
{
|
{
|
||||||
slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
|
slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
|
||||||
slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
|
slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
|
||||||
slirp->next_m = (struct mbuf *) &slirp->if_batchq;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -100,10 +99,6 @@ if_output(struct socket *so, struct mbuf *ifm)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
|
ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
|
||||||
/* Set next_m if the queue was empty so far */
|
|
||||||
if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
|
|
||||||
slirp->next_m = ifm;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Create a new doubly linked list for this session */
|
/* Create a new doubly linked list for this session */
|
||||||
|
@@ -143,21 +138,18 @@ diddit:
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Send a packet
|
* Send one packet from each session.
|
||||||
* We choose a packet based on its position in the output queues;
|
|
||||||
* If there are packets on the fastq, they are sent FIFO, before
|
* If there are packets on the fastq, they are sent FIFO, before
|
||||||
* everything else. Otherwise we choose the first packet from the
|
* everything else. Then we choose the first packet from each
|
||||||
* batchq and send it. the next packet chosen will be from the session
|
* batchq session (socket) and send it.
|
||||||
* after this one, then the session after that one, and so on.. So,
|
* For example, if there are 3 ftp sessions fighting for bandwidth,
|
||||||
* for example, if there are 3 ftp session's fighting for bandwidth,
|
|
||||||
* one packet will be sent from the first session, then one packet
|
* one packet will be sent from the first session, then one packet
|
||||||
* from the second session, then one packet from the third, then back
|
* from the second session, then one packet from the third.
|
||||||
* to the first, etc. etc.
|
|
||||||
*/
|
*/
|
||||||
void if_start(Slirp *slirp)
|
void if_start(Slirp *slirp)
|
||||||
{
|
{
|
||||||
uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
|
||||||
bool from_batchq, next_from_batchq;
|
bool from_batchq = false;
|
||||||
struct mbuf *ifm, *ifm_next, *ifqt;
|
struct mbuf *ifm, *ifm_next, *ifqt;
|
||||||
|
|
||||||
DEBUG_CALL("if_start");
|
DEBUG_CALL("if_start");
|
||||||
|
@@ -167,26 +159,29 @@ void if_start(Slirp *slirp)
|
||||||
}
|
}
|
||||||
slirp->if_start_busy = true;
|
slirp->if_start_busy = true;
|
||||||
|
|
||||||
|
struct mbuf *batch_head = NULL;
|
||||||
|
if (slirp->if_batchq.qh_link != &slirp->if_batchq) {
|
||||||
|
batch_head = (struct mbuf *) slirp->if_batchq.qh_link;
|
||||||
|
}
|
||||||
|
|
||||||
if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
|
if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
|
||||||
ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
|
ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
|
||||||
next_from_batchq = false;
|
} else if (batch_head) {
|
||||||
} else if ((struct quehead *) slirp->next_m != &slirp->if_batchq) {
|
/* Nothing on fastq, pick up from batchq */
|
||||||
/* Nothing on fastq, pick up from batchq via next_m */
|
ifm_next = batch_head;
|
||||||
ifm_next = slirp->next_m;
|
from_batchq = true;
|
||||||
next_from_batchq = true;
|
|
||||||
} else {
|
} else {
|
||||||
ifm_next = NULL;
|
ifm_next = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
while (ifm_next) {
|
while (ifm_next) {
|
||||||
ifm = ifm_next;
|
ifm = ifm_next;
|
||||||
from_batchq = next_from_batchq;
|
|
||||||
|
|
||||||
ifm_next = ifm->ifq_next;
|
ifm_next = ifm->ifq_next;
|
||||||
if ((struct quehead *) ifm_next == &slirp->if_fastq) {
|
if ((struct quehead *) ifm_next == &slirp->if_fastq) {
|
||||||
/* No more packets in fastq, switch to batchq */
|
/* No more packets in fastq, switch to batchq */
|
||||||
ifm_next = slirp->next_m;
|
ifm_next = batch_head;
|
||||||
next_from_batchq = true;
|
from_batchq = true;
|
||||||
}
|
}
|
||||||
if ((struct quehead *) ifm_next == &slirp->if_batchq) {
|
if ((struct quehead *) ifm_next == &slirp->if_batchq) {
|
||||||
/* end of batchq */
|
/* end of batchq */
|
||||||
|
@@ -199,11 +194,6 @@ void if_start(Slirp *slirp)
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ifm == slirp->next_m) {
|
|
||||||
/* Set which packet to send on next iteration */
|
|
||||||
slirp->next_m = ifm->ifq_next;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Remove it from the queue */
|
/* Remove it from the queue */
|
||||||
ifqt = ifm->ifq_prev;
|
ifqt = ifm->ifq_prev;
|
||||||
remque(ifm);
|
remque(ifm);
|
||||||
|
@@ -214,15 +204,8 @@ void if_start(Slirp *slirp)
|
||||||
|
|
||||||
insque(next, ifqt);
|
insque(next, ifqt);
|
||||||
ifs_remque(ifm);
|
ifs_remque(ifm);
|
||||||
|
|
||||||
if (!from_batchq) {
|
if (!from_batchq) {
|
||||||
/* Next packet in fastq is from the same session */
|
|
||||||
ifm_next = next;
|
ifm_next = next;
|
||||||
next_from_batchq = false;
|
|
||||||
} else if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
|
|
||||||
/* Set next_m and ifm_next if the session packet is now the
|
|
||||||
* only one on batchq */
|
|
||||||
slirp->next_m = ifm_next = next;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -183,7 +183,6 @@ struct Slirp {
|
||||||
/* if states */
|
/* if states */
|
||||||
struct quehead if_fastq; /* fast queue (for interactive data) */
|
struct quehead if_fastq; /* fast queue (for interactive data) */
|
||||||
struct quehead if_batchq; /* queue for non-interactive data */
|
struct quehead if_batchq; /* queue for non-interactive data */
|
||||||
struct mbuf *next_m; /* pointer to next mbuf to output */
|
|
||||||
bool if_start_busy; /* avoid if_start recursion */
|
bool if_start_busy; /* avoid if_start recursion */
|
||||||
|
|
||||||
/* ip states */
|
/* ip states */
|
||||||
|
|
Loading…
Reference in a new issue