mirror of
https://github.com/SerenityOS/serenity
synced 2024-10-15 12:23:15 +00:00
Kernel: Hold less locks when receiving ICMP packets
Kernel: Hold fewer locks when receiving ICMP packets

* We don't have to lock the "all IPv4 sockets" list in exclusive mode;
  shared mode is enough for just reading the list (as opposed to
  modifying it).
* We don't have to lock the socket's own lock at all; the
  IPv4Socket::did_receive() implementation takes care of this.
* Most importantly, we don't have to hold the "all IPv4 sockets" lock
  across the IPv4Socket::did_receive() call(s). We can copy the current
  ICMP socket list while holding the lock, then release the lock, and
  then call IPv4Socket::did_receive() on all the ICMP sockets in our
  list.

These changes fix a deadlock triggered by receiving ICMP messages when
using a tap networking setup (as opposed to QEMU's default user/SLIRP
networking) on the host.
This commit is contained in:
parent
ffa39f98e8
commit
4717009e3e
|
@@ -250,13 +250,17 @@ void handle_icmp(const EthernetFrameHeader& eth, const IPv4Packet& ipv4_packet,
 #endif

     {
-        LOCKER(IPv4Socket::all_sockets().lock());
-        for (RefPtr<IPv4Socket> socket : IPv4Socket::all_sockets().resource()) {
-            LOCKER(socket->lock());
-            if (socket->protocol() != (unsigned)IPv4Protocol::ICMP)
-                continue;
-            socket->did_receive(ipv4_packet.source(), 0, KBuffer::copy(&ipv4_packet, sizeof(IPv4Packet) + ipv4_packet.payload_size()), packet_timestamp);
+        NonnullRefPtrVector<IPv4Socket> icmp_sockets;
+        {
+            LOCKER(IPv4Socket::all_sockets().lock(), Lock::Mode::Shared);
+            for (auto* socket : IPv4Socket::all_sockets().resource()) {
+                if (socket->protocol() != (unsigned)IPv4Protocol::ICMP)
+                    continue;
+                icmp_sockets.append(*socket);
+            }
+        }
+        for (auto& socket : icmp_sockets)
+            socket.did_receive(ipv4_packet.source(), 0, KBuffer::copy(&ipv4_packet, sizeof(IPv4Packet) + ipv4_packet.payload_size()), packet_timestamp);
     }

     auto adapter = NetworkAdapter::from_ipv4_address(ipv4_packet.destination());
|
Loading…
Reference in a new issue