Kernel: Protect internal structures in InodeWatcher with spinlocks

This was the last change needed to be able to boot with the
LOCK_IN_CRITICAL_DEBUG flag enabled. That flag is still not always
enabled, because there are other places where we hold a spinlock and
then try to lock a mutex.

Instead of using one global mutex, we can protect the internal
structures of the InodeWatcher class with SpinlockProtected wrappers
(see the sketch below). This in turn allows InodeWatcher code to be
called from other parts of the kernel while a spinlock is already held.
Liav A, 2023-04-21 14:48:11 +03:00 (committed by Andreas Kling)
parent cf3835b29b
commit ee4e9b807a
2 changed files with 85 additions and 76 deletions
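
For readers unfamiliar with the pattern: SpinlockProtected<T> couples a value
with a spinlock so the value can only be reached through with(), which runs a
callback while the lock is held. Below is a minimal, hypothetical sketch of
that idea — not the kernel's actual implementation, which lives in
Kernel/Locking/SpinlockProtected.h and also takes a LockRank parameter.

#include <atomic>

// Simplified stand-in for the kernel's SpinlockProtected<T>. The wrapped
// value is private and only reachable through with(), so every access is
// forced to happen while the spinlock is held.
template<typename T>
class SpinlockProtectedSketch {
public:
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        while (m_lock.test_and_set(std::memory_order_acquire)) {
            // Busy-wait until the current holder releases the lock.
        }
        // Release the lock when this scope exits, even on early return.
        struct Unlocker {
            std::atomic_flag& lock;
            ~Unlocker() { lock.clear(std::memory_order_release); }
        } unlocker { m_lock };
        return callback(m_value);
    }

private:
    std::atomic_flag m_lock = ATOMIC_FLAG_INIT;
    T m_value {};
};

Usage mirrors the committed code, e.g. counter.with([](auto& value) { return
++value; }); the callback must not try to lock a mutex (which can block)
while the spinlock is held — exactly the situation LOCK_IN_CRITICAL_DEBUG
is meant to catch.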

Kernel/FileSystem/InodeWatcher.cpp

@@ -23,18 +23,19 @@ InodeWatcher::~InodeWatcher()
 
 bool InodeWatcher::can_read(OpenFileDescription const&, u64) const
 {
-    MutexLocker locker(m_lock);
-    return !m_queue.is_empty();
+    return m_queue.with([](auto& queue) { return !queue.is_empty(); });
 }
 
 ErrorOr<size_t> InodeWatcher::read(OpenFileDescription&, u64, UserOrKernelBuffer& buffer, size_t buffer_size)
 {
-    MutexLocker locker(m_lock);
-    if (m_queue.is_empty())
-        // can_read will catch the blocking case.
-        return EAGAIN;
-
-    auto event = m_queue.dequeue();
+    auto event = TRY(m_queue.with([](auto& queue) -> ErrorOr<Event> {
+        if (queue.is_empty()) {
+            // can_read will catch the blocking case.
+            return EAGAIN;
+        }
+        return queue.dequeue();
+    }));
 
     size_t bytes_to_write = sizeof(InodeWatcherEvent);
     if (event.path)
@@ -68,106 +69,110 @@ ErrorOr<size_t> InodeWatcher::read(OpenFileDescription&, u64, UserOrKernelBuffer
 
 ErrorOr<void> InodeWatcher::close()
 {
-    MutexLocker locker(m_lock);
-
-    for (auto& entry : m_wd_to_watches) {
-        auto& inode = const_cast<Inode&>(entry.value->inode);
-        inode.unregister_watcher({}, *this);
-    }
+    m_watch_maps.with([this](auto& watch_maps) {
+        for (auto& entry : watch_maps.wd_to_watches) {
+            auto& inode = const_cast<Inode&>(entry.value->inode);
+            inode.unregister_watcher({}, *this);
+        }
 
-    m_wd_to_watches.clear();
-    m_inode_to_watches.clear();
+        watch_maps.inode_to_watches.clear();
+        watch_maps.wd_to_watches.clear();
+    });
     return {};
 }
 
 ErrorOr<NonnullOwnPtr<KString>> InodeWatcher::pseudo_path(OpenFileDescription const&) const
 {
-    return KString::formatted("InodeWatcher:({})", m_wd_to_watches.size());
+    return m_watch_maps.with([](auto& watch_maps) -> ErrorOr<NonnullOwnPtr<KString>> {
+        return KString::formatted("InodeWatcher:({})", watch_maps.wd_to_watches.size());
+    });
 }
 
 void InodeWatcher::notify_inode_event(Badge<Inode>, InodeIdentifier inode_id, InodeWatcherEvent::Type event_type, StringView name)
 {
-    MutexLocker locker(m_lock);
-
-    auto it = m_inode_to_watches.find(inode_id);
-    if (it == m_inode_to_watches.end())
-        return;
-
-    auto& watcher = *it->value;
-    if (!(watcher.event_mask & static_cast<unsigned>(event_type)))
-        return;
-
-    OwnPtr<KString> path;
-    if (!name.is_null())
-        path = KString::try_create(name).release_value_but_fixme_should_propagate_errors();
-
-    m_queue.enqueue({ watcher.wd, event_type, move(path) });
+    m_watch_maps.with([this, inode_id, event_type, name](auto& watch_maps) {
+        auto it = watch_maps.inode_to_watches.find(inode_id);
+        if (it == watch_maps.inode_to_watches.end())
+            return;
+
+        auto& watcher = *it->value;
+        if (!(watcher.event_mask & static_cast<unsigned>(event_type)))
+            return;
+
+        m_queue.with([watcher, event_type, name](auto& queue) {
+            OwnPtr<KString> path;
+            if (!name.is_null())
+                path = KString::try_create(name).release_value_but_fixme_should_propagate_errors();
+
+            queue.enqueue({ watcher.wd, event_type, move(path) });
+        });
+    });
     evaluate_block_conditions();
 }
 
 ErrorOr<int> InodeWatcher::register_inode(Inode& inode, unsigned event_mask)
 {
-    MutexLocker locker(m_lock);
-
-    if (m_inode_to_watches.find(inode.identifier()) != m_inode_to_watches.end())
-        return EEXIST;
-
-    int wd;
-    do {
-        wd = m_wd_counter.value();
-
-        m_wd_counter++;
-        if (m_wd_counter.has_overflow())
-            m_wd_counter = 1;
-    } while (m_wd_to_watches.find(wd) != m_wd_to_watches.end());
-
-    auto description = TRY(WatchDescription::create(wd, inode, event_mask));
-
-    TRY(m_inode_to_watches.try_set(inode.identifier(), description.ptr()));
-    auto set_result = m_wd_to_watches.try_set(wd, move(description));
-    if (set_result.is_error()) {
-        m_inode_to_watches.remove(inode.identifier());
-        return set_result.release_error();
-    }
-
-    auto register_result = inode.register_watcher({}, *this);
-    if (register_result.is_error()) {
-        m_inode_to_watches.remove(inode.identifier());
-        m_wd_to_watches.remove(wd);
-        return register_result.release_error();
-    }
-
-    return wd;
+    return m_watch_maps.with([this, &inode, event_mask](auto& watch_maps) -> ErrorOr<int> {
+        if (watch_maps.inode_to_watches.find(inode.identifier()) != watch_maps.inode_to_watches.end())
+            return EEXIST;
+
+        int wd = -1;
+        do {
+            wd = m_wd_counter.value();
+
+            m_wd_counter++;
+            if (m_wd_counter.has_overflow())
+                m_wd_counter = 1;
+        } while (watch_maps.wd_to_watches.find(wd) != watch_maps.wd_to_watches.end());
+
+        auto description = TRY(WatchDescription::create(wd, inode, event_mask));
+
+        TRY(watch_maps.inode_to_watches.try_set(inode.identifier(), description.ptr()));
+        auto set_result = watch_maps.wd_to_watches.try_set(wd, move(description));
+        if (set_result.is_error()) {
+            watch_maps.inode_to_watches.remove(inode.identifier());
+            return set_result.release_error();
+        }
+
+        auto register_result = inode.register_watcher({}, *this);
+        if (register_result.is_error()) {
+            watch_maps.inode_to_watches.remove(inode.identifier());
+            watch_maps.wd_to_watches.remove(wd);
+            return register_result.release_error();
+        }
+
+        return wd;
+    });
 }
 
 ErrorOr<void> InodeWatcher::unregister_by_wd(int wd)
 {
-    MutexLocker locker(m_lock);
-
-    auto it = m_wd_to_watches.find(wd);
-    if (it == m_wd_to_watches.end())
-        return ENOENT;
-
-    auto& inode = it->value->inode;
-    inode.unregister_watcher({}, *this);
-
-    m_inode_to_watches.remove(inode.identifier());
-    m_wd_to_watches.remove(it);
+    TRY(m_watch_maps.with([this, wd](auto& watch_maps) -> ErrorOr<void> {
+        auto it = watch_maps.wd_to_watches.find(wd);
+        if (it == watch_maps.wd_to_watches.end())
+            return ENOENT;
+
+        auto& inode = it->value->inode;
+        inode.unregister_watcher({}, *this);
+
+        watch_maps.inode_to_watches.remove(inode.identifier());
+        watch_maps.wd_to_watches.remove(it);
+        return {};
+    }));
 
     return {};
 }
 
 void InodeWatcher::unregister_by_inode(Badge<Inode>, InodeIdentifier identifier)
 {
-    MutexLocker locker(m_lock);
-
-    auto it = m_inode_to_watches.find(identifier);
-    if (it == m_inode_to_watches.end())
-        return;
-
-    // NOTE: no need to call unregister_watcher here, the Inode calls us.
-    m_inode_to_watches.remove(identifier);
-    m_wd_to_watches.remove(it->value->wd);
+    m_watch_maps.with([identifier](auto& watch_maps) {
+        auto it = watch_maps.inode_to_watches.find(identifier);
+        if (it == watch_maps.inode_to_watches.end())
+            return;
+
+        // NOTE: no need to call unregister_watcher here, the Inode calls us.
+        watch_maps.inode_to_watches.remove(identifier);
+        watch_maps.wd_to_watches.remove(it->value->wd);
+    });
 }
 
 }
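
Note the lock ordering the rewritten notify_inode_event establishes:
m_watch_maps is always acquired first, and m_queue only while m_watch_maps is
still held. A standalone illustration of that nesting, reusing the
hypothetical SpinlockProtectedSketch from above (the names here are made up
for the example):

// Two protected values, always locked in the same order. A consistent
// outer-before-inner order across all call sites is what rules out
// lock-order deadlocks between the two spinlocks.
SpinlockProtectedSketch<int> g_watchers;    // stands in for m_watch_maps
SpinlockProtectedSketch<int> g_event_count; // stands in for m_queue

void record_event()
{
    g_watchers.with([&](auto& watchers) {
        if (watchers == 0)
            return; // no watchers registered; never touch the queue
        g_event_count.with([](auto& events) {
            ++events; // both spinlocks are held at this point
        });
    });
}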

Kernel/FileSystem/InodeWatcher.h

@@ -15,6 +15,8 @@
 #include <Kernel/API/InodeWatcherEvent.h>
 #include <Kernel/FileSystem/File.h>
 #include <Kernel/Forward.h>
+#include <Kernel/Locking/MutexProtected.h>
+#include <Kernel/Locking/SpinlockProtected.h>
 
 namespace Kernel {
@@ -63,20 +65,22 @@ public:
 private:
     explicit InodeWatcher() { }
 
-    mutable Mutex m_lock;
-
     struct Event {
         int wd { 0 };
         InodeWatcherEvent::Type type { InodeWatcherEvent::Type::Invalid };
        OwnPtr<KString> path;
     };
-    CircularQueue<Event, 32> m_queue;
+    SpinlockProtected<CircularQueue<Event, 32>, LockRank::None> m_queue;
 
     Checked<int> m_wd_counter { 1 };
 
     // NOTE: These two hashmaps provide two different ways of reaching the same
     // watch description, so they will overlap.
-    HashMap<int, NonnullOwnPtr<WatchDescription>> m_wd_to_watches;
-    HashMap<InodeIdentifier, WatchDescription*> m_inode_to_watches;
+    struct WatchMaps {
+        HashMap<int, NonnullOwnPtr<WatchDescription>> wd_to_watches;
+        HashMap<InodeIdentifier, WatchDescription*> inode_to_watches;
+    };
+    mutable SpinlockProtected<WatchMaps, LockRank::None> m_watch_maps;
 };
 
 }