Kernel: Add formal Processor::verify_no_spinlocks_held() API

In a few places we check `!Processor::in_critical()` to validate
that the current processor doesn't hold any kernel spinlocks.

Instead, let's give this check a first-class name for readability.
I'll also be adding more of these checks, so I would rather add more
usages of a clean API than repeat this implicit/assumed logic.
This commit is contained in:
Brian Gianforcaro 2022-10-15 00:56:42 -07:00 committed by Andrew Kaster
parent 1a1e0e6364
commit 2079728a74
3 changed files with 20 additions and 10 deletions

View file

@ -147,6 +147,11 @@ public:
return current().m_in_critical;
}
// Assert that the current processor holds no kernel spinlocks.
// Per the surrounding code's convention, acquiring a spinlock enters a
// critical section, so a zero critical-section depth implies no
// spinlocks are held on this processor.
ALWAYS_INLINE static void verify_no_spinlocks_held()
{
VERIFY(!Processor::in_critical());
}
// FIXME: Actually return the idle thread once aarch64 supports threading.
ALWAYS_INLINE static Thread* idle_thread()
{

View file

@ -370,6 +370,11 @@ public:
return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
}
// Assert that the current processor holds no kernel spinlocks.
// Spinlock acquisition raises the processor's critical-section count
// (m_in_critical), so in_critical() == 0 implies no spinlocks are held.
ALWAYS_INLINE static void verify_no_spinlocks_held()
{
VERIFY(!Processor::in_critical());
}
ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }
static void smp_enable();

View file

@ -410,14 +410,6 @@ void kmalloc_enable_expand()
g_kmalloc_global->enable_expansion();
}
// Debug aid: when KMALLOC_VERIFY_NO_SPINLOCK_HELD is enabled, assert that
// the caller is not allocating while inside a critical section (i.e. while
// holding a kernel spinlock). Compiles to nothing when the flag is off.
static inline void kmalloc_verify_nospinlock_held()
{
    if constexpr (!KMALLOC_VERIFY_NO_SPINLOCK_HELD)
        return;

    // Catch bad callers allocating under spinlock.
    VERIFY(!Processor::in_critical());
}
UNMAP_AFTER_INIT void kmalloc_init()
{
// Zero out heap since it's placed after end_of_kernel_bss.
@ -429,7 +421,11 @@ UNMAP_AFTER_INIT void kmalloc_init()
void* kmalloc(size_t size)
{
kmalloc_verify_nospinlock_held();
// Catch bad callers allocating under spinlock.
if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
Processor::verify_no_spinlocks_held();
}
SpinlockLocker lock(s_lock);
++g_kmalloc_call_count;
@ -472,7 +468,11 @@ void kfree_sized(void* ptr, size_t size)
VERIFY(size > 0);
kmalloc_verify_nospinlock_held();
// Catch bad callers allocating under spinlock.
if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
Processor::verify_no_spinlocks_held();
}
SpinlockLocker lock(s_lock);
++g_kfree_call_count;
++g_nested_kfree_calls;