Kernel: Use the AK SetOnce container class in various cases

There are many places in the kernel code where we have boolean flags
that are set only once and never reset, but are checked multiple times
both before and after the point at which they're set — which matches
the purpose of the SetOnce class.
This commit is contained in:
Liav A. 2024-04-22 13:30:09 +03:00 committed by Andrew Kaster
parent 122c82a2a1
commit 2bba9411ca
41 changed files with 135 additions and 111 deletions

View file

@ -23,7 +23,7 @@ void ProcessorBase<T>::check_invoke_scheduler()
VERIFY(!m_in_irq);
VERIFY(!m_in_critical);
VERIFY(&Processor::current() == this);
if (m_invoke_scheduler_async && m_scheduler_initialized) {
if (m_invoke_scheduler_async && m_scheduler_initialized.was_set()) {
m_invoke_scheduler_async = false;
Scheduler::invoke_async();
}

View file

@ -8,6 +8,7 @@
#pragma once
#include <AK/Function.h>
#include <AK/SetOnce.h>
#include <Kernel/Arch/CPUID.h>
#include <Kernel/Arch/DeferredCallEntry.h>
#include <Kernel/Arch/DeferredCallPool.h>
@ -195,7 +196,8 @@ private:
// they need to be FlatPtrs or everything becomes highly unsound and breaks. They are actually just booleans.
FlatPtr m_in_scheduler;
FlatPtr m_invoke_scheduler_async;
FlatPtr m_scheduler_initialized;
SetOnce m_scheduler_initialized;
DeferredCallPool m_deferred_call_pool {};
};

View file

@ -7,6 +7,7 @@
#include <AK/Singleton.h>
#include <AK/Types.h>
#include <AK/SetOnce.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Bus/PCI/Initializer.h>
#include <Kernel/Sections.h>
@ -26,13 +27,13 @@ void microseconds_delay(u32)
// Initializer.cpp
namespace Kernel::PCI {
bool g_pci_access_io_probe_failed { false };
bool g_pci_access_is_disabled_from_commandline { true };
SetOnce g_pci_access_io_probe_failed;
SetOnce g_pci_access_is_disabled_from_commandline;
void initialize()
{
dbgln("PCI: FIXME: Enable PCI for aarch64 platforms");
g_pci_access_io_probe_failed = true;
g_pci_access_io_probe_failed.set();
}
}

View file

@ -157,7 +157,7 @@ void ProcessorBase<T>::initialize_context_switching(Thread& initial_thread)
{
VERIFY(initial_thread.process().is_kernel_process());
m_scheduler_initialized = true;
m_scheduler_initialized.set();
// FIXME: Figure out if we need to call {pre_,post_,}init_finished once aarch64 supports SMP
Processor::set_current_in_scheduler(true);

View file

@ -5,6 +5,7 @@
*/
#include <AK/Platform.h>
#include <AK/SetOnce.h>
#include <AK/Types.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/InterruptManagement.h>
@ -100,7 +101,7 @@ extern "C" u8 end_of_kernel_image[];
multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
size_t multiboot_copy_boot_modules_count;
READONLY_AFTER_INIT bool g_in_early_boot;
READONLY_AFTER_INIT SetOnce g_not_in_early_boot;
namespace Kernel {
@ -168,8 +169,6 @@ READONLY_AFTER_INIT static u8 s_command_line_buffer[512];
extern "C" [[noreturn]] UNMAP_AFTER_INIT NO_SANITIZE_COVERAGE void init([[maybe_unused]] BootInfo const& boot_info)
{
g_in_early_boot = true;
#if ARCH(X86_64)
start_of_prekernel_image = PhysicalAddress { boot_info.start_of_prekernel_image };
end_of_prekernel_image = PhysicalAddress { boot_info.end_of_prekernel_image };
@ -460,7 +459,7 @@ void init_stage2(void*)
}
// Switch out of early boot mode.
g_in_early_boot = false;
g_not_in_early_boot.set();
// NOTE: Everything marked READONLY_AFTER_INIT becomes non-writable after this point.
MM.protect_readonly_after_init_memory();

View file

@ -4,6 +4,7 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/SetOnce.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Boot/CommandLine.h>
#include <Kernel/Bus/PCI/API.h>
@ -15,14 +16,15 @@
namespace Kernel::PCI {
bool g_pci_access_io_probe_failed { false };
bool g_pci_access_is_disabled_from_commandline;
SetOnce g_pci_access_io_probe_failed;
SetOnce g_pci_access_is_disabled_from_commandline;
void initialize()
{
g_pci_access_is_disabled_from_commandline = kernel_command_line().is_pci_disabled();
if (g_pci_access_is_disabled_from_commandline)
if (kernel_command_line().is_pci_disabled()) {
g_pci_access_is_disabled_from_commandline.set();
return;
}
new Access();

View file

@ -187,7 +187,7 @@ void ProcessorBase<T>::initialize_context_switching(Thread& initial_thread)
{
VERIFY(initial_thread.process().is_kernel_process());
m_scheduler_initialized = true;
m_scheduler_initialized.set();
// FIXME: Figure out if we need to call {pre_,post_,}init_finished once riscv64 supports SMP
Processor::set_current_in_scheduler(true);

View file

@ -68,7 +68,7 @@ void SysFSBIOSDirectory::create_components()
UNMAP_AFTER_INIT void SysFSBIOSDirectory::initialize_dmi_exposer()
{
VERIFY(!(m_dmi_entry_point.is_null()));
if (m_using_64bit_dmi_entry_point) {
if (m_using_64bit_dmi_entry_point.was_set()) {
set_dmi_64_bit_entry_initialization_values();
} else {
set_dmi_32_bit_entry_initialization_values();
@ -87,7 +87,7 @@ UNMAP_AFTER_INIT SysFSBIOSDirectory::SysFSBIOSDirectory(SysFSFirmwareDirectory&
auto entry_64bit = find_dmi_entry64bit_point();
if (entry_64bit.has_value()) {
m_dmi_entry_point = entry_64bit.value();
m_using_64bit_dmi_entry_point = true;
m_using_64bit_dmi_entry_point.set();
}
if (m_dmi_entry_point.is_null())
return;

View file

@ -7,6 +7,7 @@
#pragma once
#include <AK/RefPtr.h>
#include <AK/SetOnce.h>
#include <AK/Types.h>
#include <AK/Vector.h>
#include <Kernel/FileSystem/SysFS/Subsystems/Firmware/Directory.h>
@ -34,7 +35,7 @@ private:
PhysicalAddress m_dmi_entry_point;
PhysicalAddress m_smbios_structure_table;
bool m_using_64bit_dmi_entry_point { false };
SetOnce m_using_64bit_dmi_entry_point;
size_t m_smbios_structure_table_length { 0 };
size_t m_dmi_entry_point_length { 0 };
};

View file

@ -147,14 +147,14 @@ void APIC::set_base(PhysicalAddress const& base)
{
MSR msr(APIC_BASE_MSR);
u64 flags = 1 << 11;
if (m_is_x2)
if (m_is_x2.was_set())
flags |= 1 << 10;
msr.set(base.get() | flags);
}
void APIC::write_register(u32 offset, u32 value)
{
if (m_is_x2) {
if (m_is_x2.was_set()) {
MSR msr(APIC_REGS_MSR_BASE + (offset >> 4));
msr.set(value);
} else {
@ -164,7 +164,7 @@ void APIC::write_register(u32 offset, u32 value)
u32 APIC::read_register(u32 offset)
{
if (m_is_x2) {
if (m_is_x2.was_set()) {
MSR msr(APIC_REGS_MSR_BASE + (offset >> 4));
return (u32)msr.get();
}
@ -190,7 +190,7 @@ void APIC::wait_for_pending_icr()
void APIC::write_icr(ICRReg const& icr)
{
if (m_is_x2) {
if (m_is_x2.was_set()) {
MSR msr(APIC_REGS_MSR_BASE + (APIC_REG_ICR_LOW >> 4));
msr.set(icr.x2_value());
} else {
@ -247,13 +247,13 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
if ((id.edx() & (1 << 9)) == 0)
return false;
if (id.ecx() & (1 << 21))
m_is_x2 = true;
m_is_x2.set();
PhysicalAddress apic_base = get_base();
dbgln_if(APIC_DEBUG, "Initializing {}APIC, base: {}", m_is_x2 ? "x2" : "x", apic_base);
dbgln_if(APIC_DEBUG, "Initializing {}APIC, base: {}", m_is_x2.was_set() ? "x2" : "x", apic_base);
set_base(apic_base);
if (!m_is_x2) {
if (!m_is_x2.was_set()) {
auto region_or_error = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
if (region_or_error.is_error()) {
dbgln("APIC: Failed to allocate memory for APIC base");
@ -463,10 +463,10 @@ UNMAP_AFTER_INIT void APIC::boot_aps()
UNMAP_AFTER_INIT void APIC::enable(u32 cpu)
{
VERIFY(m_is_x2 || cpu < 8);
VERIFY(m_is_x2.was_set() || cpu < 8);
u32 apic_id;
if (m_is_x2) {
if (m_is_x2.was_set()) {
dbgln_if(APIC_DEBUG, "Enable x2APIC on CPU #{}", cpu);
// We need to enable x2 mode on each core independently
@ -498,7 +498,7 @@ UNMAP_AFTER_INIT void APIC::enable(u32 cpu)
APICIPIInterruptHandler::initialize(IRQ_APIC_IPI);
}
if (!m_is_x2) {
if (!m_is_x2.was_set()) {
// local destination mode (flat mode), not supported in x2 mode
write_register(APIC_REG_DF, 0xf0000000);
}
@ -566,12 +566,12 @@ void APIC::send_ipi(u32 cpu)
VERIFY(cpu != Processor::current_id());
VERIFY(cpu < Processor::count());
wait_for_pending_icr();
write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, m_is_x2 ? Processor::by_id(cpu).info().apic_id() : cpu, ICRReg::Fixed, m_is_x2 ? ICRReg::Physical : ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand });
write_icr({ IRQ_APIC_IPI + IRQ_VECTOR_BASE, m_is_x2.was_set() ? Processor::by_id(cpu).info().apic_id() : cpu, ICRReg::Fixed, m_is_x2.was_set() ? ICRReg::Physical : ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand });
}
UNMAP_AFTER_INIT APICTimer* APIC::initialize_timers(HardwareTimerBase& calibration_timer)
{
if (!m_apic_base && !m_is_x2)
if (!m_apic_base && !m_is_x2.was_set())
return nullptr;
// We should only initialize and calibrate the APIC timer once on the BSP!

View file

@ -6,6 +6,7 @@
#pragma once
#include <AK/SetOnce.h>
#include <AK/Types.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Time/HardwareTimer.h>
@ -99,7 +100,7 @@ private:
u32 m_processor_cnt { 0 };
u32 m_processor_enabled_cnt { 0 };
APICTimer* m_apic_timer { nullptr };
bool m_is_x2 { false };
SetOnce m_is_x2;
static PhysicalAddress get_base();
void set_base(PhysicalAddress const& base);

View file

@ -4,6 +4,7 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/SetOnce.h>
#include <Kernel/Arch/Interrupts.h>
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Boot/CommandLine.h>
@ -17,8 +18,8 @@
namespace Kernel::PCI {
READONLY_AFTER_INIT bool g_pci_access_io_probe_failed;
READONLY_AFTER_INIT bool g_pci_access_is_disabled_from_commandline;
READONLY_AFTER_INIT SetOnce g_pci_access_io_probe_failed;
READONLY_AFTER_INIT SetOnce g_pci_access_is_disabled_from_commandline;
static bool test_pci_io();
@ -31,7 +32,7 @@ UNMAP_AFTER_INIT static PCIAccessLevel detect_optimal_access_type()
if (boot_determined != PCIAccessLevel::IOAddressing)
return boot_determined;
if (!g_pci_access_io_probe_failed)
if (!g_pci_access_io_probe_failed.was_set())
return PCIAccessLevel::IOAddressing;
PANIC("No PCI bus access method detected!");
@ -39,7 +40,9 @@ UNMAP_AFTER_INIT static PCIAccessLevel detect_optimal_access_type()
UNMAP_AFTER_INIT void initialize()
{
g_pci_access_is_disabled_from_commandline = kernel_command_line().is_pci_disabled();
if (kernel_command_line().is_pci_disabled())
g_pci_access_is_disabled_from_commandline.set();
Optional<PhysicalAddress> possible_mcfg;
// FIXME: There are other arch-specific methods to find the memory range
// for accessing the PCI configuration space.
@ -47,11 +50,13 @@ UNMAP_AFTER_INIT void initialize()
// parse it to find a PCI host bridge.
if (ACPI::is_enabled()) {
possible_mcfg = ACPI::Parser::the()->find_table("MCFG"sv);
g_pci_access_io_probe_failed = (!test_pci_io()) && (!possible_mcfg.has_value());
if ((!test_pci_io()) && (!possible_mcfg.has_value()))
g_pci_access_io_probe_failed.set();
} else {
g_pci_access_io_probe_failed = !test_pci_io();
if (!test_pci_io())
g_pci_access_io_probe_failed.set();
}
if (g_pci_access_is_disabled_from_commandline || g_pci_access_io_probe_failed)
if (g_pci_access_is_disabled_from_commandline.was_set() || g_pci_access_io_probe_failed.was_set())
return;
switch (detect_optimal_access_type()) {
case PCIAccessLevel::MemoryAddressing: {

View file

@ -455,8 +455,6 @@ UNMAP_AFTER_INIT void Processor::cpu_detect()
}
}
m_has_qemu_hvf_quirk = false;
if (max_extended_leaf >= 0x80000008) {
// CPUID.80000008H:EAX[7:0] reports the physical-address width supported by the processor.
CPUID cpuid(0x80000008);
@ -478,7 +476,7 @@ UNMAP_AFTER_INIT void Processor::cpu_detect()
if (has_feature(CPUFeature::HYPERVISOR)) {
CPUID hypervisor_leaf_range(0x40000000);
if (!hypervisor_leaf_range.ebx() && m_physical_address_bit_width == 36) {
m_has_qemu_hvf_quirk = true;
m_has_qemu_hvf_quirk.set();
m_virtual_address_bit_width = 48;
}
}
@ -602,7 +600,6 @@ UNMAP_AFTER_INIT void ProcessorBase<T>::early_initialize(u32 cpu)
m_in_critical = 0;
m_invoke_scheduler_async = false;
m_scheduler_initialized = false;
m_in_scheduler = true;
self->m_message_queue = nullptr;
@ -642,7 +639,7 @@ UNMAP_AFTER_INIT void ProcessorBase<T>::initialize(u32 cpu)
dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", current_id());
dmesgln("CPU[{}]: Physical address bit width: {}", current_id(), m_physical_address_bit_width);
dmesgln("CPU[{}]: Virtual address bit width: {}", current_id(), m_virtual_address_bit_width);
if (self->m_has_qemu_hvf_quirk)
if (self->m_has_qemu_hvf_quirk.was_set())
dmesgln("CPU[{}]: Applied correction for QEMU Hypervisor.framework quirk", current_id());
if (cpu == 0)
@ -1688,7 +1685,7 @@ UNMAP_AFTER_INIT void ProcessorBase<T>::initialize_context_switching(Thread& ini
self->m_tss.rsp0l = regs.rsp0 & 0xffffffff;
self->m_tss.rsp0h = regs.rsp0 >> 32;
m_scheduler_initialized = true;
m_scheduler_initialized.set();
// clang-format off
asm volatile(

View file

@ -9,6 +9,7 @@
#include <AK/Array.h>
#include <AK/Concepts.h>
#include <AK/Function.h>
#include <AK/SetOnce.h>
#include <AK/Types.h>
#include <Kernel/Arch/DeferredCallEntry.h>
@ -72,7 +73,7 @@ private:
static Atomic<u32> s_idle_cpu_mask;
TSS m_tss;
bool m_has_qemu_hvf_quirk;
SetOnce m_has_qemu_hvf_quirk;
ProcessorInfo* m_info;

View file

@ -28,7 +28,7 @@ void VGAIOArbiter::disable_vga_emulation_access_permanently(Badge<GraphicsManage
u8 sr1 = IO::in8(0x3c5);
IO::out8(0x3c5, sr1 | 1 << 5);
microseconds_delay(1000);
m_vga_access_is_disabled = true;
m_vga_access_is_disabled.set();
}
void VGAIOArbiter::enable_vga_text_mode_console_cursor(Badge<GraphicsManagement>)
@ -39,7 +39,7 @@ void VGAIOArbiter::enable_vga_text_mode_console_cursor(Badge<GraphicsManagement>
void VGAIOArbiter::enable_vga_text_mode_console_cursor()
{
SpinlockLocker locker(m_main_vga_lock);
if (m_vga_access_is_disabled)
if (m_vga_access_is_disabled.was_set())
return;
IO::out8(0x3D4, 0xA);
IO::out8(0x3D5, 0);
@ -53,7 +53,7 @@ void VGAIOArbiter::disable_vga_text_mode_console_cursor(Badge<GraphicsManagement
void VGAIOArbiter::disable_vga_text_mode_console_cursor()
{
SpinlockLocker locker(m_main_vga_lock);
if (m_vga_access_is_disabled)
if (m_vga_access_is_disabled.was_set())
return;
IO::out8(0x3D4, 0xA);
IO::out8(0x3D5, 0x20);
@ -62,7 +62,7 @@ void VGAIOArbiter::disable_vga_text_mode_console_cursor()
void VGAIOArbiter::unblank_screen(Badge<GraphicsManagement>)
{
SpinlockLocker locker(m_main_vga_lock);
if (m_vga_access_is_disabled)
if (m_vga_access_is_disabled.was_set())
return;
IO::out8(0x3c0, 0x20);
}
@ -70,7 +70,7 @@ void VGAIOArbiter::unblank_screen(Badge<GraphicsManagement>)
void VGAIOArbiter::set_vga_text_mode_cursor(Badge<GraphicsManagement>, size_t console_width, size_t x, size_t y)
{
SpinlockLocker locker(m_main_vga_lock);
if (m_vga_access_is_disabled)
if (m_vga_access_is_disabled.was_set())
return;
enable_vga_text_mode_console_cursor();
u16 value = y * console_width + x;

View file

@ -8,6 +8,7 @@
#include <AK/NonnullOwnPtr.h>
#include <AK/Platform.h>
#include <AK/SetOnce.h>
#include <AK/Types.h>
#include <Kernel/Locking/Spinlock.h>
@ -34,7 +35,7 @@ private:
void enable_vga_text_mode_console_cursor();
RecursiveSpinlock<LockRank::None> m_main_vga_lock {};
bool m_vga_access_is_disabled { false };
SetOnce m_vga_access_is_disabled;
};
}

View file

@ -41,12 +41,12 @@ bool Access::is_initialized()
bool Access::is_hardware_disabled()
{
return g_pci_access_io_probe_failed;
return g_pci_access_io_probe_failed.was_set();
}
bool Access::is_disabled()
{
return g_pci_access_is_disabled_from_commandline || g_pci_access_io_probe_failed;
return g_pci_access_is_disabled_from_commandline.was_set() || g_pci_access_io_probe_failed.was_set();
}
UNMAP_AFTER_INIT bool Access::find_and_register_pci_host_bridges_from_acpi_mcfg_table(PhysicalAddress mcfg_table)

View file

@ -6,10 +6,12 @@
#pragma once
#include <AK/SetOnce.h>
namespace Kernel::PCI {
extern bool g_pci_access_io_probe_failed;
extern bool g_pci_access_is_disabled_from_commandline;
extern SetOnce g_pci_access_io_probe_failed;
extern SetOnce g_pci_access_is_disabled_from_commandline;
void initialize();

View file

@ -40,8 +40,8 @@ void Device::set_status_bit(u8 status_bit)
ErrorOr<void> Device::accept_device_features(u64 device_features, u64 accepted_features)
{
VERIFY(!m_did_accept_features);
m_did_accept_features = true;
VERIFY(!m_did_accept_features.was_set());
m_did_accept_features.set();
if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
accepted_features |= VIRTIO_F_VERSION_1; // let the device know were not a legacy driver
@ -89,8 +89,8 @@ ErrorOr<void> Device::setup_queue(u16 queue_index)
ErrorOr<void> Device::setup_queues(u16 requested_queue_count)
{
VERIFY(!m_did_setup_queues);
m_did_setup_queues = true;
VERIFY(!m_did_setup_queues.was_set());
m_did_setup_queues.set();
auto* common_cfg = TRY(m_transport_entity->get_config(ConfigurationType::Common));
if (common_cfg) {
@ -120,8 +120,8 @@ ErrorOr<void> Device::setup_queues(u16 requested_queue_count)
void Device::finish_init()
{
VERIFY(m_did_accept_features); // ensure features were negotiated
VERIFY(m_did_setup_queues); // ensure queues were set-up
VERIFY(m_did_accept_features.was_set()); // ensure features were negotiated
VERIFY(m_did_setup_queues.was_set()); // ensure queues were set-up
VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish the initialization
set_status_bit(DEVICE_STATUS_DRIVER_OK);

View file

@ -6,6 +6,7 @@
#pragma once
#include <AK/SetOnce.h>
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Bus/PCI/Device.h>
#include <Kernel/Bus/VirtIO/Definitions.h>
@ -66,7 +67,7 @@ protected:
}
bool is_feature_accepted(u64 feature) const
{
VERIFY(m_did_accept_features);
VERIFY(m_did_accept_features.was_set());
return is_feature_set(m_accepted_features, feature);
}
@ -91,8 +92,8 @@ private:
u16 m_queue_count { 0 };
u8 m_status { 0 };
u64 m_accepted_features { 0 };
bool m_did_accept_features { false };
bool m_did_setup_queues { false };
SetOnce m_did_accept_features;
SetOnce m_did_setup_queues;
NonnullOwnPtr<TransportEntity> const m_transport_entity;
};

View file

@ -10,7 +10,7 @@ namespace Kernel::VirtIO {
auto TransportEntity::mapping_for_resource_index(u8 resource_index) -> IOWindow&
{
VERIFY(m_use_mmio);
VERIFY(m_use_mmio.was_set());
VERIFY(m_register_bases[resource_index]);
return *m_register_bases[resource_index];
}

View file

@ -6,6 +6,7 @@
#pragma once
#include <AK/SetOnce.h>
#include <AK/Types.h>
#include <Kernel/Bus/VirtIO/Definitions.h>
#include <Kernel/Bus/VirtIO/Queue.h>
@ -90,7 +91,7 @@ protected:
IOWindow& base_io_window();
Array<OwnPtr<IOWindow>, 6> m_register_bases;
bool m_use_mmio { false };
SetOnce m_use_mmio;
u32 m_notify_multiplier { 0 };
};

View file

@ -123,7 +123,7 @@ ErrorOr<void> PCIeTransportLink::locate_configurations_and_resources(Badge<VirtI
}
dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, resource: {}, offset: {}, length: {}", device_name(), (u32)config.cfg_type, config.resource_index, config.offset, config.length);
if (config.cfg_type == ConfigurationType::Common)
m_use_mmio = true;
m_use_mmio.set();
else if (config.cfg_type == ConfigurationType::Notify)
m_notify_multiplier = capability.read32(0x10);
@ -131,7 +131,7 @@ ErrorOr<void> PCIeTransportLink::locate_configurations_and_resources(Badge<VirtI
}
}
if (m_use_mmio) {
if (m_use_mmio.was_set()) {
for (auto& cfg : m_configs) {
auto mapping_io_window = TRY(IOWindow::create_for_pci_device_bar(device_identifier(), static_cast<PCI::HeaderType0BaseRegister>(cfg.resource_index)));
m_register_bases[cfg.resource_index] = move(mapping_io_window);

View file

@ -452,7 +452,7 @@ static void* kmalloc_impl(size_t size, size_t alignment, CallerWillInitializeMem
SpinlockLocker lock(s_lock);
++g_kmalloc_call_count;
if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available.was_set()) {
dbgln("kmalloc({})", size);
Kernel::dump_backtrace();
}

View file

@ -17,7 +17,7 @@ namespace Kernel {
FlatPtr g_lowest_kernel_symbol_address = 0xffffffff;
FlatPtr g_highest_kernel_symbol_address = 0;
bool g_kernel_symbols_available = false;
SetOnce g_kernel_symbols_available;
extern "C" {
__attribute__((section(".kernel_symbols"))) char kernel_symbols[5 * MiB] {};
@ -107,7 +107,7 @@ UNMAP_AFTER_INIT static void load_kernel_symbols_from_data(Bytes buffer)
++bufptr;
++current_symbol_index;
}
g_kernel_symbols_available = true;
g_kernel_symbols_available.set();
}
NEVER_INLINE static void dump_backtrace_impl(FlatPtr frame_pointer, bool use_ksyms, PrintToScreen print_to_screen)
@ -121,7 +121,7 @@ NEVER_INLINE static void dump_backtrace_impl(FlatPtr frame_pointer, bool use_ksy
} while (0)
SmapDisabler disabler;
if (use_ksyms && !g_kernel_symbols_available)
if (use_ksyms && !g_kernel_symbols_available.was_set())
Processor::halt();
struct RecognizedSymbol {
@ -235,7 +235,7 @@ void dump_backtrace(PrintToScreen print_to_screen)
TemporaryChange disable_kmalloc_stacks(g_dump_kmalloc_stacks, false);
FlatPtr base_pointer = (FlatPtr)__builtin_frame_address(0);
dump_backtrace_impl(base_pointer, g_kernel_symbols_available, print_to_screen);
dump_backtrace_impl(base_pointer, g_kernel_symbols_available.was_set(), print_to_screen);
}
UNMAP_AFTER_INIT void load_kernel_symbol_table()

View file

@ -7,6 +7,7 @@
#pragma once
#include <AK/Forward.h>
#include <AK/SetOnce.h>
namespace Kernel {
@ -24,7 +25,7 @@ FlatPtr address_for_kernel_symbol(StringView name);
KernelSymbol const* symbolicate_kernel_address(FlatPtr);
void load_kernel_symbol_table();
extern bool g_kernel_symbols_available;
extern SetOnce g_kernel_symbols_available;
extern FlatPtr g_lowest_kernel_symbol_address;
extern FlatPtr g_highest_kernel_symbol_address;

View file

@ -5,10 +5,11 @@
*/
#include <AK/Format.h>
#include <AK/SetOnce.h>
#include <AK/StringBuilder.h>
#include <Kernel/Library/KString.h>
extern bool g_in_early_boot;
extern SetOnce g_not_in_early_boot;
namespace Kernel {
@ -33,7 +34,7 @@ ErrorOr<NonnullOwnPtr<KString>> KString::vformatted(StringView fmtstr, AK::TypeE
NonnullOwnPtr<KString> KString::must_create(StringView string)
{
// We can only enforce success during early boot.
VERIFY(g_in_early_boot);
VERIFY(!g_not_in_early_boot.was_set());
return KString::try_create(string).release_value();
}
@ -51,7 +52,7 @@ ErrorOr<NonnullOwnPtr<KString>> KString::try_create_uninitialized(size_t length,
NonnullOwnPtr<KString> KString::must_create_uninitialized(size_t length, char*& characters)
{
// We can only enforce success during early boot.
VERIFY(g_in_early_boot);
VERIFY(!g_not_in_early_boot.was_set());
return KString::try_create_uninitialized(length, characters).release_value();
}

View file

@ -5,6 +5,7 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/SetOnce.h>
#include <Kernel/Debug.h>
#include <Kernel/KSyms.h>
#include <Kernel/Locking/LockLocation.h>
@ -12,7 +13,7 @@
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Tasks/Thread.h>
extern bool g_in_early_boot;
extern SetOnce g_not_in_early_boot;
namespace Kernel {
@ -23,7 +24,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
VERIFY(!Processor::current_in_irq());
if constexpr (LOCK_IN_CRITICAL_DEBUG) {
// There are no interrupts enabled in early boot.
if (!g_in_early_boot)
if (g_not_in_early_boot.was_set())
VERIFY_INTERRUPTS_ENABLED();
}
VERIFY(mode != Mode::Unlocked);
@ -151,7 +152,7 @@ void Mutex::unlock()
VERIFY(!Processor::current_in_irq());
if constexpr (LOCK_IN_CRITICAL_DEBUG) {
// There are no interrupts enabled in early boot.
if (!g_in_early_boot)
if (g_not_in_early_boot.was_set())
VERIFY_INTERRUPTS_ENABLED();
}
auto* current_thread = Thread::current();
@ -211,7 +212,7 @@ void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock<Loc
{
if constexpr (LOCK_IN_CRITICAL_DEBUG) {
// There are no interrupts enabled in early boot.
if (!g_in_early_boot)
if (g_not_in_early_boot.was_set())
VERIFY_INTERRUPTS_ENABLED();
}
m_blocked_thread_lists.with([&](auto& lists) {

View file

@ -10,6 +10,7 @@
#include <AK/EnumBits.h>
#include <AK/IntrusiveList.h>
#include <AK/IntrusiveRedBlackTree.h>
#include <AK/SetOnce.h>
#include <Kernel/Forward.h>
#include <Kernel/Library/KString.h>
#include <Kernel/Library/LockWeakable.h>
@ -89,8 +90,8 @@ public:
[[nodiscard]] bool is_stack() const { return m_stack; }
void set_stack(bool stack) { m_stack = stack; }
[[nodiscard]] bool is_immutable() const { return m_immutable; }
void set_immutable() { m_immutable = true; }
[[nodiscard]] bool is_immutable() const { return m_immutable.was_set(); }
void set_immutable() { m_immutable.set(); }
[[nodiscard]] bool is_mmap() const { return m_mmap; }
@ -243,12 +244,13 @@ private:
bool m_cacheable : 1 { false };
bool m_stack : 1 { false };
bool m_mmap : 1 { false };
bool m_immutable : 1 { false };
bool m_syscall_region : 1 { false };
bool m_write_combine : 1 { false };
bool m_mmapped_from_readable : 1 { false };
bool m_mmapped_from_writable : 1 { false };
SetOnce m_immutable;
IntrusiveRedBlackTreeNode<FlatPtr, Region, RawPtr<Region>> m_tree_node;
IntrusiveListNode<Region> m_vmobject_list_node;

View file

@ -97,19 +97,19 @@ void IPv4Socket::get_peer_address(sockaddr* address, socklen_t* address_size)
ErrorOr<void> IPv4Socket::ensure_bound()
{
dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket::ensure_bound() m_bound {}", m_bound);
if (m_bound)
dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket::ensure_bound() m_bound {}", m_bound.was_set());
if (m_bound.was_set())
return {};
auto result = protocol_bind();
if (!result.is_error())
m_bound = true;
m_bound.set();
return result;
}
ErrorOr<void> IPv4Socket::bind(Credentials const& credentials, Userspace<sockaddr const*> user_address, socklen_t address_size)
{
if (m_bound)
if (m_bound.was_set())
return set_so_error(EINVAL);
VERIFY(setup_state() == SetupState::Unstarted);

View file

@ -7,6 +7,7 @@
#pragma once
#include <AK/HashMap.h>
#include <AK/SetOnce.h>
#include <AK/SinglyLinkedList.h>
#include <Kernel/Library/DoubleBuffer.h>
#include <Kernel/Library/KBuffer.h>
@ -73,7 +74,7 @@ protected:
IPv4Socket(int type, int protocol, NonnullOwnPtr<DoubleBuffer> receive_buffer, OwnPtr<KBuffer> optional_scratch_buffer);
virtual StringView class_name() const override { return "IPv4Socket"sv; }
void set_bound(bool bound) { m_bound = bound; }
void set_bound() { m_bound.set(); }
ErrorOr<void> ensure_bound();
virtual ErrorOr<void> protocol_bind() { return {}; }
@ -107,7 +108,7 @@ private:
Vector<IPv4Address> m_multicast_memberships;
bool m_multicast_loop { true };
bool m_bound { false };
SetOnce m_bound;
struct ReceivedPacket {
IPv4Address peer_address;

View file

@ -221,7 +221,7 @@ UNMAP_AFTER_INIT ErrorOr<void> E1000ENetworkAdapter::initialize(Badge<Networking
dmesgln("E1000e: IO base: {}", m_registers_io_window);
dmesgln("E1000e: Interrupt line: {}", interrupt_number());
detect_eeprom();
dmesgln("E1000e: Has EEPROM? {}", m_has_eeprom);
dmesgln("E1000e: Has EEPROM? {}", m_has_eeprom.was_set());
read_mac_address();
auto const& mac = mac_address();
dmesgln("E1000e: MAC address: {}", mac.to_string());
@ -252,12 +252,13 @@ UNMAP_AFTER_INIT E1000ENetworkAdapter::~E1000ENetworkAdapter() = default;
UNMAP_AFTER_INIT void E1000ENetworkAdapter::detect_eeprom()
{
// Section 13.4.3 of https://www.intel.com/content/dam/doc/manual/pci-pci-x-family-gbe-controllers-software-dev-manual.pdf
m_has_eeprom = in32(REG_EECD) & EECD_PRES;
if (in32(REG_EECD) & EECD_PRES)
m_has_eeprom.set();
}
UNMAP_AFTER_INIT u32 E1000ENetworkAdapter::read_eeprom(u8 address)
{
VERIFY(m_has_eeprom);
VERIFY(m_has_eeprom.was_set());
u16 data = 0;
u32 tmp = 0;
out32(REG_EEPROM, ((u32)address << 2) | 1);

View file

@ -195,7 +195,7 @@ UNMAP_AFTER_INIT ErrorOr<void> E1000NetworkAdapter::initialize(Badge<NetworkingM
dmesgln_pci(*this, "IO base: {}", m_registers_io_window);
dmesgln_pci(*this, "Interrupt line: {}", interrupt_number());
detect_eeprom();
dmesgln_pci(*this, "Has EEPROM? {}", m_has_eeprom);
dmesgln_pci(*this, "Has EEPROM? {}", m_has_eeprom.was_set());
read_mac_address();
auto const& mac = mac_address();
dmesgln_pci(*this, "MAC address: {}", mac.to_string());
@ -280,18 +280,17 @@ UNMAP_AFTER_INIT void E1000NetworkAdapter::detect_eeprom()
for (int i = 0; i < 999; ++i) {
u32 data = in32(REG_EEPROM);
if (data & 0x10) {
m_has_eeprom = true;
m_has_eeprom.set();
return;
}
}
m_has_eeprom = false;
}
UNMAP_AFTER_INIT u32 E1000NetworkAdapter::read_eeprom(u8 address)
{
u16 data = 0;
u32 tmp = 0;
if (m_has_eeprom) {
if (m_has_eeprom.was_set()) {
out32(REG_EEPROM, ((u32)address << 8) | 1);
while (!((tmp = in32(REG_EEPROM)) & (1 << 4)))
Processor::wait_check();
@ -306,7 +305,7 @@ UNMAP_AFTER_INIT u32 E1000NetworkAdapter::read_eeprom(u8 address)
UNMAP_AFTER_INIT void E1000NetworkAdapter::read_mac_address()
{
if (m_has_eeprom) {
if (m_has_eeprom.was_set()) {
MACAddress mac {};
u32 tmp = read_eeprom(0);
mac[0] = tmp & 0xff;

View file

@ -7,6 +7,7 @@
#pragma once
#include <AK/OwnPtr.h>
#include <AK/SetOnce.h>
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Bus/PCI/Device.h>
#include <Kernel/Interrupts/IRQHandler.h>
@ -96,7 +97,7 @@ protected:
NonnullOwnPtr<Memory::Region> m_tx_buffer_region;
Array<void*, number_of_rx_descriptors> m_rx_buffers;
Array<void*, number_of_tx_descriptors> m_tx_buffers;
bool m_has_eeprom { false };
SetOnce m_has_eeprom;
bool m_link_up { false };
EntropySource m_entropy_source;

View file

@ -156,13 +156,13 @@ ErrorOr<void> LocalSocket::bind(Credentials const& credentials, Userspace<sockad
m_inode = inode;
m_path = move(path);
m_bound = true;
m_bound.set();
return {};
}
ErrorOr<void> LocalSocket::connect(Credentials const& credentials, OpenFileDescription& description, Userspace<sockaddr const*> user_address, socklen_t address_size)
{
if (m_bound)
if (m_bound.was_set())
return set_so_error(EISCONN);
if (address_size > sizeof(sockaddr_un))
@ -260,7 +260,7 @@ void LocalSocket::detach(OpenFileDescription& description)
VERIFY(m_accept_side_fd_open);
m_accept_side_fd_open = false;
if (m_bound) {
if (m_bound.was_set()) {
if (m_inode)
m_inode->unbind_socket();
}

View file

@ -7,6 +7,7 @@
#pragma once
#include <AK/IntrusiveList.h>
#include <AK/SetOnce.h>
#include <Kernel/Library/DoubleBuffer.h>
#include <Kernel/Net/Socket.h>
@ -94,7 +95,7 @@ private:
return m_role;
}
bool m_bound { false };
SetOnce m_bound;
bool m_accept_side_fd_open { false };
OwnPtr<KString> m_path;

View file

@ -157,7 +157,7 @@ ErrorOr<NonnullRefPtr<TCPSocket>> TCPSocket::try_create_client(IPv4Address const
client->set_local_port(new_local_port);
client->set_peer_address(new_peer_address);
client->set_peer_port(new_peer_port);
client->set_bound(true);
client->set_bound();
client->set_direction(Direction::Incoming);
client->set_originator(*this);

View file

@ -5,12 +5,13 @@
*/
#include <AK/Platform.h>
#include <AK/SetOnce.h>
#include <AK/TemporaryChange.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Devices/KCOVDevice.h>
#include <Kernel/Library/Panic.h>
extern bool g_in_early_boot;
extern SetOnce g_not_in_early_boot;
#ifdef ENABLE_KERNEL_COVERAGE_COLLECTION_DEBUG
// Set kcov_emergency_off=true before making calls from __sanitizer_cov_trace_pc to coverage
@ -36,7 +37,7 @@ static void crash_and_burn(Thread* thread)
extern "C" void __sanitizer_cov_trace_pc(void);
extern "C" void __sanitizer_cov_trace_pc(void)
{
if (g_in_early_boot) [[unlikely]]
if (!g_not_in_early_boot.was_set()) [[unlikely]]
return;
auto* thread = Processor::current_thread();

View file

@ -519,7 +519,7 @@ void Process::crash(int signal, Optional<RegisterState const&> regs, bool out_of
if (out_of_memory) {
dbgln("\033[31;1mOut of memory\033[m, killing: {}", *this);
} else {
if (ip >= kernel_load_base && g_kernel_symbols_available) {
if (ip >= kernel_load_base && g_kernel_symbols_available.was_set()) {
auto const* symbol = symbolicate_kernel_address(ip);
dbgln("\033[31;1m{:p} {} +{}\033[0m\n", ip, (symbol ? symbol->name : "(k?)"), (symbol ? ip - symbol->address : 0));
} else {

View file

@ -118,7 +118,7 @@ MonotonicTime TimeManagement::monotonic_time(TimePrecision precision) const
u64 seconds;
u32 ticks;
bool do_query = precision == TimePrecision::Precise && m_can_query_precise_time;
bool do_query = precision == TimePrecision::Precise && m_can_query_precise_time.was_set();
u32 update_iteration;
do {
@ -380,7 +380,7 @@ UNMAP_AFTER_INIT bool TimeManagement::probe_and_set_x86_non_legacy_hardware_time
// Use the HPET main counter frequency for time purposes. This is likely
// a much higher frequency than the interrupt itself and allows us to
// keep a more accurate time
m_can_query_precise_time = true;
m_can_query_precise_time.set();
m_time_ticks_per_second = HPET::the().frequency();
m_system_timer->try_to_set_frequency(m_system_timer->calculate_nearest_possible_frequency(OPTIMAL_TICKS_PER_SECOND_RATE));

View file

@ -9,6 +9,7 @@
#include <AK/Error.h>
#include <AK/OwnPtr.h>
#include <AK/Platform.h>
#include <AK/SetOnce.h>
#include <AK/Time.h>
#include <AK/Types.h>
#include <AK/Vector.h>
@ -72,7 +73,7 @@ public:
// FIXME: Most likely broken, because it does not check m_update[12] for in-progress updates.
void set_remaining_epoch_time_adjustment(Duration adjustment) { m_remaining_epoch_time_adjustment = adjustment; }
bool can_query_precise_time() const { return m_can_query_precise_time; }
bool can_query_precise_time() const { return m_can_query_precise_time.was_set(); }
Memory::VMObject& time_page_vmobject();
@ -108,7 +109,7 @@ private:
Atomic<u32> m_update2 { 0 };
u32 m_time_ticks_per_second { 0 }; // may be different from interrupts/second (e.g. hpet)
bool m_can_query_precise_time { false };
SetOnce m_can_query_precise_time;
bool m_updating_time { false }; // may only be accessed from the BSP!
LockRefPtr<HardwareTimerBase> m_system_timer;