Kernel: Configure PCI interrupt routing based on the FDT

Idan Horowitz 2024-04-23 20:23:55 +03:00 committed by Andrew Kaster
parent 7102d90b2b
commit 002bba4a97
5 changed files with 95 additions and 39 deletions

View file

@@ -54,10 +54,12 @@ void initialize()
 [[maybe_unused]] auto soc_size_cells = soc.get_property("#size-cells"sv).value().as<u32>();
 Optional<u32> domain_counter;
-Optional<FlatPtr> pci_32bit_mmio_base;
+FlatPtr pci_32bit_mmio_base = 0;
 u32 pci_32bit_mmio_size = 0;
-Optional<FlatPtr> pci_64bit_mmio_base;
+FlatPtr pci_64bit_mmio_base = 0;
 u64 pci_64bit_mmio_size = 0;
+HashMap<u32, u64> masked_interrupt_mapping;
+u32 interrupt_mask = 0;
 for (auto const& entry : soc.children()) {
 if (!entry.key.starts_with("pci"sv))
 continue;
@@ -182,24 +184,62 @@ void initialize()
 auto prefetchable = (pci_address_metadata >> OpenFirmwareAddress::prefetchable_offset) & OpenFirmwareAddress::prefetchable_mask;
 if (prefetchable)
 continue; // We currently only use non-prefetchable 32-bit regions, since 64-bit regions are always prefetchable - TODO: Use 32-bit prefetchable regions if only they are available
-if (pci_32bit_mmio_base.has_value() && pci_32bit_mmio_size >= mmio_size)
+if (pci_32bit_mmio_size >= mmio_size)
 continue; // We currently only use the single largest region - TODO: Use all available regions if needed
 pci_32bit_mmio_base = mmio_address;
 pci_32bit_mmio_size = mmio_size;
 } else {
-if (pci_64bit_mmio_base.has_value() && pci_64bit_mmio_size >= mmio_size)
+if (pci_64bit_mmio_size >= mmio_size)
 continue; // We currently only use the single largest region - TODO: Use all available regions if needed
 pci_64bit_mmio_base = mmio_address;
 pci_64bit_mmio_size = mmio_size;
 }
 }
 }
+auto maybe_interrupt_map = node.get_property("interrupt-map"sv);
+auto maybe_interrupt_map_mask = node.get_property("interrupt-map-mask"sv);
+if (maybe_interrupt_map.has_value() && maybe_interrupt_map_mask.has_value()) {
+auto mask_stream = maybe_interrupt_map_mask.value().as_stream();
+u32 metadata_mask = MUST(mask_stream.read_value<BigEndian<u32>>());
+MUST(mask_stream.discard(sizeof(u32) * 2));
+VERIFY(node.get_property("#interrupt-cells"sv)->as<u32>() == 1); // PCI interrupt pin should always fit in one word
+u32 pin_mask = MUST(mask_stream.read_value<BigEndian<u32>>());
+interrupt_mask = ((metadata_mask >> 8) << 8) | pin_mask;
+auto map_stream = maybe_interrupt_map.value().as_stream();
+while (!map_stream.is_eof()) {
+u32 pci_address_metadata = MUST(map_stream.read_value<BigEndian<u32>>());
+MUST(map_stream.discard(sizeof(u32) * 2));
+u32 pin = MUST(map_stream.read_value<BigEndian<u32>>());
+u32 interrupt_controller_phandle = MUST(map_stream.read_value<BigEndian<u32>>());
+auto* interrupt_controller = device_tree.phandle(interrupt_controller_phandle);
+VERIFY(interrupt_controller);
+auto interrupt_cells = interrupt_controller->get_property("#interrupt-cells"sv)->as<u32>();
+VERIFY(interrupt_cells == 1 || interrupt_cells == 2);
+u64 interrupt;
+if (interrupt_cells == 1)
+interrupt = MUST(map_stream.read_value<BigEndian<u32>>());
+else
+interrupt = MUST(map_stream.read_value<BigEndian<u64>>());
+auto masked_specifier = (((pci_address_metadata >> 8) << 8) | pin) & interrupt_mask;
+masked_interrupt_mapping.set(masked_specifier, interrupt);
+}
+}
 }
-if (pci_32bit_mmio_base.has_value() || pci_64bit_mmio_base.has_value())
-Access::the().configure_pci_space(pci_32bit_mmio_base.value_or(0), pci_32bit_mmio_size, pci_64bit_mmio_base.value_or(0), pci_64bit_mmio_size);
-else
+if (pci_32bit_mmio_size != 0 || pci_64bit_mmio_size != 0) {
+PCIConfiguration config {
+pci_32bit_mmio_base,
+pci_32bit_mmio_base + pci_32bit_mmio_size,
+pci_64bit_mmio_base,
+pci_64bit_mmio_base + pci_64bit_mmio_size,
+move(masked_interrupt_mapping),
+interrupt_mask,
+};
+Access::the().configure_pci_space(config);
+} else {
 dmesgln("PCI: No MMIO ranges found - assuming pre-configured by bootloader");
+}
 Access::the().rescan_hardware();
 PCIBusSysFSDirectory::initialize();
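
The hunk above derives the routing table from the devicetree's standard PCI `interrupt-map`/`interrupt-map-mask` binding: the mask selects which bits of the OpenFirmware PCI address cell (phys.hi) and of the interrupt pin take part in matching, and each masked specifier is mapped to the parent controller's interrupt number. Below is a minimal standalone sketch of that masking step, using standard-library containers instead of AK types; `InterruptMapEntry` and the function name are invented for illustration, and the parent interrupt specifier is assumed to have already been decoded from its 1 or 2 cells.

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

// Invented for illustration: one decoded row of a PCI "interrupt-map" property.
struct InterruptMapEntry {
    uint32_t pci_address_metadata; // OpenFirmware phys.hi cell (bus/device/function bits)
    uint32_t pin;                  // PCI interrupt pin (INTA = 1 ... INTD = 4)
    uint64_t parent_interrupt;     // parent interrupt specifier, already combined from 1 or 2 cells
};

// interrupt-map-mask is applied to the map entries here and to the device's own
// specifier at lookup time, so only the masked bits participate in matching.
std::unordered_map<uint32_t, uint64_t> build_masked_map(
    std::vector<InterruptMapEntry> const& entries,
    uint32_t metadata_mask, uint32_t pin_mask)
{
    // Mirror of the kernel's interrupt_mask = ((metadata_mask >> 8) << 8) | pin_mask:
    // the low 8 bits of the key hold the pin, the upper bits hold the masked phys.hi bits.
    uint32_t const combined_mask = ((metadata_mask >> 8) << 8) | pin_mask;
    std::unordered_map<uint32_t, uint64_t> map;
    for (auto const& entry : entries) {
        uint32_t const key = (((entry.pci_address_metadata >> 8) << 8) | entry.pin) & combined_mask;
        map[key] = entry.parent_interrupt;
    }
    return map;
}
```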

View file

@@ -154,14 +154,12 @@ UNMAP_AFTER_INIT Access::Access()
 s_access = this;
 }
-UNMAP_AFTER_INIT void Access::configure_pci_space(FlatPtr mmio_32bit_base, u32 mmio_32bit_size, FlatPtr mmio_64bit_base, u64 mmio_64bit_size)
+UNMAP_AFTER_INIT void Access::configure_pci_space(PCIConfiguration& config)
 {
 SpinlockLocker locker(m_access_lock);
 SpinlockLocker scan_locker(m_scan_lock);
-FlatPtr mmio_32bit_end = mmio_32bit_base + mmio_32bit_size;
-FlatPtr mmio_64bit_end = mmio_64bit_base + mmio_64bit_size;
 for (auto& [_, host_controller] : m_host_controllers)
-host_controller->configure_attached_devices(mmio_32bit_base, mmio_32bit_end, mmio_64bit_base, mmio_64bit_end);
+host_controller->configure_attached_devices(config);
 }
 UNMAP_AFTER_INIT void Access::rescan_hardware()

View file

@@ -27,7 +27,7 @@ public:
 #endif
 ErrorOr<void> fast_enumerate(Function<void(DeviceIdentifier const&)>&) const;
-void configure_pci_space(FlatPtr mmio_32bit_base, u32 mmio_32bit_size, FlatPtr mmio_64bit_base, u64 mmio_64bit_size);
+void configure_pci_space(PCIConfiguration&);
 void rescan_hardware();
 static Access& the();

View file

@@ -154,7 +154,7 @@ UNMAP_AFTER_INIT void HostController::enumerate_attached_devices(Function<void(E
 }
 }
-void HostController::configure_attached_devices(FlatPtr& mmio_32bit_base, FlatPtr mmio_32bit_end, FlatPtr& mmio_64bit_base, FlatPtr mmio_64bit_end)
+void HostController::configure_attached_devices(PCIConfiguration& config)
 {
 // First, Assign PCI-to-PCI bridge bus numbering
 u8 bus_id = 0;
@@ -171,9 +171,9 @@ void HostController::configure_attached_devices(FlatPtr& mmio_32bit_base, FlatPt
 write8_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::SUBORDINATE_BUS, bus_id);
 });
-// Second, Assign BAR addresses
+// Second, Assign BAR addresses & Interrupt numbers
 // TODO: We currently naively assign addresses bump-allocator style - Switch to a proper allocator if this is not good enough
-enumerate_attached_devices([this, &mmio_32bit_base, mmio_32bit_end, &mmio_64bit_base, mmio_64bit_end](EnumerableDeviceIdentifier const& device_identifier) {
+enumerate_attached_devices([this, &config](EnumerableDeviceIdentifier const& device_identifier) {
 // device-generic handling
 auto header_type = read8_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::HEADER_TYPE);
 auto const max_bar = (header_type == 0) ? RegisterOffset::BAR5 : RegisterOffset::BAR1;
@@ -191,16 +191,16 @@ void HostController::configure_attached_devices(FlatPtr& mmio_32bit_base, FlatPt
 bar_size = (~bar_size) + 1;
 if (bar_size == 0)
 continue;
-auto mmio_32bit_address = align_up_to(mmio_32bit_base, bar_size);
-if (mmio_32bit_address + bar_size <= mmio_32bit_end) {
+auto mmio_32bit_address = align_up_to(config.mmio_32bit_base, bar_size);
+if (mmio_32bit_address + bar_size <= config.mmio_32bit_end) {
 write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), bar_offset, mmio_32bit_address);
-mmio_32bit_base = mmio_32bit_address + bar_size;
+config.mmio_32bit_base = mmio_32bit_address + bar_size;
 continue;
 }
-auto mmio_64bit_address = align_up_to(mmio_64bit_base, bar_size);
-if (bar_prefetchable && mmio_64bit_address + bar_size <= mmio_64bit_end && mmio_64bit_address + bar_size <= NumericLimits<u32>::max()) {
+auto mmio_64bit_address = align_up_to(config.mmio_64bit_base, bar_size);
+if (bar_prefetchable && mmio_64bit_address + bar_size <= config.mmio_64bit_end && mmio_64bit_address + bar_size <= NumericLimits<u32>::max()) {
 write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), bar_offset, mmio_64bit_address);
-mmio_64bit_base = mmio_64bit_address + bar_size;
+config.mmio_64bit_base = mmio_64bit_address + bar_size;
 continue;
 }
 dmesgln("PCI: Ran out of 32-bit MMIO address space");
@@ -220,19 +220,19 @@ void HostController::configure_attached_devices(FlatPtr& mmio_32bit_base, FlatPt
 bar_offset += 4;
 continue;
 }
-auto mmio_64bit_address = align_up_to(mmio_64bit_base, bar_size);
-if (bar_prefetchable && mmio_64bit_address + bar_size <= mmio_64bit_end) {
+auto mmio_64bit_address = align_up_to(config.mmio_64bit_base, bar_size);
+if (bar_prefetchable && mmio_64bit_address + bar_size <= config.mmio_64bit_end) {
 write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), bar_offset, mmio_64bit_address & 0xFFFFFFFF);
 write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), bar_offset + 4, mmio_64bit_address >> 32);
-mmio_64bit_base = mmio_64bit_address + bar_size;
+config.mmio_64bit_base = mmio_64bit_address + bar_size;
 bar_offset += 4;
 continue;
 }
-auto mmio_32bit_address = align_up_to(mmio_32bit_base, bar_size);
-if (mmio_32bit_address + bar_size <= mmio_32bit_end) {
+auto mmio_32bit_address = align_up_to(config.mmio_32bit_base, bar_size);
+if (mmio_32bit_address + bar_size <= config.mmio_32bit_end) {
 write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), bar_offset, mmio_32bit_address & 0xFFFFFFFF);
 write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), bar_offset + 4, mmio_32bit_address >> 32);
-mmio_32bit_base = mmio_32bit_address + bar_size;
+config.mmio_32bit_base = mmio_32bit_address + bar_size;
 bar_offset += 4;
 continue;
 }
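
As the TODO above notes, BAR placement is bump-allocator style: the current window base is rounded up to the BAR's (power-of-two) size, the candidate is checked against the window's end, and the base then advances past the BAR; on failure the code falls through to the other window. A rough standalone sketch of that policy, with helper and parameter names invented here rather than taken from the kernel:

```cpp
#include <cstdint>
#include <optional>

// Round value up to a power-of-two alignment (BAR sizes are powers of two).
static uint64_t align_up_to(uint64_t value, uint64_t alignment)
{
    return (value + alignment - 1) & ~(alignment - 1);
}

// Bump-allocate one BAR inside [window_base, window_end): returns the assigned
// address and advances the base, or returns nothing if the window is exhausted.
std::optional<uint64_t> allocate_bar(uint64_t& window_base, uint64_t window_end, uint64_t bar_size)
{
    uint64_t const address = align_up_to(window_base, bar_size);
    if (address + bar_size > window_end)
        return std::nullopt;
    window_base = address + bar_size;
    return address;
}
```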
@@ -243,24 +243,30 @@ void HostController::configure_attached_devices(FlatPtr& mmio_32bit_base, FlatPt
 auto command_value = read16_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::COMMAND);
 command_value |= 1 << 1; // memory space enable
 write16_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::COMMAND, command_value);
+// assign interrupt number
+auto interrupt_pin = read8_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::INTERRUPT_PIN);
+auto masked_identifier = (((u32)device_identifier.address().bus() << 16) | ((u32)device_identifier.address().device() << 11) | ((u32)device_identifier.address().function() << 8) | interrupt_pin) & config.interrupt_mask;
+auto interrupt_number = config.masked_interrupt_mapping.get(masked_identifier);
+if (interrupt_number.has_value())
+write8_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::INTERRUPT_LINE, interrupt_number.value());
 if (read8_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::CLASS) != PCI::ClassID::Bridge)
 return;
 if (read8_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::SUBCLASS) != PCI::Bridge::SubclassID::PCI_TO_PCI)
 return;
 // bridge-specific handling
-mmio_32bit_base = align_up_to(mmio_32bit_base, MiB);
-mmio_64bit_base = align_up_to(mmio_64bit_base, MiB);
-write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::MEMORY_BASE, mmio_32bit_base >> 16);
-write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::PREFETCHABLE_MEMORY_BASE, mmio_64bit_base >> 16);
-write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::PREFETCHABLE_MEMORY_BASE_UPPER_32_BITS, mmio_64bit_base >> 32); },
-[this, &mmio_32bit_base, &mmio_64bit_base](EnumerableDeviceIdentifier const& device_identifier) {
+config.mmio_32bit_base = align_up_to(config.mmio_32bit_base, MiB);
+config.mmio_64bit_base = align_up_to(config.mmio_64bit_base, MiB);
+write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::MEMORY_BASE, config.mmio_32bit_base >> 16);
+write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::PREFETCHABLE_MEMORY_BASE, config.mmio_64bit_base >> 16);
+write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::PREFETCHABLE_MEMORY_BASE_UPPER_32_BITS, config.mmio_64bit_base >> 32); },
+[this, &config](EnumerableDeviceIdentifier const& device_identifier) {
 // called after a bridge was recursively enumerated
-mmio_32bit_base = align_up_to(mmio_32bit_base, MiB);
-mmio_64bit_base = align_up_to(mmio_64bit_base, MiB);
-write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::MEMORY_LIMIT, mmio_32bit_base >> 16);
-write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::PREFETCHABLE_MEMORY_LIMIT, mmio_64bit_base >> 16);
-write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::PREFETCHABLE_MEMORY_LIMIT_UPPER_32_BITS, mmio_64bit_base >> 32);
+config.mmio_32bit_base = align_up_to(config.mmio_32bit_base, MiB);
+config.mmio_64bit_base = align_up_to(config.mmio_64bit_base, MiB);
+write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::MEMORY_LIMIT, config.mmio_32bit_base >> 16);
+write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::PREFETCHABLE_MEMORY_LIMIT, config.mmio_64bit_base >> 16);
+write32_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::PREFETCHABLE_MEMORY_LIMIT_UPPER_32_BITS, config.mmio_64bit_base >> 32);
 // enable bridging
 auto command_value = read16_field(device_identifier.address().bus(), device_identifier.address().device(), device_identifier.address().function(), PCI::RegisterOffset::COMMAND);
 command_value |= 1 << 2; // enable forwarding of requests by the bridge
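
The new interrupt assignment in the last hunk packs the device's bus, device, and function into the same bit positions as the OpenFirmware phys.hi cell, ORs the interrupt pin into the low byte, masks the result with `interrupt_mask`, and uses it as the key into the routing map; a hit is written to `INTERRUPT_LINE`. A standalone sketch of that lookup side, again with std:: containers and invented names:

```cpp
#include <cstdint>
#include <optional>
#include <unordered_map>

// Look up the routed interrupt for a device, mirroring the masked key format:
// phys.hi puts the bus in bits 16-23, device in bits 11-15, function in bits 8-10;
// the interrupt pin occupies the low 8 bits of the combined key.
std::optional<uint64_t> lookup_interrupt_line(
    std::unordered_map<uint32_t, uint64_t> const& masked_interrupt_mapping,
    uint32_t interrupt_mask,
    uint8_t bus, uint8_t device, uint8_t function, uint8_t interrupt_pin)
{
    uint32_t const masked_identifier =
        ((uint32_t(bus) << 16) | (uint32_t(device) << 11) | (uint32_t(function) << 8) | interrupt_pin)
        & interrupt_mask;
    if (auto it = masked_interrupt_mapping.find(masked_identifier); it != masked_interrupt_mapping.end())
        return it->second;
    return std::nullopt; // no interrupt-map entry matched; leave INTERRUPT_LINE untouched
}
```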

View file

@@ -7,6 +7,7 @@
 #pragma once
 #include <AK/Bitmap.h>
+#include <AK/HashMap.h>
 #include <AK/Vector.h>
 #include <Kernel/Bus/PCI/Definitions.h>
 #include <Kernel/Locking/Spinlock.h>
@@ -17,6 +18,17 @@ AK_TYPEDEF_DISTINCT_ORDERED_ID(u8, BusNumber);
 AK_TYPEDEF_DISTINCT_ORDERED_ID(u8, DeviceNumber);
 AK_TYPEDEF_DISTINCT_ORDERED_ID(u8, FunctionNumber);
+struct PCIConfiguration {
+FlatPtr mmio_32bit_base { 0 };
+FlatPtr mmio_32bit_end { 0 };
+FlatPtr mmio_64bit_base { 0 };
+FlatPtr mmio_64bit_end { 0 };
+// The keys contain the bus, device & function at the same offsets as OpenFirmware PCI addresses,
+// with the least significant 8 bits being the interrupt pin.
+HashMap<u32, u64> masked_interrupt_mapping;
+u32 interrupt_mask { 0 };
+};
 class HostController {
 public:
 virtual ~HostController() = default;
@@ -32,7 +44,7 @@ public:
 u32 domain_number() const { return m_domain.domain_number(); }
 void enumerate_attached_devices(Function<void(EnumerableDeviceIdentifier const&)> callback, Function<void(EnumerableDeviceIdentifier const&)> post_bridge_callback = nullptr);
-void configure_attached_devices(FlatPtr& mmio_32bit_base, FlatPtr mmio_32bit_end, FlatPtr& mmio_64bit_base, FlatPtr mmio_64bit_end);
+void configure_attached_devices(PCIConfiguration&);
 private:
 void enumerate_bus(Function<void(EnumerableDeviceIdentifier const&)> const& callback, Function<void(EnumerableDeviceIdentifier const&)>& post_bridge_callback, BusNumber, bool recursive_search_into_bridges);
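
To make the key format documented in `PCIConfiguration` concrete, a short worked example follows. The mask is an assumption modeled on a typical QEMU virt-style `interrupt-map-mask` of `<0x1800 0 0 7>`; it is not taken from this commit.

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    // interrupt-map-mask cells (assumed): phys.hi mask 0x1800, pin mask 0x7.
    uint32_t const interrupt_mask = ((0x1800u >> 8) << 8) | 0x7u; // 0x1807
    // Device 00:02.0 raising INTA (pin 1):
    uint32_t const identifier = (0u << 16) | (2u << 11) | (0u << 8) | 1u; // 0x1001
    // Masked key used for the masked_interrupt_mapping lookup; the matching map
    // entry holds the parent interrupt controller's line number.
    std::printf("masked key = 0x%x\n", (unsigned)(identifier & interrupt_mask)); // prints 0x1001
    return 0;
}
```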