Kernel/VirtIO: Introduce the concept of transport options

The VirtIO specification defines many types of devices with different
purposes, and it also defines three possible transport media over which
devices can be connected to the host machine.

We only care about the PCIe transport for now, but this commit lays the
actual foundation for also supporting the lean MMIO transport in the future.

To keep things abstracted but still functional, the VirtIO transport code
is responsible for everything related to a specific transport type -
allocating interrupt handlers, tinkering with low-level transport-related
registers, and so on.
This commit is contained in:
Liav A 2023-06-10 14:46:47 +03:00 committed by Andrew Kaster
parent 68c3f9aa5a
commit d61c23569e
24 changed files with 732 additions and 429 deletions

View file

@ -14,6 +14,7 @@
#include <Kernel/Bus/PCI/Initializer.h>
#include <Kernel/Bus/USB/USBManagement.h>
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/Transport/PCIe/Detect.h>
#include <Kernel/Devices/Audio/Management.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/GPU/Console/BootFramebufferConsole.h>
@ -394,7 +395,7 @@ void init_stage2(void*)
SysFSFirmwareDirectory::initialize();
if (!PCI::Access::is_disabled()) {
VirtIO::detect();
VirtIO::detect_pci_instances();
}
NetworkingManagement::the().initialize();

View file

@ -6,6 +6,7 @@
*/
#include <Kernel/Bus/VirtIO/Console.h>
#include <Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Sections.h>
#include <Kernel/Tasks/WorkQueue.h>
@ -14,15 +15,16 @@ namespace Kernel::VirtIO {
unsigned Console::next_device_id = 0;
UNMAP_AFTER_INIT NonnullLockRefPtr<Console> Console::must_create(PCI::DeviceIdentifier const& pci_device_identifier)
UNMAP_AFTER_INIT NonnullLockRefPtr<Console> Console::must_create_for_pci_instance(PCI::DeviceIdentifier const& pci_device_identifier)
{
return adopt_lock_ref_if_nonnull(new Console(pci_device_identifier)).release_nonnull();
auto pci_transport_link = MUST(PCIeTransportLink::create(pci_device_identifier));
return adopt_lock_ref_if_nonnull(new (nothrow) Console(move(pci_transport_link))).release_nonnull();
}
UNMAP_AFTER_INIT ErrorOr<void> Console::initialize_virtio_resources()
{
TRY(Device::initialize_virtio_resources());
auto const* cfg = TRY(get_config(VirtIO::ConfigurationType::Device));
auto const* cfg = TRY(transport_entity().get_config(VirtIO::ConfigurationType::Device));
bool success = negotiate_features([&](u64 supported_features) {
u64 negotiated = 0;
if (is_feature_set(supported_features, VIRTIO_CONSOLE_F_SIZE))
@ -35,13 +37,13 @@ UNMAP_AFTER_INIT ErrorOr<void> Console::initialize_virtio_resources()
return Error::from_errno(EIO);
u32 max_nr_ports = 0;
u16 cols = 0, rows = 0;
read_config_atomic([&]() {
transport_entity().read_config_atomic([&]() {
if (is_feature_accepted(VIRTIO_CONSOLE_F_SIZE)) {
cols = config_read16(*cfg, 0x0);
rows = config_read16(*cfg, 0x2);
cols = transport_entity().config_read16(*cfg, 0x0);
rows = transport_entity().config_read16(*cfg, 0x2);
}
if (is_feature_accepted(VIRTIO_CONSOLE_F_MULTIPORT)) {
max_nr_ports = config_read32(*cfg, 0x4);
max_nr_ports = transport_entity().config_read32(*cfg, 0x4);
m_ports.resize(max_nr_ports);
}
});
@ -62,8 +64,8 @@ UNMAP_AFTER_INIT ErrorOr<void> Console::initialize_virtio_resources()
return {};
}
UNMAP_AFTER_INIT Console::Console(PCI::DeviceIdentifier const& pci_device_identifier)
: VirtIO::Device(pci_device_identifier)
UNMAP_AFTER_INIT Console::Console(NonnullOwnPtr<TransportEntity> transport_entity)
: VirtIO::Device(move(transport_entity))
, m_device_id(next_device_id++)
{
}

View file

@ -18,12 +18,9 @@ class Console
friend VirtIO::ConsolePort;
public:
static NonnullLockRefPtr<Console> must_create(PCI::DeviceIdentifier const&);
static NonnullLockRefPtr<Console> must_create_for_pci_instance(PCI::DeviceIdentifier const&);
virtual ~Console() override = default;
virtual StringView purpose() const override { return class_name(); }
virtual StringView device_name() const override { return class_name(); }
unsigned device_id() const
{
return m_device_id;
@ -33,7 +30,7 @@ public:
private:
virtual StringView class_name() const override { return "VirtIOConsole"sv; }
explicit Console(PCI::DeviceIdentifier const&);
explicit Console(NonnullOwnPtr<TransportEntity>);
enum class ControlEvent : u16 {
DeviceReady = 0,
DeviceAdd = 1,

View file

@ -68,7 +68,7 @@ enum class ConfigurationType : u8 {
struct Configuration {
ConfigurationType cfg_type;
u8 bar;
u8 resource_index; // NOTE: For PCI devices, this is the BAR index
u32 offset;
u32 length;
};

View file

@ -4,274 +4,38 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Boot/CommandLine.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Bus/VirtIO/Console.h>
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/RNG.h>
#include <Kernel/Sections.h>
namespace Kernel::VirtIO {
UNMAP_AFTER_INIT void detect()
{
if (kernel_command_line().disable_virtio())
return;
MUST(PCI::enumerate([&](PCI::DeviceIdentifier const& device_identifier) {
if (device_identifier.hardware_id().is_null())
return;
// TODO: We should also be checking that the device_id is in between 0x1000 - 0x107F inclusive
if (device_identifier.hardware_id().vendor_id != PCI::VendorID::VirtIO)
return;
switch (device_identifier.hardware_id().device_id) {
case PCI::DeviceID::VirtIOConsole: {
auto& console = Console::must_create(device_identifier).leak_ref();
MUST(console.initialize_virtio_resources());
break;
}
case PCI::DeviceID::VirtIOEntropy: {
auto& rng = RNG::must_create(device_identifier).leak_ref();
MUST(rng.initialize_virtio_resources());
break;
}
case PCI::DeviceID::VirtIOGPU: {
// This should have been initialized by the graphics subsystem
break;
}
default:
dbgln_if(VIRTIO_DEBUG, "VirtIO: Unknown VirtIO device with ID: {}", device_identifier.hardware_id().device_id);
break;
}
}));
}
static StringView determine_device_class(PCI::DeviceIdentifier const& device_identifier)
{
if (device_identifier.revision_id().value() == 0) {
// Note: If the device is a legacy (or transitional) device, therefore,
// probe the subsystem ID in the PCI header and figure out the
auto subsystem_device_id = device_identifier.subsystem_id().value();
switch (subsystem_device_id) {
case 1:
return "VirtIONetAdapter"sv;
case 2:
return "VirtIOBlockDevice"sv;
case 3:
return "VirtIOConsole"sv;
case 4:
return "VirtIORNG"sv;
default:
dbgln("VirtIO: Unknown subsystem_device_id {}", subsystem_device_id);
VERIFY_NOT_REACHED();
}
}
auto id = device_identifier.hardware_id();
VERIFY(id.vendor_id == PCI::VendorID::VirtIO);
switch (id.device_id) {
case PCI::DeviceID::VirtIONetAdapter:
return "VirtIONetAdapter"sv;
case PCI::DeviceID::VirtIOBlockDevice:
return "VirtIOBlockDevice"sv;
case PCI::DeviceID::VirtIOConsole:
return "VirtIOConsole"sv;
case PCI::DeviceID::VirtIOEntropy:
return "VirtIORNG"sv;
case PCI::DeviceID::VirtIOGPU:
return "VirtIOGPU"sv;
default:
dbgln("VirtIO: Unknown device_id {}", id.vendor_id);
VERIFY_NOT_REACHED();
}
}
UNMAP_AFTER_INIT ErrorOr<void> Device::initialize_virtio_resources()
{
enable_bus_mastering(device_identifier());
auto capabilities = device_identifier().capabilities();
for (auto& capability : capabilities) {
if (capability.id().value() == PCI::Capabilities::ID::VendorSpecific) {
// We have a virtio_pci_cap
Configuration config {};
auto raw_config_type = capability.read8(0x3);
// NOTE: The VirtIO specification allows iteration of configurations
// through a special PCI capbility structure with the VIRTIO_PCI_CAP_PCI_CFG tag:
//
// "Each structure can be mapped by a Base Address register (BAR) belonging to the function, or accessed via
// the special VIRTIO_PCI_CAP_PCI_CFG field in the PCI configuration space"
//
// "The VIRTIO_PCI_CAP_PCI_CFG capability creates an alternative (and likely suboptimal) access method
// to the common configuration, notification, ISR and device-specific configuration regions."
//
// Also, it is *very* likely to see this PCI capability as the first vendor-specific capbility of a certain PCI function,
// but this is not guaranteed by the VirtIO specification.
// Therefore, ignore this type of configuration as this is not needed by our implementation currently.
if (raw_config_type == static_cast<u8>(ConfigurationType::PCICapabilitiesAccess))
continue;
if (raw_config_type < static_cast<u8>(ConfigurationType::Common) || raw_config_type > static_cast<u8>(ConfigurationType::PCICapabilitiesAccess)) {
dbgln("{}: Unknown capability configuration type: {}", m_class_name, raw_config_type);
return Error::from_errno(ENXIO);
}
config.cfg_type = static_cast<ConfigurationType>(raw_config_type);
auto cap_length = capability.read8(0x2);
if (cap_length < 0x10) {
dbgln("{}: Unexpected capability size: {}", m_class_name, cap_length);
break;
}
config.bar = capability.read8(0x4);
if (config.bar > 0x5) {
dbgln("{}: Unexpected capability bar value: {}", m_class_name, config.bar);
break;
}
config.offset = capability.read32(0x8);
config.length = capability.read32(0xc);
// NOTE: Configuration length of zero is an invalid configuration, or at the very least a configuration
// type we don't know how to handle correctly...
// The VIRTIO_PCI_CAP_PCI_CFG configuration structure has length of 0
// but because we ignore that type and all other types should have a length
// greater than 0, we should ignore any other configuration in case this condition is not met.
if (config.length == 0) {
dbgln("{}: Found configuration {}, with invalid length of 0", m_class_name, (u32)config.cfg_type);
continue;
}
dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", m_class_name, (u32)config.cfg_type, config.bar, config.offset, config.length);
if (config.cfg_type == ConfigurationType::Common)
m_use_mmio = true;
else if (config.cfg_type == ConfigurationType::Notify)
m_notify_multiplier = capability.read32(0x10);
m_configs.append(config);
}
}
if (m_use_mmio) {
for (auto& cfg : m_configs) {
auto mapping_io_window = TRY(IOWindow::create_for_pci_device_bar(device_identifier(), static_cast<PCI::HeaderType0BaseRegister>(cfg.bar)));
m_register_bases[cfg.bar] = move(mapping_io_window);
}
m_common_cfg = TRY(get_config(ConfigurationType::Common, 0));
m_notify_cfg = TRY(get_config(ConfigurationType::Notify, 0));
m_isr_cfg = TRY(get_config(ConfigurationType::ISR, 0));
} else {
auto mapping_io_window = TRY(IOWindow::create_for_pci_device_bar(device_identifier(), PCI::HeaderType0BaseRegister::BAR0));
m_register_bases[0] = move(mapping_io_window);
}
// Note: We enable interrupts at least after the m_register_bases[0] ptr is
TRY(m_transport_entity->locate_configurations_and_resources({}, *this));
// NOTE: We enable interrupts at least after the m_register_bases[0] ptr is
// assigned with an IOWindow, to ensure that in case of getting an interrupt
// we can access registers from that IO window range.
PCI::enable_interrupt_line(device_identifier());
enable_irq();
m_transport_entity->enable_interrupts({});
reset_device();
// NOTE: Status bits should be set to 0 to keep them in sync, because
// we reset the device shortly afterwards.
m_status = 0;
m_transport_entity->reset_device({});
set_status_bit(DEVICE_STATUS_ACKNOWLEDGE);
set_status_bit(DEVICE_STATUS_DRIVER);
return {};
}
UNMAP_AFTER_INIT VirtIO::Device::Device(PCI::DeviceIdentifier const& device_identifier)
: PCI::Device(const_cast<PCI::DeviceIdentifier&>(device_identifier))
, IRQHandler(device_identifier.interrupt_line().value())
, m_class_name(VirtIO::determine_device_class(device_identifier))
UNMAP_AFTER_INIT VirtIO::Device::Device(NonnullOwnPtr<TransportEntity> transport_entity)
: m_class_name(transport_entity->determine_device_class_name())
, m_transport_entity(move(transport_entity))
{
dbgln("{}: Found @ {}", m_class_name, device_identifier.address());
}
void Device::notify_queue(u16 queue_index)
{
dbgln_if(VIRTIO_DEBUG, "{}: notifying about queue change at idx: {}", m_class_name, queue_index);
if (!m_notify_cfg)
base_io_window().write16(REG_QUEUE_NOTIFY, queue_index);
else
config_write16(*m_notify_cfg, get_queue(queue_index).notify_offset() * m_notify_multiplier, queue_index);
}
auto Device::mapping_for_bar(u8 bar) -> IOWindow&
{
VERIFY(m_use_mmio);
VERIFY(m_register_bases[bar]);
return *m_register_bases[bar];
}
u8 Device::config_read8(Configuration const& config, u32 offset)
{
return mapping_for_bar(config.bar).read8(config.offset + offset);
}
u16 Device::config_read16(Configuration const& config, u32 offset)
{
return mapping_for_bar(config.bar).read16(config.offset + offset);
}
u32 Device::config_read32(Configuration const& config, u32 offset)
{
return mapping_for_bar(config.bar).read32(config.offset + offset);
}
void Device::config_write8(Configuration const& config, u32 offset, u8 value)
{
mapping_for_bar(config.bar).write8(config.offset + offset, value);
}
void Device::config_write16(Configuration const& config, u32 offset, u16 value)
{
mapping_for_bar(config.bar).write16(config.offset + offset, value);
}
void Device::config_write32(Configuration const& config, u32 offset, u32 value)
{
mapping_for_bar(config.bar).write32(config.offset + offset, value);
}
void Device::config_write64(Configuration const& config, u32 offset, u64 value)
{
mapping_for_bar(config.bar).write32(config.offset + offset, (u32)(value & 0xFFFFFFFF));
mapping_for_bar(config.bar).write32(config.offset + offset + 4, (u32)(value >> 32));
}
u8 Device::read_status_bits()
{
if (!m_common_cfg)
return base_io_window().read8(REG_DEVICE_STATUS);
return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);
}
void Device::mask_status_bits(u8 status_mask)
{
m_status &= status_mask;
if (!m_common_cfg)
base_io_window().write8(REG_DEVICE_STATUS, m_status);
else
config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}
void Device::set_status_bit(u8 status_bit)
{
m_status |= status_bit;
if (!m_common_cfg)
base_io_window().write8(REG_DEVICE_STATUS, m_status);
else
config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}
u64 Device::get_device_features()
{
if (!m_common_cfg)
return base_io_window().read32(REG_DEVICE_FEATURES);
config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
auto lower_bits = config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
u64 upper_bits = (u64)config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
return upper_bits | lower_bits;
}
IOWindow& Device::base_io_window()
{
VERIFY(m_register_bases[0]);
return *m_register_bases[0];
m_transport_entity->set_status_bits({}, m_status);
}
bool Device::accept_device_features(u64 device_features, u64 accepted_features)
@ -300,16 +64,9 @@ bool Device::accept_device_features(u64 device_features, u64 accepted_features)
dbgln_if(VIRTIO_DEBUG, "{}: Device features: {}", m_class_name, device_features);
dbgln_if(VIRTIO_DEBUG, "{}: Accepted features: {}", m_class_name, accepted_features);
if (!m_common_cfg) {
base_io_window().write32(REG_GUEST_FEATURES, accepted_features);
} else {
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
}
m_transport_entity->accept_device_features({}, accepted_features);
set_status_bit(DEVICE_STATUS_FEATURES_OK);
m_status = read_status_bits();
m_status = m_transport_entity->read_status_bits();
if (!(m_status & DEVICE_STATUS_FEATURES_OK)) {
set_status_bit(DEVICE_STATUS_FAILED);
dbgln("{}: Features not accepted by host!", m_class_name);
@ -321,70 +78,27 @@ bool Device::accept_device_features(u64 device_features, u64 accepted_features)
return true;
}
void Device::reset_device()
{
dbgln_if(VIRTIO_DEBUG, "{}: Reset device", m_class_name);
if (!m_common_cfg) {
mask_status_bits(0);
while (read_status_bits() != 0) {
// TODO: delay a bit?
}
return;
}
config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
while (config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
// TODO: delay a bit?
}
}
bool Device::setup_queue(u16 queue_index)
{
if (!m_common_cfg)
return false;
config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
u16 queue_size = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_SIZE);
if (queue_size == 0) {
dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] is unavailable!", m_class_name, queue_index);
return true;
}
u16 queue_notify_offset = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);
auto queue_or_error = Queue::try_create(queue_size, queue_notify_offset);
auto queue_or_error = m_transport_entity->setup_queue({}, queue_index);
if (queue_or_error.is_error())
return false;
auto queue = queue_or_error.release_value();
config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());
dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] configured with size: {}", m_class_name, queue_index, queue_size);
dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] configured with size: {}", m_class_name, queue_index, queue->size());
m_queues.append(move(queue));
return true;
}
bool Device::activate_queue(u16 queue_index)
{
if (!m_common_cfg)
return false;
config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
config_write16(*m_common_cfg, COMMON_CFG_QUEUE_ENABLE, true);
dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] activated", m_class_name, queue_index);
return true;
}
bool Device::setup_queues(u16 requested_queue_count)
{
VERIFY(!m_did_setup_queues);
m_did_setup_queues = true;
if (m_common_cfg) {
auto maximum_queue_count = config_read16(*m_common_cfg, COMMON_CFG_NUM_QUEUES);
auto* common_cfg = m_transport_entity->get_config(ConfigurationType::Common).release_value_but_fixme_should_propagate_errors();
if (common_cfg) {
auto maximum_queue_count = m_transport_entity->config_read16(*common_cfg, COMMON_CFG_NUM_QUEUES);
if (requested_queue_count == 0) {
m_queue_count = maximum_queue_count;
} else if (requested_queue_count > maximum_queue_count) {
@ -404,7 +118,7 @@ bool Device::setup_queues(u16 requested_queue_count)
return false;
}
for (u16 i = 0; i < m_queue_count; i++) { // Queues can only be activated *after* all others queues were also configured
if (!activate_queue(i))
if (!m_transport_entity->activate_queue({}, i))
return false;
}
return true;
@ -420,16 +134,9 @@ void Device::finish_init()
dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", m_class_name);
}
u8 Device::isr_status()
bool Device::handle_irq(Badge<TransportInterruptHandler>)
{
if (!m_isr_cfg)
return base_io_window().read8(REG_ISR_STATUS);
return config_read8(*m_isr_cfg, 0);
}
bool Device::handle_irq(RegisterState const&)
{
u8 isr_type = isr_status();
u8 isr_type = m_transport_entity->isr_status();
if ((isr_type & (QUEUE_INTERRUPT | DEVICE_CONFIG_INTERRUPT)) == 0) {
dbgln_if(VIRTIO_DEBUG, "{}: Handling interrupt with unknown type: {}", class_name(), isr_type);
return false;
@ -460,8 +167,9 @@ void Device::supply_chain_and_notify(u16 queue_index, QueueChain& chain)
VERIFY(&chain.queue() == &queue);
VERIFY(queue.lock().is_locked());
chain.submit_to_queue();
auto descriptor = TransportEntity::NotifyQueueDescriptor { queue_index, get_queue(queue_index).notify_offset() };
if (queue.should_notify())
notify_queue(queue_index);
m_transport_entity->notify_queue({}, descriptor);
}
}

View file

@ -10,6 +10,8 @@
#include <Kernel/Bus/PCI/Device.h>
#include <Kernel/Bus/VirtIO/Definitions.h>
#include <Kernel/Bus/VirtIO/Queue.h>
#include <Kernel/Bus/VirtIO/Transport/Entity.h>
#include <Kernel/Bus/VirtIO/Transport/InterruptHandler.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Library/IOWindow.h>
#include <Kernel/Memory/MemoryManager.h>
@ -18,61 +20,21 @@ namespace Kernel::VirtIO {
void detect();
class Device
: public PCI::Device
, public IRQHandler {
class Device {
public:
virtual ~Device() override = default;
virtual ~Device() = default;
virtual ErrorOr<void> initialize_virtio_resources();
bool handle_irq(Badge<TransportInterruptHandler>);
protected:
virtual StringView class_name() const { return "VirtIO::Device"sv; }
explicit Device(PCI::DeviceIdentifier const&);
ErrorOr<Configuration const*> get_config(ConfigurationType cfg_type, u32 index = 0) const
{
for (auto const& cfg : m_configs) {
if (cfg.cfg_type != cfg_type)
continue;
if (index > 0) {
index--;
continue;
}
return &cfg;
}
return Error::from_errno(ENXIO);
}
explicit Device(NonnullOwnPtr<TransportEntity>);
template<typename F>
void read_config_atomic(F f)
{
if (m_common_cfg) {
u8 generation_before, generation_after;
do {
generation_before = config_read8(*m_common_cfg, 0x15);
f();
generation_after = config_read8(*m_common_cfg, 0x15);
} while (generation_before != generation_after);
} else {
f();
}
}
u8 config_read8(Configuration const&, u32);
u16 config_read16(Configuration const&, u32);
u32 config_read32(Configuration const&, u32);
void config_write8(Configuration const&, u32, u8);
void config_write16(Configuration const&, u32, u16);
void config_write32(Configuration const&, u32, u32);
void config_write64(Configuration const&, u32, u64);
auto mapping_for_bar(u8) -> IOWindow&;
u8 read_status_bits();
void mask_status_bits(u8 status_mask);
void set_status_bit(u8);
u64 get_device_features();
bool setup_queues(u16 requested_queue_count = 0);
void finish_init();
@ -91,7 +53,7 @@ protected:
template<typename F>
bool negotiate_features(F f)
{
u64 device_features = get_device_features();
u64 device_features = m_transport_entity->get_device_features();
u64 accept_features = f(device_features);
VERIFY(!(~device_features & accept_features));
return accept_device_features(device_features, accept_features);
@ -113,6 +75,8 @@ protected:
virtual bool handle_device_config_change() = 0;
virtual void handle_queue_update(u16 queue_index) = 0;
TransportEntity& transport_entity() { return *m_transport_entity; }
private:
bool accept_device_features(u64 device_features, u64 accepted_features);
@ -120,29 +84,16 @@ private:
bool activate_queue(u16 queue_index);
void notify_queue(u16 queue_index);
void reset_device();
u8 isr_status();
virtual bool handle_irq(RegisterState const&) override;
Vector<NonnullOwnPtr<Queue>> m_queues;
Vector<Configuration> m_configs;
Configuration const* m_common_cfg { nullptr }; // Cached due to high usage
Configuration const* m_notify_cfg { nullptr }; // Cached due to high usage
Configuration const* m_isr_cfg { nullptr }; // Cached due to high usage
IOWindow& base_io_window();
Array<OwnPtr<IOWindow>, 6> m_register_bases;
StringView const m_class_name;
u16 m_queue_count { 0 };
bool m_use_mmio { false };
u8 m_status { 0 };
u64 m_accepted_features { 0 };
bool m_did_accept_features { false };
bool m_did_setup_queues { false };
u32 m_notify_multiplier { 0 };
};
NonnullOwnPtr<TransportEntity> const m_transport_entity;
};
}

View file

@ -51,6 +51,8 @@ public:
bool should_notify() const;
u16 size() const { return m_queue_size; }
private:
Queue(NonnullOwnPtr<Memory::Region> queue_region, u16 queue_size, u16 notify_offset);

View file

@ -5,13 +5,15 @@
*/
#include <Kernel/Bus/VirtIO/RNG.h>
#include <Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.h>
#include <Kernel/Sections.h>
namespace Kernel::VirtIO {
UNMAP_AFTER_INIT NonnullLockRefPtr<RNG> RNG::must_create(PCI::DeviceIdentifier const& device_identifier)
UNMAP_AFTER_INIT NonnullLockRefPtr<RNG> RNG::must_create_for_pci_instance(PCI::DeviceIdentifier const& device_identifier)
{
return adopt_lock_ref_if_nonnull(new RNG(device_identifier)).release_nonnull();
auto pci_transport_link = MUST(PCIeTransportLink::create(device_identifier));
return adopt_lock_ref_if_nonnull(new RNG(move(pci_transport_link))).release_nonnull();
}
UNMAP_AFTER_INIT ErrorOr<void> RNG::initialize_virtio_resources()
@ -32,8 +34,8 @@ UNMAP_AFTER_INIT ErrorOr<void> RNG::initialize_virtio_resources()
return {};
}
UNMAP_AFTER_INIT RNG::RNG(PCI::DeviceIdentifier const& device_identifier)
: VirtIO::Device(device_identifier)
UNMAP_AFTER_INIT RNG::RNG(NonnullOwnPtr<TransportEntity> transport_entity)
: VirtIO::Device(move(transport_entity))
{
}

View file

@ -19,16 +19,14 @@ class RNG final
: public AtomicRefCounted<RNG>
, public VirtIO::Device {
public:
static NonnullLockRefPtr<RNG> must_create(PCI::DeviceIdentifier const&);
virtual StringView purpose() const override { return class_name(); }
virtual StringView device_name() const override { return class_name(); }
static NonnullLockRefPtr<RNG> must_create_for_pci_instance(PCI::DeviceIdentifier const&);
virtual ~RNG() override = default;
virtual ErrorOr<void> initialize_virtio_resources() override;
private:
virtual StringView class_name() const override { return "VirtIORNG"sv; }
explicit RNG(PCI::DeviceIdentifier const&);
explicit RNG(NonnullOwnPtr<TransportEntity>);
virtual bool handle_device_config_change() override;
virtual void handle_queue_update(u16 queue_index) override;
void request_entropy_from_host();

View file

@ -0,0 +1,168 @@
/*
* Copyright (c) 2023, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/VirtIO/Transport/Entity.h>
namespace Kernel::VirtIO {
// Returns the IO window that was mapped for the given resource index
// (for PCI transports this is the BAR index). Only valid when modern
// MMIO-style configuration structures were located (m_use_mmio), and
// only for indices that actually received a mapping.
auto TransportEntity::mapping_for_resource_index(u8 resource_index) -> IOWindow&
{
VERIFY(m_use_mmio);
VERIFY(m_register_bases[resource_index]);
return *m_register_bases[resource_index];
}
// Reads one byte from the given configuration structure, at the given
// offset relative to the structure's own base offset.
u8 TransportEntity::config_read8(Configuration const& config, u32 offset)
{
auto& window = mapping_for_resource_index(config.resource_index);
return window.read8(config.offset + offset);
}
// Reads a 16-bit value from the given configuration structure, at the
// given offset relative to the structure's own base offset.
u16 TransportEntity::config_read16(Configuration const& config, u32 offset)
{
auto& window = mapping_for_resource_index(config.resource_index);
return window.read16(config.offset + offset);
}
// Reads a 32-bit value from the given configuration structure, at the
// given offset relative to the structure's own base offset.
u32 TransportEntity::config_read32(Configuration const& config, u32 offset)
{
auto& window = mapping_for_resource_index(config.resource_index);
return window.read32(config.offset + offset);
}
// Writes one byte into the given configuration structure, at the given
// offset relative to the structure's own base offset.
void TransportEntity::config_write8(Configuration const& config, u32 offset, u8 value)
{
auto& window = mapping_for_resource_index(config.resource_index);
window.write8(config.offset + offset, value);
}
// Writes a 16-bit value into the given configuration structure, at the
// given offset relative to the structure's own base offset.
void TransportEntity::config_write16(Configuration const& config, u32 offset, u16 value)
{
auto& window = mapping_for_resource_index(config.resource_index);
window.write16(config.offset + offset, value);
}
// Writes a 32-bit value into the given configuration structure, at the
// given offset relative to the structure's own base offset.
void TransportEntity::config_write32(Configuration const& config, u32 offset, u32 value)
{
auto& window = mapping_for_resource_index(config.resource_index);
window.write32(config.offset + offset, value);
}
// Writes a 64-bit value as two 32-bit writes (low dword first, then high
// dword), since the underlying IO window only exposes 32-bit accesses.
void TransportEntity::config_write64(Configuration const& config, u32 offset, u64 value)
{
auto& window = mapping_for_resource_index(config.resource_index);
u32 const base = config.offset + offset;
window.write32(base, static_cast<u32>(value));
window.write32(base + 4, static_cast<u32>(value >> 32));
}
// Returns the IO window for resource index 0 - the legacy register layout
// lives entirely within this window when no modern configuration
// structures were located.
IOWindow& TransportEntity::base_io_window()
{
VERIFY(m_register_bases[0]);
return *m_register_bases[0];
}
// Reads (and thereby acknowledges, per the VirtIO spec) the ISR status
// byte. Uses the dedicated ISR configuration structure when one was
// located, otherwise falls back to the legacy REG_ISR_STATUS register.
u8 TransportEntity::isr_status()
{
if (!m_isr_cfg)
return base_io_window().read8(REG_ISR_STATUS);
return config_read8(*m_isr_cfg, 0);
}
// Badge-guarded entry point so only VirtIO::Device can drive the device
// status register; simply forwards to the protected overload.
void TransportEntity::set_status_bits(Badge<VirtIO::Device>, u8 status_bits)
{
return set_status_bits(status_bits);
}
// Writes the device status byte, choosing the common configuration
// structure when available and the legacy REG_DEVICE_STATUS register
// otherwise. Note: this overwrites the whole byte with status_bits.
void TransportEntity::set_status_bits(u8 status_bits)
{
if (!m_common_cfg)
base_io_window().write8(REG_DEVICE_STATUS, status_bits);
else
config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, status_bits);
}
// Selects the given queue index in the common configuration structure,
// allocates a Queue of the device-advertised size, and programs the
// descriptor/driver/device area addresses back into the device.
// The write/read ordering here follows the common-cfg queue setup flow
// and must not be rearranged. Requires a modern common configuration
// structure; returns ENXIO without one, or when the device reports the
// queue as unavailable (size 0).
ErrorOr<NonnullOwnPtr<Queue>> TransportEntity::setup_queue(Badge<VirtIO::Device>, u16 queue_index)
{
if (!m_common_cfg)
return Error::from_errno(ENXIO);
config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
u16 queue_size = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_SIZE);
if (queue_size == 0) {
dbgln_if(VIRTIO_DEBUG, "Queue[{}] is unavailable!", queue_index);
return Error::from_errno(ENXIO);
}
u16 queue_notify_offset = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);
auto queue = TRY(Queue::try_create(queue_size, queue_notify_offset));
config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());
return queue;
}
// Tells the device which feature bits the driver accepted. With a common
// configuration structure the 64-bit feature word is written as two
// 32-bit halves through the driver-feature select/value register pair;
// legacy devices only get the low 32 bits via REG_GUEST_FEATURES.
void TransportEntity::accept_device_features(Badge<VirtIO::Device>, u64 accepted_features)
{
if (!m_common_cfg) {
base_io_window().write32(REG_GUEST_FEATURES, accepted_features);
} else {
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
}
}
// Resets the device by writing 0 to the status register, then busy-waits
// until the device reports status 0, which signals reset completion.
// Both the legacy and the common-cfg paths perform the same
// write-zero-then-poll sequence, just through different registers.
void TransportEntity::reset_device(Badge<VirtIO::Device>)
{
if (!m_common_cfg) {
set_status_bits(0);
while (read_status_bits() != 0) {
// TODO: delay a bit?
}
return;
}
config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
while (config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
// TODO: delay a bit?
}
}
// Notifies the device that new buffers are available on a queue. Modern
// devices are notified through the notify configuration structure at
// (notify offset * notify multiplier); legacy devices through the
// REG_QUEUE_NOTIFY register.
void TransportEntity::notify_queue(Badge<VirtIO::Device>, NotifyQueueDescriptor descriptor)
{
dbgln_if(VIRTIO_DEBUG, "notifying about queue change at idx: {}", descriptor.queue_index);
if (!m_notify_cfg)
base_io_window().write16(REG_QUEUE_NOTIFY, descriptor.queue_index);
else
config_write16(*m_notify_cfg, descriptor.possible_notify_offset * m_notify_multiplier, descriptor.queue_index);
}
// Enables a previously configured queue by selecting it and setting the
// queue-enable field in the common configuration structure. Returns
// false when no common configuration structure is available (legacy
// devices have no explicit queue-enable step through this path).
bool TransportEntity::activate_queue(Badge<VirtIO::Device>, u16 queue_index)
{
if (!m_common_cfg)
return false;
config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
config_write16(*m_common_cfg, COMMON_CFG_QUEUE_ENABLE, true);
dbgln_if(VIRTIO_DEBUG, "Queue[{}] activated", queue_index);
return true;
}
// Reads the 64-bit device feature word. Modern devices expose it as two
// 32-bit halves selected via the device-feature select register (select
// 0 = low dword, 1 = high dword); legacy devices only expose the low
// 32 bits via REG_DEVICE_FEATURES.
u64 TransportEntity::get_device_features()
{
if (!m_common_cfg)
return base_io_window().read32(REG_DEVICE_FEATURES);
config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
auto lower_bits = config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
u64 upper_bits = (u64)config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
return upper_bits | lower_bits;
}
// Reads the current device status byte, preferring the common
// configuration structure and falling back to the legacy
// REG_DEVICE_STATUS register when none was located.
u8 TransportEntity::read_status_bits()
{
if (m_common_cfg)
return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);
return base_io_window().read8(REG_DEVICE_STATUS);
}
}

View file

@ -0,0 +1,98 @@
/*
* Copyright (c) 2023, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/Bus/VirtIO/Definitions.h>
#include <Kernel/Bus/VirtIO/Queue.h>
#include <Kernel/Library/IOWindow.h>
namespace Kernel::VirtIO {
// Abstract base for a VirtIO transport (e.g. PCIe, and MMIO in the
// future). A TransportEntity owns everything transport-specific:
// locating configuration structures, mapping IO windows, interrupt
// wiring, and raw register access. VirtIO::Device drives it through
// Badge-guarded entry points.
class TransportEntity {
public:
virtual ~TransportEntity() = default;
// Locates the transport's configuration structures and maps the
// IO resources needed to access them (transport-specific).
virtual ErrorOr<void> locate_configurations_and_resources(Badge<VirtIO::Device>, VirtIO::Device&) = 0;
virtual void disable_interrupts(Badge<VirtIO::Device>) = 0;
virtual void enable_interrupts(Badge<VirtIO::Device>) = 0;
// Human-readable class name for the attached device (e.g. "VirtIOConsole").
virtual StringView determine_device_class_name() const = 0;
void accept_device_features(Badge<VirtIO::Device>, u64 accepted_features);
// Parameters for a queue notification: which queue, and the notify
// offset the device reported for it during queue setup.
struct NotifyQueueDescriptor {
u16 queue_index;
u16 possible_notify_offset;
};
void notify_queue(Badge<VirtIO::Device>, NotifyQueueDescriptor);
bool activate_queue(Badge<VirtIO::Device>, u16 queue_index);
ErrorOr<NonnullOwnPtr<Queue>> setup_queue(Badge<VirtIO::Device>, u16 queue_index);
void set_status_bits(Badge<VirtIO::Device>, u8 status_bits);
void reset_device(Badge<VirtIO::Device>);
u8 read_status_bits();
u8 isr_status();
u64 get_device_features();
// Returns the index-th located configuration structure of the given
// type, or ENXIO if fewer than (index + 1) such structures exist.
ErrorOr<Configuration const*> get_config(ConfigurationType cfg_type, u32 index = 0) const
{
for (auto const& cfg : m_configs) {
if (cfg.cfg_type != cfg_type)
continue;
if (index > 0) {
index--;
continue;
}
return &cfg;
}
return Error::from_errno(ENXIO);
}
u8 config_read8(Configuration const&, u32);
u16 config_read16(Configuration const&, u32);
u32 config_read32(Configuration const&, u32);
void config_write8(Configuration const&, u32, u8);
void config_write16(Configuration const&, u32, u16);
void config_write32(Configuration const&, u32, u32);
void config_write64(Configuration const&, u32, u64);
// Runs f() under the common-cfg generation counter (offset 0x15, the
// config_generation field per the VirtIO spec): re-runs f() until the
// counter is unchanged across the read, so f sees a consistent
// snapshot of device config. Without a common cfg, f runs once.
template<typename F>
void read_config_atomic(F f)
{
if (m_common_cfg) {
u8 generation_before, generation_after;
do {
generation_before = config_read8(*m_common_cfg, 0x15);
f();
generation_after = config_read8(*m_common_cfg, 0x15);
} while (generation_before != generation_after);
} else {
f();
}
}
protected:
TransportEntity() = default;
auto mapping_for_resource_index(u8) -> IOWindow&;
void set_status_bits(u8 status_bits);
Vector<Configuration> m_configs;
Configuration const* m_common_cfg { nullptr }; // Cached due to high usage
Configuration const* m_notify_cfg { nullptr }; // Cached due to high usage
Configuration const* m_isr_cfg { nullptr }; // Cached due to high usage
IOWindow& base_io_window();
// One optional IO window per resource index (PCI: up to 6 BARs).
Array<OwnPtr<IOWindow>, 6> m_register_bases;
bool m_use_mmio { false };
// Multiplier applied to a queue's notify offset within the notify cfg.
u32 m_notify_multiplier { 0 };
};
};

View file

@ -0,0 +1,22 @@
/*
* Copyright (c) 2023, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/Transport/InterruptHandler.h>
namespace Kernel::VirtIO {
// Remembers the VirtIO device that interrupts will be forwarded to.
TransportInterruptHandler::TransportInterruptHandler(VirtIO::Device& parent_device)
    : m_parent_device(parent_device)
{
}
bool TransportInterruptHandler::notify_parent_device_on_interrupt()
{
    // Hand the interrupt over to the owning VirtIO device and report whether
    // the device considered it handled.
    bool const was_handled = m_parent_device.handle_irq({});
    return was_handled;
}
}

View file

@ -0,0 +1,24 @@
/*
* Copyright (c) 2023, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
namespace Kernel::VirtIO {
class Device;
// Transport-agnostic glue between a transport's interrupt mechanism and its
// VirtIO device: concrete transport interrupt handlers derive from this and
// call notify_parent_device_on_interrupt() from their IRQ path.
class TransportInterruptHandler {
protected:
    TransportInterruptHandler(VirtIO::Device&);

    // Returns true if the parent device handled the interrupt.
    bool notify_parent_device_on_interrupt();

private:
    VirtIO::Device& m_parent_device;
};
}

View file

@ -0,0 +1,52 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
* Copyright (c) 2023, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Try.h>
#include <Kernel/Boot/CommandLine.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Bus/VirtIO/Console.h>
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/RNG.h>
#include <Kernel/Bus/VirtIO/Transport/PCIe/Detect.h>
#include <Kernel/Sections.h>
namespace Kernel::VirtIO {
// Walks the PCI bus and brings up a driver instance for every VirtIO device
// type we know how to handle (console, entropy source); GPU devices are left
// to the graphics subsystem.
UNMAP_AFTER_INIT void detect_pci_instances()
{
    if (kernel_command_line().disable_virtio())
        return;
    MUST(PCI::enumerate([&](PCI::DeviceIdentifier const& device_identifier) {
        if (device_identifier.hardware_id().is_null())
            return;
        // TODO: We should also be checking that the device_id is in between 0x1000 - 0x107F inclusive
        if (device_identifier.hardware_id().vendor_id != PCI::VendorID::VirtIO)
            return;
        auto const device_id = device_identifier.hardware_id().device_id;
        if (device_id == PCI::DeviceID::VirtIOConsole) {
            auto& console = Console::must_create_for_pci_instance(device_identifier).leak_ref();
            MUST(console.initialize_virtio_resources());
        } else if (device_id == PCI::DeviceID::VirtIOEntropy) {
            auto& rng = RNG::must_create_for_pci_instance(device_identifier).leak_ref();
            MUST(rng.initialize_virtio_resources());
        } else if (device_id == PCI::DeviceID::VirtIOGPU) {
            // This should have been initialized by the graphics subsystem
        } else {
            dbgln_if(VIRTIO_DEBUG, "VirtIO: Unknown VirtIO device with ID: {}", device_id);
        }
    }));
}
}

View file

@ -0,0 +1,13 @@
/*
* Copyright (c) 2023, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
namespace Kernel::VirtIO {

// Enumerates the PCI bus and instantiates drivers for known VirtIO PCI devices.
void detect_pci_instances();

}

View file

@ -0,0 +1,27 @@
/*
* Copyright (c) 2021-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/VirtIO/Transport/PCIe/InterruptHandler.h>
namespace Kernel::VirtIO {
// Factory: heap-allocates the handler, converting allocation failure to ENOMEM.
ErrorOr<NonnullOwnPtr<PCIeTransportInterruptHandler>> PCIeTransportInterruptHandler::create(PCIeTransportLink& transport_link, VirtIO::Device& parent_device, u8 irq)
{
    auto* raw_handler = new (nothrow) PCIeTransportInterruptHandler(transport_link, parent_device, irq);
    return adopt_nonnull_own_or_enomem(raw_handler);
}
// Registers this handler on the transport link's PCI IRQ line and records the
// VirtIO device that interrupts should be forwarded to (via the
// TransportInterruptHandler base).
PCIeTransportInterruptHandler::PCIeTransportInterruptHandler(PCIeTransportLink& transport_link, VirtIO::Device& parent_device, u8 irq)
    : TransportInterruptHandler(parent_device)
    , PCI::IRQHandler(transport_link, irq)
{
}
bool PCIeTransportInterruptHandler::handle_irq(RegisterState const&)
{
    // The CPU register state is irrelevant here — just forward the interrupt
    // to the owning VirtIO device and propagate whether it was handled.
    auto const handled = notify_parent_device_on_interrupt();
    return handled;
}
}

View file

@ -0,0 +1,31 @@
/*
* Copyright (c) 2023, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Bus/VirtIO/Device.h>
#include <Kernel/Bus/VirtIO/Transport/InterruptHandler.h>
#include <Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.h>
#include <Kernel/Interrupts/PCIIRQHandler.h>
namespace Kernel::VirtIO {
// PCI pin-based IRQ handler for a VirtIO device behind a PCIe transport link:
// receives the raw PCI interrupt and forwards it to the owning VirtIO device.
class PCIeTransportInterruptHandler final
    : public TransportInterruptHandler
    , public PCI::IRQHandler {
public:
    static ErrorOr<NonnullOwnPtr<PCIeTransportInterruptHandler>> create(PCIeTransportLink&, VirtIO::Device&, u8 irq);
    virtual ~PCIeTransportInterruptHandler() override = default;

    virtual StringView purpose() const override { return "VirtIO PCI IRQ Handler"sv; }

private:
    PCIeTransportInterruptHandler(PCIeTransportLink&, VirtIO::Device&, u8 irq);

    // ^PCI::IRQHandler
    virtual bool handle_irq(RegisterState const&) override;
};
}

View file

@ -0,0 +1,161 @@
/*
* Copyright (c) 2023, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Bus/VirtIO/Transport/PCIe/InterruptHandler.h>
#include <Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.h>
namespace Kernel::VirtIO {
// Factory: heap-allocates a PCIe transport link for the given PCI function,
// handing it back as the abstract TransportEntity type so callers stay
// transport-agnostic.
ErrorOr<NonnullOwnPtr<TransportEntity>> PCIeTransportLink::create(PCI::DeviceIdentifier const& pci_identifier)
{
    auto link = TRY(adopt_nonnull_own_or_enomem(new (nothrow) PCIeTransportLink(pci_identifier)));
    return link;
}
// Maps the PCI-level identification of this function to a human-readable
// VirtIO device class name. Legacy/transitional devices (PCI revision 0) are
// identified by their subsystem device ID; modern devices by their PCI device
// ID. Panics on IDs we do not recognize.
StringView PCIeTransportLink::determine_device_class_name() const
{
    if (device_identifier().revision_id().value() == 0) {
        // Note: If the device is a legacy (or transitional) device, therefore,
        // probe the subsystem ID in the PCI header and figure out the
        // device type from it.
        auto subsystem_device_id = device_identifier().subsystem_id().value();
        switch (subsystem_device_id) {
        case 1:
            return "VirtIONetAdapter"sv;
        case 2:
            return "VirtIOBlockDevice"sv;
        case 3:
            return "VirtIOConsole"sv;
        case 4:
            return "VirtIORNG"sv;
        default:
            dbgln("VirtIO: Unknown subsystem_device_id {}", subsystem_device_id);
            VERIFY_NOT_REACHED();
        }
    }
    auto id = device_identifier().hardware_id();
    VERIFY(id.vendor_id == PCI::VendorID::VirtIO);
    switch (id.device_id) {
    case PCI::DeviceID::VirtIONetAdapter:
        return "VirtIONetAdapter"sv;
    case PCI::DeviceID::VirtIOBlockDevice:
        return "VirtIOBlockDevice"sv;
    case PCI::DeviceID::VirtIOConsole:
        return "VirtIOConsole"sv;
    case PCI::DeviceID::VirtIOEntropy:
        return "VirtIORNG"sv;
    case PCI::DeviceID::VirtIOGPU:
        return "VirtIOGPU"sv;
    default:
        // Bug fix: this message previously printed id.vendor_id, which the
        // VERIFY above already pinned to PCI::VendorID::VirtIO — print the
        // actually-unknown device ID instead.
        dbgln("VirtIO: Unknown device_id {}", id.device_id);
        VERIFY_NOT_REACHED();
    }
}
// Reserves one interrupt line for this PCI function and attaches a
// PCIeTransportInterruptHandler that forwards IRQs to the owning VirtIO
// device. NOTE(review): the `false` argument to reserve_irqs presumably opts
// out of MSI/MSI-X — confirm against the PCI::Device API.
ErrorOr<void> PCIeTransportLink::create_interrupt_handler(VirtIO::Device& parent_device)
{
    TRY(reserve_irqs(1, false));
    // MUST (not TRY) — allocating IRQ slot 0 cannot fail after we just
    // successfully reserved one IRQ above.
    auto irq = MUST(allocate_irq(0));
    m_irq_handler = TRY(PCIeTransportInterruptHandler::create(*this, parent_device, irq));
    return {};
}
// Binds the transport link to the underlying PCI function and logs which
// VirtIO device class was found at which PCI address.
PCIeTransportLink::PCIeTransportLink(PCI::DeviceIdentifier const& pci_identifier)
    : PCI::Device(pci_identifier)
{
    dbgln("{}: Found @ {}", determine_device_class_name(), device_identifier().address());
}
// Sets up interrupt delivery and bus mastering, then walks this function's
// PCI vendor-specific capabilities to discover VirtIO configuration
// structures (virtio_pci_cap entries). Finally maps the IO windows needed to
// reach them: one per referenced BAR for modern (memory-mapped) devices, or
// just BAR0 for legacy devices without a common configuration.
ErrorOr<void> PCIeTransportLink::locate_configurations_and_resources(Badge<VirtIO::Device>, VirtIO::Device& parent_device)
{
    TRY(create_interrupt_handler(parent_device));
    PCI::enable_bus_mastering(device_identifier());

    auto capabilities = device_identifier().capabilities();
    for (auto& capability : capabilities) {
        if (capability.id().value() == PCI::Capabilities::ID::VendorSpecific) {
            // We have a virtio_pci_cap
            Configuration config {};
            // Offset 0x3 within the capability holds the cfg_type field.
            auto raw_config_type = capability.read8(0x3);
            // NOTE: The VirtIO specification allows iteration of configurations
            // through a special PCI capability structure with the VIRTIO_PCI_CAP_PCI_CFG tag:
            //
            // "Each structure can be mapped by a Base Address register (BAR) belonging to the function, or accessed via
            // the special VIRTIO_PCI_CAP_PCI_CFG field in the PCI configuration space"
            //
            // "The VIRTIO_PCI_CAP_PCI_CFG capability creates an alternative (and likely suboptimal) access method
            // to the common configuration, notification, ISR and device-specific configuration regions."
            //
            // Also, it is *very* likely to see this PCI capability as the first vendor-specific capability of a certain PCI function,
            // but this is not guaranteed by the VirtIO specification.
            // Therefore, ignore this type of configuration as this is not needed by our implementation currently.
            if (raw_config_type == static_cast<u8>(ConfigurationType::PCICapabilitiesAccess))
                continue;
            // Any type outside the known [Common, PCICapabilitiesAccess] range is fatal for this device.
            if (raw_config_type < static_cast<u8>(ConfigurationType::Common) || raw_config_type > static_cast<u8>(ConfigurationType::PCICapabilitiesAccess)) {
                dbgln("{}: Unknown capability configuration type: {}", device_name(), raw_config_type);
                return Error::from_errno(ENXIO);
            }
            config.cfg_type = static_cast<ConfigurationType>(raw_config_type);
            auto cap_length = capability.read8(0x2);
            // A virtio_pci_cap is at least 0x10 bytes; anything smaller is malformed,
            // so stop scanning capabilities (keeping whatever we found so far).
            if (cap_length < 0x10) {
                dbgln("{}: Unexpected capability size: {}", device_name(), cap_length);
                break;
            }
            config.resource_index = capability.read8(0x4);
            // Only BARs 0-5 exist on a header type 0 PCI function.
            if (config.resource_index > 0x5) {
                dbgln("{}: Unexpected capability BAR value: {}", device_name(), config.resource_index);
                break;
            }
            config.offset = capability.read32(0x8);
            config.length = capability.read32(0xc);
            // NOTE: Configuration length of zero is an invalid configuration, or at the very least a configuration
            // type we don't know how to handle correctly...
            // The VIRTIO_PCI_CAP_PCI_CFG configuration structure has length of 0
            // but because we ignore that type and all other types should have a length
            // greater than 0, we should ignore any other configuration in case this condition is not met.
            if (config.length == 0) {
                dbgln("{}: Found configuration {}, with invalid length of 0", device_name(), (u32)config.cfg_type);
                continue;
            }
            dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, resource: {}, offset: {}, length: {}", device_name(), (u32)config.cfg_type, config.resource_index, config.offset, config.length);
            // Presence of a common configuration means this is a modern, memory-mapped device.
            if (config.cfg_type == ConfigurationType::Common)
                m_use_mmio = true;
            else if (config.cfg_type == ConfigurationType::Notify)
                m_notify_multiplier = capability.read32(0x10);
            m_configs.append(config);
        }
    }

    if (m_use_mmio) {
        // Modern device: map every BAR referenced by a discovered configuration,
        // and cache the heavily-used configuration structures.
        for (auto& cfg : m_configs) {
            auto mapping_io_window = TRY(IOWindow::create_for_pci_device_bar(device_identifier(), static_cast<PCI::HeaderType0BaseRegister>(cfg.resource_index)));
            m_register_bases[cfg.resource_index] = move(mapping_io_window);
        }
        m_common_cfg = TRY(get_config(ConfigurationType::Common, 0));
        m_notify_cfg = TRY(get_config(ConfigurationType::Notify, 0));
        m_isr_cfg = TRY(get_config(ConfigurationType::ISR, 0));
    } else {
        // Legacy device: all registers live behind BAR0.
        auto mapping_io_window = TRY(IOWindow::create_for_pci_device_bar(device_identifier(), PCI::HeaderType0BaseRegister::BAR0));
        m_register_bases[0] = move(mapping_io_window);
    }
    return {};
}
// Masks interrupts from this device: first at the PCI function level
// (pin-based interrupt disable), then at the handler level. Note the mirrored
// ordering relative to enable_interrupts().
void PCIeTransportLink::disable_interrupts(Badge<VirtIO::Device>)
{
    disable_pin_based_interrupts();
    m_irq_handler->disable_irq();
}
// Unmasks interrupts: arm the IRQ handler first so no interrupt is lost once
// pin-based delivery is re-enabled at the PCI function level.
void PCIeTransportLink::enable_interrupts(Badge<VirtIO::Device>)
{
    m_irq_handler->enable_irq();
    enable_pin_based_interrupts();
}
}

View file

@ -0,0 +1,40 @@
/*
* Copyright (c) 2021, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/Bus/PCI/Device.h>
#include <Kernel/Bus/VirtIO/Transport/Entity.h>
#include <Kernel/Interrupts/PCIIRQHandler.h>
namespace Kernel::VirtIO {
// VirtIO transport implementation for devices attached over PCIe: implements
// the TransportEntity interface on top of a PCI::Device, doing configuration
// discovery via PCI capabilities and interrupt delivery via a PCI IRQ line.
class PCIeTransportLink final
    : public TransportEntity
    , public PCI::Device {
public:
    static ErrorOr<NonnullOwnPtr<TransportEntity>> create(PCI::DeviceIdentifier const& pci_identifier);

    virtual StringView device_name() const override { return "VirtIOTransportLink"sv; }
    virtual StringView determine_device_class_name() const override;

private:
    explicit PCIeTransportLink(PCI::DeviceIdentifier const& pci_identifier);

    // ^TransportEntity
    virtual ErrorOr<void> locate_configurations_and_resources(Badge<VirtIO::Device>, VirtIO::Device&) override;
    virtual void disable_interrupts(Badge<VirtIO::Device>) override;
    virtual void enable_interrupts(Badge<VirtIO::Device>) override;

    ErrorOr<void> create_interrupt_handler(VirtIO::Device&);

    // FIXME: There could be multiple IRQ (MSI-X) handlers for a VirtIO device.
    // Find a way to use all of them.
    OwnPtr<PCI::IRQHandler> m_irq_handler;
};
};

View file

@ -38,6 +38,11 @@ set(KERNEL_SOURCES
Bus/USB/USBManagement.cpp
Bus/USB/USBPipe.cpp
Bus/USB/USBTransfer.cpp
Bus/VirtIO/Transport/PCIe/Detect.cpp
Bus/VirtIO/Transport/PCIe/InterruptHandler.cpp
Bus/VirtIO/Transport/PCIe/TransportLink.cpp
Bus/VirtIO/Transport/Entity.cpp
Bus/VirtIO/Transport/InterruptHandler.cpp
Bus/VirtIO/Console.cpp
Bus/VirtIO/ConsolePort.cpp
Bus/VirtIO/Device.cpp

View file

@ -8,6 +8,7 @@
#include <Kernel/Arch/Delay.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/GPU/Console/GenericFramebufferConsole.h>
#include <Kernel/Devices/GPU/Management.h>
@ -36,7 +37,8 @@ ErrorOr<NonnullLockRefPtr<GenericGraphicsAdapter>> VirtIOGraphicsAdapter::create
Memory::Region::Access::ReadWrite));
auto active_context_ids = TRY(Bitmap::create(VREND_MAX_CTX, false));
auto adapter = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) VirtIOGraphicsAdapter(device_identifier, move(active_context_ids), move(scratch_space_region))));
auto pci_transport_link = TRY(VirtIO::PCIeTransportLink::create(device_identifier));
auto adapter = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) VirtIOGraphicsAdapter(move(pci_transport_link), move(active_context_ids), move(scratch_space_region))));
TRY(adapter->initialize_virtio_resources());
TRY(adapter->initialize_adapter());
return adapter;
@ -132,8 +134,8 @@ ErrorOr<void> VirtIOGraphicsAdapter::attach_physical_range_to_framebuffer(VirtIO
return {};
}
VirtIOGraphicsAdapter::VirtIOGraphicsAdapter(PCI::DeviceIdentifier const& device_identifier, Bitmap&& active_context_ids, NonnullOwnPtr<Memory::Region> scratch_space_region)
: VirtIO::Device(device_identifier)
VirtIOGraphicsAdapter::VirtIOGraphicsAdapter(NonnullOwnPtr<VirtIO::TransportEntity> transport_entity, Bitmap&& active_context_ids, NonnullOwnPtr<Memory::Region> scratch_space_region)
: VirtIO::Device(move(transport_entity))
, m_scratch_space(move(scratch_space_region))
{
m_active_context_ids.with([&](Bitmap& my_active_context_ids) {
@ -146,7 +148,7 @@ VirtIOGraphicsAdapter::VirtIOGraphicsAdapter(PCI::DeviceIdentifier const& device
ErrorOr<void> VirtIOGraphicsAdapter::initialize_virtio_resources()
{
TRY(VirtIO::Device::initialize_virtio_resources());
auto* config = TRY(get_config(VirtIO::ConfigurationType::Device));
auto* config = TRY(transport_entity().get_config(VirtIO::ConfigurationType::Device));
m_device_configuration = config;
bool success = negotiate_features([&](u64 supported_features) {
u64 negotiated = 0;
@ -160,8 +162,8 @@ ErrorOr<void> VirtIOGraphicsAdapter::initialize_virtio_resources()
return negotiated;
});
if (success) {
read_config_atomic([&]() {
m_num_scanouts = config_read32(*config, DEVICE_NUM_SCANOUTS);
transport_entity().read_config_atomic([&]() {
m_num_scanouts = transport_entity().config_read32(*config, DEVICE_NUM_SCANOUTS);
});
dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: num_scanouts: {}", m_num_scanouts);
success = setup_queues(2); // CONTROLQ + CURSORQ
@ -193,12 +195,12 @@ void VirtIOGraphicsAdapter::handle_queue_update(u16)
u32 VirtIOGraphicsAdapter::get_pending_events()
{
return config_read32(*m_device_configuration, DEVICE_EVENTS_READ);
return transport_entity().config_read32(*m_device_configuration, DEVICE_EVENTS_READ);
}
void VirtIOGraphicsAdapter::clear_pending_events(u32 event_bitmask)
{
config_write32(*m_device_configuration, DEVICE_EVENTS_CLEAR, event_bitmask);
transport_entity().config_write32(*m_device_configuration, DEVICE_EVENTS_CLEAR, event_bitmask);
}
static void populate_virtio_gpu_request_header(Graphics::VirtIOGPU::Protocol::ControlHeader& header, Graphics::VirtIOGPU::Protocol::CommandType ctrl_type, u32 flags)

View file

@ -42,8 +42,6 @@ public:
virtual ErrorOr<void> initialize_virtio_resources() override;
virtual StringView device_name() const override { return "VirtIOGraphicsAdapter"sv; }
ErrorOr<void> mode_set_resolution(Badge<VirtIODisplayConnector>, VirtIODisplayConnector&, size_t width, size_t height);
void set_dirty_displayed_rect(Badge<VirtIODisplayConnector>, VirtIODisplayConnector&, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect, bool main_buffer);
ErrorOr<void> flush_displayed_image(Badge<VirtIODisplayConnector>, VirtIODisplayConnector&, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect, bool main_buffer);
@ -67,7 +65,7 @@ private:
PhysicalBuffer back_buffer;
};
VirtIOGraphicsAdapter(PCI::DeviceIdentifier const&, Bitmap&& active_context_ids, NonnullOwnPtr<Memory::Region> scratch_space_region);
VirtIOGraphicsAdapter(NonnullOwnPtr<VirtIO::TransportEntity>, Bitmap&& active_context_ids, NonnullOwnPtr<Memory::Region> scratch_space_region);
ErrorOr<void> initialize_adapter();

View file

@ -5,6 +5,7 @@
*/
#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Bus/VirtIO/Transport/PCIe/TransportLink.h>
#include <Kernel/Net/NetworkingManagement.h>
#include <Kernel/Net/VirtIO/VirtIONetworkAdapter.h>
@ -102,11 +103,12 @@ UNMAP_AFTER_INIT ErrorOr<bool> VirtIONetworkAdapter::probe(PCI::DeviceIdentifier
UNMAP_AFTER_INIT ErrorOr<NonnullRefPtr<NetworkAdapter>> VirtIONetworkAdapter::create(PCI::DeviceIdentifier const& pci_device_identifier)
{
auto interface_name = TRY(NetworkingManagement::generate_interface_name_from_pci_address(pci_device_identifier));
return TRY(adopt_nonnull_ref_or_enomem(new (nothrow) VirtIONetworkAdapter(interface_name.representable_view(), pci_device_identifier)));
auto pci_transport_link = TRY(VirtIO::PCIeTransportLink::create(pci_device_identifier));
return TRY(adopt_nonnull_ref_or_enomem(new (nothrow) VirtIONetworkAdapter(interface_name.representable_view(), move(pci_transport_link))));
}
UNMAP_AFTER_INIT VirtIONetworkAdapter::VirtIONetworkAdapter(StringView interface_name, PCI::DeviceIdentifier const& pci_device_identifier)
: VirtIO::Device(pci_device_identifier)
UNMAP_AFTER_INIT VirtIONetworkAdapter::VirtIONetworkAdapter(StringView interface_name, NonnullOwnPtr<VirtIO::TransportEntity> pci_transport_link)
: VirtIO::Device(move(pci_transport_link))
, NetworkAdapter(interface_name)
{
}
@ -123,7 +125,7 @@ UNMAP_AFTER_INIT ErrorOr<void> VirtIONetworkAdapter::initialize_virtio_resources
{
dbgln_if(VIRTIO_DEBUG, "VirtIONetworkAdapter: initialize_virtio_resources");
TRY(Device::initialize_virtio_resources());
m_device_config = TRY(get_config(VirtIO::ConfigurationType::Device));
m_device_config = TRY(transport_entity().get_config(VirtIO::ConfigurationType::Device));
bool success = negotiate_features([&](u64 supported_features) {
u64 negotiated = 0;
@ -169,28 +171,28 @@ UNMAP_AFTER_INIT ErrorOr<void> VirtIONetworkAdapter::initialize_virtio_resources
bool VirtIONetworkAdapter::handle_device_config_change()
{
dbgln_if(VIRTIO_DEBUG, "VirtIONetworkAdapter: handle_device_config_change");
read_config_atomic([&]() {
transport_entity().read_config_atomic([&]() {
if (is_feature_accepted(VIRTIO_NET_F_MAC)) {
set_mac_address(MACAddress(
config_read8(*m_device_config, 0x0),
config_read8(*m_device_config, 0x1),
config_read8(*m_device_config, 0x2),
config_read8(*m_device_config, 0x3),
config_read8(*m_device_config, 0x4),
config_read8(*m_device_config, 0x5)));
transport_entity().config_read8(*m_device_config, 0x0),
transport_entity().config_read8(*m_device_config, 0x1),
transport_entity().config_read8(*m_device_config, 0x2),
transport_entity().config_read8(*m_device_config, 0x3),
transport_entity().config_read8(*m_device_config, 0x4),
transport_entity().config_read8(*m_device_config, 0x5)));
}
if (is_feature_accepted(VIRTIO_NET_F_STATUS)) {
u16 status = config_read16(*m_device_config, offsetof(VirtIONetConfig, status));
u16 status = transport_entity().config_read16(*m_device_config, offsetof(VirtIONetConfig, status));
m_link_up = (status & VIRTIO_NET_S_LINK_UP) != 0;
}
if (is_feature_accepted(VIRTIO_NET_F_MTU)) {
u16 mtu = config_read16(*m_device_config, offsetof(VirtIONetConfig, mtu));
u16 mtu = transport_entity().config_read16(*m_device_config, offsetof(VirtIONetConfig, mtu));
set_mtu(mtu);
}
if (is_feature_accepted(VIRTIO_NET_F_SPEED_DUPLEX)) {
u32 speed = config_read32(*m_device_config, offsetof(VirtIONetConfig, speed));
u32 speed = transport_entity().config_read32(*m_device_config, offsetof(VirtIONetConfig, speed));
m_link_speed = speed;
u32 duplex = config_read32(*m_device_config, offsetof(VirtIONetConfig, duplex));
u32 duplex = transport_entity().config_read32(*m_device_config, offsetof(VirtIONetConfig, duplex));
m_link_duplex = duplex == 0x01;
}
});

View file

@ -23,7 +23,6 @@ public:
// VirtIO::Device
virtual ErrorOr<void> initialize_virtio_resources() override;
virtual StringView device_name() const override { return class_name(); }
// NetworkAdapter
virtual StringView class_name() const override { return "VirtIONetworkAdapter"sv; }
@ -35,7 +34,7 @@ public:
virtual i32 link_speed() override { return m_link_speed; }
private:
explicit VirtIONetworkAdapter(StringView interface_name, PCI::DeviceIdentifier const&);
explicit VirtIONetworkAdapter(StringView interface_name, NonnullOwnPtr<VirtIO::TransportEntity>);
// VirtIO::Device
virtual bool handle_device_config_change() override;