Kernel: Add base support for VirtIO devices

Based on pull #3236 by tomuta, this adds helper methods for generic
device initialization, and partially-broken virtqueue helper methods

Co-authored-by: Tom <tomut@yahoo.com>
Co-authored-by: Sahan <sahan.h.fernando@gmail.com>
Idan Horowitz 2021-01-02 19:53:05 +02:00 committed by Andreas Kling
parent 40a1f89d67
commit 62303d46d1
9 changed files with 832 additions and 2 deletions
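
As an overview of what these helpers provide, here is a minimal sketch of how a concrete driver could sit on top of them. VirtIOExampleDevice is hypothetical; only methods and constants introduced in this diff are assumed, and error handling is elided:

#include <Kernel/VirtIO/VirtIO.h>

namespace Kernel {

class VirtIOExampleDevice final : public VirtIODevice {
public:
    explicit VirtIOExampleDevice(PCI::Address address)
        : VirtIODevice(address, "VirtIOExampleDevice")
    {
        // Accept only the spec-1.0 feature bit, and only if the device offers it.
        bool success = negotiate_features([&](u64 supported_features) {
            u64 negotiated = 0;
            if (is_feature_set(supported_features, VIRTIO_F_VERSION_1))
                negotiated |= VIRTIO_F_VERSION_1;
            return negotiated;
        });
        if (success) {
            set_requested_queue_count(1);
            success = finish_init(); // Sets up queues, then DEVICE_STATUS_DRIVER_OK.
        }
        if (success) {
            // React to buffers the device hands back on queue 0.
            get_queue(0)->on_data_available = [this]() {
                dbgln("VirtIOExampleDevice: queue 0 has new data");
            };
        }
    }

private:
    virtual void handle_device_config_change() override { }
};

}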

Kernel/CMakeLists.txt

@@ -40,6 +40,8 @@ set(KERNEL_SOURCES
Devices/SB16.cpp
Devices/SerialDevice.cpp
Devices/USB/UHCIController.cpp
VirtIO/VirtIO.cpp
VirtIO/VirtIOQueue.cpp
Devices/VMWareBackdoor.cpp
Devices/ZeroDevice.cpp
Devices/HID/I8042Controller.cpp


@@ -314,6 +314,10 @@
#cmakedefine01 VOLATILE_PAGE_RANGES_DEBUG
#endif
#ifndef VIRTIO_DEBUG
#cmakedefine01 VIRTIO_DEBUG
#endif
#ifndef VRA_DEBUG
#cmakedefine01 VRA_DEBUG
#endif


@@ -254,7 +254,7 @@ u32 get_BAR5(Address address)
u32 get_BAR(Address address, u8 bar)
{
ASSERT(bar <= 5);
VERIFY(bar <= 5);
switch (bar) {
case 0:
return get_BAR0(address);
@@ -269,7 +269,7 @@ u32 get_BAR(Address address, u8 bar)
case 5:
return get_BAR5(address);
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}

Kernel/VirtIO/VirtIO.cpp Normal file

@@ -0,0 +1,362 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Kernel/VirtIO/VirtIO.h>
namespace Kernel {
void VirtIO::detect()
{
PCI::enumerate([&](const PCI::Address& address, PCI::ID id) {
if (address.is_null() || id.is_null())
return;
if (id.vendor_id != VIRTIO_PCI_VENDOR_ID)
return;
// No concrete VirtIO drivers exist yet, so matching devices are only identified here, not instantiated.
});
}
VirtIODevice::VirtIODevice(PCI::Address address, const char* class_name)
: PCI::Device(address, PCI::get_interrupt_line(address))
, m_class_name(class_name)
, m_io_base(IOAddress(PCI::get_BAR0(pci_address()) & ~1))
{
dbgln("{}: Found @ {}", m_class_name, pci_address());
enable_bus_mastering(pci_address());
reset_device();
set_status_bit(DEVICE_STATUS_ACKNOWLEDGE);
auto capabilities = PCI::get_physical_id(address).capabilities();
for (auto& capability : capabilities) {
if (capability.id() == PCI_CAPABILITY_VENDOR_SPECIFIC) {
// We have a virtio_pci_cap
Configuration cfg = {};
cfg.cfg_type = capability.read8(0x3);
switch (cfg.cfg_type) {
case VIRTIO_PCI_CAP_COMMON_CFG:
case VIRTIO_PCI_CAP_NOTIFY_CFG:
case VIRTIO_PCI_CAP_ISR_CFG:
case VIRTIO_PCI_CAP_DEVICE_CFG:
case VIRTIO_PCI_CAP_PCI_CFG: {
auto cap_length = capability.read8(0x2);
if (cap_length < 0x10) {
dbgln("{}: Unexpected capability size: {}", m_class_name, cap_length);
break;
}
cfg.bar = capability.read8(0x4);
if (cfg.bar > 0x5) {
dbgln("{}: Unexpected capability bar value: {}", m_class_name, cfg.bar);
break;
}
cfg.offset = capability.read32(0x8);
cfg.length = capability.read32(0xc);
dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", m_class_name, cfg.cfg_type, cfg.bar, cfg.offset, cfg.length);
m_configs.append(cfg);
if (cfg.cfg_type == VIRTIO_PCI_CAP_COMMON_CFG)
m_use_mmio = true;
else if (cfg.cfg_type == VIRTIO_PCI_CAP_NOTIFY_CFG)
m_notify_multiplier = capability.read32(0x10);
break;
}
default:
dbgln("{}: Unknown capability configuration type: {}", m_class_name, cfg.cfg_type);
break;
}
}
}
m_common_cfg = get_config(VIRTIO_PCI_CAP_COMMON_CFG, 0);
m_notify_cfg = get_config(VIRTIO_PCI_CAP_NOTIFY_CFG, 0);
m_isr_cfg = get_config(VIRTIO_PCI_CAP_ISR_CFG, 0);
set_status_bit(DEVICE_STATUS_DRIVER);
}
VirtIODevice::~VirtIODevice()
{
}
auto VirtIODevice::mapping_for_bar(u8 bar) -> MappedMMIO&
{
VERIFY(m_use_mmio);
auto& mapping = m_mmio[bar];
if (!mapping.base) {
mapping.size = PCI::get_BAR_space_size(pci_address(), bar);
mapping.base = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), bar))), page_round_up(mapping.size), "VirtIO MMIO", Region::Access::Read | Region::Access::Write, Region::Cacheable::No);
if (!mapping.base)
dbgln("{}: Failed to map bar {}", m_class_name, bar);
}
return mapping;
}
void VirtIODevice::notify_queue(u16 queue_index)
{
dbgln("VirtIODevice: notifying about queue change at idx: {}", queue_index);
if (!m_use_mmio)
out<u16>(REG_QUEUE_NOTIFY, queue_index);
else
config_write16(m_notify_cfg, get_queue(queue_index)->notify_offset() * m_notify_multiplier, queue_index);
}
u8 VirtIODevice::config_read8(const Configuration* config, u32 offset)
{
return mapping_for_bar(config->bar).read<u8>(config->offset + offset);
}
u16 VirtIODevice::config_read16(const Configuration* config, u32 offset)
{
return mapping_for_bar(config->bar).read<u16>(config->offset + offset);
}
u32 VirtIODevice::config_read32(const Configuration* config, u32 offset)
{
return mapping_for_bar(config->bar).read<u32>(config->offset + offset);
}
void VirtIODevice::config_write8(const Configuration* config, u32 offset, u8 value)
{
mapping_for_bar(config->bar).write(config->offset + offset, value);
}
void VirtIODevice::config_write16(const Configuration* config, u32 offset, u16 value)
{
mapping_for_bar(config->bar).write(config->offset + offset, value);
}
void VirtIODevice::config_write32(const Configuration* config, u32 offset, u32 value)
{
mapping_for_bar(config->bar).write(config->offset + offset, value);
}
void VirtIODevice::config_write64(const Configuration* config, u32 offset, u64 value)
{
mapping_for_bar(config->bar).write(config->offset + offset, value);
}
u8 VirtIODevice::read_status_bits()
{
if (!m_use_mmio)
return in<u8>(REG_DEVICE_STATUS);
return config_read8(m_common_cfg, COMMON_CFG_DEVICE_STATUS);
}
void VirtIODevice::clear_status_bit(u8 status_bit)
{
m_status &= ~status_bit;
if (!m_use_mmio)
out<u8>(REG_DEVICE_STATUS, m_status);
else
config_write8(m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}
void VirtIODevice::set_status_bit(u8 status_bit)
{
m_status |= status_bit;
if (!m_use_mmio)
out<u8>(REG_DEVICE_STATUS, m_status);
else
config_write8(m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
}
u64 VirtIODevice::get_device_features()
{
if (!m_use_mmio)
return in<u32>(REG_DEVICE_FEATURES);
config_write32(m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
auto lower_bits = config_read32(m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
config_write32(m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
u64 upper_bits = (u64)config_read32(m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
return upper_bits | lower_bits;
}
bool VirtIODevice::accept_device_features(u64 device_features, u64 accepted_features)
{
VERIFY(!m_did_accept_features);
m_did_accept_features = true;
if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
accepted_features |= VIRTIO_F_VERSION_1;
} else {
dbgln("{}: legacy device detected", m_class_name);
}
if (is_feature_set(device_features, VIRTIO_F_RING_PACKED)) {
dbgln("{}: packed queues not yet supported", m_class_name);
accepted_features &= ~(VIRTIO_F_RING_PACKED);
}
dbgln("VirtIOConsole: Device features: {}", device_features);
dbgln("VirtIOConsole: Accepted features: {}", accepted_features);
if (!m_use_mmio) {
out<u32>(REG_GUEST_FEATURES, accepted_features);
} else {
config_write32(m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
config_write32(m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
config_write32(m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
config_write32(m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
}
set_status_bit(DEVICE_STATUS_FEATURES_OK);
m_status = read_status_bits();
if (!(m_status & DEVICE_STATUS_FEATURES_OK)) {
set_status_bit(DEVICE_STATUS_FAILED);
dbgln("{}: Features not accepted by host!", m_class_name);
return false;
}
m_accepted_features = accepted_features;
dbgln_if(VIRTIO_DEBUG, "{}: Features accepted by host", m_class_name);
return true;
}
auto VirtIODevice::get_common_config(u32 index) const -> const Configuration*
{
if (index == 0)
return m_common_cfg;
return get_config(VIRTIO_PCI_CAP_COMMON_CFG, index);
}
auto VirtIODevice::get_device_config(u32 index) const -> const Configuration*
{
return get_config(VIRTIO_PCI_CAP_DEVICE_CFG, index);
}
void VirtIODevice::reset_device()
{
dbgln_if(VIRTIO_DEBUG, "{}: Reset device", m_class_name);
m_status = 0;
if (!m_use_mmio) {
out<u8>(REG_DEVICE_STATUS, m_status);
while (read_status_bits() != 0) {
// TODO: delay a bit?
}
return;
}
if (m_common_cfg) {
config_write8(m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
while (config_read8(m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
// TODO: delay a bit?
}
return;
}
dbgln_if(VIRTIO_DEBUG, "{}: No handle to device, can't reset", m_class_name);
}
bool VirtIODevice::setup_queue(u16 queue_index)
{
if (!m_use_mmio || !m_common_cfg)
return false;
config_write16(m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
u16 queue_size = config_read16(m_common_cfg, COMMON_CFG_QUEUE_SIZE);
if (queue_size == 0) {
dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] is unavailable!", m_class_name, queue_index);
return true;
}
u16 queue_notify_offset = config_read16(m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);
auto queue = make<VirtIOQueue>(queue_size, queue_notify_offset);
if (queue->is_null())
return false;
config_write64(m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
config_write64(m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
config_write64(m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());
dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] size: {}", m_class_name, queue_index, queue_size);
m_queues.append(move(queue));
return true;
}
void VirtIODevice::set_requested_queue_count(u16 count)
{
m_queue_count = count;
}
bool VirtIODevice::setup_queues()
{
if (m_common_cfg) {
auto maximum_queue_count = config_read16(m_common_cfg, COMMON_CFG_NUM_QUEUES);
if (m_queue_count == 0) {
m_queue_count = maximum_queue_count;
} else if (m_queue_count > maximum_queue_count) {
dbgln("{}: {} queues requested but only {} available!", m_class_name, m_queue_count, maximum_queue_count);
return false;
}
}
dbgln_if(VIRTIO_DEBUG, "{}: Setting up {} queues", m_class_name, m_queue_count);
for (u16 i = 0; i < m_queue_count; i++) {
if (!setup_queue(i))
return false;
}
return true;
}
bool VirtIODevice::finish_init()
{
VERIFY(m_did_accept_features);
VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK));
if (!setup_queues()) {
dbgln("{}: Failed to setup queues", m_class_name);
return false;
}
set_status_bit(DEVICE_STATUS_DRIVER_OK);
dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", m_class_name);
return true;
}
void VirtIODevice::supply_buffer_and_notify(u16 queue_index, const u8* buffer, u32 len, BufferType buffer_type)
{
VERIFY(queue_index < m_queue_count);
if (get_queue(queue_index)->supply_buffer(buffer, len, buffer_type))
notify_queue(queue_index);
}
u8 VirtIODevice::isr_status()
{
if (!m_use_mmio)
return in<u8>(REG_ISR_STATUS);
return config_read8(m_isr_cfg, 0);
}
void VirtIODevice::handle_irq(const RegisterState&)
{
u8 isr_type = isr_status();
dbgln_if(VIRTIO_DEBUG, "VirtIODevice: Handling interrupt with status: {}", isr_type);
if (isr_type & DEVICE_CONFIG_INTERRUPT)
handle_device_config_change();
if (isr_type & QUEUE_INTERRUPT) {
for (auto& queue : m_queues) {
if (queue.handle_interrupt())
return;
}
}
}
}
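
A quick worked example of the notify arithmetic used by notify_queue() above: under the modern interface, the spec places each queue's doorbell at cap.offset + queue_notify_off * notify_off_multiplier within the notify capability's BAR. The concrete numbers below are hypothetical, chosen only to make the math visible:

#include <AK/Types.h>

// Hypothetical example values (the real ones are read from the device):
constexpr u32 notify_off_multiplier = 4; // notify capability, offset 0x10
constexpr u16 queue_notify_off = 3;      // COMMON_CFG_QUEUE_NOTIFY_OFF for this queue

// notify_queue(2) would then boil down to a 16-bit write of the queue index
// at byte offset 12 inside the notify configuration region:
//     config_write16(m_notify_cfg, queue_notify_off * notify_off_multiplier, 2);
static_assert(queue_notify_off * notify_off_multiplier == 12, "doorbell offset");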

Kernel/VirtIO/VirtIO.h Normal file

@@ -0,0 +1,249 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <AK/NonnullOwnPtrVector.h>
#include <Kernel/IO.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/PCI/Access.h>
#include <Kernel/PCI/Device.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VirtIO/VirtIOQueue.h>
namespace Kernel {
#define VIRTIO_PCI_VENDOR_ID 0x1AF4
#define REG_DEVICE_FEATURES 0x0
#define REG_GUEST_FEATURES 0x4
#define REG_QUEUE_ADDRESS 0x8
#define REG_QUEUE_SIZE 0xc
#define REG_QUEUE_SELECT 0xe
#define REG_QUEUE_NOTIFY 0x10
#define REG_DEVICE_STATUS 0x12
#define REG_ISR_STATUS 0x13
#define DEVICE_STATUS_ACKNOWLEDGE (1 << 0)
#define DEVICE_STATUS_DRIVER (1 << 1)
#define DEVICE_STATUS_DRIVER_OK (1 << 2)
#define DEVICE_STATUS_FEATURES_OK (1 << 3)
#define DEVICE_STATUS_DEVICE_NEEDS_RESET (1 << 6)
#define DEVICE_STATUS_FAILED (1 << 7)
#define VIRTIO_F_VERSION_1 ((u64)1 << 32)
#define VIRTIO_F_RING_PACKED ((u64)1 << 34)
#define VIRTIO_PCI_CAP_COMMON_CFG 1
#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
#define VIRTIO_PCI_CAP_ISR_CFG 3
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
#define VIRTIO_PCI_CAP_PCI_CFG 5
// virtio_pci_common_cfg
#define COMMON_CFG_DEVICE_FEATURE_SELECT 0x0
#define COMMON_CFG_DEVICE_FEATURE 0x4
#define COMMON_CFG_DRIVER_FEATURE_SELECT 0x8
#define COMMON_CFG_DRIVER_FEATURE 0xc
#define COMMON_CFG_MSIX_CONFIG 0x10
#define COMMON_CFG_NUM_QUEUES 0x12
#define COMMON_CFG_DEVICE_STATUS 0x14
#define COMMON_CFG_CONFIG_GENERATION 0x15
#define COMMON_CFG_QUEUE_SELECT 0x16
#define COMMON_CFG_QUEUE_SIZE 0x18
#define COMMON_CFG_QUEUE_MSIX_VECTOR 0x1a
#define COMMON_CFG_QUEUE_ENABLE 0x1c
#define COMMON_CFG_QUEUE_NOTIFY_OFF 0x1e
#define COMMON_CFG_QUEUE_DESC 0x20
#define COMMON_CFG_QUEUE_DRIVER 0x28
#define COMMON_CFG_QUEUE_DEVICE 0x30
#define QUEUE_INTERRUPT 0x1
#define DEVICE_CONFIG_INTERRUPT 0x2
class VirtIO {
public:
static void detect();
};
class VirtIODevice : public PCI::Device {
public:
VirtIODevice(PCI::Address, const char*);
virtual ~VirtIODevice() override;
protected:
const char* const m_class_name;
struct MappedMMIO {
OwnPtr<Region> base;
size_t size { 0 };
template<typename T>
T read(u32 offset) const
{
if (!base)
return 0;
VERIFY(size >= sizeof(T));
VERIFY(offset + sizeof(T) <= size);
return *(volatile T*)(base->vaddr().offset(offset).get());
}
template<typename T>
void write(u32 offset, T value)
{
if (!base)
return;
VERIFY(size >= sizeof(T));
VERIFY(offset + sizeof(T) <= size);
*(volatile T*)(base->vaddr().offset(offset).get()) = value;
}
};
struct Configuration {
u8 cfg_type;
u8 bar;
u32 offset;
u32 length;
};
const Configuration* get_config(u8 cfg_type, u32 index = 0) const
{
for (const auto& cfg : m_configs) {
if (cfg.cfg_type != cfg_type)
continue;
if (index > 0) {
index--;
continue;
}
return &cfg;
}
return nullptr;
}
const Configuration* get_common_config(u32 index = 0) const;
const Configuration* get_device_config(u32 index = 0) const;
template<typename F>
void read_config_atomic(F f)
{
if (m_common_cfg) {
u8 generation_before, generation_after;
do {
generation_before = config_read8(m_common_cfg, COMMON_CFG_CONFIG_GENERATION);
f();
generation_after = config_read8(m_common_cfg, COMMON_CFG_CONFIG_GENERATION);
} while (generation_before != generation_after);
} else {
f();
}
}
u8 config_read8(const Configuration*, u32);
u16 config_read16(const Configuration*, u32);
u32 config_read32(const Configuration*, u32);
void config_write8(const Configuration*, u32, u8);
void config_write16(const Configuration*, u32, u16);
void config_write32(const Configuration*, u32, u32);
void config_write64(const Configuration*, u32, u64);
auto mapping_for_bar(u8) -> MappedMMIO&;
u8 read_status_bits();
void clear_status_bit(u8);
void set_status_bit(u8);
u64 get_device_features();
bool finish_init();
VirtIOQueue* get_queue(u16 queue_index)
{
return &m_queues[queue_index];
}
void set_requested_queue_count(u16);
template<typename F>
bool negotiate_features(F f)
{
u64 device_features = get_device_features();
u64 accept_features = f(device_features);
VERIFY(!(~device_features & accept_features));
return accept_device_features(device_features, accept_features);
}
static bool is_feature_set(u64 feature_set, u64 test_feature)
{
// features can have more than one bit
return (feature_set & test_feature) == test_feature;
}
bool is_feature_accepted(u64 feature) const
{
VERIFY(m_did_accept_features);
return is_feature_set(m_accepted_features, feature);
}
void supply_buffer_and_notify(u16 queue_index, const u8* buffer, u32 len, BufferType);
virtual void handle_irq(const RegisterState&) override;
virtual void handle_device_config_change() = 0;
private:
template<typename T>
void out(u16 address, T value)
{
m_io_base.offset(address).out(value);
}
template<typename T>
T in(u16 address)
{
return m_io_base.offset(address).in<T>();
}
bool accept_device_features(u64 device_features, u64 accepted_features);
bool setup_queues();
bool setup_queue(u16 queue_index);
void notify_queue(u16 queue_index);
void reset_device();
u8 isr_status();
NonnullOwnPtrVector<VirtIOQueue> m_queues;
Vector<Configuration> m_configs;
const Configuration* m_common_cfg { nullptr }; // Cached due to high usage
const Configuration* m_notify_cfg { nullptr }; // Cached due to high usage
const Configuration* m_isr_cfg { nullptr }; // Cached due to high usage
IOAddress m_io_base;
MappedMMIO m_mmio[6];
u16 m_queue_count { 0 };
bool m_use_mmio { false };
u8 m_status { 0 };
u64 m_accepted_features { 0 };
bool m_did_accept_features { false };
u32 m_notify_multiplier { 0 };
};
}
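
As a usage sketch for read_config_atomic(): a subclass reading a multi-word field from the device-specific configuration wraps the reads so that a concurrent device-side update, detected through the config generation counter, forces a retry. The 64-bit field and its offsets here are hypothetical:

// Inside a hypothetical VirtIODevice subclass:
u64 read_example_config_field()
{
    auto const* cfg = get_device_config();
    VERIFY(cfg);
    u32 low = 0;
    u32 high = 0;
    read_config_atomic([&]() {
        // Hypothetical offsets; the real layout depends on the device type.
        low = config_read32(cfg, 0x0);
        high = config_read32(cfg, 0x4);
    });
    return ((u64)high << 32) | low;
}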

Kernel/VirtIO/VirtIOQueue.cpp Normal file

@@ -0,0 +1,106 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Kernel/StdLib.h>
#include <Kernel/VirtIO/VirtIOQueue.h>
namespace Kernel {
VirtIOQueue::VirtIOQueue(u16 queue_size, u16 notify_offset)
: m_queue_size(queue_size)
, m_notify_offset(notify_offset)
, m_free_buffers(queue_size)
{
size_t size_of_descriptors = sizeof(VirtIOQueueDescriptor) * queue_size;
size_t size_of_driver = sizeof(VirtIOQueueDriver) + queue_size * sizeof(u16);
size_t size_of_device = sizeof(VirtIOQueueDevice) + queue_size * sizeof(VirtIOQueueDeviceItem);
m_region = MM.allocate_contiguous_kernel_region(page_round_up(size_of_descriptors + size_of_driver + size_of_device), "VirtIO Queue", Region::Access::Read | Region::Access::Write);
if (m_region) {
// TODO: ensure alignment!!!
u8* ptr = m_region->vaddr().as_ptr();
memset(ptr, 0, m_region->size());
m_descriptors = reinterpret_cast<VirtIOQueueDescriptor*>(ptr);
m_driver = reinterpret_cast<VirtIOQueueDriver*>(ptr + size_of_descriptors);
m_device = reinterpret_cast<VirtIOQueueDevice*>(ptr + size_of_descriptors + size_of_driver);
enable_interrupts();
}
}
VirtIOQueue::~VirtIOQueue()
{
}
void VirtIOQueue::enable_interrupts()
{
m_driver->flags = 0; // Clear VIRTQ_AVAIL_F_NO_INTERRUPT
}
void VirtIOQueue::disable_interrupts()
{
m_driver->flags = 1; // Set VIRTQ_AVAIL_F_NO_INTERRUPT to suppress device interrupts
}
bool VirtIOQueue::supply_buffer(const u8* buffer, u32 len, BufferType buffer_type)
{
VERIFY(buffer && len > 0);
VERIFY(m_free_buffers > 0);
auto descriptor_index = m_free_head;
m_descriptors[descriptor_index].flags = static_cast<u16>(buffer_type);
// FIXME: Descriptors must carry a physical address; handing the device this
// kernel virtual address only works if the buffer happens to be identity-mapped.
m_descriptors[descriptor_index].address = reinterpret_cast<u64>(buffer);
m_descriptors[descriptor_index].length = len;
m_free_buffers--;
m_free_head = (m_free_head + 1) % m_queue_size;
// Publish the descriptor in the driver (available) ring, making sure its
// contents are visible to the device before the index update.
m_driver->rings[m_driver->index % m_queue_size] = descriptor_index;
full_memory_barrier();
m_driver->index++;
full_memory_barrier();
auto device_flags = m_device->flags;
bool should_notify = !(device_flags & 1); // Bit 0 is VIRTQ_USED_F_NO_NOTIFY.
dbgln("VirtIOQueue: supplied buffer, should notify: {}", should_notify);
return should_notify;
}
bool VirtIOQueue::new_data_available() const
{
return m_device->index != m_used_tail;
}
bool VirtIOQueue::handle_interrupt()
{
if (!new_data_available())
return false;
if (on_data_available)
on_data_available();
return true;
}
}
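
To make the allocation in the constructor above concrete: with the structures from VirtIOQueue.h (16-byte descriptors, 4-byte ring headers, 2-byte driver ring entries, 8-byte device ring entries), a queue of size 256 needs 4096 + 516 + 2052 = 6664 bytes, which page_round_up() turns into one contiguous 8 KiB (two-page) region. The arithmetic, written out by hand as a standalone check:

#include <AK/Types.h>

constexpr size_t queue_size = 256;
constexpr size_t size_of_descriptors = queue_size * 16; // sizeof(VirtIOQueueDescriptor) == 16
constexpr size_t size_of_driver = 4 + queue_size * 2;   // u16 flags + u16 index + u16 rings[queue_size]
constexpr size_t size_of_device = 4 + queue_size * 8;   // u16 flags + u16 index + 8-byte ring items
static_assert(size_of_descriptors + size_of_driver + size_of_device == 6664, "split virtqueue footprint");
// page_round_up(6664) == 8192, i.e. two 4 KiB pages.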

Kernel/VirtIO/VirtIOQueue.h Normal file

@@ -0,0 +1,103 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <Kernel/SpinLock.h>
#include <Kernel/VM/MemoryManager.h>
namespace Kernel {
enum class BufferType {
DeviceReadable = 0,
DeviceWritable = 2 // VIRTQ_DESC_F_WRITE; note that 1 is VIRTQ_DESC_F_NEXT
};
class VirtIOQueue {
public:
VirtIOQueue(u16 queue_size, u16 notify_offset);
~VirtIOQueue();
bool is_null() const { return !m_region; }
u16 notify_offset() const { return m_notify_offset; }
void enable_interrupts();
void disable_interrupts();
PhysicalAddress descriptor_area() const { return to_physical(m_descriptors); }
PhysicalAddress driver_area() const { return to_physical(m_driver); }
PhysicalAddress device_area() const { return to_physical(m_device); }
bool supply_buffer(const u8* buffer, u32 len, BufferType);
bool new_data_available() const;
bool handle_interrupt();
Function<void()> on_data_available;
private:
PhysicalAddress to_physical(void* ptr) const
{
auto offset = FlatPtr(ptr) - m_region->vaddr().get();
return m_region->physical_page(0)->paddr().offset(offset);
}
struct VirtIOQueueDescriptor {
u64 address;
u32 length;
u16 flags;
u16 next;
};
struct VirtIOQueueDriver {
u16 flags;
u16 index;
u16 rings[];
};
struct VirtIOQueueDeviceItem {
u32 index;
u32 length;
};
struct VirtIOQueueDevice {
u16 flags;
u16 index;
VirtIOQueueDeviceItem rings[];
};
const u16 m_queue_size;
const u16 m_notify_offset;
u16 m_free_buffers;
u16 m_free_head { 0 };
u16 m_used_tail { 0 };
VirtIOQueueDescriptor* m_descriptors { nullptr };
VirtIOQueueDriver* m_driver { nullptr };
VirtIOQueueDevice* m_device { nullptr };
OwnPtr<Region> m_region;
SpinLock<u8> m_lock;
};
}

Kernel/init.cpp

@@ -72,6 +72,7 @@
#include <Kernel/Tasks/SyncTask.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VirtIO/VirtIO.h>
#include <Kernel/WorkQueue.h>
#include <Kernel/kstdio.h>
@@ -281,6 +282,8 @@ void init_stage2(void*)
DMIExpose::initialize();
VirtIO::detect();
E1000NetworkAdapter::detect();
NE2000NetworkAdapter::detect();
RTL8139NetworkAdapter::detect();


@@ -19,6 +19,7 @@ set(VMWARE_BACKDOOR_DEBUG ON)
set(FILEDESCRIPTION_DEBUG ON)
set(PROCFS_DEBUG ON)
set(VFS_DEBUG ON)
set(VIRTIO_DEBUG ON)
set(IOAPIC_DEBUG ON)
set(IRQ_DEBUG ON)
set(INTERRUPT_DEBUG ON)