Kernel/Storage: Introduce basic abstraction layer for ATA components

This abstraction layer is mainly for ATA ports (AHCI ports, IDE ports).
The goal is to create a convenient and flexible framework so it's
possible to expand to support other types of controllers (e.g. Intel PIIX
and ICH IDE controllers) and to abstract operations that are possible on
each component.

Currently only the ATA IDE code is affected by this, making it much
cleaner and readable - the ATA bus mastering code is moved to the
ATAPort code so more implementations in the near future can take
advantage of such functionality easily.

In addition to that, the hierarchy of the ATA IDE code now more closely
resembles that of the SATA AHCI code, which means the IDEChannel class is solely
responsible for getting interrupts, passing them for further processing
in the ATAPort code to take care of the rest of the handling logic.
This commit is contained in:
Liav A 2021-11-26 19:39:26 +02:00 committed by Linus Groh
parent 7719ef3a61
commit 0810c1b972
12 changed files with 1003 additions and 746 deletions

View file

@@ -102,6 +102,7 @@ set(KERNEL_SOURCES
Storage/ATA/ATADevice.cpp
Storage/ATA/ATADiskDevice.cpp
Storage/ATA/ATAPIDiscDevice.cpp
Storage/ATA/ATAPort.cpp
Storage/Partition/DiskPartition.cpp
Storage/Partition/DiskPartitionMetadata.cpp
Storage/Partition/EBRPartitionTable.cpp

View file

@@ -31,6 +31,10 @@
#cmakedefine01 ARP_DEBUG
#endif
#ifndef ATA_DEBUG
#cmakedefine01 ATA_DEBUG
#endif
#ifndef BBFS_DEBUG
#cmakedefine01 BBFS_DEBUG
#endif

View file

@@ -0,0 +1,519 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86/IO.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/ATAPort.h>
#include <Kernel/Storage/ATA/Definitions.h>
#include <Kernel/WorkQueue.h>
namespace Kernel {
// RAII helper: disables the port's interrupts on construction and re-enables
// them on destruction. Errors from either operation are deliberately ignored,
// since there is no reasonable way to recover at these points.
class ATAPortInterruptDisabler {
public:
    // explicit: this guard should never be created via implicit conversion
    // from an ATAPort reference.
    explicit ATAPortInterruptDisabler(ATAPort& port)
        : m_port(port)
    {
        (void)port.disable_interrupts();
    }
    ~ATAPortInterruptDisabler()
    {
        (void)m_port->enable_interrupts();
    }

private:
    // Keeps the port alive for the lifetime of the guard.
    RefPtr<ATAPort> m_port;
};
// RAII helper: on destruction, forces the port to clear any latched interrupt
// status. Errors are deliberately ignored, as there is no way to recover here.
class ATAPortInterruptCleaner {
public:
    // explicit: this guard should never be created via implicit conversion
    // from an ATAPort reference.
    explicit ATAPortInterruptCleaner(ATAPort& port)
        : m_port(port)
    {
    }
    ~ATAPortInterruptCleaner()
    {
        (void)m_port->force_clear_interrupts();
    }

private:
    // Keeps the port alive for the lifetime of the guard.
    RefPtr<ATAPort> m_port;
};
void ATAPort::fix_name_string_in_identify_device_block()
{
VERIFY(m_lock.is_locked());
auto* wbuf = (u16*)m_ata_identify_data_buffer->data();
auto* bbuf = m_ata_identify_data_buffer->data() + 27 * 2;
for (size_t word_index = 27; word_index < 47; word_index++) {
u16 data = wbuf[word_index];
*(bbuf++) = MSB(data);
*(bbuf++) = LSB(data);
}
}
// Probes every possible device slot on this port, issues ATA IDENTIFY to each
// device that responds, and registers an ATADiskDevice for every LBA-capable
// drive found. Slots that fail selection/IDENTIFY are silently skipped.
ErrorOr<void> ATAPort::detect_connected_devices()
{
    MutexLocker locker(m_lock);
    for (size_t device_index = 0; device_index < max_possible_devices_connected(); device_index++) {
        TRY(device_select(device_index));
        auto device_presence = TRY(detect_presence_on_selected_device());
        if (!device_presence)
            continue;
        TaskFile identify_taskfile;
        memset(&identify_taskfile, 0, sizeof(TaskFile));
        identify_taskfile.command = ATA_CMD_IDENTIFY;
        auto buffer = UserOrKernelBuffer::for_kernel_buffer(m_ata_identify_data_buffer->data());
        {
            // Poll for the 256-word (512-byte) IDENTIFY block; on failure
            // (e.g. timeout or device error) just skip this slot.
            auto result = execute_polled_command(TransactionDirection::Read, LBAMode::None, identify_taskfile, buffer, 0, 256, 100, 100);
            if (result.is_error()) {
                continue;
            }
        }
        ATAIdentifyBlock volatile& identify_block = (ATAIdentifyBlock volatile&)(*m_ata_identify_data_buffer->data());
        u16 capabilities = identify_block.capabilities[0];
        // Note: device_name is a view into the identify buffer; the byte-swap
        // performed by fix_name_string_in_identify_device_block() below fixes
        // the buffer in place, so the view becomes readable before we print it.
        StringView device_name = StringView((char const*)const_cast<u16*>(identify_block.model_number), 40);
        fix_name_string_in_identify_device_block();
        u64 max_addressable_block = identify_block.max_28_bit_addressable_logical_sector;
        dbgln("ATAPort: device found: Name={}, Capacity={}, Capabilities={:#04x}", device_name.trim_whitespace(), max_addressable_block * 512, capabilities);
        // If the drive is so old that it doesn't support LBA, ignore it.
        if (!(capabilities & ATA_CAP_LBA)) {
            dbgln("ATAPort: device found but without LBA support (what kind of dinosaur we see here?)");
            continue;
        }
        // if we support 48-bit LBA, use that value instead.
        if (identify_block.commands_and_feature_sets_supported[1] & (1 << 10))
            max_addressable_block = identify_block.user_addressable_logical_sectors_count;
        // FIXME: Don't assume all drives will have logical sector size of 512 bytes.
        ATADevice::Address address = { m_port_index, static_cast<u8>(device_index) };
        m_ata_devices.append(ATADiskDevice::create(m_parent_ata_controller, address, capabilities, 512, max_addressable_block));
    }
    return {};
}
// Returns the detected device at the given index, or null if the index is
// out of range of the devices discovered on this port.
RefPtr<StorageDevice> ATAPort::connected_device(size_t device_index) const
{
    MutexLocker locker(m_lock);
    if (device_index >= m_ata_devices.size())
        return {};
    return m_ata_devices[device_index];
}
// Begins servicing an async block request on this port. Exactly one request
// may be in flight at a time; DMA is preferred over PIO when available.
ErrorOr<void> ATAPort::start_request(ATADevice const& associated_device, AsyncBlockDeviceRequest& request)
{
    MutexLocker locker(m_lock);
    VERIFY(m_current_request.is_null());
    VERIFY(pio_capable() || dma_capable());
    dbgln_if(ATA_DEBUG, "ATAPort::start_request");
    // Record the new in-flight request and reset per-request bookkeeping.
    m_current_request = request;
    m_current_request_block_index = 0;
    m_current_request_flushing_cache = false;
    if (dma_capable())
        TRY(prepare_and_initiate_dma_transaction(associated_device));
    else
        TRY(prepare_and_initiate_pio_transaction(associated_device));
    return {};
}
// Finishes the in-flight PIO request with the given result, deferring the
// actual completion to an I/O work item so it runs outside interrupt context.
void ATAPort::complete_pio_transaction(AsyncDeviceRequest::RequestResult result)
{
    VERIFY(m_current_request);
    // Now schedule reading back the buffer as soon as we leave the irq handler.
    // This is important so that we can safely write the buffer back,
    // which could cause page faults. Note that this may be called immediately
    // before Processor::deferred_call_queue returns!
    auto work_item_creation_result = g_io_work->try_queue([this, result]() {
        dbgln_if(ATA_DEBUG, "ATAPort::complete_pio_transaction result: {}", (int)result);
        MutexLocker locker(m_lock);
        VERIFY(m_current_request);
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(result);
    });
    if (work_item_creation_result.is_error()) {
        // Couldn't queue the work item (OOM): fail the request inline instead.
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
    }
}
// Finishes the in-flight DMA request with the given result. On success, the
// deferred work item clears busmastering status and (for reads) copies the
// DMA bounce buffer back into the requester's buffer before completing.
void ATAPort::complete_dma_transaction(AsyncDeviceRequest::RequestResult result)
{
    // NOTE: this may be called from the interrupt handler!
    VERIFY(m_current_request);
    VERIFY(m_lock.is_locked());
    // Now schedule reading back the buffer as soon as we leave the irq handler.
    // This is important so that we can safely write the buffer back,
    // which could cause page faults. Note that this may be called immediately
    // before Processor::deferred_call_queue returns!
    auto work_item_creation_result = g_io_work->try_queue([this, result]() {
        dbgln_if(ATA_DEBUG, "ATAPort::complete_dma_transaction result: {}", (int)result);
        MutexLocker locker(m_lock);
        // The request may already have been torn down by the time we run.
        if (!m_current_request)
            return;
        auto current_request = m_current_request;
        m_current_request.clear();
        if (result == AsyncDeviceRequest::Success) {
            {
                // NOTE(review): this inner `result` intentionally shadows the
                // captured request result; it only reflects the status-clean step.
                auto result = force_busmastering_status_clean();
                if (result.is_error()) {
                    locker.unlock();
                    current_request->complete(AsyncDeviceRequest::Failure);
                    return;
                }
            }
            if (current_request->request_type() == AsyncBlockDeviceRequest::Read) {
                // Copy data out of the DMA bounce buffer; this can page-fault,
                // which is why we are in a work item and not in the IRQ handler.
                if (auto result = current_request->write_to_buffer(current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * current_request->block_count()); result.is_error()) {
                    locker.unlock();
                    current_request->complete(AsyncDeviceRequest::MemoryFault);
                    return;
                }
            }
        }
        // Drop the lock before completing, since completion may trigger callbacks.
        locker.unlock();
        current_request->complete(result);
    });
    if (work_item_creation_result.is_error()) {
        // Couldn't queue the work item (OOM): fail the request inline instead.
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
    }
}
// Decodes each ATA status-register bit into a readable debug line.
static void print_ata_status(u8 status)
{
    bool drq = (status & ATA_SR_DRQ) != 0;
    bool bsy = (status & ATA_SR_BSY) != 0;
    bool drdy = (status & ATA_SR_DRDY) != 0;
    bool dsc = (status & ATA_SR_DSC) != 0;
    bool df = (status & ATA_SR_DF) != 0;
    bool corr = (status & ATA_SR_CORR) != 0;
    bool idx = (status & ATA_SR_IDX) != 0;
    bool err = (status & ATA_SR_ERR) != 0;
    dbgln("ATAPort: print_status: DRQ={} BSY={}, DRDY={}, DSC={}, DF={}, CORR={}, IDX={}, ERR={}",
        drq, bsy, drdy, dsc, df, corr, idx, err);
}
// Prints a human-readable interpretation of the ATA error register value.
// Note: only exact matches against the well-known single-bit error constants
// are recognized; any other value falls through to the generic message.
static void try_disambiguate_ata_error(u8 error)
{
    dbgln("ATAPort: Error cause:");
    if (error == ATA_ER_BBK)
        dbgln("ATAPort: - Bad block");
    else if (error == ATA_ER_UNC)
        dbgln("ATAPort: - Uncorrectable data");
    else if (error == ATA_ER_MC)
        dbgln("ATAPort: - Media changed");
    else if (error == ATA_ER_IDNF)
        dbgln("ATAPort: - ID mark not found");
    else if (error == ATA_ER_MCR)
        dbgln("ATAPort: - Media change request");
    else if (error == ATA_ER_ABRT)
        dbgln("ATAPort: - Command aborted");
    else if (error == ATA_ER_TK0NF)
        dbgln("ATAPort: - Track 0 not found");
    else if (error == ATA_ER_AMNF)
        dbgln("ATAPort: - No address mark");
    else
        dbgln("ATAPort: - No one knows");
}
// Called from the subclass's IRQ handler after a DMA transaction. Returns
// false when the interrupt was not caused by this port, true when it was and
// a work item was queued to finish the transaction.
ErrorOr<bool> ATAPort::handle_interrupt_after_dma_transaction()
{
    if (!dma_capable())
        return false;
    u8 bstatus = TRY(busmastering_status());
    // Bit 2 (0x4) of the busmastering status register is the interrupt bit;
    // if it's clear, this IRQ came from some other device sharing the line.
    if (!(bstatus & 0x4)) {
        // interrupt not from this device, ignore
        dbgln_if(ATA_DEBUG, "ATAPort: ignore interrupt");
        return false;
    }
    // Defer the heavy lifting (status reads, buffer copies) to the ATA work queue.
    auto work_item_creation_result = g_ata_work->try_queue([this]() -> void {
        MutexLocker locker(m_lock);
        u8 status = task_file_status().release_value();
        m_entropy_source.add_random_event(status);
        // clear bus master interrupt status
        {
            auto result = force_busmastering_status_clean();
            if (result.is_error()) {
                complete_dma_transaction(AsyncDeviceRequest::Failure);
                return;
            }
        }
        SpinlockLocker lock(m_hard_lock);
        dbgln_if(ATA_DEBUG, "ATAPort: interrupt: DRQ={}, BSY={}, DRDY={}",
            (status & ATA_SR_DRQ) != 0,
            (status & ATA_SR_BSY) != 0,
            (status & ATA_SR_DRDY) != 0);
        // A spurious interrupt with no request in flight is logged and ignored.
        if (!m_current_request) {
            dbgln("ATAPort: IRQ but no pending request!");
            return;
        }
        if (status & ATA_SR_ERR) {
            print_ata_status(status);
            auto device_error = task_file_error().release_value();
            dbgln("ATAPort: Error {:#02x}!", (u8)device_error);
            try_disambiguate_ata_error(device_error);
            complete_dma_transaction(AsyncDeviceRequest::Failure);
            return;
        }
        complete_dma_transaction(AsyncDeviceRequest::Success);
        return;
    });
    if (work_item_creation_result.is_error()) {
        // Couldn't queue the work item (OOM): fail the request inline instead.
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
        return Error::from_errno(ENOMEM);
    }
    return true;
}
// Queues a work item that sets up the PRDT, loads the task file, and kicks off
// a bus-master DMA transfer for the current request. Completion happens later,
// via handle_interrupt_after_dma_transaction().
ErrorOr<void> ATAPort::prepare_and_initiate_dma_transaction(ATADevice const& associated_device)
{
    VERIFY(m_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    // A single PRDT/DMA page limits us to 256 sectors per transaction.
    VERIFY(m_current_request->block_count() <= 256);
    // Note: We might be called here from an interrupt handler (like the page fault handler), so queue a read afterwards.
    // NOTE(review): `associated_device` is captured by reference into a deferred
    // work item — presumably the controller keeps it alive until the lambda runs;
    // verify against the caller.
    auto work_item_creation_result = g_ata_work->try_queue([this, &associated_device]() -> void {
        MutexLocker locker(m_lock);
        dbgln_if(ATA_DEBUG, "ATAPort::prepare_and_initiate_dma_transaction ({} x {})", m_current_request->block_index(), m_current_request->block_count());
        VERIFY(!m_current_request.is_null());
        VERIFY(m_current_request->block_count() <= 256);
        {
            auto result = device_select(associated_device.ata_address().subport);
            if (result.is_error()) {
                complete_dma_transaction(AsyncDeviceRequest::Failure);
                return;
            }
        }
        if (m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write) {
            // For writes, stage the caller's data into the DMA bounce buffer first.
            if (auto result = m_current_request->read_from_buffer(m_current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * m_current_request->block_count()); result.is_error()) {
                complete_dma_transaction(AsyncDeviceRequest::MemoryFault);
                return;
            }
        }
        // Point the single PRDT entry at the DMA bounce page.
        prdt().offset = m_dma_buffer_page->paddr().get();
        prdt().size = 512 * m_current_request->block_count();
        VERIFY(prdt().size <= PAGE_SIZE);
        SpinlockLocker hard_lock_locker(m_hard_lock);
        {
            auto result = stop_busmastering();
            if (result.is_error()) {
                complete_dma_transaction(AsyncDeviceRequest::Failure);
                return;
            }
        }
        if (m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write) {
            auto result = prepare_transaction_with_busmastering(TransactionDirection::Write, m_prdt_page->paddr());
            if (result.is_error()) {
                complete_dma_transaction(AsyncDeviceRequest::Failure);
                return;
            }
        } else {
            auto result = prepare_transaction_with_busmastering(TransactionDirection::Read, m_prdt_page->paddr());
            if (result.is_error()) {
                complete_dma_transaction(AsyncDeviceRequest::Failure);
                return;
            }
        }
        TaskFile taskfile;
        LBAMode lba_mode = LBAMode::TwentyEightBit;
        auto lba = m_current_request->block_index();
        // Fall back to 48-bit LBA if the transfer reaches past the 28-bit range.
        if ((lba + m_current_request->block_count()) >= 0x10000000) {
            lba_mode = LBAMode::FortyEightBit;
        }
        memset(&taskfile, 0, sizeof(TaskFile));
        // Split the LBA into the low/high task-file register bytes.
        taskfile.lba_low[0] = (lba & 0x000000FF) >> 0;
        taskfile.lba_low[1] = (lba & 0x0000FF00) >> 8;
        taskfile.lba_low[2] = (lba & 0x00FF0000) >> 16;
        taskfile.lba_high[0] = (lba & 0xFF000000) >> 24;
        taskfile.lba_high[1] = (lba & 0xFF00000000ull) >> 32;
        taskfile.lba_high[2] = (lba & 0xFF0000000000ull) >> 40;
        taskfile.count = m_current_request->block_count();
        if (lba_mode == LBAMode::TwentyEightBit)
            taskfile.command = m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write ? ATA_CMD_WRITE_DMA : ATA_CMD_READ_DMA;
        else
            taskfile.command = m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write ? ATA_CMD_WRITE_DMA_EXT : ATA_CMD_READ_DMA_EXT;
        {
            auto result = load_taskfile_into_registers(taskfile, lba_mode, 1000);
            if (result.is_error()) {
                complete_dma_transaction(AsyncDeviceRequest::Failure);
                return;
            }
        }
        // Finally start the bus-master engine; completion arrives via interrupt.
        if (m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write) {
            auto result = start_busmastering(TransactionDirection::Write);
            if (result.is_error()) {
                complete_dma_transaction(AsyncDeviceRequest::Failure);
                return;
            }
        }
        else {
            auto result = start_busmastering(TransactionDirection::Read);
            if (result.is_error()) {
                complete_dma_transaction(AsyncDeviceRequest::Failure);
                return;
            }
        }
    });
    if (work_item_creation_result.is_error()) {
        // Couldn't queue the work item (OOM): fail the request inline instead.
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
        return Error::from_errno(ENOMEM);
    }
    return {};
}
// Queues a work item that services the current request with polled PIO,
// issuing one single-sector command per block (256 words = 512 bytes each).
ErrorOr<void> ATAPort::prepare_and_initiate_pio_transaction(ATADevice const& associated_device)
{
    VERIFY(m_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    VERIFY(m_current_request->block_count() <= 256);
    dbgln_if(ATA_DEBUG, "ATAPort::prepare_and_initiate_pio_transaction ({} x {})", m_current_request->block_index(), m_current_request->block_count());
    // Note: We might be called here from an interrupt handler (like the page fault handler), so queue a read afterwards.
    // NOTE(review): `associated_device` is captured by reference into a deferred
    // work item — presumably the controller keeps it alive until the lambda runs;
    // verify against the caller.
    auto work_item_creation_result = g_ata_work->try_queue([this, &associated_device]() -> void {
        MutexLocker locker(m_lock);
        {
            auto result = device_select(associated_device.ata_address().subport);
            if (result.is_error()) {
                complete_pio_transaction(AsyncDeviceRequest::Failure);
                return;
            }
        }
        for (size_t block_index = 0; block_index < m_current_request->block_count(); block_index++) {
            TaskFile taskfile;
            LBAMode lba_mode = LBAMode::TwentyEightBit;
            auto lba = m_current_request->block_index() + block_index;
            // Fall back to 48-bit LBA beyond the 28-bit addressable range.
            if (lba >= 0x10000000) {
                lba_mode = LBAMode::FortyEightBit;
            }
            memset(&taskfile, 0, sizeof(TaskFile));
            // Split the LBA into the low/high task-file register bytes.
            taskfile.lba_low[0] = (lba & 0x000000FF) >> 0;
            taskfile.lba_low[1] = (lba & 0x0000FF00) >> 8;
            taskfile.lba_low[2] = (lba & 0x00FF0000) >> 16;
            taskfile.lba_high[0] = (lba & 0xFF000000) >> 24;
            taskfile.lba_high[1] = (lba & 0xFF00000000ull) >> 32;
            taskfile.lba_high[2] = (lba & 0xFF0000000000ull) >> 40;
            taskfile.count = 1;
            if (lba_mode == LBAMode::TwentyEightBit)
                taskfile.command = m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write ? ATA_CMD_WRITE_PIO : ATA_CMD_READ_PIO;
            else
                taskfile.command = m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write ? ATA_CMD_WRITE_PIO_EXT : ATA_CMD_READ_PIO_EXT;
            if (m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
                auto result = execute_polled_command(TransactionDirection::Read, lba_mode, taskfile, m_current_request->buffer(), block_index, 256, 100, 100);
                if (result.is_error()) {
                    complete_pio_transaction(AsyncDeviceRequest::Failure);
                    return;
                }
            } else {
                auto result = execute_polled_command(TransactionDirection::Write, lba_mode, taskfile, m_current_request->buffer(), block_index, 256, 100, 100);
                if (result.is_error()) {
                    complete_pio_transaction(AsyncDeviceRequest::Failure);
                    return;
                }
            }
        }
        complete_pio_transaction(AsyncDeviceRequest::Success);
    });
    if (work_item_creation_result.is_error()) {
        // Couldn't queue the work item (OOM): fail the request inline instead.
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
        return Error::from_errno(ENOMEM);
    }
    return {};
}
// Issues one command synchronously with interrupts disabled, busy-polling the
// status register until the device is ready (or a timeout elapses), then moves
// `words_count` 16-bit words between the device and `buffer` at `block_offset`.
ErrorOr<void> ATAPort::execute_polled_command(TransactionDirection direction, LBAMode lba_mode, TaskFile const& taskfile, UserOrKernelBuffer& buffer, size_t block_offset, size_t words_count, size_t preparation_timeout_in_milliseconds, size_t completion_timeout_in_milliseconds)
{
    // Disable interrupts temporarily, just in case we have that enabled,
    // remember the value to re-enable (and clean) later if needed.
    // (The guards below re-enable interrupts and clear any latched interrupt
    // status when this function returns, in reverse declaration order.)
    ATAPortInterruptDisabler disabler(*this);
    ATAPortInterruptCleaner cleaner(*this);
    MutexLocker locker(m_lock);
    {
        SpinlockLocker hard_locker(m_hard_lock);

        // Wait for device to be not busy or timeout
        TRY(wait_if_busy_until_timeout(preparation_timeout_in_milliseconds));

        // Send command, wait for result or timeout
        TRY(load_taskfile_into_registers(taskfile, lba_mode, preparation_timeout_in_milliseconds));
        size_t milliseconds_elapsed = 0;
        for (;;) {
            if (milliseconds_elapsed > completion_timeout_in_milliseconds)
                break;
            u8 status = task_file_status().release_value();
            if (status & ATA_SR_ERR) {
                return Error::from_errno(EINVAL);
            }
            // Device is ready for data transfer when BSY clears and DRQ sets.
            if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRQ)) {
                break;
            }
            // Busy-wait 1ms between status polls.
            IO::delay(1000);
            milliseconds_elapsed++;
        }
        if (milliseconds_elapsed > completion_timeout_in_milliseconds) {
            critical_dmesgln("ATAPort: device state unknown. Timeout exceeded.");
            return Error::from_errno(EINVAL);
        }
    }
    // The data transfer itself runs with interrupts enabled (hard lock dropped).
    VERIFY_INTERRUPTS_ENABLED();
    if (direction == TransactionDirection::Read)
        TRY(read_pio_data_to_buffer(buffer, block_offset, words_count));
    else
        TRY(write_pio_data_from_buffer(buffer, block_offset, words_count));
    return {};
}
}

View file

@@ -0,0 +1,156 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Storage/ATA/ATADevice.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
// Abstract base class for an ATA "port" (e.g. an AHCI port or an IDE channel
// link). Implements the generic command flow — IDENTIFY-based device
// detection, polled PIO commands, and bus-master DMA transactions — on top of
// a small set of hardware-specific primitives that concrete subclasses
// provide as protected virtuals.
class ATAPort
    : public RefCounted<ATAPort>
    , public Weakable<ATAPort> {

    friend class ATAPortInterruptDisabler;
    friend class ATAPortInterruptCleaner;

public:
    // In-memory representation of the ATA register set ("task file") for one command.
    struct TaskFile {
        u8 command;
        u8 lba_low[3];
        u8 device;
        u8 lba_high[3];
        u8 features_high;
        u16 count;
        u8 icc;
        u8 control;
        u32 reserved;
    };

    enum class TransactionDirection : u8 {
        Read,
        Write,
    };

    // One entry of the bus-master DMA Physical Region Descriptor Table.
    struct [[gnu::packed]] PhysicalRegionDescriptor {
        u32 offset;
        u16 size { 0 };
        u16 end_of_table { 0 };
    };

    enum class LBAMode : u8 {
        None,
        TwentyEightBit,
        FortyEightBit,
    };

public:
    // Returns the detected device at the given index, or null if out of range.
    RefPtr<StorageDevice> connected_device(size_t device_index) const;

    virtual ~ATAPort() = default;

    virtual ErrorOr<void> disable() = 0;
    virtual ErrorOr<void> power_on() = 0;

    // Probes all device slots and registers discovered LBA-capable drives.
    ErrorOr<void> detect_connected_devices();
    // Called from the subclass IRQ handler; returns whether the IRQ was ours.
    ErrorOr<bool> handle_interrupt_after_dma_transaction();

    // Begins servicing an async block request (one in flight at a time).
    ErrorOr<void> start_request(ATADevice const& associated_device, AsyncBlockDeviceRequest&);

    // Note: Generic (P)ATA IDE "ports" are tied to the IDE channel link (cable), and trying to
    // reset the master port or slave port and vice versa requires to actually reset
    // both at once...
    // This is due to the fact that IDE devices can be connected together (master-slave)
    // with one 80 pin cable which forms one (primary/secondary) "ATA bus".
    // Intel AHCI controllers generally allow individual phy port reset. The caller
    // of this method should know this in advance...
    // Note: ATAPI devices are an exception to this, so even if we initiate a
    // a port reset, there's no guarantee that ATAPI devices will reset anyway,
    // so resetting them requires to actually send the ATA "DEVICE RESET" command.
    virtual ErrorOr<void> port_phy_reset() = 0;

    // Note: Software reset means individual reset to a selected device on the "bus" (port).
    // This means that this will likely work for devices that indicate support for
    // PACKET commands (ATAPI devices) that also support DEVICE RESET. For other devices
    // there's no other method to reset them besides (full) PHY reset.
    // For devices that don't support this feature, just return ENOTSUP.
    virtual ErrorOr<void> soft_reset() { return Error::from_errno(ENOTSUP); }

    // Synchronous, interrupt-free command execution (used e.g. for IDENTIFY).
    ErrorOr<void> execute_polled_command(TransactionDirection direction, LBAMode lba_mode, TaskFile const& taskfile, UserOrKernelBuffer&, size_t block_offset, size_t words_count, size_t preparation_timeout_in_milliseconds, size_t completion_timeout_in_milliseconds);

    virtual bool has_sata_capabilities() { return false; }

    virtual bool pio_capable() const = 0;
    virtual bool dma_capable() const = 0;

    virtual size_t max_possible_devices_connected() const = 0;

private:
    ErrorOr<void> prepare_and_initiate_dma_transaction(ATADevice const& associated_device);
    ErrorOr<void> prepare_and_initiate_pio_transaction(ATADevice const& associated_device);

    void complete_dma_transaction(AsyncDeviceRequest::RequestResult result);
    void complete_pio_transaction(AsyncDeviceRequest::RequestResult result);

    // Un-byte-swaps the model-number string inside the IDENTIFY data buffer.
    void fix_name_string_in_identify_device_block();

protected:
    // Hardware primitives that concrete port implementations must provide.
    virtual ErrorOr<u8> task_file_status() = 0;
    virtual ErrorOr<u8> task_file_error() = 0;

    virtual ErrorOr<void> wait_if_busy_until_timeout(size_t timeout_in_milliseconds) = 0;

    virtual ErrorOr<void> device_select(size_t device_index) = 0;
    virtual ErrorOr<bool> detect_presence_on_selected_device() = 0;

    virtual ErrorOr<void> enable_interrupts() = 0;
    virtual ErrorOr<void> disable_interrupts() = 0;

    virtual ErrorOr<void> stop_busmastering() = 0;
    virtual ErrorOr<void> start_busmastering(TransactionDirection) = 0;
    virtual ErrorOr<void> force_busmastering_status_clean() = 0;
    virtual ErrorOr<u8> busmastering_status() = 0;
    virtual ErrorOr<void> prepare_transaction_with_busmastering(TransactionDirection, PhysicalAddress prdt_buffer) = 0;
    virtual ErrorOr<void> initiate_transaction(TransactionDirection) = 0;

    virtual ErrorOr<void> force_clear_interrupts() = 0;

    // Note: This method assume we already selected the correct device!
    virtual ErrorOr<void> load_taskfile_into_registers(TaskFile const&, LBAMode lba_mode, size_t completion_timeout_in_milliseconds) = 0;

    virtual ErrorOr<void> read_pio_data_to_buffer(UserOrKernelBuffer&, size_t block_offset, size_t words_count) = 0;
    virtual ErrorOr<void> write_pio_data_from_buffer(UserOrKernelBuffer const&, size_t block_offset, size_t words_count) = 0;

    // View of the PRDT page as a single descriptor entry.
    PhysicalRegionDescriptor& prdt() { return *reinterpret_cast<PhysicalRegionDescriptor*>(m_prdt_region->vaddr().as_ptr()); }

    ATAPort(ATAController const& parent_controller, u8 port_index, NonnullOwnPtr<KBuffer> ata_identify_data_buffer)
        : m_port_index(port_index)
        , m_ata_identify_data_buffer(move(ata_identify_data_buffer))
        , m_parent_ata_controller(parent_controller)
    {
    }

    // Serializes high-level request state; m_hard_lock guards register access.
    mutable Mutex m_lock;
    Spinlock m_hard_lock;

    EntropySource m_entropy_source;

    // The single in-flight async request, if any.
    RefPtr<AsyncBlockDeviceRequest> m_current_request;
    u64 m_current_request_block_index { 0 };
    bool m_current_request_flushing_cache { false };

    // DMA resources: PRDT page and bounce-buffer page with their VM regions.
    OwnPtr<Memory::Region> m_prdt_region;
    OwnPtr<Memory::Region> m_dma_buffer_region;
    RefPtr<Memory::PhysicalPage> m_prdt_page;
    RefPtr<Memory::PhysicalPage> m_dma_buffer_page;

    const u8 m_port_index;
    NonnullRefPtrVector<ATADevice> m_ata_devices;
    NonnullOwnPtr<KBuffer> m_ata_identify_data_buffer;
    NonnullRefPtr<ATAController> m_parent_ata_controller;
};
}

View file

@@ -25,44 +25,41 @@ namespace Kernel {
UNMAP_AFTER_INIT NonnullRefPtr<IDEChannel> IDEChannel::create(IDEController const& controller, IOAddressGroup io_group, ChannelType type)
{
return adopt_ref(*new IDEChannel(controller, io_group, type));
auto ata_identify_data_buffer = KBuffer::try_create_with_size("ATA Identify Page"sv, 4096, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
return adopt_ref(*new IDEChannel(controller, io_group, type, move(ata_identify_data_buffer)));
}
UNMAP_AFTER_INIT NonnullRefPtr<IDEChannel> IDEChannel::create(IDEController const& controller, u8 irq, IOAddressGroup io_group, ChannelType type)
{
return adopt_ref(*new IDEChannel(controller, irq, io_group, type));
auto ata_identify_data_buffer = KBuffer::try_create_with_size("ATA Identify Page"sv, 4096, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
return adopt_ref(*new IDEChannel(controller, irq, io_group, type, move(ata_identify_data_buffer)));
}
RefPtr<StorageDevice> IDEChannel::master_device() const
StringView IDEChannel::channel_type_string() const
{
return m_master;
if (m_channel_type == ChannelType::Primary)
return "Primary"sv;
return "Secondary"sv;
}
RefPtr<StorageDevice> IDEChannel::slave_device() const
bool IDEChannel::select_device_and_wait_until_not_busy(DeviceType device_type, size_t milliseconds_timeout)
{
return m_slave;
IO::delay(20);
u8 slave = device_type == DeviceType::Slave;
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | (slave << 4)); // First, we need to select the drive itself
IO::delay(20);
size_t time_elapsed = 0;
while (m_io_group.control_base().in<u8>() & ATA_SR_BSY && time_elapsed <= milliseconds_timeout) {
IO::delay(1000);
time_elapsed++;
}
return time_elapsed <= milliseconds_timeout;
}
UNMAP_AFTER_INIT void IDEChannel::initialize_with_isa_controller(Badge<ISAIDEController>, bool force_pio)
ErrorOr<void> IDEChannel::port_phy_reset()
{
initialize(force_pio);
}
UNMAP_AFTER_INIT void IDEChannel::initialize_with_pci_controller(Badge<PCIIDEController>, bool force_pio)
{
initialize(force_pio);
}
UNMAP_AFTER_INIT void IDEChannel::initialize(bool force_pio)
{
disable_irq();
dbgln_if(PATA_DEBUG, "IDEChannel: {} IO base: {}", channel_type_string(), m_io_group.io_base());
dbgln_if(PATA_DEBUG, "IDEChannel: {} control base: {}", channel_type_string(), m_io_group.control_base());
if (m_io_group.bus_master_base().has_value())
dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base: {}", channel_type_string(), m_io_group.bus_master_base().value());
else
dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base disabled", channel_type_string());
MutexLocker locker(m_lock);
SpinlockLocker hard_locker(m_hard_lock);
// reset the channel
u8 device_control = m_io_group.control_base().in<u8>();
// Wait 30 milliseconds
@@ -74,700 +71,263 @@ UNMAP_AFTER_INIT void IDEChannel::initialize(bool force_pio)
// Wait up to 30 seconds before failing
if (!select_device_and_wait_until_not_busy(DeviceType::Master, 30000)) {
dbgln("IDEChannel: reset failed, busy flag on master stuck");
return;
return Error::from_errno(EBUSY);
}
// Wait up to 30 seconds before failing
if (!select_device_and_wait_until_not_busy(DeviceType::Slave, 30000)) {
dbgln("IDEChannel: reset failed, busy flag on slave stuck");
return;
return Error::from_errno(EBUSY);
}
return {};
}
detect_disks();
ErrorOr<void> IDEChannel::allocate_resources_for_pci_ide_controller(Badge<PCIIDEController>, bool force_pio)
{
return allocate_resources(force_pio);
}
ErrorOr<void> IDEChannel::allocate_resources_for_isa_ide_controller(Badge<ISAIDEController>)
{
return allocate_resources(false);
}
// Note: calling to detect_disks could generate an interrupt, clear it if that's the case
clear_pending_interrupts();
UNMAP_AFTER_INIT ErrorOr<void> IDEChannel::allocate_resources(bool force_pio)
{
dbgln_if(PATA_DEBUG, "IDEChannel: {} IO base: {}", channel_type_string(), m_io_group.io_base());
dbgln_if(PATA_DEBUG, "IDEChannel: {} control base: {}", channel_type_string(), m_io_group.control_base());
if (m_io_group.bus_master_base().has_value())
dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base: {}", channel_type_string(), m_io_group.bus_master_base().value());
else
dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base disabled", channel_type_string());
if (!force_pio) {
m_dma_enabled = true;
VERIFY(m_io_group.bus_master_base().has_value());
// Let's try to set up DMA transfers.
{
auto region_or_error = MM.allocate_dma_buffer_page("IDE PRDT"sv, Memory::Region::Access::ReadWrite, m_prdt_page);
if (region_or_error.is_error())
TODO();
m_prdt_region = region_or_error.release_value();
VERIFY(!m_prdt_page.is_null());
}
{
auto region_or_error = MM.allocate_dma_buffer_page("IDE DMA region"sv, Memory::Region::Access::ReadWrite, m_dma_buffer_page);
if (region_or_error.is_error())
TODO();
m_dma_buffer_region = region_or_error.release_value();
VERIFY(!m_dma_buffer_page.is_null());
}
m_prdt_region = TRY(MM.allocate_dma_buffer_page("IDE PRDT"sv, Memory::Region::Access::ReadWrite, m_prdt_page));
VERIFY(!m_prdt_page.is_null());
m_dma_buffer_region = TRY(MM.allocate_dma_buffer_page("IDE DMA region"sv, Memory::Region::Access::ReadWrite, m_dma_buffer_page));
VERIFY(!m_dma_buffer_page.is_null());
prdt().end_of_table = 0x8000;
// clear bus master interrupt status
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
}
return {};
}
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, u8 irq, IOAddressGroup io_group, ChannelType type)
: IRQHandler(irq)
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, u8 irq, IOAddressGroup io_group, ChannelType type, NonnullOwnPtr<KBuffer> ata_identify_data_buffer)
: ATAPort(controller, (type == ChannelType::Primary ? 0 : 1), move(ata_identify_data_buffer))
, IRQHandler(irq)
, m_channel_type(type)
, m_io_group(io_group)
, m_parent_controller(controller)
{
}
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, IOAddressGroup io_group, ChannelType type)
: IRQHandler(type == ChannelType::Primary ? PATA_PRIMARY_IRQ : PATA_SECONDARY_IRQ)
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, IOAddressGroup io_group, ChannelType type, NonnullOwnPtr<KBuffer> ata_identify_data_buffer)
: ATAPort(controller, (type == ChannelType::Primary ? 0 : 1), move(ata_identify_data_buffer))
, IRQHandler(type == ChannelType::Primary ? PATA_PRIMARY_IRQ : PATA_SECONDARY_IRQ)
, m_channel_type(type)
, m_io_group(io_group)
, m_parent_controller(controller)
{
}
// Reading the ATA status register acknowledges/clears a pending device
// interrupt; the read value itself is deliberately discarded.
void IDEChannel::clear_pending_interrupts() const
{
    m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
}
UNMAP_AFTER_INIT IDEChannel::~IDEChannel() = default;
// (Pre-refactor IDE path.) Begins servicing an async block request on this
// channel, dispatching to the DMA or PIO read/write helper depending on the
// channel capability and the request type. One request in flight at a time.
void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool is_slave, u16 capabilities)
{
    MutexLocker locker(m_lock);
    VERIFY(m_current_request.is_null());
    dbgln_if(PATA_DEBUG, "IDEChannel::start_request");

    m_current_request = request;
    m_current_request_block_index = 0;
    m_current_request_flushing_cache = false;

    if (m_dma_enabled) {
        if (request.request_type() == AsyncBlockDeviceRequest::Read)
            ata_read_sectors_with_dma(is_slave, capabilities);
        else
            ata_write_sectors_with_dma(is_slave, capabilities);
        return;
    }
    if (request.request_type() == AsyncBlockDeviceRequest::Read)
        ata_read_sectors_with_pio(is_slave, capabilities);
    else
        ata_write_sectors_with_pio(is_slave, capabilities);
}
// (Pre-refactor IDE path.) Finishes the in-flight DMA request, deferring the
// buffer copy-back and completion to an I/O work item outside the IRQ handler.
void IDEChannel::complete_dma_transaction(AsyncDeviceRequest::RequestResult result)
{
    // NOTE: this may be called from the interrupt handler!
    VERIFY(m_current_request);
    VERIFY(m_request_lock.is_locked());

    // Now schedule reading back the buffer as soon as we leave the irq handler.
    // This is important so that we can safely write the buffer back,
    // which could cause page faults. Note that this may be called immediately
    // before Processor::deferred_call_queue returns!
    auto work_item_creation_result = g_io_work->try_queue([this, result]() {
        dbgln_if(PATA_DEBUG, "IDEChannel::complete_dma_transaction result: {}", (int)result);
        SpinlockLocker lock(m_request_lock);
        VERIFY(m_current_request);
        auto current_request = m_current_request;
        m_current_request.clear();
        if (result == AsyncDeviceRequest::Success) {
            if (current_request->request_type() == AsyncBlockDeviceRequest::Read) {
                // Copy data out of the DMA bounce buffer; may page-fault, hence the work item.
                if (auto result = current_request->write_to_buffer(current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
                    lock.unlock();
                    current_request->complete(AsyncDeviceRequest::MemoryFault);
                    return;
                }
            }

            // I read somewhere that this may trigger a cache flush so let's do it.
            VERIFY(m_io_group.bus_master_base().has_value());
            m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
        }

        // Drop the lock before completing, since completion may trigger callbacks.
        lock.unlock();
        current_request->complete(result);
    });
    if (work_item_creation_result.is_error()) {
        // Couldn't queue the work item (OOM): fail the request inline instead.
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
    }
}
// Finish the in-flight PIO request with `result`, deferring the completion
// callback to the I/O work queue since this can run in interrupt context.
void IDEChannel::complete_pio_transaction(AsyncDeviceRequest::RequestResult result)
{
    // NOTE: this may be called from the interrupt handler!
    VERIFY(m_current_request);
    VERIFY(m_request_lock.is_locked());
    // Now schedule reading back the buffer as soon as we leave the irq handler.
    // This is important so that we can safely write the buffer back,
    // which could cause page faults. Note that this may be called immediately
    // before Processor::deferred_call_queue returns!
    auto work_item_creation_result = g_io_work->try_queue([this, result]() {
        dbgln_if(PATA_DEBUG, "IDEChannel::complete_pio_transaction result: {}", (int)result);
        MutexLocker locker(m_lock);
        VERIFY(m_current_request);
        // Detach the request before completing it so the channel is free again.
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(result);
    });
    if (work_item_creation_result.is_error()) {
        // Could not defer the completion; fail the request immediately.
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
    }
}
// Decode each bit of an ATA status register value into a readable debug line.
static void print_ide_status(u8 status)
{
    auto bit_set = [status](u8 mask) { return (status & mask) != 0; };
    dbgln("IDEChannel: print_ide_status: DRQ={} BSY={}, DRDY={}, DSC={}, DF={}, CORR={}, IDX={}, ERR={}",
        bit_set(ATA_SR_DRQ),
        bit_set(ATA_SR_BSY),
        bit_set(ATA_SR_DRDY),
        bit_set(ATA_SR_DSC),
        bit_set(ATA_SR_DF),
        bit_set(ATA_SR_CORR),
        bit_set(ATA_SR_IDX),
        bit_set(ATA_SR_ERR));
}
void IDEChannel::try_disambiguate_error()
{
VERIFY(m_request_lock.is_locked());
dbgln("IDEChannel: Error cause:");
switch (m_device_error) {
case ATA_ER_BBK:
dbgln("IDEChannel: - Bad block");
break;
case ATA_ER_UNC:
dbgln("IDEChannel: - Uncorrectable data");
break;
case ATA_ER_MC:
dbgln("IDEChannel: - Media changed");
break;
case ATA_ER_IDNF:
dbgln("IDEChannel: - ID mark not found");
break;
case ATA_ER_MCR:
dbgln("IDEChannel: - Media change request");
break;
case ATA_ER_ABRT:
dbgln("IDEChannel: - Command aborted");
break;
case ATA_ER_TK0NF:
dbgln("IDEChannel: - Track 0 not found");
break;
case ATA_ER_AMNF:
dbgln("IDEChannel: - No address mark");
break;
default:
dbgln("IDEChannel: - No one knows");
break;
}
}
// IRQ handler for the DMA path. Returns true if the interrupt belonged to
// this channel and was handled.
bool IDEChannel::handle_irq_for_dma_transaction()
{
    // Reading the regular status register also acknowledges the device interrupt.
    u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
    m_entropy_source.add_random_event(status);
    VERIFY(m_io_group.bus_master_base().has_value());
    // Bit 2 of the bus master status register is set when this channel
    // raised the interrupt.
    u8 bstatus = m_io_group.bus_master_base().value().offset(2).in<u8>();
    if (!(bstatus & 0x4)) {
        // interrupt not from this device, ignore
        dbgln_if(PATA_DEBUG, "IDEChannel: ignore interrupt");
        return false;
    }
    // clear bus master interrupt status
    m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
    SpinlockLocker lock(m_request_lock);
    dbgln_if(PATA_DEBUG, "IDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
        (status & ATA_SR_DRQ) != 0,
        (status & ATA_SR_BSY) != 0,
        (status & ATA_SR_DRDY) != 0);
    if (!m_current_request) {
        dbgln("IDEChannel: IRQ but no pending request!");
        return false;
    }
    if (status & ATA_SR_ERR) {
        // Device reported an error: capture the error register, log a
        // diagnosis and fail the transaction.
        print_ide_status(status);
        m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
        dbgln("IDEChannel: Error {:#02x}!", (u8)m_device_error);
        try_disambiguate_error();
        complete_dma_transaction(AsyncDeviceRequest::Failure);
        return true;
    }
    m_device_error = 0;
    complete_dma_transaction(AsyncDeviceRequest::Success);
    return true;
}
// IRQ handler for the PIO path. Each interrupt signals that the device is
// ready for the next sector transfer (or that the final cache flush finished).
// Returns true if the interrupt was handled.
bool IDEChannel::handle_irq_for_pio_transaction()
{
    // Reading the regular status register also acknowledges the device interrupt.
    u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
    m_entropy_source.add_random_event(status);
    SpinlockLocker lock(m_request_lock);
    dbgln_if(PATA_DEBUG, "IDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
        (status & ATA_SR_DRQ) != 0,
        (status & ATA_SR_BSY) != 0,
        (status & ATA_SR_DRDY) != 0);
    if (!m_current_request) {
        dbgln("IDEChannel: IRQ but no pending request!");
        return false;
    }
    if (status & ATA_SR_ERR) {
        // Device reported an error: capture the error register, log a
        // diagnosis and fail the transaction.
        print_ide_status(status);
        m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
        dbgln("IDEChannel: Error {:#02x}!", (u8)m_device_error);
        try_disambiguate_error();
        complete_pio_transaction(AsyncDeviceRequest::Failure);
        return true;
    }
    m_device_error = 0;
    // Now schedule reading/writing the buffer as soon as we leave the irq handler.
    // This is important so that we can safely access the buffers, which could
    // trigger page faults
    auto work_item_creation_result = g_io_work->try_queue([this]() {
        MutexLocker locker(m_lock);
        SpinlockLocker lock(m_request_lock);
        if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
            dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count());
            // Note: on failure, ata_do_pio_read_sector() has already completed
            // the transaction with MemoryFault, so there is nothing to do here.
            if (ata_do_pio_read_sector()) {
                if (++m_current_request_block_index >= m_current_request->block_count()) {
                    complete_pio_transaction(AsyncDeviceRequest::Success);
                    return;
                }
                // Wait for the next block
                enable_irq();
            }
        } else {
            if (!m_current_request_flushing_cache) {
                dbgln_if(PATA_DEBUG, "IDEChannel: Wrote block {}/{}", m_current_request_block_index, m_current_request->block_count());
                if (++m_current_request_block_index >= m_current_request->block_count()) {
                    // We wrote the last block, flush cache
                    VERIFY(!m_current_request_flushing_cache);
                    m_current_request_flushing_cache = true;
                    m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH);
                } else {
                    // Write next block
                    ata_do_pio_write_sector();
                }
            } else {
                // The cache flush after the last block completed.
                complete_pio_transaction(AsyncDeviceRequest::Success);
            }
        }
    });
    if (work_item_creation_result.is_error()) {
        // Could not defer the transfer continuation; fail the request.
        auto current_request = m_current_request;
        m_current_request.clear();
        current_request->complete(AsyncDeviceRequest::OutOfMemory);
    }
    return true;
}
// IRQHandler hook: dispatch to the DMA or PIO interrupt handler depending on
// the channel's transfer mode.
// Fix: removed unreachable statements after the return (diff residue calling
// a method that does not exist in this class).
bool IDEChannel::handle_irq(RegisterState const&)
{
    if (!m_dma_enabled)
        return handle_irq_for_pio_transaction();
    return handle_irq_for_dma_transaction();
}
// Small settle delay: four reads of an ISA status port (the classic ~400ns
// ATA delay trick). NOTE(review): port 0x3f6 (secondary channel control) is
// hard-coded here; presumably only the read latency matters — confirm.
static void io_delay()
{
    for (int i = 0; i < 4; ++i)
        IO::in8(0x3f6);
}
// Select the given device (master/slave) on this channel, then wait up to
// `milliseconds_timeout` for the BSY flag to clear. Returns false on timeout.
// Fix: the busy-wait loop duplicated wait_until_not_busy(); delegate to it
// instead so the polling logic lives in one place.
bool IDEChannel::select_device_and_wait_until_not_busy(DeviceType device_type, size_t milliseconds_timeout)
{
    IO::delay(20);
    u8 slave = device_type == DeviceType::Slave;
    // Device/head register: 0xA0 base, bit 4 selects the slave device.
    m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | (slave << 4));
    IO::delay(20);
    return wait_until_not_busy(milliseconds_timeout);
}
// Poll the alternate status register (which does not ack interrupts) until
// BSY clears, for at most `milliseconds_timeout` ms. Returns false on timeout.
bool IDEChannel::wait_until_not_busy(size_t milliseconds_timeout)
{
    for (size_t time_elapsed = 0; time_elapsed <= milliseconds_timeout; ++time_elapsed) {
        if (!(m_io_group.control_base().in<u8>() & ATA_SR_BSY))
            return true;
        IO::delay(1000);
    }
    return false;
}
// Human-readable channel name for debug logging.
StringView IDEChannel::channel_type_string() const
{
    return m_channel_type == ChannelType::Primary ? "Primary"sv : "Secondary"sv;
}
// Probe both possible devices (master and slave) on this channel with the ATA
// IDENTIFY protocol and create ATADiskDevice instances for any ATA disks
// found. ATAPI devices are recognized by their signature but ignored for now.
UNMAP_AFTER_INIT void IDEChannel::detect_disks()
{
    auto channel_string = [](u8 i) -> StringView {
        if (i == 0)
            return "master"sv;
        return "slave"sv;
    };
    // There are only two possible disks connected to a channel
    for (auto i = 0; i < 2; i++) {
        if (!select_device_and_wait_until_not_busy(i == 0 ? DeviceType::Master : DeviceType::Slave, 32000)) {
            dbgln("IDEChannel: Timeout waiting for busy flag to clear during {} {} detection", channel_type_string(), channel_string(i));
            continue;
        }
        // A status of zero means nothing responded at this position.
        auto status = m_io_group.control_base().in<u8>();
        if (status == 0x0) {
            dbgln_if(PATA_DEBUG, "IDEChannel: No {} {} disk detected!", channel_type_string(), channel_string(i));
            continue;
        }
        // Zero the IDENTIFY parameter registers before issuing the command.
        m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_LBA1).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_LBA2).out<u8>(0);
        m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_IDENTIFY); // Send the ATA_IDENTIFY command
        // Wait up to 2 seconds for the BSY flag to clear
        if (!wait_until_not_busy(2000)) {
            dbgln_if(PATA_DEBUG, "IDEChannel: No {} {} disk detected, BSY flag was not reset!", channel_type_string(), channel_string(i));
            continue;
        }
        bool check_for_atapi = false;
        bool device_presence = true;
        bool command_set_is_atapi = false;
        // Classify the device by polling the status register: ERR hints at
        // ATAPI, BSY clear + DRQ set means ATA IDENTIFY data is ready, and
        // 0/0xFF means nothing is attached.
        size_t milliseconds_elapsed = 0;
        for (;;) {
            // Wait up to 2 seconds for a conclusive status.
            if (milliseconds_elapsed > 2000)
                break;
            u8 status = m_io_group.control_base().in<u8>();
            if (status & ATA_SR_ERR) {
                dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device is not ATA. Will check for ATAPI.", channel_type_string(), channel_string(i));
                check_for_atapi = true;
                break;
            }
            if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRQ)) {
                dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device appears to be ATA.", channel_type_string(), channel_string(i));
                break;
            }
            if (status == 0 || status == 0xFF) {
                dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device presence - none.", channel_type_string(), channel_string(i));
                device_presence = false;
                break;
            }
            IO::delay(1000);
            milliseconds_elapsed++;
        }
        if (!device_presence) {
            continue;
        }
        // NOTE(review): milliseconds_elapsed is capped at ~2000 by the loop
        // above, so this 10000ms check can never trigger — confirm which
        // timeout was intended.
        if (milliseconds_elapsed > 10000) {
            dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device state unknown. Timeout exceeded.", channel_type_string(), channel_string(i));
            continue;
        }
        if (check_for_atapi) {
            // ATAPI devices leave a magic signature in the LBA1/LBA2 registers.
            u8 cl = m_io_group.io_base().offset(ATA_REG_LBA1).in<u8>();
            u8 ch = m_io_group.io_base().offset(ATA_REG_LBA2).in<u8>();
            if ((cl == 0x14 && ch == 0xEB) || (cl == 0x69 && ch == 0x96)) {
                command_set_is_atapi = true;
                dbgln("IDEChannel: {} {} device appears to be ATAPI. We're going to ignore it for now as we don't support it.", channel_type_string(), channel_string(i));
                continue;
            } else {
                dbgln("IDEChannel: {} {} device doesn't appear to be ATA or ATAPI. Ignoring it.", channel_type_string(), channel_string(i));
                continue;
            }
        }
        // FIXME: Handle possible OOM situation here.
        // Read the 256-word IDENTIFY block into two buffers: wbuf keeps the
        // raw words, bbuf keeps a byte-swapped copy so the ASCII model name
        // is readable.
        ByteBuffer wbuf = ByteBuffer::create_uninitialized(m_logical_sector_size).release_value_but_fixme_should_propagate_errors();
        ByteBuffer bbuf = ByteBuffer::create_uninitialized(m_logical_sector_size).release_value_but_fixme_should_propagate_errors();
        u8* b = bbuf.data();
        u16* w = (u16*)wbuf.data();
        // NOTE(review): this loop variable shadows the outer device index `i`.
        for (u32 i = 0; i < 256; ++i) {
            u16 data = m_io_group.io_base().offset(ATA_REG_DATA).in<u16>();
            *(w++) = data;
            *(b++) = MSB(data);
            *(b++) = LSB(data);
        }
        // "Unpad" the device name string.
        for (u32 i = 93; i > 54 && bbuf[i] == ' '; --i)
            bbuf[i] = 0;
        ATAIdentifyBlock volatile& identify_block = (ATAIdentifyBlock volatile&)(*wbuf.data());
        u16 capabilities = identify_block.capabilities[0];
        // If the drive is so old that it doesn't support LBA, ignore it.
        if (!(capabilities & ATA_CAP_LBA))
            continue;
        u64 max_addressable_block = identify_block.max_28_bit_addressable_logical_sector;
        // if we support 48-bit LBA, use that value instead.
        if (identify_block.commands_and_feature_sets_supported[1] & (1 << 10))
            max_addressable_block = identify_block.user_addressable_logical_sectors_count;
        dbgln("IDEChannel: {} {} {} device found: Name={}, Capacity={}, Capabilities={:#04x}", channel_type_string(), channel_string(i), !command_set_is_atapi ? "ATA" : "ATAPI", ((char*)bbuf.data() + 54), max_addressable_block * m_logical_sector_size, capabilities);
        // Address encoding: port 0 = primary channel, 1 = secondary;
        // subport 0 = master, 1 = slave.
        ATADevice::Address address = { m_channel_type == ChannelType::Primary ? static_cast<u8>(0) : static_cast<u8>(1), static_cast<u8>(i) };
        if (i == 0) {
            m_master = ATADiskDevice::create(m_parent_controller, address, capabilities, m_logical_sector_size, max_addressable_block);
        } else {
            m_slave = ATADiskDevice::create(m_parent_controller, address, capabilities, m_logical_sector_size, max_addressable_block);
        }
    }
}
void IDEChannel::ata_access(Direction direction, bool slave_request, u64 lba, u8 block_count, u16 capabilities)
ErrorOr<void> IDEChannel::stop_busmastering()
{
VERIFY(m_lock.is_locked());
VERIFY(m_request_lock.is_locked());
LBAMode lba_mode;
u8 head = 0;
VERIFY(m_io_group.bus_master_base().has_value());
m_io_group.bus_master_base().value().out<u8>(0);
return {};
}
// Start the bus master engine: bit 0 starts the transfer; bit 3 selects the
// device-to-memory (read) direction.
ErrorOr<void> IDEChannel::start_busmastering(TransactionDirection direction)
{
    VERIFY(m_lock.is_locked());
    VERIFY(m_io_group.bus_master_base().has_value());
    u8 command = (direction == TransactionDirection::Write) ? 0x1 : 0x9;
    m_io_group.bus_master_base().value().out<u8>(command);
    return {};
}
// Acknowledge (clear) the channel's bus master interrupt status by writing
// the interrupt bit (bit 2) back to the status register.
ErrorOr<void> IDEChannel::force_busmastering_status_clean()
{
    VERIFY(m_lock.is_locked());
    VERIFY(m_io_group.bus_master_base().has_value());
    auto status_port = m_io_group.bus_master_base().value().offset(2);
    status_port.out<u8>(status_port.in<u8>() | 4);
    return {};
}
// Return the raw bus master status register (offset 2 from the bus master
// base). NOTE(review): unlike the other bus-mastering helpers this does not
// VERIFY m_lock — presumably because it can be called from interrupt
// context; confirm against the callers in ATAPort.
ErrorOr<u8> IDEChannel::busmastering_status()
{
    VERIFY(m_io_group.bus_master_base().has_value());
    return m_io_group.bus_master_base().value().offset(2).in<u8>();
}
// Program the bus master for an upcoming transfer: point it at the PRDT,
// set the transfer direction, and clear the interrupt/error status bits.
// Fix: added the VERIFY that the bus master base exists before dereferencing
// it, for consistency with the other bus-mastering helpers (ISA controllers
// have no bus master base).
ErrorOr<void> IDEChannel::prepare_transaction_with_busmastering(TransactionDirection direction, PhysicalAddress prdt_buffer)
{
    VERIFY(m_lock.is_locked());
    VERIFY(m_io_group.bus_master_base().has_value());
    // Write the physical address of the PRDT.
    m_io_group.bus_master_base().value().offset(4).out<u32>(prdt_buffer.get());
    // Bit 3 of the command register selects device-to-memory (read) transfers.
    m_io_group.bus_master_base().value().out<u8>(direction != TransactionDirection::Write ? 0x8 : 0);
    // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
    m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
    return {};
}
// No-op for IDE channels: the transfer is actually kicked off by
// start_busmastering(). Presumably this hook exists for port types that need
// an extra step between preparation and starting the transfer — confirm
// against the ATAPort call sequence.
ErrorOr<void> IDEChannel::initiate_transaction(TransactionDirection)
{
    VERIFY(m_lock.is_locked());
    return {};
}
VERIFY(capabilities & ATA_CAP_LBA);
if (lba >= 0x10000000) {
lba_mode = LBAMode::FortyEightBit;
ErrorOr<u8> IDEChannel::task_file_status()
{
VERIFY(m_lock.is_locked());
return m_io_group.control_base().in<u8>();
}
// Read the ATA error register. The value is only meaningful when the status
// register reports the ERR bit.
ErrorOr<u8> IDEChannel::task_file_error()
{
    VERIFY(m_lock.is_locked());
    return m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
}
// Scratch-register presence test: write alternating patterns to the sector
// count and LBA-low registers and read them back. A real device latches the
// last written values; a floating bus does not.
ErrorOr<bool> IDEChannel::detect_presence_on_selected_device()
{
    VERIFY(m_lock.is_locked());
    m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(0x55);
    m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>(0xAA);
    m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(0xAA);
    m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>(0x55);
    m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(0x55);
    m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>(0xAA);
    auto sector_count_readback = m_io_group.io_base().offset(ATA_REG_SECCOUNT0).in<u8>();
    auto lba_low_readback = m_io_group.io_base().offset(ATA_REG_LBA0).in<u8>();
    return lba_low_readback == 0xAA && sector_count_readback == 0x55;
}
// Poll the alternate status register until BSY clears, giving up with EBUSY
// after `timeout_in_milliseconds` milliseconds.
ErrorOr<void> IDEChannel::wait_if_busy_until_timeout(size_t timeout_in_milliseconds)
{
    for (size_t time_elapsed = 0; time_elapsed <= timeout_in_milliseconds; ++time_elapsed) {
        if (!(m_io_group.control_base().in<u8>() & ATA_SR_BSY))
            return {};
        IO::delay(1000);
    }
    return Error::from_errno(EBUSY);
}
// Acknowledge any pending device interrupt by reading the regular status
// register; the value itself is deliberately discarded.
ErrorOr<void> IDEChannel::force_clear_interrupts()
{
    VERIFY(m_lock.is_locked());
    m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
    return {};
}
// Program the device's task file registers from `task_file`, wait for the
// device to become ready, then issue the command.
// Fix: this function was diff residue — the old `ata_access` body (double
// `else` branch, `lba`/`block_count`/`slave_request` locals that don't exist
// here, and `send_ata_*_command`/`enable_irq` calls) was interleaved with the
// new implementation, making it ill-formed. Reconstructed to use only the
// TaskFile parameter.
ErrorOr<void> IDEChannel::load_taskfile_into_registers(ATAPort::TaskFile const& task_file, LBAMode lba_mode, size_t completion_timeout_in_milliseconds)
{
    VERIFY(m_lock.is_locked());
    VERIFY(m_hard_lock.is_locked());

    // For 48-bit LBA the high address bits go into the "previous" register
    // bank, so the device/head register carries no LBA bits. For 28-bit LBA,
    // address bits 24..27 live in the low nibble of the device/head register.
    u8 head = 0;
    if (lba_mode == LBAMode::FortyEightBit)
        head = 0;
    else if (lba_mode == LBAMode::TwentyEightBit)
        head = (task_file.lba_high[0] & 0x0F);

    // Note: Preserve the selected drive, always use LBA addressing
    auto driver_register = ((m_io_group.io_base().offset(ATA_REG_HDDEVSEL).in<u8>() & (1 << 4)) | (head | (1 << 5) | (1 << 6)));
    m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(driver_register);
    IO::delay(50);

    if (lba_mode == LBAMode::FortyEightBit) {
        // Write the "previous" register bank first: high byte of the sector
        // count and the upper three LBA bytes.
        m_io_group.io_base().offset(ATA_REG_SECCOUNT1).out<u8>((task_file.count >> 8) & 0xFF);
        m_io_group.io_base().offset(ATA_REG_LBA3).out<u8>(task_file.lba_high[0]);
        m_io_group.io_base().offset(ATA_REG_LBA4).out<u8>(task_file.lba_high[1]);
        m_io_group.io_base().offset(ATA_REG_LBA5).out<u8>(task_file.lba_high[2]);
    }

    m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(task_file.count & 0xFF);
    m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>(task_file.lba_low[0]);
    m_io_group.io_base().offset(ATA_REG_LBA1).out<u8>(task_file.lba_low[1]);
    m_io_group.io_base().offset(ATA_REG_LBA2).out<u8>(task_file.lba_low[2]);

    // Wait for the device to become ready (BSY clear, DRDY set) before
    // issuing the command; bail out with EBUSY on timeout.
    size_t time_elapsed = 0;
    for (;;) {
        if (time_elapsed > completion_timeout_in_milliseconds)
            return Error::from_errno(EBUSY);
        // FIXME: Use task_file_status method
        auto status = m_io_group.control_base().in<u8>();
        if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRDY))
            break;
        IO::delay(1000);
        time_elapsed++;
    }
    m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(task_file.command);
    return {};
}
void IDEChannel::send_ata_pio_command(LBAMode lba_mode, Direction direction) const
ErrorOr<void> IDEChannel::device_select(size_t device_index)
{
if (lba_mode != LBAMode::FortyEightBit) {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_PIO : ATA_CMD_WRITE_PIO);
} else {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_PIO_EXT : ATA_CMD_WRITE_PIO_EXT);
VERIFY(m_lock.is_locked());
if (device_index > 1)
return Error::from_errno(EINVAL);
IO::delay(20);
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | ((device_index) << 4));
IO::delay(20);
return {};
}
// Allow the device to raise interrupts by writing 0 to the device control
// register (clearing the interrupt-mask bit that disable_interrupts() sets).
ErrorOr<void> IDEChannel::enable_interrupts()
{
    VERIFY(m_lock.is_locked());
    m_io_group.control_base().out<u8>(0);
    m_interrupts_enabled = true;
    return {};
}
// Mask device interrupts by setting bit 1 of the device control register
// (the ATA nIEN bit — confirm against the ATA spec).
ErrorOr<void> IDEChannel::disable_interrupts()
{
    VERIFY(m_lock.is_locked());
    m_io_group.control_base().out<u8>(1 << 1);
    m_interrupts_enabled = false;
    return {};
}
// PIO-read one sector (256 words) from the data port into `buffer` at the
// given block offset. Only full 512-byte sectors are supported for now.
ErrorOr<void> IDEChannel::read_pio_data_to_buffer(UserOrKernelBuffer& buffer, size_t block_offset, size_t words_count)
{
    VERIFY(m_lock.is_locked());
    VERIFY(words_count == 256);
    for (u32 word_index = 0; word_index < 256; ++word_index) {
        u16 data = m_io_group.io_base().offset(ATA_REG_DATA).in<u16>();
        // FIXME: Don't assume 512 bytes sector
        size_t const byte_offset = block_offset * 512 + (word_index * 2);
        TRY(buffer.write(&data, byte_offset, 2));
    }
    return {};
}
bool IDEChannel::ata_do_pio_read_sector()
ErrorOr<void> IDEChannel::write_pio_data_from_buffer(UserOrKernelBuffer const& buffer, size_t block_offset, size_t words_count)
{
VERIFY(m_lock.is_locked());
VERIFY(m_request_lock.is_locked());
VERIFY(!m_current_request.is_null());
dbgln_if(PATA_DEBUG, "IDEChannel::ata_do_pio_read_sector");
auto& request = *m_current_request;
auto block_size = m_current_request->block_size();
auto out_buffer = request.buffer().offset(m_current_request_block_index * block_size);
auto result = request.write_to_buffer_buffered<m_logical_sector_size>(out_buffer, block_size, [&](Bytes bytes) {
for (size_t i = 0; i < bytes.size(); i += sizeof(u16))
*(u16*)bytes.offset_pointer(i) = IO::in16(m_io_group.io_base().offset(ATA_REG_DATA).get());
return bytes.size();
});
if (result.is_error()) {
// TODO: Do we need to abort the PATA read if this wasn't the last block?
complete_pio_transaction(AsyncDeviceRequest::MemoryFault);
return false;
VERIFY(words_count == 256);
for (u32 i = 0; i < 256; ++i) {
u16 buf;
// FIXME: Don't assume 512 bytes sector
TRY(buffer.read(&buf, block_offset * 512 + (i * 2), 2));
IO::out16(m_io_group.io_base().offset(ATA_REG_DATA).get(), buf);
}
return true;
}
// Kick off a PIO read of the current request; the per-sector transfers then
// happen from the IRQ-driven work items.
// Fix: renamed the local SpinlockLocker, which shadowed the `m_lock` mutex
// member — confusing and error-prone.
void IDEChannel::ata_read_sectors_with_pio(bool slave_request, u16 capabilities)
{
    VERIFY(m_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    VERIFY(m_current_request->block_count() <= 256);
    SpinlockLocker request_locker(m_request_lock);
    dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors_with_pio");
    dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
    ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
}
// PIO-write the current block of the in-flight request to the device, 16
// bits at a time. Completes the transaction with MemoryFault if reading the
// requester's buffer fails.
void IDEChannel::ata_do_pio_write_sector()
{
    VERIFY(m_lock.is_locked());
    VERIFY(m_request_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    auto& request = *m_current_request;
    io_delay();
    // FIXME: This busy-wait has no timeout; a hung device would spin forever.
    while ((m_io_group.control_base().in<u8>() & ATA_SR_BSY) || !(m_io_group.control_base().in<u8>() & ATA_SR_DRQ))
        ;
    u8 status = m_io_group.control_base().in<u8>();
    VERIFY(status & ATA_SR_DRQ);
    auto block_size = m_current_request->block_size();
    auto in_buffer = request.buffer().offset(m_current_request_block_index * block_size);
    dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} bytes (part {}) (status={:#02x})...", block_size, m_current_request_block_index, status);
    // Pull the data from the requester's buffer in bounded chunks (this read
    // may fault) and stream each chunk to the data port one word at a time.
    auto result = request.read_from_buffer_buffered<m_logical_sector_size>(in_buffer, block_size, [&](ReadonlyBytes readonly_bytes) {
        for (size_t i = 0; i < readonly_bytes.size(); i += sizeof(u16))
            IO::out16(m_io_group.io_base().offset(ATA_REG_DATA).get(), *(const u16*)readonly_bytes.offset(i));
        return readonly_bytes.size();
    });
    if (result.is_error())
        complete_pio_transaction(AsyncDeviceRequest::MemoryFault);
}
// FIXME: I'm assuming this doesn't work based on the fact PIO read doesn't work.
// Kick off a PIO write of the current request and push the first sector;
// subsequent sectors are written from the IRQ-driven work items.
// Fix: renamed the local SpinlockLocker, which shadowed the `m_lock` mutex
// member — confusing and error-prone.
void IDEChannel::ata_write_sectors_with_pio(bool slave_request, u16 capabilities)
{
    VERIFY(m_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    VERIFY(m_current_request->block_count() <= 256);
    SpinlockLocker request_locker(m_request_lock);
    dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
    ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
    ata_do_pio_write_sector();
}
// Issue the appropriate DMA command opcode; the EXT variants are required
// for 48-bit LBA addressing.
void IDEChannel::send_ata_dma_command(LBAMode lba_mode, Direction direction) const
{
    bool const is_48_bit = lba_mode == LBAMode::FortyEightBit;
    u8 command;
    if (direction == Direction::Read)
        command = is_48_bit ? ATA_CMD_READ_DMA_EXT : ATA_CMD_READ_DMA;
    else
        command = is_48_bit ? ATA_CMD_WRITE_DMA_EXT : ATA_CMD_WRITE_DMA;
    m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(command);
}
// Program the bus master for a DMA read of the current request and start it.
// Fix: renamed the local SpinlockLocker, which shadowed the `m_lock` mutex
// member — confusing and error-prone.
void IDEChannel::ata_read_sectors_with_dma(bool slave_request, u16 capabilities)
{
    VERIFY(m_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    VERIFY(m_current_request->block_count() <= 256);
    SpinlockLocker request_locker(m_request_lock);
    dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors_with_dma ({} x {})", m_current_request->block_index(), m_current_request->block_count());
    // Note: This is a fix for a quirk for an IDE controller on ICH7 machine.
    // We need to select the drive and then we wait 10 microseconds... and it doesn't hurt anything
    m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | ((slave_request ? 1 : 0) << 4));
    IO::delay(10);
    // Describe the transfer in the PRDT: one contiguous run in the bounce buffer.
    prdt().offset = m_dma_buffer_page->paddr().get();
    prdt().size = 512 * m_current_request->block_count();
    VERIFY(prdt().size <= PAGE_SIZE);
    VERIFY(m_io_group.bus_master_base().has_value());
    // Stop bus master
    m_io_group.bus_master_base().value().out<u8>(0);
    // Write the PRDT location
    m_io_group.bus_master_base().value().offset(4).out<u32>(m_prdt_page->paddr().get());
    // Set transfer direction
    m_io_group.bus_master_base().value().out<u8>(0x8);
    // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
    m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
    ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
    // Start bus master
    m_io_group.bus_master_base().value().out<u8>(0x9);
}
// Copy the request data into the DMA bounce buffer, program the bus master
// for a DMA write, and start it.
// Fixes: removed the trailing `return {};` (diff residue — this function
// returns void, so `return {};` is ill-formed), and renamed the local
// SpinlockLocker which shadowed the `m_lock` mutex member.
void IDEChannel::ata_write_sectors_with_dma(bool slave_request, u16 capabilities)
{
    VERIFY(m_lock.is_locked());
    VERIFY(!m_current_request.is_null());
    VERIFY(m_current_request->block_count() <= 256);
    SpinlockLocker request_locker(m_request_lock);
    dbgln_if(PATA_DEBUG, "IDEChannel::ata_write_sectors_with_dma ({} x {})", m_current_request->block_index(), m_current_request->block_count());
    // Describe the transfer in the PRDT: one contiguous run in the bounce buffer.
    prdt().offset = m_dma_buffer_page->paddr().get();
    prdt().size = 512 * m_current_request->block_count();
    // Copy the data to write into the bounce buffer; this may fault.
    if (auto result = m_current_request->read_from_buffer(m_current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * m_current_request->block_count()); result.is_error()) {
        complete_dma_transaction(AsyncDeviceRequest::MemoryFault);
        return;
    }
    // Note: This is a fix for a quirk for an IDE controller on ICH7 machine.
    // We need to select the drive and then we wait 10 microseconds... and it doesn't hurt anything
    m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | ((slave_request ? 1 : 0) << 4));
    IO::delay(10);
    VERIFY(prdt().size <= PAGE_SIZE);
    VERIFY(m_io_group.bus_master_base().has_value());
    // Stop bus master
    m_io_group.bus_master_base().value().out<u8>(0);
    // Write the PRDT location
    m_io_group.bus_master_base().value().offset(4).out<u32>(m_prdt_page->paddr().get());
    // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
    m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
    ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
    // Start bus master
    m_io_group.bus_master_base().value().out<u8>(0x1);
}
}

View file

@ -17,7 +17,7 @@
#pragma once
#include <AK/Badge.h>
#include <AK/Error.h>
#include <AK/RefPtr.h>
#include <Kernel/Arch/x86/IO.h>
#include <Kernel/Devices/Device.h>
@ -27,7 +27,7 @@
#include <Kernel/PhysicalAddress.h>
#include <Kernel/Random.h>
#include <Kernel/Storage/ATA/ATADevice.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Storage/ATA/ATAPort.h>
#include <Kernel/Storage/StorageDevice.h>
#include <Kernel/WaitQueue.h>
@ -35,9 +35,11 @@ namespace Kernel {
class AsyncBlockDeviceRequest;
class IDEController;
class PCIIDEController;
class ISAIDEController;
class IDEChannel : public RefCounted<IDEChannel>
class IDEChannel
: public ATAPort
, public IRQHandler {
friend class IDEController;
@ -104,92 +106,65 @@ public:
static NonnullRefPtr<IDEChannel> create(IDEController const&, IOAddressGroup, ChannelType type);
static NonnullRefPtr<IDEChannel> create(IDEController const&, u8 irq, IOAddressGroup, ChannelType type);
void initialize_with_pci_controller(Badge<PCIIDEController>, bool force_pio);
void initialize_with_isa_controller(Badge<ISAIDEController>, bool force_pio);
virtual ~IDEChannel() override;
RefPtr<StorageDevice> master_device() const;
RefPtr<StorageDevice> slave_device() const;
virtual StringView purpose() const override { return "PATA Channel"sv; }
ErrorOr<void> allocate_resources_for_pci_ide_controller(Badge<PCIIDEController>, bool force_pio);
ErrorOr<void> allocate_resources_for_isa_ide_controller(Badge<ISAIDEController>);
private:
static constexpr size_t m_logical_sector_size = 512;
void initialize(bool force_pio);
struct [[gnu::packed]] PhysicalRegionDescriptor {
u32 offset;
u16 size { 0 };
u16 end_of_table { 0 };
};
enum class LBAMode : u8 {
None, // CHS
TwentyEightBit,
FortyEightBit,
};
enum class Direction : u8 {
Read,
Write,
};
IDEChannel(IDEController const&, IOAddressGroup, ChannelType type);
IDEChannel(IDEController const&, u8 irq, IOAddressGroup, ChannelType type);
//^ IRQHandler
virtual bool handle_irq(RegisterState const&) override;
bool handle_irq_for_dma_transaction();
void complete_dma_transaction(AsyncDeviceRequest::RequestResult);
bool handle_irq_for_pio_transaction();
void complete_pio_transaction(AsyncDeviceRequest::RequestResult);
void send_ata_pio_command(LBAMode lba_mode, Direction direction) const;
void ata_read_sectors_with_pio(bool, u16);
void ata_write_sectors_with_pio(bool, u16);
void send_ata_dma_command(LBAMode lba_mode, Direction direction) const;
void ata_read_sectors_with_dma(bool, u16);
void ata_write_sectors_with_dma(bool, u16);
void detect_disks();
ErrorOr<void> allocate_resources(bool force_pio);
StringView channel_type_string() const;
void try_disambiguate_error();
virtual ErrorOr<void> disable() override { TODO(); }
virtual ErrorOr<void> power_on() override { TODO(); }
virtual ErrorOr<void> port_phy_reset() override;
bool select_device_and_wait_until_not_busy(DeviceType, size_t milliseconds_timeout);
bool wait_until_not_busy(size_t milliseconds_timeout);
void start_request(AsyncBlockDeviceRequest&, bool, u16);
virtual bool pio_capable() const override { return true; }
virtual bool dma_capable() const override { return m_dma_enabled; }
void clear_pending_interrupts() const;
virtual size_t max_possible_devices_connected() const override { return 2; }
void ata_access(Direction, bool, u64, u8, u16);
virtual ErrorOr<void> stop_busmastering() override;
virtual ErrorOr<void> start_busmastering(TransactionDirection) override;
virtual ErrorOr<void> force_busmastering_status_clean() override;
// ^ATAPort
// Bus-mastering (ATA DMA) hooks, driven by the generic ATAPort transfer logic.
virtual ErrorOr<u8> busmastering_status() override;
virtual ErrorOr<void> prepare_transaction_with_busmastering(TransactionDirection, PhysicalAddress prdt_buffer) override;
virtual ErrorOr<void> initiate_transaction(TransactionDirection) override;
// PIO helpers operating on one sector at a time.
bool ata_do_pio_read_sector();
void ata_do_pio_write_sector();
// Task file (status / error register) accessors.
virtual ErrorOr<u8> task_file_status() override;
virtual ErrorOr<u8> task_file_error() override;
// Reinterprets the backing memory of m_prdt_region as a PhysicalRegionDescriptor.
// NOTE(review): assumes m_prdt_region was allocated before any DMA transfer — verify against callers.
PhysicalRegionDescriptor& prdt() { return *reinterpret_cast<PhysicalRegionDescriptor*>(m_prdt_region->vaddr().as_ptr()); }
// Low-level port operations, implemented against this channel's IO address group.
virtual ErrorOr<void> wait_if_busy_until_timeout(size_t timeout_in_milliseconds) override;
virtual ErrorOr<void> device_select(size_t device_index) override;
virtual ErrorOr<bool> detect_presence_on_selected_device() override;
virtual ErrorOr<void> enable_interrupts() override;
virtual ErrorOr<void> disable_interrupts() override;
virtual ErrorOr<void> force_clear_interrupts() override;
virtual ErrorOr<void> load_taskfile_into_registers(TaskFile const&, LBAMode lba_mode, size_t completion_timeout_in_milliseconds) override;
virtual ErrorOr<void> read_pio_data_to_buffer(UserOrKernelBuffer&, size_t block_offset, size_t words_count) override;
virtual ErrorOr<void> write_pio_data_from_buffer(UserOrKernelBuffer const&, size_t block_offset, size_t words_count) override;
// Two constructor overloads: one takes an explicit IRQ number, the other
// presumably derives it from the channel type — TODO confirm against the .cpp.
IDEChannel(IDEController const&, IOAddressGroup, ChannelType type, NonnullOwnPtr<KBuffer> ata_identify_data_buffer);
IDEChannel(IDEController const&, u8 irq, IOAddressGroup, ChannelType type, NonnullOwnPtr<KBuffer> ata_identify_data_buffer);
//^ IRQHandler
virtual bool handle_irq(RegisterState const&) override;
// Data members
ChannelType m_channel_type { ChannelType::Primary };
// Last raised device error; volatile — presumably written from interrupt context, TODO confirm.
volatile u8 m_device_error { 0 };
// Entropy source — presumably fed from the IRQ handler, TODO confirm.
EntropySource m_entropy_source;
// Devices attached to this channel (each ATA channel can carry a master and a slave device).
RefPtr<ATADevice> m_master;
RefPtr<ATADevice> m_slave;
// State of the asynchronous block request currently being serviced, if any.
RefPtr<AsyncBlockDeviceRequest> m_current_request;
u64 m_current_request_block_index { 0 };
bool m_current_request_flushing_cache { false };
Spinlock m_request_lock;
Mutex m_lock { "IDEChannel"sv };
bool m_dma_enabled { false };
bool m_interrupts_enabled { true };
// IO ports used to program this channel.
IOAddressGroup m_io_group;
// Bus-mastering resources: the PRDT and a DMA bounce buffer, each backed by a physical page.
OwnPtr<Memory::Region> m_prdt_region;
OwnPtr<Memory::Region> m_dma_buffer_region;
RefPtr<Memory::PhysicalPage> m_prdt_page;
RefPtr<Memory::PhysicalPage> m_dma_buffer_page;
NonnullRefPtr<IDEController> m_parent_controller;
};
}

View file

@@ -46,13 +46,19 @@ void IDEController::start_request(ATADevice const& device, AsyncBlockDeviceReque
auto& address = device.ata_address();
VERIFY(address.subport < 2);
switch (address.port) {
case 0:
m_channels[0].start_request(request, address.subport == 0 ? false : true, device.ata_capabilites());
case 0: {
auto result = m_channels[0].start_request(device, request);
// FIXME: Propagate errors properly
VERIFY(!result.is_error());
return;
case 1:
m_channels[1].start_request(request, address.subport == 0 ? false : true, device.ata_capabilites());
}
case 1: {
auto result = m_channels[1].start_request(device, request);
// FIXME: Propagate errors properly
VERIFY(!result.is_error());
return;
}
}
VERIFY_NOT_REACHED();
}
@@ -61,23 +67,20 @@ void IDEController::complete_current_request(AsyncDeviceRequest::RequestResult)
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT IDEController::IDEController()
{
}
UNMAP_AFTER_INIT IDEController::IDEController() = default;
UNMAP_AFTER_INIT IDEController::~IDEController() = default;
RefPtr<StorageDevice> IDEController::device_by_channel_and_position(u32 index) const
{
switch (index) {
case 0:
return m_channels[0].master_device();
return m_channels[0].connected_device(0);
case 1:
return m_channels[0].slave_device();
return m_channels[0].connected_device(1);
case 2:
return m_channels[1].master_device();
return m_channels[1].connected_device(0);
case 3:
return m_channels[1].slave_device();
return m_channels[1].connected_device(1);
}
VERIFY_NOT_REACHED();
}

View file

@@ -33,12 +33,25 @@ UNMAP_AFTER_INIT void ISAIDEController::initialize_channels()
auto secondary_base_io = IOAddress(0x170);
auto secondary_control_io = IOAddress(0x376);
auto initialize_and_enumerate = [](IDEChannel& channel) -> void {
{
auto result = channel.allocate_resources_for_isa_ide_controller({});
// FIXME: Propagate errors properly
VERIFY(!result.is_error());
}
{
auto result = channel.detect_connected_devices();
// FIXME: Propagate errors properly
VERIFY(!result.is_error());
}
};
m_channels.append(IDEChannel::create(*this, { primary_base_io, primary_control_io }, IDEChannel::ChannelType::Primary));
m_channels[0].initialize_with_isa_controller({}, true);
initialize_and_enumerate(m_channels[0]);
m_channels[0].enable_irq();
m_channels.append(IDEChannel::create(*this, { secondary_base_io, secondary_control_io }, IDEChannel::ChannelType::Secondary));
m_channels[1].initialize_with_isa_controller({}, true);
initialize_and_enumerate(m_channels[1]);
m_channels[1].enable_irq();
dbgln("ISA IDE controller detected and initialized");
}

View file

@@ -87,19 +87,41 @@ UNMAP_AFTER_INIT void PCIIDEController::initialize(bool force_pio)
dbgln("IDE controller @ {}: primary channel DMA capable? {}", pci_address(), ((bus_master_base.offset(2).in<u8>() >> 5) & 0b11));
dbgln("IDE controller @ {}: secondary channel DMA capable? {}", pci_address(), ((bus_master_base.offset(2 + 8).in<u8>() >> 5) & 0b11));
auto initialize_and_enumerate = [&force_pio](IDEChannel& channel) -> void {
{
auto result = channel.allocate_resources_for_pci_ide_controller({}, force_pio);
// FIXME: Propagate errors properly
VERIFY(!result.is_error());
}
{
auto result = channel.detect_connected_devices();
// FIXME: Propagate errors properly
VERIFY(!result.is_error());
}
};
if (!is_bus_master_capable())
force_pio = true;
auto bar0 = PCI::get_BAR0(pci_address());
auto primary_base_io = (bar0 == 0x1 || bar0 == 0) ? IOAddress(0x1F0) : IOAddress(bar0 & (~1));
auto bar1 = PCI::get_BAR1(pci_address());
auto primary_control_io = (bar1 == 0x1 || bar1 == 0) ? IOAddress(0x3F6) : IOAddress(bar1 & (~1));
auto bar2 = PCI::get_BAR2(pci_address());
auto secondary_base_io = (bar2 == 0x1 || bar2 == 0) ? IOAddress(0x170) : IOAddress(bar2 & (~1));
auto bar3 = PCI::get_BAR3(pci_address());
auto secondary_control_io = (bar3 == 0x1 || bar3 == 0) ? IOAddress(0x376) : IOAddress(bar3 & (~1));
auto primary_base_io = (bar0 == 0x1 || bar0 == 0) ? IOAddress(0x1F0) : IOAddress(bar0 & (~1));
// Note: the PCI IDE specification says we should access the IO address with an offset of 2
// on native PCI IDE controllers.
auto primary_control_io = (bar1 == 0x1 || bar1 == 0) ? IOAddress(0x3F6) : IOAddress((bar1 & (~1)) | 2);
auto secondary_base_io = (bar2 == 0x1 || bar2 == 0) ? IOAddress(0x170) : IOAddress(bar2 & (~1));
// Note: the PCI IDE specification says we should access the IO address with an offset of 2
// on native PCI IDE controllers.
auto secondary_control_io = (bar3 == 0x1 || bar3 == 0) ? IOAddress(0x376) : IOAddress((bar3 & (~1)) | 2);
// FIXME: On IOAPIC based system, this value might be completely wrong
// On QEMU for example, it should be "u8 irq_line = 22;" to actually work.
auto irq_line = m_interrupt_line.value();
if (is_pci_native_mode_enabled()) {
VERIFY(irq_line != 0);
}
@@ -109,7 +131,7 @@ UNMAP_AFTER_INIT void PCIIDEController::initialize(bool force_pio)
} else {
m_channels.append(IDEChannel::create(*this, { primary_base_io, primary_control_io, bus_master_base }, IDEChannel::ChannelType::Primary));
}
m_channels[0].initialize_with_pci_controller({}, force_pio);
initialize_and_enumerate(m_channels[0]);
m_channels[0].enable_irq();
if (is_pci_native_mode_enabled_on_secondary_channel()) {
@@ -117,7 +139,7 @@ UNMAP_AFTER_INIT void PCIIDEController::initialize(bool force_pio)
} else {
m_channels.append(IDEChannel::create(*this, { secondary_base_io, secondary_control_io, bus_master_base.offset(8) }, IDEChannel::ChannelType::Secondary));
}
m_channels[1].initialize_with_pci_controller({}, force_pio);
initialize_and_enumerate(m_channels[1]);
m_channels[1].enable_irq();
}

View file

@@ -13,10 +13,12 @@
namespace Kernel {
WorkQueue* g_io_work;
WorkQueue* g_ata_work;
UNMAP_AFTER_INIT void WorkQueue::initialize()
{
g_io_work = new WorkQueue("IO WorkQueue Task"sv);
g_ata_work = new WorkQueue("ATA WorkQueue Task"sv);
}
UNMAP_AFTER_INIT WorkQueue::WorkQueue(StringView name)

View file

@@ -16,6 +16,7 @@
namespace Kernel {
extern WorkQueue* g_io_work;
extern WorkQueue* g_ata_work;
class WorkQueue {
AK_MAKE_NONCOPYABLE(WorkQueue);

View file

@@ -4,6 +4,7 @@ set(AHCI_DEBUG ON)
set(APIC_DEBUG ON)
set(APIC_SMP_DEBUG ON)
set(ARP_DEBUG ON)
set(ATA_DEBUG ON)
set(AWAVLOADER_DEBUG ON)
set(AFLACLOADER_DEBUG ON)
set(BBFS_DEBUG ON)