
Kernel/Storage: Move AHCI and IDE code into new subdirectories

We do this to increase the clarity of the major and secondary components
in the subsystem. To make it even more understandable, we rename the
files to better represent the classes within them and to remove
redundancy in their names.

Also, some includes are removed from the general components of the ATA
subsystem.
Authored by Liav A on 2021-11-19 11:52:07 +02:00; committed by Linus Groh
parent a70e1a0340
commit c001e3f567
23 changed files with 37 additions and 43 deletions
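
For orientation, the renamed headers map to the classes they contain as follows (paths as they appear in the includes in the diff below):

#include <Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h> // class BMIDEChannel
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>          // class IDEChannel
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>       // class IDEController
#include <Kernel/Storage/ATA/GenericIDE/ISAController.h>    // class ISAIDEController
#include <Kernel/Storage/ATA/GenericIDE/PCIController.h>    // class PCIIDEController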


@@ -0,0 +1,241 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATA.h>
#include <Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/WorkQueue.h>
namespace Kernel {
UNMAP_AFTER_INIT NonnullRefPtr<BMIDEChannel> BMIDEChannel::create(IDEController const& ide_controller, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
{
return adopt_ref(*new BMIDEChannel(ide_controller, io_group, type));
}
UNMAP_AFTER_INIT NonnullRefPtr<BMIDEChannel> BMIDEChannel::create(IDEController const& ide_controller, u8 irq, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
{
return adopt_ref(*new BMIDEChannel(ide_controller, irq, io_group, type));
}
UNMAP_AFTER_INIT BMIDEChannel::BMIDEChannel(IDEController const& controller, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
: IDEChannel(controller, io_group, type)
{
initialize();
}
UNMAP_AFTER_INIT BMIDEChannel::BMIDEChannel(IDEController const& controller, u8 irq, IDEChannel::IOAddressGroup io_group, IDEChannel::ChannelType type)
: IDEChannel(controller, irq, io_group, type)
{
initialize();
}
UNMAP_AFTER_INIT void BMIDEChannel::initialize()
{
VERIFY(m_io_group.bus_master_base().has_value());
// Let's try to set up DMA transfers.
{
auto region_or_error = MM.allocate_dma_buffer_page("IDE PRDT"sv, Memory::Region::Access::ReadWrite, m_prdt_page);
if (region_or_error.is_error())
TODO();
m_prdt_region = region_or_error.release_value();
}
{
auto region_or_error = MM.allocate_dma_buffer_page("IDE DMA region"sv, Memory::Region::Access::ReadWrite, m_dma_buffer_page);
if (region_or_error.is_error())
TODO();
m_dma_buffer_region = region_or_error.release_value();
}
prdt().end_of_table = 0x8000;
// clear bus master interrupt status
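// (The Bus Master status register is at offset 2 from the bus master base; its
// interrupt bit (bit 2, 0x4) is write-one-to-clear, hence the read-modify-write.)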
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
}
static void print_ide_status(u8 status)
{
dbgln("BMIDEChannel: print_ide_status: DRQ={} BSY={}, DRDY={}, DSC={}, DF={}, CORR={}, IDX={}, ERR={}",
(status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0,
(status & ATA_SR_DRDY) != 0,
(status & ATA_SR_DSC) != 0,
(status & ATA_SR_DF) != 0,
(status & ATA_SR_CORR) != 0,
(status & ATA_SR_IDX) != 0,
(status & ATA_SR_ERR) != 0);
}
bool BMIDEChannel::handle_irq(RegisterState const&)
{
u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
m_entropy_source.add_random_event(status);
VERIFY(m_io_group.bus_master_base().has_value());
u8 bstatus = m_io_group.bus_master_base().value().offset(2).in<u8>();
if (!(bstatus & 0x4)) {
// interrupt not from this device, ignore
dbgln_if(PATA_DEBUG, "BMIDEChannel: ignore interrupt");
return false;
}
// clear bus master interrupt status
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
SpinlockLocker lock(m_request_lock);
dbgln_if(PATA_DEBUG, "BMIDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
(status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0,
(status & ATA_SR_DRDY) != 0);
if (!m_current_request) {
dbgln("BMIDEChannel: IRQ but no pending request!");
return false;
}
if (status & ATA_SR_ERR) {
print_ide_status(status);
m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
dbgln("BMIDEChannel: Error {:#02x}!", (u8)m_device_error);
try_disambiguate_error();
complete_current_request(AsyncDeviceRequest::Failure);
return true;
}
m_device_error = 0;
complete_current_request(AsyncDeviceRequest::Success);
return true;
}
void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult result)
{
// NOTE: this may be called from the interrupt handler!
VERIFY(m_current_request);
VERIFY(m_request_lock.is_locked());
// Now schedule reading back the buffer as soon as we leave the irq handler.
// This is important so that we can safely write the buffer back,
// which could cause page faults. Note that this may be called immediately
// before Processor::deferred_call_queue returns!
auto work_item_creation_result = g_io_work->try_queue([this, result]() {
dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result);
SpinlockLocker lock(m_request_lock);
VERIFY(m_current_request);
auto current_request = m_current_request;
m_current_request.clear();
if (result == AsyncDeviceRequest::Success) {
if (current_request->request_type() == AsyncBlockDeviceRequest::Read) {
if (auto result = current_request->write_to_buffer(current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
lock.unlock();
current_request->complete(AsyncDeviceRequest::MemoryFault);
return;
}
}
// I read somewhere that this may trigger a cache flush so let's do it.
VERIFY(m_io_group.bus_master_base().has_value());
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
}
lock.unlock();
current_request->complete(result);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
SpinlockLocker locker(m_request_lock);
dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_write_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count());
prdt().offset = m_dma_buffer_page->paddr().get();
prdt().size = m_current_request->buffer_size();
if (auto result = m_current_request->read_from_buffer(m_current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), m_current_request->buffer_size()); result.is_error()) {
complete_current_request(AsyncDeviceRequest::MemoryFault);
return;
}
// Note: This is a fix for a quirk of an IDE controller on an ICH7 machine.
// We need to select the drive and then wait 10 microseconds... and it doesn't hurt anything
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | ((slave_request ? 1 : 0) << 4));
IO::delay(10);
VERIFY(prdt().size <= PAGE_SIZE);
VERIFY(m_io_group.bus_master_base().has_value());
// Stop bus master
m_io_group.bus_master_base().value().out<u8>(0);
// Write the PRDT location
m_io_group.bus_master_base().value().offset(4).out<u32>(m_prdt_page->paddr().get());
// Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
// Start bus master
m_io_group.bus_master_base().value().out<u8>(0x1);
}
void BMIDEChannel::send_ata_io_command(LBAMode lba_mode, Direction direction) const
{
if (lba_mode != LBAMode::FortyEightBit) {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA : ATA_CMD_WRITE_DMA);
} else {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_DMA_EXT : ATA_CMD_WRITE_DMA_EXT);
}
}
void BMIDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
SpinlockLocker locker(m_request_lock);
dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_read_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count());
// Note: This is a fix for a quirk of an IDE controller on an ICH7 machine.
// We need to select the drive and then wait 10 microseconds... and it doesn't hurt anything
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | ((slave_request ? 1 : 0) << 4));
IO::delay(10);
prdt().offset = m_dma_buffer_page->paddr().get();
prdt().size = m_current_request->buffer_size();
VERIFY(prdt().size <= PAGE_SIZE);
VERIFY(m_io_group.bus_master_base().has_value());
// Stop bus master
m_io_group.bus_master_base().value().out<u8>(0);
// Write the PRDT location
m_io_group.bus_master_base().value().offset(4).out<u32>(m_prdt_page->paddr().get());
// Set transfer direction
m_io_group.bus_master_base().value().out<u8>(0x8);
// Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 0x6);
ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
// Start bus master
m_io_group.bus_master_base().value().out<u8>(0x9);
}
}


@@ -0,0 +1,55 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
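// One entry of the Physical Region Descriptor Table used for bus-master DMA:
// `offset` holds the physical address of the DMA buffer, `size` the transfer
// length in bytes, and `end_of_table` is set to 0x8000 to mark the final entry
// (see BMIDEChannel::initialize() and the ata_read/write_sectors paths).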
struct [[gnu::packed]] PhysicalRegionDescriptor {
u32 offset;
u16 size { 0 };
u16 end_of_table { 0 };
};
class IDEController;
class BMIDEChannel final : public IDEChannel {
friend class IDEController;
friend class PATADiskDevice;
public:
static NonnullRefPtr<BMIDEChannel> create(IDEController const&, IDEChannel::IOAddressGroup, IDEChannel::ChannelType type);
static NonnullRefPtr<BMIDEChannel> create(IDEController const&, u8 irq, IDEChannel::IOAddressGroup, IDEChannel::ChannelType type);
virtual ~BMIDEChannel() override {};
virtual bool is_dma_enabled() const override { return true; };
private:
BMIDEChannel(IDEController const&, IDEChannel::IOAddressGroup, IDEChannel::ChannelType type);
BMIDEChannel(IDEController const&, u8 irq, IDEChannel::IOAddressGroup, IDEChannel::ChannelType type);
void initialize();
void complete_current_request(AsyncDeviceRequest::RequestResult);
// ^IRQHandler
virtual bool handle_irq(RegisterState const&) override;
// ^IDEChannel
virtual void send_ata_io_command(LBAMode lba_mode, Direction direction) const override;
virtual void ata_read_sectors(bool, u16) override;
virtual void ata_write_sectors(bool, u16) override;
PhysicalRegionDescriptor& prdt() { return *reinterpret_cast<PhysicalRegionDescriptor*>(m_prdt_region->vaddr().as_ptr()); }
OwnPtr<Memory::Region> m_prdt_region;
OwnPtr<Memory::Region> m_dma_buffer_region;
RefPtr<Memory::PhysicalPage> m_prdt_page;
RefPtr<Memory::PhysicalPage> m_dma_buffer_page;
};
}


@@ -0,0 +1,556 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/ByteBuffer.h>
#include <AK/Singleton.h>
#include <AK/StringView.h>
#include <Kernel/Arch/x86/IO.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Process.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATA.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/WorkQueue.h>
namespace Kernel {
#define PATA_PRIMARY_IRQ 14
#define PATA_SECONDARY_IRQ 15
UNMAP_AFTER_INIT NonnullRefPtr<IDEChannel> IDEChannel::create(IDEController const& controller, IOAddressGroup io_group, ChannelType type)
{
return adopt_ref(*new IDEChannel(controller, io_group, type));
}
UNMAP_AFTER_INIT NonnullRefPtr<IDEChannel> IDEChannel::create(IDEController const& controller, u8 irq, IOAddressGroup io_group, ChannelType type)
{
return adopt_ref(*new IDEChannel(controller, irq, io_group, type));
}
RefPtr<StorageDevice> IDEChannel::master_device() const
{
return m_master;
}
RefPtr<StorageDevice> IDEChannel::slave_device() const
{
return m_slave;
}
UNMAP_AFTER_INIT void IDEChannel::initialize()
{
disable_irq();
dbgln_if(PATA_DEBUG, "IDEChannel: {} IO base: {}", channel_type_string(), m_io_group.io_base());
dbgln_if(PATA_DEBUG, "IDEChannel: {} control base: {}", channel_type_string(), m_io_group.control_base());
if (m_io_group.bus_master_base().has_value())
dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base: {}", channel_type_string(), m_io_group.bus_master_base().value());
else
dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base disabled", channel_type_string());
// reset the channel
u8 device_control = m_io_group.control_base().in<u8>();
// Wait 30 milliseconds
IO::delay(30000);
m_io_group.control_base().out<u8>(device_control | (1 << 2));
// Wait 30 milliseconds
IO::delay(30000);
m_io_group.control_base().out<u8>(device_control);
// Wait up to 30 seconds before failing
if (!select_device_and_wait_until_not_busy(DeviceType::Master, 30000)) {
dbgln("IDEChannel: reset failed, busy flag on master stuck");
return;
}
// Wait up to 30 seconds before failing
if (!select_device_and_wait_until_not_busy(DeviceType::Slave, 30000)) {
dbgln("IDEChannel: reset failed, busy flag on slave stuck");
return;
}
detect_disks();
// Note: calling detect_disks() could generate an interrupt; clear it if that's the case
clear_pending_interrupts();
}
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, u8 irq, IOAddressGroup io_group, ChannelType type)
: IRQHandler(irq)
, m_channel_type(type)
, m_io_group(io_group)
, m_parent_controller(controller)
{
initialize();
}
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, IOAddressGroup io_group, ChannelType type)
: IRQHandler(type == ChannelType::Primary ? PATA_PRIMARY_IRQ : PATA_SECONDARY_IRQ)
, m_channel_type(type)
, m_io_group(io_group)
, m_parent_controller(controller)
{
initialize();
}
void IDEChannel::clear_pending_interrupts() const
{
m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
}
UNMAP_AFTER_INIT IDEChannel::~IDEChannel() = default;
void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool is_slave, u16 capabilities)
{
MutexLocker locker(m_lock);
VERIFY(m_current_request.is_null());
dbgln_if(PATA_DEBUG, "IDEChannel::start_request");
m_current_request = request;
m_current_request_block_index = 0;
m_current_request_flushing_cache = false;
if (request.request_type() == AsyncBlockDeviceRequest::Read)
ata_read_sectors(is_slave, capabilities);
else
ata_write_sectors(is_slave, capabilities);
}
void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult result)
{
// NOTE: this may be called from the interrupt handler!
VERIFY(m_current_request);
VERIFY(m_request_lock.is_locked());
// Now schedule reading back the buffer as soon as we leave the irq handler.
// This is important so that we can safely write the buffer back,
// which could cause page faults. Note that this may be called immediately
// before Processor::deferred_call_queue returns!
auto work_item_creation_result = g_io_work->try_queue([this, result]() {
dbgln_if(PATA_DEBUG, "IDEChannel::complete_current_request result: {}", (int)result);
MutexLocker locker(m_lock);
VERIFY(m_current_request);
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(result);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
static void print_ide_status(u8 status)
{
dbgln("IDEChannel: print_ide_status: DRQ={} BSY={}, DRDY={}, DSC={}, DF={}, CORR={}, IDX={}, ERR={}",
(status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0,
(status & ATA_SR_DRDY) != 0,
(status & ATA_SR_DSC) != 0,
(status & ATA_SR_DF) != 0,
(status & ATA_SR_CORR) != 0,
(status & ATA_SR_IDX) != 0,
(status & ATA_SR_ERR) != 0);
}
void IDEChannel::try_disambiguate_error()
{
VERIFY(m_request_lock.is_locked());
dbgln("IDEChannel: Error cause:");
switch (m_device_error) {
case ATA_ER_BBK:
dbgln("IDEChannel: - Bad block");
break;
case ATA_ER_UNC:
dbgln("IDEChannel: - Uncorrectable data");
break;
case ATA_ER_MC:
dbgln("IDEChannel: - Media changed");
break;
case ATA_ER_IDNF:
dbgln("IDEChannel: - ID mark not found");
break;
case ATA_ER_MCR:
dbgln("IDEChannel: - Media change request");
break;
case ATA_ER_ABRT:
dbgln("IDEChannel: - Command aborted");
break;
case ATA_ER_TK0NF:
dbgln("IDEChannel: - Track 0 not found");
break;
case ATA_ER_AMNF:
dbgln("IDEChannel: - No address mark");
break;
default:
dbgln("IDEChannel: - No one knows");
break;
}
}
bool IDEChannel::handle_irq(RegisterState const&)
{
u8 status = m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>();
m_entropy_source.add_random_event(status);
SpinlockLocker lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
(status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0,
(status & ATA_SR_DRDY) != 0);
if (!m_current_request) {
dbgln("IDEChannel: IRQ but no pending request!");
return false;
}
if (status & ATA_SR_ERR) {
print_ide_status(status);
m_device_error = m_io_group.io_base().offset(ATA_REG_ERROR).in<u8>();
dbgln("IDEChannel: Error {:#02x}!", (u8)m_device_error);
try_disambiguate_error();
complete_current_request(AsyncDeviceRequest::Failure);
return true;
}
m_device_error = 0;
// Now schedule reading/writing the buffer as soon as we leave the irq handler.
// This is important so that we can safely access the buffers, which could
// trigger page faults
auto work_item_creation_result = g_io_work->try_queue([this]() {
MutexLocker locker(m_lock);
SpinlockLocker lock(m_request_lock);
if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count());
if (ata_do_read_sector()) {
if (++m_current_request_block_index >= m_current_request->block_count()) {
complete_current_request(AsyncDeviceRequest::Success);
return;
}
// Wait for the next block
enable_irq();
}
} else {
if (!m_current_request_flushing_cache) {
dbgln_if(PATA_DEBUG, "IDEChannel: Wrote block {}/{}", m_current_request_block_index, m_current_request->block_count());
if (++m_current_request_block_index >= m_current_request->block_count()) {
// We wrote the last block, flush cache
VERIFY(!m_current_request_flushing_cache);
m_current_request_flushing_cache = true;
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH);
} else {
// Write the next block
ata_do_write_sector();
}
} else {
complete_current_request(AsyncDeviceRequest::Success);
}
}
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
return true;
}
static void io_delay()
{
for (int i = 0; i < 4; ++i)
IO::in8(0x3f6);
}
bool IDEChannel::select_device_and_wait_until_not_busy(DeviceType device_type, size_t milliseconds_timeout)
{
IO::delay(20);
u8 slave = device_type == DeviceType::Slave;
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xA0 | (slave << 4));
IO::delay(20);
size_t time_elapsed = 0;
while (m_io_group.control_base().in<u8>() & ATA_SR_BSY && time_elapsed <= milliseconds_timeout) {
IO::delay(1000);
time_elapsed++;
}
return time_elapsed <= milliseconds_timeout;
}
bool IDEChannel::wait_until_not_busy(size_t milliseconds_timeout)
{
size_t time_elapsed = 0;
while (m_io_group.control_base().in<u8>() & ATA_SR_BSY && time_elapsed <= milliseconds_timeout) {
IO::delay(1000);
time_elapsed++;
}
return time_elapsed <= milliseconds_timeout;
}
StringView IDEChannel::channel_type_string() const
{
if (m_channel_type == ChannelType::Primary)
return "Primary"sv;
return "Secondary"sv;
}
UNMAP_AFTER_INIT void IDEChannel::detect_disks()
{
auto channel_string = [](u8 i) -> StringView {
if (i == 0)
return "master"sv;
return "slave"sv;
};
// There are only two possible disks connected to a channel
for (auto i = 0; i < 2; i++) {
if (!select_device_and_wait_until_not_busy(i == 0 ? DeviceType::Master : DeviceType::Slave, 32000)) {
dbgln("IDEChannel: Timeout waiting for busy flag to clear during {} {} detection", channel_type_string(), channel_string(i));
continue;
}
auto status = m_io_group.control_base().in<u8>();
if (status == 0x0) {
dbgln_if(PATA_DEBUG, "IDEChannel: No {} {} disk detected!", channel_type_string(), channel_string(i));
continue;
}
m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(0);
m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>(0);
m_io_group.io_base().offset(ATA_REG_LBA1).out<u8>(0);
m_io_group.io_base().offset(ATA_REG_LBA2).out<u8>(0);
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_IDENTIFY); // Send the ATA_IDENTIFY command
// Wait up to 2 seconds for the BSY flag to clear
if (!wait_until_not_busy(2000)) {
dbgln_if(PATA_DEBUG, "IDEChannel: No {} {} disk detected, BSY flag was not reset!", channel_type_string(), channel_string(i));
continue;
}
bool check_for_atapi = false;
bool device_presence = true;
bool command_set_is_atapi = false;
size_t milliseconds_elapsed = 0;
for (;;) {
// Wait up to 2 seconds
if (milliseconds_elapsed > 2000)
break;
u8 status = m_io_group.control_base().in<u8>();
if (status & ATA_SR_ERR) {
dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device is not ATA. Will check for ATAPI.", channel_type_string(), channel_string(i));
check_for_atapi = true;
break;
}
if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRQ)) {
dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device appears to be ATA.", channel_type_string(), channel_string(i));
break;
}
if (status == 0 || status == 0xFF) {
dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device presence - none.", channel_type_string(), channel_string(i));
device_presence = false;
break;
}
IO::delay(1000);
milliseconds_elapsed++;
}
if (!device_presence) {
continue;
}
if (milliseconds_elapsed > 10000) {
dbgln_if(PATA_DEBUG, "IDEChannel: {} {} device state unknown. Timeout exceeded.", channel_type_string(), channel_string(i));
continue;
}
if (check_for_atapi) {
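// A failed IDENTIFY leaves a signature in the cylinder registers (LBA1/LBA2):
// 0x14/0xEB marks a parallel ATAPI device, 0x69/0x96 a SATA ATAPI device.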
u8 cl = m_io_group.io_base().offset(ATA_REG_LBA1).in<u8>();
u8 ch = m_io_group.io_base().offset(ATA_REG_LBA2).in<u8>();
if ((cl == 0x14 && ch == 0xEB) || (cl == 0x69 && ch == 0x96)) {
command_set_is_atapi = true;
dbgln("IDEChannel: {} {} device appears to be ATAPI. We're going to ignore it for now as we don't support it.", channel_type_string(), channel_string(i));
continue;
} else {
dbgln("IDEChannel: {} {} device doesn't appear to be ATA or ATAPI. Ignoring it.", channel_type_string(), channel_string(i));
continue;
}
}
// FIXME: Handle possible OOM situation here.
ByteBuffer wbuf = ByteBuffer::create_uninitialized(m_logical_sector_size).release_value_but_fixme_should_propagate_errors();
ByteBuffer bbuf = ByteBuffer::create_uninitialized(m_logical_sector_size).release_value_but_fixme_should_propagate_errors();
u8* b = bbuf.data();
u16* w = (u16*)wbuf.data();
for (u32 i = 0; i < 256; ++i) {
u16 data = m_io_group.io_base().offset(ATA_REG_DATA).in<u16>();
*(w++) = data;
*(b++) = MSB(data);
*(b++) = LSB(data);
}
// "Unpad" the device name string.
for (u32 i = 93; i > 54 && bbuf[i] == ' '; --i)
bbuf[i] = 0;
ATAIdentifyBlock volatile& identify_block = (ATAIdentifyBlock volatile&)(*wbuf.data());
u16 capabilities = identify_block.capabilities[0];
// If the drive is so old that it doesn't support LBA, ignore it.
if (!(capabilities & ATA_CAP_LBA))
continue;
u64 max_addressable_block = identify_block.max_28_bit_addressable_logical_sector;
// if we support 48-bit LBA, use that value instead.
if (identify_block.commands_and_feature_sets_supported[1] & (1 << 10))
max_addressable_block = identify_block.user_addressable_logical_sectors_count;
dbgln("IDEChannel: {} {} {} device found: Name={}, Capacity={}, Capabilities={:#04x}", channel_type_string(), channel_string(i), !command_set_is_atapi ? "ATA" : "ATAPI", ((char*)bbuf.data() + 54), max_addressable_block * m_logical_sector_size, capabilities);
ATADevice::Address address = { m_channel_type == ChannelType::Primary ? static_cast<u8>(0) : static_cast<u8>(1), static_cast<u8>(i) };
if (i == 0) {
m_master = ATADiskDevice::create(m_parent_controller, address, capabilities, m_logical_sector_size, max_addressable_block);
} else {
m_slave = ATADiskDevice::create(m_parent_controller, address, capabilities, m_logical_sector_size, max_addressable_block);
}
}
}
void IDEChannel::ata_access(Direction direction, bool slave_request, u64 lba, u8 block_count, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(m_request_lock.is_locked());
LBAMode lba_mode;
u8 head = 0;
VERIFY(capabilities & ATA_CAP_LBA);
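// LBAs of 0x10000000 (1 << 28) and above cannot be encoded by the 28-bit
// command set, so use 48-bit addressing for them.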
if (lba >= 0x10000000) {
lba_mode = LBAMode::FortyEightBit;
head = 0;
} else {
lba_mode = LBAMode::TwentyEightBit;
head = (lba & 0xF000000) >> 24;
}
// Wait 1 second
wait_until_not_busy(1000);
// We need to select the drive and then we wait 20 microseconds... and it doesn't hurt anything so let's just do it.
m_io_group.io_base().offset(ATA_REG_HDDEVSEL).out<u8>(0xE0 | (static_cast<u8>(slave_request) << 4) | head);
IO::delay(20);
if (lba_mode == LBAMode::FortyEightBit) {
m_io_group.io_base().offset(ATA_REG_SECCOUNT1).out<u8>(0);
m_io_group.io_base().offset(ATA_REG_LBA3).out<u8>((lba & 0xFF000000) >> 24);
m_io_group.io_base().offset(ATA_REG_LBA4).out<u8>((lba & 0xFF00000000ull) >> 32);
m_io_group.io_base().offset(ATA_REG_LBA5).out<u8>((lba & 0xFF0000000000ull) >> 40);
}
m_io_group.io_base().offset(ATA_REG_SECCOUNT0).out<u8>(block_count);
m_io_group.io_base().offset(ATA_REG_LBA0).out<u8>((lba & 0x000000FF) >> 0);
m_io_group.io_base().offset(ATA_REG_LBA1).out<u8>((lba & 0x0000FF00) >> 8);
m_io_group.io_base().offset(ATA_REG_LBA2).out<u8>((lba & 0x00FF0000) >> 16);
for (;;) {
auto status = m_io_group.control_base().in<u8>();
if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRDY))
break;
}
send_ata_io_command(lba_mode, direction);
enable_irq();
}
void IDEChannel::send_ata_io_command(LBAMode lba_mode, Direction direction) const
{
if (lba_mode != LBAMode::FortyEightBit) {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_PIO : ATA_CMD_WRITE_PIO);
} else {
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(direction == Direction::Read ? ATA_CMD_READ_PIO_EXT : ATA_CMD_WRITE_PIO_EXT);
}
}
bool IDEChannel::ata_do_read_sector()
{
VERIFY(m_lock.is_locked());
VERIFY(m_request_lock.is_locked());
VERIFY(!m_current_request.is_null());
dbgln_if(PATA_DEBUG, "IDEChannel::ata_do_read_sector");
auto& request = *m_current_request;
auto block_size = m_current_request->block_size();
auto out_buffer = request.buffer().offset(m_current_request_block_index * block_size);
auto result = request.write_to_buffer_buffered<m_logical_sector_size>(out_buffer, block_size, [&](Bytes bytes) {
for (size_t i = 0; i < bytes.size(); i += sizeof(u16))
*(u16*)bytes.offset_pointer(i) = IO::in16(m_io_group.io_base().offset(ATA_REG_DATA).get());
return bytes.size();
});
if (result.is_error()) {
// TODO: Do we need to abort the PATA read if this wasn't the last block?
complete_current_request(AsyncDeviceRequest::MemoryFault);
return false;
}
return true;
}
// FIXME: This doesn't quite work and locks up reading LBA 3.
void IDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors");
dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
}
void IDEChannel::ata_do_write_sector()
{
VERIFY(m_lock.is_locked());
VERIFY(m_request_lock.is_locked());
VERIFY(!m_current_request.is_null());
auto& request = *m_current_request;
io_delay();
while ((m_io_group.control_base().in<u8>() & ATA_SR_BSY) || !(m_io_group.control_base().in<u8>() & ATA_SR_DRQ))
;
u8 status = m_io_group.control_base().in<u8>();
VERIFY(status & ATA_SR_DRQ);
auto block_size = m_current_request->block_size();
auto in_buffer = request.buffer().offset(m_current_request_block_index * block_size);
dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} bytes (part {}) (status={:#02x})...", block_size, m_current_request_block_index, status);
auto result = request.read_from_buffer_buffered<m_logical_sector_size>(in_buffer, block_size, [&](ReadonlyBytes readonly_bytes) {
for (size_t i = 0; i < readonly_bytes.size(); i += sizeof(u16))
IO::out16(m_io_group.io_base().offset(ATA_REG_DATA).get(), *(const u16*)readonly_bytes.offset(i));
return readonly_bytes.size();
});
if (result.is_error())
complete_current_request(AsyncDeviceRequest::MemoryFault);
}
// FIXME: I'm assuming this doesn't work based on the fact PIO read doesn't work.
void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
SpinlockLocker m_lock(m_request_lock);
dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
ata_do_write_sector();
}
}


@@ -0,0 +1,166 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
//
// Parallel ATA (PATA) controller driver
//
// This driver describes a logical PATA Channel. Each channel can connect up to 2
// IDE Hard Disk Drives. The drives themselves can be either the master drive (hd0)
// or the slave drive (hd1).
//
// More information about the ATA spec for PATA can be found here:
// ftp://ftp.seagate.com/acrobat/reference/111-1c.pdf
//
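//
// For quick reference, the legacy defaults used elsewhere in this commit (see the
// PATA_PRIMARY_IRQ/PATA_SECONDARY_IRQ definitions and the ISAIDEController port setup):
//   Primary channel:   I/O base 0x1F0, control base 0x3F6, IRQ 14
//   Secondary channel: I/O base 0x170, control base 0x376, IRQ 15
//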
#pragma once
#include <AK/RefPtr.h>
#include <Kernel/Arch/x86/IO.h>
#include <Kernel/Devices/Device.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/PhysicalAddress.h>
#include <Kernel/Random.h>
#include <Kernel/Storage/ATA/ATADevice.h>
#include <Kernel/Storage/StorageDevice.h>
#include <Kernel/WaitQueue.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class IDEController;
class IDEChannel : public RefCounted<IDEChannel>
, public IRQHandler {
friend class IDEController;
public:
enum class ChannelType : u8 {
Primary,
Secondary
};
enum class DeviceType : u8 {
Master,
Slave,
};
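// Groups the I/O ranges a channel uses: the command block (io_base), the
// control block (control_base), and the optional bus-master DMA block
// (bus_master_base), which is only present on DMA-capable controllers.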
struct IOAddressGroup {
IOAddressGroup(IOAddress io_base, IOAddress control_base, IOAddress bus_master_base)
: m_io_base(io_base)
, m_control_base(control_base)
, m_bus_master_base(bus_master_base)
{
}
IOAddressGroup(IOAddress io_base, IOAddress control_base)
: m_io_base(io_base)
, m_control_base(control_base)
, m_bus_master_base()
{
}
IOAddressGroup(IOAddressGroup const& other, IOAddress bus_master_base)
: m_io_base(other.io_base())
, m_control_base(other.control_base())
, m_bus_master_base(bus_master_base)
{
}
IOAddressGroup(IOAddressGroup const&) = default;
// Disable default implementations that would use surprising integer promotion.
bool operator==(IOAddressGroup const&) const = delete;
bool operator<=(IOAddressGroup const&) const = delete;
bool operator>=(IOAddressGroup const&) const = delete;
bool operator<(IOAddressGroup const&) const = delete;
bool operator>(IOAddressGroup const&) const = delete;
IOAddress io_base() const { return m_io_base; };
IOAddress control_base() const { return m_control_base; }
Optional<IOAddress> bus_master_base() const { return m_bus_master_base; }
private:
IOAddress m_io_base;
IOAddress m_control_base;
Optional<IOAddress> m_bus_master_base;
};
public:
static NonnullRefPtr<IDEChannel> create(IDEController const&, IOAddressGroup, ChannelType type);
static NonnullRefPtr<IDEChannel> create(IDEController const&, u8 irq, IOAddressGroup, ChannelType type);
virtual ~IDEChannel() override;
RefPtr<StorageDevice> master_device() const;
RefPtr<StorageDevice> slave_device() const;
virtual StringView purpose() const override { return "PATA Channel"sv; }
virtual bool is_dma_enabled() const { return false; }
private:
void complete_current_request(AsyncDeviceRequest::RequestResult);
void initialize();
static constexpr size_t m_logical_sector_size = 512;
protected:
enum class LBAMode : u8 {
None, // CHS
TwentyEightBit,
FortyEightBit,
};
enum class Direction : u8 {
Read,
Write,
};
IDEChannel(IDEController const&, IOAddressGroup, ChannelType type);
IDEChannel(IDEController const&, u8 irq, IOAddressGroup, ChannelType type);
// ^IRQHandler
virtual bool handle_irq(RegisterState const&) override;
virtual void send_ata_io_command(LBAMode lba_mode, Direction direction) const;
virtual void ata_read_sectors(bool, u16);
virtual void ata_write_sectors(bool, u16);
void detect_disks();
StringView channel_type_string() const;
void try_disambiguate_error();
bool select_device_and_wait_until_not_busy(DeviceType, size_t milliseconds_timeout);
bool wait_until_not_busy(size_t milliseconds_timeout);
void start_request(AsyncBlockDeviceRequest&, bool, u16);
void clear_pending_interrupts() const;
void ata_access(Direction, bool, u64, u8, u16);
bool ata_do_read_sector();
void ata_do_write_sector();
// Data members
ChannelType m_channel_type { ChannelType::Primary };
volatile u8 m_device_error { 0 };
EntropySource m_entropy_source;
RefPtr<ATADevice> m_master;
RefPtr<ATADevice> m_slave;
RefPtr<AsyncBlockDeviceRequest> m_current_request;
u64 m_current_request_block_index { 0 };
bool m_current_request_flushing_cache { false };
Spinlock m_request_lock;
Mutex m_lock { "IDEChannel"sv };
IOAddressGroup m_io_group;
NonnullRefPtr<IDEController> m_parent_controller;
};
}


@@ -0,0 +1,98 @@
/*
* Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/FileSystem/ProcFS.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
namespace Kernel {
UNMAP_AFTER_INIT NonnullRefPtr<IDEController> IDEController::initialize()
{
return adopt_ref(*new IDEController());
}
bool IDEController::reset()
{
TODO();
}
bool IDEController::shutdown()
{
TODO();
}
size_t IDEController::devices_count() const
{
size_t count = 0;
for (u32 index = 0; index < 4; index++) {
if (!device(index).is_null())
count++;
}
return count;
}
void IDEController::start_request(ATADevice const& device, AsyncBlockDeviceRequest& request)
{
auto& address = device.ata_address();
VERIFY(address.subport < 2);
switch (address.port) {
case 0:
m_channels[0].start_request(request, address.subport == 0 ? false : true, device.ata_capabilites());
return;
case 1:
m_channels[1].start_request(request, address.subport == 0 ? false : true, device.ata_capabilites());
return;
}
VERIFY_NOT_REACHED();
}
void IDEController::complete_current_request(AsyncDeviceRequest::RequestResult)
{
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT IDEController::IDEController()
{
}
UNMAP_AFTER_INIT IDEController::~IDEController() = default;
RefPtr<StorageDevice> IDEController::device_by_channel_and_position(u32 index) const
{
switch (index) {
case 0:
return m_channels[0].master_device();
case 1:
return m_channels[0].slave_device();
case 2:
return m_channels[1].master_device();
case 3:
return m_channels[1].slave_device();
}
VERIFY_NOT_REACHED();
}
RefPtr<StorageDevice> IDEController::device(u32 index) const
{
NonnullRefPtrVector<StorageDevice> connected_devices;
for (size_t index = 0; index < 4; index++) {
auto checked_device = device_by_channel_and_position(index);
if (checked_device.is_null())
continue;
connected_devices.append(checked_device.release_nonnull());
}
if (index >= connected_devices.size())
return nullptr;
return connected_devices[index];
}
}


@@ -0,0 +1,38 @@
/*
* Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Storage/ATA/ATAController.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/StorageDevice.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class IDEController : public ATAController {
public:
static NonnullRefPtr<IDEController> initialize();
virtual ~IDEController() override;
virtual RefPtr<StorageDevice> device(u32 index) const override final;
virtual bool reset() override final;
virtual bool shutdown() override final;
virtual size_t devices_count() const override final;
virtual void start_request(ATADevice const&, AsyncBlockDeviceRequest&) override final;
virtual void complete_current_request(AsyncDeviceRequest::RequestResult) override final;
protected:
IDEController();
RefPtr<StorageDevice> device_by_channel_and_position(u32 index) const;
NonnullRefPtrVector<IDEChannel> m_channels;
};
}


@@ -0,0 +1,44 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/FileSystem/ProcFS.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h>
#include <Kernel/Storage/ATA/GenericIDE/ISAController.h>
namespace Kernel {
UNMAP_AFTER_INIT NonnullRefPtr<ISAIDEController> ISAIDEController::initialize()
{
return adopt_ref(*new ISAIDEController());
}
UNMAP_AFTER_INIT ISAIDEController::ISAIDEController()
{
initialize_channels();
}
UNMAP_AFTER_INIT void ISAIDEController::initialize_channels()
{
auto primary_base_io = IOAddress(0x1F0);
auto primary_control_io = IOAddress(0x3F6);
auto secondary_base_io = IOAddress(0x170);
auto secondary_control_io = IOAddress(0x376);
m_channels.append(IDEChannel::create(*this, { primary_base_io, primary_control_io }, IDEChannel::ChannelType::Primary));
m_channels[0].enable_irq();
m_channels.append(IDEChannel::create(*this, { secondary_base_io, secondary_control_io }, IDEChannel::ChannelType::Secondary));
m_channels[1].enable_irq();
dbgln("ISA IDE controller detected and initialized");
}
}


@@ -0,0 +1,30 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Storage/StorageDevice.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class ISAIDEController final : public IDEController {
public:
static NonnullRefPtr<ISAIDEController> initialize();
private:
ISAIDEController();
RefPtr<StorageDevice> device_by_channel_and_position(u32 index) const;
void initialize_channels();
};
}


@@ -0,0 +1,136 @@
/*
* Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/FileSystem/ProcFS.h>
#include <Kernel/Sections.h>
#include <Kernel/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Storage/ATA/GenericIDE/BusMasterChannel.h>
#include <Kernel/Storage/ATA/GenericIDE/PCIController.h>
namespace Kernel {
UNMAP_AFTER_INIT NonnullRefPtr<PCIIDEController> PCIIDEController::initialize(PCI::DeviceIdentifier const& device_identifier, bool force_pio)
{
return adopt_ref(*new PCIIDEController(device_identifier, force_pio));
}
UNMAP_AFTER_INIT PCIIDEController::PCIIDEController(PCI::DeviceIdentifier const& device_identifier, bool force_pio)
: PCI::Device(device_identifier.address())
, m_prog_if(device_identifier.prog_if())
, m_interrupt_line(device_identifier.interrupt_line())
{
PCI::enable_io_space(device_identifier.address());
PCI::enable_memory_space(device_identifier.address());
PCI::enable_bus_mastering(device_identifier.address());
enable_pin_based_interrupts();
initialize(force_pio);
}
bool PCIIDEController::is_pci_native_mode_enabled() const
{
return (m_prog_if.value() & 0x05) != 0;
}
bool PCIIDEController::is_pci_native_mode_enabled_on_primary_channel() const
{
return (m_prog_if.value() & 0x1) == 0x1;
}
bool PCIIDEController::is_pci_native_mode_enabled_on_secondary_channel() const
{
return (m_prog_if.value() & 0x4) == 0x4;
}
bool PCIIDEController::is_bus_master_capable() const
{
return m_prog_if.value() & (1 << 7);
}
static char const* detect_controller_type(u8 programming_value)
{
switch (programming_value) {
case 0x00:
return "ISA Compatibility mode-only controller";
case 0x05:
return "PCI native mode-only controller";
case 0x0A:
return "ISA Compatibility mode controller, supports both channels switched to PCI native mode";
case 0x0F:
return "PCI native mode controller, supports both channels switched to ISA compatibility mode";
case 0x80:
return "ISA Compatibility mode-only controller, supports bus mastering";
case 0x85:
return "PCI native mode-only controller, supports bus mastering";
case 0x8A:
return "ISA Compatibility mode controller, supports both channels switched to PCI native mode, supports bus mastering";
case 0x8F:
return "PCI native mode controller, supports both channels switched to ISA compatibility mode, supports bus mastering";
default:
VERIFY_NOT_REACHED();
}
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT void PCIIDEController::initialize(bool force_pio)
{
auto bus_master_base = IOAddress(PCI::get_BAR4(pci_address()) & (~1));
dbgln("IDE controller @ {}: bus master base was set to {}", pci_address(), bus_master_base);
dbgln("IDE controller @ {}: interrupt line was set to {}", pci_address(), m_interrupt_line.value());
dbgln("IDE controller @ {}: {}", pci_address(), detect_controller_type(m_prog_if.value()));
dbgln("IDE controller @ {}: primary channel DMA capable? {}", pci_address(), ((bus_master_base.offset(2).in<u8>() >> 5) & 0b11));
dbgln("IDE controller @ {}: secondary channel DMA capable? {}", pci_address(), ((bus_master_base.offset(2 + 8).in<u8>() >> 5) & 0b11));
if (!is_bus_master_capable())
force_pio = true;
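// A BAR value of 0 or 1 means the channel runs in compatibility mode, so fall
// back to the standard legacy port addresses.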
auto bar0 = PCI::get_BAR0(pci_address());
auto primary_base_io = (bar0 == 0x1 || bar0 == 0) ? IOAddress(0x1F0) : IOAddress(bar0 & (~1));
auto bar1 = PCI::get_BAR1(pci_address());
auto primary_control_io = (bar1 == 0x1 || bar1 == 0) ? IOAddress(0x3F6) : IOAddress(bar1 & (~1));
auto bar2 = PCI::get_BAR2(pci_address());
auto secondary_base_io = (bar2 == 0x1 || bar2 == 0) ? IOAddress(0x170) : IOAddress(bar2 & (~1));
auto bar3 = PCI::get_BAR3(pci_address());
auto secondary_control_io = (bar3 == 0x1 || bar3 == 0) ? IOAddress(0x376) : IOAddress(bar3 & (~1));
auto irq_line = m_interrupt_line.value();
if (is_pci_native_mode_enabled()) {
VERIFY(irq_line != 0);
}
if (is_pci_native_mode_enabled_on_primary_channel()) {
if (force_pio)
m_channels.append(IDEChannel::create(*this, irq_line, { primary_base_io, primary_control_io }, IDEChannel::ChannelType::Primary));
else
m_channels.append(BMIDEChannel::create(*this, irq_line, { primary_base_io, primary_control_io, bus_master_base }, IDEChannel::ChannelType::Primary));
} else {
if (force_pio)
m_channels.append(IDEChannel::create(*this, { primary_base_io, primary_control_io }, IDEChannel::ChannelType::Primary));
else
m_channels.append(BMIDEChannel::create(*this, { primary_base_io, primary_control_io, bus_master_base }, IDEChannel::ChannelType::Primary));
}
m_channels[0].enable_irq();
if (is_pci_native_mode_enabled_on_secondary_channel()) {
if (force_pio)
m_channels.append(IDEChannel::create(*this, irq_line, { secondary_base_io, secondary_control_io }, IDEChannel::ChannelType::Secondary));
else
m_channels.append(BMIDEChannel::create(*this, irq_line, { secondary_base_io, secondary_control_io, bus_master_base.offset(8) }, IDEChannel::ChannelType::Secondary));
} else {
if (force_pio)
m_channels.append(IDEChannel::create(*this, { secondary_base_io, secondary_control_io }, IDEChannel::ChannelType::Secondary));
else
m_channels.append(BMIDEChannel::create(*this, { secondary_base_io, secondary_control_io, bus_master_base.offset(8) }, IDEChannel::ChannelType::Secondary));
}
m_channels[1].enable_irq();
}
}


@@ -0,0 +1,41 @@
/*
* Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Storage/StorageDevice.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class PCIIDEController final : public IDEController
, public PCI::Device {
public:
static NonnullRefPtr<PCIIDEController> initialize(PCI::DeviceIdentifier const&, bool force_pio);
bool is_bus_master_capable() const;
bool is_pci_native_mode_enabled() const;
private:
bool is_pci_native_mode_enabled_on_primary_channel() const;
bool is_pci_native_mode_enabled_on_secondary_channel() const;
PCIIDEController(PCI::DeviceIdentifier const&, bool force_pio);
RefPtr<StorageDevice> device_by_channel_and_position(u32 index) const;
void initialize(bool force_pio);
void detect_disks();
// FIXME: Find a better way to get the ProgrammingInterface
PCI::ProgrammingInterface m_prog_if;
PCI::InterruptLine m_interrupt_line;
};
}