
Kernel: Move the Storage directory to be a new directory under Devices

The Storage subsystem, like the Audio and HID subsystems, exposes Unix
device files (for example, in the /dev directory). To ensure consistency
across the repository, we should move the Storage subsystem into the
Kernel/Devices directory, like the other two subsystems mentioned.
Liav A 2023-03-18 13:32:12 +02:00 committed by Jelle Raaijmakers
parent f3a58f3a5a
commit 500b7b08d6
59 changed files with 133 additions and 133 deletions
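
The change itself is a mechanical path rename across the tree. A representative include-path hunk (illustrative only; the old path is reconstructed from the commit title, not shown in this view) looks like:

-#include <Kernel/Storage/ATA/AHCI/Controller.h>
+#include <Kernel/Devices/Storage/ATA/AHCI/Controller.h>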


@@ -0,0 +1,231 @@
/*
* Copyright (c) 2021-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Atomic.h>
#include <AK/BuiltinWrappers.h>
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Devices/Storage/ATA/AHCI/Controller.h>
#include <Kernel/Devices/Storage/ATA/AHCI/InterruptHandler.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Memory/MemoryManager.h>
namespace Kernel {
UNMAP_AFTER_INIT ErrorOr<NonnullRefPtr<AHCIController>> AHCIController::initialize(PCI::DeviceIdentifier const& pci_device_identifier)
{
auto controller = adopt_ref_if_nonnull(new (nothrow) AHCIController(pci_device_identifier)).release_nonnull();
TRY(controller->initialize_hba(pci_device_identifier));
return controller;
}
ErrorOr<void> AHCIController::reset()
{
dmesgln_pci(*this, "{}: AHCI controller reset", device_identifier().address());
{
SpinlockLocker locker(m_hba_control_lock);
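// Setting GHC.HR (bit 0) asks the HBA to reset itself; the HBA clears the bit
// once the reset is complete, which the loop below polls for.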
hba().control_regs.ghc = 1;
dbgln_if(AHCI_DEBUG, "{}: AHCI Controller reset", device_identifier().address());
full_memory_barrier();
size_t retry = 0;
// Note: The HBA is locked or hung if we waited more than 1 second!
while (true) {
if (retry > 1000)
return Error::from_errno(ETIMEDOUT);
if (!(hba().control_regs.ghc & 1))
break;
microseconds_delay(1000);
retry++;
}
// Note: Turn on AHCI HBA and Global HBA Interrupts.
full_memory_barrier();
hba().control_regs.ghc = (1 << 31) | (1 << 1);
full_memory_barrier();
}
// Note: According to the AHCI spec, the PI register indicates which ports are exposed by the HBA.
// It is loaded by the BIOS. It indicates which of the ports the HBA supports are available for
// software to use. For example, on an HBA that supports 6 ports as indicated in CAP.NP, only
// ports 1 and 3 could be available, with ports 0, 2, 4, and 5 being unavailable.
// This means that even without clearing the AHCI ports array, we can never encounter
// stale left-over ports in there. We still clear the array for the sake of clarity and
// completeness, as it doesn't harm anything anyway.
m_ports.fill({});
auto implemented_ports = AHCI::MaskedBitField((u32 volatile&)(hba().control_regs.pi));
for (auto index : implemented_ports.to_vector()) {
auto port = AHCIPort::create(*this, m_hba_capabilities, static_cast<volatile AHCI::PortRegisters&>(hba().port_regs[index]), index).release_value_but_fixme_should_propagate_errors();
m_ports[index] = port;
port->reset();
}
return {};
}
ErrorOr<void> AHCIController::shutdown()
{
return Error::from_errno(ENOTIMPL);
}
size_t AHCIController::devices_count() const
{
SpinlockLocker locker(m_hba_control_lock);
size_t count = 0;
for (auto port : m_ports) {
if (port && port->connected_device())
count++;
}
return count;
}
void AHCIController::start_request(ATADevice const& device, AsyncBlockDeviceRequest& request)
{
auto port = m_ports[device.ata_address().port];
VERIFY(port);
port->start_request(request);
}
void AHCIController::complete_current_request(AsyncDeviceRequest::RequestResult)
{
VERIFY_NOT_REACHED();
}
volatile AHCI::PortRegisters& AHCIController::port(size_t port_number) const
{
VERIFY(port_number < (size_t)AHCI::Limits::MaxPorts);
return static_cast<volatile AHCI::PortRegisters&>(hba().port_regs[port_number]);
}
volatile AHCI::HBA& AHCIController::hba() const
{
return const_cast<AHCI::HBA&>(*m_hba_mapping);
}
UNMAP_AFTER_INIT AHCIController::AHCIController(PCI::DeviceIdentifier const& pci_device_identifier)
: ATAController()
, PCI::Device(const_cast<PCI::DeviceIdentifier&>(pci_device_identifier))
{
}
UNMAP_AFTER_INIT AHCI::HBADefinedCapabilities AHCIController::capabilities() const
{
u32 capabilities = hba().control_regs.cap;
u32 extended_capabilities = hba().control_regs.cap2;
dbgln_if(AHCI_DEBUG, "{}: AHCI Controller Capabilities = {:#08x}, Extended Capabilities = {:#08x}", device_identifier().address(), capabilities, extended_capabilities);
return (AHCI::HBADefinedCapabilities) {
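// The first three fields decode CAP.NP (bits 4:0) and CAP.NCS (bits 12:08),
// which are zero-based counts (hence the +1), and CAP.ISS (bits 23:20),
// the interface speed generation.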
(capabilities & 0b11111) + 1,
((capabilities >> 8) & 0b11111) + 1,
(u8)((capabilities >> 20) & 0b1111),
(capabilities & (u32)(AHCI::HBACapabilities::SXS)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::EMS)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::CCCS)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::PSC)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SSC)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::PMD)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::FBSS)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SPM)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SAM)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SCLO)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SAL)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SALP)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SSS)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SMPS)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SSNTF)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::SNCQ)) != 0,
(capabilities & (u32)(AHCI::HBACapabilities::S64A)) != 0,
(extended_capabilities & (u32)(AHCI::HBACapabilitiesExtended::BOH)) != 0,
(extended_capabilities & (u32)(AHCI::HBACapabilitiesExtended::NVMP)) != 0,
(extended_capabilities & (u32)(AHCI::HBACapabilitiesExtended::APST)) != 0,
(extended_capabilities & (u32)(AHCI::HBACapabilitiesExtended::SDS)) != 0,
(extended_capabilities & (u32)(AHCI::HBACapabilitiesExtended::SADM)) != 0,
(extended_capabilities & (u32)(AHCI::HBACapabilitiesExtended::DESO)) != 0
};
}
UNMAP_AFTER_INIT ErrorOr<Memory::TypedMapping<AHCI::HBA volatile>> AHCIController::map_default_hba_region(PCI::DeviceIdentifier const& pci_device_identifier)
{
return Memory::map_typed_writable<AHCI::HBA volatile>(PhysicalAddress(PCI::get_BAR5(pci_device_identifier)));
}
AHCIController::~AHCIController() = default;
UNMAP_AFTER_INIT ErrorOr<void> AHCIController::initialize_hba(PCI::DeviceIdentifier const& pci_device_identifier)
{
m_hba_mapping = TRY(map_default_hba_region(pci_device_identifier));
m_hba_capabilities = capabilities();
u32 version = hba().control_regs.version;
hba().control_regs.ghc = 0x80000000; // Ensure that HBA knows we are AHCI aware.
PCI::enable_bus_mastering(device_identifier());
TRY(reserve_irqs(1, true));
auto irq = MUST(allocate_irq(0));
enable_global_interrupts();
auto implemented_ports = AHCI::MaskedBitField((u32 volatile&)(hba().control_regs.pi));
m_irq_handler = AHCIInterruptHandler::create(*this, irq, implemented_ports).release_value_but_fixme_should_propagate_errors();
TRY(reset());
dbgln_if(AHCI_DEBUG, "{}: AHCI Controller Version = {:#08x}", device_identifier().address(), version);
dbgln("{}: AHCI command list entries count - {}", device_identifier().address(), m_hba_capabilities.max_command_list_entries_count);
return {};
}
void AHCIController::handle_interrupt_for_port(Badge<AHCIInterruptHandler>, u32 port_index) const
{
auto port = m_ports[port_index];
VERIFY(port);
port->handle_interrupt();
}
void AHCIController::disable_global_interrupts() const
{
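// 0xfffffffd clears GHC.IE (bit 1), masking all interrupts from the HBA.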
hba().control_regs.ghc = hba().control_regs.ghc & 0xfffffffd;
}
void AHCIController::enable_global_interrupts() const
{
hba().control_regs.ghc = hba().control_regs.ghc | (1 << 1);
}
LockRefPtr<StorageDevice> AHCIController::device_by_port(u32 port_index) const
{
SpinlockLocker locker(m_hba_control_lock);
auto port = m_ports[port_index];
if (!port)
return {};
SpinlockLocker port_hard_locker(port->m_hard_lock);
return port->connected_device();
}
LockRefPtr<StorageDevice> AHCIController::device(u32 index) const
{
Vector<NonnullLockRefPtr<StorageDevice>> connected_devices;
u32 pi = hba().control_regs.pi;
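// Note: bit_scan_forward() returns the 1-based index of the lowest set bit,
// or 0 if no bit is set, so the loop below visits each implemented port once.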
u32 bit = bit_scan_forward(pi);
while (bit) {
dbgln_if(AHCI_DEBUG, "Checking implemented port {}, pi {:b}", bit - 1, pi);
pi &= ~(1u << (bit - 1));
auto checked_device = device_by_port(bit - 1);
bit = bit_scan_forward(pi);
if (checked_device.is_null())
continue;
connected_devices.append(checked_device.release_nonnull());
}
dbgln_if(AHCI_DEBUG, "Connected device count: {}, Index: {}", connected_devices.size(), index);
if (index >= connected_devices.size())
return nullptr;
return connected_devices[index];
}
}


@@ -0,0 +1,68 @@
/*
* Copyright (c) 2021-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Devices/Storage/ATA/AHCI/Definitions.h>
#include <Kernel/Devices/Storage/ATA/ATAController.h>
#include <Kernel/Devices/Storage/StorageDevice.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Memory/TypedMapping.h>
#include <Kernel/Sections.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class AHCIInterruptHandler;
class AHCIPort;
class AHCIController final : public ATAController
, public PCI::Device {
friend class AHCIInterruptHandler;
public:
static ErrorOr<NonnullRefPtr<AHCIController>> initialize(PCI::DeviceIdentifier const& pci_device_identifier);
virtual ~AHCIController() override;
virtual StringView device_name() const override { return "AHCI"sv; }
virtual LockRefPtr<StorageDevice> device(u32 index) const override;
virtual ErrorOr<void> reset() override;
virtual ErrorOr<void> shutdown() override;
virtual size_t devices_count() const override;
virtual void start_request(ATADevice const&, AsyncBlockDeviceRequest&) override;
virtual void complete_current_request(AsyncDeviceRequest::RequestResult) override;
void handle_interrupt_for_port(Badge<AHCIInterruptHandler>, u32 port_index) const;
private:
void disable_global_interrupts() const;
void enable_global_interrupts() const;
explicit AHCIController(PCI::DeviceIdentifier const&);
ErrorOr<void> initialize_hba(PCI::DeviceIdentifier const&);
AHCI::HBADefinedCapabilities capabilities() const;
LockRefPtr<StorageDevice> device_by_port(u32 index) const;
volatile AHCI::PortRegisters& port(size_t port_number) const;
ErrorOr<Memory::TypedMapping<AHCI::HBA volatile>> map_default_hba_region(PCI::DeviceIdentifier const&);
volatile AHCI::HBA& hba() const;
Array<LockRefPtr<AHCIPort>, 32> m_ports;
Memory::TypedMapping<AHCI::HBA volatile> m_hba_mapping;
AHCI::HBADefinedCapabilities m_hba_capabilities;
// FIXME: There could be multiple IRQ (MSI) handlers for AHCI. Find a way to use all of them.
OwnPtr<AHCIInterruptHandler> m_irq_handler;
// Note: This lock is intended to be locked when doing changes to HBA registers
// that affect its core functionality in a manner that controls all attached storage devices
// to the HBA SATA ports.
mutable Spinlock<LockRank::None> m_hba_control_lock {};
};
}


@@ -0,0 +1,429 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
namespace Kernel::FIS {
enum class Type : u8 {
RegisterHostToDevice = 0x27,
RegisterDeviceToHost = 0x34,
DMAActivate = 0x39,
DMASetup = 0x41,
Data = 0x46,
BISTActivate = 0x58,
PIOSetup = 0x5F,
SetDeviceBits = 0xA1
};
enum class DwordCount : size_t {
RegisterHostToDevice = 5,
RegisterDeviceToHost = 5,
DMAActivate = 1,
DMASetup = 7,
PIOSetup = 5,
SetDeviceBits = 2
};
enum HeaderAttributes : u8 {
C = (1 << 7), /* Updates Command register */
};
struct [[gnu::packed]] Header {
u8 fis_type;
u8 port_multiplier;
};
}
namespace Kernel::FIS::HostToDevice {
struct [[gnu::packed]] Register {
Header header;
u8 command;
u8 features_low;
u8 lba_low[3];
u8 device;
u8 lba_high[3];
u8 features_high;
u16 count;
u8 icc; /* Isochronous Command Completion */
u8 control;
u32 reserved;
};
}
namespace Kernel::FIS::DeviceToHost {
struct [[gnu::packed]] Register {
Header header;
u8 status;
u8 error;
u8 lba_low[3];
u8 device;
u8 lba_high[3];
u8 reserved;
u16 count;
u8 reserved2[6];
};
struct [[gnu::packed]] SetDeviceBits {
Header header;
u8 status;
u8 error;
u32 protocol_specific;
};
struct [[gnu::packed]] DMAActivate {
Header header;
u16 reserved;
};
struct [[gnu::packed]] PIOSetup {
Header header;
u8 status;
u8 error;
u8 lba_low[3];
u8 device;
u8 lba_high[3];
u8 reserved;
u16 count;
u8 reserved2;
u8 e_status;
u16 transfer_count;
u16 reserved3;
};
}
namespace Kernel::FIS::BiDirectional {
struct [[gnu::packed]] Data {
Header header;
u16 reserved;
u32 data[];
};
struct [[gnu::packed]] BISTActivate {
};
struct [[gnu::packed]] DMASetup {
Header header;
u16 reserved;
u32 dma_buffer_identifier_low;
u32 dma_buffer_identifier_high;
u32 reserved2;
u32 dma_buffer_offset;
u32 dma_transfer_count;
u32 reserved3;
};
}
namespace Kernel::AHCI {
class MaskedBitField {
public:
explicit MaskedBitField(u32 volatile& bitfield_register)
: m_bitfield(bitfield_register)
, m_bit_mask(0xffffffff)
{
}
MaskedBitField(u32 volatile& bitfield_register, u32 bit_mask)
: m_bitfield(bitfield_register)
, m_bit_mask(bit_mask)
{
}
void set_at(u8 index) const
{
VERIFY(((1u << index) & m_bit_mask) != 0);
m_bitfield = m_bitfield | ((1u << index) & m_bit_mask);
}
void set_all() const
{
m_bitfield = m_bitfield | (0xffffffff & m_bit_mask);
}
bool is_set_at(u8 port_index) const
{
return m_bitfield & ((1u << port_index) & m_bit_mask);
}
bool is_zeroed() const
{
return (m_bitfield & m_bit_mask) == 0;
}
Vector<u8> to_vector() const
{
// FIXME: Add a sync mechanism!
Vector<u8> indices;
u32 bitfield = m_bitfield & m_bit_mask;
for (size_t index = 0; index < 32; index++) {
if (bitfield & 1) {
indices.append(index);
}
bitfield >>= 1;
}
return indices;
}
u32 bit_mask() const { return m_bit_mask; }
// Disable default implementations that would use surprising integer promotion.
bool operator==(MaskedBitField const&) const = delete;
bool operator<=(MaskedBitField const&) const = delete;
bool operator>=(MaskedBitField const&) const = delete;
bool operator<(MaskedBitField const&) const = delete;
bool operator>(MaskedBitField const&) const = delete;
private:
u32 volatile& m_bitfield;
const u32 m_bit_mask;
};
enum Limits : u16 {
MaxPorts = 32,
MaxCommands = 32,
MaxMultiplierConnectedPorts = 16,
};
enum CommandHeaderAttributes : u16 {
C = (1 << 10), /* Clear Busy upon R_OK */
P = (1 << 7), /* Prefetchable */
W = (1 << 6), /* Write */
A = (1 << 5), /* ATAPI */
R = (1 << 8) /* Reset */
};
enum HBACapabilities : u32 {
S64A = (u32)1 << 31, /* Supports 64-bit Addressing */
SNCQ = 1 << 30, /* Supports Native Command Queuing */
SSNTF = 1 << 29, /* Supports SNotification Register */
SMPS = 1 << 28, /* Supports Mechanical Presence Switch */
SSS = 1 << 27, /* Supports Staggered Spin-up */
SALP = 1 << 26, /* Supports Aggressive Link Power Management */
SAL = 1 << 25, /* Supports Activity LED */
SCLO = 1 << 24, /* Supports Command List Override */
SAM = 1 << 18, /* Supports AHCI mode only */
SPM = 1 << 17, /* Supports Port Multiplier */
FBSS = 1 << 16, /* FIS-based Switching Supported */
PMD = 1 << 15, /* PIO Multiple DRQ Block */
SSC = 1 << 14, /* Slumber State Capable */
PSC = 1 << 13, /* Partial State Capable */
CCCS = 1 << 7, /* Command Completion Coalescing Supported */
EMS = 1 << 6, /* Enclosure Management Supported */
SXS = 1 << 5 /* Supports External SATA */
};
enum HBACapabilitiesExtended : u32 {
DESO = 1 << 5, /* DevSleep Entrance from Slumber Only */
SADM = 1 << 4, /* Supports Aggressive Device Sleep Management */
SDS = 1 << 3, /* Supports Device Sleep */
APST = 1 << 2, /* Automatic Partial to Slumber Transitions */
NVMP = 1 << 1, /* NVMHCI Present */
BOH = 1 << 0, /* BIOS/OS Handoff */
};
// This structure is not defined by the AHCI spec, but is used within the code
struct [[gnu::packed]] HBADefinedCapabilities {
size_t ports_count { 1 };
size_t max_command_list_entries_count { 1 };
u8 interface_speed_generation { 1 };
bool external_sata_supported : 1 { false };
bool enclosure_management_supported : 1 { false };
bool command_completion_coalescing_supported : 1 { false };
bool partial_state_capable : 1 { false };
bool slumber_state_capable : 1 { false };
bool pio_multiple_drq_block : 1 { false };
bool fis_based_switching_supported : 1 { false };
bool port_multiplier_supported : 1 { false };
bool ahci_mode_only : 1 { true };
bool command_list_override_supported : 1 { false };
bool activity_led_supported : 1 { false };
bool aggressive_link_power_management_supported : 1 { false };
bool staggered_spin_up_supported : 1 { false };
bool mechanical_presence_switch_supported : 1 { false };
bool snotification_register_supported : 1 { false };
bool native_command_queuing_supported : 1 { false };
bool addressing_64_bit_supported : 1 { false };
bool bios_os_handoff : 1 { false };
bool nvmhci_present : 1 { false };
bool automatic_partial_to_slumber_transitions : 1 { false };
bool device_sleep_supported : 1 { false };
bool aggressive_device_sleep_management_supported : 1 { false };
bool devsleep_entrance_from_slumber_only : 1 { false };
};
enum class DeviceDetectionInitialization {
NoActionRequested,
PerformInterfaceInitializationSequence,
DisableInterface
};
enum PortInterruptFlag : u32 {
CPD = (u32)1 << 31, /* Cold Port Detect */
TFE = 1 << 30, /* Task File Error */
HBF = 1 << 29, /* Host Bus Fatal Error */
HBD = 1 << 28, /* Host Bus Data Error */
IF = 1 << 27, /* Interface Fatal Error */
INF = 1 << 26, /* Interface Non-fatal Error */
OF = 1 << 24, /* Overflow */
IPM = 1 << 23, /* Incorrect Port Multiplier */
PRC = 1 << 22, /* PhyRdy Change */
DMP = 1 << 7, /* Device Mechanical Presence */
PC = 1 << 6, /* Port Connect Change */
DP = 1 << 5, /* Descriptor Processed */
UF = 1 << 4, /* Unknown FIS */
SDB = 1 << 3, /* Set Device FIS */
DS = 1 << 2, /* DMA Setup FIS */
PS = 1 << 1, /* PIO Setup FIS */
DHR = 1 << 0 /* Device to Host Register FIS */
};
enum SErr : u32 {
DIAG_X = 1 << 26, /* Exchanged */
DIAG_F = 1 << 25, /* Unknown FIS Type */
DIAG_T = 1 << 24, /* Transport state transition error */
DIAG_S = 1 << 23, /* Link sequence error */
DIAG_H = 1 << 22, /* Handshake error */
DIAG_C = 1 << 21, /* CRC error */
DIAG_D = 1 << 20, /* Disparity error */
DIAG_B = 1 << 19, /* 10B to 8B decode error */
DIAG_W = 1 << 18, /* Comm Wake */
DIAG_I = 1 << 17, /* Phy Internal Error */
DIAG_N = 1 << 16, /* PhyRdy Change */
ERR_E = 1 << 11, /* Internal error */
ERR_P = 1 << 10, /* Protocol error */
ERR_C = 1 << 9, /* Persistent communication or data integrity error */
ERR_T = 1 << 8, /* Transient data integrity error */
ERR_M = 1 << 1, /* Recovered communications error */
ERR_I = 1 << 0, /* Recovered data integrity error */
};
class PortInterruptStatusBitField {
public:
explicit PortInterruptStatusBitField(u32 volatile& bitfield_register)
: m_bitfield(bitfield_register)
{
}
u32 raw_value() const { return m_bitfield; }
bool is_set(PortInterruptFlag flag) const { return m_bitfield & (u32)flag; }
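// PxIS is RW1C: writing ones clears the corresponding pending bits.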
void clear() { m_bitfield = 0xffffffff; }
// Disable default implementations that would use surprising integer promotion.
bool operator==(MaskedBitField const&) const = delete;
bool operator<=(MaskedBitField const&) const = delete;
bool operator>=(MaskedBitField const&) const = delete;
bool operator<(MaskedBitField const&) const = delete;
bool operator>(MaskedBitField const&) const = delete;
private:
u32 volatile& m_bitfield;
};
class PortInterruptEnableBitField {
public:
explicit PortInterruptEnableBitField(u32 volatile& bitfield_register)
: m_bitfield(bitfield_register)
{
}
u32 raw_value() const { return m_bitfield; }
bool is_set(PortInterruptFlag flag) { return m_bitfield & (u32)flag; }
void set_at(PortInterruptFlag flag) { m_bitfield = m_bitfield | static_cast<u32>(flag); }
void clear() { m_bitfield = 0; }
bool is_cleared() const { return m_bitfield == 0; }
void set_all() { m_bitfield = 0xffffffff; }
// Disable default implementations that would use surprising integer promotion.
bool operator==(MaskedBitField const&) const = delete;
bool operator<=(MaskedBitField const&) const = delete;
bool operator>=(MaskedBitField const&) const = delete;
bool operator<(MaskedBitField const&) const = delete;
bool operator>(MaskedBitField const&) const = delete;
private:
u32 volatile& m_bitfield;
};
struct [[gnu::packed]] PortRegisters {
u32 clb; /* Port x Command List Base Address */
u32 clbu; /* Port x Command List Base Address Upper 32-Bits */
u32 fb; /* Port x FIS Base Address */
u32 fbu; /* Port x FIS Base Address Upper 32-Bits */
u32 is; /* Port x Interrupt Status */
u32 ie; /* Port x Interrupt Enable */
u32 cmd; /* Port x Command and Status */
u32 reserved;
u32 tfd; /* Port x Task File Data */
u32 sig; /* Port x Signature */
u32 ssts; /* Port x Serial ATA Status (SCR0: SStatus) */
u32 sctl; /* Port x Serial ATA Control (SCR2: SControl) */
u32 serr; /* Port x Serial ATA Error (SCR1: SError) */
u32 sact; /* Port x Serial ATA Active (SCR3: SActive) */
u32 ci; /* Port x Command Issue */
u32 sntf; /* Port x Serial ATA Notification (SCR4: SNotification) */
u32 fbs; /* Port x FIS-based Switching Control */
u32 devslp; /* Port x Device Sleep */
u8 reserved2[0x70 - 0x48];
u8 vs[16]; /* Port x Vendor Specific */
};
struct [[gnu::packed]] GenericHostControl {
u32 cap; /* Host Capabilities */
u32 ghc; /* Global Host Control */
u32 is; /* Interrupt Status */
u32 pi; /* Ports Implemented */
u32 version;
u32 ccc_ctl; /* Command Completion Coalescing Control */
u32 ccc_ports; /* Command Completion Coalescing Ports */
u32 em_loc; /* Enclosure Management Location */
u32 em_ctl; /* Enclosure Management Control */
u32 cap2; /* Host Capabilities Extended */
u32 bohc; /* BIOS/OS Handoff Control and Status */
};
struct [[gnu::packed]] HBA {
GenericHostControl control_regs;
u8 reserved[52];
u8 nvmhci[64];
u8 vendor_specific[96];
PortRegisters port_regs[32];
};
struct [[gnu::packed]] CommandHeader {
u16 attributes;
u16 prdtl; /* Physical Region Descriptor Table Length */
u32 prdbc; /* Physical Region Descriptor Byte Count */
u32 ctba; /* Command Table Descriptor Base Address */
u32 ctbau; /* Command Table Descriptor Base Address Upper 32-bits */
u32 reserved[4];
};
struct [[gnu::packed]] PhysicalRegionDescriptor {
u32 base_low;
u32 base_high;
u32 reserved;
u32 byte_count; /* Bit 31 - Interrupt completion, Bit 0 to 21 - Data Byte Count */
};
struct [[gnu::packed]] CommandTable {
u8 command_fis[64];
u8 atapi_command[32];
u8 reserved[32];
PhysicalRegionDescriptor descriptors[];
};
}


@@ -0,0 +1,55 @@
/*
* Copyright (c) 2021-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Devices/Storage/ATA/AHCI/InterruptHandler.h>
namespace Kernel {
UNMAP_AFTER_INIT ErrorOr<NonnullOwnPtr<AHCIInterruptHandler>> AHCIInterruptHandler::create(AHCIController& controller, u8 irq, AHCI::MaskedBitField taken_ports)
{
auto port_handler = TRY(adopt_nonnull_own_or_enomem(new (nothrow) AHCIInterruptHandler(controller, irq, taken_ports)));
port_handler->allocate_resources_and_initialize_ports();
return port_handler;
}
void AHCIInterruptHandler::allocate_resources_and_initialize_ports()
{
// Clear pending interrupts, if there are any!
m_pending_ports_interrupts.set_all();
enable_irq();
}
UNMAP_AFTER_INIT AHCIInterruptHandler::AHCIInterruptHandler(AHCIController& controller, u8 irq, AHCI::MaskedBitField taken_ports)
: PCIIRQHandler(controller, irq)
, m_parent_controller(controller)
, m_taken_ports(taken_ports)
, m_pending_ports_interrupts(create_pending_ports_interrupts_bitfield())
{
dbgln_if(AHCI_DEBUG, "AHCI Port Handler: IRQ {}", irq);
}
AHCI::MaskedBitField AHCIInterruptHandler::create_pending_ports_interrupts_bitfield() const
{
return AHCI::MaskedBitField((u32 volatile&)m_parent_controller->hba().control_regs.is, m_taken_ports.bit_mask());
}
AHCIInterruptHandler::~AHCIInterruptHandler() = default;
bool AHCIInterruptHandler::handle_irq(RegisterState const&)
{
dbgln_if(AHCI_DEBUG, "AHCI Port Handler: IRQ received");
if (m_pending_ports_interrupts.is_zeroed())
return false;
for (auto port_index : m_pending_ports_interrupts.to_vector()) {
dbgln_if(AHCI_DEBUG, "AHCI Port Handler: Handling IRQ for port {}", port_index);
m_parent_controller->handle_interrupt_for_port({}, port_index);
// We do this to clear the pending interrupt after we handled it.
m_pending_ports_interrupts.set_at(port_index);
}
return true;
}
}


@@ -0,0 +1,59 @@
/*
* Copyright (c) 2021-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Devices/Device.h>
#include <Kernel/Devices/Storage/ATA/AHCI/Controller.h>
#include <Kernel/Devices/Storage/ATA/AHCI/Port.h>
#include <Kernel/Devices/Storage/StorageDevice.h>
#include <Kernel/Interrupts/PCIIRQHandler.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/PhysicalAddress.h>
#include <Kernel/Random.h>
#include <Kernel/Sections.h>
#include <Kernel/WaitQueue.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class AHCIController;
class AHCIPort;
class AHCIInterruptHandler final : public PCIIRQHandler {
friend class AHCIController;
public:
static ErrorOr<NonnullOwnPtr<AHCIInterruptHandler>> create(AHCIController&, u8 irq, AHCI::MaskedBitField taken_ports);
virtual ~AHCIInterruptHandler() override;
virtual StringView purpose() const override { return "SATA IRQ Handler"sv; }
bool is_responsible_for_port_index(u32 port_index) const { return m_taken_ports.is_set_at(port_index); }
private:
AHCIInterruptHandler(AHCIController&, u8 irq, AHCI::MaskedBitField taken_ports);
void allocate_resources_and_initialize_ports();
// ^IRQHandler
virtual bool handle_irq(RegisterState const&) override;
enum class Direction : u8 {
Read,
Write,
};
AHCI::MaskedBitField create_pending_ports_interrupts_bitfield() const;
// Data members
NonnullLockRefPtr<AHCIController> m_parent_controller;
AHCI::MaskedBitField m_taken_ports;
AHCI::MaskedBitField m_pending_ports_interrupts;
};
}


@@ -0,0 +1,815 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
// For more information about locking in this code
// please look at Documentation/Kernel/AHCILocking.md
#include <AK/Atomic.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Devices/Storage/ATA/AHCI/Port.h>
#include <Kernel/Devices/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Devices/Storage/ATA/Definitions.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/ScatterGatherList.h>
#include <Kernel/Memory/TypedMapping.h>
#include <Kernel/WorkQueue.h>
namespace Kernel {
UNMAP_AFTER_INIT ErrorOr<NonnullLockRefPtr<AHCIPort>> AHCIPort::create(AHCIController const& controller, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
{
auto identify_buffer_page = MUST(MM.allocate_physical_page());
auto port = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) AHCIPort(controller, move(identify_buffer_page), hba_capabilities, registers, port_index)));
TRY(port->allocate_resources_and_initialize_ports());
return port;
}
ErrorOr<void> AHCIPort::allocate_resources_and_initialize_ports()
{
if (is_interface_disabled()) {
m_disabled_by_firmware = true;
return {};
}
m_fis_receive_page = TRY(MM.allocate_physical_page());
for (size_t index = 0; index < 1; index++) {
auto dma_page = TRY(MM.allocate_physical_page());
m_dma_buffers.append(move(dma_page));
}
for (size_t index = 0; index < 1; index++) {
auto command_table_page = TRY(MM.allocate_physical_page());
m_command_table_pages.append(move(command_table_page));
}
m_command_list_region = TRY(MM.allocate_dma_buffer_page("AHCI Port Command List"sv, Memory::Region::Access::ReadWrite, m_command_list_page));
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Command list page at {}", representative_port_index(), m_command_list_page->paddr());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: FIS receive page at {}", representative_port_index(), m_fis_receive_page->paddr());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Command list region at {}", representative_port_index(), m_command_list_region->vaddr());
return {};
}
UNMAP_AFTER_INIT AHCIPort::AHCIPort(AHCIController const& controller, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities hba_capabilities, volatile AHCI::PortRegisters& registers, u32 port_index)
: m_port_index(port_index)
, m_hba_capabilities(hba_capabilities)
, m_identify_buffer_page(move(identify_buffer_page))
, m_port_registers(registers)
, m_parent_controller(controller)
, m_interrupt_status((u32 volatile&)m_port_registers.is)
, m_interrupt_enable((u32 volatile&)m_port_registers.ie)
{
}
void AHCIPort::clear_sata_error_register() const
{
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Clearing SATA error register.", representative_port_index());
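// PxSERR is RW1C, so writing back the value we just read clears every bit
// that is currently set.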
m_port_registers.serr = m_port_registers.serr;
}
void AHCIPort::handle_interrupt()
{
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Interrupt handled, PxIS {}", representative_port_index(), m_interrupt_status.raw_value());
if (m_interrupt_status.raw_value() == 0) {
return;
}
if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::PRC) && m_interrupt_status.is_set(AHCI::PortInterruptFlag::PC)) {
clear_sata_error_register();
if ((m_port_registers.ssts & 0xf) != 3 && m_connected_device) {
m_connected_device->prepare_for_unplug();
StorageManagement::the().remove_device(*m_connected_device);
auto work_item_creation_result = g_io_work->try_queue([this]() {
m_connected_device.clear();
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
} else {
auto work_item_creation_result = g_io_work->try_queue([this]() {
reset();
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
return;
}
if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::PRC)) {
clear_sata_error_register();
}
if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::INF)) {
// We need to defer the reset, because we can receive interrupts when
// resetting the device.
auto work_item_creation_result = g_io_work->try_queue([this]() {
reset();
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
return;
}
if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::IF) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::TFE) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::HBD) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::HBF)) {
auto work_item_creation_result = g_io_work->try_queue([this]() {
recover_from_fatal_error();
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
return;
}
if (m_interrupt_status.is_set(AHCI::PortInterruptFlag::DHR) || m_interrupt_status.is_set(AHCI::PortInterruptFlag::PS)) {
m_wait_for_completion = false;
// Now schedule reading/writing the buffer as soon as we leave the irq handler.
// This is important so that we can safely access the buffers, which could
// trigger page faults.
if (!m_current_request) {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request handled, probably identify request", representative_port_index());
} else {
auto work_item_creation_result = g_io_work->try_queue([this]() {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request handled", representative_port_index());
MutexLocker locker(m_lock);
VERIFY(m_current_request);
VERIFY(m_current_scatter_list);
if (!m_connected_device) {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request success", representative_port_index());
complete_current_request(AsyncDeviceRequest::Failure);
return;
}
if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
if (auto result = m_current_request->write_to_buffer(m_current_request->buffer(), m_current_scatter_list->dma_region().as_ptr(), m_connected_device->block_size() * m_current_request->block_count()); result.is_error()) {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request failure, memory fault occurred when reading in data.", representative_port_index());
m_current_scatter_list = nullptr;
complete_current_request(AsyncDeviceRequest::MemoryFault);
return;
}
}
m_current_scatter_list = nullptr;
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request success", representative_port_index());
complete_current_request(AsyncDeviceRequest::Success);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
}
m_interrupt_status.clear();
}
bool AHCIPort::is_interrupts_enabled() const
{
return !m_interrupt_enable.is_cleared();
}
void AHCIPort::recover_from_fatal_error()
{
MutexLocker locker(m_lock);
SpinlockLocker lock(m_hard_lock);
LockRefPtr<AHCIController> controller = m_parent_controller.strong_ref();
if (!controller) {
dmesgln("AHCI Port {}: fatal error, controller not available", representative_port_index());
return;
}
dmesgln("{}: AHCI Port {} fatal error, shutting down!", controller->device_identifier().address(), representative_port_index());
dmesgln("{}: AHCI Port {} fatal error, SError {}", controller->device_identifier().address(), representative_port_index(), (u32)m_port_registers.serr);
stop_command_list_processing();
stop_fis_receiving();
m_interrupt_enable.clear();
}
bool AHCIPort::reset()
{
MutexLocker locker(m_lock);
SpinlockLocker lock(m_hard_lock);
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Resetting", representative_port_index());
if (m_disabled_by_firmware) {
dmesgln("AHCI Port {}: Disabled by firmware ", representative_port_index());
return false;
}
full_memory_barrier();
m_interrupt_enable.clear();
m_interrupt_status.clear();
full_memory_barrier();
start_fis_receiving();
full_memory_barrier();
clear_sata_error_register();
full_memory_barrier();
if (!initiate_sata_reset()) {
return false;
}
return initialize();
}
UNMAP_AFTER_INIT bool AHCIPort::initialize_without_reset()
{
MutexLocker locker(m_lock);
SpinlockLocker lock(m_hard_lock);
dmesgln("AHCI Port {}: {}", representative_port_index(), try_disambiguate_sata_status());
return initialize();
}
bool AHCIPort::initialize()
{
VERIFY(m_lock.is_locked());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Initialization. Signature = {:#08x}", representative_port_index(), static_cast<u32>(m_port_registers.sig));
if (!is_phy_enabled()) {
// Note: If PHY is not enabled, just clear the interrupt status and enable interrupts, in case
// we are going to hotplug a device later.
m_interrupt_status.clear();
m_interrupt_enable.set_all();
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Bailing initialization, Phy is not enabled.", representative_port_index());
return false;
}
rebase();
power_on();
spin_up();
clear_sata_error_register();
start_fis_receiving();
set_active_state();
m_interrupt_status.clear();
m_interrupt_enable.set_all();
full_memory_barrier();
// This actually enables the port...
start_command_list_processing();
full_memory_barrier();
size_t logical_sector_size = 512;
size_t physical_sector_size = 512;
u64 max_addressable_sector = 0;
if (identify_device()) {
auto identify_block = Memory::map_typed<ATAIdentifyBlock>(m_identify_buffer_page->paddr()).release_value_but_fixme_should_propagate_errors();
// Check if word 106 is valid before using it!
if ((identify_block->physical_sector_size_to_logical_sector_size >> 14) == 1) {
if (identify_block->physical_sector_size_to_logical_sector_size & (1 << 12)) {
VERIFY(identify_block->logical_sector_size != 0);
logical_sector_size = identify_block->logical_sector_size;
}
if (identify_block->physical_sector_size_to_logical_sector_size & (1 << 13)) {
physical_sector_size = logical_sector_size << (identify_block->physical_sector_size_to_logical_sector_size & 0xf);
}
}
// Check if the device supports LBA48 mode
if (identify_block->commands_and_feature_sets_supported[1] & (1 << 10)) {
max_addressable_sector = identify_block->user_addressable_logical_sectors_count;
} else {
max_addressable_sector = identify_block->max_28_bit_addressable_logical_sector;
}
if (is_atapi_attached()) {
m_port_registers.cmd = m_port_registers.cmd | (1 << 24);
}
dmesgln("AHCI Port {}: Device found, Capacity={}, Bytes per logical sector={}, Bytes per physical sector={}", representative_port_index(), max_addressable_sector * logical_sector_size, logical_sector_size, physical_sector_size);
// FIXME: We don't support ATAPI devices yet, so for now we don't "create" them
if (!is_atapi_attached()) {
LockRefPtr<AHCIController> controller = m_parent_controller.strong_ref();
if (!controller) {
dmesgln("AHCI Port {}: Device found, but parent controller is not available, abort.", representative_port_index());
return false;
}
m_connected_device = ATADiskDevice::create(*controller, { m_port_index, 0 }, 0, logical_sector_size, max_addressable_sector);
} else {
dbgln("AHCI Port {}: Ignoring ATAPI devices as we don't support them.", representative_port_index());
}
}
return true;
}
char const* AHCIPort::try_disambiguate_sata_status()
{
switch (m_port_registers.ssts & 0xf) {
case 0:
return "Device not detected, Phy not enabled";
case 1:
return "Device detected, Phy disabled";
case 3:
return "Device detected, Phy enabled";
case 4:
return "interface disabled";
}
VERIFY_NOT_REACHED();
}
void AHCIPort::try_disambiguate_sata_error()
{
dmesgln("AHCI Port {}: SErr breakdown:", representative_port_index());
dmesgln("AHCI Port {}: Diagnostics:", representative_port_index());
constexpr u32 diagnostics_bitfield = 0xFFFF0000;
if ((m_port_registers.serr & diagnostics_bitfield) > 0) {
if (m_port_registers.serr & AHCI::SErr::DIAG_X)
dmesgln("AHCI Port {}: - Exchanged", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_F)
dmesgln("AHCI Port {}: - Unknown FIS Type", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_T)
dmesgln("AHCI Port {}: - Transport state transition error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_S)
dmesgln("AHCI Port {}: - Link sequence error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_H)
dmesgln("AHCI Port {}: - Handshake error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_C)
dmesgln("AHCI Port {}: - CRC error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_D)
dmesgln("AHCI Port {}: - Disparity error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_B)
dmesgln("AHCI Port {}: - 10B to 8B decode error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_W)
dmesgln("AHCI Port {}: - Comm Wake", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_I)
dmesgln("AHCI Port {}: - Phy Internal Error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::DIAG_N)
dmesgln("AHCI Port {}: - PhyRdy Change", representative_port_index());
} else {
dmesgln("AHCI Port {}: - No diagnostic information provided.", representative_port_index());
}
dmesgln("AHCI Port {}: Error(s):", representative_port_index());
constexpr u32 error_bitfield = 0xFFFF;
if ((m_port_registers.serr & error_bitfield) > 0) {
if (m_port_registers.serr & AHCI::SErr::ERR_E)
dmesgln("AHCI Port {}: - Internal error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::ERR_P)
dmesgln("AHCI Port {}: - Protocol error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::ERR_C)
dmesgln("AHCI Port {}: - Persistent communication or data integrity error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::ERR_T)
dmesgln("AHCI Port {}: - Transient data integrity error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::ERR_M)
dmesgln("AHCI Port {}: - Recovered communications error", representative_port_index());
if (m_port_registers.serr & AHCI::SErr::ERR_I)
dmesgln("AHCI Port {}: - Recovered data integrity error", representative_port_index());
} else {
dmesgln("AHCI Port {}: - No error information provided.", representative_port_index());
}
}
void AHCIPort::rebase()
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
VERIFY(!m_command_list_page.is_null() && !m_fis_receive_page.is_null());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Rebasing.", representative_port_index());
full_memory_barrier();
stop_command_list_processing();
stop_fis_receiving();
full_memory_barrier();
// Try to wait 1 second for HBA to clear Command List Running and FIS Receive Running
wait_until_condition_met_or_timeout(1000, 1000, [this]() -> bool {
return !(m_port_registers.cmd & (1 << 15)) && !(m_port_registers.cmd & (1 << 14));
});
full_memory_barrier();
m_port_registers.clbu = 0;
m_port_registers.clb = m_command_list_page->paddr().get();
m_port_registers.fbu = 0;
m_port_registers.fb = m_fis_receive_page->paddr().get();
}
bool AHCIPort::is_operable() const
{
// Note: The definition of "operable" is somewhat ambiguous, but we determine it
// by the three conditions checked below.
return (!m_command_list_page.is_null())
&& (!m_fis_receive_page.is_null())
&& ((m_port_registers.cmd & (1 << 14)) != 0);
}
void AHCIPort::set_active_state() const
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Switching to active state.", representative_port_index());
m_port_registers.cmd = (m_port_registers.cmd & 0x0ffffff) | (1 << 28);
}
void AHCIPort::set_sleep_state() const
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
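// ICC (bits 31:28) = 8 requests the DevSleep interface power state.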
m_port_registers.cmd = (m_port_registers.cmd & 0x0ffffff) | (0b1000 << 28);
}
size_t AHCIPort::calculate_descriptors_count(size_t block_count) const
{
VERIFY(m_connected_device);
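// Example: 8 blocks of 512 bytes round up to one 4 KiB page, so a single DMA region suffices.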
size_t needed_dma_regions_count = Memory::page_round_up((block_count * m_connected_device->block_size())).value() / PAGE_SIZE;
VERIFY(needed_dma_regions_count <= m_dma_buffers.size());
return needed_dma_regions_count;
}
Optional<AsyncDeviceRequest::RequestResult> AHCIPort::prepare_and_set_scatter_list(AsyncBlockDeviceRequest& request)
{
VERIFY(m_lock.is_locked());
VERIFY(request.block_count() > 0);
Vector<NonnullRefPtr<Memory::PhysicalPage>> allocated_dma_regions;
for (size_t index = 0; index < calculate_descriptors_count(request.block_count()); index++) {
allocated_dma_regions.append(m_dma_buffers.at(index));
}
m_current_scatter_list = Memory::ScatterGatherList::try_create(request, allocated_dma_regions.span(), m_connected_device->block_size(), "AHCI Scattered DMA"sv).release_value_but_fixme_should_propagate_errors();
if (!m_current_scatter_list)
return AsyncDeviceRequest::Failure;
if (request.request_type() == AsyncBlockDeviceRequest::Write) {
if (auto result = request.read_from_buffer(request.buffer(), m_current_scatter_list->dma_region().as_ptr(), m_connected_device->block_size() * request.block_count()); result.is_error()) {
return AsyncDeviceRequest::MemoryFault;
}
}
return {};
}
void AHCIPort::start_request(AsyncBlockDeviceRequest& request)
{
MutexLocker locker(m_lock);
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request start", representative_port_index());
VERIFY(!m_current_request);
VERIFY(!m_current_scatter_list);
m_current_request = request;
auto result = prepare_and_set_scatter_list(request);
if (result.has_value()) {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request failure.", representative_port_index());
locker.unlock();
complete_current_request(result.value());
return;
}
auto success = access_device(request.request_type(), request.block_index(), request.block_count());
if (!success) {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request failure.", representative_port_index());
locker.unlock();
complete_current_request(AsyncDeviceRequest::Failure);
return;
}
}
void AHCIPort::complete_current_request(AsyncDeviceRequest::RequestResult result)
{
VERIFY(m_current_request);
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(result);
}
bool AHCIPort::spin_until_ready() const
{
VERIFY(m_lock.is_locked());
size_t spin = 0;
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Spinning until ready.", representative_port_index());
while ((m_port_registers.tfd & (ATA_SR_BSY | ATA_SR_DRQ)) && spin <= 100) {
microseconds_delay(1000);
spin++;
}
if (spin > 100) {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Spin exceeded the 100 millisecond threshold", representative_port_index());
return false;
}
return true;
}
bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64 lba, u8 block_count)
{
VERIFY(m_connected_device);
VERIFY(is_operable());
VERIFY(m_lock.is_locked());
VERIFY(m_current_scatter_list);
SpinlockLocker lock(m_hard_lock);
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {}", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ? "write" : "read", lba, block_count);
if (!spin_until_ready())
return false;
auto unused_command_header = try_to_find_unused_command_header();
VERIFY(unused_command_header.has_value());
auto* command_list_entries = (volatile AHCI::CommandHeader*)m_command_list_region->vaddr().as_ptr();
command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()]->paddr().get();
command_list_entries[unused_command_header.value()].ctbau = 0;
command_list_entries[unused_command_header.value()].prdbc = 0;
command_list_entries[unused_command_header.value()].prdtl = m_current_scatter_list->scatters_count();
// Note: We must set the correct Dword count in this register. Real hardware
// AHCI controllers do care about this field! QEMU doesn't care if we don't
// set the correct CFL field in this register, but real hardware will set a
// handshake error bit in the PxSERR register if CFL is incorrect.
command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P | (is_atapi_attached() ? AHCI::CommandHeaderAttributes::A : 0) | (direction == AsyncBlockDeviceRequest::RequestType::Write ? AHCI::CommandHeaderAttributes::W : 0);
dbgln_if(AHCI_DEBUG, "AHCI Port {}: CLE: ctba={:#08x}, ctbau={:#08x}, prdbc={:#08x}, prdtl={:#04x}, attributes={:#04x}", representative_port_index(), (u32)command_list_entries[unused_command_header.value()].ctba, (u32)command_list_entries[unused_command_header.value()].ctbau, (u32)command_list_entries[unused_command_header.value()].prdbc, (u16)command_list_entries[unused_command_header.value()].prdtl, (u16)command_list_entries[unused_command_header.value()].attributes);
auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Allocated command table at {}", representative_port_index(), command_table_region->vaddr());
memset(const_cast<u8*>(command_table.command_fis), 0, 64);
size_t scatter_entry_index = 0;
size_t data_transfer_count = (block_count * m_connected_device->block_size());
for (auto scatter_page : m_current_scatter_list->vmobject().physical_pages()) {
VERIFY(data_transfer_count != 0);
VERIFY(scatter_page);
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Add a transfer scatter entry @ {}", representative_port_index(), scatter_page->paddr());
command_table.descriptors[scatter_entry_index].base_high = 0;
command_table.descriptors[scatter_entry_index].base_low = scatter_page->paddr().get();
if (data_transfer_count <= PAGE_SIZE) {
command_table.descriptors[scatter_entry_index].byte_count = data_transfer_count - 1;
data_transfer_count = 0;
} else {
command_table.descriptors[scatter_entry_index].byte_count = PAGE_SIZE - 1;
data_transfer_count -= PAGE_SIZE;
}
scatter_entry_index++;
}
command_table.descriptors[scatter_entry_index].byte_count = (PAGE_SIZE - 1) | (1 << 31);
memset(const_cast<u8*>(command_table.atapi_command), 0, 32);
auto& fis = *(volatile FIS::HostToDevice::Register*)command_table.command_fis;
fis.header.fis_type = (u8)FIS::Type::RegisterHostToDevice;
if (is_atapi_attached()) {
fis.command = ATA_CMD_PACKET;
TODO();
} else {
if (direction == AsyncBlockDeviceRequest::RequestType::Write)
fis.command = ATA_CMD_WRITE_DMA_EXT;
else
fis.command = ATA_CMD_READ_DMA_EXT;
}
full_memory_barrier();
fis.device = ATA_USE_LBA_ADDRESSING;
fis.header.port_multiplier = (u8)FIS::HeaderAttributes::C;
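// LBA48 addressing: bytes 0-2 of the 48-bit LBA go into lba_low, bytes 3-5 into lba_high.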
fis.lba_high[0] = (lba >> 24) & 0xff;
fis.lba_high[1] = (lba >> 32) & 0xff;
fis.lba_high[2] = (lba >> 40) & 0xff;
fis.lba_low[0] = lba & 0xff;
fis.lba_low[1] = (lba >> 8) & 0xff;
fis.lba_low[2] = (lba >> 16) & 0xff;
fis.count = (block_count);
// The below loop waits until the port is no longer busy before issuing a new command
if (!spin_until_ready())
return false;
full_memory_barrier();
mark_command_header_ready_to_process(unused_command_header.value());
full_memory_barrier();
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {} @ {}, ended", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ? "write" : "read", lba, block_count, m_dma_buffers[0]->paddr());
return true;
}
bool AHCIPort::identify_device()
{
VERIFY(m_lock.is_locked());
VERIFY(is_operable());
if (!spin_until_ready())
return false;
LockRefPtr<AHCIController> controller = m_parent_controller.strong_ref();
if (!controller)
return false;
auto unused_command_header = try_to_find_unused_command_header();
VERIFY(unused_command_header.has_value());
auto* command_list_entries = (volatile AHCI::CommandHeader*)m_command_list_region->vaddr().as_ptr();
command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()]->paddr().get();
command_list_entries[unused_command_header.value()].ctbau = 0;
command_list_entries[unused_command_header.value()].prdbc = 512;
command_list_entries[unused_command_header.value()].prdtl = 1;
// Note: We must set the correct Dword count in this register. Real hardware AHCI controllers do care about this field!
// QEMU doesn't care if we don't set the correct CFL field in this register, but real hardware will set a handshake error bit in the PxSERR register.
command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P;
auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value();
auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
memset(const_cast<u8*>(command_table.command_fis), 0, 64);
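// A single PRD pointing at the identify buffer; the byte_count field is zero-based, so 511 means 512 bytes.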
command_table.descriptors[0].base_high = 0;
command_table.descriptors[0].base_low = m_identify_buffer_page->paddr().get();
command_table.descriptors[0].byte_count = 512 - 1;
auto& fis = *(volatile FIS::HostToDevice::Register*)command_table.command_fis;
fis.header.fis_type = (u8)FIS::Type::RegisterHostToDevice;
fis.command = m_port_registers.sig == ATA::DeviceSignature::ATAPI ? ATA_CMD_IDENTIFY_PACKET : ATA_CMD_IDENTIFY;
fis.device = 0;
fis.header.port_multiplier = fis.header.port_multiplier | (u8)FIS::HeaderAttributes::C;
// The below loop waits until the port is no longer busy before issuing a new command
if (!spin_until_ready())
return false;
// Just in case we have a pending interrupt.
m_interrupt_enable.clear();
m_interrupt_status.clear();
full_memory_barrier();
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Marking command header at index {} as ready to identify device", representative_port_index(), unused_command_header.value());
m_port_registers.ci = 1 << unused_command_header.value();
full_memory_barrier();
size_t time_elapsed = 0;
bool success = false;
while (1) {
// Note: We allow it to spin for 256 milliseconds, which should be enough for a device to respond.
if (time_elapsed >= 256) {
break;
}
if (m_port_registers.serr != 0) {
dbgln("AHCI Port {}: Identify failed, SError {:#08x}", representative_port_index(), (u32)m_port_registers.serr);
try_disambiguate_sata_error();
break;
}
if (!(m_port_registers.ci & (1 << unused_command_header.value()))) {
success = true;
break;
}
microseconds_delay(1000); // delay for 1 millisecond
time_elapsed++;
}
// Note: We probably ended up triggering an interrupt but we don't really want to handle it,
// so just get rid of it.
// FIXME: Do that in a better way so we don't need to actually remember this every time
// we need to do this.
m_interrupt_status.clear();
m_interrupt_enable.set_all();
return success;
}
void AHCIPort::wait_until_condition_met_or_timeout(size_t delay_in_microseconds, size_t retries, Function<bool(void)> condition_being_met) const
{
size_t retry = 0;
while (retry < retries) {
if (condition_being_met())
break;
microseconds_delay(delay_in_microseconds);
retry++;
}
}
bool AHCIPort::shutdown()
{
MutexLocker locker(m_lock);
SpinlockLocker lock(m_hard_lock);
rebase();
set_interface_state(AHCI::DeviceDetectionInitialization::DisableInterface);
return true;
}
Optional<u8> AHCIPort::try_to_find_unused_command_header()
{
VERIFY(m_lock.is_locked());
u32 commands_issued = m_port_registers.ci;
for (size_t index = 0; index < 32; index++) {
if (!(commands_issued & 1)) {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: unused command header at index {}", representative_port_index(), index);
return index;
}
commands_issued >>= 1;
}
return {};
}
void AHCIPort::start_command_list_processing() const
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
VERIFY(is_operable());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Starting command list processing.", representative_port_index());
m_port_registers.cmd = m_port_registers.cmd | 1;
}
void AHCIPort::mark_command_header_ready_to_process(u8 command_header_index) const
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
VERIFY(is_operable());
VERIFY(!m_wait_for_completion);
m_wait_for_completion = true;
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Marking command header at index {} as ready to process.", representative_port_index(), command_header_index);
m_port_registers.ci = 1 << command_header_index;
}
void AHCIPort::stop_command_list_processing() const
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Stopping command list processing.", representative_port_index());
m_port_registers.cmd = m_port_registers.cmd & 0xfffffffe;
}
void AHCIPort::start_fis_receiving() const
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Starting FIS receiving.", representative_port_index());
m_port_registers.cmd = m_port_registers.cmd | (1 << 4);
}
void AHCIPort::power_on() const
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Power on. Cold presence detection? {}", representative_port_index(), (bool)(m_port_registers.cmd & (1 << 20)));
if (!(m_port_registers.cmd & (1 << 20)))
return;
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Powering on device.", representative_port_index());
m_port_registers.cmd = m_port_registers.cmd | (1 << 2);
}
void AHCIPort::spin_up() const
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Spin up. Staggered spin up? {}", representative_port_index(), m_hba_capabilities.staggered_spin_up_supported);
if (!m_hba_capabilities.staggered_spin_up_supported)
return;
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Spinning up device.", representative_port_index());
m_port_registers.cmd = m_port_registers.cmd | (1 << 1);
}
void AHCIPort::stop_fis_receiving() const
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Stopping FIS receiving.", representative_port_index());
m_port_registers.cmd = m_port_registers.cmd & 0xFFFFFFEF;
}
bool AHCIPort::initiate_sata_reset()
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Initiate SATA reset", representative_port_index());
stop_command_list_processing();
full_memory_barrier();
// Note: The AHCI specification says to wait 500 milliseconds at this point,
// so try to wait 500 milliseconds for the HBA to clear Command List Running.
wait_until_condition_met_or_timeout(100, 5000, [this]() -> bool {
return !(m_port_registers.cmd & (1 << 15));
});
full_memory_barrier();
spin_up();
full_memory_barrier();
set_interface_state(AHCI::DeviceDetectionInitialization::PerformInterfaceInitializationSequence);
// The AHCI specification says to wait 1 millisecond at this point.
microseconds_delay(1000);
full_memory_barrier();
set_interface_state(AHCI::DeviceDetectionInitialization::NoActionRequested);
full_memory_barrier();
wait_until_condition_met_or_timeout(10, 1000, [this]() -> bool {
return is_phy_enabled();
});
dmesgln("AHCI Port {}: {}", representative_port_index(), try_disambiguate_sata_status());
full_memory_barrier();
clear_sata_error_register();
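// PxSSTS.DET (bits 3:0) == 3 means a device is present and Phy communication is established.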
return (m_port_registers.ssts & 0xf) == 3;
}
void AHCIPort::set_interface_state(AHCI::DeviceDetectionInitialization requested_action)
{
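// PxSCTL.DET (bits 3:0) controls device detection and interface initialization:
// 0x0 requests no action, 0x1 performs the interface communication
// initialization sequence (COMRESET), and 0x4 disables the SATA interface
// and puts the Phy offline.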
switch (requested_action) {
case AHCI::DeviceDetectionInitialization::NoActionRequested:
m_port_registers.sctl = (m_port_registers.sctl & 0xfffffff0);
return;
case AHCI::DeviceDetectionInitialization::PerformInterfaceInitializationSequence:
m_port_registers.sctl = (m_port_registers.sctl & 0xfffffff0) | 1;
return;
case AHCI::DeviceDetectionInitialization::DisableInterface:
m_port_registers.sctl = (m_port_registers.sctl & 0xfffffff0) | 4;
return;
}
VERIFY_NOT_REACHED();
}
}

View file

@ -0,0 +1,136 @@
/*
* Copyright (c) 2021-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <Kernel/Devices/Device.h>
#include <Kernel/Devices/Storage/ATA/AHCI/Definitions.h>
#include <Kernel/Devices/Storage/ATA/AHCI/InterruptHandler.h>
#include <Kernel/Devices/Storage/ATA/ATADevice.h>
#include <Kernel/Devices/Storage/ATA/Definitions.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Library/LockWeakPtr.h>
#include <Kernel/Library/LockWeakable.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/ScatterGatherList.h>
#include <Kernel/PhysicalAddress.h>
#include <Kernel/Random.h>
#include <Kernel/Sections.h>
#include <Kernel/WaitQueue.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class AHCIInterruptHandler;
class AHCIPort
: public AtomicRefCounted<AHCIPort>
, public LockWeakable<AHCIPort> {
friend class AHCIController;
public:
static ErrorOr<NonnullLockRefPtr<AHCIPort>> create(AHCIController const&, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);
u32 port_index() const { return m_port_index; }
u32 representative_port_index() const { return port_index() + 1; }
bool is_operable() const;
bool is_atapi_attached() const { return m_port_registers.sig == (u32)ATA::DeviceSignature::ATAPI; };
LockRefPtr<StorageDevice> connected_device() const { return m_connected_device; }
bool reset();
bool initialize_without_reset();
void handle_interrupt();
private:
ErrorOr<void> allocate_resources_and_initialize_ports();
bool is_phy_enabled() const { return (m_port_registers.ssts & 0xf) == 3; }
bool initialize();
AHCIPort(AHCIController const&, NonnullRefPtr<Memory::PhysicalPage> identify_buffer_page, AHCI::HBADefinedCapabilities, volatile AHCI::PortRegisters&, u32 port_index);
ALWAYS_INLINE void clear_sata_error_register() const;
char const* try_disambiguate_sata_status();
void try_disambiguate_sata_error();
bool initiate_sata_reset();
void rebase();
void recover_from_fatal_error();
bool shutdown();
ALWAYS_INLINE void spin_up() const;
ALWAYS_INLINE void power_on() const;
void start_request(AsyncBlockDeviceRequest&);
void complete_current_request(AsyncDeviceRequest::RequestResult);
bool access_device(AsyncBlockDeviceRequest::RequestType, u64 lba, u8 block_count);
size_t calculate_descriptors_count(size_t block_count) const;
[[nodiscard]] Optional<AsyncDeviceRequest::RequestResult> prepare_and_set_scatter_list(AsyncBlockDeviceRequest& request);
ALWAYS_INLINE bool is_interrupts_enabled() const;
bool spin_until_ready() const;
bool identify_device();
ALWAYS_INLINE void start_command_list_processing() const;
ALWAYS_INLINE void mark_command_header_ready_to_process(u8 command_header_index) const;
ALWAYS_INLINE void stop_command_list_processing() const;
ALWAYS_INLINE void start_fis_receiving() const;
ALWAYS_INLINE void stop_fis_receiving() const;
ALWAYS_INLINE void set_active_state() const;
ALWAYS_INLINE void set_sleep_state() const;
void set_interface_state(AHCI::DeviceDetectionInitialization);
Optional<u8> try_to_find_unused_command_header();
ALWAYS_INLINE bool is_interface_disabled() const { return (m_port_registers.ssts & 0xf) == 4; };
ALWAYS_INLINE void wait_until_condition_met_or_timeout(size_t delay_in_microseconds, size_t retries, Function<bool(void)> condition_being_met) const;
// Data members
EntropySource m_entropy_source;
LockRefPtr<AsyncBlockDeviceRequest> m_current_request;
Spinlock<LockRank::None> m_hard_lock {};
Mutex m_lock { "AHCIPort"sv };
mutable bool m_wait_for_completion { false };
Vector<NonnullRefPtr<Memory::PhysicalPage>> m_dma_buffers;
Vector<NonnullRefPtr<Memory::PhysicalPage>> m_command_table_pages;
RefPtr<Memory::PhysicalPage> m_command_list_page;
OwnPtr<Memory::Region> m_command_list_region;
RefPtr<Memory::PhysicalPage> m_fis_receive_page;
LockRefPtr<ATADevice> m_connected_device;
u32 m_port_index;
// Note: Ideally the AHCIController would be the only object holding this data,
// but since going through m_parent_controller would require taking a strong ref,
// it's probably better to just "cache" it here instead.
AHCI::HBADefinedCapabilities const m_hba_capabilities;
NonnullRefPtr<Memory::PhysicalPage> const m_identify_buffer_page;
volatile AHCI::PortRegisters& m_port_registers;
LockWeakPtr<AHCIController> m_parent_controller;
AHCI::PortInterruptStatusBitField m_interrupt_status;
AHCI::PortInterruptEnableBitField m_interrupt_enable;
LockRefPtr<Memory::ScatterGatherList> m_current_scatter_list;
bool m_disabled_by_firmware { false };
};
}

View file

@ -0,0 +1,17 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Devices/Storage/ATA/ATAController.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
namespace Kernel {
ATAController::ATAController()
: StorageController(StorageManagement::generate_relative_ata_controller_id({}))
{
}
}

View file

@ -0,0 +1,29 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Devices/BlockDevice.h>
#include <Kernel/Devices/Storage/StorageController.h>
#include <Kernel/Library/LockRefPtr.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class ATADevice;
class ATAController
: public StorageController
, public LockWeakable<ATAController> {
public:
virtual void start_request(ATADevice const&, AsyncBlockDeviceRequest&) = 0;
protected:
ATAController();
};
}

View file

@ -0,0 +1,36 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/StringView.h>
#include <Kernel/Devices/Storage/ATA/ATADevice.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
#include <Kernel/Sections.h>
namespace Kernel {
static StorageDevice::LUNAddress convert_ata_address_to_lun_address(ATAController const& controller, ATADevice::Address ata_address)
{
return StorageDevice::LUNAddress { controller.controller_id(), ata_address.port, ata_address.subport };
}
ATADevice::ATADevice(ATAController const& controller, ATADevice::Address ata_address, u16 capabilities, u16 logical_sector_size, u64 max_addressable_block)
: StorageDevice(convert_ata_address_to_lun_address(controller, ata_address), controller.hardware_relative_controller_id(), logical_sector_size, max_addressable_block)
, m_controller(controller)
, m_ata_address(ata_address)
, m_capabilities(capabilities)
{
}
ATADevice::~ATADevice() = default;
void ATADevice::start_request(AsyncBlockDeviceRequest& request)
{
auto controller = m_controller.strong_ref();
VERIFY(controller);
controller->start_request(*this, request);
}
}

View file

@ -0,0 +1,46 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Devices/Storage/ATA/ATAController.h>
#include <Kernel/Devices/Storage/StorageDevice.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Locking/Mutex.h>
namespace Kernel {
class ATADevice : public StorageDevice {
public:
// Note: For IDE drives, port means Primary or Secondary (0 or 1),
// and subport means Master or Slave (0 or 1).
// For SATA drives (AHCI driven HBAs), a port can be a number from 0 to 31,
// and subport can be a number from 0 to 14 (only 15 devices are allowed to
// be connected to one SATA port multiplier).
struct Address {
// FIXME: A u32 for this value is wasteful, because even AHCI only supports 32 ports
u32 port;
u8 subport;
};
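// For example (hypothetical values): an IDE slave on the secondary channel
// would be Address { 1, 1 }, while a SATA disk on AHCI port 5 behind port
// multiplier link 3 would be Address { 5, 3 }.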
public:
virtual ~ATADevice() override;
// ^BlockDevice
virtual void start_request(AsyncBlockDeviceRequest&) override;
u16 ata_capabilites() const { return m_capabilities; }
Address const& ata_address() const { return m_ata_address; }
protected:
ATADevice(ATAController const&, Address, u16, u16, u64);
LockWeakPtr<ATAController> m_controller;
const Address m_ata_address;
const u16 m_capabilities;
};
}

View file

@ -0,0 +1,35 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/StringView.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
#include <Kernel/Sections.h>
namespace Kernel {
NonnullLockRefPtr<ATADiskDevice> ATADiskDevice::create(ATAController const& controller, ATADevice::Address ata_address, u16 capabilities, u16 logical_sector_size, u64 max_addressable_block)
{
auto disk_device_or_error = DeviceManagement::try_create_device<ATADiskDevice>(controller, ata_address, capabilities, logical_sector_size, max_addressable_block);
// FIXME: Find a way to propagate errors
VERIFY(!disk_device_or_error.is_error());
return disk_device_or_error.release_value();
}
ATADiskDevice::ATADiskDevice(ATAController const& controller, ATADevice::Address ata_address, u16 capabilities, u16 logical_sector_size, u64 max_addressable_block)
: ATADevice(controller, ata_address, capabilities, logical_sector_size, max_addressable_block)
{
}
ATADiskDevice::~ATADiskDevice() = default;
StringView ATADiskDevice::class_name() const
{
return "ATADiskDevice"sv;
}
}

View file

@ -0,0 +1,34 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Devices/Storage/ATA/ATADevice.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Locking/Mutex.h>
namespace Kernel {
class IDEController;
class ATADiskDevice final : public ATADevice {
friend class IDEController;
friend class DeviceManagement;
public:
static NonnullLockRefPtr<ATADiskDevice> create(ATAController const&, ATADevice::Address, u16 capabilities, u16 logical_sector_size, u64 max_addressable_block);
virtual ~ATADiskDevice() override;
// ^StorageDevice
virtual CommandSet command_set() const override { return CommandSet::ATA; }
private:
ATADiskDevice(ATAController const&, Address, u16, u16, u64);
// ^DiskDevice
virtual StringView class_name() const override;
};
}

View file

@ -0,0 +1,520 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Devices/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Devices/Storage/ATA/ATAPort.h>
#include <Kernel/Devices/Storage/ATA/Definitions.h>
#include <Kernel/WorkQueue.h>
namespace Kernel {
class ATAPortInterruptDisabler {
public:
ATAPortInterruptDisabler(ATAPort& port)
: m_port(port)
{
(void)port.disable_interrupts();
}
~ATAPortInterruptDisabler()
{
(void)m_port->enable_interrupts();
};
private:
LockRefPtr<ATAPort> m_port;
};
class ATAPortInterruptCleaner {
public:
ATAPortInterruptCleaner(ATAPort& port)
: m_port(port)
{
}
~ATAPortInterruptCleaner()
{
(void)m_port->force_clear_interrupts();
};
private:
LockRefPtr<ATAPort> m_port;
};
void ATAPort::fix_name_string_in_identify_device_block()
{
VERIFY(m_lock.is_locked());
auto* wbuf = (u16*)m_ata_identify_data_buffer->data();
auto* bbuf = m_ata_identify_data_buffer->data() + 27 * 2;
for (size_t word_index = 27; word_index < 47; word_index++) {
u16 data = wbuf[word_index];
*(bbuf++) = MSB(data);
*(bbuf++) = LSB(data);
}
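// For example, a model number whose words are { 'G' << 8 | 'e', 'n' << 8 | 'e', ... }
// reads back from this little-endian byte buffer as "eGenir c..." and the loop
// above swaps it back into the expected "Generic ...".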
}
ErrorOr<void> ATAPort::detect_connected_devices()
{
MutexLocker locker(m_lock);
for (size_t device_index = 0; device_index < max_possible_devices_connected(); device_index++) {
TRY(device_select(device_index));
auto device_presence = TRY(detect_presence_on_selected_device());
if (!device_presence)
continue;
TaskFile identify_taskfile;
memset(&identify_taskfile, 0, sizeof(TaskFile));
identify_taskfile.command = ATA_CMD_IDENTIFY;
auto buffer = UserOrKernelBuffer::for_kernel_buffer(m_ata_identify_data_buffer->data());
{
auto result = execute_polled_command(TransactionDirection::Read, LBAMode::None, identify_taskfile, buffer, 0, 256, 100, 100);
if (result.is_error()) {
continue;
}
}
ATAIdentifyBlock volatile& identify_block = (ATAIdentifyBlock volatile&)(*m_ata_identify_data_buffer->data());
u16 capabilities = identify_block.capabilities[0];
StringView device_name = StringView((char const*)const_cast<u16*>(identify_block.model_number), 40);
fix_name_string_in_identify_device_block();
u64 max_addressable_block = identify_block.max_28_bit_addressable_logical_sector;
dbgln("ATAPort: device found: Name={}, Capacity={}, Capabilities={:#04x}", device_name.trim_whitespace(), max_addressable_block * 512, capabilities);
// If the drive is so old that it doesn't support LBA, ignore it.
if (!(capabilities & ATA_CAP_LBA)) {
dbgln("ATAPort: device found but without LBA support (what kind of dinosaur we see here?)");
continue;
}
// If the device supports 48-bit LBA, use that sector count instead.
if (identify_block.commands_and_feature_sets_supported[1] & (1 << 10))
max_addressable_block = identify_block.user_addressable_logical_sectors_count;
// FIXME: Don't assume all drives will have logical sector size of 512 bytes.
ATADevice::Address address = { m_port_index, static_cast<u8>(device_index) };
m_ata_devices.append(ATADiskDevice::create(m_parent_ata_controller, address, capabilities, 512, max_addressable_block));
}
return {};
}
LockRefPtr<StorageDevice> ATAPort::connected_device(size_t device_index) const
{
MutexLocker locker(m_lock);
if (m_ata_devices.size() > device_index)
return m_ata_devices[device_index];
return {};
}
ErrorOr<void> ATAPort::start_request(ATADevice const& associated_device, AsyncBlockDeviceRequest& request)
{
MutexLocker locker(m_lock);
VERIFY(m_current_request.is_null());
VERIFY(pio_capable() || dma_capable());
dbgln_if(ATA_DEBUG, "ATAPort::start_request");
m_current_request = request;
m_current_request_block_index = 0;
m_current_request_flushing_cache = false;
if (dma_capable()) {
TRY(prepare_and_initiate_dma_transaction(associated_device));
return {};
}
TRY(prepare_and_initiate_pio_transaction(associated_device));
return {};
}
void ATAPort::complete_pio_transaction(AsyncDeviceRequest::RequestResult result)
{
VERIFY(m_current_request);
// Now schedule reading back the buffer as soon as we leave the irq handler.
// This is important so that we can safely write the buffer back,
// which could cause page faults. Note that this may be called immediately
// before Processor::deferred_call_queue returns!
auto work_item_creation_result = g_io_work->try_queue([this, result]() {
dbgln_if(ATA_DEBUG, "ATAPort::complete_pio_transaction result: {}", (int)result);
MutexLocker locker(m_lock);
VERIFY(m_current_request);
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(result);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
void ATAPort::complete_dma_transaction(AsyncDeviceRequest::RequestResult result)
{
// NOTE: this may be called from the interrupt handler!
VERIFY(m_current_request);
VERIFY(m_lock.is_locked());
// Now schedule reading back the buffer as soon as we leave the irq handler.
// This is important so that we can safely write the buffer back,
// which could cause page faults. Note that this may be called immediately
// before Processor::deferred_call_queue returns!
auto work_item_creation_result = g_io_work->try_queue([this, result]() {
dbgln_if(ATA_DEBUG, "ATAPort::complete_dma_transaction result: {}", (int)result);
MutexLocker locker(m_lock);
if (!m_current_request)
return;
auto current_request = m_current_request;
m_current_request.clear();
if (result == AsyncDeviceRequest::Success) {
{
auto result = force_busmastering_status_clean();
if (result.is_error()) {
locker.unlock();
current_request->complete(AsyncDeviceRequest::Failure);
return;
}
}
if (current_request->request_type() == AsyncBlockDeviceRequest::Read) {
if (auto result = current_request->write_to_buffer(current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * current_request->block_count()); result.is_error()) {
locker.unlock();
current_request->complete(AsyncDeviceRequest::MemoryFault);
return;
}
}
}
locker.unlock();
current_request->complete(result);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
}
}
static void print_ata_status(u8 status)
{
dbgln("ATAPort: print_status: DRQ={} BSY={}, DRDY={}, DSC={}, DF={}, CORR={}, IDX={}, ERR={}",
(status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0,
(status & ATA_SR_DRDY) != 0,
(status & ATA_SR_DSC) != 0,
(status & ATA_SR_DF) != 0,
(status & ATA_SR_CORR) != 0,
(status & ATA_SR_IDX) != 0,
(status & ATA_SR_ERR) != 0);
}
static void try_disambiguate_ata_error(u8 error)
{
dbgln("ATAPort: Error cause:");
switch (error) {
case ATA_ER_BBK:
dbgln("ATAPort: - Bad block");
break;
case ATA_ER_UNC:
dbgln("ATAPort: - Uncorrectable data");
break;
case ATA_ER_MC:
dbgln("ATAPort: - Media changed");
break;
case ATA_ER_IDNF:
dbgln("ATAPort: - ID mark not found");
break;
case ATA_ER_MCR:
dbgln("ATAPort: - Media change request");
break;
case ATA_ER_ABRT:
dbgln("ATAPort: - Command aborted");
break;
case ATA_ER_TK0NF:
dbgln("ATAPort: - Track 0 not found");
break;
case ATA_ER_AMNF:
dbgln("ATAPort: - No address mark");
break;
default:
dbgln("ATAPort: - No one knows");
break;
}
}
ErrorOr<bool> ATAPort::handle_interrupt_after_dma_transaction()
{
if (!dma_capable())
return false;
u8 bstatus = TRY(busmastering_status());
if (!(bstatus & 0x4)) {
// interrupt not from this device, ignore
dbgln_if(ATA_DEBUG, "ATAPort: ignore interrupt");
return false;
}
auto work_item_creation_result = g_ata_work->try_queue([this]() -> void {
MutexLocker locker(m_lock);
u8 status = task_file_status().release_value();
m_entropy_source.add_random_event(status);
// clear bus master interrupt status
{
auto result = force_busmastering_status_clean();
if (result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::Failure);
return;
}
}
SpinlockLocker lock(m_hard_lock);
dbgln_if(ATA_DEBUG, "ATAPort: interrupt: DRQ={}, BSY={}, DRDY={}",
(status & ATA_SR_DRQ) != 0,
(status & ATA_SR_BSY) != 0,
(status & ATA_SR_DRDY) != 0);
if (!m_current_request) {
dbgln("ATAPort: IRQ but no pending request!");
return;
}
if (status & ATA_SR_ERR) {
print_ata_status(status);
auto device_error = task_file_error().release_value();
dbgln("ATAPort: Error {:#02x}!", (u8)device_error);
try_disambiguate_ata_error(device_error);
complete_dma_transaction(AsyncDeviceRequest::Failure);
return;
}
complete_dma_transaction(AsyncDeviceRequest::Success);
return;
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
return Error::from_errno(ENOMEM);
}
return true;
}
ErrorOr<void> ATAPort::prepare_and_initiate_dma_transaction(ATADevice const& associated_device)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
// Note: We might be called here from an interrupt handler (like the page fault handler), so queue a read afterwards.
auto work_item_creation_result = g_ata_work->try_queue([this, &associated_device]() -> void {
MutexLocker locker(m_lock);
dbgln_if(ATA_DEBUG, "ATAPort::prepare_and_initiate_dma_transaction ({} x {})", m_current_request->block_index(), m_current_request->block_count());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
{
auto result = device_select(associated_device.ata_address().subport);
if (result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::Failure);
return;
}
}
if (m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write) {
if (auto result = m_current_request->read_from_buffer(m_current_request->buffer(), m_dma_buffer_region->vaddr().as_ptr(), 512 * m_current_request->block_count()); result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::MemoryFault);
return;
}
}
prdt().offset = m_dma_buffer_page->paddr().get();
prdt().size = 512 * m_current_request->block_count();
VERIFY(prdt().size <= PAGE_SIZE);
SpinlockLocker hard_lock_locker(m_hard_lock);
{
auto result = stop_busmastering();
if (result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::Failure);
return;
}
}
if (m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write) {
auto result = prepare_transaction_with_busmastering(TransactionDirection::Write, m_prdt_page->paddr());
if (result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::Failure);
return;
}
} else {
auto result = prepare_transaction_with_busmastering(TransactionDirection::Read, m_prdt_page->paddr());
if (result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::Failure);
return;
}
}
TaskFile taskfile;
LBAMode lba_mode = LBAMode::TwentyEightBit;
auto lba = m_current_request->block_index();
if ((lba + m_current_request->block_count()) >= 0x10000000) {
lba_mode = LBAMode::FortyEightBit;
}
memset(&taskfile, 0, sizeof(TaskFile));
taskfile.lba_low[0] = (lba & 0x000000FF) >> 0;
taskfile.lba_low[1] = (lba & 0x0000FF00) >> 8;
taskfile.lba_low[2] = (lba & 0x00FF0000) >> 16;
taskfile.lba_high[0] = (lba & 0xFF000000) >> 24;
taskfile.lba_high[1] = (lba & 0xFF00000000ull) >> 32;
taskfile.lba_high[2] = (lba & 0xFF0000000000ull) >> 40;
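// For example, LBA 0x0012345678 ends up split across the registers as
// lba_low = { 0x78, 0x56, 0x34 } and lba_high = { 0x12, 0x00, 0x00 }.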
taskfile.count = m_current_request->block_count();
if (lba_mode == LBAMode::TwentyEightBit)
taskfile.command = m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write ? ATA_CMD_WRITE_DMA : ATA_CMD_READ_DMA;
else
taskfile.command = m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write ? ATA_CMD_WRITE_DMA_EXT : ATA_CMD_READ_DMA_EXT;
{
auto result = load_taskfile_into_registers(taskfile, lba_mode, 1000);
if (result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::Failure);
return;
}
}
if (m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write) {
auto result = start_busmastering(TransactionDirection::Write);
if (result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::Failure);
return;
}
}
else {
auto result = start_busmastering(TransactionDirection::Read);
if (result.is_error()) {
complete_dma_transaction(AsyncDeviceRequest::Failure);
return;
}
}
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
return Error::from_errno(ENOMEM);
}
return {};
}
ErrorOr<void> ATAPort::prepare_and_initiate_pio_transaction(ATADevice const& associated_device)
{
VERIFY(m_lock.is_locked());
VERIFY(!m_current_request.is_null());
VERIFY(m_current_request->block_count() <= 256);
dbgln_if(ATA_DEBUG, "ATAPort::prepare_and_initiate_pio_transaction ({} x {})", m_current_request->block_index(), m_current_request->block_count());
// Note: We might be called here from an interrupt handler (like the page fault handler), so queue a read afterwards.
auto work_item_creation_result = g_ata_work->try_queue([this, &associated_device]() -> void {
MutexLocker locker(m_lock);
{
auto result = device_select(associated_device.ata_address().subport);
if (result.is_error()) {
complete_pio_transaction(AsyncDeviceRequest::Failure);
return;
}
}
for (size_t block_index = 0; block_index < m_current_request->block_count(); block_index++) {
TaskFile taskfile;
LBAMode lba_mode = LBAMode::TwentyEightBit;
auto lba = m_current_request->block_index() + block_index;
if (lba >= 0x10000000) {
lba_mode = LBAMode::FortyEightBit;
}
memset(&taskfile, 0, sizeof(TaskFile));
taskfile.lba_low[0] = (lba & 0x000000FF) >> 0;
taskfile.lba_low[1] = (lba & 0x0000FF00) >> 8;
taskfile.lba_low[2] = (lba & 0x00FF0000) >> 16;
taskfile.lba_high[0] = (lba & 0xFF000000) >> 24;
taskfile.lba_high[1] = (lba & 0xFF00000000ull) >> 32;
taskfile.lba_high[2] = (lba & 0xFF0000000000ull) >> 40;
taskfile.count = 1;
if (lba_mode == LBAMode::TwentyEightBit)
taskfile.command = m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write ? ATA_CMD_WRITE_PIO : ATA_CMD_READ_PIO;
else
taskfile.command = m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Write ? ATA_CMD_WRITE_PIO_EXT : ATA_CMD_READ_PIO_EXT;
if (m_current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
auto result = execute_polled_command(TransactionDirection::Read, lba_mode, taskfile, m_current_request->buffer(), block_index, 256, 100, 100);
if (result.is_error()) {
complete_pio_transaction(AsyncDeviceRequest::Failure);
return;
}
} else {
auto result = execute_polled_command(TransactionDirection::Write, lba_mode, taskfile, m_current_request->buffer(), block_index, 256, 100, 100);
if (result.is_error()) {
complete_pio_transaction(AsyncDeviceRequest::Failure);
return;
}
}
}
complete_pio_transaction(AsyncDeviceRequest::Success);
});
if (work_item_creation_result.is_error()) {
auto current_request = m_current_request;
m_current_request.clear();
current_request->complete(AsyncDeviceRequest::OutOfMemory);
return Error::from_errno(ENOMEM);
}
return {};
}
ErrorOr<void> ATAPort::execute_polled_command(TransactionDirection direction, LBAMode lba_mode, TaskFile const& taskfile, UserOrKernelBuffer& buffer, size_t block_offset, size_t words_count, size_t preparation_timeout_in_milliseconds, size_t completion_timeout_in_milliseconds)
{
// Temporarily disable interrupts in case they are enabled, and remember
// to re-enable (and clear) them later if needed.
ATAPortInterruptDisabler disabler(*this);
ATAPortInterruptCleaner cleaner(*this);
MutexLocker locker(m_lock);
{
SpinlockLocker hard_locker(m_hard_lock);
// Wait for device to be not busy or timeout
TRY(wait_if_busy_until_timeout(preparation_timeout_in_milliseconds));
// Send command, wait for result or timeout
TRY(load_taskfile_into_registers(taskfile, lba_mode, preparation_timeout_in_milliseconds));
size_t milliseconds_elapsed = 0;
for (;;) {
if (milliseconds_elapsed > completion_timeout_in_milliseconds)
break;
u8 status = task_file_status().release_value();
if (status & ATA_SR_ERR) {
return Error::from_errno(EINVAL);
}
if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRQ)) {
break;
}
microseconds_delay(1000);
milliseconds_elapsed++;
}
if (milliseconds_elapsed > completion_timeout_in_milliseconds) {
critical_dmesgln("ATAPort: device state unknown. Timeout exceeded.");
return Error::from_errno(EINVAL);
}
}
VERIFY_INTERRUPTS_ENABLED();
if (direction == TransactionDirection::Read)
TRY(read_pio_data_to_buffer(buffer, block_offset, words_count));
else
TRY(write_pio_data_from_buffer(buffer, block_offset, words_count));
return {};
}
}

View file

@ -0,0 +1,156 @@
/*
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Devices/Storage/ATA/ATADevice.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class ATAPort
: public AtomicRefCounted<ATAPort>
, public LockWeakable<ATAPort> {
friend class ATAPortInterruptDisabler;
friend class ATAPortInterruptCleaner;
public:
struct TaskFile {
u8 command;
u8 lba_low[3];
u8 device;
u8 lba_high[3];
u8 features_high;
u16 count;
u8 icc;
u8 control;
u32 reserved;
};
enum class TransactionDirection : u8 {
Read,
Write,
};
struct [[gnu::packed]] PhysicalRegionDescriptor {
u32 offset;
u16 size { 0 };
u16 end_of_table { 0 };
};
enum class LBAMode : u8 {
None,
TwentyEightBit,
FortyEightBit,
};
public:
LockRefPtr<StorageDevice> connected_device(size_t device_index) const;
virtual ~ATAPort() = default;
virtual ErrorOr<void> disable() = 0;
virtual ErrorOr<void> power_on() = 0;
ErrorOr<void> detect_connected_devices();
ErrorOr<bool> handle_interrupt_after_dma_transaction();
ErrorOr<void> start_request(ATADevice const& associated_device, AsyncBlockDeviceRequest&);
// Note: Generic (P)ATA IDE "ports" are tied to the IDE channel link (cable), and trying to
// reset either the master port or the slave port requires actually resetting
// both at once...
// This is due to the fact that IDE devices can be connected together (master-slave)
// with one 80 pin cable which forms one (primary/secondary) "ATA bus".
// Intel AHCI controllers generally allow individual phy port reset. The caller
// of this method should know this in advance...
// Note: ATAPI devices are an exception to this, so even if we initiate a
// port reset, there's no guarantee that ATAPI devices will reset anyway,
// so resetting them requires actually sending the ATA "DEVICE RESET" command.
virtual ErrorOr<void> port_phy_reset() = 0;
// Note: Software reset means an individual reset of a selected device on the "bus" (port).
// This means that this will likely work for devices that indicate support for
// PACKET commands (ATAPI devices) that also support DEVICE RESET. For other devices
// there's no method to reset them besides a (full) PHY reset.
// For devices that don't support this feature, just return ENOTSUP.
virtual ErrorOr<void> soft_reset() { return Error::from_errno(ENOTSUP); }
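// A minimal usage sketch (hypothetical caller, assuming an attached ATAPI
// device that supports DEVICE RESET): prefer the gentler soft reset and fall
// back to a full PHY reset, e.g.:
//
//     if (port->soft_reset().is_error())
//         TRY(port->port_phy_reset());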
ErrorOr<void> execute_polled_command(TransactionDirection direction, LBAMode lba_mode, TaskFile const& taskfile, UserOrKernelBuffer&, size_t block_offset, size_t words_count, size_t preparation_timeout_in_milliseconds, size_t completion_timeout_in_milliseconds);
virtual bool has_sata_capabilities() { return false; }
virtual bool pio_capable() const = 0;
virtual bool dma_capable() const = 0;
virtual size_t max_possible_devices_connected() const = 0;
private:
ErrorOr<void> prepare_and_initiate_dma_transaction(ATADevice const& associated_device);
ErrorOr<void> prepare_and_initiate_pio_transaction(ATADevice const& associated_device);
void complete_dma_transaction(AsyncDeviceRequest::RequestResult result);
void complete_pio_transaction(AsyncDeviceRequest::RequestResult result);
void fix_name_string_in_identify_device_block();
protected:
virtual ErrorOr<u8> task_file_status() = 0;
virtual ErrorOr<u8> task_file_error() = 0;
virtual ErrorOr<void> wait_if_busy_until_timeout(size_t timeout_in_milliseconds) = 0;
virtual ErrorOr<void> device_select(size_t device_index) = 0;
virtual ErrorOr<bool> detect_presence_on_selected_device() = 0;
virtual ErrorOr<void> enable_interrupts() = 0;
virtual ErrorOr<void> disable_interrupts() = 0;
virtual ErrorOr<void> stop_busmastering() = 0;
virtual ErrorOr<void> start_busmastering(TransactionDirection) = 0;
virtual ErrorOr<void> force_busmastering_status_clean() = 0;
virtual ErrorOr<u8> busmastering_status() = 0;
virtual ErrorOr<void> prepare_transaction_with_busmastering(TransactionDirection, PhysicalAddress prdt_buffer) = 0;
virtual ErrorOr<void> initiate_transaction(TransactionDirection) = 0;
virtual ErrorOr<void> force_clear_interrupts() = 0;
// Note: This method assumes we already selected the correct device!
virtual ErrorOr<void> load_taskfile_into_registers(TaskFile const&, LBAMode lba_mode, size_t completion_timeout_in_milliseconds) = 0;
virtual ErrorOr<void> read_pio_data_to_buffer(UserOrKernelBuffer&, size_t block_offset, size_t words_count) = 0;
virtual ErrorOr<void> write_pio_data_from_buffer(UserOrKernelBuffer const&, size_t block_offset, size_t words_count) = 0;
PhysicalRegionDescriptor& prdt() { return *reinterpret_cast<PhysicalRegionDescriptor*>(m_prdt_region->vaddr().as_ptr()); }
ATAPort(ATAController const& parent_controller, u8 port_index, NonnullOwnPtr<KBuffer> ata_identify_data_buffer)
: m_port_index(port_index)
, m_ata_identify_data_buffer(move(ata_identify_data_buffer))
, m_parent_ata_controller(parent_controller)
{
}
mutable Mutex m_lock;
Spinlock<LockRank::None> m_hard_lock {};
EntropySource m_entropy_source;
LockRefPtr<AsyncBlockDeviceRequest> m_current_request;
u64 m_current_request_block_index { 0 };
bool m_current_request_flushing_cache { false };
OwnPtr<Memory::Region> m_prdt_region;
OwnPtr<Memory::Region> m_dma_buffer_region;
RefPtr<Memory::PhysicalPage> m_prdt_page;
RefPtr<Memory::PhysicalPage> m_dma_buffer_page;
const u8 m_port_index;
Vector<NonnullLockRefPtr<ATADevice>> m_ata_devices;
NonnullOwnPtr<KBuffer> m_ata_identify_data_buffer;
NonnullLockRefPtr<ATAController> m_parent_ata_controller;
};
}

View file

@ -0,0 +1,212 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
namespace Kernel::ATA {
enum DeviceSignature : u32 {
ATA = 0x00000101,
ATAPI = 0xEB140101,
EnclosureManagementBridge = 0xC33C0101,
PortMultiplier = 0x96690101,
Unconnected = 0xFFFFFFFF
};
}
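// A port's signature register can be matched against these values to classify
// the attached device. A sketch (assuming AHCI port registers at hand):
//
//     bool is_atapi = port_registers.sig == (u32)Kernel::ATA::DeviceSignature::ATAPI;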
#define ATA_SR_BSY 0x80
#define ATA_SR_DRDY 0x40
#define ATA_SR_DF 0x20
#define ATA_SR_DSC 0x10
#define ATA_SR_DRQ 0x08
#define ATA_SR_CORR 0x04
#define ATA_SR_IDX 0x02
#define ATA_SR_ERR 0x01
#define ATA_ER_BBK 0x80
#define ATA_ER_UNC 0x40
#define ATA_ER_MC 0x20
#define ATA_ER_IDNF 0x10
#define ATA_ER_MCR 0x08
#define ATA_ER_ABRT 0x04
#define ATA_ER_TK0NF 0x02
#define ATA_ER_AMNF 0x01
#define ATA_CMD_READ_PIO 0x20
#define ATA_CMD_READ_PIO_EXT 0x24
#define ATA_CMD_READ_DMA 0xC8
#define ATA_CMD_READ_DMA_EXT 0x25
#define ATA_CMD_WRITE_PIO 0x30
#define ATA_CMD_WRITE_PIO_EXT 0x34
#define ATA_CMD_WRITE_DMA 0xCA
#define ATA_CMD_WRITE_DMA_EXT 0x35
#define ATA_CMD_CACHE_FLUSH 0xE7
#define ATA_CMD_CACHE_FLUSH_EXT 0xEA
#define ATA_CMD_PACKET 0xA0
#define ATA_CMD_IDENTIFY_PACKET 0xA1
#define ATA_CMD_IDENTIFY 0xEC
#define ATAPI_CMD_READ 0xA8
#define ATAPI_CMD_EJECT 0x1B
#define ATA_IDENT_DEVICETYPE 0
#define ATA_IDENT_CYLINDERS 2
#define ATA_IDENT_HEADS 6
#define ATA_IDENT_SECTORS 12
#define ATA_IDENT_SERIAL 20
#define ATA_IDENT_MODEL 54
#define ATA_IDENT_CAPABILITIES 98
#define ATA_IDENT_FIELDVALID 106
#define ATA_IDENT_MAX_LBA 120
#define ATA_IDENT_COMMANDSETS 164
#define ATA_IDENT_MAX_LBA_EXT 200
#define ATA_USE_LBA_ADDRESSING (1 << 6)
#define IDE_ATA 0x00
#define IDE_ATAPI 0x01
#define ATA_REG_DATA 0x00
#define ATA_REG_ERROR 0x01
#define ATA_REG_FEATURES 0x01
#define ATA_REG_SECCOUNT0 0x02
#define ATA_REG_LBA0 0x03
#define ATA_REG_LBA1 0x04
#define ATA_REG_LBA2 0x05
#define ATA_REG_HDDEVSEL 0x06
#define ATA_REG_COMMAND 0x07
#define ATA_REG_STATUS 0x07
#define ATA_REG_SECCOUNT1 0x08
#define ATA_REG_LBA3 0x09
#define ATA_REG_LBA4 0x0A
#define ATA_REG_LBA5 0x0B
#define ATA_CTL_CONTROL 0x00
#define ATA_CTL_ALTSTATUS 0x00
#define ATA_CTL_DEVADDRESS 0x01
#define ATA_CAP_LBA 0x200
namespace Kernel {
struct [[gnu::packed]] ATAIdentifyBlock {
u16 general_configuration;
u16 obsolete;
u16 specific_configuration;
u16 obsolete2;
u16 retired[2];
u16 obsolete3;
u16 reserved_for_cfa[2];
u16 retired2;
u16 serial_number[10];
u16 retired3[2];
u16 obsolete4;
u16 firmware_revision[4];
u16 model_number[20];
u16 maximum_logical_sectors_per_drq;
u16 trusted_computing_features;
u16 capabilities[2];
u16 obsolete5[2];
u16 validity_flags;
u16 obsolete6[5];
u16 security_features;
u32 max_28_bit_addressable_logical_sector;
u16 obsolete7;
u16 dma_modes;
u16 pio_modes;
u16 minimum_multiword_dma_transfer_cycle;
u16 recommended_multiword_dma_transfer_cycle;
u16 minimum_multiword_pio_transfer_cycle_without_flow_control;
u16 minimum_multiword_pio_transfer_cycle_with_flow_control;
u16 additional_supported;
u16 reserved3[5];
u16 queue_depth;
u16 serial_ata_capabilities;
u16 serial_ata_additional_capabilities;
u16 serial_ata_features_supported;
u16 serial_ata_features_enabled;
u16 major_version_number;
u16 minor_version_number;
u16 commands_and_feature_sets_supported[3];
u16 commands_and_feature_sets_supported_or_enabled[3];
u16 ultra_dma_modes;
u16 timing_for_security_features[2];
u16 apm_level;
u16 master_password_id;
u16 hardware_reset_results;
u16 obsolete8;
u16 stream_minimum_request_time;
u16 streaming_transfer_time_for_dma;
u16 streaming_access_latency;
u16 streaming_performance_granularity[2];
u64 user_addressable_logical_sectors_count;
u16 streaming_transfer_time_for_pio;
u16 max_512_byte_blocks_per_data_set_management_command;
u16 physical_sector_size_to_logical_sector_size;
u16 inter_seek_delay_for_acoustic_testing;
u16 world_wide_name[4];
u16 reserved4[4];
u16 obsolete9;
u32 logical_sector_size;
u16 commands_and_feature_sets_supported2;
u16 commands_and_feature_sets_supported_or_enabled2;
u16 reserved_for_expanded_supported_and_enabled_settings[6];
u16 obsolete10;
u16 security_status;
u16 vendor_specific[31];
u16 reserved_for_cfa2[8];
u16 device_nominal_form_factor;
u16 data_set_management_command_support;
u16 additional_product_id[4];
u16 reserved5[2];
u16 current_media_serial_number[30];
u16 sct_command_transport;
u16 reserved6[2];
u16 logical_sectors_alignment_within_physical_sector;
u32 write_read_verify_sector_mode_3_count;
u32 write_read_verify_sector_mode_2_count;
u16 obsolete11[3];
u16 nominal_media_rotation_rate;
u16 reserved7;
u16 obsolete12;
u16 write_read_verify_feature_set_current_mode;
u16 reserved8;
u16 transport_major_version_number;
u16 transport_minor_version_number;
u16 reserved9[6];
u64 extended_user_addressable_logical_sectors_count;
u16 minimum_512_byte_data_blocks_per_download_microcode_operation;
u16 max_512_byte_data_blocks_per_download_microcode_operation;
u16 reserved10[19];
u16 integrity;
};
}

View file

@ -0,0 +1,335 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/ByteBuffer.h>
#include <AK/Singleton.h>
#include <AK/StringView.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Devices/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Devices/Storage/ATA/Definitions.h>
#include <Kernel/Devices/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Devices/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/IOWindow.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Process.h>
#include <Kernel/Sections.h>
#include <Kernel/WorkQueue.h>
namespace Kernel {
#define PATA_PRIMARY_IRQ 14
#define PATA_SECONDARY_IRQ 15
UNMAP_AFTER_INIT ErrorOr<NonnullRefPtr<IDEChannel>> IDEChannel::create(IDEController const& controller, IOWindowGroup io_window_group, ChannelType type)
{
auto ata_identify_data_buffer = KBuffer::try_create_with_size("ATA Identify Page"sv, 4096, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
return adopt_nonnull_ref_or_enomem(new (nothrow) IDEChannel(controller, move(io_window_group), type, move(ata_identify_data_buffer)));
}
UNMAP_AFTER_INIT ErrorOr<NonnullRefPtr<IDEChannel>> IDEChannel::create(IDEController const& controller, u8 irq, IOWindowGroup io_window_group, ChannelType type)
{
auto ata_identify_data_buffer = KBuffer::try_create_with_size("ATA Identify Page"sv, 4096, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
return adopt_nonnull_ref_or_enomem(new (nothrow) IDEChannel(controller, irq, move(io_window_group), type, move(ata_identify_data_buffer)));
}
StringView IDEChannel::channel_type_string() const
{
if (m_channel_type == ChannelType::Primary)
return "Primary"sv;
return "Secondary"sv;
}
bool IDEChannel::select_device_and_wait_until_not_busy(DeviceType device_type, size_t milliseconds_timeout)
{
microseconds_delay(20);
u8 slave = device_type == DeviceType::Slave;
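// Device register layout: bit 4 (DEV) selects master (0) or slave (1); 0xA0
// keeps the two obsolete bits (7 and 5) set, as is traditional.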
m_io_window_group.io_window().write8(ATA_REG_HDDEVSEL, 0xA0 | (slave << 4)); // First, we need to select the drive itself
microseconds_delay(20);
size_t time_elapsed = 0;
while (m_io_window_group.control_window().read8(0) & ATA_SR_BSY && time_elapsed <= milliseconds_timeout) {
microseconds_delay(1000);
time_elapsed++;
}
return time_elapsed <= milliseconds_timeout;
}
ErrorOr<void> IDEChannel::port_phy_reset()
{
MutexLocker locker(m_lock);
SpinlockLocker hard_locker(m_hard_lock);
// reset the channel
u8 device_control = m_io_window_group.control_window().read8(0);
// Wait 30 milliseconds
microseconds_delay(30000);
m_io_window_group.control_window().write8(0, device_control | (1 << 2));
// Wait 30 milliseconds
microseconds_delay(30000);
m_io_window_group.control_window().write8(0, device_control);
// Wait up to 30 seconds before failing
if (!select_device_and_wait_until_not_busy(DeviceType::Master, 30000)) {
dbgln("IDEChannel: reset failed, busy flag on master stuck");
return Error::from_errno(EBUSY);
}
// Wait up to 30 seconds before failing
if (!select_device_and_wait_until_not_busy(DeviceType::Slave, 30000)) {
dbgln("IDEChannel: reset failed, busy flag on slave stuck");
return Error::from_errno(EBUSY);
}
return {};
}
#if ARCH(X86_64)
ErrorOr<void> IDEChannel::allocate_resources_for_pci_ide_controller(Badge<PCIIDELegacyModeController>, bool force_pio)
{
return allocate_resources(force_pio);
}
ErrorOr<void> IDEChannel::allocate_resources_for_isa_ide_controller(Badge<ISAIDEController>)
{
return allocate_resources(true);
}
#endif
UNMAP_AFTER_INIT ErrorOr<void> IDEChannel::allocate_resources(bool force_pio)
{
dbgln_if(PATA_DEBUG, "IDEChannel: {} IO base: {}", channel_type_string(), m_io_window_group.io_window());
dbgln_if(PATA_DEBUG, "IDEChannel: {} control base: {}", channel_type_string(), m_io_window_group.control_window());
if (m_io_window_group.bus_master_window())
dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base: {}", channel_type_string(), m_io_window_group.bus_master_window());
else
dbgln_if(PATA_DEBUG, "IDEChannel: {} bus master base disabled", channel_type_string());
if (!force_pio) {
m_dma_enabled = true;
VERIFY(m_io_window_group.bus_master_window());
// Let's try to set up DMA transfers.
m_prdt_region = TRY(MM.allocate_dma_buffer_page("IDE PRDT"sv, Memory::Region::Access::ReadWrite, m_prdt_page));
VERIFY(!m_prdt_page.is_null());
m_dma_buffer_region = TRY(MM.allocate_dma_buffer_page("IDE DMA region"sv, Memory::Region::Access::ReadWrite, m_dma_buffer_page));
VERIFY(!m_dma_buffer_page.is_null());
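// Setting bit 15 (the end-of-table flag) marks this descriptor as the last entry in the PRDT.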
prdt().end_of_table = 0x8000;
// clear bus master interrupt status
m_io_window_group.bus_master_window()->write8(2, m_io_window_group.bus_master_window()->read8(2) | 4);
}
return {};
}
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, u8 irq, IOWindowGroup io_group, ChannelType type, NonnullOwnPtr<KBuffer> ata_identify_data_buffer)
: ATAPort(controller, (type == ChannelType::Primary ? 0 : 1), move(ata_identify_data_buffer))
, IRQHandler(irq)
, m_channel_type(type)
, m_io_window_group(move(io_group))
{
}
UNMAP_AFTER_INIT IDEChannel::IDEChannel(IDEController const& controller, IOWindowGroup io_group, ChannelType type, NonnullOwnPtr<KBuffer> ata_identify_data_buffer)
: ATAPort(controller, (type == ChannelType::Primary ? 0 : 1), move(ata_identify_data_buffer))
, IRQHandler(type == ChannelType::Primary ? PATA_PRIMARY_IRQ : PATA_SECONDARY_IRQ)
, m_channel_type(type)
, m_io_window_group(move(io_group))
{
}
UNMAP_AFTER_INIT IDEChannel::~IDEChannel() = default;
bool IDEChannel::handle_irq(RegisterState const&)
{
auto result = handle_interrupt_after_dma_transaction();
// FIXME: Propagate errors properly
VERIFY(!result.is_error());
return result.release_value();
}
ErrorOr<void> IDEChannel::stop_busmastering()
{
VERIFY(m_lock.is_locked());
VERIFY(m_io_window_group.bus_master_window());
m_io_window_group.bus_master_window()->write8(0, 0);
return {};
}
ErrorOr<void> IDEChannel::start_busmastering(TransactionDirection direction)
{
VERIFY(m_lock.is_locked());
VERIFY(m_io_window_group.bus_master_window());
m_io_window_group.bus_master_window()->write8(0, (direction != TransactionDirection::Write ? 0x9 : 0x1));
return {};
}
ErrorOr<void> IDEChannel::force_busmastering_status_clean()
{
VERIFY(m_lock.is_locked());
VERIFY(m_io_window_group.bus_master_window());
m_io_window_group.bus_master_window()->write8(2, m_io_window_group.bus_master_window()->read8(2) | 4);
return {};
}
ErrorOr<u8> IDEChannel::busmastering_status()
{
VERIFY(m_io_window_group.bus_master_window());
return m_io_window_group.bus_master_window()->read8(2);
}
ErrorOr<void> IDEChannel::prepare_transaction_with_busmastering(TransactionDirection direction, PhysicalAddress prdt_buffer)
{
VERIFY(m_lock.is_locked());
m_io_window_group.bus_master_window()->write32(4, prdt_buffer.get());
m_io_window_group.bus_master_window()->write8(0, direction != TransactionDirection::Write ? 0x8 : 0);
// Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
m_io_window_group.bus_master_window()->write8(2, m_io_window_group.bus_master_window()->read8(2) | 0x6);
return {};
}
ErrorOr<void> IDEChannel::initiate_transaction(TransactionDirection)
{
VERIFY(m_lock.is_locked());
return {};
}
ErrorOr<u8> IDEChannel::task_file_status()
{
VERIFY(m_lock.is_locked());
return m_io_window_group.control_window().read8(0);
}
ErrorOr<u8> IDEChannel::task_file_error()
{
VERIFY(m_lock.is_locked());
return m_io_window_group.io_window().read8(ATA_REG_ERROR);
}
ErrorOr<bool> IDEChannel::detect_presence_on_selected_device()
{
VERIFY(m_lock.is_locked());
m_io_window_group.io_window().write8(ATA_REG_SECCOUNT0, 0x55);
m_io_window_group.io_window().write8(ATA_REG_LBA0, 0xAA);
m_io_window_group.io_window().write8(ATA_REG_SECCOUNT0, 0xAA);
m_io_window_group.io_window().write8(ATA_REG_LBA0, 0x55);
m_io_window_group.io_window().write8(ATA_REG_SECCOUNT0, 0x55);
m_io_window_group.io_window().write8(ATA_REG_LBA0, 0xAA);
auto nsectors_value = m_io_window_group.io_window().read8(ATA_REG_SECCOUNT0);
auto lba0 = m_io_window_group.io_window().read8(ATA_REG_LBA0);
if (lba0 == 0xAA && nsectors_value == 0x55)
return true;
return false;
}
ErrorOr<void> IDEChannel::wait_if_busy_until_timeout(size_t timeout_in_milliseconds)
{
size_t time_elapsed = 0;
while (m_io_window_group.control_window().read8(0) & ATA_SR_BSY && time_elapsed <= timeout_in_milliseconds) {
microseconds_delay(1000);
time_elapsed++;
}
if (time_elapsed <= timeout_in_milliseconds)
return {};
return Error::from_errno(EBUSY);
}
ErrorOr<void> IDEChannel::force_clear_interrupts()
{
VERIFY(m_lock.is_locked());
m_io_window_group.io_window().read8(ATA_REG_STATUS);
return {};
}
ErrorOr<void> IDEChannel::load_taskfile_into_registers(ATAPort::TaskFile const& task_file, LBAMode lba_mode, size_t completion_timeout_in_milliseconds)
{
VERIFY(m_lock.is_locked());
VERIFY(m_hard_lock.is_locked());
u8 head = 0;
if (lba_mode == LBAMode::FortyEightBit) {
head = 0;
} else if (lba_mode == LBAMode::TwentyEightBit) {
head = (task_file.lba_high[0] & 0x0F);
}
// Note: Preserve the selected drive, always use LBA addressing
auto driver_register = ((m_io_window_group.io_window().read8(ATA_REG_HDDEVSEL) & (1 << 4)) | (head | (1 << 5) | (1 << 6)));
m_io_window_group.io_window().write8(ATA_REG_HDDEVSEL, driver_register);
microseconds_delay(50);
if (lba_mode == LBAMode::FortyEightBit) {
m_io_window_group.io_window().write8(ATA_REG_SECCOUNT1, (task_file.count >> 8) & 0xFF);
m_io_window_group.io_window().write8(ATA_REG_LBA3, task_file.lba_high[0]);
m_io_window_group.io_window().write8(ATA_REG_LBA4, task_file.lba_high[1]);
m_io_window_group.io_window().write8(ATA_REG_LBA5, task_file.lba_high[2]);
}
m_io_window_group.io_window().write8(ATA_REG_SECCOUNT0, task_file.count & 0xFF);
m_io_window_group.io_window().write8(ATA_REG_LBA0, task_file.lba_low[0]);
m_io_window_group.io_window().write8(ATA_REG_LBA1, task_file.lba_low[1]);
m_io_window_group.io_window().write8(ATA_REG_LBA2, task_file.lba_low[2]);
size_t time_elapsed = 0;
for (;;) {
if (time_elapsed > completion_timeout_in_milliseconds)
return Error::from_errno(EBUSY);
// FIXME: Use task_file_status method
auto status = m_io_window_group.control_window().read8(0);
if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRDY))
break;
microseconds_delay(1000);
time_elapsed++;
}
m_io_window_group.io_window().write8(ATA_REG_COMMAND, task_file.command);
return {};
}
ErrorOr<void> IDEChannel::device_select(size_t device_index)
{
VERIFY(m_lock.is_locked());
if (device_index > 1)
return Error::from_errno(EINVAL);
microseconds_delay(20);
m_io_window_group.io_window().write8(ATA_REG_HDDEVSEL, (0xA0 | ((device_index) << 4)));
microseconds_delay(20);
return {};
}
ErrorOr<void> IDEChannel::enable_interrupts()
{
VERIFY(m_lock.is_locked());
m_io_window_group.control_window().write8(0, 0);
m_interrupts_enabled = true;
return {};
}
ErrorOr<void> IDEChannel::disable_interrupts()
{
VERIFY(m_lock.is_locked());
m_io_window_group.control_window().write8(0, 1 << 1);
m_interrupts_enabled = false;
return {};
}
ErrorOr<void> IDEChannel::read_pio_data_to_buffer(UserOrKernelBuffer& buffer, size_t block_offset, size_t words_count)
{
VERIFY(m_lock.is_locked());
VERIFY(words_count == 256);
for (u32 i = 0; i < 256; ++i) {
u16 data = m_io_window_group.io_window().read16(ATA_REG_DATA);
// FIXME: Don't assume 512 bytes sector
TRY(buffer.write(&data, block_offset * 512 + (i * 2), 2));
}
return {};
}
ErrorOr<void> IDEChannel::write_pio_data_from_buffer(UserOrKernelBuffer const& buffer, size_t block_offset, size_t words_count)
{
VERIFY(m_lock.is_locked());
VERIFY(words_count == 256);
for (u32 i = 0; i < 256; ++i) {
u16 buf;
// FIXME: Don't assume 512 bytes sector
TRY(buffer.read(&buf, block_offset * 512 + (i * 2), 2));
m_io_window_group.io_window().write16(ATA_REG_DATA, buf);
}
return {};
}
}

View file

@ -0,0 +1,156 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
//
// Parallel ATA (PATA) controller driver
//
// This driver describes a logical PATA Channel. Each channel can connect up to 2
// IDE Hard Disk Drives. The drives themselves can be either the master drive (hd0)
// or the slave drive (hd1).
//
// More information about the ATA spec for PATA can be found here:
// ftp://ftp.seagate.com/acrobat/reference/111-1c.pdf
//
#pragma once
#include <AK/Error.h>
#include <Kernel/Devices/Device.h>
#include <Kernel/Devices/Storage/ATA/ATADevice.h>
#include <Kernel/Devices/Storage/ATA/ATAPort.h>
#include <Kernel/Devices/Storage/StorageDevice.h>
#include <Kernel/IOWindow.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/PhysicalAddress.h>
#include <Kernel/Random.h>
#include <Kernel/WaitQueue.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class IDEController;
#if ARCH(X86_64)
class PCIIDELegacyModeController;
class ISAIDEController;
#endif
class IDEChannel
: public ATAPort
, public IRQHandler {
friend class IDEController;
public:
enum class ChannelType : u8 {
Primary,
Secondary
};
enum class DeviceType : u8 {
Master,
Slave,
};
struct IOWindowGroup {
IOWindowGroup(NonnullOwnPtr<IOWindow> io_window, NonnullOwnPtr<IOWindow> control_window, NonnullOwnPtr<IOWindow> bus_master_window)
: m_io_window(move(io_window))
, m_control_window(move(control_window))
, m_bus_master_window(move(bus_master_window))
{
}
IOWindowGroup(NonnullOwnPtr<IOWindow> io_window, NonnullOwnPtr<IOWindow> control_window)
: m_io_window(move(io_window))
, m_control_window(move(control_window))
{
}
// Disable default implementations that would use surprising integer promotion.
bool operator==(IOWindowGroup const&) const = delete;
bool operator<=(IOWindowGroup const&) const = delete;
bool operator>=(IOWindowGroup const&) const = delete;
bool operator<(IOWindowGroup const&) const = delete;
bool operator>(IOWindowGroup const&) const = delete;
IOWindow& io_window() const { return *m_io_window; };
IOWindow& control_window() const { return *m_control_window; }
IOWindow* bus_master_window() const { return m_bus_master_window.ptr(); }
private:
mutable NonnullOwnPtr<IOWindow> m_io_window;
mutable NonnullOwnPtr<IOWindow> m_control_window;
mutable OwnPtr<IOWindow> m_bus_master_window;
};
public:
static ErrorOr<NonnullRefPtr<IDEChannel>> create(IDEController const&, IOWindowGroup, ChannelType type);
static ErrorOr<NonnullRefPtr<IDEChannel>> create(IDEController const&, u8 irq, IOWindowGroup, ChannelType type);
virtual ~IDEChannel() override;
virtual StringView purpose() const override { return "PATA Channel"sv; }
#if ARCH(X86_64)
ErrorOr<void> allocate_resources_for_pci_ide_controller(Badge<PCIIDELegacyModeController>, bool force_pio);
ErrorOr<void> allocate_resources_for_isa_ide_controller(Badge<ISAIDEController>);
#endif
private:
static constexpr size_t m_logical_sector_size = 512;
ErrorOr<void> allocate_resources(bool force_pio);
StringView channel_type_string() const;
virtual ErrorOr<void> disable() override { TODO(); }
virtual ErrorOr<void> power_on() override { TODO(); }
virtual ErrorOr<void> port_phy_reset() override;
bool select_device_and_wait_until_not_busy(DeviceType, size_t milliseconds_timeout);
virtual bool pio_capable() const override { return true; }
virtual bool dma_capable() const override { return m_dma_enabled; }
virtual size_t max_possible_devices_connected() const override { return 2; }
virtual ErrorOr<void> stop_busmastering() override;
virtual ErrorOr<void> start_busmastering(TransactionDirection) override;
virtual ErrorOr<void> force_busmastering_status_clean() override;
virtual ErrorOr<u8> busmastering_status() override;
virtual ErrorOr<void> prepare_transaction_with_busmastering(TransactionDirection, PhysicalAddress prdt_buffer) override;
virtual ErrorOr<void> initiate_transaction(TransactionDirection) override;
virtual ErrorOr<u8> task_file_status() override;
virtual ErrorOr<u8> task_file_error() override;
virtual ErrorOr<void> wait_if_busy_until_timeout(size_t timeout_in_milliseconds) override;
virtual ErrorOr<void> device_select(size_t device_index) override;
virtual ErrorOr<bool> detect_presence_on_selected_device() override;
virtual ErrorOr<void> enable_interrupts() override;
virtual ErrorOr<void> disable_interrupts() override;
virtual ErrorOr<void> force_clear_interrupts() override;
virtual ErrorOr<void> load_taskfile_into_registers(TaskFile const&, LBAMode lba_mode, size_t completion_timeout_in_milliseconds) override;
virtual ErrorOr<void> read_pio_data_to_buffer(UserOrKernelBuffer&, size_t block_offset, size_t words_count) override;
virtual ErrorOr<void> write_pio_data_from_buffer(UserOrKernelBuffer const&, size_t block_offset, size_t words_count) override;
IDEChannel(IDEController const&, IOWindowGroup, ChannelType type, NonnullOwnPtr<KBuffer> ata_identify_data_buffer);
IDEChannel(IDEController const&, u8 irq, IOWindowGroup, ChannelType type, NonnullOwnPtr<KBuffer> ata_identify_data_buffer);
// ^IRQHandler
virtual bool handle_irq(RegisterState const&) override;
// Data members
ChannelType m_channel_type { ChannelType::Primary };
bool m_dma_enabled { false };
bool m_interrupts_enabled { true };
IOWindowGroup m_io_window_group;
};
}

View file

@ -0,0 +1,95 @@
/*
* Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Devices/Storage/ATA/ATADiskDevice.h>
#include <Kernel/Devices/Storage/ATA/GenericIDE/Channel.h>
#include <Kernel/Devices/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Sections.h>
namespace Kernel {
ErrorOr<void> IDEController::reset()
{
return Error::from_errno(ENOTIMPL);
}
ErrorOr<void> IDEController::shutdown()
{
return Error::from_errno(ENOTIMPL);
}
size_t IDEController::devices_count() const
{
size_t count = 0;
for (u32 index = 0; index < 4; index++) {
if (!device(index).is_null())
count++;
}
return count;
}
void IDEController::start_request(ATADevice const& device, AsyncBlockDeviceRequest& request)
{
auto& address = device.ata_address();
VERIFY(address.subport < 2);
switch (address.port) {
case 0: {
auto result = m_channels[0]->start_request(device, request);
// FIXME: Propagate errors properly
VERIFY(!result.is_error());
return;
}
case 1: {
auto result = m_channels[1]->start_request(device, request);
// FIXME: Propagate errors properly
VERIFY(!result.is_error());
return;
}
}
VERIFY_NOT_REACHED();
}
void IDEController::complete_current_request(AsyncDeviceRequest::RequestResult)
{
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT IDEController::IDEController() = default;
UNMAP_AFTER_INIT IDEController::~IDEController() = default;
LockRefPtr<StorageDevice> IDEController::device_by_channel_and_position(u32 index) const
{
switch (index) {
case 0:
return m_channels[0]->connected_device(0);
case 1:
return m_channels[0]->connected_device(1);
case 2:
return m_channels[1]->connected_device(0);
case 3:
return m_channels[1]->connected_device(1);
}
VERIFY_NOT_REACHED();
}
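// Worked example of the mapping above: index 2 resolves to the secondary
// channel's master device (channel 1, position 0), mirroring the classic
// primary/secondary, master/slave IDE cabling layout.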
LockRefPtr<StorageDevice> IDEController::device(u32 index) const
{
Vector<NonnullLockRefPtr<StorageDevice>> connected_devices;
for (size_t i = 0; i < 4; i++) {
auto checked_device = device_by_channel_and_position(i);
if (checked_device.is_null())
continue;
connected_devices.append(checked_device.release_nonnull());
}
if (index >= connected_devices.size())
return nullptr;
return connected_devices[index];
}
}

View file

@ -0,0 +1,36 @@
/*
* Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Devices/Storage/ATA/ATAController.h>
#include <Kernel/Devices/Storage/StorageDevice.h>
#include <Kernel/Library/LockRefPtr.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class IDEChannel;
class IDEController : public ATAController {
public:
virtual ~IDEController() override;
virtual LockRefPtr<StorageDevice> device(u32 index) const override final;
virtual ErrorOr<void> reset() override final;
virtual ErrorOr<void> shutdown() override final;
virtual size_t devices_count() const override final;
virtual void start_request(ATADevice const&, AsyncBlockDeviceRequest&) override final;
virtual void complete_current_request(AsyncDeviceRequest::RequestResult) override final;
protected:
IDEController();
LockRefPtr<StorageDevice> device_by_channel_and_position(u32 index) const;
Array<RefPtr<IDEChannel>, 2> m_channels;
};
}

View file

@ -0,0 +1,81 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Debug.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/Storage/DiskPartition.h>
#include <Kernel/FileSystem/OpenFileDescription.h>
namespace Kernel {
NonnullLockRefPtr<DiskPartition> DiskPartition::create(BlockDevice& device, MinorNumber minor_number, Partition::DiskPartitionMetadata metadata)
{
auto partition_or_error = DeviceManagement::try_create_device<DiskPartition>(device, minor_number, metadata);
// FIXME: Find a way to propagate errors
VERIFY(!partition_or_error.is_error());
return partition_or_error.release_value();
}
DiskPartition::DiskPartition(BlockDevice& device, MinorNumber minor_number, Partition::DiskPartitionMetadata metadata)
: BlockDevice(100, minor_number, device.block_size())
, m_device(device)
, m_metadata(metadata)
{
}
DiskPartition::~DiskPartition() = default;
Partition::DiskPartitionMetadata const& DiskPartition::metadata() const
{
return m_metadata;
}
void DiskPartition::start_request(AsyncBlockDeviceRequest& request)
{
auto device = m_device.strong_ref();
if (!device)
request.complete(AsyncBlockDeviceRequest::RequestResult::Failure);
auto sub_request_or_error = device->try_make_request<AsyncBlockDeviceRequest>(request.request_type(),
request.block_index() + m_metadata.start_block(), request.block_count(), request.buffer(), request.buffer_size());
if (sub_request_or_error.is_error())
TODO();
request.add_sub_request(sub_request_or_error.release_value());
}
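// Worked example of the block translation above: for a partition whose
// metadata reports start_block() == 2048, an incoming request for block 10
// is re-issued to the underlying device as a sub-request for block 2058,
// and its completion propagates back through the sub-request chain.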
ErrorOr<size_t> DiskPartition::read(OpenFileDescription& fd, u64 offset, UserOrKernelBuffer& outbuf, size_t len)
{
u64 adjust = m_metadata.start_block() * block_size();
dbgln_if(OFFD_DEBUG, "DiskPartition::read offset={}, adjust={}, len={}", fd.offset(), adjust, len);
return m_device.strong_ref()->read(fd, offset + adjust, outbuf, len);
}
bool DiskPartition::can_read(OpenFileDescription const& fd, u64 offset) const
{
u64 adjust = m_metadata.start_block() * block_size();
dbgln_if(OFFD_DEBUG, "DiskPartition::can_read offset={}, adjust={}", offset, adjust);
return m_device.strong_ref()->can_read(fd, offset + adjust);
}
ErrorOr<size_t> DiskPartition::write(OpenFileDescription& fd, u64 offset, UserOrKernelBuffer const& inbuf, size_t len)
{
u64 adjust = m_metadata.start_block() * block_size();
dbgln_if(OFFD_DEBUG, "DiskPartition::write offset={}, adjust={}, len={}", offset, adjust, len);
return m_device.strong_ref()->write(fd, offset + adjust, inbuf, len);
}
bool DiskPartition::can_write(OpenFileDescription const& fd, u64 offset) const
{
u64 adjust = m_metadata.start_block() * block_size();
dbgln_if(OFFD_DEBUG, "DiskPartition::can_write offset={}, adjust={}", offset, adjust);
return m_device.strong_ref()->can_write(fd, offset + adjust);
}
StringView DiskPartition::class_name() const
{
return "DiskPartition"sv;
}
}

View file

@ -0,0 +1,41 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Devices/BlockDevice.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Library/LockWeakPtr.h>
#include <LibPartition/DiskPartitionMetadata.h>
namespace Kernel {
class DiskPartition final : public BlockDevice {
friend class DeviceManagement;
public:
static NonnullLockRefPtr<DiskPartition> create(BlockDevice&, MinorNumber, Partition::DiskPartitionMetadata);
virtual ~DiskPartition();
virtual void start_request(AsyncBlockDeviceRequest&) override;
// ^BlockDevice
virtual ErrorOr<size_t> read(OpenFileDescription&, u64, UserOrKernelBuffer&, size_t) override;
virtual bool can_read(OpenFileDescription const&, u64) const override;
virtual ErrorOr<size_t> write(OpenFileDescription&, u64, UserOrKernelBuffer const&, size_t) override;
virtual bool can_write(OpenFileDescription const&, u64) const override;
Partition::DiskPartitionMetadata const& metadata() const;
private:
DiskPartition(BlockDevice&, MinorNumber, Partition::DiskPartitionMetadata);
virtual StringView class_name() const override;
LockWeakPtr<BlockDevice> m_device;
Partition::DiskPartitionMetadata m_metadata;
};
}

View file

@ -0,0 +1,360 @@
/*
* Copyright (c) 2021, Pankaj R <pankydev8@gmail.com>
* Copyright (c) 2022, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Format.h>
#include <AK/Types.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Arch/Interrupts.h>
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Devices/Device.h>
#include <Kernel/Devices/Storage/NVMe/NVMeController.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Sections.h>
namespace Kernel {
UNMAP_AFTER_INIT ErrorOr<NonnullRefPtr<NVMeController>> NVMeController::try_initialize(Kernel::PCI::DeviceIdentifier const& device_identifier, bool is_queue_polled)
{
auto controller = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) NVMeController(device_identifier, StorageManagement::generate_relative_nvme_controller_id({}))));
TRY(controller->initialize(is_queue_polled));
return controller;
}
UNMAP_AFTER_INIT NVMeController::NVMeController(PCI::DeviceIdentifier const& device_identifier, u32 hardware_relative_controller_id)
: PCI::Device(const_cast<PCI::DeviceIdentifier&>(device_identifier))
, StorageController(hardware_relative_controller_id)
{
}
UNMAP_AFTER_INIT ErrorOr<void> NVMeController::initialize(bool is_queue_polled)
{
// Nr of queues = one queue per core
auto nr_of_queues = Processor::count();
auto queue_type = is_queue_polled ? QueueType::Polled : QueueType::IRQ;
PCI::enable_memory_space(device_identifier());
PCI::enable_bus_mastering(device_identifier());
m_bar = PCI::get_BAR0(device_identifier()) & PCI::bar_address_mask;
static_assert(sizeof(ControllerRegister) == REG_SQ0TDBL_START);
static_assert(sizeof(NVMeSubmission) == (1 << SQ_WIDTH));
// Map only until doorbell register for the controller
// Queues will individually map the doorbell register respectively
m_controller_regs = TRY(Memory::map_typed_writable<ControllerRegister volatile>(PhysicalAddress(m_bar)));
auto caps = m_controller_regs->cap;
m_ready_timeout = Duration::from_milliseconds((CAP_TO(caps) + 1) * 500); // CAP.TO is in 500ms units
calculate_doorbell_stride();
// IO queues + 1 admin queue
m_irq_type = TRY(reserve_irqs(nr_of_queues + 1, true));
TRY(create_admin_queue(queue_type));
VERIFY(m_admin_queue_ready == true);
VERIFY(IO_QUEUE_SIZE < MQES(caps));
dbgln_if(NVME_DEBUG, "NVMe: IO queue depth is: {}", IO_QUEUE_SIZE);
// Create an IO queue per core
for (u32 cpuid = 0; cpuid < nr_of_queues; ++cpuid) {
// qid 0 is reserved for the admin queue
TRY(create_io_queue(cpuid + 1, queue_type));
}
TRY(identify_and_init_namespaces());
return {};
}
bool NVMeController::wait_for_ready(bool expected_ready_bit_value)
{
constexpr size_t one_ms_io_delay = 1000;
auto wait_iterations = m_ready_timeout.to_milliseconds();
u32 expected_rdy = expected_ready_bit_value ? 1 : 0;
while (((m_controller_regs->csts >> CSTS_RDY_BIT) & 0x1) != expected_rdy) {
microseconds_delay(one_ms_io_delay);
if (--wait_iterations == 0) {
if (((m_controller_regs->csts >> CSTS_RDY_BIT) & 0x1) != expected_rdy) {
dbgln_if(NVME_DEBUG, "NVMEController: CSTS.RDY still not set to {} after {} ms", expected_rdy, m_ready_timeout.to_milliseconds());
return false;
}
break;
}
}
return true;
}
ErrorOr<void> NVMeController::reset_controller()
{
if ((m_controller_regs->cc & (1 << CC_EN_BIT)) != 0) {
// If the EN bit is already set, we need to wait
// until the RDY bit is 1, otherwise the behavior is undefined
if (!wait_for_ready(true))
return Error::from_errno(ETIMEDOUT);
}
auto cc = m_controller_regs->cc;
cc = cc & ~(1 << CC_EN_BIT);
m_controller_regs->cc = cc;
full_memory_barrier();
// Wait until the RDY bit is cleared
if (!wait_for_ready(false))
return Error::from_errno(ETIMEDOUT);
return {};
}
ErrorOr<void> NVMeController::start_controller()
{
if (!(m_controller_regs->cc & (1 << CC_EN_BIT))) {
// If the EN bit is not already set, we need to wait
// until the RDY bit is 0, otherwise the behavior is undefined
if (!wait_for_ready(false))
return Error::from_errno(ETIMEDOUT);
}
auto cc = m_controller_regs->cc;
cc = cc | (1 << CC_EN_BIT);
cc = cc | (CQ_WIDTH << CC_IOCQES_BIT);
cc = cc | (SQ_WIDTH << CC_IOSQES_BIT);
m_controller_regs->cc = cc;
full_memory_barrier();
// Wait until the RDY bit is set
if (!wait_for_ready(true))
return Error::from_errno(ETIMEDOUT);
return {};
}
UNMAP_AFTER_INIT u32 NVMeController::get_admin_q_depth()
{
u32 aqa = m_controller_regs->aqa;
// Queue depth is 0 based
u32 q_depth = min(ACQ_SIZE(aqa), ASQ_SIZE(aqa)) + 1;
dbgln_if(NVME_DEBUG, "NVMe: Admin queue depth is {}", q_depth);
return q_depth;
}
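// Worked example: an AQA value of 0x001f001f encodes ACQS == ASQS == 31,
// so the zero-based admin queue depth resolves to min(31, 31) + 1 == 32 entries.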
UNMAP_AFTER_INIT ErrorOr<void> NVMeController::identify_and_init_namespaces()
{
RefPtr<Memory::PhysicalPage> prp_dma_buffer;
OwnPtr<Memory::Region> prp_dma_region;
auto namespace_data_struct = TRY(ByteBuffer::create_zeroed(NVMe_IDENTIFY_SIZE));
u32 active_namespace_list[NVMe_IDENTIFY_SIZE / sizeof(u32)];
{
auto buffer = TRY(MM.allocate_dma_buffer_page("Identify PRP"sv, Memory::Region::Access::ReadWrite, prp_dma_buffer));
prp_dma_region = move(buffer);
}
// Get the active namespace
{
NVMeSubmission sub {};
u16 status = 0;
sub.op = OP_ADMIN_IDENTIFY;
sub.identify.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(prp_dma_buffer->paddr().as_ptr()));
sub.identify.cns = NVMe_CNS_ID_ACTIVE_NS & 0xff;
status = submit_admin_command(sub, true);
if (status) {
dmesgln_pci(*this, "Failed to identify active namespace command");
return EFAULT;
}
if (void* fault_at; !safe_memcpy(active_namespace_list, prp_dma_region->vaddr().as_ptr(), NVMe_IDENTIFY_SIZE, fault_at)) {
return EFAULT;
}
}
// Get the NAMESPACE attributes
{
NVMeSubmission sub {};
IdentifyNamespace id_ns {};
u16 status = 0;
for (auto nsid : active_namespace_list) {
memset(prp_dma_region->vaddr().as_ptr(), 0, NVMe_IDENTIFY_SIZE);
// Invalid NS
if (nsid == 0)
break;
sub.op = OP_ADMIN_IDENTIFY;
sub.identify.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(prp_dma_buffer->paddr().as_ptr()));
sub.identify.cns = NVMe_CNS_ID_NS & 0xff;
sub.identify.nsid = nsid;
status = submit_admin_command(sub, true);
if (status) {
dmesgln_pci(*this, "Failed identify namespace with nsid {}", nsid);
return EFAULT;
}
static_assert(sizeof(IdentifyNamespace) == NVMe_IDENTIFY_SIZE);
if (void* fault_at; !safe_memcpy(&id_ns, prp_dma_region->vaddr().as_ptr(), NVMe_IDENTIFY_SIZE, fault_at)) {
return EFAULT;
}
auto val = get_ns_features(id_ns);
auto block_counts = val.get<0>();
auto block_size = 1 << val.get<1>();
dbgln_if(NVME_DEBUG, "NVMe: Block count is {} and Block size is {}", block_counts, block_size);
m_namespaces.append(TRY(NVMeNameSpace::try_create(*this, m_queues, nsid, block_counts, block_size)));
m_device_count++;
dbgln_if(NVME_DEBUG, "NVMe: Initialized namespace with NSID: {}", nsid);
}
}
return {};
}
UNMAP_AFTER_INIT Tuple<u64, u8> NVMeController::get_ns_features(IdentifyNamespace& identify_data_struct)
{
auto flbas = identify_data_struct.flbas & FLBA_SIZE_MASK;
auto namespace_size = identify_data_struct.nsze;
auto lba_format = identify_data_struct.lbaf[flbas];
auto lba_size = (lba_format & LBA_SIZE_MASK) >> 16;
return Tuple<u64, u8>(namespace_size, lba_size);
}
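// Worked example of the decoding above, assuming FLBAS selects entry 0 and
// lbaf[0] == 0x00090000: the LBADS field (bits 16..23) is 9, so the namespace
// uses 2^9 == 512-byte blocks, which identify_and_init_namespaces() derives
// via (1 << val.get<1>()).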
LockRefPtr<StorageDevice> NVMeController::device(u32 index) const
{
return m_namespaces.at(index);
}
size_t NVMeController::devices_count() const
{
return m_device_count;
}
ErrorOr<void> NVMeController::reset()
{
TRY(reset_controller());
TRY(start_controller());
return {};
}
ErrorOr<void> NVMeController::shutdown()
{
return Error::from_errno(ENOTIMPL);
}
void NVMeController::complete_current_request([[maybe_unused]] AsyncDeviceRequest::RequestResult result)
{
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(QueueType queue_type)
{
auto qdepth = get_admin_q_depth();
OwnPtr<Memory::Region> cq_dma_region;
Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_pages;
OwnPtr<Memory::Region> sq_dma_region;
Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_pages;
auto cq_size = round_up_to_power_of_two(CQ_SIZE(qdepth), 4096);
auto sq_size = round_up_to_power_of_two(SQ_SIZE(qdepth), 4096);
auto maybe_error = reset_controller();
if (maybe_error.is_error()) {
dmesgln_pci(*this, "Failed to reset the NVMe controller");
return maybe_error;
}
{
auto buffer = TRY(MM.allocate_dma_buffer_pages(cq_size, "Admin CQ queue"sv, Memory::Region::Access::ReadWrite, cq_dma_pages));
cq_dma_region = move(buffer);
}
// Phase bit is important to determine completion, so zero out the space
// so that we don't get any garbage phase bit value
memset(cq_dma_region->vaddr().as_ptr(), 0, cq_size);
{
auto buffer = TRY(MM.allocate_dma_buffer_pages(sq_size, "Admin SQ queue"sv, Memory::Region::Access::ReadWrite, sq_dma_pages));
sq_dma_region = move(buffer);
}
auto doorbell_regs = TRY(Memory::map_typed_writable<DoorbellRegister volatile>(PhysicalAddress(m_bar + REG_SQ0TDBL_START)));
m_controller_regs->acq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first()->paddr().as_ptr()));
m_controller_regs->asq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first()->paddr().as_ptr()));
auto irq = TRY(allocate_irq(0)); // Admin queue always uses the 0th index when using MSIx
maybe_error = start_controller();
if (maybe_error.is_error()) {
dmesgln_pci(*this, "Failed to restart the NVMe controller");
return maybe_error;
}
set_admin_queue_ready_flag();
m_admin_queue = TRY(NVMeQueue::try_create(*this, 0, irq, qdepth, move(cq_dma_region), move(sq_dma_region), move(doorbell_regs), queue_type));
dbgln_if(NVME_DEBUG, "NVMe: Admin queue created");
return {};
}
UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, QueueType queue_type)
{
OwnPtr<Memory::Region> cq_dma_region;
Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_pages;
OwnPtr<Memory::Region> sq_dma_region;
Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_pages;
auto cq_size = round_up_to_power_of_two(CQ_SIZE(IO_QUEUE_SIZE), 4096);
auto sq_size = round_up_to_power_of_two(SQ_SIZE(IO_QUEUE_SIZE), 4096);
{
auto buffer = TRY(MM.allocate_dma_buffer_pages(cq_size, "IO CQ queue"sv, Memory::Region::Access::ReadWrite, cq_dma_pages));
cq_dma_region = move(buffer);
}
// Phase bit is important to determine completion, so zero out the space
// so that we don't get any garbage phase bit value
memset(cq_dma_region->vaddr().as_ptr(), 0, cq_size);
{
auto buffer = TRY(MM.allocate_dma_buffer_pages(sq_size, "IO SQ queue"sv, Memory::Region::Access::ReadWrite, sq_dma_pages));
sq_dma_region = move(buffer);
}
{
NVMeSubmission sub {};
sub.op = OP_ADMIN_CREATE_COMPLETION_QUEUE;
sub.create_cq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first()->paddr().as_ptr()));
sub.create_cq.cqid = qid;
// The queue size is 0 based
sub.create_cq.qsize = AK::convert_between_host_and_little_endian(IO_QUEUE_SIZE - 1);
auto flags = (queue_type == QueueType::IRQ) ? QUEUE_IRQ_ENABLED : QUEUE_IRQ_DISABLED;
flags |= QUEUE_PHY_CONTIGUOUS;
// When using MSIx interrupts, qid is used as an index into the interrupt table
sub.create_cq.irq_vector = (m_irq_type == PCI::InterruptType::PIN) ? 0 : qid;
sub.create_cq.cq_flags = AK::convert_between_host_and_little_endian(flags & 0xFFFF);
submit_admin_command(sub, true);
}
{
NVMeSubmission sub {};
sub.op = OP_ADMIN_CREATE_SUBMISSION_QUEUE;
sub.create_sq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first()->paddr().as_ptr()));
sub.create_sq.sqid = qid;
// The queue size is 0 based
sub.create_sq.qsize = AK::convert_between_host_and_little_endian(IO_QUEUE_SIZE - 1);
auto flags = QUEUE_PHY_CONTIGUOUS;
sub.create_sq.cqid = qid;
sub.create_sq.sq_flags = AK::convert_between_host_and_little_endian(flags);
submit_admin_command(sub, true);
}
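// Worked example for the doorbell arithmetic below, assuming CAP.DSTRD == 0
// (a 4-byte stride) and qid == 1: the submission tail doorbell (SQ1TDBL) lands
// at 0x1000 + (2 * 1) * 4 == 0x1008, with the matching completion head
// doorbell (CQ1HDBL) in the adjacent slot, per the NVMe doorbell layout.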
auto queue_doorbell_offset = REG_SQ0TDBL_START + ((2 * qid) * (4 << m_dbl_stride));
auto doorbell_regs = TRY(Memory::map_typed_writable<DoorbellRegister volatile>(PhysicalAddress(m_bar + queue_doorbell_offset)));
auto irq = TRY(allocate_irq(qid));
m_queues.append(TRY(NVMeQueue::try_create(*this, qid, irq, IO_QUEUE_SIZE, move(cq_dma_region), move(sq_dma_region), move(doorbell_regs), queue_type)));
dbgln_if(NVME_DEBUG, "NVMe: Created IO Queue with QID{}", m_queues.size());
return {};
}
}

View file

@ -0,0 +1,84 @@
/*
* Copyright (c) 2021, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/Time.h>
#include <AK/Tuple.h>
#include <AK/Types.h>
#include <Kernel/Bus/PCI/Device.h>
#include <Kernel/Devices/Storage/NVMe/NVMeDefinitions.h>
#include <Kernel/Devices/Storage/NVMe/NVMeNameSpace.h>
#include <Kernel/Devices/Storage/NVMe/NVMeQueue.h>
#include <Kernel/Devices/Storage/StorageController.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/TypedMapping.h>
namespace Kernel {
class NVMeController : public PCI::Device
, public StorageController {
public:
static ErrorOr<NonnullRefPtr<NVMeController>> try_initialize(PCI::DeviceIdentifier const&, bool is_queue_polled);
ErrorOr<void> initialize(bool is_queue_polled);
LockRefPtr<StorageDevice> device(u32 index) const override;
size_t devices_count() const override;
virtual StringView device_name() const override { return "NVMeController"sv; }
protected:
ErrorOr<void> reset() override;
ErrorOr<void> shutdown() override;
void complete_current_request(AsyncDeviceRequest::RequestResult result) override;
public:
ErrorOr<void> reset_controller();
ErrorOr<void> start_controller();
u32 get_admin_q_depth();
u16 submit_admin_command(NVMeSubmission& sub, bool sync = false)
{
// First queue is always the admin queue
if (sync) {
return m_admin_queue->submit_sync_sqe(sub);
}
m_admin_queue->submit_sqe(sub);
return 0;
}
bool is_admin_queue_ready() { return m_admin_queue_ready; };
void set_admin_queue_ready_flag() { m_admin_queue_ready = true; };
private:
NVMeController(PCI::DeviceIdentifier const&, u32 hardware_relative_controller_id);
ErrorOr<void> identify_and_init_namespaces();
Tuple<u64, u8> get_ns_features(IdentifyNamespace& identify_data_struct);
ErrorOr<void> create_admin_queue(QueueType queue_type);
ErrorOr<void> create_io_queue(u8 qid, QueueType queue_type);
void calculate_doorbell_stride()
{
m_dbl_stride = (m_controller_regs->cap >> CAP_DBL_SHIFT) & CAP_DBL_MASK;
}
bool wait_for_ready(bool);
private:
LockRefPtr<NVMeQueue> m_admin_queue;
Vector<NonnullLockRefPtr<NVMeQueue>> m_queues;
Vector<NonnullLockRefPtr<NVMeNameSpace>> m_namespaces;
Memory::TypedMapping<ControllerRegister volatile> m_controller_regs;
bool m_admin_queue_ready { false };
size_t m_device_count { 0 };
AK::Duration m_ready_timeout;
u32 m_bar { 0 };
u8 m_dbl_stride { 0 };
PCI::InterruptType m_irq_type;
QueueType m_queue_type { QueueType::IRQ };
static Atomic<u8> s_controller_id;
};
}

View file

@ -0,0 +1,216 @@
/*
* Copyright (c) 2021, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Endian.h>
#include <AK/Types.h>
struct ControllerRegister {
u64 cap;
u32 vs;
u32 intms;
u32 intmc;
u32 cc;
u32 rsvd1;
u32 csts;
u32 nssr;
u32 aqa;
u64 asq;
u64 acq;
u64 rsvd2[505];
};
struct IdentifyNamespace {
u64 nsze;
u64 ncap;
u8 rsvd1[10];
u8 flbas;
u8 rsvd2[100];
u32 lbaf[16];
u64 rsvd3[488];
};
// DOORBELL
static constexpr u32 REG_SQ0TDBL_START = 0x1000;
static constexpr u32 REG_SQ0TDBL_END = 0x1003;
static constexpr u8 DBL_REG_SIZE = 8;
// CAP
static constexpr u8 CAP_DBL_SHIFT = 32;
static constexpr u8 CAP_DBL_MASK = 0xf;
static constexpr u8 CAP_TO_SHIFT = 24;
static constexpr u64 CAP_TO_MASK = u64(0xff) << CAP_TO_SHIFT;
static constexpr u32 MQES(u64 cap)
{
// Note: The MQES field is zero-based, so adding 1 can yield 0x10000; return u32 to avoid truncating it.
return (cap & 0xffff) + 1;
}
static constexpr u32 CAP_TO(u64 cap)
{
return (cap & CAP_TO_MASK) >> CAP_TO_SHIFT;
}
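// Worked examples for the two helpers above (compile-time sanity checks):
// a CAP register whose low 16 bits read 0x00ff advertises a maximum queue
// depth of 256 entries (the field is zero-based), and CAP.TO == 3 means a
// worst-case ready timeout of (3 + 1) * 500 ms == 2000 ms, which is how
// m_ready_timeout is derived in NVMeController::initialize().
static_assert(MQES(0x00ff) == 256);
static_assert(CAP_TO(u64(3) << CAP_TO_SHIFT) == 3);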
// CC Controller Configuration
static constexpr u8 CC_EN_BIT = 0x0;
static constexpr u8 CSTS_RDY_BIT = 0x0;
static constexpr u8 CSTS_SHST_SHIFT = 2;
static constexpr u32 CSTS_SHST_MASK = 0x3 << CSTS_SHST_SHIFT;
static constexpr u8 CC_IOSQES_BIT = 16;
static constexpr u8 CC_IOCQES_BIT = 20;
static constexpr u32 CSTS_SHST(u32 x)
{
return (x & CSTS_SHST_MASK) >> CSTS_SHST_SHIFT;
}
static constexpr u16 CC_AQA_MASK = (0xfff);
static constexpr u16 ACQ_SIZE(u32 x)
{
return (x >> 16) & CC_AQA_MASK;
}
static constexpr u16 ASQ_SIZE(u32 x)
{
return x & CC_AQA_MASK;
}
static constexpr u8 CQ_WIDTH = 4; // A CQ entry is 16 bytes (2^4) in size.
static constexpr u8 SQ_WIDTH = 6; // An SQ entry is 64 bytes (2^6) in size.
static constexpr u16 CQ_SIZE(u16 q_depth)
{
return q_depth << CQ_WIDTH;
}
static constexpr u16 SQ_SIZE(u16 q_depth)
{
return q_depth << SQ_WIDTH;
}
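// Compile-time sanity checks for the size helpers above, assuming a queue
// depth of 64 (the current IO_QUEUE_SIZE): the CQ ring occupies 1 KiB and the
// SQ ring 4 KiB, which is why queue creation rounds both up to a full
// 4096-byte page before allocating the DMA buffers.
static_assert(CQ_SIZE(64) == 1024);
static_assert(SQ_SIZE(64) == 4096);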
static constexpr u8 PHASE_TAG(u16 x)
{
return x & 0x1;
}
static constexpr u16 CQ_STATUS_FIELD_MASK = 0xfffe;
static constexpr u16 CQ_STATUS_FIELD(u16 x)
{
return (x & CQ_STATUS_FIELD_MASK) >> 1;
}
static constexpr u16 IO_QUEUE_SIZE = 64; // TODO: Make this configurable
// IDENTIFY
static constexpr u16 NVMe_IDENTIFY_SIZE = 4096;
static constexpr u8 NVMe_CNS_ID_ACTIVE_NS = 0x2;
static constexpr u8 NVMe_CNS_ID_NS = 0x0;
static constexpr u8 FLBA_SIZE_INDEX = 26;
static constexpr u8 FLBA_SIZE_MASK = 0xf;
static constexpr u8 LBA_FORMAT_SUPPORT_INDEX = 128;
static constexpr u32 LBA_SIZE_MASK = 0x00ff0000;
// OPCODES
// ADMIN COMMAND SET
enum AdminCommandOpCode {
OP_ADMIN_CREATE_COMPLETION_QUEUE = 0x5,
OP_ADMIN_CREATE_SUBMISSION_QUEUE = 0x1,
OP_ADMIN_IDENTIFY = 0x6,
};
// IO opcodes
enum IOCommandOpcode {
OP_NVME_WRITE = 0x1,
OP_NVME_READ = 0x2
};
// FLAGS
static constexpr u8 QUEUE_PHY_CONTIGUOUS = (1 << 0);
static constexpr u8 QUEUE_IRQ_ENABLED = (1 << 1);
static constexpr u8 QUEUE_IRQ_DISABLED = (0 << 1);
struct [[gnu::packed]] NVMeCompletion {
LittleEndian<u32> cmd_spec;
LittleEndian<u32> res;
LittleEndian<u16> sq_head; /* how much of this queue may be reclaimed */
LittleEndian<u16> sq_id; /* submission queue that generated this entry */
u16 command_id; /* of the command which completed */
LittleEndian<u16> status; /* did the command fail, and if so, why? */
};
struct [[gnu::packed]] DataPtr {
LittleEndian<u64> prp1;
LittleEndian<u64> prp2;
};
struct [[gnu::packed]] NVMeGenericCmd {
LittleEndian<u32> nsid;
LittleEndian<u64> rsvd;
LittleEndian<u64> metadata;
struct DataPtr data_ptr;
LittleEndian<u32> cdw10;
LittleEndian<u32> cdw11;
LittleEndian<u32> cdw12;
LittleEndian<u32> cdw13;
LittleEndian<u32> cdw14;
LittleEndian<u32> cdw15;
};
struct [[gnu::packed]] NVMeRWCmd {
LittleEndian<u32> nsid;
LittleEndian<u64> rsvd;
LittleEndian<u64> metadata;
struct DataPtr data_ptr;
LittleEndian<u64> slba;
LittleEndian<u16> length;
LittleEndian<u16> control;
LittleEndian<u32> dsmgmt;
LittleEndian<u32> reftag;
LittleEndian<u16> apptag;
LittleEndian<u16> appmask;
};
struct [[gnu::packed]] NVMeIdentifyCmd {
LittleEndian<u32> nsid;
LittleEndian<u64> rsvd1[2];
struct DataPtr data_ptr;
u8 cns;
u8 rsvd2;
LittleEndian<u16> ctrlid;
u8 rsvd3[3];
u8 csi;
u64 rsvd4[2];
};
struct [[gnu::packed]] NVMeCreateCQCmd {
u32 rsvd1[5];
LittleEndian<u64> prp1;
u64 rsvd2;
LittleEndian<u16> cqid;
LittleEndian<u16> qsize;
LittleEndian<u16> cq_flags;
LittleEndian<u16> irq_vector;
u64 rsvd12[2];
};
struct [[gnu::packed]] NVMeCreateSQCmd {
u32 rsvd1[5];
LittleEndian<u64> prp1;
u64 rsvd2;
LittleEndian<u16> sqid;
LittleEndian<u16> qsize;
LittleEndian<u16> sq_flags;
LittleEndian<u16> cqid;
u64 rsvd12[2];
};
struct [[gnu::packed]] NVMeSubmission {
u8 op;
u8 flags;
LittleEndian<u16> cmdid;
union [[gnu::packed]] {
NVMeGenericCmd generic;
NVMeIdentifyCmd identify;
NVMeRWCmd rw;
NVMeCreateCQCmd create_cq;
NVMeCreateSQCmd create_sq;
};
};

View file

@ -0,0 +1,93 @@
/*
* Copyright (c) 2022, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Devices/BlockDevice.h>
#include <Kernel/Devices/Storage/NVMe/NVMeDefinitions.h>
#include <Kernel/Devices/Storage/NVMe/NVMeInterruptQueue.h>
#include <Kernel/WorkQueue.h>
namespace Kernel {
ErrorOr<NonnullLockRefPtr<NVMeInterruptQueue>> NVMeInterruptQueue::try_create(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
{
auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMeInterruptQueue(device, move(rw_dma_region), rw_dma_page, qid, irq, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))));
queue->initialize_interrupt_queue();
return queue;
}
UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
: NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))
, PCIIRQHandler(device, irq)
{
}
void NVMeInterruptQueue::initialize_interrupt_queue()
{
enable_irq();
}
bool NVMeInterruptQueue::handle_irq(RegisterState const&)
{
SpinlockLocker lock(m_request_lock);
return process_cq() != 0;
}
void NVMeInterruptQueue::submit_sqe(NVMeSubmission& sub)
{
NVMeQueue::submit_sqe(sub);
}
void NVMeInterruptQueue::complete_current_request(u16 cmdid, u16 status)
{
auto work_item_creation_result = g_io_work->try_queue([this, cmdid, status]() {
SpinlockLocker lock(m_request_lock);
auto& request_pdu = m_requests.get(cmdid).release_value();
auto current_request = request_pdu.request;
AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
// Note: req_result must be captured by reference, since it is assigned below after the guard is created.
ScopeGuard guard = [&req_result, status, &request_pdu, &lock] {
// FIXME: We should unlock at the end of this function to make sure no new request is inserted
// before we complete the request and call end_io_handler, but that results in a deadlock.
// For now this is avoided by asserting the `used` field while inserting.
lock.unlock();
if (request_pdu.request)
request_pdu.request->complete(req_result);
if (request_pdu.end_io_handler)
request_pdu.end_io_handler(status);
request_pdu.used = false;
};
// There can be submissions without any request associated with them, such as
// admin queue commands during init. If there is no request, we are done.
if (!current_request)
return;
if (status) {
req_result = AsyncBlockDeviceRequest::Failure;
return;
}
if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
req_result = AsyncBlockDeviceRequest::MemoryFault;
return;
}
}
return;
});
if (work_item_creation_result.is_error()) {
SpinlockLocker lock(m_request_lock);
auto& request_pdu = m_requests.get(cmdid).release_value();
auto current_request = request_pdu.request;
if (current_request)
current_request->complete(AsyncDeviceRequest::OutOfMemory);
if (request_pdu.end_io_handler)
request_pdu.end_io_handler(status);
request_pdu.used = false;
}
}
}

View file

@ -0,0 +1,30 @@
/*
* Copyright (c) 2022, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Devices/Storage/NVMe/NVMeQueue.h>
#include <Kernel/Interrupts/PCIIRQHandler.h>
namespace Kernel {
class NVMeInterruptQueue : public NVMeQueue
, public PCIIRQHandler {
public:
static ErrorOr<NonnullLockRefPtr<NVMeInterruptQueue>> try_create(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
void submit_sqe(NVMeSubmission& submission) override;
virtual ~NVMeInterruptQueue() override {};
virtual StringView purpose() const override { return "NVMe"sv; };
void initialize_interrupt_queue();
protected:
NVMeInterruptQueue(PCI::Device& device, NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
private:
virtual void complete_current_request(u16 cmdid, u16 status) override;
bool handle_irq(RegisterState const&) override;
};
}

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2021, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/NonnullOwnPtr.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/Storage/NVMe/NVMeController.h>
#include <Kernel/Devices/Storage/NVMe/NVMeNameSpace.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
namespace Kernel {
UNMAP_AFTER_INIT ErrorOr<NonnullLockRefPtr<NVMeNameSpace>> NVMeNameSpace::try_create(NVMeController const& controller, Vector<NonnullLockRefPtr<NVMeQueue>> queues, u16 nsid, size_t storage_size, size_t lba_size)
{
auto device = TRY(DeviceManagement::try_create_device<NVMeNameSpace>(StorageDevice::LUNAddress { controller.controller_id(), nsid, 0 }, controller.hardware_relative_controller_id(), move(queues), storage_size, lba_size, nsid));
return device;
}
UNMAP_AFTER_INIT NVMeNameSpace::NVMeNameSpace(LUNAddress logical_unit_number_address, u32 hardware_relative_controller_id, Vector<NonnullLockRefPtr<NVMeQueue>> queues, size_t max_addressable_block, size_t lba_size, u16 nsid)
: StorageDevice(logical_unit_number_address, hardware_relative_controller_id, lba_size, max_addressable_block)
, m_nsid(nsid)
, m_queues(move(queues))
{
}
void NVMeNameSpace::start_request(AsyncBlockDeviceRequest& request)
{
auto index = Processor::current_id();
auto& queue = m_queues.at(index);
// TODO: For now we only support IO transfers up to PAGE_SIZE in size (matching the current constraint in the block layer).
// Eventually, remove this constraint by using the PRP2 field in the submission struct, and lift the block layer constraint for the NVMe driver.
VERIFY(request.block_count() <= (PAGE_SIZE / block_size()));
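// Worked example of the constraint above: with 512-byte blocks and a 4 KiB
// PAGE_SIZE, at most 8 blocks fit into the single PRP1 page the queue
// currently uses per request.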
if (request.request_type() == AsyncBlockDeviceRequest::Read) {
queue->read(request, m_nsid, request.block_index(), request.block_count());
} else {
queue->write(request, m_nsid, request.block_index(), request.block_count());
}
}
}

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 2021, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <AK/kmalloc.h>
#include <Kernel/Devices/Storage/NVMe/NVMeDefinitions.h>
#include <Kernel/Devices/Storage/NVMe/NVMeQueue.h>
#include <Kernel/Devices/Storage/StorageDevice.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Locking/Spinlock.h>
namespace Kernel {
class NVMeController;
class NVMeNameSpace : public StorageDevice {
friend class DeviceManagement;
public:
static ErrorOr<NonnullLockRefPtr<NVMeNameSpace>> try_create(NVMeController const&, Vector<NonnullLockRefPtr<NVMeQueue>> queues, u16 nsid, size_t storage_size, size_t lba_size);
CommandSet command_set() const override { return CommandSet::NVMe; };
void start_request(AsyncBlockDeviceRequest& request) override;
private:
NVMeNameSpace(LUNAddress, u32 hardware_relative_controller_id, Vector<NonnullLockRefPtr<NVMeQueue>> queues, size_t storage_size, size_t lba_size, u16 nsid);
u16 m_nsid;
Vector<NonnullLockRefPtr<NVMeQueue>> m_queues;
};
}

View file

@ -0,0 +1,67 @@
/*
* Copyright (c) 2022, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Delay.h>
#include <Kernel/Devices/BlockDevice.h>
#include <Kernel/Devices/Storage/NVMe/NVMeDefinitions.h>
#include <Kernel/Devices/Storage/NVMe/NVMePollQueue.h>
namespace Kernel {
ErrorOr<NonnullLockRefPtr<NVMePollQueue>> NVMePollQueue::try_create(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
{
return TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMePollQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))));
}
UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
: NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs))
{
}
void NVMePollQueue::submit_sqe(NVMeSubmission& sub)
{
NVMeQueue::submit_sqe(sub);
SpinlockLocker lock_cq(m_cq_lock);
while (!process_cq()) {
microseconds_delay(1);
}
}
void NVMePollQueue::complete_current_request(u16 cmdid, u16 status)
{
SpinlockLocker lock(m_request_lock);
auto& request_pdu = m_requests.get(cmdid).release_value();
auto current_request = request_pdu.request;
AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
// Note: req_result must be captured by reference, since it is assigned below after the guard is created.
ScopeGuard guard = [&req_result, status, &request_pdu] {
if (request_pdu.request)
request_pdu.request->complete(req_result);
if (request_pdu.end_io_handler)
request_pdu.end_io_handler(status);
request_pdu.used = false;
};
// There can be submissions without any request associated with them, such as
// admin queue commands during init. If there is no request, we are done.
if (!current_request)
return;
if (status) {
req_result = AsyncBlockDeviceRequest::Failure;
return;
}
if (current_request->request_type() == AsyncBlockDeviceRequest::RequestType::Read) {
if (auto result = current_request->write_to_buffer(current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), current_request->buffer_size()); result.is_error()) {
req_result = AsyncBlockDeviceRequest::MemoryFault;
return;
}
}
return;
}
}

View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2022, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Devices/Storage/NVMe/NVMeQueue.h>
namespace Kernel {
class NVMePollQueue : public NVMeQueue {
public:
static ErrorOr<NonnullLockRefPtr<NVMePollQueue>> try_create(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
void submit_sqe(NVMeSubmission& submission) override;
virtual ~NVMePollQueue() override {};
protected:
NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, NonnullRefPtr<Memory::PhysicalPage> rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
private:
virtual void complete_current_request(u16 cmdid, u16 status) override;
};
}

View file

@ -0,0 +1,181 @@
/*
* Copyright (c) 2021, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Delay.h>
#include <Kernel/Devices/Storage/NVMe/NVMeController.h>
#include <Kernel/Devices/Storage/NVMe/NVMeInterruptQueue.h>
#include <Kernel/Devices/Storage/NVMe/NVMePollQueue.h>
#include <Kernel/Devices/Storage/NVMe/NVMeQueue.h>
#include <Kernel/StdLib.h>
namespace Kernel {
ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(NVMeController& device, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs, QueueType queue_type)
{
// Note: Allocate a DMA region for RW operations. For now, requests don't exceed 4096 bytes (the storage device layer takes care of this).
RefPtr<Memory::PhysicalPage> rw_dma_page;
auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page));
if (rw_dma_page.is_null())
return ENOMEM;
if (queue_type == QueueType::Polled) {
auto queue = NVMePollQueue::try_create(move(rw_dma_region), rw_dma_page.release_nonnull(), qid, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs));
return queue;
}
auto queue = NVMeInterruptQueue::try_create(device, move(rw_dma_region), rw_dma_page.release_nonnull(), qid, irq, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs));
return queue;
}
UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
: m_rw_dma_region(move(rw_dma_region))
, m_qid(qid)
, m_admin_queue(qid == 0)
, m_qdepth(q_depth)
, m_cq_dma_region(move(cq_dma_region))
, m_sq_dma_region(move(sq_dma_region))
, m_db_regs(move(db_regs))
, m_rw_dma_page(rw_dma_page)
{
m_requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
m_sqe_array = { reinterpret_cast<NVMeSubmission*>(m_sq_dma_region->vaddr().as_ptr()), m_qdepth };
m_cqe_array = { reinterpret_cast<NVMeCompletion*>(m_cq_dma_region->vaddr().as_ptr()), m_qdepth };
}
bool NVMeQueue::cqe_available()
{
return PHASE_TAG(m_cqe_array[m_cq_head].status) == m_cq_valid_phase;
}
void NVMeQueue::update_cqe_head()
{
// To prevent overflow, use a temp variable
u32 temp_cq_head = m_cq_head + 1;
if (temp_cq_head == m_qdepth) {
m_cq_head = 0;
m_cq_valid_phase ^= 1;
} else {
m_cq_head = temp_cq_head;
}
}
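// Example of the phase-tag protocol implemented above, assuming a queue
// depth of 4: the controller posts completions with phase bit 1 on its first
// pass over slots 0..3, then flips to phase 0 on the second pass. Because
// m_cq_valid_phase is toggled every time m_cq_head wraps, cqe_available()
// can distinguish fresh entries from stale ones without zeroing the ring.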
u32 NVMeQueue::process_cq()
{
u32 nr_of_processed_cqes = 0;
while (cqe_available()) {
u16 status;
u16 cmdid;
++nr_of_processed_cqes;
status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
cmdid = m_cqe_array[m_cq_head].command_id;
dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);
if (!m_requests.contains(cmdid)) {
dmesgln("Bogus cmd id: {}", cmdid);
VERIFY_NOT_REACHED();
}
complete_current_request(cmdid, status);
update_cqe_head();
}
if (nr_of_processed_cqes) {
update_cq_doorbell();
}
return nr_of_processed_cqes;
}
void NVMeQueue::submit_sqe(NVMeSubmission& sub)
{
SpinlockLocker lock(m_sq_lock);
memcpy(&m_sqe_array[m_sq_tail], &sub, sizeof(NVMeSubmission));
{
u32 temp_sq_tail = m_sq_tail + 1;
if (temp_sq_tail == m_qdepth)
m_sq_tail = 0;
else
m_sq_tail = temp_sq_tail;
}
dbgln_if(NVME_DEBUG, "NVMe: Submission with command identifier {}. SQ_TAIL: {}", sub.cmdid, m_sq_tail);
full_memory_barrier();
update_sq_doorbell();
}
u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
{
// Allocate a unique command id for this submission (see get_request_cid()).
u16 cmd_status;
u16 cid = get_request_cid();
sub.cmdid = cid;
{
SpinlockLocker req_lock(m_request_lock);
if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
VERIFY_NOT_REACHED();
m_requests.set(sub.cmdid, { nullptr, true, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
}
submit_sqe(sub);
// FIXME: Only sync submissions (usually used for admin commands) use a WaitQueue based IO. Eventually we need to
// move this logic into the block layer instead of sprinkling them in the driver code.
m_sync_wait_queue.wait_forever("NVMe sync submit"sv);
return cmd_status;
}
void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
{
NVMeSubmission sub {};
sub.op = OP_NVME_READ;
sub.rw.nsid = nsid;
sub.rw.slba = AK::convert_between_host_and_little_endian(index);
// No. of lbas is 0 based
sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
sub.cmdid = get_request_cid();
{
SpinlockLocker req_lock(m_request_lock);
if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
VERIFY_NOT_REACHED();
m_requests.set(sub.cmdid, { request, true, nullptr });
}
full_memory_barrier();
submit_sqe(sub);
}
void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
{
NVMeSubmission sub {};
sub.op = OP_NVME_WRITE;
sub.rw.nsid = nsid;
sub.rw.slba = AK::convert_between_host_and_little_endian(index);
// No. of lbas is 0 based
sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
sub.cmdid = get_request_cid();
{
SpinlockLocker req_lock(m_request_lock);
if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
VERIFY_NOT_REACHED();
m_requests.set(sub.cmdid, { request, true, nullptr });
}
if (auto result = request.read_from_buffer(request.buffer(), m_rw_dma_region->vaddr().as_ptr(), request.buffer_size()); result.is_error()) {
complete_current_request(sub.cmdid, AsyncDeviceRequest::MemoryFault);
return;
}
full_memory_barrier();
submit_sqe(sub);
}
UNMAP_AFTER_INIT NVMeQueue::~NVMeQueue() = default;
}

View file

@ -0,0 +1,106 @@
/*
* Copyright (c) 2021, Pankaj R <pankydev8@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/AtomicRefCounted.h>
#include <AK/HashMap.h>
#include <AK/OwnPtr.h>
#include <AK/Types.h>
#include <Kernel/Bus/PCI/Device.h>
#include <Kernel/Devices/Storage/NVMe/NVMeDefinitions.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/TypedMapping.h>
namespace Kernel {
struct DoorbellRegister {
u32 sq_tail;
u32 cq_head;
};
enum class QueueType {
Polled,
IRQ
};
class AsyncBlockDeviceRequest;
struct NVMeIO {
RefPtr<AsyncBlockDeviceRequest> request;
bool used = false;
Function<void(u16 status)> end_io_handler;
};
class NVMeController;
class NVMeQueue : public AtomicRefCounted<NVMeQueue> {
public:
static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(NVMeController& device, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs, QueueType queue_type);
bool is_admin_queue() { return m_admin_queue; };
u16 submit_sync_sqe(NVMeSubmission&);
void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
void write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
virtual void submit_sqe(NVMeSubmission&);
virtual ~NVMeQueue();
protected:
u32 process_cq();
void update_sq_doorbell()
{
m_db_regs->sq_tail = m_sq_tail;
}
NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
[[nodiscard]] u32 get_request_cid()
{
u32 expected_tag = m_tag.load(AK::memory_order_acquire);
for (;;) {
u32 cid = expected_tag + 1;
if (cid == m_qdepth)
cid = 0;
if (m_tag.compare_exchange_strong(expected_tag, cid, AK::memory_order_acquire))
return cid;
}
}
private:
bool cqe_available();
void update_cqe_head();
virtual void complete_current_request(u16 cmdid, u16 status) = 0;
void update_cq_doorbell()
{
m_db_regs->cq_head = m_cq_head;
}
protected:
Spinlock<LockRank::Interrupts> m_cq_lock {};
HashMap<u16, NVMeIO> m_requests;
NonnullOwnPtr<Memory::Region> m_rw_dma_region;
Spinlock<LockRank::None> m_request_lock {};
private:
u16 m_qid {};
u8 m_cq_valid_phase { 1 };
u16 m_sq_tail {};
u16 m_cq_head {};
bool m_admin_queue { false };
u32 m_qdepth {};
Atomic<u32> m_tag { 0 }; // used for the cid in a submission queue entry
Spinlock<LockRank::Interrupts> m_sq_lock {};
OwnPtr<Memory::Region> m_cq_dma_region;
Span<NVMeSubmission> m_sqe_array;
OwnPtr<Memory::Region> m_sq_dma_region;
Span<NVMeCompletion> m_cqe_array;
WaitQueue m_sync_wait_queue;
Memory::TypedMapping<DoorbellRegister volatile> m_db_regs;
NonnullRefPtr<Memory::PhysicalPage const> const m_rw_dma_page;
};
}

View file

@ -0,0 +1,404 @@
/*
* Copyright (c) 2023, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
namespace Kernel::SD {
// Relevant Specifications:
// * (SDHC): SD Host Controller Simplified Specification (https://www.sdcard.org/downloads/pls/)
// * (PLSS) Physical Layer Simplified Specification (https://www.sdcard.org/downloads/pls/)
// PLSS 4.7.4: "Detailed Command Description"
enum class CommandIndex : u8 {
GoIdleState = 0,
AllSendCid = 2,
SendRelativeAddr = 3,
AppSetBusWidth = 6,
SelectCard = 7,
SendIfCond = 8,
SendCsd = 9,
GoInactiveState = 15,
SetBlockLen = 16,
ReadSingleBlock = 17,
ReadMultipleBlock = 18,
WriteSingleBlock = 24,
WriteMultipleBlock = 25,
AppSendOpCond = 41,
AppSendScr = 51,
AppCmd = 55,
};
enum class CommandType : u8 {
Normal,
Suspend,
Resume,
Abort
};
enum class ResponseType : u8 {
NoResponse,
ResponseOf136Bits,
ResponseOf48Bits,
ResponseOf48BitsWithBusy
};
enum class DataTransferDirection : u8 {
HostToCard,
CardToHost
};
enum class SendAutoCommand : u8 {
Disabled,
Command12,
Command23
};
// SDHC 2.2.5 & 2.2.6: "Transfer Mode Register" & "Command Register"
union Command {
u32 raw;
struct {
u32 dma_enable : 1;
u32 block_counter : 1;
SendAutoCommand auto_command : 2;
DataTransferDirection direction : 1;
u32 multiblock : 1;
u32 response_type_r1r5 : 1; // v4.10
u32 response_error_check : 1; // v4.10
u32 response_interrupt_disable : 1; // v4.10
u32 reserved1 : 7;
ResponseType response_type : 2;
u32 sub_command_flag : 1; // v4.10
u32 crc_enable : 1;
u32 idx_enable : 1;
u32 is_data : 1;
CommandType type : 2;
CommandIndex index : 6;
u32 reserved3 : 2;
};
bool requires_dat_line() const
{
return is_data;
}
bool uses_transfer_complete_interrupt() const
{
// FIXME: I don't know how to determine this.
return false;
}
};
static_assert(AssertSize<Command, 4>());
namespace Commands {
constexpr Command go_idle_state = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::NoResponse,
.sub_command_flag = 0,
.crc_enable = 0,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::GoIdleState,
.reserved3 = 0
};
constexpr Command all_send_cid = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf136Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::AllSendCid,
.reserved3 = 0
};
constexpr Command send_relative_addr = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::SendRelativeAddr,
.reserved3 = 0
};
constexpr Command app_set_bus_width = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::AppSetBusWidth,
.reserved3 = 0
};
constexpr Command select_card = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48BitsWithBusy,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::SelectCard,
.reserved3 = 0
};
constexpr Command send_if_cond = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::SendIfCond,
.reserved3 = 0
};
constexpr Command send_csd = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf136Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::SendCsd,
.reserved3 = 0
};
constexpr Command set_block_len = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 0,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::SetBlockLen,
.reserved3 = 0
};
constexpr Command read_single_block = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::CardToHost,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 1,
.type = CommandType::Normal,
.index = CommandIndex::ReadSingleBlock,
.reserved3 = 0
};
constexpr Command read_multiple_block = {
.dma_enable = 0,
.block_counter = 1,
.auto_command = SendAutoCommand::Command12,
.direction = DataTransferDirection::CardToHost,
.multiblock = 1,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 1,
.type = CommandType::Normal,
.index = CommandIndex::ReadMultipleBlock,
.reserved3 = 0
};
constexpr Command write_single_block = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 1,
.type = CommandType::Normal,
.index = CommandIndex::WriteSingleBlock,
.reserved3 = 0
};
constexpr Command write_multiple_block = {
.dma_enable = 0,
.block_counter = 1,
.auto_command = SendAutoCommand::Command12,
.direction = DataTransferDirection::HostToCard,
.multiblock = 1,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 1,
.type = CommandType::Normal,
.index = CommandIndex::WriteMultipleBlock,
.reserved3 = 0
};
constexpr Command app_send_op_cond = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 0,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::AppSendOpCond,
.reserved3 = 0
};
constexpr Command app_send_scr = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::CardToHost,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 0,
.idx_enable = 0,
.is_data = 1,
.type = CommandType::Normal,
.index = CommandIndex::AppSendScr,
.reserved3 = 0
};
constexpr Command app_cmd = {
.dma_enable = 0,
.block_counter = 0,
.auto_command = SendAutoCommand::Disabled,
.direction = DataTransferDirection::HostToCard,
.multiblock = 0,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 0,
.type = CommandType::Normal,
.index = CommandIndex::AppCmd,
.reserved3 = 0
};
}
}
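// Usage sketch (hypothetical caller): a host controller driver would place an
// LBA in the argument_1 register and then issue Commands::read_single_block,
// first consulting requires_dat_line() to decide whether it must wait for the
// DAT line to become free, per the command-issue flow in the SDHC specification.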

View file

@ -0,0 +1,41 @@
/*
* Copyright (c) 2023, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Devices/Storage/SD/PCISDHostController.h>
namespace Kernel {
ErrorOr<NonnullRefPtr<PCISDHostController>> PCISDHostController::try_initialize(PCI::DeviceIdentifier const& device_identifier)
{
auto sdhc = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PCISDHostController(device_identifier)));
TRY(sdhc->initialize());
PCI::enable_bus_mastering(sdhc->device_identifier());
PCI::enable_memory_space(sdhc->device_identifier());
sdhc->try_enable_dma();
return sdhc;
}
PCISDHostController::PCISDHostController(PCI::DeviceIdentifier const& device_identifier)
: PCI::Device(device_identifier)
, SDHostController()
{
auto slot_information_register = read_slot_information();
if (slot_information_register.slots_available() != 1) {
// TODO: Support multiple slots
dmesgln("SD Host Controller has {} slots, but we currently only support using only one", slot_information_register.slots_available());
}
auto physical_address_of_sdhc_registers = PhysicalAddress {
PCI::get_BAR(device_identifier, static_cast<PCI::HeaderType0BaseRegister>(slot_information_register.first_bar_number))
};
m_registers = Memory::map_typed_writable<SD::HostControlRegisterMap volatile>(physical_address_of_sdhc_registers).release_value_but_fixme_should_propagate_errors();
}
}


@ -0,0 +1,50 @@
/*
* Copyright (c) 2023, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/Device.h>
#include <Kernel/Devices/Storage/SD/SDHostController.h>
#include <Kernel/Memory/TypedMapping.h>
namespace Kernel {
class PCISDHostController : public PCI::Device
, public SDHostController {
public:
static ErrorOr<NonnullRefPtr<PCISDHostController>> try_initialize(PCI::DeviceIdentifier const& device_identifier);
// ^PCI::Device
virtual StringView device_name() const override { return "SD Host Controller"sv; }
protected:
// ^SDHostController
virtual SD::HostControlRegisterMap volatile* get_register_map_base_address() override { return m_registers.ptr(); }
private:
PCISDHostController(PCI::DeviceIdentifier const& device_identifier);
struct [[gnu::packed]] SlotInformationRegister {
u8 first_bar_number : 3;
u8 : 1;
u8 number_of_slots : 3;
u8 : 1;
u8 slots_available() const { return number_of_slots + 1; }
};
static_assert(AssertSize<SlotInformationRegister, 1>());
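// Illustrative example (hypothetical value): a raw register value of 0x10
// decodes to first_bar_number = 0 and number_of_slots = 1, i.e. two slots
// (slots_available() returns number_of_slots + 1) starting at BAR0.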
SlotInformationRegister read_slot_information() const
{
SpinlockLocker locker(device_identifier().operation_lock());
return bit_cast<SlotInformationRegister>(PCI::Access::the().read8_field(device_identifier(), 0x40));
}
Memory::TypedMapping<SD::HostControlRegisterMap volatile> m_registers;
};
}


@ -0,0 +1,329 @@
/*
* Copyright (c) 2023, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Endian.h>
#include <AK/Types.h>
namespace Kernel::SD {
// Relevant Specifications:
// * (SDHC): SD Host Controller Simplified Specification (https://www.sdcard.org/downloads/pls/)
// * (PLSS): Physical Layer Simplified Specification (https://www.sdcard.org/downloads/pls/)
// * (BCM2835): BCM2835 ARM Peripherals (https://www.raspberrypi.org/app/uploads/2012/02/BCM2835-ARM-Peripherals.pdf)
enum class HostVersion : u8 {
Version1,
Version2,
Version3,
Unknown
};
enum class ADMAErrorState : u32 {
Stop = 0b00,
FetchDescriptor = 0b01,
Reserved = 0b10,
TransferData = 0b11
};
// SDHC 2.1.1 "SD Host Control Register Map"
// NOTE: The registers must be 32 bits, because of a quirk in the RPI.
struct HostControlRegisterMap {
u32 argument_2;
u32 block_size_and_block_count;
u32 argument_1;
u32 transfer_mode_and_command;
u32 response_0;
u32 response_1;
u32 response_2;
u32 response_3;
u32 buffer_data_port;
u32 present_state;
u32 host_configuration_0;
u32 host_configuration_1;
union InterruptStatus {
struct { // SDHC 2.2.18 Normal Interrupt Status Register (Cat.C Offset 030h)
u32 command_complete : 1;
u32 transfer_complete : 1;
u32 block_gap_event : 1;
u32 dma_interrupt : 1;
u32 buffer_write_ready : 1;
u32 buffer_read_ready : 1;
u32 card_insertion : 1;
u32 card_removal : 1;
u32 card_interrupt : 1;
u32 int_a : 1;
u32 int_b : 1;
u32 int_c : 1;
u32 retuning_event : 1;
u32 fx_event : 1;
u32 : 1;
u32 error_interrupt : 1;
// SDHC 2.2.19 Error Interrupt Status Register (Cat.C Offset 032h)
u32 command_timeout_error : 1;
u32 command_crc_error : 1;
u32 command_index_error : 1;
u32 data_timeout_error : 1;
u32 data_crc_error : 1;
u32 data_end_bit_error : 1;
u32 current_limit_error : 1;
u32 auto_cmd_error : 1;
u32 adma_error : 1;
u32 tuning_error : 1;
u32 response_error : 1;
u32 vendor_specific_error : 1;
};
u32 raw;
} interrupt_status;
u32 interrupt_status_enable;
u32 interrupt_signal_enable;
u32 host_configuration_2;
// SDHC 2.2.26 Capabilities Register (Cat.C Offset 040h)
struct CapabilitiesRegister {
u32 timeout_clock_frequency : 6;
u32 : 1;
u32 timeout_clock_unit : 1;
u32 base_clock_frequency : 8;
u32 max_block_length : 2;
u32 eight_bit_support_for_embedded_devices : 1;
u32 adma2 : 1;
u32 : 1;
u32 high_speed : 1;
u32 sdma : 1;
u32 suspend_resume : 1;
u32 three_point_three_volt : 1;
u32 three_point_zero_volt : 1;
u32 one_point_eight_volt : 1;
u32 dma_64_bit_addressing_v4 : 1;
u32 dma_64_bit_addressing_v3 : 1;
u32 async_interrupt : 1;
u32 slot_type : 2;
u32 sdr50 : 1;
u32 sdr104 : 1;
u32 ddr50 : 1;
u32 uhs_ii : 1;
u32 driver_type_A : 1;
u32 driver_type_C : 1;
u32 driver_type_D : 1;
u32 : 1;
u32 timer_count_for_retuning : 4;
u32 : 1;
u32 use_tuning_for_sdr50 : 1;
u32 retuning_modes : 2;
u32 clock_multiplier : 8;
u32 : 3;
u32 adma3 : 1;
u32 one_point_eight_vdd2 : 1;
u32 : 3;
} capabilities;
u32 maximum_current_capabilities;
u32 maximum_current_capabilities_reserved;
u32 force_event_for_auto_cmd_error_status;
struct {
ADMAErrorState state : 2;
u32 length_mismatch_error : 1;
u32 : 5;
u32 : 24;
} adma_error_status;
u32 adma_system_address[2];
u32 preset_value[4];
u32 reserved_0[28];
u32 shared_bus_control;
u32 reserved_1[6];
struct [[gnu::packed]] {
u8 interrupt_signal_for_each_slot;
u8 : 8;
HostVersion specification_version_number;
u8 vendor_version_number;
} slot_interrupt_status_and_version;
};
static_assert(AssertSize<HostControlRegisterMap, 256>());
// SDHC Figure 1-10 : General Descriptor Table Format
enum class DMAAction : u8 {
// ADMA 2
Nop = 0b000,
Rsv0 = 0b010,
Tran = 0b100,
Link = 0b110,
// ADMA 3
CommandDescriptor_SD = 0b001,
CommandDescriptor_UHS_II = 0b011,
Rsv1 = 0b101,
IntegratedDescriptor = 0b111,
};
// Both of these represent the ADMA2 version, ADMA3 might have slight differences
// SDHC 1.13.3.1 ADMA2 Descriptor Format
struct alignas(4) DMADescriptor64 {
u32 valid : 1;
u32 end : 1;
u32 interrupt : 1;
DMAAction action : 3;
u32 length_upper : 10; // Version 4.10+ only
u32 length_lower : 16;
u32 address : 32;
};
static_assert(AssertSize<DMADescriptor64, 8>());
struct alignas(8) DMADescriptor128 {
u32 valid : 1;
u32 end : 1;
u32 interrupt : 1;
DMAAction action : 3;
u32 length_upper : 10; // Version 4.10+ only
u32 length_lower : 16;
u32 address_low : 32;
u32 address_high : 32;
u32 : 32;
};
static_assert(AssertSize<DMADescriptor128, 16>());
// PLSS 5.1: "OCR Register"
union OperatingConditionRegister {
u32 raw;
struct {
u32 : 15;
u32 vdd_voltage_window_27_28 : 1;
u32 vdd_voltage_window_28_29 : 1;
u32 vdd_voltage_window_29_30 : 1;
u32 vdd_voltage_window_30_31 : 1;
u32 vdd_voltage_window_31_32 : 1;
u32 vdd_voltage_window_32_33 : 1;
u32 vdd_voltage_window_33_34 : 1;
u32 vdd_voltage_window_34_35 : 1;
u32 vdd_voltage_window_35_36 : 1;
u32 switching_to_18v_accepted : 1;
u32 : 2;
u32 over_2tb_support_status : 1;
u32 : 1;
u32 uhs2_card_status : 1;
u32 card_capacity_status : 1;
u32 card_power_up_status : 1;
};
};
static_assert(AssertSize<OperatingConditionRegister, 4>());
// PLSS 5.2: "CID Register"
union CardIdentificationRegister {
u32 raw[4];
struct [[gnu::packed]] {
u64 manufacturing_date : 12;
u64 : 4;
u64 product_serial_number : 32;
u64 product_revision : 8;
u64 product_name : 40;
u64 oem_id : 16;
u64 manufacturer_id : 8;
};
};
static_assert(AssertSize<CardIdentificationRegister, 16>());
// PLSS 5.3.2: "CSD Register (CSD Version 1.0)"
union CardSpecificDataRegister {
u64 raw[2];
struct [[gnu::packed]] {
// Note that the physical layer spec says there are 7 bits of checksum and 1 reserved bit here,
// but they are removed
u32 : 1;
u32 write_protection_until_power_cycle : 1;
u32 file_format : 2;
u32 temporary_write_protection : 1;
u32 permanent_write_protection : 1;
u32 copy_flag : 1;
u32 file_format_group : 1;
u32 : 5;
u32 partial_blocks_for_write_allowed : 1;
u32 max_write_data_block_length : 4;
u32 write_speed_factor : 3;
u32 : 2;
u32 write_protect_group_enable : 1;
u32 write_protect_group_size : 7;
u32 erase_sector_size : 7;
u32 erase_single_block_enable : 1;
u32 device_size_multiplier : 3;
u32 max_write_current_at_vdd_max : 3;
u32 max_write_current_at_vdd_min : 3;
u32 max_read_current_at_vdd_max : 3;
u32 max_read_current_at_vdd_min : 3;
u32 device_size : 12;
u32 : 2;
u32 dsr_implemented : 1;
u32 read_block_misalignment : 1;
u32 write_block_misalignment : 1;
u32 partial_blocks_for_read_allowed : 1;
u32 max_read_data_block_length : 4;
u32 card_command_classes : 12;
u32 max_data_transfer_rate : 8;
u32 data_read_access_time2 : 8;
u32 data_read_access_time1 : 8;
u32 : 6;
u32 csd_structure : 2;
};
};
static_assert(AssertSize<CardSpecificDataRegister, 16>());
// PLSS 5.6: "SCR Register"
union SDConfigurationRegister {
u8 raw[8];
struct {
u32 scr_structure : 4;
u32 sd_specification : 4;
u32 data_status_after_erase : 1;
u32 sd_security : 3;
u32 sd_bus_widths : 4;
u32 sd_specification3 : 1;
u32 extended_security : 4;
u32 sd_specification4 : 1;
u32 sd_specification_x : 4;
u32 : 1;
u32 command_support : 5;
u32 : 32;
};
};
static_assert(AssertSize<SDConfigurationRegister, 8>());
// PLSS 4.10.1: "Card Status"
union CardStatus {
u32 raw;
struct {
u32 : 3;
u32 ake_seq_error : 1;
u32 : 1;
u32 app_cmd : 1;
u32 fx_event : 1;
u32 : 1;
u32 ready_for_data : 1;
u32 current_state : 4;
u32 erase_reset : 1;
u32 card_ecc_disabled : 1;
u32 wp_erase_skip : 1;
u32 csd_overwrite : 1;
u32 : 2;
u32 error : 1;
u32 cc_error : 1;
u32 card_ecc_failed : 1;
u32 illegal_command : 1;
u32 com_crc_error : 1;
u32 lock_unlock_failed : 1;
u32 card_is_locked : 1;
u32 wp_violation : 1;
u32 erase_param : 1;
u32 erase_seq_error : 1;
u32 block_len_error : 1;
u32 address_error : 1;
u32 out_of_range : 1;
};
};
static_assert(AssertSize<CardStatus, 4>());
}


@ -0,0 +1,996 @@
/*
* Copyright (c) 2023, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Format.h>
#include <AK/StdLibExtras.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/Storage/SD/Commands.h>
#include <Kernel/Devices/Storage/SD/SDHostController.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
#include <Kernel/Panic.h>
#include <Kernel/Time/TimeManagement.h>
#if ARCH(AARCH64)
# include <Kernel/Arch/aarch64/RPi/SDHostController.h>
#endif
namespace Kernel {
// Relevant Specifications:
// * (SDHC): SD Host Controller Simplified Specification (https://www.sdcard.org/downloads/pls/)
// * (PLSS): Physical Layer Simplified Specification (https://www.sdcard.org/downloads/pls/)
// * (BCM2835): BCM2835 ARM Peripherals (https://www.raspberrypi.org/app/uploads/2012/02/BCM2835-ARM-Peripherals.pdf)
static void delay(i64 nanoseconds)
{
auto start = TimeManagement::the().monotonic_time();
auto end = start + Duration::from_nanoseconds(nanoseconds);
while (TimeManagement::the().monotonic_time() < end)
Processor::pause();
}
constexpr u32 max_supported_sdsc_frequency = 25000000;
constexpr u32 max_supported_sdsc_frequency_high_speed = 50000000;
// In "m_registers->host_configuration_0"
// 2.2.11 Host Control 1 Register
constexpr u32 high_speed_enable = 1 << 2;
constexpr u32 dma_select_adma2_32 = 0b10 << 3;
constexpr u32 dma_select_adma2_64 = 0b11 << 3;
// In "m_registers->host_configuration_1"
// In sub-register "Clock Control"
constexpr u32 internal_clock_enable = 1 << 0;
constexpr u32 internal_clock_stable = 1 << 1;
constexpr u32 sd_clock_enable = 1 << 2;
// In sub-register "Software Reset"
constexpr u32 software_reset_for_all = 0x01000000;
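// Note: host_configuration_1 packs Clock Control (bits 15:0), Timeout Control
// (bits 23:16) and Software Reset (bits 31:24) into one 32-bit word, so
// "Software Reset For All" (bit 0 of the Software Reset register) is bit 24.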
// In Interrupt Status Register
constexpr u32 command_complete = 1 << 0;
constexpr u32 transfer_complete = 1 << 1;
constexpr u32 buffer_write_ready = 1 << 4;
constexpr u32 buffer_read_ready = 1 << 5;
// PLSS 5.1: all voltage windows
constexpr u32 acmd41_voltage = 0x00ff8000;
// PLSS 4.2.3.1: All voltage windows, XPC = 1, SDHC = 1
constexpr u32 acmd41_arg = 0x50ff8000;
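// Bit breakdown of acmd41_arg: bit 30 (HCS) announces SDHC/SDXC support,
// bit 28 (XPC) requests maximum performance, and bits 23:15 select all
// OCR voltage windows (2.7V-3.6V), matching acmd41_voltage above.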
constexpr size_t block_len = 512;
SDHostController::SDHostController()
: StorageController(StorageManagement::generate_relative_sd_controller_id({}))
{
}
ErrorOr<void> SDHostController::reset() { return ENOTIMPL; }
ErrorOr<void> SDHostController::shutdown() { return ENOTIMPL; }
void SDHostController::complete_current_request(AsyncDeviceRequest::RequestResult)
{
VERIFY_NOT_REACHED();
}
ErrorOr<void> SDHostController::initialize()
{
m_registers = get_register_map_base_address();
if (!m_registers)
return EIO;
if (host_version() != SD::HostVersion::Version3 && host_version() != SD::HostVersion::Version2)
return ENOTSUP;
TRY(reset_host_controller());
m_registers->interrupt_status_enable = 0xffffffff;
auto card_or_error = try_initialize_inserted_card();
if (card_or_error.is_error() && card_or_error.error().code() != ENODEV) {
dmesgln("SDHostController: Failed to initialize inserted card: {}", card_or_error.error());
} else if (!card_or_error.is_error()) {
m_card = card_or_error.release_value();
}
return {};
}
void SDHostController::try_enable_dma()
{
if (m_registers->capabilities.adma2) {
auto maybe_dma_buffer = MM.allocate_dma_buffer_pages(dma_region_size, "SDHC DMA Buffer"sv, Memory::Region::Access::ReadWrite);
if (maybe_dma_buffer.is_error()) {
dmesgln("Could not allocate DMA pages for SDHC: {}", maybe_dma_buffer.error());
} else {
m_dma_region = maybe_dma_buffer.release_value();
dbgln("Allocated SDHC DMA buffer at {}", m_dma_region->physical_page(0)->paddr());
// FIXME: This check does not seem to work, qemu supports 64 bit addressing, but we don't seem to detect it
// FIXME: Hardcoding to use the 64 bit mode leads to transfer timeouts, without any errors reported from qemu
if (host_version() != SD::HostVersion::Version3 && m_registers->capabilities.dma_64_bit_addressing_v3) {
dbgln("Setting SDHostController to operate using ADMA2 with 64 bit addressing");
m_mode = OperatingMode::ADMA2_64;
m_registers->host_configuration_0 = m_registers->host_configuration_0 | dma_select_adma2_64;
} else {
// FIXME: Use a way that guarantees memory addresses below the 32 bit threshold
VERIFY(m_dma_region->physical_page(0)->paddr().get() >> 32 == 0);
VERIFY(m_dma_region->physical_page(dma_region_size / PAGE_SIZE - 1)->paddr().get() >> 32 == 0);
dbgln("Setting SDHostController to operate using ADMA2 with 32 bit addressing");
m_mode = OperatingMode::ADMA2_32;
m_registers->host_configuration_0 = m_registers->host_configuration_0 | dma_select_adma2_32;
}
}
}
}
ErrorOr<NonnullLockRefPtr<SDMemoryCard>> SDHostController::try_initialize_inserted_card()
{
if (!is_card_inserted())
return ENODEV;
// PLSS 4.2: "Card Identification Mode"
// "After power-on ...the cards are initialized with ... 400KHz clock frequency."
// NOTE: The SDHC might already have been initialized (e.g. by the bootloader), let's reset it to a known configuration
if (is_sd_clock_enabled())
sd_clock_stop();
TRY(sd_clock_supply(400000));
// PLSS 4.2.3: "Card Initialization and Identification Process"
// Also see Figure 4-2 in the PLSS spec for a flowchart of the initialization process.
// Note that the steps correspond to the steps in the flowchart, although I made up the numbering and text
// 1. Send CMD0 (GO_IDLE_STATE) to the card
TRY(issue_command(SD::Commands::go_idle_state, 0));
TRY(wait_for_response());
// 2. Send CMD8 (SEND_IF_COND) to the card
// SD interface condition: 7:0 = check pattern, 11:8 = supply voltage
// 0x1aa: check pattern = 10101010, supply voltage = 1 => 2.7-3.6V
const u32 voltage_window = 0x1aa;
TRY(issue_command(SD::Commands::send_if_cond, voltage_window));
auto interface_condition_response = wait_for_response();
// 3. If the card does not respond to CMD8, it is either a Ver2.00 or later
// SD Memory Card with a voltage mismatch, a Ver1.X SD Memory Card, or not
// an SD Memory Card at all
if (interface_condition_response.is_error()) {
// TODO: This is supposed to be the "No Response" branch of the
// flowchart in Figure 4-2 of the PLSS spec
return ENOTSUP;
}
// 4. If the card responds to CMD8, but it's not a valid response then the
// card is not usable
if (interface_condition_response.value().response[0] != voltage_window) {
// FIXME: We should probably try again with a lower voltage window
return ENODEV;
}
// 5. Send ACMD41 (SEND_OP_COND) with HCS=1 to the card, repeat this until the card is ready or timeout
SD::OperatingConditionRegister ocr = {};
bool card_is_usable = true;
if (!retry_with_timeout([&]() {
if (issue_command(SD::Commands::app_cmd, 0).is_error() || wait_for_response().is_error())
return false;
if (issue_command(SD::Commands::app_send_op_cond, acmd41_arg).is_error())
return false;
if (auto acmd41_response = wait_for_response();
!acmd41_response.is_error()) {
// Check if the card supports the voltage windows we requested and SDHC
u32 response = acmd41_response.value().response[0];
if ((response & acmd41_voltage) != acmd41_voltage) {
card_is_usable = false;
return false;
}
ocr.raw = acmd41_response.value().response[0];
}
return ocr.card_power_up_status == 1;
})) {
return card_is_usable ? EIO : ENODEV;
}
// 6. If you requested to switch to 1.8V, and the card accepts, execute a voltage switch sequence
// (we didn't ask it)
// 7. Send CMD2 (ALL_SEND_CID) to the card
TRY(issue_command(SD::Commands::all_send_cid, 0));
auto all_send_cid_response = TRY(wait_for_response());
auto cid = bit_cast<SD::CardIdentificationRegister>(all_send_cid_response.response);
// 8. Send CMD3 (SEND_RELATIVE_ADDR) to the card
TRY(issue_command(SD::Commands::send_relative_addr, 0));
auto send_relative_addr_response = TRY(wait_for_response());
u32 rca = send_relative_addr_response.response[0]; // FIXME: Might need to clear some bits here
// Extra steps:
TRY(issue_command(SD::Commands::send_csd, rca));
auto send_csd_response = TRY(wait_for_response());
auto csd = bit_cast<SD::CardSpecificDataRegister>(send_csd_response.response);
u32 block_count = (csd.device_size + 1) * (1 << (csd.device_size_multiplier + 2));
u32 block_size = (1 << csd.max_read_data_block_length);
u64 capacity = static_cast<u64>(block_count) * block_size;
u64 card_capacity_in_blocks = capacity / block_len;
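// Illustrative example (hypothetical CSD v1.0 values): device_size = 4095,
// device_size_multiplier = 7 and max_read_data_block_length = 9 yield
// block_count = 4096 * 512 = 2097152 and block_size = 512, i.e. a 1 GiB card
// (2097152 blocks when expressed in block_len units of 512 bytes).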
if (m_registers->capabilities.high_speed) {
dbgln("SDHC: Enabling High Speed mode");
m_registers->host_configuration_0 = m_registers->host_configuration_0 | high_speed_enable;
TRY(sd_clock_frequency_change(max_supported_sdsc_frequency_high_speed));
} else {
TRY(sd_clock_frequency_change(max_supported_sdsc_frequency));
}
TRY(issue_command(SD::Commands::select_card, rca));
TRY(wait_for_response());
// Set block length to 512 if the card is SDSC.
// All other models only support 512 byte blocks so they don't need to be explicitly told
if (!ocr.card_capacity_status) {
TRY(issue_command(SD::Commands::set_block_len, block_len));
TRY(wait_for_response());
}
auto scr = TRY(retrieve_sd_configuration_register(rca));
TRY(issue_command(SD::Commands::app_cmd, rca));
TRY(wait_for_response());
TRY(issue_command(SD::Commands::app_set_bus_width, 0x2)); // 0b00=1 bit bus, 0b10=4 bit bus
TRY(wait_for_response());
return TRY(DeviceManagement::try_create_device<SDMemoryCard>(
*this,
StorageDevice::LUNAddress { controller_id(), 0, 0 },
hardware_relative_controller_id(), block_len,
card_capacity_in_blocks, rca, ocr, cid, scr));
}
bool SDHostController::retry_with_timeout(Function<bool()> f, i64 delay_between_tries)
{
int timeout = 1000;
bool success = false;
while (!success && timeout > 0) {
success = f();
if (!success)
delay(delay_between_tries);
timeout--;
}
return timeout > 0;
}
ErrorOr<void> SDHostController::issue_command(SD::Command const& cmd, u32 argument)
{
// SDHC 3.7.1: "Transaction Control without Data Transfer Using DAT Line"
constexpr u32 command_inhibit = 1 << 1;
// 1. Check Command Inhibit (CMD) in the Present State register.
// Repeat this step until Command Inhibit (CMD) is 0.
// That is, when Command Inhibit (CMD) is 1, the Host Driver
// shall not issue an SD Command.
if (!retry_with_timeout(
[&]() { return !(m_registers->present_state & command_inhibit); })) {
return EIO;
}
// 2. If the Host Driver issues an SD Command using DAT lines
// including busy signal, go to step (3).
// If without using DAT lines including busy signal, go to step (5).
// 3. If the Host Driver is issuing an abort command, go to step (5). In the
// case of non-abort command, go to step (4).
if (cmd.requires_dat_line() && cmd.type != SD::CommandType::Abort) {
// 4. Check Command Inhibit (DAT) in the Present State register. Repeat
// this step until Command Inhibit (DAT) is set to 0.
constexpr u32 data_inhibit = 1 << 2;
if (!retry_with_timeout([&]() { return !(m_registers->present_state & data_inhibit); })) {
return EIO;
}
}
// 5. Set registers as described in Table 1-2 except Command register.
m_registers->argument_1 = argument;
// 6. Set the Command register.
m_registers->transfer_mode_and_command = cmd.raw;
// 7. Perform Command Completion Sequence in accordance with 3.7.1.2.
// Done in wait_for_response()
return {};
}
ErrorOr<SDHostController::Response> SDHostController::wait_for_response()
{
// SDHC 3.7.1.2 The Sequence to Finalize a Command
// 1. Wait for the Command Complete Interrupt. If the Command Complete
// Interrupt has occurred, go to step (2).
if (!retry_with_timeout(
[&]() { return m_registers->interrupt_status.command_complete; })) {
return EIO;
}
// 2. Write 1 to Command Complete in the Normal Interrupt Status register to clear this bit
m_registers->interrupt_status.raw = command_complete;
// 3. Read the Response register(s) to get the response.
// NOTE: We read fewer bits than ResponseType because the missing bits are only
// relevant for the physical layer, and the device filters them before they
// reach us
Response r = {};
auto cmd = last_sent_command();
switch (cmd.response_type) {
case SD::ResponseType::NoResponse:
break;
case SD::ResponseType::ResponseOf136Bits:
r.response[0] = m_registers->response_0;
r.response[1] = m_registers->response_1;
r.response[2] = m_registers->response_2;
r.response[3] = m_registers->response_3;
break;
case SD::ResponseType::ResponseOf48Bits:
r.response[0] = m_registers->response_0;
break;
case SD::ResponseType::ResponseOf48BitsWithBusy:
// FIXME: It is unclear how the busy signal on the DAT line should be handled here
break;
}
// 4. Judge whether the command uses the Transfer Complete Interrupt or not.
// If it uses Transfer Complete, go to step (5). If not, go to step (7).
if (last_sent_command().uses_transfer_complete_interrupt())
TODO();
// 7. Check for errors in Response Data. If there is no error, go to step (8). If there is an error, go to step (9).
if (cmd.response_type != SD::ResponseType::ResponseOf136Bits) {
if (card_status_contains_errors(cmd, r.response[0])) {
return EIO;
}
}
// NOTE: Steps 7, 8 and 9 consist of checking the response for errors, which
// are specific to each command therefore those steps are not fully implemented
// here.
return { r };
}
bool SDHostController::is_sd_clock_enabled()
{
return m_registers->host_configuration_1 & sd_clock_enable;
}
ErrorOr<u32> SDHostController::calculate_sd_clock_divisor(u32 sd_clock_frequency, u32 frequency)
{
// SDHC 2.2.14: "Clock Control Register"
// (1) 8-bit Divided Clock Mode
// This mode is supported by the Host Controller Version 1.00 and 2.00.
// The frequency is not programmed directly; rather this register holds the divisor of
// the Base Clock Frequency For SD Clock in the Capabilities register. Only
// the following settings are allowed.
//
// +-----+---------------------------+
// | 80h | base clock divided by 256 |
// | 40h | base clock divided by 128 |
// | 20h | base clock divided by 64 |
// | 10h | base clock divided by 32 |
// | 08h | base clock divided by 16 |
// | 04h | base clock divided by 8 |
// | 02h | base clock divided by 4 |
// | 01h | base clock divided by 2 |
// | 00h | Base clock (10MHz-63MHz) |
// +-----+---------------------------+
//
if (host_version() == SD::HostVersion::Version2 || host_version() == SD::HostVersion::Version1) {
for (u32 divisor = 1; divisor <= 256; divisor *= 2) {
if (sd_clock_frequency / divisor <= frequency)
return divisor >> 1;
}
dmesgln("SDHostController: Could not find a suitable divisor for the requested frequency");
return ENOTSUP;
}
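// Illustrative example (hypothetical values): with a 100 MHz base clock and a
// requested frequency of 400 kHz, the loop stops at divisor = 256
// (100 MHz / 256 = ~391 kHz <= 400 kHz) and returns 256 >> 1 = 0x80,
// the "base clock divided by 256" encoding from the table above.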
// (2) 10-bit Divided Clock Mode
// Host Controller Version 3.00 supports this mandatory mode instead of the
// 8-bit Divided Clock Mode. The length of divider is extended to 10 bits and all
// divider values shall be supported.
//
// +------+-------------------------------+
// | 3FFh | 1/2046 Divided Clock |
// | .... | ............................. |
// | N | 1/2N Divided Clock (Duty 50%) |
// | .... | ............................. |
// | 002h | 1/4 Divided Clock |
// | 001h | 1/2 Divided Clock |
// | 000h | Base Clock (10MHz-255MHz) |
// +------+-------------------------------+
//
if (host_version() == SD::HostVersion::Version3) {
if (frequency == sd_clock_frequency)
return 0;
auto divisor = AK::ceil_div(sd_clock_frequency, 2 * frequency);
if (divisor > 0x3ff) {
dmesgln("SDHostController: Cannot represent the divisor for the requested frequency");
return ENOTSUP;
}
return divisor;
}
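// Illustrative example (hypothetical values): with a 100 MHz base clock and a
// requested frequency of 400 kHz, ceil_div(100 MHz, 2 * 400 kHz) = 125,
// giving 100 MHz / (2 * 125) = exactly 400 kHz.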
VERIFY_NOT_REACHED();
}
ErrorOr<void> SDHostController::sd_clock_supply(u32 frequency)
{
// SDHC 3.2.1: "SD Clock Supply Sequence"
// The *Clock Control* register is in the lower 16 bits of *Host Configuration 1*
VERIFY((m_registers->host_configuration_1 & sd_clock_enable) == 0);
// 1. Find out the divisor to determine the SD Clock Frequency
const u32 sd_clock_frequency = TRY(retrieve_sd_clock_frequency());
u32 divisor = TRY(calculate_sd_clock_divisor(sd_clock_frequency, frequency));
// 2. Set Internal Clock Enable and SDCLK Frequency Select in the Clock Control register
const u32 eight_lower_bits_of_sdclk_frequency_select = (divisor & 0xff) << 8;
u32 sdclk_frequency_select = eight_lower_bits_of_sdclk_frequency_select;
if (host_version() == SD::HostVersion::Version3) {
const u32 two_upper_bits_of_sdclk_frequency_select = (divisor >> 8 & 0x3) << 6;
sdclk_frequency_select |= two_upper_bits_of_sdclk_frequency_select;
}
m_registers->host_configuration_1 = m_registers->host_configuration_1 | internal_clock_enable | sdclk_frequency_select;
// 3. Check Internal Clock Stable in the Clock Control register until it is 1
if (!retry_with_timeout([&] { return m_registers->host_configuration_1 & internal_clock_stable; })) {
return EIO;
}
// 4. Set SD Clock Enable in the Clock Control register to 1
m_registers->host_configuration_1 = m_registers->host_configuration_1 | sd_clock_enable;
return {};
}
void SDHostController::sd_clock_stop()
{
// SDHC 3.2.2: "SD Clock Stop Sequence"
// 1. Set SD Clock Enable in the Clock Control register to 0
m_registers->host_configuration_1 = m_registers->host_configuration_1 & ~sd_clock_enable;
}
ErrorOr<void> SDHostController::sd_clock_frequency_change(u32 new_frequency)
{
// SDHC 3.2.3: "SD Clock Frequency Change Sequence"
// 1. Execute the SD Clock Stop Sequence
sd_clock_stop();
// 2. Execute the SD Clock Supply Sequence
return sd_clock_supply(new_frequency);
}
ErrorOr<void> SDHostController::reset_host_controller()
{
m_registers->host_configuration_0 = 0;
m_registers->host_configuration_1 = m_registers->host_configuration_1 | software_reset_for_all;
if (!retry_with_timeout(
[&] {
return (m_registers->host_configuration_1 & software_reset_for_all) == 0;
})) {
return EIO;
}
return {};
}
ErrorOr<void> SDHostController::transaction_control_with_data_transfer_using_the_dat_line_without_dma(
SD::Command const& command,
u32 argument,
u32 block_count,
u32 block_size,
UserOrKernelBuffer buf,
DataTransferType data_transfer_type)
{
// SDHC 3.7.2: "Transaction Control with Data Transfer Using DAT Line (without DMA)"
// 1. Set the value corresponding to the executed data byte length of one block to Block Size register.
// 2. Set the value corresponding to the executed data block count to Block Count register in accordance with Table 2-8.
m_registers->block_size_and_block_count = (block_count << 16) | block_size;
// 3. Set the argument value to Argument 1 register.
m_registers->argument_1 = argument;
// 4. Set the value to the Transfer Mode register. The host driver
// determines Multi / Single Block
// Select, Block Count Enable, Data Transfer Direction, Auto CMD12 Enable
// and DMA Enable. Multi / Single Block Select and Block Count Enable are
// determined according to Table 2-8. (NOTE: We assume `cmd` already has
// the correct flags set)
// 5. Set the value to Command register.
m_registers->transfer_mode_and_command = command.raw;
// 6. Then, wait for the Command Complete Interrupt.
if (!retry_with_timeout([&]() { return m_registers->interrupt_status.command_complete; })) {
return EIO;
}
// 7. Write 1 to the Command Complete in the Normal Interrupt Status
// register for clearing this bit.
m_registers->interrupt_status.raw = command_complete;
// 8. Read Response register and get necessary information of the issued
// command
// (FIXME: Return the value for better error handling)
// 9. In the case where this sequence is for write to a card, go to step
// (10).
// In case of read from a card, go to step (14).
if (data_transfer_type == DataTransferType::Write) {
for (u32 i = 0; i < block_count; i++) {
// 10. Then wait for Buffer Write Ready Interrupt.
if (!retry_with_timeout(
[&]() {
return m_registers->interrupt_status.buffer_write_ready;
})) {
return EIO;
}
// 11. Write 1 to the Buffer Write Ready in the Normal Interrupt Status register for clearing this bit.
m_registers->interrupt_status.raw = buffer_write_ready;
// 12. Write block data (according to the number of bytes specified in step (1)) to the Buffer Data Port register.
u32 temp;
for (u32 j = 0; j < block_size / sizeof(u32); j++) {
TRY(buf.read(&temp, i * block_size + sizeof(u32) * j, sizeof(u32)));
m_registers->buffer_data_port = temp;
}
// 13. Repeat until all blocks are sent and then go to step (18).
}
} else {
for (u32 i = 0; i < block_count; i++) {
// 14. Then wait for the Buffer Read Ready Interrupt.
if (!retry_with_timeout([&]() { return m_registers->interrupt_status.buffer_read_ready; })) {
return EIO;
}
// 15. Write 1 to the Buffer Read Ready in the Normal Interrupt Status
// register for clearing this bit.
m_registers->interrupt_status.raw = buffer_read_ready;
// 16. Read block data (according to the number of bytes specified in
// step (1)) from the Buffer Data Port register
u32 temp;
for (u32 j = 0; j < block_size / sizeof(u32); j++) {
temp = m_registers->buffer_data_port;
TRY(buf.write(&temp, i * block_size + sizeof(u32) * j, sizeof(u32)));
}
// 17. Repeat until all blocks are received and then go to step (18).
}
}
// 18. If this sequence is for Single or Multiple Block Transfer, go to step
// (19). In case of Infinite Block Transfer, go to step (21)
// 19. Wait for Transfer Complete Interrupt.
if (!retry_with_timeout(
[&]() { return m_registers->interrupt_status.transfer_complete; })) {
return EIO;
}
// 20. Write 1 to the Transfer Complete in the Normal Interrupt Status
// register for clearing this bit
m_registers->interrupt_status.raw = transfer_complete;
return {};
}
u32 SDHostController::make_adma_descriptor_table(u32 block_count)
{
// FIXME: We might be able to write to the destination buffer directly
// Especially with 64 bit addressing enabled
// This might cost us more descriptor entries but avoids the memcpy at the end
// of each read cycle
FlatPtr adma_descriptor_physical = m_dma_region->physical_page(0)->paddr().get();
FlatPtr adma_dma_region_physical = adma_descriptor_physical + PAGE_SIZE;
FlatPtr adma_descriptor_virtual = m_dma_region->vaddr().get();
u32 offset = 0;
u32 blocks_transferred = 0;
u32 blocks_per_descriptor = (1 << 16) / block_len;
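// With block_len = 512, each descriptor covers (1 << 16) / 512 = 128 blocks,
// so a full 64-entry ADMA2_32 table addresses 8192 blocks (4 MiB) and a full
// 32-entry ADMA2_64 table addresses 4096 blocks (2 MiB).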
using enum OperatingMode;
switch (m_mode) {
case ADMA2_32: {
u32 i = 0;
Array<SD::DMADescriptor64, 64>& command_buffer = *bit_cast<Array<SD::DMADescriptor64, 64>*>(adma_descriptor_virtual);
for (; i < 64; ++i) {
FlatPtr physical_transfer_address = adma_dma_region_physical + offset;
VERIFY(physical_transfer_address >> 32 == 0);
// If the remaining block count is less than the maximum addressable blocks
// we need to set the actual length and break out of the loop
if (block_count - blocks_transferred < blocks_per_descriptor) {
u32 blocks_to_transfer = block_count - blocks_transferred;
command_buffer[i] = SD::DMADescriptor64 {
.valid = 1,
.end = 1,
.interrupt = 0,
.action = SD::DMAAction::Tran,
.length_upper = 0,
.length_lower = static_cast<u32>(blocks_to_transfer * block_len),
.address = static_cast<u32>(physical_transfer_address),
};
blocks_transferred += blocks_to_transfer;
offset += static_cast<size_t>(blocks_to_transfer) * block_len;
break;
}
command_buffer[i] = SD::DMADescriptor64 {
.valid = 1,
.end = 0,
.interrupt = 0,
.action = SD::DMAAction::Tran,
.length_upper = 0,
.length_lower = 0, // length of 0 means 1<<16 bytes
.address = static_cast<u32>(physical_transfer_address),
};
blocks_transferred += blocks_per_descriptor;
offset += (1 << 16);
}
command_buffer[min(i, 63)].end = 1;
break;
}
case ADMA2_64: {
u32 i = 0;
Array<SD::DMADescriptor128, 32>& command_buffer = *bit_cast<Array<SD::DMADescriptor128, 32>*>(adma_descriptor_virtual);
for (; i < 32; ++i) {
FlatPtr physical_transfer_address = adma_dma_region_physical + offset;
VERIFY(physical_transfer_address >> 32 == 0);
// If the remaining block count is less than the maximum addressable blocks
// we need to set the actual length and break out of the loop
if (block_count - blocks_transferred < blocks_per_descriptor) {
u32 blocks_to_read = block_count - blocks_transferred;
command_buffer[i] = SD::DMADescriptor128 {
.valid = 1,
.end = 1,
.interrupt = 0,
.action = SD::DMAAction::Tran,
.length_upper = 0,
.length_lower = static_cast<u32>(blocks_to_read * block_len),
.address_low = static_cast<u32>(physical_transfer_address & 0xFFFF'FFFF),
.address_high = static_cast<u32>(physical_transfer_address >> 32),
};
blocks_transferred += blocks_to_read;
offset += static_cast<size_t>(blocks_to_read) * block_len;
break;
}
command_buffer[i] = SD::DMADescriptor128 {
.valid = 1,
.end = 0,
.interrupt = 0,
.action = SD::DMAAction::Tran,
.length_upper = 0,
.length_lower = 0, // length of 0 means 1<<16 bytes
.address_low = static_cast<u32>(physical_transfer_address & 0xFFFF'FFFF),
.address_high = static_cast<u32>(physical_transfer_address >> 32),
};
blocks_transferred += blocks_per_descriptor;
offset += (1 << 16);
}
command_buffer[min(i, 31)].end = 1;
break;
}
case PIO:
VERIFY_NOT_REACHED();
}
return blocks_transferred;
}
ErrorOr<void> SDHostController::transfer_blocks_adma2(u32 block_address, u32 block_count, UserOrKernelBuffer out, SD::DataTransferDirection direction)
{
using enum OperatingMode;
FlatPtr adma_descriptor_physical = m_dma_region->physical_page(0)->paddr().get();
FlatPtr adma_descriptor_virtual = m_dma_region->vaddr().get();
FlatPtr adma_dma_region_virtual = adma_descriptor_virtual + PAGE_SIZE;
AK::ArmedScopeGuard abort_guard {
[] {
dbgln("Aborting SDHC ADMA read");
TODO();
}
};
// 3.7.2.3 Using ADMA
u32 blocks_per_descriptor = (1 << 16) / block_len;
u32 addressable_blocks_per_transfer = blocks_per_descriptor * (m_mode == ADMA2_32 ? 64 : 32);
size_t host_offset = 0;
size_t card_offset = 0;
u32 blocks_transferred_total = 0;
while (blocks_transferred_total < block_count) {
// When writing to the card we must prime the transfer buffer with the data we want to write
// FIXME: We might be able to transfer to/from the destination/origin buffer directly
// Especially with 64 bit addressing enabled
// This might cost us more descriptor entries, when the physical range is segmented,
// but avoids the memcpy at the end of each transfer cycle
if (direction == SD::DataTransferDirection::HostToCard)
TRY(out.read(bit_cast<void*>(adma_dma_region_virtual), host_offset, min(block_count - blocks_transferred_total, addressable_blocks_per_transfer) * block_len));
// (1) Create Descriptor table for ADMA in the system memory
u32 blocks_transferred = make_adma_descriptor_table(block_count - blocks_transferred_total);
card_offset += blocks_transferred * block_len;
// (2) Set the Descriptor address for ADMA in the ADMA System Address register.
m_registers->adma_system_address[0] = static_cast<u32>(adma_descriptor_physical & 0xFFFF'FFFF);
if (m_mode == ADMA2_64)
m_registers->adma_system_address[1] = static_cast<u32>(adma_descriptor_physical >> 32);
// (3) Set the value corresponding to the executed data byte length of one block in the Block Size
// register.
// (4) Set the value corresponding to the executed data block count in the Block Count register in
// accordance with Table 2-9. Refer to Section 1.15 for more details.
// Note: To avoid the restriction of the 16 bit block count we disable the block counter
// and do not set the block count, resulting in an "Infinite Transfer" (SDHC Table 2-9)
// ADMA has its own way of encoding block counts and to signal transfer termination
m_registers->block_size_and_block_count = block_len;
// (5) Set the argument value to the Argument register.
m_registers->argument_1 = block_address;
// (6) Set the value to the Transfer Mode register. The Host Driver determines Multi / Single Block
// Select, Block Count Enable, Data Transfer Direction, Auto CMD12 Enable and DMA
// Enable. Multi / Single Block Select and Block Count Enable are determined according to
// Table 2-9.
// If response check is enabled (Response Error Check Enable =1), set Response Interrupt
// Disable to 1 and select Response Type R1 / R5
SD::Command command = {
.dma_enable = 1,
.block_counter = 0,
.auto_command = blocks_transferred > 1 ? SD::SendAutoCommand::Command12 : SD::SendAutoCommand::Disabled,
.direction = direction,
.multiblock = blocks_transferred > 1,
.response_type_r1r5 = 0,
.response_error_check = 0,
.response_interrupt_disable = 0,
.reserved1 = 0,
.response_type = SD::ResponseType::ResponseOf48Bits,
.sub_command_flag = 0,
.crc_enable = 1,
.idx_enable = 0,
.is_data = 1,
.type = SD::CommandType::Normal,
.index = direction == SD::DataTransferDirection::HostToCard ? (blocks_transferred > 1 ? SD::CommandIndex::WriteMultipleBlock : SD::CommandIndex::WriteSingleBlock)
: (blocks_transferred > 1 ? SD::CommandIndex::ReadMultipleBlock : SD::CommandIndex::ReadSingleBlock),
.reserved3 = 0
};
// (7) Set the value to the Command register.
// Note: When writing to the upper byte [3] of the Command register, the SD command is issued
// and DMA is started.
m_registers->transfer_mode_and_command = command.raw;
// (8) If response check is enabled, go to stop (11) else wait for the Command Complete Interrupt.
// Note: We never enabled response checking
if (!retry_with_timeout([this]() { return m_registers->interrupt_status.command_complete; })) {
dbgln("SDHC: ADMA2 command response timed out");
}
// (9) Write 1 to the Command Complete in the Normal Interrupt Status register to clear this bit.
// Note: We cannot write to the bit-field member directly, as the resulting
// read-modify-write could also write 1 to (and thereby clear) an already set
// `transfer_complete` flag, making the next check time out.
m_registers->interrupt_status.raw = command_complete;
// TODO: (10) Read Response register and get necessary information of the issued command
// (11) Wait for the Transfer Complete Interrupt and ADMA Error Interrupt.
// FIXME: Especially with big transfers this might time out before the transfer is finished,
// although no error has happened.
// We should set this up so that it actually waits for the interrupts via a designated handler.
// Note that the SDHC has a way to detect transfer timeouts on its own.
if (!retry_with_timeout([this]() { return m_registers->interrupt_status.transfer_complete || m_registers->interrupt_status.adma_error; })) {
dbgln("SDHC: ADMA2 transfer timed out");
return EIO;
}
// (12) If Transfer Complete is set to 1, go to Step (13)
if (m_registers->interrupt_status.transfer_complete) {
// (13) Write 1 to the Transfer Complete Status in the Normal Interrupt Status register to clear this bit.
m_registers->interrupt_status.raw = transfer_complete;
}
// else if ADMA Error Interrupt is set to 1, go to Step (14).
else if (m_registers->interrupt_status.adma_error) {
// (14) Write 1 to the ADMA Error Interrupt Status in the Error Interrupt Status register to clear this bit.
m_registers->interrupt_status.adma_error = 1;
// (15) Abort ADMA operation. SD card operation should be stopped by issuing abort command. If
// necessary, the Host Driver checks ADMA Error Status register to detect why ADMA error is
// generated
dmesgln("SDHC transfer failed, ADMA Error Status: {:2b}", AK::to_underlying(m_registers->adma_error_status.state));
// The scope guard will handle the Abort
return EIO;
} else {
VERIFY_NOT_REACHED();
}
// Copy the read data to the correct memory location
// FIXME: As described above, we may be able to target the destination buffer directly
if (direction == SD::DataTransferDirection::CardToHost)
TRY(out.write(bit_cast<void const*>(adma_dma_region_virtual), host_offset, blocks_transferred * block_len));
blocks_transferred_total += blocks_transferred;
host_offset += card_offset;
// FIXME: Advancing the block address by a byte offset is only correct for
// byte-addressed (SDSC) cards; block-addressed cards would need to advance
// by blocks_transferred instead.
block_address += card_offset;
card_offset = 0;
}
abort_guard.disarm();
return {};
}
ErrorOr<void> SDHostController::read_block(Badge<SDMemoryCard>, u32 block_address, u32 block_count, UserOrKernelBuffer out)
{
VERIFY(is_card_inserted());
using enum OperatingMode;
switch (m_mode) {
case OperatingMode::ADMA2_32:
case OperatingMode::ADMA2_64:
return transfer_blocks_adma2(block_address, block_count, out, SD::DataTransferDirection::CardToHost);
case PIO: {
if (block_count > 1) {
return transaction_control_with_data_transfer_using_the_dat_line_without_dma(
SD::Commands::read_multiple_block,
block_address,
block_count,
block_len,
out,
DataTransferType::Read);
}
return transaction_control_with_data_transfer_using_the_dat_line_without_dma(
SD::Commands::read_single_block,
block_address,
block_count,
block_len,
out,
DataTransferType::Read);
}
default:
VERIFY_NOT_REACHED();
}
}
ErrorOr<void> SDHostController::write_block(Badge<SDMemoryCard>, u32 block_address, u32 block_count, UserOrKernelBuffer in)
{
VERIFY(is_card_inserted());
using enum OperatingMode;
switch (m_mode) {
case OperatingMode::ADMA2_32:
case OperatingMode::ADMA2_64:
return transfer_blocks_adma2(block_address, block_count, in, SD::DataTransferDirection::HostToCard);
case PIO: {
if (block_count > 1) {
return transaction_control_with_data_transfer_using_the_dat_line_without_dma(
SD::Commands::write_multiple_block,
block_address,
block_count,
block_len,
in,
DataTransferType::Write);
}
return transaction_control_with_data_transfer_using_the_dat_line_without_dma(
SD::Commands::write_single_block,
block_address,
block_count,
block_len,
in,
DataTransferType::Write);
}
default:
VERIFY_NOT_REACHED();
};
}
ErrorOr<SD::SDConfigurationRegister> SDHostController::retrieve_sd_configuration_register(u32 relative_card_address)
{
SD::SDConfigurationRegister scr;
TRY(issue_command(SD::Commands::app_cmd, relative_card_address));
TRY(wait_for_response());
TRY(transaction_control_with_data_transfer_using_the_dat_line_without_dma(
SD::Commands::app_send_scr,
0, 1, 8,
UserOrKernelBuffer::for_kernel_buffer(scr.raw), DataTransferType::Read));
return scr;
}
ErrorOr<u32> SDHostController::retrieve_sd_clock_frequency()
{
if (m_registers->capabilities.base_clock_frequency == 0) {
// Spec says:
// If these bits are all 0, the Host System has to get information via another method
TODO();
}
const i64 one_mhz = 1'000'000;
return { m_registers->capabilities.base_clock_frequency * one_mhz };
}
// PLSS Table 4-43 : Card Status Field/Command
bool SDHostController::card_status_contains_errors(SD::Command const& command, u32 resp)
{
SD::CardStatus status;
// PLSS 4.9.5 R6
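// R6 carries only 16 card status bits: response bits 12:0 map directly, while
// response bits 13, 14 and 15 correspond to card status bits 19 (ERROR),
// 22 (ILLEGAL_COMMAND) and 23 (COM_CRC_ERROR), hence the shifts below.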
if (command.index == SD::CommandIndex::SendRelativeAddr) {
status.raw = (resp & 0x1fff) | ((resp & 0x2000) << 6) | ((resp & 0x4000) << 8) | ((resp & 0x8000) << 8);
} else {
status.raw = resp;
}
bool common_errors = status.error || status.cc_error || status.card_ecc_failed || status.illegal_command || status.com_crc_error || status.lock_unlock_failed || status.card_is_locked || status.wp_violation || status.erase_param || status.csd_overwrite;
bool contains_errors = false;
switch (command.index) {
case SD::CommandIndex::SendRelativeAddr:
if (status.error || status.illegal_command || status.com_crc_error) {
contains_errors = true;
}
break;
case SD::CommandIndex::SelectCard:
if (common_errors) {
contains_errors = true;
}
break;
case SD::CommandIndex::SetBlockLen:
if (common_errors || status.block_len_error) {
contains_errors = true;
}
break;
case SD::CommandIndex::ReadSingleBlock:
case SD::CommandIndex::ReadMultipleBlock:
if (common_errors || status.address_error || status.out_of_range) {
contains_errors = true;
}
break;
case SD::CommandIndex::WriteSingleBlock:
case SD::CommandIndex::WriteMultipleBlock:
if (common_errors || status.block_len_error || status.address_error || status.out_of_range) {
contains_errors = true;
}
break;
case SD::CommandIndex::AppSendScr:
case SD::CommandIndex::AppCmd:
if (common_errors) {
contains_errors = true;
}
break;
default:
break;
}
return contains_errors;
}
}


@ -0,0 +1,113 @@
/*
* Copyright (c) 2023, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Badge.h>
#include <AK/Function.h>
#include <AK/Result.h>
#include <AK/Types.h>
#include <Kernel/Devices/Storage/SD/Commands.h>
#include <Kernel/Devices/Storage/SD/Registers.h>
#include <Kernel/Devices/Storage/SD/SDMemoryCard.h>
#include <Kernel/Locking/Mutex.h>
namespace Kernel {
class SDHostController : public StorageController {
public:
SDHostController();
ErrorOr<void> initialize();
virtual ~SDHostController() = default;
virtual LockRefPtr<StorageDevice> device(u32 index) const override { return index == 0 ? m_card : nullptr; }
virtual ErrorOr<void> reset() override;
virtual ErrorOr<void> shutdown() override;
virtual size_t devices_count() const override { return m_card ? 1 : 0; }
virtual void complete_current_request(AsyncDeviceRequest::RequestResult) override;
ErrorOr<void> read_block(Badge<SDMemoryCard>, u32 block_address, u32 block_count, UserOrKernelBuffer out);
ErrorOr<void> write_block(Badge<SDMemoryCard>, u32 block_address, u32 block_count, UserOrKernelBuffer in);
void try_enable_dma();
protected:
virtual SD::HostControlRegisterMap volatile* get_register_map_base_address() = 0;
private:
ErrorOr<NonnullLockRefPtr<SDMemoryCard>> try_initialize_inserted_card();
bool is_card_inserted() const
{
constexpr u32 card_inserted = 1 << 16;
return m_registers->present_state & card_inserted;
}
SD::HostVersion host_version() { return m_registers->slot_interrupt_status_and_version.specification_version_number; }
ErrorOr<void> reset_host_controller();
SD::Command last_sent_command()
{
SD::Command command {};
command.raw = m_registers->transfer_mode_and_command;
return command;
}
bool currently_active_command_uses_transfer_complete_interrupt();
ErrorOr<u32> calculate_sd_clock_divisor(u32 sd_clock_frequency, u32 frequency);
bool is_sd_clock_enabled();
ErrorOr<void> sd_clock_supply(u32 frequency);
void sd_clock_stop();
ErrorOr<void> sd_clock_frequency_change(u32 frequency);
ErrorOr<u32> retrieve_sd_clock_frequency();
struct Response {
u32 response[4];
};
ErrorOr<void> issue_command(SD::Command const&, u32 argument);
ErrorOr<Response> wait_for_response();
bool card_status_contains_errors(SD::Command const&, u32);
bool retry_with_timeout(Function<bool()>, i64 delay_between_tries = 100);
enum class DataTransferType {
Read,
Write
};
enum class OperatingMode {
PIO,
ADMA2_32,
ADMA2_64
};
ErrorOr<void> transaction_control_with_data_transfer_using_the_dat_line_without_dma(SD::Command const&, u32 argument, u32 block_count, u32 block_size, UserOrKernelBuffer, DataTransferType data_transfer_type);
ErrorOr<void> transfer_blocks_adma2(u32 block_address, u32 block_count, UserOrKernelBuffer, SD::DataTransferDirection);
ErrorOr<SD::SDConfigurationRegister> retrieve_sd_configuration_register(u32 relative_card_address);
u32 make_adma_descriptor_table(u32 block_count);
volatile SD::HostControlRegisterMap* m_registers;
LockRefPtr<SDMemoryCard> m_card { nullptr };
u32 m_hardware_relative_controller_id { 0 };
OperatingMode m_mode { OperatingMode::PIO };
Mutex m_lock { "SDHostController"sv };
// For ADMA2
// One page of descriptor tables with 16-bit lengths can address transfers of
// up to 4 MiB (ADMA2_32) or up to 2 MiB (ADMA2_64).
// To not over-allocate, we use a buffer of just 16 pages.
// FIXME: Investigate the average usage and adjust this
constexpr static size_t dma_rw_buffer_size = 16 * PAGE_SIZE;
constexpr static size_t dma_region_size = PAGE_SIZE + dma_rw_buffer_size;
OwnPtr<Memory::Region> m_dma_region;
};
}


@ -0,0 +1,52 @@
/*
* Copyright (c) 2023, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Devices/Storage/SD/Commands.h>
#include <Kernel/Devices/Storage/SD/SDHostController.h>
#include <Kernel/Devices/Storage/SD/SDMemoryCard.h>
namespace Kernel {
SDMemoryCard::SDMemoryCard(SDHostController& sdhc, StorageDevice::LUNAddress lun_address, u32 hardware_relative_controller_id, u32 block_len, u64 capacity_in_blocks, u32 relative_card_address, SD::OperatingConditionRegister ocr, SD::CardIdentificationRegister cid, SD::SDConfigurationRegister scr)
: StorageDevice(lun_address, hardware_relative_controller_id, block_len,
capacity_in_blocks)
, m_sdhc(sdhc)
, m_relative_card_address(relative_card_address)
, m_ocr(ocr)
, m_cid(cid)
, m_scr(scr)
{
}
void SDMemoryCard::start_request(AsyncBlockDeviceRequest& request)
{
// FIXME: Make this asynchronous
MutexLocker locker(m_lock);
VERIFY(request.block_size() == block_size());
auto buffer = request.buffer();
u32 block_address = request.block_index();
if (card_addressing_mode() == CardAddressingMode::ByteAddressing) {
block_address *= block_size();
}
if (request.request_type() == AsyncBlockDeviceRequest::RequestType::Write) {
if (m_sdhc.write_block({}, block_address, request.block_count(), buffer).is_error()) {
request.complete(AsyncDeviceRequest::Failure);
return;
}
} else {
if (m_sdhc.read_block({}, block_address, request.block_count(), buffer).is_error()) {
request.complete(AsyncDeviceRequest::Failure);
return;
}
}
request.complete(AsyncDeviceRequest::Success);
}
}


@ -0,0 +1,49 @@
/*
* Copyright (c) 2023, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Function.h>
#include <AK/Result.h>
#include <AK/Types.h>
#include <Kernel/Devices/Storage/SD/Registers.h>
#include <Kernel/Devices/Storage/StorageDevice.h>
#include <Kernel/Locking/Mutex.h>
namespace Kernel {
class SDHostController;
class SDMemoryCard : public StorageDevice {
public:
SDMemoryCard(SDHostController& sdhc, StorageDevice::LUNAddress, u32 hardware_relative_controller_id, u32 block_len, u64 capacity_in_blocks, u32 relative_card_address, SD::OperatingConditionRegister ocr, SD::CardIdentificationRegister cid, SD::SDConfigurationRegister scr);
// ^StorageDevice
virtual CommandSet command_set() const override { return CommandSet::SD; }
// ^BlockDevice
virtual void start_request(AsyncBlockDeviceRequest&) override;
private:
enum class CardAddressingMode {
ByteAddressing,
BlockAddressing
};
CardAddressingMode card_addressing_mode() const
{
return m_ocr.card_capacity_status ? CardAddressingMode::BlockAddressing : CardAddressingMode::ByteAddressing;
}
Mutex m_lock { "SDMemoryCard"sv };
SDHostController& m_sdhc;
u32 m_relative_card_address;
SD::OperatingConditionRegister m_ocr;
SD::CardIdentificationRegister m_cid;
SD::SDConfigurationRegister m_scr;
};
}


@ -0,0 +1,18 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Devices/Storage/StorageController.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
namespace Kernel {
StorageController::StorageController(u32 hardware_relative_controller_id)
: m_controller_id(StorageManagement::generate_controller_id())
, m_hardware_relative_controller_id(hardware_relative_controller_id)
{
}
}


@ -0,0 +1,48 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/OwnPtr.h>
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Bus/PCI/Device.h>
#include <Kernel/Devices/Device.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/PhysicalAddress.h>
#include <Kernel/Random.h>
#include <Kernel/WaitQueue.h>
namespace Kernel {
class AsyncBlockDeviceRequest;
class StorageDevice;
class StorageController : public AtomicRefCounted<StorageController> {
public:
virtual ~StorageController() = default;
virtual LockRefPtr<StorageDevice> device(u32 index) const = 0;
virtual size_t devices_count() const = 0;
u32 controller_id() const { return m_controller_id; }
u32 hardware_relative_controller_id() const { return m_hardware_relative_controller_id; }
protected:
virtual ErrorOr<void> reset() = 0;
virtual ErrorOr<void> shutdown() = 0;
virtual void complete_current_request(AsyncDeviceRequest::RequestResult) = 0;
explicit StorageController(u32 hardware_relative_controller_id);
private:
u32 const m_controller_id { 0 };
u32 const m_hardware_relative_controller_id { 0 };
};
}


@ -0,0 +1,265 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/StringView.h>
#include <Kernel/API/Ioctl.h>
#include <Kernel/Debug.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/Storage/StorageDevice.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
#include <Kernel/FileSystem/OpenFileDescription.h>
#include <Kernel/FileSystem/SysFS/Subsystems/DeviceIdentifiers/BlockDevicesDirectory.h>
#include <Kernel/FileSystem/SysFS/Subsystems/DeviceIdentifiers/SymbolicLinkDeviceComponent.h>
#include <Kernel/FileSystem/SysFS/Subsystems/Devices/Storage/DeviceDirectory.h>
#include <Kernel/FileSystem/SysFS/Subsystems/Devices/Storage/Directory.h>
namespace Kernel {
StorageDevice::StorageDevice(LUNAddress logical_unit_number_address, u32 hardware_relative_controller_id, size_t sector_size, u64 max_addressable_block)
: BlockDevice(StorageManagement::storage_type_major_number(), StorageManagement::generate_storage_minor_number(), sector_size)
, m_logical_unit_number_address(logical_unit_number_address)
, m_hardware_relative_controller_id(hardware_relative_controller_id)
, m_max_addressable_block(max_addressable_block)
, m_blocks_per_page(PAGE_SIZE / block_size())
{
}
StorageDevice::StorageDevice(Badge<RamdiskDevice>, LUNAddress logical_unit_number_address, u32 hardware_relative_controller_id, MajorNumber major, MinorNumber minor, size_t sector_size, u64 max_addressable_block)
: BlockDevice(major, minor, sector_size)
, m_logical_unit_number_address(logical_unit_number_address)
, m_hardware_relative_controller_id(hardware_relative_controller_id)
, m_max_addressable_block(max_addressable_block)
, m_blocks_per_page(PAGE_SIZE / block_size())
{
}
ErrorOr<void> StorageDevice::after_inserting()
{
auto sysfs_storage_device_directory = StorageDeviceSysFSDirectory::create(SysFSStorageDirectory::the(), *this);
m_sysfs_device_directory = sysfs_storage_device_directory;
SysFSStorageDirectory::the().plug({}, *sysfs_storage_device_directory);
VERIFY(!m_symlink_sysfs_component);
auto sys_fs_component = TRY(SysFSSymbolicLinkDeviceComponent::try_create(SysFSBlockDevicesDirectory::the(), *this, *m_sysfs_device_directory));
m_symlink_sysfs_component = sys_fs_component;
after_inserting_add_symlink_to_device_identifier_directory();
after_inserting_add_to_device_management();
return {};
}
void StorageDevice::will_be_destroyed()
{
// NOTE: We check if m_symlink_sysfs_component is not null, because if we failed
// in StorageDevice::after_inserting(), then that method will not set m_symlink_sysfs_component.
if (m_symlink_sysfs_component) {
before_will_be_destroyed_remove_symlink_from_device_identifier_directory();
m_symlink_sysfs_component.clear();
}
SysFSStorageDirectory::the().unplug({}, *m_sysfs_device_directory);
before_will_be_destroyed_remove_from_device_management();
}
StringView StorageDevice::class_name() const
{
return "StorageDevice"sv;
}
StringView StorageDevice::command_set_to_string_view() const
{
switch (command_set()) {
case CommandSet::SCSI:
return "scsi"sv;
case CommandSet::ATA:
return "ata"sv;
case CommandSet::NVMe:
return "nvme"sv;
case CommandSet::SD:
return "sd"sv;
default:
break;
}
VERIFY_NOT_REACHED();
}
ErrorOr<size_t> StorageDevice::read(OpenFileDescription&, u64 offset, UserOrKernelBuffer& outbuf, size_t len)
{
u64 index = offset >> block_size_log();
off_t offset_within_block = 0;
size_t whole_blocks = len >> block_size_log();
size_t remaining = len - (whole_blocks << block_size_log());
// PATAChannel will chuck a wobbly if we try to read more than PAGE_SIZE
// at a time, because it uses a single page for its DMA buffer.
if (whole_blocks >= m_blocks_per_page) {
whole_blocks = m_blocks_per_page;
remaining = 0;
}
if (len < block_size())
offset_within_block = offset - (index << block_size_log());
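// Illustrative example (hypothetical values) with a 512-byte block size:
// offset = 1536 and len = 1280 give index = 3, whole_blocks = 2 and
// remaining = 256; the two whole blocks are read directly into outbuf and
// the final 256 bytes go through the bounce buffer below.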
dbgln_if(STORAGE_DEVICE_DEBUG, "StorageDevice::read() index={}, whole_blocks={}, remaining={}", index, whole_blocks, remaining);
if (whole_blocks > 0) {
auto read_request = TRY(try_make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index, whole_blocks, outbuf, whole_blocks * block_size()));
auto result = read_request->wait();
if (result.wait_result().was_interrupted())
return EINTR;
switch (result.request_result()) {
case AsyncDeviceRequest::Failure:
case AsyncDeviceRequest::Cancelled:
return EIO;
case AsyncDeviceRequest::MemoryFault:
return EFAULT;
default:
break;
}
}
off_t pos = whole_blocks * block_size();
if (remaining > 0) {
auto data = TRY(ByteBuffer::create_uninitialized(block_size()));
auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data());
auto read_request = TRY(try_make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size()));
auto result = read_request->wait();
if (result.wait_result().was_interrupted())
return EINTR;
switch (result.request_result()) {
case AsyncDeviceRequest::Failure:
return pos;
case AsyncDeviceRequest::Cancelled:
return EIO;
case AsyncDeviceRequest::MemoryFault:
// This should never happen, we're writing to a kernel buffer!
VERIFY_NOT_REACHED();
default:
break;
}
TRY(outbuf.write(data.offset_pointer(offset_within_block), pos, remaining));
}
return pos + remaining;
}
bool StorageDevice::can_read(OpenFileDescription const&, u64 offset) const
{
return offset < (max_addressable_block() * block_size());
}
ErrorOr<size_t> StorageDevice::write(OpenFileDescription&, u64 offset, UserOrKernelBuffer const& inbuf, size_t len)
{
u64 index = offset >> block_size_log();
off_t offset_within_block = 0;
size_t whole_blocks = len >> block_size_log();
size_t remaining = len - (whole_blocks << block_size_log());
// PATAChannel will chuck a wobbly if we try to write more than PAGE_SIZE
// at a time, because it uses a single page for its DMA buffer.
if (whole_blocks >= m_blocks_per_page) {
whole_blocks = m_blocks_per_page;
remaining = 0;
}
if (len < block_size())
offset_within_block = offset - (index << block_size_log());
// We try to allocate the temporary block buffer for partial writes *before* we start any full block writes,
// so that a failed allocation cannot leave a write only partially completed.
Optional<ByteBuffer> partial_write_block;
if (remaining > 0)
partial_write_block = TRY(ByteBuffer::create_zeroed(block_size()));
dbgln_if(STORAGE_DEVICE_DEBUG, "StorageDevice::write() index={}, whole_blocks={}, remaining={}", index, whole_blocks, remaining);
if (whole_blocks > 0) {
auto write_request = TRY(try_make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index, whole_blocks, inbuf, whole_blocks * block_size()));
auto result = write_request->wait();
if (result.wait_result().was_interrupted())
return EINTR;
switch (result.request_result()) {
case AsyncDeviceRequest::Failure:
case AsyncDeviceRequest::Cancelled:
return EIO;
case AsyncDeviceRequest::MemoryFault:
return EFAULT;
default:
break;
}
}
off_t pos = whole_blocks * block_size();
// since we can only write in block_size() increments, if we want to do a
// partial write, we have to read the block's content first, modify it,
// then write the whole block back to the disk.
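// For example (illustrative numbers), with a 512-byte block size, writing
// len=100 at offset=520 gives whole_blocks=0, remaining=100 and
// offset_within_block=8: we read block 1, overwrite bytes 8..107 of the
// buffered copy, and write the whole block back.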
if (remaining > 0) {
auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(partial_write_block->data());
{
auto read_request = TRY(try_make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size()));
auto result = read_request->wait();
if (result.wait_result().was_interrupted())
return EINTR;
switch (result.request_result()) {
case AsyncDeviceRequest::Failure:
return pos;
case AsyncDeviceRequest::Cancelled:
return EIO;
case AsyncDeviceRequest::MemoryFault:
// This should never happen, we're writing to a kernel buffer!
VERIFY_NOT_REACHED();
default:
break;
}
}
TRY(inbuf.read(partial_write_block->offset_pointer(offset_within_block), pos, remaining));
{
auto write_request = TRY(try_make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index + whole_blocks, 1, data_buffer, block_size()));
auto result = write_request->wait();
if (result.wait_result().was_interrupted())
return EINTR;
switch (result.request_result()) {
case AsyncDeviceRequest::Failure:
return pos;
case AsyncDeviceRequest::Cancelled:
return EIO;
case AsyncDeviceRequest::MemoryFault:
// This should never happen, the request only uses a kernel buffer!
VERIFY_NOT_REACHED();
default:
break;
}
}
}
return pos + remaining;
}
bool StorageDevice::can_write(OpenFileDescription const&, u64 offset) const
{
return offset < (max_addressable_block() * block_size());
}
ErrorOr<void> StorageDevice::ioctl(OpenFileDescription&, unsigned request, Userspace<void*> arg)
{
switch (request) {
case STORAGE_DEVICE_GET_SIZE: {
u64 disk_size = m_max_addressable_block * block_size();
return copy_to_user(static_ptr_cast<u64*>(arg), &disk_size);
}
case STORAGE_DEVICE_GET_BLOCK_SIZE: {
size_t size = block_size();
return copy_to_user(static_ptr_cast<size_t*>(arg), &size);
}
default:
return EINVAL;
}
}
}
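// Note: A minimal userspace sketch (illustrative, not part of this patch) of how
// these ioctls are consumed, assuming the STORAGE_DEVICE_* request numbers are
// exposed through <sys/ioctl.h>:
//
// #include <fcntl.h>
// #include <stdint.h>
// #include <stdio.h>
// #include <sys/ioctl.h>
//
// int main()
// {
//     int fd = open("/dev/hda", O_RDONLY);
//     if (fd < 0)
//         return 1;
//     uint64_t disk_size = 0;
//     size_t block_size = 0;
//     if (ioctl(fd, STORAGE_DEVICE_GET_SIZE, &disk_size) < 0)
//         return 1;
//     if (ioctl(fd, STORAGE_DEVICE_GET_BLOCK_SIZE, &block_size) < 0)
//         return 1;
//     printf("%llu bytes in %zu-byte blocks\n", (unsigned long long)disk_size, block_size);
//     return 0;
// }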

Kernel/Devices/Storage/StorageDevice.h
@@ -0,0 +1,111 @@
/*
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/IntrusiveList.h>
#include <Kernel/Devices/BlockDevice.h>
#include <Kernel/Devices/Storage/DiskPartition.h>
#include <Kernel/Devices/Storage/StorageController.h>
#include <Kernel/Interrupts/IRQHandler.h>
#include <Kernel/Locking/Mutex.h>
namespace Kernel {
class RamdiskDevice;
class StorageDevice : public BlockDevice {
friend class StorageManagement;
friend class DeviceManagement;
public:
// Note: This attribute describes the internal command set of a storage device.
// For example, an ordinary hard drive uses the ATA command set, while
// an ATAPI device (e.g. an optical drive) that is connected to the ATA bus
// actually uses SCSI commands (packets) encapsulated inside ATA commands.
// The IDE controller code, being aware that ATAPI devices can be attached
// to the ATA bus, checks whether the command set is ATA or SCSI and acts
// accordingly.
// Note: For now, there's simply no distinction between the interface type and the command set.
// As mentioned above, ATAPI devices use the ATA interface with actual SCSI packets, so
// the command set is SCSI while the interface type is ATA. We simply don't support SCSI over ATA (ATAPI),
// and ATAPI is the exception to the no-distinction rule. If we ever add SCSI support to the kernel,
// we can introduce another enum class to express the distinction.
enum class CommandSet {
SCSI,
ATA,
NVMe,
SD,
};
// Note: The most reliable way to address this device from userspace interfaces,
// such as SysFS, is to have one uniform way for userspace to enumerate everything.
// Therefore, SCSI LUN (logical unit number) addressing seems to be the most generic way to do this.
// For example, on a legacy ATA instance, one might connect a hard drive to the second IDE controller,
// on the primary channel as a slave device, which translates to LUN 1:0:1.
// On NVMe, for example, connecting a second PCIe NVMe storage device with a sole NVMe namespace translates
// to LUN 1:1:0.
struct LUNAddress {
u32 controller_id;
u32 target_id;
u32 disk_id;
};
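// For example, the ATA disk described above would be addressed as
// LUNAddress { .controller_id = 1, .target_id = 0, .disk_id = 1 }.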
public:
virtual u64 max_addressable_block() const { return m_max_addressable_block; }
// ^BlockDevice
virtual ErrorOr<size_t> read(OpenFileDescription&, u64, UserOrKernelBuffer&, size_t) override;
virtual bool can_read(OpenFileDescription const&, u64) const override;
virtual ErrorOr<size_t> write(OpenFileDescription&, u64, UserOrKernelBuffer const&, size_t) override;
virtual bool can_write(OpenFileDescription const&, u64) const override;
virtual void prepare_for_unplug() { m_partitions.clear(); }
Vector<NonnullLockRefPtr<DiskPartition>> const& partitions() const { return m_partitions; }
void add_partition(NonnullLockRefPtr<DiskPartition> disk_partition) { MUST(m_partitions.try_append(disk_partition)); }
LUNAddress const& logical_unit_number_address() const { return m_logical_unit_number_address; }
u32 parent_controller_hardware_relative_id() const { return m_hardware_relative_controller_id; }
virtual CommandSet command_set() const = 0;
StringView command_set_to_string_view() const;
// ^File
virtual ErrorOr<void> ioctl(OpenFileDescription&, unsigned request, Userspace<void*> arg) final;
protected:
StorageDevice(LUNAddress, u32 hardware_relative_controller_id, size_t sector_size, u64);
// Note: We want to be able to distinguish between storage devices and ramdisk-based devices.
// We do this because it makes selecting ramdisk devices via the kernel command line at boot time much easier.
StorageDevice(Badge<RamdiskDevice>, LUNAddress, u32 hardware_relative_controller_id, MajorNumber, MinorNumber, size_t sector_size, u64);
// ^DiskDevice
virtual StringView class_name() const override;
private:
virtual ErrorOr<void> after_inserting() override;
virtual void will_be_destroyed() override;
mutable IntrusiveListNode<StorageDevice, LockRefPtr<StorageDevice>> m_list_node;
Vector<NonnullLockRefPtr<DiskPartition>> m_partitions;
LUNAddress const m_logical_unit_number_address;
// Note: This data member should be used together with the LUNAddress target_id and disk_id members.
// LUNs are hardware-agnostic, system-wide addresses, so they are assigned without regard to the specific hardware interfaces.
// This class member, on the other hand, is meant to be assigned *per hardware type*,
// which means that, in contrast to the LUNAddress controller_id member, it holds the index of the hardware
// controller among its fellow controllers of the same hardware type in the system.
u32 const m_hardware_relative_controller_id { 0 };
u64 m_max_addressable_block { 0 };
size_t m_blocks_per_page { 0 };
};
}

Kernel/Devices/Storage/StorageManagement.cpp
@@ -0,0 +1,507 @@
/*
* Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
* Copyright (c) 2022, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Platform.h>
#include <AK/Singleton.h>
#include <AK/StringView.h>
#include <AK/UUID.h>
#if ARCH(X86_64)
# include <Kernel/Arch/x86_64/ISABus/IDEController.h>
# include <Kernel/Arch/x86_64/PCI/IDELegacyModeController.h>
#endif
#if ARCH(AARCH64)
# include <Kernel/Arch/aarch64/RPi/SDHostController.h>
#endif
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/Access.h>
#include <Kernel/Bus/PCI/Controller/VolumeManagementDevice.h>
#include <Kernel/CommandLine.h>
#include <Kernel/Devices/BlockDevice.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Devices/Storage/ATA/AHCI/Controller.h>
#include <Kernel/Devices/Storage/ATA/GenericIDE/Controller.h>
#include <Kernel/Devices/Storage/NVMe/NVMeController.h>
#include <Kernel/Devices/Storage/SD/PCISDHostController.h>
#include <Kernel/Devices/Storage/SD/SDHostController.h>
#include <Kernel/Devices/Storage/StorageManagement.h>
#include <Kernel/FileSystem/Ext2FS/FileSystem.h>
#include <Kernel/FileSystem/VirtualFileSystem.h>
#include <Kernel/Panic.h>
#include <LibPartition/EBRPartitionTable.h>
#include <LibPartition/GUIDPartitionTable.h>
#include <LibPartition/MBRPartitionTable.h>
namespace Kernel {
static Singleton<StorageManagement> s_the;
static Atomic<u32> s_storage_device_minor_number;
static Atomic<u32> s_partition_device_minor_number;
static Atomic<u32> s_controller_id;
static Atomic<u32> s_relative_ata_controller_id;
static Atomic<u32> s_relative_nvme_controller_id;
static Atomic<u32> s_relative_sd_controller_id;
static constexpr StringView partition_uuid_prefix = "PARTUUID:"sv;
static constexpr StringView partition_number_prefix = "part"sv;
static constexpr StringView block_device_prefix = "block"sv;
static constexpr StringView ata_device_prefix = "ata"sv;
static constexpr StringView nvme_device_prefix = "nvme"sv;
static constexpr StringView logical_unit_number_device_prefix = "lun"sv;
static constexpr StringView sd_device_prefix = "sd"sv;
UNMAP_AFTER_INIT StorageManagement::StorageManagement()
{
}
u32 StorageManagement::generate_relative_nvme_controller_id(Badge<NVMeController>)
{
auto controller_id = s_relative_nvme_controller_id.load();
s_relative_nvme_controller_id++;
return controller_id;
}
u32 StorageManagement::generate_relative_ata_controller_id(Badge<ATAController>)
{
auto controller_id = s_relative_ata_controller_id.load();
s_relative_ata_controller_id++;
return controller_id;
}
u32 StorageManagement::generate_relative_sd_controller_id(Badge<SDHostController>)
{
auto controller_id = s_relative_sd_controller_id.load();
s_relative_sd_controller_id++;
return controller_id;
}
void StorageManagement::remove_device(StorageDevice& device)
{
m_storage_devices.remove(device);
}
UNMAP_AFTER_INIT void StorageManagement::enumerate_pci_controllers(bool force_pio, bool nvme_poll)
{
VERIFY(m_controllers.is_empty());
if (!kernel_command_line().disable_physical_storage()) {
// NOTE: Search for VMD devices before actually searching for storage controllers
// because the VMD device is only a bridge to such (NVMe) controllers.
MUST(PCI::enumerate([&](PCI::DeviceIdentifier const& device_identifier) -> void {
constexpr PCI::HardwareID vmd_device = { 0x8086, 0x9a0b };
if (device_identifier.hardware_id() == vmd_device) {
auto controller = PCI::VolumeManagementDevice::must_create(device_identifier);
MUST(PCI::Access::the().add_host_controller_and_scan_for_devices(move(controller)));
}
}));
auto const& handle_mass_storage_device = [&](PCI::DeviceIdentifier const& device_identifier) {
using SubclassID = PCI::MassStorage::SubclassID;
auto subclass_code = static_cast<SubclassID>(device_identifier.subclass_code().value());
#if ARCH(X86_64)
if (subclass_code == SubclassID::IDEController && kernel_command_line().is_ide_enabled()) {
if (auto ide_controller_or_error = PCIIDELegacyModeController::initialize(device_identifier, force_pio); !ide_controller_or_error.is_error())
m_controllers.append(ide_controller_or_error.release_value());
else
dmesgln("Unable to initialize IDE controller: {}", ide_controller_or_error.error());
}
#elif ARCH(AARCH64)
(void)force_pio;
TODO_AARCH64();
#else
# error Unknown architecture
#endif
if (subclass_code == SubclassID::SATAController
&& device_identifier.prog_if().value() == to_underlying(PCI::MassStorage::SATAProgIF::AHCI)) {
if (auto ahci_controller_or_error = AHCIController::initialize(device_identifier); !ahci_controller_or_error.is_error())
m_controllers.append(ahci_controller_or_error.value());
else
dmesgln("Unable to initialize AHCI controller: {}", ahci_controller_or_error.error());
}
if (subclass_code == SubclassID::NVMeController) {
auto controller = NVMeController::try_initialize(device_identifier, nvme_poll);
if (controller.is_error()) {
dmesgln("Unable to initialize NVMe controller: {}", controller.error());
} else {
m_controllers.append(controller.release_value());
}
}
};
auto const& handle_base_device = [&](PCI::DeviceIdentifier const& device_identifier) {
using SubclassID = PCI::Base::SubclassID;
auto subclass_code = static_cast<SubclassID>(device_identifier.subclass_code().value());
if (subclass_code == SubclassID::SDHostController) {
auto sdhc_or_error = PCISDHostController::try_initialize(device_identifier);
if (sdhc_or_error.is_error()) {
dmesgln("PCI: Failed to initialize SD Host Controller ({} - {}): {}", device_identifier.address(), device_identifier.hardware_id(), sdhc_or_error.error());
} else {
m_controllers.append(sdhc_or_error.release_value());
}
}
};
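// Note: For reference, the IDs matched above are the standard PCI-SIG
// assignments: class 0x01 (Mass Storage) with subclass 0x01 (IDE), subclass
// 0x06 (SATA, prog-if 0x01 for AHCI) or subclass 0x08 (NVMe), and class 0x08
// (Base System Peripheral) with subclass 0x05 (SD Host Controller).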
MUST(PCI::enumerate([&](PCI::DeviceIdentifier const& device_identifier) -> void {
auto class_code = device_identifier.class_code().value();
if (class_code == to_underlying(PCI::ClassID::MassStorage)) {
handle_mass_storage_device(device_identifier);
} else if (class_code == to_underlying(PCI::ClassID::Base)) {
handle_base_device(device_identifier);
}
}));
}
}
UNMAP_AFTER_INIT void StorageManagement::enumerate_storage_devices()
{
VERIFY(!m_controllers.is_empty());
for (auto& controller : m_controllers) {
for (size_t device_index = 0; device_index < controller->devices_count(); device_index++) {
auto device = controller->device(device_index);
if (device.is_null())
continue;
m_storage_devices.append(device.release_nonnull());
}
}
}
UNMAP_AFTER_INIT void StorageManagement::dump_storage_devices_and_partitions() const
{
dbgln("StorageManagement: Detected {} storage devices", m_storage_devices.size_slow());
for (auto const& storage_device : m_storage_devices) {
auto const& partitions = storage_device.partitions();
if (partitions.is_empty()) {
dbgln(" Device: block{}:{} (no partitions)", storage_device.major(), storage_device.minor());
} else {
dbgln(" Device: block{}:{} ({} partitions)", storage_device.major(), storage_device.minor(), partitions.size());
unsigned partition_number = 1;
for (auto const& partition : partitions) {
dbgln(" Partition: {}, block{}:{} (UUID {})", partition_number, partition->major(), partition->minor(), partition->metadata().unique_guid().to_string());
partition_number++;
}
}
}
}
UNMAP_AFTER_INIT ErrorOr<NonnullOwnPtr<Partition::PartitionTable>> StorageManagement::try_to_initialize_partition_table(StorageDevice& device) const
{
auto mbr_table_or_error = Partition::MBRPartitionTable::try_to_initialize(device);
if (!mbr_table_or_error.is_error())
return mbr_table_or_error.release_value();
auto ebr_table_or_error = Partition::EBRPartitionTable::try_to_initialize(device);
if (!ebr_table_or_error.is_error()) {
return ebr_table_or_error.release_value();
}
return TRY(Partition::GUIDPartitionTable::try_to_initialize(device));
}
UNMAP_AFTER_INIT void StorageManagement::enumerate_disk_partitions()
{
VERIFY(!m_storage_devices.is_empty());
for (auto& device : m_storage_devices) {
auto partition_table_or_error = try_to_initialize_partition_table(device);
if (partition_table_or_error.is_error())
continue;
auto partition_table = partition_table_or_error.release_value();
for (size_t partition_index = 0; partition_index < partition_table->partitions_count(); partition_index++) {
auto partition_metadata = partition_table->partition(partition_index);
if (!partition_metadata.has_value())
continue;
auto disk_partition = DiskPartition::create(device, generate_partition_minor_number(), partition_metadata.value());
device.add_partition(disk_partition);
}
}
}
UNMAP_AFTER_INIT Optional<unsigned> StorageManagement::extract_boot_device_partition_number_parameter(StringView device_prefix)
{
VERIFY(m_boot_argument.starts_with(device_prefix));
VERIFY(!m_boot_argument.starts_with(partition_uuid_prefix));
auto storage_device_relative_address_view = m_boot_argument.substring_view(device_prefix.length());
auto parameter_view = storage_device_relative_address_view.find_last_split_view(';');
if (parameter_view == storage_device_relative_address_view)
return {};
if (!parameter_view.starts_with(partition_number_prefix)) {
PANIC("StorageManagement: Invalid root boot parameter.");
}
auto parameter_number = parameter_view.substring_view(partition_number_prefix.length()).to_uint<unsigned>();
if (!parameter_number.has_value()) {
PANIC("StorageManagement: Invalid root boot parameter.");
}
return parameter_number.value();
}
UNMAP_AFTER_INIT Array<unsigned, 3> StorageManagement::extract_boot_device_address_parameters(StringView device_prefix)
{
VERIFY(!m_boot_argument.starts_with(partition_uuid_prefix));
Array<unsigned, 3> address_parameters;
auto parameters_view = m_boot_argument.substring_view(device_prefix.length()).find_first_split_view(';');
size_t parts_count = 0;
bool parse_failure = false;
parameters_view.for_each_split_view(':', SplitBehavior::Nothing, [&](StringView parameter_view) {
if (parse_failure)
return;
if (parts_count > 2)
return;
auto parameter_number = parameter_view.to_uint<unsigned>();
if (!parameter_number.has_value()) {
parse_failure = true;
return;
}
address_parameters[parts_count] = parameter_number.value();
parts_count++;
});
if (parts_count > 3) {
dbgln("StorageManagement: Detected {} parts in boot device parameter.", parts_count);
PANIC("StorageManagement: Invalid root boot parameter.");
}
if (parse_failure) {
PANIC("StorageManagement: Invalid root boot parameter.");
}
return address_parameters;
}
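// Note: As a worked example, a boot argument of "ata0:0:1;part5" with a device
// prefix of "ata" splits into the address parameters {0, 0, 1}, while
// extract_boot_device_partition_number_parameter() above resolves the trailing
// ";part5" to partition index 5.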
UNMAP_AFTER_INIT void StorageManagement::resolve_partition_from_boot_device_parameter(StorageDevice const& chosen_storage_device, StringView boot_device_prefix)
{
auto possible_partition_number = extract_boot_device_partition_number_parameter(boot_device_prefix);
if (!possible_partition_number.has_value())
return;
auto partition_number = possible_partition_number.value();
if (chosen_storage_device.partitions().size() <= partition_number)
PANIC("StorageManagement: Invalid partition number parameter.");
m_boot_block_device = chosen_storage_device.partitions()[partition_number];
}
UNMAP_AFTER_INIT void StorageManagement::determine_hardware_relative_boot_device(StringView relative_hardware_prefix, Function<bool(StorageDevice const&)> filter_device_callback)
{
VERIFY(m_boot_argument.starts_with(relative_hardware_prefix));
auto address_parameters = extract_boot_device_address_parameters(relative_hardware_prefix);
RefPtr<StorageDevice> chosen_storage_device;
for (auto& storage_device : m_storage_devices) {
if (!filter_device_callback(storage_device))
continue;
auto storage_device_lun = storage_device.logical_unit_number_address();
if (storage_device.parent_controller_hardware_relative_id() == address_parameters[0]
&& storage_device_lun.target_id == address_parameters[1]
&& storage_device_lun.disk_id == address_parameters[2]) {
m_boot_block_device = storage_device;
chosen_storage_device = storage_device;
break;
}
}
if (chosen_storage_device)
resolve_partition_from_boot_device_parameter(*chosen_storage_device, relative_hardware_prefix);
}
UNMAP_AFTER_INIT void StorageManagement::determine_ata_boot_device()
{
determine_hardware_relative_boot_device(ata_device_prefix, [](StorageDevice const& device) -> bool {
return device.command_set() == StorageDevice::CommandSet::ATA;
});
}
UNMAP_AFTER_INIT void StorageManagement::determine_nvme_boot_device()
{
determine_hardware_relative_boot_device(nvme_device_prefix, [](StorageDevice const& device) -> bool {
return device.command_set() == StorageDevice::CommandSet::NVMe;
});
}
UNMAP_AFTER_INIT void StorageManagement::determine_sd_boot_device()
{
determine_hardware_relative_boot_device(sd_device_prefix, [](StorageDevice const& device) -> bool {
return device.command_set() == StorageDevice::CommandSet::SD;
});
}
UNMAP_AFTER_INIT void StorageManagement::determine_block_boot_device()
{
VERIFY(m_boot_argument.starts_with(block_device_prefix));
auto parameters_view = extract_boot_device_address_parameters(block_device_prefix);
// Note: We simply fetch the corresponding BlockDevice with the major and minor parameters.
// We don't try to accept and resolve a partition number, as that would make this code much more
// complicated. This rule is also explained in the boot_device_addressing(7) manual page.
LockRefPtr<Device> device = DeviceManagement::the().get_device(parameters_view[0], parameters_view[1]);
if (device && device->is_block_device())
m_boot_block_device = static_ptr_cast<BlockDevice>(device);
}
UNMAP_AFTER_INIT void StorageManagement::determine_boot_device_with_logical_unit_number()
{
VERIFY(m_boot_argument.starts_with(logical_unit_number_device_prefix));
auto address_parameters = extract_boot_device_address_parameters(logical_unit_number_device_prefix);
RefPtr<StorageDevice> chosen_storage_device;
for (auto& storage_device : m_storage_devices) {
auto storage_device_lun = storage_device.logical_unit_number_address();
if (storage_device_lun.controller_id == address_parameters[0]
&& storage_device_lun.target_id == address_parameters[1]
&& storage_device_lun.disk_id == address_parameters[2]) {
m_boot_block_device = storage_device;
chosen_storage_device = storage_device;
break;
}
}
if (chosen_storage_device)
resolve_partition_from_boot_device_parameter(*chosen_storage_device, logical_unit_number_device_prefix);
}
UNMAP_AFTER_INIT void StorageManagement::determine_boot_device()
{
VERIFY(!m_controllers.is_empty());
if (m_boot_argument.starts_with(block_device_prefix)) {
determine_block_boot_device();
return;
}
if (m_boot_argument.starts_with(partition_uuid_prefix)) {
determine_boot_device_with_partition_uuid();
return;
}
if (m_boot_argument.starts_with(logical_unit_number_device_prefix)) {
determine_boot_device_with_logical_unit_number();
return;
}
if (m_boot_argument.starts_with(ata_device_prefix)) {
determine_ata_boot_device();
return;
}
if (m_boot_argument.starts_with(nvme_device_prefix)) {
determine_nvme_boot_device();
return;
}
if (m_boot_argument.starts_with(sd_device_prefix)) {
determine_sd_boot_device();
return;
}
PANIC("StorageManagement: Invalid root boot parameter.");
}
UNMAP_AFTER_INIT void StorageManagement::determine_boot_device_with_partition_uuid()
{
VERIFY(!m_storage_devices.is_empty());
VERIFY(m_boot_argument.starts_with(partition_uuid_prefix));
auto partition_uuid = UUID(m_boot_argument.substring_view(partition_uuid_prefix.length()), UUID::Endianness::Mixed);
for (auto& storage_device : m_storage_devices) {
for (auto& partition : storage_device.partitions()) {
if (partition->metadata().unique_guid().is_zero())
continue;
if (partition->metadata().unique_guid() == partition_uuid) {
m_boot_block_device = partition;
break;
}
}
}
}
LockRefPtr<BlockDevice> StorageManagement::boot_block_device() const
{
return m_boot_block_device.strong_ref();
}
MajorNumber StorageManagement::storage_type_major_number()
{
return 3;
}
MinorNumber StorageManagement::generate_storage_minor_number()
{
return s_storage_device_minor_number.fetch_add(1);
}
MinorNumber StorageManagement::generate_partition_minor_number()
{
return s_partition_device_minor_number.fetch_add(1);
}
u32 StorageManagement::generate_controller_id()
{
return s_controller_id.fetch_add(1);
}
NonnullRefPtr<FileSystem> StorageManagement::root_filesystem() const
{
auto boot_device_description = boot_block_device();
if (!boot_device_description) {
dump_storage_devices_and_partitions();
PANIC("StorageManagement: Couldn't find a suitable device to boot from");
}
auto description_or_error = OpenFileDescription::try_create(boot_device_description.release_nonnull());
VERIFY(!description_or_error.is_error());
auto file_system = Ext2FS::try_create(description_or_error.release_value()).release_value();
if (auto result = file_system->initialize(); result.is_error()) {
dump_storage_devices_and_partitions();
PANIC("StorageManagement: Couldn't open root filesystem: {}", result.error());
}
return file_system;
}
UNMAP_AFTER_INIT void StorageManagement::initialize(StringView root_device, bool force_pio, bool poll)
{
VERIFY(s_storage_device_minor_number == 0);
m_boot_argument = root_device;
if (PCI::Access::is_disabled()) {
#if ARCH(X86_64)
// Note: If PCI is disabled, we assume that we at least have an ISA IDE controller
// to probe and use.
auto isa_ide_controller = MUST(ISAIDEController::initialize());
m_controllers.append(isa_ide_controller);
#endif
} else {
enumerate_pci_controllers(force_pio, poll);
}
#if ARCH(AARCH64)
auto& rpi_sdhc = RPi::SDHostController::the();
if (auto maybe_error = rpi_sdhc.initialize(); maybe_error.is_error()) {
dmesgln("Unable to initialize RaspberryPi's SD Host Controller: {}", maybe_error.error());
} else {
m_controllers.append(rpi_sdhc);
}
#endif
enumerate_storage_devices();
enumerate_disk_partitions();
determine_boot_device();
if (m_boot_block_device.is_null()) {
dump_storage_devices_and_partitions();
PANIC("StorageManagement: boot device {} not found", m_boot_argument);
}
}
StorageManagement& StorageManagement::the()
{
return *s_the;
}
}
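// Note: Putting the prefixes above together, some illustrative root= kernel
// command line arguments (see also boot_device_addressing(7)):
//
// root=block3:0 (raw major:minor block device address)
// root=lun0:0:0 (system-wide LUN address)
// root=ata0:0:1;part5 (hardware-relative ATA address, partition index 5)
// root=nvme0:1:0 (hardware-relative NVMe namespace address)
// root=PARTUUID:... (GPT partition unique GUID, given in full)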

Kernel/Devices/Storage/StorageManagement.h
@@ -0,0 +1,75 @@
/*
* Copyright (c) 2020-2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/IntrusiveList.h>
#include <AK/Types.h>
#include <Kernel/Devices/Storage/DiskPartition.h>
#include <Kernel/Devices/Storage/SD/SDHostController.h>
#include <Kernel/Devices/Storage/StorageController.h>
#include <Kernel/Devices/Storage/StorageDevice.h>
#include <Kernel/FileSystem/FileSystem.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <LibPartition/PartitionTable.h>
namespace Kernel {
class ATAController;
class NVMeController;
class StorageManagement {
public:
StorageManagement();
void initialize(StringView boot_argument, bool force_pio, bool nvme_poll);
static StorageManagement& the();
NonnullRefPtr<FileSystem> root_filesystem() const;
static MajorNumber storage_type_major_number();
static MinorNumber generate_storage_minor_number();
static MinorNumber generate_partition_minor_number();
static u32 generate_controller_id();
static u32 generate_relative_nvme_controller_id(Badge<NVMeController>);
static u32 generate_relative_ata_controller_id(Badge<ATAController>);
static u32 generate_relative_sd_controller_id(Badge<SDHostController>);
void remove_device(StorageDevice&);
private:
void enumerate_pci_controllers(bool force_pio, bool nvme_poll);
void enumerate_storage_devices();
void enumerate_disk_partitions();
void determine_boot_device();
void determine_boot_device_with_partition_uuid();
void resolve_partition_from_boot_device_parameter(StorageDevice const& chosen_storage_device, StringView boot_device_prefix);
void determine_boot_device_with_logical_unit_number();
void determine_block_boot_device();
void determine_nvme_boot_device();
void determine_sd_boot_device();
void determine_ata_boot_device();
void determine_hardware_relative_boot_device(StringView relative_hardware_prefix, Function<bool(StorageDevice const&)> filter_device_callback);
Array<unsigned, 3> extract_boot_device_address_parameters(StringView device_prefix);
Optional<unsigned> extract_boot_device_partition_number_parameter(StringView device_prefix);
void dump_storage_devices_and_partitions() const;
ErrorOr<NonnullOwnPtr<Partition::PartitionTable>> try_to_initialize_partition_table(StorageDevice&) const;
LockRefPtr<BlockDevice> boot_block_device() const;
StringView m_boot_argument;
LockWeakPtr<BlockDevice> m_boot_block_device;
Vector<NonnullRefPtr<StorageController>> m_controllers;
IntrusiveList<&StorageDevice::m_list_node> m_storage_devices;
};
}