
Kernel: Implement an asynchronous device request stack

This allows issuing asynchronous requests for devices and waiting
on the completion of the request. The requests can cascade into
multiple sub-requests.

Since IRQs may complete at any time, if the current process is no
longer the same one that started the request, we need to switch the
paging context before accessing user buffers.

Change the PATA driver to use this model.
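
To make the new model concrete, here is a minimal sketch of how a caller
is expected to use it, based only on the interfaces this commit adds
(Device::make_request(), AsyncBlockDeviceRequest and
AsyncDeviceRequest::wait()); error handling is abbreviated:

    // Sketch: issue an asynchronous single-block read and block on completion.
    bool read_one_block(BlockDevice& device, u32 index, UserOrKernelBuffer& buffer)
    {
        // make_request() queues the request on the device and starts it
        // immediately if the device's request queue was empty.
        auto request = device.make_request<AsyncBlockDeviceRequest>(
            AsyncBlockDeviceRequest::Read, index, 1, buffer, 512);

        // wait() blocks the current thread until the request (and any
        // sub-requests it spawned) has completed.
        auto result = request->wait();
        if (result.wait_result().was_interrupted())
            return false; // interrupted by a signal
        return result.request_result() == AsyncDeviceRequest::Success;
    }

This is essentially what the synchronous BlockDevice::read_block() wrapper
in this commit does.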
Tom 2020-11-02 11:16:01 -07:00 committed by Andreas Kling
parent 96081010dc
commit 2fd5ce1eb0
17 changed files with 804 additions and 246 deletions

View file

@@ -14,6 +14,7 @@ set(KERNEL_SOURCES
     CMOS.cpp
     CommandLine.cpp
     Console.cpp
+    Devices/AsyncDeviceRequest.cpp
     Devices/BXVGADevice.cpp
     Devices/BlockDevice.cpp
     Devices/CharacterDevice.cpp

View file

@@ -0,0 +1,175 @@
/*
* Copyright (c) 2020, The SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Kernel/Devices/AsyncDeviceRequest.h>
#include <Kernel/Devices/Device.h>

namespace Kernel {

AsyncDeviceRequest::AsyncDeviceRequest(Device& device)
    : m_device(device)
    , m_process(*Process::current())
{
}

AsyncDeviceRequest::~AsyncDeviceRequest()
{
    {
        ScopedSpinLock lock(m_lock);
        ASSERT(is_completed_result(m_result));
        ASSERT(m_sub_requests_pending.is_empty());
    }

    // We should not need any locking here anymore. The destructor should
    // only be called after either wait() or cancel() (once implemented) has
    // returned. At that point no sub-request should be adding more requests,
    // and all sub-requests should be completed (either succeeded, failed, or
    // cancelled). This means there should be no more pending sub-requests
    // and the entire AsyncDeviceRequest hierarchy should be immutable.
    for (auto& sub_request : m_sub_requests_complete) {
        ASSERT(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
        ASSERT(sub_request.m_parent_request == this);
        sub_request.m_parent_request = nullptr;
    }
}
void AsyncDeviceRequest::request_finished()
{
    if (m_parent_request)
        m_parent_request->sub_request_finished(*this);

    // Trigger processing the next request
    m_device.process_next_queued_request({}, *this);

    // Wake anyone who may be waiting
    m_queue.wake_all();
}

auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult
{
    ASSERT(!m_parent_request);
    auto request_result = get_request_result();
    if (is_completed_result(request_result))
        return { request_result, Thread::BlockResult::NotBlocked };
    auto wait_result = Thread::current()->wait_on(m_queue, name(), timeout);
    return { get_request_result(), wait_result };
}

auto AsyncDeviceRequest::get_request_result() const -> RequestResult
{
    ScopedSpinLock lock(m_lock);
    return m_result;
}
void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request)
{
    // Sub-requests cannot be for the same device
    ASSERT(&m_device != &sub_request->m_device);
    ASSERT(sub_request->m_parent_request == nullptr);
    sub_request->m_parent_request = this;

    bool should_start;
    {
        ScopedSpinLock lock(m_lock);
        ASSERT(!is_completed_result(m_result));
        m_sub_requests_pending.append(sub_request);
        should_start = (m_result == Started);
    }
    if (should_start)
        sub_request->do_start();
}
void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
{
    bool all_completed;
    {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_result == Started);

        size_t index;
        bool found_sub_request = false;
        for (index = 0; index < m_sub_requests_pending.size(); index++) {
            if (&m_sub_requests_pending[index] == &sub_request) {
                NonnullRefPtr<AsyncDeviceRequest> request(m_sub_requests_pending[index]);
                m_sub_requests_pending.remove(index);
                m_sub_requests_complete.append(move(request));
                found_sub_request = true;
                break;
            }
        }
        // Use a flag here: if the sub-request was the last pending entry,
        // removing it makes 'index' equal to the shrunken size, so checking
        // the loop index would fire spuriously.
        ASSERT(found_sub_request);

        all_completed = m_sub_requests_pending.is_empty();
        if (all_completed) {
            // Aggregate any errors
            bool any_failures = false;
            bool any_memory_faults = false;
            for (index = 0; index < m_sub_requests_complete.size(); index++) {
                auto& sub_request = m_sub_requests_complete[index];
                auto sub_result = sub_request.get_request_result();
                ASSERT(is_completed_result(sub_result));
                switch (sub_result) {
                case Failure:
                    any_failures = true;
                    break;
                case MemoryFault:
                    any_memory_faults = true;
                    break;
                default:
                    break;
                }
                if (any_failures && any_memory_faults)
                    break; // Stop checking if all error conditions were found
            }
            if (any_failures)
                m_result = Failure;
            else if (any_memory_faults)
                m_result = MemoryFault;
            else
                m_result = Success;
        }
    }
    if (all_completed)
        request_finished();
}
void AsyncDeviceRequest::complete(RequestResult result)
{
    ASSERT(result == Success || result == Failure || result == MemoryFault);
    ScopedCritical critical;
    {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_result == Started);
        m_result = result;
    }
    if (Processor::current().in_irq()) {
        ref(); // Make sure we don't get freed
        Processor::deferred_call_queue([this]() {
            request_finished();
            unref();
        });
    } else {
        request_finished();
    }
}

}
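
To show the other side of the contract, a hedged sketch of a driver built
on the class above; the device is hypothetical (invented here for
illustration, not part of this commit), but the overridden hook and the
complete() call are the ones defined in this file. The unrelated File
overrides (read(), write(), can_read(), can_write(), class_name()) are
omitted for brevity:

    // Hypothetical device: satisfies every read with zeroes and discards
    // writes. start_request() is the only block-I/O hook a device has to
    // implement in the new model.
    class ZeroBlockDevice final : public BlockDevice {
    public:
        ZeroBlockDevice()
            : BlockDevice(99, 0, 512) // major/minor numbers chosen arbitrarily
        {
        }

        virtual void start_request(AsyncBlockDeviceRequest& request) override
        {
            if (request.request_type() == AsyncBlockDeviceRequest::Read) {
                u8 zeroes[512] = {};
                for (u32 i = 0; i < request.block_count(); ++i) {
                    // write_to_buffer() transparently switches to the paging
                    // context of the requesting process if that process is
                    // no longer current (see the commit message).
                    auto out = request.buffer().offset(i * 512);
                    if (!request.write_to_buffer(out, zeroes, 512)) {
                        request.complete(AsyncDeviceRequest::MemoryFault);
                        return;
                    }
                }
            }
            // complete() is also safe from an IRQ handler: in that case it
            // defers request_finished() via Processor::deferred_call_queue().
            request.complete(AsyncDeviceRequest::Success);
        }
    };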

View file

@@ -0,0 +1,175 @@
/*
* Copyright (c) 2020, The SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once

#include <AK/NonnullRefPtrVector.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/UserOrKernelBuffer.h>
#include <Kernel/VM/ProcessPagingScope.h>
#include <Kernel/WaitQueue.h>

namespace Kernel {

class Device;

class AsyncDeviceRequest : public RefCounted<AsyncDeviceRequest> {
    AK_MAKE_NONCOPYABLE(AsyncDeviceRequest);
    AK_MAKE_NONMOVABLE(AsyncDeviceRequest);

public:
    enum RequestResult {
        Pending = 0,
        Started,
        Success,
        Failure,
        MemoryFault,
        Cancelled
    };

    class RequestWaitResult {
        friend class AsyncDeviceRequest;

    public:
        RequestResult request_result() const { return m_request_result; }
        Thread::BlockResult wait_result() const { return m_wait_result; }

    private:
        RequestWaitResult(RequestResult request_result, Thread::BlockResult wait_result)
            : m_request_result(request_result)
            , m_wait_result(wait_result)
        {
        }

        RequestResult m_request_result;
        Thread::BlockResult m_wait_result;
    };

    virtual ~AsyncDeviceRequest();

    virtual const char* name() const = 0;
    virtual void start() = 0;

    void add_sub_request(NonnullRefPtr<AsyncDeviceRequest>);

    [[nodiscard]] RequestWaitResult wait(timeval* = nullptr);

    void do_start(Badge<Device>)
    {
        do_start();
    }

    void complete(RequestResult result);

    void set_private(void* priv)
    {
        ASSERT(!m_private || !priv);
        m_private = priv;
    }
    void* get_private() const { return m_private; }

    template<typename... Args>
    [[nodiscard]] bool write_to_buffer(UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.write(forward<Args>(args)...);
        ProcessPagingScope paging_scope(m_process);
        return buffer.write(forward<Args>(args)...);
    }

    template<size_t BUFFER_BYTES, typename... Args>
    [[nodiscard]] bool write_to_buffer_buffered(UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...);
        ProcessPagingScope paging_scope(m_process);
        return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...);
    }

    template<typename... Args>
    [[nodiscard]] bool read_from_buffer(const UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.read(forward<Args>(args)...);
        ProcessPagingScope paging_scope(m_process);
        return buffer.read(forward<Args>(args)...);
    }

    template<size_t BUFFER_BYTES, typename... Args>
    [[nodiscard]] bool read_from_buffer_buffered(const UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...);
        ProcessPagingScope paging_scope(m_process);
        return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...);
    }

protected:
    AsyncDeviceRequest(Device&);

    RequestResult get_request_result() const;

private:
    void sub_request_finished(AsyncDeviceRequest&);
    void request_finished();

    void do_start()
    {
        {
            ScopedSpinLock lock(m_lock);
            if (is_completed_result(m_result))
                return;
            m_result = Started;
        }
        start();
    }

    bool in_target_context(const UserOrKernelBuffer& buffer) const
    {
        if (buffer.is_kernel_buffer())
            return true;
        return m_process == Process::current();
    }

    static bool is_completed_result(RequestResult result)
    {
        return result > Started;
    }

    Device& m_device;

    AsyncDeviceRequest* m_parent_request { nullptr };
    RequestResult m_result { Pending };
    NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_pending;
    NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_complete;
    WaitQueue m_queue;
    NonnullRefPtr<Process> m_process;
    void* m_private { nullptr };
    mutable SpinLock<u8> m_lock;
};

}

View file

@@ -48,10 +48,9 @@ private:
     virtual const char* class_name() const override { return "BXVGA"; }
     virtual bool can_read(const FileDescription&, size_t) const override { return true; }
     virtual bool can_write(const FileDescription&, size_t) const override { return true; }
+    virtual void start_request(AsyncBlockDeviceRequest& request) override { request.complete(AsyncDeviceRequest::Failure); }
     virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override { return -EINVAL; }
     virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override { return -EINVAL; }
-    virtual bool read_blocks(unsigned, u16, UserOrKernelBuffer&) override { return false; }
-    virtual bool write_blocks(unsigned, u16, const UserOrKernelBuffer&) override { return false; }

     void set_safe_resolution();

View file

@@ -28,18 +28,66 @@
 namespace Kernel {

+AsyncBlockDeviceRequest::AsyncBlockDeviceRequest(Device& block_device, RequestType request_type, u32 block_index, u32 block_count, const UserOrKernelBuffer& buffer, size_t buffer_size)
+    : AsyncDeviceRequest(block_device)
+    , m_block_device(static_cast<BlockDevice&>(block_device))
+    , m_request_type(request_type)
+    , m_block_index(block_index)
+    , m_block_count(block_count)
+    , m_buffer(buffer)
+    , m_buffer_size(buffer_size)
+{
+}
+
+void AsyncBlockDeviceRequest::start()
+{
+    m_block_device.start_request(*this);
+}
+
 BlockDevice::~BlockDevice()
 {
 }

-bool BlockDevice::read_block(unsigned index, UserOrKernelBuffer& buffer) const
+bool BlockDevice::read_block(unsigned index, UserOrKernelBuffer& buffer)
 {
-    return const_cast<BlockDevice*>(this)->read_blocks(index, 1, buffer);
+    auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index, 1, buffer, 512);
+    switch (read_request->wait().request_result()) {
+    case AsyncDeviceRequest::Success:
+        return true;
+    case AsyncDeviceRequest::Failure:
+        dbg() << "BlockDevice::read_block(" << index << ") IO error";
+        break;
+    case AsyncDeviceRequest::MemoryFault:
+        dbg() << "BlockDevice::read_block(" << index << ") EFAULT";
+        break;
+    case AsyncDeviceRequest::Cancelled:
+        dbg() << "BlockDevice::read_block(" << index << ") cancelled";
+        break;
+    default:
+        ASSERT_NOT_REACHED();
+    }
+    return false;
 }

-bool BlockDevice::write_block(unsigned index, const UserOrKernelBuffer& data)
+bool BlockDevice::write_block(unsigned index, const UserOrKernelBuffer& buffer)
 {
-    return write_blocks(index, 1, data);
+    auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index, 1, buffer, 512);
+    switch (write_request->wait().request_result()) {
+    case AsyncDeviceRequest::Success:
+        return true;
+    case AsyncDeviceRequest::Failure:
+        dbg() << "BlockDevice::write_block(" << index << ") IO error";
+        break;
+    case AsyncDeviceRequest::MemoryFault:
+        dbg() << "BlockDevice::write_block(" << index << ") EFAULT";
+        break;
+    case AsyncDeviceRequest::Cancelled:
+        dbg() << "BlockDevice::write_block(" << index << ") cancelled";
+        break;
+    default:
+        ASSERT_NOT_REACHED();
+    }
+    return false;
 }

 }

View file

@@ -30,6 +30,46 @@
 namespace Kernel {

+class BlockDevice;
+
+class AsyncBlockDeviceRequest : public AsyncDeviceRequest {
+public:
+    enum RequestType {
+        Read,
+        Write
+    };
+    AsyncBlockDeviceRequest(Device& block_device, RequestType request_type,
+        u32 block_index, u32 block_count, const UserOrKernelBuffer& buffer, size_t buffer_size);
+
+    RequestType request_type() const { return m_request_type; }
+    u32 block_index() const { return m_block_index; }
+    u32 block_count() const { return m_block_count; }
+    UserOrKernelBuffer& buffer() { return m_buffer; }
+    const UserOrKernelBuffer& buffer() const { return m_buffer; }
+    size_t buffer_size() const { return m_buffer_size; }
+
+    virtual void start() override;
+    virtual const char* name() const override
+    {
+        switch (m_request_type) {
+        case Read:
+            return "BlockDeviceRequest (read)";
+        case Write:
+            return "BlockDeviceRequest (write)";
+        default:
+            ASSERT_NOT_REACHED();
+        }
+    }
+
+private:
+    BlockDevice& m_block_device;
+    const RequestType m_request_type;
+    const u32 m_block_index;
+    const u32 m_block_count;
+    UserOrKernelBuffer m_buffer;
+    const size_t m_buffer_size;
+};
+
 class BlockDevice : public Device {
 public:
     virtual ~BlockDevice() override;
@@ -37,11 +77,10 @@ public:
     size_t block_size() const { return m_block_size; }
     virtual bool is_seekable() const override { return true; }

-    bool read_block(unsigned index, UserOrKernelBuffer&) const;
+    bool read_block(unsigned index, UserOrKernelBuffer&);
     bool write_block(unsigned index, const UserOrKernelBuffer&);

-    virtual bool read_blocks(unsigned index, u16 count, UserOrKernelBuffer&) = 0;
-    virtual bool write_blocks(unsigned index, u16 count, const UserOrKernelBuffer&) = 0;
+    virtual void start_request(AsyncBlockDeviceRequest&) = 0;

 protected:
     BlockDevice(unsigned major, unsigned minor, size_t block_size = PAGE_SIZE)

View file

@@ -80,4 +80,21 @@ String Device::absolute_path(const FileDescription&) const
     return absolute_path();
 }

+void Device::process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncDeviceRequest& completed_request)
+{
+    AsyncDeviceRequest* next_request = nullptr;
+    {
+        ScopedSpinLock lock(m_requests_lock);
+        ASSERT(!m_requests.is_empty());
+        ASSERT(m_requests.first().ptr() == &completed_request);
+        m_requests.remove(m_requests.begin());
+        if (!m_requests.is_empty())
+            next_request = m_requests.first().ptr();
+    }
+
+    if (next_request)
+        next_request->start();
+}
+
 }
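
A note on ordering: Device::make_request() (added in Device.h below) and
process_next_queued_request() above together serialize requests per device.
A sketch of the resulting FIFO behavior, using only functions from this
commit:

    // device.make_request(A);  // queue: [A]    -> A starts (queue was empty)
    // device.make_request(B);  // queue: [A, B] -> B stays queued behind A
    // A finishes               // AsyncDeviceRequest::request_finished()
    //                          //   -> Device::process_next_queued_request()
    //                          //   -> queue: [B], and B is started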

View file

@@ -34,10 +34,12 @@
 // There are two main subclasses:
 // - BlockDevice (random access)
 // - CharacterDevice (sequential)
+#include <AK/DoublyLinkedList.h>
 #include <AK/Function.h>
 #include <AK/HashMap.h>
-#include <Kernel/Arch/i386/CPU.h>
+#include <Kernel/Devices/AsyncDeviceRequest.h>
 #include <Kernel/FileSystem/File.h>
+#include <Kernel/Lock.h>
 #include <Kernel/UnixTypes.h>

 namespace Kernel {
@@ -61,6 +63,23 @@ public:
     static void for_each(Function<void(Device&)>);
     static Device* get_device(unsigned major, unsigned minor);

+    void process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncDeviceRequest&);
+
+    template<typename AsyncRequestType, typename... Args>
+    NonnullRefPtr<AsyncRequestType> make_request(Args&&... args)
+    {
+        auto request = adopt(*new AsyncRequestType(*this, forward<Args>(args)...));
+        bool was_empty;
+        {
+            ScopedSpinLock lock(m_requests_lock);
+            was_empty = m_requests.is_empty();
+            m_requests.append(request);
+        }
+        if (was_empty)
+            request->do_start({});
+        return request;
+    }
+
 protected:
     Device(unsigned major, unsigned minor);
     void set_uid(uid_t uid) { m_uid = uid; }
@@ -73,6 +92,9 @@ private:
     unsigned m_minor { 0 };
     uid_t m_uid { 0 };
     gid_t m_gid { 0 };
+
+    SpinLock<u8> m_requests_lock;
+    DoublyLinkedList<RefPtr<AsyncDeviceRequest>> m_requests;
 };

 }

View file

@@ -48,6 +48,12 @@ DiskPartition::~DiskPartition()
 {
 }

+void DiskPartition::start_request(AsyncBlockDeviceRequest& request)
+{
+    request.add_sub_request(m_device->make_request<AsyncBlockDeviceRequest>(request.request_type(),
+        request.block_index() + m_block_offset, request.block_count(), request.buffer(), request.buffer_size()));
+}
+
 KResultOr<size_t> DiskPartition::read(FileDescription& fd, size_t offset, UserOrKernelBuffer& outbuf, size_t len)
 {
     unsigned adjust = m_block_offset * block_size();
@@ -92,24 +98,6 @@ bool DiskPartition::can_write(const FileDescription& fd, size_t offset) const
     return m_device->can_write(fd, offset + adjust);
 }

-bool DiskPartition::read_blocks(unsigned index, u16 count, UserOrKernelBuffer& out)
-{
-#ifdef OFFD_DEBUG
-    klog() << "DiskPartition::read_blocks " << index << " (really: " << (m_block_offset + index) << ") count=" << count;
-#endif
-
-    return m_device->read_blocks(m_block_offset + index, count, out);
-}
-
-bool DiskPartition::write_blocks(unsigned index, u16 count, const UserOrKernelBuffer& data)
-{
-#ifdef OFFD_DEBUG
-    klog() << "DiskPartition::write_blocks " << index << " (really: " << (m_block_offset + index) << ") count=" << count;
-#endif
-
-    return m_device->write_blocks(m_block_offset + index, count, data);
-}
-
 const char* DiskPartition::class_name() const
 {
     return "DiskPartition";

View file

@@ -36,8 +36,7 @@ public:
     static NonnullRefPtr<DiskPartition> create(BlockDevice&, unsigned block_offset, unsigned block_limit);
     virtual ~DiskPartition();

-    virtual bool read_blocks(unsigned index, u16 count, UserOrKernelBuffer&) override;
-    virtual bool write_blocks(unsigned index, u16 count, const UserOrKernelBuffer&) override;
+    virtual void start_request(AsyncBlockDeviceRequest&) override;

     // ^BlockDevice
     virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override;

View file

@@ -63,6 +63,9 @@ int EBRPartitionTable::index_of_ebr_container() const

 bool EBRPartitionTable::initialize()
 {
+    auto mbr_header_request = m_device->make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read,
+        0, 1, UserOrKernelBuffer::for_kernel_buffer(m_cached_mbr_header), sizeof(m_cached_mbr_header));
+
     auto mbr_header_buffer = UserOrKernelBuffer::for_kernel_buffer(m_cached_mbr_header);
     if (!m_device->read_block(0, mbr_header_buffer)) {
         return false;

View file

@@ -84,7 +84,7 @@ RefPtr<DiskPartition> GPTPartitionTable::partition(unsigned index)

     GPTPartitionEntry entries[entries_per_sector];
     auto entries_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&entries);
-    this->m_device->read_blocks(lba, 1, entries_buffer);
+    this->m_device->read_block(lba, entries_buffer);
     GPTPartitionEntry& entry = entries[((index - 1) % entries_per_sector)];

 #ifdef GPT_DEBUG

View file

@@ -49,8 +49,7 @@ private:
     virtual bool can_write(const FileDescription&, size_t) const override { return true; }
     virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override { return -EINVAL; }
     virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override { return -EINVAL; }
-    virtual bool read_blocks(unsigned, u16, UserOrKernelBuffer&) override { return false; }
-    virtual bool write_blocks(unsigned, u16, const UserOrKernelBuffer&) override { return false; }
+    virtual void start_request(AsyncBlockDeviceRequest& request) override { request.complete(AsyncDeviceRequest::Failure); }

     size_t framebuffer_size_in_bytes() const { return m_framebuffer_pitch * m_framebuffer_height; }

View file

@@ -108,13 +108,6 @@ namespace Kernel {
 #define PCI_Mass_Storage_Class 0x1
 #define PCI_IDE_Controller_Subclass 0x1

-static AK::Singleton<Lock> s_pata_lock;
-
-static Lock& s_lock()
-{
-    return *s_pata_lock;
-};
-
 OwnPtr<PATAChannel> PATAChannel::create(ChannelType type, bool force_pio)
 {
     PCI::Address pci_address;
@@ -148,10 +141,59 @@ PATAChannel::~PATAChannel()
 {
 }

-void PATAChannel::prepare_for_irq()
+void PATAChannel::start_request(AsyncBlockDeviceRequest& request, bool use_dma, bool is_slave)
 {
-    cli();
-    enable_irq();
+    m_current_request = &request;
+    m_current_request_block_index = 0;
+    m_current_request_uses_dma = use_dma;
+    m_current_request_flushing_cache = false;
+
+    if (request.request_type() == AsyncBlockDeviceRequest::Read) {
+        if (use_dma)
+            ata_read_sectors_with_dma(is_slave);
+        else
+            ata_read_sectors(is_slave);
+    } else {
+        if (use_dma)
+            ata_write_sectors_with_dma(is_slave);
+        else
+            ata_write_sectors(is_slave);
+    }
+}
+
+void PATAChannel::complete_current_request(AsyncDeviceRequest::RequestResult result)
+{
+    // NOTE: this may be called from the interrupt handler!
+    ASSERT(m_current_request);
+
+    // Now schedule reading back the buffer as soon as we leave the irq handler.
+    // This is important so that we can safely write the buffer back,
+    // which could cause page faults. Note that this may be called immediately
+    // before Processor::deferred_call_queue returns!
+    Processor::deferred_call_queue([this, result]() {
+#ifdef PATA_DEBUG
+        dbg() << "PATAChannel::complete_current_request result: " << result;
+#endif
+        ASSERT(m_current_request);
+        auto& request = *m_current_request;
+        m_current_request = nullptr;
+
+        if (m_current_request_uses_dma) {
+            if (result == AsyncDeviceRequest::Success) {
+                if (request.request_type() == AsyncBlockDeviceRequest::Read) {
+                    if (!request.write_to_buffer(request.buffer(), m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * request.block_count())) {
+                        request.complete(AsyncDeviceRequest::MemoryFault);
+                        return;
+                    }
+                }
+
+                // I read somewhere that this may trigger a cache flush so let's do it.
+                m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6);
+            }
+        }
+
+        request.complete(result);
+    });
 }

 void PATAChannel::initialize(bool force_pio)
@@ -175,12 +217,6 @@ static void print_ide_status(u8 status)
     klog() << "PATAChannel: print_ide_status: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0) << " DSC=" << ((status & ATA_SR_DSC) != 0) << " DF=" << ((status & ATA_SR_DF) != 0) << " CORR=" << ((status & ATA_SR_CORR) != 0) << " IDX=" << ((status & ATA_SR_IDX) != 0) << " ERR=" << ((status & ATA_SR_ERR) != 0);
 }

-void PATAChannel::wait_for_irq()
-{
-    Thread::current()->wait_on(m_irq_queue, "PATAChannel");
-    disable_irq();
-}
-
 void PATAChannel::handle_irq(const RegisterState&)
 {
     u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>();
@@ -196,16 +232,63 @@
         return;
     }

+#ifdef PATA_DEBUG
+    klog() << "PATAChannel: interrupt: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0);
+#endif
+
+    bool received_all_irqs = m_current_request_uses_dma || m_current_request_block_index + 1 >= m_current_request->block_count();
+
+    disable_irq();
+
     if (status & ATA_SR_ERR) {
         print_ide_status(status);
         m_device_error = m_io_base.offset(ATA_REG_ERROR).in<u8>();
         klog() << "PATAChannel: Error " << String::format("%b", m_device_error) << "!";
-    } else {
-        m_device_error = 0;
+        complete_current_request(AsyncDeviceRequest::Failure);
+        return;
     }
-#ifdef PATA_DEBUG
-    klog() << "PATAChannel: interrupt: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0);
-#endif
+
+    m_device_error = 0;
+    if (received_all_irqs) {
+        complete_current_request(AsyncDeviceRequest::Success);
+    } else {
+        ASSERT(!m_current_request_uses_dma);
+
+        // Now schedule reading/writing the buffer as soon as we leave the irq handler.
+        // This is important so that we can safely access the buffers, which could
+        // trigger page faults
+        Processor::deferred_call_queue([this]() {
+            if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
+                dbg() << "PATAChannel: Read block " << m_current_request_block_index << "/" << m_current_request->block_count();
+                if (ata_do_read_sector()) {
+                    if (++m_current_request_block_index >= m_current_request->block_count()) {
+                        complete_current_request(AsyncDeviceRequest::Success);
+                        return;
+                    }
+                    // Wait for the next block
+                    enable_irq();
+                }
+            } else {
+                if (!m_current_request_flushing_cache) {
+                    dbg() << "PATAChannel: Wrote block " << m_current_request_block_index << "/" << m_current_request->block_count();
+                    if (++m_current_request_block_index >= m_current_request->block_count()) {
+                        // We wrote the last block, flush the cache
+                        ASSERT(!m_current_request_flushing_cache);
+                        m_current_request_flushing_cache = true;
+                        enable_irq();
+                        m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH);
+                    } else {
+                        // Write the next block
+                        enable_irq();
+                        ata_do_write_sector();
+                    }
+                } else {
+                    complete_current_request(AsyncDeviceRequest::Success);
+                }
+            }
+        });
+    }

     m_irq_queue.wake_all();
 }
@@ -274,15 +357,16 @@ void PATAChannel::detect_disks()
     }
 }

-bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuffer& outbuf, bool slave_request)
+void PATAChannel::ata_read_sectors_with_dma(bool slave_request)
 {
-    LOCKER(s_lock());
+    auto& request = *m_current_request;
+    u32 lba = request.block_index();
 #ifdef PATA_DEBUG
-    dbg() << "PATAChannel::ata_read_sectors_with_dma (" << lba << " x" << count << ") -> " << outbuf.user_or_kernel_ptr();
+    dbg() << "PATAChannel::ata_read_sectors_with_dma (" << lba << " x" << request.block_count() << ")";
 #endif

     prdt().offset = m_dma_buffer_page->paddr();
-    prdt().size = 512 * count;
+    prdt().size = 512 * request.block_count();

     ASSERT(prdt().size <= PAGE_SIZE);
@@ -312,7 +396,7 @@ bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuff
     m_io_base.offset(ATA_REG_LBA1).out<u8>(0);
     m_io_base.offset(ATA_REG_LBA2).out<u8>(0);

-    m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(count);
+    m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(request.block_count());
     m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0);
     m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8);
     m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16);
@@ -326,35 +410,89 @@ bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuff
     m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_READ_DMA_EXT);
     io_delay();
-    prepare_for_irq();
+    enable_irq();

     // Start bus master
     m_bus_master_base.out<u8>(0x9);
+}

-    wait_for_irq();
+bool PATAChannel::ata_do_read_sector()
+{
+    auto& request = *m_current_request;
+    auto out_buffer = request.buffer().offset(m_current_request_block_index * 512);
+    ssize_t nwritten = request.write_to_buffer_buffered<512>(out_buffer, 512, [&](u8* buffer, size_t buffer_bytes) {
+        for (size_t i = 0; i < buffer_bytes; i += sizeof(u16))
+            *(u16*)&buffer[i] = IO::in16(m_io_base.offset(ATA_REG_DATA).get());
+        return (ssize_t)buffer_bytes;
+    });
+    if (nwritten < 0) {
+        // TODO: Do we need to abort the PATA read if this wasn't the last block?
+        complete_current_request(AsyncDeviceRequest::MemoryFault);
+        return false;
+    }
+    return true;
+}

-    if (m_device_error)
-        return false;
-
-    if (!outbuf.write(m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * count))
-        return false; // TODO: -EFAULT
-
-    // I read somewhere that this may trigger a cache flush so let's do it.
-    m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6);
-    return true;
-}
-
-bool PATAChannel::ata_write_sectors_with_dma(u32 lba, u16 count, const UserOrKernelBuffer& inbuf, bool slave_request)
+void PATAChannel::ata_read_sectors(bool slave_request)
 {
-    LOCKER(s_lock());
+    auto& request = *m_current_request;
+    ASSERT(request.block_count() <= 256);
 #ifdef PATA_DEBUG
-    dbg() << "PATAChannel::ata_write_sectors_with_dma (" << lba << " x" << count << ") <- " << inbuf.user_or_kernel_ptr();
+    dbg() << "PATAChannel::ata_read_sectors";
+#endif
+
+    while (m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY)
+        ;
+
+    auto lba = request.block_index();
+#ifdef PATA_DEBUG
+    klog() << "PATAChannel: Reading " << request.block_count() << " sector(s) @ LBA " << lba;
+#endif
+
+    u8 devsel = 0xe0;
+    if (slave_request)
+        devsel |= 0x10;
+
+    m_control_base.offset(ATA_CTL_CONTROL).out<u8>(0);
+    m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(devsel | (static_cast<u8>(slave_request) << 4) | 0x40);
+    io_delay();
+
+    m_io_base.offset(ATA_REG_FEATURES).out<u8>(0);
+
+    m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(0);
+    m_io_base.offset(ATA_REG_LBA0).out<u8>(0);
+    m_io_base.offset(ATA_REG_LBA1).out<u8>(0);
+    m_io_base.offset(ATA_REG_LBA2).out<u8>(0);
+
+    m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(request.block_count());
+    m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0);
+    m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8);
+    m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16);
+
+    for (;;) {
+        auto status = m_io_base.offset(ATA_REG_STATUS).in<u8>();
+        if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRDY))
+            break;
+    }
+
+    enable_irq();
+    m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_READ_PIO);
+}
+
+void PATAChannel::ata_write_sectors_with_dma(bool slave_request)
+{
+    auto& request = *m_current_request;
+    u32 lba = request.block_index();
+#ifdef PATA_DEBUG
+    dbg() << "PATAChannel::ata_write_sectors_with_dma (" << lba << " x" << request.block_count() << ")";
 #endif

     prdt().offset = m_dma_buffer_page->paddr();
-    prdt().size = 512 * count;
+    prdt().size = 512 * request.block_count();

-    if (!inbuf.read(m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * count))
-        return false; // TODO: -EFAULT
+    if (!request.read_from_buffer(request.buffer(), m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * request.block_count())) {
+        complete_current_request(AsyncDeviceRequest::MemoryFault);
+        return;
+    }

     ASSERT(prdt().size <= PAGE_SIZE);
@@ -381,7 +519,7 @@ bool PATAChannel::ata_write_sectors_with_dma(u32 lba, u16 count, const UserOrKer
     m_io_base.offset(ATA_REG_LBA1).out<u8>(0);
     m_io_base.offset(ATA_REG_LBA2).out<u8>(0);

-    m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(count);
+    m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(request.block_count());
     m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0);
     m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8);
     m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16);
@@ -395,100 +533,42 @@ bool PATAChannel::ata_write_sectors_with_dma(u32 lba, u16 count, const UserOrKer
     m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_WRITE_DMA_EXT);
     io_delay();
-    prepare_for_irq();
+    enable_irq();

     // Start bus master
     m_bus_master_base.out<u8>(0x1);
-
-    wait_for_irq();
-
-    if (m_device_error)
-        return false;
-
-    // I read somewhere that this may trigger a cache flush so let's do it.
-    m_bus_master_base.offset(2).out<u8>(m_bus_master_base.offset(2).in<u8>() | 0x6);
-    return true;
 }

-bool PATAChannel::ata_read_sectors(u32 lba, u16 count, UserOrKernelBuffer& outbuf, bool slave_request)
+void PATAChannel::ata_do_write_sector()
 {
-    ASSERT(count <= 256);
-    LOCKER(s_lock());
-#ifdef PATA_DEBUG
-    dbg() << "PATAChannel::ata_read_sectors request (" << count << " sector(s) @ " << lba << " into " << outbuf.user_or_kernel_ptr() << ")";
-#endif
+    auto& request = *m_current_request;

-    while (m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY)
+    io_delay();
+    while ((m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) || !(m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_DRQ))
         ;

+    u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>();
+    ASSERT(status & ATA_SR_DRQ);
+
+    auto in_buffer = request.buffer().offset(m_current_request_block_index * 512);
 #ifdef PATA_DEBUG
-    klog() << "PATAChannel: Reading " << count << " sector(s) @ LBA " << lba;
+    dbg() << "PATAChannel: Writing 512 bytes (part " << m_current_request_block_index << ") (status=" << String::format("%b", status) << ")...";
 #endif
+    ssize_t nread = request.read_from_buffer_buffered<512>(in_buffer, 512, [&](const u8* buffer, size_t buffer_bytes) {
+        for (size_t i = 0; i < buffer_bytes; i += sizeof(u16))
+            IO::out16(m_io_base.offset(ATA_REG_DATA).get(), *(const u16*)&buffer[i]);
+        return (ssize_t)buffer_bytes;
+    });
+    if (nread < 0)
+        complete_current_request(AsyncDeviceRequest::MemoryFault);
+}

-    u8 devsel = 0xe0;
-    if (slave_request)
-        devsel |= 0x10;
-
-    m_control_base.offset(ATA_CTL_CONTROL).out<u8>(0);
-    m_io_base.offset(ATA_REG_HDDEVSEL).out<u8>(devsel | (static_cast<u8>(slave_request) << 4) | 0x40);
-    io_delay();
-
-    m_io_base.offset(ATA_REG_FEATURES).out<u8>(0);
-
-    m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(0);
-    m_io_base.offset(ATA_REG_LBA0).out<u8>(0);
-    m_io_base.offset(ATA_REG_LBA1).out<u8>(0);
-    m_io_base.offset(ATA_REG_LBA2).out<u8>(0);
-
-    m_io_base.offset(ATA_REG_SECCOUNT0).out<u8>(count);
-    m_io_base.offset(ATA_REG_LBA0).out<u8>((lba & 0x000000ff) >> 0);
-    m_io_base.offset(ATA_REG_LBA1).out<u8>((lba & 0x0000ff00) >> 8);
-    m_io_base.offset(ATA_REG_LBA2).out<u8>((lba & 0x00ff0000) >> 16);
-
-    for (;;) {
-        auto status = m_io_base.offset(ATA_REG_STATUS).in<u8>();
-        if (!(status & ATA_SR_BSY) && (status & ATA_SR_DRDY))
-            break;
-    }
-
-    prepare_for_irq();
-    m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_READ_PIO);
-
-    for (int i = 0; i < count; i++) {
-        if (i > 0)
-            prepare_for_irq();
-        wait_for_irq();
-        if (m_device_error)
-            return false;
-        u8 status = m_control_base.offset(ATA_CTL_ALTSTATUS).in<u8>();
-        ASSERT(!(status & ATA_SR_BSY));
-
-        auto out = outbuf.offset(i * 512);
-#ifdef PATA_DEBUG
-        dbg() << "PATAChannel: Retrieving 512 bytes (part " << i << ") (status=" << String::format("%b", status) << "), outbuf=(" << out.user_or_kernel_ptr() << ")...";
-#endif
-        prepare_for_irq();
-        ssize_t nwritten = out.write_buffered<512>(512, [&](u8* buffer, size_t buffer_bytes) {
-            for (size_t i = 0; i < buffer_bytes; i += sizeof(u16))
-                *(u16*)&buffer[i] = IO::in16(m_io_base.offset(ATA_REG_DATA).get());
-            return (ssize_t)buffer_bytes;
-        });
-        if (nwritten < 0) {
-            sti();
-            disable_irq();
-            return false; // TODO: -EFAULT
-        }
-    }
-    sti();
-    disable_irq();
-    return true;
-}
-
-bool PATAChannel::ata_write_sectors(u32 start_sector, u16 count, const UserOrKernelBuffer& inbuf, bool slave_request)
+void PATAChannel::ata_write_sectors(bool slave_request)
 {
-    ASSERT(count <= 256);
-    LOCKER(s_lock());
+    auto& request = *m_current_request;
+
+    ASSERT(request.block_count() <= 256);
+    u32 start_sector = request.block_index();
+    u32 count = request.block_count();
 #ifdef PATA_DEBUG
     klog() << "PATAChannel::ata_write_sectors request (" << count << " sector(s) @ " << start_sector << ")";
 #endif
@@ -516,37 +596,12 @@ bool PATAChannel::ata_write_sectors(u32 start_sector, u16 count, const UserOrKer
     m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_WRITE_PIO);

-    for (int i = 0; i < count; i++) {
-        io_delay();
-        while ((m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) || !(m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_DRQ))
-            ;
+    io_delay();
+    while ((m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY) || !(m_io_base.offset(ATA_REG_STATUS).in<u8>() & ATA_SR_DRQ))
+        ;

-        u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>();
-        ASSERT(status & ATA_SR_DRQ);
-
-        auto in = inbuf.offset(i * 512);
-#ifdef PATA_DEBUG
-        dbg() << "PATAChannel: Writing 512 bytes (part " << i << ") (status=" << String::format("%b", status) << "), inbuf=(" << in.user_or_kernel_ptr() << ")...";
-#endif
-        prepare_for_irq();
-        ssize_t nread = in.read_buffered<512>(512, [&](const u8* buffer, size_t buffer_bytes) {
-            for (size_t i = 0; i < buffer_bytes; i += sizeof(u16))
-                IO::out16(m_io_base.offset(ATA_REG_DATA).get(), *(const u16*)&buffer[i]);
-            return (ssize_t)buffer_bytes;
-        });
-        wait_for_irq();
-        status = m_io_base.offset(ATA_REG_STATUS).in<u8>();
-        ASSERT(!(status & ATA_SR_BSY));
-        if (nread < 0)
-            return false; // TODO: -EFAULT
-    }
-
-    prepare_for_irq();
-    m_io_base.offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH);
-    wait_for_irq();
-    u8 status = m_io_base.offset(ATA_REG_STATUS).in<u8>();
-    ASSERT(!(status & ATA_SR_BSY));
-
-    return !m_device_error;
+    enable_irq();
+    ata_do_write_sector();
 }

 }
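
One pattern worth noting in the PATA changes above: both
complete_current_request() and handle_irq() push all buffer work into
Processor::deferred_call_queue() rather than doing it in the IRQ handler
itself. The reason is the one given in the commit message: copying to or
from a user buffer can page-fault and may require switching paging
contexts, neither of which is safe in IRQ context. The shape of the
pattern (names from this file):

    // In IRQ context: only record state and queue the heavy work.
    //     complete_current_request(result);   // just queues the lambda
    //
    // Once the IRQ handler has returned, the deferred call runs:
    //     request.write_to_buffer(...);       // may fault / switch paging context
    //     request.complete(result);           // wakes the waiting thread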

View file

@@ -39,6 +39,7 @@
 #include <AK/OwnPtr.h>
 #include <AK/RefPtr.h>
+#include <Kernel/Devices/Device.h>
 #include <Kernel/IO.h>
 #include <Kernel/Lock.h>
 #include <Kernel/PCI/Access.h>
@@ -50,6 +51,8 @@
 namespace Kernel {

+class AsyncBlockDeviceRequest;
+
 struct PhysicalRegionDescriptor {
     PhysicalAddress offset;
     u16 size { 0 };
@@ -83,13 +86,15 @@ private:
     void initialize(bool force_pio);
     void detect_disks();

-    void wait_for_irq();
-    bool ata_read_sectors_with_dma(u32, u16, UserOrKernelBuffer&, bool);
-    bool ata_write_sectors_with_dma(u32, u16, const UserOrKernelBuffer&, bool);
-    bool ata_read_sectors(u32, u16, UserOrKernelBuffer&, bool);
-    bool ata_write_sectors(u32, u16, const UserOrKernelBuffer&, bool);
-
-    inline void prepare_for_irq();
+    void start_request(AsyncBlockDeviceRequest&, bool, bool);
+    void complete_current_request(AsyncDeviceRequest::RequestResult);
+
+    void ata_read_sectors_with_dma(bool);
+    void ata_read_sectors(bool);
+    bool ata_do_read_sector();
+    void ata_write_sectors_with_dma(bool);
+    void ata_write_sectors(bool);
+    void ata_do_write_sector();

     // Data members
     u8 m_channel_number { 0 }; // Channel number. 0 = master, 1 = slave
@@ -108,5 +113,10 @@ private:
     RefPtr<PATADiskDevice> m_master;
     RefPtr<PATADiskDevice> m_slave;

+    AsyncBlockDeviceRequest* m_current_request { nullptr };
+    u32 m_current_request_block_index { 0 };
+    bool m_current_request_uses_dma { false };
+    bool m_current_request_flushing_cache { false };
 };

 }

View file

@@ -55,22 +55,10 @@ const char* PATADiskDevice::class_name() const
     return "PATADiskDevice";
 }

-bool PATADiskDevice::read_blocks(unsigned index, u16 count, UserOrKernelBuffer& out)
+void PATADiskDevice::start_request(AsyncBlockDeviceRequest& request)
 {
-    if (!m_channel.m_bus_master_base.is_null() && m_channel.m_dma_enabled.resource())
-        return read_sectors_with_dma(index, count, out);
-    return read_sectors(index, count, out);
-}
-
-bool PATADiskDevice::write_blocks(unsigned index, u16 count, const UserOrKernelBuffer& data)
-{
-    if (!m_channel.m_bus_master_base.is_null() && m_channel.m_dma_enabled.resource())
-        return write_sectors_with_dma(index, count, data);
-    for (unsigned i = 0; i < count; ++i) {
-        if (!write_sectors(index + i, 1, data.offset(i * 512)))
-            return false;
-    }
-    return true;
+    bool use_dma = !m_channel.m_bus_master_base.is_null() && m_channel.m_dma_enabled.resource();
+    m_channel.start_request(request, use_dma, is_slave());
 }

 void PATADiskDevice::set_drive_geometry(u16 cyls, u16 heads, u16 spt)
@@ -100,8 +88,19 @@ KResultOr<size_t> PATADiskDevice::read(FileDescription&, size_t offset, UserOrKe
 #endif

     if (whole_blocks > 0) {
-        if (!read_blocks(index, whole_blocks, outbuf))
-            return -1;
+        auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index, whole_blocks, outbuf, whole_blocks * block_size());
+        auto result = read_request->wait();
+        if (result.wait_result().was_interrupted())
+            return KResult(-EINTR);
+        switch (result.request_result()) {
+        case AsyncDeviceRequest::Failure:
+        case AsyncDeviceRequest::Cancelled:
+            return KResult(-EIO);
+        case AsyncDeviceRequest::MemoryFault:
+            return KResult(-EFAULT);
+        default:
+            break;
+        }
     }

     off_t pos = whole_blocks * block_size();
@@ -109,8 +108,21 @@ KResultOr<size_t> PATADiskDevice::read(FileDescription&, size_t offset, UserOrKe
     if (remaining > 0) {
         auto data = ByteBuffer::create_uninitialized(block_size());
         auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data());
-        if (!read_blocks(index + whole_blocks, 1, data_buffer))
-            return pos;
+        auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size());
+        auto result = read_request->wait();
+        if (result.wait_result().was_interrupted())
+            return KResult(-EINTR);
+        switch (result.request_result()) {
+        case AsyncDeviceRequest::Failure:
+            return pos;
+        case AsyncDeviceRequest::Cancelled:
+            return KResult(-EIO);
+        case AsyncDeviceRequest::MemoryFault:
+            // This should never happen, we're writing to a kernel buffer!
+            ASSERT_NOT_REACHED();
+        default:
+            break;
+        }
         if (!outbuf.write(data.data(), pos, remaining))
             return KResult(-EFAULT);
     }
@@ -143,8 +155,19 @@ KResultOr<size_t> PATADiskDevice::write(FileDescription&, size_t offset, const U
 #endif

     if (whole_blocks > 0) {
-        if (!write_blocks(index, whole_blocks, inbuf))
-            return -1;
+        auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index, whole_blocks, inbuf, whole_blocks * block_size());
+        auto result = write_request->wait();
+        if (result.wait_result().was_interrupted())
+            return KResult(-EINTR);
+        switch (result.request_result()) {
+        case AsyncDeviceRequest::Failure:
+        case AsyncDeviceRequest::Cancelled:
+            return KResult(-EIO);
+        case AsyncDeviceRequest::MemoryFault:
+            return KResult(-EFAULT);
+        default:
+            break;
+        }
     }

     off_t pos = whole_blocks * block_size();
@@ -155,12 +178,45 @@ KResultOr<size_t> PATADiskDevice::write(FileDescription&, size_t offset, const U
     if (remaining > 0) {
         auto data = ByteBuffer::create_zeroed(block_size());
         auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data());
-        if (!read_blocks(index + whole_blocks, 1, data_buffer))
-            return pos;
+
+        {
+            auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size());
+            auto result = read_request->wait();
+            if (result.wait_result().was_interrupted())
+                return KResult(-EINTR);
+            switch (result.request_result()) {
+            case AsyncDeviceRequest::Failure:
+                return pos;
+            case AsyncDeviceRequest::Cancelled:
+                return KResult(-EIO);
+            case AsyncDeviceRequest::MemoryFault:
+                // This should never happen, we're writing to a kernel buffer!
+                ASSERT_NOT_REACHED();
+            default:
+                break;
+            }
+        }
+
         if (!inbuf.read(data.data(), pos, remaining))
             return KResult(-EFAULT);
-        if (!write_blocks(index + whole_blocks, 1, data_buffer))
-            return pos;
+
+        {
+            auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index + whole_blocks, 1, data_buffer, block_size());
+            auto result = write_request->wait();
+            if (result.wait_result().was_interrupted())
+                return KResult(-EINTR);
+            switch (result.request_result()) {
+            case AsyncDeviceRequest::Failure:
+                return pos;
+            case AsyncDeviceRequest::Cancelled:
+                return KResult(-EIO);
+            case AsyncDeviceRequest::MemoryFault:
+                // This should never happen, we're writing to a kernel buffer!
+                ASSERT_NOT_REACHED();
+            default:
+                break;
+            }
+        }
     }

     return pos + remaining;
@@ -171,26 +227,6 @@ bool PATADiskDevice::can_write(const FileDescription&, size_t offset) const
     return offset < (m_cylinders * m_heads * m_sectors_per_track * block_size());
 }

-bool PATADiskDevice::read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuffer& outbuf)
-{
-    return m_channel.ata_read_sectors_with_dma(lba, count, outbuf, is_slave());
-}
-
-bool PATADiskDevice::read_sectors(u32 start_sector, u16 count, UserOrKernelBuffer& outbuf)
-{
-    return m_channel.ata_read_sectors(start_sector, count, outbuf, is_slave());
-}
-
-bool PATADiskDevice::write_sectors_with_dma(u32 lba, u16 count, const UserOrKernelBuffer& inbuf)
-{
-    return m_channel.ata_write_sectors_with_dma(lba, count, inbuf, is_slave());
-}
-
-bool PATADiskDevice::write_sectors(u32 start_sector, u16 count, const UserOrKernelBuffer& inbuf)
-{
-    return m_channel.ata_write_sectors(start_sector, count, inbuf, is_slave());
-}
-
 bool PATADiskDevice::is_slave() const
 {
     return m_drive_type == DriveType::Slave;

View file

@@ -54,13 +54,10 @@ public:
     static NonnullRefPtr<PATADiskDevice> create(PATAChannel&, DriveType, int major, int minor);
    virtual ~PATADiskDevice() override;

-    // ^DiskDevice
-    virtual bool read_blocks(unsigned index, u16 count, UserOrKernelBuffer&) override;
-    virtual bool write_blocks(unsigned index, u16 count, const UserOrKernelBuffer&) override;
-
     void set_drive_geometry(u16, u16, u16);

     // ^BlockDevice
+    virtual void start_request(AsyncBlockDeviceRequest&) override;
     virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override;
     virtual bool can_read(const FileDescription&, size_t) const override;
     virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override;
@@ -73,11 +70,6 @@ private:
     // ^DiskDevice
     virtual const char* class_name() const override;

-    bool wait_for_irq();
-    bool read_sectors_with_dma(u32 lba, u16 count, UserOrKernelBuffer&);
-    bool write_sectors_with_dma(u32 lba, u16 count, const UserOrKernelBuffer&);
-    bool read_sectors(u32 lba, u16 count, UserOrKernelBuffer& buffer);
-    bool write_sectors(u32 lba, u16 count, const UserOrKernelBuffer& data);
-
     bool is_slave() const;

     Lock m_lock { "IDEDiskDevice" };