1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-31 12:28:12 +00:00

Kernel: Implement an asynchronous device request stack

This allows issuing asynchronous requests for devices and waiting
on the completion of the request. The requests can cascade into
multiple sub-requests.

Since IRQs may complete at any time, if the current process is no
longer the same one that started the request, we need to switch the
paging context before accessing user buffers.

Change the PATA driver to use this model.
This commit is contained in:
Tom 2020-11-02 11:16:01 -07:00 committed by Andreas Kling
parent 96081010dc
commit 2fd5ce1eb0
17 changed files with 804 additions and 246 deletions

View file

@ -39,6 +39,7 @@
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <Kernel/Devices/Device.h>
#include <Kernel/IO.h>
#include <Kernel/Lock.h>
#include <Kernel/PCI/Access.h>
@ -50,6 +51,8 @@
namespace Kernel {
class AsyncBlockDeviceRequest;
struct PhysicalRegionDescriptor {
PhysicalAddress offset;
u16 size { 0 };
@ -83,13 +86,15 @@ private:
void initialize(bool force_pio);
void detect_disks();
void wait_for_irq();
bool ata_read_sectors_with_dma(u32, u16, UserOrKernelBuffer&, bool);
bool ata_write_sectors_with_dma(u32, u16, const UserOrKernelBuffer&, bool);
bool ata_read_sectors(u32, u16, UserOrKernelBuffer&, bool);
bool ata_write_sectors(u32, u16, const UserOrKernelBuffer&, bool);
void start_request(AsyncBlockDeviceRequest&, bool, bool);
void complete_current_request(AsyncDeviceRequest::RequestResult);
inline void prepare_for_irq();
void ata_read_sectors_with_dma(bool);
void ata_read_sectors(bool);
bool ata_do_read_sector();
void ata_write_sectors_with_dma(bool);
void ata_write_sectors(bool);
void ata_do_write_sector();
// Data members
u8 m_channel_number { 0 }; // Channel number. 0 = master, 1 = slave
@ -108,5 +113,10 @@ private:
RefPtr<PATADiskDevice> m_master;
RefPtr<PATADiskDevice> m_slave;
AsyncBlockDeviceRequest* m_current_request { nullptr };
u32 m_current_request_block_index { 0 };
bool m_current_request_uses_dma { false };
bool m_current_request_flushing_cache { false };
};
}