
Everywhere: Move global Kernel pattern code to Kernel/Library directory

This moves the KString, KBuffer, DoubleBuffer, KBufferBuilder, IOWindow,
UserOrKernelBuffer and ScopedCritical classes into the Kernel/Library
subdirectory.

Also, move the panic and assertions handling code to that directory.
Authored by Liav A on 2023-02-24 20:10:59 +02:00, committed by Jelle Raaijmakers
parent f1cbfc5a6e
commit 7c0540a229
193 changed files with 238 additions and 240 deletions

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Platform.h>
#define __STRINGIFY_HELPER(x) #x
#define __STRINGIFY(x) __STRINGIFY_HELPER(x)
[[noreturn]] void __assertion_failed(char const* msg, char const* file, unsigned line, char const* func);
#define VERIFY(expr) \
do { \
if (!static_cast<bool>(expr)) [[unlikely]] \
__assertion_failed(#expr, __FILE__, __LINE__, __PRETTY_FUNCTION__); \
} while (0)
#define VERIFY_NOT_REACHED() __assertion_failed("not reached", __FILE__, __LINE__, __PRETTY_FUNCTION__)
extern "C" {
[[noreturn]] void _abort();
[[noreturn]] void abort();
}
#define TODO() __assertion_failed("TODO", __FILE__, __LINE__, __PRETTY_FUNCTION__)
#define TODO_AARCH64() __assertion_failed("TODO_AARCH64", __FILE__, __LINE__, __PRETTY_FUNCTION__)
#define VERIFY_INTERRUPTS_DISABLED() VERIFY(!(Processor::are_interrupts_enabled()))
#define VERIFY_INTERRUPTS_ENABLED() VERIFY(Processor::are_interrupts_enabled())
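
As an illustrative sketch (not part of this diff; the function below is hypothetical), these macros assert kernel invariants and mark branches that must be unreachable:

void example_set_state(int state)
{
    VERIFY(state >= 0); // Calls __assertion_failed() and never returns if the check fails.
    switch (state) {
    case 0:
    case 1:
        break;
    default:
        VERIFY_NOT_REACHED(); // Marks a branch that must be impossible to hit.
    }
}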

@@ -0,0 +1,93 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/StringView.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Library/DoubleBuffer.h>
namespace Kernel {
inline void DoubleBuffer::compute_lockfree_metadata()
{
InterruptDisabler disabler;
m_empty = m_read_buffer_index >= m_read_buffer->size && m_write_buffer->size == 0;
m_space_for_writing = m_capacity - m_write_buffer->size;
}
ErrorOr<NonnullOwnPtr<DoubleBuffer>> DoubleBuffer::try_create(StringView name, size_t capacity)
{
auto storage = TRY(KBuffer::try_create_with_size(name, capacity * 2, Memory::Region::Access::ReadWrite));
return adopt_nonnull_own_or_enomem(new (nothrow) DoubleBuffer(capacity, move(storage)));
}
DoubleBuffer::DoubleBuffer(size_t capacity, NonnullOwnPtr<KBuffer> storage)
: m_write_buffer(&m_buffer1)
, m_read_buffer(&m_buffer2)
, m_storage(move(storage))
, m_capacity(capacity)
{
m_buffer1.data = m_storage->data();
m_buffer1.size = 0;
m_buffer2.data = m_storage->data() + capacity;
m_buffer2.size = 0;
m_space_for_writing = capacity;
}
void DoubleBuffer::flip()
{
VERIFY(m_read_buffer_index == m_read_buffer->size);
swap(m_read_buffer, m_write_buffer);
m_write_buffer->size = 0;
m_read_buffer_index = 0;
compute_lockfree_metadata();
}
ErrorOr<size_t> DoubleBuffer::write(UserOrKernelBuffer const& data, size_t size)
{
if (!size)
return 0;
MutexLocker locker(m_lock);
size_t bytes_to_write = min(size, m_space_for_writing);
u8* write_ptr = m_write_buffer->data + m_write_buffer->size;
TRY(data.read(write_ptr, bytes_to_write));
m_write_buffer->size += bytes_to_write;
compute_lockfree_metadata();
if (m_unblock_callback && !m_empty)
m_unblock_callback();
return bytes_to_write;
}
ErrorOr<size_t> DoubleBuffer::read_impl(UserOrKernelBuffer& data, size_t size, MutexLocker&, bool advance_buffer_index)
{
if (size == 0)
return 0;
if (m_read_buffer_index >= m_read_buffer->size && m_write_buffer->size != 0)
flip();
if (m_read_buffer_index >= m_read_buffer->size)
return 0;
size_t nread = min(m_read_buffer->size - m_read_buffer_index, size);
TRY(data.write(m_read_buffer->data + m_read_buffer_index, nread));
if (advance_buffer_index)
m_read_buffer_index += nread;
compute_lockfree_metadata();
if (m_unblock_callback && m_space_for_writing > 0)
m_unblock_callback();
return nread;
}
ErrorOr<size_t> DoubleBuffer::read(UserOrKernelBuffer& data, size_t size)
{
MutexLocker locker(m_lock);
return read_impl(data, size, locker, true);
}
ErrorOr<size_t> DoubleBuffer::peek(UserOrKernelBuffer& data, size_t size)
{
MutexLocker locker(m_lock);
return read_impl(data, size, locker, false);
}
}

@@ -0,0 +1,78 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <Kernel/Library/KBuffer.h>
#include <Kernel/Library/UserOrKernelBuffer.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Tasks/Thread.h>
namespace Kernel {
class DoubleBuffer {
public:
static ErrorOr<NonnullOwnPtr<DoubleBuffer>> try_create(StringView name, size_t capacity = 65536);
ErrorOr<size_t> write(UserOrKernelBuffer const&, size_t);
ErrorOr<size_t> write(u8 const* data, size_t size)
{
return write(UserOrKernelBuffer::for_kernel_buffer(const_cast<u8*>(data)), size);
}
ErrorOr<size_t> read(UserOrKernelBuffer&, size_t);
ErrorOr<size_t> read(u8* data, size_t size)
{
auto buffer = UserOrKernelBuffer::for_kernel_buffer(data);
return read(buffer, size);
}
ErrorOr<size_t> peek(UserOrKernelBuffer&, size_t);
ErrorOr<size_t> peek(u8* data, size_t size)
{
auto buffer = UserOrKernelBuffer::for_kernel_buffer(data);
return peek(buffer, size);
}
bool is_empty() const { return m_empty; }
size_t space_for_writing() const { return m_space_for_writing; }
size_t immediately_readable() const
{
return (m_read_buffer->size - m_read_buffer_index) + m_write_buffer->size;
}
void set_unblock_callback(Function<void()> callback)
{
VERIFY(!m_unblock_callback);
m_unblock_callback = move(callback);
}
private:
explicit DoubleBuffer(size_t capacity, NonnullOwnPtr<KBuffer> storage);
void flip();
void compute_lockfree_metadata();
ErrorOr<size_t> read_impl(UserOrKernelBuffer&, size_t, MutexLocker&, bool advance_buffer_index);
struct InnerBuffer {
u8* data { nullptr };
size_t size { 0 };
};
InnerBuffer* m_write_buffer { nullptr };
InnerBuffer* m_read_buffer { nullptr };
InnerBuffer m_buffer1;
InnerBuffer m_buffer2;
NonnullOwnPtr<KBuffer> m_storage;
Function<void()> m_unblock_callback;
size_t m_capacity { 0 };
size_t m_read_buffer_index { 0 };
size_t m_space_for_writing { 0 };
bool m_empty { true };
mutable Mutex m_lock { "DoubleBuffer"sv };
};
}
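
A usage sketch based on the interface above (the surrounding function and buffer name are hypothetical; the DoubleBuffer calls match the declarations in this header):

ErrorOr<void> example_pipe_like_usage()
{
    auto buffer = TRY(DoubleBuffer::try_create("ExampleBuffer"sv, 4 * KiB));

    u8 const message[] = { 'p', 'i', 'n', 'g' };
    auto nwritten = TRY(buffer->write(message, sizeof(message))); // Producer fills the write buffer.

    u8 out[sizeof(message)] = {};
    auto nread = TRY(buffer->read(out, sizeof(out))); // Consumer flips the buffers and drains the read side.
    VERIFY(nwritten == sizeof(message) && nread == nwritten);
    return {};
}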

Kernel/Library/IOWindow.cpp (new file)
@@ -0,0 +1,265 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/Definitions.h>
#include <Kernel/Library/IOWindow.h>
namespace Kernel {
#if ARCH(X86_64)
ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_for_io_space(IOAddress address, u64 space_length)
{
VERIFY(!Checked<u64>::addition_would_overflow(address.get(), space_length));
auto io_address_range = TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOAddressData(address.get(), space_length)));
return TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOWindow(move(io_address_range))));
}
IOWindow::IOWindow(NonnullOwnPtr<IOAddressData> io_range)
: m_space_type(SpaceType::IO)
, m_io_range(move(io_range))
{
}
#endif
ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_from_io_window_with_offset(u64 offset, u64 space_length)
{
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
VERIFY(m_io_range);
if (Checked<u64>::addition_would_overflow(m_io_range->address(), space_length))
return Error::from_errno(EOVERFLOW);
auto io_address_range = TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOAddressData(as_io_address().offset(offset).get(), space_length)));
return TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOWindow(move(io_address_range))));
}
#endif
VERIFY(space_type() == SpaceType::Memory);
VERIFY(m_memory_mapped_range);
if (Checked<u64>::addition_would_overflow(m_memory_mapped_range->paddr.get(), offset))
return Error::from_errno(EOVERFLOW);
if (Checked<u64>::addition_would_overflow(m_memory_mapped_range->paddr.get() + offset, space_length))
return Error::from_errno(EOVERFLOW);
auto memory_mapped_range = TRY(Memory::adopt_new_nonnull_own_typed_mapping<u8 volatile>(m_memory_mapped_range->paddr.offset(offset), space_length, Memory::Region::Access::ReadWrite));
return TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOWindow(move(memory_mapped_range))));
}
ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_from_io_window_with_offset(u64 offset)
{
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
VERIFY(m_io_range);
VERIFY(m_io_range->space_length() >= offset);
return create_from_io_window_with_offset(offset, m_io_range->space_length() - offset);
}
#endif
VERIFY(space_type() == SpaceType::Memory);
VERIFY(m_memory_mapped_range);
VERIFY(m_memory_mapped_range->length >= offset);
return create_from_io_window_with_offset(offset, m_memory_mapped_range->length - offset);
}
ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_for_pci_device_bar(PCI::DeviceIdentifier const& pci_device_identifier, PCI::HeaderType0BaseRegister pci_bar, u64 space_length)
{
u64 pci_bar_value = PCI::get_BAR(pci_device_identifier, pci_bar);
auto pci_bar_space_type = PCI::get_BAR_space_type(pci_bar_value);
if (pci_bar_space_type == PCI::BARSpaceType::Memory64BitSpace) {
// FIXME: In theory, BAR5 cannot be a 64-bit BAR as it is the last one...
// however, real bare metal hardware might still expose a 64-bit BAR5, so remove this
// check if it causes a problem.
if (pci_bar == PCI::HeaderType0BaseRegister::BAR5) {
return Error::from_errno(EINVAL);
}
u64 next_pci_bar_value = PCI::get_BAR(pci_device_identifier, static_cast<PCI::HeaderType0BaseRegister>(to_underlying(pci_bar) + 1));
pci_bar_value |= next_pci_bar_value << 32;
}
auto pci_bar_space_size = PCI::get_BAR_space_size(pci_device_identifier, pci_bar);
if (pci_bar_space_size < space_length)
return Error::from_errno(EIO);
if (pci_bar_space_type == PCI::BARSpaceType::IOSpace) {
#if ARCH(X86_64)
if (Checked<u64>::addition_would_overflow(pci_bar_value, space_length))
return Error::from_errno(EOVERFLOW);
auto io_address_range = TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOAddressData((pci_bar_value & 0xfffffffc), space_length)));
return TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOWindow(move(io_address_range))));
#else
// Note: For non-x86 platforms, IO PCI BARs are simply not usable.
return Error::from_errno(ENOTSUP);
#endif
}
if (pci_bar_space_type == PCI::BARSpaceType::Memory32BitSpace && Checked<u32>::addition_would_overflow(pci_bar_value, space_length))
return Error::from_errno(EOVERFLOW);
if (pci_bar_space_type == PCI::BARSpaceType::Memory16BitSpace && Checked<u16>::addition_would_overflow(pci_bar_value, space_length))
return Error::from_errno(EOVERFLOW);
if (pci_bar_space_type == PCI::BARSpaceType::Memory64BitSpace && Checked<u64>::addition_would_overflow(pci_bar_value, space_length))
return Error::from_errno(EOVERFLOW);
auto memory_mapped_range = TRY(Memory::adopt_new_nonnull_own_typed_mapping<u8 volatile>(PhysicalAddress(pci_bar_value & PCI::bar_address_mask), space_length, Memory::Region::Access::ReadWrite));
return TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOWindow(move(memory_mapped_range))));
}
ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_for_pci_device_bar(PCI::DeviceIdentifier const& pci_device_identifier, PCI::HeaderType0BaseRegister pci_bar)
{
u64 pci_bar_space_size = PCI::get_BAR_space_size(pci_device_identifier, pci_bar);
return create_for_pci_device_bar(pci_device_identifier, pci_bar, pci_bar_space_size);
}
IOWindow::IOWindow(NonnullOwnPtr<Memory::TypedMapping<u8 volatile>> memory_mapped_range)
: m_space_type(SpaceType::Memory)
, m_memory_mapped_range(move(memory_mapped_range))
{
}
IOWindow::~IOWindow() = default;
bool IOWindow::is_access_aligned(u64 offset, size_t byte_size_access) const
{
return (offset % byte_size_access) == 0;
}
bool IOWindow::is_access_in_range(u64 offset, size_t byte_size_access) const
{
if (Checked<u64>::addition_would_overflow(offset, byte_size_access))
return false;
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
VERIFY(m_io_range);
VERIFY(!Checked<u64>::addition_would_overflow(m_io_range->address(), m_io_range->space_length()));
// To understand how we treat IO address space with the corresponding calculation, the Intel Software Developer manual
// helps us to understand the layout of the IO address space -
//
// Intel® 64 and IA-32 Architectures Software Developers Manual, Volume 1: Basic Architecture, 16.3 I/O ADDRESS SPACE, page 16-1 wrote:
// Any two consecutive 8-bit ports can be treated as a 16-bit port, and any four consecutive ports can be a 32-bit port.
// In this manner, the processor can transfer 8, 16, or 32 bits to or from a device in the I/O address space.
// Like words in memory, 16-bit ports should be aligned to even addresses (0, 2, 4, ...) so that all 16 bits can be transferred in a single bus cycle.
// Likewise, 32-bit ports should be aligned to addresses that are multiples of four (0, 4, 8, ...).
// The processor supports data transfers to unaligned ports, but there is a performance penalty because one or more
// extra bus cycle must be used.
return (m_io_range->address() + m_io_range->space_length()) >= (offset + byte_size_access);
}
#endif
VERIFY(space_type() == SpaceType::Memory);
VERIFY(m_memory_mapped_range);
VERIFY(!Checked<u64>::addition_would_overflow(m_memory_mapped_range->offset, m_memory_mapped_range->length));
return (m_memory_mapped_range->offset + m_memory_mapped_range->length) >= (offset + byte_size_access);
}
u8 IOWindow::read8(u64 offset)
{
VERIFY(is_access_in_range(offset, sizeof(u8)));
u8 data { 0 };
in<u8>(offset, data);
return data;
}
u16 IOWindow::read16(u64 offset)
{
// Note: Although it might be OK to allow unaligned access on regular memory,
// for memory mapped IO access, it should always be considered a bug.
// The same goes for port mapped IO access, because in x86 unaligned access to ports
// is possible but there's a performance penalty.
VERIFY(is_access_in_range(offset, sizeof(u16)));
VERIFY(is_access_aligned(offset, sizeof(u16)));
u16 data { 0 };
in<u16>(offset, data);
return data;
}
u32 IOWindow::read32(u64 offset)
{
// Note: Although it might be OK to allow unaligned access on regular memory,
// for memory mapped IO access, it should always be considered a bug.
// The same goes for port mapped IO access, because in x86 unaligned access to ports
// is possible but there's a performance penalty.
VERIFY(is_access_in_range(offset, sizeof(u32)));
VERIFY(is_access_aligned(offset, sizeof(u32)));
u32 data { 0 };
in<u32>(offset, data);
return data;
}
void IOWindow::write8(u64 offset, u8 data)
{
VERIFY(is_access_in_range(offset, sizeof(u8)));
out<u8>(offset, data);
}
void IOWindow::write16(u64 offset, u16 data)
{
// Note: Although it might be OK to allow unaligned access on regular memory,
// for memory mapped IO access, it should always be considered a bug.
// The same goes for port mapped IO access, because in x86 unaligned access to ports
// is possible but there's a performance penalty.
VERIFY(is_access_in_range(offset, sizeof(u16)));
VERIFY(is_access_aligned(offset, sizeof(u16)));
out<u16>(offset, data);
}
void IOWindow::write32(u64 offset, u32 data)
{
// Note: Although it might be OK to allow unaligned access on regular memory,
// for memory mapped IO access, it should always be considered a bug.
// The same goes for port mapped IO access, because in x86 unaligned access to ports
// is possible but there's a performance penalty.
VERIFY(is_access_in_range(offset, sizeof(u32)));
VERIFY(is_access_aligned(offset, sizeof(u32)));
out<u32>(offset, data);
}
void IOWindow::write32_unaligned(u64 offset, u32 data)
{
// Note: We only verify that we access IO in the expected range.
// Note: for port mapped IO access, because in x86 unaligned access to ports
// is possible but there's a performance penalty, we can still allow that to happen.
// However, it should be noted that most cases should not use unaligned access
// to hardware IO, so this is a valid case in emulators or hypervisors only.
// Note: Using this for memory mapped IO will fail for unaligned access, because
// there's no valid use case for it (yet).
VERIFY(space_type() != SpaceType::Memory);
VERIFY(is_access_in_range(offset, sizeof(u32)));
out<u32>(offset, data);
}
u32 IOWindow::read32_unaligned(u64 offset)
{
// Note: We only verify that we access IO in the expected range.
// Note: for port mapped IO access, because in x86 unaligned access to ports
// is possible but there's a performance penalty, we can still allow that to happen.
// However, it should be noted that most cases should not use unaligned access
// to hardware IO, so this is a valid case in emulators or hypervisors only.
// Note: Using this for memory mapped IO will fail for unaligned access, because
// there's no valid use case for it (yet).
VERIFY(space_type() != SpaceType::Memory);
VERIFY(is_access_in_range(offset, sizeof(u32)));
u32 data { 0 };
in<u32>(offset, data);
return data;
}
PhysicalAddress IOWindow::as_physical_memory_address() const
{
VERIFY(space_type() == SpaceType::Memory);
VERIFY(m_memory_mapped_range);
return m_memory_mapped_range->paddr;
}
u8 volatile* IOWindow::as_memory_address_pointer()
{
VERIFY(space_type() == SpaceType::Memory);
VERIFY(m_memory_mapped_range);
return m_memory_mapped_range->ptr();
}
#if ARCH(X86_64)
IOAddress IOWindow::as_io_address() const
{
VERIFY(space_type() == SpaceType::IO);
VERIFY(m_io_range);
return IOAddress(m_io_range->address());
}
#endif
}

Kernel/Library/IOWindow.h (new file)
@@ -0,0 +1,155 @@
/*
* Copyright (c) 2022, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/ByteReader.h>
#include <AK/Platform.h>
#include <AK/Types.h>
#if ARCH(X86_64)
# include <Kernel/Arch/x86_64/IO.h>
#endif
#include <Kernel/Bus/PCI/Definitions.h>
#include <Kernel/Memory/PhysicalAddress.h>
#include <Kernel/Memory/TypedMapping.h>
namespace Kernel {
class IOWindow {
public:
enum class SpaceType {
#if ARCH(X86_64)
IO,
#endif
Memory,
};
SpaceType space_type() const { return m_space_type; }
#if ARCH(X86_64)
static ErrorOr<NonnullOwnPtr<IOWindow>> create_for_io_space(IOAddress, u64 space_length);
#endif
static ErrorOr<NonnullOwnPtr<IOWindow>> create_for_pci_device_bar(PCI::DeviceIdentifier const&, PCI::HeaderType0BaseRegister, u64 space_length);
static ErrorOr<NonnullOwnPtr<IOWindow>> create_for_pci_device_bar(PCI::DeviceIdentifier const&, PCI::HeaderType0BaseRegister);
ErrorOr<NonnullOwnPtr<IOWindow>> create_from_io_window_with_offset(u64 offset, u64 space_length);
ErrorOr<NonnullOwnPtr<IOWindow>> create_from_io_window_with_offset(u64 offset);
u8 read8(u64 offset);
u16 read16(u64 offset);
u32 read32(u64 offset);
void write8(u64 offset, u8);
void write16(u64 offset, u16);
void write32(u64 offset, u32);
// Note: These methods are useful in exceptional cases where we need to do unaligned
// access. This mostly happens on emulators and hypervisors (such as VMware) because they don't enforce aligned access
// to IO and sometimes even require such access, so we have to use these functions.
void write32_unaligned(u64 offset, u32);
u32 read32_unaligned(u64 offset);
bool operator==(IOWindow const& other) const = delete;
bool operator!=(IOWindow const& other) const = delete;
bool operator>(IOWindow const& other) const = delete;
bool operator>=(IOWindow const& other) const = delete;
bool operator<(IOWindow const& other) const = delete;
bool operator<=(IOWindow const& other) const = delete;
~IOWindow();
PhysicalAddress as_physical_memory_address() const;
#if ARCH(X86_64)
IOAddress as_io_address() const;
#endif
private:
explicit IOWindow(NonnullOwnPtr<Memory::TypedMapping<u8 volatile>>);
u8 volatile* as_memory_address_pointer();
#if ARCH(X86_64)
struct IOAddressData {
public:
IOAddressData(u64 address, u64 space_length)
: m_address(address)
, m_space_length(space_length)
{
}
u64 address() const { return m_address; }
u64 space_length() const { return m_space_length; }
private:
u64 m_address { 0 };
u64 m_space_length { 0 };
};
explicit IOWindow(NonnullOwnPtr<IOAddressData>);
#endif
bool is_access_in_range(u64 offset, size_t byte_size_access) const;
bool is_access_aligned(u64 offset, size_t byte_size_access) const;
template<typename T>
ALWAYS_INLINE void in(u64 start_offset, T& data)
{
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
data = as_io_address().offset(start_offset).in<T>();
return;
}
#endif
VERIFY(m_space_type == SpaceType::Memory);
VERIFY(m_memory_mapped_range);
// Note: For memory-mapped IO we simply never allow unaligned access as it
// can cause problems with strict bare metal hardware. For example, some XHCI USB controllers
// might completely lock up because of an unaligned memory access to their registers.
VERIFY((start_offset % sizeof(T)) == 0);
data = *(T volatile*)(as_memory_address_pointer() + start_offset);
}
template<typename T>
ALWAYS_INLINE void out(u64 start_offset, T value)
{
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
VERIFY(m_io_range);
as_io_address().offset(start_offset).out<T>(value);
return;
}
#endif
VERIFY(m_space_type == SpaceType::Memory);
VERIFY(m_memory_mapped_range);
// Note: For memory-mapped IO we simply never allow unaligned access as it
// can cause problems with strict bare metal hardware. For example, some XHCI USB controllers
// might completely lock up because of an unaligned memory access to their registers.
VERIFY((start_offset % sizeof(T)) == 0);
*(T volatile*)(as_memory_address_pointer() + start_offset) = value;
}
SpaceType m_space_type { SpaceType::Memory };
OwnPtr<Memory::TypedMapping<u8 volatile>> m_memory_mapped_range;
#if ARCH(X86_64)
OwnPtr<IOAddressData> m_io_range;
#endif
};
}
template<>
struct AK::Formatter<Kernel::IOWindow> : AK::Formatter<FormatString> {
ErrorOr<void> format(FormatBuilder& builder, Kernel::IOWindow const& value)
{
#if ARCH(X86_64)
if (value.space_type() == Kernel::IOWindow::SpaceType::IO)
return Formatter<FormatString>::format(builder, "{}"sv, value.as_io_address());
#endif
VERIFY(value.space_type() == Kernel::IOWindow::SpaceType::Memory);
return Formatter<FormatString>::format(builder, "Memory {}"sv, value.as_physical_memory_address());
}
};
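
A usage sketch of the API above (hypothetical calling code; the BAR choice and register offsets are invented for illustration):

ErrorOr<void> example_map_device_registers(PCI::DeviceIdentifier const& device)
{
    // Works for both memory-mapped BARs and (on x86-64) port I/O BARs.
    auto registers = TRY(IOWindow::create_for_pci_device_bar(device, PCI::HeaderType0BaseRegister::BAR0));

    u32 status = registers->read32(0x04);   // Hypothetical status register offset.
    registers->write32(0x08, status | 0x1); // Hypothetical control register offset.
    return {};
}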

Kernel/Library/KBuffer.h (new file)
@@ -0,0 +1,66 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
// KBuffer: Memory buffer backed by a kernel region.
//
// The memory is allocated via the global kernel-only page allocator, rather than via
// kmalloc() which is what ByteBuffer/Vector/etc will use.
//
// This makes KBuffer a little heavier to allocate, but much better for large and/or
// long-lived allocations, since they don't put all that weight and pressure on the
// severely limited kmalloc heap.
#include <AK/Assertions.h>
#include <AK/StringView.h>
#include <Kernel/Library/StdLib.h> // For memcpy. FIXME: Make memcpy less expensive to access a declaration of in the Kernel.
#include <Kernel/Memory/MemoryManager.h>
namespace Kernel {
class [[nodiscard]] KBuffer {
public:
static ErrorOr<NonnullOwnPtr<KBuffer>> try_create_with_size(StringView name, size_t size, Memory::Region::Access access = Memory::Region::Access::ReadWrite, AllocationStrategy strategy = AllocationStrategy::Reserve)
{
auto rounded_size = TRY(Memory::page_round_up(size));
auto region = TRY(MM.allocate_kernel_region(rounded_size, name, access, strategy));
return TRY(adopt_nonnull_own_or_enomem(new (nothrow) KBuffer { size, move(region) }));
}
static ErrorOr<NonnullOwnPtr<KBuffer>> try_create_with_bytes(StringView name, ReadonlyBytes bytes, Memory::Region::Access access = Memory::Region::Access::ReadWrite, AllocationStrategy strategy = AllocationStrategy::Reserve)
{
auto buffer = TRY(try_create_with_size(name, bytes.size(), access, strategy));
memcpy(buffer->data(), bytes.data(), bytes.size());
return buffer;
}
[[nodiscard]] u8* data() { return m_region->vaddr().as_ptr(); }
[[nodiscard]] u8 const* data() const { return m_region->vaddr().as_ptr(); }
[[nodiscard]] size_t size() const { return m_size; }
[[nodiscard]] size_t capacity() const { return m_region->size(); }
[[nodiscard]] ReadonlyBytes bytes() const { return { data(), size() }; }
[[nodiscard]] Bytes bytes() { return { data(), size() }; }
void set_size(size_t size)
{
VERIFY(size <= capacity());
m_size = size;
}
private:
explicit KBuffer(size_t size, NonnullOwnPtr<Memory::Region> region)
: m_size(size)
, m_region(move(region))
{
}
size_t m_size { 0 };
NonnullOwnPtr<Memory::Region> m_region;
};
}
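
A usage sketch (hypothetical calling code) for the allocation path described in the comment at the top of this file:

ErrorOr<void> example_kbuffer_usage()
{
    static constexpr u8 payload[] = { 'a', 'b', 'c' };
    auto buffer = TRY(KBuffer::try_create_with_bytes("Example"sv, ReadonlyBytes { payload, sizeof(payload) }));
    VERIFY(buffer->size() == sizeof(payload));
    VERIFY(buffer->capacity() >= buffer->size()); // Capacity is the page-rounded region size.
    return {};
}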

@@ -0,0 +1,132 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/StdLibExtras.h>
#include <Kernel/Library/KBufferBuilder.h>
namespace Kernel {
inline bool KBufferBuilder::check_expand(size_t size)
{
if (!m_buffer)
return false;
if ((m_size + size) < m_buffer->capacity())
return true;
if (Checked<size_t>::addition_would_overflow(m_size, size))
return false;
size_t new_buffer_size = m_size + size;
if (Checked<size_t>::addition_would_overflow(new_buffer_size, 1 * MiB))
return false;
auto rounded_new_buffer_size_or_error = Memory::page_round_up(new_buffer_size + 1 * MiB);
if (rounded_new_buffer_size_or_error.is_error()) {
return false;
}
auto new_buffer_or_error = KBuffer::try_create_with_size("KBufferBuilder"sv, rounded_new_buffer_size_or_error.value());
if (new_buffer_or_error.is_error())
return false;
auto new_buffer = new_buffer_or_error.release_value();
memcpy(new_buffer->data(), m_buffer->data(), m_buffer->size());
m_buffer = move(new_buffer);
return true;
}
bool KBufferBuilder::flush()
{
if (!m_buffer)
return false;
m_buffer->set_size(m_size);
return true;
}
OwnPtr<KBuffer> KBufferBuilder::build()
{
if (!flush())
return {};
return move(m_buffer);
}
ErrorOr<KBufferBuilder> KBufferBuilder::try_create()
{
auto buffer = TRY(KBuffer::try_create_with_size("KBufferBuilder"sv, 4 * MiB, Memory::Region::Access::ReadWrite));
return KBufferBuilder { move(buffer) };
}
KBufferBuilder::KBufferBuilder(NonnullOwnPtr<KBuffer> buffer)
: m_buffer(move(buffer))
{
}
ErrorOr<void> KBufferBuilder::append_bytes(ReadonlyBytes bytes)
{
if (!check_expand(bytes.size()))
return ENOMEM;
memcpy(insertion_ptr(), bytes.data(), bytes.size());
m_size += bytes.size();
return {};
}
ErrorOr<void> KBufferBuilder::append(StringView str)
{
if (str.is_empty())
return {};
if (!check_expand(str.length()))
return ENOMEM;
memcpy(insertion_ptr(), str.characters_without_null_termination(), str.length());
m_size += str.length();
return {};
}
ErrorOr<void> KBufferBuilder::append(char const* characters, int length)
{
if (!length)
return {};
if (!check_expand(length))
return ENOMEM;
memcpy(insertion_ptr(), characters, length);
m_size += length;
return {};
}
ErrorOr<void> KBufferBuilder::append(char ch)
{
if (!check_expand(1))
return ENOMEM;
insertion_ptr()[0] = ch;
m_size += 1;
return {};
}
ErrorOr<void> KBufferBuilder::append_escaped_for_json(StringView string)
{
for (auto ch : string) {
switch (ch) {
case '\b':
TRY(append("\\b"sv));
break;
case '\n':
TRY(append("\\n"sv));
break;
case '\t':
TRY(append("\\t"sv));
break;
case '\"':
TRY(append("\\\""sv));
break;
case '\\':
TRY(append("\\\\"sv));
break;
default:
if (ch >= 0 && ch <= 0x1f)
TRY(appendff("\\u{:04x}", ch));
else
TRY(append(ch));
}
}
return {};
}
}

@@ -0,0 +1,74 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/StringBuilder.h>
#include <AK/StringView.h>
#include <Kernel/Library/KBuffer.h>
namespace Kernel {
class KBufferBuilder {
AK_MAKE_NONCOPYABLE(KBufferBuilder);
public:
using OutputType = KBuffer;
static ErrorOr<KBufferBuilder> try_create();
KBufferBuilder(KBufferBuilder&&) = default;
KBufferBuilder& operator=(KBufferBuilder&&) = default;
~KBufferBuilder() = default;
ErrorOr<void> append(StringView);
ErrorOr<void> append(char);
ErrorOr<void> append(char const*, int);
ErrorOr<void> append_escaped_for_json(StringView);
ErrorOr<void> append_bytes(ReadonlyBytes);
template<typename... Parameters>
ErrorOr<void> appendff(CheckedFormatString<Parameters...>&& fmtstr, Parameters const&... parameters)
{
// FIXME: This is really not ideal, but vformat expects a StringBuilder.
StringBuilder builder;
AK::VariadicFormatParams<AK::AllowDebugOnlyFormatters::No, Parameters...> variadic_format_params { parameters... };
TRY(vformat(builder, fmtstr.view(), variadic_format_params));
return append_bytes(builder.string_view().bytes());
}
bool flush();
OwnPtr<KBuffer> build();
ReadonlyBytes bytes() const
{
if (!m_buffer)
return {};
return m_buffer->bytes();
}
size_t length() const
{
return m_size;
}
private:
explicit KBufferBuilder(NonnullOwnPtr<KBuffer>);
bool check_expand(size_t);
u8* insertion_ptr()
{
if (!m_buffer)
return nullptr;
return m_buffer->data() + m_size;
}
OwnPtr<KBuffer> m_buffer;
size_t m_size { 0 };
};
}
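
A usage sketch (hypothetical calling code) combining append(), appendff() and build():

ErrorOr<NonnullOwnPtr<KBuffer>> example_build_report(int value)
{
    auto builder = TRY(KBufferBuilder::try_create());
    TRY(builder.append("value="sv));
    TRY(builder.appendff("{}", value));
    auto buffer = builder.build(); // Finalizes the size; returns an OwnPtr<KBuffer>.
    if (!buffer)
        return ENOMEM;
    return buffer.release_nonnull();
}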

@@ -0,0 +1,89 @@
/*
* Copyright (c) 2021, Max Wipfli <max.wipfli@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Vector.h>
#include <Kernel/Library/KLexicalPath.h>
namespace Kernel::KLexicalPath {
static StringView const s_single_dot = "."sv;
bool is_absolute(StringView path)
{
return !path.is_empty() && path[0] == '/';
}
bool is_canonical(StringView path)
{
// FIXME: This can probably be done more efficiently.
if (path.is_empty())
return false;
if (path.ends_with('/') && path.length() != 1)
return false;
if (path.starts_with("./"sv) || path.contains("/./"sv) || path.ends_with("/."sv))
return false;
if (path.starts_with("../"sv) || path.contains("/../"sv) || path.ends_with("/.."sv))
return false;
if (path.contains("//"sv))
return false;
return true;
}
StringView basename(StringView a_path)
{
if (a_path == "/"sv)
return a_path;
if (a_path.is_empty())
return s_single_dot;
auto path = a_path.trim("/"sv, TrimMode::Right);
// NOTE: If it's empty now, it means the path was just a series of slashes.
if (path.is_empty())
return a_path.substring_view(0, 1);
auto slash_index = path.find_last('/');
if (!slash_index.has_value())
return path;
auto basename = path.substring_view(*slash_index + 1);
return basename;
}
StringView dirname(StringView path)
{
VERIFY(is_canonical(path));
auto slash_index = path.find_last('/');
VERIFY(slash_index.has_value());
return path.substring_view(0, *slash_index);
}
Vector<StringView> parts(StringView path)
{
VERIFY(is_canonical(path));
return path.split_view('/');
}
ErrorOr<NonnullOwnPtr<KString>> try_join(StringView first, StringView second)
{
VERIFY(is_canonical(first));
VERIFY(is_canonical(second));
VERIFY(!is_absolute(second));
if (first == "/"sv) {
char* buffer;
auto string = TRY(KString::try_create_uninitialized(1 + second.length(), buffer));
buffer[0] = '/';
__builtin_memcpy(buffer + 1, second.characters_without_null_termination(), second.length());
buffer[string->length()] = 0;
return string;
}
char* buffer;
auto string = TRY(KString::try_create_uninitialized(first.length() + 1 + second.length(), buffer));
__builtin_memcpy(buffer, first.characters_without_null_termination(), first.length());
buffer[first.length()] = '/';
__builtin_memcpy(buffer + first.length() + 1, second.characters_without_null_termination(), second.length());
buffer[string->length()] = 0;
return string;
}
}

@@ -0,0 +1,22 @@
/*
* Copyright (c) 2021, Max Wipfli <max.wipfli@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/StringView.h>
#include <Kernel/Library/KString.h>
namespace Kernel::KLexicalPath {
bool is_absolute(StringView);
bool is_canonical(StringView);
StringView basename(StringView);
StringView dirname(StringView);
Vector<StringView> parts(StringView);
ErrorOr<NonnullOwnPtr<KString>> try_join(StringView, StringView);
}
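
A usage sketch (hypothetical calling code) exercising the helpers declared above:

ErrorOr<void> example_klexicalpath_usage()
{
    VERIFY(KLexicalPath::is_absolute("/home/anon"sv));
    VERIFY(KLexicalPath::basename("/home/anon"sv) == "anon"sv);
    VERIFY(KLexicalPath::dirname("/home/anon"sv) == "/home"sv);
    auto joined = TRY(KLexicalPath::try_join("/home"sv, "anon"sv));
    VERIFY(joined->view() == "/home/anon"sv);
    return {};
}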

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Format.h>
#include <AK/StringBuilder.h>
#include <Kernel/Library/KString.h>
extern bool g_in_early_boot;
namespace Kernel {
ErrorOr<NonnullOwnPtr<KString>> KString::try_create(StringView string)
{
char* characters = nullptr;
size_t length = string.length();
auto new_string = TRY(KString::try_create_uninitialized(length, characters));
if (!string.is_empty())
__builtin_memcpy(characters, string.characters_without_null_termination(), length);
characters[length] = '\0';
return new_string;
}
ErrorOr<NonnullOwnPtr<KString>> KString::vformatted(StringView fmtstr, AK::TypeErasedFormatParams& params)
{
StringBuilder builder;
TRY(AK::vformat(builder, fmtstr, params));
return try_create(builder.string_view());
}
NonnullOwnPtr<KString> KString::must_create(StringView string)
{
// We can only enforce success during early boot.
VERIFY(g_in_early_boot);
return KString::try_create(string).release_value();
}
ErrorOr<NonnullOwnPtr<KString>> KString::try_create_uninitialized(size_t length, char*& characters)
{
size_t allocation_size = sizeof(KString) + (sizeof(char) * length) + sizeof(char);
auto* slot = kmalloc(allocation_size);
if (!slot)
return ENOMEM;
auto new_string = TRY(adopt_nonnull_own_or_enomem(new (slot) KString(length)));
characters = new_string->m_characters;
return new_string;
}
NonnullOwnPtr<KString> KString::must_create_uninitialized(size_t length, char*& characters)
{
// We can only enforce success during early boot.
VERIFY(g_in_early_boot);
return KString::try_create_uninitialized(length, characters).release_value();
}
ErrorOr<NonnullOwnPtr<KString>> KString::try_clone() const
{
return try_create(view());
}
void KString::operator delete(void* string)
{
if (!string)
return;
size_t allocation_size = sizeof(KString) + (sizeof(char) * static_cast<KString*>(string)->m_length) + sizeof(char);
kfree_sized(string, allocation_size);
}
}

Kernel/Library/KString.h (new file)
@@ -0,0 +1,131 @@
/*
* Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Format.h>
#include <AK/OwnPtr.h>
namespace Kernel {
class KString {
AK_MAKE_NONCOPYABLE(KString);
AK_MAKE_NONMOVABLE(KString);
public:
[[nodiscard]] static ErrorOr<NonnullOwnPtr<KString>> try_create_uninitialized(size_t, char*&);
[[nodiscard]] static NonnullOwnPtr<KString> must_create_uninitialized(size_t, char*&);
[[nodiscard]] static ErrorOr<NonnullOwnPtr<KString>> try_create(StringView);
[[nodiscard]] static NonnullOwnPtr<KString> must_create(StringView);
[[nodiscard]] static ErrorOr<NonnullOwnPtr<KString>> vformatted(StringView fmtstr, AK::TypeErasedFormatParams&);
template<typename... Parameters>
[[nodiscard]] static ErrorOr<NonnullOwnPtr<KString>> formatted(CheckedFormatString<Parameters...>&& fmtstr, Parameters const&... parameters)
{
AK::VariadicFormatParams<AK::AllowDebugOnlyFormatters::No, Parameters...> variadic_format_parameters { parameters... };
return vformatted(fmtstr.view(), variadic_format_parameters);
}
[[nodiscard]] static ErrorOr<NonnullOwnPtr<KString>> number(Arithmetic auto value)
{
return formatted("{}", value);
}
void operator delete(void*);
ErrorOr<NonnullOwnPtr<KString>> try_clone() const;
[[nodiscard]] bool is_empty() const { return m_length == 0; }
[[nodiscard]] size_t length() const { return m_length; }
[[nodiscard]] char const* characters() const { return m_characters; }
[[nodiscard]] StringView view() const { return { characters(), length() }; }
[[nodiscard]] ReadonlyBytes bytes() const { return { characters(), length() }; }
private:
explicit KString(size_t length)
: m_length(length)
{
}
size_t m_length { 0 };
char m_characters[0];
};
}
namespace AK {
template<>
struct Formatter<Kernel::KString> : Formatter<StringView> {
ErrorOr<void> format(FormatBuilder& builder, Kernel::KString const& value)
{
return Formatter<StringView>::format(builder, value.view());
}
};
template<>
struct Formatter<OwnPtr<Kernel::KString>> : Formatter<StringView> {
ErrorOr<void> format(FormatBuilder& builder, OwnPtr<Kernel::KString> const& value)
{
if (value)
return Formatter<StringView>::format(builder, value->view());
return Formatter<StringView>::format(builder, "[out of memory]"sv);
}
};
template<>
struct Formatter<NonnullOwnPtr<Kernel::KString>> : Formatter<StringView> {
ErrorOr<void> format(FormatBuilder& builder, NonnullOwnPtr<Kernel::KString> const& value)
{
return Formatter<StringView>::format(builder, value->view());
}
};
template<>
struct Traits<NonnullOwnPtr<Kernel::KString>> : public GenericTraits<NonnullOwnPtr<Kernel::KString>> {
using PeekType = Kernel::KString*;
using ConstPeekType = Kernel::KString const*;
static unsigned hash(NonnullOwnPtr<Kernel::KString> const& p) { return string_hash(p->characters(), p->length()); }
static bool equals(NonnullOwnPtr<Kernel::KString> const& a, NonnullOwnPtr<Kernel::KString> const& b) { return a->view() == b->view(); }
static bool equals(StringView a, NonnullOwnPtr<Kernel::KString> const& b) { return a == b->view(); }
};
template<>
struct Traits<OwnPtr<Kernel::KString>> : public GenericTraits<OwnPtr<Kernel::KString>> {
using PeekType = Kernel::KString*;
using ConstPeekType = Kernel::KString const*;
static unsigned hash(OwnPtr<Kernel::KString> const& p)
{
if (!p)
return ptr_hash(nullptr);
return string_hash(p->characters(), p->length());
}
static bool equals(OwnPtr<Kernel::KString> const& a, OwnPtr<Kernel::KString> const& b)
{
if (!a || !b)
return a.ptr() == b.ptr();
if (a == b)
return true;
return a->view() == b->view();
}
static bool equals(StringView a, OwnPtr<Kernel::KString> const& b)
{
if (!b)
return a.is_null();
return a == b->view();
}
};
namespace Detail {
template<>
inline constexpr bool IsHashCompatible<StringView, NonnullOwnPtr<Kernel::KString>> = true;
template<>
inline constexpr bool IsHashCompatible<StringView, OwnPtr<Kernel::KString>> = true;
}
}
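
A usage sketch (hypothetical calling code) of try_create() and formatted():

ErrorOr<void> example_kstring_usage()
{
    auto name = TRY(KString::try_create("thread-0"sv));
    auto label = TRY(KString::formatted("{}:{}", name->view(), 42));
    VERIFY(label->view() == "thread-0:42"sv);
    return {};
}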

@@ -16,7 +16,7 @@
#include <Kernel/Library/NonnullLockRefPtr.h>
#ifdef KERNEL
# include <Kernel/Arch/Processor.h>
# include <Kernel/ScopedCritical.h>
# include <Kernel/Library/ScopedCritical.h>
#endif
#define LOCKREFPTR_SCRUB_BYTE 0xa0

@@ -12,7 +12,7 @@
#include <AK/StdLibExtras.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/ScopedCritical.h>
#include <Kernel/Library/ScopedCritical.h>
namespace AK {

@@ -0,0 +1,148 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Library/StdLib.h>
extern "C" {
void* memcpy(void* dest_ptr, void const* src_ptr, size_t n)
{
#if ARCH(X86_64)
size_t dest = (size_t)dest_ptr;
size_t src = (size_t)src_ptr;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
asm volatile(
"rep movsq\n"
: "=S"(src), "=D"(dest)
: "S"(src), "D"(dest), "c"(size_ts)
: "memory");
n -= size_ts * sizeof(size_t);
if (n == 0)
return dest_ptr;
}
asm volatile(
"rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
: "memory");
#else
u8* pd = (u8*)dest_ptr;
u8 const* ps = (u8 const*)src_ptr;
for (; n--;)
*pd++ = *ps++;
#endif
return dest_ptr;
}
void* memmove(void* dest, void const* src, size_t n)
{
if (dest < src)
return memcpy(dest, src, n);
u8* pd = (u8*)dest;
u8 const* ps = (u8 const*)src;
for (pd += n, ps += n; n--;)
*--pd = *--ps;
return dest;
}
void* memset(void* dest_ptr, int c, size_t n)
{
#if ARCH(X86_64)
size_t dest = (size_t)dest_ptr;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
size_t expanded_c = explode_byte((u8)c);
asm volatile(
"rep stosq\n"
: "=D"(dest)
: "D"(dest), "c"(size_ts), "a"(expanded_c)
: "memory");
n -= size_ts * sizeof(size_t);
if (n == 0)
return dest_ptr;
}
asm volatile(
"rep stosb\n"
: "=D"(dest), "=c"(n)
: "0"(dest), "1"(n), "a"(c)
: "memory");
#else
u8* pd = (u8*)dest_ptr;
for (; n--;)
*pd++ = c;
#endif
return dest_ptr;
}
size_t strlen(char const* str)
{
size_t len = 0;
while (*(str++))
++len;
return len;
}
size_t strnlen(char const* str, size_t maxlen)
{
size_t len = 0;
for (; len < maxlen && *str; str++)
len++;
return len;
}
int strcmp(char const* s1, char const* s2)
{
for (; *s1 == *s2; ++s1, ++s2) {
if (*s1 == 0)
return 0;
}
return *(u8 const*)s1 < *(u8 const*)s2 ? -1 : 1;
}
int memcmp(void const* v1, void const* v2, size_t n)
{
auto const* s1 = (u8 const*)v1;
auto const* s2 = (u8 const*)v2;
while (n-- > 0) {
if (*s1++ != *s2++)
return s1[-1] < s2[-1] ? -1 : 1;
}
return 0;
}
int strncmp(char const* s1, char const* s2, size_t n)
{
if (!n)
return 0;
do {
if (*s1 != *s2++)
return *(unsigned char const*)s1 - *(unsigned char const*)--s2;
if (*s1++ == 0)
break;
} while (--n);
return 0;
}
char* strstr(char const* haystack, char const* needle)
{
char nch;
char hch;
if ((nch = *needle++) != 0) {
size_t len = strlen(needle);
do {
do {
if ((hch = *haystack++) == 0)
return nullptr;
} while (hch != nch);
} while (strncmp(haystack, needle, len) != 0);
--haystack;
}
return const_cast<char*>(haystack);
}
}

@@ -14,7 +14,7 @@
#include <AK/Types.h>
#ifdef KERNEL
# include <Kernel/Arch/Processor.h>
# include <Kernel/ScopedCritical.h>
# include <Kernel/Library/ScopedCritical.h>
#endif
#define NONNULLLOCKREFPTR_SCRUB_BYTE 0xa1

Kernel/Library/Panic.cpp (new file)
@@ -0,0 +1,54 @@
/*
* Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Format.h>
#include <Kernel/Arch/Processor.h>
#if ARCH(X86_64)
# include <Kernel/Arch/x86_64/Shutdown.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/RPi/Watchdog.h>
#endif
#include <Kernel/CommandLine.h>
#include <Kernel/KSyms.h>
#include <Kernel/Library/Panic.h>
#include <Kernel/Tasks/Thread.h>
namespace Kernel {
[[noreturn]] static void __shutdown()
{
#if ARCH(X86_64)
qemu_shutdown();
virtualbox_shutdown();
#elif ARCH(AARCH64)
RPi::Watchdog::the().system_shutdown();
#endif
// Note: If we failed to invoke platform shutdown, we need to halt afterwards
// to ensure no further execution on any CPU still happens.
Processor::halt();
}
void __panic(char const* file, unsigned int line, char const* function)
{
// Avoid lock ranking checks on crashing paths, just try to get some debugging messages out.
auto* thread = Thread::current();
if (thread)
thread->set_crashing();
critical_dmesgln("at {}:{} in {}", file, line, function);
dump_backtrace(PrintToScreen::Yes);
if (!CommandLine::was_initialized())
Processor::halt();
switch (kernel_command_line().panic_mode()) {
case PanicMode::Shutdown:
__shutdown();
case PanicMode::Halt:
[[fallthrough]];
default:
Processor::halt();
}
}
}

Kernel/Library/Panic.h (new file)
@@ -0,0 +1,22 @@
/*
* Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Format.h>
namespace Kernel {
[[noreturn]] void __panic(char const* file, unsigned int line, char const* function);
#define PANIC(...) \
do { \
critical_dmesgln("KERNEL PANIC! :^("); \
critical_dmesgln(__VA_ARGS__); \
__panic(__FILE__, __LINE__, __PRETTY_FUNCTION__); \
} while (0)
}
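
A usage sketch (hypothetical calling code) of the PANIC macro:

void example_validate_boot(int status)
{
    if (status != 0)
        PANIC("Unexpected boot status {}", status); // Logs via critical_dmesgln(), then __panic() never returns.
}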

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Library/ScopedCritical.h>
#include <Kernel/Arch/Processor.h>
namespace Kernel {
ScopedCritical::ScopedCritical()
{
enter();
}
ScopedCritical::~ScopedCritical()
{
if (m_valid)
leave();
}
ScopedCritical::ScopedCritical(ScopedCritical&& from)
: m_valid(exchange(from.m_valid, false))
{
}
ScopedCritical& ScopedCritical::operator=(ScopedCritical&& from)
{
if (&from != this) {
m_valid = exchange(from.m_valid, false);
}
return *this;
}
void ScopedCritical::leave()
{
VERIFY(m_valid);
m_valid = false;
Processor::leave_critical();
}
void ScopedCritical::enter()
{
VERIFY(!m_valid);
m_valid = true;
Processor::enter_critical();
}
}

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Noncopyable.h>
namespace Kernel {
class ScopedCritical {
AK_MAKE_NONCOPYABLE(ScopedCritical);
public:
ScopedCritical();
~ScopedCritical();
ScopedCritical(ScopedCritical&& from);
ScopedCritical& operator=(ScopedCritical&& from);
void leave();
void enter();
private:
bool m_valid { false };
};
}
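
A usage sketch (hypothetical calling code); the constructor and destructor call Processor::enter_critical() and Processor::leave_critical() as implemented in ScopedCritical.cpp above:

void example_critical_section()
{
    ScopedCritical critical; // Enters a critical section on the current processor.
    // ... code that must not be preempted on this CPU ...
}                            // Leaves the critical section when `critical` goes out of scope.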

Kernel/Library/StdLib.cpp (new file)
@@ -0,0 +1,255 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/Types.h>
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/Arch/SmapDisabler.h>
#include <Kernel/Library/StdLib.h>
#include <Kernel/Memory/MemoryManager.h>
ErrorOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<char const*> user_str, size_t user_str_size)
{
bool is_user = Kernel::Memory::is_user_range(user_str.vaddr(), user_str_size);
if (!is_user)
return EFAULT;
Kernel::SmapDisabler disabler;
void* fault_at;
ssize_t length = Kernel::safe_strnlen(user_str.unsafe_userspace_ptr(), user_str_size, fault_at);
if (length < 0) {
dbgln("copy_kstring_from_user({:p}, {}) failed at {} (strnlen)", static_cast<void const*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
return EFAULT;
}
char* buffer;
auto new_string = TRY(Kernel::KString::try_create_uninitialized(length, buffer));
buffer[length] = '\0';
if (length == 0)
return new_string;
if (!Kernel::safe_memcpy(buffer, user_str.unsafe_userspace_ptr(), (size_t)length, fault_at)) {
dbgln("copy_kstring_from_user({:p}, {}) failed at {} (memcpy)", static_cast<void const*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
return EFAULT;
}
return new_string;
}
ErrorOr<Duration> copy_time_from_user(timespec const* ts_user)
{
timespec ts {};
TRY(copy_from_user(&ts, ts_user, sizeof(timespec)));
return Duration::from_timespec(ts);
}
ErrorOr<Duration> copy_time_from_user(timeval const* tv_user)
{
timeval tv {};
TRY(copy_from_user(&tv, tv_user, sizeof(timeval)));
return Duration::from_timeval(tv);
}
template<>
ErrorOr<Duration> copy_time_from_user<timeval const>(Userspace<timeval const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Duration> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Duration> copy_time_from_user<timespec const>(Userspace<timespec const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Duration> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
Optional<u32> user_atomic_fetch_add_relaxed(u32 volatile* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}
Optional<u32> user_atomic_exchange_relaxed(u32 volatile* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_exchange_relaxed(var, val);
}
Optional<u32> user_atomic_load_relaxed(u32 volatile* var)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_load_relaxed(var);
}
bool user_atomic_store_relaxed(u32 volatile* var, u32 val)
{
if (FlatPtr(var) & 3)
return false; // not aligned!
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return false;
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_store_relaxed(var, val);
}
Optional<bool> user_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(&expected), sizeof(expected)));
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}
Optional<u32> user_atomic_fetch_and_relaxed(u32 volatile* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}
Optional<u32> user_atomic_fetch_and_not_relaxed(u32 volatile* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}
Optional<u32> user_atomic_fetch_or_relaxed(u32 volatile* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}
Optional<u32> user_atomic_fetch_xor_relaxed(u32 volatile* var, u32 val)
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return {};
Kernel::SmapDisabler disabler;
return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}
ErrorOr<void> copy_to_user(void* dest_ptr, void const* src_ptr, size_t n)
{
if (!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n))
return EFAULT;
VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n));
Kernel::SmapDisabler disabler;
void* fault_at;
if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
return EFAULT;
}
return {};
}
ErrorOr<void> copy_from_user(void* dest_ptr, void const* src_ptr, size_t n)
{
if (!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n))
return EFAULT;
VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n));
Kernel::SmapDisabler disabler;
void* fault_at;
if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
return EFAULT;
}
return {};
}
ErrorOr<void> memset_user(void* dest_ptr, int c, size_t n)
{
bool is_user = Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n);
if (!is_user)
return EFAULT;
Kernel::SmapDisabler disabler;
void* fault_at;
if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
return EFAULT;
}
return {};
}
#if defined(AK_COMPILER_CLANG) && defined(ENABLE_KERNEL_LTO)
// Due to a chicken-and-egg situation, certain linker-defined symbols that are added on-demand (like the GOT)
// need to be present before LTO bitcode files are compiled. And since we don't link to any native object files,
// the linker does not know that _GLOBAL_OFFSET_TABLE_ is needed, so it doesn't define it, so linking as a PIE fails.
// See https://bugs.llvm.org/show_bug.cgi?id=39634
FlatPtr missing_got_workaround()
{
extern volatile FlatPtr _GLOBAL_OFFSET_TABLE_;
return _GLOBAL_OFFSET_TABLE_;
}
#endif
extern "C" {
void const* memmem(void const* haystack, size_t haystack_length, void const* needle, size_t needle_length)
{
return AK::memmem(haystack, haystack_length, needle, needle_length);
}
// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail() __attribute__((used));
[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();
[[noreturn]] void __stack_chk_fail()
{
VERIFY_NOT_REACHED();
}
[[noreturn]] void __stack_chk_fail_local()
{
VERIFY_NOT_REACHED();
}
extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
VERIFY_NOT_REACHED();
return 0;
}
[[noreturn]] void __cxa_pure_virtual()
{
VERIFY_NOT_REACHED();
}
}

Kernel/Library/StdLib.h (new file)
@@ -0,0 +1,183 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Checked.h>
#include <AK/Error.h>
#include <AK/Forward.h>
#include <AK/Time.h>
#include <AK/Userspace.h>
#include <Kernel/Library/KString.h>
#include <Kernel/UnixTypes.h>
ErrorOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<char const*>, size_t);
ErrorOr<Duration> copy_time_from_user(timespec const*);
ErrorOr<Duration> copy_time_from_user(timeval const*);
template<typename T>
ErrorOr<Duration> copy_time_from_user(Userspace<T*>);
[[nodiscard]] Optional<u32> user_atomic_fetch_add_relaxed(u32 volatile* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_exchange_relaxed(u32 volatile* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_load_relaxed(u32 volatile* var);
[[nodiscard]] bool user_atomic_store_relaxed(u32 volatile* var, u32 val);
[[nodiscard]] Optional<bool> user_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val);
[[nodiscard]] Optional<u32> user_atomic_fetch_and_relaxed(u32 volatile* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_fetch_and_not_relaxed(u32 volatile* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_fetch_or_relaxed(u32 volatile* var, u32 val);
[[nodiscard]] Optional<u32> user_atomic_fetch_xor_relaxed(u32 volatile* var, u32 val);
ErrorOr<void> copy_to_user(void*, void const*, size_t);
ErrorOr<void> copy_from_user(void*, void const*, size_t);
ErrorOr<void> memset_user(void*, int, size_t);
extern "C" {
void* memcpy(void*, void const*, size_t);
[[nodiscard]] int strncmp(char const* s1, char const* s2, size_t n);
[[nodiscard]] char* strstr(char const* haystack, char const* needle);
[[nodiscard]] int strcmp(char const*, char const*);
[[nodiscard]] size_t strlen(char const*);
[[nodiscard]] size_t strnlen(char const*, size_t);
void* memset(void*, int, size_t);
[[nodiscard]] int memcmp(void const*, void const*, size_t);
void* memmove(void* dest, void const* src, size_t n);
void const* memmem(void const* haystack, size_t, void const* needle, size_t);
[[nodiscard]] inline u16 ntohs(u16 w) { return (w & 0xff) << 8 | ((w >> 8) & 0xff); }
[[nodiscard]] inline u16 htons(u16 w) { return (w & 0xff) << 8 | ((w >> 8) & 0xff); }
}
#define offsetof(type, member) __builtin_offsetof(type, member)
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_from_user(T* dest, T const* src)
{
static_assert(IsTriviallyCopyable<T>);
return copy_from_user(dest, src, sizeof(T));
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_to_user(T* dest, T const* src)
{
static_assert(IsTriviallyCopyable<T>);
return copy_to_user(dest, src, sizeof(T));
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_from_user(T* dest, Userspace<T const*> src)
{
static_assert(IsTriviallyCopyable<T>);
return copy_from_user(dest, src.unsafe_userspace_ptr(), sizeof(T));
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_from_user(T* dest, Userspace<T*> src)
{
static_assert(IsTriviallyCopyable<T>);
return copy_from_user(dest, src.unsafe_userspace_ptr(), sizeof(T));
}
#define DEPRECATE_COPY_FROM_USER_TYPE(T, REPLACEMENT) \
template<> \
[[nodiscard]] inline __attribute__((deprecated("use " #REPLACEMENT " instead"))) ErrorOr<void> copy_from_user<T>(T*, const T*) \
{ \
VERIFY_NOT_REACHED(); \
} \
template<> \
[[nodiscard]] inline __attribute__((deprecated("use " #REPLACEMENT " instead"))) ErrorOr<void> copy_from_user<T>(T*, Userspace<const T*>) \
{ \
VERIFY_NOT_REACHED(); \
} \
template<> \
[[nodiscard]] inline __attribute__((deprecated("use " #REPLACEMENT " instead"))) ErrorOr<void> copy_from_user<T>(T*, Userspace<T*>) \
{ \
VERIFY_NOT_REACHED(); \
}
DEPRECATE_COPY_FROM_USER_TYPE(timespec, copy_time_from_user)
DEPRECATE_COPY_FROM_USER_TYPE(timeval, copy_time_from_user)
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_to_user(Userspace<T*> dest, T const* src)
{
static_assert(IsTriviallyCopyable<T>);
return copy_to_user(dest.unsafe_userspace_ptr(), src, sizeof(T));
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_to_user(Userspace<T*> dest, void const* src, size_t size)
{
static_assert(IsTriviallyCopyable<T>);
return copy_to_user(dest.unsafe_userspace_ptr(), src, size);
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_from_user(void* dest, Userspace<T const*> src, size_t size)
{
static_assert(IsTriviallyCopyable<T>);
return copy_from_user(dest, src.unsafe_userspace_ptr(), size);
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_n_from_user(T* dest, T const* src, size_t count)
{
static_assert(IsTriviallyCopyable<T>);
Checked<size_t> size = sizeof(T);
size *= count;
if (size.has_overflow())
return EOVERFLOW;
return copy_from_user(dest, src, size.value());
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_n_to_user(T* dest, T const* src, size_t count)
{
static_assert(IsTriviallyCopyable<T>);
Checked<size_t> size = sizeof(T);
size *= count;
if (size.has_overflow())
return EOVERFLOW;
return copy_to_user(dest, src, size.value());
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_n_from_user(T* dest, Userspace<T const*> src, size_t count)
{
static_assert(IsTriviallyCopyable<T>);
Checked<size_t> size = sizeof(T);
size *= count;
if (size.has_overflow())
return EOVERFLOW;
return copy_from_user(dest, src.unsafe_userspace_ptr(), size.value());
}
template<typename T>
[[nodiscard]] inline ErrorOr<void> copy_n_to_user(Userspace<T*> dest, T const* src, size_t count)
{
static_assert(IsTriviallyCopyable<T>);
Checked<size_t> size = sizeof(T);
size *= count;
if (size.has_overflow())
return EOVERFLOW;
return copy_to_user(dest.unsafe_userspace_ptr(), src, size.value());
}
template<typename T>
inline ErrorOr<T> copy_typed_from_user(Userspace<T const*> user_data)
{
T data {};
TRY(copy_from_user(&data, user_data));
return data;
}
template<typename T>
inline ErrorOr<T> copy_typed_from_user(Userspace<T*> user_data)
{
T data {};
TRY(copy_from_user(&data, user_data));
return data;
}
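
Taken together, the typed wrappers above let syscall handlers copy whole structures and arrays across the user/kernel boundary without spelling out sizes by hand. A rough usage sketch follows; the struct, the handler name, and the sample data are illustrative and not part of this commit — only the copy_typed_from_user() and copy_n_to_user() calls reflect the real API:

// Hypothetical caller showing the intended pattern; not actual kernel code.
struct ExampleParams {
    Userspace<u32*> out_buffer;
    size_t out_count;
};

static ErrorOr<FlatPtr> handle_example(Userspace<ExampleParams const*> user_params)
{
    // One typed copy for the whole struct; a bad user pointer yields EFAULT.
    auto params = TRY(copy_typed_from_user(user_params));

    u32 results[4] { 1, 2, 3, 4 };

    // copy_n_to_user() checks count * sizeof(u32) for overflow before writing.
    auto count = min(params.out_count, array_size(results));
    TRY(copy_n_to_user(params.out_buffer, results, count));
    return 0;
}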

View file

@ -0,0 +1,68 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/

#include <Kernel/Library/UserOrKernelBuffer.h>
#include <Kernel/Memory/MemoryManager.h>

namespace Kernel {

bool UserOrKernelBuffer::is_kernel_buffer() const
{
return !Memory::is_user_address(VirtualAddress(m_buffer));
}

ErrorOr<NonnullOwnPtr<KString>> UserOrKernelBuffer::try_copy_into_kstring(size_t size) const
{
if (!m_buffer)
return EINVAL;
if (Memory::is_user_address(VirtualAddress(m_buffer))) {
char* buffer;
auto kstring = TRY(KString::try_create_uninitialized(size, buffer));
TRY(copy_from_user(buffer, m_buffer, size));
return kstring;
}
return KString::try_create(ReadonlyBytes { m_buffer, size });
}

ErrorOr<void> UserOrKernelBuffer::write(void const* src, size_t offset, size_t len)
{
if (!m_buffer)
return EFAULT;
if (Memory::is_user_address(VirtualAddress(m_buffer)))
return copy_to_user(m_buffer + offset, src, len);
memcpy(m_buffer + offset, src, len);
return {};
}

ErrorOr<void> UserOrKernelBuffer::read(void* dest, size_t offset, size_t len) const
{
if (!m_buffer)
return EFAULT;
if (Memory::is_user_address(VirtualAddress(m_buffer)))
return copy_from_user(dest, m_buffer + offset, len);
memcpy(dest, m_buffer + offset, len);
return {};
}

ErrorOr<void> UserOrKernelBuffer::memset(int value, size_t offset, size_t len)
{
if (!m_buffer)
return EFAULT;
if (Memory::is_user_address(VirtualAddress(m_buffer)))
return memset_user(m_buffer + offset, value, len);
::memset(m_buffer + offset, value, len);
return {};
}

}
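
For context, a device write() path built on this class never has to care whether the caller handed it a user or a kernel buffer. A minimal sketch, assuming a hypothetical ExampleDevice; only the UserOrKernelBuffer call reflects the real API:

// Hypothetical character-device write handler, signature approximated.
ErrorOr<size_t> ExampleDevice::write(OpenFileDescription&, u64, UserOrKernelBuffer const& buffer, size_t size)
{
    // Lands in copy_from_user() for user buffers and a plain memcpy() for
    // kernel buffers; either way the bytes end up in a KString we own.
    auto contents = TRY(buffer.try_copy_into_kstring(size));
    dbgln("ExampleDevice: got {} bytes: {}", size, contents->view());
    return size;
}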

View file

@ -0,0 +1,165 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/

#pragma once

#include <AK/Types.h>
#include <AK/Userspace.h>
#include <Kernel/API/POSIX/errno.h>
#include <Kernel/Library/StdLib.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/UnixTypes.h>

namespace Kernel {

class [[nodiscard]] UserOrKernelBuffer {
public:
UserOrKernelBuffer() = delete;
static UserOrKernelBuffer for_kernel_buffer(u8* kernel_buffer)
{
VERIFY(!kernel_buffer || !Memory::is_user_address(VirtualAddress(kernel_buffer)));
return UserOrKernelBuffer(kernel_buffer);
}
static ErrorOr<UserOrKernelBuffer> for_user_buffer(u8* user_buffer, size_t size)
{
if (user_buffer && !Memory::is_user_range(VirtualAddress(user_buffer), size))
return Error::from_errno(EFAULT);
return UserOrKernelBuffer(user_buffer);
}
template<typename UserspaceType>
static ErrorOr<UserOrKernelBuffer> for_user_buffer(UserspaceType userspace, size_t size)
{
if (!Memory::is_user_range(VirtualAddress(userspace.unsafe_userspace_ptr()), size))
return Error::from_errno(EFAULT);
return UserOrKernelBuffer(const_cast<u8*>((u8 const*)userspace.unsafe_userspace_ptr()));
}
[[nodiscard]] bool is_kernel_buffer() const;
[[nodiscard]] void const* user_or_kernel_ptr() const { return m_buffer; }
[[nodiscard]] UserOrKernelBuffer offset(size_t offset) const
{
if (!m_buffer)
return *this;
UserOrKernelBuffer offset_buffer = *this;
offset_buffer.m_buffer += offset;
VERIFY(offset_buffer.is_kernel_buffer() == is_kernel_buffer());
return offset_buffer;
}
ErrorOr<NonnullOwnPtr<KString>> try_copy_into_kstring(size_t) const;
ErrorOr<void> write(void const* src, size_t offset, size_t len);
ErrorOr<void> write(void const* src, size_t len)
{
return write(src, 0, len);
}
ErrorOr<void> write(ReadonlyBytes bytes)
{
return write(bytes.data(), bytes.size());
}
ErrorOr<void> read(void* dest, size_t offset, size_t len) const;
ErrorOr<void> read(void* dest, size_t len) const
{
return read(dest, 0, len);
}
ErrorOr<void> read(Bytes bytes) const
{
return read(bytes.data(), bytes.size());
}
ErrorOr<void> memset(int value, size_t offset, size_t len);
ErrorOr<void> memset(int value, size_t len)
{
return memset(value, 0, len);
}
template<size_t BUFFER_BYTES, typename F>
ErrorOr<size_t> write_buffered(size_t offset, size_t len, F f)
{
if (!m_buffer)
return EFAULT;
if (is_kernel_buffer()) {
// We're transferring directly to a kernel buffer, so bypass the intermediate stack buffer.
Bytes bytes { m_buffer + offset, len };
return f(bytes);
}
// The purpose of using a buffer on the stack is that we can
// avoid a bunch of small (e.g. 1-byte) copy_to_user calls
u8 buffer[BUFFER_BYTES];
size_t nwritten = 0;
while (nwritten < len) {
auto to_copy = min(sizeof(buffer), len - nwritten);
Bytes bytes { buffer, to_copy };
ErrorOr<size_t> copied_or_error = f(bytes);
if (copied_or_error.is_error())
return copied_or_error.release_error();
auto copied = copied_or_error.release_value();
VERIFY(copied <= to_copy);
TRY(write(buffer, nwritten, copied));
nwritten += copied;
if (copied < to_copy)
break;
}
return nwritten;
}
template<size_t BUFFER_BYTES, typename F>
ErrorOr<size_t> write_buffered(size_t len, F f)
{
return write_buffered<BUFFER_BYTES, F>(0, len, f);
}
template<size_t BUFFER_BYTES, typename F>
ErrorOr<size_t> read_buffered(size_t offset, size_t len, F f) const
{
if (!m_buffer)
return EFAULT;
if (is_kernel_buffer()) {
// We're transferring directly from a kernel buffer, so bypass the intermediate stack buffer.
return f({ m_buffer + offset, len });
}
// The purpose of using a buffer on the stack is that we can
// avoid a bunch of small (e.g. 1-byte) copy_from_user calls
u8 buffer[BUFFER_BYTES];
size_t nread = 0;
while (nread < len) {
auto to_copy = min(sizeof(buffer), len - nread);
TRY(read(buffer, nread, to_copy));
ReadonlyBytes read_only_bytes { buffer, to_copy };
ErrorOr<size_t> copied_or_error = f(read_only_bytes);
if (copied_or_error.is_error())
return copied_or_error.release_error();
auto copied = copied_or_error.release_value();
VERIFY(copied <= to_copy);
nread += copied;
if (copied < to_copy)
break;
}
return nread;
}
template<size_t BUFFER_BYTES, typename F>
ErrorOr<size_t> read_buffered(size_t len, F f) const
{
return read_buffered<BUFFER_BYTES, F>(0, len, f);
}

private:
explicit UserOrKernelBuffer(u8* buffer)
: m_buffer(buffer)
{
}
u8* m_buffer;
};

}
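
The buffered helpers at the bottom of the class are the piece drivers typically reach for: the callback only ever sees a kernel-side scratch span, and the class moves each chunk across the user/kernel boundary itself. A minimal sketch, assuming a made-up ExampleSource whose fill(Bytes) returns ErrorOr<size_t>; the free function and its names are placeholders:

// Sketch only: ExampleSource, fill(), and read_from_source() are not real kernel symbols.
ErrorOr<size_t> read_from_source(ExampleSource& source, UserOrKernelBuffer& buffer, size_t size)
{
    // The lambda is handed at most 512 bytes of stack scratch per iteration;
    // whatever it reports as produced is copied out via write(), and
    // producing fewer bytes than offered ends the loop early.
    return buffer.write_buffered<512>(size, [&](Bytes chunk) -> ErrorOr<size_t> {
        return source.fill(chunk);
    });
}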