
Kernel: Add support for 64-bit unaligned Mem-ops

Also let the compiler enforce the size and type restrictions
Authored by Hendiadyoin1 on 2021-07-01 14:51:41 +02:00; committed by Andreas Kling
parent 6b9cf8376e
commit 5f6c513610
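The "size and type restrictions" in the message are enforced with AK's Integral concept plus a requires-clause on sizeof(T), so passing a floating-point or 1-byte type stops compiling instead of tripping a runtime VERIFY. Below is a minimal standalone sketch of that pattern (illustrative only, not SerenityOS code): it uses std::integral in place of AK's Integral and memcpy in place of the kernel's byte-wise reassembly.

#include <concepts>
#include <cstdint>
#include <cstring>

template<std::integral T>
void read_possibly_unaligned(const uint8_t* where, T& data)
    requires(sizeof(T) == 8 || sizeof(T) == 4 || sizeof(T) == 2)
{
    // memcpy expresses an unaligned load portably; the kernel helper instead
    // checks alignment and reassembles the value byte by byte.
    std::memcpy(&data, where, sizeof(T));
}

int main()
{
    uint8_t buffer[16] = { 0x78, 0x56, 0x34, 0x12, 0xAB };
    uint32_t value = 0;
    read_possibly_unaligned(buffer + 1, value); // fine: 4-byte integral type, unaligned pointer

    // float f;   read_possibly_unaligned(buffer, f); // rejected: not an integral type
    // uint8_t b; read_possibly_unaligned(buffer, b); // rejected: sizeof(T) == 1 is not allowed
    return 0;
}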

@@ -7,6 +7,7 @@
 #pragma once
 #include <AK/Atomic.h>
+#include <AK/Concepts.h>
 #include <AK/Vector.h>
 #include <Kernel/Arch/x86/DescriptorTable.h>
@@ -29,47 +30,59 @@ inline u32 get_iopl_from_eflags(u32 eflags)
     return (eflags & iopl_mask) >> 12;
 }
-template<typename T>
-void read_possibly_unaligned_data(u8* where, T& data)
+template<Integral T>
+void read_possibly_unaligned_data(u8* where, T& data) requires(sizeof(T) == 8 || sizeof(T) == 4 || sizeof(T) == 2)
 {
-    // FIXME: Implement 64 bit unaligned data access
-    VERIFY(sizeof(T) == 8 || sizeof(T) == 4 || sizeof(T) == 2);
-    if (((FlatPtr)where % sizeof(T)) == 0) {
+    if (((FlatPtr)where % alignof(T)) == 0) {
         data = *(T*)where;
         return;
     }
-    if (sizeof(T) == 2) {
-        data = *(u8*)where | ((u16)(*(where + 1)) << 8);
+    if constexpr (sizeof(T) == 2) {
+        data = *where | ((u16)(*(where + 1)) << 8);
         return;
     }
-    if (sizeof(T) == 4) {
-        data = *(u8*)where | (((u32) * (where + 1)) << 8) | (((u32) * (where + 2)) << 16) | (((u32) * (u8*)(where + 3)) << 24);
+    if constexpr (sizeof(T) == 4) {
+        data = *where | (((u32) * (where + 1)) << 8) | (((u32) * (where + 2)) << 16) | (((u32) * (where + 3)) << 24);
+        return;
+    }
+    if constexpr (sizeof(T) == 8) {
+        data = *where | (((u32) * (where + 1)) << 8) | (((u64) * (where + 2)) << 16) | (((u64) * (where + 3)) << 24)
+            | (((u64) * (where + 4)) << 32) | (((u64) * (where + 5)) << 40) | (((u64) * (where + 6)) << 48) | (((u64) * (where + 7)) << 56);
         return;
     }
     VERIFY_NOT_REACHED();
 }
-template<typename T>
-void write_possibly_unaligned_data(u8* where, T data)
+template<Integral T>
+void write_possibly_unaligned_data(u8* where, T data) requires(sizeof(T) == 8 || sizeof(T) == 4 || sizeof(T) == 2)
 {
-    // FIXME: Implement 64 bit unaligned data access
-    VERIFY(sizeof(T) == 8 || sizeof(T) == 4 || sizeof(T) == 2);
-    if (((FlatPtr)where % sizeof(T)) == 0) {
+    if (((FlatPtr)where % alignof(T)) == 0) {
         *(T*)where = data;
         return;
     }
-    if (sizeof(T) == 2) {
+    if constexpr (sizeof(T) == 2) {
         where[0] = (u8)(data & 0xFF);
         where[1] = (u8)((data >> 8) & 0xFF);
         return;
     }
-    if (sizeof(T) == 4) {
+    if constexpr (sizeof(T) == 4) {
         where[0] = (u8)(data & 0xFF);
         where[1] = (u8)((data >> 8) & 0xFF);
         where[2] = (u8)((data >> 16) & 0xFF);
         where[3] = (u8)((data >> 24) & 0xFF);
         return;
     }
+    if constexpr (sizeof(T) == 8) {
+        where[0] = (u8)(data & 0xFF);
+        where[1] = (u8)((data >> 8) & 0xFF);
+        where[2] = (u8)((data >> 16) & 0xFF);
+        where[3] = (u8)((data >> 24) & 0xFF);
+        where[4] = (u8)((data >> 32) & 0xFF);
+        where[5] = (u8)((data >> 40) & 0xFF);
+        where[6] = (u8)((data >> 48) & 0xFF);
+        where[7] = (u8)((data >> 56) & 0xFF);
+        return;
+    }
     VERIFY_NOT_REACHED();
 }
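The unaligned path is plain little-endian byte assembly: byte i of the buffer carries bits [8*i, 8*i+8) of the value. A self-contained harness (assumed for illustration, not kernel code) that exercises the same 64-bit shift arithmetic the new sizeof(T) == 8 branches spell out long-hand:

#include <cassert>
#include <cstdint>

int main()
{
    uint8_t buffer[16] = {};
    uint64_t const value = 0x1122334455667788ULL;
    uint8_t* where = buffer + 3; // deliberately misaligned for a u64

    // Write path: one byte per 8-bit shift, mirroring where[0]..where[7] in the patch.
    for (int i = 0; i < 8; ++i)
        where[i] = (uint8_t)((value >> (8 * i)) & 0xFF);

    // Read path: widen each byte before shifting it back into place.
    uint64_t readback = 0;
    for (int i = 0; i < 8; ++i)
        readback |= ((uint64_t)where[i]) << (8 * i);

    assert(readback == value); // round-trips regardless of the pointer's alignment
    return 0;
}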