Mirror of https://github.com/RGBCube/serenity, synced 2025-07-26 23:27:43 +00:00
AK: Rename KB, MB, GB to KiB, MiB, GiB
The SI prefixes "k", "M", "G" mean "10^3", "10^6", "10^9". The IEC prefixes "Ki", "Mi", "Gi" mean "2^10", "2^20", "2^30". Let's use the correct names, at least in code. This only changes the names of the constants; there is no other behavior change.
commit 430b265cd4
parent a68650a7b4
31 changed files with 69 additions and 69 deletions
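For context, the IEC constants that the call sites below now use could be defined roughly as in the sketch here. This is only a sketch: the diff covers call sites, not AK itself, and the exact header and integer types are assumptions.

    // Sketch only (assumed types/location, not part of this diff):
    // IEC binary-prefix constants as implied by the rename.
    #include <stddef.h>

    constexpr size_t KiB = 1024;        // 2^10 bytes
    constexpr size_t MiB = 1024 * KiB;  // 2^20 bytes
    constexpr size_t GiB = 1024 * MiB;  // 2^30 bytes

Because the old constants were already powers of two, an expression like 4 * MiB still evaluates to the same 4194304 bytes that 4 * MB did; only the misleading SI names change.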
@@ -355,7 +355,7 @@ struct SC_create_thread_params {
     // ... ok, if you say so posix. Guess we get to lie to people about guard page size
     unsigned int m_guard_page_size = 0;          // Rounded up to PAGE_SIZE
     unsigned int m_reported_guard_page_size = 0; // The lie we tell callers
-    unsigned int m_stack_size = 4 * MB;          // Default PTHREAD_STACK_MIN
+    unsigned int m_stack_size = 4 * MiB;         // Default PTHREAD_STACK_MIN
     Userspace<void*> m_stack_location;           // nullptr means any, o.w. process virtual address
 };
 
@@ -34,7 +34,7 @@ namespace Kernel {
 MappedROM map_bios()
 {
     MappedROM mapping;
-    mapping.size = 128 * KB;
+    mapping.size = 128 * KiB;
     mapping.paddr = PhysicalAddress(0xe0000);
     mapping.region = MM.allocate_kernel_region(mapping.paddr, PAGE_ROUND_UP(mapping.size), {}, Region::Access::Read);
     return mapping;
@@ -837,7 +837,7 @@ KResult Plan9FSInode::traverse_as_directory(Function<bool(const FS::DirectoryEnt
     }
 
     u64 offset = 0;
-    u32 count = fs().adjust_buffer_size(8 * MB);
+    u32 count = fs().adjust_buffer_size(8 * MiB);
 
     while (true) {
         Plan9FS::Message message { fs(), Plan9FS::Message::Type::Treaddir };
@@ -105,7 +105,7 @@ private:
     Atomic<u32> m_next_fid { 1 };
 
     ProtocolVersion m_remote_protocol_version { ProtocolVersion::v9P2000 };
-    size_t m_max_message_size { 4 * KB };
+    size_t m_max_message_size { 4 * KiB };
 
     Lock m_send_lock { "Plan9FS send" };
     Atomic<bool> m_someone_is_reading { false };
@@ -128,10 +128,10 @@ void for_each_allocator(Callback callback)
 
 void slab_alloc_init()
 {
-    s_slab_allocator_16.init(128 * KB);
-    s_slab_allocator_32.init(128 * KB);
-    s_slab_allocator_64.init(512 * KB);
-    s_slab_allocator_128.init(512 * KB);
+    s_slab_allocator_16.init(128 * KiB);
+    s_slab_allocator_32.init(128 * KiB);
+    s_slab_allocator_64.init(512 * KiB);
+    s_slab_allocator_128.init(512 * KiB);
 }
 
 void* slab_alloc(size_t slab_size)
@@ -48,12 +48,12 @@ struct AllocationHeader {
     u8 data[0];
 };
 
-#define BASE_PHYSICAL (0xc0000000 + (4 * MB))
+#define BASE_PHYSICAL (0xc0000000 + (4 * MiB))
 #define CHUNK_SIZE 32
-#define POOL_SIZE (3 * MB)
+#define POOL_SIZE (3 * MiB)
 
-#define ETERNAL_BASE_PHYSICAL (0xc0000000 + (2 * MB))
-#define ETERNAL_RANGE_SIZE (2 * MB)
+#define ETERNAL_BASE_PHYSICAL (0xc0000000 + (2 * MiB))
+#define ETERNAL_RANGE_SIZE (2 * MiB)
 
 static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
 
@@ -45,7 +45,7 @@ KBuffer KBufferBuilder::build()
 }
 
 KBufferBuilder::KBufferBuilder()
-    : m_buffer(KBuffer::create_with_size(4 * MB, Region::Access::Read | Region::Access::Write))
+    : m_buffer(KBuffer::create_with_size(4 * MiB, Region::Access::Read | Region::Access::Write))
 {
 }
 
@@ -105,7 +105,7 @@ void NetworkTask_main()
         return packet_size;
     };
 
-    size_t buffer_size = 64 * KB;
+    size_t buffer_size = 64 * KiB;
     auto buffer_region = MM.allocate_kernel_region(buffer_size, "Kernel Packet Buffer", Region::Access::Read | Region::Access::Write, false, true);
     auto buffer = (u8*)buffer_region->vaddr().get();
 
@@ -33,7 +33,7 @@
 namespace Kernel {
 
 PerformanceEventBuffer::PerformanceEventBuffer()
-    : m_buffer(KBuffer::create_with_size(4 * MB))
+    : m_buffer(KBuffer::create_with_size(4 * MiB))
 {
 }
 
@@ -64,7 +64,7 @@ void start(Process& process)
     s_pid = process.pid();
 
     if (!s_profiling_buffer) {
-        s_profiling_buffer = RefPtr<KBufferImpl>(KBuffer::create_with_size(8 * MB).impl()).leak_ref();
+        s_profiling_buffer = RefPtr<KBufferImpl>(KBuffer::create_with_size(8 * MiB).impl()).leak_ref();
         s_profiling_buffer->region().commit();
         s_slot_count = s_profiling_buffer->size() / sizeof(Sample);
     }
@@ -512,7 +512,7 @@ public:
     }
 
     static constexpr u32 default_kernel_stack_size = 65536;
-    static constexpr u32 default_userspace_stack_size = 4 * MB;
+    static constexpr u32 default_userspace_stack_size = 4 * MiB;
 
     ThreadTracer* tracer() { return m_tracer.ptr(); }
     void start_tracing_from(ProcessID tracer);
@@ -101,8 +101,8 @@ void MemoryManager::parse_memory_map()
         if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
             continue;
 
-        // FIXME: Maybe make use of stuff below the 1MB mark?
-        if (mmap->addr < (1 * MB))
+        // FIXME: Maybe make use of stuff below the 1MiB mark?
+        if (mmap->addr < (1 * MiB))
             continue;
 
         if ((mmap->addr + mmap->len) > 0xffffffff)
@@ -131,9 +131,9 @@ void MemoryManager::parse_memory_map()
         for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
             auto addr = PhysicalAddress(page_base);
 
-            if (page_base < 7 * MB) {
+            if (page_base < 7 * MiB) {
                 // nothing
-            } else if (page_base >= 7 * MB && page_base < 8 * MB) {
+            } else if (page_base >= 7 * MiB && page_base < 8 * MiB) {
                 if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
                     m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
                     region = m_super_physical_regions.last();
@@ -80,7 +80,7 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
     if (parent_range_allocator) {
        m_range_allocator.initialize_from_parent(*parent_range_allocator);
     } else {
-        size_t random_offset = (get_fast_random<u8>() % 32 * MB) & PAGE_MASK;
+        size_t random_offset = (get_fast_random<u8>() % 32 * MiB) & PAGE_MASK;
         u32 base = userspace_range_base + random_offset;
         m_range_allocator.initialize_with_range(VirtualAddress(base), userspace_range_ceiling - base);
     }
@@ -102,7 +102,7 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
         MM.unquickmap_page();
     }
 
-    // Clone bottom 2 MB of mappings from kernel_page_directory
+    // Clone bottom 2 MiB of mappings from kernel_page_directory
     PageDirectoryEntry buffer;
     auto* kernel_pd = MM.quickmap_pd(MM.kernel_page_directory(), 0);
     memcpy(&buffer, kernel_pd, sizeof(PageDirectoryEntry));