Revert "Kernel: Avoid a memcpy() of the whole block when paging in from inode"
This reverts commit 11896d0e26.
This caused a race where other processes using the same InodeVMObject
could end up accessing the newly-mapped physical page before we've
actually filled it with bytes from disk.
It would be nice to avoid these copies without breaking anything.
parent f5d779f47e
commit dde10f534f

1 changed file with 17 additions and 14 deletions
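
The race described in the commit message is a publish-before-fill bug: commit 11896d0e26 had the fault handler install the physical page into the shared InodeVMObject and map it before the blocking disk read had filled it, so another process using the same VMObject could find the page "already present" and read unfilled memory. The following is a minimal userspace sketch of that bug class, not kernel code; the thread names and the atomic pointer standing in for the VMObject's page slot are illustrative only (the kernel's real mechanism involves page tables and the paging lock).

#include <atomic>
#include <cstdio>
#include <cstring>
#include <thread>

// Plays the role of the shared InodeVMObject page slot: non-null means
// "page is present", which is the check a concurrent user performs.
static std::atomic<char*> g_shared_page { nullptr };

static void pager()
{
    char* page = new char[4096];
    // BUG (the pattern the revert removes): publish the page first...
    g_shared_page.store(page, std::memory_order_release);
    // ...then fill it, as if by a slow, blocking disk read.
    memset(page, 'A', 4096);
}

static void reader()
{
    // A second "process" using the same page: it sees the slot
    // non-null, treats the page as present, and reads it.
    char* page = nullptr;
    while (!(page = g_shared_page.load(std::memory_order_acquire))) { }
    // If this wins the race against memset(), it observes unfilled bytes.
    printf("first byte: %d\n", page[0]);
}

int main()
{
    std::thread t1(pager);
    std::thread t2(reader);
    t1.join();
    t2.join();
    delete[] g_shared_page.load();
}

The revert below restores the safe ordering: read into a private buffer first, and only publish and fill the physical page afterwards, in a window where nothing else can observe it.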
@@ -342,14 +342,14 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
     ASSERT(vmo.is_inode());

     auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);

     auto& vmo_page = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];

     InterruptFlagSaver saver;

-    bool interrupts_were_enabled = are_interrupts_enabled();
-    if (!interrupts_were_enabled)
-        sti();
+    sti();
     LOCKER(vmo.m_paging_lock);
+    cli();
+
     if (!vmo_page.is_null()) {
 #ifdef PAGE_FAULT_DEBUG
@@ -362,6 +362,19 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
 #ifdef MM_DEBUG
     dbgprintf("MM: page_in_from_inode ready to read from inode\n");
 #endif
+    sti();
+    u8 page_buffer[PAGE_SIZE];
+    auto& inode = inode_vmobject.inode();
+    auto nread = inode.read_bytes((region.first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, page_buffer, nullptr);
+    if (nread < 0) {
+        kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
+        return false;
+    }
+    if (nread < PAGE_SIZE) {
+        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
+        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
+    }
+    cli();
     vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
     if (vmo_page.is_null()) {
         kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
@@ -369,17 +382,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
     }
     remap_region_page(region, page_index_in_region);
     u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
-    auto& inode = inode_vmobject.inode();
-    auto nread = inode.read_bytes((region.first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, dest_ptr, nullptr);
-    if (nread < 0) {
-        kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
-        return false;
-    }
-    if (nread < PAGE_SIZE) {
-        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
-        memset(dest_ptr + nread, 0, PAGE_SIZE - nread);
-    }
-
+    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
     return true;
 }
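
For readability, here is the restored function body stitched together from the three hunks above. It is a reconstruction, not the verbatim file: the declaration of vmo and the body of the early-return branch fall outside the hunks and are assumed here, and the interrupt comments reflect the uniprocessor kernel of the time.

bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
{
    auto& vmo = region.vmo(); // assumed: the hunks use `vmo` but its declaration is not shown
    ASSERT(vmo.is_inode());

    auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);
    auto& vmo_page = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];

    InterruptFlagSaver saver;

    sti(); // taking the paging lock may block
    LOCKER(vmo.m_paging_lock);
    cli();

    if (!vmo_page.is_null()) {
        // Someone else paged it in while we waited for the lock.
        // (Assumed branch body; it falls between the hunks. Debug logging elided.)
        remap_region_page(region, page_index_in_region);
        return true;
    }

    sti(); // the disk read below may block, so re-enable interrupts
    u8 page_buffer[PAGE_SIZE];
    auto& inode = inode_vmobject.inode();
    auto nread = inode.read_bytes((region.first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, page_buffer, nullptr);
    if (nread < 0) {
        kprintf("MM: page_in_from_inode had error (%d) while reading!\n", nread);
        return false;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }

    cli(); // nothing can preempt us from here until we return (single CPU)
    vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
    if (vmo_page.is_null()) {
        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
        return false;
    }
    remap_region_page(region, page_index_in_region);
    u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
    return true;
}

The key property is that the blocking read_bytes() call happens while vmo_page is still null: the page only becomes visible through the shared VMObject after its contents are final, so no interleaving can expose unfilled bytes.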