Kernel: Make copy_to/from_user safe and remove unnecessary checks

Since the CPU already does almost all of the necessary validation steps
for us, we don't really need to attempt to do this validation manually.
Doing it ourselves doesn't work very reliably anyway, because we'd have
to account for other processors modifying virtual memory, and for e.g.
pages not being able to be allocated due to insufficient resources.

So, change copy_to/from_user (and the associated helper functions) to
use the new safe_memcpy, which returns whether it succeeded or not. The
only manual validation step still needed (which the CPU can't perform
for us) is making sure the pointers provided by user mode aren't
pointing to kernel mappings.

To make it easier to read/write from/to either kernel or user mode
data, add the UserOrKernelBuffer helper class, which will internally
either use copy_from/to_user, or memcpy directly, or pass the data
through using a temporary buffer on the stack.

Last but not least, we need to keep syscall params trivial, as we need
to copy them from/to user mode using copy_from/to_user.
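For illustration only, a user-copy helper shaped like what the message describes might look roughly like the sketch below. This is a minimal sketch, not the code in this commit: the safe_memcpy signature is inferred from the calls in the hunks further down, and is_user_range() stands in for the "not a kernel mapping" check and is an assumption here.

    // Sketch, not the commit's implementation. Assumes:
    //  - safe_memcpy(dest, src, n, fault_at): copies n bytes, returns false on a
    //    fault (signature inferred from the call sites in the diff below)
    //  - is_user_range(vaddr, size): hypothetical check that the whole range lies
    //    in user-space mappings
    [[nodiscard]] static bool copy_from_user(void* dest, const void* src, size_t n)
    {
        // The one check the CPU can't do for us: reject pointers into kernel mappings.
        if (!is_user_range(VirtualAddress(src), n))
            return false;
        SmapDisabler disabler; // briefly allow supervisor access to user pages
        void* fault_at;
        // Let the MMU catch everything else; a fault simply makes safe_memcpy return false.
        return safe_memcpy(dest, src, n, fault_at);
    }
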
parent 7d1b8417bd
commit c8d9f1b9c9
149 changed files with 1585 additions and 1244 deletions

@@ -89,7 +89,7 @@ void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new
     });
 }
 
-void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
+void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const UserOrKernelBuffer& data)
 {
     (void)size;
     (void)data;

@@ -39,7 +39,7 @@ public:
     Inode& inode() { return *m_inode; }
     const Inode& inode() const { return *m_inode; }
 
-    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
+    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const UserOrKernelBuffer&);
     void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
 
     size_t amount_dirty() const;

@@ -767,39 +767,6 @@ bool MemoryManager::validate_user_stack(const Process& process, VirtualAddress v
     return region && region->is_user_accessible() && region->is_stack();
 }
 
-bool MemoryManager::validate_kernel_read(const Process& process, VirtualAddress vaddr, size_t size) const
-{
-    ScopedSpinLock lock(s_mm_lock);
-    return validate_range<AccessSpace::Kernel, AccessType::Read>(process, vaddr, size);
-}
-
-bool MemoryManager::can_read_without_faulting(const Process& process, VirtualAddress vaddr, size_t size) const
-{
-    // FIXME: Use the size argument!
-    UNUSED_PARAM(size);
-    ScopedSpinLock lock(s_mm_lock);
-    auto* pte = const_cast<MemoryManager*>(this)->pte(process.page_directory(), vaddr);
-    if (!pte)
-        return false;
-    return pte->is_present();
-}
-
-bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr, size_t size) const
-{
-    if (!is_user_address(vaddr))
-        return false;
-    ScopedSpinLock lock(s_mm_lock);
-    return validate_range<AccessSpace::User, AccessType::Read>(process, vaddr, size);
-}
-
-bool MemoryManager::validate_user_write(const Process& process, VirtualAddress vaddr, size_t size) const
-{
-    if (!is_user_address(vaddr))
-        return false;
-    ScopedSpinLock lock(s_mm_lock);
-    return validate_range<AccessSpace::User, AccessType::Write>(process, vaddr, size);
-}
-
 void MemoryManager::register_vmobject(VMObject& vmobject)
 {
     ScopedSpinLock lock(s_mm_lock);

@@ -100,12 +100,6 @@ public:
     void enter_process_paging_scope(Process&);
 
     bool validate_user_stack(const Process&, VirtualAddress) const;
-    bool validate_user_read(const Process&, VirtualAddress, size_t) const;
-    bool validate_user_write(const Process&, VirtualAddress, size_t) const;
-
-    bool validate_kernel_read(const Process&, VirtualAddress, size_t) const;
-
-    bool can_read_without_faulting(const Process&, VirtualAddress, size_t) const;
 
     enum class ShouldZeroFill {
         No,

@@ -439,13 +439,24 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
         klog() << "MM: handle_cow_fault was unable to allocate a physical page";
         return PageFaultResponse::OutOfMemory;
     }
-    auto physical_page_to_copy = move(page_slot);
+
     u8* dest_ptr = MM.quickmap_page(*page);
     const u8* src_ptr = vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
 #ifdef PAGE_FAULT_DEBUG
-    dbg() << " >> COW " << page->paddr() << " <- " << physical_page_to_copy->paddr();
+    dbg() << " >> COW " << page->paddr() << " <- " << page_slot->paddr();
 #endif
-    copy_from_user(dest_ptr, src_ptr, PAGE_SIZE);
+    {
+        SmapDisabler disabler;
+        void* fault_at;
+        if (!safe_memcpy(dest_ptr, src_ptr, PAGE_SIZE, fault_at)) {
+            if ((u8*)fault_at >= dest_ptr && (u8*)fault_at <= dest_ptr + PAGE_SIZE)
+                dbg() << " >> COW: error copying page " << page_slot->paddr() << "/" << VirtualAddress(src_ptr) << " to " << page->paddr() << "/" << VirtualAddress(dest_ptr) << ": failed to write to page at " << VirtualAddress(fault_at);
+            else if ((u8*)fault_at >= src_ptr && (u8*)fault_at <= src_ptr + PAGE_SIZE)
+                dbg() << " >> COW: error copying page " << page_slot->paddr() << "/" << VirtualAddress(src_ptr) << " to " << page->paddr() << "/" << VirtualAddress(dest_ptr) << ": failed to read from page at " << VirtualAddress(fault_at);
+            else
+                ASSERT_NOT_REACHED();
+        }
+    }
     page_slot = move(page);
     MM.unquickmap_page();
     set_should_cow(page_index_in_region, false);

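For reference, the contract implied by the safe_memcpy calls in the hunk above is roughly: copy n bytes and, instead of letting a page fault take down the kernel, return false and report the faulting address through the out-parameter. A sketch of the assumed declaration follows; the exact signature is not part of this diff.

    // Assumed declaration, inferred from the call sites above: returns false on a
    // fault and stores the faulting address in fault_at.
    [[nodiscard]] bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at);
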
@@ -489,7 +500,8 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     sti();
     u8 page_buffer[PAGE_SIZE];
     auto& inode = inode_vmobject.inode();
-    auto nread = inode.read_bytes((first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, page_buffer, nullptr);
+    auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
+    auto nread = inode.read_bytes((first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
     if (nread < 0) {
         klog() << "MM: handle_inode_fault had error (" << nread << ") while reading!";
         return PageFaultResponse::ShouldCrash;

@@ -506,7 +518,15 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     }
 
     u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
-    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
+    {
+        void* fault_at;
+        if (!safe_memcpy(dest_ptr, page_buffer, PAGE_SIZE, fault_at)) {
+            if ((u8*)fault_at >= dest_ptr && (u8*)fault_at <= dest_ptr + PAGE_SIZE)
+                dbg() << " >> inode fault: error copying data to " << vmobject_physical_page_entry->paddr() << "/" << VirtualAddress(dest_ptr) << ", failed at " << VirtualAddress(fault_at);
+            else
+                ASSERT_NOT_REACHED();
+        }
+    }
     MM.unquickmap_page();
 
     remap_page(page_index_in_region);

@@ -79,6 +79,7 @@ public:
     unsigned access() const { return m_access; }
 
+    void set_name(const String& name) { m_name = name; }
     void set_name(String&& name) { m_name = move(name); }
 
     const VMObject& vmobject() const { return *m_vmobject; }
     VMObject& vmobject() { return *m_vmobject; }