
SharedBuffer: Fix deadlock on destroy

We were locking the list of references, and then destroying the
reference while that lock was still held, which deadlocked.

It's more straightforward to just remove the per-reference lock: the
syscalls all have to lock the full list anyway, so let's just do that
and avoid the hassle.
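To make the fix concrete, here is a minimal userspace sketch of the new scheme (not the kernel code; std::mutex and std::map stand in for the kernel's Lockable, LOCKER, and HashMap, and all names here are illustrative). One coarse lock guards both the buffer map and every per-buffer reference list, so tearing down a reference can never destroy a lock that is still held:

#include <algorithm>
#include <map>
#include <memory>
#include <mutex>
#include <vector>

struct Buffer {
    int id { 0 };
    std::vector<int> ref_pids; // guarded by g_buffers_lock; no per-reference lock
};

static std::mutex g_buffers_lock;
static std::map<int, std::unique_ptr<Buffer>> g_buffers;

void release_ref(int buffer_id, int pid)
{
    std::lock_guard<std::mutex> locker(g_buffers_lock); // one coarse lock for everything
    auto it = g_buffers.find(buffer_id);
    if (it == g_buffers.end())
        return;
    auto& pids = it->second->ref_pids;
    pids.erase(std::remove(pids.begin(), pids.end(), pid), pids.end());
    if (pids.empty())
        g_buffers.erase(it); // safe: the buffer owns no lock we are holding
}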

While I'm at it, also move the SharedBuffer code out to its own file, as it's
getting a little long and unwieldy, and Process.cpp is already huge.
Robin Burchell 2019-07-16 15:03:39 +02:00 committed by Andreas Kling
parent d53e54f8bf
commit 6aa77d1999
4 changed files with 170 additions and 150 deletions

Kernel/SharedBuffer.cpp (new file, 117 additions)

@@ -0,0 +1,117 @@
#include <Kernel/SharedBuffer.h>
#include <Kernel/Process.h>
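
// Lazily-constructed global registry of all SharedBuffers, keyed by buffer id.
// Its single lock now guards both the map and every buffer's reference list.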
Lockable<HashMap<int, OwnPtr<SharedBuffer>>>& shared_buffers()
{
static Lockable<HashMap<int, OwnPtr<SharedBuffer>>>* map;
if (!map)
map = new Lockable<HashMap<int, OwnPtr<SharedBuffer>>>;
return *map;
}
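
// Returns true if peer_pid currently holds a reference to this buffer.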
bool SharedBuffer::is_shared_with(pid_t peer_pid)
{
LOCKER(shared_buffers().lock());
for (auto& ref : m_refs) {
if (ref.pid == peer_pid) {
return true;
}
}
return false;
}
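
// Returns the buffer's address in `process`, lazily mapping it into the
// process's address space on first access.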
void* SharedBuffer::get_address(Process& process)
{
LOCKER(shared_buffers().lock());
ASSERT(is_shared_with(process.pid()));
for (auto& ref : m_refs) {
if (ref.pid == process.pid()) {
if (ref.region == nullptr) {
ref.region = process.allocate_region_with_vmo(VirtualAddress(), size(), m_vmo, 0, "SharedBuffer", PROT_READ | (m_writable ? PROT_WRITE : 0));
ref.region->set_shared(true);
}
return ref.region->vaddr().as_ptr();
}
}
ASSERT_NOT_REACHED();
}
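
// Grants peer_pid access to this buffer, or bumps its refcount if it
// already holds a reference.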
void SharedBuffer::share_with(pid_t peer_pid)
{
LOCKER(shared_buffers().lock());
for (auto& ref : m_refs) {
if (ref.pid == peer_pid) {
ref.count++;
return;
}
}
m_refs.append(Reference(peer_pid));
}
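
// Drops one of `process`'s references. When the count hits zero, the
// mapped region is deallocated, the reference removed, and the buffer
// destroyed if no other process still holds it.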
void SharedBuffer::release(Process& process)
{
LOCKER(shared_buffers().lock());
for (int i = 0; i < m_refs.size(); ++i) {
auto& ref = m_refs[i];
if (ref.pid == process.pid()) {
if (--ref.count == 0) {
#ifdef SHARED_BUFFER_DEBUG
dbgprintf("Releasing shared buffer reference on %d of size %d by PID %d\n", m_shared_buffer_id, size(), process.pid());
#endif
process.deallocate_region(*ref.region);
m_refs.remove(i);
#ifdef SHARED_BUFFER_DEBUG
dbgprintf("Released shared buffer reference on %d of size %d by PID %d\n", m_shared_buffer_id, size(), process.pid());
#endif
destroy_if_unused();
return;
}
}
}
}
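
// Removes pid's reference entirely, regardless of its refcount; the
// region is not deallocated here.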
void SharedBuffer::disown(pid_t pid)
{
LOCKER(shared_buffers().lock());
for (int i = 0; i < m_refs.size(); ++i) {
auto& ref = m_refs[i];
if (ref.pid == pid) {
#ifdef SHARED_BUFFER_DEBUG
dbgprintf("Disowning shared buffer %d of size %d by PID %d\n", m_shared_buffer_id, size(), pid);
#endif
m_refs.remove(i);
#ifdef SHARED_BUFFER_DEBUG
dbgprintf("Disowned shared buffer %d of size %d by PID %d\n", m_shared_buffer_id, size(), pid);
#endif
destroy_if_unused();
return;
}
}
}
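
// Removes this buffer from the global map once no references remain.
// Callers (release/disown) already hold shared_buffers().lock(); taking
// it again here relies on the kernel Lock being recursive.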
void SharedBuffer::destroy_if_unused()
{
LOCKER(shared_buffers().lock());
if (m_refs.size() == 0) {
#ifdef SHARED_BUFFER_DEBUG
kprintf("Destroying unused SharedBuffer{%p} id: %d\n", this, m_shared_buffer_id);
#endif
auto count_before = shared_buffers().resource().size();
shared_buffers().resource().remove(m_shared_buffer_id);
ASSERT(count_before != shared_buffers().resource().size());
}
}
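
// Makes the buffer read-only: future mappings lose PROT_WRITE, and every
// existing mapping is write-protected and remapped.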
void SharedBuffer::seal()
{
LOCKER(shared_buffers().lock());
m_writable = false;
for (auto& ref : m_refs) {
if (ref.region) {
ref.region->set_writable(false);
MM.remap_region(*ref.region->page_directory(), *ref.region);
}
}
}
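
For context, the commit message's point about the syscalls locking the full list looks roughly like this on the caller side. This is a hedged sketch, not part of the diff: the real syscall bodies live in Process.cpp and may differ in naming and error handling, and it assumes the AK::HashMap iterator API of the time.

int Process::sys$release_shared_buffer(int shared_buffer_id)
{
    LOCKER(shared_buffers().lock()); // lock the full list up front
    auto it = shared_buffers().resource().find(shared_buffer_id);
    if (it == shared_buffers().resource().end())
        return -EINVAL;
    auto& shared_buffer = *(*it).value;
    if (!shared_buffer.is_shared_with(pid()))
        return -EPERM;
    // May remove the buffer from the map; safe because we already hold
    // the (recursive) lock that destroy_if_unused() re-takes.
    shared_buffer.release(*this);
    return 0;
}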