1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 21:07:35 +00:00

AK+Kernel: Disallow implicitly lifting pointers to OwnPtr's

This doesn't really _fix_ anything; it just gets rid of the API and
instead makes the users explicitly use `adopt_own_if_nonnull()`.
This commit is contained in:
Ali Mohammad Pur 2021-05-30 21:09:23 +04:30 committed by Ali Mohammad Pur
parent 3bc2527ce7
commit 2b5732ab77
6 changed files with 27 additions and 20 deletions

View file

@@ -15,13 +15,12 @@ template<typename T>
class OwnPtr { class OwnPtr {
public: public:
OwnPtr() = default; OwnPtr() = default;
explicit OwnPtr(T* ptr)
: m_ptr(ptr) OwnPtr(decltype(nullptr))
: m_ptr(nullptr)
{ {
static_assert(
requires { requires typename T::AllowOwnPtr()(); } || !requires(T obj) { requires !typename T::AllowOwnPtr()(); obj.ref(); obj.unref(); },
"Use RefPtr<> for RefCounted types");
} }
OwnPtr(OwnPtr&& other) OwnPtr(OwnPtr&& other)
: m_ptr(other.leak_ptr()) : m_ptr(other.leak_ptr())
{ {
@@ -96,13 +95,7 @@
return *this; return *this;
} }
OwnPtr& operator=(T* ptr) OwnPtr& operator=(T* ptr) = delete;
{
if (m_ptr != ptr)
delete m_ptr;
m_ptr = ptr;
return *this;
}
OwnPtr& operator=(std::nullptr_t) OwnPtr& operator=(std::nullptr_t)
{ {
@@ -181,6 +174,20 @@ public:
::swap(m_ptr, other.m_ptr); ::swap(m_ptr, other.m_ptr);
} }
static OwnPtr lift(T* ptr)
{
return OwnPtr { ptr };
}
protected:
explicit OwnPtr(T* ptr)
: m_ptr(ptr)
{
static_assert(
requires { requires typename T::AllowOwnPtr()(); } || !requires(T obj) { requires !typename T::AllowOwnPtr()(); obj.ref(); obj.unref(); },
"Use RefPtr<> for RefCounted types");
}
private: private:
T* m_ptr = nullptr; T* m_ptr = nullptr;
}; };
@@ -195,7 +202,7 @@ template<typename T>
inline OwnPtr<T> adopt_own_if_nonnull(T* object) inline OwnPtr<T> adopt_own_if_nonnull(T* object)
{ {
if (object) if (object)
return OwnPtr<T>(object); return OwnPtr<T>::lift(object);
return {}; return {};
} }

View file

@@ -576,7 +576,7 @@ KResult Plan9FS::read_and_dispatch_one_message()
auto completion = optional_completion.value(); auto completion = optional_completion.value();
ScopedSpinLock lock(completion->lock); ScopedSpinLock lock(completion->lock);
completion->result = KSuccess; completion->result = KSuccess;
completion->message = new Message { buffer.release_nonnull() }; completion->message = adopt_own_if_nonnull(new Message { buffer.release_nonnull() });
completion->completed = true; completion->completed = true;
m_completions.remove(header.tag); m_completions.remove(header.tag);

View file

@@ -1102,7 +1102,7 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
} }
if (!cached_data) if (!cached_data)
cached_data = new ProcFSInodeData; cached_data = adopt_own_if_nonnull(new ProcFSInodeData);
auto& buffer = static_cast<ProcFSInodeData&>(*cached_data).buffer; auto& buffer = static_cast<ProcFSInodeData&>(*cached_data).buffer;
if (buffer) { if (buffer) {
// If we're reusing the buffer, reset the size to 0 first. This // If we're reusing the buffer, reset the size to 0 first. This

View file

@@ -18,7 +18,7 @@ UNMAP_AFTER_INIT void SpuriousInterruptHandler::initialize(u8 interrupt_number)
void SpuriousInterruptHandler::register_handler(GenericInterruptHandler& handler) void SpuriousInterruptHandler::register_handler(GenericInterruptHandler& handler)
{ {
VERIFY(!m_real_handler); VERIFY(!m_real_handler);
m_real_handler = &handler; m_real_handler = adopt_own_if_nonnull(&handler);
} }
void SpuriousInterruptHandler::unregister_handler(GenericInterruptHandler&) void SpuriousInterruptHandler::unregister_handler(GenericInterruptHandler&)
{ {

View file

@@ -108,7 +108,7 @@ KResultOr<int> Process::sys$profiling_free_buffer(pid_t pid)
{ {
ScopedCritical critical; ScopedCritical critical;
perf_events = g_global_perf_events; perf_events = adopt_own_if_nonnull(g_global_perf_events);
g_global_perf_events = nullptr; g_global_perf_events = nullptr;
} }

View file

@@ -22,9 +22,9 @@ VirtIOQueue::VirtIOQueue(u16 queue_size, u16 notify_offset)
// TODO: ensure alignment!!! // TODO: ensure alignment!!!
u8* ptr = m_queue_region->vaddr().as_ptr(); u8* ptr = m_queue_region->vaddr().as_ptr();
memset(ptr, 0, m_queue_region->size()); memset(ptr, 0, m_queue_region->size());
m_descriptors = reinterpret_cast<VirtIOQueueDescriptor*>(ptr); m_descriptors = adopt_own_if_nonnull(reinterpret_cast<VirtIOQueueDescriptor*>(ptr));
m_driver = reinterpret_cast<VirtIOQueueDriver*>(ptr + size_of_descriptors); m_driver = adopt_own_if_nonnull(reinterpret_cast<VirtIOQueueDriver*>(ptr + size_of_descriptors));
m_device = reinterpret_cast<VirtIOQueueDevice*>(ptr + size_of_descriptors + size_of_driver); m_device = adopt_own_if_nonnull(reinterpret_cast<VirtIOQueueDevice*>(ptr + size_of_descriptors + size_of_driver));
for (auto i = 0; i + 1 < queue_size; i++) { for (auto i = 0; i + 1 < queue_size; i++) {
m_descriptors[i].next = i + 1; // link all of the descriptors in a line m_descriptors[i].next = i + 1; // link all of the descriptors in a line