Mirror of https://github.com/RGBCube/serenity, synced 2025-05-20 12:05:07 +00:00
Kernel: Make AsyncDeviceRequest sub-req management alloc free
The previous implementation could allocate on insertion into the completed / pending sub-request vectors. There's no reason these can't be intrusive lists instead.

This is a very minor step towards improving the ability to handle OOM, as tracked by #6369.

It might also help improve performance on the IO path in certain situations. I'll benchmark that later.
This commit is contained in:
parent f1d832e5ea
commit 033b287635

2 changed files with 22 additions and 19 deletions
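
For context, a minimal sketch of the intrusive-list idea the commit message refers to: a growing vector of pointers may have to reallocate its backing storage on append, while an intrusive list keeps the link pointers inside the element itself, so insertion and removal only rewire pointers. The Request and RequestList types below are made up for illustration and are not the AK::IntrusiveList API the kernel actually uses.

    #include <cassert>

    // Sketch: the element embeds its own list links, so the container
    // owns no storage and insertion/removal cannot allocate.
    struct Request {
        int id { 0 };
        Request* prev { nullptr };
        Request* next { nullptr };
    };

    struct RequestList {
        Request* head { nullptr };
        Request* tail { nullptr };

        void append(Request& r)
        {
            r.prev = tail;
            r.next = nullptr;
            if (tail)
                tail->next = &r;
            else
                head = &r;
            tail = &r;
        }

        void remove(Request& r)
        {
            if (r.prev)
                r.prev->next = r.next;
            else
                head = r.next;
            if (r.next)
                r.next->prev = r.prev;
            else
                tail = r.prev;
            r.prev = r.next = nullptr;
        }

        bool is_empty() const { return head == nullptr; }
    };

    int main()
    {
        Request a { 1 };
        Request b { 2 };
        RequestList pending;
        pending.append(a); // just pointer updates, no heap allocation
        pending.append(b);
        pending.remove(a);
        assert(!pending.is_empty()); // b is still linked
    }

The trade-off is that an element can sit on at most one list per embedded node, so moving a sub-request from the pending list to the complete list has to unlink it from the former first, which is exactly what the second hunk below relies on.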
@@ -49,10 +49,13 @@ AsyncDeviceRequest::~AsyncDeviceRequest()
     // sub-requests should be completed (either succeeded, failed, or cancelled).
     // Which means there should be no more pending sub-requests and the
     // entire AsyncDeviceRequest hierarchy should be immutable.
-    for (auto& sub_request : m_sub_requests_complete) {
-        VERIFY(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
-        VERIFY(sub_request.m_parent_request == this);
-        sub_request.m_parent_request = nullptr;
+    while (!m_sub_requests_complete.is_empty()) {
+        // Note: sub_request is ref-counted, and we use this specific pattern
+        // to make sure the refcount is dropped properly.
+        auto sub_request = m_sub_requests_complete.take_first();
+        VERIFY(is_completed_result(sub_request->m_result)); // Shouldn't need any locking anymore
+        VERIFY(sub_request->m_parent_request == this);
+        sub_request->m_parent_request = nullptr;
     }
 }
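
A note on the new destructor loop: iterating the list would leave each element's reference held by the container, whereas popping elements one at a time moves that reference into a loop-local handle that is dropped at the end of every iteration. The sketch below shows the same effect using std::shared_ptr and std::list purely as stand-ins for the kernel's RefPtr and intrusive list.

    #include <cassert>
    #include <list>
    #include <memory>

    int main()
    {
        auto request = std::make_shared<int>(42);

        std::list<std::shared_ptr<int>> complete;
        complete.push_back(request);
        assert(request.use_count() == 2); // our handle + the list's reference

        // Pop each element into a local; the reference it carried is released
        // when the local goes out of scope at the end of the iteration,
        // mirroring the take_first() pattern in the destructor above.
        while (!complete.empty()) {
            auto sub_request = std::move(complete.front());
            complete.pop_front();
            // ...final sanity checks on *sub_request would go here...
        }

        assert(request.use_count() == 1); // only our handle remains
    }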
@@ -104,24 +107,19 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
 {
         ScopedSpinLock lock(m_lock);
         VERIFY(m_result == Started);
-        size_t index;
-        for (index = 0; index < m_sub_requests_pending.size(); index++) {
-            if (&m_sub_requests_pending[index] == &sub_request) {
-                NonnullRefPtr<AsyncDeviceRequest> request(m_sub_requests_pending[index]);
-                m_sub_requests_pending.remove(index);
-                m_sub_requests_complete.append(move(request));
-                break;
-            }
+
+        if (m_sub_requests_pending.contains(sub_request)) {
+            // Note: append handles removing from any previous intrusive list internally.
+            m_sub_requests_complete.append(sub_request);
         }
-        VERIFY(index < m_sub_requests_pending.size());
+
         all_completed = m_sub_requests_pending.is_empty();
         if (all_completed) {
             // Aggregate any errors
             bool any_failures = false;
             bool any_memory_faults = false;
-            for (index = 0; index < m_sub_requests_complete.size(); index++) {
-                auto& sub_request = m_sub_requests_complete[index];
-                auto sub_result = sub_request.get_request_result();
+            for (auto& com_sub_request : m_sub_requests_complete) {
+                auto sub_result = com_sub_request.get_request_result();
                 VERIFY(is_completed_result(sub_result));
                 switch (sub_result) {
                 case Failure:
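
This second hunk replaces an index scan plus remove/append on vectors with a containment check and a single append whose intrusive-list implementation simply relinks the node. Standard C++ has no intrusive list, but std::list::splice() demonstrates the same "relink rather than reallocate" transfer between a pending and a complete list; note that std::list, unlike an intrusive list, still allocates a node when an element is first inserted.

    #include <cassert>
    #include <list>
    #include <memory>

    int main()
    {
        using RequestPtr = std::shared_ptr<int>;

        std::list<RequestPtr> pending;
        pending.push_back(std::make_shared<int>(1));
        pending.push_back(std::make_shared<int>(2));
        std::list<RequestPtr> complete;

        // Move the front pending request onto the complete list by relinking
        // its existing node: no allocation, no copy, no refcount churn.
        complete.splice(complete.end(), pending, pending.begin());

        bool all_completed = pending.empty(); // mirrors sub_request_finished()
        assert(!all_completed);
        assert(complete.size() == 1);
    }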