From d5dce448ea679e40348ac77869f73374d3c89ae6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?kleines=20Filmr=C3=B6llchen?=
Date: Fri, 17 Dec 2021 01:02:16 +0100
Subject: [PATCH] AK: Bypass Buffered's buffer for large reads

Before, if we couldn't read enough data out of the buffer, we would
refill the buffer and recursively call read(), which in turn reads data
from the buffer into the resliced target span. This incurs a lot of
superfluous and expensive memmoves when large chunks of data are read
from a buffered stream.

This commit changes the behavior so that when we exhaust the buffer, we
first read any necessary additional data directly into the target, then
fill up the buffer again. Effectively, this results in drastically
reduced overhead from Buffered when reading large contiguous chunks. Of
course, Buffered is designed to speed up access patterns with small,
frequent reads, but it's nice to be able to combine both access
patterns on one stream without penalties either way.

The final performance gain is about an additional 80% in abench
decoding speed.
---
 AK/Buffered.h | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/AK/Buffered.h b/AK/Buffered.h
index fab3487a54..ab4c7e2752 100644
--- a/AK/Buffered.h
+++ b/AK/Buffered.h
@@ -57,15 +57,13 @@ public:
 
         auto nread = buffer().trim(m_buffered).copy_trimmed_to(bytes);
         m_buffered -= nread;
-        buffer().slice(nread, m_buffered).copy_to(buffer());
+        if (m_buffered > 0)
+            buffer().slice(nread, m_buffered).copy_to(buffer());
 
         if (nread < bytes.size()) {
+            nread += m_stream.read(bytes.slice(nread));
+
             m_buffered = m_stream.read(buffer());
-
-            if (m_buffered == 0)
-                return nread;
-
-            nread += read(bytes.slice(nread));
         }
 
         return nread;
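
Note (not part of the patch): below is a minimal standalone sketch of the read
path the commit message describes, the "drain the buffer, read the rest
directly into the target, then refill" ordering. It uses std:: types instead
of AK's Bytes/Span, and the names SketchBufferedReader and read_raw are
hypothetical stand-ins for Buffered and the wrapped stream's read(); it is an
illustration of the technique, not the AK implementation.

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <functional>
#include <span>
#include <vector>

class SketchBufferedReader {
public:
    // read_raw models the unbuffered underlying stream (what m_stream.read()
    // is in the patch); it fills as much of the given span as it can and
    // returns the number of bytes produced.
    explicit SketchBufferedReader(std::function<size_t(std::span<std::byte>)> read_raw, size_t buffer_size = 4096)
        : m_read_raw(std::move(read_raw))
        , m_buffer(buffer_size)
    {
    }

    size_t read(std::span<std::byte> bytes)
    {
        // 1. Drain whatever is already buffered into the target.
        size_t nread = std::min(m_buffered, bytes.size());
        std::memcpy(bytes.data(), m_buffer.data(), nread);
        m_buffered -= nread;
        // Move any leftover buffered data to the front of the buffer.
        if (m_buffered > 0)
            std::memmove(m_buffer.data(), m_buffer.data() + nread, m_buffered);

        if (nread < bytes.size()) {
            // 2. Bypass the buffer: read the remainder directly into the
            //    caller's span instead of refilling and recursing (the old
            //    behavior the patch removes).
            nread += m_read_raw(bytes.subspan(nread));
            // 3. Refill the buffer so subsequent small reads stay cheap.
            m_buffered = m_read_raw(std::span<std::byte> { m_buffer });
        }
        return nread;
    }

private:
    std::function<size_t(std::span<std::byte>)> m_read_raw;
    std::vector<std::byte> m_buffer;
    size_t m_buffered { 0 };
};

With this ordering, a large read touches the underlying stream once for the
bulk of the data, while small frequent reads continue to be served from the
refilled buffer, which is the mix of access patterns the commit message aims
to support without a penalty either way.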