1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-31 09:48:11 +00:00

Streams: Consistent behaviour when reading from stream with error.

The streaming operator doesn't short-circuit; consider the following
snippet:

    void foo(InputStream& stream) {
        int a, b;
        stream >> a >> b;
    }

If the first read fails, the second is called regardless. It should be
well defined what happens in this case: nothing.
This commit is contained in:
asynts 2020-09-05 16:39:56 +02:00 committed by Andreas Kling
parent 5d85be7ed4
commit 6de63782c7
8 changed files with 24 additions and 3 deletions

View file

@@ -40,8 +40,10 @@ public:
size_t read(Bytes bytes) override size_t read(Bytes bytes) override
{ {
size_t nread = 0; if (has_any_error())
return 0;
size_t nread = 0;
if (bytes.size() >= 1) { if (bytes.size() >= 1) {
if (m_next_byte.has_value()) { if (m_next_byte.has_value()) {
bytes[0] = m_next_byte.value(); bytes[0] = m_next_byte.value();

View file

@@ -57,6 +57,9 @@ public:
size_t read(Bytes bytes) override size_t read(Bytes bytes) override
{ {
if (has_any_error())
return 0;
auto nread = buffer().trim(m_buffer_remaining).copy_trimmed_to(bytes); auto nread = buffer().trim(m_buffer_remaining).copy_trimmed_to(bytes);
m_buffer_remaining -= nread; m_buffer_remaining -= nread;

View file

@@ -60,6 +60,9 @@ public:
size_t read(Bytes bytes) override size_t read(Bytes bytes) override
{ {
if (has_any_error())
return 0;
const auto nread = min(bytes.size(), m_queue.size()); const auto nread = min(bytes.size(), m_queue.size());
for (size_t idx = 0; idx < nread; ++idx) for (size_t idx = 0; idx < nread; ++idx)

View file

@@ -44,6 +44,9 @@ public:
size_t read(Bytes bytes) override size_t read(Bytes bytes) override
{ {
if (has_any_error())
return 0;
const auto count = min(bytes.size(), remaining()); const auto count = min(bytes.size(), remaining());
__builtin_memcpy(bytes.data(), m_bytes.data() + m_offset, count); __builtin_memcpy(bytes.data(), m_bytes.data() + m_offset, count);
m_offset += count; m_offset += count;
@@ -239,6 +242,9 @@ public:
size_t read(Bytes bytes) override size_t read(Bytes bytes) override
{ {
if (has_any_error())
return 0;
const auto nread = read_without_consuming(bytes); const auto nread = read_without_consuming(bytes);
m_read_offset += nread; m_read_offset += nread;

View file

@@ -75,6 +75,7 @@ namespace AK {
class InputStream : public virtual AK::Detail::Stream { class InputStream : public virtual AK::Detail::Stream {
public: public:
// Does nothing and returns zero if there is already an error.
virtual size_t read(Bytes) = 0; virtual size_t read(Bytes) = 0;
virtual bool read_or_error(Bytes) = 0; virtual bool read_or_error(Bytes) = 0;
virtual bool eof() const = 0; virtual bool eof() const = 0;

View file

@@ -189,8 +189,8 @@ DeflateDecompressor::~DeflateDecompressor()
size_t DeflateDecompressor::read(Bytes bytes) size_t DeflateDecompressor::read(Bytes bytes)
{ {
// FIXME: There are surely a ton of bugs because we don't check for read errors if (has_any_error())
// very often. return 0;
if (m_state == State::Idle) { if (m_state == State::Idle) {
if (m_read_final_bock) if (m_read_final_bock)

View file

@@ -68,6 +68,9 @@ GzipDecompressor::~GzipDecompressor()
// FIXME: Again, there are surely a ton of bugs because the code doesn't check for read errors. // FIXME: Again, there are surely a ton of bugs because the code doesn't check for read errors.
size_t GzipDecompressor::read(Bytes bytes) size_t GzipDecompressor::read(Bytes bytes)
{ {
if (has_any_error())
return 0;
if (m_current_member.has_value()) { if (m_current_member.has_value()) {
size_t nread = current_member().m_stream.read(bytes); size_t nread = current_member().m_stream.read(bytes);
current_member().m_checksum.update(bytes.trim(nread)); current_member().m_checksum.update(bytes.trim(nread));

View file

@@ -65,6 +65,9 @@ public:
size_t read(Bytes bytes) override size_t read(Bytes bytes) override
{ {
if (has_any_error())
return 0;
auto nread = m_buffered.bytes().copy_trimmed_to(bytes); auto nread = m_buffered.bytes().copy_trimmed_to(bytes);
m_buffered.bytes().slice(nread, m_buffered.size() - nread).copy_to(m_buffered); m_buffered.bytes().slice(nread, m_buffered.size() - nread).copy_to(m_buffered);