AK: Remove history from DuplexMemoryStream.

That feature was really only useful for Compress::DeflateDecompressor,
which now uses CircularDuplexBuffer instead.
Authored by asynts on 2020-09-01 11:30:41 +02:00; committed by Andreas Kling
parent 9ce4475907
commit f9516a99bf
2 changed files with 5 additions and 70 deletions
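
For context, the "history" being removed let callers move the read offset backwards: already-read data stayed valid for up to 64KiB. A minimal before/after sketch against the pre-commit API, using only members visible in this diff (the include path is assumed):

#include <AK/Stream.h>

void history_example(DuplexMemoryStream& stream)
{
    int value = 42;
    stream << value;
    stream >> value; // The read offset advances past the int.

    // Before this commit: legal, because up to 64KiB of consumed
    // data was retained and rseek() could move back into it.
    stream.rseek(0);
    stream >> value; // Re-reads the same bytes.

    // After this commit: rseek() is gone and consumed chunks are
    // discarded eagerly, so the stream is strictly forward-only;
    // callers that need to re-read must keep their own copy.
}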


@@ -326,13 +326,9 @@ private:
 // All data written to this stream can be read from it. Reading and writing is done
 // using different offsets, meaning that it is not necessary to seek to the start
 // before reading; this behaviour differs from BufferStream.
-//
-// The stream keeps a history of 64KiB which means that seeking backwards is well
-// defined. Data past that point will be discarded.
 class DuplexMemoryStream final : public DuplexStream {
 public:
     static constexpr size_t chunk_size = 4 * 1024;
-    static constexpr size_t history_size = 64 * 1024;

     bool eof() const override { return m_write_offset == m_read_offset; }

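As the surviving comment describes, a duplex stream tracks separate read and write offsets, so data can be read back immediately after being written. A small usage sketch, assuming only the stream operators exercised by the tests further down:

DuplexMemoryStream stream;

int input = 42;
int output = 0;

stream << input;  // Advances only the write offset.
stream >> output; // Advances only the read offset; no seek needed in between.

// output == 42. With BufferStream, one would have to seek back to the
// start before reading, which is exactly the difference noted above.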
@@ -411,22 +407,6 @@ public:
         return nread;
     }

-    size_t read(Bytes bytes, size_t offset)
-    {
-        const auto backup = this->roffset();
-
-        bool do_discard_chunks = false;
-        exchange(m_do_discard_chunks, do_discard_chunks);
-
-        rseek(offset);
-        const auto count = read(bytes);
-        rseek(backup);
-
-        exchange(m_do_discard_chunks, do_discard_chunks);
-
-        return count;
-    }
-
     bool read_or_error(Bytes bytes) override
     {
         if (m_write_offset - m_read_offset < bytes.size()) {
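
The deleted read(Bytes, size_t) overload performed a random-access read by saving the read offset and the discard flag, seeking, reading, and then restoring both. A standalone sketch of that save/restore idiom with std::exchange (the struct and names here are illustrative, not AK's):

#include <cstddef>
#include <utility>

struct StreamState {
    size_t read_offset { 0 };
    bool do_discard_chunks { true };
};

size_t read_at(StreamState& state, size_t offset)
{
    // Save both pieces of cursor state and install temporary values.
    size_t saved_offset = std::exchange(state.read_offset, offset);
    bool saved_discard = std::exchange(state.do_discard_chunks, false);

    size_t nread = 0; // ...perform the out-of-order read here...

    // Restore the saved state so the caller's cursor is untouched.
    state.read_offset = saved_offset;
    state.do_discard_chunks = saved_discard;
    return nread;
}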
@@ -461,22 +441,12 @@ public:
     size_t roffset() const { return m_read_offset; }
     size_t woffset() const { return m_write_offset; }

-    void rseek(size_t offset)
-    {
-        ASSERT(offset >= m_base_offset);
-        ASSERT(offset <= m_write_offset);
-        m_read_offset = offset;
-    }
-
     size_t remaining() const { return m_write_offset - m_read_offset; }

 private:
     void try_discard_chunks()
     {
-        if (!m_do_discard_chunks)
-            return;
-
-        while (m_read_offset - m_base_offset >= history_size + chunk_size) {
+        while (m_read_offset - m_base_offset >= chunk_size) {
             m_chunks.take_first();
             m_base_offset += chunk_size;
         }
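
With the history gone, a 4KiB chunk can be freed as soon as the read offset has moved a whole chunk past m_base_offset; previously an extra 64KiB had to accumulate first. A standalone model of that bookkeeping (std::deque stands in for the stream's chunk list; this is a sketch, not AK's implementation):

#include <cstddef>
#include <deque>
#include <vector>

constexpr size_t chunk_size = 4 * 1024;

std::deque<std::vector<unsigned char>> chunks;
size_t base_offset = 0; // Absolute offset of the first byte of chunks.front().
size_t read_offset = 0; // Absolute offset of the next byte to read.

void try_discard_chunks()
{
    // A chunk is dead once the read offset has passed its last byte.
    while (!chunks.empty() && read_offset - base_offset >= chunk_size) {
        chunks.pop_front();
        base_offset += chunk_size;
    }
}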
@@ -486,7 +456,6 @@ private:
     size_t m_write_offset { 0 };
     size_t m_read_offset { 0 };
     size_t m_base_offset { 0 };
-    bool m_do_discard_chunks { false };
 };

 }


@@ -123,7 +123,7 @@ TEST_CASE(duplex_simple)
     EXPECT(stream.eof());
 }

-TEST_CASE(duplex_seek_into_history)
+TEST_CASE(duplex_large_buffer)
 {
     DuplexMemoryStream stream;
@@ -131,56 +131,22 @@ TEST_CASE(duplex_seek_into_history)

     EXPECT_EQ(stream.remaining(), 0ul);

-    for (size_t idx = 0; idx < 256; ++idx) {
+    for (size_t idx = 0; idx < 256; ++idx)
         stream << one_kibibyte;
-    }

     EXPECT_EQ(stream.remaining(), 256 * 1024ul);

-    for (size_t idx = 0; idx < 128; ++idx) {
+    for (size_t idx = 0; idx < 128; ++idx)
         stream >> one_kibibyte;
-    }

     EXPECT_EQ(stream.remaining(), 128 * 1024ul);

-    // We now have 128KiB on the stream. Because the stream has a
-    // history size of 64KiB, we should be able to seek to 64KiB.
-    static_assert(DuplexMemoryStream::history_size == 64 * 1024);
-    stream.rseek(64 * 1024);
-
-    EXPECT_EQ(stream.remaining(), 192 * 1024ul);
-
-    for (size_t idx = 0; idx < 192; ++idx) {
+    for (size_t idx = 0; idx < 128; ++idx)
         stream >> one_kibibyte;
-    }

     EXPECT(stream.eof());
 }

-TEST_CASE(duplex_wild_seeking)
-{
-    DuplexMemoryStream stream;
-
-    int input0 = 42, input1 = 13, input2 = -12;
-    int output0, output1, output2;
-
-    stream << input2;
-    stream << input0 << input1;
-    stream.rseek(0);
-    stream << input2 << input0;
-
-    stream.rseek(4);
-    stream >> output0 >> output1 >> output2;
-
-    EXPECT(!stream.eof());
-
-    EXPECT_EQ(input0, output0);
-    EXPECT_EQ(input1, output1);
-    EXPECT_EQ(input2, output2);
-
-    stream.discard_or_error(4);
-    EXPECT(stream.eof());
-}
-
 TEST_CASE(read_endian_values)
 {
     const u8 input[] { 0, 1, 2, 3, 4, 5, 6, 7 };