implement DocumentBuffer for persistence of runtime data

In order to provide persistence of runtime state across the application,
the documentbuffer provides a simple cache interface that balances cached
internal state events between an in-memory cache and on-disk storage.

From a feature development perspective the DocumentBuffer provides a simple
cache interface:
  - Push() and Pop() individual items
  - Remove() and Read() bulk items
  - Peek() at the most recent item
  - Flush() items in memory to the disk

as well as some control features:
  - Close(), which calls flush and then returns
    index of the last byte of useful data in the
    backing store.

  - Cached(), which returns the number of items
    cached in memory.

Underneath the hood, documentbuffer balances the cache (memory) and store
(disk) by "promoting" the most recent documents in store to cache and by
"demoting" the least recent documents in cache to store. Thus, the cache
is always ordered by most recent, and so is the store.

Documentbuffer takes any implementation of readwriteseeker as an interface
for a backing store. Theoretically this means that documentbuffer can leverage
more than just a standard os.File. Possible implementations could include
transactions over the network or device drivers for long term cold storage devices.

In fact, documentbuffer comes with an in memory test implementation of the
readwriteseeker interface called ReadWriteSeekString. This emulates the functions
of Read(), Write(), and Seek() to operate on an internal string buffer.
This facility is only provided for testing and mock up purposes.

One note about Close(): Since the documentbuffer has no way of truncating the
underlying store an edge case can present itself that necessitates Close() and
specifically Close()'s return type. If the documentbuffer has Remove()ed or
promote()ed more bytes of data from store than it will subsequently Flush() or
demote() to disk one or more bytes of junk data may be left over from the
overwriting of the previously Remove()/promote()ed data. In this case, or
more specifically in all cases, Close() will return the last usable index
of the underlying store. It is up to the caller to then truncate it.

Regrettably there is no reasonable truncate interface that applies polymorphically
to any underlying data stream. Thus the quirk around Close() remains a design
challenge.

This commit provides comprehensive unit tests for both DocumentBuffer and
ReadWriteSeekString.

Not implemented in this commit is the whole design for runtime data persistence.
It is intended that internal modules for bingobot that provide functionality
directly to the user leverage the event pub/sub system as their sole authoritative
source for state management. As long as these modules provide the same output
for the same input sequence of events consistently, then all parts of the application
will recover from error status or crashes by re-ingesting the on disk storage
of events provided by the documentbuffer. This way the application will always
come back up with minimal data loss and potentially the exact same state as
before it went down.

Signed-off-by: Ava Affine <ava@sunnypup.io>
This commit is contained in:
Ava Apples Affine 2024-11-25 18:07:26 -08:00
parent ad873ef836
commit 1ef8ff042f
5 changed files with 961 additions and 0 deletions

View file

@ -22,3 +22,8 @@ tests-config-pkg:
stage: test stage: test
script: script:
- go test ./internal/config - go test ./internal/config
tests-docbuf-pkg:
stage: test
script:
- go test ./internal/docbuf

387
internal/docbuf/docbuf.go Normal file
View file

@ -0,0 +1,387 @@
package docbuf
import (
"encoding/base64"
"errors"
"io"
)
// Error messages returned by DocumentBuffer operations. They are
// plain strings (not sentinel error values) and are wrapped with
// errors.New at each return site.
const (
	DemoteFromEmptyCacheError = "Can't demote from empty cache"
	PromoteToFullCacheError   = "Can't promote to full cache"
	WriteEmptyDocumentError   = "Not writing empty document to store"
	PartialWriteError         = "Couldnt complete document write"
	BadReadRequestError       = "Can't read less than one documents"
	BadReadLengthError        = "Unexpected read length"
	PopFromEmptyBufferError   = "Can't pop from empty buffer"
	RemoveOverCapacityError   = "Not enough documents in buffer to remove"
)
// DocumentBuffer is a bounded document cache that spills the least
// recent documents to a backing store. Both the in-memory cache and
// the backing store are kept ordered newest first.
type DocumentBuffer interface {
	/* Push
	 * Add a document to the buffer.
	 * If cache is full oldest document is written to backing writer.
	 */
	Push(string) error
	/* Pop
	 * removes a document from the cache and pulls a document back
	 * out of the backing ReaderWriter.
	 */
	Pop() (string, error)
	/* Remove
	 * removes N documents according to arg.
	 * backfills cache by pulling documents back out of backing
	 * ReaderWriter.
	 */
	Remove(int) ([]string, error)
	/* Flush
	 * empties cache by placing all cached documents into backing
	 * ReaderWriter.
	 */
	Flush() error
	/* Peek
	 * views first document in either cache or backing ReaderWriter
	 */
	Peek() (string, error)
	/* Read
	 * returns N documents from the cache and backing store without
	 * removing them from either data structure.
	 */
	Read(int) ([]string, error)
	/* Close
	 * flushes and then prepares backing store for closure.
	 * returns the index one past the last byte of useful data in
	 * the backing store; truncation is the caller's responsibility.
	 */
	Close() (int64, error)
	/* Cached
	 * returns current length of cache
	 */
	Cached() int
}
// DocBuf is the concrete DocumentBuffer implementation backed by an
// in-memory cache and an io.ReadWriteSeeker store.
type DocBuf struct {
	cache     []string           // cached documents, newest at index 0
	store     io.ReadWriteSeeker // spill-over storage for older documents
	cacheSize int                // maximum number of documents held in cache
	endIndex  int64              // one past the last byte of useful data in store
}
// demote moves the most recently cached document out of memory and
// appends it to the backing store.
func (b *DocBuf) demote() error {
	n := len(b.cache)
	if n == 0 {
		return errors.New(DemoteFromEmptyCacheError)
	}
	doc := b.cache[n-1]
	b.cache = b.cache[:n-1]
	return b.writeToBackingStore(doc)
}
/* promote pulls the most recent document out of the backing store
 * and appends it to the in-memory cache.
 *
 * bounds toggles capacity checking on the cache. Remove() may need
 * to evict more events than the cache can normally hold, so it calls
 * promote with bounds disabled in order to overpopulate the cache
 * on purpose.
 */
func (b *DocBuf) promote(bounds bool) error {
	if bounds && len(b.cache) >= b.cacheSize {
		return errors.New(PromoteToFullCacheError)
	}
	docs, err := b.readDocumentsFromDisk(1, true)
	if err != nil {
		return err
	}
	if len(docs) != 0 {
		b.cache = append(b.cache, docs[0])
	}
	return nil
}
/* writeToBackingStore appends a single document to the backing store
 * at b.endIndex and advances b.endIndex past it.
 *
 * Documents are base64 encoded so the payload never needs escaping,
 * which lets a bare newline act as the document separator.
 */
func (b *DocBuf) writeToBackingStore(doc string) error {
	if len(doc) == 0 {
		return errors.New(WriteEmptyDocumentError)
	}
	str := base64.StdEncoding.EncodeToString([]byte(doc))
	// seek to the end of the useful data
	c, err := b.store.Seek(b.endIndex, io.SeekStart)
	if err != nil {
		return err
	}
	// separate documents with a newline
	if c > 0 {
		str = "\n" + str
	}
	n, err := b.store.Write([]byte(str))
	// BUG FIX: compare n against the encoded string actually handed
	// to Write, not the raw document; base64 output is longer than
	// its input, so the old `n < len(doc)` check could miss a
	// partial write.
	if n < len(str) {
		return errors.Join(
			errors.New(PartialWriteError),
			err)
	}
	if err != nil {
		return err
	}
	b.endIndex += int64(n)
	return nil
}
/* readDocumentsFromDisk reads up to count documents from the backing
 * store, newest first.
 *
 * Documents are stored base64 encoded and newline separated, so this
 * walks backwards one byte at a time from b.endIndex, accumulating
 * bytes until it hits a newline (or the start of the store), then
 * base64-decodes the run into a document.
 *
 * When truncate is true the consumed region is logically removed by
 * pulling b.endIndex back to the cursor; the bytes themselves are
 * not erased (see the package note about Close()).
 */
func (b *DocBuf) readDocumentsFromDisk(
	count int,
	truncate bool,
) ([]string, error) {
	docs := []string{}
	cursor := int64(0)
	if count < 1 {
		return docs, errors.New(BadReadRequestError)
	}
	// catch store is empty
	cursor, err := b.store.Seek(0, io.SeekEnd)
	if b.endIndex == 0 || err != nil || cursor == 0 {
		return docs, err
	}
	// self repair: endIndex must never point past the real end of
	// the store
	if b.endIndex > cursor {
		b.endIndex = cursor
	}
	cursor = b.endIndex - 1
	for len(docs) < count && cursor >= 0 {
		doc := ""
		char := make([]byte, 1)
		char[0] = 0
		// read bytes backwards from file
		for cursor >= 0 {
			// set cursor
			if _, err := b.store.Seek(cursor, io.SeekStart); err != nil {
				return docs, err
			}
			// read one byte
			n, err := b.store.Read(char)
			if err != nil {
				return docs, err
			}
			if n != 1 {
				return docs, errors.New(BadReadLengthError)
			}
			// break on newline
			if char[0] == 10 {
				break
			}
			cursor -= 1
			doc = string(char) + doc
		}
		/* parse document and add to account (docs)
		 * each read will stop on the newline so make sure
		 * we dont try to account for an empty document
		 *
		 * NOTE(review): the newline is never appended to doc, so doc
		 * can be "" but never "\n"; this guard looks like it was
		 * meant to be `doc != ""` — confirm before changing.
		 */
		if (doc != "\n") {
			str, err := base64.StdEncoding.DecodeString(doc)
			if err != nil {
				return docs, err
			}
			doc = string(str)
			docs = append(docs, doc)
		}
		// step over the separator before scanning the next document
		if len(docs) < count {
			cursor -= 1
		}
	}
	if truncate {
		if cursor < 0 {
			cursor = 0
		}
		b.endIndex = cursor
	}
	return docs, nil
}
/* NewDocumentBuffer builds a DocumentBuffer holding up to cacheSize
 * documents in memory on top of backingStore.
 *
 * WARNING: this constructor will promote items from storage,
 * resulting in a modified backing store as well as a prefilled
 * cache.
 */
func NewDocumentBuffer(
	cacheSize int,
	backingStore io.ReadWriteSeeker,
) (DocumentBuffer, error) {
	newBuf := DocBuf{
		cache:     make([]string, 0, cacheSize),
		store:     backingStore,
		cacheSize: cacheSize,
	}
	c, err := backingStore.Seek(0, io.SeekEnd)
	newBuf.endIndex = c
	if err != nil {
		return &newBuf, err
	}
	// prefill cache with the newest documents from the store.
	// BUG FIX: read failures were silently discarded here; surface
	// them instead of handing back a buffer that quietly lost data.
	// (promote never returns the full-cache error in this loop: the
	// cache starts empty and gains at most one document per pass.)
	for range cacheSize {
		if err := newBuf.promote(true); err != nil {
			return &newBuf, err
		}
	}
	return &newBuf, nil
}
/* Push
 * Add a document to the front of the buffer (newest first).
 * If the cache is full the oldest document is demoted to the
 * backing writer before the new one is inserted.
 */
func (b *DocBuf) Push(doc string) error {
	if len(b.cache) >= b.cacheSize {
		if err := b.demote(); err != nil {
			return err
		}
	}
	// newest document lives at index 0
	fresh := make([]string, 0, b.cacheSize)
	fresh = append(fresh, doc)
	b.cache = append(fresh, b.cache...)
	return nil
}
/* Pop
 * removes and returns the newest document, then backfills the
 * cache with a document promoted from the backing ReaderWriter.
 */
func (b *DocBuf) Pop() (string, error) {
	if len(b.cache) == 0 {
		if err := b.promote(true); err != nil {
			return "", err
		}
		if len(b.cache) == 0 {
			return "", errors.New(PopFromEmptyBufferError)
		}
	}
	head := b.cache[0]
	b.cache = b.cache[1:]
	err := b.promote(true)
	return head, err
}
/* Remove
 * removes N newest documents in order of newest to oldest.
 * backfills cache by pulling documents back out of backing
 * ReaderWriter (again, newest first).
 */
func (b *DocBuf) Remove(count int) ([]string, error) {
	// when more documents were requested than the cache can hold,
	// deliberately overfill the cache (bounds checking off) so the
	// whole request can be served from memory below.
	// NOTE(review): delta assumes the cache is full; with a
	// partially filled cache fewer promotions happen than the
	// request needs — confirm this is intended.
	delta := count - b.cacheSize
	if delta > 0 {
		for range delta {
			if err := b.promote(false); err != nil {
				return []string{}, err
			}
		}
	}
	// not enough documents anywhere to satisfy the request: hand
	// back everything we have along with the error
	if count > len(b.cache) {
		tmp := b.cache
		b.cache = make([]string, 0, b.cacheSize)
		return tmp, errors.New(RemoveOverCapacityError)
	}
	candidates := b.cache[:count]
	b.cache = b.cache[count:]
	// refill cache from the store back up to capacity
	delta = b.cacheSize - len(b.cache)
	if delta > 0 {
		for range delta {
			if err := b.promote(true); err != nil {
				return []string{}, err
			}
		}
	}
	return candidates, nil
}
/* Flush
 * empties the cache by demoting every cached document into the
 * backing ReaderWriter, oldest first.
 */
func (b *DocBuf) Flush() error {
	// each successful demote shrinks the cache by exactly one
	for len(b.cache) > 0 {
		if err := b.demote(); err != nil {
			return err
		}
	}
	return nil
}
/* Peek
 * returns the newest document without removing it, consulting the
 * backing ReaderWriter when the cache is empty.
 */
func (b *DocBuf) Peek() (string, error) {
	if len(b.cache) != 0 {
		return b.cache[0], nil
	}
	docs, err := b.readDocumentsFromDisk(1, false)
	if len(docs) == 0 {
		return "", err
	}
	return docs[0], err
}
/* Read
 * returns up to count documents, newest first, from the cache and
 * the backing store without removing them from either structure.
 */
func (b *DocBuf) Read(count int) ([]string, error) {
	if count < 0 {
		count = 0
	}
	// BUG FIX: the old `b.cache[:count-delta]` always sliced the
	// entire cache, so Read(1) with three cached documents returned
	// all three; it also aliased the cache's backing array, letting
	// the append of store documents scribble into it. Copy exactly
	// min(count, len(cache)) documents into a fresh slice instead.
	fromCache := count
	if fromCache > len(b.cache) {
		fromCache = len(b.cache)
	}
	candidates := make([]string, fromCache, count)
	copy(candidates, b.cache[:fromCache])
	if delta := count - fromCache; delta > 0 {
		fromStorage, err := b.readDocumentsFromDisk(delta, false)
		if err != nil {
			return candidates, err
		}
		candidates = append(candidates, fromStorage...)
	}
	return candidates, nil
}
/* Close
 * flushes the cache and reports the index one past the last byte of
 * useful data in the backing store; truncating the store to that
 * index is the caller's responsibility.
 */
func (b *DocBuf) Close() (int64, error) {
	err := b.Flush()
	return b.endIndex, err
}
/* Cached
 * reports how many documents currently sit in the in-memory cache.
 */
func (b *DocBuf) Cached() int {
	return len(b.cache)
}

View file

@ -0,0 +1,304 @@
package docbuf
import (
"testing"
"fmt"
)
// docu renders the current counter value as a document and advances
// the counter. The embedded newline deliberately tries to trick the
// docbuf into thinking there is an extra document here — there isn't.
func docu(ctr *int) string {
	doc := fmt.Sprintf("%d\n", *ctr)
	(*ctr)++
	return doc
}
// TestPushPop exercises Push/Pop ordering, demotion of the oldest
// document to the store, and the empty-buffer error from Pop.
// BUG FIX (applies throughout this file): errors were formatted with
// %e, the floating-point verb, which prints garbage for error
// values; %v is the correct verb.
func TestPushPop(t *testing.T) {
	docCtr := 1
	backingStore := NewReadWriteSeekString()
	buf, e := NewDocumentBuffer(3, &backingStore)
	if e != nil {
		t.Fatalf("error making documentbuffer: %v", e)
	}
	// does push add 1 to cache
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
		t.Fatalf("error pushing: %v", err)
	}
	// does push past cache demote first document
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || backingStore.Cursor() == 0 {
		t.Fatalf("error pushing: %v", err)
	}
	if backingStore.Contents() != "MQo=" {
		t.Fatalf("expected oldest doc to be in store, got %s",
			backingStore.Contents())
	}
	// does pop promote document from cache
	doc, err := buf.Pop()
	if err != nil || doc != "4\n" {
		t.Fatalf("did not get expected doc from cache: %s (%v)", doc, err)
	}
	if buf.Cached() != 3 {
		t.Fatalf("doc buffer did not promote: %d", buf.Cached())
	}
	// does pop past empty throw the right error
	doc, err = buf.Pop()
	if err != nil || doc != "3\n" {
		t.Fatalf("did not get expected doc from cache: %s (%v)", doc, err)
	}
	doc, err = buf.Pop()
	if err != nil || doc != "2\n" {
		t.Fatalf("did not get expected doc from cache: %s (%v)", doc, err)
	}
	doc, err = buf.Pop()
	if err != nil || doc != "1\n" {
		t.Logf("bs: %s", backingStore.Contents())
		t.Fatalf("did not get expected doc from cache: %s (%v)", doc, err)
	}
	doc, err = buf.Pop()
	if err == nil ||
		doc != "" ||
		err.Error() != "Can't pop from empty buffer" {
		t.Fatalf("did not get expected doc from cache: %s", doc)
	}
}
// TestRemove exercises bulk removal, including removing more
// documents than the cache holds (forcing promotion from the store).
// BUG FIX: error format verb corrected from %e to %v throughout.
func TestRemove(t *testing.T) {
	docCtr := 1
	backingStore := NewReadWriteSeekString()
	buf, e := NewDocumentBuffer(3, &backingStore)
	if e != nil {
		t.Fatalf("error making documentbuffer: %v", e)
	}
	// setup test data
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil ||
		buf.Cached() != 3 || backingStore.Contents() != "MQo=" {
		t.Fatalf("error pushing: %v", err)
	}
	// tests
	docs, err := buf.Remove(2)
	if err != nil ||
		len(docs) != 2 ||
		docs[0] != "4\n" ||
		docs[1] != "3\n" ||
		buf.Cached() != 2 {
		t.Fatalf("error removing: %v", err)
	}
	for range 5 {
		if err := buf.Push(docu(&docCtr)); err != nil {
			t.Fatalf("error pushing: %v", err)
		}
	}
	docs, err = buf.Remove(4)
	if err != nil ||
		len(docs) != 4 ||
		docs[0] != "9\n" ||
		docs[1] != "8\n" ||
		docs[2] != "7\n" ||
		docs[3] != "6\n" ||
		buf.Cached() != 3 {
		t.Fatalf("error removing: %v", err)
	}
	docs, err = buf.Remove(3)
	if err != nil ||
		len(docs) != 3 ||
		docs[0] != "5\n" ||
		docs[1] != "2\n" ||
		docs[2] != "1\n" ||
		buf.Cached() != 0 {
		t.Fatalf("error removing: %v", err)
	}
}
// TestFlush verifies that Flush writes every cached document to the
// backing store, oldest first, newline separated and base64 encoded.
// BUG FIX: error format verb corrected from %e to %v throughout.
func TestFlush(t *testing.T) {
	docCtr := 1
	expectedDoc := "MQo=\nMgo=\nMwo=\nNAo="
	backingStore := NewReadWriteSeekString()
	buf, e := NewDocumentBuffer(3, &backingStore)
	if e != nil {
		t.Fatalf("error making documentbuffer: %v", e)
	}
	// setup test data
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
		t.Fatalf("error pushing: %v", err)
	}
	// NOTE(review): the == comparison below only fires when the
	// store equals "Nao=\n"; the sibling tests assert != "MQo="
	// here — confirm whether that was the intent before changing.
	if err := buf.Push(docu(&docCtr)); err != nil ||
		buf.Cached() != 3 || backingStore.Contents() == "Nao=\n" {
		t.Fatalf("error pushing: %v", err)
	}
	// test
	if err := buf.Flush(); err != nil {
		t.Fatalf("error flushing buffer: %v", err)
	}
	if backingStore.Contents() != expectedDoc {
		t.Fatalf("did not get expected document: %s", backingStore.Contents())
	}
}
// TestPeek verifies Peek returns the newest document without
// removing it from the cache.
// BUG FIX: error format verb corrected from %e to %v.
func TestPeek(t *testing.T) {
	docCtr := 1
	backingStore := NewReadWriteSeekString()
	buf, e := NewDocumentBuffer(3, &backingStore)
	if e != nil {
		t.Fatalf("error making documentbuffer: %v", e)
	}
	// setup test data
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
		t.Fatalf("error pushing: %v", err)
	}
	// test
	if d, e := buf.Peek(); e != nil ||
		d != "1\n" ||
		buf.Cached() != 1 {
		t.Fatalf("error peeking: %v", e)
	}
}
// TestRead verifies Read returns documents newest first from both
// cache and store without mutating either.
// BUG FIX: error format verb corrected from %e to %v throughout.
func TestRead(t *testing.T) {
	docCtr := 1
	backingStore := NewReadWriteSeekString()
	buf, e := NewDocumentBuffer(3, &backingStore)
	if e != nil {
		t.Fatalf("error making documentbuffer: %v", e)
	}
	// setup test data
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil ||
		buf.Cached() != 3 || backingStore.Contents() != "MQo=" {
		t.Fatalf("error pushing: %v", err)
	}
	// test
	if docs, err := buf.Read(4); err != nil ||
		len(docs) != 4 ||
		docs[0] != "4\n" ||
		docs[1] != "3\n" ||
		docs[2] != "2\n" ||
		docs[3] != "1\n" ||
		buf.Cached() != 3 ||
		backingStore.Contents() != "MQo=" {
		t.Fatalf("error reading: %v", err)
	}
}
// TestClose pushes and removes documents, then verifies Close
// flushes the cache and reports the truncation index for the store.
// BUG FIX: error format verb corrected from %e to %v throughout.
func TestClose(t *testing.T) {
	docCtr := 1
	backingStore := NewReadWriteSeekString()
	buf, e := NewDocumentBuffer(3, &backingStore)
	if e != nil {
		t.Fatalf("error making documentbuffer: %v", e)
	}
	// setup test data
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
		t.Fatalf("error pushing: %v", err)
	}
	if err := buf.Push(docu(&docCtr)); err != nil ||
		buf.Cached() != 3 || backingStore.Contents() != "MQo=" {
		t.Fatalf("error pushing: %v", err)
	}
	for range 5 {
		if err := buf.Push(docu(&docCtr)); err != nil {
			t.Fatalf("error pushing: %v", err)
		}
	}
	docs, err := buf.Remove(4)
	if err != nil ||
		len(docs) != 4 ||
		buf.Cached() != 3 {
		t.Fatalf("error removing: %v", err)
	}
	// the store still holds all nine docs' bytes; only the first 24
	// bytes (six docs, truncated after the Remove) remain useful
	expectedDoc := "MQo=\nMgo=\nMwo=\nNAo=\nNQo=\nNgo="
	idx, err := buf.Close()
	if err != nil ||
		idx != 24 ||
		buf.Cached() != 0 ||
		backingStore.Contents() != expectedDoc {
		t.Fatalf("error closing: %v", err)
	}
}
// TestInitialize verifies the constructor prefills the cache from a
// pre-populated backing store, newest documents first.
// BUG FIX: error format verb corrected from %e to %v throughout.
func TestInitialize(t *testing.T) {
	backingStore := NewReadWriteSeekString()
	backingStore.Write([]byte("MQo=\nMgo=\nMwo=\nNAo=\nNQo=\nNgo="))
	buf, e := NewDocumentBuffer(3, &backingStore)
	if e != nil {
		t.Fatalf("error making documentbuffer: %v", e)
	}
	// test cached
	if buf.Cached() != 3 {
		t.Fatalf("expected 3 docs in cache")
	}
	// read all docs
	docs, err := buf.Read(6)
	if err != nil ||
		len(docs) != 6 ||
		docs[0] != "6\n" ||
		docs[1] != "5\n" ||
		docs[2] != "4\n" ||
		docs[3] != "3\n" ||
		docs[4] != "2\n" ||
		docs[5] != "1\n" {
		t.Fatalf("error reading: %v", err)
	}
}

View file

@ -0,0 +1,100 @@
package docbuf
import (
"errors"
"io"
"fmt"
)
/* WARNING:
 * This code is meant to assist with testing and mock ups.
 * It is not only not designed to any rigorous standards,
 * but additionally does not offer any benefit over a static
 * in memory single layer cache.
 */

// ReadWriteSeekString is an in-memory io.ReadWriteSeeker backed by a
// plain string, provided for testing and mock-up purposes only.
type ReadWriteSeekString struct {
	inner  string // the backing data
	cursor int    // current read/write offset into inner
}
// NewReadWriteSeekString returns an empty in-memory stream with the
// cursor positioned at offset zero.
func NewReadWriteSeekString() ReadWriteSeekString {
	// the zero value is exactly the empty stream we want
	var s ReadWriteSeekString
	return s
}
/* Read copies bytes from the internal string into buf starting at
 * the cursor and advances the cursor by the number of bytes copied.
 *
 * BUG FIX: reading with no data left used to return (0, nil), which
 * violates the io.Reader contract ("Implementations ... should
 * return io.EOF") and can spin callers such as io.ReadAll forever.
 * It now returns io.EOF when the cursor is at or past the end and
 * buf is non-empty.
 */
func (s *ReadWriteSeekString) Read(
	buf []byte,
) (int, error) {
	if s.cursor >= len(s.inner) {
		if len(buf) == 0 {
			return 0, nil
		}
		return 0, io.EOF
	}
	n := copy(buf, s.inner[s.cursor:])
	s.cursor += n
	return n, nil
}
/* Write overwrites the internal string with buf starting at the
 * cursor, zero-padding any gap when the cursor was seeked past the
 * end of the data, and advances the cursor past the written bytes.
 *
 * BUG FIXES: both boundary computations were off by one. The tail
 * condition `cursor+len(buf) < len(inner)-1` silently dropped the
 * final byte of the string whenever a write ended exactly one byte
 * short of the end, and the pad delta `cursor-(len(inner)-1)`
 * appended one more NUL than the gap required.
 */
func (s *ReadWriteSeekString) Write(
	buf []byte,
) (int, error) {
	// pad out to the cursor if it sits past the end of the data
	for i := s.cursor - len(s.inner); i > 0; i-- {
		s.inner += "\x00"
	}
	head := s.inner[:s.cursor]
	tail := ""
	if end := s.cursor + len(buf); end < len(s.inner) {
		tail = s.inner[end:]
	}
	s.inner = head + string(buf) + tail
	s.cursor += len(buf)
	return len(buf), nil
}
/* Seek repositions the cursor per the io.Seeker contract. Seeking
 * past the end is allowed; a negative resulting offset or an unknown
 * whence is an error and leaves the cursor untouched.
 */
func (s *ReadWriteSeekString) Seek(
	offset int64,
	whence int,
) (int64, error) {
	var base int64
	switch whence {
	case io.SeekStart:
		base = 0
	case io.SeekCurrent:
		base = int64(s.cursor)
	case io.SeekEnd:
		base = int64(len(s.inner))
	default:
		return int64(s.cursor),
			errors.New("invalid whence value")
	}
	next := base + offset
	if next < 0 {
		return int64(s.cursor),
			fmt.Errorf("seek index (%d) is negative", next)
	}
	s.cursor = int(next)
	return next, nil
}
// Contents exposes the full backing string for test assertions.
func (s *ReadWriteSeekString) Contents() string {
	return s.inner
}
// Cursor exposes the current read/write offset for test assertions.
func (s *ReadWriteSeekString) Cursor() int {
	return s.cursor
}

View file

@ -0,0 +1,165 @@
package docbuf
import (
"io"
"testing"
)
// SetupTestingBuffer returns a stream pre-loaded with "test" and the
// cursor left at the end of that data.
// BUG FIX: error format verb corrected from %e to %v.
func SetupTestingBuffer(t *testing.T) ReadWriteSeekString {
	str := NewReadWriteSeekString()
	n, e := str.Write([]byte("test"))
	if n != 4 || e != nil {
		t.Fatalf("Failed to write to buffer: %v", e)
	}
	return str
}
// can it read
func TestRWSSRead(t *testing.T) {
b := make([]byte, 4)
buf := SetupTestingBuffer(t)
c, err := buf.Seek(0, io.SeekStart)
if err != nil || c != 0 {
t.Fatalf("seek failed: %e", err)
}
n, err := buf.Read(b)
if n != 4 || err != nil || string(b) != "test" {
t.Fatalf("read failed: %e", err)
}
m, err := buf.Seek(-3, io.SeekEnd)
if err != nil || m != 1 {
t.Fatalf("seek failed: %e", err)
}
b = make([]byte, 4)
l, err := buf.Read(b)
if l != 3 || err != nil || string(b[:3]) != "est" {
t.Fatalf("read failed: %e", err)
}
k, err := buf.Seek(0, io.SeekStart)
if k != 0 || err != nil {
t.Fatalf("seek failed: %e", err)
}
b = make([]byte, 3)
j, err := buf.Read(b)
if j != 3 || err != nil || string(b) != "tes" {
t.Fatalf("read failed: %e", err)
}
b = make([]byte, 1)
i, err := buf.Read(b)
if i != 1 || err != nil || string(b) != "t" {
t.Fatalf("read failed: %e", err)
}
}
// can it write
func TestRWSSWrite(t *testing.T) {
buf := SetupTestingBuffer(t)
if buf.Contents() != "test" || buf.Cursor() != 4 {
t.Fatalf("write failed: %s", buf.Contents())
}
m, err := buf.Write([]byte("test2"))
if m != 5 ||
err != nil ||
buf.Contents() != "testtest2" ||
buf.Cursor() != 9 {
t.Fatalf("write failed: %e", err)
}
n, err := buf.Seek(2, io.SeekStart)
if n != 2 || err != nil {
t.Fatalf("seek failed: %e", err)
}
o, err := buf.Write([]byte("one"))
if o != 3 ||
err != nil ||
buf.Contents() != "teoneest2" ||
buf.Cursor() != 5 {
t.Fatalf("write failed: %e", err)
}
p, err := buf.Seek(0, io.SeekEnd)
if p != 9 || err != nil {
t.Fatalf("seek (%d) failed: %e", p, err)
}
q, err := buf.Write([]byte("two"))
if q != 3 ||
err != nil ||
buf.Contents() != "teoneest2two" ||
buf.Cursor() != 12 {
t.Fatalf("write failed: %e", err)
}
}
// if it seeks can it read from new position
// if it seeks can it write to new position
func TestRWSSSeek(t *testing.T) {
buf := SetupTestingBuffer(t)
if n, err := buf.Seek(0, io.SeekStart);
n != 0 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(3, io.SeekStart);
n != 3 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-1, io.SeekStart);
n != 3 ||
err == nil ||
err.Error() != "seek index (-1) is negative" {
t.Fatalf("seek should have failed but didnt: %e", err)
}
if n, err := buf.Seek(0, io.SeekCurrent);
n != 3 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-2, io.SeekEnd);
n != 2 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-1, io.SeekCurrent);
n != 1 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-2, io.SeekCurrent);
n != 1 ||
err == nil ||
err.Error() != "seek index (-1) is negative" {
t.Fatalf("seek should have failed but didnt: %e", err)
}
if n, err := buf.Seek(-1, io.SeekEnd);
n != 3 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-5, io.SeekEnd);
n != 3 ||
err == nil ||
err.Error() != "seek index (-1) is negative" {
t.Fatalf("seek should have failed but didnt: %e", err)
}
}