put docbuf into an external pkg

Signed-off-by: Ava Affine <ava@sunnypup.io>
This commit is contained in:
Ava Apples Affine 2024-12-05 17:06:54 -08:00
parent 41c420ebcc
commit 0dae469cce
7 changed files with 3 additions and 3 deletions

pkg/docbuf/docbuf.go (new file, 459 lines)
@@ -0,0 +1,459 @@
package docbuf
import (
"encoding/base64"
"errors"
"io"
)
const (
DemoteFromEmptyCacheError = "Can't demote from empty cache"
PromoteToFullCacheError = "Can't promote to full cache"
WriteEmptyDocumentError = "Not writing empty document to store"
PartialWriteError = "Couldn't complete document write"
BadReadRequestError = "Can't read fewer than one document"
BadReadLengthError = "Unexpected read length"
PopFromEmptyBufferError = "Can't pop from empty buffer"
RemoveOverCapacityError = "Not enough documents in buffer to remove"
)
type DocumentBuffer interface {
/* Push
* Add a document to the buffer.
* If the cache is full, the oldest document is written to the backing writer.
*/
Push(string) error
/* Pop
* removes a document from the cache and pulls a document back
* out of the backing ReaderWriter.
*/
Pop() (string, error)
/* Remove
* removes the N newest documents, in order of newest to oldest.
* backfills cache by pulling documents back out of backing
* ReaderWriter.
*/
Remove(int) ([]string, error)
/* Flush
* empties the cache by placing all cached documents into the backing
* ReaderWriter.
*/
Flush() error
/* Peek
* returns the most recent document from either the cache or the backing ReaderWriter without removing it
*/
Peek() (string, error)
/* Read
* returns N documents from the cache and backing store without
* removing them from either data structure.
*/
Read(int) ([]string, error)
/* Apply
* Takes a callable/closure/function that accepts a string (document)
* and returns a bool. Apply returns an error, or nil.
* Apply will begin applying this callable to each doc from most recent
* to least recent so long as the callable continues to return true.
* If the callable returns false, Apply ceases iteration and returns.
*/
Apply(func(string) bool) error
/* Close
* flushes and then prepares backing store for closure
*/
Close() (int64, error)
/* Cached
* returns current length of cache
*/
Cached() int
}
type DocBuf struct {
cache []string
store io.ReadWriteSeeker
cacheSize int
endIndex int64
}
// put an element from cache into store
func (b *DocBuf) demote() error {
if len(b.cache) < 1 {
return errors.New(DemoteFromEmptyCacheError)
}
demoted := b.cache[len(b.cache)-1]
b.cache = b.cache[:len(b.cache)-1]
return b.writeToBackingStore(demoted)
}
/* bounds here refers to checking the capacity of the internal memory cache
* Remove() includes a use case where a caller may want to evict more documents
* than the internal memory cache can possibly contain, in which case we use
* promote and turn off bounds checking in order to overpopulate memory cache
*/
func (b *DocBuf) promote(bounds bool) error {
if bounds && len(b.cache) >= b.cacheSize {
return errors.New(PromoteToFullCacheError)
}
doc, err := b.readDocumentsFromDisk(1, true, false)
if err != nil {
return err
}
if len(doc) > 0 {
b.cache = append(b.cache, doc[0])
}
return nil
}
// encode as base64 so as to avoid needing to escape things
// then documents can be separated by newlines
func (b *DocBuf) writeToBackingStore(doc string) error {
if len(doc) == 0 {
return errors.New(WriteEmptyDocumentError)
}
str := base64.StdEncoding.EncodeToString([]byte(doc))
// seek to end index
c, err := b.store.Seek(b.endIndex, io.SeekStart)
if err != nil {
return err
}
// separate documents with a newline
if c > 0 {
str = "\n" + str
}
n, err := b.store.Write([]byte(str))
// a short write is measured against the encoded string actually written
if err != nil || n < len(str) {
if n < len(str) {
return errors.Join(
errors.New(PartialWriteError),
err)
}
return err
}
b.endIndex += int64(n)
return nil
}
func (b *DocBuf) readDocumentsFromDisk(
count int,
truncate bool,
continues bool,
) ([]string, error) {
docs := []string{}
cursor := int64(0)
var err error
if count < 1 {
return docs, errors.New(BadReadRequestError)
}
if continues {
cursor, err = b.store.Seek(0, io.SeekCurrent)
if b.endIndex == 0 || err != nil || cursor == 0 {
return docs, err
}
} else {
// catch the case where the store is empty
cursor, err = b.store.Seek(0, io.SeekEnd)
if b.endIndex == 0 || err != nil || cursor == 0 {
return docs, err
}
// self repair?
if b.endIndex > cursor {
b.endIndex = cursor
}
cursor = b.endIndex - 1
}
for len(docs) < count && cursor >= 0 {
doc := ""
char := make([]byte, 1)
char[0] = 0
// read bytes backwards from file
for cursor >= 0 {
// set cursor
if _, err := b.store.Seek(cursor, io.SeekStart); err != nil {
return docs, err
}
// read one byte
n, err := b.store.Read(char)
if err != nil {
return docs, err
}
if n != 1 {
return docs, errors.New(BadReadLengthError)
}
// break on newline
if char[0] == '\n' {
break
}
cursor -= 1
doc = string(char) + doc
}
/* decode the document and append it to docs.
* each read stops on the newline, so make sure
* we don't account for an empty document
*/
if len(doc) > 0 && doc != "\n" {
str, err := base64.StdEncoding.DecodeString(doc)
if err != nil {
return docs, err
}
doc = string(str)
docs = append(docs, doc)
}
if len(docs) < count {
cursor -= 1
}
}
if truncate {
if cursor < 0 {
cursor = 0
}
b.endIndex = cursor
}
return docs, nil
}
/* WARNING: this constructor will promote items
* from storage, resulting in a modified backing store
* as well as a prefilled cache.
*/
func NewDocumentBuffer(
cacheSize int,
backingStore io.ReadWriteSeeker,
) (DocumentBuffer, error) {
newBuf := DocBuf {
cache: make([]string, 0, cacheSize),
store: backingStore,
cacheSize: cacheSize,
}
c, err := backingStore.Seek(0, io.SeekEnd)
newBuf.endIndex = c
if err != nil {
return &newBuf, err
}
// prefill cache from the backing store (promotion errors are ignored here)
for range cacheSize {
_ = newBuf.promote(true)
}
return &newBuf, nil
}
/* Push
* Add a document to the buffer.
* If the cache is full, the oldest document is written to the backing writer.
*/
func (b *DocBuf) Push(doc string) error {
if len(b.cache) >= b.cacheSize {
if err := b.demote(); err != nil {
return err
}
}
tc := b.cache
b.cache = append(make([]string, 0, b.cacheSize), doc)
b.cache = append(b.cache, tc...)
return nil
}
/* Pop
* removes a document from the cache and pulls a document back
* out of the backing ReaderWriter.
*/
func (b *DocBuf) Pop() (string, error) {
if len(b.cache) < 1 {
if err := b.promote(true); err != nil {
return "", err
}
}
if len(b.cache) < 1 {
return "", errors.New(PopFromEmptyBufferError)
}
candidate := b.cache[0]
b.cache = b.cache[1:]
e := b.promote(true)
return candidate, e
}
/* Remove
* removes N newest documents in order of newest to oldest.
* backfills cache by pulling documents back out of backing
* ReaderWriter (again, newest first).
*/
func (b *DocBuf) Remove(count int) ([]string, error) {
// promote enough documents from the store to satisfy the request
delta := count - len(b.cache)
if delta > 0 {
for range delta {
if err := b.promote(false); err != nil {
return []string{}, err
}
}
}
if count > len(b.cache) {
tmp := b.cache
b.cache = make([]string, 0, b.cacheSize)
return tmp, errors.New(RemoveOverCapacityError)
}
candidates := b.cache[:count]
b.cache = b.cache[count:]
// refill cache
delta = b.cacheSize - len(b.cache)
if delta > 0 {
for range delta {
if err := b.promote(true); err != nil {
return []string{}, err
}
}
}
return candidates, nil
}
/* Flush
* empties the cache by placing all cached documents into the backing
* ReaderWriter.
*/
func (b *DocBuf) Flush() error {
for range len(b.cache) {
if err := b.demote(); err != nil {
return err
}
}
return nil
}
/* Peek
* returns the most recent document from either the cache or the backing ReaderWriter without removing it
*/
func (b *DocBuf) Peek() (string, error) {
if len(b.cache) > 0 {
return b.cache[0], nil
} else {
d, e := b.readDocumentsFromDisk(1, false, false)
if len(d) < 1 {
return "", e
}
return d[0], e
}
}
/* Read
* returns N documents from the cache and backing store without
* removing them from either data structure.
*/
func (b *DocBuf) Read(count int) ([]string, error) {
// copy at most count documents so appending documents read from
// storage cannot clobber the cache's backing array
n := min(count, len(b.cache))
candidates := append(make([]string, 0, count), b.cache[:n]...)
delta := count - len(b.cache)
if delta > 0 {
fromStorage, err := b.readDocumentsFromDisk(delta, false, false)
if err != nil {
return candidates, err
}
candidates = append(candidates, fromStorage...)
}
return candidates, nil
}
/* Apply
* Takes a callable/closure/function that accepts a string (document)
* and returns a bool. Apply returns an error, or nil.
* Apply will begin applying this callable to each doc from most recent
* to least recent so long as the callable continues to return true.
* If the callable returns false, Apply ceases iteration and returns.
*/
func (b *DocBuf) Apply(f func(string) bool) error {
// iterate over internal cache applying function
for _, i := range b.cache {
if !f(i) {
return nil
}
}
// begin iterating with readDocumentsFromDisk
first := true
flag := true
for flag {
doc, err := b.readDocumentsFromDisk(1, false, !first)
if err != nil {
return err
}
if len(doc) == 0 {
return nil
} else if len(doc) != 1 {
return errors.New("improper read from buffer")
}
first = false
flag = f(doc[0])
if flag {
var c, d int64
var err error
// since continues is set we need to bring the cursor back one byte
if c, err = b.store.Seek(0, io.SeekCurrent); err != nil {
return err
// don't seek earlier than 0
} else if c < 1 {
break
}
if d, err = b.store.Seek(-1, io.SeekCurrent); err != nil {
return err
}
if c == d {
return errors.New("Seek failure!")
}
}
}
return nil
}
/* Close
* flushes and then prepares backing store for closure.
* returns the end index into the underlying stream...
* it is the caller's responsibility to truncate.
*/
func (b *DocBuf) Close() (int64, error) {
if err := b.Flush(); err != nil {
return b.endIndex, err
}
return b.endIndex, nil
}
/* Cached
* returns current length of cache
*/
func (b *DocBuf) Cached() int {
return len(b.cache)
}
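
A minimal usage sketch (not part of this commit) showing how a caller might drive the DocumentBuffer interface above, backed by the in-package ReadWriteSeekString mock; the Example name and the "alpha"/"beta"/"gamma" documents are illustrative only. With a cache size of 2, the third Push demotes the oldest document to the backing store, and Apply then walks all three documents from newest to oldest.

package docbuf

import "fmt"

// ExampleDocumentBuffer is a sketch that could live in a _test.go file
// alongside the package; it is not part of the commit.
func ExampleDocumentBuffer() {
	store := NewReadWriteSeekString()
	// cache holds two documents; older ones spill into the backing store
	buf, err := NewDocumentBuffer(2, &store)
	if err != nil {
		panic(err)
	}
	for _, d := range []string{"alpha\n", "beta\n", "gamma\n"} {
		if err := buf.Push(d); err != nil {
			panic(err)
		}
	}
	// "alpha\n" no longer fits in the cache, so it was demoted to the store
	fmt.Println(buf.Cached())     // 2
	fmt.Println(store.Contents()) // "YWxwaGEK", base64 of "alpha\n"
	// Apply visits documents newest to oldest until the callback returns false
	err = buf.Apply(func(doc string) bool {
		fmt.Print(doc) // gamma, beta, alpha, one per line
		return true
	})
	if err != nil {
		panic(err)
	}
}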

pkg/docbuf/docbuf_test.go (new file, 337 lines)
@@ -0,0 +1,337 @@
package docbuf
import (
"testing"
"fmt"
)
func docu(ctr *int) string {
// we add a newline to try to trick the docbuf
// into thinking there is an extra doc here,
// but there isn't one.
s := fmt.Sprintf("%d\n", *ctr)
(*ctr) += 1
return s
}
func TestPushPop(t *testing.T) {
docCtr := 1
backingStore := NewReadWriteSeekString()
buf, e := NewDocumentBuffer(3, &backingStore)
if e != nil {
t.Fatalf("error making documentbuffer: %e", e)
}
// does push add 1 to cache
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
t.Fatalf("error pushing: %e", err)
}
// does push past cache demote first document
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || backingStore.Cursor() == 0 {
t.Fatalf("error pushing: %e", err)
}
if backingStore.Contents() != "MQo=" {
t.Fatalf("expected oldest doc to be in store, got %s",
backingStore.Contents())
}
// does pop promote document from cache
doc, err := buf.Pop()
if err != nil || doc != "4\n" {
t.Fatalf("did not get expected doc from cache: %s (%e)", doc, err)
}
if buf.Cached() != 3 {
t.Fatalf("doc buffer did not promote: %d", buf.Cached())
}
// does pop past empty throw the right error
doc, err = buf.Pop()
if err != nil || doc != "3\n" {
t.Fatalf("did not get expected doc from cache: %s (%e)", doc, err)
}
doc, err = buf.Pop()
if err != nil || doc != "2\n" {
t.Fatalf("did not get expected doc from cache: %s (%e)", doc, err)
}
doc, err = buf.Pop()
if err != nil || doc != "1\n" {
t.Logf("bs: %s", backingStore.Contents())
t.Fatalf("did not get expected doc from cache: %s (%e)", doc, err)
}
doc, err = buf.Pop()
if err == nil ||
doc != "" ||
err.Error() != "Can't pop from empty buffer" {
t.Fatalf("did not get expected doc from cache: %s", doc)
}
}
func TestRemove(t *testing.T) {
docCtr := 1
backingStore := NewReadWriteSeekString()
buf, e := NewDocumentBuffer(3, &backingStore)
if e != nil {
t.Fatalf("error making documentbuffer: %e", e)
}
// setup test data
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil ||
buf.Cached() != 3 || backingStore.Contents() != "MQo=" {
t.Fatalf("error pushing: %e", err)
}
// tests
docs, err := buf.Remove(2)
if err != nil ||
len(docs) != 2 ||
docs[0] != "4\n" ||
docs[1] != "3\n" ||
buf.Cached() != 2 {
t.Fatalf("error removing: %e", err)
}
for range 5 {
if err := buf.Push(docu(&docCtr)); err != nil {
t.Fatalf("error pushing: %e", err)
}
}
docs, err = buf.Remove(4)
if err != nil ||
len(docs) != 4 ||
docs[0] != "9\n" ||
docs[1] != "8\n" ||
docs[2] != "7\n" ||
docs[3] != "6\n" ||
buf.Cached() != 3 {
t.Fatalf("error removing: %e", err)
}
docs, err = buf.Remove(3)
if err != nil ||
len(docs) != 3 ||
docs[0] != "5\n" ||
docs[1] != "2\n" ||
docs[2] != "1\n" ||
buf.Cached() != 0 {
t.Fatalf("error removing: %e", err)
}
}
func TestFlush(t *testing.T) {
docCtr := 1
expectedDoc := "MQo=\nMgo=\nMwo=\nNAo="
backingStore := NewReadWriteSeekString()
buf, e := NewDocumentBuffer(3, &backingStore)
if e != nil {
t.Fatalf("error making documentbuffer: %e", e)
}
// setup test data
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil ||
buf.Cached() != 3 || backingStore.Contents() != "MQo=" {
t.Fatalf("error pushing: %e", err)
}
// test
if err := buf.Flush(); err != nil {
t.Fatalf("error flushing buffer: %e", err)
}
if backingStore.Contents() != expectedDoc {
t.Fatalf("did not get expected document: %s", backingStore.Contents())
}
}
func TestPeek(t *testing.T) {
docCtr := 1
backingStore := NewReadWriteSeekString()
buf, e := NewDocumentBuffer(3, &backingStore)
if e != nil {
t.Fatalf("error making documentbuffer: %e", e)
}
// setup test data
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
t.Fatalf("error pushing: %e", err)
}
// test
if d, e := buf.Peek(); e != nil ||
d != "1\n" ||
buf.Cached() != 1 {
t.Fatalf("error peeking: %e", e)
}
}
func TestRead(t *testing.T) {
docCtr := 1
backingStore := NewReadWriteSeekString()
buf, e := NewDocumentBuffer(3, &backingStore)
if e != nil {
t.Fatalf("error making documentbuffer: %e", e)
}
// setup test data
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil ||
buf.Cached() != 3 || backingStore.Contents() != "MQo=" {
t.Fatalf("error pushing: %e", err)
}
// test
if docs, err := buf.Read(4); err != nil ||
len(docs) != 4 ||
docs[0] != "4\n" ||
docs[1] != "3\n" ||
docs[2] != "2\n" ||
docs[3] != "1\n" ||
buf.Cached() != 3 ||
backingStore.Contents() != "MQo=" {
t.Fatalf("error reading: %e", err)
}
}
func TestClose(t *testing.T) {
// do pushes and a remove, then Close; assert there is no error
// and that the returned index is where the caller can truncate
docCtr := 1
backingStore := NewReadWriteSeekString()
buf, e := NewDocumentBuffer(3, &backingStore)
if e != nil {
t.Fatalf("error making documentbuffer: %e", e)
}
// setup test data
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 1 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 2 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil || buf.Cached() != 3 {
t.Fatalf("error pushing: %e", err)
}
if err := buf.Push(docu(&docCtr)); err != nil ||
buf.Cached() != 3 || backingStore.Contents() != "MQo=" {
t.Fatalf("error pushing: %e", err)
}
for range 5 {
if err := buf.Push(docu(&docCtr)); err != nil {
t.Fatalf("error pushing: %e", err)
}
}
docs, err := buf.Remove(4)
if err != nil ||
len(docs) != 4 ||
buf.Cached() != 3 {
t.Fatalf("error removing: %e", err)
}
expectedDoc := "MQo=\nMgo=\nMwo=\nNAo=\nNQo=\nNgo="
idx, err := buf.Close()
if err != nil ||
idx != 24 ||
buf.Cached() != 0 ||
backingStore.Contents() != expectedDoc {
t.Fatalf("error closing: %e", err)
}
}
func TestInitialize(t *testing.T) {
backingStore := NewReadWriteSeekString()
backingStore.Write([]byte("MQo=\nMgo=\nMwo=\nNAo=\nNQo=\nNgo="))
buf, e := NewDocumentBuffer(3, &backingStore)
if e != nil {
t.Fatalf("error making documentbuffer: %e", e)
}
// test cached
if buf.Cached() != 3 {
t.Fatalf("expected 3 docs in cache")
}
// read all docs
docs, err := buf.Read(6)
if err != nil ||
len(docs) != 6 ||
docs[0] != "6\n" ||
docs[1] != "5\n" ||
docs[2] != "4\n" ||
docs[3] != "3\n" ||
docs[4] != "2\n" ||
docs[5] != "1\n" {
t.Fatalf("error reading: %e", err)
}
}
func TestApply(t *testing.T) {
backingStore := NewReadWriteSeekString()
backingStore.Write([]byte("MQo=\nMgo=\nMwo=\nNAo=\nNQo=\nNgo="))
buf, e := NewDocumentBuffer(3, &backingStore)
if e != nil {
t.Fatalf("error making documentbuffer: %e", e)
}
// test cached
if buf.Cached() != 3 {
t.Fatalf("expected 3 docs in cache")
}
count := 0
if err := buf.Apply(func(doc string) bool {
count += 1
return true
}); err != nil || count != 6 {
t.Fatalf("error applying: %e", err)
}
count = 0
if err := buf.Apply(func(doc string) bool {
if doc == "2\n" {
return false
}
count += 1
return true
}); err != nil || count != 4 {
t.Fatalf("error applying: %e", err)
}
}
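
The string literals these tests compare against come straight from the storage format: writeToBackingStore base64-encodes each document and separates documents with a newline, so "1\n" becomes "MQo=", "2\n" becomes "Mgo=", and so on. A small sketch with a hypothetical helper, encodeDocs, that reproduces that layout:

package docbuf

import (
	"encoding/base64"
	"strings"
)

// encodeDocs is a hypothetical helper, not part of the commit; it mirrors the
// layout writeToBackingStore produces: one base64-encoded document per line,
// oldest first, separated by '\n'.
//
// encodeDocs([]string{"1\n", "2\n", "3\n", "4\n"}) returns
// "MQo=\nMgo=\nMwo=\nNAo=", exactly the value TestFlush expects.
func encodeDocs(docs []string) string {
	enc := make([]string, 0, len(docs))
	for _, d := range docs {
		enc = append(enc, base64.StdEncoding.EncodeToString([]byte(d)))
	}
	return strings.Join(enc, "\n")
}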


@@ -0,0 +1,100 @@
package docbuf
import (
"errors"
"io"
"fmt"
)
/* WARNING:
* This code is meant to assist with testing and mock-ups.
* It is not designed to any rigorous standard, and it offers
* no benefit over a simple in-memory, single-layer cache.
*/
type ReadWriteSeekString struct {
inner string
cursor int
}
func NewReadWriteSeekString() ReadWriteSeekString {
return ReadWriteSeekString{
inner: "",
cursor: 0,
}
}
func (s *ReadWriteSeekString) Read(
buf []byte,
) (int, error) {
i := 0
for ; i < len(buf); i++ {
if len(s.inner) <= s.cursor {
return i, nil
}
buf[i] = s.inner[s.cursor]
s.cursor += 1
}
return i, nil
}
func (s *ReadWriteSeekString) Write(
buf []byte,
) (int, error) {
// pad with NUL bytes if the cursor was seeked past the end
backfillDelta := s.cursor - len(s.inner)
if backfillDelta > 0 {
for range backfillDelta {
s.inner += "\x00"
}
}
tmpBuf := ""
if s.cursor > 0 {
tmpBuf += s.inner[:s.cursor]
}
tmpBuf += string(buf)
if s.cursor + len(buf) < len(s.inner) {
tmpBuf += s.inner[s.cursor + len(buf):]
}
s.inner = tmpBuf
s.cursor += len(buf)
return len(buf), nil
}
func (s *ReadWriteSeekString) Seek(
offset int64,
whence int,
) (int64, error) {
var tmpCur int64
switch whence {
case io.SeekCurrent:
tmpCur = int64(s.cursor)
case io.SeekEnd:
tmpCur = int64(len(s.inner))
case io.SeekStart:
tmpCur = int64(0)
default:
return int64(s.cursor),
errors.New("invalid whence value")
}
tmpCur += offset
if tmpCur < 0 {
msg := fmt.Sprintf("seek index (%d) is negative", tmpCur)
return int64(s.cursor), errors.New(msg)
}
s.cursor = int(tmpCur)
return tmpCur, nil
}
func (s *ReadWriteSeekString) Contents() string {
return s.inner
}
func (s *ReadWriteSeekString) Cursor() int {
return s.cursor
}
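
A short sketch (not part of the commit) of the whence arithmetic Seek implements: the base offset is 0 for io.SeekStart, the current cursor for io.SeekCurrent, and len(inner) for io.SeekEnd, and any negative result is rejected without moving the cursor. The seekDemo name is illustrative.

package docbuf

import (
	"fmt"
	"io"
)

// seekDemo walks the three whence cases the mock supports.
func seekDemo() {
	s := NewReadWriteSeekString()
	s.Write([]byte("test")) // cursor is now 4

	pos, _ := s.Seek(-3, io.SeekEnd) // len("test") - 3
	fmt.Println(pos)                 // 1

	pos, _ = s.Seek(2, io.SeekCurrent) // 1 + 2
	fmt.Println(pos)                   // 3

	_, err := s.Seek(-1, io.SeekStart) // 0 - 1 is negative
	fmt.Println(err)                   // "seek index (-1) is negative"
}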


@@ -0,0 +1,165 @@
package docbuf
import (
"io"
"testing"
)
func SetupTestingBuffer(t *testing.T) ReadWriteSeekString {
str := NewReadWriteSeekString()
n, e := str.Write([]byte("test"))
if n != 4 || e != nil {
t.Fatalf("Failed to write to buffer: %e", e)
}
return str
}
// can it read
func TestRWSSRead(t *testing.T) {
b := make([]byte, 4)
buf := SetupTestingBuffer(t)
c, err := buf.Seek(0, io.SeekStart)
if err != nil || c != 0 {
t.Fatalf("seek failed: %e", err)
}
n, err := buf.Read(b)
if n != 4 || err != nil || string(b) != "test" {
t.Fatalf("read failed: %e", err)
}
m, err := buf.Seek(-3, io.SeekEnd)
if err != nil || m != 1 {
t.Fatalf("seek failed: %e", err)
}
b = make([]byte, 4)
l, err := buf.Read(b)
if l != 3 || err != nil || string(b[:3]) != "est" {
t.Fatalf("read failed: %e", err)
}
k, err := buf.Seek(0, io.SeekStart)
if k != 0 || err != nil {
t.Fatalf("seek failed: %e", err)
}
b = make([]byte, 3)
j, err := buf.Read(b)
if j != 3 || err != nil || string(b) != "tes" {
t.Fatalf("read failed: %e", err)
}
b = make([]byte, 1)
i, err := buf.Read(b)
if i != 1 || err != nil || string(b) != "t" {
t.Fatalf("read failed: %e", err)
}
}
// can it write
func TestRWSSWrite(t *testing.T) {
buf := SetupTestingBuffer(t)
if buf.Contents() != "test" || buf.Cursor() != 4 {
t.Fatalf("write failed: %s", buf.Contents())
}
m, err := buf.Write([]byte("test2"))
if m != 5 ||
err != nil ||
buf.Contents() != "testtest2" ||
buf.Cursor() != 9 {
t.Fatalf("write failed: %e", err)
}
n, err := buf.Seek(2, io.SeekStart)
if n != 2 || err != nil {
t.Fatalf("seek failed: %e", err)
}
o, err := buf.Write([]byte("one"))
if o != 3 ||
err != nil ||
buf.Contents() != "teoneest2" ||
buf.Cursor() != 5 {
t.Fatalf("write failed: %e", err)
}
p, err := buf.Seek(0, io.SeekEnd)
if p != 9 || err != nil {
t.Fatalf("seek (%d) failed: %e", p, err)
}
q, err := buf.Write([]byte("two"))
if q != 3 ||
err != nil ||
buf.Contents() != "teoneest2two" ||
buf.Cursor() != 12 {
t.Fatalf("write failed: %e", err)
}
}
// if it seeks can it read from new position
// if it seeks can it write to new position
func TestRWSSSeek(t *testing.T) {
buf := SetupTestingBuffer(t)
if n, err := buf.Seek(0, io.SeekStart);
n != 0 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(3, io.SeekStart);
n != 3 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-1, io.SeekStart);
n != 3 ||
err == nil ||
err.Error() != "seek index (-1) is negative" {
t.Fatalf("seek should have failed but didnt: %e", err)
}
if n, err := buf.Seek(0, io.SeekCurrent);
n != 3 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-2, io.SeekEnd);
n != 2 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-1, io.SeekCurrent);
n != 1 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-2, io.SeekCurrent);
n != 1 ||
err == nil ||
err.Error() != "seek index (-1) is negative" {
t.Fatalf("seek should have failed but didnt: %e", err)
}
if n, err := buf.Seek(-1, io.SeekEnd);
n != 3 ||
err != nil {
t.Fatalf("seek failed: %e", err)
}
if n, err := buf.Seek(-5, io.SeekEnd);
n != 3 ||
err == nil ||
err.Error() != "seek index (-1) is negative" {
t.Fatalf("seek should have failed but didnt: %e", err)
}
}
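
Nothing ties DocumentBuffer to the test mock; any io.ReadWriteSeeker can serve as the backing store. A sketch of backing it with an *os.File and honoring the Close contract by truncating at the returned index; the module path example.com/project and the file name docs.buf are assumptions, not part of the commit.

package main

import (
	"log"
	"os"

	// hypothetical import path; adjust to the real module path
	"example.com/project/pkg/docbuf"
)

func main() {
	// an *os.File satisfies io.ReadWriteSeeker, so it can back the buffer
	f, err := os.OpenFile("docs.buf", os.O_RDWR|os.O_CREATE, 0o644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	buf, err := docbuf.NewDocumentBuffer(8, f)
	if err != nil {
		log.Fatal(err)
	}
	if err := buf.Push("hello world\n"); err != nil {
		log.Fatal(err)
	}

	// Close flushes the cache and returns the logical end of the stream;
	// per the Close doc comment, truncating is the caller's job
	end, err := buf.Close()
	if err != nil {
		log.Fatal(err)
	}
	if err := f.Truncate(end); err != nil {
		log.Fatal(err)
	}
}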