From ffaaa66e7a363a8adaa646ef9a6cfb489fef328c Mon Sep 17 00:00:00 2001 From: Janardhan Reddy Date: Mon, 2 Oct 2017 17:47:46 +1100 Subject: [PATCH] Fix all unit tests LSM Get checks in all levels if key is not found in memtable or level 0 to find latest version. Fix parseItem. Breakage caused by how we store key in KVItem. Fix storing/reading readTs from badger head --- doc_test.go | 162 ------------- iterator.go | 64 ++---- kv.go | 15 +- kv_test.go | 544 ++++++++++++++------------------------------ level_handler.go | 12 +- levels.go | 13 +- manifest_test.go | 19 +- skl/skl.go | 8 +- skl/skl_test.go | 54 ++--- table/table_test.go | 17 +- transaction.go | 7 +- transaction_test.go | 18 +- util.go | 2 +- value.go | 2 +- value_test.go | 221 +++++++++--------- y/iterator.go | 4 +- y/iterator_test.go | 6 +- y/y.go | 6 +- 18 files changed, 396 insertions(+), 778 deletions(-) delete mode 100644 doc_test.go diff --git a/doc_test.go b/doc_test.go deleted file mode 100644 index c5726d05e..000000000 --- a/doc_test.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger_test - -import ( - "fmt" - "io/ioutil" - "sync" - - "github.com/dgraph-io/badger" -) - -var d string = "doc" - -func Example() { - opt := badger.DefaultOptions - dir, _ := ioutil.TempDir("", "badger") - opt.Dir = dir - opt.ValueDir = dir - kv, _ := badger.NewKV(&opt) - - key := []byte("hello") - - kv.Set(key, []byte("world"), 0x00) - fmt.Printf("SET %s world\n", key) - - var item badger.KVItem - if err := kv.Get(key, &item); err != nil { - fmt.Printf("Error while getting key: %q", key) - return - } - var val []byte - err := item.Value(func(v []byte) error { - val = make([]byte, len(v)) - copy(val, v) - return nil - }) - if err != nil { - fmt.Printf("Error while getting value for key: %q", key) - return - } - - fmt.Printf("GET %s %s\n", key, val) - - if err := kv.CompareAndSet(key, []byte("venus"), 100); err != nil { - fmt.Println("CAS counter mismatch") - } else { - if err = kv.Get(key, &item); err != nil { - fmt.Printf("Error while getting key: %q", key) - } - - err := item.Value(func(v []byte) error { - val = make([]byte, len(v)) - copy(val, v) - return nil - }) - - if err != nil { - fmt.Printf("Error while getting value for key: %q", key) - return - } - - fmt.Printf("Set to %s\n", val) - } - if err := kv.CompareAndSet(key, []byte("mars"), item.Counter()); err == nil { - fmt.Println("Set to mars") - } else { - fmt.Printf("Unsuccessful write. 
Got error: %v\n", err) - } - - // Output: - // SET hello world - // GET hello world - // CAS counter mismatch - // Set to mars -} - -// func ExampleNewIterator() { -// opt := DefaultOptions -// opt.Dir = "/tmp/badger" -// kv := NewKV(&opt) - -// itrOpt := IteratorOptions{ -// PrefetchSize: 1000, -// PrefetchValues: true, -// Reverse: false, -// } -// itr := kv.NewIterator(itrOpt) -// for itr.Rewind(); itr.Valid(); itr.Next() { -// item := itr.Item() -// item.Key() -// var val []byte -// err = item.Value(func(v []byte) { -// val = make([]byte, len(v)) -// copy(val, v) -// }) -// } -// } - -func ExampleKV_BatchSetAsync() { - opt := badger.DefaultOptions - dir, _ := ioutil.TempDir("", "badger") - opt.Dir = dir - opt.SyncWrites = true - opt.ValueDir = dir - kv, _ := badger.NewKV(&opt) - wg := new(sync.WaitGroup) - wb := make([]*badger.Entry, 0, 100) - - wg.Add(1) - // Async writes would be useful if you want to write some key-value pairs without waiting - // for them to be complete and perform some cleanup when they are written. - - // In Dgraph we keep on flushing posting lists periodically to badger. We do it an async - // manner and provide a callback to it which can do the cleanup when the writes are done. - f := func(err error) { - defer wg.Done() - if err != nil { - // At this point you can retry writing keys or send error over a channel to handle - // in some other goroutine. - fmt.Printf("Got error: %+v\n", err) - } - - // Check for error in entries which could be non-nil if the user supplies a CasCounter. - for _, e := range wb { - if e.Error != nil { - fmt.Printf("Got error: %+v\n", e.Error) - } - } - - // You can do cleanup now. Like deleting keys from cache. - fmt.Println("All async sets complete.") - } - - for i := 0; i < 100; i++ { - k := []byte(fmt.Sprintf("%09d", i)) - wb = append(wb, &badger.Entry{ - Key: k, - Value: k, - }) - } - kv.BatchSetAsync(wb, f) - fmt.Println("Finished writing keys to badger.") - wg.Wait() - - // Output: Finished writing keys to badger. - // All async sets complete. -} diff --git a/iterator.go b/iterator.go index b0411e190..6077f6656 100644 --- a/iterator.go +++ b/iterator.go @@ -18,6 +18,7 @@ package badger import ( "bytes" + "fmt" "sync" "github.com/dgraph-io/badger/y" @@ -45,11 +46,22 @@ type KVItem struct { val []byte slice *y.Slice // Used only during prefetching. next *KVItem + version uint64 +} + +func (item *KVItem) ToString() string { + return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta) + } // Key returns the key. Remember to copy if you need to access it outside the iteration loop. func (item *KVItem) Key() []byte { - return y.ParseKey(item.key) + return item.key +} + +// Version returns the commit timestamp of the item. +func (item *KVItem) Version() uint64 { + return item.version } // Value retrieves the value of the item from the value log. It calls the @@ -115,11 +127,6 @@ func (item *KVItem) EstimatedSize() int64 { return int64(vp.Len) // includes key length. } -// Version returns the commit timestamp of the item. -func (item *KVItem) Version() uint64 { - return y.ParseTs(item.key) -} - // UserMeta returns the userMeta set by the user. Typically, this byte, optionally set by the user // is used to interpret the value. func (item *KVItem) UserMeta() byte { @@ -303,7 +310,8 @@ FILL: // Reverse direction. 
nextTs := y.ParseTs(mi.Key()) - if nextTs <= it.readTs && y.SameKey(mi.Key(), item.key) { + mik := y.ParseKey(mi.Key()) + if nextTs <= it.readTs && bytes.Compare(mik, item.key) == 0 { // This is a valid potential candidate. goto FILL } @@ -320,7 +328,10 @@ func (it *Iterator) fill(item *KVItem) { vs := it.iitr.Value() item.meta = vs.Meta item.userMeta = vs.UserMeta - item.key = y.Safecopy(item.key, it.iitr.Key()) + + item.version = y.ParseTs(it.iitr.Key()) + item.key = y.Safecopy(item.key, y.ParseKey(it.iitr.Key())) + item.vptr = y.Safecopy(item.vptr, vs.Value) item.val = nil if it.opt.PrefetchValues { @@ -379,40 +390,3 @@ func (it *Iterator) Rewind() { it.iitr.Rewind() it.prefetch() } - -// NewIterator returns a new iterator. Depending upon the options, either only keys, or both -// key-value pairs would be fetched. The keys are returned in lexicographically sorted order. -// Usage: -// opt := badger.DefaultIteratorOptions -// itr := kv.NewIterator(opt) -// for itr.Rewind(); itr.Valid(); itr.Next() { -// item := itr.Item() -// key := item.Key() -// var val []byte -// err = item.Value(func(v []byte) { -// val = make([]byte, len(v)) -// copy(val, v) -// }) // This could block while value is fetched from value log. -// // For key only iteration, set opt.PrefetchValues to false, and don't call -// // item.Value(func(v []byte)). -// -// // Remember that both key, val would become invalid in the next iteration of the loop. -// // So, if you need access to them outside, copy them or parse them. -// } -// itr.Close() -// TODO: Remove this. -func (s *KV) NewIterator(opt IteratorOptions) *Iterator { - tables, decr := s.getMemTables() - defer decr() - s.vlog.incrIteratorCount() - var iters []y.Iterator - for i := 0; i < len(tables); i++ { - iters = append(iters, tables[i].NewUniIterator(opt.Reverse)) - } - iters = s.lc.appendIterators(iters, opt.Reverse) // This will increment references. - res := &Iterator{ - iitr: y.NewMergeIterator(iters, opt.Reverse), - opt: opt, - } - return res -} diff --git a/kv.go b/kv.go index 4c88a613e..6c79a3e5b 100644 --- a/kv.go +++ b/kv.go @@ -20,6 +20,7 @@ import ( "container/heap" "expvar" "log" + "math" "os" "path/filepath" "strconv" @@ -177,7 +178,9 @@ func NewKV(optParam *Options) (out *KV, err error) { return nil, err } - vs, err := out.get(head) + headKey := y.KeyWithTs(head, math.MaxUint64) + // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key + vs, err := out.get(headKey) if err != nil { return nil, errors.Wrap(err, "Retrieving head") } @@ -510,6 +513,9 @@ func (s *KV) writeToLSM(b *request) error { } for i, entry := range b.Entries { + if entry.Meta&BitFinTxn != 0 { + continue + } if s.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case. s.mt.Put(entry.Key, y.ValueStruct{ @@ -673,8 +679,9 @@ func (s *KV) batchSet(entries []*Entry) error { req.Wg.Wait() req.Entries = nil + err = req.Err requestPool.Put(req) - return req.Err + return err } // batchSetAsync is the asynchronous version of batchSet. 
It accepts a callback @@ -765,7 +772,8 @@ func (s *KV) flushMemtable(lc *y.Closer) error { return nil } - if !ft.vptr.IsZero() { + if !ft.mt.Empty() { + // Store badger head even if vptr is zero, need it for readTs s.elog.Printf("Storing offset: %+v\n", ft.vptr) offset := make([]byte, vptrSize) ft.vptr.Encode(offset) @@ -775,6 +783,7 @@ func (s *KV) flushMemtable(lc *y.Closer) error { headTs := y.KeyWithTs(head, s.txnState.commitTs()) ft.mt.Put(headTs, y.ValueStruct{Value: offset}) } + fileID := s.lc.reserveFileID() fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.opt.Dir), true) if err != nil { diff --git a/kv_test.go b/kv_test.go index b75655aae..6f69a58ff 100644 --- a/kv_test.go +++ b/kv_test.go @@ -18,17 +18,17 @@ package badger import ( "bytes" - "crypto/rand" "fmt" "io/ioutil" + "math" + "math/rand" "os" "regexp" "sort" "sync" "testing" - "time" - "github.com/stretchr/testify/assert" + "github.com/dgraph-io/badger/y" "github.com/stretchr/testify/require" ) @@ -58,6 +58,26 @@ func getItemValue(t *testing.T, item *KVItem) (val []byte) { return val } +func txnSet(t *testing.T, kv *KV, key []byte, val []byte, meta byte) { + txn, err := kv.NewTransaction(true) + require.NoError(t, err) + require.NoError(t, txn.Set(key, val, meta)) + require.NoError(t, txn.Commit(nil)) +} + +func txnDelete(t *testing.T, kv *KV, key []byte) { + txn, err := kv.NewTransaction(true) + require.NoError(t, err) + require.NoError(t, txn.Delete(key)) + require.NoError(t, txn.Commit(nil)) +} + +func txnGet(t *testing.T, kv *KV, key []byte) (KVItem, error) { + txn, err := kv.NewTransaction(false) + require.NoError(t, err) + return txn.Get(key) +} + func TestWrite(t *testing.T) { dir, err := ioutil.TempDir("", "badger") require.NoError(t, err) @@ -66,16 +86,8 @@ func TestWrite(t *testing.T) { require.NoError(t, err) defer kv.Close() - var entries []*Entry for i := 0; i < 100; i++ { - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("key%d", i)), - Value: []byte(fmt.Sprintf("val%d", i)), - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) + txnSet(t, kv, []byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("val%d", i)), 0x00) } } @@ -95,7 +107,7 @@ func TestConcurrentWrite(t *testing.T) { go func(i int) { defer wg.Done() for j := 0; j < m; j++ { - kv.Set([]byte(fmt.Sprintf("k%05d_%08d", i, j)), + txnSet(t, kv, []byte(fmt.Sprintf("k%05d_%08d", i, j)), []byte(fmt.Sprintf("v%05d_%08d", i, j)), byte(j%127)) } }(i) @@ -109,7 +121,8 @@ func TestConcurrentWrite(t *testing.T) { opt.PrefetchSize = 10 opt.PrefetchValues = true - it := kv.NewIterator(opt) + txn, err := kv.NewTransaction(true) + it := txn.NewIterator(opt) defer it.Close() var i, j int for it.Rewind(); it.Valid(); it.Next() { @@ -133,98 +146,6 @@ func TestConcurrentWrite(t *testing.T) { require.EqualValues(t, 0, j) } -func TestCAS(t *testing.T) { - dir, err := ioutil.TempDir("", "badger") - require.NoError(t, err) - defer os.RemoveAll(dir) - kv, _ := NewKV(getTestOptions(dir)) - defer kv.Close() - - var entries []*Entry - for i := 0; i < 100; i++ { - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("key%d", i)), - Value: []byte(fmt.Sprintf("val%d", i)), - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) - } - - time.Sleep(time.Second) - - var item KVItem - for i := 0; i < 100; i++ { - k := []byte(fmt.Sprintf("key%d", i)) - v := []byte(fmt.Sprintf("val%d", i)) - if err := kv.Get(k, &item); err != nil { 
- t.Error(err) - } - require.EqualValues(t, v, getItemValue(t, &item)) - require.EqualValues(t, entries[i].casCounter, item.Counter()) - } - - for i := 0; i < 100; i++ { - k := []byte(fmt.Sprintf("key%d", i)) - v := []byte(fmt.Sprintf("zzz%d", i)) - cc := entries[i].casCounter - if cc == 5 { - cc = 6 - } else { - cc = 5 - } - require.Error(t, kv.CompareAndSet(k, v, cc)) - } - time.Sleep(time.Second) - for i := 0; i < 100; i++ { - k := []byte(fmt.Sprintf("key%d", i)) - v := []byte(fmt.Sprintf("val%d", i)) - if err := kv.Get(k, &item); err != nil { - t.Error(err) - } - require.EqualValues(t, v, getItemValue(t, &item)) - require.EqualValues(t, entries[i].casCounter, item.Counter()) - } - - for i := 0; i < 100; i++ { - k := []byte(fmt.Sprintf("key%d", i)) - cc := entries[i].casCounter - if cc == 5 { - cc = 6 - } else { - cc = 5 - } - require.Error(t, kv.CompareAndDelete(k, cc)) - } - time.Sleep(time.Second) - for i := 0; i < 100; i++ { - k := []byte(fmt.Sprintf("key%d", i)) - v := []byte(fmt.Sprintf("val%d", i)) - if err := kv.Get(k, &item); err != nil { - t.Error(err) - } - require.EqualValues(t, v, getItemValue(t, &item)) - require.EqualValues(t, entries[i].casCounter, item.Counter()) - } - - for i := 0; i < 100; i++ { - k := []byte(fmt.Sprintf("key%d", i)) - v := []byte(fmt.Sprintf("zzz%d", i)) - require.NoError(t, kv.CompareAndSet(k, v, entries[i].casCounter)) - } - time.Sleep(time.Second) - for i := 0; i < 100; i++ { - k := []byte(fmt.Sprintf("key%d", i)) - v := []byte(fmt.Sprintf("zzz%d", i)) // Value should be changed. - if err := kv.Get(k, &item); err != nil { - t.Error(err) - } - require.EqualValues(t, v, getItemValue(t, &item)) - require.True(t, item.Counter() != 0) - } -} - func TestGet(t *testing.T) { dir, err := ioutil.TempDir("", "badger") require.NoError(t, err) @@ -234,47 +155,34 @@ func TestGet(t *testing.T) { t.Error(err) } defer kv.Close() + txnSet(t, kv, []byte("key1"), []byte("val1"), 0x08) - var item KVItem - kv.Set([]byte("key1"), []byte("val1"), 0x08) - - if err := kv.Get([]byte("key1"), &item); err != nil { - t.Error(err) - } + item, err := txnGet(t, kv, []byte("key1")) + require.NoError(t, err) require.EqualValues(t, "val1", getItemValue(t, &item)) require.Equal(t, byte(0x08), item.UserMeta()) - require.True(t, item.Counter() != 0) - kv.Set([]byte("key1"), []byte("val2"), 0x09) - if err := kv.Get([]byte("key1"), &item); err != nil { - t.Error(err) - } + txnSet(t, kv, []byte("key1"), []byte("val2"), 0x09) + item, err = txnGet(t, kv, []byte("key1")) + require.NoError(t, err) require.EqualValues(t, "val2", getItemValue(t, &item)) require.Equal(t, byte(0x09), item.UserMeta()) - require.True(t, item.Counter() != 0) - kv.Delete([]byte("key1")) - if err := kv.Get([]byte("key1"), &item); err != nil { - t.Error(err) - } - require.Nil(t, getItemValue(t, &item)) - require.True(t, item.Counter() != 0) + txnDelete(t, kv, []byte("key1")) + item, err = txnGet(t, kv, []byte("key1")) + require.Equal(t, ErrKeyNotFound, err) - kv.Set([]byte("key1"), []byte("val3"), 0x01) - if err := kv.Get([]byte("key1"), &item); err != nil { - t.Error(err) - } + txnSet(t, kv, []byte("key1"), []byte("val3"), 0x01) + item, err = txnGet(t, kv, []byte("key1")) + require.NoError(t, err) require.EqualValues(t, "val3", getItemValue(t, &item)) require.Equal(t, byte(0x01), item.UserMeta()) - require.True(t, item.Counter() != 0) longVal := make([]byte, 1000) - kv.Set([]byte("key1"), longVal, 0x00) - if err := kv.Get([]byte("key1"), &item); err != nil { - t.Error(err) - } + txnSet(t, kv, []byte("key1"), longVal, 
0x00) + item, err = txnGet(t, kv, []byte("key1")) + require.NoError(t, err) require.EqualValues(t, longVal, getItemValue(t, &item)) - require.True(t, item.Counter() != 0) } func TestExists(t *testing.T) { @@ -288,32 +196,29 @@ func TestExists(t *testing.T) { defer kv.Close() // populate with one entry - err = kv.Set([]byte("key1"), []byte("val1"), 0x00) - require.NoError(t, err) + txnSet(t, kv, []byte("key1"), []byte("val1"), 0x00) tt := []struct { key []byte exists bool - name string }{ { key: []byte("key1"), exists: true, - name: " valid key", }, { key: []byte("non-exits"), exists: false, - name: "non exist key", }, } for _, test := range tt { - t.Run(test.name, func(t *testing.T) { - exists, err := kv.Exists(test.key) - assert.NoError(t, err) - assert.Equal(t, test.exists, exists) - }) + _, err := txnGet(t, kv, test.key) + if test.exists { + require.NoError(t, err) + continue + } + require.Error(t, err) } } @@ -331,68 +236,77 @@ func TestGetMore(t *testing.T) { } defer kv.Close() + data := func(i int) []byte { + return []byte(fmt.Sprintf("%09d", i)) + } // n := 500000 n := 10000 - m := 100 + m := 49 // Increasing would cause ErrTxnTooBig + fmt.Println("writing") for i := 0; i < n; i += m { - if (i % 10000) == 0 { - fmt.Printf("Putting i=%d\n", i) - } - var entries []*Entry + txn, err := kv.NewTransaction(true) + require.NoError(t, err) for j := i; j < i+m && j < n; j++ { - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("%09d", j)), - Value: []byte(fmt.Sprintf("%09d", j)), - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) + require.NoError(t, txn.Set(data(j), data(j), 0)) } + require.NoError(t, txn.Commit(nil)) } - kv.validate() + require.NoError(t, kv.validate()) - var item KVItem + fmt.Println("retrieving") for i := 0; i < n; i++ { - if (i % 10000) == 0 { - fmt.Printf("Testing i=%d\n", i) - } - k := fmt.Sprintf("%09d", i) - if err := kv.Get([]byte(k), &item); err != nil { + item, err := txnGet(t, kv, data(i)) + if err != nil { t.Error(err) } - require.EqualValues(t, k, string(getItemValue(t, &item))) + require.EqualValues(t, string(data(i)), string(getItemValue(t, &item))) } // Overwrite - for i := n - 1; i >= 0; i -= m { - if (i % 10000) == 0 { - fmt.Printf("Overwriting i=%d\n", i) - } - var entries []*Entry - for j := i; j > i-m && j >= 0; j-- { - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("%09d", j)), + fmt.Println("overwriting") + for i := 0; i < n; i += m { + txn, err := kv.NewTransaction(true) + require.NoError(t, err) + for j := i; j < i+m && j < n; j++ { + require.NoError(t, txn.Set(data(j), // Use a long value that will certainly exceed value threshold. 
- Value: []byte(fmt.Sprintf("zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz%09d", j)), - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) + []byte(fmt.Sprintf("zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz%09d", j)), + 0x00)) } + require.NoError(t, txn.Commit(nil)) } - kv.validate() + require.NoError(t, kv.validate()) + + fmt.Println("testing") for i := 0; i < n; i++ { - if (i % 10000) == 0 { - fmt.Printf("Testing i=%d\n", i) - } k := []byte(fmt.Sprintf("%09d", i)) expectedValue := fmt.Sprintf("zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz%09d", i) - if err := kv.Get([]byte(k), &item); err != nil { + item, err := txnGet(t, kv, []byte(k)) + if err != nil { t.Error(err) } - require.EqualValues(t, expectedValue, string(getItemValue(t, &item))) + got := string(getItemValue(t, &item)) + if expectedValue != got { + k0 := y.KeyWithTs(k, math.MaxUint64) + + vs, err := kv.get(k0) + require.NoError(t, err) + fmt.Printf("wanted=%q Item: %s\n", k, item.ToString()) + fmt.Printf("on re-run, got version: %+v\n", vs) + + txn, err := kv.NewTransaction(false) + require.NoError(t, err) + itr := txn.NewIterator(DefaultIteratorOptions) + for itr.Seek(k0); itr.Valid(); itr.Next() { + item := itr.Item() + fmt.Printf("item=%s\n", item.ToString()) + if !bytes.Equal(item.Key(), k) { + break + } + } + itr.Close() + } + require.EqualValues(t, expectedValue, string(getItemValue(t, &item)), "wanted=%q Item: %s\n", k, item.ToString()) } // "Delete" key. @@ -400,17 +314,12 @@ func TestGetMore(t *testing.T) { if (i % 10000) == 0 { fmt.Printf("Deleting i=%d\n", i) } - var entries []*Entry + txn, err := kv.NewTransaction(true) + require.NoError(t, err) for j := i; j < i+m && j < n; j++ { - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("%09d", j)), - Meta: BitDelete, - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) + require.NoError(t, txn.Delete([]byte(fmt.Sprintf("%09d", j)))) } + require.NoError(t, txn.Commit(nil)) } kv.validate() for i := 0; i < n; i++ { @@ -418,11 +327,9 @@ func TestGetMore(t *testing.T) { // Display some progress. Right now, it's not very fast with no caching. 
fmt.Printf("Testing i=%d\n", i) } - k := fmt.Sprintf("%09d", i) - if err := kv.Get([]byte(k), &item); err != nil { - t.Error(err) - } - require.Nil(t, getItemValue(t, &item)) + k := data(i) + item, err := txnGet(t, kv, []byte(k)) + require.Equal(t, ErrKeyNotFound, err, "wanted=%q item=%s\n", k, item.ToString()) } fmt.Println("Done and closing") } @@ -442,59 +349,44 @@ func TestExistsMore(t *testing.T) { // n := 500000 n := 10000 - m := 100 + m := 49 for i := 0; i < n; i += m { if (i % 1000) == 0 { fmt.Printf("Putting i=%d\n", i) } - var entries []*Entry + txn, err := kv.NewTransaction(true) + require.NoError(t, err) for j := i; j < i+m && j < n; j++ { - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("%09d", j)), - Value: []byte(fmt.Sprintf("%09d", j)), - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) + require.NoError(t, txn.Set([]byte(fmt.Sprintf("%09d", j)), + []byte(fmt.Sprintf("%09d", j)), + 0x00)) } + require.NoError(t, txn.Commit(nil)) } kv.validate() - var found bool for i := 0; i < n; i++ { if (i % 1000) == 0 { fmt.Printf("Testing i=%d\n", i) } k := fmt.Sprintf("%09d", i) - found, err = kv.Exists([]byte(k)) - if err != nil { - t.Error(err) - } - require.EqualValues(t, true, found) + _, err = txnGet(t, kv, []byte(k)) + require.NoError(t, err) } - found, err = kv.Exists([]byte("non-exists")) - if err != nil { - t.Error(err) - } - require.EqualValues(t, false, found) + _, err = txnGet(t, kv, []byte("non-exists")) + require.Error(t, err) // "Delete" key. for i := 0; i < n; i += m { if (i % 1000) == 0 { fmt.Printf("Deleting i=%d\n", i) } - var entries []*Entry + txn, err := kv.NewTransaction(true) + require.NoError(t, err) for j := i; j < i+m && j < n; j++ { - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("%09d", j)), - Meta: BitDelete, - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) + require.NoError(t, txn.Delete([]byte(fmt.Sprintf("%09d", j)))) } + require.NoError(t, txn.Commit(nil)) } kv.validate() for i := 0; i < n; i++ { @@ -503,11 +395,8 @@ func TestExistsMore(t *testing.T) { fmt.Printf("Testing i=%d\n", i) } k := fmt.Sprintf("%09d", i) - found, err := kv.Exists([]byte(k)) - if err != nil { - t.Error(err) - } - require.False(t, found, fmt.Sprintf("key=%s", k)) + _, err = txnGet(t, kv, []byte(k)) + require.Error(t, err) } fmt.Println("Done and closing") } @@ -532,14 +421,16 @@ func TestIterate2Basic(t *testing.T) { if (i % 1000) == 0 { t.Logf("Put i=%d\n", i) } - kv.Set(bkey(i), bval(i), byte(i%127)) + txnSet(t, kv, bkey(i), bval(i), byte(i%127)) } opt := IteratorOptions{} opt.PrefetchValues = true opt.PrefetchSize = 10 - it := kv.NewIterator(opt) + txn, err := kv.NewTransaction(false) + require.NoError(t, err) + it := txn.NewIterator(opt) { var count int rewind := true @@ -591,22 +482,21 @@ func TestLoad(t *testing.T) { fmt.Printf("Putting i=%d\n", i) } k := []byte(fmt.Sprintf("%09d", i)) - kv.Set(k, k, 0x00) + txnSet(t, kv, k, k, 0x00) } kv.Close() } kv, err := NewKV(getTestOptions(dir)) require.NoError(t, err) - var item KVItem + require.Equal(t, uint64(10001), kv.txnState.readTs()) for i := 0; i < n; i++ { if (i % 10000) == 0 { fmt.Printf("Testing i=%d\n", i) } k := fmt.Sprintf("%09d", i) - if err := kv.Get([]byte(k), &item); err != nil { - t.Error(err) - } + item, err := txnGet(t, kv, []byte(k)) + require.NoError(t, err) require.EqualValues(t, k, string(getItemValue(t, &item))) } kv.Close() @@ -640,35 +530,40 
@@ func TestIterateDeleted(t *testing.T) { ps, err := NewKV(&opt) require.NoError(t, err) defer ps.Close() - ps.Set([]byte("Key1"), []byte("Value1"), 0x00) - ps.Set([]byte("Key2"), []byte("Value2"), 0x00) + txnSet(t, ps, []byte("Key1"), []byte("Value1"), 0x00) + txnSet(t, ps, []byte("Key2"), []byte("Value2"), 0x00) iterOpt := DefaultIteratorOptions iterOpt.PrefetchValues = false - idxIt := ps.NewIterator(iterOpt) + txn, err := ps.NewTransaction(false) + require.NoError(t, err) + idxIt := txn.NewIterator(iterOpt) defer idxIt.Close() - wb := make([]*Entry, 0, 100) + count := 0 + txn2, err := ps.NewTransaction(true) + require.NoError(t, err) prefix := []byte("Key") for idxIt.Seek(prefix); idxIt.Valid(); idxIt.Next() { key := idxIt.Item().Key() if !bytes.HasPrefix(key, prefix) { break } - wb = EntriesDelete(wb, key) - } - require.Equal(t, 2, len(wb)) - ps.BatchSet(wb) - - for _, e := range wb { - require.NoError(t, e.Error) + count++ + newKey := make([]byte, len(key)) + copy(newKey, key) + require.NoError(t, txn2.Delete(newKey)) } + require.Equal(t, 2, count) + require.NoError(t, txn2.Commit(nil)) for _, prefetch := range [...]bool{true, false} { t.Run(fmt.Sprintf("Prefetch=%t", prefetch), func(t *testing.T) { + txn, err := ps.NewTransaction(false) + require.NoError(t, err) iterOpt = DefaultIteratorOptions iterOpt.PrefetchValues = prefetch - idxIt = ps.NewIterator(iterOpt) + idxIt = txn.NewIterator(iterOpt) var estSize int64 var idxKeys []string @@ -709,8 +604,8 @@ func TestDeleteWithoutSyncWrite(t *testing.T) { key := []byte("k1") // Set a value with size > value threshold so that its written to value log. - require.NoError(t, kv.Set(key, []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789FOOBARZOGZOG"), 0x00)) - require.NoError(t, kv.Delete(key)) + txnSet(t, kv, key, []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789FOOBARZOGZOG"), 0x00) + txnDelete(t, kv, key) kv.Close() // Reopen KV @@ -721,96 +616,8 @@ func TestDeleteWithoutSyncWrite(t *testing.T) { } defer kv.Close() - item := KVItem{} - require.NoError(t, kv.Get(key, &item)) - require.Equal(t, 0, len(getItemValue(t, &item))) -} - -func TestSetIfAbsent(t *testing.T) { - dir, err := ioutil.TempDir("", "badger") - opt := getTestOptions(dir) - kv, err := NewKV(opt) - require.NoError(t, err) - - key := []byte("k1") - err = kv.SetIfAbsent(key, []byte("val"), 0x00) - require.NoError(t, err) - - err = kv.SetIfAbsent(key, []byte("val2"), 0x00) - require.EqualError(t, err, ErrKeyExists.Error()) -} - -func BenchmarkExists(b *testing.B) { - dir, err := ioutil.TempDir("", "badger") - require.NoError(b, err) - defer os.RemoveAll(dir) - kv, err := NewKV(getTestOptions(dir)) - if err != nil { - b.Error(err) - b.Fail() - } - defer kv.Close() - - n := 50000 - m := 100 - for i := 0; i < n; i += m { - if (i % 10000) == 0 { - fmt.Printf("Putting i=%d\n", i) - } - var entries []*Entry - for j := i; j < i+m && j < n; j++ { - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("%09d", j)), - Value: []byte(fmt.Sprintf("%09d", j)), - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(b, e.Error, "entry with error: %+v", e) - } - } - kv.validate() - - // rand.Seed(int64(time.Now().Nanosecond())) - - b.Run("WithGet", func(b *testing.B) { - b.ResetTimer() - item := &KVItem{} - for i := 0; i < b.N; i++ { - k := fmt.Sprintf("%09d", i%n) - err := kv.Get([]byte(k), item) - if err != nil { - b.Error(err) - } - var val []byte - err = item.Value(func(v []byte) error { - val = make([]byte, len(v)) - copy(val, v) - return nil - }) - if err != nil { - 
b.Error(err) - } - found := val == nil - _ = found - } - }) - - b.Run("WithExists", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - // k := fmt.Sprintf("%09d", rand.Intn(n)) - k := fmt.Sprintf("%09d", i%n) - // k := fmt.Sprintf("%09d", 0) - found, err := kv.Exists([]byte(k)) - if err != nil { - b.Error(err) - } - _ = found - } - }) - - fmt.Println("Done and closing") + _, err = txnGet(t, kv, key) + require.Error(t, ErrKeyNotFound, err) } func TestPidFile(t *testing.T) { @@ -839,21 +646,18 @@ func TestBigKeyValuePairs(t *testing.T) { bigV := make([]byte, opt.ValueLogFileSize+1) small := make([]byte, 10) - require.Regexp(t, regexp.MustCompile("Key.*exceeded"), kv.Set(bigK, small, 0).Error()) - require.Regexp(t, regexp.MustCompile("Value.*exceeded"), kv.Set(small, bigV, 0).Error()) + txn, err := kv.NewTransaction(true) + require.Regexp(t, regexp.MustCompile("Key.*exceeded"), txn.Set(bigK, small, 0)) + txn, err = kv.NewTransaction(true) + require.Regexp(t, regexp.MustCompile("Value.*exceeded"), txn.Set(small, bigV, 0)) - e1 := Entry{Key: small, Value: small} - e2 := Entry{Key: bigK, Value: bigV} - err = kv.BatchSet([]*Entry{&e1, &e2}) - require.Nil(t, err) - require.Nil(t, e1.Error) - require.Regexp(t, regexp.MustCompile("Key.*exceeded"), e2.Error.Error()) + txn, err = kv.NewTransaction(true) + require.NoError(t, err) + require.NoError(t, txn.Set(small, small, 0x00)) + require.Regexp(t, regexp.MustCompile("Key.*exceeded"), txn.Set(bigK, bigV, 0x00)) - // make sure e1 was actually set: - var item KVItem - require.NoError(t, kv.Get(small, &item)) - require.Equal(t, item.Key(), small) - require.Equal(t, getItemValue(t, &item), small) + _, err = txnGet(t, kv, small) + require.Equal(t, ErrKeyNotFound, err) require.NoError(t, kv.Close()) } @@ -877,7 +681,7 @@ func TestIteratorPrefetchSize(t *testing.T) { if (i % 10) == 0 { t.Logf("Put i=%d\n", i) } - kv.Set(bkey(i), bval(i), byte(i%127)) + txnSet(t, kv, bkey(i), bval(i), byte(i%127)) } getIteratorCount := func(prefetchSize int) int { @@ -886,7 +690,9 @@ func TestIteratorPrefetchSize(t *testing.T) { opt.PrefetchSize = prefetchSize var count int - it := kv.NewIterator(opt) + txn, err := kv.NewTransaction(false) + require.NoError(t, err) + it := txn.NewIterator(opt) { t.Log("Starting first basic iteration") for it.Rewind(); it.Valid(); it.Next() { @@ -921,7 +727,12 @@ func TestSetIfAbsentAsync(t *testing.T) { if (i % 10) == 0 { t.Logf("Put i=%d\n", i) } - kv.SetIfAbsentAsync(bkey(i), nil, byte(i%127), f) + txn, err := kv.NewTransaction(true) + require.NoError(t, err) + _, err = txn.Get(bkey(i)) + require.Equal(t, ErrKeyNotFound, err) + require.NoError(t, txn.Set(bkey(i), nil, byte(i%127))) + require.NoError(t, txn.Commit(f)) } require.NoError(t, kv.Close()) @@ -929,8 +740,10 @@ func TestSetIfAbsentAsync(t *testing.T) { require.NoError(t, err) opt := DefaultIteratorOptions + txn, err := kv.NewTransaction(false) + require.NoError(t, err) var count int - it := kv.NewIterator(opt) + it := txn.NewIterator(opt) { t.Log("Starting first basic iteration") for it.Rewind(); it.Valid(); it.Next() { @@ -968,8 +781,7 @@ func TestGetSetRace(t *testing.T) { for i := 0; i < numOp; i++ { key := fmt.Sprintf("%d", i) - err = kv.Set([]byte(key), data, 0x00) - require.NoError(t, err) + txnSet(t, kv, []byte(key), data, 0x00) keyCh <- key } }() @@ -980,9 +792,7 @@ func TestGetSetRace(t *testing.T) { defer wg.Done() for key := range keyCh { - var item KVItem - - err := kv.Get([]byte(key), &item) + item, err := txnGet(t, kv, []byte(key)) require.NoError(t, 
err) var val []byte diff --git a/level_handler.go b/level_handler.go index 2cb41c78d..44281ec28 100644 --- a/level_handler.go +++ b/level_handler.go @@ -240,12 +240,12 @@ func (s *levelHandler) get(key []byte) (y.ValueStruct, error) { tables, decr := s.getTableForKey(key) for _, th := range tables { - if th.DoesNotHave(key) { - // TODO: Only check the prefix, not suffix in blooms. - // TODO: This is important. - y.NumLSMBloomHits.Add(s.strLevel, 1) - continue - } + // if th.DoesNotHave(key) { + // // TODO: Only check the prefix, not suffix in blooms. + // // TODO: This is important. + // y.NumLSMBloomHits.Add(s.strLevel, 1) + // continue + // } it := th.NewIterator(false) defer it.Close() diff --git a/levels.go b/levels.go index a911da057..74d0e2306 100644 --- a/levels.go +++ b/levels.go @@ -663,7 +663,9 @@ func (s *levelsController) get(key []byte) (y.ValueStruct, error) { // read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do // parallelize this, we will need to call the h.RLock() function by increasing order of level // number.) - for _, h := range s.levels { + + var maxVs y.ValueStruct + for l, h := range s.levels { vs, err := h.get(key) // Calls h.RLock() and h.RUnlock(). if err != nil { return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key) @@ -671,9 +673,14 @@ func (s *levelsController) get(key []byte) (y.ValueStruct, error) { if vs.Value == nil && vs.Meta == 0 { continue } - return vs, nil + if l == 0 { + return vs, nil + } + if maxVs.Version < vs.Version { + maxVs = vs + } } - return y.ValueStruct{}, nil + return maxVs, nil } func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator { diff --git a/manifest_test.go b/manifest_test.go index 95a7b6c35..5a3b6f57f 100644 --- a/manifest_test.go +++ b/manifest_test.go @@ -49,9 +49,9 @@ func TestManifestBasic(t *testing.T) { fmt.Printf("Putting i=%d\n", i) } k := []byte(fmt.Sprintf("%16x", rand.Int63())) - kv.Set(k, k, 0x00) + txnSet(t, kv, k, k, 0x00) } - kv.Set([]byte("testkey"), []byte("testval"), 0x05) + txnSet(t, kv, []byte("testkey"), []byte("testval"), 0x05) kv.validate() require.NoError(t, kv.Close()) } @@ -59,10 +59,8 @@ func TestManifestBasic(t *testing.T) { kv, err := NewKV(opt) require.NoError(t, err) - var item KVItem - if err := kv.Get([]byte("testkey"), &item); err != nil { - t.Error(err) - } + item, err := txnGet(t, kv, []byte("testkey")) + require.NoError(t, err) require.EqualValues(t, "testval", string(getItemValue(t, &item))) require.EqualValues(t, byte(0x05), item.UserMeta()) require.NoError(t, kv.Close()) @@ -136,13 +134,12 @@ func buildTable(t *testing.T, keyValues [][]string) *os.File { sort.Slice(keyValues, func(i, j int) bool { return keyValues[i][0] < keyValues[j][0] }) - for i, kv := range keyValues { + for _, kv := range keyValues { y.AssertTrue(len(kv) == 2) err := b.Add([]byte(kv[0]), y.ValueStruct{ - Value: []byte(kv[1]), - Meta: 'A', - UserMeta: 0, - CASCounter: uint64(i), + Value: []byte(kv[1]), + Meta: 'A', + UserMeta: 0, }) if t != nil { require.NoError(t, err) diff --git a/skl/skl.go b/skl/skl.go index 5826c3101..784b67eb5 100644 --- a/skl/skl.go +++ b/skl/skl.go @@ -109,7 +109,7 @@ func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node { node.keyOffset = arena.putKey(key) node.keySize = uint16(len(key)) node.height = uint16(height) - node.value = encodeValue(arena.putVal(v), uint16(v.EncodedSize())) + node.value = encodeValue(arena.putVal(v), v.EncodedSize()) return node } @@ -146,7 +146,7 @@ func (s *node) 
key(arena *Arena) []byte { func (s *node) setValue(arena *Arena, v y.ValueStruct) { valOffset := arena.putVal(v) - value := encodeValue(valOffset, uint16(len(v.Value))) + value := encodeValue(valOffset, v.EncodedSize()) atomic.StoreUint64(&s.value, value) } @@ -378,11 +378,13 @@ func (s *Skiplist) Get(key []byte) y.ValueStruct { if n == nil { return y.ValueStruct{} } - valOffset, valSize := n.getValueOffset() + nextKey := s.arena.getKey(n.keyOffset, n.keySize) if !y.SameKey(key, nextKey) { return y.ValueStruct{} } + + valOffset, valSize := n.getValueOffset() vs := s.arena.getVal(valOffset, valSize) vs.Version = y.ParseTs(nextKey) return vs diff --git a/skl/skl_test.go b/skl/skl_test.go index 08638c2e1..e949d98f9 100644 --- a/skl/skl_test.go +++ b/skl/skl_test.go @@ -92,37 +92,31 @@ func TestBasic(t *testing.T) { // Try inserting values. // Somehow require.Nil doesn't work when checking for unsafe.Pointer(nil). - l.Put([]byte("key1"), y.MakeValueStruct(val1, 55, 0, 60000)) - l.Put([]byte("key3"), y.MakeValueStruct(val3, 56, 0, 60001)) - l.Put([]byte("key2"), y.MakeValueStruct(val2, 57, 0, 60002)) + l.Put(y.KeyWithTs([]byte("key1"), 0), y.ValueStruct{Value: val1, Meta: 55, UserMeta: 0}) + l.Put(y.KeyWithTs([]byte("key2"), 2), y.ValueStruct{Value: val2, Meta: 56, UserMeta: 0}) + l.Put(y.KeyWithTs([]byte("key3"), 0), y.ValueStruct{Value: val3, Meta: 57, UserMeta: 0}) - v := l.Get([]byte("key")) + v := l.Get(y.KeyWithTs([]byte("key"), 0)) require.True(t, v.Value == nil) - v = l.Get([]byte("key1")) + v = l.Get(y.KeyWithTs([]byte("key1"), 0)) require.True(t, v.Value != nil) require.EqualValues(t, "00042", string(v.Value)) require.EqualValues(t, 55, v.Meta) - require.EqualValues(t, 60000, v.CASCounter) - v = l.Get([]byte("key2")) - require.True(t, v.Value != nil) - require.EqualValues(t, "00052", string(v.Value)) - require.EqualValues(t, 57, v.Meta) - require.EqualValues(t, 60002, v.CASCounter) + v = l.Get(y.KeyWithTs([]byte("key2"), 0)) + require.True(t, v.Value == nil) - v = l.Get([]byte("key3")) + v = l.Get(y.KeyWithTs([]byte("key3"), 0)) require.True(t, v.Value != nil) require.EqualValues(t, "00062", string(v.Value)) - require.EqualValues(t, 56, v.Meta) - require.EqualValues(t, 60001, v.CASCounter) + require.EqualValues(t, 57, v.Meta) - l.Put([]byte("key2"), y.MakeValueStruct(val4, 12, 0, 50000)) - v = l.Get([]byte("key2")) + l.Put(y.KeyWithTs([]byte("key3"), 1), y.ValueStruct{Value: val4, Meta: 12, UserMeta: 0}) + v = l.Get(y.KeyWithTs([]byte("key3"), 1)) require.True(t, v.Value != nil) require.EqualValues(t, "00072", string(v.Value)) require.EqualValues(t, 12, v.Meta) - require.EqualValues(t, 50000, v.CASCounter) } // TestConcurrentBasic tests concurrent writes followed by concurrent reads. 
@@ -130,12 +124,15 @@ func TestConcurrentBasic(t *testing.T) { const n = 1000 l := NewSkiplist(arenaSize) var wg sync.WaitGroup + key := func(i int) []byte { + return y.KeyWithTs([]byte(fmt.Sprintf("%05d", i)), 0) + } for i := 0; i < n; i++ { wg.Add(1) go func(i int) { defer wg.Done() - l.Put([]byte(fmt.Sprintf("%05d", i)), - y.MakeValueStruct(newValue(i), 0, 0, uint64(i))) + l.Put(key(i), + y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) }(i) } wg.Wait() @@ -144,10 +141,9 @@ func TestConcurrentBasic(t *testing.T) { wg.Add(1) go func(i int) { defer wg.Done() - v := l.Get([]byte(fmt.Sprintf("%05d", i))) + v := l.Get(key(i)) require.True(t, v.Value != nil) require.EqualValues(t, newValue(i), v.Value) - require.EqualValues(t, i, v.CASCounter) }(i) } wg.Wait() @@ -157,7 +153,7 @@ func TestConcurrentBasic(t *testing.T) { // TestOneKey will read while writing to one single key. func TestOneKey(t *testing.T) { const n = 100 - key := []byte("thekey") + key := y.KeyWithTs([]byte("thekey"), 0) l := NewSkiplist(arenaSize) defer l.DecrRef() @@ -166,7 +162,7 @@ func TestOneKey(t *testing.T) { wg.Add(1) go func(i int) { defer wg.Done() - l.Put(key, y.MakeValueStruct(newValue(i), 0, 0, uint64(i))) + l.Put(key, y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) }(i) } // We expect that at least some write made it such that some read returns a value. @@ -183,7 +179,6 @@ func TestOneKey(t *testing.T) { v, err := strconv.Atoi(string(p.Value)) require.NoError(t, err) require.True(t, 0 <= v && v < n) - require.EqualValues(t, v, p.CASCounter) }() } wg.Wait() @@ -196,7 +191,7 @@ func TestFindNear(t *testing.T) { defer l.DecrRef() for i := 0; i < 1000; i++ { key := fmt.Sprintf("%05d", i*10+5) - l.Put([]byte(key), y.MakeValueStruct(newValue(i), 0, 0, uint64(i))) + l.Put([]byte(key), y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) } n, eq := l.findNear([]byte("00001"), false, false) @@ -308,7 +303,7 @@ func TestIteratorNext(t *testing.T) { require.False(t, it.Valid()) for i := n - 1; i >= 0; i-- { l.Put([]byte(fmt.Sprintf("%05d", i)), - y.MakeValueStruct(newValue(i), 0, 0, uint64(i))) + y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) } it.SeekToFirst() for i := 0; i < n; i++ { @@ -332,14 +327,13 @@ func TestIteratorPrev(t *testing.T) { require.False(t, it.Valid()) for i := 0; i < n; i++ { l.Put([]byte(fmt.Sprintf("%05d", i)), - y.MakeValueStruct(newValue(i), 0, 0, uint64(i))) + y.ValueStruct{Value: newValue(i), Meta: 0, UserMeta: 0}) } it.SeekToLast() for i := n - 1; i >= 0; i-- { require.True(t, it.Valid()) v := it.Value() require.EqualValues(t, newValue(i), v.Value) - require.EqualValues(t, i, v.CASCounter) it.Prev() } require.False(t, it.Valid()) @@ -360,7 +354,7 @@ func TestIteratorSeek(t *testing.T) { // 1000, 1010, 1020, ..., 1990. 
for i := n - 1; i >= 0; i-- { v := i*10 + 1000 - l.Put([]byte(fmt.Sprintf("%05d", i*10+1000)), y.MakeValueStruct(newValue(v), 0, 0, 555)) + l.Put([]byte(fmt.Sprintf("%05d", i*10+1000)), y.ValueStruct{Value: newValue(v), Meta: 0, UserMeta: 0}) } it.Seek([]byte("")) require.True(t, it.Valid()) @@ -439,7 +433,7 @@ func BenchmarkReadWrite(b *testing.B) { count++ } } else { - l.Put(randomKey(rng), y.MakeValueStruct(value, 0, 0, 0)) + l.Put(randomKey(rng), y.ValueStruct{Value: value, Meta: 0, UserMeta: 0}) } } }) diff --git a/table/table_test.go b/table/table_test.go index f60fdc9b7..80c355d85 100644 --- a/table/table_test.go +++ b/table/table_test.go @@ -60,9 +60,9 @@ func buildTable(t *testing.T, keyValues [][]string) *os.File { sort.Slice(keyValues, func(i, j int) bool { return keyValues[i][0] < keyValues[j][0] }) - for i, kv := range keyValues { + for _, kv := range keyValues { y.AssertTrue(len(kv) == 2) - err := b.Add([]byte(kv[0]), y.MakeValueStruct([]byte(kv[1]), 'A', 0, uint64(i))) + err := b.Add([]byte(kv[0]), y.ValueStruct{Value: []byte(kv[1]), Meta: 'A', UserMeta: 0}) if t != nil { require.NoError(t, err) } else { @@ -89,7 +89,6 @@ func TestSeekToFirst(t *testing.T) { v := it.Value() require.EqualValues(t, "0", string(v.Value)) require.EqualValues(t, 'A', v.Meta) - require.EqualValues(t, 0, v.CASCounter) }) } } @@ -108,13 +107,11 @@ func TestSeekToLast(t *testing.T) { v := it.Value() require.EqualValues(t, fmt.Sprintf("%d", n-1), string(v.Value)) require.EqualValues(t, 'A', v.Meta) - require.EqualValues(t, n-1, v.CASCounter) it.prev() require.True(t, it.Valid()) v = it.Value() require.EqualValues(t, fmt.Sprintf("%d", n-2), string(v.Value)) require.EqualValues(t, 'A', v.Meta) - require.EqualValues(t, n-2, v.CASCounter) }) } } @@ -209,7 +206,6 @@ func TestIterateFromStart(t *testing.T) { v := ti.Value() require.EqualValues(t, fmt.Sprintf("%d", count), string(v.Value)) require.EqualValues(t, 'A', v.Meta) - require.EqualValues(t, count, v.CASCounter) count++ } require.EqualValues(t, n, count) @@ -236,7 +232,6 @@ func TestIterateFromEnd(t *testing.T) { v := ti.Value() require.EqualValues(t, fmt.Sprintf("%d", i), string(v.Value)) require.EqualValues(t, 'A', v.Meta) - require.EqualValues(t, i, v.CASCounter) } ti.prev() require.False(t, ti.Valid()) @@ -325,7 +320,6 @@ func TestUniIterator(t *testing.T) { v := it.Value() require.EqualValues(t, fmt.Sprintf("%d", count), string(v.Value)) require.EqualValues(t, 'A', v.Meta) - require.EqualValues(t, count, v.CASCounter) count++ } require.EqualValues(t, 10000, count) @@ -338,7 +332,6 @@ func TestUniIterator(t *testing.T) { v := it.Value() require.EqualValues(t, fmt.Sprintf("%d", 10000-1-count), string(v.Value)) require.EqualValues(t, 'A', v.Meta) - require.EqualValues(t, 10000-1-count, v.CASCounter) count++ } require.EqualValues(t, 10000, count) @@ -617,7 +610,7 @@ func BenchmarkRead(b *testing.B) { for i := 0; i < n; i++ { k := fmt.Sprintf("%016x", i) v := fmt.Sprintf("%d", i) - y.Check(builder.Add([]byte(k), y.MakeValueStruct([]byte(v), 123, 0, 5555))) + y.Check(builder.Add([]byte(k), y.ValueStruct{Value: []byte(v), Meta: 123, UserMeta: 0})) } f.Write(builder.Finish()) @@ -647,7 +640,7 @@ func BenchmarkReadAndBuild(b *testing.B) { for i := 0; i < n; i++ { k := fmt.Sprintf("%016x", i) v := fmt.Sprintf("%d", i) - y.Check(builder.Add([]byte(k), y.MakeValueStruct([]byte(v), 123, 0, 5555))) + y.Check(builder.Add([]byte(k), y.ValueStruct{Value: []byte(v), Meta: 123, UserMeta: 0})) } f.Write(builder.Finish()) @@ -687,7 +680,7 @@ func 
BenchmarkReadMerged(b *testing.B) { // id := i*tableSize+j (not interleaved) k := fmt.Sprintf("%016x", id) v := fmt.Sprintf("%d", id) - y.Check(builder.Add([]byte(k), y.MakeValueStruct([]byte(v), 123, 0, 5555))) + y.Check(builder.Add([]byte(k), y.ValueStruct{Value: []byte(v), Meta: 123, UserMeta: 0})) } f.Write(builder.Finish()) tbl, err := OpenTable(f, options.MemoryMap) diff --git a/transaction.go b/transaction.go index c4e56ad3c..381ad3d91 100644 --- a/transaction.go +++ b/transaction.go @@ -204,6 +204,7 @@ func (txn *Txn) Get(key []byte) (item KVItem, rerr error) { item.userMeta = e.UserMeta item.key = key item.status = prefetched + item.version = txn.readTs // We probably don't need to set KV on item here. return item, nil } @@ -225,9 +226,10 @@ func (txn *Txn) Get(key []byte) (item KVItem, rerr error) { return item, ErrKeyNotFound } + item.key = key + item.version = vs.Version item.meta = vs.Meta item.userMeta = vs.UserMeta - item.key = key item.kv = txn.kv item.vptr = vs.Value return item, nil @@ -263,9 +265,8 @@ func (txn *Txn) Commit(callback func(error)) error { e.Meta |= BitTxn entries = append(entries, e) } - // TODO: Add logic in replay to deal with this. entry := &Entry{ - Key: txnKey, + Key: y.KeyWithTs(txnKey, commitTs), Value: []byte(strconv.FormatUint(commitTs, 10)), Meta: BitFinTxn, } diff --git a/transaction_test.go b/transaction_test.go index 9049ab001..ac5918c8e 100644 --- a/transaction_test.go +++ b/transaction_test.go @@ -50,7 +50,7 @@ func TestTxnSimple(t *testing.T) { return nil } require.NoError(t, item.Value(fn)) - require.NoError(t, txn.Commit()) + require.NoError(t, txn.Commit(nil)) } func TestTxnVersions(t *testing.T) { @@ -67,7 +67,7 @@ func TestTxnVersions(t *testing.T) { require.NoError(t, err) txn.Set(k, []byte(fmt.Sprintf("valversion=%d", i)), 0) - require.NoError(t, txn.Commit()) + require.NoError(t, txn.Commit(nil)) require.Equal(t, uint64(i), kv.txnState.readTs()) } @@ -143,7 +143,7 @@ func TestTxnWriteSkew(t *testing.T) { val := []byte(strconv.Itoa(100)) txn.Set(ax, val, 0) txn.Set(ay, val, 0) - require.NoError(t, txn.Commit()) + require.NoError(t, txn.Commit(nil)) require.Equal(t, uint64(1), kv.txnState.readTs()) getBal := func(txn *Txn, key []byte) (bal int) { @@ -190,8 +190,8 @@ func TestTxnWriteSkew(t *testing.T) { require.Equal(t, 100, sum) // Commit both now. - require.NoError(t, txn1.Commit()) - require.Error(t, txn2.Commit()) // This should fail. + require.NoError(t, txn1.Commit(nil)) + require.Error(t, txn2.Commit(nil)) // This should fail. 
require.Equal(t, uint64(2), kv.txnState.readTs()) } @@ -217,7 +217,7 @@ func TestTxnIterationEdgeCase(t *testing.T) { txn, err := kv.NewTransaction(true) require.NoError(t, err) txn.Set(kc, []byte("c1"), 0) - require.NoError(t, txn.Commit()) + require.NoError(t, txn.Commit(nil)) require.Equal(t, uint64(1), kv.txnState.readTs()) // a2, c2 @@ -225,7 +225,7 @@ func TestTxnIterationEdgeCase(t *testing.T) { require.NoError(t, err) txn.Set(ka, []byte("a2"), 0) txn.Set(kc, []byte("c2"), 0) - require.NoError(t, txn.Commit()) + require.NoError(t, txn.Commit(nil)) require.Equal(t, uint64(2), kv.txnState.readTs()) // b3 @@ -233,14 +233,14 @@ func TestTxnIterationEdgeCase(t *testing.T) { require.NoError(t, err) txn.Set(ka, []byte("a3"), 0) txn.Set(kb, []byte("b3"), 0) - require.NoError(t, txn.Commit()) + require.NoError(t, txn.Commit(nil)) require.Equal(t, uint64(3), kv.txnState.readTs()) // b4 (del) txn, err = kv.NewTransaction(true) require.NoError(t, err) txn.Delete(kb) - require.NoError(t, txn.Commit()) + require.NoError(t, txn.Commit(nil)) require.Equal(t, uint64(4), kv.txnState.readTs()) checkIterator := func(itr *Iterator, expected []string) { diff --git a/util.go b/util.go index fd67103a3..bc5187e89 100644 --- a/util.go +++ b/util.go @@ -51,7 +51,7 @@ func (s *levelHandler) getSummary(sum *summary) { } } -func (s *KV) validate() { s.lc.validate() } +func (s *KV) validate() error { return s.lc.validate() } func (s *levelsController) validate() error { for _, l := range s.levels { diff --git a/value.go b/value.go index 817e2a4e0..ab83dc957 100644 --- a/value.go +++ b/value.go @@ -714,7 +714,7 @@ func (vlog *valueLog) write(reqs []*request) error { e := b.Entries[j] var p valuePointer - if !vlog.opt.SyncWrites && len(e.Value) < vlog.opt.ValueThreshold { + if !vlog.opt.SyncWrites && len(e.Value) < vlog.opt.ValueThreshold && e.Meta&BitFinTxn == 0 { // No need to write to value log. 
b.Ptrs = append(b.Ptrs, p) continue diff --git a/value_test.go b/value_test.go index f54c153e6..2385d4697 100644 --- a/value_test.go +++ b/value_test.go @@ -44,18 +44,14 @@ func TestValueBasic(t *testing.T) { require.True(t, len(val1) >= kv.opt.ValueThreshold) entry := &Entry{ - Key: []byte("samplekey"), - Value: []byte(val1), - Meta: BitValuePointer, - CASCounterCheck: 22222, - casCounter: 33333, + Key: []byte("samplekey"), + Value: []byte(val1), + Meta: BitValuePointer, } entry2 := &Entry{ - Key: []byte("samplekeyb"), - Value: []byte(val2), - Meta: BitValuePointer, - CASCounterCheck: 22225, - casCounter: 33335, + Key: []byte("samplekeyb"), + Value: []byte(val2), + Meta: BitValuePointer, } b := new(request) @@ -81,18 +77,14 @@ func TestValueBasic(t *testing.T) { readEntries := []Entry{valueBytesToEntry(buf1), valueBytesToEntry(buf2)} require.EqualValues(t, []Entry{ { - Key: []byte("samplekey"), - Value: []byte(val1), - Meta: BitValuePointer, - CASCounterCheck: 22222, - casCounter: 33333, + Key: []byte("samplekey"), + Value: []byte(val1), + Meta: BitValuePointer, }, { - Key: []byte("samplekeyb"), - Value: []byte(val2), - Meta: BitValuePointer, - CASCounterCheck: 22225, - casCounter: 33335, + Key: []byte("samplekeyb"), + Value: []byte(val2), + Meta: BitValuePointer, }, }, readEntries) } @@ -108,22 +100,22 @@ func TestValueGC(t *testing.T) { defer kv.Close() sz := 32 << 10 - var entries []*Entry + txn, err := kv.NewTransaction(true) + require.NoError(t, err) for i := 0; i < 100; i++ { v := make([]byte, sz) rand.Read(v[:rand.Intn(sz)]) - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("key%d", i)), - Value: v, - }) - } - kv.batchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%d", i)), v, 0)) + if i%20 == 0 { + require.NoError(t, txn.Commit(nil)) + txn, err = kv.NewTransaction(true) + require.NoError(t, err) + } } + require.NoError(t, txn.Commit(nil)) for i := 0; i < 45; i++ { - kv.Delete([]byte(fmt.Sprintf("key%d", i))) + txnDelete(t, kv, []byte(fmt.Sprintf("key%d", i))) } kv.vlog.filesLock.RLock() @@ -136,12 +128,10 @@ func TestValueGC(t *testing.T) { // }) kv.vlog.rewrite(lf) - var item KVItem for i := 45; i < 100; i++ { key := []byte(fmt.Sprintf("key%d", i)) - if err := kv.Get(key, &item); err != nil { - t.Error(err) - } + item, err := txnGet(t, kv, key) + require.NoError(t, err) val := getItemValue(t, &item) require.NotNil(t, val) require.True(t, len(val) == sz, "Size found: %d", len(val)) @@ -159,40 +149,27 @@ func TestValueGC2(t *testing.T) { defer kv.Close() sz := 32 << 10 - var entries []*Entry + txn, err := kv.NewTransaction(true) + require.NoError(t, err) for i := 0; i < 100; i++ { v := make([]byte, sz) rand.Read(v[:rand.Intn(sz)]) - entry := &Entry{ - Key: []byte(fmt.Sprintf("key%d", i)), - Value: v, - } - entries = append(entries, entry) - // We don't overwrite these values later in the test - if i == 10 || i == 11 { - entry.Meta = BitSetIfAbsent + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%d", i)), v, 0)) + if i%20 == 0 { + require.NoError(t, txn.Commit(nil)) + txn, err = kv.NewTransaction(true) + require.NoError(t, err) } } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) - } + require.NoError(t, txn.Commit(nil)) for i := 0; i < 5; i++ { - kv.Delete([]byte(fmt.Sprintf("key%d", i))) + txnDelete(t, kv, []byte(fmt.Sprintf("key%d", i))) } - entries = entries[:0] for i := 5; i < 10; i++ { v := 
[]byte(fmt.Sprintf("value%d", i)) - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("key%d", i)), - Value: v, - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) + txnSet(t, kv, []byte(fmt.Sprintf("key%d", i)), v, 0) } kv.vlog.filesLock.RLock() @@ -205,29 +182,23 @@ func TestValueGC2(t *testing.T) { // }) kv.vlog.rewrite(lf) - var item KVItem for i := 0; i < 5; i++ { key := []byte(fmt.Sprintf("key%d", i)) - if err := kv.Get(key, &item); err != nil { - t.Error(err) - } - val := getItemValue(t, &item) - require.True(t, len(val) == 0, "Size found: %d", len(val)) + _, err := txnGet(t, kv, key) + require.Error(t, ErrKeyNotFound, err) } for i := 5; i < 10; i++ { key := []byte(fmt.Sprintf("key%d", i)) - if err := kv.Get(key, &item); err != nil { - t.Error(err) - } + item, err := txnGet(t, kv, key) + require.NoError(t, err) val := getItemValue(t, &item) require.NotNil(t, val) require.Equal(t, string(val), fmt.Sprintf("value%d", i)) } for i := 10; i < 100; i++ { key := []byte(fmt.Sprintf("key%d", i)) - if err := kv.Get(key, &item); err != nil { - t.Error(err) - } + item, err := txnGet(t, kv, key) + require.NoError(t, err) val := getItemValue(t, &item) require.NotNil(t, val) require.True(t, len(val) == sz, "Size found: %d", len(val)) @@ -250,7 +221,8 @@ func TestValueGC3(t *testing.T) { valueSize := 32 << 10 var value3 []byte - var entries []*Entry + txn, err := kv.NewTransaction(true) + require.NoError(t, err) for i := 0; i < 100; i++ { v := make([]byte, valueSize) // 32K * 100 will take >=3'276'800 B. if i == 3 { @@ -258,17 +230,14 @@ func TestValueGC3(t *testing.T) { } rand.Read(v[:]) // Keys key000, key001, key002, such that sorted order matches insertion order - entry := &Entry{ - Key: []byte(fmt.Sprintf("key%03d", i)), - Value: v, + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%03d", i)), v, 0)) + if i%20 == 0 { + require.NoError(t, txn.Commit(nil)) + txn, err = kv.NewTransaction(true) + require.NoError(t, err) } - entries = append(entries, entry) - } - err = kv.BatchSet(entries) - require.NoError(t, err) - for _, e := range entries { - require.NoError(t, e.Error) } + require.NoError(t, txn.Commit(nil)) // Start an iterator to keys in the first value log file itOpt := IteratorOptions{ @@ -277,7 +246,9 @@ func TestValueGC3(t *testing.T) { Reverse: false, } - it := kv.NewIterator(itOpt) + txn, err = kv.NewTransaction(true) + require.NoError(t, err) + it := txn.NewIterator(itOpt) defer it.Close() // Walk a few keys it.Rewind() @@ -322,23 +293,24 @@ func TestChecksums(t *testing.T) { opts.ValueLogFileSize = 100 * 1024 * 1024 // 100Mb kv, err := NewKV(opts) require.NoError(t, err) + require.NoError(t, kv.Close()) var ( + k0 = []byte("k0") k1 = []byte("k1") k2 = []byte("k2") k3 = []byte("k3") + v0 = []byte("value0-012345678901234567890123") v1 = []byte("value1-012345678901234567890123") v2 = []byte("value2-012345678901234567890123") v3 = []byte("value3-012345678901234567890123") ) // Make sure the value log would actually store the item - require.True(t, len(v3) >= kv.opt.ValueThreshold) - - require.NoError(t, kv.Set(k1, v1, 0)) - require.NoError(t, kv.Close()) + require.True(t, len(v0) >= kv.opt.ValueThreshold) - // Use a vlog with K1=V1 and a (corrupted) K2=V2 + // Use a vlog with K0=V0 and a (corrupted) second transaction(k1,k2) buf := createVlog(t, []*Entry{ + {Key: k0, Value: v0}, {Key: k1, Value: v1}, {Key: k2, Value: v2}, }) @@ -348,26 +320,29 @@ func TestChecksums(t *testing.T) { // K1 should exist, but K2 
shouldn't. kv, err = NewKV(opts) require.NoError(t, err) - var item KVItem - require.NoError(t, kv.Get(k1, &item)) - require.Equal(t, getItemValue(t, &item), v1) - ok, err := kv.Exists(k2) + item, err := txnGet(t, kv, k0) require.NoError(t, err) - require.False(t, ok) + require.Equal(t, getItemValue(t, &item), v0) + _, err = txnGet(t, kv, k1) + require.Error(t, ErrKeyNotFound, err) + _, err = txnGet(t, kv, k2) + require.Error(t, ErrKeyNotFound, err) // Write K3 at the end of the vlog. - require.NoError(t, kv.Set(k3, v3, 0)) + txnSet(t, kv, k3, v3, 0) require.NoError(t, kv.Close()) - // The vlog should contain K1 and K3 (K2 was lost when Badger started up + // The vlog should contain K0 and K3 (K1 and k2 was lost when Badger started up // last due to checksum failure). kv, err = NewKV(opts) require.NoError(t, err) - iter := kv.NewIterator(DefaultIteratorOptions) - iter.Seek(k1) + txn, err := kv.NewTransaction(false) + require.NoError(t, err) + iter := txn.NewIterator(DefaultIteratorOptions) + iter.Seek(k0) require.True(t, iter.Valid()) it := iter.Item() - require.Equal(t, it.Key(), k1) - require.Equal(t, getItemValue(t, it), v1) + require.Equal(t, it.Key(), k0) + require.Equal(t, getItemValue(t, it), v0) iter.Next() require.True(t, iter.Valid()) it = iter.Item() @@ -390,9 +365,11 @@ func TestPartialAppendToValueLog(t *testing.T) { require.NoError(t, kv.Close()) var ( + k0 = []byte("k0") k1 = []byte("k1") k2 = []byte("k2") k3 = []byte("k3") + v0 = []byte("value0-012345678901234567890123") v1 = []byte("value1-012345678901234567890123") v2 = []byte("value2-012345678901234567890123") v3 = []byte("value3-012345678901234567890123") @@ -401,30 +378,32 @@ func TestPartialAppendToValueLog(t *testing.T) { require.True(t, len(v3) >= kv.opt.ValueThreshold) // Create truncated vlog to simulate a partial append. + // k0 - single transaction, k1 and k2 in another transaction buf := createVlog(t, []*Entry{ + {Key: k0, Value: v0}, {Key: k1, Value: v1}, {Key: k2, Value: v2}, }) buf = buf[:len(buf)-6] require.NoError(t, ioutil.WriteFile(vlogFilePath(dir, 0), buf, 0777)) - // Badger should now start up, but with only K1. + // Badger should now start up kv, err = NewKV(opts) require.NoError(t, err) - var item KVItem - require.NoError(t, kv.Get(k1, &item)) - ok, err := kv.Exists(k2) + item, err := txnGet(t, kv, k0) require.NoError(t, err) - require.False(t, ok) - require.Equal(t, item.Key(), k1) - require.Equal(t, getItemValue(t, &item), v1) + require.Equal(t, v0, getItemValue(t, &item)) + _, err = txnGet(t, kv, k1) + require.Error(t, ErrKeyNotFound, err) + _, err = txnGet(t, kv, k2) + require.Error(t, ErrKeyNotFound, err) // When K3 is set, it should be persisted after a restart. - require.NoError(t, kv.Set(k3, v3, 0)) + txnSet(t, kv, k3, v3, 0) require.NoError(t, kv.Close()) kv, err = NewKV(getTestOptions(dir)) require.NoError(t, err) - checkKeys(t, kv, [][]byte{k1, k3}) + checkKeys(t, kv, [][]byte{k3}) require.NoError(t, kv.Close()) } @@ -440,22 +419,22 @@ func TestValueLogTrigger(t *testing.T) { // Write a lot of data, so it creates some work for valug log GC. 
sz := 32 << 10 - var entries []*Entry + txn, err := kv.NewTransaction(true) + require.NoError(t, err) for i := 0; i < 100; i++ { v := make([]byte, sz) rand.Read(v[:rand.Intn(sz)]) - entries = append(entries, &Entry{ - Key: []byte(fmt.Sprintf("key%d", i)), - Value: v, - }) - } - kv.BatchSet(entries) - for _, e := range entries { - require.NoError(t, e.Error, "entry with error: %+v", e) + require.NoError(t, txn.Set([]byte(fmt.Sprintf("key%d", i)), v, 0)) + if i%20 == 0 { + require.NoError(t, txn.Commit(nil)) + txn, err = kv.NewTransaction(true) + require.NoError(t, err) + } } + require.NoError(t, txn.Commit(nil)) for i := 0; i < 45; i++ { - kv.Delete([]byte(fmt.Sprintf("key%d", i))) + txnDelete(t, kv, []byte(fmt.Sprintf("key%d", i))) } // Now attempt to run 5 value log GCs simultaneously. @@ -486,7 +465,13 @@ func createVlog(t *testing.T, entries []*Entry) []byte { opts.ValueLogFileSize = 100 * 1024 * 1024 // 100Mb kv, err := NewKV(opts) require.NoError(t, err) - require.NoError(t, kv.BatchSet(entries)) + txnSet(t, kv, entries[0].Key, entries[0].Value, entries[0].Meta) + entries = entries[1:] + txn, err := kv.NewTransaction(true) + for _, entry := range entries { + require.NoError(t, txn.Set(entry.Key, entry.Value, entry.Meta)) + } + require.NoError(t, txn.Commit(nil)) require.NoError(t, kv.Close()) filename := vlogFilePath(dir, 0) @@ -497,7 +482,9 @@ func createVlog(t *testing.T, entries []*Entry) []byte { func checkKeys(t *testing.T, kv *KV, keys [][]byte) { i := 0 - iter := kv.NewIterator(IteratorOptions{}) + txn, err := kv.NewTransaction(false) + require.NoError(t, err) + iter := txn.NewIterator(IteratorOptions{}) for iter.Seek(keys[0]); iter.Valid(); iter.Next() { require.Equal(t, iter.Item().Key(), keys[i]) i++ diff --git a/y/iterator.go b/y/iterator.go index 344d766d1..ab44eef51 100644 --- a/y/iterator.go +++ b/y/iterator.go @@ -34,8 +34,8 @@ type ValueStruct struct { } // EncodedSize is the size of the ValueStruct when encoded -func (v *ValueStruct) EncodedSize() int { - return len(v.Value) + 2 +func (v *ValueStruct) EncodedSize() uint16 { + return uint16(len(v.Value) + 2) } // Decode uses the length of the slice to infer the length of the Value field. diff --git a/y/iterator_test.go b/y/iterator_test.go index 14dbfca16..73540b595 100644 --- a/y/iterator_test.go +++ b/y/iterator_test.go @@ -69,7 +69,11 @@ func (s *SimpleIterator) Seek(key []byte) { func (s *SimpleIterator) Key() []byte { return s.keys[s.idx] } func (s *SimpleIterator) Value() ValueStruct { - return ValueStruct{s.vals[s.idx], 55, 0, 12345} + return ValueStruct{ + Value: s.vals[s.idx], + UserMeta: 55, + Meta: 0, + } } func (s *SimpleIterator) Valid() bool { return s.idx >= 0 && s.idx < len(s.keys) diff --git a/y/y.go b/y/y.go index ecb0c5ba0..8056f64f0 100644 --- a/y/y.go +++ b/y/y.go @@ -96,9 +96,11 @@ func ParseTs(key []byte) uint64 { } func ParseKey(key []byte) []byte { - if len(key) < 8 { - return key + if key == nil { + return nil } + + AssertTruef(len(key) > 8, "key=%q", key) return key[:len(key)-8] }
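
Note on the key layout these changes rely on (illustration only, not part of the patch): every key written to the LSM now carries an 8-byte commit-timestamp suffix, which is why ParseKey strips the last 8 bytes, why NewKV looks up the head with y.KeyWithTs(head, math.MaxUint64), and why the kv.go comment says "lsm get removes the last 8 bytes and compares key". A minimal sketch of that layout follows; the inverted-timestamp encoding (so newer versions of the same key sort first under plain byte comparison) is an assumption here, only the 8-byte suffix itself is visible in the diff.

package y

import (
	"encoding/binary"
	"math"
)

// KeyWithTs appends an 8-byte suffix holding the commit timestamp. Storing
// math.MaxUint64-ts (assumed encoding) makes higher timestamps sort before
// lower ones for the same user key, so seeking with ts=math.MaxUint64 lands
// on the newest version.
func KeyWithTs(key []byte, ts uint64) []byte {
	out := make([]byte, len(key)+8)
	copy(out, key)
	binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
	return out
}

// ParseTs recovers the commit timestamp from a key built by KeyWithTs.
func ParseTs(key []byte) uint64 {
	if len(key) <= 8 {
		return 0
	}
	return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:])
}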
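
For readers coming from the deleted doc_test.go example: below is a sketch of the same hello/world flow on top of the transactional API the rewritten tests exercise (KV.NewTransaction, Txn.Set/Get/Commit, KVItem.Value). Error handling is trimmed for brevity; this is illustrative only and not part of the patch.

package badger_test

import (
	"fmt"
	"io/ioutil"

	"github.com/dgraph-io/badger"
)

func Example() {
	opt := badger.DefaultOptions
	dir, _ := ioutil.TempDir("", "badger")
	opt.Dir = dir
	opt.ValueDir = dir
	kv, _ := badger.NewKV(&opt)
	defer kv.Close()

	// Write inside an update (read-write) transaction.
	txn, _ := kv.NewTransaction(true)
	_ = txn.Set([]byte("hello"), []byte("world"), 0x00)
	_ = txn.Commit(nil) // nil callback: commit synchronously, as the tests above rely on

	// Read the latest committed version via a read-only transaction.
	rtxn, _ := kv.NewTransaction(false)
	item, err := rtxn.Get([]byte("hello"))
	if err != nil { // ErrKeyNotFound if the key was never set or was deleted
		fmt.Println("not found")
		return
	}
	_ = item.Value(func(v []byte) error {
		fmt.Printf("GET hello %s\n", v)
		return nil
	})

	// Output: GET hello world
}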