Fix all unit tests
LSM Get now checks all levels when the key is not found in the memtable or level 0, so the latest version is found.
Fix parseItem; the breakage was caused by how we store the key in KVItem.
Fix storing and reading readTs from the badger head.
Janardhan Reddy committed Oct 2, 2017
1 parent 3674b2c commit ffaaa66
Showing 18 changed files with 396 additions and 778 deletions.
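
The first line of the commit message describes a lookup-order fix: when a key is not in the memtables or in a level-0 table, the search has to continue through the deeper levels, otherwise the latest visible version of the key can be missed. A minimal sketch of that order, using stand-in types (source, valueStruct) rather than Badger's actual internals:

```go
package lsm

// valueStruct stands in for y.ValueStruct; the names here are illustrative only.
type valueStruct struct {
	Value   []byte
	Version uint64
}

// source stands in for one place a key may live: a memtable, a level-0
// table, or a deeper LSM level.
type source interface {
	// get returns the newest version of key visible at readTs, if any.
	get(key []byte, readTs uint64) (valueStruct, bool)
}

// lsmGet searches memtables first, then level 0, then every deeper level in
// order, and returns the first (newest) visible version it finds. The fix is
// the final group: keep going through all levels instead of stopping early.
func lsmGet(key []byte, readTs uint64, memtables, level0, levels []source) (valueStruct, bool) {
	for _, group := range [][]source{memtables, level0, levels} {
		for _, s := range group {
			if vs, ok := s.get(key, readTs); ok {
				return vs, true
			}
		}
	}
	return valueStruct{}, false
}
```

Level 0 is listed separately because its tables can overlap, so each one has to be consulted before moving on to the strictly ordered deeper levels.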
162 changes: 0 additions & 162 deletions doc_test.go

This file was deleted.

64 changes: 19 additions & 45 deletions iterator.go
@@ -18,6 +18,7 @@ package badger

import (
"bytes"
"fmt"
"sync"

"github.com/dgraph-io/badger/y"
@@ -45,11 +46,22 @@ type KVItem struct {
val []byte
slice *y.Slice // Used only during prefetching.
next *KVItem
version uint64
}

func (item *KVItem) ToString() string {
return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta)
}

// Key returns the key. Remember to copy if you need to access it outside the iteration loop.
func (item *KVItem) Key() []byte {
return y.ParseKey(item.key)
return item.key
}

// Version returns the commit timestamp of the item.
func (item *KVItem) Version() uint64 {
return item.version
}

// Value retrieves the value of the item from the value log. It calls the
@@ -115,11 +127,6 @@ func (item *KVItem) EstimatedSize() int64 {
return int64(vp.Len) // includes key length.
}

// Version returns the commit timestamp of the item.
func (item *KVItem) Version() uint64 {
return y.ParseTs(item.key)
}

// UserMeta returns the userMeta set by the user. Typically, this byte, optionally set by the user
// is used to interpret the value.
func (item *KVItem) UserMeta() byte {
Expand Down Expand Up @@ -303,7 +310,8 @@ FILL:

// Reverse direction.
nextTs := y.ParseTs(mi.Key())
if nextTs <= it.readTs && y.SameKey(mi.Key(), item.key) {
mik := y.ParseKey(mi.Key())
if nextTs <= it.readTs && bytes.Equal(mik, item.key) {
// This is a valid potential candidate.
goto FILL
}
@@ -320,7 +328,10 @@ func (it *Iterator) fill(item *KVItem) {
vs := it.iitr.Value()
item.meta = vs.Meta
item.userMeta = vs.UserMeta
item.key = y.Safecopy(item.key, it.iitr.Key())

item.version = y.ParseTs(it.iitr.Key())
item.key = y.Safecopy(item.key, y.ParseKey(it.iitr.Key()))

item.vptr = y.Safecopy(item.vptr, vs.Value)
item.val = nil
if it.opt.PrefetchValues {
@@ -379,40 +390,3 @@ func (it *Iterator) Rewind() {
it.iitr.Rewind()
it.prefetch()
}

// NewIterator returns a new iterator. Depending upon the options, either only keys, or both
// key-value pairs would be fetched. The keys are returned in lexicographically sorted order.
// Usage:
// opt := badger.DefaultIteratorOptions
// itr := kv.NewIterator(opt)
// for itr.Rewind(); itr.Valid(); itr.Next() {
// item := itr.Item()
// key := item.Key()
// var val []byte
// err = item.Value(func(v []byte) {
// val = make([]byte, len(v))
// copy(val, v)
// }) // This could block while value is fetched from value log.
// // For key only iteration, set opt.PrefetchValues to false, and don't call
// // item.Value(func(v []byte)).
//
// // Remember that both key, val would become invalid in the next iteration of the loop.
// // So, if you need access to them outside, copy them or parse them.
// }
// itr.Close()
// TODO: Remove this.
func (s *KV) NewIterator(opt IteratorOptions) *Iterator {
tables, decr := s.getMemTables()
defer decr()
s.vlog.incrIteratorCount()
var iters []y.Iterator
for i := 0; i < len(tables); i++ {
iters = append(iters, tables[i].NewUniIterator(opt.Reverse))
}
iters = s.lc.appendIterators(iters, opt.Reverse) // This will increment references.
res := &Iterator{
iitr: y.NewMergeIterator(iters, opt.Reverse),
opt: opt,
}
return res
}
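
The fill change above is the KVItem fix from the commit message: the iterator key carries a timestamp suffix, so fill now stores the parsed user key in item.key and the parsed timestamp in item.version, which lets Key() return the key directly and turns Version() into a field read. A rough sketch of what helpers like y.KeyWithTs, y.ParseKey, and y.ParseTs can look like, assuming the usual 8-byte inverted-timestamp suffix so newer versions sort first; the actual y package may differ in detail:

```go
package keys

import (
	"encoding/binary"
	"math"
)

// keyWithTs appends an 8-byte, big-endian, inverted timestamp to key so that,
// under byte-wise comparison, newer versions of the same key sort first.
// The encoding is an assumption for illustration, not Badger's definition.
func keyWithTs(key []byte, ts uint64) []byte {
	out := make([]byte, len(key)+8)
	copy(out, key)
	binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
	return out
}

// parseKey strips the timestamp suffix and returns the user key,
// which is what fill now copies into item.key.
func parseKey(key []byte) []byte {
	return key[:len(key)-8]
}

// parseTs recovers the commit timestamp, which fill copies into item.version.
func parseTs(key []byte) uint64 {
	return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:])
}
```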
15 changes: 12 additions & 3 deletions kv.go
@@ -20,6 +20,7 @@ import (
"container/heap"
"expvar"
"log"
"math"
"os"
"path/filepath"
"strconv"
@@ -177,7 +178,9 @@ func NewKV(optParam *Options) (out *KV, err error) {
return nil, err
}

vs, err := out.get(head)
headKey := y.KeyWithTs(head, math.MaxUint64)
// Need to pass the key with a timestamp: LSM get strips the last 8 bytes before comparing keys.
vs, err := out.get(headKey)
if err != nil {
return nil, errors.Wrap(err, "Retrieving head")
}
@@ -510,6 +513,9 @@ func (s *KV) writeToLSM(b *request) error {
}

for i, entry := range b.Entries {
if entry.Meta&BitFinTxn != 0 {
continue
}
if s.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case.
s.mt.Put(entry.Key,
y.ValueStruct{
@@ -673,8 +679,9 @@ func (s *KV) batchSet(entries []*Entry) error {

req.Wg.Wait()
req.Entries = nil
err = req.Err
requestPool.Put(req)
return req.Err
return err
}

// batchSetAsync is the asynchronous version of batchSet. It accepts a callback
@@ -765,7 +772,8 @@ func (s *KV) flushMemtable(lc *y.Closer) error {
return nil
}

if !ft.vptr.IsZero() {
if !ft.mt.Empty() {
// Store the badger head even if vptr is zero; we need it to recover the readTs.
s.elog.Printf("Storing offset: %+v\n", ft.vptr)
offset := make([]byte, vptrSize)
ft.vptr.Encode(offset)
@@ -775,6 +783,7 @@
headTs := y.KeyWithTs(head, s.txnState.commitTs())
ft.mt.Put(headTs, y.ValueStruct{Value: offset})
}

fileID := s.lc.reserveFileID()
fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.opt.Dir), true)
if err != nil {
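
The kv.go hunks above are the storing and reading halves of the readTs fix: flushMemtable now writes the head entry at the current commit timestamp even when the value pointer is zero, and NewKV reads it back with y.KeyWithTs(head, math.MaxUint64), which with timestamp-suffixed keys resolves to the newest stored version. A hedged sketch of both halves, reusing keyWithTs from the sketch above, with memtablePut and getLatest as stand-ins for the internals:

```go
package keys

import "math"

// storeHead persists the head entry at the current commit timestamp even if
// the value-log pointer is zero, so that the readTs survives a restart.
// memtablePut stands in for the memtable insert used by flushMemtable.
func storeHead(memtablePut func(key, val []byte), head, encodedVptr []byte, commitTs uint64) {
	memtablePut(keyWithTs(head, commitTs), encodedVptr)
}

// loadHead looks the head entry up at the maximum possible timestamp, which
// with the inverted-suffix encoding resolves to the newest stored version;
// that version doubles as the readTs to resume from. getLatest stands in for
// the internal get used by NewKV.
func loadHead(getLatest func(key []byte) (val []byte, version uint64, err error), head []byte) (vptr []byte, readTs uint64, err error) {
	val, version, err := getLatest(keyWithTs(head, math.MaxUint64))
	if err != nil {
		return nil, 0, err
	}
	return val, version, nil
}
```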
