Commit 58415e25 authored by renovate

Update module github.com/syndtr/goleveldb to v1

parent 877afafd
Pipeline #17445 passed with stages in 1 minute and 43 seconds
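Renovate's change is confined to go.mod, go.sum and the vendored copy of the module (the Go source hunks below). Note that the v1.0.0 tag effectively points at older code than the previously pinned pseudo-version, so most of the vendored hunks remove post-tag additions rather than add new ones. For a quick local sanity check of the bumped module, a throwaway program along these lines (path and keys invented here) exercises the basic goleveldb API:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    // Open (or create) a scratch database, write one key, read it back.
    db, err := leveldb.OpenFile("/tmp/goleveldb-smoke", nil)
    if err != nil {
        panic(err)
    }
    defer db.Close()

    if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
        panic(err)
    }
    v, err := db.Get([]byte("k"), nil)
    if err != nil {
        panic(err)
    }
    fmt.Printf("read back: %s\n", v)
}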
......@@ -5,10 +5,9 @@ go 1.15
require (
github.com/PuerkitoBio/goquery v1.5.0
github.com/PuerkitoBio/purell v0.1.0
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/google/go-cmp v0.5.6
github.com/google/uuid v1.1.1 // indirect
github.com/pborman/uuid v1.2.1
github.com/syndtr/goleveldb v0.0.0-20190923125748-758128399b1d
github.com/syndtr/goleveldb v1.0.0
golang.org/x/net v0.0.0-20190926025831-c00fd9afed17 // indirect
)
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
github.com/PuerkitoBio/purell v0.0.0-20180310210909-975f53781597 h1:1H3FyRw7YsqIty9WHPOVEGJaFJ1sfGVZ3PPDUw3ob2w=
github.com/PuerkitoBio/purell v0.0.0-20180310210909-975f53781597/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v0.1.0 h1:N8Bcc53nei5frgNYgAKo93qMUVdU5LUGHCBv8efdVcM=
github.com/PuerkitoBio/purell v0.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
......@@ -28,14 +22,10 @@ github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pborman/uuid v0.0.0-20171128162732-e53336930665 h1:7G9lvlxEu1ZPLqJnsRY1MuoBaf2Mg4qbtcxNRXKdzFs=
github.com/pborman/uuid v0.0.0-20171128162732-e53336930665/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/syndtr/goleveldb v0.0.0-20190923125748-758128399b1d h1:OgkXbz/O0zsJoaB+z6n/a3bNGCbCWhBPLfGr6qaBprM=
github.com/syndtr/goleveldb v0.0.0-20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
......@@ -49,6 +39,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
......
......@@ -238,11 +238,6 @@ func newBatch() interface{} {
return &Batch{}
}
// MakeBatch returns empty batch with preallocated buffer.
func MakeBatch(n int) *Batch {
return &Batch{data: make([]byte, 0, n)}
}
func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error {
var index batchIndex
for i, o := 0, 0; o < len(data); i++ {
......
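The MakeBatch helper removed above only preallocated the batch buffer; with v1.0.0 a batch is constructed directly and grows as entries are added. A minimal sketch (keys invented, db assumed to be an open *leveldb.DB):

package main

import "github.com/syndtr/goleveldb/leveldb"

// writeBatch builds a batch without a capacity hint, which is all v1.0.0
// offers, and applies it atomically.
func writeBatch(db *leveldb.DB) error {
    b := new(leveldb.Batch)
    b.Put([]byte("alpha"), []byte("1"))
    b.Delete([]byte("beta"))
    return db.Write(b, nil)
}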
......@@ -38,12 +38,6 @@ type DB struct {
inWritePaused int32 // The indicator whether write operation is paused by compaction
aliveSnaps, aliveIters int32
// Compaction statistic
memComp uint32 // The cumulative number of memory compaction
level0Comp uint32 // The cumulative number of level0 compaction
nonLevel0Comp uint32 // The cumulative number of non-level0 compaction
seekComp uint32 // The cumulative number of seek compaction
// Session.
s *session
......@@ -474,7 +468,7 @@ func recoverTable(s *session, o *opt.Options) error {
}
// Commit.
return s.commit(rec, false)
return s.commit(rec)
}
func (db *DB) recoverJournal() error {
......@@ -544,7 +538,7 @@ func (db *DB) recoverJournal() error {
rec.setJournalNum(fd.Num)
rec.setSeqNum(db.seq)
if err := db.s.commit(rec, false); err != nil {
if err := db.s.commit(rec); err != nil {
fr.Close()
return err
}
......@@ -623,7 +617,7 @@ func (db *DB) recoverJournal() error {
// Commit.
rec.setJournalNum(db.journalFd.Num)
rec.setSeqNum(db.seq)
if err := db.s.commit(rec, false); err != nil {
if err := db.s.commit(rec); err != nil {
// Close journal on error.
if db.journal != nil {
db.journal.Close()
......@@ -963,29 +957,15 @@ func (db *DB) GetProperty(name string) (value string, err error) {
value = "Compactions\n" +
" Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
"-------+------------+---------------+---------------+---------------+---------------\n"
var totalTables int
var totalSize, totalRead, totalWrite int64
var totalDuration time.Duration
for level, tables := range v.levels {
duration, read, write := db.compStats.getStat(level)
if len(tables) == 0 && duration == 0 {
continue
}
totalTables += len(tables)
totalSize += tables.size()
totalRead += read
totalWrite += write
totalDuration += duration
value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
float64(read)/1048576.0, float64(write)/1048576.0)
}
value += "-------+------------+---------------+---------------+---------------+---------------\n"
value += fmt.Sprintf(" Total | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
totalTables, float64(totalSize)/1048576.0, totalDuration.Seconds(),
float64(totalRead)/1048576.0, float64(totalWrite)/1048576.0)
case p == "compcount":
value = fmt.Sprintf("MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", atomic.LoadUint32(&db.memComp), atomic.LoadUint32(&db.level0Comp), atomic.LoadUint32(&db.nonLevel0Comp), atomic.LoadUint32(&db.seekComp))
case p == "iostats":
value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f",
float64(db.s.stor.reads())/1048576.0,
......@@ -1037,16 +1017,11 @@ type DBStats struct {
BlockCacheSize int
OpenedTablesCount int
LevelSizes Sizes
LevelSizes []int64
LevelTablesCounts []int
LevelRead Sizes
LevelWrite Sizes
LevelRead []int64
LevelWrite []int64
LevelDurations []time.Duration
MemComp uint32
Level0Comp uint32
NonLevel0Comp uint32
SeekComp uint32
}
// Stats populates s with database statistics.
......@@ -1083,17 +1058,16 @@ func (db *DB) Stats(s *DBStats) error {
for level, tables := range v.levels {
duration, read, write := db.compStats.getStat(level)
if len(tables) == 0 && duration == 0 {
continue
}
s.LevelDurations = append(s.LevelDurations, duration)
s.LevelRead = append(s.LevelRead, read)
s.LevelWrite = append(s.LevelWrite, write)
s.LevelSizes = append(s.LevelSizes, tables.size())
s.LevelTablesCounts = append(s.LevelTablesCounts, len(tables))
}
s.MemComp = atomic.LoadUint32(&db.memComp)
s.Level0Comp = atomic.LoadUint32(&db.level0Comp)
s.NonLevel0Comp = atomic.LoadUint32(&db.nonLevel0Comp)
s.SeekComp = atomic.LoadUint32(&db.seekComp)
return nil
}
......
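The hunks above trim the "leveldb.stats" property and the DBStats struct back to their v1.0.0 shape: no per-type compaction counters or "compcount" property, and the level slices appear as plain []int64. Reading the remaining statistics works the same way; a small sketch:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb"
)

// printStats dumps the textual compaction table and a couple of DBStats
// fields that are still present in v1.0.0.
func printStats(db *leveldb.DB) error {
    text, err := db.GetProperty("leveldb.stats")
    if err != nil {
        return err
    }
    fmt.Println(text)

    var s leveldb.DBStats
    if err := db.Stats(&s); err != nil {
        return err
    }
    fmt.Println("level sizes:", s.LevelSizes)
    fmt.Println("tables per level:", s.LevelTablesCounts)
    return nil
}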
......@@ -8,7 +8,6 @@ package leveldb
import (
"sync"
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb/errors"
......@@ -261,7 +260,7 @@ func (db *DB) compactionCommit(name string, rec *sessionRecord) {
db.compCommitLk.Lock()
defer db.compCommitLk.Unlock() // Defer is necessary.
db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
return db.s.commit(rec, true)
return db.s.commit(rec)
}, nil)
}
......@@ -325,12 +324,10 @@ func (db *DB) memCompaction() {
db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
// Save compaction stats
for _, r := range rec.addedTables {
stats.write += r.size
}
db.compStats.addStat(flushLevel, stats)
atomic.AddUint32(&db.memComp, 1)
// Drop frozen memdb.
db.dropFrozenMem()
......@@ -591,14 +588,6 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
for i := range stats {
db.compStats.addStat(c.sourceLevel+1, &stats[i])
}
switch c.typ {
case level0Compaction:
atomic.AddUint32(&db.level0Comp, 1)
case nonLevel0Compaction:
atomic.AddUint32(&db.nonLevel0Comp, 1)
case seekCompaction:
atomic.AddUint32(&db.seekComp, 1)
}
}
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
......
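The per-type counters dropped here (memComp, level0Comp, nonLevel0Comp, seekComp) only fed the "compcount" property removed in the db.go hunk above. Compaction still runs automatically, and a manual compaction can still be requested through the public API; a sketch covering the whole key space:

package main

import (
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/util"
)

// compactAll asks for a compaction of the entire key range; a zero
// util.Range (nil Start and Limit) means "everything".
func compactAll(db *leveldb.DB) error {
    return db.CompactRange(util.Range{})
}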
......@@ -78,17 +78,13 @@ func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Rang
}
rawIter := db.newRawIterator(auxm, auxt, islice, ro)
iter := &dbIter{
db: db,
icmp: db.s.icmp,
iter: rawIter,
seq: seq,
strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
disableSampling: db.s.o.GetDisableSeeksCompaction() || db.s.o.GetIteratorSamplingRate() <= 0,
key: make([]byte, 0),
value: make([]byte, 0),
}
if !iter.disableSampling {
iter.samplingGap = db.iterSamplingRate()
db: db,
icmp: db.s.icmp,
iter: rawIter,
seq: seq,
strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
key: make([]byte, 0),
value: make([]byte, 0),
}
atomic.AddInt32(&db.aliveIters, 1)
runtime.SetFinalizer(iter, (*dbIter).Release)
......@@ -111,14 +107,13 @@ const (
// dbIter represent an interator states over a database session.
type dbIter struct {
db *DB
icmp *iComparer
iter iterator.Iterator
seq uint64
strict bool
disableSampling bool
samplingGap int
db *DB
icmp *iComparer
iter iterator.Iterator
seq uint64
strict bool
smaplingGap int
dir dir
key []byte
value []byte
......@@ -127,14 +122,10 @@ type dbIter struct {
}
func (i *dbIter) sampleSeek() {
if i.disableSampling {
return
}
ikey := i.iter.Key()
i.samplingGap -= len(ikey) + len(i.iter.Value())
for i.samplingGap < 0 {
i.samplingGap += i.db.iterSamplingRate()
i.smaplingGap -= len(ikey) + len(i.iter.Value())
for i.smaplingGap < 0 {
i.smaplingGap += i.db.iterSamplingRate()
i.db.sampleSeek(ikey)
}
}
......
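The iterator hunks above drop the disableSampling switch, so in v1.0.0 read sampling always runs at the configured IteratorSamplingRate. Iterator usage on the caller's side is unchanged; a sketch (prefix invented):

package main

import (
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/util"
)

// scanPrefix collects all keys under a prefix. The iterator reuses its
// Key/Value buffers, so the key is copied before being retained.
func scanPrefix(db *leveldb.DB, prefix []byte) ([][]byte, error) {
    it := db.NewIterator(util.BytesPrefix(prefix), nil)
    defer it.Release()

    var keys [][]byte
    for it.Next() {
        keys = append(keys, append([]byte(nil), it.Key()...))
    }
    return keys, it.Error()
}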
......@@ -69,9 +69,6 @@ func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
// DB. And a nil Range.Limit is treated as a key after all keys in
// the DB.
//
// The returned iterator has locks on its own resources, so it can live beyond
// the lifetime of the transaction who creates them.
//
// WARNING: Any slice returned by interator (e.g. slice returned by calling
// Iterator.Key() or Iterator.Key() methods), its content should not be modified
// unless noted otherwise.
......@@ -212,7 +209,7 @@ func (tr *Transaction) Commit() error {
tr.stats.startTimer()
var cerr error
for retry := 0; retry < 3; retry++ {
cerr = tr.db.s.commit(&tr.rec, false)
cerr = tr.db.s.commit(&tr.rec)
if cerr != nil {
tr.db.logf("transaction@commit error R·%d %q", retry, cerr)
select {
......@@ -255,14 +252,13 @@ func (tr *Transaction) discard() {
// Discard transaction.
for _, t := range tr.tables {
tr.db.logf("transaction@discard @%d", t.fd.Num)
// Iterator may still use the table, so we use tOps.remove here.
tr.db.s.tops.remove(t.fd)
if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil {
tr.db.s.reuseFileNum(t.fd.Num)
}
}
}
// Discard discards the transaction.
// This method is noop if transaction is already closed (either committed or
// discarded)
//
// Other methods should not be called after transaction has been discarded.
func (tr *Transaction) Discard() {
......@@ -286,10 +282,8 @@ func (db *DB) waitCompaction() error {
// until in-flight transaction is committed or discarded.
// The returned transaction handle is safe for concurrent use.
//
// Transaction is very expensive and can overwhelm compaction, especially if
// Transaction is expensive and can overwhelm compaction, especially if
// transaction size is small. Use with caution.
// The rule of thumb is if you need to merge at least same amount of
// `Options.WriteBuffer` worth of data then use transaction, otherwise don't.
//
// The transaction must be closed once done, either by committing or discarding
// the transaction.
......
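Besides the internal commit signature, the hunks above also trim the iterator-lifetime note and the WriteBuffer rule of thumb from the doc comments; basic transaction usage is unchanged. A minimal commit-or-discard sketch against the public Transaction API:

package main

import "github.com/syndtr/goleveldb/leveldb"

// putInTx writes a single key inside a transaction, committing on success
// and discarding on failure.
func putInTx(db *leveldb.DB, key, value []byte) error {
    tr, err := db.OpenTransaction()
    if err != nil {
        return err
    }
    if err := tr.Put(key, value, nil); err != nil {
        tr.Discard()
        return err
    }
    return tr.Commit()
}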
......@@ -16,7 +16,7 @@ func bloomHash(key []byte) uint32 {
type bloomFilter int
// Name: The bloom filter serializes its parameters and is backward compatible
// The bloom filter serializes its parameters and is backward compatible
// with respect to them. Therefor, its parameters are not added to its
// name.
func (bloomFilter) Name() string {
......
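As the reworded comment notes, the bloom filter serializes its parameters with the filter rather than encoding them in its name, so the same Name() is reported regardless of the bits-per-key setting. Wiring it in goes through Options (10 bits per key here is just a common choice):

package main

import (
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/filter"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

// openWithBloom opens a database with a 10-bits-per-key bloom filter.
func openWithBloom(path string) (*leveldb.DB, error) {
    o := &opt.Options{Filter: filter.NewBloomFilter(10)}
    return leveldb.OpenFile(path, o)
}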
......@@ -278,14 +278,6 @@ type Options struct {
// The default is false.
DisableLargeBatchTransaction bool
// DisableSeeksCompaction allows disabling 'seeks triggered compaction'.
// The purpose of 'seeks triggered compaction' is to optimize database so
// that 'level seeks' can be minimized, however this might generate many
// small compaction which may not preferable.
//
// The default is false.
DisableSeeksCompaction bool
// ErrorIfExist defines whether an error should returned if the DB already
// exist.
//
......@@ -317,8 +309,6 @@ type Options struct {
// IteratorSamplingRate defines approximate gap (in bytes) between read
// sampling of an iterator. The samples will be used to determine when
// compaction should be triggered.
// Use negative value to disable iterator sampling.
// The iterator sampling is disabled if DisableSeeksCompaction is true.
//
// The default is 1MiB.
IteratorSamplingRate int
......@@ -536,13 +526,6 @@ func (o *Options) GetDisableLargeBatchTransaction() bool {
return o.DisableLargeBatchTransaction
}
func (o *Options) GetDisableSeeksCompaction() bool {
if o == nil {
return false
}
return o.DisableSeeksCompaction
}
func (o *Options) GetErrorIfExist() bool {
if o == nil {
return false
......@@ -565,10 +548,8 @@ func (o *Options) GetFilter() filter.Filter {
}
func (o *Options) GetIteratorSamplingRate() int {
if o == nil || o.IteratorSamplingRate == 0 {
if o == nil || o.IteratorSamplingRate <= 0 {
return DefaultIteratorSamplingRate
} else if o.IteratorSamplingRate < 0 {
return 0
}
return o.IteratorSamplingRate
}
......
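With the v1.0.0 accessor shown above, a non-positive IteratorSamplingRate falls back to the default instead of disabling sampling, and there is no DisableSeeksCompaction field at all. A tiny illustration:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
    // A negative rate no longer means "disable sampling"; the getter
    // substitutes the documented default of 1 MiB.
    o := &opt.Options{IteratorSamplingRate: -1}
    fmt.Println(o.GetIteratorSamplingRate()) // 1048576
}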
......@@ -47,24 +47,15 @@ type session struct {
o *cachedOptions
icmp *iComparer
tops *tOps
fileRef map[int64]int
manifest *journal.Writer
manifestWriter storage.Writer
manifestFd storage.FileDesc
stCompPtrs []internalKey // compaction pointers; need external synchronization
stVersion *version // current version
ntVersionId int64 // next version id to assign
refCh chan *vTask
relCh chan *vTask
deltaCh chan *vDelta
abandon chan int64
closeC chan struct{}
closeW sync.WaitGroup
vmu sync.Mutex
// Testing fields
fileRefCh chan chan map[int64]int // channel used to pass current reference stat
stCompPtrs []internalKey // compaction pointers; need external synchronization
stVersion *version // current version
vmu sync.Mutex
}
// Creates new initialized session instance.
......@@ -77,21 +68,13 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
return
}
s = &session{
stor: newIStorage(stor),
storLock: storLock,
refCh: make(chan *vTask),
relCh: make(chan *vTask),
deltaCh: make(chan *vDelta),
abandon: make(chan int64),
fileRefCh: make(chan chan map[int64]int),
closeC: make(chan struct{}),
stor: newIStorage(stor),
storLock: storLock,
fileRef: make(map[int64]int),
}
s.setOptions(o)
s.tops = newTableOps(s)
s.closeW.Add(1)
go s.refLoop()
s.setVersion(nil, newVersion(s))
s.setVersion(newVersion(s))
s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
return
}
......@@ -107,11 +90,7 @@ func (s *session) close() {
}
s.manifest = nil
s.manifestWriter = nil
s.setVersion(nil, &version{s: s, closing: true, id: s.ntVersionId})
// Close all background goroutines
close(s.closeC)
s.closeW.Wait()
s.setVersion(&version{s: s, closing: true})
}
// Release session lock.
......@@ -201,27 +180,19 @@ func (s *session) recover() (err error) {
}
s.manifestFd = fd
s.setVersion(rec, staging.finish(false))
s.setVersion(staging.finish())
s.setNextFileNum(rec.nextFileNum)
s.recordCommited(rec)
return nil
}
// Commit session; need external synchronization.
func (s *session) commit(r *sessionRecord, trivial bool) (err error) {
func (s *session) commit(r *sessionRecord) (err error) {
v := s.version()
defer v.release()
// spawn new version based on current version
nv := v.spawn(r, trivial)
// abandon useless version id to prevent blocking version processing loop.
defer func() {
if err != nil {
s.abandon <- nv.id
s.logf("commit@abandon useless vid D%d", nv.id)
}
}()
nv := v.spawn(r)
if s.manifest == nil {
// manifest journal writer not yet created, create one
......@@ -232,7 +203,7 @@ func (s *session) commit(r *sessionRecord, trivial bool) (err error) {
// finally, apply new version if no error rise
if err == nil {
s.setVersion(r, nv)
s.setVersion(nv)
}
return
......
......@@ -14,13 +14,6 @@ import (
"github.com/syndtr/goleveldb/leveldb/opt"
)
const (
undefinedCompaction = iota
level0Compaction
nonLevel0Compaction
seekCompaction
)
func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int {
v := s.version()
defer v.release()
......@@ -57,7 +50,6 @@ func (s *session) pickCompaction() *compaction {
var sourceLevel int
var t0 tFiles
var typ int
if v.cScore >= 1 {
sourceLevel = v.cLevel
cptr := s.getCompPtr(sourceLevel)
......@@ -71,24 +63,18 @@ func (s *session) pickCompaction() *compaction {
if len(t0) == 0 {
t0 = append(t0, tables[0])
}
if sourceLevel == 0 {
typ = level0Compaction
} else {
typ = nonLevel0Compaction
}
} else {
if p := atomic.LoadPointer(&v.cSeek); p != nil {
ts := (*tSet)(p)
sourceLevel = ts.level
t0 = append(t0, ts.table)
typ = seekCompaction
} else {
v.release()
return nil
}
}
return newCompaction(s, v, sourceLevel, t0, typ)
return newCompaction(s, v, sourceLevel, t0)
}
// Create compaction from given level and range; need external synchronization.
......@@ -123,18 +109,13 @@ func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit
}
}
typ := level0Compaction
if sourceLevel != 0 {
typ = nonLevel0Compaction
}
return newCompaction(s, v, sourceLevel, t0, typ)
return newCompaction(s, v, sourceLevel, t0)
}
func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles, typ int) *compaction {
func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction {
c := &compaction{
s: s,
v: v,
typ: typ,
sourceLevel: sourceLevel,
levels: [2]tFiles{t0, nil},
maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)),
......@@ -150,7 +131,6 @@ type compaction struct {
s *session
v *version
typ int
sourceLevel int
levels [2]tFiles
maxGPOverlaps int64
......@@ -201,14 +181,10 @@ func (c *compaction) expand() {
t0, t1 := c.levels[0], c.levels[1]
imin, imax := t0.getRange(c.s.icmp)
// For non-zero levels, the ukey can't hop across tables at all.
if c.sourceLevel == 0 {
// We expand t0 here just incase ukey hop across tables.
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0)
if len(t0) != len(c.levels[0]) {
imin, imax = t0.getRange(c.s.icmp)