Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chores(linter): enable rest of the linters #1871

Merged
merged 1 commit into from
Feb 17, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 12 additions & 8 deletions .golangci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,21 @@ run:
linters-settings:
lll:
line-length: 120
staticcheck:
checks:
- all
- '-SA1019' # it is okay to use math/rand at times

linters:
disable-all: true
enable:
# - errcheck
# - ineffassign
# - gas
- errcheck
- ineffassign
# - gas
- gofmt
# - gosimple
# - govet
# - lll
# - unused
# - staticcheck
- gosimple
- govet
- lll
- unused
- staticcheck
- goimports
3 changes: 1 addition & 2 deletions backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,8 +79,7 @@ func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) {
var valCopy []byte
if !item.IsDeletedOrExpired() {
// No need to copy value, if item is deleted or expired.
var err error
err = item.Value(func(val []byte) error {
err := item.Value(func(val []byte) error {
valCopy = a.Copy(val)
return nil
})
Expand Down
2 changes: 1 addition & 1 deletion badger/cmd/flatten.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ func flatten(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
if fo.compressionType < 0 || fo.compressionType > 2 {
if fo.compressionType > 2 {
return errors.Errorf(
"compression value must be one of 0 (disabled), 1 (Snappy), or 2 (ZSTD)")
}
Expand Down
8 changes: 4 additions & 4 deletions badger/cmd/rotate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,9 +108,9 @@ func TestRotatePlainTextToEncrypted(t *testing.T) {
db, err := badger.Open(opts)
require.NoError(t, err)

db.Update(func(txn *badger.Txn) error {
require.NoError(t, db.Update(func(txn *badger.Txn) error {
return txn.Set([]byte("foo"), []byte("bar"))
})
}))

require.NoError(t, db.Close())

Expand Down Expand Up @@ -140,7 +140,7 @@ func TestRotatePlainTextToEncrypted(t *testing.T) {
db, err = badger.Open(opts)
require.NoError(t, err)

db.View(func(txn *badger.Txn) error {
require.NoError(t, db.View(func(txn *badger.Txn) error {
iopt := badger.DefaultIteratorOptions
it := txn.NewIterator(iopt)
defer it.Close()
Expand All @@ -150,6 +150,6 @@ func TestRotatePlainTextToEncrypted(t *testing.T) {
}
require.Equal(t, 1, count)
return nil
})
}))
require.NoError(t, db.Close())
}
3 changes: 2 additions & 1 deletion badger/cmd/stream.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ func stream(cmd *cobra.Command, args []string) error {
WithEncryptionKey(encKey)

// Options for output DB.
if so.compressionType < 0 || so.compressionType > 2 {
if so.compressionType > 2 {
return errors.Errorf(
"compression value must be one of 0 (disabled), 1 (Snappy), or 2 (ZSTD)")
}
Expand Down Expand Up @@ -126,6 +126,7 @@ func stream(cmd *cobra.Command, args []string) error {
f, err := os.OpenFile(so.outFile, os.O_RDWR|os.O_CREATE, 0666)
y.Check(err)
_, err = stream.Backup(f, 0)
y.Check(err)
}
fmt.Println("Done.")
return err
Expand Down
4 changes: 2 additions & 2 deletions badger/cmd/write_bench.go
Original file line number Diff line number Diff line change
Expand Up @@ -314,7 +314,7 @@ func writeBench(cmd *cobra.Command, args []string) error {
}

c.SignalAndWait()
fmt.Printf(db.LevelsToString())
fmt.Println(db.LevelsToString())
return err
}

Expand Down Expand Up @@ -401,7 +401,7 @@ func reportStats(c *z.Closer, db *badger.DB) {
humanize.IBytes(uint64(z.NumAllocBytes())))

if count%10 == 0 {
fmt.Printf(db.LevelsToString())
fmt.Println(db.LevelsToString())
}
}
}
Expand Down
16 changes: 5 additions & 11 deletions db.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,10 +49,6 @@ var (
bannedNsKey = []byte("!badger!banned") // For storing the banned namespaces.
)

const (
maxNumSplits = 128
)

type closers struct {
updateSize *z.Closer
compactors *z.Closer
Expand Down Expand Up @@ -1872,7 +1868,10 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, matches
}

c := z.NewCloser(1)
s := db.pub.newSubscriber(c, matches)
s, err := db.pub.newSubscriber(c, matches)
if err != nil {
return y.Wrapf(err, "while creating a new subscriber")
}
slurp := func(batch *pb.KVList) error {
for {
select {
Expand Down Expand Up @@ -1926,11 +1925,6 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, matches
}
}

// shouldEncrypt returns bool, which tells whether to encrypt or not.
func (db *DB) shouldEncrypt() bool {
return len(db.opt.EncryptionKey) > 0
}

func (db *DB) syncDir(dir string) error {
if db.opt.InMemory {
return nil
Expand Down Expand Up @@ -1971,7 +1965,7 @@ func (db *DB) StreamDB(outOptions Options) error {
defer outDB.Close()
writer := outDB.NewStreamWriter()
if err := writer.Prepare(); err != nil {
y.Wrapf(err, "cannot create stream writer in out DB at %s", outDir)
return y.Wrapf(err, "cannot create stream writer in out DB at %s", outDir)
}

// Stream contents of DB to the output DB.
Expand Down
8 changes: 4 additions & 4 deletions db2_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -883,7 +883,7 @@ func TestMaxVersion(t *testing.T) {
rand.Read(k)
// Create multiple version of the same key.
for i := 1; i <= N; i++ {
wb.SetEntryAt(&Entry{Key: k}, uint64(i))
require.NoError(t, wb.SetEntryAt(&Entry{Key: k}, uint64(i)))
}
require.NoError(t, wb.Flush())

Expand All @@ -906,7 +906,7 @@ func TestMaxVersion(t *testing.T) {

// This will create commits from 1 to N.
for i := 1; i <= N; i++ {
wb.SetEntryAt(&Entry{Key: []byte(fmt.Sprintf("%d", i))}, uint64(i))
require.NoError(t, wb.SetEntryAt(&Entry{Key: []byte(fmt.Sprintf("%d", i))}, uint64(i)))
}
require.NoError(t, wb.Flush())

Expand Down Expand Up @@ -1001,12 +1001,12 @@ func TestKeyCount(t *testing.T) {

write := func(kvs *pb.KVList) error {
buf := z.NewBuffer(1<<20, "test")
defer buf.Release()
defer func() { require.NoError(t, buf.Release()) }()

for _, kv := range kvs.Kv {
KVToBuffer(kv, buf)
}
writer.Write(buf)
require.NoError(t, writer.Write(buf))
return nil
}

Expand Down
32 changes: 16 additions & 16 deletions db_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -610,7 +610,7 @@ func TestGetMore(t *testing.T) {
}
require.NoError(t, txn.Commit())
}
db.validate()
require.NoError(t, db.validate())
for i := 0; i < n; i++ {
if (i % 10000) == 0 {
// Display some progress. Right now, it's not very fast with no caching.
Expand Down Expand Up @@ -643,7 +643,7 @@ func TestExistsMore(t *testing.T) {
}
require.NoError(t, txn.Commit())
}
db.validate()
require.NoError(t, db.validate())

for i := 0; i < n; i++ {
if (i % 1000) == 0 {
Expand Down Expand Up @@ -673,7 +673,7 @@ func TestExistsMore(t *testing.T) {
}
require.NoError(t, txn.Commit())
}
db.validate()
require.NoError(t, db.validate())
for i := 0; i < n; i++ {
if (i % 10000) == 0 {
// Display some progress. Right now, it's not very fast with no caching.
Expand Down Expand Up @@ -1231,7 +1231,7 @@ func TestDiscardVersionsBelow(t *testing.T) {
opts.PrefetchValues = false

// Verify that there are 4 versions, and record 3rd version (2nd from top in iteration)
db.View(func(txn *Txn) error {
require.NoError(t, db.View(func(txn *Txn) error {
it := txn.NewIterator(opts)
defer it.Close()
var count int
Expand All @@ -1245,7 +1245,7 @@ func TestDiscardVersionsBelow(t *testing.T) {
}
require.Equal(t, 4, count)
return nil
})
}))

// Set new version and discard older ones.
err := db.Update(func(txn *Txn) error {
Expand All @@ -1255,7 +1255,7 @@ func TestDiscardVersionsBelow(t *testing.T) {

// Verify that there are only 2 versions left, and versions
// below ts have been deleted.
db.View(func(txn *Txn) error {
require.NoError(t, db.View(func(txn *Txn) error {
it := txn.NewIterator(opts)
defer it.Close()
var count int
Expand All @@ -1269,7 +1269,7 @@ func TestDiscardVersionsBelow(t *testing.T) {
}
require.Equal(t, 1, count)
return nil
})
}))
})
}

Expand Down Expand Up @@ -1478,7 +1478,7 @@ func TestGetSetDeadlock(t *testing.T) {
timeout, done := time.After(10*time.Second), make(chan bool)

go func() {
db.Update(func(txn *Txn) error {
require.NoError(t, db.Update(func(txn *Txn) error {
item, err := txn.Get(key)
require.NoError(t, err)
err = item.Value(nil) // This take a RLock on file
Expand All @@ -1488,7 +1488,7 @@ func TestGetSetDeadlock(t *testing.T) {
require.NoError(t, txn.SetEntry(NewEntry(key, val)))
require.NoError(t, txn.SetEntry(NewEntry([]byte("key2"), val)))
return nil
})
}))
done <- true
}()

Expand Down Expand Up @@ -1818,9 +1818,9 @@ func TestMinReadTs(t *testing.T) {
db.orc.readMark.Done(uint64(20)) // Because we called readTs.

for i := 0; i < 10; i++ {
db.View(func(txn *Txn) error {
require.NoError(t, db.View(func(txn *Txn) error {
return nil
})
}))
}
time.Sleep(time.Millisecond)
require.Equal(t, uint64(20), db.orc.readMark.DoneUntil())
Expand Down Expand Up @@ -2089,7 +2089,7 @@ func TestVerifyChecksum(t *testing.T) {
st := 0

buf := z.NewBuffer(10<<20, "test")
defer buf.Release()
defer func() { require.NoError(t, buf.Release()) }()
for i := 0; i < 1000; i++ {
key := make([]byte, 8)
binary.BigEndian.PutUint64(key, uint64(i))
Expand Down Expand Up @@ -2153,12 +2153,12 @@ func TestWriteInemory(t *testing.T) {
item, err := txn.Get([]byte(fmt.Sprintf("key%d", j)))
require.NoError(t, err)
expected := []byte(fmt.Sprintf("val%d", j))
item.Value(func(val []byte) error {
require.NoError(t, item.Value(func(val []byte) error {
require.Equal(t, expected, val,
"Invalid value for key %q. expected: %q, actual: %q",
item.Key(), expected, val)
return nil
})
}))
}
return nil
})
Expand Down Expand Up @@ -2242,7 +2242,7 @@ func TestOpenDBReadOnly(t *testing.T) {
var count int
read := func() {
count = 0
db.View(func(txn *Txn) error {
require.NoError(t, db.View(func(txn *Txn) error {
it := txn.NewIterator(DefaultIteratorOptions)
defer it.Close()
for it.Rewind(); it.Valid(); it.Next() {
Expand All @@ -2254,7 +2254,7 @@ func TestOpenDBReadOnly(t *testing.T) {
count++
}
return nil
})
}))
}
read()
require.Equal(t, 10, count)
Expand Down
4 changes: 2 additions & 2 deletions iterator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ func TestIterateSinceTs(t *testing.T) {
iopt := DefaultIteratorOptions
iopt.SinceTs = sinceTs

db.View(func(txn *Txn) error {
require.NoError(t, db.View(func(txn *Txn) error {
it := txn.NewIterator(iopt)
defer it.Close()

Expand All @@ -156,7 +156,7 @@ func TestIterateSinceTs(t *testing.T) {
require.GreaterOrEqual(t, i.Version(), sinceTs)
}
return nil
})
}))

})
}
Expand Down
14 changes: 4 additions & 10 deletions levels.go
Original file line number Diff line number Diff line change
Expand Up @@ -998,20 +998,14 @@ func containsPrefix(table *table.Table, prefix []byte) bool {
// In table iterator's Seek, we assume that key has version in last 8 bytes. We set
// version=0 (ts=math.MaxUint64), so that we don't skip the key prefixed with prefix.
ti.Seek(y.KeyWithTs(prefix, math.MaxUint64))
if bytes.HasPrefix(ti.Key(), prefix) {
return true
}
return false
return bytes.HasPrefix(ti.Key(), prefix)
}

if bytes.Compare(prefix, smallValue) > 0 &&
bytes.Compare(prefix, largeValue) < 0 {
// There may be a case when table contains [0x0000,...., 0xffff]. If we are searching for
// k=0x0011, we should not directly infer that k is present. It may not be present.
if !isPresent() {
return false
}
return true
return isPresent()
}

return false
Expand Down Expand Up @@ -1426,8 +1420,8 @@ func (s *levelsController) runCompactDef(id, l int, cd compactDef) (err error) {
cd.splits = append(cd.splits, keyRange{})
}

// Table should never be moved directly between levels, always be rewritten to allow discarding
// invalid versions.
// Table should never be moved directly between levels,
// always be rewritten to allow discarding invalid versions.

newTables, decr, err := s.compactBuildTables(l, cd)
if err != nil {
Expand Down
Loading