
Commit 12231a4

holiman authored and karalabe committed
core/state/snapshot: replace bigcache with fastcache (#21)
1 parent 8e46942 commit 12231a4

4 files changed: +21 -43 lines

core/state/snapshot/disklayer.go (+6 -8)

@@ -19,7 +19,7 @@ package snapshot
 import (
     "sync"
 
-    "github.com/allegro/bigcache"
+    "github.com/VictoriaMetrics/fastcache"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/ethdb"
@@ -30,7 +30,7 @@ import (
 type diskLayer struct {
     journal string              // Path of the snapshot journal to use on shutdown
     db      ethdb.KeyValueStore // Key-value store containing the base snapshot
-    cache   *bigcache.BigCache  // Cache to avoid hitting the disk for direct access
+    cache   *fastcache.Cache    // Cache to avoid hitting the disk for direct access
 
     root  common.Hash // Root hash of the base snapshot
     stale bool        // Signals that the layer became stale (state progressed)
@@ -80,17 +80,15 @@ func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
     if dl.stale {
         return nil, ErrSnapshotStale
     }
-    key := string(hash[:])
-
     // Try to retrieve the account from the memory cache
-    if blob, err := dl.cache.Get(key); err == nil {
+    if blob := dl.cache.Get(nil, hash[:]); blob != nil {
         snapshotCleanHitMeter.Mark(1)
         snapshotCleanReadMeter.Mark(int64(len(blob)))
         return blob, nil
     }
     // Cache doesn't contain account, pull from disk and cache for later
     blob := rawdb.ReadAccountSnapshot(dl.db, hash)
-    dl.cache.Set(key, blob)
+    dl.cache.Set(hash[:], blob)
 
     snapshotCleanMissMeter.Mark(1)
     snapshotCleanWriteMeter.Mark(int64(len(blob)))
@@ -109,10 +107,10 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
     if dl.stale {
         return nil, ErrSnapshotStale
     }
-    key := string(append(accountHash[:], storageHash[:]...))
+    key := append(accountHash[:], storageHash[:]...)
 
     // Try to retrieve the storage slot from the memory cache
-    if blob, err := dl.cache.Get(key); err == nil {
+    if blob := dl.cache.Get(nil, key); blob != nil {
         snapshotCleanHitMeter.Mark(1)
         snapshotCleanReadMeter.Mark(int64(len(blob)))
         return blob, nil
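
The hunks above swap bigcache's string-keyed, error-returning lookups for fastcache's byte-slice API, where a miss is reported by an empty result rather than an error. Below is a minimal, hypothetical sketch of that read-through pattern against the public fastcache API; it is not taken from the commit, and the key and payload values are made up for illustration.

package main

import (
    "fmt"

    "github.com/VictoriaMetrics/fastcache"
)

func main() {
    // The commit sizes the real clean cache at 512 * 1024 * 1024 bytes;
    // 32 MiB is plenty for this toy example.
    cache := fastcache.New(32 * 1024 * 1024)

    key := []byte("account-hash")    // hypothetical key, stands in for hash[:]
    payload := []byte("account-rlp") // hypothetical value, stands in for the RLP blob

    // bigcache:  blob, err := cache.Get(string(key))  -> miss reported via err
    // fastcache: blob := cache.Get(nil, key)          -> miss reported via nil result
    if blob := cache.Get(nil, key); blob != nil {
        fmt.Println("clean cache hit:", string(blob))
        return
    }
    // Miss: pretend the blob was read from disk, then cache it for later lookups.
    cache.Set(key, payload)
    fmt.Println("clean cache miss, cached", len(payload), "bytes")
}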

core/state/snapshot/generate.go (+2 -8)

@@ -22,7 +22,7 @@ import (
     "math/big"
     "time"
 
-    "github.com/allegro/bigcache"
+    "github.com/VictoriaMetrics/fastcache"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/crypto"
@@ -196,13 +196,7 @@ func generateSnapshot(db ethdb.KeyValueStore, journal string, root common.Hash)
         return nil, err
     }
     // New snapshot generated, construct a brand new base layer
-    cache, _ := bigcache.NewBigCache(bigcache.Config{ // TODO(karalabe): dedup
-        Shards:             1024,
-        LifeWindow:         time.Hour,
-        MaxEntriesInWindow: 512 * 1024,
-        MaxEntrySize:       512,
-        HardMaxCacheSize:   512,
-    })
+    cache := fastcache.New(512 * 1024 * 1024)
     return &diskLayer{
         journal: journal,
         db:      db,
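
A note on the new sizing: bigcache's HardMaxCacheSize field is given in megabytes, while fastcache.New takes a budget in bytes, so the removed 512 MB hard cap corresponds to the 512 * 1024 * 1024 argument above. A hedged sketch of the equivalent construction (the helper name is invented for illustration):

package main

import "github.com/VictoriaMetrics/fastcache"

// newCleanCache is a hypothetical helper mirroring the construction in the
// diff: a single byte-sized argument replaces the whole bigcache.Config,
// with sharding and eviction handled internally by fastcache.
func newCleanCache() *fastcache.Cache {
    return fastcache.New(512 * 1024 * 1024) // 512 MiB, same budget as HardMaxCacheSize: 512
}

func main() { _ = newCleanCache() }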

core/state/snapshot/snapshot.go (+7 -15)

@@ -22,9 +22,8 @@ import (
     "fmt"
     "os"
     "sync"
-    "time"
 
-    "github.com/allegro/bigcache"
+    "github.com/VictoriaMetrics/fastcache"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/ethdb"
@@ -323,7 +322,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
         if len(data) > 0 {
             // Account was updated, push to disk
             rawdb.WriteAccountSnapshot(batch, hash, data)
-            base.cache.Set(string(hash[:]), data)
+            base.cache.Set(hash[:], data)
 
             if batch.ValueSize() > ethdb.IdealBatchSize {
                 if err := batch.Write(); err != nil {
@@ -334,13 +333,13 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
         } else {
             // Account was deleted, remove all storage slots too
             rawdb.DeleteAccountSnapshot(batch, hash)
-            base.cache.Set(string(hash[:]), nil)
+            base.cache.Set(hash[:], nil)
 
             it := rawdb.IterateStorageSnapshots(base.db, hash)
             for it.Next() {
                 if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
                     batch.Delete(key)
-                    base.cache.Delete(string(key[1:]))
+                    base.cache.Del(key[1:])
                 }
             }
             it.Release()
@@ -351,10 +350,10 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
         for storageHash, data := range storage {
             if len(data) > 0 {
                 rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
-                base.cache.Set(string(append(accountHash[:], storageHash[:]...)), data)
+                base.cache.Set(append(accountHash[:], storageHash[:]...), data)
             } else {
                 rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
-                base.cache.Set(string(append(accountHash[:], storageHash[:]...)), nil)
+                base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
             }
         }
         if batch.ValueSize() > ethdb.IdealBatchSize {
@@ -401,17 +400,10 @@ func loadSnapshot(db ethdb.KeyValueStore, journal string, root common.Hash) (sna
     if baseRoot == (common.Hash{}) {
         return nil, errors.New("missing or corrupted snapshot")
     }
-    cache, _ := bigcache.NewBigCache(bigcache.Config{ // TODO(karalabe): dedup
-        Shards:             1024,
-        LifeWindow:         time.Hour,
-        MaxEntriesInWindow: 512 * 1024,
-        MaxEntrySize:       512,
-        HardMaxCacheSize:   512,
-    })
     base := &diskLayer{
         journal: journal,
         db:      db,
-        cache:   cache,
+        cache:   fastcache.New(512 * 1024 * 1024),
         root:    baseRoot,
     }
     // Load all the snapshot diffs from the journal, failing if their chain is broken
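
The diffToDisk hunks above keep writing nil values for deleted accounts and storage slots, and switch the explicit removal call from Delete to Del. A small sketch, assuming only fastcache's documented Set/Get/Has/Del calls and an illustrative key, of how those two operations behave:

package main

import (
    "fmt"

    "github.com/VictoriaMetrics/fastcache"
)

func main() {
    cache := fastcache.New(32 * 1024 * 1024)
    key := []byte("deleted-account-hash") // hypothetical key

    // Set with a nil value keeps the key around with an empty entry: Get
    // reports no data, but Has can still tell it apart from a plain miss.
    cache.Set(key, nil)
    fmt.Println(len(cache.Get(nil, key)), cache.Has(key)) // 0 true

    // Del removes the entry outright, as done for the iterated storage slots.
    cache.Del(key)
    fmt.Println(cache.Has(key)) // false
}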

core/state/snapshot/snapshot_test.go (+6 -12)

@@ -19,9 +19,8 @@ package snapshot
 import (
     "fmt"
     "testing"
-    "time"
 
-    "github.com/allegro/bigcache"
+    "github.com/VictoriaMetrics/fastcache"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/rawdb"
 )
@@ -31,11 +30,10 @@ import (
 // to check internal corner case around the bottom-most memory accumulator.
 func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
     // Create an empty base layer and a snapshot tree out of it
-    cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
     base := &diskLayer{
         db:    rawdb.NewMemoryDatabase(),
         root:  common.HexToHash("0x01"),
-        cache: cache,
+        cache: fastcache.New(1024 * 500),
     }
     snaps := &Tree{
         layers: map[common.Hash]snapshot{
@@ -77,11 +75,10 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 // layer to check the usual mode of operation where the accumulator is retained.
 func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
     // Create an empty base layer and a snapshot tree out of it
-    cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
     base := &diskLayer{
         db:    rawdb.NewMemoryDatabase(),
         root:  common.HexToHash("0x01"),
-        cache: cache,
+        cache: fastcache.New(1024 * 500),
     }
     snaps := &Tree{
         layers: map[common.Hash]snapshot{
@@ -126,11 +123,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 // to check internal corner case around the bottom-most memory accumulator.
 func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
     // Create an empty base layer and a snapshot tree out of it
-    cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
     base := &diskLayer{
         db:    rawdb.NewMemoryDatabase(),
         root:  common.HexToHash("0x01"),
-        cache: cache,
+        cache: fastcache.New(1024 * 500),
     }
     snaps := &Tree{
         layers: map[common.Hash]snapshot{
@@ -175,11 +171,10 @@ func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
 // layer to check the usual mode of operation where the accumulator is retained.
 func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
     // Create an empty base layer and a snapshot tree out of it
-    cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
     base := &diskLayer{
         db:    rawdb.NewMemoryDatabase(),
         root:  common.HexToHash("0x01"),
-        cache: cache,
+        cache: fastcache.New(1024 * 500),
     }
     snaps := &Tree{
         layers: map[common.Hash]snapshot{
@@ -240,11 +235,10 @@ func TestPostCapBasicDataAccess(t *testing.T) {
         }
     }
     // Create a starting base layer and a snapshot tree out of it
-    cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(time.Minute))
     base := &diskLayer{
         db:    rawdb.NewMemoryDatabase(),
         root:  common.HexToHash("0x01"),
-        cache: cache,
+        cache: fastcache.New(1024 * 500),
     }
     snaps := &Tree{
         layers: map[common.Hash]snapshot{
