ethdb: add benchmark test suite (ethereum#26659)
rjl493456442 authored and shekhirin committed Jun 6, 2023
1 parent 63dedfc commit b9cc7bb
Showing 4 changed files with 147 additions and 1 deletion.
118 changes: 118 additions & 0 deletions ethdb/dbtest/testsuite.go
@@ -18,6 +18,7 @@ package dbtest

import (
"bytes"
"math/rand"
"reflect"
"sort"
"testing"
@@ -377,6 +378,101 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) {
})
}

// BenchDatabaseSuite runs a suite of benchmarks against a KeyValueStore database
// implementation.
func BenchDatabaseSuite(b *testing.B, New func() ethdb.KeyValueStore) {
var (
keys, vals = makeDataset(1_000_000, 32, 32, false)
sKeys, sVals = makeDataset(1_000_000, 32, 32, true)
)
// Run benchmarks sequentially
b.Run("Write", func(b *testing.B) {
benchWrite := func(b *testing.B, keys, vals [][]byte) {
b.ResetTimer()
b.ReportAllocs()

db := New()
defer db.Close()

for i := 0; i < len(keys); i++ {
db.Put(keys[i], vals[i])
}
}
b.Run("WriteSorted", func(b *testing.B) {
benchWrite(b, sKeys, sVals)
})
b.Run("WriteRandom", func(b *testing.B) {
benchWrite(b, keys, vals)
})
})
b.Run("Read", func(b *testing.B) {
benchRead := func(b *testing.B, keys, vals [][]byte) {
db := New()
defer db.Close()

for i := 0; i < len(keys); i++ {
db.Put(keys[i], vals[i])
}
b.ResetTimer()
b.ReportAllocs()

for i := 0; i < len(keys); i++ {
db.Get(keys[i])
}
}
b.Run("ReadSorted", func(b *testing.B) {
benchRead(b, sKeys, sVals)
})
b.Run("ReadRandom", func(b *testing.B) {
benchRead(b, keys, vals)
})
})
b.Run("Iteration", func(b *testing.B) {
benchIteration := func(b *testing.B, keys, vals [][]byte) {
db := New()
defer db.Close()

for i := 0; i < len(keys); i++ {
db.Put(keys[i], vals[i])
}
b.ResetTimer()
b.ReportAllocs()

it := db.NewIterator(nil, nil)
for it.Next() {
}
it.Release()
}
b.Run("IterationSorted", func(b *testing.B) {
benchIteration(b, sKeys, sVals)
})
b.Run("IterationRandom", func(b *testing.B) {
benchIteration(b, keys, vals)
})
})
b.Run("BatchWrite", func(b *testing.B) {
benchBatchWrite := func(b *testing.B, keys, vals [][]byte) {
b.ResetTimer()
b.ReportAllocs()

db := New()
defer db.Close()

batch := db.NewBatch()
for i := 0; i < len(keys); i++ {
batch.Put(keys[i], vals[i])
}
batch.Write()
}
b.Run("BenchWriteSorted", func(b *testing.B) {
benchBatchWrite(b, sKeys, sVals)
})
b.Run("BenchWriteRandom", func(b *testing.B) {
benchBatchWrite(b, keys, vals)
})
})
}

func iterateKeys(it ethdb.Iterator) []string {
keys := []string{}
for it.Next() {
@@ -386,3 +482,25 @@ func iterateKeys(it ethdb.Iterator) []string {
it.Release()
return keys
}

// randBytes generates a random blob of data with the given byte length.
func randBytes(len int) []byte {
buf := make([]byte, len)
if n, err := rand.Read(buf); n != len || err != nil {
panic(err)
}
return buf
}

func makeDataset(size, ksize, vsize int, order bool) ([][]byte, [][]byte) {
var keys [][]byte
var vals [][]byte
for i := 0; i < size; i += 1 {
keys = append(keys, randBytes(ksize))
vals = append(vals, randBytes(vsize))
}
if order {
sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 })
}
return keys, vals
}
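
The BenchDatabaseSuite entry point above lets any ethdb.KeyValueStore implementation reuse the same write, read, iteration and batch-write benchmarks. As a minimal sketch (not part of this commit; the file and benchmark names are assumptions), a driver for the in-memory ethdb/memorydb package would look roughly like:

// memorydb_bench_test.go (hypothetical): wiring the in-memory store into the suite.
package memorydb

import (
    "testing"

    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/ethdb/dbtest"
)

func BenchmarkMemoryDB(b *testing.B) {
    dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore {
        // Hand a fresh, empty in-memory store to every sub-benchmark.
        return New()
    })
}

Such a driver runs with the standard toolchain, e.g. go test -bench=BenchmarkMemoryDB -benchmem ./ethdb/memorydb.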
12 changes: 12 additions & 0 deletions ethdb/leveldb/leveldb_test.go
@@ -38,3 +38,15 @@ func TestLevelDB(t *testing.T) {
})
})
}

func BenchmarkLevelDB(b *testing.B) {
dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
b.Fatal(err)
}
return &Database{
db: db,
}
})
}
4 changes: 3 additions & 1 deletion ethdb/pebble/pebble.go
@@ -272,7 +272,9 @@ func (d *Database) NewBatch() ethdb.Batch {
}

// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
// TODO can't do this with pebble. Batches are allocated in a pool so maybe this doesn't matter?
// It's not supported by pebble, but pebble has a better memory allocation strategy
// which turns out to be a lot faster than leveldb's, so it's performant enough to
// construct a batch object without any pre-allocated space.
func (d *Database) NewBatchWithSize(_ int) ethdb.Batch {
return &batch{
b: d.db.NewBatch(),
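
From the caller's perspective nothing changes: the size hint is simply ignored by the pebble backend, while the leveldb backend keeps using it to pre-size its batch buffer. A rough sketch of such a caller (illustrative only, assuming the github.com/ethereum/go-ethereum/ethdb import; the helper name is made up):

// writeBatch passes a size hint through the generic Batcher interface; pebble
// drops the hint, leveldb uses it to pre-allocate the underlying buffer.
func writeBatch(db ethdb.KeyValueStore, keys, vals [][]byte) error {
    batch := db.NewBatchWithSize(len(keys) * 64) // hint only, safe to ignore
    for i := range keys {
        if err := batch.Put(keys[i], vals[i]); err != nil {
            return err
        }
    }
    return batch.Write()
}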
14 changes: 14 additions & 0 deletions ethdb/pebble/pebble_test.go
@@ -42,3 +42,17 @@ func TestPebbleDB(t *testing.T) {
})
})
}

func BenchmarkPebbleDB(b *testing.B) {
dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore {
db, err := pebble.Open("", &pebble.Options{
FS: vfs.NewMem(),
})
if err != nil {
b.Fatal(err)
}
return &Database{
db: db,
}
})
}
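
Note that both new benchmark drivers build their stores on in-memory backends (storage.NewMemStorage for leveldb, vfs.NewMem for pebble), so the suite compares the key-value engines themselves rather than disk I/O.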
