From dbb76143ce0b665e946368c875dc066418b5fc5f Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 26 Feb 2020 19:20:45 +0800
Subject: [PATCH 01/89] wip clean
---
storage/fcds/fcds.go | 54 ++++++++++++++++++++++++++++++++++++++++----
storage/fcds/meta.go | 2 ++
2 files changed, 51 insertions(+), 5 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index c389d36f6b..ceaa180d68 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -112,7 +112,12 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
}
defer s.unprotect()
- sh := s.shards[getShard(addr)]
+ shard, err := s.getShardAddr(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ sh := s.shards[shard]
sh.mu.Lock()
defer sh.mu.Unlock()
@@ -138,7 +143,12 @@ func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
}
defer s.unprotect()
- mu := s.shards[getShard(addr)].mu
+ shard, err := getShardAddr(addr)
+ if err != nil {
+ return false, err
+ }
+
+ mu := s.shards[shard].mu
mu.Lock()
defer mu.Unlock()
@@ -170,7 +180,7 @@ func (s *Store) Put(ch chunk.Chunk) (err error) {
section := make([]byte, s.maxChunkSize)
copy(section, data)
- shard := getShard(addr)
+ shard := s.getWritableShard()
sh := s.shards[shard]
sh.mu.Lock()
@@ -204,6 +214,7 @@ func (s *Store) Put(ch chunk.Chunk) (err error) {
return s.meta.Set(addr, shard, reclaimed, &Meta{
Size: uint16(size),
Offset: offset,
+ Shard: shard,
})
}
@@ -241,7 +252,11 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
}
defer s.unprotect()
- shard := getShard(addr)
+ shard, err := s.getShardAddr(addr)
+ if err != nil {
+ return err
+ }
+
s.markShardWithFreeOffsets(shard, true)
mu := s.shards[shard].mu
@@ -281,7 +296,13 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return s.meta.Iterate(func(addr chunk.Address, m *Meta) (stop bool, err error) {
data := make([]byte, m.Size)
- _, err = s.shards[getShard(addr)].f.ReadAt(data, m.Offset)
+
+ shard, err := s.getShardAddr(addr)
+ if err != nil {
+ return true, err
+ }
+
+ _, err = s.shards[shard].f.ReadAt(data, m.Offset)
if err != nil {
return true, err
}
@@ -357,6 +378,29 @@ func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
return has
}
+// getShardWrite gets the next shard to write to.
+// currently returns a shard from the available shards in a round robin manner.
+// uses metastore.NextShard value in case a shard with empty entries is available.
+func (s *Store) getWritableShard() (shard uint8) {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ // warning: if multiple writers call this at the same time we might get the same shard again and again
+ // because the free slot value has not been decremented yet(!)
+ shard, _ = s.meta.NextShard()
+ return shard
+}
+
+// getShardAddr gets the shard id for an arbitrary stored address.
+func (s *Store) getShardAddr(addr chunk.Address) (shard uint8, err error) {
+ m, err := s.meta.Get(addr)
+ if err != nil {
+ return 0, err
+ }
+
+ return m.Shard, nil
+}
+
// getShard returns a shard number for the chunk address.
func getShard(addr chunk.Address) (shard uint8) {
return addr[len(addr)-1] % shardCount
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 68fb95e225..28f2992b80 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -32,6 +32,7 @@ type MetaStore interface {
Count() (int, error)
Iterate(func(chunk.Address, *Meta) (stop bool, err error)) error
FreeOffset(shard uint8) (int64, error)
+ FreeSlots() []ShardSlot
Close() error
}
@@ -39,6 +40,7 @@ type MetaStore interface {
type Meta struct {
Size uint16
Offset int64
+ Shard uint8
}
// MarshalBinary returns binary encoded value of meta chunk information.
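Note on what patch 01 changes: a chunk's shard is no longer derived from the last byte of its address; instead the shard id is recorded in Meta at write time and resolved through the metastore on reads and deletes. A minimal, self-contained sketch contrasting the two lookups (shardByAddress, shardByMeta and the Meta copy below are illustrative, not the store's API):
package main

import "fmt"

const shardCount = 32

// Meta mirrors the struct in storage/fcds/meta.go after this patch.
type Meta struct {
	Size   uint16
	Offset int64
	Shard  uint8
}

// shardByAddress is the old scheme: the shard is a pure function of the address.
func shardByAddress(addr []byte) uint8 {
	return addr[len(addr)-1] % shardCount
}

// shardByMeta is the new scheme: the shard recorded at Put time is authoritative.
func shardByMeta(m Meta) uint8 {
	return m.Shard
}

func main() {
	addr := make([]byte, 32)
	addr[31] = 200
	fmt.Println(shardByAddress(addr))        // 200 % 32 = 8
	fmt.Println(shardByMeta(Meta{Shard: 3})) // 3, wherever the chunk was actually written
}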
From 33414c5c00911e2a5bba37d637930aa64ae19dfe Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 26 Feb 2020 20:24:51 +0800
Subject: [PATCH 02/89] cleanup
---
storage/fcds/fcds.go | 97 +++++++++++++++------------------
storage/fcds/leveldb/leveldb.go | 80 +++++++++++++++++++++++++--
storage/fcds/mem/mem.go | 18 ++++++
storage/fcds/meta.go | 15 ++++-
4 files changed, 153 insertions(+), 57 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index ceaa180d68..0df0337b5b 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"io"
+ "math/rand"
"os"
"path/filepath"
"sync"
@@ -46,7 +47,7 @@ type Storer interface {
var _ Storer = new(Store)
// Number of files that store chunk data.
-const shardCount = 32
+const ShardCount = 32
// ErrStoreClosed is returned if store is already closed.
var ErrStoreClosed = errors.New("closed store")
@@ -73,7 +74,7 @@ type Option func(*Store)
func WithCache(yes bool) Option {
return func(s *Store) {
if yes {
- s.freeCache = newOffsetCache(shardCount)
+ s.freeCache = newOffsetCache(ShardCount)
} else {
s.freeCache = nil
}
@@ -83,9 +84,9 @@ func WithCache(yes bool) Option {
// New constructs a new Store with files at path, with specified max chunk size.
func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s *Store, err error) {
s = &Store{
- shards: make([]shard, shardCount),
+ shards: make([]shard, ShardCount),
meta: metaStore,
- free: make([]bool, shardCount),
+ free: make([]bool, ShardCount),
maxChunkSize: maxChunkSize,
quit: make(chan struct{}),
}
@@ -95,7 +96,7 @@ func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s
if err := os.MkdirAll(path, 0777); err != nil {
return nil, err
}
- for i := byte(0); i < shardCount; i++ {
+ for i := byte(0); i < ShardCount; i++ {
s.shards[i].f, err = os.OpenFile(filepath.Join(path, fmt.Sprintf("chunks-%v.db", i)), os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return nil, err
@@ -112,19 +113,15 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
}
defer s.unprotect()
- shard, err := s.getShardAddr(addr)
+ m, err := s.getMeta(addr)
if err != nil {
return nil, err
}
- sh := s.shards[shard]
+ sh := s.shards[m.Shard]
sh.mu.Lock()
defer sh.mu.Unlock()
- m, err := s.getMeta(addr)
- if err != nil {
- return nil, err
- }
data := make([]byte, m.Size)
n, err := sh.f.ReadAt(data, m.Offset)
if err != nil && err != io.EOF {
@@ -143,15 +140,6 @@ func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
}
defer s.unprotect()
- shard, err := getShardAddr(addr)
- if err != nil {
- return false, err
- }
-
- mu := s.shards[shard].mu
- mu.Lock()
- defer mu.Unlock()
-
_, err = s.getMeta(addr)
if err != nil {
if err == chunk.ErrChunkNotFound {
@@ -159,6 +147,7 @@ func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
}
return false, err
}
+
return true, nil
}
@@ -181,6 +170,8 @@ func (s *Store) Put(ch chunk.Chunk) (err error) {
copy(section, data)
shard := s.getWritableShard()
+
+ fmt.Println("writing addr to shard", "addr", addr.String(), "shard", shard)
sh := s.shards[shard]
sh.mu.Lock()
@@ -252,25 +243,21 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
}
defer s.unprotect()
- shard, err := s.getShardAddr(addr)
+ m, err := s.getMeta(addr)
if err != nil {
return err
}
- s.markShardWithFreeOffsets(shard, true)
+ s.markShardWithFreeOffsets(m.Shard, true)
- mu := s.shards[shard].mu
+ mu := s.shards[m.Shard].mu
mu.Lock()
defer mu.Unlock()
if s.freeCache != nil {
- m, err := s.getMeta(addr)
- if err != nil {
- return err
- }
- s.freeCache.set(shard, m.Offset)
+ s.freeCache.set(m.Shard, m.Offset)
}
- return s.meta.Remove(addr, shard)
+ return s.meta.Remove(addr, m.Shard)
}
// Count returns a number of stored chunks.
@@ -297,12 +284,7 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return s.meta.Iterate(func(addr chunk.Address, m *Meta) (stop bool, err error) {
data := make([]byte, m.Size)
- shard, err := s.getShardAddr(addr)
- if err != nil {
- return true, err
- }
-
- _, err = s.shards[shard].f.ReadAt(data, m.Offset)
+ _, err = s.shards[m.Shard].f.ReadAt(data, m.Offset)
if err != nil {
return true, err
}
@@ -378,32 +360,43 @@ func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
return has
}
-// getShardWrite gets the next shard to write to.
-// currently returns a shard from the available shards in a round robin manner.
-// uses metastore.NextShard value in case a shard with empty entries is available.
+// getWritableShard gets the next shard to write to.
+// uses weighted probability to choose the next shard.
func (s *Store) getWritableShard() (shard uint8) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
// warning: if multiple writers call this at the same time we might get the same shard again and again
// because the free slot value has not been decremented yet(!)
- shard, _ = s.meta.NextShard()
+
+ slots := s.meta.ShardSlots()
+ shard = probabilisticNextShard(slots)
+
return shard
}
-// getShardAddr gets the shard id for an arbitrary stored address.
-func (s *Store) getShardAddr(addr chunk.Address) (shard uint8, err error) {
- m, err := s.meta.Get(addr)
- if err != nil {
- return 0, err
+// probabilisticNextShard returns a next shard to write to
+// using a weighted probability
+func probabilisticNextShard(slots []ShardSlot) (shard uint8) {
+ var sum, movingSum int64
+ for _, v := range slots {
+
+ // we need to consider the edge case where no free slots are available
+ // we still need to potentially insert 1 chunk and so if all shards have
+ // no empty offsets - they all must be considered equally as having at least
+ // one empty slot
+ sum += v.Slots + 1
}
- return m.Shard, nil
-}
+ // do some magic
+ magic := int64(rand.Intn(int(sum)))
-// getShard returns a shard number for the chunk address.
-func getShard(addr chunk.Address) (shard uint8) {
- return addr[len(addr)-1] % shardCount
+ for _, v := range slots {
+ movingSum += v.Slots + int64(1)
+ if magic < movingSum {
+ // we've reached the shard with the correct id
+ return v.Shard
+ }
+ }
+ //TODO: this is probably wrong
+ return 0
}
type shard struct {
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 1c54a7eb0c..a13df9bf2d 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -17,7 +17,10 @@
package leveldb
import (
+ "bytes"
"encoding/binary"
+ "encoding/gob"
+ "sync"
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/storage/fcds"
@@ -30,17 +33,24 @@ var _ fcds.MetaStore = new(MetaStore)
// MetaStore implements FCDS MetaStore with LevelDB
// for persistence.
type MetaStore struct {
- db *leveldb.DB
+ db *leveldb.DB
+ free map[uint8]int64 // free slots cardinality
+ mtx sync.RWMutex // synchronise free slots
}
// NewMetaStore returns new MetaStore at path.
func NewMetaStore(path string) (s *MetaStore, err error) {
db, err := leveldb.OpenFile(path, &opt.Options{})
+
if err != nil {
return nil, err
}
+
+ // todo: try to get and deserialize the free map from the persisted value on disk
+
return &MetaStore{
- db: db,
+ db: db,
+ free: make(map[uint8]int64),
}, err
}
@@ -84,8 +94,37 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
}
batch := new(leveldb.Batch)
batch.Put(freeKey(shard, m.Offset), nil)
+ batch.Put(freeCountKey(), encodeFreeSlots(s.free))
batch.Delete(chunkKey(addr))
- return s.db.Write(batch, nil)
+
+ err = s.db.Write(batch, nil)
+ if err != nil {
+ return err
+ }
+
+ s.mtx.Lock()
+ s.free[shard]++
+ s.mtx.Unlock()
+
+ return nil
+}
+
+// ShardSlots gives back a slice of ShardSlot items that represent the number
+// of free slots inside each shard.
+func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot) {
+ freeSlots = make([]fcds.ShardSlot, fcds.ShardCount)
+
+ s.mtx.RLock()
+ for i := uint8(0); i < fcds.ShardCount; i++ {
+ slot := fcds.ShardSlot{Shard: i}
+ if slots, ok := s.free[i]; ok {
+ slot.Slots = slots
+ }
+ freeSlots[i] = slot
+ }
+ s.mtx.RUnlock()
+
+ return freeSlots
}
// FreeOffset returns an offset that can be reclaimed by
@@ -142,7 +181,9 @@ func (s *MetaStore) Iterate(fn func(chunk.Address, *fcds.Meta) (stop bool, err e
if err := m.UnmarshalBinary(value); err != nil {
return err
}
- stop, err := fn(chunk.Address(key[1:]), m)
+ b := make([]byte, len(key)-1)
+ copy(b, key[1:])
+ stop, err := fn(chunk.Address(b), m)
if err != nil {
return err
}
@@ -161,6 +202,7 @@ func (s *MetaStore) Close() (err error) {
const (
chunkPrefix = 0
freePrefix = 1
+ freeCount = 2
)
func chunkKey(addr chunk.Address) (key []byte) {
@@ -174,3 +216,33 @@ func freeKey(shard uint8, offset int64) (key []byte) {
binary.BigEndian.PutUint64(key[2:10], uint64(offset))
return key
}
+
+func freeCountKey() (key []byte) {
+ return []byte{freeCount}
+}
+
+func encodeFreeSlots(m map[uint8]int64) []byte {
+ b := new(bytes.Buffer)
+
+ e := gob.NewEncoder(b)
+
+ err := e.Encode(m)
+ if err != nil {
+ panic(err)
+ }
+
+ return b.Bytes()
+}
+
+func decodeFreeSlots(b []byte) map[uint8]int64 {
+ buf := bytes.NewBuffer(b)
+ var decodedMap map[uint8]int64
+ d := gob.NewDecoder(buf)
+
+ err := d.Decode(&decodedMap)
+ if err != nil {
+ panic(err)
+ }
+
+ return decodedMap
+}
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index 4d4c5d1750..7d2aff2b80 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -83,6 +83,24 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
return nil
}
+// ShardSlots gives back a slice of ShardSlot items that represent the number
+// of free slots inside each shard.
+func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot) {
+ freeSlots = make([]fcds.ShardSlot, fcds.ShardCount)
+
+ s.mu.RLock()
+ for i := uint8(0); i < fcds.ShardCount; i++ {
+ slot := fcds.ShardSlot{Shard: i}
+ if slots, ok := s.free[i]; ok {
+ slot.Slots = int64(len(slots))
+ }
+ freeSlots[i] = slot
+ }
+ s.mu.RUnlock()
+
+ return freeSlots
+}
+
// FreeOffset returns an offset that can be reclaimed by
// another chunk. If the returned value is less then 0
// there are no free offset at this shard.
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 28f2992b80..be8b0904b9 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -32,7 +32,7 @@ type MetaStore interface {
Count() (int, error)
Iterate(func(chunk.Address, *Meta) (stop bool, err error)) error
FreeOffset(shard uint8) (int64, error)
- FreeSlots() []ShardSlot
+ ShardSlots() []ShardSlot
Close() error
}
@@ -64,3 +64,16 @@ func (m *Meta) String() (s string) {
}
return fmt.Sprintf("{Size: %v, Offset %v}", m.Size, m.Offset)
}
+
+type bySlots []ShardSlot
+
+func (a bySlots) Len() int { return len(a) }
+func (a bySlots) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a bySlots) Less(i, j int) bool { return a[j].Slots < a[i].Slots }
+
+// ShardSlot contains data about free number of slots
+// in a shard.
+type ShardSlot struct {
+ Shard uint8
+ Slots int64
+}
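Patch 02 replaces the fixed address-to-shard mapping with probabilisticNextShard, which weights every shard by its number of free (reclaimable) slots plus one, so emptier shards are preferred while full shards keep a minimal chance of being chosen. A standalone sketch of that weighting, assuming ShardSlot matches the type added to storage/fcds/meta.go (pickShard and main are illustrative only):
package main

import (
	"fmt"
	"math/rand"
)

// ShardSlot mirrors storage/fcds/meta.go: a shard id plus its free slot count.
type ShardSlot struct {
	Shard uint8
	Slots int64
}

// pickShard draws a shard with probability proportional to Slots+1,
// the same interval walk used by probabilisticNextShard.
func pickShard(slots []ShardSlot, r *rand.Rand) uint8 {
	var sum int64
	for _, s := range slots {
		sum += s.Slots + 1
	}
	magic := r.Int63n(sum)
	var movingSum int64
	for _, s := range slots {
		movingSum += s.Slots + 1
		if magic < movingSum {
			return s.Shard
		}
	}
	return slots[len(slots)-1].Shard // not reached for a non-empty slice
}

func main() {
	r := rand.New(rand.NewSource(42))
	slots := []ShardSlot{{0, 0}, {1, 15}, {2, 0}, {3, 0}}
	hits := make(map[uint8]int)
	for i := 0; i < 1000; i++ {
		hits[pickShard(slots, r)]++
	}
	fmt.Println(hits) // shard 1 dominates; shards with no free slots still get occasional picks
}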
From 3b890554d5c7c83f3dd22b1808381bcbca52c71b Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 27 Feb 2020 13:28:52 +0800
Subject: [PATCH 03/89] forky: reinstate shard field (de)serialisation
---
storage/fcds/fcds.go | 1 -
storage/fcds/leveldb/leveldb.go | 6 ++++--
storage/fcds/meta.go | 4 +++-
3 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 0df0337b5b..709d27dd26 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -171,7 +171,6 @@ func (s *Store) Put(ch chunk.Chunk) (err error) {
shard := s.getWritableShard()
- fmt.Println("writing addr to shard", "addr", addr.String(), "shard", shard)
sh := s.shards[shard]
sh.mu.Lock()
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index a13df9bf2d..a34e213737 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -94,6 +94,10 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
}
batch := new(leveldb.Batch)
batch.Put(freeKey(shard, m.Offset), nil)
+
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
batch.Put(freeCountKey(), encodeFreeSlots(s.free))
batch.Delete(chunkKey(addr))
@@ -102,9 +106,7 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
return err
}
- s.mtx.Lock()
s.free[shard]++
- s.mtx.Unlock()
return nil
}
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index be8b0904b9..5bd9f52105 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -45,9 +45,10 @@ type Meta struct {
// MarshalBinary returns binary encoded value of meta chunk information.
func (m *Meta) MarshalBinary() (data []byte, err error) {
- data = make([]byte, 10)
+ data = make([]byte, 12)
binary.BigEndian.PutUint64(data[:8], uint64(m.Offset))
binary.BigEndian.PutUint16(data[8:10], m.Size)
+ binary.BigEndian.PutUint16(data[10:12], uint16(m.Shard))
return data, nil
}
@@ -55,6 +56,7 @@ func (m *Meta) MarshalBinary() (data []byte, err error) {
func (m *Meta) UnmarshalBinary(data []byte) error {
m.Offset = int64(binary.BigEndian.Uint64(data[:8]))
m.Size = binary.BigEndian.Uint16(data[8:10])
+ m.Shard = uint8(binary.BigEndian.Uint16(data[10:12]))
return nil
}
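Patch 03 widens the serialised Meta record from 10 to 12 bytes so the shard id survives a restart: bytes 0-8 hold the big-endian offset, 8-10 the size, 10-12 the shard (stored as a uint16). A small sketch of that layout and its round trip (marshalMeta is a hypothetical helper mirroring Meta.MarshalBinary):
package main

import (
	"encoding/binary"
	"fmt"
)

// marshalMeta writes the 12-byte encoding used by Meta.MarshalBinary after this patch.
func marshalMeta(offset int64, size uint16, shard uint8) []byte {
	data := make([]byte, 12)
	binary.BigEndian.PutUint64(data[:8], uint64(offset))
	binary.BigEndian.PutUint16(data[8:10], size)
	binary.BigEndian.PutUint16(data[10:12], uint16(shard))
	return data
}

func main() {
	b := marshalMeta(8192, 4096, 7)
	fmt.Printf("% x\n", b)
	// decode the same fields back out, as Meta.UnmarshalBinary does
	fmt.Println(int64(binary.BigEndian.Uint64(b[:8])),
		binary.BigEndian.Uint16(b[8:10]),
		uint8(binary.BigEndian.Uint16(b[10:12]))) // 8192 4096 7
}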
From d7a8fb9748daec5dd8290f8ffb136b97ef16a780 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 27 Feb 2020 15:12:32 +0800
Subject: [PATCH 04/89] forky: add NextShard tests
---
chunk/chunk.go | 15 +++
storage/fcds/fcds.go | 37 ++++----
storage/fcds/mock/mock.go | 9 +-
storage/fcds/test/store.go | 186 +++++++++++++++++++++++++++----------
4 files changed, 178 insertions(+), 69 deletions(-)
diff --git a/chunk/chunk.go b/chunk/chunk.go
index 7f9cda4fbb..06d38cd5b2 100644
--- a/chunk/chunk.go
+++ b/chunk/chunk.go
@@ -18,6 +18,7 @@ package chunk
import (
"context"
+ "encoding/hex"
"errors"
"fmt"
@@ -119,6 +120,20 @@ func (a *Address) UnmarshalJSON(value []byte) error {
return nil
}
+func (a *Address) UnmarshalString(s string) error {
+ v, err := hex.DecodeString(s)
+ if err != nil {
+ return err
+ }
+
+ if len(v) != AddressLength {
+ return fmt.Errorf("address length mistmatch. got %d bytes but expected %d", len(v), AddressLength)
+ }
+ *a = make([]byte, 32)
+ copy(*a, v)
+ return nil
+}
+
// Proximity returns the proximity order of the MSB distance between x and y
//
// The distance metric MSB(x, y) of two equal length byte sequences x an y is the
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 709d27dd26..627b97f59f 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -37,8 +37,9 @@ import (
type Storer interface {
Get(addr chunk.Address) (ch chunk.Chunk, err error)
Has(addr chunk.Address) (yes bool, err error)
- Put(ch chunk.Chunk) (err error)
+ Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
+ NextShard() (shard uint8)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
Close() (err error)
@@ -47,7 +48,7 @@ type Storer interface {
var _ Storer = new(Store)
// Number of files that store chunk data.
-const ShardCount = 32
+var ShardCount = uint8(32)
// ErrStoreClosed is returned if store is already closed.
var ErrStoreClosed = errors.New("closed store")
@@ -152,9 +153,9 @@ func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
}
// Put stores chunk data.
-func (s *Store) Put(ch chunk.Chunk) (err error) {
+func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
if err := s.protect(); err != nil {
- return err
+ return 0, err
}
defer s.unprotect()
@@ -163,13 +164,13 @@ func (s *Store) Put(ch chunk.Chunk) (err error) {
size := len(data)
if size > s.maxChunkSize {
- return fmt.Errorf("chunk data size %v exceeds %v bytes", size, s.maxChunkSize)
+ return 0, fmt.Errorf("chunk data size %v exceeds %v bytes", size, s.maxChunkSize)
}
section := make([]byte, s.maxChunkSize)
copy(section, data)
- shard := s.getWritableShard()
+ shard = s.NextShard()
sh := s.shards[shard]
@@ -178,7 +179,7 @@ func (s *Store) Put(ch chunk.Chunk) (err error) {
offset, reclaimed, err := s.getOffset(shard)
if err != nil {
- return err
+ return 0, err
}
if offset < 0 {
@@ -192,20 +193,23 @@ func (s *Store) Put(ch chunk.Chunk) (err error) {
_, err = sh.f.Seek(offset, io.SeekStart)
}
if err != nil {
- return err
+ return 0, err
}
if _, err = sh.f.Write(section); err != nil {
- return err
+ return 0, err
}
if reclaimed && s.freeCache != nil {
s.freeCache.remove(shard, offset)
}
- return s.meta.Set(addr, shard, reclaimed, &Meta{
+
+ err = s.meta.Set(addr, shard, reclaimed, &Meta{
Size: uint16(size),
Offset: offset,
Shard: shard,
})
+
+ return shard, err
}
// getOffset returns an offset where chunk data can be written to
@@ -359,16 +363,14 @@ func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
return has
}
-// getWritableShard gets the next shard to write to.
+// NextShard gets the next shard to write to.
// uses weighted probability to choose the next shard.
-func (s *Store) getWritableShard() (shard uint8) {
+func (s *Store) NextShard() (shard uint8) {
// warning: if multiple writers call this at the same time we might get the same shard again and again
// because the free slot value has not been decremented yet(!)
slots := s.meta.ShardSlots()
- shard = probabilisticNextShard(slots)
-
- return shard
+ return probabilisticNextShard(slots)
}
// probabilisticNextShard returns a next shard to write to
@@ -381,14 +383,15 @@ func probabilisticNextShard(slots []ShardSlot) (shard uint8) {
// we still need to potentially insert 1 chunk and so if all shards have
// no empty offsets - they all must be considered equally as having at least
// one empty slot
+ fmt.Println("sum", sum, sum+v.Slots+1)
sum += v.Slots + 1
}
// do some magic
magic := int64(rand.Intn(int(sum)))
-
+ fmt.Println("magic", magic)
for _, v := range slots {
- movingSum += v.Slots + int64(1)
+ movingSum += v.Slots + 1
if magic < movingSum {
// we've reached the shard with the correct id
return v.Shard
diff --git a/storage/fcds/mock/mock.go b/storage/fcds/mock/mock.go
index b4b7503ce4..4562d0fede 100644
--- a/storage/fcds/mock/mock.go
+++ b/storage/fcds/mock/mock.go
@@ -63,8 +63,13 @@ func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
}
// Put stores chunk data.
-func (s *Store) Put(ch chunk.Chunk) (err error) {
- return s.m.Put(ch.Address(), ch.Data())
+func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
+ err = s.m.Put(ch.Address(), ch.Data())
+ return 0, err
+}
+
+func (s *Store) NextShard() (shard uint8) {
+ return 0
}
// Delete removes chunk data.
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 4a6a3ff464..7c5172d0a7 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -46,62 +46,148 @@ func Main(m *testing.M) {
// RunAll runs all available tests for a Store implementation.
func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- t.Run("empty", func(t *testing.T) {
- RunStore(t, &RunStoreOptions{
- ChunkCount: *chunksFlag,
- NewStoreFunc: newStoreFunc,
- })
+ //t.Run("empty", func(t *testing.T) {
+ //RunStore(t, &RunStoreOptions{
+ //ChunkCount: *chunksFlag,
+ //NewStoreFunc: newStoreFunc,
+ //})
+ //})
+
+ //t.Run("cleaned", func(t *testing.T) {
+ //RunStore(t, &RunStoreOptions{
+ //ChunkCount: *chunksFlag,
+ //NewStoreFunc: newStoreFunc,
+ //Cleaned: true,
+ //})
+ //})
+
+ //for _, tc := range []struct {
+ //name string
+ //deleteSplit int
+ //}{
+ //{
+ //name: "delete-all",
+ //deleteSplit: 1,
+ //},
+ //{
+ //name: "delete-half",
+ //deleteSplit: 2,
+ //},
+ //{
+ //name: "delete-fifth",
+ //deleteSplit: 5,
+ //},
+ //{
+ //name: "delete-tenth",
+ //deleteSplit: 10,
+ //},
+ //{
+ //name: "delete-percent",
+ //deleteSplit: 100,
+ //},
+ //{
+ //name: "delete-permill",
+ //deleteSplit: 1000,
+ //},
+ //} {
+ //t.Run(tc.name, func(t *testing.T) {
+ //RunStore(t, &RunStoreOptions{
+ //ChunkCount: *chunksFlag,
+ //DeleteSplit: tc.deleteSplit,
+ //NewStoreFunc: newStoreFunc,
+ //})
+ //})
+ //}
+
+ //t.Run("iterator", func(t *testing.T) {
+ //RunIterator(t, newStoreFunc)
+ //})
+
+ t.Run("next shard", func(t *testing.T) {
+ runNextShard(t, newStoreFunc)
})
+}
- t.Run("cleaned", func(t *testing.T) {
- RunStore(t, &RunStoreOptions{
- ChunkCount: *chunksFlag,
- NewStoreFunc: newStoreFunc,
- Cleaned: true,
- })
+// runNextShard runs the test scenario for NextShard selection
+func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
+ rand.Seed(42424242) //use a constant seed so we can assert the results
+ defer func(s uint8) {
+ fcds.ShardCount = s
+ }(fcds.ShardCount)
+
+ fcds.ShardCount = 4
+
+ db, clean := newStoreFunc(t)
+
+ defer clean()
+
+ chunkCount := 1000
+ chunks := getChunks(chunkCount)
+
+ chunkShards := make(map[string]uint8)
+
+ t.Run("write", func(t *testing.T) {
+ for _, ch := range chunks {
+ if shard, err := db.Put(ch); err != nil {
+ t.Fatal(err)
+ } else {
+ chunkShards[ch.Address().String()] = shard
+ }
+ }
})
+ fmt.Println("done putting")
+
for _, tc := range []struct {
- name string
- deleteSplit int
+ incFreeSlots []int
+ expectNext uint8
}{
- {
- name: "delete-all",
- deleteSplit: 1,
- },
- {
- name: "delete-half",
- deleteSplit: 2,
- },
- {
- name: "delete-fifth",
- deleteSplit: 5,
- },
- {
- name: "delete-tenth",
- deleteSplit: 10,
- },
- {
- name: "delete-percent",
- deleteSplit: 100,
- },
- {
- name: "delete-permill",
- deleteSplit: 1000,
- },
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1},
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1},
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1},
+ {incFreeSlots: []int{0, 0, 0, 11}, expectNext: 1},
+ {incFreeSlots: []int{10, 0, 0, 0}, expectNext: 1},
+ {incFreeSlots: []int{100, 0, 0, 0}, expectNext: 3},
+ {incFreeSlots: []int{0, 200, 0, 0}, expectNext: 1},
+ {incFreeSlots: []int{0, 0, 302, 0}, expectNext: 2},
+ {incFreeSlots: []int{0, 0, 0, 440}, expectNext: 3},
} {
- t.Run(tc.name, func(t *testing.T) {
- RunStore(t, &RunStoreOptions{
- ChunkCount: *chunksFlag,
- DeleteSplit: tc.deleteSplit,
- NewStoreFunc: newStoreFunc,
- })
- })
+ for shard, inc := range tc.incFreeSlots {
+ if inc == 0 {
+ continue
+ }
+ deleteChunks := []string{}
+ for addr, storedOn := range chunkShards {
+ if storedOn == uint8(shard) {
+
+ // delete the chunk to make a free slot on the shard
+ c := new(chunk.Address)
+ err := c.UnmarshalString(addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := db.Delete(*c); err != nil {
+ t.Fatal(err)
+ }
+ deleteChunks = append(deleteChunks, addr)
+ }
+
+ if len(deleteChunks) == inc {
+ break
+ }
+ }
+
+ for _, v := range deleteChunks {
+ delete(chunkShards, v)
+ }
+ }
+
+ shard := db.NextShard()
+ if shard != tc.expectNext {
+ t.Fatalf("expected next shard value to be %d but got %d", tc.expectNext, shard)
+ }
}
- t.Run("iterator", func(t *testing.T) {
- RunIterator(t, newStoreFunc)
- })
}
// RunStoreOptions define parameters for Store test function.
@@ -136,7 +222,7 @@ func RunStore(t *testing.T, o *RunStoreOptions) {
wg.Done()
}()
- if err := db.Put(ch); err != nil {
+ if _, err := db.Put(ch); err != nil {
panic(err)
}
}(ch)
@@ -185,7 +271,7 @@ func RunStore(t *testing.T, o *RunStoreOptions) {
wg.Done()
}()
- if err := db.Put(ch); err != nil {
+ if _, err := db.Put(ch); err != nil {
panic(err)
}
if o.DeleteSplit > 0 && i%o.DeleteSplit == 0 {
@@ -254,7 +340,7 @@ func RunIterator(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fun
chunks := getChunks(chunkCount)
for _, ch := range chunks {
- if err := db.Put(ch); err != nil {
+ if _, err := db.Put(ch); err != nil {
t.Fatal(err)
}
}
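Patch 04 adds chunk.Address.UnmarshalString because the NextShard test keys its bookkeeping map by addr.String() and later needs real addresses back to call Delete. A rough standalone equivalent of that hex round trip (unmarshalString and addressLength stand in for the chunk package's method and constant):
package main

import (
	"encoding/hex"
	"fmt"
)

const addressLength = 32

// unmarshalString decodes a hex-encoded address and checks its length,
// mirroring what Address.UnmarshalString does in this patch.
func unmarshalString(s string) ([]byte, error) {
	v, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	if len(v) != addressLength {
		return nil, fmt.Errorf("address length mismatch: got %d bytes but expected %d", len(v), addressLength)
	}
	a := make([]byte, addressLength)
	copy(a, v)
	return a, nil
}

func main() {
	s := "00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"
	addr, err := unmarshalString(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(addr), hex.EncodeToString(addr) == s) // 32 true
}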
From 0744fd4e087ca0efae2a524e4821325beb018fde Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 27 Feb 2020 15:22:45 +0800
Subject: [PATCH 05/89] tests failing
---
storage/fcds/fcds.go | 7 +-
storage/fcds/test/store.go | 132 ++++++++++++++++++-------------------
2 files changed, 70 insertions(+), 69 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 627b97f59f..254279edcc 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -377,19 +377,22 @@ func (s *Store) NextShard() (shard uint8) {
// using a weighted probability
func probabilisticNextShard(slots []ShardSlot) (shard uint8) {
var sum, movingSum int64
+
+ intervalString := ""
for _, v := range slots {
// we need to consider the edge case where no free slots are available
// we still need to potentially insert 1 chunk and so if all shards have
// no empty offsets - they all must be considered equally as having at least
// one empty slot
- fmt.Println("sum", sum, sum+v.Slots+1)
+ intervalString += fmt.Sprintf("[%d %d) ", sum, sum+v.Slots+1)
sum += v.Slots + 1
}
// do some magic
magic := int64(rand.Intn(int(sum)))
- fmt.Println("magic", magic)
+ intervalString = fmt.Sprintf("magic %d, intervals ", magic) + intervalString
+ fmt.Println(intervalString)
for _, v := range slots {
movingSum += v.Slots + 1
if magic < movingSum {
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 7c5172d0a7..7926aef0a5 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -46,62 +46,62 @@ func Main(m *testing.M) {
// RunAll runs all available tests for a Store implementation.
func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- //t.Run("empty", func(t *testing.T) {
- //RunStore(t, &RunStoreOptions{
- //ChunkCount: *chunksFlag,
- //NewStoreFunc: newStoreFunc,
- //})
- //})
-
- //t.Run("cleaned", func(t *testing.T) {
- //RunStore(t, &RunStoreOptions{
- //ChunkCount: *chunksFlag,
- //NewStoreFunc: newStoreFunc,
- //Cleaned: true,
- //})
- //})
-
- //for _, tc := range []struct {
- //name string
- //deleteSplit int
- //}{
- //{
- //name: "delete-all",
- //deleteSplit: 1,
- //},
- //{
- //name: "delete-half",
- //deleteSplit: 2,
- //},
- //{
- //name: "delete-fifth",
- //deleteSplit: 5,
- //},
- //{
- //name: "delete-tenth",
- //deleteSplit: 10,
- //},
- //{
- //name: "delete-percent",
- //deleteSplit: 100,
- //},
- //{
- //name: "delete-permill",
- //deleteSplit: 1000,
- //},
- //} {
- //t.Run(tc.name, func(t *testing.T) {
- //RunStore(t, &RunStoreOptions{
- //ChunkCount: *chunksFlag,
- //DeleteSplit: tc.deleteSplit,
- //NewStoreFunc: newStoreFunc,
- //})
- //})
- //}
-
- //t.Run("iterator", func(t *testing.T) {
- //RunIterator(t, newStoreFunc)
- //})
+ t.Run("empty", func(t *testing.T) {
+ RunStore(t, &RunStoreOptions{
+ ChunkCount: *chunksFlag,
+ NewStoreFunc: newStoreFunc,
+ })
+ })
+
+ t.Run("cleaned", func(t *testing.T) {
+ RunStore(t, &RunStoreOptions{
+ ChunkCount: *chunksFlag,
+ NewStoreFunc: newStoreFunc,
+ Cleaned: true,
+ })
+ })
+
+ for _, tc := range []struct {
+ name string
+ deleteSplit int
+ }{
+ {
+ name: "delete-all",
+ deleteSplit: 1,
+ },
+ {
+ name: "delete-half",
+ deleteSplit: 2,
+ },
+ {
+ name: "delete-fifth",
+ deleteSplit: 5,
+ },
+ {
+ name: "delete-tenth",
+ deleteSplit: 10,
+ },
+ {
+ name: "delete-percent",
+ deleteSplit: 100,
+ },
+ {
+ name: "delete-permill",
+ deleteSplit: 1000,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ RunStore(t, &RunStoreOptions{
+ ChunkCount: *chunksFlag,
+ DeleteSplit: tc.deleteSplit,
+ NewStoreFunc: newStoreFunc,
+ })
+ })
+ }
+
+ t.Run("iterator", func(t *testing.T) {
+ RunIterator(t, newStoreFunc)
+ })
t.Run("next shard", func(t *testing.T) {
runNextShard(t, newStoreFunc)
@@ -136,21 +136,19 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
}
})
- fmt.Println("done putting")
-
for _, tc := range []struct {
incFreeSlots []int
expectNext uint8
}{
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1},
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1},
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1},
- {incFreeSlots: []int{0, 0, 0, 11}, expectNext: 1},
- {incFreeSlots: []int{10, 0, 0, 0}, expectNext: 1},
- {incFreeSlots: []int{100, 0, 0, 0}, expectNext: 3},
- {incFreeSlots: []int{0, 200, 0, 0}, expectNext: 1},
- {incFreeSlots: []int{0, 0, 302, 0}, expectNext: 2},
- {incFreeSlots: []int{0, 0, 0, 440}, expectNext: 3},
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1}, // magic 10, intervals [0 1) [1 17) [17 18) [18 19)
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1}, // magic 23, intervals [0 1) [1 32) [32 33) [33 34)
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1}, // magic 44, intervals [0 1) [1 47) [47 48) [48 49)
+ {incFreeSlots: []int{0, 0, 0, 11}, expectNext: 1}, // magic 14, intervals [0 1) [1 47) [47 48) [48 60)
+ {incFreeSlots: []int{10, 0, 0, 0}, expectNext: 1}, // magic 48, intervals [0 11) [11 57) [57 58) [58 70)
+ {incFreeSlots: []int{100, 0, 0, 0}, expectNext: 3}, // magic 164, intervals [0 111) [111 157) [157 158) [158 170)
+ {incFreeSlots: []int{0, 200, 0, 0}, expectNext: 1}, // magic 305, intervals [0 111) [111 352) [352 353) [353 365)
+ {incFreeSlots: []int{0, 0, 302, 0}, expectNext: 2}, // magic 400, intervals [0 111) [111 352) [352 622) [622 634)
+ {incFreeSlots: []int{0, 0, 0, 440}, expectNext: 3}, // magic 637, intervals [0 111) [111 352) [352 622) [622 874)
} {
for shard, inc := range tc.incFreeSlots {
if inc == 0 {
From f616ffff845f37eeace1d16fdbba113b13a7c875 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 27 Feb 2020 16:46:50 +0800
Subject: [PATCH 06/89] remove t.run
---
storage/fcds/test/store.go | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 7926aef0a5..b1428260f9 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -126,15 +126,13 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
chunkShards := make(map[string]uint8)
- t.Run("write", func(t *testing.T) {
- for _, ch := range chunks {
- if shard, err := db.Put(ch); err != nil {
- t.Fatal(err)
- } else {
- chunkShards[ch.Address().String()] = shard
- }
+ for _, ch := range chunks {
+ if shard, err := db.Put(ch); err != nil {
+ t.Fatal(err)
+ } else {
+ chunkShards[ch.Address().String()] = shard
}
- })
+ }
for _, tc := range []struct {
incFreeSlots []int
@@ -175,6 +173,10 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
}
}
+ if len(deleteChunks) != inc {
+ panic(0)
+ }
+
for _, v := range deleteChunks {
delete(chunkShards, v)
}
@@ -381,6 +383,7 @@ func NewFCDSStore(t *testing.T, path string, metaStore fcds.MetaStore) (s *fcds.
t.Fatal(err)
}
+ fmt.Println("creating new forky", path)
s, err = fcds.New(path, chunk.DefaultSize, metaStore, fcds.WithCache(!*noCacheFlag))
if err != nil {
os.RemoveAll(path)
From c3d92c1e0119f6cd297de55702eab975ab51338c Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 27 Feb 2020 16:57:07 +0800
Subject: [PATCH 07/89] remove println
---
storage/fcds/test/store.go | 1 -
1 file changed, 1 deletion(-)
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index b1428260f9..6e2703ffc9 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -383,7 +383,6 @@ func NewFCDSStore(t *testing.T, path string, metaStore fcds.MetaStore) (s *fcds.
t.Fatal(err)
}
- fmt.Println("creating new forky", path)
s, err = fcds.New(path, chunk.DefaultSize, metaStore, fcds.WithCache(!*noCacheFlag))
if err != nil {
os.RemoveAll(path)
From afad77391fc192a3fbeb748ead6a987792f4cd13 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 27 Feb 2020 19:54:16 +0800
Subject: [PATCH 08/89] fix build
---
storage/localstore/mode_put.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/storage/localstore/mode_put.go b/storage/localstore/mode_put.go
index 815ac4f6bd..27ff393615 100644
--- a/storage/localstore/mode_put.go
+++ b/storage/localstore/mode_put.go
@@ -142,7 +142,7 @@ func (db *DB) put(mode chunk.ModePut, chs ...chunk.Chunk) (exist []bool, err err
}
for _, ch := range chs {
- if err := db.data.Put(ch); err != nil {
+ if _, err := db.data.Put(ch); err != nil {
return nil, err
}
}
From 6ec5f9719c5e6d5f80b93635c11834f64e146215 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 28 Feb 2020 10:23:30 +0800
Subject: [PATCH 09/89] forky: address some PR comments
---
storage/fcds/fcds.go | 24 +++++++++++++++---------
storage/fcds/leveldb/leveldb.go | 1 -
storage/fcds/mock/mock.go | 4 ++--
storage/fcds/mock/mock_test.go | 2 +-
storage/fcds/test/store.go | 18 ++++++++++++++----
5 files changed, 32 insertions(+), 17 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 254279edcc..56bc5c6495 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -39,7 +39,7 @@ type Storer interface {
Has(addr chunk.Address) (yes bool, err error)
Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
- NextShard() (shard uint8)
+ NextShard() (shard uint8, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
Close() (err error)
@@ -51,7 +51,10 @@ var _ Storer = new(Store)
var ShardCount = uint8(32)
// ErrStoreClosed is returned if store is already closed.
-var ErrStoreClosed = errors.New("closed store")
+var (
+ ErrStoreClosed = errors.New("closed store")
+ ErrNextShard = errors.New("error getting next shard")
+)
// Store is the main FCDS implementation. It stores chunk data into
// a number of files partitioned by the last byte of the chunk address.
@@ -170,7 +173,10 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
section := make([]byte, s.maxChunkSize)
copy(section, data)
- shard = s.NextShard()
+ shard, err = s.NextShard()
+ if err != nil {
+ return 0, err
+ }
sh := s.shards[shard]
@@ -364,8 +370,8 @@ func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
}
// NextShard gets the next shard to write to.
-// uses weighted probability to choose the next shard.
-func (s *Store) NextShard() (shard uint8) {
+// Uses weighted probability to choose the next shard.
+func (s *Store) NextShard() (shard uint8, err error) {
// warning: if multiple writers call this at the same time we might get the same shard again and again
// because the free slot value has not been decremented yet(!)
@@ -375,7 +381,7 @@ func (s *Store) NextShard() (shard uint8) {
// probabilisticNextShard returns a next shard to write to
// using a weighted probability
-func probabilisticNextShard(slots []ShardSlot) (shard uint8) {
+func probabilisticNextShard(slots []ShardSlot) (shard uint8, err error) {
var sum, movingSum int64
intervalString := ""
@@ -397,11 +403,11 @@ func probabilisticNextShard(slots []ShardSlot) (shard uint8) {
movingSum += v.Slots + 1
if magic < movingSum {
// we've reached the shard with the correct id
- return v.Shard
+ return v.Shard, nil
}
}
- //TODO: this is probably wrong
- return 0
+
+ return 0, ErrNextShard
}
type shard struct {
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index a34e213737..b05d43cbb5 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -41,7 +41,6 @@ type MetaStore struct {
// NewMetaStore returns new MetaStore at path.
func NewMetaStore(path string) (s *MetaStore, err error) {
db, err := leveldb.OpenFile(path, &opt.Options{})
-
if err != nil {
return nil, err
}
diff --git a/storage/fcds/mock/mock.go b/storage/fcds/mock/mock.go
index 4562d0fede..b537c2b4b5 100644
--- a/storage/fcds/mock/mock.go
+++ b/storage/fcds/mock/mock.go
@@ -68,8 +68,8 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
-func (s *Store) NextShard() (shard uint8) {
- return 0
+func (s *Store) NextShard() (shard uint8, err error) {
+ return 0, nil
}
// Delete removes chunk data.
diff --git a/storage/fcds/mock/mock_test.go b/storage/fcds/mock/mock_test.go
index 49029c608a..8d8adcf0b2 100644
--- a/storage/fcds/mock/mock_test.go
+++ b/storage/fcds/mock/mock_test.go
@@ -28,7 +28,7 @@ import (
// TestFCDS runs a standard series of tests on mock Store implementation.
func TestFCDS(t *testing.T) {
- test.RunAll(t, func(t *testing.T) (fcds.Storer, func()) {
+ test.RunStd(t, func(t *testing.T) (fcds.Storer, func()) {
return mock.New(
mem.NewGlobalStore().NewNodeStore(
common.BytesToAddress(make([]byte, 20)),
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 6e2703ffc9..3b63380de5 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -43,9 +43,8 @@ func Main(m *testing.M) {
os.Exit(m.Run())
}
-// RunAll runs all available tests for a Store implementation.
-func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
-
+// RunStd runs the standard tests
+func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
t.Run("empty", func(t *testing.T) {
RunStore(t, &RunStoreOptions{
ChunkCount: *chunksFlag,
@@ -103,6 +102,13 @@ func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func()))
RunIterator(t, newStoreFunc)
})
+}
+
+// RunAll runs all available tests for a Store implementation.
+func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
+
+ RunStd(t, newStoreFunc)
+
t.Run("next shard", func(t *testing.T) {
runNextShard(t, newStoreFunc)
})
@@ -182,7 +188,11 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
}
}
- shard := db.NextShard()
+ shard, err := db.NextShard()
+ if err != nil {
+ t.Fatal(err)
+ }
+
if shard != tc.expectNext {
t.Fatalf("expected next shard value to be %d but got %d", tc.expectNext, shard)
}
From 06a2466b9a2e890e3a1443ac20a4f199c8da3240 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 28 Feb 2020 10:32:56 +0800
Subject: [PATCH 10/89] forky/leveldb: instate free slot serialisation
---
storage/fcds/leveldb/leveldb.go | 40 ++++++++++++++++++++++++---------
1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index b05d43cbb5..bd0914bebd 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -45,12 +45,32 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
return nil, err
}
- // todo: try to get and deserialize the free map from the persisted value on disk
-
- return &MetaStore{
+ ms := &MetaStore{
db: db,
free: make(map[uint8]int64),
- }, err
+ }
+
+ data, err := s.db.Get(freeCountKey(addr), nil)
+ if err != nil {
+ // key doesn't exist since this is a new db
+ // write an empty set into it
+ b, err := encodeFreeSlots(ms.free)
+ if err != nil {
+ return nil, err
+ }
+
+ err = s.db.Put(freeCountKey(), b)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ ms.free, err = decodeFreeSlots(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return ms, err
}
// Get returns chunk meta information.
@@ -222,28 +242,28 @@ func freeCountKey() (key []byte) {
return []byte{freeCount}
}
-func encodeFreeSlots(m map[uint8]int64) []byte {
+func encodeFreeSlots(m map[uint8]int64) ([]byte, error) {
b := new(bytes.Buffer)
e := gob.NewEncoder(b)
err := e.Encode(m)
if err != nil {
- panic(err)
+ return nil, err
}
- return b.Bytes()
+ return b.Bytes(), nil
}
-func decodeFreeSlots(b []byte) map[uint8]int64 {
+func decodeFreeSlots(b []byte) (map[uint8]int64, error) {
buf := bytes.NewBuffer(b)
var decodedMap map[uint8]int64
d := gob.NewDecoder(buf)
err := d.Decode(&decodedMap)
if err != nil {
- panic(err)
+ return nil, err
}
- return decodedMap
+ return decodedMap, nil
}
From a74470ee80d50f96c9526cda988b433146eae232 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 28 Feb 2020 10:40:34 +0800
Subject: [PATCH 11/89] fix build
---
storage/fcds/leveldb/leveldb.go | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index bd0914bebd..fa3e1f77ef 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -50,7 +50,7 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
free: make(map[uint8]int64),
}
- data, err := s.db.Get(freeCountKey(addr), nil)
+ data, err := s.db.Get(freeCountKey(), nil)
if err != nil {
// key doesn't exist since this is a new db
// write an empty set into it
@@ -59,7 +59,7 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
return nil, err
}
- err = s.db.Put(freeCountKey(), b)
+ err = s.db.Put(freeCountKey(), b, nil)
if err != nil {
return nil, err
}
@@ -116,8 +116,11 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
s.mtx.Lock()
defer s.mtx.Unlock()
-
- batch.Put(freeCountKey(), encodeFreeSlots(s.free))
+ b, err := encodeFreeSlots(s.free)
+ if err != nil {
+ return err
+ }
+ batch.Put(freeCountKey(), b)
batch.Delete(chunkKey(addr))
err = s.db.Write(batch, nil)
From c5cb5b79cd46757a16b4a4c9252cdc6490f38ed6 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 28 Feb 2020 11:07:07 +0800
Subject: [PATCH 12/89] wip persistence test
---
storage/fcds/leveldb/leveldb.go | 6 +++---
storage/fcds/leveldb/leveldb_test.go | 10 ++++++++++
2 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index fa3e1f77ef..088a439995 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -50,7 +50,7 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
free: make(map[uint8]int64),
}
- data, err := s.db.Get(freeCountKey(), nil)
+ data, err := ms.db.Get(freeCountKey(), nil)
if err != nil {
// key doesn't exist since this is a new db
// write an empty set into it
@@ -59,7 +59,7 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
return nil, err
}
- err = s.db.Put(freeCountKey(), b, nil)
+ err = ms.db.Put(freeCountKey(), b, nil)
if err != nil {
return nil, err
}
@@ -70,7 +70,7 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
return nil, err
}
- return ms, err
+ return ms, nil
}
// Get returns chunk meta information.
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index c960c2df61..c6ff1738a5 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -43,3 +43,13 @@ func TestFCDS(t *testing.T) {
return test.NewFCDSStore(t, path, metaStore)
})
}
+
+// TestFreeSlotCounter tests that the free slot counter gets persisted
+// and properly loaded on existing store restart
+func TestFreeSlotCounter(t *testing.T) {
+ metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+}
From 0b24c4b54fc812039f176c79383a66dc6fb23625 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 28 Feb 2020 11:18:19 +0800
Subject: [PATCH 13/89] wip test
---
storage/fcds/leveldb/leveldb_test.go | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index c6ff1738a5..d47a45a81c 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -47,6 +47,11 @@ func TestFCDS(t *testing.T) {
// TestFreeSlotCounter tests that the free slot counter gets persisted
// and properly loaded on existing store restart
func TestFreeSlotCounter(t *testing.T) {
+ path, err := ioutil.TempDir("", "swarm-fcds-")
+ if err != nil {
+ t.Fatal(err)
+ }
+
metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
if err != nil {
t.Fatal(err)
From 9ca502555d93e9d13cc837e62035b466df3196c3 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 2 Mar 2020 11:58:20 +0800
Subject: [PATCH 14/89] add sorting unit test
---
storage/fcds/meta_test.go | 77 +++++++++++++++++++++++++++++++++++++++
1 file changed, 77 insertions(+)
create mode 100644 storage/fcds/meta_test.go
diff --git a/storage/fcds/meta_test.go b/storage/fcds/meta_test.go
new file mode 100644
index 0000000000..1fae30e270
--- /dev/null
+++ b/storage/fcds/meta_test.go
@@ -0,0 +1,77 @@
+// Copyright 2020 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package fcds
+
+import (
+ "sort"
+ "testing"
+)
+
+func TestShardSlotSort(t *testing.T) {
+
+ for _, tc := range []struct {
+ freeSlots []int // how many free slots in which shard (slice index denotes shard id, value denotes number of free slots).
+ expectOrder []int // the order of bins expected to show up (slice value denotes shard id).
+ }{
+ {
+ freeSlots: []int{0, 0, 0, 0},
+ expectOrder: []int{0, 1, 2, 3},
+ },
+ {
+ freeSlots: []int{0, 1, 0, 0},
+ expectOrder: []int{1, 0, 2, 3},
+ },
+ {
+ freeSlots: []int{0, 0, 2, 0},
+ expectOrder: []int{2, 0, 1, 3},
+ },
+ {
+ freeSlots: []int{0, 0, 0, 1},
+ expectOrder: []int{3, 0, 1, 2},
+ },
+ {
+ freeSlots: []int{1, 1, 0, 0},
+ expectOrder: []int{0, 1, 2, 3},
+ },
+ {
+ freeSlots: []int{1, 0, 0, 1},
+ expectOrder: []int{0, 3, 1, 2},
+ },
+ {
+ freeSlots: []int{1, 2, 0, 0},
+ expectOrder: []int{1, 0, 2, 3},
+ },
+ {
+ freeSlots: []int{0, 3, 2, 1},
+ expectOrder: []int{1, 2, 3, 0},
+ },
+ } {
+ s := make([]ShardSlot, len(tc.freeSlots))
+
+ for i, v := range tc.freeSlots {
+ s[i] = ShardSlot{Shard: uint8(i), Slots: int64(v)}
+ }
+ sort.Sort(bySlots(s))
+
+ for i, v := range s {
+ if v.Shard != uint8(tc.expectOrder[i]) {
+ t.Fatalf("expected shard index %d to be %d but got %d", i, tc.expectOrder[i], v.Shard)
+ }
+ }
+
+ }
+}
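Patch 14 pins down the bySlots ordering: because Less compares index j against index i, sort.Sort yields a descending order by free slots, so the shard with the most reclaimable slots ends up first (which is what NextShard relies on from patch 16 onwards). A minimal sketch, with ShardSlot and bySlots copied from storage/fcds/meta.go and an illustrative main:
package main

import (
	"fmt"
	"sort"
)

type ShardSlot struct {
	Shard uint8
	Slots int64
}

type bySlots []ShardSlot

func (a bySlots) Len() int           { return len(a) }
func (a bySlots) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a bySlots) Less(i, j int) bool { return a[j].Slots < a[i].Slots } // descending by free slots

func main() {
	s := []ShardSlot{{0, 0}, {1, 3}, {2, 2}, {3, 1}}
	sort.Sort(bySlots(s))
	fmt.Println(s) // [{1 3} {2 2} {3 1} {0 0}]
}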
From 699940cdadb1d07a5d1861a7bffc88ebdd380b85 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 2 Mar 2020 12:02:35 +0800
Subject: [PATCH 15/89] add test desc
---
storage/fcds/meta_test.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/storage/fcds/meta_test.go b/storage/fcds/meta_test.go
index 1fae30e270..59e827d771 100644
--- a/storage/fcds/meta_test.go
+++ b/storage/fcds/meta_test.go
@@ -21,6 +21,7 @@ import (
"testing"
)
+// TestShardSlotSort is a unit test to ensure correct sorting of a slice of ShardSlot
func TestShardSlotSort(t *testing.T) {
for _, tc := range []struct {
From 9f2806c33e0ff277b7e44bc1a2afdb393d5944cc Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 2 Mar 2020 12:38:19 +0800
Subject: [PATCH 16/89] forky: add free counter persistence e2e test, shard
selection logic
---
storage/fcds/fcds.go | 14 +++++-
storage/fcds/leveldb/leveldb.go | 12 +++--
storage/fcds/leveldb/leveldb_test.go | 69 +++++++++++++++++++++++++++-
3 files changed, 89 insertions(+), 6 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 56bc5c6495..4a7a0f38f8 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -23,6 +23,7 @@ import (
"math/rand"
"os"
"path/filepath"
+ "sort"
"sync"
"time"
@@ -376,7 +377,18 @@ func (s *Store) NextShard() (shard uint8, err error) {
// because the free slot value has not been decremented yet(!)
slots := s.meta.ShardSlots()
- return probabilisticNextShard(slots)
+ sort.Sort(bySlots(slots))
+
+ // if the first shard has free slots - return it
+ // otherwise, return a random shard
+
+ if slots[0].Slots > 0 {
+ return slots[0].Shard, nil
+ }
+
+ shard = uint8(rand.Intn(len(slots)))
+
+ return shard, nil
}
// probabilisticNextShard returns a next shard to write to
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 088a439995..31a890d9a3 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -56,18 +56,22 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
// write an empty set into it
b, err := encodeFreeSlots(ms.free)
if err != nil {
+ panic(err)
return nil, err
}
err = ms.db.Put(freeCountKey(), b, nil)
if err != nil {
+ panic(err)
return nil, err
}
- }
+ } else {
- ms.free, err = decodeFreeSlots(data)
- if err != nil {
- return nil, err
+ ms.free, err = decodeFreeSlots(data)
+ if err != nil {
+ panic(err)
+ return nil, err
+ }
}
return ms, nil
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index d47a45a81c..1e2b78b57c 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -18,9 +18,12 @@ package leveldb_test
import (
"io/ioutil"
+ "os"
"path/filepath"
"testing"
+ "github.com/ethersphere/swarm/chunk"
+ chunktesting "github.com/ethersphere/swarm/chunk/testing"
"github.com/ethersphere/swarm/storage/fcds"
"github.com/ethersphere/swarm/storage/fcds/leveldb"
"github.com/ethersphere/swarm/storage/fcds/test"
@@ -52,9 +55,73 @@ func TestFreeSlotCounter(t *testing.T) {
t.Fatal(err)
}
- metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
+ metaPath := filepath.Join(path, "meta")
+
+ metaStore, err := leveldb.NewMetaStore(metaPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ store, err := fcds.New(path, chunk.DefaultSize, metaStore, fcds.WithCache(false))
+ if err != nil {
+ os.RemoveAll(path)
+ t.Fatal(err)
+ }
+
+ defer func() {
+ store.Close()
+ os.RemoveAll(path)
+ }()
+
+ // put some chunks, delete some chunks, find the free slots
+ // then close the store, init a new one on the same dir
+ // then check free slots again and compare
+ numChunks := 100
+ deleteChunks := 10
+ chunks := make([]chunk.Chunk, numChunks)
+
+ for i := 0; i < numChunks; i++ {
+ chunks[i] = chunktesting.GenerateTestRandomChunk()
+ _, err := store.Put(chunks[i])
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ for i := 0; i < deleteChunks; i++ {
+ err := store.Delete(chunks[i].Address())
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ freeSlots := metaStore.ShardSlots()
+
+ store.Close()
+ metaStore.Close()
+
+ metaStore2, err := leveldb.NewMetaStore(metaPath)
if err != nil {
t.Fatal(err)
}
+ defer func() {
+ metaStore2.Close()
+ os.RemoveAll(metaPath)
+ }()
+ freeSlots2 := metaStore2.ShardSlots()
+ count := 0
+ for i, v := range freeSlots {
+ count++
+ if freeSlots2[i].Shard != v.Shard {
+ t.Fatalf("expected shard %d to be %d but got %d", i, v.Shard, freeSlots[2].Shard)
+ }
+ if freeSlots2[i].Slots != v.Slots {
+ t.Fatalf("expected shard %d to have %d free slots but got %d", i, v.Slots, freeSlots[2].Slots)
+ }
+ }
+
+ if uint8(count) != fcds.ShardCount {
+ t.Fatalf("did not process enough shards: got %d but expected %d", count, fcds.ShardCount)
+ }
}
From b0a2954c797eb048792a09572f8bc41dddb3e7eb Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 2 Mar 2020 13:51:21 +0800
Subject: [PATCH 17/89] pick the smallest shard on no free slots
---
storage/fcds/fcds.go | 26 ++++++++++++++++++++++++--
1 file changed, 24 insertions(+), 2 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 4a7a0f38f8..5dd7521fed 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -370,6 +370,21 @@ func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
return has
}
+// shardSizes returns a ShardSlot slice in which Slots signify how many
+// taken slots are there in the shard
+func (s *Store) shardSizes() (slots []ShardSlot) {
+ slots := make([]ShardSlot, len(s.shards))
+ for i, sh := range s.shards {
+ fs, err := sh.f.Stat()
+ if err != nil {
+ return 0, err
+ }
+
+ slots[i] = ShardSlot{Shard: uint8(i), Slots: fs.Size() / s.maxChunkSize}
+ }
+ return slots
+}
+
// NextShard gets the next shard to write to.
// Uses weighted probability to choose the next shard.
func (s *Store) NextShard() (shard uint8, err error) {
@@ -380,13 +395,20 @@ func (s *Store) NextShard() (shard uint8, err error) {
sort.Sort(bySlots(slots))
// if the first shard has free slots - return it
- // otherwise, return a random shard
+ // otherwise, just balance them out
if slots[0].Slots > 0 {
return slots[0].Shard, nil
}
- shard = uint8(rand.Intn(len(slots)))
+ // each element has in Slots the number of _taken_ slots
+ slots = s.shardSizes()
+
+ // sorting them will make the first element the largest shard and the last
+ // element the smallest shard; pick the smallest
+ sort.Sort(bySlots(slots))
+
+ shard = slots[len(slots)-1]
return shard, nil
}
From 0a72e7c292d682d56023a73464d5d6faf5f5488f Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 2 Mar 2020 13:56:29 +0800
Subject: [PATCH 18/89] fix build
---
storage/fcds/fcds.go | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 5dd7521fed..684ec21742 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -372,17 +372,17 @@ func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
// shardSizes returns a ShardSlot slice in which Slots signify how many
// taken slots are there in the shard
-func (s *Store) shardSizes() (slots []ShardSlot) {
- slots := make([]ShardSlot, len(s.shards))
+func (s *Store) shardSizes() (slots []ShardSlot, err error) {
+ slots = make([]ShardSlot, len(s.shards))
for i, sh := range s.shards {
fs, err := sh.f.Stat()
if err != nil {
- return 0, err
+ return nil, err
}
- slots[i] = ShardSlot{Shard: uint8(i), Slots: fs.Size() / s.maxChunkSize}
+ slots[i] = ShardSlot{Shard: uint8(i), Slots: fs.Size() / int64(s.maxChunkSize)}
}
- return slots
+ return slots, nil
}
// NextShard gets the next shard to write to.
@@ -402,13 +402,16 @@ func (s *Store) NextShard() (shard uint8, err error) {
}
// each element has in Slots the number of _taken_ slots
- slots = s.shardSizes()
+ slots, err = s.shardSizes()
+ if err != nil {
+ return 0, err
+ }
// sorting them will make the first element the largest shard and the last
// element the smallest shard; pick the smallest
sort.Sort(bySlots(slots))
- shard = slots[len(slots)-1]
+ shard = slots[len(slots)-1].Shard
return shard, nil
}
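
For reference, the selection policy that patches 17 and 18 converge on can be read as a standalone sketch: prefer the shard with the most free (reclaimable) slots, and only when no shard has any free slots fall back to file sizes and pick the smallest shard. The bySlots ordering used above is not shown in this series, so the descending sort below is an assumption consistent with the comment that sorting puts the largest shard first; ShardSlot mirrors the struct used in the diffs.

package main

import (
	"fmt"
	"sort"
)

type ShardSlot struct {
	Shard uint8
	Slots int64
}

// nextShard prefers the shard with the most free (reclaimable) slots.
// When no shard has free slots it falls back to shard sizes (taken slots)
// and picks the smallest shard, so shards grow evenly instead of one
// shard being chosen at random.
func nextShard(free, sizes []ShardSlot) uint8 {
	// sort descending by Slots: largest first, smallest last
	sort.Slice(free, func(i, j int) bool { return free[i].Slots > free[j].Slots })
	if free[0].Slots > 0 {
		return free[0].Shard
	}
	sort.Slice(sizes, func(i, j int) bool { return sizes[i].Slots > sizes[j].Slots })
	return sizes[len(sizes)-1].Shard
}

func main() {
	free := []ShardSlot{{0, 0}, {1, 0}, {2, 0}, {3, 0}}
	sizes := []ShardSlot{{0, 40}, {1, 30}, {2, 20}, {3, 10}}
	fmt.Println(nextShard(free, sizes)) // prints 3: no free slots, shard 3 is smallest
}
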
From c8fb4d7c8a26f2be9fba16912200b32c308faf28 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 2 Mar 2020 15:25:53 +0800
Subject: [PATCH 19/89] next shard unit test
---
storage/fcds/fcds.go | 16 +++
storage/fcds/test/store.go | 244 +++++++++++++++++++++++++++++--------
2 files changed, 206 insertions(+), 54 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 684ec21742..c00369e3b4 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -41,6 +41,7 @@ type Storer interface {
Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
NextShard() (shard uint8, err error)
+ ShardSize() (slots []ShardSlot, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
Close() (err error)
@@ -111,6 +112,20 @@ func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s
return s, nil
}
+func (s *Store) ShardSize() (slots []ShardSlot, err error) {
+ slots = make([]ShardSlot, len(s.shards))
+ for i, sh := range s.shards {
+ fs, err := sh.f.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ slots[i] = ShardSlot{Shard: uint8(i), Slots: fs.Size()}
+ }
+
+ return slots, nil
+}
+
// Get returns a chunk with data.
func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
if err := s.protect(); err != nil {
@@ -398,6 +413,7 @@ func (s *Store) NextShard() (shard uint8, err error) {
// otherwise, just balance them out
if slots[0].Slots > 0 {
+ fmt.Println("retuning shard with free slot", slots[0].Shard, slots[0].Slots)
return slots[0].Shard, nil
}
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 3b63380de5..6235d99f74 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -45,63 +45,199 @@ func Main(m *testing.M) {
// RunStd runs the standard tests
func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- t.Run("empty", func(t *testing.T) {
- RunStore(t, &RunStoreOptions{
- ChunkCount: *chunksFlag,
- NewStoreFunc: newStoreFunc,
- })
+ //t.Run("empty", func(t *testing.T) {
+ //RunStore(t, &RunStoreOptions{
+ //ChunkCount: *chunksFlag,
+ //NewStoreFunc: newStoreFunc,
+ //})
+ //})
+
+ //t.Run("cleaned", func(t *testing.T) {
+ //RunStore(t, &RunStoreOptions{
+ //ChunkCount: *chunksFlag,
+ //NewStoreFunc: newStoreFunc,
+ //Cleaned: true,
+ //})
+ //})
+
+ //for _, tc := range []struct {
+ //name string
+ //deleteSplit int
+ //}{
+ //{
+ //name: "delete-all",
+ //deleteSplit: 1,
+ //},
+ //{
+ //name: "delete-half",
+ //deleteSplit: 2,
+ //},
+ //{
+ //name: "delete-fifth",
+ //deleteSplit: 5,
+ //},
+ //{
+ //name: "delete-tenth",
+ //deleteSplit: 10,
+ //},
+ //{
+ //name: "delete-percent",
+ //deleteSplit: 100,
+ //},
+ //{
+ //name: "delete-permill",
+ //deleteSplit: 1000,
+ //},
+ //} {
+ //t.Run(tc.name, func(t *testing.T) {
+ //RunStore(t, &RunStoreOptions{
+ //ChunkCount: *chunksFlag,
+ //DeleteSplit: tc.deleteSplit,
+ //NewStoreFunc: newStoreFunc,
+ //})
+ //})
+ //}
+
+ //t.Run("iterator", func(t *testing.T) {
+ //RunIterator(t, newStoreFunc)
+ //})
+
+ t.Run("no grow", func(t *testing.T) {
+ RunNoGrow(t, newStoreFunc)
})
- t.Run("cleaned", func(t *testing.T) {
- RunStore(t, &RunStoreOptions{
- ChunkCount: *chunksFlag,
- NewStoreFunc: newStoreFunc,
- Cleaned: true,
- })
- })
+}
- for _, tc := range []struct {
- name string
- deleteSplit int
- }{
- {
- name: "delete-all",
- deleteSplit: 1,
- },
- {
- name: "delete-half",
- deleteSplit: 2,
- },
- {
- name: "delete-fifth",
- deleteSplit: 5,
- },
- {
- name: "delete-tenth",
- deleteSplit: 10,
- },
- {
- name: "delete-percent",
- deleteSplit: 100,
- },
- {
- name: "delete-permill",
- deleteSplit: 1000,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- RunStore(t, &RunStoreOptions{
- ChunkCount: *chunksFlag,
- DeleteSplit: tc.deleteSplit,
- NewStoreFunc: newStoreFunc,
- })
- })
+func RunNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
+ runNoGrow(t, newStoreFunc)
+}
+
+// runNoGrow runs the test scenario for NextShard selection
+func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
+ defer func(s uint8) {
+ fcds.ShardCount = s
+ }(fcds.ShardCount)
+
+ fcds.ShardCount = 4
+
+ db, clean := newStoreFunc(t)
+
+ defer clean()
+
+ chunkCount := 1000
+ chunks := getChunks(chunkCount)
+
+ chunkShards := make(map[string]uint8)
+
+ for _, ch := range chunks {
+ if shard, err := db.Put(ch); err != nil {
+ t.Fatal(err)
+ } else {
+ chunkShards[ch.Address().String()] = shard
+ }
}
- t.Run("iterator", func(t *testing.T) {
- RunIterator(t, newStoreFunc)
- })
+ // delete 4,3,2,1 chunks from shards 0,1,2,3
+ del := 4
+ deleteChunks := []string{}
+
+ for i := uint8(0); i < fcds.ShardCount; i++ {
+ d := del
+ for addr, storedOn := range chunkShards {
+ if storedOn == i {
+
+ // delete the chunk to make a free slot on the shard
+ c := new(chunk.Address)
+ err := c.UnmarshalString(addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := db.Delete(*c); err != nil {
+ t.Fatal(err)
+ }
+ deleteChunks = append(deleteChunks, addr)
+ if len(deleteChunks) == d {
+ break
+ }
+ }
+ }
+ for _, v := range deleteChunks {
+ delete(chunkShards, v)
+ }
+ deleteChunks = []string{}
+
+ del--
+ }
+
+ ins := 4 + 3 + 2 + 1
+	// insert 4,3,2,1 chunks and expect them to be written to the shards
+	// in the following order
+ order := []uint8{
+ // comment denotes free slots _after_ PUT
+ 0, //3,3,2,1
+ 0, //2,3,2,1
+ 1, //2,2,2,1
+ 0, //1,2,2,1
+ 1, //1,1,2,1
+ 2, //1,1,1,1
+ 0, //0,1,1,1
+ 1, //0,0,1,1
+ 2, //0,0,0,1
+ 3, //0,0,0,0
+ }
+ for i := 0; i < ins; i++ {
+ cc := chunktesting.GenerateTestRandomChunk()
+ if shard, err := db.Put(cc); err != nil {
+ t.Fatal(err)
+ } else {
+ if shard != order[i] {
+ t.Fatalf("expected %d chunk to be on shard %d but got %d", i, order[i], shard)
+ }
+ chunkShards[cc.Address().String()] = shard
+ }
+ }
+
+ slots, err := db.ShardSize()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sum := 0
+ for _, v := range slots {
+ sum += int(v.Slots)
+ }
+
+ if sum != 4096*1000 {
+ t.Fatal(sum)
+ }
+
+ // now for each new chunk, we should first check all shard
+ // sizes, and locate the smallest shard
+ // for each Put we should get that shard as next
+
+ insNew := 1000
+ for i := 0; i < insNew; i++ {
+ slots, err := db.ShardSize()
+ if err != nil {
+ t.Fatal(err)
+ }
+ minSize, minSlot := slots[0].Slots, uint8(0)
+ for i, v := range slots {
+ if v.Slots < minSize {
+ minSize = v.Slots
+ minSlot = uint8(i)
+ }
+ }
+ cc := chunktesting.GenerateTestRandomChunk()
+ if shard, err := db.Put(cc); err != nil {
+ t.Fatal(err)
+ } else {
+ if shard != minSlot {
+ t.Fatalf("next slot expected to be %d but got %d. chunk number %d", minSlot, shard, i)
+ }
+ }
+ }
}
// RunAll runs all available tests for a Store implementation.
@@ -109,9 +245,9 @@ func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func()))
RunStd(t, newStoreFunc)
- t.Run("next shard", func(t *testing.T) {
- runNextShard(t, newStoreFunc)
- })
+ //t.Run("next shard", func(t *testing.T) {
+ //runNextShard(t, newStoreFunc)
+ //})
}
// RunNextShard runs the test scenario for NextShard selection
From f8187291f390c702891bf4461d442daff6e4b5a4 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 2 Mar 2020 18:00:48 +0800
Subject: [PATCH 20/89] cleanup, fix free slots in leveldb implementation
---
storage/fcds/fcds.go | 23 +++--------------
storage/fcds/leveldb/leveldb.go | 25 +++++++++++++++---
storage/fcds/leveldb/leveldb_test.go | 2 +-
storage/fcds/test/store.go | 38 +++++++++++++++-------------
4 files changed, 47 insertions(+), 41 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index c00369e3b4..bdb08d93ae 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -27,6 +27,7 @@ import (
"sync"
"time"
+ "github.com/davecgh/go-spew/spew"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/chunk"
@@ -385,21 +386,6 @@ func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
return has
}
-// shardSizes returns a ShardSlot slice in which Slots signify how many
-// taken slots are there in the shard
-func (s *Store) shardSizes() (slots []ShardSlot, err error) {
- slots = make([]ShardSlot, len(s.shards))
- for i, sh := range s.shards {
- fs, err := sh.f.Stat()
- if err != nil {
- return nil, err
- }
-
- slots[i] = ShardSlot{Shard: uint8(i), Slots: fs.Size() / int64(s.maxChunkSize)}
- }
- return slots, nil
-}
-
// NextShard gets the next shard to write to.
// Uses weighted probability to choose the next shard.
func (s *Store) NextShard() (shard uint8, err error) {
@@ -409,16 +395,16 @@ func (s *Store) NextShard() (shard uint8, err error) {
slots := s.meta.ShardSlots()
sort.Sort(bySlots(slots))
+ spew.Dump(slots)
+
// if the first shard has free slots - return it
// otherwise, just balance them out
-
if slots[0].Slots > 0 {
- fmt.Println("retuning shard with free slot", slots[0].Shard, slots[0].Slots)
return slots[0].Shard, nil
}
// each element has in Slots the number of _taken_ slots
- slots, err = s.shardSizes()
+ slots, err = s.ShardSize()
if err != nil {
return 0, err
}
@@ -426,7 +412,6 @@ func (s *Store) NextShard() (shard uint8, err error) {
// sorting them will make the first element the largest shard and the last
// element the smallest shard; pick the smallest
sort.Sort(bySlots(slots))
-
shard = slots[len(slots)-1].Shard
return shard, nil
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 31a890d9a3..359c2315bb 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -106,7 +106,26 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
return err
}
batch.Put(chunkKey(addr), meta)
- return s.db.Write(batch, nil)
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ zero := s.free[m.Shard] == 0
+ if !zero {
+ s.free[m.Shard]--
+ }
+ b, err := encodeFreeSlots(s.free)
+ if err != nil {
+ return err
+ }
+ batch.Put(freeCountKey(), b)
+
+ err = s.db.Write(batch, nil)
+ if err != nil {
+ if !zero {
+ s.free[m.Shard]++
+ }
+ return err
+ }
+ return nil
}
// Remove removes chunk meta information from the shard.
@@ -120,6 +139,7 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
s.mtx.Lock()
defer s.mtx.Unlock()
+ s.free[shard]++
b, err := encodeFreeSlots(s.free)
if err != nil {
return err
@@ -129,11 +149,10 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
err = s.db.Write(batch, nil)
if err != nil {
+ s.free[shard]--
return err
}
- s.free[shard]++
-
return nil
}
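
The Set and Remove changes above keep the in-memory free-slot counter in step with the persisted value: adjust the counter first, write the encoded counts in the same leveldb batch as the chunk meta, and undo the in-memory change if the write fails. A minimal sketch of that counter-with-rollback pattern follows; encodeFreeSlots and freeCountKey are not shown in this series, so the encoding and the kv interface below are assumptions for illustration only.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

type kv interface {
	Write(key, val []byte) error
}

type counter struct {
	free []int64 // in-memory free-slot count per shard
	db   kv
}

// encodeFreeSlots is an assumed encoding: 8 bytes big-endian per shard.
func encodeFreeSlots(free []int64) []byte {
	b := make([]byte, 8*len(free))
	for i, v := range free {
		binary.BigEndian.PutUint64(b[i*8:], uint64(v))
	}
	return b
}

// take consumes one free slot on shard and persists the new counts,
// rolling the in-memory value back if the write fails.
func (c *counter) take(shard uint8) error {
	zero := c.free[shard] == 0
	if !zero {
		c.free[shard]--
	}
	if err := c.db.Write([]byte("free"), encodeFreeSlots(c.free)); err != nil {
		if !zero {
			c.free[shard]++ // undo the optimistic decrement
		}
		return err
	}
	return nil
}

type memdb struct{ fail bool }

func (m memdb) Write(_, _ []byte) error {
	if m.fail {
		return errors.New("write failed")
	}
	return nil
}

func main() {
	c := &counter{free: []int64{2, 0}, db: memdb{fail: true}}
	fmt.Println(c.take(0), c.free[0]) // write failed, counter restored to 2
}
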
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 1e2b78b57c..4dffa59fa2 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -49,7 +49,7 @@ func TestFCDS(t *testing.T) {
// TestFreeSlotCounter tests that the free slot counter gets persisted
// and properly loaded on existing store restart
-func TestFreeSlotCounter(t *testing.T) {
+func xTestFreeSlotCounter(t *testing.T) {
path, err := ioutil.TempDir("", "swarm-fcds-")
if err != nil {
t.Fatal(err)
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 6235d99f74..f5e03ba33f 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -103,15 +103,11 @@ func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func()))
//})
t.Run("no grow", func(t *testing.T) {
- RunNoGrow(t, newStoreFunc)
+ runNoGrow(t, newStoreFunc)
})
}
-func RunNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- runNoGrow(t, newStoreFunc)
-}
-
// runNoGrow runs the test scenario for NextShard selection
func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
defer func(s uint8) {
@@ -169,21 +165,26 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
del--
}
+ fmt.Println("done deleting")
+ fmt.Println("done deleting")
+ fmt.Println("done deleting")
+ fmt.Println("done deleting")
+
ins := 4 + 3 + 2 + 1
// insert 4,3,2,1 chunks and expect them to be written to the shards
// in the following order
order := []uint8{
// comment denotes free slots _after_ PUT
- 0, //3,3,2,1
- 0, //2,3,2,1
- 1, //2,2,2,1
- 0, //1,2,2,1
- 1, //1,1,2,1
- 2, //1,1,1,1
- 0, //0,1,1,1
- 1, //0,0,1,1
- 2, //0,0,0,1
- 3, //0,0,0,0
+ 0, //4,3,2,1 -> 3,3,2,1
+ 0, //3,3,2,1 -> 2,3,2,1
+ 1, //2,3,2,1 -> 2,2,2,1
+ 0, //2,2,2,1 -> 1,2,2,1
+ 1, //1,2,2,1 -> 1,1,2,1
+ 2, //1,1,2,1 -> 1,1,1,1
+ 0, //1,1,1,1 -> 0,1,1,1
+ 1, //0,1,1,1 -> 0,0,1,1
+ 2, //0,0,1,1 -> 0,0,0,1
+ 3, //0,0,0,1 -> 0,0,0,0
}
for i := 0; i < ins; i++ {
cc := chunktesting.GenerateTestRandomChunk()
@@ -191,7 +192,7 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
t.Fatal(err)
} else {
if shard != order[i] {
- t.Fatalf("expected %d chunk to be on shard %d but got %d", i, order[i], shard)
+ t.Fatalf("expected chunk %d to be on shard %d but got %d", i, order[i], shard)
}
chunkShards[cc.Address().String()] = shard
}
@@ -215,7 +216,7 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
// sizes, and locate the smallest shard
// for each Put we should get that shard as next
- insNew := 1000
+ insNew := 10000
for i := 0; i < insNew; i++ {
slots, err := db.ShardSize()
if err != nil {
@@ -224,7 +225,8 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
minSize, minSlot := slots[0].Slots, uint8(0)
for i, v := range slots {
- if v.Slots < minSize {
+ // take the _last_ minimum
+ if v.Slots <= minSize {
minSize = v.Slots
minSlot = uint8(i)
}
From aa2b1b0b9b0e509d8047195bf1fc041462c4d5e3 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 2 Mar 2020 18:17:25 +0800
Subject: [PATCH 21/89] fix build
---
storage/fcds/mock/mock.go | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/storage/fcds/mock/mock.go b/storage/fcds/mock/mock.go
index b537c2b4b5..4b491d7ed0 100644
--- a/storage/fcds/mock/mock.go
+++ b/storage/fcds/mock/mock.go
@@ -123,6 +123,11 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return nil
}
+func (s *Store) ShardSize() (slots []fcds.ShardSlot, err error) {
+ i, err := s.Count()
+ return []fcds.ShardSlot{fcds.ShardSlot{Shard: 0, Slots: int64(i)}}, err
+}
+
// Close doesn't do anything.
// It exists to implement fcdb.MetaStore interface.
func (s *Store) Close() error {
From 50db06fdf3846bef9d077ea2a861e2cbda503887 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 13:03:34 +0800
Subject: [PATCH 22/89] add metrics for debugging
---
storage/fcds/fcds.go | 3 ---
storage/localstore/gc.go | 3 +++
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index bdb08d93ae..76625292b5 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -27,7 +27,6 @@ import (
"sync"
"time"
- "github.com/davecgh/go-spew/spew"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/chunk"
@@ -395,8 +394,6 @@ func (s *Store) NextShard() (shard uint8, err error) {
slots := s.meta.ShardSlots()
sort.Sort(bySlots(slots))
- spew.Dump(slots)
-
// if the first shard has free slots - return it
// otherwise, just balance them out
if slots[0].Slots > 0 {
diff --git a/storage/localstore/gc.go b/storage/localstore/gc.go
index 7e46437e62..3977918259 100644
--- a/storage/localstore/gc.go
+++ b/storage/localstore/gc.go
@@ -251,10 +251,13 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
}
new = gcSize - c
}
+ metrics.GetOrRegisterGauge("localstore.gcsize.index", nil).Update(gcSize)
db.gcSize.PutInBatch(batch, new)
// trigger garbage collection if we reached the capacity
if new >= db.capacity {
+ metrics.GetOrRegisterCounter("localstore.trigger-gc-on-inc", nil).Inc(1)
+
db.triggerGarbageCollection()
}
return nil
From e7bcded38037527e9c12d1985b0884627ed30581 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 13:08:24 +0800
Subject: [PATCH 23/89] fix build
---
storage/localstore/gc.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/storage/localstore/gc.go b/storage/localstore/gc.go
index 3977918259..489a96d71f 100644
--- a/storage/localstore/gc.go
+++ b/storage/localstore/gc.go
@@ -251,7 +251,7 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
}
new = gcSize - c
}
- metrics.GetOrRegisterGauge("localstore.gcsize.index", nil).Update(gcSize)
+ metrics.GetOrRegisterGauge("localstore.gcsize.index", nil).Update(int64(gcSize))
db.gcSize.PutInBatch(batch, new)
// trigger garbage collection if we reached the capacity
From b8c1fef31f05c10a59594fabb5ceeb8b34c1eab1 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 17:31:12 +0800
Subject: [PATCH 24/89] pessimistic locking
---
storage/fcds/fcds.go | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 76625292b5..6ffd9a5e46 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -70,6 +70,7 @@ type Store struct {
maxChunkSize int // maximal chunk data size
quit chan struct{} // quit disables all operations after Close is called
quitOnce sync.Once // protects quit channel from multiple Close calls
+ mtx sync.Mutex
}
// Option is an optional argument passed to New.
@@ -133,6 +134,9 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
}
defer s.unprotect()
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
m, err := s.getMeta(addr)
if err != nil {
return nil, err
@@ -177,7 +181,8 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
defer s.unprotect()
-
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
addr := ch.Address()
data := ch.Data()
@@ -267,7 +272,8 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
return err
}
defer s.unprotect()
-
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
m, err := s.getMeta(addr)
if err != nil {
return err
@@ -296,7 +302,8 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return err
}
defer s.unprotect()
-
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
for _, sh := range s.shards {
sh.mu.Lock()
}
From fe9c14bdfd5dcd3ccc8c2c58e34e15fb40905b16 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 17:44:55 +0800
Subject: [PATCH 25/89] Revert "pessimistic locking"
This reverts commit b8c1fef31f05c10a59594fabb5ceeb8b34c1eab1.
---
storage/fcds/fcds.go | 13 +++----------
1 file changed, 3 insertions(+), 10 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 6ffd9a5e46..76625292b5 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -70,7 +70,6 @@ type Store struct {
maxChunkSize int // maximal chunk data size
quit chan struct{} // quit disables all operations after Close is called
quitOnce sync.Once // protects quit channel from multiple Close calls
- mtx sync.Mutex
}
// Option is an optional argument passed to New.
@@ -134,9 +133,6 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
}
defer s.unprotect()
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
m, err := s.getMeta(addr)
if err != nil {
return nil, err
@@ -181,8 +177,7 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
defer s.unprotect()
- s.mtx.Lock()
- defer s.mtx.Unlock()
+
addr := ch.Address()
data := ch.Data()
@@ -272,8 +267,7 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
return err
}
defer s.unprotect()
- s.mtx.Lock()
- defer s.mtx.Unlock()
+
m, err := s.getMeta(addr)
if err != nil {
return err
@@ -302,8 +296,7 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return err
}
defer s.unprotect()
- s.mtx.Lock()
- defer s.mtx.Unlock()
+
for _, sh := range s.shards {
sh.mu.Lock()
}
From bebdd450d122966828d505e5feb5c7b3372b588c Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 18:02:09 +0800
Subject: [PATCH 26/89] reenable tests
---
storage/fcds/leveldb/leveldb_test.go | 2 +-
storage/fcds/test/store.go | 106 +++++++++++++--------------
2 files changed, 54 insertions(+), 54 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 4dffa59fa2..1e2b78b57c 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -49,7 +49,7 @@ func TestFCDS(t *testing.T) {
// TestFreeSlotCounter tests that the free slot counter gets persisted
// and properly loaded on existing store restart
-func xTestFreeSlotCounter(t *testing.T) {
+func TestFreeSlotCounter(t *testing.T) {
path, err := ioutil.TempDir("", "swarm-fcds-")
if err != nil {
t.Fatal(err)
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index f5e03ba33f..47fc35b372 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -45,62 +45,62 @@ func Main(m *testing.M) {
// RunStd runs the standard tests
func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- //t.Run("empty", func(t *testing.T) {
- //RunStore(t, &RunStoreOptions{
- //ChunkCount: *chunksFlag,
- //NewStoreFunc: newStoreFunc,
- //})
- //})
+ t.Run("empty", func(t *testing.T) {
+ RunStore(t, &RunStoreOptions{
+ ChunkCount: *chunksFlag,
+ NewStoreFunc: newStoreFunc,
+ })
+ })
- //t.Run("cleaned", func(t *testing.T) {
- //RunStore(t, &RunStoreOptions{
- //ChunkCount: *chunksFlag,
- //NewStoreFunc: newStoreFunc,
- //Cleaned: true,
- //})
- //})
+ t.Run("cleaned", func(t *testing.T) {
+ RunStore(t, &RunStoreOptions{
+ ChunkCount: *chunksFlag,
+ NewStoreFunc: newStoreFunc,
+ Cleaned: true,
+ })
+ })
- //for _, tc := range []struct {
- //name string
- //deleteSplit int
- //}{
- //{
- //name: "delete-all",
- //deleteSplit: 1,
- //},
- //{
- //name: "delete-half",
- //deleteSplit: 2,
- //},
- //{
- //name: "delete-fifth",
- //deleteSplit: 5,
- //},
- //{
- //name: "delete-tenth",
- //deleteSplit: 10,
- //},
- //{
- //name: "delete-percent",
- //deleteSplit: 100,
- //},
- //{
- //name: "delete-permill",
- //deleteSplit: 1000,
- //},
- //} {
- //t.Run(tc.name, func(t *testing.T) {
- //RunStore(t, &RunStoreOptions{
- //ChunkCount: *chunksFlag,
- //DeleteSplit: tc.deleteSplit,
- //NewStoreFunc: newStoreFunc,
- //})
- //})
- //}
+ for _, tc := range []struct {
+ name string
+ deleteSplit int
+ }{
+ {
+ name: "delete-all",
+ deleteSplit: 1,
+ },
+ {
+ name: "delete-half",
+ deleteSplit: 2,
+ },
+ {
+ name: "delete-fifth",
+ deleteSplit: 5,
+ },
+ {
+ name: "delete-tenth",
+ deleteSplit: 10,
+ },
+ {
+ name: "delete-percent",
+ deleteSplit: 100,
+ },
+ {
+ name: "delete-permill",
+ deleteSplit: 1000,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ RunStore(t, &RunStoreOptions{
+ ChunkCount: *chunksFlag,
+ DeleteSplit: tc.deleteSplit,
+ NewStoreFunc: newStoreFunc,
+ })
+ })
+ }
- //t.Run("iterator", func(t *testing.T) {
- //RunIterator(t, newStoreFunc)
- //})
+ t.Run("iterator", func(t *testing.T) {
+ RunIterator(t, newStoreFunc)
+ })
t.Run("no grow", func(t *testing.T) {
runNoGrow(t, newStoreFunc)
From 8bcaabb624898599d6e2c40d4c9dd6c98b138ecb Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 18:02:35 +0800
Subject: [PATCH 27/89] try something
---
storage/localstore/mode_put.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/storage/localstore/mode_put.go b/storage/localstore/mode_put.go
index 27ff393615..325b779ae3 100644
--- a/storage/localstore/mode_put.go
+++ b/storage/localstore/mode_put.go
@@ -81,7 +81,7 @@ func (db *DB) put(mode chunk.ModePut, chs ...chunk.Chunk) (exist []bool, err err
exist[i] = true
continue
}
- exists, c, err := db.putRequest(batch, binIDs, chunkToItem(ch))
+ exists, c, err := db.putUpload(batch, binIDs, chunkToItem(ch))
if err != nil {
return nil, err
}
@@ -115,7 +115,7 @@ func (db *DB) put(mode chunk.ModePut, chs ...chunk.Chunk) (exist []bool, err err
exist[i] = true
continue
}
- exists, c, err := db.putSync(batch, binIDs, chunkToItem(ch))
+ exists, c, err := db.putUpload(batch, binIDs, chunkToItem(ch))
if err != nil {
return nil, err
}
From 2b791adfa1ca5355215af0c3344c582d7ed8d7f5 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 18:20:48 +0800
Subject: [PATCH 28/89] Revert "try something"
This reverts commit 8bcaabb624898599d6e2c40d4c9dd6c98b138ecb.
---
storage/localstore/mode_put.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/storage/localstore/mode_put.go b/storage/localstore/mode_put.go
index 325b779ae3..27ff393615 100644
--- a/storage/localstore/mode_put.go
+++ b/storage/localstore/mode_put.go
@@ -81,7 +81,7 @@ func (db *DB) put(mode chunk.ModePut, chs ...chunk.Chunk) (exist []bool, err err
exist[i] = true
continue
}
- exists, c, err := db.putUpload(batch, binIDs, chunkToItem(ch))
+ exists, c, err := db.putRequest(batch, binIDs, chunkToItem(ch))
if err != nil {
return nil, err
}
@@ -115,7 +115,7 @@ func (db *DB) put(mode chunk.ModePut, chs ...chunk.Chunk) (exist []bool, err err
exist[i] = true
continue
}
- exists, c, err := db.putUpload(batch, binIDs, chunkToItem(ch))
+ exists, c, err := db.putSync(batch, binIDs, chunkToItem(ch))
if err != nil {
return nil, err
}
From 5cfb4a5c26f6cff4a5793884abbe28c54b086d7b Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 18:37:42 +0800
Subject: [PATCH 29/89] remove caching, use mem
---
storage/localstore/localstore.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/storage/localstore/localstore.go b/storage/localstore/localstore.go
index ffe86860ed..7d386de2aa 100644
--- a/storage/localstore/localstore.go
+++ b/storage/localstore/localstore.go
@@ -30,7 +30,7 @@ import (
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/shed"
"github.com/ethersphere/swarm/storage/fcds"
- fcdsleveldb "github.com/ethersphere/swarm/storage/fcds/leveldb"
+ fcdsmem "github.com/ethersphere/swarm/storage/fcds/mem"
fcdsmock "github.com/ethersphere/swarm/storage/fcds/mock"
"github.com/ethersphere/swarm/storage/mock"
)
@@ -222,7 +222,7 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
}
if o.MockStore == nil {
- metaStore, err := fcdsleveldb.NewMetaStore(filepath.Join(path, "meta"))
+ metaStore, err := fcdsmem.NewMetaStore(filepath.Join(path, "meta"))
if err != nil {
return nil, err
}
@@ -230,7 +230,7 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
filepath.Join(path, "data"),
chunk.DefaultSize+8, // chunk data has additional 8 bytes prepended
metaStore,
- fcds.WithCache(true),
+ fcds.WithCache(false),
)
if err != nil {
return nil, err
From c72a6c3b344de7309069856c8935d59155e23582 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 18:56:45 +0800
Subject: [PATCH 30/89] remove free slots slice
---
storage/fcds/fcds.go | 29 +++++++++++++++--------------
storage/localstore/localstore.go | 8 ++++----
2 files changed, 19 insertions(+), 18 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 76625292b5..2d9aab0e95 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -61,10 +61,10 @@ var (
// Store is the main FCDS implementation. It stores chunk data into
// a number of files partitioned by the last byte of the chunk address.
type Store struct {
- shards []shard // relations with shard id and a shard file and their mutexes
- meta MetaStore // stores chunk offsets
- free []bool // which shards have free offsets
- freeMu sync.RWMutex // protects free field
+ shards []shard // relations with shard id and a shard file and their mutexes
+ meta MetaStore // stores chunk offsets
+ //free []bool // which shards have free offsets
+ //freeMu sync.RWMutex // protects free field
freeCache *offsetCache // optional cache of free offset values
wg sync.WaitGroup // blocks Close until all other method calls are done
maxChunkSize int // maximal chunk data size
@@ -90,9 +90,9 @@ func WithCache(yes bool) Option {
// New constructs a new Store with files at path, with specified max chunk size.
func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s *Store, err error) {
s = &Store{
- shards: make([]shard, ShardCount),
- meta: metaStore,
- free: make([]bool, ShardCount),
+ shards: make([]shard, ShardCount),
+ meta: metaStore,
+ //free: make([]bool, ShardCount),
maxChunkSize: maxChunkSize,
quit: make(chan struct{}),
}
@@ -373,16 +373,17 @@ func (s *Store) getMeta(addr chunk.Address) (m *Meta, err error) {
}
func (s *Store) markShardWithFreeOffsets(shard uint8, has bool) {
- s.freeMu.Lock()
- s.free[shard] = has
- s.freeMu.Unlock()
+ //s.freeMu.Lock()
+ //s.free[shard] = has
+ //s.freeMu.Unlock()
}
func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
- s.freeMu.RLock()
- has = s.free[shard]
- s.freeMu.RUnlock()
- return has
+ //s.freeMu.RLock()
+ //has = s.free[shard]
+ //s.freeMu.RUnlock()
+ return true
+ //return has
}
// NextShard gets the next shard to write to.
diff --git a/storage/localstore/localstore.go b/storage/localstore/localstore.go
index 7d386de2aa..c402e11e4c 100644
--- a/storage/localstore/localstore.go
+++ b/storage/localstore/localstore.go
@@ -222,10 +222,10 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
}
if o.MockStore == nil {
- metaStore, err := fcdsmem.NewMetaStore(filepath.Join(path, "meta"))
- if err != nil {
- return nil, err
- }
+ metaStore := fcdsmem.NewMetaStore()
+ //if err != nil {
+ //return nil, err
+ //}
db.data, err = fcds.New(
filepath.Join(path, "data"),
chunk.DefaultSize+8, // chunk data has additional 8 bytes prepended
From 88ab291e975abdb59db206ce0053fe86b025afd2 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 19:18:45 +0800
Subject: [PATCH 31/89] Revert "Revert "pessimistic locking""
This reverts commit fe9c14bdfd5dcd3ccc8c2c58e34e15fb40905b16.
---
storage/fcds/fcds.go | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 2d9aab0e95..21271b278a 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -70,6 +70,7 @@ type Store struct {
maxChunkSize int // maximal chunk data size
quit chan struct{} // quit disables all operations after Close is called
quitOnce sync.Once // protects quit channel from multiple Close calls
+ mtx sync.Mutex
}
// Option is an optional argument passed to New.
@@ -133,6 +134,9 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
}
defer s.unprotect()
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
m, err := s.getMeta(addr)
if err != nil {
return nil, err
@@ -177,7 +181,8 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
defer s.unprotect()
-
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
addr := ch.Address()
data := ch.Data()
@@ -267,7 +272,8 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
return err
}
defer s.unprotect()
-
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
m, err := s.getMeta(addr)
if err != nil {
return err
@@ -296,7 +302,8 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return err
}
defer s.unprotect()
-
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
for _, sh := range s.shards {
sh.mu.Lock()
}
From 92886fe9ec8935cdf2003a072ed7c204ef125d3c Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 19:28:04 +0800
Subject: [PATCH 32/89] add metrics
---
storage/fcds/fcds.go | 23 ++++++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 21271b278a..f1771d41df 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -27,6 +27,7 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/chunk"
@@ -149,11 +150,15 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
data := make([]byte, m.Size)
n, err := sh.f.ReadAt(data, m.Offset)
if err != nil && err != io.EOF {
+ metrics.GetOrRegisterCounter("fcds.get.error", nil).Inc(1)
+
return nil, err
}
if n != int(m.Size) {
return nil, fmt.Errorf("incomplete chunk data, read %v of %v", n, m.Size)
}
+ metrics.GetOrRegisterCounter("fcds.get.ok", nil).Inc(1)
+
return chunk.NewChunk(addr, data), nil
}
@@ -166,11 +171,13 @@ func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
_, err = s.getMeta(addr)
if err != nil {
+ metrics.GetOrRegisterCounter("fcds.has.err", nil).Inc(1)
if err == chunk.ErrChunkNotFound {
return false, nil
}
return false, err
}
+ metrics.GetOrRegisterCounter("fcds.has.ok", nil).Inc(1)
return true, nil
}
@@ -209,12 +216,18 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
+ if reclaimed {
+ metrics.GetOrRegisterCounter("fcds.put.reclaimed").Inc(1)
+ }
+
if offset < 0 {
+ metrics.GetOrRegisterCounter("fcds.put.append").Inc(1)
// no free offsets found,
// append the chunk data by
// seeking to the end of the file
offset, err = sh.f.Seek(0, io.SeekEnd)
} else {
+ metrics.GetOrRegisterCounter("fcds.put.offset").Inc(1)
// seek to the offset position
// to replace the chunk data at that position
_, err = sh.f.Seek(offset, io.SeekStart)
@@ -288,7 +301,15 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
if s.freeCache != nil {
s.freeCache.set(m.Shard, m.Offset)
}
- return s.meta.Remove(addr, m.Shard)
+ err = s.meta.Remove(addr, m.Shard)
+ if err != nil {
+ metrics.GetOrRegisterCounter("fcds.delete.fail", nil).Inc(1)
+ return err
+ }
+
+ metrics.GetOrRegisterCounter("fcds.delete.ok", nil).Inc(1)
+ return nil
+
}
// Count returns a number of stored chunks.
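
The counters added above distinguish the two write paths in Put: reusing a reclaimed offset inside the shard file versus appending at the end. A minimal sketch of that placement logic, assuming fixed-size slots of chunk.DefaultSize+8 bytes (4104) and illustrative names, not the store's actual API:

package main

import (
	"fmt"
	"io"
	"os"
)

const slotSize = 4104 // chunk.DefaultSize + 8 in the real store

// writeSlot writes data either at a reclaimed offset (offset >= 0)
// or appends it at the end of the shard file (offset < 0), padding
// the data to the fixed slot size so offsets stay aligned.
func writeSlot(f *os.File, offset int64, data []byte) (int64, error) {
	section := make([]byte, slotSize)
	copy(section, data)
	var err error
	if offset < 0 {
		offset, err = f.Seek(0, io.SeekEnd) // append: no free offsets found
	} else {
		_, err = f.Seek(offset, io.SeekStart) // reclaim a freed slot
	}
	if err != nil {
		return 0, err
	}
	if _, err := f.Write(section); err != nil {
		return 0, err
	}
	return offset, nil
}

func main() {
	f, err := os.CreateTemp("", "shard-")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	defer os.Remove(f.Name())

	off1, _ := writeSlot(f, -1, []byte("first chunk"))        // appended at 0
	off2, _ := writeSlot(f, -1, []byte("second chunk"))       // appended at slotSize
	off3, _ := writeSlot(f, off1, []byte("replacement data")) // reuses slot 0
	fmt.Println(off1, off2, off3)                             // 0 4104 0
}
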
From 8ec201ad02a157e4d03cd66f2291700e885a61e3 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Tue, 3 Mar 2020 20:11:46 +0800
Subject: [PATCH 33/89] fix has metrics
---
storage/fcds/fcds.go | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index f1771d41df..82e6bb830d 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -171,10 +171,11 @@ func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
_, err = s.getMeta(addr)
if err != nil {
- metrics.GetOrRegisterCounter("fcds.has.err", nil).Inc(1)
if err == chunk.ErrChunkNotFound {
+ metrics.GetOrRegisterCounter("fcds.has.no", nil).Inc(1)
return false, nil
}
+ metrics.GetOrRegisterCounter("fcds.has.err", nil).Inc(1)
return false, err
}
metrics.GetOrRegisterCounter("fcds.has.ok", nil).Inc(1)
From 66241152a26c1f04e32737ef0ca6acd469221f09 Mon Sep 17 00:00:00 2001
From: Janos Guljas
Date: Tue, 3 Mar 2020 17:03:45 +0100
Subject: [PATCH 34/89] storage/fcds/test: fix NewFCDSStore path handling
---
storage/fcds/test/store.go | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 47fc35b372..ffec485f6e 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -20,7 +20,6 @@ import (
"bytes"
"flag"
"fmt"
- "io/ioutil"
"math/rand"
"os"
"sync"
@@ -526,12 +525,7 @@ func RunIterator(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fun
func NewFCDSStore(t *testing.T, path string, metaStore fcds.MetaStore) (s *fcds.Store, clean func()) {
t.Helper()
- path, err := ioutil.TempDir("", "swarm-fcds")
- if err != nil {
- t.Fatal(err)
- }
-
- s, err = fcds.New(path, chunk.DefaultSize, metaStore, fcds.WithCache(!*noCacheFlag))
+ s, err := fcds.New(path, chunk.DefaultSize, metaStore, fcds.WithCache(!*noCacheFlag))
if err != nil {
os.RemoveAll(path)
t.Fatal(err)
From 00d4c5af885d708062b0b0b073f89d8a87574e06 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 11:48:53 +0800
Subject: [PATCH 35/89] instrument till you drop
---
storage/fcds/fcds.go | 38 ++++++--
storage/fcds/leveldb/leveldb.go | 6 +-
storage/fcds/leveldb/leveldb_test.go | 129 ++++++++++++++++++++++++
storage/fcds/mem/mem.go | 36 +++++--
storage/fcds/mem/mem_test.go | 140 +++++++++++++++++++++++++++
storage/fcds/meta.go | 2 +-
6 files changed, 330 insertions(+), 21 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 82e6bb830d..547f0454c9 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -51,7 +51,7 @@ type Storer interface {
var _ Storer = new(Store)
// Number of files that store chunk data.
-var ShardCount = uint8(32)
+var ShardCount = uint8(4)
// ErrStoreClosed is returned if store is already closed.
var (
@@ -121,8 +121,8 @@ func (s *Store) ShardSize() (slots []ShardSlot, err error) {
if err != nil {
return nil, err
}
-
- slots[i] = ShardSlot{Shard: uint8(i), Slots: fs.Size()}
+ ii := i
+ slots[i] = ShardSlot{Shard: uint8(ii), Slots: fs.Size()}
}
return slots, nil
@@ -191,6 +191,10 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
defer s.unprotect()
s.mtx.Lock()
defer s.mtx.Unlock()
+ m, err := s.getMeta(ch.Address())
+ if err == nil {
+ return m.Shard, nil
+ }
addr := ch.Address()
data := ch.Data()
@@ -207,6 +211,8 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
+ //fmt.Println("putting chunk address to shard", ch.Address().String(), "shard", shard)
+
sh := s.shards[shard]
sh.mu.Lock()
@@ -217,21 +223,31 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
+ //fmt.Println("got free offset on shard for chunk", "offset", offset, "shard", shard)
+
if reclaimed {
- metrics.GetOrRegisterCounter("fcds.put.reclaimed").Inc(1)
+ metrics.GetOrRegisterCounter("fcds.put.reclaimed", nil).Inc(1)
}
if offset < 0 {
- metrics.GetOrRegisterCounter("fcds.put.append").Inc(1)
+ metrics.GetOrRegisterCounter("fcds.put.append", nil).Inc(1)
// no free offsets found,
// append the chunk data by
// seeking to the end of the file
offset, err = sh.f.Seek(0, io.SeekEnd)
+ fmt.Printf("*")
} else {
- metrics.GetOrRegisterCounter("fcds.put.offset").Inc(1)
+ metrics.GetOrRegisterCounter("fcds.put.offset", nil).Inc(1)
// seek to the offset position
// to replace the chunk data at that position
- _, err = sh.f.Seek(offset, io.SeekStart)
+ oo, err := sh.f.Seek(offset, io.SeekStart)
+ fmt.Printf("|")
+ if err != nil {
+ return 0, err
+ }
+ if oo != offset {
+ panic("wtf")
+ }
}
if err != nil {
return 0, err
@@ -302,6 +318,8 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
if s.freeCache != nil {
s.freeCache.set(m.Shard, m.Offset)
}
+ //fmt.Println("freeing chunk offset", addr.String(), "shard", m.Shard, "offset", m.Offset)
+
err = s.meta.Remove(addr, m.Shard)
if err != nil {
metrics.GetOrRegisterCounter("fcds.delete.fail", nil).Inc(1)
@@ -421,7 +439,7 @@ func (s *Store) NextShard() (shard uint8, err error) {
// warning: if multiple writers call this at the same time we might get the same shard again and again
// because the free slot value has not been decremented yet(!)
- slots := s.meta.ShardSlots()
+ slots, hasSomething := s.meta.ShardSlots()
sort.Sort(bySlots(slots))
// if the first shard has free slots - return it
@@ -429,7 +447,9 @@ func (s *Store) NextShard() (shard uint8, err error) {
if slots[0].Slots > 0 {
return slots[0].Shard, nil
}
-
+ if hasSomething {
+ panic("shoudnt")
+ }
// each element has in Slots the number of _taken_ slots
slots, err = s.ShardSize()
if err != nil {
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 359c2315bb..e909a9adb7 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -105,9 +105,13 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
if err != nil {
return err
}
- batch.Put(chunkKey(addr), meta)
s.mtx.Lock()
defer s.mtx.Unlock()
+
+ if _, err := s.Get(addr); err != nil {
+ batch.Put(chunkKey(addr), meta)
+ }
+
zero := s.free[m.Shard] == 0
if !zero {
s.free[m.Shard]--
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 1e2b78b57c..d70bed195b 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -17,10 +17,16 @@
package leveldb_test
import (
+ "encoding/hex"
+ "fmt"
"io/ioutil"
+ "math/rand"
"os"
"path/filepath"
+ "strings"
+ "sync"
"testing"
+ "time"
"github.com/ethersphere/swarm/chunk"
chunktesting "github.com/ethersphere/swarm/chunk/testing"
@@ -125,3 +131,126 @@ func TestFreeSlotCounter(t *testing.T) {
t.Fatalf("did not process enough shards: got %d but expected %d", count, fcds.ShardCount)
}
}
+
+func TestIssue1(t *testing.T) {
+ path, err := ioutil.TempDir("", "swarm-fcds-")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fmt.Println(path)
+
+ metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s, cleanup := test.NewFCDSStore(t, path, metaStore)
+ defer cleanup()
+
+ var wg sync.WaitGroup
+
+ var mu sync.Mutex
+ addrs := make(map[string]struct{})
+ trigger := make(chan struct{}, 1)
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ sem := make(chan struct{}, 100)
+
+ for i := 0; i < 100000; i++ {
+ i := i
+ sem <- struct{}{}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer func() { <-sem }()
+ ch := chunktesting.GenerateTestRandomChunk()
+ _, err := s.Put(ch)
+ if err != nil {
+ panic(err)
+ }
+ if i%10 == 0 {
+ // THIS IS CAUSING THE ISSUE
+ // every tenth chunk write again after some time
+ go func() {
+ time.Sleep(10 * time.Second)
+ fmt.Printf(".")
+ _, err := s.Put(ch)
+ if err != nil {
+ panic(err)
+ }
+ }()
+ }
+ mu.Lock()
+ addrs[ch.Address().String()] = struct{}{}
+ if len(addrs) >= 1000 {
+ select {
+ case trigger <- struct{}{}:
+ default:
+ }
+ }
+ if i%100 == 0 {
+ size, err := dirSize(path)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println()
+ fmt.Println("r", i, size, len(addrs))
+ }
+ mu.Unlock()
+ time.Sleep(time.Duration(rand.Intn(300)) * time.Millisecond)
+ }()
+ }
+ }()
+
+ //wg.Add(1)
+ go func() {
+ //defer wg.Done()
+
+ for range trigger {
+ for {
+ var addr chunk.Address
+ mu.Lock()
+ for a := range addrs {
+ b, err := hex.DecodeString(a)
+ if err != nil {
+ panic(err)
+ }
+ addr = chunk.Address(b)
+ break
+ }
+ fmt.Printf("-")
+ if err := s.Delete(addr); err != nil {
+ panic(err)
+ }
+ delete(addrs, addr.String())
+ if len(addrs) <= 900 {
+ mu.Unlock()
+ break
+ }
+ mu.Unlock()
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ // wait some time before removing the temp dir
+ time.Sleep(time.Minute)
+}
+
+func dirSize(path string) (size int64, err error) {
+ err = filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.IsDir() && strings.HasSuffix(info.Name(), ".db") {
+ size += info.Size()
+ }
+ return err
+ })
+ return size, err
+}
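
TestIssue1 above bounds its concurrent writers with a buffered channel used as a semaphore: sending into the channel blocks once the buffer is full, and each goroutine releases its slot when it finishes. A minimal, self-contained sketch of that pattern with illustrative names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxInFlight = 100

	var wg sync.WaitGroup
	sem := make(chan struct{}, maxInFlight)

	var mu sync.Mutex
	done := 0

	for i := 0; i < 1000; i++ {
		sem <- struct{}{} // blocks once maxInFlight goroutines are in flight
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when finished

			mu.Lock()
			done++
			mu.Unlock()
		}()
	}

	wg.Wait()
	fmt.Println("completed:", done) // 1000
}
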
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index 7d2aff2b80..cb6a2281b4 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -48,7 +48,7 @@ func NewMetaStore() (s *MetaStore) {
// Get returns chunk meta information.
func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
s.mu.RLock()
- m = s.meta[string(addr)]
+ m = s.meta[addr.String()]
s.mu.RUnlock()
if m == nil {
return nil, chunk.ErrChunkNotFound
@@ -61,10 +61,16 @@ func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
// already deleted chunk, not appended to the end of the file.
func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds.Meta) (err error) {
s.mu.Lock()
- if reclaimed {
- delete(s.free[shard], m.Offset)
+
+ if _, ok := s.meta[addr.String()]; ok {
+ panic("wtf")
}
- s.meta[string(addr)] = m
+
+ //if reclaimed {
+ sh := s.free[shard]
+ delete(sh, m.Offset)
+ //}
+ s.meta[addr.String()] = m
s.mu.Unlock()
return nil
}
@@ -73,32 +79,42 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
s.mu.Lock()
defer s.mu.Unlock()
- key := string(addr)
+ key := addr.String()
m := s.meta[key]
if m == nil {
+ panic("eeek")
return chunk.ErrChunkNotFound
}
+ v := len(s.free[shard])
s.free[shard][m.Offset] = struct{}{}
+ vv := len(s.free[shard])
+ if v == vv {
+ panic(0)
+ }
delete(s.meta, key)
return nil
}
// ShardSlots gives back a slice of ShardSlot items that represent the number
// of free slots inside each shard.
-func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot) {
+func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot, hasSomething bool) {
freeSlots = make([]fcds.ShardSlot, fcds.ShardCount)
s.mu.RLock()
for i := uint8(0); i < fcds.ShardCount; i++ {
- slot := fcds.ShardSlot{Shard: i}
- if slots, ok := s.free[i]; ok {
+ ii := i
+ slot := fcds.ShardSlot{Shard: ii}
+ if slots, ok := s.free[ii]; ok {
+ if len(slots) > 0 {
+ hasSomething = true
+ }
slot.Slots = int64(len(slots))
}
- freeSlots[i] = slot
+ freeSlots[ii] = slot
}
s.mu.RUnlock()
- return freeSlots
+ return freeSlots, hasSomething
}
// FreeOffset returns an offset that can be reclaimed by
diff --git a/storage/fcds/mem/mem_test.go b/storage/fcds/mem/mem_test.go
index 288ab47157..cdf7710682 100644
--- a/storage/fcds/mem/mem_test.go
+++ b/storage/fcds/mem/mem_test.go
@@ -17,9 +17,20 @@
package mem_test
import (
+ "encoding/hex"
+ "fmt"
"io/ioutil"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
"testing"
+ "time"
+ chunktesting "github.com/ethersphere/swarm/chunk/testing"
+
+ "github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/storage/fcds"
"github.com/ethersphere/swarm/storage/fcds/mem"
"github.com/ethersphere/swarm/storage/fcds/test"
@@ -37,3 +48,132 @@ func TestFCDS(t *testing.T) {
return test.NewFCDSStore(t, path, mem.NewMetaStore())
})
}
+
+func TestIssue1(t *testing.T) {
+ path, err := ioutil.TempDir("", "swarm-fcds-")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fmt.Println(path)
+
+ metaStore := mem.NewMetaStore()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s, cleanup := test.NewFCDSStore(t, path, metaStore)
+ defer cleanup()
+
+ var wg sync.WaitGroup
+
+ var mu sync.Mutex
+ addrs := make(map[string]struct{})
+ trigger := make(chan struct{}, 1)
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ sem := make(chan struct{}, 100)
+
+ for i := 0; i < 100000; i++ {
+ i := i
+ sem <- struct{}{}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer func() { <-sem }()
+ ch := chunktesting.GenerateTestRandomChunk()
+ mu.Lock()
+ _, err := s.Put(ch)
+ if err != nil {
+ panic(err)
+ }
+ mu.Unlock()
+ if i%10 == 0 {
+ // THIS IS CAUSING THE ISSUE
+ // every tenth chunk write again after some time
+ go func() {
+ time.Sleep(10 * time.Second)
+ fmt.Printf(".")
+ mu.Lock()
+ _, err := s.Put(ch)
+ if err != nil {
+ panic(err)
+ }
+ addrs[ch.Address().String()] = struct{}{}
+ mu.Unlock()
+ }()
+ }
+ mu.Lock()
+ addrs[ch.Address().String()] = struct{}{}
+ if len(addrs) >= 1000 {
+ select {
+ case trigger <- struct{}{}:
+ default:
+ }
+ }
+ if i%100 == 0 {
+ size, err := dirSize(path)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println()
+ fmt.Println("r", i, size, len(addrs))
+ }
+ mu.Unlock()
+ time.Sleep(time.Duration(rand.Intn(300)) * time.Millisecond)
+ }()
+ }
+ }()
+
+ //wg.Add(1)
+ go func() {
+ //defer wg.Done()
+
+ for range trigger {
+ mu.Lock()
+ for {
+ var addr chunk.Address
+ for a := range addrs {
+ b, err := hex.DecodeString(a)
+ if err != nil {
+ panic(err)
+ }
+ addr = chunk.Address(b)
+ break
+ }
+ fmt.Printf("-")
+ if err := s.Delete(addr); err != nil {
+ fmt.Println(err)
+ panic(err)
+ }
+ delete(addrs, addr.String())
+ if len(addrs) <= 900 {
+ break
+ }
+ }
+ mu.Unlock()
+
+ }
+ }()
+
+ wg.Wait()
+
+ // wait some time before removing the temp dir
+ time.Sleep(time.Minute)
+}
+
+func dirSize(path string) (size int64, err error) {
+ err = filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.IsDir() && strings.HasSuffix(info.Name(), ".db") {
+ size += info.Size()
+ }
+ return err
+ })
+ return size, err
+}
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 5bd9f52105..0a20aef90e 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -32,7 +32,7 @@ type MetaStore interface {
Count() (int, error)
Iterate(func(chunk.Address, *Meta) (stop bool, err error)) error
FreeOffset(shard uint8) (int64, error)
- ShardSlots() []ShardSlot
+ ShardSlots() ([]ShardSlot, bool)
Close() error
}
From 48143c6e84bc4420e17216c3419fc9dde344e9ab Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:05:42 +0800
Subject: [PATCH 36/89] still works
---
storage/fcds/fcds.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 547f0454c9..1c29e10de7 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -51,7 +51,7 @@ type Storer interface {
var _ Storer = new(Store)
// Number of files that store chunk data.
-var ShardCount = uint8(4)
+var ShardCount = uint8(16)
// ErrStoreClosed is returned if store is already closed.
var (
From 193a30af2169ff3ef84f2994978cd2bc9c5081f8 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:07:00 +0800
Subject: [PATCH 37/89] getting there
---
storage/fcds/fcds.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 1c29e10de7..596baaa483 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -51,7 +51,7 @@ type Storer interface {
var _ Storer = new(Store)
// Number of files that store chunk data.
-var ShardCount = uint8(16)
+var ShardCount = uint8(32)
// ErrStoreClosed is returned if store is already closed.
var (
From 231f4f7f75fc869fd4f47f32477d622f93f73605 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:08:57 +0800
Subject: [PATCH 38/89] remove annoying prints
---
storage/fcds/fcds.go | 9 ++-------
storage/fcds/mem/mem_test.go | 8 ++------
2 files changed, 4 insertions(+), 13 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 596baaa483..4f292b0581 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -211,8 +211,6 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
- //fmt.Println("putting chunk address to shard", ch.Address().String(), "shard", shard)
-
sh := s.shards[shard]
sh.mu.Lock()
@@ -223,8 +221,6 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
- //fmt.Println("got free offset on shard for chunk", "offset", offset, "shard", shard)
-
if reclaimed {
metrics.GetOrRegisterCounter("fcds.put.reclaimed", nil).Inc(1)
}
@@ -235,13 +231,13 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
// append the chunk data by
// seeking to the end of the file
offset, err = sh.f.Seek(0, io.SeekEnd)
- fmt.Printf("*")
+ //fmt.Printf("*")
} else {
metrics.GetOrRegisterCounter("fcds.put.offset", nil).Inc(1)
// seek to the offset position
// to replace the chunk data at that position
oo, err := sh.f.Seek(offset, io.SeekStart)
- fmt.Printf("|")
+ //fmt.Printf("|")
if err != nil {
return 0, err
}
@@ -318,7 +314,6 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
if s.freeCache != nil {
s.freeCache.set(m.Shard, m.Offset)
}
- //fmt.Println("freeing chunk offset", addr.String(), "shard", m.Shard, "offset", m.Offset)
err = s.meta.Remove(addr, m.Shard)
if err != nil {
diff --git a/storage/fcds/mem/mem_test.go b/storage/fcds/mem/mem_test.go
index cdf7710682..7b00c84bd4 100644
--- a/storage/fcds/mem/mem_test.go
+++ b/storage/fcds/mem/mem_test.go
@@ -55,8 +55,6 @@ func TestIssue1(t *testing.T) {
t.Fatal(err)
}
- fmt.Println(path)
-
metaStore := mem.NewMetaStore()
if err != nil {
t.Fatal(err)
@@ -96,7 +94,7 @@ func TestIssue1(t *testing.T) {
// every tenth chunk write again after some time
go func() {
time.Sleep(10 * time.Second)
- fmt.Printf(".")
+ //fmt.Printf(".")
mu.Lock()
_, err := s.Put(ch)
if err != nil {
@@ -119,7 +117,6 @@ func TestIssue1(t *testing.T) {
if err != nil {
panic(err)
}
- fmt.Println()
fmt.Println("r", i, size, len(addrs))
}
mu.Unlock()
@@ -144,9 +141,8 @@ func TestIssue1(t *testing.T) {
addr = chunk.Address(b)
break
}
- fmt.Printf("-")
+ //fmt.Printf("-")
if err := s.Delete(addr); err != nil {
- fmt.Println(err)
panic(err)
}
delete(addrs, addr.String())
From 08ed173edf2f2981dfb4e8bfebd0a75120f2b0ec Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:12:28 +0800
Subject: [PATCH 39/89] restore gc logic, still ok
---
storage/fcds/mem/mem_test.go | 32 ++++++++++++++++++++++++++++----
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/storage/fcds/mem/mem_test.go b/storage/fcds/mem/mem_test.go
index 7b00c84bd4..a26df63f4d 100644
--- a/storage/fcds/mem/mem_test.go
+++ b/storage/fcds/mem/mem_test.go
@@ -130,7 +130,6 @@ func TestIssue1(t *testing.T) {
//defer wg.Done()
for range trigger {
- mu.Lock()
for {
var addr chunk.Address
for a := range addrs {
@@ -141,18 +140,43 @@ func TestIssue1(t *testing.T) {
addr = chunk.Address(b)
break
}
- //fmt.Printf("-")
if err := s.Delete(addr); err != nil {
panic(err)
}
+ mu.Lock()
delete(addrs, addr.String())
if len(addrs) <= 900 {
+ mu.Unlock()
break
}
+ mu.Unlock()
}
- mu.Unlock()
-
}
+ //for range trigger {
+ //for {
+ //var addr chunk.Address
+ //mu.Lock()
+ //for a := range addrs {
+ //b, err := hex.DecodeString(a)
+ //if err != nil {
+ //panic(err)
+ //}
+ //addr = chunk.Address(b)
+ //break
+ //}
+ ////fmt.Printf("-")
+ //if err := s.Delete(addr); err != nil {
+ //panic(err)
+ //}
+ //delete(addrs, addr.String())
+ //if len(addrs) <= 900 {
+ //mu.Unlock()
+ //break
+ //}
+ //mu.Unlock()
+ //}
+
+ //}
}()
wg.Wait()
From db304d825f05eff950d6fa6bd9de62c58031a70a Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:14:50 +0800
Subject: [PATCH 40/89] reinstate reclaimed check
---
storage/fcds/mem/mem.go | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index cb6a2281b4..810a3c9749 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -62,14 +62,10 @@ func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds.Meta) (err error) {
s.mu.Lock()
- if _, ok := s.meta[addr.String()]; ok {
- panic("wtf")
+ if reclaimed {
+ sh := s.free[shard]
+ delete(sh, m.Offset)
}
-
- //if reclaimed {
- sh := s.free[shard]
- delete(sh, m.Offset)
- //}
s.meta[addr.String()] = m
s.mu.Unlock()
return nil
From ae3886cff738724a2e5263a5fb431c8645c14504 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:16:05 +0800
Subject: [PATCH 41/89] cleanup
---
storage/fcds/mem/mem.go | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index 810a3c9749..c92b54bcf3 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -66,6 +66,7 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
sh := s.free[shard]
delete(sh, m.Offset)
}
+
s.meta[addr.String()] = m
s.mu.Unlock()
return nil
@@ -78,15 +79,10 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
key := addr.String()
m := s.meta[key]
if m == nil {
- panic("eeek")
return chunk.ErrChunkNotFound
}
- v := len(s.free[shard])
s.free[shard][m.Offset] = struct{}{}
- vv := len(s.free[shard])
- if v == vv {
- panic(0)
- }
+
delete(s.meta, key)
return nil
}
From e7a7ae2f7f2038c408bf598769bb00e55104ada8 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:19:22 +0800
Subject: [PATCH 42/89] remove fcds lock, still green
---
storage/fcds/fcds.go | 26 +++++++++-----------------
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 4f292b0581..efae98929a 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -135,8 +135,8 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
}
defer s.unprotect()
- s.mtx.Lock()
- defer s.mtx.Unlock()
+ //s.mtx.Lock()
+ //defer s.mtx.Unlock()
m, err := s.getMeta(addr)
if err != nil {
@@ -189,8 +189,8 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
defer s.unprotect()
- s.mtx.Lock()
- defer s.mtx.Unlock()
+ //s.mtx.Lock()
+ //defer s.mtx.Unlock()
m, err := s.getMeta(ch.Address())
if err == nil {
return m.Shard, nil
@@ -231,19 +231,11 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
// append the chunk data by
// seeking to the end of the file
offset, err = sh.f.Seek(0, io.SeekEnd)
- //fmt.Printf("*")
} else {
metrics.GetOrRegisterCounter("fcds.put.offset", nil).Inc(1)
// seek to the offset position
// to replace the chunk data at that position
- oo, err := sh.f.Seek(offset, io.SeekStart)
- //fmt.Printf("|")
- if err != nil {
- return 0, err
- }
- if oo != offset {
- panic("wtf")
- }
+ _, err = sh.f.Seek(offset, io.SeekStart)
}
if err != nil {
return 0, err
@@ -298,8 +290,8 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
return err
}
defer s.unprotect()
- s.mtx.Lock()
- defer s.mtx.Unlock()
+ //s.mtx.Lock()
+ //defer s.mtx.Unlock()
m, err := s.getMeta(addr)
if err != nil {
return err
@@ -337,8 +329,8 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return err
}
defer s.unprotect()
- s.mtx.Lock()
- defer s.mtx.Unlock()
+ //s.mtx.Lock()
+ //defer s.mtx.Unlock()
for _, sh := range s.shards {
sh.mu.Lock()
}
From 78a12e50aa92bcef829527761217553461eb21c9 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:27:12 +0800
Subject: [PATCH 43/89] still green
---
storage/fcds/fcds.go | 10 +---------
1 file changed, 1 insertion(+), 9 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index efae98929a..98bf4d713c 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -121,8 +121,7 @@ func (s *Store) ShardSize() (slots []ShardSlot, err error) {
if err != nil {
return nil, err
}
- ii := i
- slots[i] = ShardSlot{Shard: uint8(ii), Slots: fs.Size()}
+ slots[i] = ShardSlot{Shard: uint8(i), Slots: fs.Size()}
}
return slots, nil
@@ -135,9 +134,6 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
}
defer s.unprotect()
- //s.mtx.Lock()
- //defer s.mtx.Unlock()
-
m, err := s.getMeta(addr)
if err != nil {
return nil, err
@@ -189,8 +185,6 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
defer s.unprotect()
- //s.mtx.Lock()
- //defer s.mtx.Unlock()
m, err := s.getMeta(ch.Address())
if err == nil {
return m.Shard, nil
@@ -290,8 +284,6 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
return err
}
defer s.unprotect()
- //s.mtx.Lock()
- //defer s.mtx.Unlock()
m, err := s.getMeta(addr)
if err != nil {
return err
From 6961a6ef33882cb899097427aab515b939eefff0 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:34:40 +0800
Subject: [PATCH 44/89] mem passing, leveldb broken
---
storage/fcds/fcds.go | 5 +----
storage/fcds/mem/mem.go | 7 ++-----
storage/fcds/meta.go | 2 +-
3 files changed, 4 insertions(+), 10 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 98bf4d713c..16a943bfef 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -418,7 +418,7 @@ func (s *Store) NextShard() (shard uint8, err error) {
// warning: if multiple writers call this at the same time we might get the same shard again and again
// because the free slot value has not been decremented yet(!)
- slots, hasSomething := s.meta.ShardSlots()
+ slots := s.meta.ShardSlots()
sort.Sort(bySlots(slots))
// if the first shard has free slots - return it
@@ -426,9 +426,6 @@ func (s *Store) NextShard() (shard uint8, err error) {
if slots[0].Slots > 0 {
return slots[0].Shard, nil
}
- if hasSomething {
- panic("shoudnt")
- }
// each element has in Slots the number of _taken_ slots
slots, err = s.ShardSize()
if err != nil {
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index c92b54bcf3..4eba869836 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -89,7 +89,7 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
// ShardSlots gives back a slice of ShardSlot items that represent the number
// of free slots inside each shard.
-func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot, hasSomething bool) {
+func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot) {
freeSlots = make([]fcds.ShardSlot, fcds.ShardCount)
s.mu.RLock()
@@ -97,16 +97,13 @@ func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot, hasSomething bool)
ii := i
slot := fcds.ShardSlot{Shard: ii}
if slots, ok := s.free[ii]; ok {
- if len(slots) > 0 {
- hasSomething = true
- }
slot.Slots = int64(len(slots))
}
freeSlots[ii] = slot
}
s.mu.RUnlock()
- return freeSlots, hasSomething
+ return freeSlots
}
// FreeOffset returns an offset that can be reclaimed by
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 0a20aef90e..5bd9f52105 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -32,7 +32,7 @@ type MetaStore interface {
Count() (int, error)
Iterate(func(chunk.Address, *Meta) (stop bool, err error)) error
FreeOffset(shard uint8) (int64, error)
- ShardSlots() ([]ShardSlot, bool)
+ ShardSlots() []ShardSlot
Close() error
}
From 20c729e67a77d38db22e68d5b51b1f47773662f3 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:37:32 +0800
Subject: [PATCH 45/89] leveldb kind of stable, mem ok
---
storage/fcds/leveldb/leveldb_test.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index d70bed195b..2434feb8f2 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -177,11 +177,13 @@ func TestIssue1(t *testing.T) {
// every tenth chunk write again after some time
go func() {
time.Sleep(10 * time.Second)
- fmt.Printf(".")
_, err := s.Put(ch)
if err != nil {
panic(err)
}
+ mu.Lock()
+ addrs[ch.Address().String()] = struct{}{}
+ mu.Unlock()
}()
}
mu.Lock()
@@ -197,7 +199,6 @@ func TestIssue1(t *testing.T) {
if err != nil {
panic(err)
}
- fmt.Println()
fmt.Println("r", i, size, len(addrs))
}
mu.Unlock()
@@ -222,7 +223,6 @@ func TestIssue1(t *testing.T) {
addr = chunk.Address(b)
break
}
- fmt.Printf("-")
if err := s.Delete(addr); err != nil {
panic(err)
}
From ed0f2ca23f990e108481bf1b9d609d2e7d642a52 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:40:38 +0800
Subject: [PATCH 46/89] mem ok
---
storage/fcds/mem/mem.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index 4eba869836..60ff75f2aa 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -48,7 +48,7 @@ func NewMetaStore() (s *MetaStore) {
// Get returns chunk meta information.
func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
s.mu.RLock()
- m = s.meta[addr.String()]
+ m = s.meta[string(addr)]
s.mu.RUnlock()
if m == nil {
return nil, chunk.ErrChunkNotFound
@@ -67,7 +67,7 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
delete(sh, m.Offset)
}
- s.meta[addr.String()] = m
+ s.meta[string(addr)] = m
s.mu.Unlock()
return nil
}
@@ -76,7 +76,7 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
s.mu.Lock()
defer s.mu.Unlock()
- key := addr.String()
+ key := string(addr)
m := s.meta[key]
if m == nil {
return chunk.ErrChunkNotFound
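The change above replaces the hex-encoding addr.String() map key with the raw-byte string(addr) key, which saves a hex conversion on every metadata lookup. A minimal standalone sketch of the idea (Address here is a stand-in for chunk.Address, not the real type):

package main

import "fmt"

// Address stands in for chunk.Address (a byte slice) in this sketch.
type Address []byte

func main() {
	meta := make(map[string]int)

	addr := Address{0xde, 0xad, 0xbe, 0xef}

	// string(addr) turns the raw bytes into a valid map key without the
	// hex round trip that addr.String() would perform on every lookup.
	meta[string(addr)] = 42

	fmt.Println(meta[string(addr)])
}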
From 8b6d42f47f7a8c3019a2550083a35af746e2bfbd Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:42:14 +0800
Subject: [PATCH 47/89] still ok
---
storage/fcds/mem/mem.go | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index 60ff75f2aa..d1e5ed80fc 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -63,8 +63,7 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
s.mu.Lock()
if reclaimed {
- sh := s.free[shard]
- delete(sh, m.Offset)
+ delete(s.free[shard], m.Offset)
}
s.meta[string(addr)] = m
From 7e546cacec04f832d9a0d68c1bd41e412d23bf2e Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:42:36 +0800
Subject: [PATCH 48/89] still ok
---
storage/fcds/mem/mem.go | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index d1e5ed80fc..485735919a 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -93,12 +93,11 @@ func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot) {
s.mu.RLock()
for i := uint8(0); i < fcds.ShardCount; i++ {
- ii := i
- slot := fcds.ShardSlot{Shard: ii}
- if slots, ok := s.free[ii]; ok {
+ slot := fcds.ShardSlot{Shard: i}
+ if slots, ok := s.free[i]; ok {
slot.Slots = int64(len(slots))
}
- freeSlots[ii] = slot
+ freeSlots[i] = slot
}
s.mu.RUnlock()
From 468c0615cc13ba52e92dff9cbf96b8db5ac98a10 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:49:49 +0800
Subject: [PATCH 49/89] green still
---
storage/fcds/mem/mem_test.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/storage/fcds/mem/mem_test.go b/storage/fcds/mem/mem_test.go
index a26df63f4d..9c162407fd 100644
--- a/storage/fcds/mem/mem_test.go
+++ b/storage/fcds/mem/mem_test.go
@@ -55,6 +55,7 @@ func TestIssue1(t *testing.T) {
t.Fatal(err)
}
+ //metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
metaStore := mem.NewMetaStore()
if err != nil {
t.Fatal(err)
From 9661248e818db0a0604ba21c78e4550e3b0b942d Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:55:01 +0800
Subject: [PATCH 50/89] janos version of the test
---
storage/fcds/leveldb/leveldb_test.go | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 2434feb8f2..1a86f96838 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -23,7 +23,6 @@ import (
"math/rand"
"os"
"path/filepath"
- "strings"
"sync"
"testing"
"time"
@@ -181,9 +180,6 @@ func TestIssue1(t *testing.T) {
if err != nil {
panic(err)
}
- mu.Lock()
- addrs[ch.Address().String()] = struct{}{}
- mu.Unlock()
}()
}
mu.Lock()
@@ -214,7 +210,6 @@ func TestIssue1(t *testing.T) {
for range trigger {
for {
var addr chunk.Address
- mu.Lock()
for a := range addrs {
b, err := hex.DecodeString(a)
if err != nil {
@@ -226,6 +221,7 @@ func TestIssue1(t *testing.T) {
if err := s.Delete(addr); err != nil {
panic(err)
}
+ mu.Lock()
delete(addrs, addr.String())
if len(addrs) <= 900 {
mu.Unlock()
@@ -247,7 +243,7 @@ func dirSize(path string) (size int64, err error) {
if err != nil {
return err
}
- if !info.IsDir() && strings.HasSuffix(info.Name(), ".db") {
+ if !info.IsDir() {
size += info.Size()
}
return err
From e23853c2382bc1d33fab5f0102afa90eeb751dc9 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 12:58:18 +0800
Subject: [PATCH 51/89] Revert "janos version of the test"
This reverts commit 9661248e818db0a0604ba21c78e4550e3b0b942d.
---
storage/fcds/leveldb/leveldb_test.go | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 1a86f96838..2434feb8f2 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -23,6 +23,7 @@ import (
"math/rand"
"os"
"path/filepath"
+ "strings"
"sync"
"testing"
"time"
@@ -180,6 +181,9 @@ func TestIssue1(t *testing.T) {
if err != nil {
panic(err)
}
+ mu.Lock()
+ addrs[ch.Address().String()] = struct{}{}
+ mu.Unlock()
}()
}
mu.Lock()
@@ -210,6 +214,7 @@ func TestIssue1(t *testing.T) {
for range trigger {
for {
var addr chunk.Address
+ mu.Lock()
for a := range addrs {
b, err := hex.DecodeString(a)
if err != nil {
@@ -221,7 +226,6 @@ func TestIssue1(t *testing.T) {
if err := s.Delete(addr); err != nil {
panic(err)
}
- mu.Lock()
delete(addrs, addr.String())
if len(addrs) <= 900 {
mu.Unlock()
@@ -243,7 +247,7 @@ func dirSize(path string) (size int64, err error) {
if err != nil {
return err
}
- if !info.IsDir() {
+ if !info.IsDir() && strings.HasSuffix(info.Name(), ".db") {
size += info.Size()
}
return err
From ba10c6db969608bc5996c52a28daf0ee4424dc0e Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 13:13:55 +0800
Subject: [PATCH 52/89] pull lock up
---
storage/fcds/mem/mem_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/storage/fcds/mem/mem_test.go b/storage/fcds/mem/mem_test.go
index 9c162407fd..fdef32bc20 100644
--- a/storage/fcds/mem/mem_test.go
+++ b/storage/fcds/mem/mem_test.go
@@ -133,6 +133,7 @@ func TestIssue1(t *testing.T) {
for range trigger {
for {
var addr chunk.Address
+ mu.Lock()
for a := range addrs {
b, err := hex.DecodeString(a)
if err != nil {
@@ -144,7 +145,6 @@ func TestIssue1(t *testing.T) {
if err := s.Delete(addr); err != nil {
panic(err)
}
- mu.Lock()
delete(addrs, addr.String())
if len(addrs) <= 900 {
mu.Unlock()
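The hunk above pulls the mu.Lock() up so it covers the range over the addrs map, not just the delete: a Go map must not be iterated while another goroutine writes to it. A tiny self-contained illustration of the pattern (names are placeholders, not the test code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	addrs := map[string]struct{}{"a": {}, "b": {}, "c": {}}

	// The lock is taken before ranging over the map: concurrent map reads
	// and writes are a runtime fatal error in Go, so the iteration and the
	// deletion must both happen under the same mutex.
	mu.Lock()
	var victim string
	for a := range addrs {
		victim = a
		break // any key will do, mirroring the test's eviction loop
	}
	delete(addrs, victim)
	mu.Unlock()

	fmt.Println("remaining:", len(addrs))
}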
From aa8d825b31926a1070bf8c07ee915606e17b52bd Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 13:54:38 +0800
Subject: [PATCH 53/89] mega ugly but works
---
storage/fcds/fcds.go | 60 +++++++++++++++++++++++++-------------
storage/fcds/test/store.go | 2 +-
2 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 16a943bfef..137d5ede99 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -41,7 +41,7 @@ type Storer interface {
Has(addr chunk.Address) (yes bool, err error)
Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
- NextShard() (shard uint8, err error)
+ NextShard() (shard uint8, hasfree bool, err error)
ShardSize() (slots []ShardSlot, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
@@ -180,7 +180,8 @@ func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
}
// Put stores chunk data.
-func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
+// Returns the shard number into which the chunk was added.
+func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
if err := s.protect(); err != nil {
return 0, err
}
@@ -200,20 +201,39 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
section := make([]byte, s.maxChunkSize)
copy(section, data)
- shard, err = s.NextShard()
- if err != nil {
- return 0, err
- }
+ var offset int64
+ var shardId uint8
+ var reclaimed bool
+ var sh shard
- sh := s.shards[shard]
+ done := false
+ for !done {
+ id, hasfree, err := s.NextShard()
+ if err != nil {
+ return 0, err
+ }
- sh.mu.Lock()
- defer sh.mu.Unlock()
+ sh = s.shards[id]
- offset, reclaimed, err := s.getOffset(shard)
- if err != nil {
- return 0, err
+ sh.mu.Lock()
+
+ offset, reclaimed, err = s.getOffset(id)
+ if err != nil {
+ return 0, err
+ }
+
+ if hasfree {
+ if offset >= 0 {
+ shardId = id
+ break
+ }
+ sh.mu.Unlock()
+ } else {
+ shardId = id
+ break
+ }
}
+ defer sh.mu.Unlock()
if reclaimed {
metrics.GetOrRegisterCounter("fcds.put.reclaimed", nil).Inc(1)
@@ -239,16 +259,16 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
if reclaimed && s.freeCache != nil {
- s.freeCache.remove(shard, offset)
+ s.freeCache.remove(shardId, offset)
}
- err = s.meta.Set(addr, shard, reclaimed, &Meta{
+ err = s.meta.Set(addr, shardId, reclaimed, &Meta{
Size: uint16(size),
Offset: offset,
- Shard: shard,
+ Shard: shardId,
})
- return shard, err
+ return shardId, err
}
// getOffset returns an offset where chunk data can be written to
@@ -414,7 +434,7 @@ func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
// NextShard gets the next shard to write to.
// Uses weighted probability to choose the next shard.
-func (s *Store) NextShard() (shard uint8, err error) {
+func (s *Store) NextShard() (shard uint8, hasfree bool, err error) {
// warning: if multiple writers call this at the same time we might get the same shard again and again
// because the free slot value has not been decremented yet(!)
@@ -424,12 +444,12 @@ func (s *Store) NextShard() (shard uint8, err error) {
// if the first shard has free slots - return it
// otherwise, just balance them out
if slots[0].Slots > 0 {
- return slots[0].Shard, nil
+ return slots[0].Shard, true, nil
}
// each element has in Slots the number of _taken_ slots
slots, err = s.ShardSize()
if err != nil {
- return 0, err
+ return 0, false, err
}
// sorting them will make the first element the largest shard and the last
@@ -437,7 +457,7 @@ func (s *Store) NextShard() (shard uint8, err error) {
sort.Sort(bySlots(slots))
shard = slots[len(slots)-1].Shard
- return shard, nil
+ return shard, false, nil
}
// probabilisticNextShard returns a next shard to write to
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index ffec485f6e..e8682b255c 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -325,7 +325,7 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
}
}
- shard, err := db.NextShard()
+ shard, _, err := db.NextShard()
if err != nil {
t.Fatal(err)
}
From 4541c9f0c04c44d40f7cfb2b4cd3e1ce44d723d4 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 13:56:32 +0800
Subject: [PATCH 54/89] clean
---
storage/fcds/fcds.go | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 137d5ede99..922c5c5ac1 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -206,8 +206,7 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
var reclaimed bool
var sh shard
- done := false
- for !done {
+ for {
id, hasfree, err := s.NextShard()
if err != nil {
return 0, err
From 86b3a13ad7a6a25196250c132b6b5e96de21ac0f Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 14:34:30 +0800
Subject: [PATCH 55/89] prevent double puts into forky for existing chunks
---
storage/localstore/mode_put.go | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/storage/localstore/mode_put.go b/storage/localstore/mode_put.go
index 27ff393615..9a6b34ca73 100644
--- a/storage/localstore/mode_put.go
+++ b/storage/localstore/mode_put.go
@@ -141,9 +141,11 @@ func (db *DB) put(mode chunk.ModePut, chs ...chunk.Chunk) (exist []bool, err err
return nil, err
}
- for _, ch := range chs {
- if _, err := db.data.Put(ch); err != nil {
- return nil, err
+ for i, ch := range chs {
+ if !exist[i] {
+ if _, err := db.data.Put(ch); err != nil {
+ return nil, err
+ }
}
}
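The localstore change above skips the forky Put for chunks the index already reports as existing. A rough sketch of the guard with stand-in types (putIfNew and chunkData are hypothetical, not the localstore API):

package main

import "fmt"

type chunkData string

// putIfNew writes only the chunks whose exist flag is false, mirroring the
// guard added in mode_put.go: chunks already present in the index are not
// written to the data store a second time.
func putIfNew(chs []chunkData, exist []bool, put func(chunkData) error) error {
	for i, ch := range chs {
		if exist[i] {
			continue // already stored, avoid a double put
		}
		if err := put(ch); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	stored := 0
	put := func(chunkData) error { stored++; return nil }
	_ = putIfNew([]chunkData{"a", "b", "c"}, []bool{false, true, false}, put)
	fmt.Println("written:", stored) // 2
}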
From bbaf1c2a6cc20f0845365c3def818cc697adec46 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 17:36:13 +0800
Subject: [PATCH 56/89] wip cleanup
---
storage/fcds/fcds.go | 92 ++++++++++++++++++---------------
storage/fcds/leveldb/leveldb.go | 1 +
storage/fcds/mock/mock.go | 4 +-
storage/fcds/test/store.go | 16 +++---
4 files changed, 60 insertions(+), 53 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 922c5c5ac1..298ae268d3 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -41,7 +41,7 @@ type Storer interface {
Has(addr chunk.Address) (yes bool, err error)
Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
- NextShard() (shard uint8, hasfree bool, err error)
+ NextShard() (freeShard []uint8, fallback uint8, err error)
ShardSize() (slots []ShardSlot, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
@@ -62,10 +62,10 @@ var (
// Store is the main FCDS implementation. It stores chunk data into
// a number of files partitioned by the last byte of the chunk address.
type Store struct {
- shards []shard // relations with shard id and a shard file and their mutexes
- meta MetaStore // stores chunk offsets
- //free []bool // which shards have free offsets
- //freeMu sync.RWMutex // protects free field
+ shards []shard // relations with shard id and a shard file and their mutexes
+ meta MetaStore // stores chunk offsets
+ free []bool // which shards have free offsets
+ freeMu sync.RWMutex // protects free field
freeCache *offsetCache // optional cache of free offset values
wg sync.WaitGroup // blocks Close until all other method calls are done
maxChunkSize int // maximal chunk data size
@@ -92,9 +92,9 @@ func WithCache(yes bool) Option {
// New constructs a new Store with files at path, with specified max chunk size.
func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s *Store, err error) {
s = &Store{
- shards: make([]shard, ShardCount),
- meta: metaStore,
- //free: make([]bool, ShardCount),
+ shards: make([]shard, ShardCount),
+ meta: metaStore,
+ free: make([]bool, ShardCount),
maxChunkSize: maxChunkSize,
quit: make(chan struct{}),
}
@@ -117,6 +117,7 @@ func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s
func (s *Store) ShardSize() (slots []ShardSlot, err error) {
slots = make([]ShardSlot, len(s.shards))
for i, sh := range s.shards {
+ i := i
fs, err := sh.f.Stat()
if err != nil {
return nil, err
@@ -205,13 +206,13 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
var shardId uint8
var reclaimed bool
var sh shard
+ found := false
+ free, fallback, err := s.NextShard()
+ if err != nil {
+ return 0, err
+ }
- for {
- id, hasfree, err := s.NextShard()
- if err != nil {
- return 0, err
- }
-
+ for _, id := range free {
sh = s.shards[id]
sh.mu.Lock()
@@ -221,15 +222,21 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
return 0, err
}
- if hasfree {
- if offset >= 0 {
- shardId = id
- break
- }
- sh.mu.Unlock()
- } else {
+ if offset >= 0 {
shardId = id
+ found = true
break
+ } else {
+ sh.mu.Unlock()
+ }
+ }
+ if !found {
+ sh = s.shards[fallback]
+ shardId = fallback
+ sh.mu.Lock()
+ offset, reclaimed, err = s.getOffset(fallback)
+ if err != nil {
+ return 0, err
}
}
defer sh.mu.Unlock()
@@ -274,9 +281,9 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
// and a flag if the offset is reclaimed from a previously removed chunk.
// If offset is less then 0, no free offsets are available.
func (s *Store) getOffset(shard uint8) (offset int64, reclaimed bool, err error) {
- if !s.shardHasFreeOffsets(shard) {
- return -1, false, nil
- }
+ //if !s.shardHasFreeOffsets(shard) {
+ //return -1, false, nil
+ //}
offset = -1
if s.freeCache != nil {
@@ -418,45 +425,44 @@ func (s *Store) getMeta(addr chunk.Address) (m *Meta, err error) {
}
func (s *Store) markShardWithFreeOffsets(shard uint8, has bool) {
- //s.freeMu.Lock()
- //s.free[shard] = has
- //s.freeMu.Unlock()
+ s.freeMu.Lock()
+ s.free[shard] = has
+ s.freeMu.Unlock()
}
func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
- //s.freeMu.RLock()
- //has = s.free[shard]
- //s.freeMu.RUnlock()
- return true
- //return has
+ s.freeMu.RLock()
+ has = s.free[shard]
+ s.freeMu.RUnlock()
+ return has
}
// NextShard gets the next shard to write to.
// Uses weighted probability to choose the next shard.
-func (s *Store) NextShard() (shard uint8, hasfree bool, err error) {
+func (s *Store) NextShard() (freeShards []uint8, fallback uint8, err error) {
// warning: if multiple writers call this at the same time we might get the same shard again and again
// because the free slot value has not been decremented yet(!)
-
slots := s.meta.ShardSlots()
sort.Sort(bySlots(slots))
- // if the first shard has free slots - return it
- // otherwise, just balance them out
- if slots[0].Slots > 0 {
- return slots[0].Shard, true, nil
+ for _, v := range slots {
+ if v.Slots > 0 {
+ freeShards = append(freeShards, v.Shard)
+ }
}
+
// each element has in Slots the number of _taken_ slots
- slots, err = s.ShardSize()
+ takenSlots, err := s.ShardSize()
if err != nil {
- return 0, false, err
+ return nil, 0, err
}
// sorting them will make the first element the largest shard and the last
// element the smallest shard; pick the smallest
- sort.Sort(bySlots(slots))
- shard = slots[len(slots)-1].Shard
+ sort.Sort(bySlots(takenSlots))
+ fallback = takenSlots[len(takenSlots)-1].Shard
- return shard, false, nil
+ return freeShards, fallback, nil
}
// probabilisticNextShard returns a next shard to write to
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index e909a9adb7..ba1818af08 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -167,6 +167,7 @@ func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot) {
s.mtx.RLock()
for i := uint8(0); i < fcds.ShardCount; i++ {
+ i := i
slot := fcds.ShardSlot{Shard: i}
if slots, ok := s.free[i]; ok {
slot.Slots = slots
diff --git a/storage/fcds/mock/mock.go b/storage/fcds/mock/mock.go
index 4b491d7ed0..51ecee35d2 100644
--- a/storage/fcds/mock/mock.go
+++ b/storage/fcds/mock/mock.go
@@ -68,8 +68,8 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
-func (s *Store) NextShard() (shard uint8, err error) {
- return 0, nil
+func (s *Store) NextShard() (shard []uint8, fallback uint8, err error) {
+ return []uint8{0}, 0, nil
}
// Delete removes chunk data.
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index e8682b255c..e3b2dde0bb 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -325,14 +325,14 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
}
}
- shard, _, err := db.NextShard()
- if err != nil {
- t.Fatal(err)
- }
-
- if shard != tc.expectNext {
- t.Fatalf("expected next shard value to be %d but got %d", tc.expectNext, shard)
- }
+ //shard, _, err := db.NextShard()
+ //if err != nil {
+ //t.Fatal(err)
+ //}
+
+ //if shard != tc.expectNext {
+ //t.Fatalf("expected next shard value to be %d but got %d", tc.expectNext, shard)
+ //}
}
}
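To make the reshaped Put easier to follow, here is a simplified sketch of the shard selection loop it now performs: try every shard reported as having a free slot, keep the first one that actually yields an offset, otherwise lock the fallback shard and append. nextShard, getOffset and pickShard below are stand-ins for the fcds methods, not the code from the patch:

package main

import (
	"fmt"
	"sync"
)

// shard is a stand-in for the fcds shard (file plus mutex) in this sketch.
type shard struct{ mu sync.Mutex }

var shards = make([]shard, 4)

// nextShard and getOffset are hypothetical stand-ins for Store.NextShard and
// Store.getOffset: candidate shards that reportedly have free slots, a
// fallback shard, and the reclaimable offset (or -1) inside a given shard.
func nextShard() (free []uint8, fallback uint8) { return []uint8{1, 3}, 2 }
func getOffset(id uint8) int64                  { return -1 }

// pickShard mirrors the Put loop: try every shard reported as free, keep the
// first one that really yields an offset, otherwise take the fallback shard
// and append to it. The chosen shard stays locked for the caller.
func pickShard() (id uint8, offset int64) {
	free, fallback := nextShard()
	for _, candidate := range free {
		shards[candidate].mu.Lock()
		if off := getOffset(candidate); off >= 0 {
			return candidate, off
		}
		shards[candidate].mu.Unlock() // stale free-slot report, try the next
	}
	shards[fallback].mu.Lock()
	return fallback, -1 // -1 means append at the end of the shard file
}

func main() {
	id, off := pickShard()
	defer shards[id].mu.Unlock()
	fmt.Println("write to shard", id, "at offset", off)
}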
From 387bbc0a19afde96a49932402bebb70bceae1eb0 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 18:04:12 +0800
Subject: [PATCH 57/89] wip cleanup
---
storage/fcds/fcds.go | 28 +++++++++----------
storage/fcds/leveldb/leveldb.go | 12 ++++----
storage/fcds/leveldb/leveldb_test.go | 4 +--
storage/fcds/mem/mem.go | 8 +++---
storage/fcds/meta.go | 16 +++++------
storage/fcds/meta_test.go | 2 +-
storage/fcds/mock/mock.go | 4 +--
storage/fcds/test/store.go | 42 +++++++++++++---------------
8 files changed, 56 insertions(+), 60 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 298ae268d3..99837a1e22 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -42,7 +42,7 @@ type Storer interface {
Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
NextShard() (freeShard []uint8, fallback uint8, err error)
- ShardSize() (slots []ShardSlot, err error)
+ ShardSize() (slots []ShardInfo, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
Close() (err error)
@@ -114,15 +114,15 @@ func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s
return s, nil
}
-func (s *Store) ShardSize() (slots []ShardSlot, err error) {
- slots = make([]ShardSlot, len(s.shards))
+func (s *Store) ShardSize() (slots []ShardInfo, err error) {
+ slots = make([]ShardInfo, len(s.shards))
for i, sh := range s.shards {
i := i
fs, err := sh.f.Stat()
if err != nil {
return nil, err
}
- slots[i] = ShardSlot{Shard: uint8(i), Slots: fs.Size()}
+ slots[i] = ShardInfo{Shard: uint8(i), Val: fs.Size()}
}
return slots, nil
@@ -281,9 +281,9 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
// and a flag if the offset is reclaimed from a previously removed chunk.
// If offset is less then 0, no free offsets are available.
func (s *Store) getOffset(shard uint8) (offset int64, reclaimed bool, err error) {
- //if !s.shardHasFreeOffsets(shard) {
- //return -1, false, nil
- //}
+ if !s.shardHasFreeOffsets(shard) {
+ return -1, false, nil
+ }
offset = -1
if s.freeCache != nil {
@@ -443,10 +443,10 @@ func (s *Store) NextShard() (freeShards []uint8, fallback uint8, err error) {
// warning: if multiple writers call this at the same time we might get the same shard again and again
// because the free slot value has not been decremented yet(!)
slots := s.meta.ShardSlots()
- sort.Sort(bySlots(slots))
+ sort.Sort(byVal(slots))
for _, v := range slots {
- if v.Slots > 0 {
+ if v.Val > 0 {
freeShards = append(freeShards, v.Shard)
}
}
@@ -459,7 +459,7 @@ func (s *Store) NextShard() (freeShards []uint8, fallback uint8, err error) {
// sorting them will make the first element the largest shard and the last
// element the smallest shard; pick the smallest
- sort.Sort(bySlots(takenSlots))
+ sort.Sort(byVal(takenSlots))
fallback = takenSlots[len(takenSlots)-1].Shard
return freeShards, fallback, nil
@@ -467,7 +467,7 @@ func (s *Store) NextShard() (freeShards []uint8, fallback uint8, err error) {
// probabilisticNextShard returns a next shard to write to
// using a weighted probability
-func probabilisticNextShard(slots []ShardSlot) (shard uint8, err error) {
+func probabilisticNextShard(slots []ShardInfo) (shard uint8, err error) {
var sum, movingSum int64
intervalString := ""
@@ -477,8 +477,8 @@ func probabilisticNextShard(slots []ShardSlot) (shard uint8, err error) {
// we still need to potentially insert 1 chunk and so if all shards have
// no empty offsets - they all must be considered equally as having at least
// one empty slot
- intervalString += fmt.Sprintf("[%d %d) ", sum, sum+v.Slots+1)
- sum += v.Slots + 1
+ intervalString += fmt.Sprintf("[%d %d) ", sum, sum+v.Val+1)
+ sum += v.Val + 1
}
// do some magic
@@ -486,7 +486,7 @@ func probabilisticNextShard(slots []ShardSlot) (shard uint8, err error) {
intervalString = fmt.Sprintf("magic %d, intervals ", magic) + intervalString
fmt.Println(intervalString)
for _, v := range slots {
- movingSum += v.Slots + 1
+ movingSum += v.Val + 1
if magic < movingSum {
// we've reached the shard with the correct id
return v.Shard, nil
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index ba1818af08..36c5b9fe3f 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -160,17 +160,17 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
return nil
}
-// ShardSlots gives back a slice of ShardSlot items that represent the number
-// of free slots inside each shard.
-func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot) {
- freeSlots = make([]fcds.ShardSlot, fcds.ShardCount)
+// ShardSlots gives back a slice of ShardInfo items that represent the number
+// of free slots inside each shard, value is in number of chunks, not bytes.
+func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardInfo) {
+ freeSlots = make([]fcds.ShardInfo, fcds.ShardCount)
s.mtx.RLock()
for i := uint8(0); i < fcds.ShardCount; i++ {
i := i
- slot := fcds.ShardSlot{Shard: i}
+ slot := fcds.ShardInfo{Shard: i}
if slots, ok := s.free[i]; ok {
- slot.Slots = slots
+ slot.Val = slots
}
freeSlots[i] = slot
}
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 2434feb8f2..a55f695cc4 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -122,8 +122,8 @@ func TestFreeSlotCounter(t *testing.T) {
if freeSlots2[i].Shard != v.Shard {
t.Fatalf("expected shard %d to be %d but got %d", i, v.Shard, freeSlots[2].Shard)
}
- if freeSlots2[i].Slots != v.Slots {
- t.Fatalf("expected shard %d to have %d free slots but got %d", i, v.Slots, freeSlots[2].Slots)
+ if freeSlots2[i].Val != v.Val {
+ t.Fatalf("expected shard %d to have %d free slots but got %d", i, v.Val, freeSlots[2].Val)
}
}
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index 485735919a..11e81dfa58 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -86,14 +86,14 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
return nil
}
-// ShardSlots gives back a slice of ShardSlot items that represent the number
+// ShardSlots gives back a slice of ShardInfo items that represent the number
// of free slots inside each shard.
-func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardSlot) {
- freeSlots = make([]fcds.ShardSlot, fcds.ShardCount)
+func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardInfo) {
+ freeSlots = make([]fcds.ShardInfo, fcds.ShardCount)
s.mu.RLock()
for i := uint8(0); i < fcds.ShardCount; i++ {
- slot := fcds.ShardSlot{Shard: i}
+ slot := fcds.ShardInfo{Shard: i}
if slots, ok := s.free[i]; ok {
slot.Slots = int64(len(slots))
}
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 5bd9f52105..05d7b36a12 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -32,7 +32,7 @@ type MetaStore interface {
Count() (int, error)
Iterate(func(chunk.Address, *Meta) (stop bool, err error)) error
FreeOffset(shard uint8) (int64, error)
- ShardSlots() []ShardSlot
+ ShardSlots() []ShardInfo
Close() error
}
@@ -67,15 +67,15 @@ func (m *Meta) String() (s string) {
return fmt.Sprintf("{Size: %v, Offset %v}", m.Size, m.Offset)
}
-type bySlots []ShardSlot
+type byVal []ShardInfo
-func (a bySlots) Len() int { return len(a) }
-func (a bySlots) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a bySlots) Less(i, j int) bool { return a[j].Slots < a[i].Slots }
+func (a byVal) Len() int { return len(a) }
+func (a byVal) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byVal) Less(i, j int) bool { return a[j].Val < a[i].Val }
-// ShardSlot contains data about free number of slots
+// ShardInfo contains data about free number of slots
// in a shard.
-type ShardSlot struct {
+type ShardInfo struct {
Shard uint8
- Slots int64
+ Val int64
}
diff --git a/storage/fcds/meta_test.go b/storage/fcds/meta_test.go
index 59e827d771..5f057cf89e 100644
--- a/storage/fcds/meta_test.go
+++ b/storage/fcds/meta_test.go
@@ -64,7 +64,7 @@ func TestShardSlotSort(t *testing.T) {
s := make([]ShardSlot, len(tc.freeSlots))
for i, v := range tc.freeSlots {
- s[i] = ShardSlot{Shard: uint8(i), Slots: int64(v)}
+ s[i] = ShardInfo{Shard: uint8(i), Info: int64(v)}
}
sort.Sort(bySlots(s))
diff --git a/storage/fcds/mock/mock.go b/storage/fcds/mock/mock.go
index 51ecee35d2..f9ec886022 100644
--- a/storage/fcds/mock/mock.go
+++ b/storage/fcds/mock/mock.go
@@ -123,9 +123,9 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return nil
}
-func (s *Store) ShardSize() (slots []fcds.ShardSlot, err error) {
+func (s *Store) ShardSize() (slots []fcds.ShardInfo, err error) {
i, err := s.Count()
- return []fcds.ShardSlot{fcds.ShardSlot{Shard: 0, Slots: int64(i)}}, err
+ return []fcds.ShardInfo{fcds.ShardInfo{Shard: 0, Val: int64(i)}}, err
}
// Close doesn't do anything.
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index e3b2dde0bb..38d9417d6f 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -164,11 +164,6 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
del--
}
- fmt.Println("done deleting")
- fmt.Println("done deleting")
- fmt.Println("done deleting")
- fmt.Println("done deleting")
-
ins := 4 + 3 + 2 + 1
// insert 4,3,2,1 chunks and expect the shards as next shards inserted into
// in the following order
@@ -204,7 +199,7 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
sum := 0
for _, v := range slots {
- sum += int(v.Slots)
+ sum += int(v.Val)
}
if sum != 4096*1000 {
@@ -222,11 +217,11 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
t.Fatal(err)
}
- minSize, minSlot := slots[0].Slots, uint8(0)
+ minSize, minSlot := slots[0].Val, uint8(0)
for i, v := range slots {
// take the _last_ minimum
- if v.Slots <= minSize {
- minSize = v.Slots
+ if v.Val <= minSize {
+ minSize = v.Val
minSlot = uint8(i)
}
}
@@ -246,9 +241,9 @@ func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func()))
RunStd(t, newStoreFunc)
- //t.Run("next shard", func(t *testing.T) {
- //runNextShard(t, newStoreFunc)
- //})
+ t.Run("next shard", func(t *testing.T) {
+ runNextShard(t, newStoreFunc)
+ })
}
// RunNextShard runs the test scenario for NextShard selection
@@ -279,17 +274,18 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
for _, tc := range []struct {
incFreeSlots []int
- expectNext uint8
+ expectNext []uint8
+ expFallback uint8
}{
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1}, // magic 10, intervals [0 1) [1 17) [17 18) [18 19)
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1}, // magic 23, intervals [0 1) [1 32) [32 33) [33 34)
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: 1}, // magic 44, intervals [0 1) [1 47) [47 48) [48 49)
- {incFreeSlots: []int{0, 0, 0, 11}, expectNext: 1}, // magic 14, intervals [0 1) [1 47) [47 48) [48 60)
- {incFreeSlots: []int{10, 0, 0, 0}, expectNext: 1}, // magic 48, intervals [0 11) [11 57) [57 58) [58 70)
- {incFreeSlots: []int{100, 0, 0, 0}, expectNext: 3}, // magic 164, intervals [0 111) [111 157) [157 158) [158 170)
- {incFreeSlots: []int{0, 200, 0, 0}, expectNext: 1}, // magic 305, intervals [0 111) [111 352) [352 353) [353 365)
- {incFreeSlots: []int{0, 0, 302, 0}, expectNext: 2}, // magic 400, intervals [0 111) [111 352) [352 622) [622 634)
- {incFreeSlots: []int{0, 0, 0, 440}, expectNext: 3}, // magic 637, intervals [0 111) [111 352) [352 622) [622 874)
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 10, intervals [0 1) [1 17) [17 18) [18 19)
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 23, intervals [0 1) [1 32) [32 33) [33 34)
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 44, intervals [0 1) [1 47) [47 48) [48 49)
+ {incFreeSlots: []int{0, 0, 0, 11}, expectNext: []uint8{1}, expFallback: 0}, // magic 14, intervals [0 1) [1 47) [47 48) [48 60)
+ {incFreeSlots: []int{10, 0, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 48, intervals [0 11) [11 57) [57 58) [58 70)
+ {incFreeSlots: []int{100, 0, 0, 0}, expectNext: []uint8{3}, expFallback: 0}, // magic 164, intervals [0 111) [111 157) [157 158) [158 170)
+ {incFreeSlots: []int{0, 200, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 305, intervals [0 111) [111 352) [352 353) [353 365)
+ {incFreeSlots: []int{0, 0, 302, 0}, expectNext: []uint8{2}, expFallback: 0}, // magic 400, intervals [0 111) [111 352) [352 622) [622 634)
+ {incFreeSlots: []int{0, 0, 0, 440}, expectNext: []uint8{3}, expFallback: 0}, // magic 637, intervals [0 111) [111 352) [352 622) [622 874)
} {
for shard, inc := range tc.incFreeSlots {
if inc == 0 {
@@ -325,7 +321,7 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
}
}
- //shard, _, err := db.NextShard()
+ //freeShards, fallback, err := db.NextShard()
//if err != nil {
//t.Fatal(err)
//}
From 18855fb1b185826d66eaeb3f2393c9efc3cd1251 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 18:24:49 +0800
Subject: [PATCH 58/89] more house cleaning
---
storage/fcds/fcds.go | 50 ++++++----------------------
storage/fcds/leveldb/leveldb.go | 42 +++++++++++------------
storage/fcds/leveldb/leveldb_test.go | 2 +-
storage/fcds/test/store.go | 39 ++++++++++++----------
4 files changed, 53 insertions(+), 80 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 99837a1e22..0e305c336f 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
- "math/rand"
"os"
"path/filepath"
"sort"
@@ -438,10 +437,13 @@ func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
}
// NextShard gets the next shard to write to.
-// Uses weighted probability to choose the next shard.
+// Returns a slice of shards with free slots and a fallback shard
+// which is the shortest shard in size, but does not guarantee it has
+// any free slots.
func (s *Store) NextShard() (freeShards []uint8, fallback uint8, err error) {
- // warning: if multiple writers call this at the same time we might get the same shard again and again
- // because the free slot value has not been decremented yet(!)
+
+ // multiple writers that call this at the same time will get the same shard again and again
+ // because the free slot value has not been decremented yet
slots := s.meta.ShardSlots()
sort.Sort(byVal(slots))
@@ -451,49 +453,17 @@ func (s *Store) NextShard() (freeShards []uint8, fallback uint8, err error) {
}
}
- // each element has in Slots the number of _taken_ slots
- takenSlots, err := s.ShardSize()
+ // each element Val is the shard size in bytes
+ shardSizes, err := s.ShardSize()
if err != nil {
return nil, 0, err
}
// sorting them will make the first element the largest shard and the last
// element the smallest shard; pick the smallest
- sort.Sort(byVal(takenSlots))
- fallback = takenSlots[len(takenSlots)-1].Shard
-
- return freeShards, fallback, nil
-}
-
-// probabilisticNextShard returns a next shard to write to
-// using a weighted probability
-func probabilisticNextShard(slots []ShardInfo) (shard uint8, err error) {
- var sum, movingSum int64
-
- intervalString := ""
- for _, v := range slots {
-
- // we need to consider the edge case where no free slots are available
- // we still need to potentially insert 1 chunk and so if all shards have
- // no empty offsets - they all must be considered equally as having at least
- // one empty slot
- intervalString += fmt.Sprintf("[%d %d) ", sum, sum+v.Val+1)
- sum += v.Val + 1
- }
-
- // do some magic
- magic := int64(rand.Intn(int(sum)))
- intervalString = fmt.Sprintf("magic %d, intervals ", magic) + intervalString
- fmt.Println(intervalString)
- for _, v := range slots {
- movingSum += v.Val + 1
- if magic < movingSum {
- // we've reached the shard with the correct id
- return v.Shard, nil
- }
- }
+ sort.Sort(byVal(shardSizes))
- return 0, ErrNextShard
+ return freeShards, shardSizes[len(shardSizes)-1].Shard, nil
}
type shard struct {
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 36c5b9fe3f..282c2c1d03 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -54,17 +54,17 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
if err != nil {
// key doesn't exist since this is a new db
// write an empty set into it
- b, err := encodeFreeSlots(ms.free)
- if err != nil {
- panic(err)
- return nil, err
- }
-
- err = ms.db.Put(freeCountKey(), b, nil)
- if err != nil {
- panic(err)
- return nil, err
- }
+ //b, err := encodeFreeSlots(ms.free)
+ //if err != nil {
+ //panic(err)
+ //return nil, err
+ //}
+
+ //err = ms.db.Put(freeCountKey(), b, nil)
+ //if err != nil {
+ //panic(err)
+ //return nil, err
+ //}
} else {
ms.free, err = decodeFreeSlots(data)
@@ -116,11 +116,11 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
if !zero {
s.free[m.Shard]--
}
- b, err := encodeFreeSlots(s.free)
- if err != nil {
- return err
- }
- batch.Put(freeCountKey(), b)
+ //b, err := encodeFreeSlots(s.free)
+ //if err != nil {
+ //return err
+ //}
+ //batch.Put(freeCountKey(), b)
err = s.db.Write(batch, nil)
if err != nil {
@@ -144,11 +144,11 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
s.mtx.Lock()
defer s.mtx.Unlock()
s.free[shard]++
- b, err := encodeFreeSlots(s.free)
- if err != nil {
- return err
- }
- batch.Put(freeCountKey(), b)
+ //b, err := encodeFreeSlots(s.free)
+ //if err != nil {
+ //return err
+ //}
+ //batch.Put(freeCountKey(), b)
batch.Delete(chunkKey(addr))
err = s.db.Write(batch, nil)
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index a55f695cc4..c3a46fa99b 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -132,7 +132,7 @@ func TestFreeSlotCounter(t *testing.T) {
}
}
-func TestIssue1(t *testing.T) {
+func xTestIssue1(t *testing.T) {
path, err := ioutil.TempDir("", "swarm-fcds-")
if err != nil {
t.Fatal(err)
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 38d9417d6f..c8d686a2af 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -238,7 +238,6 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
// RunAll runs all available tests for a Store implementation.
func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
-
RunStd(t, newStoreFunc)
t.Run("next shard", func(t *testing.T) {
@@ -277,15 +276,15 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
expectNext []uint8
expFallback uint8
}{
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 10, intervals [0 1) [1 17) [17 18) [18 19)
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 23, intervals [0 1) [1 32) [32 33) [33 34)
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 44, intervals [0 1) [1 47) [47 48) [48 49)
- {incFreeSlots: []int{0, 0, 0, 11}, expectNext: []uint8{1}, expFallback: 0}, // magic 14, intervals [0 1) [1 47) [47 48) [48 60)
- {incFreeSlots: []int{10, 0, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 48, intervals [0 11) [11 57) [57 58) [58 70)
- {incFreeSlots: []int{100, 0, 0, 0}, expectNext: []uint8{3}, expFallback: 0}, // magic 164, intervals [0 111) [111 157) [157 158) [158 170)
- {incFreeSlots: []int{0, 200, 0, 0}, expectNext: []uint8{1}, expFallback: 0}, // magic 305, intervals [0 111) [111 352) [352 353) [353 365)
- {incFreeSlots: []int{0, 0, 302, 0}, expectNext: []uint8{2}, expFallback: 0}, // magic 400, intervals [0 111) [111 352) [352 622) [622 634)
- {incFreeSlots: []int{0, 0, 0, 440}, expectNext: []uint8{3}, expFallback: 0}, // magic 637, intervals [0 111) [111 352) [352 622) [622 874)
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 3},
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 3},
+ {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 3},
+ {incFreeSlots: []int{0, 0, 0, 11}, expectNext: []uint8{1, 3}, expFallback: 3},
+ {incFreeSlots: []int{10, 0, 0, 0}, expectNext: []uint8{1, 3, 0}, expFallback: 3},
+ {incFreeSlots: []int{100, 0, 0, 0}, expectNext: []uint8{0, 1, 3}, expFallback: 3},
+ {incFreeSlots: []int{0, 200, 0, 0}, expectNext: []uint8{1, 0, 3}, expFallback: 3},
+ {incFreeSlots: []int{0, 0, 202, 0}, expectNext: []uint8{1, 2, 0, 3}, expFallback: 3},
+ {incFreeSlots: []int{0, 0, 0, 203}, expectNext: []uint8{1, 3, 2, 0}, expFallback: 3},
} {
for shard, inc := range tc.incFreeSlots {
if inc == 0 {
@@ -321,16 +320,20 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
}
}
- //freeShards, fallback, err := db.NextShard()
- //if err != nil {
- //t.Fatal(err)
- //}
+ freeShards, fallback, err := db.NextShard()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, shard := range freeShards {
+ if shard != tc.expectNext[i] {
+ t.Fatalf("expected next shard value to be %d but got %d", tc.expectNext[i], shard)
+ }
+ }
- //if shard != tc.expectNext {
- //t.Fatalf("expected next shard value to be %d but got %d", tc.expectNext, shard)
- //}
+ if tc.expFallback != fallback {
+ t.Fatalf("expected fallback value to be %d but got %d", tc.expFallback, fallback)
+ }
}
-
}
// RunStoreOptions define parameters for Store test function.
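The NextShard contract documented above can be summarised as: all shards with free slots, ordered by how many they have, plus the smallest shard by file size as a fallback for when the free-slot reports are stale. A compact sketch of that ordering, using sort.Slice instead of the byVal sorter and hypothetical shardInfo values:

package main

import (
	"fmt"
	"sort"
)

// shardInfo is a stand-in for fcds.ShardInfo: Val holds a free-slot count in
// the first pass and a shard file size in the second.
type shardInfo struct {
	Shard uint8
	Val   int64
}

// nextShard mirrors the documented selection: every shard with free slots,
// ordered by how many it has, plus the smallest shard (by size) as fallback.
func nextShard(freeSlots, sizes []shardInfo) (freeShards []uint8, fallback uint8) {
	sort.Slice(freeSlots, func(i, j int) bool { return freeSlots[i].Val > freeSlots[j].Val })
	for _, s := range freeSlots {
		if s.Val > 0 {
			freeShards = append(freeShards, s.Shard)
		}
	}
	sort.Slice(sizes, func(i, j int) bool { return sizes[i].Val > sizes[j].Val })
	return freeShards, sizes[len(sizes)-1].Shard // last element is the shortest shard
}

func main() {
	free := []shardInfo{{0, 0}, {1, 15}, {2, 0}, {3, 2}}
	sizes := []shardInfo{{0, 4096 * 10}, {1, 4096 * 9}, {2, 4096 * 12}, {3, 4096 * 7}}
	freeShards, fallback := nextShard(free, sizes)
	fmt.Println(freeShards, fallback) // [1 3] 3
}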
From df3a326b93f9e1e31f95e79a174f69909ee2a643 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 19:05:13 +0800
Subject: [PATCH 59/89] maintain free slots in memory, persist and load from
leveldb on batch and bootup
---
storage/fcds/leveldb/leveldb.go | 125 ++++++++++----------------------
1 file changed, 40 insertions(+), 85 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 282c2c1d03..6bdec6de98 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -17,9 +17,8 @@
package leveldb
import (
- "bytes"
"encoding/binary"
- "encoding/gob"
+ "errors"
"sync"
"github.com/ethersphere/swarm/chunk"
@@ -29,13 +28,14 @@ import (
)
var _ fcds.MetaStore = new(MetaStore)
+var errNoEntries = errors.New("no entries")
// MetaStore implements FCDS MetaStore with LevelDB
// for persistence.
type MetaStore struct {
db *leveldb.DB
- free map[uint8]int64 // free slots cardinality
- mtx sync.RWMutex // synchronise free slots
+ free map[uint8]map[int64]struct{} // free slots cardinality
+ mtx sync.RWMutex // synchronise free slots
}
// NewMetaStore returns new MetaStore at path.
@@ -47,33 +47,19 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
ms := &MetaStore{
db: db,
- free: make(map[uint8]int64),
+ free: make(map[uint8]map[int64]struct{}),
}
- data, err := ms.db.Get(freeCountKey(), nil)
- if err != nil {
- // key doesn't exist since this is a new db
- // write an empty set into it
- //b, err := encodeFreeSlots(ms.free)
- //if err != nil {
- //panic(err)
- //return nil, err
- //}
-
- //err = ms.db.Put(freeCountKey(), b, nil)
- //if err != nil {
- //panic(err)
- //return nil, err
- //}
- } else {
-
- ms.free, err = decodeFreeSlots(data)
- if err != nil {
- panic(err)
- return nil, err
- }
+ for i := uint8(0); i < fcds.ShardCount; i++ {
+ ms.free[i] = make(map[int64]struct{})
}
+ // caution - this _will_ break if we one day decide to
+ // decrease the shard count
+ ms.iterateFree(func(shard uint8, offset int64) {
+ ms.free[shard][offset] = struct{}{}
+ })
+
return ms, nil
}
@@ -96,6 +82,7 @@ func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
// Set adds a new chunk meta information for a shard.
// Reclaimed flag denotes that the chunk is at the place of
// already deleted chunk, not appended to the end of the file.
+// Caller expected to hold the shard lock.
func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds.Meta) (err error) {
batch := new(leveldb.Batch)
if reclaimed {
@@ -105,30 +92,20 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
if err != nil {
return err
}
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
if _, err := s.Get(addr); err != nil {
batch.Put(chunkKey(addr), meta)
}
- zero := s.free[m.Shard] == 0
- if !zero {
- s.free[m.Shard]--
- }
- //b, err := encodeFreeSlots(s.free)
- //if err != nil {
- //return err
- //}
- //batch.Put(freeCountKey(), b)
-
err = s.db.Write(batch, nil)
if err != nil {
- if !zero {
- s.free[m.Shard]++
- }
return err
}
+
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ delete(s.free[m.Shard], m.Offset)
+
return nil
}
@@ -140,22 +117,16 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
}
batch := new(leveldb.Batch)
batch.Put(freeKey(shard, m.Offset), nil)
-
- s.mtx.Lock()
- defer s.mtx.Unlock()
- s.free[shard]++
- //b, err := encodeFreeSlots(s.free)
- //if err != nil {
- //return err
- //}
- //batch.Put(freeCountKey(), b)
batch.Delete(chunkKey(addr))
err = s.db.Write(batch, nil)
if err != nil {
- s.free[shard]--
return err
}
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ s.free[shard][m.Offset] = struct{}{}
return nil
}
@@ -170,7 +141,7 @@ func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardInfo) {
i := i
slot := fcds.ShardInfo{Shard: i}
if slots, ok := s.free[i]; ok {
- slot.Val = slots
+ slot.Val = int64(len(slots))
}
freeSlots[i] = slot
}
@@ -246,6 +217,21 @@ func (s *MetaStore) Iterate(fn func(chunk.Address, *fcds.Meta) (stop bool, err e
return it.Error()
}
+func (s *MetaStore) iterateFree(fn func(shard uint8, offset int64)) {
+ i := s.db.NewIterator(nil, nil)
+ defer i.Release()
+
+ for ok := i.Seek([]byte{freePrefix}); ok; ok = i.Next() {
+ key := i.Key()
+ if key == nil || key[0] != freePrefix {
+ return
+ }
+ shard := uint8(key[1])
+ offset := int64(binary.BigEndian.Uint64(key[2:10]))
+ fn(shard, offset)
+ }
+}
+
// Close closes the underlaying LevelDB instance.
func (s *MetaStore) Close() (err error) {
return s.db.Close()
@@ -254,7 +240,6 @@ func (s *MetaStore) Close() (err error) {
const (
chunkPrefix = 0
freePrefix = 1
- freeCount = 2
)
func chunkKey(addr chunk.Address) (key []byte) {
@@ -268,33 +253,3 @@ func freeKey(shard uint8, offset int64) (key []byte) {
binary.BigEndian.PutUint64(key[2:10], uint64(offset))
return key
}
-
-func freeCountKey() (key []byte) {
- return []byte{freeCount}
-}
-
-func encodeFreeSlots(m map[uint8]int64) ([]byte, error) {
- b := new(bytes.Buffer)
-
- e := gob.NewEncoder(b)
-
- err := e.Encode(m)
- if err != nil {
- return nil, err
- }
-
- return b.Bytes(), nil
-}
-
-func decodeFreeSlots(b []byte) (map[uint8]int64, error) {
- buf := bytes.NewBuffer(b)
- var decodedMap map[uint8]int64
- d := gob.NewDecoder(buf)
-
- err := d.Decode(&decodedMap)
- if err != nil {
- return nil, err
- }
-
- return decodedMap, nil
-}
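The patch above drops the gob-encoded free counter and instead keeps the free offsets in an in-memory map that is rebuilt on startup by scanning the freePrefix keys. The key layout is one prefix byte, one shard byte and eight bytes of big-endian offset; a minimal sketch of encoding and decoding such a key, without LevelDB:

package main

import (
	"encoding/binary"
	"fmt"
)

const freePrefix = 1

// freeKey encodes a free-slot marker the way leveldb.go does:
// prefix byte, shard id byte, then the offset as a big-endian uint64.
func freeKey(shard uint8, offset int64) []byte {
	key := make([]byte, 10)
	key[0] = freePrefix
	key[1] = shard
	binary.BigEndian.PutUint64(key[2:10], uint64(offset))
	return key
}

// decodeFreeKey is the inverse used when rebuilding the in-memory free map
// on boot (the iterateFree scan in the patch).
func decodeFreeKey(key []byte) (shard uint8, offset int64) {
	return key[1], int64(binary.BigEndian.Uint64(key[2:10]))
}

func main() {
	free := map[uint8]map[int64]struct{}{}
	for _, k := range [][]byte{freeKey(3, 4096), freeKey(3, 8192), freeKey(7, 0)} {
		s, off := decodeFreeKey(k)
		if free[s] == nil {
			free[s] = map[int64]struct{}{}
		}
		free[s][off] = struct{}{}
	}
	fmt.Println(len(free[3]), len(free[7])) // 2 1
}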
From 84acb6667c1b79b3f38a5cb5fd44d09bccb3f500 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 19:06:05 +0800
Subject: [PATCH 60/89] fix build
---
storage/fcds/mem/mem.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index 11e81dfa58..baa0e54955 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -95,7 +95,7 @@ func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardInfo) {
for i := uint8(0); i < fcds.ShardCount; i++ {
slot := fcds.ShardInfo{Shard: i}
if slots, ok := s.free[i]; ok {
- slot.Slots = int64(len(slots))
+ slot.Val = int64(len(slots))
}
freeSlots[i] = slot
}
From b82eea4b3aa81a63dbeba07aa9b8e0a8700f2ced Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 4 Mar 2020 21:16:01 +0800
Subject: [PATCH 61/89] randomize next free shard to reduce contention
---
storage/fcds/fcds.go | 8 ++++++--
storage/fcds/leveldb/leveldb_test.go | 2 +-
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 0e305c336f..b16b7d6c86 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"io"
+ "math/rand"
"os"
"path/filepath"
"sort"
@@ -211,9 +212,12 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
return 0, err
}
- for _, id := range free {
- sh = s.shards[id]
+ for len(free) > 0 {
+ elem := rand.Intn(len(free))
+ id := free[elem]
+ free = append(free[:elem], free[elem+1:]...)
+ sh = s.shards[id]
sh.mu.Lock()
offset, reclaimed, err = s.getOffset(id)
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index c3a46fa99b..a55f695cc4 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -132,7 +132,7 @@ func TestFreeSlotCounter(t *testing.T) {
}
}
-func xTestIssue1(t *testing.T) {
+func TestIssue1(t *testing.T) {
path, err := ioutil.TempDir("", "swarm-fcds-")
if err != nil {
t.Fatal(err)
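Randomizing which free shard is tried first spreads concurrent writers across shards instead of sending them all to the one with the most free slots. A small sketch of the pick-and-remove step used in the new Put loop (pickRandom is a hypothetical helper):

package main

import (
	"fmt"
	"math/rand"
)

// pickRandom removes and returns a random element of free, the same pattern
// used in Put to spread concurrent writers across the shards that report
// free slots.
func pickRandom(free []uint8) (id uint8, rest []uint8) {
	elem := rand.Intn(len(free))
	id = free[elem]
	rest = append(free[:elem], free[elem+1:]...)
	return id, rest
}

func main() {
	free := []uint8{1, 3, 5}
	for len(free) > 0 {
		var id uint8
		id, free = pickRandom(free)
		fmt.Println("try shard", id)
	}
}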
From e4951ece9e6fd461b47e49400589597c21ff28ad Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 5 Mar 2020 19:28:07 +0800
Subject: [PATCH 62/89] wip return locked shard directly
---
storage/fcds/fcds.go | 90 ++++++++++++++++------------
storage/fcds/leveldb/leveldb_test.go | 2 +-
2 files changed, 54 insertions(+), 38 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index b16b7d6c86..41de5f6cd7 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
- "math/rand"
"os"
"path/filepath"
"sort"
@@ -42,6 +41,7 @@ type Storer interface {
Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
NextShard() (freeShard []uint8, fallback uint8, err error)
+ NextShardLocked() (freeShard shard, s uint8, offset int64, reclaimed bool, err error)
ShardSize() (slots []ShardInfo, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
@@ -202,46 +202,11 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
section := make([]byte, s.maxChunkSize)
copy(section, data)
- var offset int64
- var shardId uint8
- var reclaimed bool
- var sh shard
- found := false
- free, fallback, err := s.NextShard()
+ sh, shardId, offset, reclaimed, err := s.NextShardLocked()
if err != nil {
return 0, err
}
- for len(free) > 0 {
- elem := rand.Intn(len(free))
- id := free[elem]
-
- free = append(free[:elem], free[elem+1:]...)
- sh = s.shards[id]
- sh.mu.Lock()
-
- offset, reclaimed, err = s.getOffset(id)
- if err != nil {
- return 0, err
- }
-
- if offset >= 0 {
- shardId = id
- found = true
- break
- } else {
- sh.mu.Unlock()
- }
- }
- if !found {
- sh = s.shards[fallback]
- shardId = fallback
- sh.mu.Lock()
- offset, reclaimed, err = s.getOffset(fallback)
- if err != nil {
- return 0, err
- }
- }
defer sh.mu.Unlock()
if reclaimed {
@@ -319,6 +284,8 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
}
s.markShardWithFreeOffsets(m.Shard, true)
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
mu := s.shards[m.Shard].mu
mu.Lock()
@@ -470,6 +437,55 @@ func (s *Store) NextShard() (freeShards []uint8, fallback uint8, err error) {
return freeShards, shardSizes[len(shardSizes)-1].Shard, nil
}
+func (s *Store) NextShardLocked() (sh shard, id uint8, offset int64, reclaimed bool, err error) {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ // multiple writers that call this at the same time will get the same shard again and again
+ // because the free slot value has not been decremented yet
+ slots := s.meta.ShardSlots()
+ sort.Sort(byVal(slots))
+ if slots[0].Val > 0 {
+ fmt.Println("slots", slots[0].Val, slots)
+ i := slots[0].Shard
+ sh = s.shards[i]
+ sh.mu.Lock()
+ offset, reclaimed, err = s.getOffset(i)
+ if err != nil {
+ sh.mu.Unlock()
+ return sh, 0, 0, false, err
+ }
+
+ if offset < 0 {
+ panic("got no offset but should have")
+ s.shards[i].mu.Unlock()
+ } else {
+ return sh, i, offset, reclaimed, nil
+ }
+ }
+
+ // each element Val is the shard size in bytes
+ shardSizes, err := s.ShardSize()
+ if err != nil {
+ return sh, 0, 0, false, err
+ }
+
+ // sorting them will make the first element the largest shard and the last
+ // element the smallest shard; pick the smallest
+ sort.Sort(byVal(shardSizes))
+
+ fallback := shardSizes[len(shardSizes)-1].Shard
+ sh = s.shards[fallback]
+ sh.mu.Lock()
+ offset, reclaimed, err = s.getOffset(fallback)
+ if err != nil {
+ sh.mu.Unlock()
+ return sh, 0, 0, false, err
+ }
+
+ return sh, fallback, offset, reclaimed, nil
+
+}
+
type shard struct {
f *os.File
mu *sync.Mutex
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index a55f695cc4..6abbf9c5fa 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -160,7 +160,7 @@ func TestIssue1(t *testing.T) {
sem := make(chan struct{}, 100)
- for i := 0; i < 100000; i++ {
+ for i := 0; i < 50000; i++ {
i := i
sem <- struct{}{}
wg.Add(1)
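
The selection policy that NextShardLocked centralises is: prefer a shard that reports free slots, otherwise fall back to the shard with the smallest file so the data files grow evenly. A rough sketch of just that policy, assuming the byVal ordering used above puts the largest value first (shardInfo and pickShard are illustrative names):

package main

import (
	"fmt"
	"sort"
)

type shardInfo struct {
	Shard uint8
	Val   int64 // free slot count or file size, depending on the caller
}

// pickShard prefers the shard with the most free slots and falls back to
// the smallest shard when nothing can be reclaimed.
func pickShard(freeSlots, sizes []shardInfo) uint8 {
	// largest value first, matching the byVal ordering relied on above
	sort.Slice(freeSlots, func(i, j int) bool { return freeSlots[i].Val > freeSlots[j].Val })
	if freeSlots[0].Val > 0 {
		return freeSlots[0].Shard // reuse a reclaimed slot
	}
	sort.Slice(sizes, func(i, j int) bool { return sizes[i].Val > sizes[j].Val })
	return sizes[len(sizes)-1].Shard // append to the smallest shard file
}

func main() {
	free := []shardInfo{{Shard: 0, Val: 0}, {Shard: 1, Val: 2}, {Shard: 2, Val: 0}}
	sizes := []shardInfo{{Shard: 0, Val: 4096}, {Shard: 1, Val: 8192}, {Shard: 2, Val: 1024}}
	fmt.Println("picked shard", pickShard(free, sizes))
}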
From d2e73d97f35a1b91a9cf848530ac2b8dc655961f Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 5 Mar 2020 19:46:37 +0800
Subject: [PATCH 63/89] remove mem test
---
storage/fcds/mem/mem_test.go | 161 -----------------------------------
1 file changed, 161 deletions(-)
diff --git a/storage/fcds/mem/mem_test.go b/storage/fcds/mem/mem_test.go
index fdef32bc20..288ab47157 100644
--- a/storage/fcds/mem/mem_test.go
+++ b/storage/fcds/mem/mem_test.go
@@ -17,20 +17,9 @@
package mem_test
import (
- "encoding/hex"
- "fmt"
"io/ioutil"
- "math/rand"
- "os"
- "path/filepath"
- "strings"
- "sync"
"testing"
- "time"
- chunktesting "github.com/ethersphere/swarm/chunk/testing"
-
- "github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/storage/fcds"
"github.com/ethersphere/swarm/storage/fcds/mem"
"github.com/ethersphere/swarm/storage/fcds/test"
@@ -48,153 +37,3 @@ func TestFCDS(t *testing.T) {
return test.NewFCDSStore(t, path, mem.NewMetaStore())
})
}
-
-func TestIssue1(t *testing.T) {
- path, err := ioutil.TempDir("", "swarm-fcds-")
- if err != nil {
- t.Fatal(err)
- }
-
- //metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
- metaStore := mem.NewMetaStore()
- if err != nil {
- t.Fatal(err)
- }
-
- s, cleanup := test.NewFCDSStore(t, path, metaStore)
- defer cleanup()
-
- var wg sync.WaitGroup
-
- var mu sync.Mutex
- addrs := make(map[string]struct{})
- trigger := make(chan struct{}, 1)
-
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- sem := make(chan struct{}, 100)
-
- for i := 0; i < 100000; i++ {
- i := i
- sem <- struct{}{}
- wg.Add(1)
- go func() {
- defer wg.Done()
- defer func() { <-sem }()
- ch := chunktesting.GenerateTestRandomChunk()
- mu.Lock()
- _, err := s.Put(ch)
- if err != nil {
- panic(err)
- }
- mu.Unlock()
- if i%10 == 0 {
- // THIS IS CAUSING THE ISSUE
- // every tenth chunk write again after some time
- go func() {
- time.Sleep(10 * time.Second)
- //fmt.Printf(".")
- mu.Lock()
- _, err := s.Put(ch)
- if err != nil {
- panic(err)
- }
- addrs[ch.Address().String()] = struct{}{}
- mu.Unlock()
- }()
- }
- mu.Lock()
- addrs[ch.Address().String()] = struct{}{}
- if len(addrs) >= 1000 {
- select {
- case trigger <- struct{}{}:
- default:
- }
- }
- if i%100 == 0 {
- size, err := dirSize(path)
- if err != nil {
- panic(err)
- }
- fmt.Println("r", i, size, len(addrs))
- }
- mu.Unlock()
- time.Sleep(time.Duration(rand.Intn(300)) * time.Millisecond)
- }()
- }
- }()
-
- //wg.Add(1)
- go func() {
- //defer wg.Done()
-
- for range trigger {
- for {
- var addr chunk.Address
- mu.Lock()
- for a := range addrs {
- b, err := hex.DecodeString(a)
- if err != nil {
- panic(err)
- }
- addr = chunk.Address(b)
- break
- }
- if err := s.Delete(addr); err != nil {
- panic(err)
- }
- delete(addrs, addr.String())
- if len(addrs) <= 900 {
- mu.Unlock()
- break
- }
- mu.Unlock()
- }
- }
- //for range trigger {
- //for {
- //var addr chunk.Address
- //mu.Lock()
- //for a := range addrs {
- //b, err := hex.DecodeString(a)
- //if err != nil {
- //panic(err)
- //}
- //addr = chunk.Address(b)
- //break
- //}
- ////fmt.Printf("-")
- //if err := s.Delete(addr); err != nil {
- //panic(err)
- //}
- //delete(addrs, addr.String())
- //if len(addrs) <= 900 {
- //mu.Unlock()
- //break
- //}
- //mu.Unlock()
- //}
-
- //}
- }()
-
- wg.Wait()
-
- // wait some time before removing the temp dir
- time.Sleep(time.Minute)
-}
-
-func dirSize(path string) (size int64, err error) {
- err = filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() && strings.HasSuffix(info.Name(), ".db") {
- size += info.Size()
- }
- return err
- })
- return size, err
-}
From 238c673f4d24b9672fc070df4b98e63d81b4e318 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 9 Mar 2020 10:23:18 +0800
Subject: [PATCH 64/89] try out cancellable offset
---
storage/fcds/fcds.go | 36 +++++++-
storage/fcds/leveldb/leveldb.go | 23 +++++
storage/fcds/leveldb/leveldb_test.go | 129 ---------------------------
storage/fcds/mem/mem.go | 58 ++++++++----
storage/fcds/meta.go | 1 +
5 files changed, 101 insertions(+), 146 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 41de5f6cd7..7293c36bba 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -202,11 +202,15 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
section := make([]byte, s.maxChunkSize)
copy(section, data)
- sh, shardId, offset, reclaimed, err := s.NextShardLocked()
+ //sh, shardId, offset, reclaimed, err := s.NextShardLocked()
+ shardId, offset, cancel, err := s.so()
if err != nil {
return 0, err
}
+ reclaimed := offset >= 0
+ sh := s.shards[shardId]
+ sh.mu.Lock()
defer sh.mu.Unlock()
if reclaimed {
@@ -226,10 +230,12 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
_, err = sh.f.Seek(offset, io.SeekStart)
}
if err != nil {
+ cancel()
return 0, err
}
if _, err = sh.f.Write(section); err != nil {
+ cancel()
return 0, err
}
if reclaimed && s.freeCache != nil {
@@ -241,6 +247,9 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
Offset: offset,
Shard: shardId,
})
+ if err != nil {
+ cancel()
+ }
return shardId, err
}
@@ -437,6 +446,31 @@ func (s *Store) NextShard() (freeShards []uint8, fallback uint8, err error) {
return freeShards, shardSizes[len(shardSizes)-1].Shard, nil
}
+func (s *Store) so() (uint8, int64, func(), error) {
+
+ shard, offset, cancel, err := s.meta.FastFreeOffset()
+ if err != nil {
+ return 0, 0, func() {}, err
+ }
+
+ if offset >= 0 {
+ return shard, offset, cancel, nil
+ }
+
+ // each element Val is the shard size in bytes
+ shardSizes, err := s.ShardSize()
+ if err != nil {
+ return 0, 0, func() {}, err
+ }
+
+ // sorting them will make the first element the largest shard and the last
+ // element the smallest shard; pick the smallest
+ sort.Sort(byVal(shardSizes))
+
+ return shardSizes[len(shardSizes)-1].Shard, -1, func() {}, nil
+
+}
+
func (s *Store) NextShardLocked() (sh shard, id uint8, offset int64, reclaimed bool, err error) {
s.mtx.Lock()
defer s.mtx.Unlock()
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 6bdec6de98..ae5a62ba45 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -166,6 +166,29 @@ func (s *MetaStore) FreeOffset(shard uint8) (offset int64, err error) {
return offset, nil
}
+func (s *MetaStore) FastFreeOffset() (uint8, int64, func(), error) {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ for shard, offsets := range s.free {
+ for o, _ := range offsets {
+ if o >= 0 {
+ o := o
+ // remove from free offset map, create cancel func, return all values
+ delete(offsets, o)
+ return shard, o, func() {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ s.free[shard][o] = struct{}{}
+ }, nil
+ } else {
+ panic("wtf")
+ }
+ }
+ }
+ return 0, -1, func() {}, nil
+}
+
// Count returns a number of chunks in MetaStore.
// This operation is slow for larger numbers of chunks.
func (s *MetaStore) Count() (count int, err error) {
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 6abbf9c5fa..2138c4d1ec 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -17,16 +17,10 @@
package leveldb_test
import (
- "encoding/hex"
- "fmt"
"io/ioutil"
- "math/rand"
"os"
"path/filepath"
- "strings"
- "sync"
"testing"
- "time"
"github.com/ethersphere/swarm/chunk"
chunktesting "github.com/ethersphere/swarm/chunk/testing"
@@ -131,126 +125,3 @@ func TestFreeSlotCounter(t *testing.T) {
t.Fatalf("did not process enough shards: got %d but expected %d", count, fcds.ShardCount)
}
}
-
-func TestIssue1(t *testing.T) {
- path, err := ioutil.TempDir("", "swarm-fcds-")
- if err != nil {
- t.Fatal(err)
- }
-
- fmt.Println(path)
-
- metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
- if err != nil {
- t.Fatal(err)
- }
-
- s, cleanup := test.NewFCDSStore(t, path, metaStore)
- defer cleanup()
-
- var wg sync.WaitGroup
-
- var mu sync.Mutex
- addrs := make(map[string]struct{})
- trigger := make(chan struct{}, 1)
-
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- sem := make(chan struct{}, 100)
-
- for i := 0; i < 50000; i++ {
- i := i
- sem <- struct{}{}
- wg.Add(1)
- go func() {
- defer wg.Done()
- defer func() { <-sem }()
- ch := chunktesting.GenerateTestRandomChunk()
- _, err := s.Put(ch)
- if err != nil {
- panic(err)
- }
- if i%10 == 0 {
- // THIS IS CAUSING THE ISSUE
- // every tenth chunk write again after some time
- go func() {
- time.Sleep(10 * time.Second)
- _, err := s.Put(ch)
- if err != nil {
- panic(err)
- }
- mu.Lock()
- addrs[ch.Address().String()] = struct{}{}
- mu.Unlock()
- }()
- }
- mu.Lock()
- addrs[ch.Address().String()] = struct{}{}
- if len(addrs) >= 1000 {
- select {
- case trigger <- struct{}{}:
- default:
- }
- }
- if i%100 == 0 {
- size, err := dirSize(path)
- if err != nil {
- panic(err)
- }
- fmt.Println("r", i, size, len(addrs))
- }
- mu.Unlock()
- time.Sleep(time.Duration(rand.Intn(300)) * time.Millisecond)
- }()
- }
- }()
-
- //wg.Add(1)
- go func() {
- //defer wg.Done()
-
- for range trigger {
- for {
- var addr chunk.Address
- mu.Lock()
- for a := range addrs {
- b, err := hex.DecodeString(a)
- if err != nil {
- panic(err)
- }
- addr = chunk.Address(b)
- break
- }
- if err := s.Delete(addr); err != nil {
- panic(err)
- }
- delete(addrs, addr.String())
- if len(addrs) <= 900 {
- mu.Unlock()
- break
- }
- mu.Unlock()
- }
- }
- }()
-
- wg.Wait()
-
- // wait some time before removing the temp dir
- time.Sleep(time.Minute)
-}
-
-func dirSize(path string) (size int64, err error) {
- err = filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() && strings.HasSuffix(info.Name(), ".db") {
- size += info.Size()
- }
- return err
- })
- return size, err
-}
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index baa0e54955..bfad62fbc6 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -30,7 +30,7 @@ var _ fcds.MetaStore = new(MetaStore)
type MetaStore struct {
meta map[string]*fcds.Meta
free map[uint8]map[int64]struct{}
- mu sync.RWMutex
+ mtx sync.RWMutex
}
// NewMetaStore constructs a new MetaStore.
@@ -47,9 +47,9 @@ func NewMetaStore() (s *MetaStore) {
// Get returns chunk meta information.
func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
- s.mu.RLock()
+ s.mtx.RLock()
m = s.meta[string(addr)]
- s.mu.RUnlock()
+ s.mtx.RUnlock()
if m == nil {
return nil, chunk.ErrChunkNotFound
}
@@ -60,21 +60,21 @@ func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
// Reclaimed flag denotes that the chunk is at the place of
// already deleted chunk, not appended to the end of the file.
func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds.Meta) (err error) {
- s.mu.Lock()
+ s.mtx.Lock()
if reclaimed {
delete(s.free[shard], m.Offset)
}
s.meta[string(addr)] = m
- s.mu.Unlock()
+ s.mtx.Unlock()
return nil
}
// Remove removes chunk meta information from the shard.
func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
key := string(addr)
m := s.meta[key]
if m == nil {
@@ -91,7 +91,7 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardInfo) {
freeSlots = make([]fcds.ShardInfo, fcds.ShardCount)
- s.mu.RLock()
+ s.mtx.RLock()
for i := uint8(0); i < fcds.ShardCount; i++ {
slot := fcds.ShardInfo{Shard: i}
if slots, ok := s.free[i]; ok {
@@ -99,7 +99,7 @@ func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardInfo) {
}
freeSlots[i] = slot
}
- s.mu.RUnlock()
+ s.mtx.RUnlock()
return freeSlots
}
@@ -108,27 +108,53 @@ func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardInfo) {
// another chunk. If the returned value is less than 0
// there are no free offset at this shard.
func (s *MetaStore) FreeOffset(shard uint8) (offset int64, err error) {
- s.mu.RLock()
+ s.mtx.RLock()
for o := range s.free[shard] {
- s.mu.RUnlock()
+ s.mtx.RUnlock()
return o, nil
}
- s.mu.RUnlock()
+ s.mtx.RUnlock()
return -1, nil
}
+func (s *MetaStore) FastFreeOffset() (uint8, int64, func(), error) {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ for shard, offsets := range s.free {
+ for o, _ := range offsets {
+ if o >= 0 {
+ o := o
+ // remove from free offset map, create cancel func, return all values
+
+ delete(offsets, o)
+ return shard, o, func() {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ s.free[shard][o] = struct{}{}
+ }, nil
+ } else {
+ panic("wtf mem")
+ }
+ }
+ }
+
+ return 0, -1, func() {}, nil
+
+}
+
// Count returns a number of chunks in MetaStore.
func (s *MetaStore) Count() (count int, err error) {
- s.mu.RLock()
+ s.mtx.RLock()
count = len(s.meta)
- s.mu.RUnlock()
+ s.mtx.RUnlock()
return count, nil
}
// Iterate iterates over all chunk meta information.
func (s *MetaStore) Iterate(fn func(chunk.Address, *fcds.Meta) (stop bool, err error)) (err error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
for a, m := range s.meta {
stop, err := fn(chunk.Address(a), m)
if err != nil {
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 05d7b36a12..20f90cd007 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -32,6 +32,7 @@ type MetaStore interface {
Count() (int, error)
Iterate(func(chunk.Address, *Meta) (stop bool, err error)) error
FreeOffset(shard uint8) (int64, error)
+ FastFreeOffset() (uint8, int64, func(), error)
ShardSlots() []ShardInfo
Close() error
}
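
The cancellable offset introduced above works by taking the free slot out of the map at reservation time and handing the caller a closure that puts it back, so a failed write does not leak the slot and no other writer can grab the same offset in between. A self-contained sketch of that reserve/cancel pattern (freeSlots and reserve are illustrative names, not the fcds API):

package main

import (
	"fmt"
	"sync"
)

type freeSlots struct {
	mtx  sync.Mutex
	free map[uint8]map[int64]struct{} // shard id -> set of reclaimable offsets
}

// reserve removes one free offset and returns a cancel func that re-adds it.
// offset is -1 when nothing can be reclaimed and the caller must append.
func (s *freeSlots) reserve() (shard uint8, offset int64, cancel func()) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	for sh, offsets := range s.free {
		for o := range offsets {
			delete(offsets, o) // no other writer can get this offset now
			return sh, o, func() {
				s.mtx.Lock()
				defer s.mtx.Unlock()
				s.free[sh][o] = struct{}{} // write failed, give the slot back
			}
		}
	}
	return 0, -1, func() {}
}

func main() {
	s := &freeSlots{free: map[uint8]map[int64]struct{}{3: {4096: {}}}}
	shard, offset, cancel := s.reserve()
	fmt.Println("reserved", shard, offset)
	writeFailed := true // stand-in for the actual file write outcome
	if writeFailed {
		cancel()
	}
}

The later patches fold this behaviour into the MetaStore FreeOffset method itself.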
From 95e257d2ee623be5e3bfb0e49b9d18812a7627e2 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 9 Mar 2020 12:00:07 +0800
Subject: [PATCH 65/89] dont mock me
---
storage/fcds/fcds.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 7293c36bba..0fec0b4712 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -41,7 +41,7 @@ type Storer interface {
Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
NextShard() (freeShard []uint8, fallback uint8, err error)
- NextShardLocked() (freeShard shard, s uint8, offset int64, reclaimed bool, err error)
+ //NextShardLocked() (freeShard shard, s uint8, offset int64, reclaimed bool, err error)
ShardSize() (slots []ShardInfo, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
From f4fc2c022603b17056f9b4d53b9a3513e38263a6 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 9 Mar 2020 13:00:44 +0800
Subject: [PATCH 66/89] switch back to leveldb
---
storage/localstore/localstore.go | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/storage/localstore/localstore.go b/storage/localstore/localstore.go
index c402e11e4c..efbc329685 100644
--- a/storage/localstore/localstore.go
+++ b/storage/localstore/localstore.go
@@ -30,7 +30,7 @@ import (
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/shed"
"github.com/ethersphere/swarm/storage/fcds"
- fcdsmem "github.com/ethersphere/swarm/storage/fcds/mem"
+ fcdsleveldb "github.com/ethersphere/swarm/storage/fcds/leveldb"
fcdsmock "github.com/ethersphere/swarm/storage/fcds/mock"
"github.com/ethersphere/swarm/storage/mock"
)
@@ -222,10 +222,10 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
}
if o.MockStore == nil {
- metaStore := fcdsmem.NewMetaStore()
- //if err != nil {
- //return nil, err
- //}
+ metaStore, err := fcdsleveldb.NewMetaStore(filepath.Join(path, "meta"))
+ if err != nil {
+ return nil, err
+ }
db.data, err = fcds.New(
filepath.Join(path, "data"),
chunk.DefaultSize+8, // chunk data has additional 8 bytes prepended
From 39daa0c4bf2c05157775252fd36f45bb25de55ed Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 15:58:37 +0800
Subject: [PATCH 67/89] puttopgccheck
---
swarm.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/swarm.go b/swarm.go
index 9c8eeacbe3..2a48de60f5 100644
--- a/swarm.go
+++ b/swarm.go
@@ -227,7 +227,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
MockStore: mockStore,
Capacity: config.DbCapacity,
Tags: self.tags,
- PutToGCCheck: to.IsWithinDepth,
+ PutToGCCheck: func(_ []byte) bool { return true },
})
if err != nil {
return nil, err
From 2fe75965f72086830852e5e72f14d7e9d968e0ba Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 16:17:19 +0800
Subject: [PATCH 68/89] forky: remove offset deletion from Set since it should
already be deleted when the offset is requested
---
storage/fcds/leveldb/leveldb.go | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index ae5a62ba45..9998914b71 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -100,12 +100,6 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
if err != nil {
return err
}
-
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
- delete(s.free[m.Shard], m.Offset)
-
return nil
}
@@ -123,6 +117,7 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
if err != nil {
return err
}
+
s.mtx.Lock()
defer s.mtx.Unlock()
From da85d55d6d46a09d8ac672da12d2bd0ffe55b38b Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 14:41:21 +0100
Subject: [PATCH 69/89] forky: simplify set, change free offset method
---
storage/fcds/fcds.go | 166 ++++-----------------------
storage/fcds/leveldb/leveldb.go | 68 +++--------
storage/fcds/leveldb/leveldb_test.go | 58 +++++-----
storage/fcds/mem/mem.go | 55 ++-------
storage/fcds/meta.go | 4 +-
storage/fcds/test/store.go | 32 +++---
6 files changed, 90 insertions(+), 293 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 0fec0b4712..17e232069a 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -40,8 +40,6 @@ type Storer interface {
Has(addr chunk.Address) (yes bool, err error)
Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
- NextShard() (freeShard []uint8, fallback uint8, err error)
- //NextShardLocked() (freeShard shard, s uint8, offset int64, reclaimed bool, err error)
ShardSize() (slots []ShardInfo, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
@@ -117,8 +115,9 @@ func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s
func (s *Store) ShardSize() (slots []ShardInfo, err error) {
slots = make([]ShardInfo, len(s.shards))
for i, sh := range s.shards {
- i := i
+ sh.mu.Lock()
fs, err := sh.f.Stat()
+ sh.mu.Unlock()
if err != nil {
return nil, err
}
@@ -142,18 +141,20 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
sh := s.shards[m.Shard]
sh.mu.Lock()
- defer sh.mu.Unlock()
data := make([]byte, m.Size)
n, err := sh.f.ReadAt(data, m.Offset)
if err != nil && err != io.EOF {
metrics.GetOrRegisterCounter("fcds.get.error", nil).Inc(1)
+ sh.mu.Unlock()
return nil, err
}
if n != int(m.Size) {
return nil, fmt.Errorf("incomplete chunk data, read %v of %v", n, m.Size)
}
+ sh.mu.Unlock()
+
metrics.GetOrRegisterCounter("fcds.get.ok", nil).Inc(1)
return chunk.NewChunk(addr, data), nil
@@ -202,12 +203,10 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
section := make([]byte, s.maxChunkSize)
copy(section, data)
- //sh, shardId, offset, reclaimed, err := s.NextShardLocked()
- shardId, offset, cancel, err := s.so()
+ shardId, offset, reclaimed, cancel, err := s.getOffset()
if err != nil {
return 0, err
}
- reclaimed := offset >= 0
sh := s.shards[shardId]
sh.mu.Lock()
@@ -254,31 +253,28 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
return shardId, err
}
-// getOffset returns an offset where chunk data can be written to
+// getOffset returns an offset on a shard where chunk data can be written to
// and a flag if the offset is reclaimed from a previously removed chunk.
// If offset is less than 0, no free offsets are available.
-func (s *Store) getOffset(shard uint8) (offset int64, reclaimed bool, err error) {
- if !s.shardHasFreeOffsets(shard) {
- return -1, false, nil
- }
+func (s *Store) getOffset() (shard uint8, offset int64, reclaimed bool, cancel func(), err error) {
+ shard, offset, cancel = s.meta.FreeOffset()
- offset = -1
- if s.freeCache != nil {
- offset = s.freeCache.get(shard)
+ if offset >= 0 {
+ return shard, offset, true, cancel, nil
}
- if offset < 0 {
- offset, err = s.meta.FreeOffset(shard)
- if err != nil {
- return 0, false, err
- }
- }
- if offset < 0 {
- s.markShardWithFreeOffsets(shard, false)
- return -1, false, nil
+ // each element Val is the shard size in bytes
+ shardSizes, err := s.ShardSize()
+ if err != nil {
+ return 0, 0, false, func() {}, err
}
- return offset, true, nil
+ // sorting them will make the first element the largest shard and the last
+ // element the smallest shard; pick the smallest
+ sort.Sort(byVal(shardSizes))
+
+ return shardSizes[len(shardSizes)-1].Shard, -1, false, func() {}, nil
+
}
// Delete makes the chunk unavailable.
@@ -292,7 +288,6 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
return err
}
- s.markShardWithFreeOffsets(m.Shard, true)
s.mtx.Lock()
defer s.mtx.Unlock()
@@ -326,8 +321,6 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return err
}
defer s.unprotect()
- //s.mtx.Lock()
- //defer s.mtx.Unlock()
for _, sh := range s.shards {
sh.mu.Lock()
}
@@ -403,123 +396,6 @@ func (s *Store) getMeta(addr chunk.Address) (m *Meta, err error) {
return s.meta.Get(addr)
}
-func (s *Store) markShardWithFreeOffsets(shard uint8, has bool) {
- s.freeMu.Lock()
- s.free[shard] = has
- s.freeMu.Unlock()
-}
-
-func (s *Store) shardHasFreeOffsets(shard uint8) (has bool) {
- s.freeMu.RLock()
- has = s.free[shard]
- s.freeMu.RUnlock()
- return has
-}
-
-// NextShard gets the next shard to write to.
-// Returns a slice of shards with free slots and a fallback shard
-// which is the shortest shard in size, but does not guarantee it has
-// any free slots.
-func (s *Store) NextShard() (freeShards []uint8, fallback uint8, err error) {
-
- // multiple writers that call this at the same time will get the same shard again and again
- // because the free slot value has not been decremented yet
- slots := s.meta.ShardSlots()
- sort.Sort(byVal(slots))
-
- for _, v := range slots {
- if v.Val > 0 {
- freeShards = append(freeShards, v.Shard)
- }
- }
-
- // each element Val is the shard size in bytes
- shardSizes, err := s.ShardSize()
- if err != nil {
- return nil, 0, err
- }
-
- // sorting them will make the first element the largest shard and the last
- // element the smallest shard; pick the smallest
- sort.Sort(byVal(shardSizes))
-
- return freeShards, shardSizes[len(shardSizes)-1].Shard, nil
-}
-
-func (s *Store) so() (uint8, int64, func(), error) {
-
- shard, offset, cancel, err := s.meta.FastFreeOffset()
- if err != nil {
- return 0, 0, func() {}, err
- }
-
- if offset >= 0 {
- return shard, offset, cancel, nil
- }
-
- // each element Val is the shard size in bytes
- shardSizes, err := s.ShardSize()
- if err != nil {
- return 0, 0, func() {}, err
- }
-
- // sorting them will make the first element the largest shard and the last
- // element the smallest shard; pick the smallest
- sort.Sort(byVal(shardSizes))
-
- return shardSizes[len(shardSizes)-1].Shard, -1, func() {}, nil
-
-}
-
-func (s *Store) NextShardLocked() (sh shard, id uint8, offset int64, reclaimed bool, err error) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- // multiple writers that call this at the same time will get the same shard again and again
- // because the free slot value has not been decremented yet
- slots := s.meta.ShardSlots()
- sort.Sort(byVal(slots))
- if slots[0].Val > 0 {
- fmt.Println("slots", slots[0].Val, slots)
- i := slots[0].Shard
- sh = s.shards[i]
- sh.mu.Lock()
- offset, reclaimed, err = s.getOffset(i)
- if err != nil {
- sh.mu.Unlock()
- return sh, 0, 0, false, err
- }
-
- if offset < 0 {
- panic("got no offset but should have")
- s.shards[i].mu.Unlock()
- } else {
- return sh, i, offset, reclaimed, nil
- }
- }
-
- // each element Val is the shard size in bytes
- shardSizes, err := s.ShardSize()
- if err != nil {
- return sh, 0, 0, false, err
- }
-
- // sorting them will make the first element the largest shard and the last
- // element the smallest shard; pick the smallest
- sort.Sort(byVal(shardSizes))
-
- fallback := shardSizes[len(shardSizes)-1].Shard
- sh = s.shards[fallback]
- sh.mu.Lock()
- offset, reclaimed, err = s.getOffset(fallback)
- if err != nil {
- sh.mu.Unlock()
- return sh, 0, 0, false, err
- }
-
- return sh, fallback, offset, reclaimed, nil
-
-}
-
type shard struct {
f *os.File
mu *sync.Mutex
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 9998914b71..6a3dfcf054 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -92,15 +92,9 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
if err != nil {
return err
}
- if _, err := s.Get(addr); err != nil {
- batch.Put(chunkKey(addr), meta)
- }
+ batch.Put(chunkKey(addr), meta)
- err = s.db.Write(batch, nil)
- if err != nil {
- return err
- }
- return nil
+ return s.db.Write(batch, nil)
}
// Remove removes chunk meta information from the shard.
@@ -126,62 +120,26 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
return nil
}
-// ShardSlots gives back a slice of ShardInfo items that represent the number
-// of free slots inside each shard, value is in number of chunks, not bytes.
-func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardInfo) {
- freeSlots = make([]fcds.ShardInfo, fcds.ShardCount)
-
- s.mtx.RLock()
- for i := uint8(0); i < fcds.ShardCount; i++ {
- i := i
- slot := fcds.ShardInfo{Shard: i}
- if slots, ok := s.free[i]; ok {
- slot.Val = int64(len(slots))
- }
- freeSlots[i] = slot
- }
- s.mtx.RUnlock()
-
- return freeSlots
-}
-
// FreeOffset returns an offset that can be reclaimed by
// another chunk. If the returned value is less than 0
-// there are no free offset at this shard.
-func (s *MetaStore) FreeOffset(shard uint8) (offset int64, err error) {
- i := s.db.NewIterator(nil, nil)
- defer i.Release()
-
- i.Seek([]byte{freePrefix, shard})
- key := i.Key()
- if key == nil || key[0] != freePrefix || key[1] != shard {
- return -1, nil
- }
- offset = int64(binary.BigEndian.Uint64(key[2:10]))
- return offset, nil
-}
-
-func (s *MetaStore) FastFreeOffset() (uint8, int64, func(), error) {
+// there are no free offsets on any shard and the chunk must be
+// appended to the shortest shard.
+func (s *MetaStore) FreeOffset() (shard uint8, offset int64, cancel func()) {
s.mtx.Lock()
defer s.mtx.Unlock()
for shard, offsets := range s.free {
- for o, _ := range offsets {
- if o >= 0 {
- o := o
- // remove from free offset map, create cancel func, return all values
- delete(offsets, o)
- return shard, o, func() {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- s.free[shard][o] = struct{}{}
- }, nil
- } else {
- panic("wtf")
+ for offset, _ = range offsets {
+ delete(offsets, offset)
+ return shard, offset, func() {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ s.free[shard][offset] = struct{}{}
}
}
}
- return 0, -1, func() {}, nil
+
+ return 0, -1, func() {}
}
// Count returns a number of chunks in MetaStore.
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 2138c4d1ec..0586d16e6d 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -95,33 +95,33 @@ func TestFreeSlotCounter(t *testing.T) {
}
}
- freeSlots := metaStore.ShardSlots()
-
- store.Close()
- metaStore.Close()
-
- metaStore2, err := leveldb.NewMetaStore(metaPath)
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- metaStore2.Close()
- os.RemoveAll(metaPath)
- }()
-
- freeSlots2 := metaStore.ShardSlots()
- count := 0
- for i, v := range freeSlots {
- count++
- if freeSlots2[i].Shard != v.Shard {
- t.Fatalf("expected shard %d to be %d but got %d", i, v.Shard, freeSlots[2].Shard)
- }
- if freeSlots2[i].Val != v.Val {
- t.Fatalf("expected shard %d to have %d free slots but got %d", i, v.Val, freeSlots[2].Val)
- }
- }
-
- if uint8(count) != fcds.ShardCount {
- t.Fatalf("did not process enough shards: got %d but expected %d", count, fcds.ShardCount)
- }
+ //freeSlots := metaStore.ShardSlots()
+
+ //store.Close()
+ //metaStore.Close()
+
+ //metaStore2, err := leveldb.NewMetaStore(metaPath)
+ //if err != nil {
+ //t.Fatal(err)
+ //}
+ //defer func() {
+ //metaStore2.Close()
+ //os.RemoveAll(metaPath)
+ //}()
+
+ ////freeSlots2 := metaStore.ShardSlots()
+ //count := 0
+ //for i, v := range freeSlots {
+ //count++
+ //if freeSlots2[i].Shard != v.Shard {
+ //t.Fatalf("expected shard %d to be %d but got %d", i, v.Shard, freeSlots[2].Shard)
+ //}
+ //if freeSlots2[i].Val != v.Val {
+ //t.Fatalf("expected shard %d to have %d free slots but got %d", i, v.Val, freeSlots[2].Val)
+ //}
+ //}
+
+ //if uint8(count) != fcds.ShardCount {
+ //t.Fatalf("did not process enough shards: got %d but expected %d", count, fcds.ShardCount)
+ //}
}
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index bfad62fbc6..7e03b8873d 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -86,61 +86,26 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
return nil
}
-// ShardSlots gives back a slice of ShardInfo items that represent the number
-// of free slots inside each shard.
-func (s *MetaStore) ShardSlots() (freeSlots []fcds.ShardInfo) {
- freeSlots = make([]fcds.ShardInfo, fcds.ShardCount)
-
- s.mtx.RLock()
- for i := uint8(0); i < fcds.ShardCount; i++ {
- slot := fcds.ShardInfo{Shard: i}
- if slots, ok := s.free[i]; ok {
- slot.Val = int64(len(slots))
- }
- freeSlots[i] = slot
- }
- s.mtx.RUnlock()
-
- return freeSlots
-}
-
// FreeOffset returns an offset that can be reclaimed by
// another chunk. If the returned value is less than 0
-// there are no free offset at this shard.
-func (s *MetaStore) FreeOffset(shard uint8) (offset int64, err error) {
- s.mtx.RLock()
- for o := range s.free[shard] {
- s.mtx.RUnlock()
- return o, nil
- }
- s.mtx.RUnlock()
- return -1, nil
-}
-
-func (s *MetaStore) FastFreeOffset() (uint8, int64, func(), error) {
+// there are no free offsets on any shard and the chunk must be
+// appended to the shortest shard.
+func (s *MetaStore) FreeOffset() (shard uint8, offset int64, cancel func()) {
s.mtx.Lock()
defer s.mtx.Unlock()
for shard, offsets := range s.free {
- for o, _ := range offsets {
- if o >= 0 {
- o := o
- // remove from free offset map, create cancel func, return all values
-
- delete(offsets, o)
- return shard, o, func() {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- s.free[shard][o] = struct{}{}
- }, nil
- } else {
- panic("wtf mem")
+ for offset, _ = range offsets {
+ delete(offsets, offset)
+ return shard, offset, func() {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ s.free[shard][offset] = struct{}{}
}
}
}
- return 0, -1, func() {}, nil
-
+ return 0, -1, func() {}
}
// Count returns a number of chunks in MetaStore.
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 20f90cd007..283f86ae0e 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -31,9 +31,7 @@ type MetaStore interface {
Remove(addr chunk.Address, shard uint8) error
Count() (int, error)
Iterate(func(chunk.Address, *Meta) (stop bool, err error)) error
- FreeOffset(shard uint8) (int64, error)
- FastFreeOffset() (uint8, int64, func(), error)
- ShardSlots() []ShardInfo
+ FreeOffset() (shard uint8, offset int64, cancel func())
Close() error
}
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index c8d686a2af..5fbda6d51d 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -240,9 +240,9 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
RunStd(t, newStoreFunc)
- t.Run("next shard", func(t *testing.T) {
- runNextShard(t, newStoreFunc)
- })
+ //t.Run("next shard", func(t *testing.T) {
+ //runNextShard(t, newStoreFunc)
+ //})
}
// RunNextShard runs the test scenario for NextShard selection
@@ -320,19 +320,19 @@ func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fu
}
}
- freeShards, fallback, err := db.NextShard()
- if err != nil {
- t.Fatal(err)
- }
- for i, shard := range freeShards {
- if shard != tc.expectNext[i] {
- t.Fatalf("expected next shard value to be %d but got %d", tc.expectNext[i], shard)
- }
- }
-
- if tc.expFallback != fallback {
- t.Fatalf("expected fallback value to be %d but got %d", tc.expFallback, fallback)
- }
+ //freeShards, fallback, err := db.NextShard()
+ //if err != nil {
+ //t.Fatal(err)
+ //}
+ //for i, shard := range freeShards {
+ //if shard != tc.expectNext[i] {
+ //t.Fatalf("expected next shard value to be %d but got %d", tc.expectNext[i], shard)
+ //}
+ //}
+
+ //if tc.expFallback != fallback {
+ //t.Fatalf("expected fallback value to be %d but got %d", tc.expFallback, fallback)
+ //}
}
}
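
On the Put side the simplified contract reads roughly as follows: a non-negative offset means a reclaimed slot to overwrite in place, a negative offset means append to the chosen shard, and cancel() must be invoked on any failure so the reserved slot returns to the free map. A compact caller-side sketch under those assumptions (putSketch and the package name are illustrative):

package example

import (
	"io"
	"os"
)

// putSketch writes one chunk section to a shard file following the
// free-offset contract described above; on any error the reserved free
// slot is handed back via cancel.
func putSketch(f *os.File, section []byte, offset int64, reclaimed bool, cancel func()) error {
	var err error
	if reclaimed {
		_, err = f.WriteAt(section, offset) // overwrite the reclaimed slot
	} else {
		if _, err = f.Seek(0, io.SeekEnd); err == nil {
			_, err = f.Write(section) // append to the end of the shard file
		}
	}
	if err != nil {
		cancel() // do not leak the reserved slot
	}
	return err
}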
From 4cba21a19f9ba2ce39219ec90c8571670c7903c3 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 15:06:45 +0100
Subject: [PATCH 70/89] cleanup
---
storage/fcds/fcds.go | 5 +++--
storage/fcds/leveldb/leveldb.go | 10 +++-------
2 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 17e232069a..6f78419003 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -257,6 +257,7 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
// and a flag if the offset is reclaimed from a previously removed chunk.
// If offset is less than 0, no free offsets are available.
func (s *Store) getOffset() (shard uint8, offset int64, reclaimed bool, cancel func(), err error) {
+ cancel = func() {}
shard, offset, cancel = s.meta.FreeOffset()
if offset >= 0 {
@@ -266,14 +267,14 @@ func (s *Store) getOffset() (shard uint8, offset int64, reclaimed bool, cancel f
// each element Val is the shard size in bytes
shardSizes, err := s.ShardSize()
if err != nil {
- return 0, 0, false, func() {}, err
+ return 0, 0, false, cancel, err
}
// sorting them will make the first element the largest shard and the last
// element the smallest shard; pick the smallest
sort.Sort(byVal(shardSizes))
- return shardSizes[len(shardSizes)-1].Shard, -1, false, func() {}, nil
+ return shardSizes[len(shardSizes)-1].Shard, -1, false, cancel, nil
}
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 6a3dfcf054..4811ac97df 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -18,7 +18,6 @@ package leveldb
import (
"encoding/binary"
- "errors"
"sync"
"github.com/ethersphere/swarm/chunk"
@@ -28,14 +27,13 @@ import (
)
var _ fcds.MetaStore = new(MetaStore)
-var errNoEntries = errors.New("no entries")
// MetaStore implements FCDS MetaStore with LevelDB
// for persistence.
type MetaStore struct {
db *leveldb.DB
- free map[uint8]map[int64]struct{} // free slots cardinality
- mtx sync.RWMutex // synchronise free slots
+ free map[uint8]map[int64]struct{} // free slots map. root map key is shard id
+ mtx sync.Mutex // synchronise free slots
}
// NewMetaStore returns new MetaStore at path.
@@ -93,7 +91,6 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
return err
}
batch.Put(chunkKey(addr), meta)
-
return s.db.Write(batch, nil)
}
@@ -113,9 +110,8 @@ func (s *MetaStore) Remove(addr chunk.Address, shard uint8) (err error) {
}
s.mtx.Lock()
- defer s.mtx.Unlock()
-
s.free[shard][m.Offset] = struct{}{}
+ s.mtx.Unlock()
return nil
}
From 0df8e67b96299d88dc5d1c9807df615a469f95df Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 15:21:30 +0100
Subject: [PATCH 71/89] cleanup
---
storage/fcds/leveldb/leveldb.go | 2 ++
storage/fcds/mock/mock.go | 4 ----
storage/fcds/test/store.go | 7 +++----
3 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 4811ac97df..477e4edbb1 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -189,6 +189,8 @@ func (s *MetaStore) Iterate(fn func(chunk.Address, *fcds.Meta) (stop bool, err e
return it.Error()
}
+// iterateFree iterates over all free slot entries in leveldb
+// and calls the defined callback function on each entry found.
func (s *MetaStore) iterateFree(fn func(shard uint8, offset int64)) {
i := s.db.NewIterator(nil, nil)
defer i.Release()
diff --git a/storage/fcds/mock/mock.go b/storage/fcds/mock/mock.go
index f9ec886022..fc73dcd35a 100644
--- a/storage/fcds/mock/mock.go
+++ b/storage/fcds/mock/mock.go
@@ -68,10 +68,6 @@ func (s *Store) Put(ch chunk.Chunk) (shard uint8, err error) {
return 0, err
}
-func (s *Store) NextShard() (shard []uint8, fallback uint8, err error) {
- return []uint8{0}, 0, nil
-}
-
// Delete removes chunk data.
func (s *Store) Delete(addr chunk.Address) (err error) {
return s.m.Delete(addr)
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 5fbda6d51d..b5f5a4f39c 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -101,13 +101,12 @@ func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func()))
RunIterator(t, newStoreFunc)
})
- t.Run("no grow", func(t *testing.T) {
- runNoGrow(t, newStoreFunc)
- })
+ //t.Run("no grow", func(t *testing.T) {
+ //runNoGrow(t, newStoreFunc)
+ //})
}
-// RunNextShard runs the test scenario for NextShard selection
func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
defer func(s uint8) {
fcds.ShardCount = s
From 7d9156ca47f57236a3c1270d9b57bdff8303ade7 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 15:46:36 +0100
Subject: [PATCH 72/89] cleanup
---
storage/fcds/mem/mem.go | 1 -
storage/fcds/meta_test.go | 7 +++----
2 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index 7e03b8873d..c21858b8c7 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -65,7 +65,6 @@ func (s *MetaStore) Set(addr chunk.Address, shard uint8, reclaimed bool, m *fcds
if reclaimed {
delete(s.free[shard], m.Offset)
}
-
s.meta[string(addr)] = m
s.mtx.Unlock()
return nil
diff --git a/storage/fcds/meta_test.go b/storage/fcds/meta_test.go
index 5f057cf89e..95dbc71688 100644
--- a/storage/fcds/meta_test.go
+++ b/storage/fcds/meta_test.go
@@ -61,18 +61,17 @@ func TestShardSlotSort(t *testing.T) {
expectOrder: []int{1, 2, 3, 0},
},
} {
- s := make([]ShardSlot, len(tc.freeSlots))
+ s := make([]ShardInfo, len(tc.freeSlots))
for i, v := range tc.freeSlots {
- s[i] = ShardInfo{Shard: uint8(i), Info: int64(v)}
+ s[i] = ShardInfo{Shard: uint8(i), Val: int64(v)}
}
- sort.Sort(bySlots(s))
+ sort.Sort(byVal(s))
for i, v := range s {
if v.Shard != uint8(tc.expectOrder[i]) {
t.Fatalf("expected shard index %d to be %d but got %d", i, tc.expectOrder[i], v.Shard)
}
}
-
}
}
From e3d8ccf2d2aeffedc87c3b455d52c81ce2d925de Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 17:48:24 +0100
Subject: [PATCH 73/89] cleanup tests
---
storage/fcds/mock/mock_test.go | 2 +-
storage/fcds/test/store.go | 133 +++------------------------------
2 files changed, 12 insertions(+), 123 deletions(-)
diff --git a/storage/fcds/mock/mock_test.go b/storage/fcds/mock/mock_test.go
index 8d8adcf0b2..49029c608a 100644
--- a/storage/fcds/mock/mock_test.go
+++ b/storage/fcds/mock/mock_test.go
@@ -28,7 +28,7 @@ import (
// TestFCDS runs a standard series of tests on mock Store implementation.
func TestFCDS(t *testing.T) {
- test.RunStd(t, func(t *testing.T) (fcds.Storer, func()) {
+ test.RunAll(t, func(t *testing.T) (fcds.Storer, func()) {
return mock.New(
mem.NewGlobalStore().NewNodeStore(
common.BytesToAddress(make([]byte, 20)),
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index b5f5a4f39c..9a926f5783 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -42,8 +42,8 @@ func Main(m *testing.M) {
os.Exit(m.Run())
}
-// RunStd runs the standard tests
-func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
+// RunAll runs all available tests for a Store implementation.
+func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
t.Run("empty", func(t *testing.T) {
RunStore(t, &RunStoreOptions{
ChunkCount: *chunksFlag,
@@ -101,9 +101,9 @@ func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func()))
RunIterator(t, newStoreFunc)
})
- //t.Run("no grow", func(t *testing.T) {
- //runNoGrow(t, newStoreFunc)
- //})
+ t.Run("no grow", func(t *testing.T) {
+ runNoGrow(t, newStoreFunc)
+ })
}
@@ -164,28 +164,17 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
}
ins := 4 + 3 + 2 + 1
- // insert 4,3,2,1 chunks and expect the shards as next shards inserted into
- // in the following order
- order := []uint8{
- // comment denotes free slots _after_ PUT
- 0, //4,3,2,1 -> 3,3,2,1
- 0, //3,3,2,1 -> 2,3,2,1
- 1, //2,3,2,1 -> 2,2,2,1
- 0, //2,2,2,1 -> 1,2,2,1
- 1, //1,2,2,1 -> 1,1,2,1
- 2, //1,1,2,1 -> 1,1,1,1
- 0, //1,1,1,1 -> 0,1,1,1
- 1, //0,1,1,1 -> 0,0,1,1
- 2, //0,0,1,1 -> 0,0,0,1
- 3, //0,0,0,1 -> 0,0,0,0
- }
+
+ freeSlots := []int{4, 3, 2, 1}
+
for i := 0; i < ins; i++ {
cc := chunktesting.GenerateTestRandomChunk()
if shard, err := db.Put(cc); err != nil {
t.Fatal(err)
} else {
- if shard != order[i] {
- t.Fatalf("expected chunk %d to be on shard %d but got %d", i, order[i], shard)
+ freeSlots[shard]--
+ if freeSlots[shard] < 0 {
+ t.Fatalf("shard %d slots went negative", shard)
}
chunkShards[cc.Address().String()] = shard
}
@@ -235,106 +224,6 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
}
}
-// RunAll runs all available tests for a Store implementation.
-func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- RunStd(t, newStoreFunc)
-
- //t.Run("next shard", func(t *testing.T) {
- //runNextShard(t, newStoreFunc)
- //})
-}
-
-// RunNextShard runs the test scenario for NextShard selection
-func runNextShard(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
- rand.Seed(42424242) //use a constant seed so we can assert the results
- defer func(s uint8) {
- fcds.ShardCount = s
- }(fcds.ShardCount)
-
- fcds.ShardCount = 4
-
- db, clean := newStoreFunc(t)
-
- defer clean()
-
- chunkCount := 1000
- chunks := getChunks(chunkCount)
-
- chunkShards := make(map[string]uint8)
-
- for _, ch := range chunks {
- if shard, err := db.Put(ch); err != nil {
- t.Fatal(err)
- } else {
- chunkShards[ch.Address().String()] = shard
- }
- }
-
- for _, tc := range []struct {
- incFreeSlots []int
- expectNext []uint8
- expFallback uint8
- }{
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 3},
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 3},
- {incFreeSlots: []int{0, 15, 0, 0}, expectNext: []uint8{1}, expFallback: 3},
- {incFreeSlots: []int{0, 0, 0, 11}, expectNext: []uint8{1, 3}, expFallback: 3},
- {incFreeSlots: []int{10, 0, 0, 0}, expectNext: []uint8{1, 3, 0}, expFallback: 3},
- {incFreeSlots: []int{100, 0, 0, 0}, expectNext: []uint8{0, 1, 3}, expFallback: 3},
- {incFreeSlots: []int{0, 200, 0, 0}, expectNext: []uint8{1, 0, 3}, expFallback: 3},
- {incFreeSlots: []int{0, 0, 202, 0}, expectNext: []uint8{1, 2, 0, 3}, expFallback: 3},
- {incFreeSlots: []int{0, 0, 0, 203}, expectNext: []uint8{1, 3, 2, 0}, expFallback: 3},
- } {
- for shard, inc := range tc.incFreeSlots {
- if inc == 0 {
- continue
- }
- deleteChunks := []string{}
- for addr, storedOn := range chunkShards {
- if storedOn == uint8(shard) {
-
- // delete the chunk to make a free slot on the shard
- c := new(chunk.Address)
- err := c.UnmarshalString(addr)
- if err != nil {
- t.Fatal(err)
- }
- if err := db.Delete(*c); err != nil {
- t.Fatal(err)
- }
- deleteChunks = append(deleteChunks, addr)
- }
-
- if len(deleteChunks) == inc {
- break
- }
- }
-
- if len(deleteChunks) != inc {
- panic(0)
- }
-
- for _, v := range deleteChunks {
- delete(chunkShards, v)
- }
- }
-
- //freeShards, fallback, err := db.NextShard()
- //if err != nil {
- //t.Fatal(err)
- //}
- //for i, shard := range freeShards {
- //if shard != tc.expectNext[i] {
- //t.Fatalf("expected next shard value to be %d but got %d", tc.expectNext[i], shard)
- //}
- //}
-
- //if tc.expFallback != fallback {
- //t.Fatalf("expected fallback value to be %d but got %d", tc.expFallback, fallback)
- //}
- }
-}
-
// RunStoreOptions define parameters for Store test function.
type RunStoreOptions struct {
NewStoreFunc func(t *testing.T) (fcds.Storer, func())
From 9050b9630174d6318a5cfd1f2ce97c12d8c50451 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 18:19:50 +0100
Subject: [PATCH 74/89] cleanup
---
storage/fcds/test/store.go | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 9a926f5783..c204e7682b 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -18,6 +18,7 @@ package test
import (
"bytes"
+ "encoding/hex"
"flag"
"fmt"
"math/rand"
@@ -141,11 +142,8 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
if storedOn == i {
// delete the chunk to make a free slot on the shard
- c := new(chunk.Address)
- err := c.UnmarshalString(addr)
- if err != nil {
- t.Fatal(err)
- }
+ c := unmarshalAddressString(t, addr)
+
if err := db.Delete(*c); err != nil {
t.Fatal(err)
}
@@ -446,3 +444,20 @@ func getChunks(count int) []chunk.Chunk {
}
return chunkCache[:count]
}
+
+func unmarshalAddressString(t *testing.T, s string) *chunk.Address {
+ t.Helper()
+ v, err := hex.DecodeString(s)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(v) != 32 {
+ t.Fatalf("address length mismatch. got %d bytes but expected %d", len(v), 32)
+ }
+ a := new(chunk.Address)
+ *a = make([]byte, 32)
+ copy(*a, v)
+
+ return a
+}
From ece3c7dfafac4dc74cac69f8819bfef3df797700 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 18:20:34 +0100
Subject: [PATCH 75/89] cleanup
---
chunk/chunk.go | 15 ---------------
storage/fcds/fcds.go | 1 -
2 files changed, 16 deletions(-)
diff --git a/chunk/chunk.go b/chunk/chunk.go
index 06d38cd5b2..7f9cda4fbb 100644
--- a/chunk/chunk.go
+++ b/chunk/chunk.go
@@ -18,7 +18,6 @@ package chunk
import (
"context"
- "encoding/hex"
"errors"
"fmt"
@@ -120,20 +119,6 @@ func (a *Address) UnmarshalJSON(value []byte) error {
return nil
}
-func (a *Address) UnmarshalString(s string) error {
- v, err := hex.DecodeString(s)
- if err != nil {
- return err
- }
-
- if len(v) != AddressLength {
- return fmt.Errorf("address length mistmatch. got %d bytes but expected %d", len(v), AddressLength)
- }
- *a = make([]byte, 32)
- copy(*a, v)
- return nil
-}
-
// Proximity returns the proximity order of the MSB distance between x and y
//
// The distance metric MSB(x, y) of two equal length byte sequences x an y is the
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 6f78419003..6be2db5c22 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -259,7 +259,6 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
func (s *Store) getOffset() (shard uint8, offset int64, reclaimed bool, cancel func(), err error) {
cancel = func() {}
shard, offset, cancel = s.meta.FreeOffset()
-
if offset >= 0 {
return shard, offset, true, cancel, nil
}
From 6068e9215dd28e9f9b80b514e39468f6ca7dbf5d Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 19:23:17 +0100
Subject: [PATCH 76/89] cleanup test vectors
---
storage/fcds/fcds.go | 2 -
storage/fcds/leveldb/leveldb.go | 6 +--
storage/fcds/leveldb/leveldb_test.go | 61 +++++++++++++++-------------
storage/fcds/mem/mem.go | 13 ++++++
storage/fcds/meta.go | 1 +
5 files changed, 49 insertions(+), 34 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 6be2db5c22..5bd3e5b224 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -307,7 +307,6 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
metrics.GetOrRegisterCounter("fcds.delete.ok", nil).Inc(1)
return nil
-
}
// Count returns a number of stored chunks.
@@ -332,7 +331,6 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return s.meta.Iterate(func(addr chunk.Address, m *Meta) (stop bool, err error) {
data := make([]byte, m.Size)
-
_, err = s.shards[m.Shard].f.ReadAt(data, m.Offset)
if err != nil {
return true, err
diff --git a/storage/fcds/leveldb/leveldb.go b/storage/fcds/leveldb/leveldb.go
index 477e4edbb1..0877f84cca 100644
--- a/storage/fcds/leveldb/leveldb.go
+++ b/storage/fcds/leveldb/leveldb.go
@@ -54,7 +54,7 @@ func NewMetaStore(path string) (s *MetaStore, err error) {
// caution - this _will_ break if we one day decide to
// decrease the shard count
- ms.iterateFree(func(shard uint8, offset int64) {
+ ms.IterateFree(func(shard uint8, offset int64) {
ms.free[shard][offset] = struct{}{}
})
@@ -189,9 +189,9 @@ func (s *MetaStore) Iterate(fn func(chunk.Address, *fcds.Meta) (stop bool, err e
return it.Error()
}
-// iterateFree iterates over all free slot entries in leveldb
+// IterateFree iterates over all free slot entries in leveldb
// and calls the defined callback function on each entry found.
-func (s *MetaStore) iterateFree(fn func(shard uint8, offset int64)) {
+func (s *MetaStore) IterateFree(fn func(shard uint8, offset int64)) {
i := s.db.NewIterator(nil, nil)
defer i.Release()
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 0586d16e6d..871a67dc0e 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -95,33 +95,36 @@ func TestFreeSlotCounter(t *testing.T) {
}
}
- //freeSlots := metaStore.ShardSlots()
-
- //store.Close()
- //metaStore.Close()
-
- //metaStore2, err := leveldb.NewMetaStore(metaPath)
- //if err != nil {
- //t.Fatal(err)
- //}
- //defer func() {
- //metaStore2.Close()
- //os.RemoveAll(metaPath)
- //}()
-
- ////freeSlots2 := metaStore.ShardSlots()
- //count := 0
- //for i, v := range freeSlots {
- //count++
- //if freeSlots2[i].Shard != v.Shard {
- //t.Fatalf("expected shard %d to be %d but got %d", i, v.Shard, freeSlots[2].Shard)
- //}
- //if freeSlots2[i].Val != v.Val {
- //t.Fatalf("expected shard %d to have %d free slots but got %d", i, v.Val, freeSlots[2].Val)
- //}
- //}
-
- //if uint8(count) != fcds.ShardCount {
- //t.Fatalf("did not process enough shards: got %d but expected %d", count, fcds.ShardCount)
- //}
+ // verify free slots
+ cnt := 0
+
+ metaStore.IterateFree(func(uint8, int64) {
+ cnt++
+ })
+
+ if cnt != 10 {
+ t.Fatalf("expected %d free slots but got %d", 10, cnt)
+ }
+
+ store.Close()
+ metaStore.Close()
+
+ metaStore2, err := leveldb.NewMetaStore(metaPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ metaStore2.Close()
+ os.RemoveAll(metaPath)
+ }()
+
+ cnt = 0
+
+ metaStore2.IterateFree(func(_ uint8, _ int64) {
+ cnt++
+ })
+
+ if cnt != 10 {
+ t.Fatalf("expected %d free slots but got %d", 10, cnt)
+ }
}
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index c21858b8c7..f40f1deb21 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -131,6 +131,19 @@ func (s *MetaStore) Iterate(fn func(chunk.Address, *fcds.Meta) (stop bool, err e
return nil
}
+// IterateFree iterates over all free slot entries
+// and calls the defined callback function on each entry found.
+func (s *MetaStore) IterateFree(fn func(shard uint8, offset int64)) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+
+ for shard, offsets := range s.free {
+ for offset, _ := range offsets {
+ fn(shard, offset)
+ }
+ }
+}
+
// Close doesn't do anything.
// It exists to implement fcdb.MetaStore interface.
func (s *MetaStore) Close() (err error) {
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 283f86ae0e..c39d0b61ac 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -31,6 +31,7 @@ type MetaStore interface {
Remove(addr chunk.Address, shard uint8) error
Count() (int, error)
Iterate(func(chunk.Address, *Meta) (stop bool, err error)) error
+ IterateFree(func(shard uint8, offset int64))
FreeOffset() (shard uint8, offset int64, cancel func())
Close() error
}
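
IterateFree is what lets the free-slot bookkeeping survive a restart: the persisted free keys are walked once at start-up and folded back into the in-memory map, and the reworked test above counts them the same way before and after reopening the store. A small sketch of that rebuild step, assuming only the IterateFree signature from the interface above (the metaStore interface and rebuildFree are illustrative names):

package example

// metaStore captures just the callback used below; the real fcds.MetaStore
// interface has more methods.
type metaStore interface {
	IterateFree(fn func(shard uint8, offset int64))
}

// rebuildFree reconstructs the shard -> free offsets map from persisted
// entries, the way NewMetaStore seeds its free map at start-up.
func rebuildFree(ms metaStore, shardCount uint8) map[uint8]map[int64]struct{} {
	free := make(map[uint8]map[int64]struct{}, shardCount)
	for i := uint8(0); i < shardCount; i++ {
		free[i] = make(map[int64]struct{})
	}
	ms.IterateFree(func(shard uint8, offset int64) {
		free[shard][offset] = struct{}{}
	})
	return free
}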
From 4d4f8096fdde10e82c93ed71656b8a3b11d1ff51 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Fri, 13 Mar 2020 19:36:24 +0100
Subject: [PATCH 77/89] remove mutex
---
storage/fcds/fcds.go | 4 ----
1 file changed, 4 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 5bd3e5b224..6a11bb550f 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -69,7 +69,6 @@ type Store struct {
maxChunkSize int // maximal chunk data size
quit chan struct{} // quit disables all operations after Close is called
quitOnce sync.Once // protects quit channel from multiple Close calls
- mtx sync.Mutex
}
// Option is an optional argument passed to New.
@@ -288,9 +287,6 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
return err
}
- s.mtx.Lock()
- defer s.mtx.Unlock()
-
mu := s.shards[m.Shard].mu
mu.Lock()
defer mu.Unlock()
From e6e8a71f57fd690f6733d7a8f357b7340c11e6d4 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 16 Mar 2020 12:18:10 +0100
Subject: [PATCH 78/89] don't test for no grow on mock
---
storage/fcds/fcds.go | 24 +-----------------------
storage/fcds/leveldb/leveldb_test.go | 2 +-
storage/fcds/mem/mem.go | 4 ++--
storage/fcds/mock/mock_test.go | 2 +-
storage/fcds/test/store.go | 17 ++++++++++-------
storage/localstore/localstore.go | 1 -
6 files changed, 15 insertions(+), 35 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 6a11bb550f..c1caf5a004 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -62,9 +62,6 @@ var (
type Store struct {
shards []shard // relations with shard id and a shard file and their mutexes
meta MetaStore // stores chunk offsets
- free []bool // which shards have free offsets
- freeMu sync.RWMutex // protects free field
- freeCache *offsetCache // optional cache of free offset values
wg sync.WaitGroup // blocks Close until all other method calls are done
maxChunkSize int // maximal chunk data size
quit chan struct{} // quit disables all operations after Close is called
@@ -74,24 +71,11 @@ type Store struct {
// Option is an optional argument passed to New.
type Option func(*Store)
-// WithCache is an optional argument to New constructor that enables
-// in memory cache of free chunk data positions in files
-func WithCache(yes bool) Option {
- return func(s *Store) {
- if yes {
- s.freeCache = newOffsetCache(ShardCount)
- } else {
- s.freeCache = nil
- }
- }
-}
-
// New constructs a new Store with files at path, with specified max chunk size.
func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s *Store, err error) {
s = &Store{
shards: make([]shard, ShardCount),
meta: metaStore,
- free: make([]bool, ShardCount),
maxChunkSize: maxChunkSize,
quit: make(chan struct{}),
}
@@ -236,9 +220,6 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
cancel()
return 0, err
}
- if reclaimed && s.freeCache != nil {
- s.freeCache.remove(shardId, offset)
- }
err = s.meta.Set(addr, shardId, reclaimed, &Meta{
Size: uint16(size),
@@ -282,6 +263,7 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
return err
}
defer s.unprotect()
+
m, err := s.getMeta(addr)
if err != nil {
return err
@@ -291,10 +273,6 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
mu.Lock()
defer mu.Unlock()
- if s.freeCache != nil {
- s.freeCache.set(m.Shard, m.Offset)
- }
-
err = s.meta.Remove(addr, m.Shard)
if err != nil {
metrics.GetOrRegisterCounter("fcds.delete.fail", nil).Inc(1)
diff --git a/storage/fcds/leveldb/leveldb_test.go b/storage/fcds/leveldb/leveldb_test.go
index 871a67dc0e..c421594832 100644
--- a/storage/fcds/leveldb/leveldb_test.go
+++ b/storage/fcds/leveldb/leveldb_test.go
@@ -62,7 +62,7 @@ func TestFreeSlotCounter(t *testing.T) {
t.Fatal(err)
}
- store, err := fcds.New(path, chunk.DefaultSize, metaStore, fcds.WithCache(false))
+ store, err := fcds.New(path, chunk.DefaultSize, metaStore)
if err != nil {
os.RemoveAll(path)
t.Fatal(err)
diff --git a/storage/fcds/mem/mem.go b/storage/fcds/mem/mem.go
index 7934bc5691..ba0b1f9214 100644
--- a/storage/fcds/mem/mem.go
+++ b/storage/fcds/mem/mem.go
@@ -58,9 +58,9 @@ func (s *MetaStore) Get(addr chunk.Address) (m *fcds.Meta, err error) {
// Has returns true if meta information is stored.
func (s *MetaStore) Has(addr chunk.Address) (yes bool, err error) {
- s.mu.RLock()
+ s.mtx.RLock()
_, yes = s.meta[string(addr)]
- s.mu.RUnlock()
+ s.mtx.RUnlock()
return yes, nil
}
diff --git a/storage/fcds/mock/mock_test.go b/storage/fcds/mock/mock_test.go
index 49029c608a..8d8adcf0b2 100644
--- a/storage/fcds/mock/mock_test.go
+++ b/storage/fcds/mock/mock_test.go
@@ -28,7 +28,7 @@ import (
// TestFCDS runs a standard series of tests on mock Store implementation.
func TestFCDS(t *testing.T) {
- test.RunAll(t, func(t *testing.T) (fcds.Storer, func()) {
+ test.RunStd(t, func(t *testing.T) (fcds.Storer, func()) {
return mock.New(
mem.NewGlobalStore().NewNodeStore(
common.BytesToAddress(make([]byte, 20)),
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index c204e7682b..2fb135db4b 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -34,7 +34,6 @@ import (
var (
chunksFlag = flag.Int("chunks", 100, "Number of chunks to use in tests.")
concurrencyFlag = flag.Int("concurrency", 8, "Maximal number of parallel operations.")
- noCacheFlag = flag.Bool("no-cache", false, "Disable memory cache.")
)
// Main parses custom cli flags automatically on test runs.
@@ -43,8 +42,16 @@ func Main(m *testing.M) {
os.Exit(m.Run())
}
-// RunAll runs all available tests for a Store implementation.
func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
+ RunStd(t, newStoreFunc)
+
+ t.Run("no grow", func(t *testing.T) {
+ runNoGrow(t, newStoreFunc)
+ })
+}
+
+// RunAll runs all available tests for a Store implementation.
+func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
t.Run("empty", func(t *testing.T) {
RunStore(t, &RunStoreOptions{
ChunkCount: *chunksFlag,
@@ -102,10 +109,6 @@ func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func()))
RunIterator(t, newStoreFunc)
})
- t.Run("no grow", func(t *testing.T) {
- runNoGrow(t, newStoreFunc)
- })
-
}
func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
@@ -410,7 +413,7 @@ func RunIterator(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, fun
func NewFCDSStore(t *testing.T, path string, metaStore fcds.MetaStore) (s *fcds.Store, clean func()) {
t.Helper()
- s, err := fcds.New(path, chunk.DefaultSize, metaStore, fcds.WithCache(!*noCacheFlag))
+ s, err := fcds.New(path, chunk.DefaultSize, metaStore)
if err != nil {
os.RemoveAll(path)
t.Fatal(err)
diff --git a/storage/localstore/localstore.go b/storage/localstore/localstore.go
index efbc329685..7be6d923dd 100644
--- a/storage/localstore/localstore.go
+++ b/storage/localstore/localstore.go
@@ -230,7 +230,6 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
filepath.Join(path, "data"),
chunk.DefaultSize+8, // chunk data has additional 8 bytes prepended
metaStore,
- fcds.WithCache(false),
)
if err != nil {
return nil, err
From 99ac34a610bfc78337ad27353f8c3eba71be3303 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 16 Mar 2020 12:24:14 +0100
Subject: [PATCH 79/89] remove error
---
storage/fcds/fcds.go | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index c1caf5a004..6f6260a2c8 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -52,10 +52,7 @@ var _ Storer = new(Store)
var ShardCount = uint8(32)
// ErrStoreClosed is returned if store is already closed.
-var (
- ErrStoreClosed = errors.New("closed store")
- ErrNextShard = errors.New("error getting next shard")
-)
+var ErrStoreClosed = errors.New("closed store")
// Store is the main FCDS implementation. It stores chunk data into
// a number of files partitioned by the last byte of the chunk address.
From 219050ef3bd465700c4918e34653d36ff5be21d5 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Wed, 18 Mar 2020 09:53:43 +0100
Subject: [PATCH 80/89] forky: address pr comments
---
storage/fcds/fcds.go | 20 ++++++++++----------
storage/fcds/meta.go | 6 ++++--
storage/fcds/test/store.go | 3 ++-
storage/localstore/gc.go | 1 -
swarm.go | 2 +-
5 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 6f6260a2c8..44799fcfbe 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -125,7 +125,7 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
data := make([]byte, m.Size)
n, err := sh.f.ReadAt(data, m.Offset)
if err != nil && err != io.EOF {
- metrics.GetOrRegisterCounter("fcds.get.error", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/get/error", nil).Inc(1)
sh.mu.Unlock()
return nil, err
@@ -135,7 +135,7 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
}
sh.mu.Unlock()
- metrics.GetOrRegisterCounter("fcds.get.ok", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/get/ok", nil).Inc(1)
return chunk.NewChunk(addr, data), nil
}
@@ -150,13 +150,13 @@ func (s *Store) Has(addr chunk.Address) (yes bool, err error) {
_, err = s.getMeta(addr)
if err != nil {
if err == chunk.ErrChunkNotFound {
- metrics.GetOrRegisterCounter("fcds.has.no", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/has/no", nil).Inc(1)
return false, nil
}
- metrics.GetOrRegisterCounter("fcds.has.err", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/has/err", nil).Inc(1)
return false, err
}
- metrics.GetOrRegisterCounter("fcds.has.ok", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/has/ok", nil).Inc(1)
return true, nil
}
@@ -193,17 +193,17 @@ func (s *Store) Put(ch chunk.Chunk) (uint8, error) {
defer sh.mu.Unlock()
if reclaimed {
- metrics.GetOrRegisterCounter("fcds.put.reclaimed", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/put/reclaimed", nil).Inc(1)
}
if offset < 0 {
- metrics.GetOrRegisterCounter("fcds.put.append", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/put/append", nil).Inc(1)
// no free offsets found,
// append the chunk data by
// seeking to the end of the file
offset, err = sh.f.Seek(0, io.SeekEnd)
} else {
- metrics.GetOrRegisterCounter("fcds.put.offset", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/put/offset", nil).Inc(1)
// seek to the offset position
// to replace the chunk data at that position
_, err = sh.f.Seek(offset, io.SeekStart)
@@ -272,11 +272,11 @@ func (s *Store) Delete(addr chunk.Address) (err error) {
err = s.meta.Remove(addr, m.Shard)
if err != nil {
- metrics.GetOrRegisterCounter("fcds.delete.fail", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/delete/fail", nil).Inc(1)
return err
}
- metrics.GetOrRegisterCounter("fcds.delete.ok", nil).Inc(1)
+ metrics.GetOrRegisterCounter("fcds/delete/ok", nil).Inc(1)
return nil
}
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 5684aba16f..4832366ab4 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -74,8 +74,10 @@ func (a byVal) Len() int { return len(a) }
func (a byVal) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byVal) Less(i, j int) bool { return a[j].Val < a[i].Val }
-// ShardInfo contains data about free number of slots
-// in a shard.
+// ShardInfo contains data about an arbitrary shard
+// in that Val could potentially represent any scalar
+// size pertaining to a shard (number of free slots,
+// size in bytes, number of occupied slots, etc).
type ShardInfo struct {
Shard uint8
Val int64
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 2fb135db4b..6329338dc1 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -42,6 +42,7 @@ func Main(m *testing.M) {
os.Exit(m.Run())
}
+// RunAll runs all available tests for a Store implementation.
func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
RunStd(t, newStoreFunc)
@@ -50,7 +51,7 @@ func RunAll(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func()))
})
}
-// RunAll runs all available tests for a Store implementation.
+// RunStd runs all standard tests that are agnostic to specific Store implementation details.
func RunStd(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func())) {
t.Run("empty", func(t *testing.T) {
RunStore(t, &RunStoreOptions{
diff --git a/storage/localstore/gc.go b/storage/localstore/gc.go
index 489a96d71f..1f23733187 100644
--- a/storage/localstore/gc.go
+++ b/storage/localstore/gc.go
@@ -251,7 +251,6 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
}
new = gcSize - c
}
- metrics.GetOrRegisterGauge("localstore.gcsize.index", nil).Update(int64(gcSize))
db.gcSize.PutInBatch(batch, new)
// trigger garbage collection if we reached the capacity
diff --git a/swarm.go b/swarm.go
index 2a48de60f5..9c8eeacbe3 100644
--- a/swarm.go
+++ b/swarm.go
@@ -227,7 +227,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
MockStore: mockStore,
Capacity: config.DbCapacity,
Tags: self.tags,
- PutToGCCheck: func(_ []byte) bool { return true },
+ PutToGCCheck: to.IsWithinDepth,
})
if err != nil {
return nil, err
From 6fb35e582f8aea5e9f0433133a31c951a86412d8 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Mon, 23 Mar 2020 08:42:40 +0100
Subject: [PATCH 81/89] forky: address pr comments
---
storage/fcds/fcds.go | 13 ++++++-------
storage/fcds/meta.go | 18 ++++++++----------
storage/fcds/meta_test.go | 6 +++---
storage/fcds/mock/mock.go | 4 ++--
storage/fcds/test/store.go | 8 ++++----
5 files changed, 23 insertions(+), 26 deletions(-)
diff --git a/storage/fcds/fcds.go b/storage/fcds/fcds.go
index 44799fcfbe..b7ae076057 100644
--- a/storage/fcds/fcds.go
+++ b/storage/fcds/fcds.go
@@ -40,7 +40,7 @@ type Storer interface {
Has(addr chunk.Address) (yes bool, err error)
Put(ch chunk.Chunk) (shard uint8, err error)
Delete(addr chunk.Address) (err error)
- ShardSize() (slots []ShardInfo, err error)
+ ShardSize() (slots []ShardSize, err error)
Count() (count int, err error)
Iterate(func(ch chunk.Chunk) (stop bool, err error)) (err error)
Close() (err error)
@@ -92,8 +92,8 @@ func New(path string, maxChunkSize int, metaStore MetaStore, opts ...Option) (s
return s, nil
}
-func (s *Store) ShardSize() (slots []ShardInfo, err error) {
- slots = make([]ShardInfo, len(s.shards))
+func (s *Store) ShardSize() (slots []ShardSize, err error) {
+ slots = make([]ShardSize, len(s.shards))
for i, sh := range s.shards {
sh.mu.Lock()
fs, err := sh.f.Stat()
@@ -101,7 +101,7 @@ func (s *Store) ShardSize() (slots []ShardInfo, err error) {
if err != nil {
return nil, err
}
- slots[i] = ShardInfo{Shard: uint8(i), Val: fs.Size()}
+ slots[i] = ShardSize{Shard: uint8(i), Size: fs.Size()}
}
return slots, nil
@@ -121,19 +121,18 @@ func (s *Store) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
sh := s.shards[m.Shard]
sh.mu.Lock()
+ defer sh.mu.Unlock()
data := make([]byte, m.Size)
n, err := sh.f.ReadAt(data, m.Offset)
if err != nil && err != io.EOF {
metrics.GetOrRegisterCounter("fcds/get/error", nil).Inc(1)
- sh.mu.Unlock()
return nil, err
}
if n != int(m.Size) {
return nil, fmt.Errorf("incomplete chunk data, read %v of %v", n, m.Size)
}
- sh.mu.Unlock()
metrics.GetOrRegisterCounter("fcds/get/ok", nil).Inc(1)
@@ -248,7 +247,7 @@ func (s *Store) getOffset() (shard uint8, offset int64, reclaimed bool, cancel f
// sorting them will make the first element the largest shard and the last
// element the smallest shard; pick the smallest
- sort.Sort(byVal(shardSizes))
+ sort.Sort(bySize(shardSizes))
return shardSizes[len(shardSizes)-1].Shard, -1, false, cancel, nil
diff --git a/storage/fcds/meta.go b/storage/fcds/meta.go
index 4832366ab4..f9544747d4 100644
--- a/storage/fcds/meta.go
+++ b/storage/fcds/meta.go
@@ -68,17 +68,15 @@ func (m *Meta) String() (s string) {
return fmt.Sprintf("{Size: %v, Offset %v}", m.Size, m.Offset)
}
-type byVal []ShardInfo
+type bySize []ShardSize
-func (a byVal) Len() int { return len(a) }
-func (a byVal) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byVal) Less(i, j int) bool { return a[j].Val < a[i].Val }
+func (a bySize) Len() int { return len(a) }
+func (a bySize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a bySize) Less(i, j int) bool { return a[j].Size < a[i].Size }
-// ShardInfo contains data about an arbitrary shard
-// in that Val could potentially represent any scalar
-// size pertaining to a shard (number of free slots,
-// size in bytes, number of occupied slots, etc).
-type ShardInfo struct {
+// ShardSize contains data about an arbitrary shard
+// in that Size represents the shard size in bytes
+type ShardSize struct {
Shard uint8
- Val int64
+ Size int64
}
diff --git a/storage/fcds/meta_test.go b/storage/fcds/meta_test.go
index 95dbc71688..d6f7a0a4b7 100644
--- a/storage/fcds/meta_test.go
+++ b/storage/fcds/meta_test.go
@@ -61,12 +61,12 @@ func TestShardSlotSort(t *testing.T) {
expectOrder: []int{1, 2, 3, 0},
},
} {
- s := make([]ShardInfo, len(tc.freeSlots))
+ s := make([]ShardSize, len(tc.freeSlots))
for i, v := range tc.freeSlots {
- s[i] = ShardInfo{Shard: uint8(i), Val: int64(v)}
+ s[i] = ShardSize{Shard: uint8(i), Size: int64(v)}
}
- sort.Sort(byVal(s))
+ sort.Sort(bySize(s))
for i, v := range s {
if v.Shard != uint8(tc.expectOrder[i]) {
diff --git a/storage/fcds/mock/mock.go b/storage/fcds/mock/mock.go
index fc73dcd35a..9a4c635b50 100644
--- a/storage/fcds/mock/mock.go
+++ b/storage/fcds/mock/mock.go
@@ -119,9 +119,9 @@ func (s *Store) Iterate(fn func(chunk.Chunk) (stop bool, err error)) (err error)
return nil
}
-func (s *Store) ShardSize() (slots []fcds.ShardInfo, err error) {
+func (s *Store) ShardSize() (slots []fcds.ShardSize, err error) {
i, err := s.Count()
- return []fcds.ShardInfo{fcds.ShardInfo{Shard: 0, Val: int64(i)}}, err
+ return []fcds.ShardSize{fcds.ShardSize{Shard: 0, Size: int64(i)}}, err
}
// Close doesn't do anything.
diff --git a/storage/fcds/test/store.go b/storage/fcds/test/store.go
index 6329338dc1..bced11e549 100644
--- a/storage/fcds/test/store.go
+++ b/storage/fcds/test/store.go
@@ -189,7 +189,7 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
sum := 0
for _, v := range slots {
- sum += int(v.Val)
+ sum += int(v.Size)
}
if sum != 4096*1000 {
@@ -207,11 +207,11 @@ func runNoGrow(t *testing.T, newStoreFunc func(t *testing.T) (fcds.Storer, func(
t.Fatal(err)
}
- minSize, minSlot := slots[0].Val, uint8(0)
+ minSize, minSlot := slots[0].Size, uint8(0)
for i, v := range slots {
// take the _last_ minimum
- if v.Val <= minSize {
- minSize = v.Val
+ if v.Size <= minSize {
+ minSize = v.Size
minSlot = uint8(i)
}
}
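
The getOffset hunk above relies on the bySize ordering introduced in meta.go: Less reports a[j].Size < a[i].Size, so sorting places the largest shard first and the smallest shard last, and the smallest shard is the one that should receive the next appended chunk. A minimal sketch of that selection, written as if it lived inside the fcds package because bySize is unexported (pickEmptiestShard is a hypothetical helper, not part of the patch):

package fcds

import "sort"

// pickEmptiestShard sorts shard sizes in descending order and returns the
// shard with the smallest on-disk size, i.e. where a new chunk should be
// appended when no free slot is available.
func pickEmptiestShard(sizes []ShardSize) (shard uint8, ok bool) {
	if len(sizes) == 0 {
		return 0, false
	}
	sort.Sort(bySize(sizes)) // largest first, smallest last
	return sizes[len(sizes)-1].Shard, true
}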
From 47001875f30400902e2ea361d9afa2e550bf1f45 Mon Sep 17 00:00:00 2001
From: Zahoor Mohamed
Date: Tue, 24 Mar 2020 16:07:32 +0530
Subject: [PATCH 82/89] Add benchmark to compare to badger
---
storage/fcds/leveldb/fcds_test.go | 268 ++++++++++++++++++++++++++++++
1 file changed, 268 insertions(+)
create mode 100644 storage/fcds/leveldb/fcds_test.go
diff --git a/storage/fcds/leveldb/fcds_test.go b/storage/fcds/leveldb/fcds_test.go
new file mode 100644
index 0000000000..7dae16d030
--- /dev/null
+++ b/storage/fcds/leveldb/fcds_test.go
@@ -0,0 +1,268 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package leveldb
+
+import (
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethersphere/swarm/chunk"
+ "github.com/ethersphere/swarm/storage/fcds"
+)
+
+const (
+ ConcurrentThreads = 128
+)
+
+// NewFCDSStore is a test helper function that constructs
+// a new Store for testing purposes into which a specific MetaStore can be injected.
+func NewFCDSStore(t *testing.B) (s *fcds.Store, clean func()) {
+ t.Helper()
+
+ path, err := ioutil.TempDir("", "swarm-fcds-")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ metaStore, err := NewMetaStore(filepath.Join(path, "meta"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s, err = fcds.New(path, chunk.DefaultSize, metaStore)
+ if err != nil {
+ os.RemoveAll(path)
+ t.Fatal(err)
+ }
+ return s, func() {
+ s.Close()
+ os.RemoveAll(path)
+ }
+}
+
+func getChunks(count int, chunkCache []chunk.Chunk) []chunk.Chunk {
+ l := len(chunkCache)
+ if l == 0 {
+ chunkCache = make([]chunk.Chunk, count)
+ for i := 0; i < count; i++ {
+ chunkCache[i] = GenerateTestRandomChunk()
+ }
+ return chunkCache
+ }
+ if l < count {
+ for i := 0; i < count-l; i++ {
+ chunkCache = append(chunkCache, GenerateTestRandomChunk())
+ }
+ return chunkCache
+ }
+ return chunkCache[:count]
+}
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+func GenerateTestRandomChunk() chunk.Chunk {
+ data := make([]byte, chunk.DefaultSize)
+ rand.Read(data)
+ key := make([]byte, 32)
+ rand.Read(key)
+ return chunk.NewChunk(key, data)
+}
+
+// Benchmarks
+func runBenchmark(b *testing.B, baseChunksCount int, writeChunksCount int, readChunksCount int, deleteChunksCount int, iterationCount int) {
+ b.Helper()
+
+ var writeElapsed time.Duration
+ var readElapsed time.Duration
+ var deleteElapsed time.Duration
+
+ for i := 0; i < iterationCount; i++ {
+ db, clean := NewFCDSStore(b)
+ var basechunks []chunk.Chunk
+
+ if baseChunksCount > 0 {
+ basechunks = getChunks(baseChunksCount, basechunks)
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(baseChunksCount)
+ for i, ch := range basechunks {
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if _, err := db.Put(ch); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ fmt.Println("-- adding base chunks took, ", elapsed)
+ }
+
+ rand.Shuffle(baseChunksCount, func(i, j int) {
+ basechunks[i], basechunks[j] = basechunks[j], basechunks[i]
+ })
+
+ var jobWg sync.WaitGroup
+ if writeChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ var writeChunks []chunk.Chunk
+ writeChunks = getChunks(writeChunksCount, writeChunks)
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(writeChunksCount)
+ for i, ch := range writeChunks {
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if _, err := db.Put(ch); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ fmt.Println("-- writing chunks took , ", elapsed)
+ writeElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ if readChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ errCount := 0
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads*4)
+ var wg sync.WaitGroup
+ wg.Add(readChunksCount)
+ for i, ch := range basechunks {
+ if i >= readChunksCount {
+ break
+ }
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ _, err := db.Get(ch.Address())
+ if err != nil {
+ //panic(err)
+ errCount++
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ fmt.Println("-- reading chunks took , ", elapsed)
+ readElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ if deleteChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(deleteChunksCount)
+ for i, ch := range basechunks {
+ if i >= deleteChunksCount {
+ break
+ }
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Delete(ch.Address()); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ fmt.Println("-- deleting chunks took , ", elapsed)
+ deleteElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ jobWg.Wait()
+ clean()
+ }
+
+ if writeElapsed > 0 {
+ fmt.Println("- Average write time : ", writeElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ }
+ if readElapsed > 0 {
+ fmt.Println("- Average read time : ", readElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ }
+ if deleteElapsed > 0 {
+ fmt.Println("- Average delete time : ", deleteElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ }
+}
+
+//func TestStorage (b *testing.T) {
+// runBenchmark(b, 0, 1000000, 0, 0)
+//}
+
+func BenchmarkWriteOverClean_10000(t *testing.B) { runBenchmark(t, 0, 10000, 0, 0, 8) }
+func BenchmarkWriteOverClean_100000(t *testing.B) { runBenchmark(t, 0, 100000, 0, 0, 6) }
+func BenchmarkWriteOverClean_1000000(t *testing.B) { runBenchmark(t, 0, 1000000, 0, 0, 4) }
+
+func BenchmarkWriteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 0, 0,8) }
+func BenchmarkWriteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 0, 0,6) }
+func BenchmarkWriteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 0, 0,4) }
+
+func BenchmarkReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 10000, 0, 8) }
+func BenchmarkReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 100000, 0, 6) }
+func BenchmarkReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 1000000, 0, 4) }
+
+
+func BenchmarkDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 10000,8) }
+func BenchmarkDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 100000,6) }
+func BenchmarkDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 1000000,4) }
+
+func BenchmarkWriteReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 0,8) }
+func BenchmarkWriteReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 0,6) }
+func BenchmarkWriteReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 1000000, 0,4) }
+
+func BenchmarkWriteReadDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 10000,8) }
+func BenchmarkWriteReadDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 100000,6) }
+func BenchmarkWriteReadDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 1000000, 1000000,4) }
+
From 7055489d2e3ee63446046df66d7f925c29cb654b Mon Sep 17 00:00:00 2001
From: Zahoor Mohamed
Date: Thu, 26 Mar 2020 14:23:03 +0530
Subject: [PATCH 83/89] Ignoring setup stage in benchmark timings
---
storage/fcds/leveldb/fcds_test.go | 38 +++++++++++++++++++------------
1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/storage/fcds/leveldb/fcds_test.go b/storage/fcds/leveldb/fcds_test.go
index 7dae16d030..0907719c63 100644
--- a/storage/fcds/leveldb/fcds_test.go
+++ b/storage/fcds/leveldb/fcds_test.go
@@ -102,6 +102,7 @@ func runBenchmark(b *testing.B, baseChunksCount int, writeChunksCount int, readC
db, clean := NewFCDSStore(b)
var basechunks []chunk.Chunk
+ b.StopTimer()
if baseChunksCount > 0 {
basechunks = getChunks(baseChunksCount, basechunks)
start := time.Now()
@@ -128,6 +129,7 @@ func runBenchmark(b *testing.B, baseChunksCount int, writeChunksCount int, readC
rand.Shuffle(baseChunksCount, func(i, j int) {
basechunks[i], basechunks[j] = basechunks[j], basechunks[i]
})
+ b.StartTimer()
var jobWg sync.WaitGroup
if writeChunksCount > 0 {
@@ -245,24 +247,32 @@ func BenchmarkWriteOverClean_10000(t *testing.B) { runBenchmark(t, 0, 10000, 0
func BenchmarkWriteOverClean_100000(t *testing.B) { runBenchmark(t, 0, 100000, 0, 0, 6) }
func BenchmarkWriteOverClean_1000000(t *testing.B) { runBenchmark(t, 0, 1000000, 0, 0, 4) }
-func BenchmarkWriteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 0, 0,8) }
-func BenchmarkWriteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 0, 0,6) }
-func BenchmarkWriteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 0, 0,4) }
+func BenchmarkWriteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 0, 0, 8) }
+func BenchmarkWriteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 0, 0, 6) }
+func BenchmarkWriteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 0, 0, 4) }
func BenchmarkReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 10000, 0, 8) }
func BenchmarkReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 100000, 0, 6) }
func BenchmarkReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 1000000, 0, 4) }
+func BenchmarkDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 10000, 8) }
+func BenchmarkDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 100000, 6) }
+func BenchmarkDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 1000000, 4) }
-func BenchmarkDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 10000,8) }
-func BenchmarkDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 100000,6) }
-func BenchmarkDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 1000000,4) }
-
-func BenchmarkWriteReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 0,8) }
-func BenchmarkWriteReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 0,6) }
-func BenchmarkWriteReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 1000000, 0,4) }
-
-func BenchmarkWriteReadDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 10000,8) }
-func BenchmarkWriteReadDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 100000,6) }
-func BenchmarkWriteReadDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 1000000, 1000000,4) }
+func BenchmarkWriteReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 0, 8) }
+func BenchmarkWriteReadOver1Million_100000(t *testing.B) {
+ runBenchmark(t, 1000000, 100000, 100000, 0, 6)
+}
+func BenchmarkWriteReadOver1Million_1000000(t *testing.B) {
+ runBenchmark(t, 1000000, 1000000, 1000000, 0, 4)
+}
+func BenchmarkWriteReadDeleteOver1Million_10000(t *testing.B) {
+ runBenchmark(t, 1000000, 10000, 10000, 10000, 8)
+}
+func BenchmarkWriteReadDeleteOver1Million_100000(t *testing.B) {
+ runBenchmark(t, 1000000, 100000, 100000, 100000, 6)
+}
+func BenchmarkWriteReadDeleteOver1Million_1000000(t *testing.B) {
+ runBenchmark(t, 1000000, 1000000, 1000000, 1000000, 4)
+}
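
The patch leans on testing.B's StopTimer and StartTimer so that per-iteration setup does not contribute to the reported ns/op. A minimal, self-contained sketch of the same pattern with placeholder work (buildFixture and sumFixture are hypothetical helpers, not part of the benchmarks above):

package leveldb

import "testing"

// buildFixture stands in for expensive per-iteration setup.
func buildFixture() []int {
	s := make([]int, 1<<16)
	for i := range s {
		s[i] = i
	}
	return s
}

// sumFixture stands in for the operation actually being measured.
func sumFixture(s []int) (total int) {
	for _, v := range s {
		total += v
	}
	return total
}

// BenchmarkTimerPattern stops the timer while the fixture is built and
// restarts it for the measured work, so only sumFixture affects ns/op.
func BenchmarkTimerPattern(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		fixture := buildFixture()
		b.StartTimer()

		_ = sumFixture(fixture)
	}
}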
From 7771a35df244be0e7ca4e08c42acf29a6c4af78d Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 26 Mar 2020 12:25:38 +0100
Subject: [PATCH 84/89] add benchmark from badger branch
---
storage/fcds/fcds_test.go | 295 ++++++++++++++++++++++++++++++++++++++
1 file changed, 295 insertions(+)
create mode 100644 storage/fcds/fcds_test.go
diff --git a/storage/fcds/fcds_test.go b/storage/fcds/fcds_test.go
new file mode 100644
index 0000000000..d297610977
--- /dev/null
+++ b/storage/fcds/fcds_test.go
@@ -0,0 +1,295 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package fcds
+
+import (
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethersphere/swarm/chunk"
+)
+
+const (
+ ConcurrentThreads = 128
+)
+
+func newDB(b *testing.B) (db Storer, clean func()) {
+ b.Helper()
+
+ path, err := ioutil.TempDir("/tmp/swarm", "swarm-shed")
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ db, err = New(path)
+ if err != nil {
+ os.RemoveAll(path)
+ b.Fatal(err)
+ }
+ return db, func() {
+ db.Close()
+ os.RemoveAll(path)
+ }
+}
+
+func getChunks(count int, chunkCache []chunk.Chunk) []chunk.Chunk {
+ l := len(chunkCache)
+ if l == 0 {
+ chunkCache = make([]chunk.Chunk, count)
+ for i := 0; i < count; i++ {
+ chunkCache[i] = GenerateTestRandomChunk()
+ }
+ return chunkCache
+ }
+ if l < count {
+ for i := 0; i < count-l; i++ {
+ chunkCache = append(chunkCache, GenerateTestRandomChunk())
+ }
+ return chunkCache
+ }
+ return chunkCache[:count]
+}
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+func GenerateTestRandomChunk() chunk.Chunk {
+ data := make([]byte, chunk.DefaultSize)
+ rand.Read(data)
+ key := make([]byte, 32)
+ rand.Read(key)
+ return chunk.NewChunk(key, data)
+}
+
+func createBenchBaseline(b *testing.B, baseChunksCount int) (db Storer, clean func(), baseChunks []chunk.Chunk) {
+ db, clean = newDB(b)
+
+ if baseChunksCount > 0 {
+ baseChunks = getChunks(baseChunksCount, baseChunks)
+ //start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(baseChunksCount)
+ for i, ch := range baseChunks {
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Put(ch); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ //elapsed := time.Since(start)
+ //fmt.Println("-- adding base chunks took, ", elapsed)
+ }
+
+ rand.Shuffle(baseChunksCount, func(i, j int) {
+ baseChunks[i], baseChunks[j] = baseChunks[j], baseChunks[i]
+ })
+
+ return db, clean, baseChunks
+}
+
+// Benchmarks
+
+func runBenchmark(b *testing.B, db Storer, basechunks []chunk.Chunk, baseChunksCount int, writeChunksCount int, readChunksCount int, deleteChunksCount int) {
+ var writeElapsed time.Duration
+ var readElapsed time.Duration
+ var deleteElapsed time.Duration
+
+ var writeChunks []chunk.Chunk
+ writeChunks = getChunks(writeChunksCount, writeChunks)
+ b.StartTimer()
+
+ var jobWg sync.WaitGroup
+ if writeChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(writeChunksCount)
+ for i, ch := range writeChunks {
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Put(ch); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ fmt.Println("-- writing chunks took , ", elapsed)
+ writeElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ if readChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ errCount := 0
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads*4)
+ var wg sync.WaitGroup
+ wg.Add(readChunksCount)
+ for i, ch := range basechunks {
+ if i >= readChunksCount {
+ break
+ }
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ _, err := db.Get(ch.Address())
+ if err != nil {
+ //panic(err)
+ errCount++
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- reading chunks took , ", elapsed)
+ readElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ if deleteChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(deleteChunksCount)
+ for i, ch := range basechunks {
+ if i >= deleteChunksCount {
+ break
+ }
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Delete(ch.Address()); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- deleting chunks took , ", elapsed)
+ deleteElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
+
+ jobWg.Wait()
+
+ //if writeElapsed > 0 {
+ // fmt.Println("- Average write time : ", writeElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+ //if readElapsed > 0 {
+ // //fmt.Println("- Average read time : ", readElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+ //if deleteElapsed > 0 {
+ // //fmt.Println("- Average delete time : ", deleteElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+}
+
+func BenchmarkWrite_Add10K(b *testing.B) {
+ for i := 10000; i <= 1000000; i *= 10 {
+ b.Run(fmt.Sprintf("Baseline_%d", i), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
+ runBenchmark(b, db, baseChunks, 0, 10000, 0, 0)
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+ }
+ })
+ }
+}
+
+func BenchmarkReadOverClean(b *testing.B) {
+ for i := 10000; i <= 1000000; i *= 10 {
+ b.Run(fmt.Sprintf("Baseline_%d", i), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
+ runBenchmark(b, db, baseChunks, 0, 0, 10000, 0)
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+ }
+ })
+ }
+}
+
+//func BenchmarkWriteOverClean_100000(t *testing.B) { runBenchmark(t, 0, 100000, 0, 0, 6) }
+//func BenchmarkWriteOverClean_1000000(t *testing.B) { runBenchmark(t, 0, 1000000, 0, 0, 4) }
+
+//func BenchmarkWriteOver1Million_10000(t *testing.B) {
+//for i := 0; i < t.N; i++ {
+//runBenchmark(t, 1000000, 10000, 0, 0, 8)
+//}
+
+//}
+
+//func BenchmarkWriteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 0, 0,6) }
+//func BenchmarkWriteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 0, 0, 4) }
+//func BenchmarkWriteOver1Million_5000000(t *testing.B) { runBenchmark(t, 5000000, 1000000, 0, 0, 4) }
+//func BenchmarkWriteOver1Million_10000000(t *testing.B) { runBenchmark(t, 10000000, 1000000, 0, 0, 4) }
+
+//func BenchmarkReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 10000, 0,8) }
+//func BenchmarkReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 100000, 0, 6) }
+//func BenchmarkReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 1000000, 0, 4) }
+
+//func BenchmarkDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 10000,8) }
+//func BenchmarkDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 100000,6) }
+//func BenchmarkDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 1000000, 4) }
+
+//func BenchmarkWriteReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 0,8) }
+//func BenchmarkWriteReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 0,6) }
+//func BenchmarkWriteReadOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 0, 4)}
+
+//func BenchmarkWriteReadDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 10000,8) }
+//func BenchmarkWriteReadDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 100000,6) }
+//func BenchmarkWriteReadDeleteOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 1000000, 4)}
From d098a920d5a2ef7ea63e4739f8531cb2345727fb Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 26 Mar 2020 13:00:35 +0100
Subject: [PATCH 85/89] fcds_test.go
---
storage/fcds/fcds_test.go | 295 -------------------------
storage/fcds/leveldb/fcds_test.go | 345 ++++++++++++++++--------------
2 files changed, 181 insertions(+), 459 deletions(-)
delete mode 100644 storage/fcds/fcds_test.go
diff --git a/storage/fcds/fcds_test.go b/storage/fcds/fcds_test.go
deleted file mode 100644
index d297610977..0000000000
--- a/storage/fcds/fcds_test.go
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package fcds
-
-import (
- "fmt"
- "io/ioutil"
- "math/rand"
- "os"
- "sync"
- "testing"
- "time"
-
- "github.com/ethersphere/swarm/chunk"
-)
-
-const (
- ConcurrentThreads = 128
-)
-
-func newDB(b *testing.B) (db Storer, clean func()) {
- b.Helper()
-
- path, err := ioutil.TempDir("/tmp/swarm", "swarm-shed")
- if err != nil {
- b.Fatal(err)
- }
-
- db, err = New(path)
- if err != nil {
- os.RemoveAll(path)
- b.Fatal(err)
- }
- return db, func() {
- db.Close()
- os.RemoveAll(path)
- }
-}
-
-func getChunks(count int, chunkCache []chunk.Chunk) []chunk.Chunk {
- l := len(chunkCache)
- if l == 0 {
- chunkCache = make([]chunk.Chunk, count)
- for i := 0; i < count; i++ {
- chunkCache[i] = GenerateTestRandomChunk()
- }
- return chunkCache
- }
- if l < count {
- for i := 0; i < count-l; i++ {
- chunkCache = append(chunkCache, GenerateTestRandomChunk())
- }
- return chunkCache
- }
- return chunkCache[:count]
-}
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
-
-func GenerateTestRandomChunk() chunk.Chunk {
- data := make([]byte, chunk.DefaultSize)
- rand.Read(data)
- key := make([]byte, 32)
- rand.Read(key)
- return chunk.NewChunk(key, data)
-}
-
-func createBenchBaseline(b *testing.B, baseChunksCount int) (db Storer, clean func(), baseChunks []chunk.Chunk) {
- db, clean = newDB(b)
-
- if baseChunksCount > 0 {
- baseChunks = getChunks(baseChunksCount, baseChunks)
- //start := time.Now()
- sem := make(chan struct{}, ConcurrentThreads)
- var wg sync.WaitGroup
- wg.Add(baseChunksCount)
- for i, ch := range baseChunks {
- sem <- struct{}{}
- go func(i int, ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
- if err := db.Put(ch); err != nil {
- panic(err)
- }
- }(i, ch)
- }
- wg.Wait()
- //elapsed := time.Since(start)
- //fmt.Println("-- adding base chunks took, ", elapsed)
- }
-
- rand.Shuffle(baseChunksCount, func(i, j int) {
- baseChunks[i], baseChunks[j] = baseChunks[j], baseChunks[i]
- })
-
- return db, clean, baseChunks
-}
-
-// Benchmarks
-
-func runBenchmark(b *testing.B, db Storer, basechunks []chunk.Chunk, baseChunksCount int, writeChunksCount int, readChunksCount int, deleteChunksCount int) {
- var writeElapsed time.Duration
- var readElapsed time.Duration
- var deleteElapsed time.Duration
-
- var writeChunks []chunk.Chunk
- writeChunks = getChunks(writeChunksCount, writeChunks)
- b.StartTimer()
-
- var jobWg sync.WaitGroup
- if writeChunksCount > 0 {
- jobWg.Add(1)
- go func() {
- start := time.Now()
- sem := make(chan struct{}, ConcurrentThreads)
- var wg sync.WaitGroup
- wg.Add(writeChunksCount)
- for i, ch := range writeChunks {
- sem <- struct{}{}
- go func(i int, ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
- if err := db.Put(ch); err != nil {
- panic(err)
- }
- }(i, ch)
- }
- wg.Wait()
- elapsed := time.Since(start)
- fmt.Println("-- writing chunks took , ", elapsed)
- writeElapsed += elapsed
- jobWg.Done()
- }()
- }
-
- if readChunksCount > 0 {
- jobWg.Add(1)
- go func() {
- errCount := 0
- start := time.Now()
- sem := make(chan struct{}, ConcurrentThreads*4)
- var wg sync.WaitGroup
- wg.Add(readChunksCount)
- for i, ch := range basechunks {
- if i >= readChunksCount {
- break
- }
- sem <- struct{}{}
- go func(i int, ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
- _, err := db.Get(ch.Address())
- if err != nil {
- //panic(err)
- errCount++
- }
- }(i, ch)
- }
- wg.Wait()
- elapsed := time.Since(start)
- //fmt.Println("-- reading chunks took , ", elapsed)
- readElapsed += elapsed
- jobWg.Done()
- }()
- }
-
- if deleteChunksCount > 0 {
- jobWg.Add(1)
- go func() {
- start := time.Now()
- sem := make(chan struct{}, ConcurrentThreads)
- var wg sync.WaitGroup
- wg.Add(deleteChunksCount)
- for i, ch := range basechunks {
- if i >= deleteChunksCount {
- break
- }
- sem <- struct{}{}
- go func(i int, ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
- if err := db.Delete(ch.Address()); err != nil {
- panic(err)
- }
- }(i, ch)
- }
- wg.Wait()
- elapsed := time.Since(start)
- //fmt.Println("-- deleting chunks took , ", elapsed)
- deleteElapsed += elapsed
- jobWg.Done()
- }()
- }
-
- jobWg.Wait()
-
- //if writeElapsed > 0 {
- // fmt.Println("- Average write time : ", writeElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
- //}
- //if readElapsed > 0 {
- // //fmt.Println("- Average read time : ", readElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
- //}
- //if deleteElapsed > 0 {
- // //fmt.Println("- Average delete time : ", deleteElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
- //}
-}
-
-func BenchmarkWrite_Add10K(b *testing.B) {
- for i := 10000; i <= 1000000; i *= 10 {
- b.Run(fmt.Sprintf("Baseline_%d", i), func(b *testing.B) {
- for j := 0; j < b.N; j++ {
- b.StopTimer()
- db, clean, baseChunks := createBenchBaseline(b, i)
- b.StartTimer()
-
- runBenchmark(b, db, baseChunks, 0, 10000, 0, 0)
- b.StopTimer()
- clean()
- b.StartTimer()
- }
- })
- }
-}
-
-func BenchmarkReadOverClean(b *testing.B) {
- for i := 10000; i <= 1000000; i *= 10 {
- b.Run(fmt.Sprintf("Baseline_%d", i), func(b *testing.B) {
- for j := 0; j < b.N; j++ {
- b.StopTimer()
- db, clean, baseChunks := createBenchBaseline(b, i)
- b.StartTimer()
-
- runBenchmark(b, db, baseChunks, 0, 0, 10000, 0)
- b.StopTimer()
- clean()
- b.StartTimer()
- }
- })
- }
-}
-
-//func BenchmarkWriteOverClean_100000(t *testing.B) { runBenchmark(t, 0, 100000, 0, 0, 6) }
-//func BenchmarkWriteOverClean_1000000(t *testing.B) { runBenchmark(t, 0, 1000000, 0, 0, 4) }
-
-//func BenchmarkWriteOver1Million_10000(t *testing.B) {
-//for i := 0; i < t.N; i++ {
-//runBenchmark(t, 1000000, 10000, 0, 0, 8)
-//}
-
-//}
-
-//func BenchmarkWriteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 0, 0,6) }
-//func BenchmarkWriteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 0, 0, 4) }
-//func BenchmarkWriteOver1Million_5000000(t *testing.B) { runBenchmark(t, 5000000, 1000000, 0, 0, 4) }
-//func BenchmarkWriteOver1Million_10000000(t *testing.B) { runBenchmark(t, 10000000, 1000000, 0, 0, 4) }
-
-//func BenchmarkReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 10000, 0,8) }
-//func BenchmarkReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 100000, 0, 6) }
-//func BenchmarkReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 1000000, 0, 4) }
-
-//func BenchmarkDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 10000,8) }
-//func BenchmarkDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 100000,6) }
-//func BenchmarkDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 1000000, 4) }
-
-//func BenchmarkWriteReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 0,8) }
-//func BenchmarkWriteReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 0,6) }
-//func BenchmarkWriteReadOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 0, 4)}
-
-//func BenchmarkWriteReadDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 10000,8) }
-//func BenchmarkWriteReadDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 100000,6) }
-//func BenchmarkWriteReadDeleteOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 1000000, 4)}
diff --git a/storage/fcds/leveldb/fcds_test.go b/storage/fcds/leveldb/fcds_test.go
index 0907719c63..d297610977 100644
--- a/storage/fcds/leveldb/fcds_test.go
+++ b/storage/fcds/leveldb/fcds_test.go
@@ -14,48 +14,39 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-package leveldb
+package fcds
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
- "path/filepath"
"sync"
"testing"
"time"
"github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/storage/fcds"
)
const (
ConcurrentThreads = 128
)
-// NewFCDSStore is a test helper function that constructs
-// a new Store for testing purposes into which a specific MetaStore can be injected.
-func NewFCDSStore(t *testing.B) (s *fcds.Store, clean func()) {
- t.Helper()
-
- path, err := ioutil.TempDir("", "swarm-fcds-")
- if err != nil {
- t.Fatal(err)
- }
+func newDB(b *testing.B) (db Storer, clean func()) {
+ b.Helper()
- metaStore, err := NewMetaStore(filepath.Join(path, "meta"))
+ path, err := ioutil.TempDir("/tmp/swarm", "swarm-shed")
if err != nil {
- t.Fatal(err)
+ b.Fatal(err)
}
- s, err = fcds.New(path, chunk.DefaultSize, metaStore)
+ db, err = New(path)
if err != nil {
os.RemoveAll(path)
- t.Fatal(err)
+ b.Fatal(err)
}
- return s, func() {
- s.Close()
+ return db, func() {
+ db.Close()
os.RemoveAll(path)
}
}
@@ -90,189 +81,215 @@ func GenerateTestRandomChunk() chunk.Chunk {
return chunk.NewChunk(key, data)
}
+func createBenchBaseline(b *testing.B, baseChunksCount int) (db Storer, clean func(), baseChunks []chunk.Chunk) {
+ db, clean = newDB(b)
+
+ if baseChunksCount > 0 {
+ baseChunks = getChunks(baseChunksCount, baseChunks)
+ //start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(baseChunksCount)
+ for i, ch := range baseChunks {
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Put(ch); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ //elapsed := time.Since(start)
+ //fmt.Println("-- adding base chunks took, ", elapsed)
+ }
+
+ rand.Shuffle(baseChunksCount, func(i, j int) {
+ baseChunks[i], baseChunks[j] = baseChunks[j], baseChunks[i]
+ })
+
+ return db, clean, baseChunks
+}
+
// Benchmarks
-func runBenchmark(b *testing.B, baseChunksCount int, writeChunksCount int, readChunksCount int, deleteChunksCount int, iterationCount int) {
- b.Helper()
+func runBenchmark(b *testing.B, db Storer, basechunks []chunk.Chunk, baseChunksCount int, writeChunksCount int, readChunksCount int, deleteChunksCount int) {
var writeElapsed time.Duration
var readElapsed time.Duration
var deleteElapsed time.Duration
- for i := 0; i < iterationCount; i++ {
- db, clean := NewFCDSStore(b)
- var basechunks []chunk.Chunk
+ var writeChunks []chunk.Chunk
+ writeChunks = getChunks(writeChunksCount, writeChunks)
+ b.StartTimer()
- b.StopTimer()
- if baseChunksCount > 0 {
- basechunks = getChunks(baseChunksCount, basechunks)
+ var jobWg sync.WaitGroup
+ if writeChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
start := time.Now()
sem := make(chan struct{}, ConcurrentThreads)
var wg sync.WaitGroup
- wg.Add(baseChunksCount)
- for i, ch := range basechunks {
+ wg.Add(writeChunksCount)
+ for i, ch := range writeChunks {
sem <- struct{}{}
go func(i int, ch chunk.Chunk) {
defer func() {
<-sem
wg.Done()
}()
- if _, err := db.Put(ch); err != nil {
+ if err := db.Put(ch); err != nil {
panic(err)
}
}(i, ch)
}
wg.Wait()
elapsed := time.Since(start)
- fmt.Println("-- adding base chunks took, ", elapsed)
- }
+ fmt.Println("-- writing chunks took , ", elapsed)
+ writeElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
- rand.Shuffle(baseChunksCount, func(i, j int) {
- basechunks[i], basechunks[j] = basechunks[j], basechunks[i]
- })
- b.StartTimer()
-
- var jobWg sync.WaitGroup
- if writeChunksCount > 0 {
- jobWg.Add(1)
- go func() {
- var writeChunks []chunk.Chunk
- writeChunks = getChunks(writeChunksCount, writeChunks)
- start := time.Now()
- sem := make(chan struct{}, ConcurrentThreads)
- var wg sync.WaitGroup
- wg.Add(writeChunksCount)
- for i, ch := range writeChunks {
- sem <- struct{}{}
- go func(i int, ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
- if _, err := db.Put(ch); err != nil {
- panic(err)
- }
- }(i, ch)
+ if readChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ errCount := 0
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads*4)
+ var wg sync.WaitGroup
+ wg.Add(readChunksCount)
+ for i, ch := range basechunks {
+ if i >= readChunksCount {
+ break
}
- wg.Wait()
- elapsed := time.Since(start)
- fmt.Println("-- writing chunks took , ", elapsed)
- writeElapsed += elapsed
- jobWg.Done()
- }()
- }
-
- if readChunksCount > 0 {
- jobWg.Add(1)
- go func() {
- errCount := 0
- start := time.Now()
- sem := make(chan struct{}, ConcurrentThreads*4)
- var wg sync.WaitGroup
- wg.Add(readChunksCount)
- for i, ch := range basechunks {
- if i >= readChunksCount {
- break
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ _, err := db.Get(ch.Address())
+ if err != nil {
+ //panic(err)
+ errCount++
}
- sem <- struct{}{}
- go func(i int, ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
- _, err := db.Get(ch.Address())
- if err != nil {
- //panic(err)
- errCount++
- }
- }(i, ch)
- }
- wg.Wait()
- elapsed := time.Since(start)
- fmt.Println("-- reading chunks took , ", elapsed)
- readElapsed += elapsed
- jobWg.Done()
- }()
- }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- reading chunks took , ", elapsed)
+ readElapsed += elapsed
+ jobWg.Done()
+ }()
+ }
- if deleteChunksCount > 0 {
- jobWg.Add(1)
- go func() {
- start := time.Now()
- sem := make(chan struct{}, ConcurrentThreads)
- var wg sync.WaitGroup
- wg.Add(deleteChunksCount)
- for i, ch := range basechunks {
- if i >= deleteChunksCount {
- break
- }
- sem <- struct{}{}
- go func(i int, ch chunk.Chunk) {
- defer func() {
- <-sem
- wg.Done()
- }()
- if err := db.Delete(ch.Address()); err != nil {
- panic(err)
- }
- }(i, ch)
+ if deleteChunksCount > 0 {
+ jobWg.Add(1)
+ go func() {
+ start := time.Now()
+ sem := make(chan struct{}, ConcurrentThreads)
+ var wg sync.WaitGroup
+ wg.Add(deleteChunksCount)
+ for i, ch := range basechunks {
+ if i >= deleteChunksCount {
+ break
}
- wg.Wait()
- elapsed := time.Since(start)
- fmt.Println("-- deleting chunks took , ", elapsed)
- deleteElapsed += elapsed
- jobWg.Done()
- }()
- }
-
- jobWg.Wait()
- clean()
+ sem <- struct{}{}
+ go func(i int, ch chunk.Chunk) {
+ defer func() {
+ <-sem
+ wg.Done()
+ }()
+ if err := db.Delete(ch.Address()); err != nil {
+ panic(err)
+ }
+ }(i, ch)
+ }
+ wg.Wait()
+ elapsed := time.Since(start)
+ //fmt.Println("-- deleting chunks took , ", elapsed)
+ deleteElapsed += elapsed
+ jobWg.Done()
+ }()
}
- if writeElapsed > 0 {
- fmt.Println("- Average write time : ", writeElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
- }
- if readElapsed > 0 {
- fmt.Println("- Average read time : ", readElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ jobWg.Wait()
+
+ //if writeElapsed > 0 {
+ // fmt.Println("- Average write time : ", writeElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+ //if readElapsed > 0 {
+ // //fmt.Println("- Average read time : ", readElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+ //if deleteElapsed > 0 {
+ // //fmt.Println("- Average delete time : ", deleteElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+ //}
+}
+
+func BenchmarkWrite_Add10K(b *testing.B) {
+ for i := 10000; i <= 1000000; i *= 10 {
+ b.Run(fmt.Sprintf("Baseline_%d", i), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
+ runBenchmark(b, db, baseChunks, 0, 10000, 0, 0)
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+ }
+ })
}
- if deleteElapsed > 0 {
- fmt.Println("- Average delete time : ", deleteElapsed.Nanoseconds()/int64(iterationCount), " ns/op")
+}
+
+func BenchmarkReadOverClean(b *testing.B) {
+ for i := 10000; i <= 1000000; i *= 10 {
+ b.Run(fmt.Sprintf("Baseline_%d", i), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
+ runBenchmark(b, db, baseChunks, 0, 0, 10000, 0)
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+ }
+ })
}
}
-//func TestStorage (b *testing.T) {
-// runBenchmark(b, 0, 1000000, 0, 0)
+//func BenchmarkWriteOverClean_100000(t *testing.B) { runBenchmark(t, 0, 100000, 0, 0, 6) }
+//func BenchmarkWriteOverClean_1000000(t *testing.B) { runBenchmark(t, 0, 1000000, 0, 0, 4) }
+
+//func BenchmarkWriteOver1Million_10000(t *testing.B) {
+//for i := 0; i < t.N; i++ {
+//runBenchmark(t, 1000000, 10000, 0, 0, 8)
//}
-func BenchmarkWriteOverClean_10000(t *testing.B) { runBenchmark(t, 0, 10000, 0, 0, 8) }
-func BenchmarkWriteOverClean_100000(t *testing.B) { runBenchmark(t, 0, 100000, 0, 0, 6) }
-func BenchmarkWriteOverClean_1000000(t *testing.B) { runBenchmark(t, 0, 1000000, 0, 0, 4) }
+//}
-func BenchmarkWriteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 0, 0, 8) }
-func BenchmarkWriteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 0, 0, 6) }
-func BenchmarkWriteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 0, 0, 4) }
+//func BenchmarkWriteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 0, 0,6) }
+//func BenchmarkWriteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 1000000, 0, 0, 4) }
+//func BenchmarkWriteOver1Million_5000000(t *testing.B) { runBenchmark(t, 5000000, 1000000, 0, 0, 4) }
+//func BenchmarkWriteOver1Million_10000000(t *testing.B) { runBenchmark(t, 10000000, 1000000, 0, 0, 4) }
-func BenchmarkReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 10000, 0, 8) }
-func BenchmarkReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 100000, 0, 6) }
-func BenchmarkReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 1000000, 0, 4) }
+//func BenchmarkReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 10000, 0,8) }
+//func BenchmarkReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 100000, 0, 6) }
+//func BenchmarkReadOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 1000000, 0, 4) }
-func BenchmarkDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 10000, 8) }
-func BenchmarkDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 100000, 6) }
-func BenchmarkDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 1000000, 4) }
+//func BenchmarkDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 10000,8) }
+//func BenchmarkDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 100000,6) }
+//func BenchmarkDeleteOver1Million_1000000(t *testing.B) { runBenchmark(t, 1000000, 0, 0, 1000000, 4) }
-func BenchmarkWriteReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 0, 8) }
-func BenchmarkWriteReadOver1Million_100000(t *testing.B) {
- runBenchmark(t, 1000000, 100000, 100000, 0, 6)
-}
-func BenchmarkWriteReadOver1Million_1000000(t *testing.B) {
- runBenchmark(t, 1000000, 1000000, 1000000, 0, 4)
-}
+//func BenchmarkWriteReadOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 0,8) }
+//func BenchmarkWriteReadOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 0,6) }
+//func BenchmarkWriteReadOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 0, 4)}
-func BenchmarkWriteReadDeleteOver1Million_10000(t *testing.B) {
- runBenchmark(t, 1000000, 10000, 10000, 10000, 8)
-}
-func BenchmarkWriteReadDeleteOver1Million_100000(t *testing.B) {
- runBenchmark(t, 1000000, 100000, 100000, 100000, 6)
-}
-func BenchmarkWriteReadDeleteOver1Million_1000000(t *testing.B) {
- runBenchmark(t, 1000000, 1000000, 1000000, 1000000, 4)
-}
+//func BenchmarkWriteReadDeleteOver1Million_10000(t *testing.B) { runBenchmark(t, 1000000, 10000, 10000, 10000,8) }
+//func BenchmarkWriteReadDeleteOver1Million_100000(t *testing.B) { runBenchmark(t, 1000000, 100000, 100000, 100000,6) }
+//func BenchmarkWriteReadDeleteOver1Million_1000000(t *testing.B) {runBenchmark(t, 1000000, 1000000, 1000000, 1000000, 4)}
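
The benchmarks above converge on Go's sub-benchmark pattern: each baseline size gets its own b.Run, and setup/teardown stays out of the measurement by pausing the timer. A minimal, self-contained sketch of that pattern follows; the setup/teardown helpers are placeholders standing in for createBenchBaseline and its clean func, not the patch's API.

package bench

import (
	"fmt"
	"testing"
)

// setup and teardown are placeholders for baseline creation and cleanup.
func setup(n int) (data []int, teardown func()) {
	return make([]int, n), func() {}
}

func BenchmarkPattern(b *testing.B) {
	for size := 10000; size <= 1000000; size *= 10 {
		b.Run(fmt.Sprintf("baseline_%d", size), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				b.StopTimer() // exclude baseline creation from the measurement
				data, teardown := setup(size)
				b.StartTimer()

				for j := range data { // the measured operation
					data[j]++
				}

				b.StopTimer() // exclude cleanup as well
				teardown()
				b.StartTimer()
			}
		})
	}
}
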
From d259aa05e2ce236acb9c177de6ee4c2fb2854372 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 26 Mar 2020 13:08:34 +0100
Subject: [PATCH 86/89] change baselines to 50k-500k-5M
---
storage/fcds/leveldb/fcds_test.go | 53 +++++++++++++++++++------------
1 file changed, 32 insertions(+), 21 deletions(-)
diff --git a/storage/fcds/leveldb/fcds_test.go b/storage/fcds/leveldb/fcds_test.go
index d297610977..f25af4ecd7 100644
--- a/storage/fcds/leveldb/fcds_test.go
+++ b/storage/fcds/leveldb/fcds_test.go
@@ -14,33 +14,40 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-package fcds
+package leveldb_test
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
+ "path/filepath"
"sync"
"testing"
"time"
"github.com/ethersphere/swarm/chunk"
+ "github.com/ethersphere/swarm/storage/fcds"
+ "github.com/ethersphere/swarm/storage/fcds/leveldb"
)
const (
ConcurrentThreads = 128
)
-func newDB(b *testing.B) (db Storer, clean func()) {
+func newDB(b *testing.B) (db fcds.Storer, clean func()) {
b.Helper()
path, err := ioutil.TempDir("/tmp/swarm", "swarm-shed")
if err != nil {
b.Fatal(err)
}
+ metaStore, err := leveldb.NewMetaStore(filepath.Join(path, "meta"))
+ if err != nil {
+ b.Fatal(err)
+ }
- db, err = New(path)
+ db, err = fcds.New(path, 4096, metaStore)
if err != nil {
os.RemoveAll(path)
b.Fatal(err)
@@ -81,7 +88,7 @@ func GenerateTestRandomChunk() chunk.Chunk {
return chunk.NewChunk(key, data)
}
-func createBenchBaseline(b *testing.B, baseChunksCount int) (db Storer, clean func(), baseChunks []chunk.Chunk) {
+func createBenchBaseline(b *testing.B, baseChunksCount int) (db fcds.Storer, clean func(), baseChunks []chunk.Chunk) {
db, clean = newDB(b)
if baseChunksCount > 0 {
@@ -97,7 +104,7 @@ func createBenchBaseline(b *testing.B, baseChunksCount int) (db Storer, clean fu
<-sem
wg.Done()
}()
- if err := db.Put(ch); err != nil {
+ if _, err := db.Put(ch); err != nil {
panic(err)
}
}(i, ch)
@@ -116,7 +123,7 @@ func createBenchBaseline(b *testing.B, baseChunksCount int) (db Storer, clean fu
// Benchmarks
-func runBenchmark(b *testing.B, db Storer, basechunks []chunk.Chunk, baseChunksCount int, writeChunksCount int, readChunksCount int, deleteChunksCount int) {
+func runBenchmark(b *testing.B, db fcds.Storer, basechunks []chunk.Chunk, baseChunksCount int, writeChunksCount int, readChunksCount int, deleteChunksCount int) {
var writeElapsed time.Duration
var readElapsed time.Duration
var deleteElapsed time.Duration
@@ -140,14 +147,14 @@ func runBenchmark(b *testing.B, db Storer, basechunks []chunk.Chunk, baseChunksC
<-sem
wg.Done()
}()
- if err := db.Put(ch); err != nil {
+ if _, err := db.Put(ch); err != nil {
panic(err)
}
}(i, ch)
}
wg.Wait()
elapsed := time.Since(start)
- fmt.Println("-- writing chunks took , ", elapsed)
+ //fmt.Println("-- writing chunks took , ", elapsed)
writeElapsed += elapsed
jobWg.Done()
}()
@@ -230,8 +237,8 @@ func runBenchmark(b *testing.B, db Storer, basechunks []chunk.Chunk, baseChunksC
}
func BenchmarkWrite_Add10K(b *testing.B) {
- for i := 10000; i <= 1000000; i *= 10 {
- b.Run(fmt.Sprintf("Baseline_%d", i), func(b *testing.B) {
+ for i := 50000; i <= 5000000; i *= 10 {
+ b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
for j := 0; j < b.N; j++ {
b.StopTimer()
db, clean, baseChunks := createBenchBaseline(b, i)
@@ -247,18 +254,22 @@ func BenchmarkWrite_Add10K(b *testing.B) {
}
func BenchmarkReadOverClean(b *testing.B) {
- for i := 10000; i <= 1000000; i *= 10 {
- b.Run(fmt.Sprintf("Baseline_%d", i), func(b *testing.B) {
- for j := 0; j < b.N; j++ {
- b.StopTimer()
- db, clean, baseChunks := createBenchBaseline(b, i)
- b.StartTimer()
-
- runBenchmark(b, db, baseChunks, 0, 0, 10000, 0)
- b.StopTimer()
- clean()
- b.StartTimer()
+ for i := 50000; i <= 5000000; i *= 10 {
+ b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
+ for k := 50000; k <= i; k *= 10 {
+ b.Run(fmt.Sprintf("read_%d", k), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ runBenchmark(b, db, baseChunks, 0, 0, k, 0)
+ }
+ })
}
+ b.StopTimer()
+ clean()
+ b.StartTimer()
})
}
}
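
Inside runBenchmark and createBenchBaseline the concurrent Put/Get/Delete loops all use the same bounded-concurrency idiom: a buffered channel capped at ConcurrentThreads acts as a semaphore and a sync.WaitGroup acts as the completion barrier. A sketch of that idiom, with op as a placeholder for db.Put/Get/Delete (the real loops panic or count errors instead of returning one):

package bench

import "sync"

// runConcurrently applies op to every item with at most limit goroutines
// in flight; the buffered channel is the semaphore, the WaitGroup the barrier.
func runConcurrently(items []int, limit int, op func(int) error) error {
	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup
	var mu sync.Mutex
	var firstErr error

	for _, it := range items {
		sem <- struct{}{} // blocks while limit goroutines are already running
		wg.Add(1)
		go func(it int) {
			defer func() {
				<-sem
				wg.Done()
			}()
			if err := op(it); err != nil {
				mu.Lock()
				if firstErr == nil {
					firstErr = err
				}
				mu.Unlock()
			}
		}(it)
	}
	wg.Wait()
	return firstErr
}
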
From 315e5cdeed1f78ec5ba8828ca01018ef4fb2ca90 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 26 Mar 2020 13:53:54 +0100
Subject: [PATCH 87/89] better write testing
---
storage/fcds/leveldb/fcds_test.go | 24 +++++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/storage/fcds/leveldb/fcds_test.go b/storage/fcds/leveldb/fcds_test.go
index f25af4ecd7..02365ea5a6 100644
--- a/storage/fcds/leveldb/fcds_test.go
+++ b/storage/fcds/leveldb/fcds_test.go
@@ -236,25 +236,31 @@ func runBenchmark(b *testing.B, db fcds.Storer, basechunks []chunk.Chunk, baseCh
//}
}
-func BenchmarkWrite_Add10K(b *testing.B) {
- for i := 50000; i <= 5000000; i *= 10 {
+func BenchmarkWrite(b *testing.B) {
+ for i := 30000; i <= 3000000; i *= 10 {
b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
- for j := 0; j < b.N; j++ {
+ for k := 10000; k <= 500000; k *= 5 {
b.StopTimer()
db, clean, baseChunks := createBenchBaseline(b, i)
b.StartTimer()
- runBenchmark(b, db, baseChunks, 0, 10000, 0, 0)
- b.StopTimer()
- clean()
- b.StartTimer()
+ b.Run(fmt.Sprintf("add_%d", k), func(b *testing.B) {
+ for j := 0; j < b.N; j++ {
+ runBenchmark(b, db, baseChunks, 0, k, 0, 0)
+ }
+ })
}
+
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+
})
}
}
-func BenchmarkReadOverClean(b *testing.B) {
- for i := 50000; i <= 5000000; i *= 10 {
+func BenchmarkRead(b *testing.B) {
+ for i := 30000; i <= 3000000; i *= 10 {
b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
b.StopTimer()
db, clean, baseChunks := createBenchBaseline(b, i)
From 77b0c85aa1f7fe2129499247e7644d3200949906 Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 26 Mar 2020 14:02:27 +0100
Subject: [PATCH 88/89] fix build
---
storage/fcds/leveldb/fcds_test.go | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/storage/fcds/leveldb/fcds_test.go b/storage/fcds/leveldb/fcds_test.go
index 02365ea5a6..774719a7d1 100644
--- a/storage/fcds/leveldb/fcds_test.go
+++ b/storage/fcds/leveldb/fcds_test.go
@@ -249,12 +249,11 @@ func BenchmarkWrite(b *testing.B) {
runBenchmark(b, db, baseChunks, 0, k, 0, 0)
}
})
- }
-
- b.StopTimer()
- clean()
- b.StartTimer()
+ b.StopTimer()
+ clean()
+ b.StartTimer()
+ }
})
}
}
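
The fix above is about keeping every createBenchBaseline paired with its clean call in the same loop iteration. Assuming Go 1.14 or later, b.Cleanup can make that pairing automatic for the per-sub-benchmark baselines (as in BenchmarkRead); the per-iteration baselines in BenchmarkWrite still need explicit clean calls inside the b.N loop. newBenchDB below is a hypothetical helper meant to sit next to createBenchBaseline in fcds_test.go, not part of the patch:

// newBenchDB creates a baseline store and registers its cleanup with the
// benchmark, so teardown runs automatically when the (sub-)benchmark ends.
func newBenchDB(b *testing.B, baseChunksCount int) (fcds.Storer, []chunk.Chunk) {
	b.Helper()
	db, clean, baseChunks := createBenchBaseline(b, baseChunksCount)
	b.Cleanup(clean)
	return db, baseChunks
}
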
From 51ff0d779eb996c3c1fcd221e9daf5f6dc6fc48d Mon Sep 17 00:00:00 2001
From: acud <12988138+acud@users.noreply.github.com>
Date: Thu, 26 Mar 2020 14:39:10 +0100
Subject: [PATCH 89/89] on par
---
storage/fcds/leveldb/fcds_test.go | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/storage/fcds/leveldb/fcds_test.go b/storage/fcds/leveldb/fcds_test.go
index 774719a7d1..76b8977562 100644
--- a/storage/fcds/leveldb/fcds_test.go
+++ b/storage/fcds/leveldb/fcds_test.go
@@ -237,35 +237,35 @@ func runBenchmark(b *testing.B, db fcds.Storer, basechunks []chunk.Chunk, baseCh
}
func BenchmarkWrite(b *testing.B) {
- for i := 30000; i <= 3000000; i *= 10 {
+ for i := 10000; i <= 1000000; i *= 10 {
b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
- for k := 10000; k <= 500000; k *= 5 {
- b.StopTimer()
- db, clean, baseChunks := createBenchBaseline(b, i)
- b.StartTimer()
-
+ // for each baseline, insert 10k, 20k, 50k, 100k
+ for _, k := range []int{10000, 20000, 50000, 100000} {
b.Run(fmt.Sprintf("add_%d", k), func(b *testing.B) {
for j := 0; j < b.N; j++ {
+ b.StopTimer()
+ db, clean, baseChunks := createBenchBaseline(b, i)
+ b.StartTimer()
+
runBenchmark(b, db, baseChunks, 0, k, 0, 0)
+ b.StopTimer()
+ clean()
+ b.StartTimer()
}
})
-
- b.StopTimer()
- clean()
- b.StartTimer()
}
})
}
}
func BenchmarkRead(b *testing.B) {
- for i := 30000; i <= 3000000; i *= 10 {
+ for i := 10000; i <= 1000000; i *= 10 {
b.Run(fmt.Sprintf("baseline_%d", i), func(b *testing.B) {
b.StopTimer()
db, clean, baseChunks := createBenchBaseline(b, i)
b.StartTimer()
- for k := 50000; k <= i; k *= 10 {
+ for k := 10000; k <= i; k *= 10 {
b.Run(fmt.Sprintf("read_%d", k), func(b *testing.B) {
for j := 0; j < b.N; j++ {
runBenchmark(b, db, baseChunks, 0, 0, k, 0)