diff --git a/.golangci.yml b/.golangci.yml index 96abcf35..1f7d809b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -30,7 +30,6 @@ linters: - exhaustive - exportloopref - forcetypeassert - - gocognit - goconst - gocritic - gofmt @@ -72,6 +71,7 @@ linters: # - exhaustivestruct # - forbidigo # - funlen + # - gocognit # - godot # - goerr113 # - gomnd @@ -84,8 +84,6 @@ linters: # - wsl linters-settings: - gocognit: - min-complexity: 37 gocritic: disabled-checks: - appendAssign diff --git a/chain/block.go b/chain/block.go index f90eb12c..9733b908 100644 --- a/chain/block.go +++ b/chain/block.go @@ -150,6 +150,14 @@ func (b *StatelessBlock) verify() (*StatelessBlock, *versiondb.Database, error) return nil, nil, err } onAcceptDB := versiondb.New(parentState) + + // Remove all expired prefixes + if err := ExpireNext(onAcceptDB, parent.Tmstmp, b.Tmstmp); err != nil { + return nil, nil, err + } + + // Process new transactions + log.Debug("build context", "next difficulty", context.NextDifficulty, "next cost", context.NextCost) var surplusDifficulty uint64 for _, tx := range b.Txs { if err := tx.Execute(onAcceptDB, b.Tmstmp, context); err != nil { @@ -159,7 +167,9 @@ func (b *StatelessBlock) verify() (*StatelessBlock, *versiondb.Database, error) surplusDifficulty += tx.Difficulty() - context.NextDifficulty } // Ensure enough work is performed to compensate for block production speed - if surplusDifficulty < b.Difficulty*b.Cost { + requiredSurplus := b.Difficulty * b.Cost + if surplusDifficulty < requiredSurplus { + log.Debug("insufficient block surplus", "found", surplusDifficulty, "required", requiredSurplus) return nil, nil, ErrInsufficientSurplus } return parent, onAcceptDB, nil @@ -195,7 +205,6 @@ func (b *StatelessBlock) Accept() error { } b.st = choices.Accepted b.vm.Accepted(b) - // TODO: clear expired state (using index from timestamp to prefix) return nil } @@ -221,6 +230,15 @@ func (b *StatelessBlock) Height() uint64 { return b.StatefulBlock.Hght } // implements 
"snowman.Block" func (b *StatelessBlock) Timestamp() time.Time { return b.t } +func (b *StatelessBlock) SetChildrenDB(db database.Database) error { + for _, child := range b.children { + if err := child.onAcceptDB.SetDatabase(db); err != nil { + return err + } + } + return nil +} + func (b *StatelessBlock) onAccept() (database.Database, error) { if b.st == choices.Accepted || b.Hght == 0 /* genesis */ { return b.vm.State(), nil diff --git a/chain/claim_tx.go b/chain/claim_tx.go index b02d29f5..ef952819 100644 --- a/chain/claim_tx.go +++ b/chain/claim_tx.go @@ -24,30 +24,25 @@ func (c *ClaimTx) Execute(db database.Database, blockTime int64) error { return ErrPublicKeyMismatch } - prevInfo, infoExists, err := GetPrefixInfo(db, c.Prefix) + // Prefix keys only exist if they are still valid + exists, err := HasPrefix(db, c.Prefix) if err != nil { return err } - if infoExists && prevInfo.Expiry >= blockTime { + if exists { return ErrPrefixNotExpired } - // every successful "claim" deletes the existing keys - // whether "c.Sender" is same as or different than "prevInfo.Owner" - // now write with either prefix expired or new prefix owner + // Anything previously at the prefix was previously removed... newInfo := &PrefixInfo{ Owner: c.Sender, + Created: blockTime, LastUpdated: blockTime, Expiry: blockTime + expiryTime, Keys: 1, } - if err := PutPrefixInfo(db, c.Prefix, newInfo); err != nil { + if err := PutPrefixInfo(db, c.Prefix, newInfo, -1); err != nil { return err } - - // Remove anything that is stored in value prefix - // overwrite even if claimed by the same owner - // TODO(patrick-ogrady): free things async for faster block verification loops - // e.g., lazily free what is said to be freed in the block? 
- return DeleteAllPrefixKeys(db, c.Prefix) + return nil } diff --git a/chain/claim_tx_test.go b/chain/claim_tx_test.go index 47fd65be..e290b52e 100644 --- a/chain/claim_tx_test.go +++ b/chain/claim_tx_test.go @@ -67,6 +67,12 @@ func TestClaimTx(t *testing.T) { }, } for i, tv := range tt { + if i > 0 { + // Expire old prefixes between txs + if err := ExpireNext(db, tt[i-1].blockTime, tv.blockTime); err != nil { + t.Fatalf("#%d: ExpireNext errored %v", i, err) + } + } err := tv.tx.Execute(db, tv.blockTime) if !errors.Is(err, tv.err) { t.Fatalf("#%d: tx.Execute err expected %v, got %v", i, tv.err, err) @@ -85,4 +91,19 @@ func TestClaimTx(t *testing.T) { t.Fatalf("#%d: unexpected owner found (expected pub key %q)", i, string(pub.PublicKey)) } } + + // Cleanup DB after all txs submitted + if err := ExpireNext(db, 0, 1000); err != nil { + t.Fatal(err) + } + if err := PruneNext(db, 100); err != nil { + t.Fatal(err) + } + _, exists, err := GetPrefixInfo(db, []byte("foo")) + if err != nil { + t.Fatalf("failed to get prefix info %v", err) + } + if exists { + t.Fatal("prefix should not exist") + } } diff --git a/chain/lifeline_tx.go b/chain/lifeline_tx.go index 0def7b45..1cf5af00 100644 --- a/chain/lifeline_tx.go +++ b/chain/lifeline_tx.go @@ -23,6 +23,7 @@ func (l *LifelineTx) Execute(db database.Database, blockTime int64) error { return ErrPrefixMissing } // If you are "in debt", lifeline only adds but doesn't reset to new + lastExpiry := i.Expiry i.Expiry += expiryTime / i.Keys - return PutPrefixInfo(db, l.Prefix, i) + return PutPrefixInfo(db, l.Prefix, i, lastExpiry) } diff --git a/chain/lifeline_tx_test.go b/chain/lifeline_tx_test.go index a211c414..abfe4b3f 100644 --- a/chain/lifeline_tx_test.go +++ b/chain/lifeline_tx_test.go @@ -38,7 +38,7 @@ func TestLifelineTx(t *testing.T) { blockTime: 1, err: nil, }, - { // successful lifeline when prefix info is missing + { // successful lifeline when prefix info is not missing utx: &LifelineTx{BaseTx: &BaseTx{Sender: 
pub.Bytes(), Prefix: []byte("foo")}}, blockTime: 1, err: nil, diff --git a/chain/prefix_info.go b/chain/prefix_info.go index 865fcd83..7ee5983c 100644 --- a/chain/prefix_info.go +++ b/chain/prefix_info.go @@ -4,12 +4,16 @@ package chain import ( + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/quarkvm/crypto" ) type PrefixInfo struct { Owner [crypto.PublicKeySize]byte `serialize:"true" json:"owner"` + Created int64 `serialize:"true" json:"created"` LastUpdated int64 `serialize:"true" json:"lastUpdated"` Expiry int64 `serialize:"true" json:"expiry"` Keys int64 `serialize:"true" json:"keys"` // decays faster the more keys you have + + RawPrefix ids.ShortID `serialize:"true" json:"rawPrefix"` } diff --git a/chain/set_tx.go b/chain/set_tx.go index 6f96dcc2..f6666056 100644 --- a/chain/set_tx.go +++ b/chain/set_tx.go @@ -65,7 +65,7 @@ func (s *SetTx) Execute(db database.Database, blockTime int64) error { return s.updatePrefix(db, blockTime, i) } -func (s *SetTx) updatePrefix(db database.KeyValueWriter, blockTime int64, i *PrefixInfo) error { +func (s *SetTx) updatePrefix(db database.Database, blockTime int64, i *PrefixInfo) error { timeRemaining := (i.Expiry - i.LastUpdated) * i.Keys if len(s.Value) == 0 { i.Keys-- @@ -80,6 +80,7 @@ func (s *SetTx) updatePrefix(db database.KeyValueWriter, blockTime int64, i *Pre } newTimeRemaining := timeRemaining / i.Keys i.LastUpdated = blockTime + lastExpiry := i.Expiry i.Expiry = blockTime + newTimeRemaining - return PutPrefixInfo(db, s.Prefix, i) + return PutPrefixInfo(db, s.Prefix, i, lastExpiry) } diff --git a/chain/set_tx_test.go b/chain/set_tx_test.go index 06fbcd02..c65fc9cf 100644 --- a/chain/set_tx_test.go +++ b/chain/set_tx_test.go @@ -188,7 +188,11 @@ func TestSetTx(t *testing.T) { t.Fatalf("#%d: unexpected owner found (expected pub key %q)", i, string(pub.PublicKey)) } // each claim must delete all existing keys with the value key - if kvs := Range(db, tp.Prefix, nil, WithPrefix()); len(kvs) 
> 0 { + kvs, err := Range(db, tp.Prefix, nil, WithPrefix()) + if err != nil { + t.Fatalf("#%d: unexpected error when fetching range %v", i, err) + } + if len(kvs) > 0 { t.Fatalf("#%d: unexpected key-values for the prefix after claim", i) } diff --git a/chain/storage.go b/chain/storage.go index ac5b0296..80605e27 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -5,30 +5,63 @@ package chain import ( "bytes" + "encoding/binary" "errors" + "math" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" + log "github.com/inconshreveable/log15" + "github.com/ava-labs/quarkvm/parser" ) -// 0x0/ (singleton prefix info) -// -> [reserved prefix] -// 0x1/ (prefix keys) -// -> [reserved prefix] +// TODO: cleanup mapping diagram +// 0x0/ (block hashes) +// 0x1/ (tx hashes) +// 0x2/ (singleton prefix info) +// -> [prefix]:[prefix info/raw prefix] +// 0x3/ (prefix keys) +// -> [raw prefix] // -> [key] -// 0x2/ (tx hashes) -// 0x3/ (block hashes) +// 0x4/ (prefix expiry queue) +// -> [raw prefix] +// 0x5/ (prefix pruning queue) +// -> [raw prefix] const ( - infoPrefix = 0x0 - keyPrefix = 0x1 - txPrefix = 0x2 - blockPrefix = 0x3 + blockPrefix = 0x0 + txPrefix = 0x1 + infoPrefix = 0x2 + keyPrefix = 0x3 + expiryPrefix = 0x4 + pruningPrefix = 0x5 + + shortIDLen = 20 ) var lastAccepted = []byte("last_accepted") +// [blockPrefix] + [delimiter] + [blockID] +func PrefixBlockKey(blockID ids.ID) (k []byte) { + k = make([]byte, 2+len(blockID)) + k[0] = blockPrefix + k[1] = parser.Delimiter + copy(k[2:], blockID[:]) + return k +} + +// [txPrefix] + [delimiter] + [txID] +func PrefixTxKey(txID ids.ID) (k []byte) { + k = make([]byte, 2+len(txID)) + k[0] = txPrefix + k[1] = parser.Delimiter + copy(k[2:], txID[:]) + return k +} + +// [infoPrefix] + [delimiter] + [prefix] func PrefixInfoKey(prefix []byte) (k []byte) { k = make([]byte, 2+len(prefix)) k[0] = infoPrefix @@ 
-37,52 +70,80 @@ func PrefixInfoKey(prefix []byte) (k []byte) { return k } -func PrefixValueKey(prefix []byte, key []byte) (k []byte) { - prefixN, keyN := len(prefix), len(key) - pfxDelimExists := bytes.HasSuffix(prefix, []byte{parser.Delimiter}) - - n := 2 + prefixN + keyN - if !pfxDelimExists { - n++ +func RawPrefix(prefix []byte, blockTime int64) (ids.ShortID, error) { + prefixLen := len(prefix) + r := make([]byte, prefixLen+1+8) + copy(r, prefix) + r[prefixLen] = parser.Delimiter + binary.LittleEndian.PutUint64(r[prefixLen+1:], uint64(blockTime)) + h := hashing.ComputeHash160(r) + rprefix, err := ids.ToShortID(h) + if err != nil { + return ids.ShortID{}, err } + return rprefix, nil +} - k = make([]byte, n) +// Assumes [prefix] and [key] do not contain delimiter +// [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] +func PrefixValueKey(rprefix ids.ShortID, key []byte) (k []byte) { + k = make([]byte, 2+shortIDLen+1+len(key)) k[0] = keyPrefix k[1] = parser.Delimiter - cur := 2 - - copy(k[cur:], prefix) - cur += prefixN - - if !pfxDelimExists { - k[cur] = parser.Delimiter - cur++ - } - if len(key) == 0 { - return k - } - - copy(k[cur:], key) + copy(k[2:], rprefix[:]) + k[2+shortIDLen] = parser.Delimiter + copy(k[2+shortIDLen+1:], key) return k } -func PrefixTxKey(txID ids.ID) (k []byte) { - k = make([]byte, 2+len(txID)) - k[0] = txPrefix +// [expiry/pruningPrefix] + [delimiter] + [timestamp] + [delimiter] +func RangeTimeKey(p byte, t int64) (k []byte) { + k = make([]byte, 2+8+1) + k[0] = p k[1] = parser.Delimiter - copy(k[2:], txID[:]) + binary.LittleEndian.PutUint64(k[2:], uint64(t)) + k[2+8] = parser.Delimiter return k } -func PrefixBlockKey(blockID ids.ID) (k []byte) { - k = make([]byte, 2+len(blockID)) - k[0] = blockPrefix +// [expiryPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] +func PrefixExpiryKey(expiry int64, rprefix ids.ShortID) (k []byte) { + return specificTimeKey(expiryPrefix, expiry, rprefix) +} + +// [pruningPrefix] + 
[delimiter] + [timestamp] + [delimiter] + [rawPrefix] +func PrefixPruningKey(expired int64, rprefix ids.ShortID) (k []byte) { + return specificTimeKey(pruningPrefix, expired, rprefix) +} + +const specificTimeKeyLen = 2 + 8 + 1 + shortIDLen + +// [expiry/pruningPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] +func specificTimeKey(p byte, t int64, rprefix ids.ShortID) (k []byte) { + k = make([]byte, specificTimeKeyLen) + k[0] = p k[1] = parser.Delimiter - copy(k[2:], blockID[:]) + binary.LittleEndian.PutUint64(k[2:], uint64(t)) + k[2+8] = parser.Delimiter + copy(k[2+8+1:], rprefix[:]) return k } +var ErrInvalidKeyFormat = errors.New("invalid key format") + +// extracts expiry/pruning timstamp and raw prefix +func extractSpecificTimeKey(k []byte) (timestamp int64, rprefix ids.ShortID, err error) { + if len(k) != specificTimeKeyLen { + return -1, ids.ShortEmpty, ErrInvalidKeyFormat + } + timestamp = int64(binary.LittleEndian.Uint64(k[2 : 2+8])) + rprefix, err = ids.ToShortID(k[2+8+1:]) + return timestamp, rprefix, err +} + func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool, error) { + // TODO: add caching (will need some expiry when keys cleared) + // [infoPrefix] + [delimiter] + [prefix] k := PrefixInfoKey(prefix) v, err := db.Get(k) if errors.Is(err, database.ErrNotFound) { @@ -97,7 +158,16 @@ func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool } func GetValue(db database.KeyValueReader, prefix []byte, key []byte) ([]byte, bool, error) { - k := PrefixValueKey(prefix, key) + prefixInfo, exists, err := GetPrefixInfo(db, prefix) + if err != nil { + return nil, false, err + } + if !exists { + return nil, false, nil + } + + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] + k := PrefixValueKey(prefixInfo.RawPrefix, key) v, err := db.Get(k) if errors.Is(err, database.ErrNotFound) { return nil, false, nil @@ -132,19 +202,123 @@ func GetBlock(db database.KeyValueReader, bid ids.ID) 
([]byte, error) { return db.Get(PrefixBlockKey(bid)) } +// ExpireNext queries "expiryPrefix" key space to find expiring keys, +// deletes their prefixInfos, and schedules its key pruning with its raw prefix. +func ExpireNext(db database.Database, parent int64, current int64) (err error) { + startKey := RangeTimeKey(expiryPrefix, parent) + endKey := RangeTimeKey(expiryPrefix, current) + cursor := db.NewIteratorWithStart(startKey) + for cursor.Next() { + // [expiryPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] + curKey := cursor.Key() + if bytes.Compare(curKey, startKey) < 0 { // curKey < startKey; continue search + continue + } + if bytes.Compare(curKey, endKey) > 0 { // curKey > endKey; end search + break + } + if err := db.Delete(cursor.Key()); err != nil { + return err + } + + // [prefix] + pfx := cursor.Value() + + // [infoPrefix] + [delimiter] + [prefix] + k := PrefixInfoKey(pfx) + if err := db.Delete(k); err != nil { + return err + } + expired, rpfx, err := extractSpecificTimeKey(curKey) + if err != nil { + return err + } + // [pruningPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] + k = PrefixPruningKey(expired, rpfx) + if err := db.Put(k, nil); err != nil { + return err + } + log.Debug("prefix expired", "prefix", string(pfx)) + } + return nil +} + +// PruneNext queries the keys that are currently marked with "pruningPrefix", +// and clears them from the database.
+func PruneNext(db database.Database, limit int) (err error) { + startKey := RangeTimeKey(pruningPrefix, 0) + endKey := RangeTimeKey(pruningPrefix, math.MaxInt64) + cursor := db.NewIteratorWithStart(startKey) + removals := 0 + for cursor.Next() && removals < limit { + // [pruningPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] + curKey := cursor.Key() + if bytes.Compare(curKey, startKey) < 0 { // curKey < startKey; continue search + continue + } + if bytes.Compare(curKey, endKey) > 0 { // curKey > endKey; end search + break + } + _, rpfx, err := extractSpecificTimeKey(curKey) + if err != nil { + return err + } + if err := db.Delete(curKey); err != nil { + return err + } + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] + if err := database.ClearPrefix(db, db, PrefixValueKey(rpfx, nil)); err != nil { + return err + } + log.Debug("rprefix pruned", "rprefix", rpfx.Hex()) + removals++ + } + return nil +} + // DB func HasPrefix(db database.KeyValueReader, prefix []byte) (bool, error) { + // [infoPrefix] + [delimiter] + [prefix] k := PrefixInfoKey(prefix) return db.Has(k) } func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, error) { - k := PrefixValueKey(prefix, key) + prefixInfo, exists, err := GetPrefixInfo(db, prefix) + if err != nil { + return false, err + } + if !exists { + return false, nil + } + + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] + k := PrefixValueKey(prefixInfo.RawPrefix, key) return db.Has(k) } -func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo) error { - k := PrefixInfoKey(prefix) +func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo, lastExpiry int64) error { + if i.RawPrefix == ids.ShortEmpty { + rprefix, err := RawPrefix(prefix, i.Created) + if err != nil { + return err + } + i.RawPrefix = rprefix + } + if lastExpiry >= 0 { + // [expiryPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] + k :=
PrefixExpiryKey(lastExpiry, i.RawPrefix) + if err := db.Delete(k); err != nil { + return err + } + } + // [expiryPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] + k := PrefixExpiryKey(i.Expiry, i.RawPrefix) + if err := db.Put(k, prefix); err != nil { + return err + } + // [infoPrefix] + [delimiter] + [prefix] + k = PrefixInfoKey(prefix) b, err := Marshal(i) if err != nil { return err @@ -152,20 +326,31 @@ func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo) err return db.Put(k, b) } -func PutPrefixKey(db database.KeyValueWriter, prefix []byte, key []byte, value []byte) error { - k := PrefixValueKey(prefix, key) +func PutPrefixKey(db database.Database, prefix []byte, key []byte, value []byte) error { + prefixInfo, exists, err := GetPrefixInfo(db, prefix) + if err != nil { + return err + } + if !exists { + return ErrPrefixMissing + } + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] + k := PrefixValueKey(prefixInfo.RawPrefix, key) return db.Put(k, value) } -func DeletePrefixKey(db database.KeyValueWriter, prefix []byte, key []byte) error { - k := PrefixValueKey(prefix, key) +func DeletePrefixKey(db database.Database, prefix []byte, key []byte) error { + prefixInfo, exists, err := GetPrefixInfo(db, prefix) + if err != nil { + return err + } + if !exists { + return ErrPrefixMissing + } + k := PrefixValueKey(prefixInfo.RawPrefix, key) return db.Delete(k) } -func DeleteAllPrefixKeys(db database.Database, prefix []byte) error { - return database.ClearPrefix(db, db, PrefixValueKey(prefix, nil)) -} - func SetTransaction(db database.KeyValueWriter, tx *Transaction) error { k := PrefixTxKey(tx.ID()) b, err := Marshal(tx) @@ -186,9 +371,15 @@ type KeyValue struct { } // Range reads keys from the store. -// TODO: check prefix info to restrict reads to the owner? 
-func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (kvs []KeyValue) { - ret := &Op{key: PrefixValueKey(prefix, key)} +func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (kvs []KeyValue, err error) { + prefixInfo, exists, err := GetPrefixInfo(db, prefix) + if err != nil { + return nil, err + } + if !exists { + return nil, ErrPrefixMissing + } + ret := &Op{key: PrefixValueKey(prefixInfo.RawPrefix, key)} ret.applyOpts(opts) startKey := ret.key @@ -198,7 +389,7 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k endKey = ret.rangeEnd if !bytes.HasPrefix(endKey, []byte{keyPrefix, parser.Delimiter}) { // if overwritten via "WithRange" - endKey = PrefixValueKey(prefix, endKey) + endKey = PrefixValueKey(prefixInfo.RawPrefix, endKey) } } @@ -209,17 +400,14 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k break } + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] curKey := cursor.Key() + formattedKey := curKey[2+shortIDLen+1:] comp := bytes.Compare(startKey, curKey) if comp == 0 { // startKey == curKey kvs = append(kvs, KeyValue{ - Key: bytes.Replace( - curKey, - []byte{keyPrefix, parser.Delimiter}, - nil, - 1, - ), + Key: formattedKey, Value: cursor.Value(), }) continue @@ -237,11 +425,11 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k } kvs = append(kvs, KeyValue{ - Key: bytes.Replace(curKey, []byte{keyPrefix, parser.Delimiter}, nil, 1), + Key: formattedKey, Value: cursor.Value(), }) } - return kvs + return kvs, nil } type Op struct { diff --git a/chain/storage_test.go b/chain/storage_test.go index a850e2c6..c081a88d 100644 --- a/chain/storage_test.go +++ b/chain/storage_test.go @@ -5,6 +5,7 @@ package chain import ( "bytes" + "errors" "fmt" reflect "reflect" "testing" @@ -18,23 +19,18 @@ func TestPrefixValueKey(t *testing.T) { t.Parallel() tt := []struct { - pfx []byte + rpfx ids.ShortID key []byte 
valueKey []byte }{ { - pfx: []byte("foo"), + rpfx: ids.ShortID{0x1}, key: []byte("hello"), - valueKey: append([]byte{keyPrefix}, []byte("/foo/hello")...), - }, - { - pfx: []byte("foo/"), - key: []byte("hello"), - valueKey: append([]byte{keyPrefix}, []byte("/foo/hello")...), + valueKey: append([]byte{keyPrefix}, []byte("/\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/hello")...), //nolint:lll }, } for i, tv := range tt { - vv := PrefixValueKey(tv.pfx, tv.key) + vv := PrefixValueKey(tv.rpfx, tv.key) if !bytes.Equal(tv.valueKey, vv) { t.Fatalf("#%d: value expected %q, got %q", i, tv.valueKey, vv) } @@ -103,16 +99,67 @@ func TestPrefixBlockKey(t *testing.T) { } } +func TestPutPrefixInfoAndKey(t *testing.T) { + t.Parallel() + + db := memdb.New() + defer db.Close() + + pfx := []byte("foo") + k, v := []byte("k"), []byte("v") + + // expect failures for non-existing prefixInfo + if ok, err := HasPrefix(db, pfx); ok || err != nil { + t.Fatalf("unexpected ok %v, err %v", ok, err) + } + if ok, err := HasPrefixKey(db, pfx, k); ok || err != nil { + t.Fatalf("unexpected ok %v, err %v", ok, err) + } + if err := PutPrefixKey(db, pfx, k, v); !errors.Is(err, ErrPrefixMissing) { + t.Fatalf("unexpected error %v, expected %v", err, ErrPrefixMissing) + } + + if err := PutPrefixInfo( + db, + pfx, + &PrefixInfo{ + RawPrefix: ids.ShortID{0x1}, + }, + -1, + ); err != nil { + t.Fatal(err) + } + if err := PutPrefixKey(db, pfx, k, v); err != nil { + t.Fatalf("unexpected error %v", err) + } + + // expect success for existing prefixInfo + if ok, err := HasPrefix(db, pfx); !ok || err != nil { + t.Fatalf("unexpected ok %v, err %v", ok, err) + } + if ok, err := HasPrefixKey(db, pfx, k); !ok || err != nil { + t.Fatalf("unexpected ok %v, err %v", ok, err) + } +} + func TestRange(t *testing.T) { t.Parallel() db := memdb.New() defer db.Close() + // Persist PrefixInfo so keys can be stored under rprefix + pfx := []byte("foo") + if err := PutPrefixInfo(db, pfx, 
&PrefixInfo{ + RawPrefix: ids.ShortID{0x1}, + }, -1); err != nil { + t.Fatal(err) + } + for i := 0; i < 5; i++ { if err := PutPrefixKey( db, - []byte("foo"), + pfx, []byte(fmt.Sprintf("hello%05d", i)), []byte(fmt.Sprintf("bar%05d", i)), ); err != nil { @@ -127,103 +174,125 @@ func TestRange(t *testing.T) { kvs []KeyValue }{ { // prefix exists but the key itself does not exist - pfx: []byte("foo/"), + pfx: pfx, key: []byte("9"), opts: nil, kvs: nil, }, { // single key - pfx: []byte("foo/"), + pfx: pfx, key: []byte("hello00000"), opts: nil, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, }, }, { // prefix query - pfx: []byte("foo/"), + pfx: pfx, key: []byte("hello"), opts: []OpOption{WithPrefix()}, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: []byte("foo/hello00002"), Value: []byte("bar00002")}, - {Key: []byte("foo/hello00003"), Value: []byte("bar00003")}, - {Key: []byte("foo/hello00004"), Value: []byte("bar00004")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: []byte("bar00002")}, + {Key: []byte("hello00003"), Value: []byte("bar00003")}, + {Key: []byte("hello00004"), Value: []byte("bar00004")}, }, }, { // prefix query - pfx: []byte("foo/"), + pfx: pfx, key: nil, opts: []OpOption{WithPrefix()}, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: []byte("foo/hello00002"), Value: []byte("bar00002")}, - {Key: []byte("foo/hello00003"), Value: []byte("bar00003")}, - {Key: []byte("foo/hello00004"), Value: []byte("bar00004")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: 
[]byte("bar00002")}, + {Key: []byte("hello00003"), Value: []byte("bar00003")}, + {Key: []byte("hello00004"), Value: []byte("bar00004")}, }, }, { // prefix query - pfx: []byte("foo/"), + pfx: pfx, key: []byte("x"), opts: []OpOption{WithPrefix()}, kvs: nil, }, { // range query - pfx: []byte("foo/"), + pfx: pfx, key: []byte("hello"), opts: []OpOption{WithRangeEnd([]byte("hello00003"))}, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: []byte("foo/hello00002"), Value: []byte("bar00002")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: []byte("bar00002")}, }, }, { // range query - pfx: []byte("foo/"), + pfx: pfx, key: []byte("hello00001"), opts: []OpOption{WithRangeEnd([]byte("hello00003"))}, kvs: []KeyValue{ - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: []byte("foo/hello00002"), Value: []byte("bar00002")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: []byte("bar00002")}, }, }, { // range query - pfx: []byte("foo/"), + pfx: pfx, key: []byte("hello00003"), opts: []OpOption{WithRangeEnd([]byte("hello00005"))}, kvs: []KeyValue{ - {Key: []byte("foo/hello00003"), Value: []byte("bar00003")}, - {Key: []byte("foo/hello00004"), Value: []byte("bar00004")}, + {Key: []byte("hello00003"), Value: []byte("bar00003")}, + {Key: []byte("hello00004"), Value: []byte("bar00004")}, }, }, { // range query with limit - pfx: []byte("foo/"), + pfx: pfx, key: []byte("hello00003"), opts: []OpOption{WithRangeEnd([]byte("hello00005")), WithRangeLimit(1)}, kvs: []KeyValue{ - {Key: []byte("foo/hello00003"), Value: []byte("bar00003")}, + {Key: []byte("hello00003"), Value: []byte("bar00003")}, }, }, { // prefix query with limit - pfx: []byte("foo/"), + pfx: pfx, key: []byte("hello"), opts: []OpOption{WithPrefix(), 
WithRangeLimit(3)}, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: []byte("foo/hello00002"), Value: []byte("bar00002")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: []byte("bar00002")}, }, }, } for i, tv := range tt { - kvs := Range(db, tv.pfx, tv.key, tv.opts...) + kvs, err := Range(db, tv.pfx, tv.key, tv.opts...) + if err != nil { + t.Fatalf("#%d: unexpected error when fetching range %v", i, err) + } if len(tv.kvs) == 0 && len(kvs) == 0 { continue } if !reflect.DeepEqual(kvs, tv.kvs) { - t.Fatalf("#%d: range response expected %d pair(s), got %v pair(s)", i, len(tv.kvs), len(kvs)) + t.Fatalf("#%d: range response expected %v pair(s), got %v pair(s)", i, tv.kvs, kvs) } } } + +func TestSpecificTimeKey(t *testing.T) { + rpfx0 := ids.ShortID{'k'} + k := PrefixExpiryKey(100, rpfx0) + ts, rpfx, err := extractSpecificTimeKey(k) + if err != nil { + t.Fatal(err) + } + if ts != 100 { + t.Fatalf("unexpected timestamp %d, expected 100", ts) + } + if rpfx != rpfx0 { + t.Fatalf("unexpected rawPrefix %v, expected %v", rpfx, rpfx0) + } + + if _, _, err = extractSpecificTimeKey(k[:10]); !errors.Is(err, ErrInvalidKeyFormat) { + t.Fatalf("unexpected error %v, expected %v", err, ErrInvalidKeyFormat) + } +} diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 7891d131..107ecbb1 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/quarkvm/chain" "github.com/ava-labs/quarkvm/client" "github.com/ava-labs/quarkvm/crypto" - "github.com/ava-labs/quarkvm/parser" "github.com/ava-labs/quarkvm/tests" "github.com/fatih/color" ginkgo "github.com/onsi/ginkgo/v2" @@ -199,7 +198,7 @@ var _ = ginkgo.Describe("[Claim/SetTx]", func() { color.Blue("checking SetTx 
with Range on %q", inst.uri) kvs, err := inst.cli.Range(pfx, k) gomega.Ω(err).To(gomega.BeNil()) - gomega.Ω(kvs[0].Key).To(gomega.Equal(append(append(pfx, parser.Delimiter), k...))) + gomega.Ω(kvs[0].Key).To(gomega.Equal(k)) gomega.Ω(kvs[0].Value).To(gomega.Equal(v)) } }) diff --git a/tests/integration/integration_test.go b/tests/integration/integration_test.go index 2ee098f0..ad51e25a 100644 --- a/tests/integration/integration_test.go +++ b/tests/integration/integration_test.go @@ -21,7 +21,6 @@ import ( "github.com/ava-labs/quarkvm/chain" "github.com/ava-labs/quarkvm/client" "github.com/ava-labs/quarkvm/crypto" - "github.com/ava-labs/quarkvm/parser" "github.com/ava-labs/quarkvm/vm" "github.com/fatih/color" ginkgo "github.com/onsi/ginkgo/v2" @@ -288,7 +287,7 @@ var _ = ginkgo.Describe("[ClaimTx]", func() { ginkgo.By("read back from VM with range query", func() { kvs, err := instances[0].cli.Range(pfx, k) gomega.Ω(err).To(gomega.BeNil()) - gomega.Ω(kvs[0].Key).To(gomega.Equal(append(append(pfx, parser.Delimiter), k...))) + gomega.Ω(kvs[0].Key).To(gomega.Equal(k)) gomega.Ω(kvs[0].Value).To(gomega.Equal(v)) }) }) diff --git a/vm/chain_vm.go b/vm/chain_vm.go index dba72e8d..58ce23f3 100644 --- a/vm/chain_vm.go +++ b/vm/chain_vm.go @@ -38,7 +38,7 @@ func (vm *VM) Rejected(b *chain.StatelessBlock) { func (vm *VM) Accepted(b *chain.StatelessBlock) { vm.blocks.Put(b.ID(), b) delete(vm.verifiedBlocks, b.ID()) - vm.lastAccepted = b.ID() + vm.lastAccepted = b log.Debug("accepted block", "id", b.ID()) } @@ -63,7 +63,8 @@ func (vm *VM) ExecutionContext(currTime int64, lastBlock *chain.StatelessBlock) nextCost += uint64(chain.BlockTarget - secondsSinceLast) } else { possibleDiff := uint64(secondsSinceLast - chain.BlockTarget) - if possibleDiff < nextCost-vm.minBlockCost { + // TODO: clean this up + if nextCost >= vm.minBlockCost && possibleDiff < nextCost-vm.minBlockCost { nextCost -= possibleDiff } else { nextCost 
= vm.minBlockCost @@ -76,7 +77,7 @@ func (vm *VM) ExecutionContext(currTime int64, lastBlock *chain.StatelessBlock) nextDifficulty++ } else if recentTxs < chain.TargetTransactions { elapsedWindows := uint64(secondsSinceLast/chain.LookbackWindow) + 1 // account for current window being less - if elapsedWindows < nextDifficulty-vm.minDifficulty { + if nextDifficulty >= vm.minDifficulty && elapsedWindows < nextDifficulty-vm.minDifficulty { nextDifficulty -= elapsedWindows } else { nextDifficulty = vm.minDifficulty diff --git a/vm/helpers.go b/vm/helpers.go index 2da989f9..a6a675b6 100644 --- a/vm/helpers.go +++ b/vm/helpers.go @@ -54,5 +54,14 @@ func (vm *VM) DifficultyEstimate() (uint64, error) { totalBlocks++ return true, nil }) - return totalDifficulty/totalBlocks + 1, err + if err != nil { + return 0, err + } + // TODO: make more sophisticated...maybe return cost/difficulty separately? + recommended := totalDifficulty/totalBlocks + 1 + minRequired := vm.minDifficulty + vm.minBlockCost + if recommended < minRequired { + recommended = minRequired + } + return recommended, nil } diff --git a/vm/pruner.go b/vm/pruner.go new file mode 100644 index 00000000..83ad4b2f --- /dev/null +++ b/vm/pruner.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019-2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package vm + +import ( + "time" + + "github.com/ava-labs/avalanchego/database/versiondb" + log "github.com/inconshreveable/log15" + + "github.com/ava-labs/quarkvm/chain" +) + +const ( + pruneLimit = 128 +) + +func (vm *VM) pruneCall() { + // Lock to prevent concurrent modification of state + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + vdb := versiondb.New(vm.db) + defer vdb.Abort() + if err := chain.PruneNext(vdb, pruneLimit); err != nil { + log.Warn("unable to prune next range", "error", err) + return + } + if err := vdb.Commit(); err != nil { + log.Warn("unable to commit pruning work", "error", err) + return + } + if err := vm.lastAccepted.SetChildrenDB(vm.db); err != nil { + log.Error("unable to update child databases of last accepted block", "error", err) + } +} + +func (vm *VM) prune() { + log.Debug("starting prune loops") + defer close(vm.donecPrune) + + // should retry less aggressively + t := time.NewTimer(vm.pruneInterval) + defer t.Stop() + + for { + select { + case <-t.C: + case <-vm.stopc: + return + } + t.Reset(vm.pruneInterval) + vm.pruneCall() + } +} diff --git a/vm/service.go b/vm/service.go index eef87096..b066ea2d 100644 --- a/vm/service.go +++ b/vm/service.go @@ -171,7 +171,6 @@ type RangeArgs struct { type RangeReply struct { KeyValues []chain.KeyValue `serialize:"true" json:"keyValues"` - Error error `serialize:"true" json:"error"` } func (svc *Service) Range(_ *http.Request, args *RangeArgs, reply *RangeReply) (err error) { @@ -183,7 +182,10 @@ func (svc *Service) Range(_ *http.Request, args *RangeArgs, reply *RangeReply) ( if args.Limit > 0 { opts = append(opts, chain.WithRangeLimit(args.Limit)) } - reply.KeyValues = chain.Range(svc.vm.db, args.Prefix, args.Key, opts...) - reply.Error = nil + kvs, err := chain.Range(svc.vm.db, args.Prefix, args.Key, opts...) 
+ if err != nil { + return err + } + reply.KeyValues = kvs return nil } diff --git a/vm/vm.go b/vm/vm.go index bc5e4f37..abd6f385 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -33,6 +33,10 @@ const ( defaultWorkInterval = 100 * time.Millisecond defaultRegossipInterval = time.Second + defaultPruneInterval = time.Minute + + defaultMinimumDifficulty = 1 + defaultMinBlockCost = 1 mempoolSize = 1024 ) @@ -48,9 +52,11 @@ type VM struct { workInterval time.Duration regossipInterval time.Duration - mempool *mempool.Mempool - appSender common.AppSender - gossipedTxs *cache.LRU + pruneInterval time.Duration + + mempool *mempool.Mempool + appSender common.AppSender + gossipedTxs *cache.LRU // cache block objects to optimize "getBlock" // only put when a block is accepted // key: block ID, value: *chain.StatelessBlock @@ -66,7 +72,7 @@ type VM struct { blockBuilder chan struct{} preferred ids.ID - lastAccepted ids.ID + lastAccepted *chain.StatelessBlock minDifficulty uint64 minBlockCost uint64 @@ -74,6 +80,7 @@ type VM struct { stopc chan struct{} donecRun chan struct{} donecRegossip chan struct{} + donecPrune chan struct{} } const ( @@ -97,9 +104,13 @@ func (vm *VM) Initialize( vm.ctx = ctx vm.db = dbManager.Current().Database - // TODO: make this configurable via genesis + // TODO: make this configurable via config vm.workInterval = defaultWorkInterval vm.regossipInterval = defaultRegossipInterval + vm.pruneInterval = defaultPruneInterval + + // TODO: make this configurable via genesis + vm.minDifficulty, vm.minBlockCost = defaultMinimumDifficulty, defaultMinBlockCost vm.mempool = mempool.New(mempoolSize) vm.appSender = appSender @@ -117,44 +128,48 @@ func (vm *VM) Initialize( log.Error("could not determine if have last accepted") return err } - if has { - b, err := chain.GetLastAccepted(vm.db) + if has { //nolint:nestif + blkID, err := chain.GetLastAccepted(vm.db) if err != nil { log.Error("could not get last accepted", "err", err) return err } - vm.preferred = b - 
vm.lastAccepted = b - log.Info("initialized quarkvm from last accepted", "block", b) - return nil - } + blk, err := vm.getBlock(blkID) + if err != nil { + log.Error("could not load last accepted", "err", err) + return err + } - // Load from genesis - genesisBlk, err := chain.ParseBlock( - genesisBytes, - choices.Accepted, - vm, - ) - if err != nil { - log.Error("unable to init genesis block", "err", err) - return err - } - if err := chain.SetLastAccepted(vm.db, genesisBlk); err != nil { - log.Error("could not set genesis as last accepted", "err", err) - return err + vm.preferred, vm.lastAccepted = blkID, blk + log.Info("initialized quarkvm from last accepted", "block", blkID) + } else { + genesisBlk, err := chain.ParseBlock( + genesisBytes, + choices.Accepted, + vm, + ) + if err != nil { + log.Error("unable to init genesis block", "err", err) + return err + } + if err := chain.SetLastAccepted(vm.db, genesisBlk); err != nil { + log.Error("could not set genesis as last accepted", "err", err) + return err + } + gBlkID := genesisBlk.ID() + vm.preferred, vm.lastAccepted = gBlkID, genesisBlk + log.Info("initialized quarkvm from genesis", "block", gBlkID) } - gBlkID := genesisBlk.ID() - vm.preferred, vm.lastAccepted = gBlkID, gBlkID - vm.minDifficulty, vm.minBlockCost = genesisBlk.Difficulty, genesisBlk.Cost - log.Info("initialized quarkvm from genesis", "block", gBlkID) vm.stopc = make(chan struct{}) vm.donecRun = make(chan struct{}) vm.donecRegossip = make(chan struct{}) + vm.donecPrune = make(chan struct{}) go vm.run() go vm.regossip() + go vm.prune() return nil } @@ -173,6 +188,7 @@ func (vm *VM) Shutdown() error { close(vm.stopc) <-vm.donecRun <-vm.donecRegossip + <-vm.donecPrune if vm.ctx == nil { return nil } @@ -359,5 +375,5 @@ func (vm *VM) SetPreference(id ids.ID) error { // "LastAccepted" implements "snowmanblock.ChainVM" // replaces "core.SnowmanVM.LastAccepted" func (vm *VM) LastAccepted() (ids.ID, error) { - return vm.lastAccepted, nil + return 
vm.lastAccepted.ID(), nil }