From 7fdae387eb9ce6502c3f1d6f383338a1478f9fe3 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Sun, 2 Jan 2022 16:34:58 -0600 Subject: [PATCH 01/31] first strokes --- chain/storage.go | 109 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 88 insertions(+), 21 deletions(-) diff --git a/chain/storage.go b/chain/storage.go index ac5b0296..1fd288cf 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -12,34 +12,56 @@ import ( "github.com/ava-labs/quarkvm/parser" ) -// 0x0/ (singleton prefix info) -// -> [reserved prefix] -// 0x1/ (prefix keys) -// -> [reserved prefix] +// 0x0/ (prefix mapping) +// -> [user prefix] -> [raw prefix] +// 0x1/ (singleton prefix info) +// -> [raw prefix] +// 0x2/ (prefix keys) +// -> [raw prefix] // -> [key] -// 0x2/ (tx hashes) -// 0x3/ (block hashes) +// 0x3/ (tx hashes) +// 0x4/ (block hashes) +// 0x5/ (prefix expiry queue) +// -> [raw prefix] +// 0x6/ (prefix pruning queue) +// -> [raw prefix] const ( - infoPrefix = 0x0 - keyPrefix = 0x1 - txPrefix = 0x2 - blockPrefix = 0x3 + mappingPrefix = 0x0 + infoPrefix = 0x1 + keyPrefix = 0x2 + txPrefix = 0x3 + blockPrefix = 0x4 + // prefixExpiryQueue = 0x5 + // prefixPruningQueue = 0x6 ) var lastAccepted = []byte("last_accepted") -func PrefixInfoKey(prefix []byte) (k []byte) { +// TODO: use indirection to automatically service prefix->rawPrefix translation +// TODO: derive rawPrefix deterministically by hash(block hash + prefix) +func PrefixMappingKey(prefix []byte) (k []byte) { k = make([]byte, 2+len(prefix)) - k[0] = infoPrefix + k[0] = mappingPrefix k[1] = parser.Delimiter copy(k[2:], prefix) return k } -func PrefixValueKey(prefix []byte, key []byte) (k []byte) { - prefixN, keyN := len(prefix), len(key) - pfxDelimExists := bytes.HasSuffix(prefix, []byte{parser.Delimiter}) +// TODO: make ids.ID? +func PrefixInfoKey(rawPrefix []byte) (k []byte) { + k = make([]byte, 2+len(rawPrefix)) + k[0] = infoPrefix + k[1] = parser.Delimiter + copy(k[2:], rawPrefix) + return k +} + +func PrefixValueKey(rawPrefix []byte, key []byte) (k []byte) { + prefixN, keyN := len(rawPrefix), len(key) + // TODO: can we not introduce an invariant that the delimiter is never + // included? 
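	// Illustration of the layout built below (assuming parser.Delimiter is '/'):
	//   [keyPrefix] '/' [rawPrefix] '/' [key]
	// e.g. rawPrefix "foo" with key "hello" packs to 0x2 '/' f o o '/' h e l l o;
	// the second delimiter is skipped when rawPrefix already ends with one.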
+ pfxDelimExists := bytes.HasSuffix(rawPrefix, []byte{parser.Delimiter}) n := 2 + prefixN + keyN if !pfxDelimExists { @@ -51,7 +73,7 @@ func PrefixValueKey(prefix []byte, key []byte) (k []byte) { k[1] = parser.Delimiter cur := 2 - copy(k[cur:], prefix) + copy(k[cur:], rawPrefix) cur += prefixN if !pfxDelimExists { @@ -82,8 +104,28 @@ func PrefixBlockKey(blockID ids.ID) (k []byte) { return k } +func GetPrefixMapping(db database.KeyValueReader, prefix []byte) ([]byte, bool, error) { + k := PrefixMappingKey(prefix) + v, err := db.Get(k) + if errors.Is(err, database.ErrNotFound) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + return v, true, err +} + func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool, error) { - k := PrefixInfoKey(prefix) + rawPrefix, exists, err := GetPrefixMapping(db, prefix) + if err != nil { + return nil, false, err + } + if !exists { + return nil, false, nil + } + + k := PrefixInfoKey(rawPrefix) v, err := db.Get(k) if errors.Is(err, database.ErrNotFound) { return nil, false, nil @@ -97,7 +139,15 @@ func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool } func GetValue(db database.KeyValueReader, prefix []byte, key []byte) ([]byte, bool, error) { - k := PrefixValueKey(prefix, key) + rawPrefix, exists, err := GetPrefixMapping(db, prefix) + if err != nil { + return nil, false, err + } + if !exists { + return nil, false, nil + } + + k := PrefixValueKey(rawPrefix, key) v, err := db.Get(k) if errors.Is(err, database.ErrNotFound) { return nil, false, nil @@ -134,17 +184,34 @@ func GetBlock(db database.KeyValueReader, bid ids.ID) ([]byte, error) { // DB func HasPrefix(db database.KeyValueReader, prefix []byte) (bool, error) { - k := PrefixInfoKey(prefix) + k := PrefixMappingKey(prefix) return db.Has(k) } func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, error) { - k := PrefixValueKey(prefix, key) + rawPrefix, exists, err := GetPrefixMapping(db, prefix) + if err != nil { + return false, err + } + if !exists { + return false, nil + } + + k := PrefixValueKey(rawPrefix, key) return db.Has(k) } func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo) error { - k := PrefixInfoKey(prefix) + // TODO: handle need to now read on writes + rawPrefix, exists, err := GetPrefixMapping(db, prefix) + if err != nil { + return err + } + if !exists { + return errors.New("TODO") + } + + k := PrefixInfoKey(rawPrefix) b, err := Marshal(i) if err != nil { return err From 8653b7fb313ec8de12d9efa523d4c04bb7d77531 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Sun, 2 Jan 2022 16:46:02 -0600 Subject: [PATCH 02/31] more scoping --- chain/block.go | 1 + chain/claim_tx.go | 7 +++++-- chain/storage.go | 2 ++ vm/vm.go | 3 +++ 4 files changed, 11 insertions(+), 2 deletions(-) diff --git a/chain/block.go b/chain/block.go index f90eb12c..af73bed2 100644 --- a/chain/block.go +++ b/chain/block.go @@ -150,6 +150,7 @@ func (b *StatelessBlock) verify() (*StatelessBlock, *versiondb.Database, error) return nil, nil, err } onAcceptDB := versiondb.New(parentState) + // TODO: first remove all expired prefixes and mark for pruning var surplusDifficulty uint64 for _, tx := range b.Txs { if err := tx.Execute(onAcceptDB, b.Tmstmp, context); err != nil { diff --git a/chain/claim_tx.go b/chain/claim_tx.go index b02d29f5..0b51ec8e 100644 --- a/chain/claim_tx.go +++ b/chain/claim_tx.go @@ -24,11 +24,12 @@ func (c *ClaimTx) Execute(db database.Database, blockTime int64) error { return 
ErrPublicKeyMismatch } - prevInfo, infoExists, err := GetPrefixInfo(db, c.Prefix) + // Prefix keys only exist if they are still valid + exists, err := HasPrefix(db, c.Prefix) if err != nil { return err } - if infoExists && prevInfo.Expiry >= blockTime { + if exists { return ErrPrefixNotExpired } @@ -41,6 +42,7 @@ func (c *ClaimTx) Execute(db database.Database, blockTime int64) error { Expiry: blockTime + expiryTime, Keys: 1, } + // TODO: create raw prefix with block hash or block time? if err := PutPrefixInfo(db, c.Prefix, newInfo); err != nil { return err } @@ -49,5 +51,6 @@ func (c *ClaimTx) Execute(db database.Database, blockTime int64) error { // overwrite even if claimed by the same owner // TODO(patrick-ogrady): free things async for faster block verification loops // e.g., lazily free what is said to be freed in the block? + // TODO: do this freeing async (no longer rely on direct prefixes) return DeleteAllPrefixKeys(db, c.Prefix) } diff --git a/chain/storage.go b/chain/storage.go index 1fd288cf..d3b444e6 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -41,6 +41,8 @@ var lastAccepted = []byte("last_accepted") // TODO: use indirection to automatically service prefix->rawPrefix translation // TODO: derive rawPrefix deterministically by hash(block hash + prefix) func PrefixMappingKey(prefix []byte) (k []byte) { + // TODO: is there a cleaner way to pack these byte arrays? + // TODO: can we use a sync.Pool? k = make([]byte, 2+len(prefix)) k[0] = mappingPrefix k[1] = parser.Delimiter diff --git a/vm/vm.go b/vm/vm.go index bc5e4f37..93ed98df 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -155,6 +155,9 @@ func (vm *VM) Initialize( go vm.run() go vm.regossip() + // TODO: start async pruning loop (make sure has lock to prevent committing + // to vm.State while running)...also make sure children database are set + // after updating return nil } From e4e6b3ed5c24d379d24daf728c8f161fe2b89c5c Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Tue, 4 Jan 2022 10:54:51 -0800 Subject: [PATCH 03/31] add additional todo --- chain/storage.go | 1 + 1 file changed, 1 insertion(+) diff --git a/chain/storage.go b/chain/storage.go index d3b444e6..61694058 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/quarkvm/parser" ) +// TODO: update to Algorithm v0.2 // 0x0/ (prefix mapping) // -> [user prefix] -> [raw prefix] // 0x1/ (singleton prefix info) From 2450c1ad69711de2fcb5cdf4b771f90b38a85a09 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 18:38:56 -0800 Subject: [PATCH 04/31] cleanup underway --- chain/prefix_info.go | 1 + chain/storage.go | 127 +++++++++++-------------------------------- 2 files changed, 34 insertions(+), 94 deletions(-) diff --git a/chain/prefix_info.go b/chain/prefix_info.go index 865fcd83..7e827822 100644 --- a/chain/prefix_info.go +++ b/chain/prefix_info.go @@ -9,6 +9,7 @@ import ( type PrefixInfo struct { Owner [crypto.PublicKeySize]byte `serialize:"true" json:"owner"` + RawPrefix rawPrefix `serialize:"true" json:"rawPrefix"` LastUpdated int64 `serialize:"true" json:"lastUpdated"` Expiry int64 `serialize:"true" json:"expiry"` Keys int64 `serialize:"true" json:"keys"` // decays faster the more keys you have diff --git a/chain/storage.go b/chain/storage.go index 61694058..2c7eab04 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -12,82 +12,50 @@ import ( "github.com/ava-labs/quarkvm/parser" ) -// TODO: update to Algorithm v0.2 -// 0x0/ (prefix mapping) -// -> [user prefix] -> [raw 
prefix] -// 0x1/ (singleton prefix info) -// -> [raw prefix] -// 0x2/ (prefix keys) +// TODO: cleanup mapping diagram +// 0x0/ (singleton prefix info) +// -> [prefix]:[prefix info/raw prefix] +// 0x1/ (prefix keys) // -> [raw prefix] // -> [key] -// 0x3/ (tx hashes) -// 0x4/ (block hashes) -// 0x5/ (prefix expiry queue) +// 0x2/ (tx hashes) +// 0x3/ (block hashes) +// 0x4/ (prefix expiry queue) // -> [raw prefix] -// 0x6/ (prefix pruning queue) +// 0x5/ (prefix pruning queue) // -> [raw prefix] +type rawPrefix ids.ShortID + const ( - mappingPrefix = 0x0 - infoPrefix = 0x1 - keyPrefix = 0x2 - txPrefix = 0x3 - blockPrefix = 0x4 - // prefixExpiryQueue = 0x5 - // prefixPruningQueue = 0x6 + infoPrefix = 0x0 + keyPrefix = 0x1 + txPrefix = 0x2 + blockPrefix = 0x3 + + // TODO: implement queues + // prefixExpiryQueue = 0x4 + // prefixPruningQueue = 0x5 ) var lastAccepted = []byte("last_accepted") -// TODO: use indirection to automatically service prefix->rawPrefix translation -// TODO: derive rawPrefix deterministically by hash(block hash + prefix) -func PrefixMappingKey(prefix []byte) (k []byte) { - // TODO: is there a cleaner way to pack these byte arrays? - // TODO: can we use a sync.Pool? +func PrefixInfoKey(prefix []byte) (k []byte) { k = make([]byte, 2+len(prefix)) - k[0] = mappingPrefix - k[1] = parser.Delimiter - copy(k[2:], prefix) - return k -} - -// TODO: make ids.ID? -func PrefixInfoKey(rawPrefix []byte) (k []byte) { - k = make([]byte, 2+len(rawPrefix)) k[0] = infoPrefix k[1] = parser.Delimiter - copy(k[2:], rawPrefix) + copy(k[2:], prefix) return k } -func PrefixValueKey(rawPrefix []byte, key []byte) (k []byte) { - prefixN, keyN := len(rawPrefix), len(key) - // TODO: can we not introduce an invariant that the delimiter is never - // included? - pfxDelimExists := bytes.HasSuffix(rawPrefix, []byte{parser.Delimiter}) - - n := 2 + prefixN + keyN - if !pfxDelimExists { - n++ - } - - k = make([]byte, n) +// Assumes [prefix] and [key] do not contain delimiter +func PrefixValueKey(prefix rawPrefix, key []byte) (k []byte) { + k = make([]byte, 2+len(prefix)+2+len(key)) k[0] = keyPrefix k[1] = parser.Delimiter - cur := 2 - - copy(k[cur:], rawPrefix) - cur += prefixN - - if !pfxDelimExists { - k[cur] = parser.Delimiter - cur++ - } - if len(key) == 0 { - return k - } - - copy(k[cur:], key) + copy(k[2:], prefix[:]) + k[2+len(prefix)] = parser.Delimiter + copy(k[2+len(prefix)+1:], key) return k } @@ -107,28 +75,8 @@ func PrefixBlockKey(blockID ids.ID) (k []byte) { return k } -func GetPrefixMapping(db database.KeyValueReader, prefix []byte) ([]byte, bool, error) { - k := PrefixMappingKey(prefix) - v, err := db.Get(k) - if errors.Is(err, database.ErrNotFound) { - return nil, false, nil - } - if err != nil { - return nil, false, err - } - return v, true, err -} - func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool, error) { - rawPrefix, exists, err := GetPrefixMapping(db, prefix) - if err != nil { - return nil, false, err - } - if !exists { - return nil, false, nil - } - - k := PrefixInfoKey(rawPrefix) + k := PrefixInfoKey(prefix) v, err := db.Get(k) if errors.Is(err, database.ErrNotFound) { return nil, false, nil @@ -142,7 +90,7 @@ func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool } func GetValue(db database.KeyValueReader, prefix []byte, key []byte) ([]byte, bool, error) { - rawPrefix, exists, err := GetPrefixMapping(db, prefix) + prefixInfo, exists, err := GetPrefixInfo(db, prefix) if err != nil { return nil, false, err } @@ -150,7 +98,7 @@ 
func GetValue(db database.KeyValueReader, prefix []byte, key []byte) ([]byte, bo return nil, false, nil } - k := PrefixValueKey(rawPrefix, key) + k := PrefixValueKey(prefixInfo.RawPrefix, key) v, err := db.Get(k) if errors.Is(err, database.ErrNotFound) { return nil, false, nil @@ -187,12 +135,12 @@ func GetBlock(db database.KeyValueReader, bid ids.ID) ([]byte, error) { // DB func HasPrefix(db database.KeyValueReader, prefix []byte) (bool, error) { - k := PrefixMappingKey(prefix) + k := PrefixInfoKey(prefix) return db.Has(k) } func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, error) { - rawPrefix, exists, err := GetPrefixMapping(db, prefix) + prefixInfo, exists, err := GetPrefixInfo(db, prefix) if err != nil { return false, err } @@ -200,21 +148,12 @@ func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, return false, nil } - k := PrefixValueKey(rawPrefix, key) + k := PrefixValueKey(prefixInfo.RawPrefix, key) return db.Has(k) } func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo) error { - // TODO: handle need to now read on writes - rawPrefix, exists, err := GetPrefixMapping(db, prefix) - if err != nil { - return err - } - if !exists { - return errors.New("TODO") - } - - k := PrefixInfoKey(rawPrefix) + k := PrefixInfoKey(prefix) b, err := Marshal(i) if err != nil { return err From 6e0a24cd8c9ce3869d455fbac06852b252d3c23d Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 20:35:30 -0800 Subject: [PATCH 05/31] more progress --- chain/claim_tx.go | 15 +++-------- chain/set_tx.go | 2 +- chain/set_tx_test.go | 6 ++++- chain/storage.go | 59 +++++++++++++++++++++++++++++++++++-------- chain/storage_test.go | 14 +++++----- 5 files changed, 65 insertions(+), 31 deletions(-) diff --git a/chain/claim_tx.go b/chain/claim_tx.go index 0b51ec8e..995e5301 100644 --- a/chain/claim_tx.go +++ b/chain/claim_tx.go @@ -33,24 +33,17 @@ func (c *ClaimTx) Execute(db database.Database, blockTime int64) error { return ErrPrefixNotExpired } - // every successful "claim" deletes the existing keys - // whether "c.Sender" is same as or different than "prevInfo.Owner" - // now write with either prefix expired or new prefix owner + // Anything previously at the index was previously removed + rawPrefix, err := RawPrefix(c.Prefix, blockTime) newInfo := &PrefixInfo{ Owner: c.Sender, + RawPrefix: rawPrefix, LastUpdated: blockTime, Expiry: blockTime + expiryTime, Keys: 1, } - // TODO: create raw prefix with block hash or block time? if err := PutPrefixInfo(db, c.Prefix, newInfo); err != nil { return err } - - // Remove anything that is stored in value prefix - // overwrite even if claimed by the same owner - // TODO(patrick-ogrady): free things async for faster block verification loops - // e.g., lazily free what is said to be freed in the block? 
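	// The synchronous DeleteAllPrefixKeys call below goes away: each claim now
	// derives a fresh RawPrefix from the prefix and block time, so keys written
	// under an earlier raw prefix become unreachable and can be cleared later by
	// the pruning queue sketched in chain/storage.go.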
- // TODO: do this freeing async (no longer rely on direct prefixes) - return DeleteAllPrefixKeys(db, c.Prefix) + return nil } diff --git a/chain/set_tx.go b/chain/set_tx.go index 6f96dcc2..17814b5b 100644 --- a/chain/set_tx.go +++ b/chain/set_tx.go @@ -65,7 +65,7 @@ func (s *SetTx) Execute(db database.Database, blockTime int64) error { return s.updatePrefix(db, blockTime, i) } -func (s *SetTx) updatePrefix(db database.KeyValueWriter, blockTime int64, i *PrefixInfo) error { +func (s *SetTx) updatePrefix(db database.Database, blockTime int64, i *PrefixInfo) error { timeRemaining := (i.Expiry - i.LastUpdated) * i.Keys if len(s.Value) == 0 { i.Keys-- diff --git a/chain/set_tx_test.go b/chain/set_tx_test.go index 06fbcd02..c65fc9cf 100644 --- a/chain/set_tx_test.go +++ b/chain/set_tx_test.go @@ -188,7 +188,11 @@ func TestSetTx(t *testing.T) { t.Fatalf("#%d: unexpected owner found (expected pub key %q)", i, string(pub.PublicKey)) } // each claim must delete all existing keys with the value key - if kvs := Range(db, tp.Prefix, nil, WithPrefix()); len(kvs) > 0 { + kvs, err := Range(db, tp.Prefix, nil, WithPrefix()) + if err != nil { + t.Fatalf("#%d: unexpected error when fetching range %v", i, err) + } + if len(kvs) > 0 { t.Fatalf("#%d: unexpected key-values for the prefix after claim", i) } diff --git a/chain/storage.go b/chain/storage.go index 2c7eab04..552a3b43 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -5,6 +5,7 @@ package chain import ( "bytes" + "encoding/binary" "errors" "github.com/ava-labs/avalanchego/database" @@ -40,6 +41,9 @@ const ( var lastAccepted = []byte("last_accepted") +// TODO: move to right spot +var prefixMissing = errors.New("prefix missing") + func PrefixInfoKey(prefix []byte) (k []byte) { k = make([]byte, 2+len(prefix)) k[0] = infoPrefix @@ -75,7 +79,22 @@ func PrefixBlockKey(blockID ids.ID) (k []byte) { return k } +func RawPrefix(prefix []byte, blockTime int64) (rawPrefix, error) { + prefixLen := len(prefix) + raw := make([]byte, prefixLen+1+binary.MaxVarintLen64) + copy(raw, prefix) + raw[prefixLen] = parser.Delimiter + binary.PutVarint(raw[prefixLen+1:], blockTime) + rp, err := ids.ToShortID(raw) + if err != nil { + // TODO: clean up casting + return rawPrefix(ids.ShortID{}), err + } + return rawPrefix(rp), nil +} + func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool, error) { + // TODO: add caching (will need some expiry when keys cleared) k := PrefixInfoKey(prefix) v, err := db.Get(k) if errors.Is(err, database.ErrNotFound) { @@ -161,17 +180,31 @@ func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo) err return db.Put(k, b) } -func PutPrefixKey(db database.KeyValueWriter, prefix []byte, key []byte, value []byte) error { - k := PrefixValueKey(prefix, key) +func PutPrefixKey(db database.Database, prefix []byte, key []byte, value []byte) error { + prefixInfo, exists, err := GetPrefixInfo(db, prefix) + if err != nil { + return err + } + if !exists { + return prefixMissing + } + k := PrefixValueKey(prefixInfo.RawPrefix, key) return db.Put(k, value) } -func DeletePrefixKey(db database.KeyValueWriter, prefix []byte, key []byte) error { - k := PrefixValueKey(prefix, key) +func DeletePrefixKey(db database.Database, prefix []byte, key []byte) error { + prefixInfo, exists, err := GetPrefixInfo(db, prefix) + if err != nil { + return err + } + if !exists { + return prefixMissing + } + k := PrefixValueKey(prefixInfo.RawPrefix, key) return db.Delete(k) } -func DeleteAllPrefixKeys(db database.Database, prefix 
[]byte) error { +func DeleteAllPrefixKeys(db database.Database, prefix rawPrefix) error { return database.ClearPrefix(db, db, PrefixValueKey(prefix, nil)) } @@ -195,9 +228,15 @@ type KeyValue struct { } // Range reads keys from the store. -// TODO: check prefix info to restrict reads to the owner? -func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (kvs []KeyValue) { - ret := &Op{key: PrefixValueKey(prefix, key)} +func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (kvs []KeyValue, err error) { + prefixInfo, exists, err := GetPrefixInfo(db, prefix) + if err != nil { + return nil, err + } + if !exists { + return nil, prefixMissing + } + ret := &Op{key: PrefixValueKey(prefixInfo.RawPrefix, key)} ret.applyOpts(opts) startKey := ret.key @@ -207,7 +246,7 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k endKey = ret.rangeEnd if !bytes.HasPrefix(endKey, []byte{keyPrefix, parser.Delimiter}) { // if overwritten via "WithRange" - endKey = PrefixValueKey(prefix, endKey) + endKey = PrefixValueKey(prefixInfo.RawPrefix, endKey) } } @@ -250,7 +289,7 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k Value: cursor.Value(), }) } - return kvs + return kvs, nil } type Op struct { diff --git a/chain/storage_test.go b/chain/storage_test.go index a850e2c6..ee591ca7 100644 --- a/chain/storage_test.go +++ b/chain/storage_test.go @@ -18,17 +18,12 @@ func TestPrefixValueKey(t *testing.T) { t.Parallel() tt := []struct { - pfx []byte + pfx rawPrefix key []byte valueKey []byte }{ { - pfx: []byte("foo"), - key: []byte("hello"), - valueKey: append([]byte{keyPrefix}, []byte("/foo/hello")...), - }, - { - pfx: []byte("foo/"), + pfx: rawPrefix(ids.ShortID{}), key: []byte("hello"), valueKey: append([]byte{keyPrefix}, []byte("/foo/hello")...), }, @@ -218,7 +213,10 @@ func TestRange(t *testing.T) { }, } for i, tv := range tt { - kvs := Range(db, tv.pfx, tv.key, tv.opts...) + kvs, err := Range(db, tv.pfx, tv.key, tv.opts...) 
+ if err != nil { + t.Fatalf("#%d: unexpected error when fetching range %v", i, err) + } if len(tv.kvs) == 0 && len(kvs) == 0 { continue } From eab928cf3825de98c8606072295b9185fd57f108 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 20:54:41 -0800 Subject: [PATCH 06/31] more progress --- chain/claim_tx_test.go | 41 ++++++++++++----------- chain/storage.go | 12 +++---- chain/storage_test.go | 76 +++++++++++++++++++++++------------------- 3 files changed, 66 insertions(+), 63 deletions(-) diff --git a/chain/claim_tx_test.go b/chain/claim_tx_test.go index 47fd65be..4f2fe643 100644 --- a/chain/claim_tx_test.go +++ b/chain/claim_tx_test.go @@ -21,11 +21,11 @@ func TestClaimTx(t *testing.T) { } pub := priv.PublicKey() - priv2, err := crypto.NewPrivateKey() - if err != nil { - t.Fatal(err) - } - pub2 := priv2.PublicKey() + // priv2, err := crypto.NewPrivateKey() + // if err != nil { + // t.Fatal(err) + // } + // pub2 := priv2.PublicKey() db := memdb.New() defer db.Close() @@ -50,21 +50,22 @@ func TestClaimTx(t *testing.T) { blockTime: 1, err: ErrPrefixNotExpired, }, - { // successful new claim - tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub.Bytes(), Prefix: []byte("foo")}}, - blockTime: 100, - err: nil, - }, - { // successful new claim by different owner - tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub2.Bytes(), Prefix: []byte("foo")}}, - blockTime: 150, - err: nil, - }, - { // invalid claim due to expiration by different owner - tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub2.Bytes(), Prefix: []byte("foo")}}, - blockTime: 177, - err: ErrPrefixNotExpired, - }, + // TODO: restore tests once expiry function exists + // { // successful new claim + // tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub.Bytes(), Prefix: []byte("foo")}}, + // blockTime: 100, + // err: nil, + // }, + // { // successful new claim by different owner + // tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub2.Bytes(), Prefix: []byte("foo")}}, + // blockTime: 150, + // err: nil, + // }, + // { // invalid claim due to expiration by different owner + // tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub2.Bytes(), Prefix: []byte("foo")}}, + // blockTime: 177, + // err: ErrPrefixNotExpired, + // }, } for i, tv := range tt { err := tv.tx.Execute(db, tv.blockTime) diff --git a/chain/storage.go b/chain/storage.go index 552a3b43..6e4c7ae0 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -54,7 +54,7 @@ func PrefixInfoKey(prefix []byte) (k []byte) { // Assumes [prefix] and [key] do not contain delimiter func PrefixValueKey(prefix rawPrefix, key []byte) (k []byte) { - k = make([]byte, 2+len(prefix)+2+len(key)) + k = make([]byte, 2+len(prefix)+1+len(key)) k[0] = keyPrefix k[1] = parser.Delimiter copy(k[2:], prefix[:]) @@ -258,16 +258,12 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k } curKey := cursor.Key() + formattedKey := curKey[2+len(prefixInfo.RawPrefix)+1:] comp := bytes.Compare(startKey, curKey) if comp == 0 { // startKey == curKey kvs = append(kvs, KeyValue{ - Key: bytes.Replace( - curKey, - []byte{keyPrefix, parser.Delimiter}, - nil, - 1, - ), + Key: formattedKey, Value: cursor.Value(), }) continue @@ -285,7 +281,7 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k } kvs = append(kvs, KeyValue{ - Key: bytes.Replace(curKey, []byte{keyPrefix, parser.Delimiter}, nil, 1), + Key: formattedKey, Value: cursor.Value(), }) } diff --git a/chain/storage_test.go b/chain/storage_test.go index ee591ca7..1bad12e6 100644 --- a/chain/storage_test.go +++ b/chain/storage_test.go @@ -25,7 +25,7 @@ 
func TestPrefixValueKey(t *testing.T) { { pfx: rawPrefix(ids.ShortID{}), key: []byte("hello"), - valueKey: append([]byte{keyPrefix}, []byte("/foo/hello")...), + valueKey: append([]byte{keyPrefix}, []byte("/\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/hello")...), }, } for i, tv := range tt { @@ -104,10 +104,16 @@ func TestRange(t *testing.T) { db := memdb.New() defer db.Close() + // Make sure raw prefix can be retrieved + prefix := []byte("foo") + if err := PutPrefixInfo(db, prefix, &PrefixInfo{}); err != nil { + t.Fatal(err) + } + for i := 0; i < 5; i++ { if err := PutPrefixKey( db, - []byte("foo"), + prefix, []byte(fmt.Sprintf("hello%05d", i)), []byte(fmt.Sprintf("bar%05d", i)), ); err != nil { @@ -122,93 +128,93 @@ func TestRange(t *testing.T) { kvs []KeyValue }{ { // prefix exists but the key itself does not exist - pfx: []byte("foo/"), + pfx: []byte("foo"), key: []byte("9"), opts: nil, kvs: nil, }, { // single key - pfx: []byte("foo/"), + pfx: []byte("foo"), key: []byte("hello00000"), opts: nil, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, }, }, { // prefix query - pfx: []byte("foo/"), + pfx: []byte("foo"), key: []byte("hello"), opts: []OpOption{WithPrefix()}, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: []byte("foo/hello00002"), Value: []byte("bar00002")}, - {Key: []byte("foo/hello00003"), Value: []byte("bar00003")}, - {Key: []byte("foo/hello00004"), Value: []byte("bar00004")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: []byte("bar00002")}, + {Key: []byte("hello00003"), Value: []byte("bar00003")}, + {Key: []byte("hello00004"), Value: []byte("bar00004")}, }, }, { // prefix query - pfx: []byte("foo/"), + pfx: []byte("foo"), key: nil, opts: []OpOption{WithPrefix()}, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: []byte("foo/hello00002"), Value: []byte("bar00002")}, - {Key: []byte("foo/hello00003"), Value: []byte("bar00003")}, - {Key: []byte("foo/hello00004"), Value: []byte("bar00004")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: []byte("bar00002")}, + {Key: []byte("hello00003"), Value: []byte("bar00003")}, + {Key: []byte("hello00004"), Value: []byte("bar00004")}, }, }, { // prefix query - pfx: []byte("foo/"), + pfx: []byte("foo"), key: []byte("x"), opts: []OpOption{WithPrefix()}, kvs: nil, }, { // range query - pfx: []byte("foo/"), + pfx: []byte("foo"), key: []byte("hello"), opts: []OpOption{WithRangeEnd([]byte("hello00003"))}, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: []byte("foo/hello00002"), Value: []byte("bar00002")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: []byte("bar00002")}, }, }, { // range query - pfx: []byte("foo/"), + pfx: []byte("foo"), key: []byte("hello00001"), opts: []OpOption{WithRangeEnd([]byte("hello00003"))}, kvs: []KeyValue{ - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: 
[]byte("foo/hello00002"), Value: []byte("bar00002")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: []byte("bar00002")}, }, }, { // range query - pfx: []byte("foo/"), + pfx: []byte("foo"), key: []byte("hello00003"), opts: []OpOption{WithRangeEnd([]byte("hello00005"))}, kvs: []KeyValue{ - {Key: []byte("foo/hello00003"), Value: []byte("bar00003")}, - {Key: []byte("foo/hello00004"), Value: []byte("bar00004")}, + {Key: []byte("hello00003"), Value: []byte("bar00003")}, + {Key: []byte("hello00004"), Value: []byte("bar00004")}, }, }, { // range query with limit - pfx: []byte("foo/"), + pfx: []byte("foo"), key: []byte("hello00003"), opts: []OpOption{WithRangeEnd([]byte("hello00005")), WithRangeLimit(1)}, kvs: []KeyValue{ - {Key: []byte("foo/hello00003"), Value: []byte("bar00003")}, + {Key: []byte("hello00003"), Value: []byte("bar00003")}, }, }, { // prefix query with limit - pfx: []byte("foo/"), + pfx: []byte("foo"), key: []byte("hello"), opts: []OpOption{WithPrefix(), WithRangeLimit(3)}, kvs: []KeyValue{ - {Key: []byte("foo/hello00000"), Value: []byte("bar00000")}, - {Key: []byte("foo/hello00001"), Value: []byte("bar00001")}, - {Key: []byte("foo/hello00002"), Value: []byte("bar00002")}, + {Key: []byte("hello00000"), Value: []byte("bar00000")}, + {Key: []byte("hello00001"), Value: []byte("bar00001")}, + {Key: []byte("hello00002"), Value: []byte("bar00002")}, }, }, } @@ -221,7 +227,7 @@ func TestRange(t *testing.T) { continue } if !reflect.DeepEqual(kvs, tv.kvs) { - t.Fatalf("#%d: range response expected %d pair(s), got %v pair(s)", i, len(tv.kvs), len(kvs)) + t.Fatalf("#%d: range response expected %v pair(s), got %v pair(s)", i, tv.kvs, kvs) } } } From 5679700e9dd31d83eed9145de8b5038c3db10067 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 20:58:51 -0800 Subject: [PATCH 07/31] everything compiles --- vm/service.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/vm/service.go b/vm/service.go index eef87096..b066ea2d 100644 --- a/vm/service.go +++ b/vm/service.go @@ -171,7 +171,6 @@ type RangeArgs struct { type RangeReply struct { KeyValues []chain.KeyValue `serialize:"true" json:"keyValues"` - Error error `serialize:"true" json:"error"` } func (svc *Service) Range(_ *http.Request, args *RangeArgs, reply *RangeReply) (err error) { @@ -183,7 +182,10 @@ func (svc *Service) Range(_ *http.Request, args *RangeArgs, reply *RangeReply) ( if args.Limit > 0 { opts = append(opts, chain.WithRangeLimit(args.Limit)) } - reply.KeyValues = chain.Range(svc.vm.db, args.Prefix, args.Key, opts...) - reply.Error = nil + kvs, err := chain.Range(svc.vm.db, args.Prefix, args.Key, opts...) 
+ if err != nil { + return err + } + reply.KeyValues = kvs return nil } From a788ec398ac443e13c40f4f2a78bd7cc30abefe4 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 21:03:56 -0800 Subject: [PATCH 08/31] fix integration test --- tests/integration/integration_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integration/integration_test.go b/tests/integration/integration_test.go index 2ee098f0..ad51e25a 100644 --- a/tests/integration/integration_test.go +++ b/tests/integration/integration_test.go @@ -21,7 +21,6 @@ import ( "github.com/ava-labs/quarkvm/chain" "github.com/ava-labs/quarkvm/client" "github.com/ava-labs/quarkvm/crypto" - "github.com/ava-labs/quarkvm/parser" "github.com/ava-labs/quarkvm/vm" "github.com/fatih/color" ginkgo "github.com/onsi/ginkgo/v2" @@ -288,7 +287,7 @@ var _ = ginkgo.Describe("[ClaimTx]", func() { ginkgo.By("read back from VM with range query", func() { kvs, err := instances[0].cli.Range(pfx, k) gomega.Ω(err).To(gomega.BeNil()) - gomega.Ω(kvs[0].Key).To(gomega.Equal(append(append(pfx, parser.Delimiter), k...))) + gomega.Ω(kvs[0].Key).To(gomega.Equal(k)) gomega.Ω(kvs[0].Value).To(gomega.Equal(v)) }) }) From be531aee70dcc89b630ea3d60789a9f8c2e5bfb1 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 21:10:50 -0800 Subject: [PATCH 09/31] cleanup --- .golangci.yml | 4 +--- chain/claim_tx.go | 3 +++ chain/prefix_info.go | 3 ++- chain/storage.go | 45 +++++++++++++++++++------------------------ chain/storage_test.go | 38 +++++++++++++++++++----------------- 5 files changed, 46 insertions(+), 47 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 96abcf35..1f7d809b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -30,7 +30,6 @@ linters: - exhaustive - exportloopref - forcetypeassert - - gocognit - goconst - gocritic - gofmt @@ -72,6 +71,7 @@ linters: # - exhaustivestruct # - forbidigo # - funlen + # - gocognit # - godot # - goerr113 # - gomnd @@ -84,8 +84,6 @@ linters: # - wsl linters-settings: - gocognit: - min-complexity: 37 gocritic: disabled-checks: - appendAssign diff --git a/chain/claim_tx.go b/chain/claim_tx.go index 995e5301..76751304 100644 --- a/chain/claim_tx.go +++ b/chain/claim_tx.go @@ -35,6 +35,9 @@ func (c *ClaimTx) Execute(db database.Database, blockTime int64) error { // Anything previously at the index was previously removed rawPrefix, err := RawPrefix(c.Prefix, blockTime) + if err != nil { + return err + } newInfo := &PrefixInfo{ Owner: c.Sender, RawPrefix: rawPrefix, diff --git a/chain/prefix_info.go b/chain/prefix_info.go index 7e827822..c234761a 100644 --- a/chain/prefix_info.go +++ b/chain/prefix_info.go @@ -4,12 +4,13 @@ package chain import ( + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/quarkvm/crypto" ) type PrefixInfo struct { Owner [crypto.PublicKeySize]byte `serialize:"true" json:"owner"` - RawPrefix rawPrefix `serialize:"true" json:"rawPrefix"` + RawPrefix ids.ShortID `serialize:"true" json:"-"` LastUpdated int64 `serialize:"true" json:"lastUpdated"` Expiry int64 `serialize:"true" json:"expiry"` Keys int64 `serialize:"true" json:"keys"` // decays faster the more keys you have diff --git a/chain/storage.go b/chain/storage.go index 6e4c7ae0..9901ec3f 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -26,24 +26,20 @@ import ( // 0x5/ (prefix pruning queue) // -> [raw prefix] -type rawPrefix ids.ShortID - const ( infoPrefix = 0x0 keyPrefix = 0x1 txPrefix = 0x2 
blockPrefix = 0x3 - // TODO: implement queues // prefixExpiryQueue = 0x4 // prefixPruningQueue = 0x5 + + shortIDLen = 20 ) var lastAccepted = []byte("last_accepted") -// TODO: move to right spot -var prefixMissing = errors.New("prefix missing") - func PrefixInfoKey(prefix []byte) (k []byte) { k = make([]byte, 2+len(prefix)) k[0] = infoPrefix @@ -53,13 +49,13 @@ func PrefixInfoKey(prefix []byte) (k []byte) { } // Assumes [prefix] and [key] do not contain delimiter -func PrefixValueKey(prefix rawPrefix, key []byte) (k []byte) { - k = make([]byte, 2+len(prefix)+1+len(key)) +func PrefixValueKey(rprefix ids.ShortID, key []byte) (k []byte) { + k = make([]byte, 2+shortIDLen+1+len(key)) k[0] = keyPrefix k[1] = parser.Delimiter - copy(k[2:], prefix[:]) - k[2+len(prefix)] = parser.Delimiter - copy(k[2+len(prefix)+1:], key) + copy(k[2:], rprefix[:]) + k[2+shortIDLen] = parser.Delimiter + copy(k[2+shortIDLen+1:], key) return k } @@ -79,18 +75,17 @@ func PrefixBlockKey(blockID ids.ID) (k []byte) { return k } -func RawPrefix(prefix []byte, blockTime int64) (rawPrefix, error) { +func RawPrefix(prefix []byte, blockTime int64) (ids.ShortID, error) { prefixLen := len(prefix) - raw := make([]byte, prefixLen+1+binary.MaxVarintLen64) - copy(raw, prefix) - raw[prefixLen] = parser.Delimiter - binary.PutVarint(raw[prefixLen+1:], blockTime) - rp, err := ids.ToShortID(raw) + r := make([]byte, prefixLen+1+binary.MaxVarintLen64) + copy(r, prefix) + r[prefixLen] = parser.Delimiter + binary.PutVarint(r[prefixLen+1:], blockTime) + rprefix, err := ids.ToShortID(r) if err != nil { - // TODO: clean up casting - return rawPrefix(ids.ShortID{}), err + return ids.ShortID{}, err } - return rawPrefix(rp), nil + return rprefix, nil } func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool, error) { @@ -186,7 +181,7 @@ func PutPrefixKey(db database.Database, prefix []byte, key []byte, value []byte) return err } if !exists { - return prefixMissing + return ErrPrefixMissing } k := PrefixValueKey(prefixInfo.RawPrefix, key) return db.Put(k, value) @@ -198,14 +193,14 @@ func DeletePrefixKey(db database.Database, prefix []byte, key []byte) error { return err } if !exists { - return prefixMissing + return ErrPrefixMissing } k := PrefixValueKey(prefixInfo.RawPrefix, key) return db.Delete(k) } -func DeleteAllPrefixKeys(db database.Database, prefix rawPrefix) error { - return database.ClearPrefix(db, db, PrefixValueKey(prefix, nil)) +func DeleteAllPrefixKeys(db database.Database, rprefix ids.ShortID) error { + return database.ClearPrefix(db, db, PrefixValueKey(rprefix, nil)) } func SetTransaction(db database.KeyValueWriter, tx *Transaction) error { @@ -234,7 +229,7 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k return nil, err } if !exists { - return nil, prefixMissing + return nil, ErrPrefixMissing } ret := &Op{key: PrefixValueKey(prefixInfo.RawPrefix, key)} ret.applyOpts(opts) diff --git a/chain/storage_test.go b/chain/storage_test.go index 1bad12e6..1f6f810f 100644 --- a/chain/storage_test.go +++ b/chain/storage_test.go @@ -18,18 +18,18 @@ func TestPrefixValueKey(t *testing.T) { t.Parallel() tt := []struct { - pfx rawPrefix + rpfx ids.ShortID key []byte valueKey []byte }{ { - pfx: rawPrefix(ids.ShortID{}), + rpfx: ids.ShortID{0x1}, key: []byte("hello"), - valueKey: append([]byte{keyPrefix}, []byte("/\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/hello")...), + valueKey: append([]byte{keyPrefix}, 
[]byte("/\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/hello")...), }, } for i, tv := range tt { - vv := PrefixValueKey(tv.pfx, tv.key) + vv := PrefixValueKey(tv.rpfx, tv.key) if !bytes.Equal(tv.valueKey, vv) { t.Fatalf("#%d: value expected %q, got %q", i, tv.valueKey, vv) } @@ -104,16 +104,18 @@ func TestRange(t *testing.T) { db := memdb.New() defer db.Close() - // Make sure raw prefix can be retrieved - prefix := []byte("foo") - if err := PutPrefixInfo(db, prefix, &PrefixInfo{}); err != nil { + // Persist PrefixInfo so keys can be stored under rprefix + pfx := []byte("foo") + if err := PutPrefixInfo(db, pfx, &PrefixInfo{ + RawPrefix: ids.ShortID{0x1}, + }); err != nil { t.Fatal(err) } for i := 0; i < 5; i++ { if err := PutPrefixKey( db, - prefix, + pfx, []byte(fmt.Sprintf("hello%05d", i)), []byte(fmt.Sprintf("bar%05d", i)), ); err != nil { @@ -128,13 +130,13 @@ func TestRange(t *testing.T) { kvs []KeyValue }{ { // prefix exists but the key itself does not exist - pfx: []byte("foo"), + pfx: pfx, key: []byte("9"), opts: nil, kvs: nil, }, { // single key - pfx: []byte("foo"), + pfx: pfx, key: []byte("hello00000"), opts: nil, kvs: []KeyValue{ @@ -142,7 +144,7 @@ func TestRange(t *testing.T) { }, }, { // prefix query - pfx: []byte("foo"), + pfx: pfx, key: []byte("hello"), opts: []OpOption{WithPrefix()}, kvs: []KeyValue{ @@ -154,7 +156,7 @@ func TestRange(t *testing.T) { }, }, { // prefix query - pfx: []byte("foo"), + pfx: pfx, key: nil, opts: []OpOption{WithPrefix()}, kvs: []KeyValue{ @@ -166,13 +168,13 @@ func TestRange(t *testing.T) { }, }, { // prefix query - pfx: []byte("foo"), + pfx: pfx, key: []byte("x"), opts: []OpOption{WithPrefix()}, kvs: nil, }, { // range query - pfx: []byte("foo"), + pfx: pfx, key: []byte("hello"), opts: []OpOption{WithRangeEnd([]byte("hello00003"))}, kvs: []KeyValue{ @@ -182,7 +184,7 @@ func TestRange(t *testing.T) { }, }, { // range query - pfx: []byte("foo"), + pfx: pfx, key: []byte("hello00001"), opts: []OpOption{WithRangeEnd([]byte("hello00003"))}, kvs: []KeyValue{ @@ -191,7 +193,7 @@ func TestRange(t *testing.T) { }, }, { // range query - pfx: []byte("foo"), + pfx: pfx, key: []byte("hello00003"), opts: []OpOption{WithRangeEnd([]byte("hello00005"))}, kvs: []KeyValue{ @@ -200,7 +202,7 @@ func TestRange(t *testing.T) { }, }, { // range query with limit - pfx: []byte("foo"), + pfx: pfx, key: []byte("hello00003"), opts: []OpOption{WithRangeEnd([]byte("hello00005")), WithRangeLimit(1)}, kvs: []KeyValue{ @@ -208,7 +210,7 @@ func TestRange(t *testing.T) { }, }, { // prefix query with limit - pfx: []byte("foo"), + pfx: pfx, key: []byte("hello"), opts: []OpOption{WithPrefix(), WithRangeLimit(3)}, kvs: []KeyValue{ From 9a54fbee1a95d1cfbff24631447fbb9c7cbb9040 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 21:24:37 -0800 Subject: [PATCH 10/31] fix hash issue --- chain/lifeline_tx_test.go | 2 +- chain/storage.go | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/chain/lifeline_tx_test.go b/chain/lifeline_tx_test.go index a211c414..abfe4b3f 100644 --- a/chain/lifeline_tx_test.go +++ b/chain/lifeline_tx_test.go @@ -38,7 +38,7 @@ func TestLifelineTx(t *testing.T) { blockTime: 1, err: nil, }, - { // successful lifeline when prefix info is missing + { // successful lifeline when prefix info is not missing utx: &LifelineTx{BaseTx: &BaseTx{Sender: pub.Bytes(), Prefix: []byte("foo")}}, blockTime: 1, err: nil, diff --git a/chain/storage.go b/chain/storage.go index 9901ec3f..f19826a9 100644 --- 
a/chain/storage.go +++ b/chain/storage.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/quarkvm/parser" ) @@ -81,7 +82,8 @@ func RawPrefix(prefix []byte, blockTime int64) (ids.ShortID, error) { copy(r, prefix) r[prefixLen] = parser.Delimiter binary.PutVarint(r[prefixLen+1:], blockTime) - rprefix, err := ids.ToShortID(r) + h := hashing.ComputeHash160(r) + rprefix, err := ids.ToShortID(h) if err != nil { return ids.ShortID{}, err } From 25006bf2b299a1d9498a3c18c59adfe7b531a33b Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 21:36:15 -0800 Subject: [PATCH 11/31] automatically generate raw prefix in storage --- chain/claim_tx.go | 8 ++------ chain/prefix_info.go | 4 +++- chain/storage.go | 10 +++++++++- tests/e2e/e2e_test.go | 3 +-- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/chain/claim_tx.go b/chain/claim_tx.go index 76751304..65d06375 100644 --- a/chain/claim_tx.go +++ b/chain/claim_tx.go @@ -33,14 +33,10 @@ func (c *ClaimTx) Execute(db database.Database, blockTime int64) error { return ErrPrefixNotExpired } - // Anything previously at the index was previously removed - rawPrefix, err := RawPrefix(c.Prefix, blockTime) - if err != nil { - return err - } + // Anything previously at the prefix was previously removed... newInfo := &PrefixInfo{ Owner: c.Sender, - RawPrefix: rawPrefix, + Created: blockTime, LastUpdated: blockTime, Expiry: blockTime + expiryTime, Keys: 1, diff --git a/chain/prefix_info.go b/chain/prefix_info.go index c234761a..7ee5983c 100644 --- a/chain/prefix_info.go +++ b/chain/prefix_info.go @@ -10,8 +10,10 @@ import ( type PrefixInfo struct { Owner [crypto.PublicKeySize]byte `serialize:"true" json:"owner"` - RawPrefix ids.ShortID `serialize:"true" json:"-"` + Created int64 `serialize:"true" json:"created"` LastUpdated int64 `serialize:"true" json:"lastUpdated"` Expiry int64 `serialize:"true" json:"expiry"` Keys int64 `serialize:"true" json:"keys"` // decays faster the more keys you have + + RawPrefix ids.ShortID `serialize:"true" json:"rawPrefix"` } diff --git a/chain/storage.go b/chain/storage.go index f19826a9..55ef981a 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -169,6 +169,14 @@ func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, } func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo) error { + if i.RawPrefix == (ids.ShortID{}) { + rprefix, err := RawPrefix(prefix, i.Created) + if err != nil { + return err + } + i.RawPrefix = rprefix + } + k := PrefixInfoKey(prefix) b, err := Marshal(i) if err != nil { @@ -255,7 +263,7 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k } curKey := cursor.Key() - formattedKey := curKey[2+len(prefixInfo.RawPrefix)+1:] + formattedKey := curKey[2+shortIDLen+1:] comp := bytes.Compare(startKey, curKey) if comp == 0 { // startKey == curKey diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 7891d131..107ecbb1 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/quarkvm/chain" "github.com/ava-labs/quarkvm/client" "github.com/ava-labs/quarkvm/crypto" - "github.com/ava-labs/quarkvm/parser" "github.com/ava-labs/quarkvm/tests" "github.com/fatih/color" ginkgo "github.com/onsi/ginkgo/v2" @@ -199,7 +198,7 @@ var _ = 
ginkgo.Describe("[Claim/SetTx]", func() { color.Blue("checking SetTx with Range on %q", inst.uri) kvs, err := inst.cli.Range(pfx, k) gomega.Ω(err).To(gomega.BeNil()) - gomega.Ω(kvs[0].Key).To(gomega.Equal(append(append(pfx, parser.Delimiter), k...))) + gomega.Ω(kvs[0].Key).To(gomega.Equal(k)) gomega.Ω(kvs[0].Value).To(gomega.Equal(v)) } }) From 4b78fa15d31135e076ae99d8ca308e6b107b7378 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 21:37:29 -0800 Subject: [PATCH 12/31] revert auto hash generation --- chain/claim_tx.go | 5 +++++ chain/storage.go | 8 -------- chain/storage_test.go | 2 +- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/chain/claim_tx.go b/chain/claim_tx.go index 65d06375..305a8eaa 100644 --- a/chain/claim_tx.go +++ b/chain/claim_tx.go @@ -34,12 +34,17 @@ func (c *ClaimTx) Execute(db database.Database, blockTime int64) error { } // Anything previously at the prefix was previously removed... + rprefix, err := RawPrefix(c.Prefix, blockTime) + if err != nil { + return err + } newInfo := &PrefixInfo{ Owner: c.Sender, Created: blockTime, LastUpdated: blockTime, Expiry: blockTime + expiryTime, Keys: 1, + RawPrefix: rprefix, } if err := PutPrefixInfo(db, c.Prefix, newInfo); err != nil { return err diff --git a/chain/storage.go b/chain/storage.go index 55ef981a..ecbee7d8 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -169,14 +169,6 @@ func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, } func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo) error { - if i.RawPrefix == (ids.ShortID{}) { - rprefix, err := RawPrefix(prefix, i.Created) - if err != nil { - return err - } - i.RawPrefix = rprefix - } - k := PrefixInfoKey(prefix) b, err := Marshal(i) if err != nil { diff --git a/chain/storage_test.go b/chain/storage_test.go index 1f6f810f..3ac7975f 100644 --- a/chain/storage_test.go +++ b/chain/storage_test.go @@ -25,7 +25,7 @@ func TestPrefixValueKey(t *testing.T) { { rpfx: ids.ShortID{0x1}, key: []byte("hello"), - valueKey: append([]byte{keyPrefix}, []byte("/\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/hello")...), + valueKey: append([]byte{keyPrefix}, []byte("/\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/hello")...), //nolint:lll }, } for i, tv := range tt { From 84fc802a493fe20b025357c2b37f5e4719cf8d1e Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 22:33:04 -0800 Subject: [PATCH 13/31] first pass on expiry keys --- chain/claim_tx.go | 7 +- chain/lifeline_tx.go | 3 +- chain/set_tx.go | 3 +- chain/storage.go | 172 ++++++++++++++++++++++++++++++++++-------- chain/storage_test.go | 2 +- 5 files changed, 145 insertions(+), 42 deletions(-) diff --git a/chain/claim_tx.go b/chain/claim_tx.go index 305a8eaa..ef952819 100644 --- a/chain/claim_tx.go +++ b/chain/claim_tx.go @@ -34,19 +34,14 @@ func (c *ClaimTx) Execute(db database.Database, blockTime int64) error { } // Anything previously at the prefix was previously removed... 
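	// PutPrefixInfo now also maintains the expiry queue: it indexes the prefix
	// under the new expiry and, when lastExpiry >= 0, first deletes the stale
	// queue entry; ClaimTx passes -1 because a fresh claim has no prior entry.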
- rprefix, err := RawPrefix(c.Prefix, blockTime) - if err != nil { - return err - } newInfo := &PrefixInfo{ Owner: c.Sender, Created: blockTime, LastUpdated: blockTime, Expiry: blockTime + expiryTime, Keys: 1, - RawPrefix: rprefix, } - if err := PutPrefixInfo(db, c.Prefix, newInfo); err != nil { + if err := PutPrefixInfo(db, c.Prefix, newInfo, -1); err != nil { return err } return nil diff --git a/chain/lifeline_tx.go b/chain/lifeline_tx.go index 0def7b45..1cf5af00 100644 --- a/chain/lifeline_tx.go +++ b/chain/lifeline_tx.go @@ -23,6 +23,7 @@ func (l *LifelineTx) Execute(db database.Database, blockTime int64) error { return ErrPrefixMissing } // If you are "in debt", lifeline only adds but doesn't reset to new + lastExpiry := i.Expiry i.Expiry += expiryTime / i.Keys - return PutPrefixInfo(db, l.Prefix, i) + return PutPrefixInfo(db, l.Prefix, i, lastExpiry) } diff --git a/chain/set_tx.go b/chain/set_tx.go index 17814b5b..f6666056 100644 --- a/chain/set_tx.go +++ b/chain/set_tx.go @@ -80,6 +80,7 @@ func (s *SetTx) updatePrefix(db database.Database, blockTime int64, i *PrefixInf } newTimeRemaining := timeRemaining / i.Keys i.LastUpdated = blockTime + lastExpiry := i.Expiry i.Expiry = blockTime + newTimeRemaining - return PutPrefixInfo(db, s.Prefix, i) + return PutPrefixInfo(db, s.Prefix, i, lastExpiry) } diff --git a/chain/storage.go b/chain/storage.go index ecbee7d8..d1ba4ec4 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -15,48 +15,36 @@ import ( ) // TODO: cleanup mapping diagram -// 0x0/ (singleton prefix info) +// 0x0/ (block hashes) +// 0x1/ (tx hashes) +// 0x2/ (singleton prefix info) // -> [prefix]:[prefix info/raw prefix] -// 0x1/ (prefix keys) +// 0x3/ (prefix keys) // -> [raw prefix] // -> [key] -// 0x2/ (tx hashes) -// 0x3/ (block hashes) // 0x4/ (prefix expiry queue) // -> [raw prefix] // 0x5/ (prefix pruning queue) // -> [raw prefix] const ( - infoPrefix = 0x0 - keyPrefix = 0x1 - txPrefix = 0x2 - blockPrefix = 0x3 - // TODO: implement queues - // prefixExpiryQueue = 0x4 - // prefixPruningQueue = 0x5 + blockPrefix = 0x0 + txPrefix = 0x1 + infoPrefix = 0x2 + keyPrefix = 0x3 + expiryPrefix = 0x4 + pruningPrefix = 0x5 shortIDLen = 20 ) var lastAccepted = []byte("last_accepted") -func PrefixInfoKey(prefix []byte) (k []byte) { - k = make([]byte, 2+len(prefix)) - k[0] = infoPrefix - k[1] = parser.Delimiter - copy(k[2:], prefix) - return k -} - -// Assumes [prefix] and [key] do not contain delimiter -func PrefixValueKey(rprefix ids.ShortID, key []byte) (k []byte) { - k = make([]byte, 2+shortIDLen+1+len(key)) - k[0] = keyPrefix +func PrefixBlockKey(blockID ids.ID) (k []byte) { + k = make([]byte, 2+len(blockID)) + k[0] = blockPrefix k[1] = parser.Delimiter - copy(k[2:], rprefix[:]) - k[2+shortIDLen] = parser.Delimiter - copy(k[2+shortIDLen+1:], key) + copy(k[2:], blockID[:]) return k } @@ -68,11 +56,11 @@ func PrefixTxKey(txID ids.ID) (k []byte) { return k } -func PrefixBlockKey(blockID ids.ID) (k []byte) { - k = make([]byte, 2+len(blockID)) - k[0] = blockPrefix +func PrefixInfoKey(prefix []byte) (k []byte) { + k = make([]byte, 2+len(prefix)) + k[0] = infoPrefix k[1] = parser.Delimiter - copy(k[2:], blockID[:]) + copy(k[2:], prefix) return k } @@ -90,6 +78,44 @@ func RawPrefix(prefix []byte, blockTime int64) (ids.ShortID, error) { return rprefix, nil } +// Assumes [prefix] and [key] do not contain delimiter +func PrefixValueKey(rprefix ids.ShortID, key []byte) (k []byte) { + k = make([]byte, 2+shortIDLen+1+len(key)) + k[0] = keyPrefix + k[1] = parser.Delimiter + copy(k[2:], 
rprefix[:]) + k[2+shortIDLen] = parser.Delimiter + copy(k[2+shortIDLen+1:], key) + return k +} + +func specificTimeKey(p byte, rprefix ids.ShortID, t int64) (k []byte) { + k = make([]byte, 2+binary.MaxVarintLen64+1+shortIDLen) + k[0] = p + k[1] = parser.Delimiter + binary.PutVarint(k[2:], t) + k[2+binary.MaxVarintLen64] = parser.Delimiter + copy(k[2+binary.MaxVarintLen64+1:], rprefix[:]) + return k +} + +func RangeTimeKey(p byte, t int64) (k []byte) { + k = make([]byte, 2+binary.MaxVarintLen64+1) + k[0] = p + k[1] = parser.Delimiter + binary.PutVarint(k[2:], t) + k[2+binary.MaxVarintLen64] = parser.Delimiter + return k +} + +func PrefixExpiryKey(rprefix ids.ShortID, expiry int64) (k []byte) { + return specificTimeKey(expiryPrefix, rprefix, expiry) +} + +func PrefixPruningKey(rprefix ids.ShortID, expired int64) (k []byte) { + return specificTimeKey(pruningPrefix, rprefix, expired) +} + func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool, error) { // TODO: add caching (will need some expiry when keys cleared) k := PrefixInfoKey(prefix) @@ -149,6 +175,43 @@ func GetBlock(db database.KeyValueReader, bid ids.ID) ([]byte, error) { return db.Get(PrefixBlockKey(bid)) } +func GetExpired(db database.Database, parent int64, current int64) (pfxs [][]byte, err error) { + pfxs = [][]byte{} + startKey := RangeTimeKey(expiryPrefix, parent) + endKey := RangeTimeKey(expiryPrefix, current) + cursor := db.NewIteratorWithStart(startKey) + for cursor.Next() { + curKey := cursor.Key() + if bytes.Compare(startKey, curKey) < -1 { // startKey < curKey; continue search + continue + } + if bytes.Compare(curKey, endKey) > 0 { // curKey > endKey; end search + break + } + pfxs = append(pfxs, cursor.Value()) + } + return pfxs, nil +} + +func GetNextPrunable(db database.Database) (rpfx ids.ShortID, t int64, err error) { + startKey := RangeTimeKey(expiryPrefix, 0) + cursor := db.NewIteratorWithStart(startKey) + for cursor.Next() { + curKey := cursor.Key() + if bytes.Compare(startKey, curKey) < -1 { // startKey < curKey; continue search + continue + } + // Extract prunable info from key + expired, err := binary.ReadVarint(bytes.NewReader(curKey[2 : 2+binary.MaxVarintLen64])) + rpfx, err = ids.ToShortID(curKey[2+binary.MaxVarintLen64+1:]) + if err != nil { + return ids.ShortID{}, -1, err + } + return rpfx, expired, nil + } + return ids.ShortID{}, -1, nil +} + // DB func HasPrefix(db database.KeyValueReader, prefix []byte) (bool, error) { k := PrefixInfoKey(prefix) @@ -168,8 +231,26 @@ func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, return db.Has(k) } -func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo) error { - k := PrefixInfoKey(prefix) +func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo, lastExpiry int64) error { + if len(i.RawPrefix) == 0 { + rprefix, err := RawPrefix(prefix, i.Created) + if err != nil { + return err + } + i.RawPrefix = rprefix + } + if lastExpiry >= 0 { + k := PrefixExpiryKey(i.RawPrefix, lastExpiry) + if err := db.Delete(k); err != nil { + return err + } + } + k := PrefixExpiryKey(i.RawPrefix, i.Expiry) + if err := db.Put(k, prefix); err != nil { + return err + } + + k = PrefixInfoKey(prefix) b, err := Marshal(i) if err != nil { return err @@ -177,6 +258,27 @@ func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo) err return db.Put(k, b) } +func ExpirePrefix(db database.Database, prefix []byte) error { + prefixInfo, exists, err := GetPrefixInfo(db, prefix) + if err != nil 
{ + return err + } + if !exists { + return ErrPrefixMissing + } + + k := PrefixInfoKey(prefix) + if err := db.Delete(k); err != nil { + return err + } + k = PrefixExpiryKey(prefixInfo.RawPrefix, prefixInfo.Expiry) + if err := db.Delete(k); err != nil { + return err + } + k = PrefixPruningKey(prefixInfo.RawPrefix, prefixInfo.Expiry) + return db.Put(k, nil) +} + func PutPrefixKey(db database.Database, prefix []byte, key []byte, value []byte) error { prefixInfo, exists, err := GetPrefixInfo(db, prefix) if err != nil { @@ -201,7 +303,11 @@ func DeletePrefixKey(db database.Database, prefix []byte, key []byte) error { return db.Delete(k) } -func DeleteAllPrefixKeys(db database.Database, rprefix ids.ShortID) error { +func PrunePrefix(db database.Database, rprefix ids.ShortID, expired int64) error { + k := PrefixPruningKey(rprefix, expired) + if err := db.Delete(k); err != nil { + return err + } return database.ClearPrefix(db, db, PrefixValueKey(rprefix, nil)) } diff --git a/chain/storage_test.go b/chain/storage_test.go index 3ac7975f..456fdb37 100644 --- a/chain/storage_test.go +++ b/chain/storage_test.go @@ -108,7 +108,7 @@ func TestRange(t *testing.T) { pfx := []byte("foo") if err := PutPrefixInfo(db, pfx, &PrefixInfo{ RawPrefix: ids.ShortID{0x1}, - }); err != nil { + }, -1); err != nil { t.Fatal(err) } From 0e345377947b9866d7d5dea4b39817887158b0b6 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 23:20:04 -0800 Subject: [PATCH 14/31] jank pruning --- chain/block.go | 24 +++++++++++++-- chain/storage.go | 29 ++++++++---------- vm/chain_vm.go | 2 +- vm/pruner.go | 53 +++++++++++++++++++++++++++++++++ vm/vm.go | 77 ++++++++++++++++++++++++++++-------------------- 5 files changed, 134 insertions(+), 51 deletions(-) create mode 100644 vm/pruner.go diff --git a/chain/block.go b/chain/block.go index af73bed2..463b9958 100644 --- a/chain/block.go +++ b/chain/block.go @@ -150,7 +150,19 @@ func (b *StatelessBlock) verify() (*StatelessBlock, *versiondb.Database, error) return nil, nil, err } onAcceptDB := versiondb.New(parentState) - // TODO: first remove all expired prefixes and mark for pruning + + // Remove all expired prefixes + pfxs, err := GetExpired(onAcceptDB, parent.Tmstmp, b.Tmstmp) + if err != nil { + return nil, nil, err + } + for _, pfx := range pfxs { + if err := ExpirePrefix(onAcceptDB, pfx); err != nil { + return nil, nil, err + } + } + + // Process new transactions var surplusDifficulty uint64 for _, tx := range b.Txs { if err := tx.Execute(onAcceptDB, b.Tmstmp, context); err != nil { @@ -196,7 +208,6 @@ func (b *StatelessBlock) Accept() error { } b.st = choices.Accepted b.vm.Accepted(b) - // TODO: clear expired state (using index from timestamp to prefix) return nil } @@ -222,6 +233,15 @@ func (b *StatelessBlock) Height() uint64 { return b.StatefulBlock.Hght } // implements "snowman.Block" func (b *StatelessBlock) Timestamp() time.Time { return b.t } +func (b *StatelessBlock) SetChildrenDB(db database.Database) error { + for _, child := range b.children { + if err := child.onAcceptDB.SetDatabase(db); err != nil { + return err + } + } + return nil +} + func (b *StatelessBlock) onAccept() (database.Database, error) { if b.st == choices.Accepted || b.Hght == 0 /* genesis */ { return b.vm.State(), nil diff --git a/chain/storage.go b/chain/storage.go index d1ba4ec4..3f6f3b5c 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -193,23 +193,28 @@ func GetExpired(db database.Database, parent int64, current int64) (pfxs [][]byt return pfxs, nil } -func GetNextPrunable(db 
database.Database) (rpfx ids.ShortID, t int64, err error) { +func PruneNext(db database.Database, limit int) (err error) { startKey := RangeTimeKey(expiryPrefix, 0) cursor := db.NewIteratorWithStart(startKey) - for cursor.Next() { + removals := 0 + for cursor.Next() && removals < limit { curKey := cursor.Key() if bytes.Compare(startKey, curKey) < -1 { // startKey < curKey; continue search continue } - // Extract prunable info from key - expired, err := binary.ReadVarint(bytes.NewReader(curKey[2 : 2+binary.MaxVarintLen64])) - rpfx, err = ids.ToShortID(curKey[2+binary.MaxVarintLen64+1:]) + rpfx, err := ids.ToShortID(curKey[2+binary.MaxVarintLen64+1:]) if err != nil { - return ids.ShortID{}, -1, err + return err + } + if err := db.Delete(curKey); err != nil { + return err + } + if err := database.ClearPrefix(db, db, PrefixValueKey(rpfx, nil)); err != nil { + return err } - return rpfx, expired, nil + removals++ } - return ids.ShortID{}, -1, nil + return nil } // DB @@ -303,14 +308,6 @@ func DeletePrefixKey(db database.Database, prefix []byte, key []byte) error { return db.Delete(k) } -func PrunePrefix(db database.Database, rprefix ids.ShortID, expired int64) error { - k := PrefixPruningKey(rprefix, expired) - if err := db.Delete(k); err != nil { - return err - } - return database.ClearPrefix(db, db, PrefixValueKey(rprefix, nil)) -} - func SetTransaction(db database.KeyValueWriter, tx *Transaction) error { k := PrefixTxKey(tx.ID()) b, err := Marshal(tx) diff --git a/vm/chain_vm.go b/vm/chain_vm.go index dba72e8d..838f00b3 100644 --- a/vm/chain_vm.go +++ b/vm/chain_vm.go @@ -38,7 +38,7 @@ func (vm *VM) Rejected(b *chain.StatelessBlock) { func (vm *VM) Accepted(b *chain.StatelessBlock) { vm.blocks.Put(b.ID(), b) delete(vm.verifiedBlocks, b.ID()) - vm.lastAccepted = b.ID() + vm.lastAccepted = b log.Debug("accepted block", "id", b.ID()) } diff --git a/vm/pruner.go b/vm/pruner.go new file mode 100644 index 00000000..8924e1fd --- /dev/null +++ b/vm/pruner.go @@ -0,0 +1,53 @@ +// Copyright (C) 2019-2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package vm + +import ( + "time" + + "github.com/ava-labs/avalanchego/database/versiondb" + log "github.com/inconshreveable/log15" + + "github.com/ava-labs/quarkvm/chain" +) + +const ( + pruneLimit = 128 +) + +func (vm *VM) prune() { + log.Debug("starting prune loops") + defer close(vm.donecPrune) + + // should retry less aggressively + t := time.NewTimer(vm.pruneInterval) + defer t.Stop() + + for { + select { + case <-t.C: + case <-vm.stopc: + return + } + t.Reset(vm.pruneInterval) + + // TODO: be MUCH more careful about how state is accessed here (likely need + // locking on DB) + vdb := versiondb.New(vm.db) + // TODO: need to close after each loop iteration? 
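+ // The versiondb overlay batches one round of pruning: vm.db is only
+ // mutated if the Commit below succeeds, and on any failure the overlay is
+ // discarded via Abort so the work is simply retried on the next tick.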
+ if err := chain.PruneNext(vdb, pruneLimit); err != nil { + log.Warn("unable to prune next range", "error", err) + vdb.Abort() + continue + } + if err := vdb.Commit(); err != nil { + log.Warn("unable to commit pruning work", "error", err) + vdb.Abort() + continue + } + if err := vm.lastAccepted.SetChildrenDB(vm.db); err != nil { + log.Error("unable to update child databases of last accepted block", "error", err) + } + } +} diff --git a/vm/vm.go b/vm/vm.go index 93ed98df..f8092c33 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -33,6 +33,10 @@ const ( defaultWorkInterval = 100 * time.Millisecond defaultRegossipInterval = time.Second + defaultPruneInterval = time.Minute + + defaultMinimumDifficulty = 1 + defaultMinBlockCost = 1 mempoolSize = 1024 ) @@ -48,9 +52,11 @@ type VM struct { workInterval time.Duration regossipInterval time.Duration - mempool *mempool.Mempool - appSender common.AppSender - gossipedTxs *cache.LRU + pruneInterval time.Duration + + mempool *mempool.Mempool + appSender common.AppSender + gossipedTxs *cache.LRU // cache block objects to optimize "getBlock" // only put when a block is accepted // key: block ID, value: *chain.StatelessBlock @@ -66,7 +72,7 @@ type VM struct { blockBuilder chan struct{} preferred ids.ID - lastAccepted ids.ID + lastAccepted *chain.StatelessBlock minDifficulty uint64 minBlockCost uint64 @@ -74,6 +80,7 @@ type VM struct { stopc chan struct{} donecRun chan struct{} donecRegossip chan struct{} + donecPrune chan struct{} } const ( @@ -97,9 +104,13 @@ func (vm *VM) Initialize( vm.ctx = ctx vm.db = dbManager.Current().Database - // TODO: make this configurable via genesis + // TODO: make this configurable via config vm.workInterval = defaultWorkInterval vm.regossipInterval = defaultRegossipInterval + vm.pruneInterval = defaultPruneInterval + + // TODO: make this configurable via genesis + vm.minDifficulty, vm.minBlockCost = defaultMinimumDifficulty, defaultMinBlockCost vm.mempool = mempool.New(mempoolSize) vm.appSender = appSender @@ -118,46 +129,48 @@ func (vm *VM) Initialize( return err } if has { - b, err := chain.GetLastAccepted(vm.db) + blkID, err := chain.GetLastAccepted(vm.db) if err != nil { log.Error("could not get last accepted", "err", err) return err } - vm.preferred = b - vm.lastAccepted = b - log.Info("initialized quarkvm from last accepted", "block", b) - return nil - } + blk, err := vm.getBlock(blkID) + if err != nil { + log.Error("could not load last accepted", "err", err) + return err + } - // Load from genesis - genesisBlk, err := chain.ParseBlock( - genesisBytes, - choices.Accepted, - vm, - ) - if err != nil { - log.Error("unable to init genesis block", "err", err) - return err - } - if err := chain.SetLastAccepted(vm.db, genesisBlk); err != nil { - log.Error("could not set genesis as last accepted", "err", err) - return err + vm.preferred, vm.lastAccepted = blkID, blk + log.Info("initialized quarkvm from last accepted", "block", blkID) + return nil + } else { + genesisBlk, err := chain.ParseBlock( + genesisBytes, + choices.Accepted, + vm, + ) + if err != nil { + log.Error("unable to init genesis block", "err", err) + return err + } + if err := chain.SetLastAccepted(vm.db, genesisBlk); err != nil { + log.Error("could not set genesis as last accepted", "err", err) + return err + } + gBlkID := genesisBlk.ID() + vm.preferred, vm.lastAccepted = gBlkID, genesisBlk + log.Info("initialized quarkvm from genesis", "block", gBlkID) } - gBlkID := genesisBlk.ID() - vm.preferred, vm.lastAccepted = gBlkID, gBlkID - vm.minDifficulty, vm.minBlockCost = 
genesisBlk.Difficulty, genesisBlk.Cost - log.Info("initialized quarkvm from genesis", "block", gBlkID) vm.stopc = make(chan struct{}) vm.donecRun = make(chan struct{}) vm.donecRegossip = make(chan struct{}) + vm.donecPrune = make(chan struct{}) go vm.run() go vm.regossip() - // TODO: start async pruning loop (make sure has lock to prevent committing - // to vm.State while running)...also make sure children database are set - // after updating + go vm.prune() return nil } @@ -362,5 +375,5 @@ func (vm *VM) SetPreference(id ids.ID) error { // "LastAccepted" implements "snowmanblock.ChainVM" // replaces "core.SnowmanVM.LastAccepted" func (vm *VM) LastAccepted() (ids.ID, error) { - return vm.lastAccepted, nil + return vm.lastAccepted.ID(), nil } From efbb435445795540a3c95c41e49399de170eaa0a Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 23:31:17 -0800 Subject: [PATCH 15/31] expiring tests --- chain/block.go | 8 +------ chain/claim_tx_test.go | 46 ++++++++++++++++++++++------------------- chain/storage.go | 47 ++++++++++++++++++++---------------------- 3 files changed, 48 insertions(+), 53 deletions(-) diff --git a/chain/block.go b/chain/block.go index 463b9958..917e889c 100644 --- a/chain/block.go +++ b/chain/block.go @@ -152,15 +152,9 @@ func (b *StatelessBlock) verify() (*StatelessBlock, *versiondb.Database, error) onAcceptDB := versiondb.New(parentState) // Remove all expired prefixes - pfxs, err := GetExpired(onAcceptDB, parent.Tmstmp, b.Tmstmp) - if err != nil { + if err := ExpireNext(onAcceptDB, parent.Tmstmp, b.Tmstmp); err != nil { return nil, nil, err } - for _, pfx := range pfxs { - if err := ExpirePrefix(onAcceptDB, pfx); err != nil { - return nil, nil, err - } - } // Process new transactions var surplusDifficulty uint64 diff --git a/chain/claim_tx_test.go b/chain/claim_tx_test.go index 4f2fe643..393796ca 100644 --- a/chain/claim_tx_test.go +++ b/chain/claim_tx_test.go @@ -21,11 +21,11 @@ func TestClaimTx(t *testing.T) { } pub := priv.PublicKey() - // priv2, err := crypto.NewPrivateKey() - // if err != nil { - // t.Fatal(err) - // } - // pub2 := priv2.PublicKey() + priv2, err := crypto.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + pub2 := priv2.PublicKey() db := memdb.New() defer db.Close() @@ -50,24 +50,28 @@ func TestClaimTx(t *testing.T) { blockTime: 1, err: ErrPrefixNotExpired, }, - // TODO: restore tests once expiry function exists - // { // successful new claim - // tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub.Bytes(), Prefix: []byte("foo")}}, - // blockTime: 100, - // err: nil, - // }, - // { // successful new claim by different owner - // tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub2.Bytes(), Prefix: []byte("foo")}}, - // blockTime: 150, - // err: nil, - // }, - // { // invalid claim due to expiration by different owner - // tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub2.Bytes(), Prefix: []byte("foo")}}, - // blockTime: 177, - // err: ErrPrefixNotExpired, - // }, + { // successful new claim + tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub.Bytes(), Prefix: []byte("foo")}}, + blockTime: 100, + err: nil, + }, + { // successful new claim by different owner + tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub2.Bytes(), Prefix: []byte("foo")}}, + blockTime: 150, + err: nil, + }, + { // invalid claim due to expiration by different owner + tx: &ClaimTx{BaseTx: &BaseTx{Sender: pub2.Bytes(), Prefix: []byte("foo")}}, + blockTime: 177, + err: ErrPrefixNotExpired, + }, } for i, tv := range tt { + if i > 0 { + if err := ExpireNext(db, tt[i-1].blockTime, tv.blockTime); err != nil { + 
t.Fatalf("#%d: ExpireNext errored %v", i, err) + } + } err := tv.tx.Execute(db, tv.blockTime) if !errors.Is(err, tv.err) { t.Fatalf("#%d: tx.Execute err expected %v, got %v", i, tv.err, err) diff --git a/chain/storage.go b/chain/storage.go index 3f6f3b5c..063ab349 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -175,8 +175,7 @@ func GetBlock(db database.KeyValueReader, bid ids.ID) ([]byte, error) { return db.Get(PrefixBlockKey(bid)) } -func GetExpired(db database.Database, parent int64, current int64) (pfxs [][]byte, err error) { - pfxs = [][]byte{} +func ExpireNext(db database.Database, parent int64, current int64) (err error) { startKey := RangeTimeKey(expiryPrefix, parent) endKey := RangeTimeKey(expiryPrefix, current) cursor := db.NewIteratorWithStart(startKey) @@ -188,9 +187,28 @@ func GetExpired(db database.Database, parent int64, current int64) (pfxs [][]byt if bytes.Compare(curKey, endKey) > 0 { // curKey > endKey; end search break } - pfxs = append(pfxs, cursor.Value()) + expiry, err := binary.ReadVarint(bytes.NewBuffer(curKey[2 : 2+binary.MaxVarintLen64])) + if err != nil { + return err + } + rpfx, err := ids.ToShortID(curKey[2+binary.MaxVarintLen64+1:]) + if err != nil { + return err + } + k := PrefixInfoKey(cursor.Value()) + if err := db.Delete(k); err != nil { + return err + } + k = PrefixExpiryKey(rpfx, expiry) + if err := db.Delete(k); err != nil { + return err + } + k = PrefixPruningKey(rpfx, expiry) + if err := db.Put(k, nil); err != nil { + return err + } } - return pfxs, nil + return nil } func PruneNext(db database.Database, limit int) (err error) { @@ -263,27 +281,6 @@ func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo, las return db.Put(k, b) } -func ExpirePrefix(db database.Database, prefix []byte) error { - prefixInfo, exists, err := GetPrefixInfo(db, prefix) - if err != nil { - return err - } - if !exists { - return ErrPrefixMissing - } - - k := PrefixInfoKey(prefix) - if err := db.Delete(k); err != nil { - return err - } - k = PrefixExpiryKey(prefixInfo.RawPrefix, prefixInfo.Expiry) - if err := db.Delete(k); err != nil { - return err - } - k = PrefixPruningKey(prefixInfo.RawPrefix, prefixInfo.Expiry) - return db.Put(k, nil) -} - func PutPrefixKey(db database.Database, prefix []byte, key []byte, value []byte) error { prefixInfo, exists, err := GetPrefixInfo(db, prefix) if err != nil { From e98e50c476496f5bdb33af1e193cc8bfd1619e48 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 23:56:34 -0800 Subject: [PATCH 16/31] fix tests --- chain/claim_tx_test.go | 1 + chain/storage.go | 33 ++++++++++++++------------------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/chain/claim_tx_test.go b/chain/claim_tx_test.go index 393796ca..0650988e 100644 --- a/chain/claim_tx_test.go +++ b/chain/claim_tx_test.go @@ -68,6 +68,7 @@ func TestClaimTx(t *testing.T) { } for i, tv := range tt { if i > 0 { + // Expire old prefixes between txs if err := ExpireNext(db, tt[i-1].blockTime, tv.blockTime); err != nil { t.Fatalf("#%d: ExpireNext errored %v", i, err) } diff --git a/chain/storage.go b/chain/storage.go index 063ab349..51e8d5d1 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -66,10 +66,10 @@ func PrefixInfoKey(prefix []byte) (k []byte) { func RawPrefix(prefix []byte, blockTime int64) (ids.ShortID, error) { prefixLen := len(prefix) - r := make([]byte, prefixLen+1+binary.MaxVarintLen64) + r := make([]byte, prefixLen+1+8) copy(r, prefix) r[prefixLen] = parser.Delimiter - binary.PutVarint(r[prefixLen+1:], 
blockTime) + binary.LittleEndian.PutUint64(r[prefixLen+1:], uint64(blockTime)) h := hashing.ComputeHash160(r) rprefix, err := ids.ToShortID(h) if err != nil { @@ -90,21 +90,21 @@ func PrefixValueKey(rprefix ids.ShortID, key []byte) (k []byte) { } func specificTimeKey(p byte, rprefix ids.ShortID, t int64) (k []byte) { - k = make([]byte, 2+binary.MaxVarintLen64+1+shortIDLen) + k = make([]byte, 2+8+1+shortIDLen) k[0] = p k[1] = parser.Delimiter - binary.PutVarint(k[2:], t) - k[2+binary.MaxVarintLen64] = parser.Delimiter - copy(k[2+binary.MaxVarintLen64+1:], rprefix[:]) + binary.LittleEndian.PutUint64(k[2:], uint64(t)) + k[2+8] = parser.Delimiter + copy(k[2+8+1:], rprefix[:]) return k } func RangeTimeKey(p byte, t int64) (k []byte) { - k = make([]byte, 2+binary.MaxVarintLen64+1) + k = make([]byte, 2+8+1) k[0] = p k[1] = parser.Delimiter - binary.PutVarint(k[2:], t) - k[2+binary.MaxVarintLen64] = parser.Delimiter + binary.LittleEndian.PutUint64(k[2:], uint64(t)) + k[2+8] = parser.Delimiter return k } @@ -187,20 +187,16 @@ func ExpireNext(db database.Database, parent int64, current int64) (err error) { if bytes.Compare(curKey, endKey) > 0 { // curKey > endKey; end search break } - expiry, err := binary.ReadVarint(bytes.NewBuffer(curKey[2 : 2+binary.MaxVarintLen64])) - if err != nil { - return err - } - rpfx, err := ids.ToShortID(curKey[2+binary.MaxVarintLen64+1:]) - if err != nil { + if err := db.Delete(cursor.Key()); err != nil { return err } k := PrefixInfoKey(cursor.Value()) if err := db.Delete(k); err != nil { return err } - k = PrefixExpiryKey(rpfx, expiry) - if err := db.Delete(k); err != nil { + expiry := int64(binary.LittleEndian.Uint64(curKey[2 : 2+8])) + rpfx, err := ids.ToShortID(curKey[2+8+1:]) + if err != nil { return err } k = PrefixPruningKey(rpfx, expiry) @@ -220,7 +216,7 @@ func PruneNext(db database.Database, limit int) (err error) { if bytes.Compare(startKey, curKey) < -1 { // startKey < curKey; continue search continue } - rpfx, err := ids.ToShortID(curKey[2+binary.MaxVarintLen64+1:]) + rpfx, err := ids.ToShortID(curKey[2+8+1:]) if err != nil { return err } @@ -272,7 +268,6 @@ func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo, las if err := db.Put(k, prefix); err != nil { return err } - k = PrefixInfoKey(prefix) b, err := Marshal(i) if err != nil { From aeb1821dbea6ebcb69ead4013865cce5834af8ec Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Wed, 5 Jan 2022 23:59:42 -0800 Subject: [PATCH 17/31] add context locking --- vm/pruner.go | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/vm/pruner.go b/vm/pruner.go index 8924e1fd..83ad4b2f 100644 --- a/vm/pruner.go +++ b/vm/pruner.go @@ -16,6 +16,26 @@ const ( pruneLimit = 128 ) +func (vm *VM) pruneCall() { + // Lock to prevent concurrent modification of state + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + vdb := versiondb.New(vm.db) + defer vdb.Abort() + if err := chain.PruneNext(vdb, pruneLimit); err != nil { + log.Warn("unable to prune next range", "error", err) + return + } + if err := vdb.Commit(); err != nil { + log.Warn("unable to commit pruning work", "error", err) + return + } + if err := vm.lastAccepted.SetChildrenDB(vm.db); err != nil { + log.Error("unable to update child databases of last accepted block", "error", err) + } +} + func (vm *VM) prune() { log.Debug("starting prune loops") defer close(vm.donecPrune) @@ -31,23 +51,6 @@ func (vm *VM) prune() { return } t.Reset(vm.pruneInterval) - - // TODO: be MUCH more careful about 
how state is accessed here (likely need - // locking on DB) - vdb := versiondb.New(vm.db) - // TODO: need to close after each loop iteration? - if err := chain.PruneNext(vdb, pruneLimit); err != nil { - log.Warn("unable to prune next range", "error", err) - vdb.Abort() - continue - } - if err := vdb.Commit(); err != nil { - log.Warn("unable to commit pruning work", "error", err) - vdb.Abort() - continue - } - if err := vm.lastAccepted.SetChildrenDB(vm.db); err != nil { - log.Error("unable to update child databases of last accepted block", "error", err) - } + vm.pruneCall() } } From b8e57b46de5432c8dcc5bdf35d9b134338b7c934 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Thu, 6 Jan 2022 00:02:18 -0800 Subject: [PATCH 18/31] add logging --- chain/storage.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/chain/storage.go b/chain/storage.go index 51e8d5d1..fe740c9a 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -11,6 +11,8 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/hashing" + log "github.com/inconshreveable/log15" + "github.com/ava-labs/quarkvm/parser" ) @@ -190,7 +192,8 @@ func ExpireNext(db database.Database, parent int64, current int64) (err error) { if err := db.Delete(cursor.Key()); err != nil { return err } - k := PrefixInfoKey(cursor.Value()) + pfx := cursor.Value() + k := PrefixInfoKey(pfx) if err := db.Delete(k); err != nil { return err } @@ -203,6 +206,7 @@ func ExpireNext(db database.Database, parent int64, current int64) (err error) { if err := db.Put(k, nil); err != nil { return err } + log.Debug("prefix expired", "prefix", string(pfx)) } return nil } @@ -226,6 +230,7 @@ func PruneNext(db database.Database, limit int) (err error) { if err := database.ClearPrefix(db, db, PrefixValueKey(rpfx, nil)); err != nil { return err } + log.Debug("rprefix pruned", "rprefix", rpfx.Hex()) removals++ } return nil From 14ee27e9e037eb4083cfb6367c875e35237182dd Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Thu, 6 Jan 2022 00:07:00 -0800 Subject: [PATCH 19/31] cleanup db at end of test --- chain/claim_tx_test.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/chain/claim_tx_test.go b/chain/claim_tx_test.go index 0650988e..e290b52e 100644 --- a/chain/claim_tx_test.go +++ b/chain/claim_tx_test.go @@ -91,4 +91,19 @@ func TestClaimTx(t *testing.T) { t.Fatalf("#%d: unexpected owner found (expected pub key %q)", i, string(pub.PublicKey)) } } + + // Cleanup DB after all txs submitted + if err := ExpireNext(db, 0, 1000); err != nil { + t.Fatal(err) + } + if err := PruneNext(db, 100); err != nil { + t.Fatal(err) + } + _, exists, err := GetPrefixInfo(db, []byte("foo")) + if err != nil { + t.Fatalf("failed to get prefix info %v", err) + } + if exists { + t.Fatal("prefix should not exist") + } } From 085be1e396d98f823326cb654c074f294b98e625 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Thu, 6 Jan 2022 00:09:38 -0800 Subject: [PATCH 20/31] fix rprefix gen --- chain/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/storage.go b/chain/storage.go index fe740c9a..ac9d986e 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -256,7 +256,7 @@ func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, } func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo, lastExpiry int64) error { - if len(i.RawPrefix) == 0 { + if i.RawPrefix == 
(ids.ShortID{}) { rprefix, err := RawPrefix(prefix, i.Created) if err != nil { return err From ada6ee81ad4de8a8f1c2049711ede7cd887140a1 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Thu, 6 Jan 2022 00:11:47 -0800 Subject: [PATCH 21/31] fix init issue --- vm/vm.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vm/vm.go b/vm/vm.go index f8092c33..e5a8f49c 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -128,7 +128,7 @@ func (vm *VM) Initialize( log.Error("could not determine if have last accepted") return err } - if has { + if has { //nolint:nestif blkID, err := chain.GetLastAccepted(vm.db) if err != nil { log.Error("could not get last accepted", "err", err) @@ -143,7 +143,6 @@ func (vm *VM) Initialize( vm.preferred, vm.lastAccepted = blkID, blk log.Info("initialized quarkvm from last accepted", "block", blkID) - return nil } else { genesisBlk, err := chain.ParseBlock( genesisBytes, From dd2169dc55d9f5864c5b3567fb0b662a74bc2d5d Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Thu, 6 Jan 2022 00:40:52 -0800 Subject: [PATCH 22/31] fix pruning correctness --- chain/storage.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/chain/storage.go b/chain/storage.go index ac9d986e..4b7ab8a1 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -7,6 +7,7 @@ import ( "bytes" "encoding/binary" "errors" + "math" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" @@ -212,7 +213,8 @@ func ExpireNext(db database.Database, parent int64, current int64) (err error) { } func PruneNext(db database.Database, limit int) (err error) { - startKey := RangeTimeKey(expiryPrefix, 0) + startKey := RangeTimeKey(pruningPrefix, 0) + endKey := RangeTimeKey(pruningPrefix, math.MaxInt64) cursor := db.NewIteratorWithStart(startKey) removals := 0 for cursor.Next() && removals < limit { @@ -220,6 +222,9 @@ func PruneNext(db database.Database, limit int) (err error) { if bytes.Compare(startKey, curKey) < -1 { // startKey < curKey; continue search continue } + if bytes.Compare(curKey, endKey) > 0 { // curKey > endKey; end search + break + } rpfx, err := ids.ToShortID(curKey[2+8+1:]) if err != nil { return err From 81a81e7862abf8fe38653177aacd38b212b72b00 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Thu, 6 Jan 2022 00:45:14 -0800 Subject: [PATCH 23/31] fix difficulty estimate --- vm/helpers.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/vm/helpers.go b/vm/helpers.go index 2da989f9..8622bbf2 100644 --- a/vm/helpers.go +++ b/vm/helpers.go @@ -54,5 +54,13 @@ func (vm *VM) DifficultyEstimate() (uint64, error) { totalBlocks++ return true, nil }) - return totalDifficulty/totalBlocks + 1, err + if err != nil { + return 0, err + } + recommended := totalDifficulty/totalBlocks + 1 + minRequired := vm.minDifficulty + vm.minBlockCost + if recommended < minRequired { + recommended = minRequired + } + return recommended, nil } From 1296705c6e57ba6cfbbc971f9c5d0e1b6cddfab2 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Thu, 6 Jan 2022 00:56:08 -0800 Subject: [PATCH 24/31] working again --- chain/block.go | 1 + vm/vm.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/chain/block.go b/chain/block.go index 917e889c..42004c5f 100644 --- a/chain/block.go +++ b/chain/block.go @@ -157,6 +157,7 @@ func (b *StatelessBlock) verify() (*StatelessBlock, *versiondb.Database, error) } // Process new transactions + log.Debug("build context", "next difficulty", context.NextDifficulty, "next cost", 
context.NextCost) var surplusDifficulty uint64 for _, tx := range b.Txs { if err := tx.Execute(onAcceptDB, b.Tmstmp, context); err != nil { diff --git a/vm/vm.go b/vm/vm.go index e5a8f49c..47f3073b 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -36,7 +36,7 @@ const ( defaultPruneInterval = time.Minute defaultMinimumDifficulty = 1 - defaultMinBlockCost = 1 + defaultMinBlockCost = 0 mempoolSize = 1024 ) From 2dad05e6197c4c99802101440685690b17cfaf9a Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Thu, 6 Jan 2022 01:10:12 -0800 Subject: [PATCH 25/31] fix underflow --- chain/block.go | 4 +++- vm/chain_vm.go | 5 +++-- vm/helpers.go | 1 + vm/vm.go | 2 +- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/chain/block.go b/chain/block.go index 42004c5f..9733b908 100644 --- a/chain/block.go +++ b/chain/block.go @@ -167,7 +167,9 @@ func (b *StatelessBlock) verify() (*StatelessBlock, *versiondb.Database, error) surplusDifficulty += tx.Difficulty() - context.NextDifficulty } // Ensure enough work is performed to compensate for block production speed - if surplusDifficulty < b.Difficulty*b.Cost { + requiredSurplus := b.Difficulty * b.Cost + if surplusDifficulty < requiredSurplus { + log.Debug("insufficient block surplus", "found", surplusDifficulty, "required", requiredSurplus) return nil, nil, ErrInsufficientSurplus } return parent, onAcceptDB, nil diff --git a/vm/chain_vm.go b/vm/chain_vm.go index 838f00b3..58ce23f3 100644 --- a/vm/chain_vm.go +++ b/vm/chain_vm.go @@ -63,7 +63,8 @@ func (vm *VM) ExecutionContext(currTime int64, lastBlock *chain.StatelessBlock) nextCost += uint64(chain.BlockTarget - secondsSinceLast) } else { possibleDiff := uint64(secondsSinceLast - chain.BlockTarget) - if possibleDiff < nextCost-vm.minBlockCost { + // TODO: clean this up + if nextCost >= vm.minBlockCost && possibleDiff < nextCost-vm.minBlockCost { nextCost -= possibleDiff } else { nextCost = vm.minBlockCost @@ -76,7 +77,7 @@ func (vm *VM) ExecutionContext(currTime int64, lastBlock *chain.StatelessBlock) nextDifficulty++ } else if recentTxs < chain.TargetTransactions { elapsedWindows := uint64(secondsSinceLast/chain.LookbackWindow) + 1 // account for current window being less - if elapsedWindows < nextDifficulty-vm.minDifficulty { + if nextDifficulty >= vm.minDifficulty && elapsedWindows < nextDifficulty-vm.minDifficulty { nextDifficulty -= elapsedWindows } else { nextDifficulty = vm.minDifficulty diff --git a/vm/helpers.go b/vm/helpers.go index 8622bbf2..a6a675b6 100644 --- a/vm/helpers.go +++ b/vm/helpers.go @@ -57,6 +57,7 @@ func (vm *VM) DifficultyEstimate() (uint64, error) { if err != nil { return 0, err } + // TODO: make more sophisticated...maybe return cost/difficulty separately? 
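+ // Current heuristic: average the difficulty of the recently accepted
+ // blocks, add one, and never recommend less than the configured
+ // minDifficulty+minBlockCost floor.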
recommended := totalDifficulty/totalBlocks + 1 minRequired := vm.minDifficulty + vm.minBlockCost if recommended < minRequired { diff --git a/vm/vm.go b/vm/vm.go index 47f3073b..e5a8f49c 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -36,7 +36,7 @@ const ( defaultPruneInterval = time.Minute defaultMinimumDifficulty = 1 - defaultMinBlockCost = 0 + defaultMinBlockCost = 1 mempoolSize = 1024 ) From ad5a558e221e1903032686a0b0fae77a710cecc2 Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 6 Jan 2022 10:22:33 -0800 Subject: [PATCH 26/31] vm: missing "<-donecPrune" Signed-off-by: Gyuho Lee --- vm/vm.go | 1 + 1 file changed, 1 insertion(+) diff --git a/vm/vm.go b/vm/vm.go index e5a8f49c..abd6f385 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -188,6 +188,7 @@ func (vm *VM) Shutdown() error { close(vm.stopc) <-vm.donecRun <-vm.donecRegossip + <-vm.donecPrune if vm.ctx == nil { return nil } From ecfb9299014d88fe5b61a853fb195655e5f45294 Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 6 Jan 2022 10:25:59 -0800 Subject: [PATCH 27/31] chain: use "ids.ShortEmpty" Signed-off-by: Gyuho Lee --- chain/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/storage.go b/chain/storage.go index 4b7ab8a1..757c923e 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -261,7 +261,7 @@ func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, } func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo, lastExpiry int64) error { - if i.RawPrefix == (ids.ShortID{}) { + if i.RawPrefix == ids.ShortEmpty { rprefix, err := RawPrefix(prefix, i.Created) if err != nil { return err From bf7096439e898577151f09cbb69a5da55e6eef12 Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 6 Jan 2022 10:57:05 -0800 Subject: [PATCH 28/31] chain: add "PutPrefixInfo/Key" tests Signed-off-by: Gyuho Lee --- chain/storage_test.go | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/chain/storage_test.go b/chain/storage_test.go index 456fdb37..4532c53a 100644 --- a/chain/storage_test.go +++ b/chain/storage_test.go @@ -98,6 +98,49 @@ func TestPrefixBlockKey(t *testing.T) { } } +func TestPutPrefixInfoAndKey(t *testing.T) { + t.Parallel() + + db := memdb.New() + defer db.Close() + + pfx := []byte("foo") + k, v := []byte("k"), []byte("v") + + // expect failures for non-existing prefixInfo + if ok, err := HasPrefix(db, pfx); ok || err != nil { + t.Fatalf("unexpected ok %v, err %v", ok, err) + } + if ok, err := HasPrefixKey(db, pfx, k); ok || err != nil { + t.Fatalf("unexpected ok %v, err %v", ok, err) + } + if err := PutPrefixKey(db, pfx, k, v); err != ErrPrefixMissing { + t.Fatalf("unexpected error %v, expected %v", err, ErrPrefixMissing) + } + + if err := PutPrefixInfo( + db, + pfx, + &PrefixInfo{ + RawPrefix: ids.ShortID{0x1}, + }, + -1, + ); err != nil { + t.Fatal(err) + } + if err := PutPrefixKey(db, pfx, k, v); err != nil { + t.Fatalf("unexpected error %v", err) + } + + // expect success for existing prefixInfo + if ok, err := HasPrefix(db, pfx); !ok || err != nil { + t.Fatalf("unexpected ok %v, err %v", ok, err) + } + if ok, err := HasPrefixKey(db, pfx, k); !ok || err != nil { + t.Fatalf("unexpected ok %v, err %v", ok, err) + } +} + func TestRange(t *testing.T) { t.Parallel() From a16f187d77687222e085f788576d58b34198f335 Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 6 Jan 2022 11:06:25 -0800 Subject: [PATCH 29/31] chain: document new storage functions Signed-off-by: Gyuho Lee --- chain/storage.go | 47 
++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/chain/storage.go b/chain/storage.go index 757c923e..49d5c2d1 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -43,6 +43,7 @@ const ( var lastAccepted = []byte("last_accepted") +// [blockPrefix] + [delimiter] + [blockID] func PrefixBlockKey(blockID ids.ID) (k []byte) { k = make([]byte, 2+len(blockID)) k[0] = blockPrefix @@ -51,6 +52,7 @@ func PrefixBlockKey(blockID ids.ID) (k []byte) { return k } +// [txPrefix] + [delimiter] + [txID] func PrefixTxKey(txID ids.ID) (k []byte) { k = make([]byte, 2+len(txID)) k[0] = txPrefix @@ -59,6 +61,7 @@ func PrefixTxKey(txID ids.ID) (k []byte) { return k } +// [infoPrefix] + [delimiter] + [prefix] func PrefixInfoKey(prefix []byte) (k []byte) { k = make([]byte, 2+len(prefix)) k[0] = infoPrefix @@ -82,6 +85,7 @@ func RawPrefix(prefix []byte, blockTime int64) (ids.ShortID, error) { } // Assumes [prefix] and [key] do not contain delimiter +// [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] func PrefixValueKey(rprefix ids.ShortID, key []byte) (k []byte) { k = make([]byte, 2+shortIDLen+1+len(key)) k[0] = keyPrefix @@ -92,7 +96,8 @@ func PrefixValueKey(rprefix ids.ShortID, key []byte) (k []byte) { return k } -func specificTimeKey(p byte, rprefix ids.ShortID, t int64) (k []byte) { +// [expiry/pruningPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] +func specificTimeKey(p byte, t int64, rprefix ids.ShortID) (k []byte) { k = make([]byte, 2+8+1+shortIDLen) k[0] = p k[1] = parser.Delimiter @@ -102,6 +107,7 @@ func specificTimeKey(p byte, rprefix ids.ShortID, t int64) (k []byte) { return k } +// [expiry/pruningPrefix] + [delimiter] + [timestamp] + [delimiter] func RangeTimeKey(p byte, t int64) (k []byte) { k = make([]byte, 2+8+1) k[0] = p @@ -111,16 +117,19 @@ func RangeTimeKey(p byte, t int64) (k []byte) { return k } -func PrefixExpiryKey(rprefix ids.ShortID, expiry int64) (k []byte) { - return specificTimeKey(expiryPrefix, rprefix, expiry) +// [expiryPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] +func PrefixExpiryKey(expiry int64, rprefix ids.ShortID) (k []byte) { + return specificTimeKey(expiryPrefix, expiry, rprefix) } -func PrefixPruningKey(rprefix ids.ShortID, expired int64) (k []byte) { - return specificTimeKey(pruningPrefix, rprefix, expired) +// [pruningPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] +func PrefixPruningKey(expired int64, rprefix ids.ShortID) (k []byte) { + return specificTimeKey(pruningPrefix, expired, rprefix) } func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool, error) { // TODO: add caching (will need some expiry when keys cleared) + // [infoPrefix] + [delimiter] + [prefix] k := PrefixInfoKey(prefix) v, err := db.Get(k) if errors.Is(err, database.ErrNotFound) { @@ -143,6 +152,7 @@ func GetValue(db database.KeyValueReader, prefix []byte, key []byte) ([]byte, bo return nil, false, nil } + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] k := PrefixValueKey(prefixInfo.RawPrefix, key) v, err := db.Get(k) if errors.Is(err, database.ErrNotFound) { @@ -178,11 +188,14 @@ func GetBlock(db database.KeyValueReader, bid ids.ID) ([]byte, error) { return db.Get(PrefixBlockKey(bid)) } +// ExpireNext queries "expiryPrefix" key space to find expiring keys, +// deletes their prefixInfos, and schedules its key pruning with its raw prefix. 
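+// Each visited entry is keyed as
+// [expiryPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix]
+// and stores the user-facing prefix as its value, so the PrefixInfo key and
+// the pruning-queue key can both be derived without any extra lookups.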
func ExpireNext(db database.Database, parent int64, current int64) (err error) { startKey := RangeTimeKey(expiryPrefix, parent) endKey := RangeTimeKey(expiryPrefix, current) cursor := db.NewIteratorWithStart(startKey) for cursor.Next() { + // [expiryPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] curKey := cursor.Key() if bytes.Compare(startKey, curKey) < -1 { // startKey < curKey; continue search continue @@ -193,17 +206,22 @@ func ExpireNext(db database.Database, parent int64, current int64) (err error) { if err := db.Delete(cursor.Key()); err != nil { return err } + + // [prefix] pfx := cursor.Value() + + // [infoPrefix] + [delimiter] + [prefix] k := PrefixInfoKey(pfx) if err := db.Delete(k); err != nil { return err } - expiry := int64(binary.LittleEndian.Uint64(curKey[2 : 2+8])) + expired := int64(binary.LittleEndian.Uint64(curKey[2 : 2+8])) rpfx, err := ids.ToShortID(curKey[2+8+1:]) if err != nil { return err } - k = PrefixPruningKey(rpfx, expiry) + // [pruningPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] + k = PrefixPruningKey(expired, rpfx) if err := db.Put(k, nil); err != nil { return err } @@ -212,12 +230,15 @@ func ExpireNext(db database.Database, parent int64, current int64) (err error) { return nil } +// PruneNext queries the keys that are currently marked with "pruningPrefix", +// and clears them from the database. func PruneNext(db database.Database, limit int) (err error) { startKey := RangeTimeKey(pruningPrefix, 0) endKey := RangeTimeKey(pruningPrefix, math.MaxInt64) cursor := db.NewIteratorWithStart(startKey) removals := 0 for cursor.Next() && removals < limit { + // [pruningPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] curKey := cursor.Key() if bytes.Compare(startKey, curKey) < -1 { // startKey < curKey; continue search continue @@ -232,6 +253,7 @@ func PruneNext(db database.Database, limit int) (err error) { if err := db.Delete(curKey); err != nil { return err } + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] if err := database.ClearPrefix(db, db, PrefixValueKey(rpfx, nil)); err != nil { return err } @@ -243,6 +265,7 @@ func PruneNext(db database.Database, limit int) (err error) { // DB func HasPrefix(db database.KeyValueReader, prefix []byte) (bool, error) { + // [infoPrefix] + [delimiter] + [prefix] k := PrefixInfoKey(prefix) return db.Has(k) } @@ -256,6 +279,7 @@ func HasPrefixKey(db database.KeyValueReader, prefix []byte, key []byte) (bool, return false, nil } + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] k := PrefixValueKey(prefixInfo.RawPrefix, key) return db.Has(k) } @@ -269,15 +293,18 @@ func PutPrefixInfo(db database.KeyValueWriter, prefix []byte, i *PrefixInfo, las i.RawPrefix = rprefix } if lastExpiry >= 0 { - k := PrefixExpiryKey(i.RawPrefix, lastExpiry) + // [expiryPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] + k := PrefixExpiryKey(lastExpiry, i.RawPrefix) if err := db.Delete(k); err != nil { return err } } - k := PrefixExpiryKey(i.RawPrefix, i.Expiry) + // [expiryPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] + k := PrefixExpiryKey(i.Expiry, i.RawPrefix) if err := db.Put(k, prefix); err != nil { return err } + // [infoPrefix] + [delimiter] + [prefix] k = PrefixInfoKey(prefix) b, err := Marshal(i) if err != nil { @@ -294,6 +321,7 @@ func PutPrefixKey(db database.Database, prefix []byte, key []byte, value []byte) if !exists { return ErrPrefixMissing } + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] k := 
PrefixValueKey(prefixInfo.RawPrefix, key) return db.Put(k, value) } @@ -359,6 +387,7 @@ func Range(db database.Database, prefix []byte, key []byte, opts ...OpOption) (k break } + // [keyPrefix] + [delimiter] + [rawPrefix] + [delimiter] + [key] curKey := cursor.Key() formattedKey := curKey[2+shortIDLen+1:] From d0cfc380e6c722cdf88738f37245198e11dbbea2 Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 6 Jan 2022 11:28:16 -0800 Subject: [PATCH 30/31] chain: fix error checks Signed-off-by: Gyuho Lee --- chain/storage_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/chain/storage_test.go b/chain/storage_test.go index 4532c53a..30610107 100644 --- a/chain/storage_test.go +++ b/chain/storage_test.go @@ -5,6 +5,7 @@ package chain import ( "bytes" + "errors" "fmt" reflect "reflect" "testing" @@ -114,7 +115,7 @@ func TestPutPrefixInfoAndKey(t *testing.T) { if ok, err := HasPrefixKey(db, pfx, k); ok || err != nil { t.Fatalf("unexpected ok %v, err %v", ok, err) } - if err := PutPrefixKey(db, pfx, k, v); err != ErrPrefixMissing { + if err := PutPrefixKey(db, pfx, k, v); !errors.Is(err, ErrPrefixMissing) { t.Fatalf("unexpected error %v, expected %v", err, ErrPrefixMissing) } From f2e68b48d3d06289e5243bfd0dae5d56330f7c5b Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 6 Jan 2022 12:52:32 -0800 Subject: [PATCH 31/31] chain: define "extractSpecificTimeKey" Signed-off-by: Gyuho Lee --- chain/storage.go | 41 +++++++++++++++++++++++++++-------------- chain/storage_test.go | 19 +++++++++++++++++++ 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/chain/storage.go b/chain/storage.go index 49d5c2d1..80605e27 100644 --- a/chain/storage.go +++ b/chain/storage.go @@ -96,17 +96,6 @@ func PrefixValueKey(rprefix ids.ShortID, key []byte) (k []byte) { return k } -// [expiry/pruningPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] -func specificTimeKey(p byte, t int64, rprefix ids.ShortID) (k []byte) { - k = make([]byte, 2+8+1+shortIDLen) - k[0] = p - k[1] = parser.Delimiter - binary.LittleEndian.PutUint64(k[2:], uint64(t)) - k[2+8] = parser.Delimiter - copy(k[2+8+1:], rprefix[:]) - return k -} - // [expiry/pruningPrefix] + [delimiter] + [timestamp] + [delimiter] func RangeTimeKey(p byte, t int64) (k []byte) { k = make([]byte, 2+8+1) @@ -127,6 +116,31 @@ func PrefixPruningKey(expired int64, rprefix ids.ShortID) (k []byte) { return specificTimeKey(pruningPrefix, expired, rprefix) } +const specificTimeKeyLen = 2 + 8 + 1 + shortIDLen + +// [expiry/pruningPrefix] + [delimiter] + [timestamp] + [delimiter] + [rawPrefix] +func specificTimeKey(p byte, t int64, rprefix ids.ShortID) (k []byte) { + k = make([]byte, specificTimeKeyLen) + k[0] = p + k[1] = parser.Delimiter + binary.LittleEndian.PutUint64(k[2:], uint64(t)) + k[2+8] = parser.Delimiter + copy(k[2+8+1:], rprefix[:]) + return k +} + +var ErrInvalidKeyFormat = errors.New("invalid key format") + +// extracts expiry/pruning timstamp and raw prefix +func extractSpecificTimeKey(k []byte) (timestamp int64, rprefix ids.ShortID, err error) { + if len(k) != specificTimeKeyLen { + return -1, ids.ShortEmpty, ErrInvalidKeyFormat + } + timestamp = int64(binary.LittleEndian.Uint64(k[2 : 2+8])) + rprefix, err = ids.ToShortID(k[2+8+1:]) + return timestamp, rprefix, err +} + func GetPrefixInfo(db database.KeyValueReader, prefix []byte) (*PrefixInfo, bool, error) { // TODO: add caching (will need some expiry when keys cleared) // [infoPrefix] + [delimiter] + [prefix] @@ -215,8 +229,7 @@ func ExpireNext(db database.Database, 
parent int64, current int64) (err error) { if err := db.Delete(k); err != nil { return err } - expired := int64(binary.LittleEndian.Uint64(curKey[2 : 2+8])) - rpfx, err := ids.ToShortID(curKey[2+8+1:]) + expired, rpfx, err := extractSpecificTimeKey(curKey) if err != nil { return err } @@ -246,7 +259,7 @@ func PruneNext(db database.Database, limit int) (err error) { if bytes.Compare(curKey, endKey) > 0 { // curKey > endKey; end search break } - rpfx, err := ids.ToShortID(curKey[2+8+1:]) + _, rpfx, err := extractSpecificTimeKey(curKey) if err != nil { return err } diff --git a/chain/storage_test.go b/chain/storage_test.go index 30610107..c081a88d 100644 --- a/chain/storage_test.go +++ b/chain/storage_test.go @@ -277,3 +277,22 @@ func TestRange(t *testing.T) { } } } + +func TestSpecificTimeKey(t *testing.T) { + rpfx0 := ids.ShortID{'k'} + k := PrefixExpiryKey(100, rpfx0) + ts, rpfx, err := extractSpecificTimeKey(k) + if err != nil { + t.Fatal(err) + } + if ts != 100 { + t.Fatalf("unexpected timestamp %d, expected 100", ts) + } + if rpfx != rpfx0 { + t.Fatalf("unexpected rawPrefix %v, expected %v", rpfx, rpfx0) + } + + if _, _, err = extractSpecificTimeKey(k[:10]); !errors.Is(err, ErrInvalidKeyFormat) { + t.Fatalf("unexpected error %v, expected %v", err, ErrInvalidKeyFormat) + } +}
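For orientation, here is a minimal sketch (not part of the patch series) of how the pieces above compose, written as if it sat in package chain next to the tests: PutPrefixInfo indexes a claim in the expiry queue, ExpireNext runs during block verification to move expired prefixes onto the pruning queue, and PruneNext runs from the background pruner to clear their key space. The timestamps are illustrative, and the limit of 128 mirrors the VM's pruneLimit.

package chain

import (
	"testing"

	"github.com/ava-labs/avalanchego/database/memdb"
)

// Sketch only: exercises the claim -> expire -> prune lifecycle end to end.
func TestPrefixLifecycleSketch(t *testing.T) {
	db := memdb.New()
	defer db.Close()

	// Claim "foo" at time 1 with an expiry at time 100; lastExpiry of -1 means
	// there is no earlier expiry-queue entry to delete.
	if err := PutPrefixInfo(db, []byte("foo"), &PrefixInfo{Created: 1, Expiry: 100}, -1); err != nil {
		t.Fatal(err)
	}

	// A block at time 150 (parent at time 1) expires the claim: the PrefixInfo
	// is deleted and the raw prefix is queued for pruning.
	if err := ExpireNext(db, 1, 150); err != nil {
		t.Fatal(err)
	}
	if _, exists, err := GetPrefixInfo(db, []byte("foo")); err != nil || exists {
		t.Fatalf("expected expired prefix (exists %v, err %v)", exists, err)
	}

	// The background pruner clears up to a fixed number of raw prefixes per
	// pass and removes their pruning-queue entries.
	if err := PruneNext(db, 128); err != nil {
		t.Fatal(err)
	}
}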