@@ -74,8 +74,6 @@ type Database struct {
7474 oldest common.Hash // Oldest tracked node, flush-list head
7575 newest common.Hash // Newest tracked node, flush-list tail
7676
77- preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
78-
7977 gctime time.Duration // Time spent on garbage collection since last commit
8078 gcnodes uint64 // Nodes garbage collected since last commit
8179 gcsize common.StorageSize // Data storage garbage collected since last commit
@@ -84,9 +82,9 @@ type Database struct {
8482 flushnodes uint64 // Nodes flushed since last commit
8583 flushsize common.StorageSize // Data storage flushed since last commit
8684
87- dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. metadata)
88- childrenSize common.StorageSize // Storage size of the external children tracking
89- preimagesSize common.StorageSize // Storage size of the preimages cache
85+ dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. metadata)
86+ childrenSize common.StorageSize // Storage size of the external children tracking
87+ preimages *preimageStore // The store for caching preimages
9088
9189 lock sync.RWMutex
9290}
@@ -287,15 +285,17 @@ func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database
287285 cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
288286 }
289287 }
288+ var preimage *preimageStore
289+ if config != nil && config.Preimages {
290+ preimage = newPreimageStore(diskdb)
291+ }
290292 db := &Database{
291293 diskdb: diskdb,
292294 cleans: cleans,
293295 dirties: map[common.Hash]*cachedNode{{}: {
294296 children: make(map[common.Hash]uint16),
295297 }},
296- }
297- if config == nil || config.Preimages { // TODO(karalabe): Flip to default off in the future
298- db.preimages = make(map[common.Hash][]byte)
298+ preimages: preimage,
300300 return db
301301}
@@ -341,24 +341,6 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
341341 db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
342342}
343343
344- // insertPreimage writes a new trie node pre-image to the memory database if it's
345- // yet unknown. The method will NOT make a copy of the slice,
346- // only use if the preimage will NOT be changed later on.
347- //
348- // Note, this method assumes that the database's lock is held!
349- func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
350- // Short circuit if preimage collection is disabled
351- if db.preimages == nil {
352- return
353- }
354- // Track the preimage if a yet unknown one
355- if _, ok := db.preimages[hash]; ok {
356- return
357- }
358- db.preimages[hash] = preimage
359- db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
360- }
361-
362344// node retrieves a cached trie node from memory, or returns nil if none can be
363345// found in the memory cache.
364346 func (db *Database) node(hash common.Hash) node {
@@ -435,24 +417,6 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
435417 return nil, errors.New("not found")
436418}
437419
438- // preimage retrieves a cached trie node pre-image from memory. If it cannot be
439- // found cached, the method queries the persistent database for the content.
440- func (db *Database) preimage(hash common.Hash) []byte {
441- // Short circuit if preimage collection is disabled
442- if db.preimages == nil {
443- return nil
444- }
445- // Retrieve the node from cache if available
446- db.lock.RLock()
447- preimage := db.preimages[hash]
448- db.lock.RUnlock()
449-
450- if preimage != nil {
451- return preimage
452- }
453- return rawdb.ReadPreimage(db.diskdb, hash)
454- }
455-
456420// Nodes retrieves the hashes of all the nodes cached within the memory database.
457421// This method is extremely expensive and should only be used to validate internal
458422// states in test code.
@@ -597,19 +561,8 @@ func (db *Database) Cap(limit common.StorageSize) error {
597561
598562 // If the preimage cache got large enough, push to disk. If it's still small
599563 // leave for later to deduplicate writes.
600- flushPreimages := db.preimagesSize > 4*1024*1024
601- if flushPreimages {
602- if db.preimages == nil {
603- log.Error("Attempted to write preimages whilst disabled")
604- } else {
605- rawdb.WritePreimages(batch, db.preimages)
606- if batch.ValueSize() > ethdb.IdealBatchSize {
607- if err := batch.Write(); err != nil {
608- return err
609- }
610- batch.Reset()
611- }
612- }
564+ if db.preimages != nil {
565+ db.preimages.commit(false)
613566 }
614567 // Keep committing nodes from the flush-list until we're below allowance
615568 oldest := db.oldest
@@ -644,13 +597,6 @@ func (db *Database) Cap(limit common.StorageSize) error {
644597 db.lock.Lock()
645598 defer db.lock.Unlock()
646599
647- if flushPreimages {
648- if db.preimages == nil {
649- log.Error("Attempted to reset preimage cache whilst disabled")
650- } else {
651- db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
652- }
653- }
654600 for db.oldest != oldest {
655601 node := db.dirties[db.oldest]
656602 delete(db.dirties, db.oldest)
@@ -694,13 +640,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
694640
695641 // Move all of the accumulated preimages into a write batch
696642 if db.preimages != nil {
697- rawdb.WritePreimages(batch, db.preimages)
698- // Since we're going to replay trie node writes into the clean cache, flush out
699- // any batched pre-images before continuing.
700- if err := batch.Write(); err != nil {
701- return err
702- }
703- batch.Reset()
643+ db.preimages.commit(true)
704644 }
705645 // Move the trie itself into the batch, flushing if enough data is accumulated
706646 nodes, storage := len(db.dirties), db.dirtiesSize
@@ -723,9 +663,6 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
723663 batch.Reset()
724664
725665 // Reset the storage counters and bumped metrics
726- if db.preimages != nil {
727- db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
728- }
729666 memcacheCommitTimeTimer.Update(time.Since(start))
730667 memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
731668 memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
@@ -837,7 +774,11 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) {
837774 // counted.
838775 var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
839776 var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
840- return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize
777+ var preimageSize common.StorageSize
778+ if db.preimages != nil {
779+ preimageSize = db.preimages.size()
780+ }
781+ return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
841782}
842783
843784// saveCache saves clean state cache to given directory path
0 commit comments