42 changes: 42 additions & 0 deletions Cargo.lock

10 changes: 6 additions & 4 deletions beacon_node/beacon_chain/src/beacon_chain.rs
@@ -693,7 +693,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let root = self.block_root_at_slot(request_slot, skips)?;

if let Some(block_root) = root {
Ok(self.store.get_blinded_block(&block_root)?)
Ok(self
.store
.get_blinded_block(&block_root, Some(request_slot))?)
} else {
Ok(None)
}
@@ -919,7 +921,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<Option<SignedBeaconBlock<T::EthSpec>>, Error> {
// Load block from database, returning immediately if we have the full block with payload
// stored.
let blinded_block = match self.store.try_get_full_block(block_root)? {
let blinded_block = match self.store.try_get_full_block(block_root, None)? {
Some(DatabaseBlock::Full(block)) => return Ok(Some(block)),
Some(DatabaseBlock::Blinded(block)) => block,
None => return Ok(None),
@@ -975,7 +977,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&self,
block_root: &Hash256,
) -> Result<Option<SignedBlindedBeaconBlock<T::EthSpec>>, Error> {
Ok(self.store.get_blinded_block(block_root)?)
Ok(self.store.get_blinded_block(block_root, None)?)
}

/// Returns the state at the given root, if any.
@@ -4629,7 +4631,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

let beacon_block = self
.store
.get_blinded_block(&beacon_block_root)?
.get_blinded_block(&beacon_block_root, None)?
.ok_or_else(|| {
Error::DBInconsistent(format!("Missing block {}", beacon_block_root))
})?;
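The recurring change above is that the store's block getters (`get_blinded_block`, `get_full_block`, `try_get_full_block`) take a second `Option<Slot>` argument: `Some(slot)` when the caller already knows the block's slot, `None` otherwise. The store side is not part of this diff, so the following is only a toy sketch of how such a slot hint can be used, under the assumption that it lets the store decide up front whether a payload lookup is worth attempting; the type and field names are illustrative, not Lighthouse's `HotColdDB` API.

```rust
use std::collections::HashMap;

type Hash256 = [u8; 32];
type Slot = u64;

/// Toy stand-in for a hot/cold block store: blinded blocks plus a separate
/// payload table, with everything below `split_slot` considered frozen.
struct ToyStore {
    split_slot: Slot,
    blinded_blocks: HashMap<Hash256, (Slot, String)>,
    payloads: HashMap<Hash256, String>,
}

impl ToyStore {
    /// Mirrors the shape of the new accessors in the diff: an optional slot hint
    /// accompanies the block root. When the hint shows the block is frozen, the
    /// payload lookup is skipped entirely (finalized payloads may be pruned).
    fn get_full_block(&self, root: &Hash256, slot_hint: Option<Slot>) -> Option<(Slot, String)> {
        let (slot, blinded) = self.blinded_blocks.get(root)?.clone();
        if slot_hint.map_or(false, |s| s < self.split_slot) {
            return Some((slot, blinded)); // frozen: serve the blinded block as-is
        }
        match self.payloads.get(root) {
            Some(payload) => Some((slot, format!("{blinded}+{payload}"))),
            None => Some((slot, blinded)),
        }
    }
}
```

In the diff itself, callers pass `Some(request_slot)` where the slot is already in hand (e.g. `block_at_slot`) and `None` elsewhere.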
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
@@ -322,7 +322,7 @@ where
metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);
let justified_block = self
.store
.get_blinded_block(&self.justified_checkpoint.root)
.get_blinded_block(&self.justified_checkpoint.root, None)
.map_err(Error::FailedToReadBlock)?
.ok_or(Error::MissingBlock(self.justified_checkpoint.root))?
.deconstruct()
16 changes: 12 additions & 4 deletions beacon_node/beacon_chain/src/builder.rs
@@ -256,7 +256,7 @@ where
.ok_or("Fork choice not found in store")?;

let genesis_block = store
.get_blinded_block(&chain.genesis_block_root)
.get_blinded_block(&chain.genesis_block_root, Some(Slot::new(0)))
.map_err(|e| descriptive_db_error("genesis block", &e))?
.ok_or("Genesis block not found in store")?;
let genesis_state = store
@@ -266,6 +266,13 @@

self.genesis_time = Some(genesis_state.genesis_time());

// Prune finalized execution payloads.
if store.get_config().prune_payloads_on_init {
store
.try_prune_execution_payloads(false)
.map_err(|e| format!("Error pruning execution payloads: {e:?}"))?;
}

self.op_pool = Some(
store
.get_item::<PersistedOperationPool<TEthSpec>>(&OP_POOL_DB_KEY)
@@ -311,6 +318,7 @@ where
.ok_or("set_genesis_state requires a store")?;

let beacon_block = genesis_block(&mut beacon_state, &self.spec)?;
let blinded_block = beacon_block.clone_as_blinded();

beacon_state
.build_all_caches(&self.spec)
@@ -323,12 +331,12 @@
.put_state(&beacon_state_root, &beacon_state)
.map_err(|e| format!("Failed to store genesis state: {:?}", e))?;
store
.put_block(&beacon_block_root, beacon_block.clone())
.put_cold_blinded_block(&beacon_block_root, &blinded_block)
.map_err(|e| format!("Failed to store genesis block: {:?}", e))?;

// Store the genesis block under the `ZERO_HASH` key.
store
.put_block(&Hash256::zero(), beacon_block.clone())
.put_cold_blinded_block(&Hash256::zero(), &blinded_block)
.map_err(|e| {
format!(
"Failed to store genesis block under 0x00..00 alias: {:?}",
@@ -618,7 +626,7 @@ where
// Try to decode the head block according to the current fork, if that fails, try
// to backtrack to before the most recent fork.
let (head_block_root, head_block, head_reverted) =
match store.get_full_block(&initial_head_block_root) {
match store.get_full_block(&initial_head_block_root, None) {
Ok(Some(block)) => (initial_head_block_root, block, false),
Ok(None) => return Err("Head block not found in store".into()),
Err(StoreError::SszDecodeError(_)) => {
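Two things change in builder.rs: finalized execution payloads can now be pruned at start-up (gated on `prune_payloads_on_init`), and the genesis block is stored in blinded form via `clone_as_blinded` and `put_cold_blinded_block`. As a rough illustration of what blinding means here — keeping the execution payload header while dropping the bulky payload body — here is a toy version; the types are stand-ins, not the real `SignedBeaconBlock`.

```rust
/// Toy payload: a header plus the bulky transaction data.
#[derive(Clone)]
struct FullPayload {
    header: String,
    transactions: Vec<Vec<u8>>,
}

/// Toy block, generic over its payload representation.
#[derive(Clone)]
struct Block<P> {
    slot: u64,
    payload: P,
}

impl Block<FullPayload> {
    /// Keep only the payload header, mirroring the idea behind
    /// `beacon_block.clone_as_blinded()` in the diff.
    fn clone_as_blinded(&self) -> Block<String> {
        Block {
            slot: self.slot,
            payload: self.payload.header.clone(),
        }
    }
}
```

Storing the genesis block with `put_cold_blinded_block` rather than `put_block` presumably keeps the freezer as the single home for finalized blinded blocks from the very first slot.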
4 changes: 2 additions & 2 deletions beacon_node/beacon_chain/src/canonical_head.rs
@@ -269,7 +269,7 @@ impl<T: BeaconChainTypes> CanonicalHead<T> {
let fork_choice_view = fork_choice.cached_fork_choice_view();
let beacon_block_root = fork_choice_view.head_block_root;
let beacon_block = store
.get_full_block(&beacon_block_root)?
.get_full_block(&beacon_block_root, None)?
.ok_or(Error::MissingBeaconBlock(beacon_block_root))?;
let beacon_state_root = beacon_block.state_root();
let beacon_state = store
@@ -639,7 +639,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.unwrap_or_else(|| {
let beacon_block = self
.store
.get_full_block(&new_view.head_block_root)?
.get_full_block(&new_view.head_block_root, None)?
.ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?;

let beacon_state_root = beacon_block.state_root();
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/fork_revert.rs
@@ -107,7 +107,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
let finalized_checkpoint = head_state.finalized_checkpoint();
let finalized_block_root = finalized_checkpoint.root;
let finalized_block = store
.get_full_block(&finalized_block_root)
.get_full_block(&finalized_block_root, None)
.map_err(|e| format!("Error loading finalized block: {:?}", e))?
.ok_or_else(|| {
format!(
8 changes: 2 additions & 6 deletions beacon_node/beacon_chain/src/historical_blocks.rs
@@ -93,7 +93,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
ChunkWriter::<BlockRoots, _, _>::new(&self.store.cold_db, prev_block_slot.as_usize())?;

let mut cold_batch = Vec::with_capacity(blocks.len());
let mut hot_batch = Vec::with_capacity(blocks.len());

for block in blocks_to_import.iter().rev() {
// Check chain integrity.
@@ -109,7 +108,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

// Store the blinded block (without payload) in the freezer database.
self.store
.blinded_block_as_kv_store_ops(&block_root, block, &mut hot_batch);
.blinded_block_as_cold_kv_store_ops(&block_root, block, &mut cold_batch)?;

// Store block roots, including at all skip slots in the freezer DB.
for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() {
@@ -177,10 +176,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
drop(verify_timer);
drop(sig_timer);

// Write the I/O batches to disk, writing the blocks themselves first, as it's better
// for the hot DB to contain extra blocks than for the cold DB to point to blocks that
// do not exist.
self.store.hot_db.do_atomically(hot_batch)?;
// Write the I/O batch to disk.
self.store.cold_db.do_atomically(cold_batch)?;

// Update the anchor.
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/migrate.rs
@@ -424,7 +424,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
// so delete it from the head tracker but leave it and its states in the database.
// This is suboptimal as it wastes disk space, but it's difficult to fix. A re-sync
// can be used to reclaim the space.
let head_state_root = match store.get_blinded_block(&head_hash) {
let head_state_root = match store.get_blinded_block(&head_hash, Some(head_slot)) {
Ok(Some(block)) => block.state_root(),
Ok(None) => {
return Err(BeaconStateError::MissingBeaconBlock(head_hash.into()).into())
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/pre_finalization_cache.rs
@@ -71,7 +71,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}

// 2. Check on disk.
if self.store.get_blinded_block(&block_root)?.is_some() {
if self.store.get_blinded_block(&block_root, None)?.is_some() {
cache.block_roots.put(block_root, ());
return Ok(true);
}
@@ -39,7 +39,7 @@ pub fn upgrade_to_v12<T: BeaconChainTypes>(
.unrealized_justified_checkpoint
.root;
let justified_block = db
.get_blinded_block(&justified_block_root)?
.get_blinded_block(&justified_block_root, None)?
.ok_or_else(|| {
Error::SchemaMigrationError(format!(
"unrealized justified block missing for migration: {justified_block_root:?}",
@@ -91,7 +91,7 @@ pub fn upgrade_to_v9<T: BeaconChainTypes>(
Ok(None) => return Err(Error::BlockNotFound(block_root)),
// There was an error reading a pre-v9 block. Try reading it as a post-v9 block.
Err(_) => {
if db.try_get_full_block(&block_root)?.is_some() {
if db.try_get_full_block(&block_root, None)?.is_some() {
// The block is present as a post-v9 block, assume that it was already
// correctly migrated.
continue;
33 changes: 16 additions & 17 deletions beacon_node/beacon_chain/tests/block_verification.rs
@@ -41,28 +41,27 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
)
.await;

harness
let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH);
for snapshot in harness
.chain
.chain_dump()
.expect("should dump chain")
.into_iter()
.map(|snapshot| {
let full_block = harness
.chain
.store
.make_full_block(
&snapshot.beacon_block_root,
snapshot.beacon_block.as_ref().clone(),
)
.unwrap();
BeaconSnapshot {
beacon_block_root: snapshot.beacon_block_root,
beacon_block: Arc::new(full_block),
beacon_state: snapshot.beacon_state,
}
})
.skip(1)
.collect()
{
let full_block = harness
.chain
.get_block(&snapshot.beacon_block_root)
.await
.unwrap()
.unwrap();
segment.push(BeaconSnapshot {
beacon_block_root: snapshot.beacon_block_root,
beacon_block: Arc::new(full_block),
beacon_state: snapshot.beacon_state,
});
}
segment
}

fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
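The helper above switches from a synchronous iterator chain over `store.make_full_block` to an explicit `for` loop around the async `chain.get_block`. The loop is not just stylistic: `.await` cannot appear inside the ordinary closure passed to `Iterator::map`, so the async call forces the imperative form. A minimal sketch of the same pattern, with placeholder names:

```rust
async fn lookup(x: u32) -> u32 {
    // Placeholder for an async fetch such as `chain.get_block(...)`.
    x * 2
}

async fn collect_async(inputs: Vec<u32>) -> Vec<u32> {
    // `inputs.iter().map(|x| lookup(*x).await).collect()` would not compile,
    // because `.await` is not allowed inside the non-async closure.
    let mut out = Vec::with_capacity(inputs.len());
    for x in inputs {
        out.push(lookup(x).await);
    }
    out
}
```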
29 changes: 23 additions & 6 deletions beacon_node/beacon_chain/tests/store_tests.rs
@@ -2114,25 +2114,26 @@ async fn weak_subjectivity_sync() {
assert_eq!(new_blocks[0].beacon_block.slot(), wss_slot + 1);

for snapshot in new_blocks {
let block = &snapshot.beacon_block;
let full_block = harness
.chain
.store
.make_full_block(&snapshot.beacon_block_root, block.as_ref().clone())
.get_block(&snapshot.beacon_block_root)
.await
.unwrap()
.unwrap();
let slot = full_block.slot();
let state_root = full_block.state_root();

beacon_chain.slot_clock.set_slot(block.slot().as_u64());
beacon_chain.slot_clock.set_slot(slot.as_u64());
beacon_chain
.process_block(Arc::new(full_block), CountUnrealized::True)
.await
.unwrap();
beacon_chain.recompute_head_at_current_slot().await;

// Check that the new block's state can be loaded correctly.
let state_root = block.state_root();
let mut state = beacon_chain
.store
.get_state(&state_root, Some(block.slot()))
.get_state(&state_root, Some(slot))
.unwrap()
.unwrap();
assert_eq!(state.update_tree_hash_cache().unwrap(), state_root);
@@ -2583,6 +2584,7 @@ fn check_split_slot(harness: &TestHarness, store: Arc<HotColdDB<E, LevelDB<E>, L
/// Check that all the states in a chain dump have the correct tree hash.
fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
let chain_dump = harness.chain.chain_dump().unwrap();
let split_slot = harness.chain.store.get_split_slot();

assert_eq!(chain_dump.len() as u64, expected_len);

@@ -2606,6 +2608,21 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
.slot(),
checkpoint.beacon_state.slot()
);

// Check presence of execution payload on disk.
if harness.chain.spec.bellatrix_fork_epoch.is_some() {
assert_eq!(
harness
.chain
.store
.execution_payload_exists(&checkpoint.beacon_block_root)
.unwrap(),
checkpoint.beacon_block.slot() >= split_slot,
"incorrect payload storage for block at slot {}: {:?}",
checkpoint.beacon_block.slot(),
checkpoint.beacon_block_root,
);
}
}

// Check the forwards block roots iterator against the chain dump
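The new assertion in `check_chain_dump` captures the storage invariant these tests now enforce: when the Bellatrix fork is enabled, an execution payload should exist on disk exactly for blocks at or after the hot/cold split slot, i.e. finalized payloads are pruned. Expressed as a standalone predicate with toy types:

```rust
/// The invariant asserted in the test, written as a plain predicate:
/// payloads are kept only for blocks at or after the split slot.
fn payload_should_exist(block_slot: u64, split_slot: u64) -> bool {
    block_slot >= split_slot
}

fn main() {
    assert!(payload_should_exist(10, 10)); // at the split: payload still stored
    assert!(!payload_should_exist(9, 10)); // behind the split: payload pruned
}
```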
7 changes: 7 additions & 0 deletions beacon_node/src/cli.rs
@@ -515,6 +515,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.takes_value(true)
.default_value("true")
)
.arg(
Arg::with_name("prune-payloads-on-startup")
.long("prune-payloads-on-startup")
.help("Check for execution payloads to prune on start-up.")
.takes_value(true)
.default_value("true")
)

/*
* Misc.
6 changes: 6 additions & 0 deletions beacon_node/src/config.rs
@@ -358,6 +358,12 @@ pub fn get_config<E: EthSpec>(
.map_err(|_| "auto-compact-db takes a boolean".to_string())?;
}

if let Some(prune_payloads_on_init) =
clap_utils::parse_optional(cli_args, "prune-payloads-on-startup")?
{
client_config.store.prune_payloads_on_init = prune_payloads_on_init;
}

/*
* Zero-ports
*
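Together, cli.rs and config.rs plumb the `--prune-payloads-on-startup` boolean (default "true") into the store config that the builder change above reads as `prune_payloads_on_init`. A simplified sketch of that plumbing, with `clap_utils::parse_optional` and `ClientConfig` replaced by stand-ins:

```rust
/// Stand-in for the relevant corner of the store config.
struct StoreConfig {
    prune_payloads_on_init: bool,
}

/// Apply the flag only if it was supplied and parses as a boolean, mirroring
/// the `clap_utils::parse_optional` + assignment pattern in config.rs.
fn apply_prune_flag(raw: Option<&str>, config: &mut StoreConfig) -> Result<(), String> {
    if let Some(value) = raw {
        config.prune_payloads_on_init = value
            .parse::<bool>()
            .map_err(|_| "prune-payloads-on-startup takes a boolean".to_string())?;
    }
    Ok(())
}

fn main() -> Result<(), String> {
    // Assumed default of `true`, matching the CLI default value in cli.rs.
    let mut config = StoreConfig { prune_payloads_on_init: true };
    apply_prune_flag(Some("false"), &mut config)?;
    assert!(!config.prune_payloads_on_init);
    Ok(())
}
```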