diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 674a68a7889..82c1a81e33f 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -26,8 +26,6 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - name: Get latest version of stable Rust run: rustup update stable - name: Check formatting with cargo fmt @@ -58,6 +56,18 @@ jobs: run: choco install -y make - name: Run tests in release run: make test-release + beacon-chain-tests: + name: beacon-chain-tests + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Run beacon_chain tests for base hard fork + run: make test-beacon-chain-base + - name: Run beacon_chain tests for Altair hard fork + run: make test-beacon-chain-altair debug-tests-ubuntu: name: debug-tests-ubuntu runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index dda20f5a3cc..5fc2209f289 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -568,7 +568,7 @@ dependencies = [ "genesis", "int_to_bytes", "integer-sqrt", - "itertools 0.9.0", + "itertools 0.10.1", "lazy_static", "lighthouse_metrics", "log", @@ -1031,7 +1031,7 @@ dependencies = [ "ansi_term 0.11.0", "atty", "bitflags", - "strsim", + "strsim 0.8.0", "textwrap", "unicode-width", "vec_map", @@ -1426,6 +1426,41 @@ dependencies = [ "zeroize", ] +[[package]] +name = "darling" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f2c43f534ea4b0b049015d00269734195e6d3f0f6635cb692251aca6f9f8b3c" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e91455b86830a1c21799d94524df0845183fa55bafd9aa137b01c7d1065fa36" +dependencies = [ + "fnv", + 
"ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29b5acf0dea37a7f66f7b25d2c5e93fd46f8f6968b1a5d7a3e02e97768afc95a" +dependencies = [ + "darling_core", + "quote", + "syn", +] + [[package]] name = "darwin-libproc" version = "0.1.2" @@ -1684,15 +1719,20 @@ dependencies = [ "bls", "cached_tree_hash", "compare_fields", + "compare_fields_derive", + "derivative", "eth2_ssz", "eth2_ssz_derive", "ethereum-types 0.9.2", + "fs2", "hex", + "parking_lot", "rayon", "serde", "serde_derive", "serde_repr", "serde_yaml", + "snap", "state_processing", "swap_or_not_shuffle", "tree_hash", @@ -2185,7 +2225,7 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" name = "fallback" version = "0.1.0" dependencies = [ - "itertools 0.9.0", + "itertools 0.10.1", ] [[package]] @@ -2954,6 +2994,12 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.2.3" @@ -4068,7 +4114,7 @@ dependencies = [ "hex", "if-addrs", "igd", - "itertools 0.9.0", + "itertools 0.10.1", "lazy_static", "lighthouse_metrics", "logging", @@ -4321,6 +4367,7 @@ dependencies = [ name = "operation_pool" version = "0.2.0" dependencies = [ + "beacon_chain", "eth2_ssz", "eth2_ssz_derive", "int_to_bytes", @@ -6061,15 +6108,15 @@ name = "state_processing" version = "0.2.0" dependencies = [ "arbitrary", + "beacon_chain", "bls", - "criterion", "env_logger 0.8.4", "eth2_hashing", "eth2_ssz", "eth2_ssz_types", "int_to_bytes", "integer-sqrt", - "itertools 0.9.0", + "itertools 0.10.1", "lazy_static", "log", "merkle_proof", @@ -6078,6 +6125,7 @@ dependencies = [ "serde", "serde_derive", "serde_yaml", + "smallvec", 
"tree_hash", "tree_hash_derive", "types", @@ -6087,7 +6135,9 @@ dependencies = [ name = "state_transition_vectors" version = "0.1.0" dependencies = [ + "beacon_chain", "eth2_ssz", + "lazy_static", "state_processing", "types", ] @@ -6151,18 +6201,17 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" name = "store" version = "0.2.0" dependencies = [ - "criterion", + "beacon_chain", "db-key", "directory", "eth2_ssz", "eth2_ssz_derive", - "itertools 0.9.0", + "itertools 0.10.1", "lazy_static", "leveldb", "lighthouse_metrics", "lru", "parking_lot", - "rayon", "serde", "serde_derive", "slog", @@ -6201,6 +6250,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "strum" version = "0.20.0" @@ -6234,6 +6289,19 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +[[package]] +name = "superstruct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bf7f6700d7c135cf4e4900c2cfba9a12ecad1fdc45594aad48f6b344b2589a0" +dependencies = [ + "darling", + "itertools 0.10.1", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "swap_or_not_shuffle" version = "0.2.0" @@ -6757,7 +6825,7 @@ dependencies = [ name = "tree_hash" version = "0.1.1" dependencies = [ - "criterion", + "beacon_chain", "eth2_hashing", "ethereum-types 0.9.2", "lazy_static", @@ -6820,6 +6888,7 @@ name = "types" version = "0.2.0" dependencies = [ "arbitrary", + "beacon_chain", "bls", "cached_tree_hash", "compare_fields", @@ -6834,9 +6903,11 @@ dependencies = [ 
"ethereum-types 0.9.2", "hex", "int_to_bytes", + "itertools 0.10.1", "lazy_static", "log", "merkle_proof", + "parking_lot", "rand 0.7.3", "rand_xorshift", "rayon", @@ -6849,6 +6920,7 @@ dependencies = [ "serde_utils", "serde_yaml", "slog", + "superstruct", "swap_or_not_shuffle", "tempfile", "test_random_derive", diff --git a/Makefile b/Makefile index ae0ec94738b..dd19a0f4a7d 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ .PHONY: tests EF_TESTS = "testing/ef_tests" +BEACON_CHAIN_CRATE = "beacon_node/beacon_chain" STATE_TRANSITION_VECTORS = "testing/state_transition_vectors" GIT_TAG := $(shell git describe --tags --candidates 1) BIN_DIR = "bin" @@ -79,12 +80,12 @@ build-release-tarballs: # Runs the full workspace tests in **release**, without downloading any additional # test vectors. test-release: - cargo test --all --release --exclude ef_tests + cargo test --workspace --release --exclude ef_tests --exclude beacon_chain # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. test-debug: - cargo test --all --exclude ef_tests + cargo test --workspace --exclude ef_tests --exclude beacon_chain # Runs cargo-fmt (linter). cargo-fmt: @@ -92,7 +93,7 @@ cargo-fmt: # Typechecks benchmark code check-benches: - cargo check --all --benches + cargo check --workspace --benches # Typechecks consensus code *without* allowing deprecated legacy arithmetic check-consensus: @@ -100,9 +101,17 @@ check-consensus: # Runs only the ef-test vectors. run-ef-tests: + rm -rf $(EF_TESTS)/.accessed_file_log.txt cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests" cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,fake_crypto" cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests,milagro" + ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/eth2.0-spec-tests + +# Run the tests in the `beacon_chain` crate. 
+test-beacon-chain: test-beacon-chain-base test-beacon-chain-altair + +test-beacon-chain-%: + env FORK_NAME=$* cargo test --release --features fork_from_env --manifest-path=$(BEACON_CHAIN_CRATE)/Cargo.toml # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: @@ -121,7 +130,7 @@ test-full: cargo-fmt test-release test-debug test-ef # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: - cargo clippy --all --tests -- \ + cargo clippy --workspace --tests -- \ -D warnings \ -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 6c15615af46..986ae8dc28a 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -116,7 +116,7 @@ async fn publish_voluntary_exit( .beacon_state::() .as_ref() .expect("network should have valid genesis state") - .genesis_validators_root; + .genesis_validators_root(); // Verify that the beacon node and validator being exited are on the same network. if genesis_data.genesis_validators_root != testnet_genesis_root { diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index 00187ccd065..902e26528f7 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -77,7 +77,7 @@ pub fn cli_run( let genesis_validators_root = testnet_config .beacon_state::() - .map(|state: BeaconState| state.genesis_validators_root) + .map(|state: BeaconState| state.genesis_validators_root()) .map_err(|e| { format!( "Unable to get genesis state, has genesis occurred? 
Detail: {:?}", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 930820b7f66..a7cbc2061fb 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -9,9 +9,9 @@ default = ["participation_metrics"] write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing. participation_metrics = [] # Exposes validator participation metrics to Prometheus. test_logger = [] # Print log output to stderr when running tests instead of dropping it +fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable [dev-dependencies] -int_to_bytes = { path = "../../consensus/int_to_bytes" } maplit = "1.0.2" environment = { path = "../../lighthouse/environment" } @@ -45,6 +45,7 @@ eth1 = { path = "../eth1" } futures = "0.3.7" genesis = { path = "../genesis" } integer-sqrt = "0.1.5" +int_to_bytes = { path = "../../consensus/int_to_bytes" } rand = "0.7.3" rand_core = "0.6.2" proto_array = { path = "../../consensus/proto_array" } @@ -56,7 +57,7 @@ safe_arith = { path = "../../consensus/safe_arith" } fork_choice = { path = "../../consensus/fork_choice" } task_executor = { path = "../../common/task_executor" } derivative = "2.1.1" -itertools = "0.9.0" +itertools = "0.10.0" regex = "1.3.9" exit-future = "0.2.0" slasher = { path = "../../slasher" } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index ddd7f93ca92..552d4660241 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -895,7 +895,7 @@ pub fn verify_attestation_signature( .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) .ok_or(BeaconChainError::CanonicalHeadLockTimeout) - .map(|head| head.beacon_state.fork)?; + .map(|head| head.beacon_state.fork())?; let signature_set = indexed_attestation_signature_set_from_pubkeys( |validator_index| 
pubkey_cache.get(validator_index).map(Cow::Borrowed), @@ -1001,7 +1001,7 @@ pub fn verify_signed_aggregate_signatures( .canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) .ok_or(BeaconChainError::CanonicalHeadLockTimeout) - .map(|head| head.beacon_state.fork)?; + .map(|head| head.beacon_state.fork())?; let signature_sets = vec![ signed_aggregate_selection_proof_signature_set( diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 331daf321f0..8f1908e80f2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -454,7 +454,7 @@ impl BeaconChain { let old_block_root = snapshot.beacon_block_root; // The earliest slot for which the two chains may have a common history. - let lowest_slot = std::cmp::min(new_state.slot, old_state.slot); + let lowest_slot = std::cmp::min(new_state.slot(), old_state.slot()); // Create an iterator across `$state`, assuming that the block at `$state.slot` has the // block root of `$block_root`. @@ -465,7 +465,7 @@ impl BeaconChain { // in all the iterator wrapping. macro_rules! aligned_roots_iter { ($state: ident, $block_root: ident) => { - std::iter::once(Ok(($state.slot, $block_root))) + std::iter::once(Ok(($state.slot(), $block_root))) .chain($state.rev_iter_block_roots(&self.spec)) .skip_while(|result| { result @@ -506,7 +506,7 @@ impl BeaconChain { // We provide this potentially-inaccurate-but-safe information to avoid onerous // database reads during times of deep reorgs. 
Ok(old_state - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(T::EthSpec::slots_per_epoch())) }) @@ -526,7 +526,7 @@ impl BeaconChain { state_root: Hash256, state: &'a BeaconState, ) -> impl Iterator> + 'a { - std::iter::once(Ok((state_root, state.slot))) + std::iter::once(Ok((state_root, state.slot()))) .chain(StateRootsIterator::new(self.store.clone(), state)) .map(|result| result.map_err(Into::into)) } @@ -570,7 +570,7 @@ impl BeaconChain { let root = self.block_root_at_slot(request_slot, skips)?; if let Some(block_root) = root { - Ok(self.store.get_item(&block_root)?) + Ok(self.store.get_block(&block_root)?) } else { Ok(None) } @@ -661,7 +661,7 @@ impl BeaconChain { let state = &head.beacon_state; // Try find the root for the `request_slot`. - let request_root_opt = match state.slot.cmp(&request_slot) { + let request_root_opt = match state.slot().cmp(&request_slot) { // It's always a skip slot if the head is less than the request slot, return early. Ordering::Less => return Ok(Some(None)), // The request slot is the head slot. 
@@ -836,11 +836,11 @@ impl BeaconChain { slot: head.beacon_block.slot(), block_root: head.beacon_block_root, state_root: head.beacon_state_root(), - current_justified_checkpoint: head.beacon_state.current_justified_checkpoint, - finalized_checkpoint: head.beacon_state.finalized_checkpoint, - fork: head.beacon_state.fork, - genesis_time: head.beacon_state.genesis_time, - genesis_validators_root: head.beacon_state.genesis_validators_root, + current_justified_checkpoint: head.beacon_state.current_justified_checkpoint(), + finalized_checkpoint: head.beacon_state.finalized_checkpoint(), + fork: head.beacon_state.fork(), + genesis_time: head.beacon_state.genesis_time(), + genesis_validators_root: head.beacon_state.genesis_validators_root(), proposer_shuffling_decision_root, }) }) @@ -868,23 +868,23 @@ impl BeaconChain { ) -> Result, Error> { let head_state = self.head()?.beacon_state; - match slot.cmp(&head_state.slot) { + match slot.cmp(&head_state.slot()) { Ordering::Equal => Ok(head_state), Ordering::Greater => { - if slot > head_state.slot + T::EthSpec::slots_per_epoch() { + if slot > head_state.slot() + T::EthSpec::slots_per_epoch() { warn!( self.log, "Skipping more than an epoch"; - "head_slot" => head_state.slot, + "head_slot" => head_state.slot(), "request_slot" => slot ) } - let start_slot = head_state.slot; + let start_slot = head_state.slot(); let task_start = Instant::now(); let max_task_runtime = Duration::from_secs(self.spec.seconds_per_slot); - let head_state_slot = head_state.slot; + let head_state_slot = head_state.slot(); let mut state = head_state; let skip_state_root = match config { @@ -892,7 +892,7 @@ impl BeaconChain { StateSkipConfig::WithoutStateRoots => Some(Hash256::zero()), }; - while state.slot < slot { + while state.slot() < slot { // Do not allow and forward state skip that takes longer than the maximum task duration. 
// // This is a protection against nodes doing too much work when they're not synced @@ -1046,7 +1046,7 @@ impl BeaconChain { state: &BeaconState, ) -> Result, Error> { let iter = BlockRootsIterator::new(self.store.clone(), state); - let iter_with_head = std::iter::once(Ok((beacon_block_root, state.slot))) + let iter_with_head = std::iter::once(Ok((beacon_block_root, state.slot()))) .chain(iter) .map(|result| result.map_err(|e| e.into())); @@ -1176,7 +1176,7 @@ impl BeaconChain { ) -> Result, Error> { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); - if state.slot > slot { + if state.slot() > slot { return Err(Error::CannotAttestToFutureState); } else if state.current_epoch() < epoch { let mut_state = state.to_mut(); @@ -1194,7 +1194,7 @@ impl BeaconChain { let committee_len = state.get_beacon_committee(slot, index)?.committee.len(); let target_slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); - let target_root = if state.slot <= target_slot { + let target_root = if state.slot() <= target_slot { beacon_block_root } else { *state.get_block_root(target_slot)? @@ -1206,7 +1206,7 @@ impl BeaconChain { slot, index, beacon_block_root, - source: state.current_justified_checkpoint, + source: state.current_justified_checkpoint(), target: Checkpoint { epoch, root: target_root, @@ -1347,12 +1347,8 @@ impl BeaconChain { // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. if self.eth1_chain.is_some() { - let fork = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)? - .beacon_state - .fork; + let fork = + self.with_head(|head| Ok::<_, AttestationError>(head.beacon_state.fork()))?; self.op_pool .insert_attestation( @@ -1563,6 +1559,14 @@ impl BeaconChain { .collect::>(); for (i, block) in chain_segment.into_iter().enumerate() { + // Ensure the block is the correct structure for the fork at `block.slot()`. 
+ if let Err(e) = block.fork_name(&self.spec) { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::InconsistentFork(e), + }; + } + let block_root = get_block_root(&block); if let Some((child_parent_root, child_slot)) = children.get(i) { @@ -1691,8 +1695,8 @@ impl BeaconChain { &self, block: SignedBeaconBlock, ) -> Result, BlockError> { - let slot = block.message.slot; - let graffiti_string = block.message.body.graffiti.as_utf8_lossy(); + let slot = block.slot(); + let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); match GossipVerifiedBlock::new(block, self) { Ok(verified) => { @@ -1809,7 +1813,7 @@ impl BeaconChain { // Iterate through the attestations in the block and register them as an "observed // attestation". This will stop us from propagating them on the gossip network. - for a in &signed_block.message.body.attestations { + for a in signed_block.message().body().attestations() { match self .observed_attestations .write() @@ -1828,7 +1832,7 @@ impl BeaconChain { // If a slasher is configured, provide the attestations from the block. if let Some(slasher) = self.slasher.as_ref() { - for attestation in &signed_block.message.body.attestations { + for attestation in signed_block.message().body().attestations() { let committee = state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; let indexed_attestation = @@ -1874,11 +1878,11 @@ impl BeaconChain { // Do not import a block that doesn't descend from the finalized root. 
let signed_block = check_block_is_finalized_descendant::(signed_block, &fork_choice, &self.store)?; - let block = &signed_block.message; + let (block, block_signature) = signed_block.clone().deconstruct(); // compare the existing finalized checkpoint with the incoming block's finalized checkpoint let old_finalized_checkpoint = fork_choice.finalized_checkpoint(); - let new_finalized_checkpoint = state.finalized_checkpoint; + let new_finalized_checkpoint = state.finalized_checkpoint(); // Only perform the weak subjectivity check if it was configured. if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint { @@ -1894,7 +1898,7 @@ impl BeaconChain { self.log, "Weak subjectivity checkpoint verification failed while importing block!"; "block_root" => ?block_root, - "parent_root" => ?block.parent_root, + "parent_root" => ?block.parent_root(), "old_finalized_epoch" => ?old_finalized_checkpoint.epoch, "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, @@ -1916,7 +1920,7 @@ impl BeaconChain { let _fork_choice_block_timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); fork_choice - .on_block(current_slot, block, block_root, &state) + .on_block(current_slot, &block, block_root, &state) .map_err(|e| BlockError::BeaconChainError(e.into()))?; } @@ -1927,7 +1931,7 @@ impl BeaconChain { let validator_monitor = self.validator_monitor.read(); // Register each attestation in the block with the fork choice service. - for attestation in &block.body.attestations[..] { + for attestation in block.body().attestations() { let _fork_choice_attestation_timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); @@ -1947,26 +1951,26 @@ impl BeaconChain { // Only register this with the validator monitor when the block is sufficiently close to // the current slot. 
if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch() - + block.slot.as_u64() + + block.slot().as_u64() >= current_slot.as_u64() { validator_monitor.register_attestation_in_block( &indexed_attestation, - &block, + block.to_ref(), &self.spec, ); } } - for exit in &block.body.voluntary_exits { + for exit in block.body().voluntary_exits() { validator_monitor.register_block_voluntary_exit(&exit.message) } - for slashing in &block.body.attester_slashings { + for slashing in block.body().attester_slashings() { validator_monitor.register_block_attester_slashing(slashing) } - for slashing in &block.body.proposer_slashings { + for slashing in block.body().proposer_slashings() { validator_monitor.register_block_proposer_slashing(slashing) } @@ -1974,7 +1978,7 @@ impl BeaconChain { metrics::observe( &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, - block.body.attestations.len() as f64, + block.body().attestations().len() as f64, ); let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); @@ -1984,11 +1988,8 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - ops.push(StoreOp::PutBlock( - block_root, - Box::new(signed_block.clone()), - )); - ops.push(StoreOp::PutState(block.state_root, &state)); + ops.push(StoreOp::PutBlock(block_root, Box::new(signed_block))); + ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); if let Err(e) = self.store.do_atomically(ops) { @@ -2024,11 +2025,12 @@ impl BeaconChain { // about it. 
metrics::observe_duration( &metrics::BEACON_BLOCK_IMPORTED_SLOT_START_DELAY_TIME, - get_block_delay_ms(timestamp_now(), &signed_block.message, &self.slot_clock), + get_block_delay_ms(timestamp_now(), block.to_ref(), &self.slot_clock), ); - let parent_root = block.parent_root; - let slot = block.slot; + let parent_root = block.parent_root(); + let slot = block.slot(); + let signed_block = SignedBeaconBlock::from_block(block, block_signature); self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) @@ -2167,10 +2169,10 @@ impl BeaconChain { .ok_or(BlockProductionError::NoEth1ChainConnection)?; // It is invalid to try to produce a block using a state from a future slot. - if state.slot > produce_at_slot { + if state.slot() > produce_at_slot { return Err(BlockProductionError::StateSlotTooHigh { produce_at_slot, - state_slot: state.slot, + state_slot: state.slot(), }); } @@ -2183,16 +2185,15 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - let parent_root = if state.slot > 0 { + let parent_root = if state.slot() > 0 { *state - .get_block_root(state.slot - 1) + .get_block_root(state.slot() - 1) .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)? } else { - state.latest_block_header.canonical_root() + state.latest_block_header().canonical_root() }; - let (proposer_slashings, attester_slashings) = - self.op_pool.get_slashings(&state, &self.spec); + let (proposer_slashings, attester_slashings) = self.op_pool.get_slashings(&state); let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; let deposits = eth1_chain @@ -2206,8 +2207,8 @@ impl BeaconChain { for attestation in self.naive_aggregation_pool.read().iter() { if let Err(e) = self.op_pool.insert_attestation( attestation.clone(), - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), &self.spec, ) { // Don't stop block production if there's an error, just create a log. 
@@ -2250,13 +2251,17 @@ impl BeaconChain { .into(); drop(attestation_packing_timer); - let mut block = SignedBeaconBlock { - message: BeaconBlock { - slot: state.slot, - proposer_index: state.get_beacon_proposer_index(state.slot, &self.spec)? as u64, + let slot = state.slot(); + let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; + let voluntary_exits = self.op_pool.get_voluntary_exits(&state, &self.spec).into(); + + let inner_block = match state { + BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase { + slot, + proposer_index, parent_root, state_root: Hash256::zero(), - body: BeaconBlockBody { + body: BeaconBlockBodyBase { randao_reveal, eth1_data, graffiti, @@ -2264,13 +2269,35 @@ impl BeaconChain { attester_slashings: attester_slashings.into(), attestations, deposits, - voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), + voluntary_exits, }, - }, - // The block is not signed here, that is the task of a validator client. - signature: Signature::empty(), + }), + BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations, + deposits, + voluntary_exits, + // FIXME(altair): put a sync aggregate from the pool here (once implemented) + sync_aggregate: SyncAggregate::new(), + }, + }), }; + let block = SignedBeaconBlock::from_block( + inner_block, + // The block is not signed here, that is the task of a validator client. 
+ Signature::empty(), + ); + let process_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_PROCESS_TIMES); per_block_processing( &mut state, @@ -2285,19 +2312,20 @@ impl BeaconChain { let state_root = state.update_tree_hash_cache()?; drop(state_root_timer); - block.message.state_root = state_root; + let (mut block, _) = block.deconstruct(); + *block.state_root_mut() = state_root; metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); trace!( self.log, "Produced beacon block"; - "parent" => %block.message.parent_root, - "attestations" => block.message.body.attestations.len(), - "slot" => block.message.slot + "parent" => %block.parent_root(), + "attestations" => block.body().attestations().len(), + "slot" => block.slot() ); - Ok((block.message, state)) + Ok((block, state)) } /// Execute the fork choice algorithm and enthrone the result as the canonical head. @@ -2403,16 +2431,16 @@ impl BeaconChain { debug!( self.log, "Head beacon block"; - "justified_root" => %new_head.beacon_state.current_justified_checkpoint.root, - "justified_epoch" => new_head.beacon_state.current_justified_checkpoint.epoch, - "finalized_root" => %new_head.beacon_state.finalized_checkpoint.root, - "finalized_epoch" => new_head.beacon_state.finalized_checkpoint.epoch, + "justified_root" => %new_head.beacon_state.current_justified_checkpoint().root, + "justified_epoch" => new_head.beacon_state.current_justified_checkpoint().epoch, + "finalized_root" => %new_head.beacon_state.finalized_checkpoint().root, + "finalized_epoch" => new_head.beacon_state.finalized_checkpoint().epoch, "root" => %beacon_block_root, "slot" => new_head.beacon_block.slot(), ); }; - let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint; + let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint(); // It is an error to try to update to a head with a lesser finalized epoch. 
if new_finalized_checkpoint.epoch < old_finalized_checkpoint.epoch { @@ -2425,7 +2453,7 @@ impl BeaconChain { let is_epoch_transition = current_head.slot.epoch(T::EthSpec::slots_per_epoch()) < new_head .beacon_state - .slot + .slot() .epoch(T::EthSpec::slots_per_epoch()); if is_epoch_transition || is_reorg { @@ -2438,7 +2466,7 @@ impl BeaconChain { // These fields are used for server-sent events let state_root = new_head.beacon_state_root(); - let head_slot = new_head.beacon_state.slot; + let head_slot = new_head.beacon_state.slot(); let target_epoch_start_slot = new_head .beacon_state .current_epoch() @@ -2513,7 +2541,7 @@ impl BeaconChain { // the reach of the new head's `state_roots` array. let new_finalized_slot = head .beacon_state - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(T::EthSpec::slots_per_epoch()); let new_finalized_state_root = process_results( @@ -2592,7 +2620,7 @@ impl BeaconChain { beacon_block_root: Hash256, state: &BeaconState, ) -> Result<(), BeaconChainError> { - let finalized_checkpoint = state.finalized_checkpoint; + let finalized_checkpoint = state.finalized_checkpoint(); info!(self.log, "Verifying the configured weak subjectivity checkpoint"; "weak_subjectivity_epoch" => wss_checkpoint.epoch, "weak_subjectivity_root" => ?wss_checkpoint.root); // If epochs match, simply compare roots. if wss_checkpoint.epoch == finalized_checkpoint.epoch @@ -2653,7 +2681,7 @@ impl BeaconChain { new_finalized_state_root: Hash256, ) -> Result<(), Error> { self.fork_choice.write().prune()?; - let new_finalized_checkpoint = head_state.finalized_checkpoint; + let new_finalized_checkpoint = head_state.finalized_checkpoint(); self.observed_block_producers.write().prune( new_finalized_checkpoint @@ -2870,13 +2898,6 @@ impl BeaconChain { } } - /// Returns `true` if the given block root has not been processed. - pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result { - Ok(!self - .store - .item_exists::>(beacon_block_root)?) 
- } - /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. /// /// This could be a very expensive operation and should only be done in testing/analysis @@ -2984,9 +3005,9 @@ impl BeaconChain { .get_state(&block.state_root(), Some(block.slot())) .unwrap() .unwrap(); - finalized_blocks.insert(state.finalized_checkpoint.root); - justified_blocks.insert(state.current_justified_checkpoint.root); - justified_blocks.insert(state.previous_justified_checkpoint.root); + finalized_blocks.insert(state.finalized_checkpoint().root); + justified_blocks.insert(state.current_justified_checkpoint().root); + justified_blocks.insert(state.previous_justified_checkpoint().root); } if block_hash == canonical_head_hash { diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 80b9fe2ad80..6345aac27ae 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -10,10 +10,7 @@ use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; -use types::{ - BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, SignedBeaconBlock, - Slot, -}; +use types::{BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, Slot}; #[derive(Debug)] pub enum Error { @@ -45,7 +42,7 @@ const MAX_BALANCE_CACHE_SIZE: usize = 4; /// zero. 
pub fn get_effective_balances(state: &BeaconState) -> Vec { state - .validators + .validators() .iter() .map(|validator| { if validator.is_active_at(state.current_epoch()) { @@ -91,7 +88,7 @@ impl BalancesCache { } let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch()); - let epoch_boundary_root = if epoch_boundary_slot == state.slot { + let epoch_boundary_root = if epoch_boundary_slot == state.slot() { block_root } else { // This call remains sensible as long as `state.block_roots` is larger than a single @@ -127,7 +124,7 @@ impl BalancesCache { let mut prior_block_found = false; for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) { - if slot < state.slot { + if slot < state.slot() { if *state.get_block_root(slot)? != block_root { prior_block_found = true; break; @@ -208,7 +205,7 @@ where anchor: &BeaconSnapshot, ) -> Self { let anchor_state = &anchor.beacon_state; - let mut anchor_block_header = anchor_state.latest_block_header.clone(); + let mut anchor_block_header = anchor_state.latest_block_header().clone(); if anchor_block_header.state_root == Hash256::zero() { anchor_block_header.state_root = anchor.beacon_state_root(); } @@ -223,9 +220,9 @@ where Self { store, balances_cache: <_>::default(), - time: anchor_state.slot, + time: anchor_state.slot(), justified_checkpoint, - justified_balances: anchor_state.balances.clone().into(), + justified_balances: anchor_state.balances().clone().into(), finalized_checkpoint, best_justified_checkpoint: justified_checkpoint, _phantom: PhantomData, @@ -318,17 +315,20 @@ where metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES); let justified_block = self .store - .get_item::>(&self.justified_checkpoint.root) + .get_block(&self.justified_checkpoint.root) .map_err(Error::FailedToReadBlock)? .ok_or(Error::MissingBlock(self.justified_checkpoint.root))? 
- .message; + .deconstruct() + .0; + // FIXME(altair): could remove clone with by-value `balances` accessor self.justified_balances = self .store - .get_state(&justified_block.state_root, Some(justified_block.slot)) + .get_state(&justified_block.state_root(), Some(justified_block.slot())) .map_err(Error::FailedToReadState)? - .ok_or(Error::MissingState(justified_block.state_root))? - .balances + .ok_or_else(|| Error::MissingState(justified_block.state_root()))? + .balances() + .clone() .into(); } diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index ba99debaa55..b9de6e9eba1 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,10 +1,9 @@ use serde_derive::Serialize; -use ssz_derive::{Decode, Encode}; use types::{beacon_state::CloneConfig, BeaconState, EthSpec, Hash256, SignedBeaconBlock}; /// Represents some block and its associated state. Generally, this will be used for tracking the /// head, justified head and finalized head. -#[derive(Clone, Serialize, PartialEq, Debug, Encode, Decode)] +#[derive(Clone, Serialize, PartialEq, Debug)] pub struct BeaconSnapshot { pub beacon_block: SignedBeaconBlock, pub beacon_block_root: Hash256, @@ -31,7 +30,7 @@ impl BeaconSnapshot { /// /// It is not strictly enforced that `root(self.beacon_state) == self.beacon_state_root()`. pub fn beacon_state_root(&self) -> Hash256 { - self.beacon_block.message.state_root + self.beacon_block.message().state_root() } /// Update all fields of the checkpoint. 
diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 89a5d58fe70..a4ae722b9f0 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -71,8 +71,8 @@ use std::io::Write; use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp}; use tree_hash::TreeHash; use types::{ - BeaconBlock, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, - PublicKey, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, + InconsistentFork, PublicKey, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. @@ -219,6 +219,12 @@ pub enum BlockError { /// /// The block is invalid and the peer is faulty. WeakSubjectivityConflict, + /// The block has the wrong structure for the fork at `block.slot`. + /// + /// ## Peer scoring + /// + /// The block is invalid and the peer is faulty. + InconsistentFork(InconsistentFork), } impl std::fmt::Display for BlockError { @@ -477,6 +483,11 @@ impl GossipVerifiedBlock { block: SignedBeaconBlock, chain: &BeaconChain, ) -> Result> { + // Ensure the block is the correct structure for the fork at `block.slot()`. + block + .fork_name(&chain.spec) + .map_err(BlockError::InconsistentFork)?; + // Do not gossip or process blocks from future slots. let present_slot_with_tolerance = chain .slot_clock @@ -492,7 +503,7 @@ impl GossipVerifiedBlock { let block_root = get_block_root(&block); // Do not gossip a block from a finalized slot. - check_block_against_finalized_slot(&block.message, chain)?; + check_block_against_finalized_slot(block.message(), chain)?; // Check if the block is already known. 
We know it is post-finalization, so it is // sufficient to check the fork choice. @@ -509,12 +520,12 @@ impl GossipVerifiedBlock { if chain .observed_block_producers .read() - .proposer_has_been_observed(&block.message) + .proposer_has_been_observed(block.message()) .map_err(|e| BlockError::BeaconChainError(e.into()))? { return Err(BlockError::RepeatProposal { - proposer: block.message.proposer_index, - slot: block.message.slot, + proposer: block.message().proposer_index(), + slot: block.slot(), }); } @@ -563,7 +574,7 @@ impl GossipVerifiedBlock { }; // Reject any block that exceeds our limit on skipped slots. - check_block_skip_slots(chain, parent_block.slot, &block.message)?; + check_block_skip_slots(chain, parent_block.slot, block.message())?; // We assign to a variable instead of using `if let Some` directly to ensure we drop the // write lock before trying to acquire it again in the `else` clause. @@ -607,17 +618,17 @@ impl GossipVerifiedBlock { block_epoch, proposer_shuffling_decision_block, proposers, - state.fork, + state.fork(), )?; - (proposer_index, state.fork, Some(parent), block) + (proposer_index, state.fork(), Some(parent), block) }; let signature_is_valid = { let pubkey_cache = get_validator_pubkey_cache(chain)?; let pubkey = pubkey_cache - .get(block.message.proposer_index as usize) - .ok_or(BlockError::UnknownValidator(block.message.proposer_index))?; + .get(block.message().proposer_index() as usize) + .ok_or_else(|| BlockError::UnknownValidator(block.message().proposer_index()))?; block.verify_signature( Some(block_root), pubkey, @@ -639,18 +650,18 @@ impl GossipVerifiedBlock { if chain .observed_block_producers .write() - .observe_proposer(&block.message) + .observe_proposer(block.message()) .map_err(|e| BlockError::BeaconChainError(e.into()))? 
{ return Err(BlockError::RepeatProposal { - proposer: block.message.proposer_index, - slot: block.message.slot, + proposer: block.message().proposer_index(), + slot: block.slot(), }); } - if block.message.proposer_index != expected_proposer as u64 { + if block.message().proposer_index() != expected_proposer as u64 { return Err(BlockError::IncorrectBlockProposer { - block: block.message.proposer_index, + block: block.message().proposer_index(), local_shuffling: expected_proposer as u64, }); } @@ -693,10 +704,15 @@ impl SignatureVerifiedBlock { block_root: Hash256, chain: &BeaconChain, ) -> Result> { + // Ensure the block is the correct structure for the fork at `block.slot()`. + block + .fork_name(&chain.spec) + .map_err(BlockError::InconsistentFork)?; + let (mut parent, block) = load_parent(block, chain)?; // Reject any block that exceeds our limit on skipped slots. - check_block_skip_slots(chain, parent.beacon_block.slot(), &block.message)?; + check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; let state = cheap_state_advance_to_obtain_committees( &mut parent.pre_state, @@ -860,7 +876,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { } // Reject any block that exceeds our limit on skipped slots. - check_block_skip_slots(chain, parent.beacon_block.slot(), &block.message)?; + check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; /* * Perform cursory checks to see if the block is even worth processing. @@ -896,20 +912,20 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // Perform a sanity check on the pre-state. 
let parent_slot = parent.beacon_block.slot(); - if state.slot < parent_slot || state.slot > parent_slot + 1 { + if state.slot() < parent_slot || state.slot() > parent_slot + 1 { return Err(BeaconChainError::BadPreState { parent_root: parent.beacon_block_root, parent_slot, block_root, block_slot: block.slot(), - state_slot: state.slot, + state_slot: state.slot(), } .into()); } - let distance = block.slot().as_u64().saturating_sub(state.slot.as_u64()); + let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { - let state_root = if parent.beacon_block.slot() == state.slot { + let state_root = if parent.beacon_block.slot() == state.slot() { // If it happens that `pre_state` has *not* already been advanced forward a single // slot, then there is no need to compute the state root for this // `per_slot_processing` call since that state root is already stored in the parent @@ -935,7 +951,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { vec![] } else { vec![ - if state.slot % T::EthSpec::slots_per_epoch() == 0 { + if state.slot() % T::EthSpec::slots_per_epoch() == 0 { StoreOp::PutState(state_root, &state) } else { StoreOp::PutStateSummary( @@ -1070,14 +1086,14 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { fn check_block_skip_slots( chain: &BeaconChain, parent_slot: Slot, - block: &BeaconBlock, + block: BeaconBlockRef<'_, T::EthSpec>, ) -> Result<(), BlockError> { // Reject any block that exceeds our limit on skipped slots. if let Some(max_skip_slots) = chain.config.import_max_skip_slots { - if block.slot > parent_slot + max_skip_slots { + if block.slot() > parent_slot + max_skip_slots { return Err(BlockError::TooManySkippedSlots { parent_slot, - block_slot: block.slot, + block_slot: block.slot(), }); } } @@ -1090,7 +1106,7 @@ fn check_block_skip_slots( /// Returns an error if the block is earlier or equal to the finalized slot, or there was an error /// verifying that condition. 
fn check_block_against_finalized_slot( - block: &BeaconBlock, + block: BeaconBlockRef<'_, T::EthSpec>, chain: &BeaconChain, ) -> Result<(), BlockError> { let finalized_slot = chain @@ -1099,9 +1115,9 @@ fn check_block_against_finalized_slot( .epoch .start_slot(T::EthSpec::slots_per_epoch()); - if block.slot <= finalized_slot { + if block.slot() <= finalized_slot { Err(BlockError::WouldRevertFinalizedSlot { - block_slot: block.slot, + block_slot: block.slot(), finalized_slot, }) } else { @@ -1127,7 +1143,7 @@ pub fn check_block_is_finalized_descendant>(&block.parent_root()) + .block_exists(&block.parent_root()) .map_err(|e| BlockError::BeaconChainError(e.into()))? { Err(BlockError::NotFinalizedDescendant { @@ -1151,24 +1167,24 @@ pub fn check_block_relevancy( block_root: Option, chain: &BeaconChain, ) -> Result> { - let block = &signed_block.message; + let block = signed_block.message(); // Do not process blocks from the future. - if block.slot > chain.slot()? { + if block.slot() > chain.slot()? { return Err(BlockError::FutureSlot { present_slot: chain.slot()?, - block_slot: block.slot, + block_slot: block.slot(), }); } // Do not re-process the genesis block. - if block.slot == 0 { + if block.slot() == 0 { return Err(BlockError::GenesisBlock); } // This is an artificial (non-spec) restriction that provides some protection from overflow // abuses. 
- if block.slot >= MAXIMUM_BLOCK_SLOT_NUMBER { + if block.slot() >= MAXIMUM_BLOCK_SLOT_NUMBER { return Err(BlockError::BlockSlotLimitReached); } @@ -1209,7 +1225,7 @@ fn verify_parent_block_is_known( if let Some(proto_block) = chain .fork_choice .read() - .get_block(&block.message.parent_root) + .get_block(&block.message().parent_root()) { Ok((proto_block, block)) } else { @@ -1327,10 +1343,10 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( state.build_committee_cache(RelativeEpoch::Current, spec)?; Ok(Cow::Borrowed(state)) - } else if state.slot > block_slot { + } else if state.slot() > block_slot { Err(BlockError::BlockIsNotLaterThanParent { block_slot, - parent_slot: state.slot, + parent_slot: state.slot(), }) } else { let mut state = state.clone_with(CloneConfig::committee_caches_only()); @@ -1372,7 +1388,7 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>( move |validator_index| { // Disallow access to any validator pubkeys that are not in the current beacon // state. 
- if validator_index < state.validators.len() { + if validator_index < state.validators().len() { validator_pubkey_cache .get(validator_index) .map(|pk| Cow::Borrowed(pk)) @@ -1398,8 +1414,8 @@ fn verify_header_signature( let (fork, genesis_validators_root) = chain .with_head(|head| { Ok(( - head.beacon_state.fork, - head.beacon_state.genesis_validators_root, + head.beacon_state.fork(), + head.beacon_state.genesis_validators_root(), )) }) .map_err(|e: BlockError| e)?; @@ -1458,7 +1474,7 @@ fn participation_ratio(section: u64, total: u64) -> Option { fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { if WRITE_BLOCK_PROCESSING_SSZ { let root = state.tree_hash_root(); - let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot, root); + let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot(), root); let mut path = std::env::temp_dir().join("lighthouse"); let _ = fs::create_dir_all(path.clone()); path = path.join(filename); @@ -1479,7 +1495,7 @@ fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { fn write_block(block: &SignedBeaconBlock, root: Hash256, log: &Logger) { if WRITE_BLOCK_PROCESSING_SSZ { - let filename = format!("block_slot_{}_root{}.ssz", block.message.slot, root); + let filename = format!("block_slot_{}_root{}.ssz", block.slot(), root); let mut path = std::env::temp_dir().join("lighthouse"); let _ = fs::create_dir_all(path.clone()); path = path.join(filename); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 237e61414b6..efc6865b370 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -236,7 +236,7 @@ where .ok_or("Fork choice not found in store")?; let genesis_block = store - .get_item::>(&chain.genesis_block_root) + .get_block(&chain.genesis_block_root) .map_err(|e| format!("DB error when reading genesis block: {:?}", e))? 
.ok_or("Genesis block not found in store")?; let genesis_state = store @@ -244,7 +244,7 @@ where .map_err(|e| format!("DB error when reading genesis state: {:?}", e))? .ok_or("Genesis block not found in store")?; - self.genesis_time = Some(genesis_state.genesis_time); + self.genesis_time = Some(genesis_state.genesis_time()); self.op_pool = Some( store @@ -282,7 +282,7 @@ where .build_all_caches(&self.spec) .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?; - let beacon_state_root = beacon_block.message.state_root; + let beacon_state_root = beacon_block.message().state_root(); let beacon_block_root = beacon_block.canonical_root(); self.genesis_state_root = Some(beacon_state_root); @@ -292,12 +292,12 @@ where .put_state(&beacon_state_root, &beacon_state) .map_err(|e| format!("Failed to store genesis state: {:?}", e))?; store - .put_item(&beacon_block_root, &beacon_block) + .put_block(&beacon_block_root, beacon_block.clone()) .map_err(|e| format!("Failed to store genesis block: {:?}", e))?; // Store the genesis block under the `ZERO_HASH` key. store - .put_item(&Hash256::zero(), &beacon_block) + .put_block(&Hash256::zero(), beacon_block.clone()) .map_err(|e| { format!( "Failed to store genesis block under 0x00..00 alias: {:?}", @@ -316,13 +316,13 @@ where let fork_choice = ForkChoice::from_genesis( fc_store, genesis.beacon_block_root, - &genesis.beacon_block.message, + &genesis.beacon_block.deconstruct().0, &genesis.beacon_state, ) .map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?; self.fork_choice = Some(fork_choice); - self.genesis_time = Some(genesis.beacon_state.genesis_time); + self.genesis_time = Some(genesis.beacon_state.genesis_time()); Ok(self.empty_op_pool()) } @@ -435,7 +435,7 @@ where .map_err(|e| format!("Unable to get fork choice head: {:?}", e))?; let head_block = store - .get_item::>(&head_block_root) + .get_block(&head_block_root) .map_err(|e| format!("DB error when reading head block: {:?}", e))? 
.ok_or("Head block not found in store")?; let head_state_root = head_block.state_root(); @@ -460,7 +460,7 @@ where // // This is a sanity check to detect database corruption. let fc_finalized = fork_choice.finalized_checkpoint(); - let head_finalized = canonical_head.beacon_state.finalized_checkpoint; + let head_finalized = canonical_head.beacon_state.finalized_checkpoint(); if fc_finalized != head_finalized { if head_finalized.root == Hash256::zero() && head_finalized.epoch == fc_finalized.epoch @@ -518,7 +518,7 @@ where observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), eth1_chain: self.eth1_chain, - genesis_validators_root: canonical_head.beacon_state.genesis_validators_root, + genesis_validators_root: canonical_head.beacon_state.genesis_validators_root(), canonical_head: TimeoutRwLock::new(canonical_head.clone()), genesis_block_root, genesis_state_root, @@ -558,7 +558,7 @@ where "Weak subjectivity checkpoint verification failed on startup!"; "head_block_root" => format!("{}", head.beacon_block_root), "head_slot" => format!("{}", head.beacon_block.slot()), - "finalized_epoch" => format!("{}", head.beacon_state.finalized_checkpoint.epoch), + "finalized_epoch" => format!("{}", head.beacon_state.finalized_checkpoint().epoch), "wss_checkpoint_epoch" => format!("{}", wss_checkpoint.epoch), "error" => format!("{:?}", e), ); @@ -640,16 +640,17 @@ fn genesis_block( genesis_state: &mut BeaconState, spec: &ChainSpec, ) -> Result, String> { - let mut genesis_block = SignedBeaconBlock { - message: BeaconBlock::empty(&spec), - // Empty signature, which should NEVER be read. This isn't to-spec, but makes the genesis - // block consistent with every other block. 
- signature: Signature::empty(), - }; - genesis_block.message.state_root = genesis_state + let mut genesis_block = BeaconBlock::empty(&spec); + *genesis_block.state_root_mut() = genesis_state .update_tree_hash_cache() .map_err(|e| format!("Error hashing genesis state: {:?}", e))?; - Ok(genesis_block) + + Ok(SignedBeaconBlock::from_block( + genesis_block, + // Empty signature, which should NEVER be read. This isn't to-spec, but makes the genesis + // block consistent with every other block. + Signature::empty(), + )) } #[cfg(not(debug_assertions))] @@ -714,9 +715,10 @@ mod test { let state = head.beacon_state; let block = head.beacon_block; - assert_eq!(state.slot, Slot::new(0), "should start from genesis"); + assert_eq!(state.slot(), Slot::new(0), "should start from genesis"); assert_eq!( - state.genesis_time, 13_371_337, + state.genesis_time(), + 13_371_337, "should have the correct genesis time" ); assert_eq!( @@ -734,7 +736,7 @@ mod test { "should store genesis block under zero hash alias" ); assert_eq!( - state.validators.len(), + state.validators().len(), validator_count, "should have correct validator count" ); @@ -757,24 +759,25 @@ mod test { .expect("should build state"); assert_eq!( - state.eth1_data.block_hash, + state.eth1_data().block_hash, Hash256::from_slice(&[0x42; 32]), "eth1 block hash should be co-ordinated junk" ); assert_eq!( - state.genesis_time, genesis_time, + state.genesis_time(), + genesis_time, "genesis time should be as specified" ); - for b in &state.balances { + for b in state.balances() { assert_eq!( *b, spec.max_effective_balance, "validator balances should be max effective balance" ); } - for v in &state.validators { + for v in state.validators() { let creds = v.withdrawal_credentials.as_bytes(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, @@ -788,13 +791,13 @@ mod test { } assert_eq!( - state.balances.len(), + state.balances().len(), validator_count, "validator balances len should be correct" ); assert_eq!( - 
state.validators.len(), + state.validators().len(), validator_count, "validator count should be correct" ); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index f054796b741..f8c12e3c870 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -2,6 +2,7 @@ use crate::metrics; use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2::lighthouse::Eth1SyncStatusData; use eth2_hashing::hash; +use int_to_bytes::int_to_bytes32; use slog::{debug, error, trace, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -364,7 +365,7 @@ impl Eth1ChainBackend for DummyEth1ChainBackend { Ok(Eth1Data { deposit_root: Hash256::from_slice(&deposit_root), - deposit_count: state.eth1_deposit_index, + deposit_count: state.eth1_deposit_index(), block_hash: Hash256::from_slice(&block_hash), }) } @@ -451,9 +452,9 @@ impl CachingEth1Backend { impl Eth1ChainBackend for CachingEth1Backend { fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { let period = T::SlotsPerEth1VotingPeriod::to_u64(); - let voting_period_start_slot = (state.slot / period) * period; + let voting_period_start_slot = (state.slot() / period) * period; let voting_period_start_seconds = slot_start_seconds::( - state.genesis_time, + state.genesis_time(), spec.seconds_per_slot, voting_period_start_slot, ); @@ -491,13 +492,13 @@ impl Eth1ChainBackend for CachingEth1Backend { vote }) .unwrap_or_else(|| { - let vote = state.eth1_data.clone(); + let vote = state.eth1_data().clone(); error!( self.log, "No valid eth1_data votes, `votes_to_consider` empty"; "lowest_block_number" => self.core.lowest_block_number(), "earliest_block_timestamp" => self.core.earliest_block_timestamp(), - "genesis_time" => state.genesis_time, + "genesis_time" => state.genesis_time(), "outcome" => "casting `state.eth1_data` as eth1 vote" ); metrics::inc_counter(&metrics::DEFAULT_ETH1_VOTES); @@ 
-522,11 +523,11 @@ impl Eth1ChainBackend for CachingEth1Backend { eth1_data_vote: &Eth1Data, _spec: &ChainSpec, ) -> Result, Error> { - let deposit_index = state.eth1_deposit_index; + let deposit_index = state.eth1_deposit_index(); let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote)? { new_eth1_data.deposit_count } else { - state.eth1_data.deposit_count + state.eth1_data().deposit_count }; match deposit_index.cmp(&deposit_count) { @@ -609,7 +610,7 @@ fn collect_valid_votes( ) -> Eth1DataVoteCount { let mut valid_votes = HashMap::new(); state - .eth1_data_votes + .eth1_data_votes() .iter() .filter_map(|vote| { votes_to_consider @@ -633,13 +634,6 @@ fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option { .map(|((eth1_data, _), _)| eth1_data.clone()) } -/// Returns `int` as little-endian bytes with a length of 32. -fn int_to_bytes32(int: u64) -> Vec { - let mut vec = int.to_le_bytes().to_vec(); - vec.resize(32, 0); - vec -} - /// Returns the unix-epoch seconds at the start of the given `slot`. 
fn slot_start_seconds( genesis_unix_seconds: u64, @@ -666,7 +660,7 @@ fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) -> mod test { use super::*; use environment::null_logger; - use types::{test_utils::DepositTestTask, MinimalEthSpec}; + use types::{DepositData, MinimalEthSpec, Signature}; type E = MinimalEthSpec; @@ -680,9 +674,9 @@ mod test { fn get_voting_period_start_seconds(state: &BeaconState, spec: &ChainSpec) -> u64 { let period = ::SlotsPerEth1VotingPeriod::to_u64(); - let voting_period_start_slot = (state.slot / period) * period; + let voting_period_start_slot = (state.slot() / period) * period; slot_start_seconds::( - state.genesis_time, + state.genesis_time(), spec.seconds_per_slot, voting_period_start_slot, ) @@ -723,10 +717,7 @@ mod test { mod eth1_chain_json_backend { use super::*; use eth1::DepositLog; - use types::{ - test_utils::{generate_deterministic_keypair, TestingDepositBuilder}, - EthSpec, MainnetEthSpec, - }; + use types::{test_utils::generate_deterministic_keypair, EthSpec, MainnetEthSpec}; fn get_eth1_chain() -> Eth1Chain, E> { let eth1_config = Eth1Config { @@ -743,13 +734,17 @@ mod test { fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog { let keypair = generate_deterministic_keypair(i as usize); - let mut builder = - TestingDepositBuilder::new(keypair.pk.clone(), spec.max_effective_balance); - builder.sign(DepositTestTask::Valid, &keypair, spec); - let deposit_data = builder.build().data; + let mut deposit = DepositData { + pubkey: keypair.pk.into(), + withdrawal_credentials: Hash256::zero(), + amount: spec.max_effective_balance, + signature: Signature::empty().into(), + }; + + deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); DepositLog { - deposit_data, + deposit_data: deposit, block_number: i, index: i, signature_is_valid: true, @@ -768,8 +763,8 @@ mod test { ); let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); - state.eth1_deposit_index = 
0; - state.eth1_data.deposit_count = 0; + *state.eth1_deposit_index_mut() = 0; + state.eth1_data_mut().deposit_count = 0; assert!( eth1_chain @@ -778,7 +773,7 @@ mod test { "should succeed if cache is empty but no deposits are required" ); - state.eth1_data.deposit_count = 1; + state.eth1_data_mut().deposit_count = 1; assert!( eth1_chain @@ -821,8 +816,8 @@ mod test { ); let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); - state.eth1_deposit_index = 0; - state.eth1_data.deposit_count = 0; + *state.eth1_deposit_index_mut() = 0; + state.eth1_data_mut().deposit_count = 0; assert!( eth1_chain @@ -832,10 +827,10 @@ mod test { ); (0..3).for_each(|initial_deposit_index| { - state.eth1_deposit_index = initial_deposit_index as u64; + *state.eth1_deposit_index_mut() = initial_deposit_index as u64; (initial_deposit_index..deposits.len()).for_each(|i| { - state.eth1_data.deposit_count = i as u64; + state.eth1_data_mut().deposit_count = i as u64; let deposits_for_inclusion = eth1_chain .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) @@ -888,7 +883,8 @@ mod test { .eth1_data_for_block_production(&state, &spec) .expect("should produce default eth1 data vote"); assert_eq!( - a, state.eth1_data, + a, + *state.eth1_data(), "default vote should be same as state.eth1_data" ); } @@ -908,7 +904,7 @@ mod test { let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); - state.slot = Slot::from(slots_per_eth1_voting_period * 10); + *state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10); let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block; let voting_period_start = get_voting_period_start_seconds(&state, &spec); let start_eth1_block = voting_period_start - follow_distance_seconds * 2; @@ -974,8 +970,8 @@ mod test { let eth1_follow_distance = spec.eth1_follow_distance; let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); - state.genesis_time = 0; - state.slot = 
Slot::from(slots_per_eth1_voting_period * 10); + *state.genesis_time_mut() = 0; + *state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10); let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block; let voting_period_start = get_voting_period_start_seconds(&state, &spec); @@ -1055,7 +1051,7 @@ mod test { let votes_to_consider = get_eth1_data_vec(slots, 0); - state.eth1_data_votes = votes_to_consider[0..slots as usize / 4] + *state.eth1_data_votes_mut() = votes_to_consider[0..slots as usize / 4] .iter() .map(|(eth1_data, _)| eth1_data) .cloned() @@ -1084,7 +1080,7 @@ mod test { .expect("should have some eth1 data") .clone(); - state.eth1_data_votes = vec![duplicate_eth1_data.clone(); 4] + *state.eth1_data_votes_mut() = vec![duplicate_eth1_data.clone(); 4] .iter() .map(|(eth1_data, _)| eth1_data) .cloned() diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 4a4ce2fe572..84c800f3b71 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -112,14 +112,14 @@ mod test { let mut block: BeaconBlock = BeaconBlock::empty(spec); let block_root = Hash256::from_low_u64_be(i); - block.slot = Slot::new(i); - block.parent_root = if i == 0 { + *block.slot_mut() = Slot::new(i); + *block.parent_root_mut() = if i == 0 { Hash256::random() } else { Hash256::from_low_u64_be(i - 1) }; - head_tracker.register_block(block_root, block.parent_root, block.slot); + head_tracker.register_block(block_root, block.parent_root(), block.slot()); } assert_eq!( @@ -130,9 +130,9 @@ mod test { let mut block: BeaconBlock = BeaconBlock::empty(spec); let block_root = Hash256::from_low_u64_be(42); - block.slot = Slot::new(15); - block.parent_root = Hash256::from_low_u64_be(14); - head_tracker.register_block(block_root, block.parent_root, block.slot); + *block.slot_mut() = Slot::new(15); + *block.parent_root_mut() = Hash256::from_low_u64_be(14); + 
head_tracker.register_block(block_root, block.parent_root(), block.slot()); let heads = head_tracker.heads(); diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 10eb0c7f477..fd79c0e6666 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -682,42 +682,53 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { /// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`. fn scrape_head_state(state: &BeaconState, state_root: Hash256) { - set_gauge_by_slot(&HEAD_STATE_SLOT, state.slot); + set_gauge_by_slot(&HEAD_STATE_SLOT, state.slot()); set_gauge_by_hash(&HEAD_STATE_ROOT, state_root); set_gauge_by_slot( &HEAD_STATE_LATEST_BLOCK_SLOT, - state.latest_block_header.slot, + state.latest_block_header().slot, ); set_gauge_by_hash( &HEAD_STATE_CURRENT_JUSTIFIED_ROOT, - state.current_justified_checkpoint.root, + state.current_justified_checkpoint().root, ); set_gauge_by_epoch( &HEAD_STATE_CURRENT_JUSTIFIED_EPOCH, - state.current_justified_checkpoint.epoch, + state.current_justified_checkpoint().epoch, ); set_gauge_by_hash( &HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT, - state.previous_justified_checkpoint.root, + state.previous_justified_checkpoint().root, ); set_gauge_by_epoch( &HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH, - state.previous_justified_checkpoint.epoch, + state.previous_justified_checkpoint().epoch, + ); + set_gauge_by_hash( + &HEAD_STATE_FINALIZED_ROOT, + state.finalized_checkpoint().root, ); - set_gauge_by_hash(&HEAD_STATE_FINALIZED_ROOT, state.finalized_checkpoint.root); set_gauge_by_epoch( &HEAD_STATE_FINALIZED_EPOCH, - state.finalized_checkpoint.epoch, + state.finalized_checkpoint().epoch, + ); + set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators().len()); + set_gauge_by_u64( + &HEAD_STATE_VALIDATOR_BALANCES, + state.balances().iter().sum(), + ); + set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index()); + 
set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators().len()); + set_gauge_by_u64( + &HEAD_STATE_VALIDATOR_BALANCES, + state.balances().iter().sum(), ); - set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index); - set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators.len()); - set_gauge_by_u64(&HEAD_STATE_VALIDATOR_BALANCES, state.balances.iter().sum()); let mut num_active: usize = 0; let mut num_slashed: usize = 0; let mut num_withdrawn: usize = 0; - for v in &state.validators { + for v in state.validators() { if v.is_active_at(state.current_epoch()) { num_active += 1; } diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 92f8efe435f..5a599586cb7 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -284,9 +284,9 @@ impl, Cold: ItemStore> BackgroundMigrator ObservedBlockProducers { /// /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. - pub fn observe_proposer(&mut self, block: &BeaconBlock) -> Result { + pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result { self.sanitize_block(block)?; let did_not_exist = self .items - .entry(block.slot) + .entry(block.slot()) .or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize())) - .insert(block.proposer_index); + .insert(block.proposer_index()); Ok(!did_not_exist) } @@ -72,27 +72,27 @@ impl ObservedBlockProducers { /// /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`. 
- pub fn proposer_has_been_observed(&self, block: &BeaconBlock) -> Result { + pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result { self.sanitize_block(block)?; let exists = self .items - .get(&block.slot) - .map_or(false, |set| set.contains(&block.proposer_index)); + .get(&block.slot()) + .map_or(false, |set| set.contains(&block.proposer_index())); Ok(exists) } /// Returns `Ok(())` if the given `block` is sane. - fn sanitize_block(&self, block: &BeaconBlock) -> Result<(), Error> { - if block.proposer_index > E::ValidatorRegistryLimit::to_u64() { - return Err(Error::ValidatorIndexTooHigh(block.proposer_index)); + fn sanitize_block(&self, block: BeaconBlockRef<'_, E>) -> Result<(), Error> { + if block.proposer_index() >= E::ValidatorRegistryLimit::to_u64() { + return Err(Error::ValidatorIndexTooHigh(block.proposer_index())); } let finalized_slot = self.finalized_slot; - if finalized_slot > 0 && block.slot <= finalized_slot { + if finalized_slot > 0 && block.slot() <= finalized_slot { return Err(Error::FinalizedBlock { - slot: block.slot, + slot: block.slot(), finalized_slot, }); } @@ -119,14 +119,14 @@ impl ObservedBlockProducers { #[cfg(test)] mod tests { use super::*; - use types::MainnetEthSpec; + use types::{BeaconBlock, MainnetEthSpec}; type E = MainnetEthSpec; fn get_block(slot: u64, proposer: u64) -> BeaconBlock { let mut block = BeaconBlock::empty(&E::default_spec()); - block.slot = slot.into(); - block.proposer_index = proposer; + *block.slot_mut() = slot.into(); + *block.proposer_index_mut() = proposer; block } @@ -138,10 +138,10 @@ mod tests { assert_eq!(cache.items.len(), 0, "no slots should be present"); // Slot 0, proposer 0 - let block_a = &get_block(0, 0); + let block_a = get_block(0, 0); assert_eq!( - cache.observe_proposer(block_a), + cache.observe_proposer(block_a.to_ref()), Ok(false), "can observe proposer, indicates proposer unobserved" ); @@ -197,10 +197,10 @@ mod tests { */ // First slot of finalized epoch, 
proposer 0 - let block_b = &get_block(E::slots_per_epoch(), 0); + let block_b = get_block(E::slots_per_epoch(), 0); assert_eq!( - cache.observe_proposer(block_b), + cache.observe_proposer(block_b.to_ref()), Err(Error::FinalizedBlock { slot: E::slots_per_epoch().into(), finalized_slot: E::slots_per_epoch().into(), @@ -217,10 +217,10 @@ mod tests { let three_epochs = E::slots_per_epoch() * 3; // First slot of finalized epoch, proposer 0 - let block_b = &get_block(three_epochs, 0); + let block_b = get_block(three_epochs, 0); assert_eq!( - cache.observe_proposer(block_b), + cache.observe_proposer(block_b.to_ref()), Ok(false), "can insert non-finalized block" ); @@ -266,25 +266,25 @@ mod tests { let mut cache = ObservedBlockProducers::default(); // Slot 0, proposer 0 - let block_a = &get_block(0, 0); + let block_a = get_block(0, 0); assert_eq!( - cache.proposer_has_been_observed(block_a), + cache.proposer_has_been_observed(block_a.to_ref()), Ok(false), "no observation in empty cache" ); assert_eq!( - cache.observe_proposer(block_a), + cache.observe_proposer(block_a.to_ref()), Ok(false), "can observe proposer, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_a), + cache.proposer_has_been_observed(block_a.to_ref()), Ok(true), "observed block is indicated as true" ); assert_eq!( - cache.observe_proposer(block_a), + cache.observe_proposer(block_a.to_ref()), Ok(true), "observing again indicates true" ); @@ -302,25 +302,25 @@ mod tests { ); // Slot 1, proposer 0 - let block_b = &get_block(1, 0); + let block_b = get_block(1, 0); assert_eq!( - cache.proposer_has_been_observed(block_b), + cache.proposer_has_been_observed(block_b.to_ref()), Ok(false), "no observation for new slot" ); assert_eq!( - cache.observe_proposer(block_b), + cache.observe_proposer(block_b.to_ref()), Ok(false), "can observe proposer for new slot, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_b), + 
cache.proposer_has_been_observed(block_b.to_ref()), Ok(true), "observed block in slot 1 is indicated as true" ); assert_eq!( - cache.observe_proposer(block_b), + cache.observe_proposer(block_b.to_ref()), Ok(true), "observing slot 1 again indicates true" ); @@ -347,25 +347,25 @@ mod tests { ); // Slot 0, proposer 1 - let block_c = &get_block(0, 1); + let block_c = get_block(0, 1); assert_eq!( - cache.proposer_has_been_observed(block_c), + cache.proposer_has_been_observed(block_c.to_ref()), Ok(false), "no observation for new proposer" ); assert_eq!( - cache.observe_proposer(block_c), + cache.observe_proposer(block_c.to_ref()), Ok(false), "can observe new proposer, indicates proposer unobserved" ); assert_eq!( - cache.proposer_has_been_observed(block_c), + cache.proposer_has_been_observed(block_c.to_ref()), Ok(true), "observed new proposer block is indicated as true" ); assert_eq!( - cache.observe_proposer(block_c), + cache.observe_proposer(block_c.to_ref()), Ok(true), "observing new proposer again indicates true" ); diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 435e4b6f8d6..b386c22c29a 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -161,7 +161,7 @@ impl SnapshotCache { .enumerate() .filter_map(|(i, snapshot)| { if snapshot.beacon_block_root != self.head_block_root { - Some((i, snapshot.beacon_state.slot)) + Some((i, snapshot.beacon_state.slot())) } else { None } @@ -263,7 +263,7 @@ impl SnapshotCache { /// Removes all snapshots from the queue that are less than or equal to the finalized epoch. 
pub fn prune(&mut self, finalized_epoch: Epoch) { self.snapshots.retain(|snapshot| { - snapshot.beacon_state.slot > finalized_epoch.start_slot(T::slots_per_epoch()) + snapshot.beacon_state.slot() > finalized_epoch.start_slot(T::slots_per_epoch()) }) } @@ -279,27 +279,43 @@ impl SnapshotCache { #[cfg(test)] mod test { use super::*; + use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use store::StoreConfig; use types::{ - test_utils::{generate_deterministic_keypair, TestingBeaconStateBuilder}, - BeaconBlock, Epoch, MainnetEthSpec, SignedBeaconBlock, Slot, + test_utils::generate_deterministic_keypair, BeaconBlock, Epoch, MainnetEthSpec, + SignedBeaconBlock, Slot, }; + fn get_harness() -> BeaconChainHarness> { + let harness = BeaconChainHarness::new_with_store_config( + MainnetEthSpec, + None, + types::test_utils::generate_deterministic_keypairs(1), + StoreConfig::default(), + ); + + harness.advance_slot(); + + harness + } + const CACHE_SIZE: usize = 4; fn get_snapshot(i: u64) -> BeaconSnapshot { let spec = MainnetEthSpec::default_spec(); - let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(1, &spec); - let (beacon_state, _keypairs) = state_builder.build(); + let beacon_state = get_harness().chain.head_beacon_state().unwrap(); + + let signed_beacon_block = SignedBeaconBlock::from_block( + BeaconBlock::empty(&spec), + generate_deterministic_keypair(0) + .sk + .sign(Hash256::from_low_u64_be(42)), + ); BeaconSnapshot { beacon_state, - beacon_block: SignedBeaconBlock { - message: BeaconBlock::empty(&spec), - signature: generate_deterministic_keypair(0) - .sk - .sign(Hash256::from_low_u64_be(42)), - }, + beacon_block: signed_beacon_block, beacon_block_root: Hash256::from_low_u64_be(i), } } @@ -319,7 +335,8 @@ mod test { let mut snapshot = get_snapshot(i); // Each snapshot should be one slot into an epoch, with each snapshot one epoch apart. 
- snapshot.beacon_state.slot = Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1); + *snapshot.beacon_state.slot_mut() = + Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1); cache.insert(snapshot, None); @@ -352,20 +369,20 @@ mod test { .get_cloned(Hash256::from_low_u64_be(1), CloneConfig::none()) .is_none()); - assert!( + assert_eq!( cache .get_cloned(Hash256::from_low_u64_be(0), CloneConfig::none()) .expect("the head should still be in the cache") - .beacon_block_root - == Hash256::from_low_u64_be(0), + .beacon_block_root, + Hash256::from_low_u64_be(0), "get_cloned should get the correct snapshot" ); - assert!( + assert_eq!( cache .get_state_for_block_processing(Hash256::from_low_u64_be(0)) .expect("the head should still be in the cache") - .beacon_block_root - == Hash256::from_low_u64_be(0), + .beacon_block_root, + Hash256::from_low_u64_be(0), "get_state_for_block_processing should get the correct snapshot" ); @@ -392,12 +409,12 @@ mod test { } // Ensure that the new head value was not removed from the cache. 
- assert!( + assert_eq!( cache .get_state_for_block_processing(Hash256::from_low_u64_be(2)) .expect("the new head should still be in the cache") - .beacon_block_root - == Hash256::from_low_u64_be(2), + .beacon_block_root, + Hash256::from_low_u64_be(2), "get_state_for_block_processing should get the correct snapshot" ); } diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index d8603570bcb..eea329a2a3d 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -213,10 +213,10 @@ fn advance_head( } => (block_slot, state_root, *state), }; - let initial_slot = state.slot; + let initial_slot = state.slot(); let initial_epoch = state.current_epoch(); - let state_root = if state.slot == head_slot { + let state_root = if state.slot() == head_slot { Some(head_state_root) } else { // Protect against advancing a state more than a single slot. @@ -225,7 +225,7 @@ fn advance_head( // database. Future works might store temporary, intermediate states inside this function. return Err(Error::BadStateSlot { block_slot: head_slot, - state_slot: state.slot, + state_slot: state.slot(), }); }; @@ -249,7 +249,7 @@ fn advance_head( log, "Advanced head state one slot"; "head_root" => ?head_root, - "state_slot" => state.slot, + "state_slot" => state.slot(), "current_slot" => current_slot, ); @@ -278,7 +278,7 @@ fn advance_head( state .get_beacon_proposer_indices(&beacon_chain.spec) .map_err(BeaconChainError::from)?, - state.fork, + state.fork(), ) .map_err(BeaconChainError::from)?; @@ -304,7 +304,7 @@ fn advance_head( ); } - let final_slot = state.slot; + let final_slot = state.slot(); // Insert the advanced state back into the snapshot cache. 
beacon_chain diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8f89fb63f52..4f213821f5d 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -10,8 +10,11 @@ use crate::{ BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler, StateSkipConfig, }; +use bls::get_withdrawal_credentials; use futures::channel::mpsc::Receiver; use genesis::interop_genesis_state; +use int_to_bytes::int_to_bytes32; +use merkle_proof::MerkleTree; use parking_lot::Mutex; use rand::rngs::StdRng; use rand::Rng; @@ -29,11 +32,12 @@ use task_executor::ShutdownReason; use tempfile::{tempdir, TempDir}; use tree_hash::TreeHash; use types::{ - AggregateSignature, Attestation, AttestationData, AttesterSlashing, BeaconState, - BeaconStateHash, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, Graffiti, Hash256, - IndexedAttestation, Keypair, ProposerSlashing, SelectionProof, SignedAggregateAndProof, - SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId, - VariableList, VoluntaryExit, + typenum::U4294967296, AggregateSignature, Attestation, AttestationData, AttesterSlashing, + BeaconBlock, BeaconState, BeaconStateHash, ChainSpec, Checkpoint, Deposit, DepositData, Domain, + Epoch, EthSpec, ForkName, Graffiti, Hash256, IndexedAttestation, Keypair, ProposerSlashing, + PublicKeyBytes, SelectionProof, SignatureBytes, SignedAggregateAndProof, SignedBeaconBlock, + SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId, VariableList, + VoluntaryExit, }; pub use types::test_utils::generate_deterministic_keypairs; @@ -42,6 +46,8 @@ pub use types::test_utils::generate_deterministic_keypairs; pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // This parameter is required by a builder but not used because we use the `TestingSlotClock`. 
pub const HARNESS_SLOT_TIME: Duration = Duration::from_secs(1); +// Environment variable to read if `fork_from_env` feature is enabled. +const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; pub type BaseHarnessType = Witness, TEthSpec, THotStore, TColdStore>; @@ -106,6 +112,29 @@ pub fn test_logger() -> Logger { } } +/// Return a `ChainSpec` suitable for test usage. +/// +/// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment +/// variable. Otherwise use the default spec. +pub fn test_spec() -> ChainSpec { + if cfg!(feature = "fork_from_env") { + let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { + panic!( + "{} env var must be defined when using fork_from_env: {:?}", + FORK_NAME_ENV_VAR, e + ) + }); + let fork = match fork_name.as_str() { + "base" => ForkName::Base, + "altair" => ForkName::Altair, + other => panic!("unknown FORK_NAME: {}", other), + }; + fork.make_genesis_spec(E::default_spec()) + } else { + E::default_spec() + } +} + /// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and /// attestations. /// @@ -121,15 +150,20 @@ pub struct BeaconChainHarness { pub rng: Mutex, } -type HarnessAttestations = Vec<( +pub type HarnessAttestations = Vec<( Vec<(Attestation, SubnetId)>, Option>, )>; impl BeaconChainHarness> { - pub fn new(eth_spec_instance: E, validator_keypairs: Vec) -> Self { + pub fn new( + eth_spec_instance: E, + spec: Option, + validator_keypairs: Vec, + ) -> Self { Self::new_with_store_config( eth_spec_instance, + spec, validator_keypairs, StoreConfig::default(), ) @@ -137,6 +171,7 @@ impl BeaconChainHarness> { pub fn new_with_store_config( eth_spec_instance: E, + spec: Option, validator_keypairs: Vec, config: StoreConfig, ) -> Self { @@ -144,18 +179,26 @@ impl BeaconChainHarness> { // committee are required to produce an aggregate. 
This is overkill, however with small // validator counts it's the only way to be certain there is _at least one_ aggregator per // committee. - Self::new_with_target_aggregators(eth_spec_instance, validator_keypairs, 1 << 32, config) + Self::new_with_target_aggregators( + eth_spec_instance, + spec, + validator_keypairs, + 1 << 32, + config, + ) } /// Instantiate a new harness with a custom `target_aggregators_per_committee` spec value pub fn new_with_target_aggregators( eth_spec_instance: E, + spec: Option, validator_keypairs: Vec, target_aggregators_per_committee: u64, store_config: StoreConfig, ) -> Self { Self::new_with_chain_config( eth_spec_instance, + spec, validator_keypairs, target_aggregators_per_committee, store_config, @@ -167,13 +210,14 @@ impl BeaconChainHarness> { /// `target_aggregators_per_committee` spec value, and a `ChainConfig` pub fn new_with_chain_config( eth_spec_instance: E, + spec: Option, validator_keypairs: Vec, target_aggregators_per_committee: u64, store_config: StoreConfig, chain_config: ChainConfig, ) -> Self { let data_dir = tempdir().expect("should create temporary data_dir"); - let mut spec = E::default_spec(); + let mut spec = spec.unwrap_or_else(test_spec::); spec.target_aggregators_per_committee = target_aggregators_per_committee; @@ -221,11 +265,12 @@ impl BeaconChainHarness> { /// Instantiate a new harness with `validator_count` initial validators. pub fn new_with_disk_store( eth_spec_instance: E, + spec: Option, store: Arc, LevelDB>>, validator_keypairs: Vec, ) -> Self { let data_dir = tempdir().expect("should create temporary data_dir"); - let spec = E::default_spec(); + let spec = spec.unwrap_or_else(test_spec::); let log = test_logger(); let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); @@ -265,11 +310,12 @@ impl BeaconChainHarness> { /// Instantiate a new harness with `validator_count` initial validators. 
pub fn resume_from_disk_store( eth_spec_instance: E, + spec: Option, store: Arc, LevelDB>>, validator_keypairs: Vec, data_dir: TempDir, ) -> Self { - let spec = E::default_spec(); + let spec = spec.unwrap_or_else(test_spec::); let log = test_logger(); let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); @@ -379,7 +425,7 @@ where slot: Slot, ) -> (SignedBeaconBlock, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); - assert!(slot >= state.slot); + assert!(slot >= state.slot()); complete_state_advance(&mut state, None, slot, &self.spec) .expect("should be able to advance state to slot"); @@ -400,8 +446,8 @@ where let domain = self.spec.get_domain( epoch, Domain::Randao, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), ); let message = epoch.signing_root(domain); let sk = &self.validator_keypairs[proposer_index].sk; @@ -415,14 +461,68 @@ where let signed_block = block.sign( &self.validator_keypairs[proposer_index].sk, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), &self.spec, ); (signed_block, state) } + /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after + /// caches are built but before the generated block is processed. + pub fn make_block_return_pre_state( + &self, + mut state: BeaconState, + slot: Slot, + ) -> (SignedBeaconBlock, BeaconState) { + assert_ne!(slot, 0, "can't produce a block at slot 0"); + assert!(slot >= state.slot()); + + complete_state_advance(&mut state, None, slot, &self.spec) + .expect("should be able to advance state to slot"); + + state + .build_all_caches(&self.spec) + .expect("should build caches"); + + let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); + + // If we produce two blocks for the same slot, they hash up to the same value and + // BeaconChain errors out with `BlockIsAlreadyKnown`. 
Vary the graffiti so that we produce + // different blocks each time. + let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); + + let randao_reveal = { + let epoch = slot.epoch(E::slots_per_epoch()); + let domain = self.spec.get_domain( + epoch, + Domain::Randao, + &state.fork(), + state.genesis_validators_root(), + ); + let message = epoch.signing_root(domain); + let sk = &self.validator_keypairs[proposer_index].sk; + sk.sign(message) + }; + + let pre_state = state.clone(); + + let (block, state) = self + .chain + .produce_block_on_state(state, None, slot, randao_reveal, Some(graffiti)) + .unwrap(); + + let signed_block = block.sign( + &self.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + + (signed_block, pre_state) + } + /// A list of attestations for each committee for the given slot. /// /// The first layer of the Vec is organised per committee. For example, if the return value is @@ -436,7 +536,7 @@ where head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, ) -> Vec, SubnetId)>> { - let committee_count = state.get_committee_count_at_slot(state.slot).unwrap(); + let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); state .get_beacon_committees_at_slot(attestation_slot) @@ -467,8 +567,8 @@ where let domain = self.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), ); let message = attestation.data.signing_root(domain); @@ -540,65 +640,71 @@ where slot, ); - let aggregated_attestations: Vec>> = unaggregated_attestations - .iter() - .map(|committee_attestations| { - // If there are any attestations in this committee, create an aggregate. 
- if let Some((attestation, _)) = committee_attestations.first() { - let bc = state.get_beacon_committee(attestation.data.slot, attestation.data.index) - .unwrap(); - - let aggregator_index = bc.committee - .iter() - .find(|&validator_index| { - if !attesting_validators.contains(validator_index) { - return false - } - - let selection_proof = SelectionProof::new::( - state.slot, - &self.validator_keypairs[*validator_index].sk, - &state.fork, - state.genesis_validators_root, - &self.spec, - ); + let aggregated_attestations: Vec>> = + unaggregated_attestations + .iter() + .map(|committee_attestations| { + // If there are any attestations in this committee, create an aggregate. + if let Some((attestation, _)) = committee_attestations.first() { + let bc = state + .get_beacon_committee(attestation.data.slot, attestation.data.index) + .unwrap(); - selection_proof.is_aggregator(bc.committee.len(), &self.spec).unwrap_or(false) - }) - .copied() - .unwrap_or_else(|| panic!( - "Committee {} at slot {} with {} attesting validators does not have any aggregators", - bc.index, state.slot, bc.committee.len() - )); - - // If the chain is able to produce an aggregate, use that. Otherwise, build an - // aggregate locally. - let aggregate = self - .chain - .get_aggregated_attestation(&attestation.data) - .unwrap_or_else(|| { - committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| { - agg.aggregate(att); - agg + // Find an aggregator if one exists. Return `None` if there are no + // aggregators. 
+ let aggregator_index = bc + .committee + .iter() + .find(|&validator_index| { + if !attesting_validators.contains(validator_index) { + return false; + } + + let selection_proof = SelectionProof::new::( + state.slot(), + &self.validator_keypairs[*validator_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + + selection_proof + .is_aggregator(bc.committee.len(), &self.spec) + .unwrap_or(false) }) - }); - - let signed_aggregate = SignedAggregateAndProof::from_aggregate( - aggregator_index as u64, - aggregate, - None, - &self.validator_keypairs[aggregator_index].sk, - &state.fork, - state.genesis_validators_root, - &self.spec, - ); + .copied()?; - Some(signed_aggregate) - } - else { - None - } - }).collect(); + // If the chain is able to produce an aggregate, use that. Otherwise, build an + // aggregate locally. + let aggregate = self + .chain + .get_aggregated_attestation(&attestation.data) + .unwrap_or_else(|| { + committee_attestations.iter().skip(1).fold( + attestation.clone(), + |mut agg, (att, _)| { + agg.aggregate(att); + agg + }, + ) + }); + + let signed_aggregate = SignedAggregateAndProof::from_aggregate( + aggregator_index as u64, + aggregate, + None, + &self.validator_keypairs[aggregator_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + + Some(signed_aggregate) + } else { + None + } + }) + .collect(); unaggregated_attestations .into_iter() @@ -653,12 +759,70 @@ where } } + pub fn make_attester_slashing_different_indices( + &self, + validator_indices_1: Vec, + validator_indices_2: Vec, + ) -> AttesterSlashing { + let data = AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + target: Checkpoint { + root: Hash256::zero(), + epoch: Epoch::new(0), + }, + source: Checkpoint { + root: Hash256::zero(), + epoch: Epoch::new(0), + }, + }; + + let mut attestation_1 = IndexedAttestation { + attesting_indices: VariableList::new(validator_indices_1).unwrap(), + data: 
data.clone(), + signature: AggregateSignature::infinity(), + }; + + let mut attestation_2 = IndexedAttestation { + attesting_indices: VariableList::new(validator_indices_2).unwrap(), + data, + signature: AggregateSignature::infinity(), + }; + + attestation_2.data.index += 1; + + for attestation in &mut [&mut attestation_1, &mut attestation_2] { + for &i in &attestation.attesting_indices { + let sk = &self.validator_keypairs[i as usize].sk; + + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); + + attestation.signature.add_assign(&sk.sign(message)); + } + } + + AttesterSlashing { + attestation_1, + attestation_2, + } + } + pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { let mut block_header_1 = self .chain .head_beacon_block() .unwrap() - .message + .message() .block_header(); block_header_1.proposer_index = validator_index; @@ -694,6 +858,116 @@ where .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } + pub fn add_voluntary_exit( + &self, + block: &mut BeaconBlock, + validator_index: u64, + epoch: Epoch, + ) { + let exit = self.make_voluntary_exit(validator_index, epoch); + block.body_mut().voluntary_exits_mut().push(exit).unwrap(); + } + + /// Create a new block, apply `block_modifier` to it, sign it and return it. + /// + /// The state returned is a pre-block state at the same slot as the produced block. 
+ pub fn make_block_with_modifier( + &self, + state: BeaconState, + slot: Slot, + block_modifier: impl FnOnce(&mut BeaconBlock), + ) -> (SignedBeaconBlock, BeaconState) { + assert_ne!(slot, 0, "can't produce a block at slot 0"); + assert!(slot >= state.slot()); + + let (block, state) = self.make_block_return_pre_state(state, slot); + let (mut block, _) = block.deconstruct(); + + block_modifier(&mut block); + + let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); + + let signed_block = block.sign( + &self.validator_keypairs[proposer_index as usize].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ); + (signed_block, state) + } + + pub fn make_deposits<'a>( + &self, + state: &'a mut BeaconState, + num_deposits: usize, + invalid_pubkey: Option, + invalid_signature: Option, + ) -> (Vec, &'a mut BeaconState) { + let mut datas = vec![]; + + for _ in 0..num_deposits { + let keypair = Keypair::random(); + let pubkeybytes = PublicKeyBytes::from(keypair.pk.clone()); + + let mut data = DepositData { + pubkey: pubkeybytes, + withdrawal_credentials: Hash256::from_slice( + &get_withdrawal_credentials(&keypair.pk, self.spec.bls_withdrawal_prefix_byte) + [..], + ), + amount: self.spec.min_deposit_amount, + signature: SignatureBytes::empty(), + }; + + data.signature = data.create_signature(&keypair.sk, &self.spec); + + if let Some(invalid_pubkey) = invalid_pubkey { + data.pubkey = invalid_pubkey; + } + if let Some(invalid_signature) = invalid_signature.clone() { + data.signature = invalid_signature; + } + datas.push(data); + } + + // Vector containing all leaves + let leaves = datas + .iter() + .map(|data| data.tree_hash_root()) + .collect::>(); + + // Building a VarList from leaves + let deposit_data_list = VariableList::<_, U4294967296>::from(leaves.clone()); + + // Setting the deposit_root to be the tree_hash_root of the VarList + state.eth1_data_mut().deposit_root = deposit_data_list.tree_hash_root(); + 
state.eth1_data_mut().deposit_count = num_deposits as u64; + *state.eth1_deposit_index_mut() = 0; + + // Building the merkle tree used for generating proofs + let tree = MerkleTree::create(&leaves[..], self.spec.deposit_contract_tree_depth as usize); + + // Building proofs + let mut proofs = vec![]; + for i in 0..leaves.len() { + let (_, mut proof) = + tree.generate_proof(i, self.spec.deposit_contract_tree_depth as usize); + proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64))); + proofs.push(proof); + } + + // Building deposits + let deposits = datas + .into_par_iter() + .zip(proofs.into_par_iter()) + .map(|(data, proof)| (data, proof.into())) + .map(|(data, proof)| Deposit { proof, data }) + .collect::>(); + + // Pushing deposits to block body + (deposits, state) + } + pub fn process_block( &self, slot: Slot, @@ -771,13 +1045,8 @@ where block: &SignedBeaconBlock, validators: &[usize], ) { - let attestations = self.make_attestations( - validators, - &state, - state_root, - block_hash, - block.message.slot, - ); + let attestations = + self.make_attestations(validators, &state, state_root, block_hash, block.slot()); self.process_attestations(attestations); } @@ -932,7 +1201,7 @@ where chain_dump .iter() .cloned() - .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into()) + .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into()) .filter(|block_hash| *block_hash != Hash256::zero().into()) .collect() } diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 015efe0c64e..3022dd5c92e 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -14,7 +14,7 @@ use std::marker::PhantomData; use std::str::Utf8Error; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use types::{ - AttestationData, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Epoch, EthSpec, + AttestationData, 
AttesterSlashing, BeaconBlockRef, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, Slot, VoluntaryExit, }; @@ -237,7 +237,7 @@ impl ValidatorMonitor { pub fn process_valid_state(&mut self, current_epoch: Epoch, state: &BeaconState) { // Add any new validator indices. state - .validators + .validators() .iter() .enumerate() .skip(self.indices.len()) @@ -255,7 +255,7 @@ impl ValidatorMonitor { let i = i as usize; let id = &monitored_validator.id; - if let Some(balance) = state.balances.get(i) { + if let Some(balance) = state.balances().get(i) { metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_BALANCE_GWEI, &[id], @@ -263,7 +263,7 @@ impl ValidatorMonitor { ); } - if let Some(validator) = state.validators.get(i) { + if let Some(validator) = state.validators().get(i) { metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_EFFECTIVE_BALANCE_GWEI, &[id], @@ -473,7 +473,7 @@ impl ValidatorMonitor { pub fn register_gossip_block( &self, seen_timestamp: Duration, - block: &BeaconBlock, + block: BeaconBlockRef<'_, T>, block_root: Hash256, slot_clock: &S, ) { @@ -484,7 +484,7 @@ impl ValidatorMonitor { pub fn register_api_block( &self, seen_timestamp: Duration, - block: &BeaconBlock, + block: BeaconBlockRef<'_, T>, block_root: Hash256, slot_clock: &S, ) { @@ -495,11 +495,11 @@ impl ValidatorMonitor { &self, src: &str, seen_timestamp: Duration, - block: &BeaconBlock, + block: BeaconBlockRef<'_, T>, block_root: Hash256, slot_clock: &S, ) { - if let Some(id) = self.get_validator_id(block.proposer_index) { + if let Some(id) = self.get_validator_id(block.proposer_index()) { let delay = get_block_delay_ms(seen_timestamp, block, slot_clock); metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL, &[src, id]); @@ -514,7 +514,7 @@ impl ValidatorMonitor { "Block from API"; "root" => ?block_root, "delay" => %delay.as_millis(), - "slot" => %block.slot, + "slot" => %block.slot(), "src" => src, 
"validator" => %id, ); @@ -741,11 +741,11 @@ impl ValidatorMonitor { pub fn register_attestation_in_block( &self, indexed_attestation: &IndexedAttestation, - block: &BeaconBlock, + block: BeaconBlockRef<'_, T>, spec: &ChainSpec, ) { let data = &indexed_attestation.data; - let delay = (block.slot - data.slot) - spec.min_attestation_inclusion_delay; + let delay = (block.slot() - data.slot) - spec.min_attestation_inclusion_delay; let epoch = data.slot.epoch(T::slots_per_epoch()); indexed_attestation.attesting_indices.iter().for_each(|i| { @@ -1043,10 +1043,10 @@ fn u64_to_i64(n: impl Into) -> i64 { /// Returns the delay between the start of `block.slot` and `seen_timestamp`. pub fn get_block_delay_ms( seen_timestamp: Duration, - block: &BeaconBlock, + block: BeaconBlockRef<'_, T>, slot_clock: &S, ) -> Duration { - get_slot_delay_ms::(seen_timestamp, block.slot, slot_clock) + get_slot_delay_ms::(seen_timestamp, block.slot(), slot_clock) } /// Returns the delay between the start of `slot` and `seen_timestamp`. diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 86c358fccce..be0ac7b93fc 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -110,9 +110,9 @@ impl ValidatorPubkeyCache { &mut self, state: &BeaconState, ) -> Result<(), BeaconChainError> { - if state.validators.len() > self.pubkeys.len() { + if state.validators().len() > self.pubkeys.len() { self.import( - state.validators[self.pubkeys.len()..] + state.validators()[self.pubkeys.len()..] 
.iter() .map(|v| v.pubkey), ) @@ -316,23 +316,28 @@ fn append_to_file(file: &mut File, index: usize, pubkey: &PublicKeyBytes) -> Res #[cfg(test)] mod test { use super::*; - use crate::test_utils::{test_logger, EphemeralHarnessType}; + use crate::test_utils::{test_logger, BeaconChainHarness, EphemeralHarnessType}; use std::sync::Arc; - use store::HotColdDB; + use store::{HotColdDB, StoreConfig}; use tempfile::tempdir; use types::{ - test_utils::{generate_deterministic_keypair, TestingBeaconStateBuilder}, - BeaconState, EthSpec, Keypair, MainnetEthSpec, + test_utils::generate_deterministic_keypair, BeaconState, EthSpec, Keypair, MainnetEthSpec, }; type E = MainnetEthSpec; type T = EphemeralHarnessType; fn get_state(validator_count: usize) -> (BeaconState, Vec) { - let spec = E::default_spec(); - let builder = - TestingBeaconStateBuilder::from_deterministic_keypairs(validator_count, &spec); - builder.build() + let harness = BeaconChainHarness::new_with_store_config( + MainnetEthSpec, + None, + types::test_utils::generate_deterministic_keypairs(validator_count), + StoreConfig::default(), + ); + + harness.advance_slot(); + + (harness.get_current_state(), harness.validator_keypairs) } fn get_store() -> BeaconStore { diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index e00a04395dc..38877c99ded 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -3,10 +3,8 @@ #[macro_use] extern crate lazy_static; -use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}, - StateSkipConfig, WhenSlotSkipped, -}; +use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; +use beacon_chain::{StateSkipConfig, WhenSlotSkipped}; use store::config::StoreConfig; use tree_hash::TreeHash; use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, 
Slot}; @@ -29,6 +27,7 @@ fn produces_attestations() { let harness = BeaconChainHarness::new_with_store_config( MainnetEthSpec, + None, KEYPAIRS[..].to_vec(), StoreConfig::default(), ); @@ -63,12 +62,12 @@ fn produces_attestations() { .block_at_slot(block_slot, WhenSlotSkipped::Prev) .expect("should get block") .expect("block should not be skipped"); - let block_root = block.message.tree_hash_root(); + let block_root = block.message().tree_hash_root(); let epoch_boundary_slot = state .current_epoch() .start_slot(MainnetEthSpec::slots_per_epoch()); - let target_root = if state.slot == epoch_boundary_slot { + let target_root = if state.slot() == epoch_boundary_slot { block_root } else { *state @@ -116,11 +115,13 @@ fn produces_attestations() { assert_eq!(data.slot, slot, "bad slot"); assert_eq!(data.beacon_block_root, block_root, "bad block root"); assert_eq!( - data.source, state.current_justified_checkpoint, + data.source, + state.current_justified_checkpoint(), "bad source" ); assert_eq!( - data.source, state.current_justified_checkpoint, + data.source, + state.current_justified_checkpoint(), "bad source" ); assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch"); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 9ee351faa6e..2d5b0c81fa5 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -17,7 +17,7 @@ use tree_hash::TreeHash; use types::{ test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, BeaconStateError, BitList, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, - SignedAggregateAndProof, SignedBeaconBlock, SubnetId, Unsigned, + SignedAggregateAndProof, SubnetId, Unsigned, }; pub type E = MainnetEthSpec; @@ -35,6 +35,7 @@ lazy_static! 
{ fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::new_with_target_aggregators( MainnetEthSpec, + None, KEYPAIRS[0..validator_count].to_vec(), // A kind-of arbitrary number that ensures that _some_ validators are aggregators, but // not all. @@ -75,7 +76,7 @@ fn get_valid_unaggregated_attestation( .sign( &validator_sk, validator_committee_index, - &head.beacon_state.fork, + &head.beacon_state.fork(), chain.genesis_validators_root, &chain.spec, ) @@ -120,7 +121,7 @@ fn get_valid_aggregated_attestation( let proof = SelectionProof::new::( aggregate.data.slot, &aggregator_sk, - &state.fork, + &state.fork(), chain.genesis_validators_root, &chain.spec, ); @@ -138,7 +139,7 @@ fn get_valid_aggregated_attestation( aggregate, None, &aggregator_sk, - &state.fork, + &state.fork(), chain.genesis_validators_root, &chain.spec, ); @@ -169,7 +170,7 @@ fn get_non_aggregator( let proof = SelectionProof::new::( aggregate.data.slot, &aggregator_sk, - &state.fork, + &state.fork(), chain.genesis_validators_root, &chain.spec, ); @@ -922,7 +923,7 @@ fn attestation_that_skips_epochs() { .expect("should not error getting state") .expect("should find state"); - while state.slot < current_slot { + while state.slot() < current_slot { per_slot_processing(&mut state, None, &harness.spec).expect("should process slot"); } @@ -946,11 +947,11 @@ fn attestation_that_skips_epochs() { let block_slot = harness .chain .store - .get_item::>(&block_root) + .get_block(&block_root) .expect("should not error getting block") .expect("should find attestation block") - .message - .slot; + .message() + .slot(); assert!( attestation.data.slot - block_slot > E::slots_per_epoch() * 2, diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index d5378d627bc..446b19195ab 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -3,20 
+3,19 @@ #[macro_use] extern crate lazy_static; -use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, - BeaconSnapshot, BlockError, +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; +use beacon_chain::{BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult}; use slasher::{Config as SlasherConfig, Slasher}; +use state_processing::{ + per_block_processing::{per_block_processing, BlockSignatureStrategy}, + per_slot_processing, BlockProcessingError, +}; use std::sync::Arc; use store::config::StoreConfig; use tempfile::tempdir; -use types::{ - test_utils::generate_deterministic_keypair, AggregateSignature, AttestationData, - AttesterSlashing, Checkpoint, Deposit, DepositData, Epoch, EthSpec, Hash256, - IndexedAttestation, Keypair, MainnetEthSpec, ProposerSlashing, Signature, SignedBeaconBlock, - SignedBeaconBlockHeader, SignedVoluntaryExit, Slot, VoluntaryExit, DEPOSIT_TREE_DEPTH, -}; +use types::{test_utils::generate_deterministic_keypair, *}; type E = MainnetEthSpec; @@ -54,6 +53,7 @@ fn get_chain_segment() -> Vec> { fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::new_with_store_config( MainnetEthSpec, + None, KEYPAIRS[0..validator_count].to_vec(), StoreConfig::default(), ); @@ -98,10 +98,11 @@ fn update_proposal_signatures( .get(proposer_index) .expect("proposer keypair should be available"); - snapshot.beacon_block = snapshot.beacon_block.message.clone().sign( + let (block, _) = snapshot.beacon_block.clone().deconstruct(); + snapshot.beacon_block = block.sign( &keypair.sk, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), spec, ); } @@ -111,7 +112,9 @@ fn update_parent_roots(snapshots: &mut [BeaconSnapshot]) { for i in 0..snapshots.len() { let root = snapshots[i].beacon_block.canonical_root(); if let Some(child) = snapshots.get_mut(i + 1) 
{ - child.beacon_block.message.parent_root = root + let (mut block, signature) = child.beacon_block.clone().deconstruct(); + *block.parent_root_mut() = root; + child.beacon_block = SignedBeaconBlock::from_block(block, signature) } } } @@ -168,10 +171,7 @@ fn chain_segment_varying_chunk_size() { .chain .process_chain_segment(chunk.to_vec()) .into_block_error() - .expect(&format!( - "should import chain segment of len {}", - chunk_size - )); + .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); } harness.chain.fork_choice().expect("should run fork choice"); @@ -206,7 +206,7 @@ fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks.clone()) + .process_chain_segment(blocks) .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -217,13 +217,15 @@ fn chain_segment_non_linear_parent_roots() { * Test with a modified parent root. */ let mut blocks = chain_segment_blocks(); - blocks[3].message.parent_root = Hash256::zero(); + let (mut block, signature) = blocks[3].clone().deconstruct(); + *block.parent_root_mut() = Hash256::zero(); + blocks[3] = SignedBeaconBlock::from_block(block, signature); assert!( matches!( harness .chain - .process_chain_segment(blocks.clone()) + .process_chain_segment(blocks) .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -244,13 +246,15 @@ fn chain_segment_non_linear_slots() { */ let mut blocks = chain_segment_blocks(); - blocks[3].message.slot = Slot::new(0); + let (mut block, signature) = blocks[3].clone().deconstruct(); + *block.slot_mut() = Slot::new(0); + blocks[3] = SignedBeaconBlock::from_block(block, signature); assert!( matches!( harness .chain - .process_chain_segment(blocks.clone()) + .process_chain_segment(blocks) .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -262,13 +266,15 @@ fn chain_segment_non_linear_slots() { */ let mut blocks = chain_segment_blocks(); - blocks[3].message.slot = blocks[2].message.slot; + let (mut 
block, signature) = blocks[3].clone().deconstruct(); + *block.slot_mut() = blocks[2].slot(); + blocks[3] = SignedBeaconBlock::from_block(block, signature); assert!( matches!( harness .chain - .process_chain_segment(blocks.clone()) + .process_chain_segment(blocks) .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -342,7 +348,9 @@ fn invalid_signature_gossip_block() { // Ensure the block will be rejected if imported on its own (without gossip checking). let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); - snapshots[block_index].beacon_block.signature = junk_signature(); + let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); + snapshots[block_index].beacon_block = + SignedBeaconBlock::from_block(block.clone(), junk_signature()); // Import all the ancestors before the `block_index` block. let ancestor_blocks = CHAIN_SEGMENT .iter() @@ -358,7 +366,7 @@ fn invalid_signature_gossip_block() { matches!( harness .chain - .process_block(snapshots[block_index].beacon_block.clone()), + .process_block(SignedBeaconBlock::from_block(block, junk_signature())), Err(BlockError::InvalidSignature) ), "should not import individual block with an invalid gossip signature", @@ -371,7 +379,9 @@ fn invalid_signature_block_proposal() { for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); - snapshots[block_index].beacon_block.signature = junk_signature(); + let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); + snapshots[block_index].beacon_block = + SignedBeaconBlock::from_block(block.clone(), junk_signature()); let blocks = snapshots .iter() .map(|snapshot| snapshot.beacon_block.clone()) @@ -395,11 +405,9 @@ fn invalid_signature_randao_reveal() { for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); - snapshots[block_index] - .beacon_block - .message - .body - 
.randao_reveal = junk_signature(); + let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + *block.body_mut().randao_reveal_mut() = junk_signature(); + snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature(&harness, block_index, &snapshots, "randao"); @@ -411,23 +419,23 @@ fn invalid_signature_proposer_slashing() { for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); + let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); let proposer_slashing = ProposerSlashing { signed_header_1: SignedBeaconBlockHeader { - message: snapshots[block_index].beacon_block.message.block_header(), + message: block.block_header(), signature: junk_signature(), }, signed_header_2: SignedBeaconBlockHeader { - message: snapshots[block_index].beacon_block.message.block_header(), + message: block.block_header(), signature: junk_signature(), }, }; - snapshots[block_index] - .beacon_block - .message - .body - .proposer_slashings + block + .body_mut() + .proposer_slashings_mut() .push(proposer_slashing) .expect("should update proposer slashing"); + snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature(&harness, block_index, &snapshots, "proposer slashing"); @@ -460,13 +468,13 @@ fn invalid_signature_attester_slashing() { attestation_1: indexed_attestation.clone(), attestation_2: indexed_attestation, }; - snapshots[block_index] - .beacon_block - .message - .body - .attester_slashings + let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + block + .body_mut() + .attester_slashings_mut() .push(attester_slashing) .expect("should update 
attester slashing"); + snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature(&harness, block_index, &snapshots, "attester slashing"); @@ -480,14 +488,10 @@ fn invalid_signature_attestation() { for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); - if let Some(attestation) = snapshots[block_index] - .beacon_block - .message - .body - .attestations - .get_mut(0) - { + let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + if let Some(attestation) = block.body_mut().attestations_mut().get_mut(0) { attestation.signature = junk_aggregate_signature(); + snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature(&harness, block_index, &snapshots, "attestation"); @@ -516,13 +520,13 @@ fn invalid_signature_deposit() { signature: junk_signature().into(), }, }; - snapshots[block_index] - .beacon_block - .message - .body - .deposits + let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + block + .body_mut() + .deposits_mut() .push(deposit) .expect("should update deposit"); + snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); let blocks = snapshots @@ -548,11 +552,10 @@ fn invalid_signature_exit() { let harness = get_invalid_sigs_harness(); let mut snapshots = CHAIN_SEGMENT.clone(); let epoch = snapshots[block_index].beacon_state.current_epoch(); - snapshots[block_index] - .beacon_block - .message - .body - .voluntary_exits + let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + block + 
.body_mut() + .voluntary_exits_mut() .push(SignedVoluntaryExit { message: VoluntaryExit { epoch, @@ -561,6 +564,7 @@ fn invalid_signature_exit() { signature: junk_signature(), }) .expect("should update deposit"); + snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); assert_invalid_signature(&harness, block_index, &snapshots, "voluntary exit"); @@ -608,12 +612,15 @@ fn block_gossip_verification() { * future blocks for processing at the appropriate slot). */ - let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); - let expected_block_slot = block.message.slot + 1; - block.message.slot = expected_block_slot; + let (mut block, signature) = CHAIN_SEGMENT[block_index] + .beacon_block + .clone() + .deconstruct(); + let expected_block_slot = block.slot() + 1; + *block.slot_mut() = expected_block_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block)), + unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), BlockError::FutureSlot { present_slot, block_slot, @@ -635,7 +642,10 @@ fn block_gossip_verification() { * nodes, etc). 
*/ - let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let (mut block, signature) = CHAIN_SEGMENT[block_index] + .beacon_block + .clone() + .deconstruct(); let expected_finalized_slot = harness .chain .head_info() @@ -643,10 +653,10 @@ fn block_gossip_verification() { .finalized_checkpoint .epoch .start_slot(E::slots_per_epoch()); - block.message.slot = expected_finalized_slot; + *block.slot_mut() = expected_finalized_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block)), + unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), BlockError::WouldRevertFinalizedSlot { block_slot, finalized_slot, @@ -665,11 +675,21 @@ fn block_gossip_verification() { * proposer_index pubkey. */ - let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); - block.signature = junk_signature(); + let block = CHAIN_SEGMENT[block_index] + .beacon_block + .clone() + .deconstruct() + .0; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block)), + unwrap_err( + harness + .chain + .verify_block_for_gossip(SignedBeaconBlock::from_block( + block, + junk_signature() + )) + ), BlockError::ProposalSignatureInvalid ), "should not import a block with an invalid proposal signature" @@ -683,12 +703,15 @@ fn block_gossip_verification() { * The block's parent (defined by block.parent_root) passes validation. 
*/ - let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let (mut block, signature) = CHAIN_SEGMENT[block_index] + .beacon_block + .clone() + .deconstruct(); let parent_root = Hash256::from_low_u64_be(42); - block.message.parent_root = parent_root; + *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block)), + unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), BlockError::ParentUnknown(block) if block.parent_root() == parent_root ), @@ -705,12 +728,15 @@ fn block_gossip_verification() { * store.finalized_checkpoint.root */ - let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let (mut block, signature) = CHAIN_SEGMENT[block_index] + .beacon_block + .clone() + .deconstruct(); let parent_root = CHAIN_SEGMENT[0].beacon_block_root; - block.message.parent_root = parent_root; + *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block)), + unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), BlockError::NotFinalizedDescendant { block_parent_root } if block_parent_root == parent_root ), @@ -728,14 +754,18 @@ fn block_gossip_verification() { * processing while proposers for the block's branch are calculated. 
*/ - let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); - let expected_proposer = block.message.proposer_index; + let mut block = CHAIN_SEGMENT[block_index] + .beacon_block + .clone() + .deconstruct() + .0; + let expected_proposer = block.proposer_index(); let other_proposer = (0..VALIDATOR_COUNT as u64) .into_iter() - .find(|i| *i != block.message.proposer_index) + .find(|i| *i != block.proposer_index()) .expect("there must be more than one validator in this test"); - block.message.proposer_index = other_proposer; - let block = block.message.clone().sign( + *block.proposer_index_mut() = other_proposer; + let block = block.sign( &generate_deterministic_keypair(other_proposer as usize).sk, &harness.chain.head_info().unwrap().fork, harness.chain.genesis_validators_root, @@ -760,7 +790,7 @@ fn block_gossip_verification() { proposer, slot, } - if proposer == other_proposer && slot == block.message.slot + if proposer == other_proposer && slot == block.message().slot() ), "should register any valid signature against the proposer, even if the block failed later verification" ); @@ -792,7 +822,7 @@ fn block_gossip_verification() { proposer, slot, } - if proposer == block.message.proposer_index && slot == block.message.slot + if proposer == block.message().proposer_index() && slot == block.message().slot() ), "the second proposal by this validator should be rejected" ); @@ -829,3 +859,245 @@ fn verify_block_for_gossip_slashing_detection() { drop(slasher); slasher_dir.close().unwrap(); } + +#[test] +fn add_base_block_to_altair_chain() { + let mut spec = MainnetEthSpec::default_spec(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + + // The Altair fork happens at epoch 1. + spec.altair_fork_epoch = Some(Epoch::new(1)); + + let harness = BeaconChainHarness::new_with_chain_config( + MainnetEthSpec, + Some(spec), + KEYPAIRS[..].to_vec(), + 1 << 32, + StoreConfig::default(), + ChainConfig::default(), + ); + + // Move out of the genesis slot. 
+ harness.advance_slot(); + + // Build out all the blocks in epoch 0. + harness.extend_chain( + slots_per_epoch as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + // Move into the next empty slot. + harness.advance_slot(); + + // Produce an Altair block. + let state = harness.get_current_state(); + let slot = harness.get_current_slot(); + let (altair_signed_block, _) = harness.make_block(state.clone(), slot); + let altair_block = &altair_signed_block + .as_altair() + .expect("test expects an altair block") + .message; + let altair_body = &altair_block.body; + + // Create a Base-equivalent of `altair_block`. + let base_block = SignedBeaconBlock::Base(SignedBeaconBlockBase { + message: BeaconBlockBase { + slot: altair_block.slot, + proposer_index: altair_block.proposer_index, + parent_root: altair_block.parent_root, + state_root: altair_block.state_root, + body: BeaconBlockBodyBase { + randao_reveal: altair_body.randao_reveal.clone(), + eth1_data: altair_body.eth1_data.clone(), + graffiti: altair_body.graffiti, + proposer_slashings: altair_body.proposer_slashings.clone(), + attester_slashings: altair_body.attester_slashings.clone(), + attestations: altair_body.attestations.clone(), + deposits: altair_body.deposits.clone(), + voluntary_exits: altair_body.voluntary_exits.clone(), + }, + }, + signature: Signature::empty(), + }); + + // Ensure that it would be impossible to apply this block to `per_block_processing`. + { + let mut state = state; + per_slot_processing(&mut state, None, &harness.chain.spec).unwrap(); + assert!(matches!( + per_block_processing( + &mut state, + &base_block, + None, + BlockSignatureStrategy::NoVerification, + &harness.chain.spec, + ), + Err(BlockProcessingError::InconsistentBlockFork( + InconsistentFork { + fork_at_slot: ForkName::Altair, + object_fork: ForkName::Base, + } + )) + )); + } + + // Ensure that it would be impossible to verify this block for gossip. 
 + assert!(matches!( + harness + .chain + .verify_block_for_gossip(base_block.clone()) + .err() + .expect("should error when processing base block"), + BlockError::InconsistentFork(InconsistentFork { + fork_at_slot: ForkName::Altair, + object_fork: ForkName::Base, + }) + )); + + // Ensure that it would be impossible to import via `BeaconChain::process_block`. + assert!(matches!( + harness + .chain + .process_block(base_block.clone()) + .err() + .expect("should error when processing base block"), + BlockError::InconsistentFork(InconsistentFork { + fork_at_slot: ForkName::Altair, + object_fork: ForkName::Base, + }) + )); + + // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. + assert!(matches!( + harness.chain.process_chain_segment(vec![base_block]), + ChainSegmentResult::Failed { + imported_blocks: 0, + error: BlockError::InconsistentFork(InconsistentFork { + fork_at_slot: ForkName::Altair, + object_fork: ForkName::Base, + }) + } + )); +} + +#[test] +fn add_altair_block_to_base_chain() { + let mut spec = MainnetEthSpec::default_spec(); + + // Altair never happens. + spec.altair_fork_epoch = None; + + let harness = BeaconChainHarness::new_with_chain_config( + MainnetEthSpec, + Some(spec), + KEYPAIRS[..].to_vec(), + 1 << 32, + StoreConfig::default(), + ChainConfig::default(), + ); + + // Move out of the genesis slot. + harness.advance_slot(); + + // Build one block. + harness.extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + // Move into the next empty slot. + harness.advance_slot(); + + // Produce a base block. + let state = harness.get_current_state(); + let slot = harness.get_current_slot(); + let (base_signed_block, _) = harness.make_block(state.clone(), slot); + let base_block = &base_signed_block + .as_base() + .expect("test expects a base block") + .message; + let base_body = &base_block.body; + + // Create an Altair-equivalent of `base_block`. 
+ let altair_block = SignedBeaconBlock::Altair(SignedBeaconBlockAltair { + message: BeaconBlockAltair { + slot: base_block.slot, + proposer_index: base_block.proposer_index, + parent_root: base_block.parent_root, + state_root: base_block.state_root, + body: BeaconBlockBodyAltair { + randao_reveal: base_body.randao_reveal.clone(), + eth1_data: base_body.eth1_data.clone(), + graffiti: base_body.graffiti, + proposer_slashings: base_body.proposer_slashings.clone(), + attester_slashings: base_body.attester_slashings.clone(), + attestations: base_body.attestations.clone(), + deposits: base_body.deposits.clone(), + voluntary_exits: base_body.voluntary_exits.clone(), + sync_aggregate: SyncAggregate::empty(), + }, + }, + signature: Signature::empty(), + }); + + // Ensure that it would be impossible to apply this block to `per_block_processing`. + { + let mut state = state; + per_slot_processing(&mut state, None, &harness.chain.spec).unwrap(); + assert!(matches!( + per_block_processing( + &mut state, + &altair_block, + None, + BlockSignatureStrategy::NoVerification, + &harness.chain.spec, + ), + Err(BlockProcessingError::InconsistentBlockFork( + InconsistentFork { + fork_at_slot: ForkName::Base, + object_fork: ForkName::Altair, + } + )) + )); + } + + // Ensure that it would be impossible to verify this block for gossip. + assert!(matches!( + harness + .chain + .verify_block_for_gossip(altair_block.clone()) + .err() + .expect("should error when processing altair block"), + BlockError::InconsistentFork(InconsistentFork { + fork_at_slot: ForkName::Base, + object_fork: ForkName::Altair, + }) + )); + + // Ensure that it would be impossible to import via `BeaconChain::process_block`. 
+ assert!(matches!( + harness + .chain + .process_block(altair_block.clone()) + .err() + .expect("should error when processing altair block"), + BlockError::InconsistentFork(InconsistentFork { + fork_at_slot: ForkName::Base, + object_fork: ForkName::Altair, + }) + )); + + // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. + assert!(matches!( + harness.chain.process_chain_segment(vec![altair_block]), + ChainSegmentResult::Failed { + imported_blocks: 0, + error: BlockError::InconsistentFork(InconsistentFork { + fork_at_slot: ForkName::Base, + object_fork: ForkName::Altair, + }) + } + )); +} diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 8d86d01ce63..34ee8483b4e 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -7,16 +7,12 @@ extern crate lazy_static; use beacon_chain::observed_operations::ObservationOutcome; use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, + test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; use store::{LevelDB, StoreConfig}; use tempfile::{tempdir, TempDir}; -use types::test_utils::{ - AttesterSlashingTestTask, ProposerSlashingTestTask, TestingAttesterSlashingBuilder, - TestingProposerSlashingBuilder, TestingVoluntaryExitBuilder, -}; use types::*; pub const VALIDATOR_COUNT: usize = 24; @@ -32,7 +28,7 @@ type TestHarness = BeaconChainHarness>; type HotColdDB = store::HotColdDB, LevelDB>; fn get_store(db_path: &TempDir) -> Arc { - let spec = E::default_spec(); + let spec = test_spec::(); let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); let config = StoreConfig::default(); @@ -44,6 +40,7 @@ fn get_store(db_path: &TempDir) -> Arc { fn get_harness(store: Arc, 
validator_count: usize) -> TestHarness { let harness = BeaconChainHarness::new_with_disk_store( MinimalEthSpec, + None, store, KEYPAIRS[0..validator_count].to_vec(), ); @@ -64,21 +61,13 @@ fn voluntary_exit() { AttestationStrategy::AllValidators, ); - let head_info = harness.chain.head_info().unwrap(); - - let make_exit = |validator_index: usize, exit_epoch: u64| { - TestingVoluntaryExitBuilder::new(Epoch::new(exit_epoch), validator_index as u64).build( - &KEYPAIRS[validator_index].sk, - &head_info.fork, - head_info.genesis_validators_root, - spec, - ) - }; - let validator_index1 = VALIDATOR_COUNT - 1; let validator_index2 = VALIDATOR_COUNT - 2; - let exit1 = make_exit(validator_index1, spec.shard_committee_period); + let exit1 = harness.make_voluntary_exit( + validator_index1 as u64, + Epoch::new(spec.shard_committee_period), + ); // First verification should show it to be fresh. assert!(matches!( @@ -98,14 +87,20 @@ fn voluntary_exit() { )); // A different exit for the same validator should also be detected as a duplicate. - let exit2 = make_exit(validator_index1, spec.shard_committee_period + 1); + let exit2 = harness.make_voluntary_exit( + validator_index1 as u64, + Epoch::new(spec.shard_committee_period + 1), + ); assert!(matches!( harness.chain.verify_voluntary_exit_for_gossip(exit2), Ok(ObservationOutcome::AlreadyKnown) )); // Exit for a different validator should be fine. 
- let exit3 = make_exit(validator_index2, spec.shard_committee_period); + let exit3 = harness.make_voluntary_exit( + validator_index2 as u64, + Epoch::new(spec.shard_committee_period), + ); assert!(matches!( harness .chain @@ -120,25 +115,11 @@ fn proposer_slashing() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), VALIDATOR_COUNT); - let spec = &harness.chain.spec; - - let head_info = harness.chain.head_info().unwrap(); let validator_index1 = VALIDATOR_COUNT - 1; let validator_index2 = VALIDATOR_COUNT - 2; - let make_slashing = |validator_index: usize| { - TestingProposerSlashingBuilder::double_vote::( - ProposerSlashingTestTask::Valid, - validator_index as u64, - &KEYPAIRS[validator_index].sk, - &head_info.fork, - head_info.genesis_validators_root, - spec, - ) - }; - - let slashing1 = make_slashing(validator_index1); + let slashing1 = harness.make_proposer_slashing(validator_index1 as u64); // First slashing for this proposer should be allowed. 
assert!(matches!( @@ -171,7 +152,7 @@ fn proposer_slashing() { )); // Proposer slashing for a different index should be accepted - let slashing3 = make_slashing(validator_index2); + let slashing3 = harness.make_proposer_slashing(validator_index2 as u64); assert!(matches!( harness .chain @@ -186,9 +167,6 @@ fn attester_slashing() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), VALIDATOR_COUNT); - let spec = &harness.chain.spec; - - let head_info = harness.chain.head_info().unwrap(); // First third of the validators let first_third = (0..VALIDATOR_COUNT as u64 / 3).collect::>(); @@ -199,25 +177,8 @@ fn attester_slashing() { // Last half of the validators let second_half = (VALIDATOR_COUNT as u64 / 2..VALIDATOR_COUNT as u64).collect::>(); - let signer = |idx: u64, message: &[u8]| { - KEYPAIRS[idx as usize] - .sk - .sign(Hash256::from_slice(&message)) - }; - - let make_slashing = |validators| { - TestingAttesterSlashingBuilder::double_vote::<_, E>( - AttesterSlashingTestTask::Valid, - validators, - signer, - &head_info.fork, - head_info.genesis_validators_root, - spec, - ) - }; - // Slashing for first third of validators should be accepted. - let slashing1 = make_slashing(&first_third); + let slashing1 = harness.make_attester_slashing(first_third); assert!(matches!( harness .chain @@ -227,7 +188,7 @@ fn attester_slashing() { )); // Overlapping slashing for first half of validators should also be accepted. 
- let slashing2 = make_slashing(&first_half); + let slashing2 = harness.make_attester_slashing(first_half); assert!(matches!( harness .chain @@ -253,7 +214,7 @@ fn attester_slashing() { )); // Slashing for last half of validators should be accepted (distinct from all existing) - let slashing3 = make_slashing(&second_half); + let slashing3 = harness.make_attester_slashing(second_half); assert!(matches!( harness .chain @@ -262,7 +223,7 @@ fn attester_slashing() { ObservationOutcome::New(_) )); // Slashing for last third (contained in last half) should be rejected. - let slashing4 = make_slashing(&last_third); + let slashing4 = harness.make_attester_slashing(last_third); assert!(matches!( harness .chain diff --git a/beacon_node/beacon_chain/tests/persistence_tests.rs b/beacon_node/beacon_chain/tests/persistence_tests.rs deleted file mode 100644 index 0f5aa8a6b67..00000000000 --- a/beacon_node/beacon_chain/tests/persistence_tests.rs +++ /dev/null @@ -1,161 +0,0 @@ -#![cfg(not(debug_assertions))] - -#[macro_use] -extern crate lazy_static; - -use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}, - BeaconChain, BeaconChainTypes, -}; -use sloggers::{null::NullLoggerBuilder, Build}; -use std::sync::Arc; -use store::{HotColdDB, LevelDB, StoreConfig}; -use tempfile::{tempdir, TempDir}; -use types::{EthSpec, Keypair, MinimalEthSpec}; - -type E = MinimalEthSpec; - -// Should ideally be divisible by 3. -pub const VALIDATOR_COUNT: usize = 24; - -lazy_static! { - /// A cached set of keys. 
- static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); -} - -fn get_store(db_path: &TempDir) -> Arc, LevelDB>> { - let spec = E::default_spec(); - let hot_path = db_path.path().join("hot_db"); - let cold_path = db_path.path().join("cold_db"); - let config = StoreConfig::default(); - let log = NullLoggerBuilder.build().expect("logger should build"); - HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log) - .expect("disk store should initialize") -} - -#[test] -fn finalizes_after_resuming_from_db() { - let validator_count = 16; - let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8; - let first_half = num_blocks_produced / 2; - - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - - let harness = BeaconChainHarness::new_with_disk_store( - MinimalEthSpec, - store.clone(), - KEYPAIRS[0..validator_count].to_vec(), - ); - - harness.advance_slot(); - - harness.extend_chain( - first_half as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); - - assert!( - harness - .chain - .head() - .expect("should read head") - .beacon_state - .finalized_checkpoint - .epoch - > 0, - "the chain should have already finalized" - ); - - let latest_slot = harness.chain.slot().expect("should have a slot"); - - harness - .chain - .persist_head_and_fork_choice() - .expect("should persist the head and fork choice"); - harness - .chain - .persist_op_pool() - .expect("should persist the op pool"); - harness - .chain - .persist_eth1_cache() - .expect("should persist the eth1 cache"); - - let data_dir = harness.data_dir; - let original_chain = harness.chain; - - let resumed_harness = BeaconChainHarness::resume_from_disk_store( - MinimalEthSpec, - store, - KEYPAIRS[0..validator_count].to_vec(), - data_dir, - ); - - assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain); - - // Set the slot clock of the resumed harness to be in the slot following the 
previous harness. - // - // This allows us to produce the block at the next slot. - resumed_harness - .chain - .slot_clock - .set_slot(latest_slot.as_u64() + 1); - - resumed_harness.extend_chain( - (num_blocks_produced - first_half) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); - - let state = &resumed_harness - .chain - .head() - .expect("should read head") - .beacon_state; - assert_eq!( - state.slot, num_blocks_produced, - "head should be at the current slot" - ); - assert_eq!( - state.current_epoch(), - num_blocks_produced / MinimalEthSpec::slots_per_epoch(), - "head should be at the expected epoch" - ); - assert_eq!( - state.current_justified_checkpoint.epoch, - state.current_epoch() - 1, - "the head should be justified one behind the current epoch" - ); - assert_eq!( - state.finalized_checkpoint.epoch, - state.current_epoch() - 2, - "the head should be finalized two behind the current epoch" - ); -} - -/// Checks that two chains are the same, for the purpose of this tests. -/// -/// Several fields that are hard/impossible to check are ignored (e.g., the store). 
-fn assert_chains_pretty_much_the_same(a: &BeaconChain, b: &BeaconChain) { - assert_eq!(a.spec, b.spec, "spec should be equal"); - assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal"); - assert_eq!( - a.head().unwrap(), - b.head().unwrap(), - "head() should be equal" - ); - assert_eq!(a.heads(), b.heads(), "heads() should be equal"); - assert_eq!( - a.genesis_block_root, b.genesis_block_root, - "genesis_block_root should be equal" - ); - - let slot = a.slot().unwrap(); - assert!( - a.fork_choice.write().get_head(slot).unwrap() - == b.fork_choice.write().get_head(slot).unwrap(), - "fork_choice heads should be equal" - ); -} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 73c20723899..4d526e72b19 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2,9 +2,9 @@ use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::test_utils::{ - test_logger, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, + test_logger, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; -use beacon_chain::BeaconSnapshot; +use beacon_chain::{BeaconChain, BeaconChainTypes, BeaconSnapshot}; use lazy_static::lazy_static; use maplit::hashset; use rand::Rng; @@ -34,7 +34,7 @@ type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; fn get_store(db_path: &TempDir) -> Arc, LevelDB>> { - let spec = MinimalEthSpec::default_spec(); + let spec = test_spec::(); let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); let config = StoreConfig::default(); @@ -50,6 +50,7 @@ fn get_harness( ) -> TestHarness { let harness = BeaconChainHarness::new_with_disk_store( MinimalEthSpec, + None, store, KEYPAIRS[0..validator_count].to_vec(), ); @@ -107,7 +108,11 @@ fn randomised_skips() { let state = &harness.chain.head().expect("should get head").beacon_state; - 
assert_eq!(state.slot, num_slots, "head should be at the current slot"); + assert_eq!( + state.slot(), + num_slots, + "head should be at the current slot" + ); check_split_slot(&harness, store); check_chain_dump(&harness, num_blocks_produced + 1); @@ -195,7 +200,7 @@ fn randao_genesis_storage() { .head() .expect("should get head") .beacon_state - .randao_mixes + .randao_mixes() .iter() .find(|x| **x == genesis_value) .is_some()); @@ -212,7 +217,7 @@ fn randao_genesis_storage() { .head() .expect("should get head") .beacon_state - .randao_mixes + .randao_mixes() .iter() .find(|x| **x == genesis_value) .is_none()); @@ -347,8 +352,12 @@ fn delete_blocks_and_states() { let store = get_store(&db_path); let validators_keypairs = types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); - let harness = - BeaconChainHarness::new_with_disk_store(MinimalEthSpec, store.clone(), validators_keypairs); + let harness = BeaconChainHarness::new_with_disk_store( + MinimalEthSpec, + None, + store.clone(), + validators_keypairs, + ); let unforked_blocks: u64 = 4 * E::slots_per_epoch(); @@ -471,7 +480,7 @@ fn multi_epoch_fork_valid_blocks_test( let validators_keypairs = types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT); let harness = - BeaconChainHarness::new_with_disk_store(MinimalEthSpec, store, validators_keypairs); + BeaconChainHarness::new_with_disk_store(MinimalEthSpec, None, store, validators_keypairs); let num_fork1_blocks: u64 = num_fork1_blocks_.try_into().unwrap(); let num_fork2_blocks: u64 = num_fork2_blocks_.try_into().unwrap(); @@ -550,18 +559,21 @@ fn multiple_attestations_per_block() { let head = harness.chain.head().unwrap(); let committees_per_slot = head .beacon_state - .get_committee_count_at_slot(head.beacon_state.slot) + .get_committee_count_at_slot(head.beacon_state.slot()) .unwrap(); assert!(committees_per_slot > 1); for snapshot in harness.chain.chain_dump().unwrap() { + let slot = snapshot.beacon_block.slot(); assert_eq!( - 
snapshot.beacon_block.message.body.attestations.len() as u64, - if snapshot.beacon_block.slot() <= 1 { - 0 - } else { - committees_per_slot - } + snapshot + .beacon_block + .deconstruct() + .0 + .body() + .attestations() + .len() as u64, + if slot <= 1 { 0 } else { committees_per_slot } ); } } @@ -758,7 +770,7 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs); let slots_per_epoch = rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -863,7 +875,7 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs); let slots_per_epoch = rig.slots_per_epoch(); let (state, state_root) = rig.get_current_state_and_root(); @@ -988,7 +1000,7 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs); let slots_per_epoch = 
rig.slots_per_epoch(); let (mut state, state_root) = rig.get_current_state_and_root(); @@ -1078,7 +1090,7 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs); let (state, state_root) = rig.get_current_state_and_root(); // Fill up 0th epoch with canonical chain blocks @@ -1216,7 +1228,7 @@ fn prunes_skipped_slots_states() { let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs); let (state, state_root) = rig.get_current_state_and_root(); let canonical_slots_zeroth_epoch: Vec = @@ -1335,7 +1347,7 @@ fn finalizes_non_epoch_start_slot() { let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); let honest_validators: Vec = (0..HONEST_VALIDATOR_COUNT).collect(); let adversarial_validators: Vec = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect(); - let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs); + let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs); let (state, state_root) = rig.get_current_state_and_root(); let canonical_slots_zeroth_epoch: Vec = @@ -1691,15 +1703,17 @@ fn garbage_collect_temp_states_from_failed_block() { let genesis_state = harness.get_current_state(); let block_slot = Slot::new(2 * slots_per_epoch); - let (mut 
block, state) = harness.make_block(genesis_state, block_slot); + let (signed_block, state) = harness.make_block(genesis_state, block_slot); + + let (mut block, _) = signed_block.deconstruct(); // Mutate the block to make it invalid, and re-sign it. - block.message.state_root = Hash256::repeat_byte(0xff); - let proposer_index = block.message.proposer_index as usize; - let block = block.message.sign( + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = block.sign( &harness.validator_keypairs[proposer_index].sk, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), &harness.spec, ); @@ -1720,12 +1734,143 @@ fn garbage_collect_temp_states_from_failed_block() { assert_eq!(store.iter_temporary_state_roots().count(), 0); } +#[test] +fn finalizes_after_resuming_from_db() { + let validator_count = 16; + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8; + let first_half = num_blocks_produced / 2; + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + + let harness = BeaconChainHarness::new_with_disk_store( + MinimalEthSpec, + None, + store.clone(), + KEYPAIRS[0..validator_count].to_vec(), + ); + + harness.advance_slot(); + + harness.extend_chain( + first_half as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + assert!( + harness + .chain + .head() + .expect("should read head") + .beacon_state + .finalized_checkpoint() + .epoch + > 0, + "the chain should have already finalized" + ); + + let latest_slot = harness.chain.slot().expect("should have a slot"); + + harness + .chain + .persist_head_and_fork_choice() + .expect("should persist the head and fork choice"); + harness + .chain + .persist_op_pool() + .expect("should persist the op pool"); + harness + .chain + .persist_eth1_cache() + .expect("should persist the eth1 cache"); + + let data_dir = harness.data_dir; + let original_chain = 
harness.chain; + + let resumed_harness = BeaconChainHarness::resume_from_disk_store( + MinimalEthSpec, + None, + store, + KEYPAIRS[0..validator_count].to_vec(), + data_dir, + ); + + assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain); + + // Set the slot clock of the resumed harness to be in the slot following the previous harness. + // + // This allows us to produce the block at the next slot. + resumed_harness + .chain + .slot_clock + .set_slot(latest_slot.as_u64() + 1); + + resumed_harness.extend_chain( + (num_blocks_produced - first_half) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let state = &resumed_harness + .chain + .head() + .expect("should read head") + .beacon_state; + assert_eq!( + state.slot(), + num_blocks_produced, + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + num_blocks_produced / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + state.current_epoch() - 1, + "the head should be justified one behind the current epoch" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + state.current_epoch() - 2, + "the head should be finalized two behind the current epoch" + ); +} + +/// Checks that two chains are the same, for the purpose of these tests. +/// +/// Several fields that are hard/impossible to check are ignored (e.g., the store). 
+fn assert_chains_pretty_much_the_same(a: &BeaconChain, b: &BeaconChain) { + assert_eq!(a.spec, b.spec, "spec should be equal"); + assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal"); + assert_eq!( + a.head().unwrap(), + b.head().unwrap(), + "head() should be equal" + ); + assert_eq!(a.heads(), b.heads(), "heads() should be equal"); + assert_eq!( + a.genesis_block_root, b.genesis_block_root, + "genesis_block_root should be equal" + ); + + let slot = a.slot().unwrap(); + assert!( + a.fork_choice.write().get_head(slot).unwrap() + == b.fork_choice.write().get_head(slot).unwrap(), + "fork_choice heads should be equal" + ); +} + /// Check that the head state's slot matches `expected_slot`. fn check_slot(harness: &TestHarness, expected_slot: u64) { let state = &harness.chain.head().expect("should get head").beacon_state; assert_eq!( - state.slot, expected_slot, + state.slot(), + expected_slot, "head should be at the current slot" ); } @@ -1737,12 +1882,12 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { check_slot(harness, expected_slot); assert_eq!( - state.current_justified_checkpoint.epoch, + state.current_justified_checkpoint().epoch, state.current_epoch() - 1, "the head should be justified one behind the current epoch" ); assert_eq!( - state.finalized_checkpoint.epoch, + state.finalized_checkpoint().epoch, state.current_epoch() - 2, "the head should be finalized two behind the current epoch" ); @@ -1757,7 +1902,7 @@ fn check_split_slot(harness: &TestHarness, store: Arc, L .head() .expect("should get head") .beacon_state - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()), split_slot @@ -1788,8 +1933,8 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) { .get_state(&checkpoint.beacon_state_root(), None) .expect("no error") .expect("state exists") - .slot, - checkpoint.beacon_state.slot + .slot(), + checkpoint.beacon_state.slot() ); } @@ -1864,7 +2009,7 @@ fn 
get_finalized_epoch_boundary_blocks( ) -> HashSet { dump.iter() .cloned() - .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into()) + .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into()) .collect() } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c6bf515a379..2740d566a85 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -29,6 +29,7 @@ lazy_static! { fn get_harness(validator_count: usize) -> BeaconChainHarness> { let harness = BeaconChainHarness::new_with_store_config( MinimalEthSpec, + None, KEYPAIRS[0..validator_count].to_vec(), StoreConfig::default(), ); @@ -41,7 +42,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness 1, "the state should skip at least one slot"); + assert!(state.slot() > 1, "the state should skip at least one slot"); assert_eq!( error, SlotProcessingError::EpochProcessingError(EpochProcessingError::BeaconStateError( @@ -134,7 +135,7 @@ fn iterators() { assert_eq!( *state_roots.last().expect("should have some state roots"), - (head.beacon_state_root(), head.beacon_state.slot), + (head.beacon_state_root(), head.beacon_state.slot()), "last state root and slot should be for the head state" ); } @@ -153,7 +154,7 @@ fn find_reorgs() { ); let head_state = harness.chain.head_beacon_state().unwrap(); - let head_slot = head_state.slot; + let head_slot = head_state.slot(); let genesis_state = harness .chain .state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots) @@ -167,7 +168,7 @@ fn find_reorgs() { .find_reorg_slot(&genesis_state, harness.chain.genesis_block_root) .unwrap(), head_state - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(MinimalEthSpec::slots_per_epoch()) ); @@ -237,7 +238,7 @@ fn chooses_fork() { let state = &harness.chain.head().expect("should get head").beacon_state; assert_eq!( - state.slot, + state.slot(), Slot::from(initial_blocks + 
honest_fork_blocks), "head should be at the current slot" ); @@ -268,7 +269,8 @@ fn finalizes_with_full_participation() { let state = &harness.chain.head().expect("should get head").beacon_state; assert_eq!( - state.slot, num_blocks_produced, + state.slot(), + num_blocks_produced, "head should be at the current slot" ); assert_eq!( @@ -277,12 +279,12 @@ fn finalizes_with_full_participation() { "head should be at the expected epoch" ); assert_eq!( - state.current_justified_checkpoint.epoch, + state.current_justified_checkpoint().epoch, state.current_epoch() - 1, "the head should be justified one behind the current epoch" ); assert_eq!( - state.finalized_checkpoint.epoch, + state.finalized_checkpoint().epoch, state.current_epoch() - 2, "the head should be finalized two behind the current epoch" ); @@ -306,7 +308,8 @@ fn finalizes_with_two_thirds_participation() { let state = &harness.chain.head().expect("should get head").beacon_state; assert_eq!( - state.slot, num_blocks_produced, + state.slot(), + num_blocks_produced, "head should be at the current slot" ); assert_eq!( @@ -320,12 +323,12 @@ fn finalizes_with_two_thirds_participation() { // included in blocks during that epoch. 
assert_eq!( - state.current_justified_checkpoint.epoch, + state.current_justified_checkpoint().epoch, state.current_epoch() - 2, "the head should be justified two behind the current epoch" ); assert_eq!( - state.finalized_checkpoint.epoch, + state.finalized_checkpoint().epoch, state.current_epoch() - 4, "the head should be finalized three behind the current epoch" ); @@ -350,7 +353,8 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { let state = &harness.chain.head().expect("should get head").beacon_state; assert_eq!( - state.slot, num_blocks_produced, + state.slot(), + num_blocks_produced, "head should be at the current slot" ); assert_eq!( @@ -359,11 +363,13 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { "head should be at the expected epoch" ); assert_eq!( - state.current_justified_checkpoint.epoch, 0, + state.current_justified_checkpoint().epoch, + 0, "no epoch should have been justified" ); assert_eq!( - state.finalized_checkpoint.epoch, 0, + state.finalized_checkpoint().epoch, + 0, "no epoch should have been finalized" ); } @@ -383,7 +389,8 @@ fn does_not_finalize_without_attestation() { let state = &harness.chain.head().expect("should get head").beacon_state; assert_eq!( - state.slot, num_blocks_produced, + state.slot(), + num_blocks_produced, "head should be at the current slot" ); assert_eq!( @@ -392,11 +399,13 @@ fn does_not_finalize_without_attestation() { "head should be at the expected epoch" ); assert_eq!( - state.current_justified_checkpoint.epoch, 0, + state.current_justified_checkpoint().epoch, + 0, "no epoch should have been justified" ); assert_eq!( - state.finalized_checkpoint.epoch, 0, + state.finalized_checkpoint().epoch, + 0, "no epoch should have been finalized" ); } @@ -681,7 +690,14 @@ fn block_roots_skip_slot_behaviour() { let harness = get_harness(VALIDATOR_COUNT); // Test should be longer than the block roots to ensure a DB lookup is triggered. 
- let chain_length = harness.chain.head().unwrap().beacon_state.block_roots.len() as u64 * 3; + let chain_length = harness + .chain + .head() + .unwrap() + .beacon_state + .block_roots() + .len() as u64 + * 3; let skipped_slots = [1, 6, 7, 10, chain_length]; diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 8b45ea61ef4..d7de3e0d5df 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -19,7 +19,6 @@ use network::{NetworkConfig, NetworkMessage, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn}; -use ssz::Decode; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -196,7 +195,7 @@ where "Starting from known genesis state"; ); - let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes) + let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; builder.genesis_state(genesis_state).map(|v| (v, None))? 
diff --git a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs index 4e84d354bc0..b7deb959d47 100644 --- a/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/eth2_libp2p/src/rpc/codec/ssz_snappy.rs @@ -13,7 +13,7 @@ use std::io::ErrorKind; use std::io::{Read, Write}; use std::marker::PhantomData; use tokio_util::codec::{Decoder, Encoder}; -use types::{EthSpec, SignedBeaconBlock}; +use types::{EthSpec, SignedBeaconBlock, SignedBeaconBlockBase}; use unsigned_varint::codec::Uvi; /* Inbound Codec */ @@ -298,12 +298,18 @@ impl Decoder for SSZSnappyOutboundCodec { Protocol::Goodbye => Err(RPCError::InvalidData), Protocol::BlocksByRange => match self.protocol.version { Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new( - SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?, + // FIXME(altair): support Altair blocks + SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes( + &decoded_buffer, + )?), )))), }, Protocol::BlocksByRoot => match self.protocol.version { + // FIXME(altair): support Altair blocks Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new( - SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?, + SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes( + &decoded_buffer, + )?), )))), }, Protocol::Ping => match self.protocol.version { diff --git a/beacon_node/eth2_libp2p/src/rpc/methods.rs b/beacon_node/eth2_libp2p/src/rpc/methods.rs index df362b316c7..8facac48af9 100644 --- a/beacon_node/eth2_libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2_libp2p/src/rpc/methods.rs @@ -354,10 +354,10 @@ impl std::fmt::Display for RPCResponse { match self { RPCResponse::Status(status) => write!(f, "{}", status), RPCResponse::BlocksByRange(block) => { - write!(f, "BlocksByRange: Block slot: {}", block.message.slot) + write!(f, "BlocksByRange: Block slot: {}", block.slot()) } RPCResponse::BlocksByRoot(block) => { - write!(f, "BlocksByRoot: BLock slot: {}", block.message.slot) 
+ write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number), diff --git a/beacon_node/eth2_libp2p/src/rpc/protocol.rs b/beacon_node/eth2_libp2p/src/rpc/protocol.rs index 81804c82a75..44e180fb598 100644 --- a/beacon_node/eth2_libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2_libp2p/src/rpc/protocol.rs @@ -24,16 +24,16 @@ use types::{BeaconBlock, EthSpec, Hash256, MainnetEthSpec, Signature, SignedBeac lazy_static! { // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is // same across different `EthSpec` implementations. - pub static ref SIGNED_BEACON_BLOCK_MIN: usize = SignedBeaconBlock:: { - message: BeaconBlock::empty(&MainnetEthSpec::default_spec()), - signature: Signature::empty(), - } + pub static ref SIGNED_BEACON_BLOCK_MIN: usize = SignedBeaconBlock::::from_block( + BeaconBlock::empty(&MainnetEthSpec::default_spec()), + Signature::empty(), + ) .as_ssz_bytes() .len(); - pub static ref SIGNED_BEACON_BLOCK_MAX: usize = SignedBeaconBlock:: { - message: BeaconBlock::full(&MainnetEthSpec::default_spec()), - signature: Signature::empty(), - } + pub static ref SIGNED_BEACON_BLOCK_MAX: usize = SignedBeaconBlock::::from_block( + BeaconBlock::full(&MainnetEthSpec::default_spec()), + Signature::empty(), + ) .as_ssz_bytes() .len(); pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = diff --git a/beacon_node/eth2_libp2p/src/types/pubsub.rs b/beacon_node/eth2_libp2p/src/types/pubsub.rs index 325a6c44319..f1ba987058a 100644 --- a/beacon_node/eth2_libp2p/src/types/pubsub.rs +++ b/beacon_node/eth2_libp2p/src/types/pubsub.rs @@ -10,7 +10,7 @@ use std::io::{Error, ErrorKind}; use types::SubnetId; use types::{ Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, + SignedBeaconBlock, SignedBeaconBlockBase, SignedVoluntaryExit, }; 
#[derive(Debug, Clone, PartialEq)] @@ -141,8 +141,11 @@ impl PubsubMessage { )))) } GossipKind::BeaconBlock => { - let beacon_block = SignedBeaconBlock::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?; + // FIXME(altair): support Altair blocks + let beacon_block = SignedBeaconBlock::Base( + SignedBeaconBlockBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ); Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block))) } GossipKind::VoluntaryExit => { @@ -189,7 +192,8 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::BeaconBlock(block) => write!( f, "Beacon Block: slot: {}, proposer_index: {}", - block.message.slot, block.message.proposer_index + block.slot(), + block.message().proposer_index() ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, diff --git a/beacon_node/eth2_libp2p/tests/rpc_tests.rs b/beacon_node/eth2_libp2p/tests/rpc_tests.rs index 43f054d27f6..1b565a4655e 100644 --- a/beacon_node/eth2_libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2_libp2p/tests/rpc_tests.rs @@ -140,10 +140,7 @@ fn test_blocks_by_range_chunked_rpc() { // BlocksByRange Response let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); - let empty_signed = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty(), - }; + let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); // keep count of the number of messages received @@ -257,10 +254,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { // BlocksByRange Response let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); - let empty_signed = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty(), - }; + let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); // keep count of the number of messages 
received @@ -390,10 +384,7 @@ fn test_blocks_by_range_single_empty_rpc() { // BlocksByRange Response let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); - let empty_signed = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty(), - }; + let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); let messages_to_send = 1; @@ -510,10 +501,7 @@ fn test_blocks_by_root_chunked_rpc() { // BlocksByRoot Response let full_block = BeaconBlock::full(&spec); - let signed_full_block = SignedBeaconBlock { - message: full_block, - signature: Signature::empty(), - }; + let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); // keep count of the number of messages received @@ -634,10 +622,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { // BlocksByRoot Response let full_block = BeaconBlock::full(&spec); - let signed_full_block = SignedBeaconBlock { - message: full_block, - signature: Signature::empty(), - }; + let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); // keep count of the number of messages received diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 447a0119690..d5ef6ad0d90 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -5,7 +5,7 @@ use eth1::{DepositLog, Eth1Block, Service as Eth1Service}; use slog::{debug, error, info, trace, Logger}; use state_processing::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, - per_block_processing::process_deposit, process_activations, + per_block_processing::process_operations::process_deposit, 
process_activations, }; use std::sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, @@ -190,7 +190,7 @@ impl Eth1GenesisService { .get_active_validator_indices(E::genesis_epoch(), &spec) .map_err(|e| format!("Genesis validators error: {:?}", e))? .len(), - "genesis_time" => genesis_state.genesis_time, + "genesis_time" => genesis_state.genesis_time(), ); break Ok(genesis_state); } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index fcead9fbed6..e36c115b477 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -48,10 +48,12 @@ pub fn interop_genesis_state( ) .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; - state.genesis_time = genesis_time; + *state.genesis_time_mut() = genesis_time; - // Invalid all the caches after all the manual state surgery. - state.drop_all_caches(); + // Invalidate all the caches after all the manual state surgery. + state + .drop_all_caches() + .map_err(|e| format!("Unable to drop caches: {:?}", e))?; Ok(state) } @@ -75,24 +77,25 @@ mod test { .expect("should build state"); assert_eq!( - state.eth1_data.block_hash, + state.eth1_data().block_hash, Hash256::from_slice(&[0x42; 32]), "eth1 block hash should be co-ordinated junk" ); assert_eq!( - state.genesis_time, genesis_time, + state.genesis_time(), + genesis_time, "genesis time should be as specified" ); - for b in &state.balances { + for b in state.balances() { assert_eq!( *b, spec.max_effective_balance, "validator balances should be max effective balance" ); } - for v in &state.validators { + for v in state.validators() { let creds = v.withdrawal_credentials.as_bytes(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, @@ -106,13 +109,13 @@ mod test { } assert_eq!( - state.balances.len(), + state.balances().len(), validator_count, "validator balances len should be correct" ); assert_eq!( - state.validators.len(), + state.validators().len(), validator_count, "validator count should be 
correct" ); diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index d9ed1808606..f99000ee8cc 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -92,12 +92,12 @@ fn basic() { // Note: using ganache these deposits are 1-per-block, therefore we know there should only be // the minimum number of validators. assert_eq!( - state.validators.len(), + state.validators().len(), spec.min_genesis_active_validator_count as usize, "should have expected validator count" ); - assert!(state.genesis_time > 0, "should have some genesis time"); + assert!(state.genesis_time() > 0, "should have some genesis time"); assert!( is_valid_genesis_state(&state, &spec), diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index d6b80510764..25dcbcf0edf 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -37,8 +37,9 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttesterSlashing, CommitteeCache, Epoch, EthSpec, ProposerSlashing, RelativeEpoch, - SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig, + Attestation, AttesterSlashing, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, + ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBeaconBlock, + SignedVoluntaryExit, Slot, }; use warp::http::StatusCode; use warp::sse::Event; @@ -75,6 +76,7 @@ pub struct Config { pub listen_addr: Ipv4Addr, pub listen_port: u16, pub allow_origin: Option, + pub serve_legacy_spec: bool, } impl Default for Config { @@ -84,6 +86,7 @@ impl Default for Config { listen_addr: Ipv4Addr::new(127, 0, 0, 1), listen_port: 5052, allow_origin: None, + serve_legacy_spec: true, } } } @@ -332,7 +335,8 @@ pub fn serve( .untuple_one(); // Create a `warp` filter that provides access to the logger. 
- let log_filter = warp::any().map(move || ctx.log.clone()); + let inner_ctx = ctx.clone(); + let log_filter = warp::any().map(move || inner_ctx.log.clone()); /* * @@ -407,9 +411,9 @@ pub fn serve( state_id .map_state(&chain, |state| { Ok(api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint, - current_justified: state.current_justified_checkpoint, - finalized: state.finalized_checkpoint, + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), }) }) .map(api_types::GenericResponse::from) @@ -430,9 +434,9 @@ pub fn serve( state_id .map_state(&chain, |state| { Ok(state - .validators + .validators() .iter() - .zip(state.balances.iter()) + .zip(state.balances().iter()) .enumerate() // filter by validator id(s) if provided .filter(|(index, (validator, _))| { @@ -475,9 +479,9 @@ pub fn serve( let far_future_epoch = chain.spec.far_future_epoch; Ok(state - .validators + .validators() .iter() - .zip(state.balances.iter()) + .zip(state.balances().iter()) .enumerate() // filter by validator id(s) if provided .filter(|(index, (validator, _))| { @@ -541,15 +545,15 @@ pub fn serve( .map_state(&chain, |state| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { - state.validators.iter().position(|v| v.pubkey == *pubkey) + state.validators().iter().position(|v| v.pubkey == *pubkey) } ValidatorId::Index(index) => Some(*index as usize), }; index_opt .and_then(|index| { - let validator = state.validators.get(index)?; - let balance = *state.balances.get(index)?; + let validator = state.validators().get(index)?; + let balance = *state.balances().get(index)?; let epoch = state.current_epoch(); let far_future_epoch = chain.spec.far_future_epoch; @@ -591,7 +595,7 @@ pub fn serve( blocking_json_task(move || { query_state_id.map_state(&chain, |state| { - let epoch = state.slot.epoch(T::EthSpec::slots_per_epoch()); + let 
epoch = state.slot().epoch(T::EthSpec::slots_per_epoch()); let committee_cache = if state .committee_cache_is_initialized(RelativeEpoch::Current) @@ -725,8 +729,8 @@ pub fn serve( root, canonical: true, header: api_types::BlockHeaderAndSignature { - message: block.message.block_header(), - signature: block.signature.into(), + message: block.message().block_header(), + signature: block.signature().clone().into(), }, }; @@ -760,8 +764,8 @@ pub fn serve( root, canonical, header: api_types::BlockHeaderAndSignature { - message: block.message.block_header(), - signature: block.signature.into(), + message: block.message().block_header(), + signature: block.signature().clone().into(), }, }; @@ -799,7 +803,7 @@ pub fn serve( // Determine the delay after the start of the slot, register it with metrics. let delay = - get_block_delay_ms(seen_timestamp, &block.message, &chain.slot_clock); + get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); metrics::observe_duration( &metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay, @@ -817,7 +821,7 @@ pub fn serve( // Notify the validator monitor. 
chain.validator_monitor.read().register_api_block( seen_timestamp, - &block.message, + block.message(), root, &chain.slot_clock, ); @@ -935,7 +939,8 @@ pub fn serve( blocking_json_task(move || { block_id .block(&chain) - .map(|block| block.message.body.attestations) + // FIXME(altair): could avoid clone with by-value accessor + .map(|block| block.message().body().attestations().clone()) .map(api_types::GenericResponse::from) }) }); @@ -1266,17 +1271,19 @@ pub fn serve( }); // GET config/spec + let serve_legacy_spec = ctx.config.serve_legacy_spec; let get_config_spec = config_path .and(warp::path("spec")) .and(warp::path::end()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { + .and_then(move |chain: Arc>| { blocking_json_task(move || { - Ok(api_types::GenericResponse::from(YamlConfig::from_spec::< - T::EthSpec, - >( - &chain.spec - ))) + let mut config_and_preset = + ConfigAndPreset::from_chain_spec::(&chain.spec); + if serve_legacy_spec { + config_and_preset.make_backwards_compat(&chain.spec); + } + Ok(api_types::GenericResponse::from(config_and_preset)) }) }); diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index b9cb0751217..69c9f738a76 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -148,7 +148,7 @@ fn compute_and_cache_proposer_duties( state.current_epoch(), dependent_root, indices.clone(), - state.fork, + state.fork(), ) .map_err(BeaconChainError::from) .map_err(warp_utils::reject::beacon_chain_error)?; diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 11800648f25..8b52e48152e 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -57,7 +57,7 @@ impl StateId { &self, chain: &BeaconChain, ) -> Result { - self.map_state(chain, |state| Ok(state.fork)) + self.map_state(chain, |state| Ok(state.fork())) } /// Return the `BeaconState` identified by `self`. 
diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index 90847dd6b4e..cdd2a51621b 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -20,7 +20,7 @@ pub fn global_validator_inclusion_data( let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec) .map_err(warp_utils::reject::beacon_state_error)?; validator_statuses - .process_attestations(&state, &chain.spec) + .process_attestations(&state) .map_err(warp_utils::reject::beacon_state_error)?; let totals = validator_statuses.total_balances; @@ -49,7 +49,7 @@ pub fn validator_inclusion_data( let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec) .map_err(warp_utils::reject::beacon_state_error)?; validator_statuses - .process_attestations(&state, &chain.spec) + .process_attestations(&state) .map_err(warp_utils::reject::beacon_state_error)?; state diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 558fedb0666..6cb60832f2b 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -23,11 +23,9 @@ use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::convert::TryInto; -use std::iter::Iterator; use std::net::Ipv4Addr; use std::sync::Arc; -use tokio::sync::mpsc; -use tokio::sync::oneshot; +use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use tree_hash::TreeHash; use types::{ @@ -77,6 +75,7 @@ impl ApiTester { pub fn new() -> Self { let mut harness = BeaconChainHarness::new( MainnetEthSpec, + None, generate_deterministic_keypairs(VALIDATOR_COUNT), ); @@ -189,6 +188,7 @@ impl ApiTester { listen_addr: Ipv4Addr::new(127, 0, 0, 1), listen_port: 0, allow_origin: None, + serve_legacy_spec: true, }, chain: Some(chain.clone()), network_tx: Some(network_tx), @@ -235,6 +235,7 @@ impl ApiTester { pub fn new_from_genesis() -> Self { let 
harness = BeaconChainHarness::new( MainnetEthSpec, + None, generate_deterministic_keypairs(VALIDATOR_COUNT), ); @@ -301,6 +302,7 @@ impl ApiTester { listen_addr: Ipv4Addr::new(127, 0, 0, 1), listen_port: 0, allow_origin: None, + serve_legacy_spec: true, }, chain: Some(chain.clone()), network_tx: Some(network_tx), @@ -445,8 +447,8 @@ impl ApiTester { let state = self.chain.head().unwrap().beacon_state; let expected = GenesisData { - genesis_time: state.genesis_time, - genesis_validators_root: state.genesis_validators_root, + genesis_time: state.genesis_time(), + genesis_validators_root: state.genesis_validators_root(), genesis_fork_version: self.chain.spec.genesis_fork_version, }; @@ -508,7 +510,7 @@ impl ApiTester { .unwrap() .map(|res| res.data); - let expected = self.get_state(state_id).map(|state| state.fork); + let expected = self.get_state(state_id).map(|state| state.fork()); assert_eq!(result, expected, "{:?}", state_id); } @@ -528,9 +530,9 @@ impl ApiTester { let expected = self .get_state(state_id) .map(|state| FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint, - current_justified: state.current_justified_checkpoint, - finalized: state.finalized_checkpoint, + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), }); assert_eq!(result, expected, "{:?}", state_id); @@ -544,7 +546,7 @@ impl ApiTester { for validator_indices in self.interesting_validator_indices() { let state_opt = self.get_state(state_id); let validators: Vec = match state_opt.as_ref() { - Some(state) => state.validators.clone().into(), + Some(state) => state.validators().clone().into(), None => vec![], }; let validator_index_ids = validator_indices @@ -587,10 +589,10 @@ impl ApiTester { let mut validators = Vec::with_capacity(validator_indices.len()); for i in validator_indices { - if i < state.balances.len() as u64 { + if i < state.balances().len() as 
u64 { validators.push(ValidatorBalanceData { index: i as u64, - balance: state.balances[i as usize], + balance: state.balances()[i as usize], }); } } @@ -612,7 +614,7 @@ impl ApiTester { for validator_indices in self.interesting_validator_indices() { let state_opt = self.get_state(state_id); let validators: Vec = match state_opt.as_ref() { - Some(state) => state.validators.clone().into(), + Some(state) => state.validators().clone().into(), None => vec![], }; let validator_index_ids = validator_indices @@ -661,10 +663,10 @@ impl ApiTester { let mut validators = Vec::with_capacity(validator_indices.len()); for i in validator_indices { - if i >= state.validators.len() as u64 { + if i >= state.validators().len() as u64 { continue; } - let validator = state.validators[i as usize].clone(); + let validator = state.validators()[i as usize].clone(); let status = ValidatorStatus::from_validator( &validator, epoch, @@ -676,7 +678,7 @@ impl ApiTester { { validators.push(ValidatorData { index: i as u64, - balance: state.balances[i as usize], + balance: state.balances()[i as usize], status, validator, }); @@ -699,7 +701,7 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let state_opt = self.get_state(state_id); let validators = match state_opt.as_ref() { - Some(state) => state.validators.clone().into(), + Some(state) => state.validators().clone().into(), None => vec![], }; @@ -729,7 +731,7 @@ impl ApiTester { ValidatorData { index: i as u64, - balance: state.balances[i], + balance: state.balances()[i], status: ValidatorStatus::from_validator( &validator, epoch, @@ -846,8 +848,8 @@ impl ApiTester { root, canonical: true, header: BlockHeaderAndSignature { - message: block.message.block_header(), - signature: block.signature.into(), + message: block.message().block_header(), + signature: block.signature().clone().into(), }, }; let expected = vec![header]; @@ -927,13 +929,13 @@ impl ApiTester { assert_eq!(result.root, block_root, "{:?}", block_id); assert_eq!( 
result.header.message, - block.message.block_header(), + block.message().block_header(), "{:?}", block_id ); assert_eq!( result.header.signature, - block.signature.into(), + block.signature().clone().into(), "{:?}", block_id ); @@ -980,7 +982,7 @@ impl ApiTester { pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { let mut next_block = self.next_block.clone(); - next_block.message.proposer_index += 1; + *next_block.message_mut().proposer_index_mut() += 1; assert!(self.client.post_beacon_blocks(&next_block).await.is_err()); @@ -1012,7 +1014,11 @@ impl ApiTester { .map(|res| res.data); assert_eq!(json_result, expected, "{:?}", block_id); - let ssz_result = self.client.get_beacon_blocks_ssz(block_id).await.unwrap(); + let ssz_result = self + .client + .get_beacon_blocks_ssz(block_id, &self.chain.spec) + .await + .unwrap(); assert_eq!(ssz_result, expected, "{:?}", block_id); } @@ -1030,7 +1036,7 @@ impl ApiTester { let expected = self .get_block(block_id) - .map(|block| block.message.body.attestations.into()); + .map(|block| block.message().body().attestations().clone().into()); if let BlockId::Slot(slot) = block_id { if expected.is_none() { @@ -1264,7 +1270,8 @@ impl ApiTester { pub async fn test_get_config_spec(self) -> Self { let result = self.client.get_config_spec().await.unwrap().data; - let expected = YamlConfig::from_spec::(&self.chain.spec); + let mut expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec); + expected.make_backwards_compat(&self.chain.spec); assert_eq!(result, expected); @@ -1432,7 +1439,7 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result_ssz = self .client - .get_debug_beacon_states_ssz(state_id) + .get_debug_beacon_states_ssz(state_id, &self.chain.spec) .await .unwrap(); let result_json = self @@ -1471,7 +1478,7 @@ impl ApiTester { } fn validator_count(&self) -> usize { - self.chain.head().unwrap().beacon_state.validators.len() + self.chain.head().unwrap().beacon_state.validators().len() } fn 
interesting_validator_indices(&self) -> Vec> { @@ -1575,7 +1582,7 @@ impl ApiTester { let expected_len = indices .iter() - .filter(|i| **i < state.validators.len() as u64) + .filter(|i| **i < state.validators().len() as u64) .count(); assert_eq!(result_duties.len(), expected_len); @@ -1586,7 +1593,7 @@ impl ApiTester { .unwrap() { let expected = AttesterData { - pubkey: state.validators[i as usize].pubkey.clone().into(), + pubkey: state.validators()[i as usize].pubkey.clone().into(), validator_index: i, committees_at_slot: duty.committees_at_slot, committee_index: duty.index, @@ -1691,7 +1698,7 @@ impl ApiTester { let index = state .get_beacon_proposer_index(slot, &self.chain.spec) .unwrap(); - let pubkey = state.validators[index].pubkey.clone().into(); + let pubkey = state.validators()[index].pubkey.clone().into(); ProposerData { pubkey, @@ -1849,7 +1856,7 @@ impl ApiTester { pub async fn test_get_validator_attestation_data(self) -> Self { let mut state = self.chain.head_beacon_state().unwrap(); - let slot = state.slot; + let slot = state.slot(); state .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) .unwrap(); @@ -1879,9 +1886,9 @@ impl ApiTester { .chain .head_beacon_block() .unwrap() - .message - .body - .attestations[0] + .message() + .body() + .attestations()[0] .clone(); let result = self @@ -1915,7 +1922,7 @@ impl ApiTester { .unwrap(); let committee_len = head.beacon_state.get_committee_count_at_slot(slot).unwrap(); - let fork = head.beacon_state.fork; + let fork = head.beacon_state.fork(); let genesis_validators_root = self.chain.genesis_validators_root; let duties = self @@ -2118,7 +2125,7 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_lighthouse_beacon_states_ssz(&state_id) + .get_lighthouse_beacon_states_ssz(&state_id, &self.chain.spec) .await .unwrap(); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 0f0fad35b7e..10808c58eb3 100644 --- 
a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -43,7 +43,7 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } task_executor = { path = "../../common/task_executor" } igd = "0.11.1" -itertools = "0.9.0" +itertools = "0.10.0" num_cpus = "1.13.0" lru_cache = { path = "../../common/lru_cache" } if-addrs = "0.6.4" diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 6549a0263ec..c177d1c17b5 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -3,10 +3,10 @@ use crate::beacon_processor::*; use crate::{service::NetworkMessage, sync::SyncMessage}; -use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, - BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY, +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; +use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use discv5::enr::{CombinedKey, EnrBuilder}; use environment::{null_logger, Environment, EnvironmentBuilder}; use eth2_libp2p::{rpc::methods::MetaData, types::EnrBitfield, MessageId, NetworkGlobals, PeerId}; @@ -66,6 +66,7 @@ impl TestRig { pub fn new(chain_length: u64) -> Self { let mut harness = BeaconChainHarness::new( MainnetEthSpec, + None, generate_deterministic_keypairs(VALIDATOR_COUNT), ); diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index f184f0576aa..96a4d86c800 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -244,7 +244,7 @@ impl Worker { // Log metrics to track delay from other nodes on the network. 
metrics::observe_duration( &metrics::BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME, - get_block_delay_ms(seen_duration, &block.message, &self.chain.slot_clock), + get_block_delay_ms(seen_duration, block.message(), &self.chain.slot_clock), ); let verified_block = match self.chain.verify_block_for_gossip(block) { @@ -305,6 +305,7 @@ impl Worker { | Err(e @ BlockError::InvalidSignature) | Err(e @ BlockError::TooManySkippedSlots { .. }) | Err(e @ BlockError::WeakSubjectivityConflict) + | Err(e @ BlockError::InconsistentFork(_)) | Err(e @ BlockError::GenesisBlock) => { warn!(self.log, "Could not verify block for gossip, rejecting the block"; "error" => %e); @@ -322,7 +323,7 @@ impl Worker { // verified. self.chain.validator_monitor.read().register_gossip_block( seen_duration, - &verified_block.block.message, + verified_block.block.message(), verified_block.block_root, &self.chain.slot_clock, ); diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 65118e3de5f..5db81131e1e 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -57,8 +57,8 @@ impl Worker { match process_id { // this a request from the range sync ProcessId::RangeBatchId(chain_id, epoch) => { - let start_slot = downloaded_blocks.first().map(|b| b.message.slot.as_u64()); - let end_slot = downloaded_blocks.last().map(|b| b.message.slot.as_u64()); + let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); + let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); let result = match self.process_blocks(downloaded_blocks.iter()) { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 082c1353fbf..75786cdc510 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -277,7 +277,7 @@ fn 
spawn_service( .map(|current_epoch| { head .beacon_state - .validators + .validators() .iter() .filter(|validator| validator.is_active_at(current_epoch) diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index c7186a8393e..02be935c1fc 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -38,6 +38,7 @@ mod tests { let beacon_chain = Arc::new( BeaconChainHarness::new_with_store_config( MinimalEthSpec, + None, generate_deterministic_keypairs(8), StoreConfig::default(), ) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 8f4a238b232..2b678751ea5 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -331,8 +331,13 @@ impl SyncManager { // check if the parent of this block isn't in our failed cache. If it is, this // chain should be dropped and the peer downscored. - if self.failed_chains.contains(&block.message.parent_root) { - debug!(self.log, "Parent chain ignored due to past failure"; "block" => ?block.message.parent_root, "slot" => block.message.slot); + if self.failed_chains.contains(&block.message().parent_root()) { + debug!( + self.log, + "Parent chain ignored due to past failure"; + "block" => ?block.message().parent_root(), + "slot" => block.slot() + ); if !parent_request.downloaded_blocks.is_empty() { // Add the root block to failed chains self.failed_chains @@ -490,7 +495,7 @@ impl SyncManager { .head_info() .map(|info| info.slot) .unwrap_or_else(|_| Slot::from(0u64)); - let unknown_block_slot = block.message.slot; + let unknown_block_slot = block.slot(); // if the block is far in the future, ignore it. If its within the slot tolerance of // our current head, regardless of the syncing state, fetch it. @@ -505,10 +510,10 @@ impl SyncManager { let block_root = block.canonical_root(); // If this block or it's parent is part of a known failed chain, ignore it. 
- if self.failed_chains.contains(&block.message.parent_root) + if self.failed_chains.contains(&block.message().parent_root()) || self.failed_chains.contains(&block_root) { - debug!(self.log, "Block is from a past failed chain. Dropping"; "block_root" => ?block_root, "block_slot" => block.message.slot); + debug!(self.log, "Block is from a past failed chain. Dropping"; "block_root" => ?block_root, "block_slot" => block.slot()); return; } @@ -525,7 +530,7 @@ impl SyncManager { } } - debug!(self.log, "Unknown block received. Starting a parent lookup"; "block_slot" => block.message.slot, "block_hash" => %block.canonical_root()); + debug!(self.log, "Unknown block received. Starting a parent lookup"; "block_slot" => block.slot(), "block_hash" => %block.canonical_root()); let parent_request = ParentRequests { downloaded_blocks: vec![block], diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 6b20a806dc7..219932c84b4 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -21,3 +21,5 @@ store = { path = "../store" } [dev-dependencies] rand = "0.7.3" +lazy_static = "1.4.0" +beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index ea12abaf4a0..2ed580cae28 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -1,7 +1,13 @@ use crate::max_cover::MaxCover; -use state_processing::common::{get_attesting_indices, get_base_reward}; +use state_processing::common::{ + altair, base, get_attestation_participation_flag_indices, get_attesting_indices, +}; use std::collections::HashMap; -use types::{Attestation, BeaconState, BitList, ChainSpec, EthSpec}; +use types::{ + beacon_state::BeaconStateBase, + consts::altair::{PARTICIPATION_FLAG_WEIGHTS, WEIGHT_DENOMINATOR}, + Attestation, BeaconState, BitList, ChainSpec, EthSpec, +}; #[derive(Debug, 
Clone)] pub struct AttMaxCover<'a, T: EthSpec> { @@ -18,7 +24,22 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { total_active_balance: u64, spec: &ChainSpec, ) -> Option { - let fresh_validators = earliest_attestation_validators(att, state); + if let BeaconState::Base(ref base_state) = state { + Self::new_for_base(att, state, base_state, total_active_balance, spec) + } else { + Self::new_for_altair(att, state, total_active_balance, spec) + } + } + + /// Initialise an attestation cover object for base/phase0 hard fork. + pub fn new_for_base( + att: &'a Attestation, + state: &BeaconState, + base_state: &BeaconStateBase, + total_active_balance: u64, + spec: &ChainSpec, + ) -> Option { + let fresh_validators = earliest_attestation_validators(att, state, base_state); let committee = state .get_beacon_committee(att.data.slot, att.data.index) .ok()?; @@ -27,10 +48,14 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { .iter() .map(|i| *i as u64) .flat_map(|validator_index| { - let reward = - get_base_reward(state, validator_index as usize, total_active_balance, spec) - .ok()? - / spec.proposer_reward_quotient; + let reward = base::get_base_reward( + state, + validator_index as usize, + total_active_balance, + spec, + ) + .ok()? + .checked_div(spec.proposer_reward_quotient)?; Some((validator_index, reward)) }) .collect(); @@ -39,6 +64,62 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { fresh_validators_rewards, }) } + + /// Initialise an attestation cover object for Altair or later. + pub fn new_for_altair( + att: &'a Attestation, + state: &BeaconState, + total_active_balance: u64, + spec: &ChainSpec, + ) -> Option { + let committee = state + .get_beacon_committee(att.data.slot, att.data.index) + .ok()?; + let attesting_indices = + get_attesting_indices::(committee.committee, &att.aggregation_bits).ok()?; + + let participation_list = if att.data.target.epoch == state.current_epoch() { + state.current_epoch_participation().ok()? 
+ } else if att.data.target.epoch == state.previous_epoch() { + state.previous_epoch_participation().ok()? + } else { + return None; + }; + + let inclusion_delay = state.slot().as_u64().checked_sub(att.data.slot.as_u64())?; + let att_participation_flags = + get_attestation_participation_flag_indices(state, &att.data, inclusion_delay, spec) + .ok()?; + + let fresh_validators_rewards = attesting_indices + .iter() + .filter_map(|&index| { + let mut proposer_reward_numerator = 0; + let participation = participation_list.get(index)?; + + let base_reward = + altair::get_base_reward(state, index, total_active_balance, spec).ok()?; + + for (flag_index, weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { + if att_participation_flags.contains(&flag_index) + && !participation.has_flag(flag_index).ok()? + { + proposer_reward_numerator += base_reward.checked_mul(*weight)?; + } + } + + let proposer_reward = proposer_reward_numerator + .checked_div(WEIGHT_DENOMINATOR.checked_mul(spec.proposer_reward_quotient)?)?; + + Some((index as u64, proposer_reward)).filter(|_| proposer_reward != 0) + }) + .collect(); + + Some(Self { + att, + fresh_validators_rewards, + }) + } } impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { @@ -58,6 +139,11 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { /// confusing committees when updating covering sets, we update only those attestations /// whose slot and index match the attestation being included in the solution, by the logic /// that a slot and index uniquely identify a committee. + /// + /// We completely remove any validator covered by another attestation. This is close to optimal + /// because including two attestations on chain to satisfy different participation bits is + /// impossible without the validator double voting. I.e. it is only suboptimal in the presence + /// of slashable voting, which is rare. 
fn update_covering_set( &mut self, best_att: &Attestation, @@ -81,19 +167,20 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { /// is judged against the state's `current_epoch_attestations` or `previous_epoch_attestations` /// depending on when it was created, and all those validators who have already attested are /// removed from the `aggregation_bits` before returning it. -// TODO: This could be optimised with a map from validator index to whether that validator has -// attested in each of the current and previous epochs. Currently quadratic in number of validators. +/// +/// This isn't optimal, but with the Altair fork this code is obsolete and not worth upgrading. pub fn earliest_attestation_validators( attestation: &Attestation, state: &BeaconState, + base_state: &BeaconStateBase, ) -> BitList { // Bitfield of validators whose attestations are new/fresh. let mut new_validators = attestation.aggregation_bits.clone(); let state_attestations = if attestation.data.target.epoch == state.current_epoch() { - &state.current_epoch_attestations + &base_state.current_epoch_attestations } else if attestation.data.target.epoch == state.previous_epoch() { - &state.previous_epoch_attestations + &base_state.previous_epoch_attestations } else { return BitList::with_capacity(0).unwrap(); }; diff --git a/beacon_node/operation_pool/src/attester_slashing.rs b/beacon_node/operation_pool/src/attester_slashing.rs index ad4cd01ea2f..2cb63ad252e 100644 --- a/beacon_node/operation_pool/src/attester_slashing.rs +++ b/beacon_node/operation_pool/src/attester_slashing.rs @@ -1,7 +1,7 @@ use crate::max_cover::MaxCover; use state_processing::per_block_processing::get_slashable_indices_modular; use std::collections::{HashMap, HashSet}; -use types::{AttesterSlashing, BeaconState, ChainSpec, EthSpec}; +use types::{AttesterSlashing, BeaconState, EthSpec}; #[derive(Debug, Clone)] pub struct AttesterSlashingMaxCover<'a, T: EthSpec> { @@ -14,7 +14,6 @@ impl<'a, T: EthSpec> 
AttesterSlashingMaxCover<'a, T> { slashing: &'a AttesterSlashing, proposer_slashing_indices: &HashSet, state: &BeaconState, - spec: &ChainSpec, ) -> Option { let mut effective_balances: HashMap = HashMap::new(); let epoch = state.current_epoch(); @@ -22,21 +21,18 @@ impl<'a, T: EthSpec> AttesterSlashingMaxCover<'a, T> { let slashable_validators = get_slashable_indices_modular(state, slashing, |index, validator| { validator.is_slashable_at(epoch) && !proposer_slashing_indices.contains(&index) - }); - - if let Ok(validators) = slashable_validators { - for vd in &validators { - let eff_balance = state.get_effective_balance(*vd as usize, spec).ok()?; - effective_balances.insert(*vd, eff_balance); - } - - Some(Self { - slashing, - effective_balances, }) - } else { - None + .ok()?; + + for vd in slashable_validators { + let eff_balance = state.get_effective_balance(vd as usize).ok()?; + effective_balances.insert(vd, eff_balance); } + + Some(Self { + slashing, + effective_balances, + }) } } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 2e938d591e6..85faaa1f4d5 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -109,8 +109,8 @@ impl OperationPool { ) -> impl Iterator> + Send { let domain_bytes = AttestationId::compute_domain_bytes( epoch, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), spec, ); all_attestations @@ -153,7 +153,7 @@ impl OperationPool { .get_cached_active_validator_indices(RelativeEpoch::Current) .map_err(OpPoolError::GetAttestationsTotalBalanceError)?; let total_active_balance = state - .get_total_balance(&active_indices, spec) + .get_total_balance(active_indices, spec) .map_err(OpPoolError::GetAttestationsTotalBalanceError)?; // Split attestations for the previous & current epochs, so that we @@ -175,11 +175,15 @@ impl OperationPool { spec, ); - let prev_epoch_limit = std::cmp::min( - 
T::MaxPendingAttestations::to_usize() - .saturating_sub(state.previous_epoch_attestations.len()), - T::MaxAttestations::to_usize(), - ); + let prev_epoch_limit = if let BeaconState::Base(base_state) = state { + std::cmp::min( + T::MaxPendingAttestations::to_usize() + .saturating_sub(base_state.previous_epoch_attestations.len()), + T::MaxAttestations::to_usize(), + ) + } else { + T::MaxAttestations::to_usize() + }; let (prev_cover, curr_cover) = rayon::join( move || { @@ -246,13 +250,12 @@ impl OperationPool { pub fn get_slashings( &self, state: &BeaconState, - spec: &ChainSpec, ) -> (Vec, Vec>) { let proposer_slashings = filter_limit_operations( self.proposer_slashings.read().values(), |slashing| { state - .validators + .validators() .get(slashing.signed_header_1.message.proposer_index as usize) .map_or(false, |validator| !validator.slashed) }, @@ -269,8 +272,8 @@ impl OperationPool { let reader = self.attester_slashings.read(); let relevant_attester_slashings = reader.iter().flat_map(|(slashing, fork)| { - if *fork == state.fork.previous_version || *fork == state.fork.current_version { - AttesterSlashingMaxCover::new(&slashing, &to_be_slashed, state, spec) + if *fork == state.fork().previous_version || *fork == state.fork().current_version { + AttesterSlashingMaxCover::new(&slashing, &to_be_slashed, state) } else { None } @@ -291,7 +294,7 @@ impl OperationPool { pub fn prune_proposer_slashings(&self, head_state: &BeaconState) { prune_validator_hash_map( &mut self.proposer_slashings.write(), - |validator| validator.exit_epoch <= head_state.finalized_checkpoint.epoch, + |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -303,11 +306,11 @@ impl OperationPool { .write() .retain(|(slashing, fork_version)| { let previous_fork_is_finalized = - head_state.finalized_checkpoint.epoch >= head_state.fork.epoch; + head_state.finalized_checkpoint().epoch >= head_state.fork().epoch; // Prune any slashings which don't match the 
current fork version, or the previous // fork version if it is not finalized yet. - let fork_ok = (fork_version == &head_state.fork.current_version) - || (fork_version == &head_state.fork.previous_version + let fork_ok = (*fork_version == head_state.fork().current_version) + || (*fork_version == head_state.fork().previous_version && !previous_fork_is_finalized); // Slashings that don't slash any validators can also be dropped. let slashing_ok = @@ -317,7 +320,7 @@ impl OperationPool { // // We cannot check the `slashed` field since the `head` is not finalized and // a fork could un-slash someone. - validator.exit_epoch > head_state.finalized_checkpoint.epoch + validator.exit_epoch > head_state.finalized_checkpoint().epoch }) .map_or(false, |indices| !indices.is_empty()); @@ -365,7 +368,7 @@ impl OperationPool { // // We choose simplicity over the gain of pruning more exits since they are small and // should not be seen frequently. - |validator| validator.exit_epoch <= head_state.finalized_checkpoint.epoch, + |validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -474,7 +477,7 @@ fn prune_validator_hash_map( { map.retain(|&validator_index, _| { head_state - .validators + .validators() .get(validator_index as usize) .map_or(true, |validator| !prune_if(validator)) }); @@ -493,86 +496,69 @@ impl PartialEq for OperationPool { } } -// TODO: more tests #[cfg(all(test, not(debug_assertions)))] mod release_tests { + use lazy_static::lazy_static; + use super::attestation::earliest_attestation_validators; use super::*; + use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use state_processing::{ - common::{get_attesting_indices, get_base_reward}, + common::{base::get_base_reward, get_attesting_indices}, VerifyOperation, }; use std::collections::BTreeSet; use std::iter::FromIterator; - use types::test_utils::*; + use store::StoreConfig; use types::*; - /// Create a signed attestation for use in tests. 
- /// Signed by all validators in `committee[signing_range]` and `committee[extra_signer]`. - fn signed_attestation, E: EthSpec>( - committee: &[usize], - index: u64, - keypairs: &[Keypair], - signing_range: R, - slot: Slot, - state: &BeaconState, - spec: &ChainSpec, - extra_signer: Option, - ) -> Attestation { - let mut builder = TestingAttestationBuilder::new( - AttestationTestTask::Valid, - state, - committee, - slot, - index, - spec, - ); - let signers = &committee[signing_range]; - let committee_keys = signers.iter().map(|&i| &keypairs[i].sk).collect::>(); - builder.sign( - AttestationTestTask::Valid, - signers, - &committee_keys, - &state.fork, - state.genesis_validators_root, - spec, + pub const MAX_VALIDATOR_COUNT: usize = 4 * 32 * 128; + + lazy_static! { + /// A cached set of keys. + static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); + } + + fn get_harness( + validator_count: usize, + ) -> BeaconChainHarness> { + let harness = BeaconChainHarness::new_with_store_config( + E::default(), + None, + KEYPAIRS[0..validator_count].to_vec(), + StoreConfig::default(), ); - extra_signer.map(|c_idx| { - let validator_index = committee[c_idx]; - builder.sign( - AttestationTestTask::Valid, - &[validator_index], - &[&keypairs[validator_index].sk], - &state.fork, - state.genesis_validators_root, - spec, - ) - }); - builder.build() + + harness.advance_slot(); + + harness } /// Test state for attestation-related tests. 
fn attestation_test_state( num_committees: usize, - ) -> (BeaconState, Vec, ChainSpec) { + ) -> (BeaconChainHarness>, ChainSpec) { let spec = E::default_spec(); let num_validators = num_committees * E::slots_per_epoch() as usize * spec.target_committee_size; - let mut state_builder = - TestingBeaconStateBuilder::from_deterministic_keypairs(num_validators, &spec); - let slot_offset = 1000 * E::slots_per_epoch() + E::slots_per_epoch() / 2; - let slot = spec.genesis_slot + slot_offset; - state_builder.teleport_to_slot(slot); - state_builder.build_caches(&spec).unwrap(); - let (state, keypairs) = state_builder.build(); - (state, keypairs, spec) + let harness = get_harness::(num_validators); + + let slot_offset = 5 * E::slots_per_epoch() + E::slots_per_epoch() / 2; + + // advance until we have finalized and justified epochs + for _ in 0..slot_offset { + harness.advance_slot(); + } + + (harness, spec) } #[test] fn test_earliest_attestation() { - let (ref mut state, ref keypairs, ref spec) = attestation_test_state::(1); - let slot = state.slot - 1; + let (harness, ref spec) = attestation_test_state::(1); + let mut state = harness.get_current_state(); + let slot = state.slot() - 1; let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -580,33 +566,44 @@ mod release_tests { .map(BeaconCommittee::into_owned) .collect::>(); - for bc in committees { - let att1 = signed_attestation( - &bc.committee, - bc.index, - keypairs, - ..2, - slot, - state, - spec, - None, - ); - let att2 = signed_attestation( - &bc.committee, - bc.index, - keypairs, - .., - slot, - state, - spec, - None, - ); + let num_validators = + MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size; + + let attestations = harness.make_attestations( + (0..num_validators).collect::>().as_slice(), + &state, + Hash256::zero(), + SignedBeaconBlockHash::from(Hash256::zero()), + slot, + ); + + for (atts, aggregate) in &attestations { + let att2 = 
aggregate.as_ref().unwrap().message.aggregate.clone(); + + let att1 = atts + .into_iter() + .map(|(att, _)| att) + .take(2) + .fold::>, _>(None, |att, new_att| { + if let Some(mut a) = att { + a.aggregate(&new_att); + Some(a) + } else { + Some(new_att.clone()) + } + }) + .unwrap(); assert_eq!( att1.aggregation_bits.num_set_bits(), - earliest_attestation_validators(&att1, state).num_set_bits() + earliest_attestation_validators(&att1, &state, state.as_base().unwrap()) + .num_set_bits() ); + + // FIXME(altair): handle altair in these tests state + .as_base_mut() + .unwrap() .current_epoch_attestations .push(PendingAttestation { aggregation_bits: att1.aggregation_bits.clone(), @@ -617,8 +614,9 @@ mod release_tests { .unwrap(); assert_eq!( - bc.committee.len() - 2, - earliest_attestation_validators(&att2, state).num_set_bits() + committees.get(0).unwrap().committee.len() - 2, + earliest_attestation_validators(&att2, &state, state.as_base().unwrap()) + .num_set_bits() ); } } @@ -626,11 +624,12 @@ mod release_tests { /// End-to-end test of basic attestation handling. 
#[test] fn attestation_aggregation_insert_get_prune() { - let (ref mut state, ref keypairs, ref spec) = attestation_test_state::(1); + let (harness, ref spec) = attestation_test_state::(1); - let op_pool = OperationPool::new(); + let op_pool = OperationPool::::new(); + let mut state = harness.get_current_state(); - let slot = state.slot - 1; + let slot = state.slot() - 1; let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -644,21 +643,21 @@ mod release_tests { "we expect just one committee with this many validators" ); - for bc in &committees { - let step_size = 2; - for i in (0..bc.committee.len()).step_by(step_size) { - let att = signed_attestation( - &bc.committee, - bc.index, - keypairs, - i..i + step_size, - slot, - state, - spec, - None, - ); + let num_validators = + MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size; + + let attestations = harness.make_attestations( + (0..num_validators).collect::>().as_slice(), + &state, + Hash256::zero(), + SignedBeaconBlockHash::from(Hash256::zero()), + slot, + ); + + for (atts, _) in attestations { + for att in atts.into_iter() { op_pool - .insert_attestation(att, &state.fork, state.genesis_validators_root, spec) + .insert_attestation(att.0, &state.fork(), state.genesis_validators_root(), spec) .unwrap(); } } @@ -667,20 +666,20 @@ mod release_tests { assert_eq!(op_pool.num_attestations(), committees.len()); // Before the min attestation inclusion delay, get_attestations shouldn't return anything. - state.slot -= 1; + *state.slot_mut() -= 1; assert_eq!( op_pool - .get_attestations(state, |_| true, |_| true, spec) + .get_attestations(&state, |_| true, |_| true, spec) .expect("should have attestations") .len(), 0 ); // Then once the delay has elapsed, we should get a single aggregated attestation. 
- state.slot += spec.min_attestation_inclusion_delay; + *state.slot_mut() += spec.min_attestation_inclusion_delay; let block_attestations = op_pool - .get_attestations(state, |_| true, |_| true, spec) + .get_attestations(&state, |_| true, |_| true, spec) .expect("Should have block attestations"); assert_eq!(block_attestations.len(), committees.len()); @@ -696,7 +695,7 @@ mod release_tests { // But once we advance to more than an epoch after the attestation, it should prune it // out of existence. - state.slot += 2 * MainnetEthSpec::slots_per_epoch(); + *state.slot_mut() += 2 * MainnetEthSpec::slots_per_epoch(); op_pool.prune_attestations(state.current_epoch()); assert_eq!(op_pool.num_attestations(), 0); } @@ -704,11 +703,13 @@ mod release_tests { /// Adding an attestation already in the pool should not increase the size of the pool. #[test] fn attestation_duplicate() { - let (ref mut state, ref keypairs, ref spec) = attestation_test_state::(1); + let (harness, ref spec) = attestation_test_state::(1); - let op_pool = OperationPool::new(); + let state = harness.get_current_state(); - let slot = state.slot - 1; + let op_pool = OperationPool::::new(); + + let slot = state.slot() - 1; let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -716,27 +717,28 @@ mod release_tests { .map(BeaconCommittee::into_owned) .collect::>(); - for bc in &committees { - let att = signed_attestation( - &bc.committee, - bc.index, - keypairs, - .., - slot, - state, - spec, - None, - ); + let num_validators = + MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size; + let attestations = harness.make_attestations( + (0..num_validators).collect::>().as_slice(), + &state, + Hash256::zero(), + SignedBeaconBlockHash::from(Hash256::zero()), + slot, + ); + + for (_, aggregate) in attestations { + let att = aggregate.unwrap().message.aggregate; op_pool .insert_attestation( att.clone(), - &state.fork, - state.genesis_validators_root, + &state.fork(), + 
state.genesis_validators_root(), spec, ) .unwrap(); op_pool - .insert_attestation(att, &state.fork, state.genesis_validators_root, spec) + .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) .unwrap(); } @@ -747,11 +749,13 @@ mod release_tests { /// attestations. #[test] fn attestation_pairwise_overlapping() { - let (ref mut state, ref keypairs, ref spec) = attestation_test_state::(1); + let (harness, ref spec) = attestation_test_state::(1); - let op_pool = OperationPool::new(); + let state = harness.get_current_state(); - let slot = state.slot - 1; + let op_pool = OperationPool::::new(); + + let slot = state.slot() - 1; let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -759,23 +763,68 @@ mod release_tests { .map(BeaconCommittee::into_owned) .collect::>(); + let num_validators = + MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size; + + let attestations = harness.make_attestations( + (0..num_validators).collect::>().as_slice(), + &state, + Hash256::zero(), + SignedBeaconBlockHash::from(Hash256::zero()), + slot, + ); + let step_size = 2; - for bc in &committees { - // Create attestations that overlap on `step_size` validators, like: - // {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ... - for i in (0..bc.committee.len() - step_size).step_by(step_size) { - let att = signed_attestation( - &bc.committee, - bc.index, - keypairs, - i..i + 2 * step_size, - slot, - state, - spec, - None, - ); + // Create attestations that overlap on `step_size` validators, like: + // {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ... 
+ for (atts1, _) in attestations { + let atts2 = atts1.clone(); + let aggs1 = atts1 + .chunks_exact(step_size * 2) + .map(|chunk| { + let agg = chunk.into_iter().map(|(att, _)| att).fold::, + >, _>( + None, + |att, new_att| { + if let Some(mut a) = att { + a.aggregate(new_att); + Some(a) + } else { + Some(new_att.clone()) + } + }, + ); + agg.unwrap() + }) + .collect::>(); + let aggs2 = atts2 + .into_iter() + .skip(step_size) + .collect::>() + .as_slice() + .chunks_exact(step_size * 2) + .map(|chunk| { + let agg = chunk.into_iter().map(|(att, _)| att).fold::, + >, _>( + None, + |att, new_att| { + if let Some(mut a) = att { + a.aggregate(new_att); + Some(a) + } else { + Some(new_att.clone()) + } + }, + ); + agg.unwrap() + }) + .collect::>(); + + for att in aggs1.into_iter().chain(aggs2.into_iter()) { op_pool - .insert_attestation(att, &state.fork, state.genesis_validators_root, spec) + .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) .unwrap(); } } @@ -795,13 +844,15 @@ mod release_tests { fn attestation_get_max() { let small_step_size = 2; let big_step_size = 4; + let num_committees = big_step_size; + + let (harness, ref spec) = attestation_test_state::(num_committees); - let (ref mut state, ref keypairs, ref spec) = - attestation_test_state::(big_step_size); + let mut state = harness.get_current_state(); - let op_pool = OperationPool::new(); + let op_pool = OperationPool::::new(); - let slot = state.slot - 1; + let slot = state.slot() - 1; let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -811,31 +862,50 @@ mod release_tests { let max_attestations = ::MaxAttestations::to_usize(); let target_committee_size = spec.target_committee_size as usize; + let num_validators = num_committees + * MainnetEthSpec::slots_per_epoch() as usize + * spec.target_committee_size; + + let attestations = harness.make_attestations( + (0..num_validators).collect::>().as_slice(), + &state, + Hash256::zero(), + 
SignedBeaconBlockHash::from(Hash256::zero()), + slot, + ); - let insert_attestations = |bc: &OwnedBeaconCommittee, step_size| { - for i in (0..target_committee_size).step_by(step_size) { - let att = signed_attestation( - &bc.committee, - bc.index, - keypairs, - i..i + step_size, - slot, - state, - spec, - if i == 0 { None } else { Some(0) }, - ); + let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, + step_size| { + let att_0 = attestations.get(0).unwrap().0.clone(); + let aggs = attestations + .chunks_exact(step_size) + .map(|chunk| { + chunk + .into_iter() + .map(|(att, _)| att) + .fold::, _>( + att_0.clone(), + |mut att, new_att| { + att.aggregate(new_att); + att + }, + ) + }) + .collect::>(); + + for att in aggs { op_pool - .insert_attestation(att, &state.fork, state.genesis_validators_root, spec) + .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) .unwrap(); } }; - for committee in &committees { - assert_eq!(committee.committee.len(), target_committee_size); + for (atts, _) in attestations { + assert_eq!(atts.len(), target_committee_size); // Attestations signed by only 2-3 validators - insert_attestations(committee, small_step_size); + insert_attestations(atts.clone(), small_step_size); // Attestations signed by 4+ validators - insert_attestations(committee, big_step_size); + insert_attestations(atts, big_step_size); } let num_small = target_committee_size / small_step_size; @@ -848,9 +918,9 @@ mod release_tests { ); assert!(op_pool.num_attestations() > max_attestations); - state.slot += spec.min_attestation_inclusion_delay; + *state.slot_mut() += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(state, |_| true, |_| true, spec) + .get_attestations(&state, |_| true, |_| true, spec) .expect("should have best attestations"); assert_eq!(best_attestations.len(), max_attestations); @@ -864,13 +934,14 @@ mod release_tests { fn attestation_rewards() { let small_step_size = 2; let 
big_step_size = 4; + let num_committees = big_step_size; - let (ref mut state, ref keypairs, ref spec) = - attestation_test_state::(big_step_size); + let (harness, ref spec) = attestation_test_state::(num_committees); - let op_pool = OperationPool::new(); + let mut state = harness.get_current_state(); + let op_pool = OperationPool::::new(); - let slot = state.slot - 1; + let slot = state.slot() - 1; let committees = state .get_beacon_committees_at_slot(slot) .unwrap() @@ -883,34 +954,53 @@ mod release_tests { // Each validator will have a multiple of 1_000_000_000 wei. // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000). - for i in 0..state.validators.len() { - state.validators[i].effective_balance = 1_000_000_000 * i as u64; + for i in 0..state.validators().len() { + state.validators_mut()[i].effective_balance = 1_000_000_000 * i as u64; } - let insert_attestations = |bc: &OwnedBeaconCommittee, step_size| { - for i in (0..target_committee_size).step_by(step_size) { - let att = signed_attestation( - &bc.committee, - bc.index, - keypairs, - i..i + step_size, - slot, - state, - spec, - if i == 0 { None } else { Some(0) }, - ); + let num_validators = num_committees + * MainnetEthSpec::slots_per_epoch() as usize + * spec.target_committee_size; + let attestations = harness.make_attestations( + (0..num_validators).collect::>().as_slice(), + &state, + Hash256::zero(), + SignedBeaconBlockHash::from(Hash256::zero()), + slot, + ); + + let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, + step_size| { + let att_0 = attestations.get(0).unwrap().0.clone(); + let aggs = attestations + .chunks_exact(step_size) + .map(|chunk| { + chunk + .into_iter() + .map(|(att, _)| att) + .fold::, _>( + att_0.clone(), + |mut att, new_att| { + att.aggregate(new_att); + att + }, + ) + }) + .collect::>(); + + for att in aggs { op_pool - .insert_attestation(att, &state.fork, state.genesis_validators_root, spec) + .insert_attestation(att, 
&state.fork(), state.genesis_validators_root(), spec) .unwrap(); } }; - for committee in &committees { - assert_eq!(committee.committee.len(), target_committee_size); + for (atts, _) in attestations { + assert_eq!(atts.len(), target_committee_size); // Attestations signed by only 2-3 validators - insert_attestations(committee, small_step_size); + insert_attestations(atts.clone(), small_step_size); // Attestations signed by 4+ validators - insert_attestations(committee, big_step_size); + insert_attestations(atts, big_step_size); } let num_small = target_committee_size / small_step_size; @@ -923,16 +1013,16 @@ mod release_tests { ); assert!(op_pool.num_attestations() > max_attestations); - state.slot += spec.min_attestation_inclusion_delay; + *state.slot_mut() += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(state, |_| true, |_| true, spec) + .get_attestations(&state, |_| true, |_| true, spec) .expect("should have valid best attestations"); assert_eq!(best_attestations.len(), max_attestations); let active_indices = state .get_cached_active_validator_indices(RelativeEpoch::Current) .unwrap(); - let total_active_balance = state.get_total_balance(&active_indices, spec).unwrap(); + let total_active_balance = state.get_total_balance(active_indices, spec).unwrap(); // Set of indices covered by previous attestations in `best_attestations`. 
let mut seen_indices = BTreeSet::new(); @@ -940,7 +1030,8 @@ mod release_tests { let mut prev_reward = u64::max_value(); for att in &best_attestations { - let fresh_validators_bitlist = earliest_attestation_validators(att, state); + let fresh_validators_bitlist = + earliest_attestation_validators(att, &state, state.as_base().unwrap()); let committee = state .get_beacon_committee(att.data.slot, att.data.index) .expect("should get beacon committee"); @@ -958,8 +1049,13 @@ mod release_tests { let rewards = fresh_indices .iter() .map(|validator_index| { - get_base_reward(state, *validator_index as usize, total_active_balance, spec) - .unwrap() + get_base_reward( + &state, + *validator_index as usize, + total_active_balance, + spec, + ) + .unwrap() / spec.proposer_reward_quotient }) .sum(); @@ -972,275 +1068,221 @@ mod release_tests { } } - struct TestContext { - spec: ChainSpec, - state: BeaconState, - keypairs: Vec, - op_pool: OperationPool, - } - - impl TestContext { - fn new() -> Self { - let spec = MainnetEthSpec::default_spec(); - let num_validators = 32; - let mut state_builder = - TestingBeaconStateBuilder::::from_deterministic_keypairs( - num_validators, - &spec, - ); - state_builder.build_caches(&spec).unwrap(); - let (state, keypairs) = state_builder.build(); - let op_pool = OperationPool::new(); - - TestContext { - spec, - state, - keypairs, - op_pool, - } - } - - fn proposer_slashing(&self, proposer_index: u64) -> ProposerSlashing { - TestingProposerSlashingBuilder::double_vote::( - ProposerSlashingTestTask::Valid, - proposer_index, - &self.keypairs[proposer_index as usize].sk, - &self.state.fork, - self.state.genesis_validators_root, - &self.spec, - ) - } - - fn attester_slashing(&self, slashed_indices: &[u64]) -> AttesterSlashing { - let signer = |idx: u64, message: &[u8]| { - self.keypairs[idx as usize] - .sk - .sign(Hash256::from_slice(&message)) - }; - TestingAttesterSlashingBuilder::double_vote( - AttesterSlashingTestTask::Valid, - slashed_indices, - 
signer, - &self.state.fork, - self.state.genesis_validators_root, - &self.spec, - ) - } - - fn attester_slashing_two_indices( - &self, - slashed_indices_1: &[u64], - slashed_indices_2: &[u64], - ) -> AttesterSlashing { - let signer = |idx: u64, message: &[u8]| { - self.keypairs[idx as usize] - .sk - .sign(Hash256::from_slice(&message)) - }; - TestingAttesterSlashingBuilder::double_vote_with_additional_indices( - AttesterSlashingTestTask::Valid, - slashed_indices_1, - Some(slashed_indices_2), - signer, - &self.state.fork, - self.state.genesis_validators_root, - &self.spec, - ) - } - } - /// Insert two slashings for the same proposer and ensure only one is returned. #[test] fn duplicate_proposer_slashing() { - let ctxt = TestContext::new(); - let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec); + let harness = get_harness(32); + let state = harness.get_current_state(); + let op_pool = OperationPool::::new(); + let proposer_index = 0; - let slashing1 = ctxt.proposer_slashing(proposer_index); + let slashing1 = harness.make_proposer_slashing(proposer_index); + let slashing2 = ProposerSlashing { signed_header_1: slashing1.signed_header_2.clone(), signed_header_2: slashing1.signed_header_1.clone(), }; // Both slashings should be valid and accepted by the pool. - op_pool.insert_proposer_slashing(slashing1.clone().validate(state, spec).unwrap()); - op_pool.insert_proposer_slashing(slashing2.clone().validate(state, spec).unwrap()); + op_pool + .insert_proposer_slashing(slashing1.clone().validate(&state, &harness.spec).unwrap()); + op_pool + .insert_proposer_slashing(slashing2.clone().validate(&state, &harness.spec).unwrap()); // Should only get the second slashing back. 
- assert_eq!(op_pool.get_slashings(state, spec).0, vec![slashing2]); + assert_eq!(op_pool.get_slashings(&state).0, vec![slashing2]); } // Sanity check on the pruning of proposer slashings #[test] fn prune_proposer_slashing_noop() { - let ctxt = TestContext::new(); - let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec); - let slashing = ctxt.proposer_slashing(0); - op_pool.insert_proposer_slashing(slashing.clone().validate(state, spec).unwrap()); - op_pool.prune_proposer_slashings(state); - assert_eq!(op_pool.get_slashings(state, spec).0, vec![slashing]); + let harness = get_harness(32); + let state = harness.get_current_state(); + let op_pool = OperationPool::::new(); + + let slashing = harness.make_proposer_slashing(0); + op_pool.insert_proposer_slashing(slashing.clone().validate(&state, &harness.spec).unwrap()); + op_pool.prune_proposer_slashings(&state); + assert_eq!(op_pool.get_slashings(&state).0, vec![slashing]); } // Sanity check on the pruning of attester slashings #[test] fn prune_attester_slashing_noop() { - let ctxt = TestContext::new(); - let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec); - let slashing = ctxt.attester_slashing(&[1, 3, 5, 7, 9]); - op_pool - .insert_attester_slashing(slashing.clone().validate(state, spec).unwrap(), state.fork); - op_pool.prune_attester_slashings(state); - assert_eq!(op_pool.get_slashings(state, spec).1, vec![slashing]); + let harness = get_harness(32); + let spec = &harness.spec; + let state = harness.get_current_state(); + let op_pool = OperationPool::::new(); + + let slashing = harness.make_attester_slashing(vec![1, 3, 5, 7, 9]); + op_pool.insert_attester_slashing( + slashing.clone().validate(&state, spec).unwrap(), + state.fork(), + ); + op_pool.prune_attester_slashings(&state); + assert_eq!(op_pool.get_slashings(&state).1, vec![slashing]); } // Check that we get maximum coverage for attester slashings (highest qty of validators slashed) #[test] fn 
simple_max_cover_attester_slashing() { - let ctxt = TestContext::new(); - let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec); + let harness = get_harness(32); + let spec = &harness.spec; + let state = harness.get_current_state(); + let op_pool = OperationPool::::new(); - let slashing_1 = ctxt.attester_slashing(&[1]); - let slashing_2 = ctxt.attester_slashing(&[2, 3]); - let slashing_3 = ctxt.attester_slashing(&[4, 5, 6]); - let slashing_4 = ctxt.attester_slashing(&[7, 8, 9, 10]); + let slashing_1 = harness.make_attester_slashing(vec![1]); + let slashing_2 = harness.make_attester_slashing(vec![2, 3]); + let slashing_3 = harness.make_attester_slashing(vec![4, 5, 6]); + let slashing_4 = harness.make_attester_slashing(vec![7, 8, 9, 10]); op_pool.insert_attester_slashing( - slashing_1.clone().validate(state, spec).unwrap(), - state.fork, + slashing_1.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - slashing_2.clone().validate(state, spec).unwrap(), - state.fork, + slashing_2.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - slashing_3.clone().validate(state, spec).unwrap(), - state.fork, + slashing_3.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - slashing_4.clone().validate(state, spec).unwrap(), - state.fork, + slashing_4.clone().validate(&state, spec).unwrap(), + state.fork(), ); - let best_slashings = op_pool.get_slashings(state, spec); + let best_slashings = op_pool.get_slashings(&state); assert_eq!(best_slashings.1, vec![slashing_4, slashing_3]); } // Check that we get maximum coverage for attester slashings with overlapping indices #[test] fn overlapping_max_cover_attester_slashing() { - let ctxt = TestContext::new(); - let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec); + let harness = get_harness(32); + let spec = &harness.spec; + let state = harness.get_current_state(); + let op_pool 
= OperationPool::::new(); - let slashing_1 = ctxt.attester_slashing(&[1, 2, 3, 4]); - let slashing_2 = ctxt.attester_slashing(&[1, 2, 5]); - let slashing_3 = ctxt.attester_slashing(&[5, 6]); - let slashing_4 = ctxt.attester_slashing(&[6]); + let slashing_1 = harness.make_attester_slashing(vec![1, 2, 3, 4]); + let slashing_2 = harness.make_attester_slashing(vec![1, 2, 5]); + let slashing_3 = harness.make_attester_slashing(vec![5, 6]); + let slashing_4 = harness.make_attester_slashing(vec![6]); op_pool.insert_attester_slashing( - slashing_1.clone().validate(state, spec).unwrap(), - state.fork, + slashing_1.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - slashing_2.clone().validate(state, spec).unwrap(), - state.fork, + slashing_2.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - slashing_3.clone().validate(state, spec).unwrap(), - state.fork, + slashing_3.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - slashing_4.clone().validate(state, spec).unwrap(), - state.fork, + slashing_4.clone().validate(&state, spec).unwrap(), + state.fork(), ); - let best_slashings = op_pool.get_slashings(state, spec); + let best_slashings = op_pool.get_slashings(&state); assert_eq!(best_slashings.1, vec![slashing_1, slashing_3]); } // Max coverage of attester slashings taking into account proposer slashings #[test] fn max_coverage_attester_proposer_slashings() { - let ctxt = TestContext::new(); - let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec); + let harness = get_harness(32); + let spec = &harness.spec; + let state = harness.get_current_state(); + let op_pool = OperationPool::::new(); - let p_slashing = ctxt.proposer_slashing(1); - let a_slashing_1 = ctxt.attester_slashing(&[1, 2, 3, 4]); - let a_slashing_2 = ctxt.attester_slashing(&[1, 3, 4]); - let a_slashing_3 = ctxt.attester_slashing(&[5, 6]); + let p_slashing = 
harness.make_proposer_slashing(1); + let a_slashing_1 = harness.make_attester_slashing(vec![1, 2, 3, 4]); + let a_slashing_2 = harness.make_attester_slashing(vec![1, 3, 4]); + let a_slashing_3 = harness.make_attester_slashing(vec![5, 6]); - op_pool.insert_proposer_slashing(p_slashing.clone().validate(state, spec).unwrap()); + op_pool.insert_proposer_slashing(p_slashing.clone().validate(&state, spec).unwrap()); op_pool.insert_attester_slashing( - a_slashing_1.clone().validate(state, spec).unwrap(), - state.fork, + a_slashing_1.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - a_slashing_2.clone().validate(state, spec).unwrap(), - state.fork, + a_slashing_2.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - a_slashing_3.clone().validate(state, spec).unwrap(), - state.fork, + a_slashing_3.clone().validate(&state, spec).unwrap(), + state.fork(), ); - let best_slashings = op_pool.get_slashings(state, spec); + let best_slashings = op_pool.get_slashings(&state); assert_eq!(best_slashings.1, vec![a_slashing_1, a_slashing_3]); } //Max coverage checking that non overlapping indices are still recognized for their value #[test] fn max_coverage_different_indices_set() { - let ctxt = TestContext::new(); - let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec); - - let slashing_1 = - ctxt.attester_slashing_two_indices(&[1, 2, 3, 4, 5, 6], &[3, 4, 5, 6, 7, 8]); - let slashing_2 = ctxt.attester_slashing(&[5, 6]); - let slashing_3 = ctxt.attester_slashing(&[1, 2, 3]); + let harness = get_harness(32); + let spec = &harness.spec; + let state = harness.get_current_state(); + let op_pool = OperationPool::::new(); + + let slashing_1 = harness.make_attester_slashing_different_indices( + vec![1, 2, 3, 4, 5, 6], + vec![3, 4, 5, 6, 7, 8], + ); + let slashing_2 = harness.make_attester_slashing(vec![5, 6]); + let slashing_3 = harness.make_attester_slashing(vec![1, 2, 3]); 
op_pool.insert_attester_slashing( - slashing_1.clone().validate(state, spec).unwrap(), - state.fork, + slashing_1.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - slashing_2.clone().validate(state, spec).unwrap(), - state.fork, + slashing_2.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - slashing_3.clone().validate(state, spec).unwrap(), - state.fork, + slashing_3.clone().validate(&state, spec).unwrap(), + state.fork(), ); - let best_slashings = op_pool.get_slashings(state, spec); + let best_slashings = op_pool.get_slashings(&state); assert_eq!(best_slashings.1, vec![slashing_1, slashing_3]); } //Max coverage should be affected by the overall effective balances #[test] fn max_coverage_effective_balances() { - let mut ctxt = TestContext::new(); - ctxt.state.validators[1].effective_balance = 17_000_000_000; - ctxt.state.validators[2].effective_balance = 17_000_000_000; - ctxt.state.validators[3].effective_balance = 17_000_000_000; - - let (op_pool, state, spec) = (&ctxt.op_pool, &ctxt.state, &ctxt.spec); - - let slashing_1 = ctxt.attester_slashing(&[1, 2, 3]); - let slashing_2 = ctxt.attester_slashing(&[4, 5, 6]); - let slashing_3 = ctxt.attester_slashing(&[7, 8]); + let harness = get_harness(32); + let spec = &harness.spec; + let mut state = harness.get_current_state(); + let op_pool = OperationPool::::new(); + state.validators_mut()[1].effective_balance = 17_000_000_000; + state.validators_mut()[2].effective_balance = 17_000_000_000; + state.validators_mut()[3].effective_balance = 17_000_000_000; + + let slashing_1 = harness.make_attester_slashing(vec![1, 2, 3]); + let slashing_2 = harness.make_attester_slashing(vec![4, 5, 6]); + let slashing_3 = harness.make_attester_slashing(vec![7, 8]); op_pool.insert_attester_slashing( - slashing_1.clone().validate(state, spec).unwrap(), - state.fork, + slashing_1.clone().validate(&state, spec).unwrap(), + state.fork(), ); 
op_pool.insert_attester_slashing( - slashing_2.clone().validate(state, spec).unwrap(), - state.fork, + slashing_2.clone().validate(&state, spec).unwrap(), + state.fork(), ); op_pool.insert_attester_slashing( - slashing_3.clone().validate(state, spec).unwrap(), - state.fork, + slashing_3.clone().validate(&state, spec).unwrap(), + state.fork(), ); - let best_slashings = op_pool.get_slashings(state, spec); + let best_slashings = op_pool.get_slashings(&state); assert_eq!(best_slashings.1, vec![slashing_2, slashing_3]); } } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 30d4245a4d6..e6f1673964a 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -198,6 +198,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5052).") .takes_value(true), ) + .arg( + Arg::with_name("http-disable-legacy-spec") + .long("http-disable-legacy-spec") + .help("Disable serving of legacy data on the /config/spec endpoint. May be \ + disabled by default in a future release.") + ) /* Prometheus metrics HTTP server related arguments */ .arg( Arg::with_name("metrics") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b32a0cec15b..6677e6a090f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -107,6 +107,10 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } + if cli_args.is_present("http-disable-legacy-spec") { + client_config.http_api.serve_legacy_spec = false; + } + /* * Prometheus metrics HTTP server */ diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 5882987741b..19a1944a6b8 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -4,20 +4,15 @@ version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" -[[bench]] -name = "benches" -harness = false - [dev-dependencies] tempfile = "3.1.0" -criterion = "0.3.3" -rayon = "1.4.1" +beacon_chain = {path = "../beacon_chain"} 
[dependencies] db-key = "0.0.5" leveldb = { version = "0.8.6", default-features = false } parking_lot = "0.11.0" -itertools = "0.9.0" +itertools = "0.10.0" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" tree_hash = "0.1.1" diff --git a/beacon_node/store/benches/benches.rs b/beacon_node/store/benches/benches.rs deleted file mode 100644 index b1721fa0560..00000000000 --- a/beacon_node/store/benches/benches.rs +++ /dev/null @@ -1,115 +0,0 @@ -#![allow(deprecated)] - -use criterion::Criterion; -use criterion::{black_box, criterion_group, criterion_main, Benchmark}; -use rayon::prelude::*; -use ssz::{Decode, Encode}; -use std::convert::TryInto; -use store::BeaconStateStorageContainer; -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, Hash256, - MainnetEthSpec, Validator, -}; - -fn get_state(validator_count: usize) -> BeaconState { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state.balances.push(i as u64).expect("should add balance"); - } - - state.validators = (0..validator_count) - .collect::>() - .par_iter() - .map(|&i| Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: spec.max_effective_balance, - slashed: false, - activation_eligibility_epoch: Epoch::new(0), - activation_epoch: Epoch::new(0), - exit_epoch: Epoch::from(u64::max_value()), - withdrawable_epoch: Epoch::from(u64::max_value()), - }) - .collect::>() - .into(); - - state.build_all_caches(spec).expect("should build caches"); - - state -} - -fn all_benches(c: &mut Criterion) { - let validator_count = 16_384; - let state = get_state::(validator_count); - let storage_container = BeaconStateStorageContainer::new(&state); - let state_bytes = 
storage_container.as_ssz_bytes(); - - let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("encode/beacon_state", move |b| { - b.iter_batched_ref( - || inner_state.clone(), - |state| black_box(BeaconStateStorageContainer::new(state).as_ssz_bytes()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("encode/beacon_state/tree_hash_cache", move |b| { - b.iter_batched_ref( - || inner_state.tree_hash_cache.clone(), - |tree_hash_cache| black_box(tree_hash_cache.as_ssz_bytes()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let inner_state = state; - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("encode/beacon_state/committee_cache[0]", move |b| { - b.iter_batched_ref( - || inner_state.committee_caches[0].clone(), - |committee_cache| black_box(committee_cache.as_ssz_bytes()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("decode/beacon_state", move |b| { - b.iter_batched_ref( - || state_bytes.clone(), - |bytes| { - let state: BeaconState = - BeaconStateStorageContainer::from_ssz_bytes(&bytes) - .expect("should decode") - .try_into() - .expect("should convert into state"); - black_box(state) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); -} - -criterion_group!(benches, all_benches,); -criterion_main!(benches); diff --git a/beacon_node/store/examples/ssz_encode_state_container.rs b/beacon_node/store/examples/ssz_encode_state_container.rs deleted file mode 100644 index d44f7b4e9e4..00000000000 --- a/beacon_node/store/examples/ssz_encode_state_container.rs +++ /dev/null @@ -1,62 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! 
maintain, feel free to delete. - -use rayon::prelude::*; -use ssz::{Decode, Encode}; -use std::convert::TryInto; -use store::BeaconStateStorageContainer; -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, Hash256, - MainnetEthSpec, Validator, -}; - -type E = MainnetEthSpec; - -fn get_state(validator_count: usize) -> BeaconState { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state.balances.push(i as u64).expect("should add balance"); - } - - state.validators = (0..validator_count) - .collect::>() - .par_iter() - .map(|&i| Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: spec.max_effective_balance, - slashed: false, - activation_eligibility_epoch: Epoch::new(0), - activation_epoch: Epoch::new(0), - exit_epoch: Epoch::from(u64::max_value()), - withdrawable_epoch: Epoch::from(u64::max_value()), - }) - .collect::>() - .into(); - - state.build_all_caches(spec).expect("should build caches"); - - state -} - -fn main() { - let validator_count = 1_024; - let state = get_state::(validator_count); - let storage_container = BeaconStateStorageContainer::new(&state); - - for _ in 0..1024 { - let container_bytes = storage_container.as_ssz_bytes(); - let _: BeaconState = BeaconStateStorageContainer::from_ssz_bytes(&container_bytes) - .expect("should decode") - .try_into() - .expect("should convert into state"); - } -} diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 9a26304a0b5..5958d6b624f 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -226,12 +226,12 @@ pub trait Field: Copy { /// Extract the genesis value for a fixed length field 
from an /// - /// Will only return a correct value if `slot_needs_genesis_value(state.slot, spec) == true`. + /// Will only return a correct value if `slot_needs_genesis_value(state.slot(), spec) == true`. fn extract_genesis_value( state: &BeaconState, spec: &ChainSpec, ) -> Result { - let (_, end_vindex) = Self::start_and_end_vindex(state.slot, spec); + let (_, end_vindex) = Self::start_and_end_vindex(state.slot(), spec); match Self::update_pattern(spec) { // Genesis value is guaranteed to exist at `end_vindex`, as it won't yet have been // updated @@ -295,7 +295,7 @@ field!( T::SlotsPerHistoricalRoot, DBColumn::BeaconBlockRoots, |_| OncePerNSlots { n: 1 }, - |state: &BeaconState<_>, index, _| safe_modulo_index(&state.block_roots, index) + |state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index) ); field!( @@ -305,7 +305,7 @@ field!( T::SlotsPerHistoricalRoot, DBColumn::BeaconStateRoots, |_| OncePerNSlots { n: 1 }, - |state: &BeaconState<_>, index, _| safe_modulo_index(&state.state_roots, index) + |state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index) ); field!( @@ -317,7 +317,7 @@ field!( |_| OncePerNSlots { n: T::SlotsPerHistoricalRoot::to_u64() }, - |state: &BeaconState<_>, index, _| safe_modulo_index(&state.historical_roots, index) + |state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index) ); field!( @@ -327,7 +327,7 @@ field!( T::EpochsPerHistoricalVector, DBColumn::BeaconRandaoMixes, |_| OncePerEpoch { lag: 1 }, - |state: &BeaconState<_>, index, _| safe_modulo_index(&state.randao_mixes, index) + |state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index) ); pub fn store_updated_vector, E: EthSpec, S: KeyValueStore>( @@ -338,12 +338,12 @@ pub fn store_updated_vector, E: EthSpec, S: KeyValueStore>( ops: &mut Vec, ) -> Result<(), Error> { let chunk_size = F::chunk_size(); - let (start_vindex, end_vindex) = F::start_and_end_vindex(state.slot, spec); + let 
(start_vindex, end_vindex) = F::start_and_end_vindex(state.slot(), spec); let start_cindex = start_vindex / chunk_size; let end_cindex = end_vindex / chunk_size; // Store the genesis value if we have access to it, and it hasn't been stored already. - if F::slot_needs_genesis_value(state.slot, spec) { + if F::slot_needs_genesis_value(state.slot(), spec) { let genesis_value = F::extract_genesis_value(state, spec)?; F::check_and_store_genesis_value(store, genesis_value, ops)?; } diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index cec57e64ec3..5a77863d543 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -71,7 +71,7 @@ impl SimpleForwardsBlockRootsIterator { ) -> Result { // Iterate backwards from the end state, stopping at the start slot. let values = process_results( - std::iter::once(Ok((end_block_root, end_state.slot))) + std::iter::once(Ok((end_block_root, end_state.slot()))) .chain(BlockRootsIterator::owned(store, end_state)), |iter| { iter.take_while(|(_, slot)| *slot >= start_slot) @@ -237,7 +237,7 @@ impl SimpleForwardsStateRootsIterator { ) -> Result { // Iterate backwards from the end state, stopping at the start slot. 
let values = process_results( - std::iter::once(Ok((end_state_root, end_state.slot))) + std::iter::once(Ok((end_state_root, end_state.slot()))) .chain(StateRootsIterator::owned(store, end_state)), |iter| { iter.take_while(|(_, slot)| *slot >= start_slot) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6489301c9d2..3840ade4f2b 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -3,7 +3,10 @@ use crate::chunked_vector::{ }; use crate::config::{OnDiskStoreConfig, StoreConfig}; use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator}; -use crate::impls::beacon_state::{get_full_state, store_full_state}; +use crate::impls::{ + beacon_block_as_kv_store_op, + beacon_state::{get_full_state, store_full_state}, +}; use crate::iter::{ParentRootBlockIterator, StateRootsIterator}; use crate::leveldb_store::BytesKey; use crate::leveldb_store::LevelDB; @@ -240,7 +243,8 @@ impl, Cold: ItemStore> HotColdDB block: SignedBeaconBlock, ) -> Result<(), Error> { // Store on disk. - self.hot_db.put(block_root, &block)?; + self.hot_db + .do_atomically(vec![beacon_block_as_kv_store_op(block_root, &block)])?; // Update cache. self.block_cache.lock().put(*block_root, block); @@ -259,20 +263,34 @@ impl, Cold: ItemStore> HotColdDB } // Fetch from database. - match self.hot_db.get::>(block_root)? { - Some(block) => { + match self + .hot_db + .get_bytes(DBColumn::BeaconBlock.into(), block_root.as_bytes())? + { + Some(block_bytes) => { + // Deserialize. + let block = SignedBeaconBlock::from_ssz_bytes(&block_bytes, &self.spec)?; + // Add to cache. self.block_cache.lock().put(*block_root, block.clone()); + Ok(Some(block)) } None => Ok(None), } } + /// Determine whether a block exists in the database. 
+ pub fn block_exists(&self, block_root: &Hash256) -> Result { + self.hot_db + .key_exists(DBColumn::BeaconBlock.into(), block_root.as_bytes()) + } + /// Delete a block from the store and the block cache. pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { self.block_cache.lock().pop(block_root); - self.hot_db.delete::>(block_root) + self.hot_db + .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes()) } pub fn put_state_summary( @@ -286,7 +304,7 @@ impl, Cold: ItemStore> HotColdDB /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { let mut ops: Vec = Vec::new(); - if state.slot < self.get_split_slot() { + if state.slot() < self.get_split_slot() { self.store_cold_state(state_root, &state, &mut ops)?; self.cold_db.do_atomically(ops) } else { @@ -456,7 +474,7 @@ impl, Cold: ItemStore> HotColdDB for op in batch { match op { StoreOp::PutBlock(block_root, block) => { - key_value_batch.push(block.as_kv_store_op(*block_root)); + key_value_batch.push(beacon_block_as_kv_store_op(block_root, block)); } StoreOp::PutState(state_root, state) => { @@ -538,11 +556,11 @@ impl, Cold: ItemStore> HotColdDB ops: &mut Vec, ) -> Result<(), Error> { // On the epoch boundary, store the full state. - if state.slot % E::slots_per_epoch() == 0 { + if state.slot() % E::slots_per_epoch() == 0 { trace!( self.log, "Storing full state on epoch boundary"; - "slot" => state.slot.as_u64(), + "slot" => state.slot().as_u64(), "state_root" => format!("{:?}", state_root) ); store_full_state(state_root, &state, ops)?; @@ -580,9 +598,10 @@ impl, Cold: ItemStore> HotColdDB epoch_boundary_state_root, }) = self.load_hot_state_summary(state_root)? 
{ - let boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root)?.ok_or( - HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), - )?; + let boundary_state = + get_full_state(&self.hot_db, &epoch_boundary_state_root, &self.spec)?.ok_or( + HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), + )?; // Optimization to avoid even *thinking* about replaying blocks if we're already // on an epoch boundary. @@ -590,7 +609,7 @@ impl, Cold: ItemStore> HotColdDB boundary_state } else { let blocks = - self.load_blocks_to_replay(boundary_state.slot, slot, latest_block_root)?; + self.load_blocks_to_replay(boundary_state.slot(), slot, latest_block_root)?; self.replay_blocks(boundary_state, blocks, slot, block_replay)? }; @@ -610,11 +629,11 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { - if state.slot % self.config.slots_per_restore_point != 0 { + if state.slot() % self.config.slots_per_restore_point != 0 { warn!( self.log, "Not storing non-restore point state in freezer"; - "slot" => state.slot.as_u64(), + "slot" => state.slot().as_u64(), "state_root" => format!("{:?}", state_root) ); return Ok(()); @@ -623,7 +642,7 @@ impl, Cold: ItemStore> HotColdDB trace!( self.log, "Creating restore point"; - "slot" => state.slot, + "slot" => state.slot(), "state_root" => format!("{:?}", state_root) ); @@ -640,7 +659,7 @@ impl, Cold: ItemStore> HotColdDB store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?; // 3. Store restore point. - let restore_point_index = state.slot.as_u64() / self.config.slots_per_restore_point; + let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point; self.store_restore_point_hash(restore_point_index, *state_root, ops); Ok(()) @@ -670,10 +689,12 @@ impl, Cold: ItemStore> HotColdDB /// Load a restore point state by its `state_root`. 
fn load_restore_point(&self, state_root: &Hash256) -> Result, Error> { - let mut partial_state: PartialBeaconState = self + let partial_state_bytes = self .cold_db - .get(state_root)? + .get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? .ok_or_else(|| HotColdDBError::MissingRestorePoint(*state_root))?; + let mut partial_state: PartialBeaconState = + PartialBeaconState::from_ssz_bytes(&partial_state_bytes, &self.spec)?; // Fill in the fields of the partial state. partial_state.load_block_roots(&self.cold_db, &self.spec)?; @@ -717,7 +738,7 @@ impl, Cold: ItemStore> HotColdDB // 2. Load the blocks from the high restore point back to the low restore point. let blocks = self.load_blocks_to_replay( - low_restore_point.slot, + low_restore_point.slot(), slot, self.get_high_restore_point_block_root(&high_restore_point, slot)?, )?; @@ -759,14 +780,14 @@ impl, Cold: ItemStore> HotColdDB .filter(|result| { result .as_ref() - .map_or(true, |block| block.message.slot <= end_slot) + .map_or(true, |block| block.slot() <= end_slot) }) // Include the block at the start slot (if any). Whilst it doesn't need to be applied // to the state, it contains a potentially useful state root. .take_while(|result| { result .as_ref() - .map_or(true, |block| block.message.slot >= start_slot) + .map_or(true, |block| block.slot() >= start_slot) }) .collect::>()?; blocks.reverse(); @@ -786,18 +807,36 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Error> { if block_replay == BlockReplay::InconsistentStateRoots { for i in 0..blocks.len() { - blocks[i].message.state_root = Hash256::zero(); + let prev_block_root = if i > 0 { + blocks[i - 1].canonical_root() + } else { + // Not read. 
+ Hash256::zero() + }; + + let (state_root, parent_root) = match &mut blocks[i] { + SignedBeaconBlock::Base(block) => ( + &mut block.message.state_root, + &mut block.message.parent_root, + ), + SignedBeaconBlock::Altair(block) => ( + &mut block.message.state_root, + &mut block.message.parent_root, + ), + }; + + *state_root = Hash256::zero(); if i > 0 { - blocks[i].message.parent_root = blocks[i - 1].canonical_root() + *parent_root = prev_block_root; } } } let state_root_from_prev_block = |i: usize, state: &BeaconState| { if i > 0 { - let prev_block = &blocks[i - 1].message; - if prev_block.slot == state.slot { - Some(prev_block.state_root) + let prev_block = blocks[i - 1].message(); + if prev_block.slot() == state.slot() { + Some(prev_block.state_root()) } else { None } @@ -807,11 +846,11 @@ impl, Cold: ItemStore> HotColdDB }; for (i, block) in blocks.iter().enumerate() { - if block.message.slot <= state.slot { + if block.slot() <= state.slot() { continue; } - while state.slot < block.message.slot { + while state.slot() < block.slot() { let state_root = match block_replay { BlockReplay::Accurate => state_root_from_prev_block(i, &state), BlockReplay::InconsistentStateRoots => Some(Hash256::zero()), @@ -830,7 +869,7 @@ impl, Cold: ItemStore> HotColdDB .map_err(HotColdDBError::BlockReplayBlockError)?; } - while state.slot < target_slot { + while state.slot() < target_slot { let state_root = match block_replay { BlockReplay::Accurate => state_root_from_prev_block(blocks.len(), &state), BlockReplay::InconsistentStateRoots => Some(Hash256::zero()), @@ -1011,7 +1050,7 @@ pub fn migrate_database, Cold: ItemStore>( debug!( store.log, "Freezer migration started"; - "slot" => frozen_head.slot + "slot" => frozen_head.slot() ); // 0. Check that the migration is sensible. @@ -1019,16 +1058,16 @@ pub fn migrate_database, Cold: ItemStore>( // boundary (in order for the hot state summary scheme to work). 
let current_split_slot = store.split.read().slot; - if frozen_head.slot < current_split_slot { + if frozen_head.slot() < current_split_slot { return Err(HotColdDBError::FreezeSlotError { current_split_slot, - proposed_split_slot: frozen_head.slot, + proposed_split_slot: frozen_head.slot(), } .into()); } - if frozen_head.slot % E::slots_per_epoch() != 0 { - return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot).into()); + if frozen_head.slot() % E::slots_per_epoch() != 0 { + return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot()).into()); } let mut hot_db_ops: Vec> = Vec::new(); @@ -1045,7 +1084,7 @@ pub fn migrate_database, Cold: ItemStore>( let mut cold_db_ops: Vec = Vec::new(); if slot % store.config.slots_per_restore_point == 0 { - let state: BeaconState = get_full_state(&store.hot_db, &state_root)? + let state: BeaconState = get_full_state(&store.hot_db, &state_root, &store.spec)? .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; @@ -1102,7 +1141,7 @@ pub fn migrate_database, Cold: ItemStore>( // Before updating the in-memory split value, we flush it to disk first, so that should the // OS process die at this point, we pick up from the right place after a restart. let split = Split { - slot: frozen_head.slot, + slot: frozen_head.slot(), state_root: frozen_head_root, }; store.hot_db.put_sync(&SPLIT_KEY, &split)?; @@ -1119,7 +1158,7 @@ pub fn migrate_database, Cold: ItemStore>( debug!( store.log, "Freezer migration complete"; - "slot" => frozen_head.slot + "slot" => frozen_head.slot() ); Ok(()) @@ -1176,8 +1215,8 @@ impl HotStateSummary { // Fill in the state root on the latest block header if necessary (this happens on all // slots where there isn't a skip). 
let latest_block_root = state.get_latest_block_root(*state_root); - let epoch_boundary_slot = state.slot / E::slots_per_epoch() * E::slots_per_epoch(); - let epoch_boundary_state_root = if epoch_boundary_slot == state.slot { + let epoch_boundary_slot = state.slot() / E::slots_per_epoch() * E::slots_per_epoch(); + let epoch_boundary_state_root = if epoch_boundary_slot == state.slot() { *state_root } else { *state @@ -1186,7 +1225,7 @@ impl HotStateSummary { }; Ok(HotStateSummary { - slot: state.slot, + slot: state.slot(), latest_block_root, epoch_boundary_state_root, }) diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index b89bf1d4020..2321caf2b11 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -1,35 +1,15 @@ use crate::*; -use ssz::{Decode, Encode}; +use ssz::Encode; pub mod beacon_state; -pub mod partial_beacon_state; -impl StoreItem for SignedBeaconBlock { - fn db_column() -> DBColumn { - DBColumn::BeaconBlock - } - - fn as_store_bytes(&self) -> Vec { - let timer = metrics::start_timer(&metrics::BEACON_BLOCK_WRITE_TIMES); - let bytes = self.as_ssz_bytes(); - - metrics::stop_timer(timer); - metrics::inc_counter(&metrics::BEACON_BLOCK_WRITE_COUNT); - metrics::inc_counter_by(&metrics::BEACON_BLOCK_WRITE_BYTES, bytes.len() as u64); - - bytes - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - let timer = metrics::start_timer(&metrics::BEACON_BLOCK_READ_TIMES); - - let len = bytes.len(); - let result = Self::from_ssz_bytes(bytes).map_err(Into::into); - - metrics::stop_timer(timer); - metrics::inc_counter(&metrics::BEACON_BLOCK_READ_COUNT); - metrics::inc_counter_by(&metrics::BEACON_BLOCK_READ_BYTES, len as u64); - - result - } +/// Prepare a signed beacon block for storage in the database. 
+#[must_use] +pub fn beacon_block_as_kv_store_op( + key: &Hash256, + block: &SignedBeaconBlock, +) -> KeyValueStoreOp { + // FIXME(altair): re-add block write/overhead metrics, or remove them + let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_bytes()); + KeyValueStoreOp::PutKeyValue(db_key, block.as_ssz_bytes()) } diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index 175095fc8aa..88d1d2d7a16 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -1,6 +1,6 @@ use crate::*; -use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::{Decode, Encode}; +use ssz::{DecodeError, Encode}; +use ssz_derive::Encode; use std::convert::TryInto; use types::beacon_state::{CloneConfig, CommitteeCache, CACHED_EPOCHS}; @@ -23,13 +23,14 @@ pub fn store_full_state( pub fn get_full_state, E: EthSpec>( db: &KV, state_root: &Hash256, + spec: &ChainSpec, ) -> Result>, Error> { let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); match db.get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? { Some(bytes) => { let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES); - let container = StorageContainer::from_ssz_bytes(&bytes)?; + let container = StorageContainer::from_ssz_bytes(&bytes, spec)?; metrics::stop_timer(overhead_timer); metrics::stop_timer(total_timer); @@ -44,7 +45,7 @@ pub fn get_full_state, E: EthSpec>( /// A container for storing `BeaconState` components. 
// TODO: would be more space efficient with the caches stored separately and referenced by hash -#[derive(Encode, Decode)] +#[derive(Encode)] pub struct StorageContainer { state: BeaconState, committee_caches: Vec, @@ -55,9 +56,28 @@ impl StorageContainer { pub fn new(state: &BeaconState) -> Self { Self { state: state.clone_with(CloneConfig::none()), - committee_caches: state.committee_caches.to_vec(), + committee_caches: state.committee_caches().to_vec(), } } + + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + // We need to use the slot-switching `from_ssz_bytes` of `BeaconState`, which doesn't + // compose with the other SSZ utils, so we duplicate some parts of `ssz_derive` here. + let mut builder = ssz::SszDecoderBuilder::new(bytes); + + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + + let mut decoder = builder.build()?; + + let state = decoder.decode_next_with(|bytes| BeaconState::from_ssz_bytes(bytes, spec))?; + let committee_caches = decoder.decode_next()?; + + Ok(Self { + state, + committee_caches, + }) + } } impl TryInto> for StorageContainer { @@ -73,7 +93,7 @@ impl TryInto> for StorageContainer { ))); }; - state.committee_caches[i] = self.committee_caches.remove(i); + state.committee_caches_mut()[i] = self.committee_caches.remove(i); } Ok(state) diff --git a/beacon_node/store/src/impls/partial_beacon_state.rs b/beacon_node/store/src/impls/partial_beacon_state.rs deleted file mode 100644 index c3c284314b4..00000000000 --- a/beacon_node/store/src/impls/partial_beacon_state.rs +++ /dev/null @@ -1,16 +0,0 @@ -use crate::*; -use ssz::{Decode, Encode}; - -impl StoreItem for PartialBeaconState { - fn db_column() -> DBColumn { - DBColumn::BeaconState - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) 
- } -} diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 54b1f5df697..014565cb2b0 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -27,7 +27,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> store: Arc>, ) -> Option> { let state = store - .get_state(&self.message.state_root, Some(self.message.slot)) + .get_state(&self.message().state_root(), Some(self.slot())) .ok()??; Some(BlockRootsIterator::owned(store, state)) @@ -161,7 +161,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, pub fn new(store: Arc>, beacon_state: &'a BeaconState) -> Self { Self { store, - slot: beacon_state.slot, + slot: beacon_state.slot(), beacon_state: Cow::Borrowed(beacon_state), } } @@ -169,7 +169,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, pub fn owned(store: Arc>, beacon_state: BeaconState) -> Self { Self { store, - slot: beacon_state.slot, + slot: beacon_state.slot(), beacon_state: Cow::Owned(beacon_state), } } @@ -188,7 +188,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, } fn do_next(&mut self) -> Result, Error> { - if self.slot == 0 || self.slot > self.beacon_state.slot { + if self.slot == 0 || self.slot > self.beacon_state.slot() { return Ok(None); } @@ -257,7 +257,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> .store .get_block(&block_root)? .ok_or(Error::BlockNotFound(block_root))?; - self.next_block_root = block.message.parent_root; + self.next_block_root = block.message().parent_root(); Ok(Some((block_root, block))) } } @@ -323,7 +323,7 @@ fn next_historical_root_backtrack_state, Cold: Ite // a restore point slot (thus avoiding replaying blocks). In the case where we're // not frozen, this just means we might not jump back by the maximum amount on // our first jump (i.e. at most 1 extra state load). 
- let new_state_slot = slot_of_prev_restore_point::(current_state.slot); + let new_state_slot = slot_of_prev_restore_point::(current_state.slot()); let new_state_root = current_state.get_state_root(new_state_slot)?; Ok(store .get_state(new_state_root, Some(new_state_slot))? @@ -339,46 +339,50 @@ fn slot_of_prev_restore_point(current_slot: Slot) -> Slot { #[cfg(test)] mod test { use super::*; - use crate::config::StoreConfig; use crate::HotColdDB; + use crate::StoreConfig as Config; + use beacon_chain::store::StoreConfig; + use beacon_chain::test_utils::BeaconChainHarness; + use beacon_chain::types::{ChainSpec, Keypair, MainnetEthSpec}; use sloggers::{null::NullLoggerBuilder, Build}; - use types::{test_utils::TestingBeaconStateBuilder, ChainSpec, Keypair, MainnetEthSpec}; fn get_state() -> BeaconState { - let builder = TestingBeaconStateBuilder::from_single_keypair( - 0, - &Keypair::random(), - &T::default_spec(), + let harness = BeaconChainHarness::new_with_store_config( + T::default(), + None, + vec![Keypair::random()], + StoreConfig::default(), ); - let (state, _keypairs) = builder.build(); - state + harness.advance_slot(); + harness.get_current_state() } #[test] fn block_root_iter() { let log = NullLoggerBuilder.build().unwrap(); let store = Arc::new( - HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap(), + HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(), ); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); let mut state_b: BeaconState = get_state(); - state_a.slot = Slot::from(slots_per_historical_root); - state_b.slot = Slot::from(slots_per_historical_root * 2); + *state_a.slot_mut() = Slot::from(slots_per_historical_root); + *state_b.slot_mut() = Slot::from(slots_per_historical_root * 2); let mut hashes = (0..).map(Hash256::from_low_u64_be); - - for root in &mut state_a.block_roots[..] 
{ - *root = hashes.next().unwrap() + let roots_a = state_a.block_roots_mut(); + for i in 0..roots_a.len() { + roots_a[i] = hashes.next().unwrap() } - for root in &mut state_b.block_roots[..] { - *root = hashes.next().unwrap() + let roots_b = state_b.block_roots_mut(); + for i in 0..roots_b.len() { + roots_b[i] = hashes.next().unwrap() } let state_a_root = hashes.next().unwrap(); - state_b.state_roots[0] = state_a_root; + state_b.state_roots_mut()[0] = state_a_root; store.put_state(&state_a_root, &state_a).unwrap(); let iter = BlockRootsIterator::new(store, &state_b); @@ -405,15 +409,15 @@ mod test { fn state_root_iter() { let log = NullLoggerBuilder.build().unwrap(); let store = Arc::new( - HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap(), + HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(), ); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); let mut state_b: BeaconState = get_state(); - state_a.slot = Slot::from(slots_per_historical_root); - state_b.slot = Slot::from(slots_per_historical_root * 2); + *state_a.slot_mut() = Slot::from(slots_per_historical_root); + *state_b.slot_mut() = Slot::from(slots_per_historical_root * 2); let mut hashes = (0..).map(Hash256::from_low_u64_be); diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index e2d67458638..a6cd02bbc37 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -2,17 +2,21 @@ use crate::chunked_vector::{ load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes, StateRoots, }; -use crate::{Error, KeyValueStore}; +use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp}; +use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::convert::TryInto; +use types::superstruct; use types::*; 
/// Lightweight variant of the `BeaconState` that is stored in the database. /// /// Utilises lazy-loading from separate storage for its vector fields. -/// -/// Spec v0.12.1 -#[derive(Debug, PartialEq, Clone, Encode, Decode)] +#[superstruct( + variants(Base, Altair), + variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) +)] +#[derive(Debug, PartialEq, Clone, Encode)] pub struct PartialBeaconState where T: EthSpec, @@ -20,6 +24,7 @@ where // Versioning pub genesis_time: u64, pub genesis_validators_root: Hash256, + #[superstruct(getter(copy))] pub slot: Slot, pub fork: Fork, @@ -56,71 +61,152 @@ where // Slashings slashings: FixedVector, - // Attestations + // Attestations (genesis fork only) + #[superstruct(only(Base))] pub previous_epoch_attestations: VariableList, T::MaxPendingAttestations>, + #[superstruct(only(Base))] pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, + // Participation (Altair and later) + #[superstruct(only(Altair))] + pub previous_epoch_participation: VariableList, + #[superstruct(only(Altair))] + pub current_epoch_participation: VariableList, + // Finality pub justification_bits: BitVector, pub previous_justified_checkpoint: Checkpoint, pub current_justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, + + // Inactivity + #[superstruct(only(Altair))] + pub inactivity_scores: VariableList, + + // Light-client sync committees + #[superstruct(only(Altair))] + pub current_sync_committee: SyncCommittee, + #[superstruct(only(Altair))] + pub next_sync_committee: SyncCommittee, } -impl PartialBeaconState { - /// Convert a `BeaconState` to a `PartialBeaconState`, while dropping the optional fields. 
- pub fn from_state_forgetful(s: &BeaconState) -> Self { - // TODO: could use references/Cow for fields to avoid cloning - PartialBeaconState { - genesis_time: s.genesis_time, - genesis_validators_root: s.genesis_validators_root, - slot: s.slot, - fork: s.fork, +/// Implement the conversion function from BeaconState -> PartialBeaconState. +macro_rules! impl_from_state_forgetful { + ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + PartialBeaconState::$variant_name($struct_name { + // Versioning + genesis_time: $s.genesis_time, + genesis_validators_root: $s.genesis_validators_root, + slot: $s.slot, + fork: $s.fork, // History - latest_block_header: s.latest_block_header.clone(), + latest_block_header: $s.latest_block_header.clone(), block_roots: None, state_roots: None, historical_roots: None, // Eth1 - eth1_data: s.eth1_data.clone(), - eth1_data_votes: s.eth1_data_votes.clone(), - eth1_deposit_index: s.eth1_deposit_index, + eth1_data: $s.eth1_data.clone(), + eth1_data_votes: $s.eth1_data_votes.clone(), + eth1_deposit_index: $s.eth1_deposit_index, // Validator registry - validators: s.validators.clone(), - balances: s.balances.clone(), + validators: $s.validators.clone(), + balances: $s.balances.clone(), // Shuffling - latest_randao_value: *s - .get_randao_mix(s.current_epoch()) + latest_randao_value: *$outer + .get_randao_mix($outer.current_epoch()) .expect("randao at current epoch is OK"), randao_mixes: None, // Slashings - slashings: s.get_all_slashings().to_vec().into(), - - // Attestations - previous_epoch_attestations: s.previous_epoch_attestations.clone(), - current_epoch_attestations: s.current_epoch_attestations.clone(), + slashings: $s.slashings.clone(), // Finality - justification_bits: s.justification_bits.clone(), - previous_justified_checkpoint: s.previous_justified_checkpoint, - current_justified_checkpoint: s.current_justified_checkpoint, - finalized_checkpoint: s.finalized_checkpoint, + 
justification_bits: $s.justification_bits.clone(), + previous_justified_checkpoint: $s.previous_justified_checkpoint, + current_justified_checkpoint: $s.current_justified_checkpoint, + finalized_checkpoint: $s.finalized_checkpoint, + + // Variant-specific fields + $( + $extra_fields: $s.$extra_fields.clone() + ),* + }) + } +} + +impl PartialBeaconState { + /// Convert a `BeaconState` to a `PartialBeaconState`, while dropping the optional fields. + pub fn from_state_forgetful(outer: &BeaconState) -> Self { + match outer { + BeaconState::Base(s) => impl_from_state_forgetful!( + s, + outer, + Base, + PartialBeaconStateBase, + [previous_epoch_attestations, current_epoch_attestations] + ), + BeaconState::Altair(s) => impl_from_state_forgetful!( + s, + outer, + Altair, + PartialBeaconStateAltair, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores + ] + ), } } + /// SSZ decode. + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). + let slot_offset = ::ssz_fixed_len() + ::ssz_fixed_len(); + let slot_len = ::ssz_fixed_len(); + let slot_bytes = bytes.get(slot_offset..slot_offset + slot_len).ok_or( + DecodeError::InvalidByteLength { + len: bytes.len(), + expected: slot_offset + slot_len, + }, + )?; + + let slot = Slot::from_ssz_bytes(slot_bytes)?; + let epoch = slot.epoch(T::slots_per_epoch()); + + if spec + .altair_fork_epoch + .map_or(true, |altair_epoch| epoch < altair_epoch) + { + PartialBeaconStateBase::from_ssz_bytes(bytes).map(Self::Base) + } else { + PartialBeaconStateAltair::from_ssz_bytes(bytes).map(Self::Altair) + } + } + + /// Prepare the partial state for storage in the KV database. 
+ #[must_use] + pub fn as_kv_store_op(&self, state_root: Hash256) -> KeyValueStoreOp { + let db_key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_bytes()); + KeyValueStoreOp::PutKeyValue(db_key, self.as_ssz_bytes()) + } + pub fn load_block_roots>( &mut self, store: &S, spec: &ChainSpec, ) -> Result<(), Error> { - if self.block_roots.is_none() { - self.block_roots = Some(load_vector_from_db::( - store, self.slot, spec, + if self.block_roots().is_none() { + *self.block_roots_mut() = Some(load_vector_from_db::( + store, + self.slot(), + spec, )?); } Ok(()) @@ -131,9 +217,11 @@ impl PartialBeaconState { store: &S, spec: &ChainSpec, ) -> Result<(), Error> { - if self.state_roots.is_none() { - self.state_roots = Some(load_vector_from_db::( - store, self.slot, spec, + if self.state_roots().is_none() { + *self.state_roots_mut() = Some(load_vector_from_db::( + store, + self.slot(), + spec, )?); } Ok(()) @@ -144,10 +232,10 @@ impl PartialBeaconState { store: &S, spec: &ChainSpec, ) -> Result<(), Error> { - if self.historical_roots.is_none() { - self.historical_roots = Some(load_variable_list_from_db::( - store, self.slot, spec, - )?); + if self.historical_roots().is_none() { + *self.historical_roots_mut() = Some( + load_variable_list_from_db::(store, self.slot(), spec)?, + ); } Ok(()) } @@ -157,72 +245,101 @@ impl PartialBeaconState { store: &S, spec: &ChainSpec, ) -> Result<(), Error> { - if self.randao_mixes.is_none() { + if self.randao_mixes().is_none() { // Load the per-epoch values from the database let mut randao_mixes = - load_vector_from_db::(store, self.slot, spec)?; + load_vector_from_db::(store, self.slot(), spec)?; // Patch the value for the current slot into the index for the current epoch - let current_epoch = self.slot.epoch(T::slots_per_epoch()); + let current_epoch = self.slot().epoch(T::slots_per_epoch()); let len = randao_mixes.len(); - randao_mixes[current_epoch.as_usize() % len] = self.latest_randao_value; + 
randao_mixes[current_epoch.as_usize() % len] = *self.latest_randao_value(); - self.randao_mixes = Some(randao_mixes) + *self.randao_mixes_mut() = Some(randao_mixes) } Ok(()) } } -impl TryInto> for PartialBeaconState { - type Error = Error; - - fn try_into(self) -> Result, Error> { - fn unpack(x: Option) -> Result { - x.ok_or(Error::PartialBeaconStateError) - } - - Ok(BeaconState { - genesis_time: self.genesis_time, - genesis_validators_root: self.genesis_validators_root, - slot: self.slot, - fork: self.fork, +/// Implement the conversion from PartialBeaconState -> BeaconState. +macro_rules! impl_try_into_beacon_state { + ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + BeaconState::$variant_name($struct_name { + // Versioning + genesis_time: $inner.genesis_time, + genesis_validators_root: $inner.genesis_validators_root, + slot: $inner.slot, + fork: $inner.fork, // History - latest_block_header: self.latest_block_header, - block_roots: unpack(self.block_roots)?, - state_roots: unpack(self.state_roots)?, - historical_roots: unpack(self.historical_roots)?, + latest_block_header: $inner.latest_block_header, + block_roots: unpack_field($inner.block_roots)?, + state_roots: unpack_field($inner.state_roots)?, + historical_roots: unpack_field($inner.historical_roots)?, // Eth1 - eth1_data: self.eth1_data, - eth1_data_votes: self.eth1_data_votes, - eth1_deposit_index: self.eth1_deposit_index, + eth1_data: $inner.eth1_data, + eth1_data_votes: $inner.eth1_data_votes, + eth1_deposit_index: $inner.eth1_deposit_index, // Validator registry - validators: self.validators, - balances: self.balances, + validators: $inner.validators, + balances: $inner.balances, // Shuffling - randao_mixes: unpack(self.randao_mixes)?, + randao_mixes: unpack_field($inner.randao_mixes)?, // Slashings - slashings: self.slashings, - - // Attestations - previous_epoch_attestations: self.previous_epoch_attestations, - current_epoch_attestations: 
self.current_epoch_attestations, + slashings: $inner.slashings, // Finality - justification_bits: self.justification_bits, - previous_justified_checkpoint: self.previous_justified_checkpoint, - current_justified_checkpoint: self.current_justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, + justification_bits: $inner.justification_bits, + previous_justified_checkpoint: $inner.previous_justified_checkpoint, + current_justified_checkpoint: $inner.current_justified_checkpoint, + finalized_checkpoint: $inner.finalized_checkpoint, // Caching committee_caches: <_>::default(), pubkey_cache: <_>::default(), exit_cache: <_>::default(), tree_hash_cache: <_>::default(), + + // Variant-specific fields + $( + $extra_fields: $inner.$extra_fields + ),* }) } } + +fn unpack_field(x: Option) -> Result { + x.ok_or(Error::PartialBeaconStateError) +} + +impl TryInto> for PartialBeaconState { + type Error = Error; + + fn try_into(self) -> Result, Error> { + let state = match self { + PartialBeaconState::Base(inner) => impl_try_into_beacon_state!( + inner, + Base, + BeaconStateBase, + [previous_epoch_attestations, current_epoch_attestations] + ), + PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!( + inner, + Altair, + BeaconStateAltair, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores + ] + ), + }; + Ok(state) + } +} diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index 516f05bfbe9..1c11a8349dd 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -53,7 +53,7 @@ fn http_server_genesis_state() { .expect("client should have beacon chain") .state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots) .expect("should find state"); - db_state.drop_all_caches(); + db_state.drop_all_caches().unwrap(); assert_eq!( api_state, db_state, diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index db88fbe64e9..54f450a54ee 100644 
--- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -83,12 +83,7 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { } else { // build the enr_fork_id and add it to the local_enr if it exists let enr_fork = { - let spec = eth2_network_config - .yaml_config - .as_ref() - .ok_or("The network directory must contain a spec config")? - .apply_to_chain_spec::(&T::default_spec()) - .ok_or("The loaded config is not compatible with the current spec")?; + let spec = eth2_network_config.chain_spec::()?; if eth2_network_config.beacon_state_is_known() { let genesis_state = eth2_network_config.beacon_state::()?; @@ -96,7 +91,7 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { slog::info!(logger, "Genesis state found"; "root" => genesis_state.canonical_root().to_string()); let enr_fork = spec.enr_fork_id( types::Slot::from(0u64), - genesis_state.genesis_validators_root, + genesis_state.genesis_validators_root(), ); Some(enr_fork.as_ssz_bytes()) diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index 6e883872806..3f69f53bac7 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -51,7 +51,6 @@ pub fn run(matches: &ArgMatches<'_>, eth_spec_id: EthSpecId, debug_level: String if let Err(e) = match eth_spec_id { EthSpecId::Minimal => main::(matches, log), EthSpecId::Mainnet => main::(matches, log), - EthSpecId::V012Legacy => main::(matches, log), } { slog::crit!(slog_scope::logger(), "{}", e); } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 5e1b434601d..19269337640 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -21,7 +21,6 @@ use reqwest::{IntoUrl, Response}; pub use reqwest::{StatusCode, Url}; use sensitive_url::SensitiveUrl; use serde::{de::DeserializeOwned, Serialize}; -use ssz::Decode; use std::convert::TryFrom; use std::fmt; use std::iter::Iterator; @@ -498,6 +497,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_ssz( &self, block_id: BlockId, + spec: &ChainSpec, ) -> Result>, Error> { let 
mut path = self.eth_path()?; @@ -509,7 +509,7 @@ impl BeaconNodeHttpClient { self.get_bytes_opt_accept_header(path, Accept::Ssz) .await? - .map(|bytes| SignedBeaconBlock::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz)) + .map(|bytes| SignedBeaconBlock::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)) .transpose() } @@ -715,7 +715,7 @@ impl BeaconNodeHttpClient { } /// `GET config/spec` - pub async fn get_config_spec(&self) -> Result, Error> { + pub async fn get_config_spec(&self) -> Result, Error> { let mut path = self.eth_path()?; path.path_segments_mut() @@ -883,6 +883,7 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states_ssz( &self, state_id: StateId, + spec: &ChainSpec, ) -> Result>, Error> { let mut path = self.eth_path()?; @@ -895,7 +896,7 @@ impl BeaconNodeHttpClient { self.get_bytes_opt_accept_header(path, Accept::Ssz) .await? - .map(|bytes| BeaconState::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz)) + .map(|bytes| BeaconState::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)) .transpose() } diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index f81fc607fcb..716ac41e523 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -2,13 +2,12 @@ use crate::{ ok_or_error, - types::{BeaconState, Epoch, EthSpec, GenericResponse, ValidatorId}, + types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId}, BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode, }; use proto_array::core::ProtoArray; use reqwest::IntoUrl; use serde::{Deserialize, Serialize}; -use ssz::Decode; use ssz_derive::{Decode, Encode}; pub use eth2_libp2p::{types::SyncState, PeerInfo}; @@ -470,6 +469,7 @@ impl BeaconNodeHttpClient { pub async fn get_lighthouse_beacon_states_ssz( &self, state_id: &StateId, + spec: &ChainSpec, ) -> Result>, Error> { let mut path = self.server.full.clone(); @@ -483,7 +483,7 @@ impl BeaconNodeHttpClient { self.get_bytes_opt(path) 
.await? - .map(|bytes| BeaconState::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz)) + .map(|bytes| BeaconState::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)) .transpose() } diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 6cb1bf33881..c6a12350987 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -211,7 +211,7 @@ impl ValidatorClientHttpClient { } /// `GET lighthouse/spec` - pub async fn get_lighthouse_spec(&self) -> Result, Error> { + pub async fn get_lighthouse_spec(&self) -> Result, Error> { let mut path = self.server.full.clone(); path.path_segments_mut() diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index a62518908a3..c30dab5df7a 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -44,13 +44,6 @@ impl Eth2Config { spec: ChainSpec::minimal(), } } - - pub fn v012_legacy() -> Self { - Self { - eth_spec_id: EthSpecId::V012Legacy, - spec: ChainSpec::v012_legacy(), - } - } } /// A directory that can be built by downloading files via HTTP. @@ -112,16 +105,8 @@ macro_rules! define_net { }; } -define_net!(altona, include_altona_file, "altona", true); - -define_net!(medalla, include_medalla_file, "medalla", true); - -define_net!(spadina, include_spadina_file, "spadina", true); - define_net!(pyrmont, include_pyrmont_file, "pyrmont", true); define_net!(mainnet, include_mainnet_file, "mainnet", true); -define_net!(toledo, include_toledo_file, "toledo", true); - define_net!(prater, include_prater_file, "prater", true); diff --git a/common/eth2_network_config/build.rs b/common/eth2_network_config/build.rs index effcb4fa0e8..d84dbde4d88 100644 --- a/common/eth2_network_config/build.rs +++ b/common/eth2_network_config/build.rs @@ -1,19 +1,12 @@ //! Extracts zipped genesis states on first run. 
-use eth2_config::{ - altona, mainnet, medalla, prater, pyrmont, spadina, toledo, Eth2NetArchiveAndDirectory, - GENESIS_FILE_NAME, -}; +use eth2_config::{mainnet, prater, pyrmont, Eth2NetArchiveAndDirectory, GENESIS_FILE_NAME}; use std::fs::File; use std::io; use zip::ZipArchive; const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[ - altona::ETH2_NET_DIR, - medalla::ETH2_NET_DIR, - spadina::ETH2_NET_DIR, mainnet::ETH2_NET_DIR, pyrmont::ETH2_NET_DIR, - toledo::ETH2_NET_DIR, prater::ETH2_NET_DIR, ]; diff --git a/common/eth2_network_config/built_in_network_configs/altona/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/altona/boot_enr.yaml deleted file mode 100644 index 3e50ecd9df5..00000000000 --- a/common/eth2_network_config/built_in_network_configs/altona/boot_enr.yaml +++ /dev/null @@ -1,10 +0,0 @@ -- enr:-LK4QFtV7Pz4reD5a7cpfi1z6yPrZ2I9eMMU5mGQpFXLnLoKZW8TXvVubShzLLpsEj6aayvVO1vFx-MApijD3HLPhlECh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD6etXjAAABIf__________gmlkgnY0gmlwhDMPYfCJc2VjcDI1NmsxoQIerw_qBc9apYfZqo2awiwS930_vvmGnW2psuHsTzrJ8YN0Y3CCIyiDdWRwgiMo -- enr:-LK4QPVkFd_MKzdW0219doTZryq40tTe8rwWYO75KDmeZM78fBskGsfCuAww9t8y3u0Q0FlhXOhjE1CWpx3SGbUaU80Ch2F0dG5ldHOIAAAAAAAAAACEZXRoMpD6etXjAAABIf__________gmlkgnY0gmlwhDMPRgeJc2VjcDI1NmsxoQNHu-QfNgzl8VxbMiPgv6wgAljojnqAOrN18tzJMuN8oYN0Y3CCIyiDdWRwgiMo -- enr:-LK4QHe52XPPrcv6-MvcmN5GqDe_sgCwo24n_2hedlfwD_oxNt7cXL3tXJ7h9aYv6CTS1C_H2G2_dkeqm_LBO9nrpiYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhANzD9uJc2VjcDI1NmsxoQJX7zMnRU3szfGfS8MAIfPaQKOBpu3sBVTXf4Qq0b_m-4N0Y3CCIyiDdWRwgiMo -- enr:-LK4QLkbbq7xuRa_EnWd_kc0TkQk0pd0B0cZYR5LvBsncFQBDyPbGdy8d24TzRVeK7ZWwM5_2EcSJK223f8TYUOQYfwBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhAPsjtOJc2VjcDI1NmsxoQJNw_aZgWXl2SstD--WAjooGudjWLjEbbCIddJuEPxzWYN0Y3CCIyiDdWRwgiMo -- 
enr:-LK4QHy-glnxN1WTk5f6d7-xXwy_UKJLs5k7p_S4KRY9I925KTzW_kQLjfFriIpH0de7kygBwrSl726ukq9_OG_sgKMCh2F0dG5ldHOIUjEAIQEAFMiEZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhBLmhrCJc2VjcDI1NmsxoQNlU7gT0HUvpLA41n-P5GrCgjwMwtG02YsRRO0lAmpmBYN0Y3CCIyiDdWRwgiMo -- enr:-LK4QDz0n0vpyOpuStB8e22h9ayHVcvmN7o0trC7eC0DnZV9GYGzK5uKv7WlzpMQM2nDTG43DWvF_DZYwJOZCbF4iCQBh2F0dG5ldHOI__________-EZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhBKN136Jc2VjcDI1NmsxoQP5gcOUcaruHuMuTv8ht7ZEawp3iih7CmeLqcoY1hxOnoN0Y3CCIyiDdWRwgiMo -- enr:-LK4QOScOZ35sOXEH6CEW15lfv7I3DhqQAzCPQ_nRav95otuSh4yi9ol0AruKDiIk9qqGXyD-wQDaBAPLhwl4t-rUSQBh2F0dG5ldHOI__________-EZXRoMpD9yjmwAAABIf__________gmlkgnY0gmlwhCL68KuJc2VjcDI1NmsxoQK5fYR3Ipoc01dz0d2-EcL7m26zKQSkAbf4rwcMMM09CoN0Y3CCIyiDdWRwgiMo -- enr:-Ku4QMqmWPFkgM58F16wxB50cqWDaWaIsyANHL8wUNSB4Cy1TP9__uJQNRODvx_dvO6rY-BT3psrYTMAaxnMGXb6DuoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQNoed9JnQh7ltcAacHEGOjwocL1BhMQbYTgaPX0kFuXtIN1ZHCCE4g -- enr:-LK4QDHu6BtDKnGbthNp-GvweQlW0jiOX9KFCj5Ql9kScrFed76tgHlFv7A-9ZRB-EVZpKItvlNjo3yxjj7jYIZUJa4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAAAAAAAAAAAAAAAAAAAAAAgmlkgnY0gmlwhDbUyQKJc2VjcDI1NmsxoQLV6Yse8baXDFu9r_dvm9BVd2ni2-wwvANWA-4ewbhniIN0Y3CCIyiDdWRwgiMo -- enr:-LK4QF3lT3Ch8Ljyx-KwoPrvoJHO-HDd3jOREMIZCWzi_HkHFVub5qt52MliDTLDgpXMS9tBzzLI4ObT_Z2m2Kus9vMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAAAAAAAAAAAAAAAAAAAAAAgmlkgnY0gmlwhBKNqHeJc2VjcDI1NmsxoQOTO9uI9UZjuTOpcWvnCfhfQTmcMaIzBFsjMpXYnppET4N0Y3CCIyiDdWRwgiMo diff --git a/common/eth2_network_config/built_in_network_configs/altona/config.yaml b/common/eth2_network_config/built_in_network_configs/altona/config.yaml deleted file mode 100644 index 0a3bff66bd9..00000000000 --- a/common/eth2_network_config/built_in_network_configs/altona/config.yaml +++ /dev/null @@ -1,60 +0,0 @@ -CONFIG_NAME: "altona" -MAX_COMMITTEES_PER_SLOT: 64 -TARGET_COMMITTEE_SIZE: 128 -MAX_VALIDATORS_PER_COMMITTEE: 2048 -MIN_PER_EPOCH_CHURN_LIMIT: 4 -CHURN_LIMIT_QUOTIENT: 65536 -SHUFFLE_ROUND_COUNT: 90 
-MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 640 -MIN_GENESIS_TIME: 1593433800 -HYSTERESIS_QUOTIENT: 4 -HYSTERESIS_DOWNWARD_MULTIPLIER: 1 -HYSTERESIS_UPWARD_MULTIPLIER: 5 -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 -ETH1_FOLLOW_DISTANCE: 1024 -TARGET_AGGREGATORS_PER_COMMITTEE: 16 -RANDOM_SUBNETS_PER_VALIDATOR: 1 -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 -SECONDS_PER_ETH1_BLOCK: 14 -DEPOSIT_CONTRACT_ADDRESS: 0x16e82D77882A663454Ef92806b7DeCa1D394810f -MIN_DEPOSIT_AMOUNT: 1000000000 -MAX_EFFECTIVE_BALANCE: 32000000000 -EJECTION_BALANCE: 16000000000 -EFFECTIVE_BALANCE_INCREMENT: 1000000000 -GENESIS_FORK_VERSION: 0x00000121 -BLS_WITHDRAWAL_PREFIX: 0x00 -GENESIS_DELAY: 172800 -SECONDS_PER_SLOT: 12 -MIN_ATTESTATION_INCLUSION_DELAY: 1 -SLOTS_PER_EPOCH: 32 -MIN_SEED_LOOKAHEAD: 1 -MAX_SEED_LOOKAHEAD: 4 -EPOCHS_PER_ETH1_VOTING_PERIOD: 32 -SLOTS_PER_HISTORICAL_ROOT: 8192 -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -SHARD_COMMITTEE_PERIOD: 256 -MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 -EPOCHS_PER_HISTORICAL_VECTOR: 65536 -EPOCHS_PER_SLASHINGS_VECTOR: 8192 -HISTORICAL_ROOTS_LIMIT: 16777216 -VALIDATOR_REGISTRY_LIMIT: 1099511627776 -BASE_REWARD_FACTOR: 64 -WHISTLEBLOWER_REWARD_QUOTIENT: 512 -PROPOSER_REWARD_QUOTIENT: 8 -INACTIVITY_PENALTY_QUOTIENT: 16777216 -MIN_SLASHING_PENALTY_QUOTIENT: 32 -MAX_PROPOSER_SLASHINGS: 16 -MAX_ATTESTER_SLASHINGS: 2 -MAX_ATTESTATIONS: 128 -MAX_DEPOSITS: 16 -MAX_VOLUNTARY_EXITS: 16 -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_SELECTION_PROOF: 0x05000000 -DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 -DEPOSIT_CHAIN_ID: 5 -DEPOSIT_NETWORK_ID: 5 -PROPORTIONAL_SLASHING_MULTIPLIER: 3 diff --git a/common/eth2_network_config/built_in_network_configs/altona/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/altona/deploy_block.txt deleted file mode 100644 index 5306ea6645f..00000000000 --- 
a/common/eth2_network_config/built_in_network_configs/altona/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -2917810 diff --git a/common/eth2_network_config/built_in_network_configs/altona/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/altona/genesis.ssz.zip deleted file mode 100644 index 79fd978bb51..00000000000 Binary files a/common/eth2_network_config/built_in_network_configs/altona/genesis.ssz.zip and /dev/null differ diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index ace44dd2325..47b02aa8d97 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -1,155 +1,71 @@ -# Mainnet preset +# Mainnet config -CONFIG_NAME: "mainnet" +# Extends the mainnet preset +PRESET_BASE: 'mainnet' -# Misc +# Genesis # --------------------------------------------------------------- -# 2**6 (= 64) -MAX_COMMITTEES_PER_SLOT: 64 -# 2**7 (= 128) -TARGET_COMMITTEE_SIZE: 128 -# 2**11 (= 2,048) -MAX_VALIDATORS_PER_COMMITTEE: 2048 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 -# See issue 563 -SHUFFLE_ROUND_COUNT: 90 # `2**14` (= 16,384) MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 # Dec 1, 2020, 12pm UTC MIN_GENESIS_TIME: 1606824000 -# 4 -HYSTERESIS_QUOTIENT: 4 -# 1 (minus 0.25) -HYSTERESIS_DOWNWARD_MULTIPLIER: 1 -# 5 (plus 1.25) -HYSTERESIS_UPWARD_MULTIPLIER: 5 - - -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - -# Validator -# --------------------------------------------------------------- -# 2**11 (= 2,048) -ETH1_FOLLOW_DISTANCE: 2048 -# 2**4 (= 16) -TARGET_AGGREGATORS_PER_COMMITTEE: 16 -# 2**0 (= 1) -RANDOM_SUBNETS_PER_VALIDATOR: 1 -# 2**8 (= 256) -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 -# 14 (estimate from Eth1 
mainnet) -SECONDS_PER_ETH1_BLOCK: 14 - - -# Deposit contract -# --------------------------------------------------------------- -# Ethereum PoW Mainnet -DEPOSIT_CHAIN_ID: 1 -DEPOSIT_NETWORK_ID: 1 -# **TBD** -DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa +# Mainnet initial fork version, recommend altering for testnets +GENESIS_FORK_VERSION: 0x00000000 +# 604800 seconds (7 days) +GENESIS_DELAY: 604800 -# Gwei values +# Forking # --------------------------------------------------------------- -# 2**0 * 10**9 (= 1,000,000,000) Gwei -MIN_DEPOSIT_AMOUNT: 1000000000 -# 2**5 * 10**9 (= 32,000,000,000) Gwei -MAX_EFFECTIVE_BALANCE: 32000000000 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**0 * 10**9 (= 1,000,000,000) Gwei -EFFECTIVE_BALANCE_INCREMENT: 1000000000 +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 +# Altair +ALTAIR_FORK_VERSION: 0x01000000 +ALTAIR_FORK_EPOCH: 18446744073709551615 +# Merge +MERGE_FORK_VERSION: 0x02000000 +MERGE_FORK_EPOCH: 18446744073709551615 +# Sharding +SHARDING_FORK_VERSION: 0x03000000 +SHARDING_FORK_EPOCH: 18446744073709551615 -# Initial values -# --------------------------------------------------------------- -# Mainnet initial fork version, recommend altering for testnets -GENESIS_FORK_VERSION: 0x00000000 -BLS_WITHDRAWAL_PREFIX: 0x00 +# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. 
+TRANSITION_TOTAL_DIFFICULTY: 4294967296 # Time parameters # --------------------------------------------------------------- -# 604800 seconds (7 days) -GENESIS_DELAY: 604800 # 12 seconds SECONDS_PER_SLOT: 12 -# 2**0 (= 1) slots 12 seconds -MIN_ATTESTATION_INCLUSION_DELAY: 1 -# 2**5 (= 32) slots 6.4 minutes -SLOTS_PER_EPOCH: 32 -# 2**0 (= 1) epochs 6.4 minutes -MIN_SEED_LOOKAHEAD: 1 -# 2**2 (= 4) epochs 25.6 minutes -MAX_SEED_LOOKAHEAD: 4 -# 2**6 (= 64) epochs ~6.8 hours -EPOCHS_PER_ETH1_VOTING_PERIOD: 64 -# 2**13 (= 8,192) slots ~13 hours -SLOTS_PER_HISTORICAL_ROOT: 8192 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 # 2**8 (= 256) epochs ~27 hours MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**8 (= 256) epochs ~27 hours SHARD_COMMITTEE_PERIOD: 256 -# 2**2 (= 4) epochs 25.6 minutes -MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 - - -# State vector lengths -# --------------------------------------------------------------- -# 2**16 (= 65,536) epochs ~0.8 years -EPOCHS_PER_HISTORICAL_VECTOR: 65536 -# 2**13 (= 8,192) epochs ~36 days -EPOCHS_PER_SLASHINGS_VECTOR: 8192 -# 2**24 (= 16,777,216) historical roots, ~26,131 years -HISTORICAL_ROOTS_LIMIT: 16777216 -# 2**40 (= 1,099,511,627,776) validator spots -VALIDATOR_REGISTRY_LIMIT: 1099511627776 - - -# Reward and penalty quotients -# --------------------------------------------------------------- -# 2**6 (= 64) -BASE_REWARD_FACTOR: 64 -# 2**9 (= 512) -WHISTLEBLOWER_REWARD_QUOTIENT: 512 -# 2**3 (= 8) -PROPOSER_REWARD_QUOTIENT: 8 -# 2**26 (= 67,108,864) -INACTIVITY_PENALTY_QUOTIENT: 67108864 -# 2**7 (= 128) (lower safety margin at Phase 0 genesis) -MIN_SLASHING_PENALTY_QUOTIENT: 128 -# 1 (lower safety margin at Phase 0 genesis) -PROPORTIONAL_SLASHING_MULTIPLIER: 1 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 -# Max operations per block +# Validator cycle # --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 # 2**4 (= 16) -MAX_PROPOSER_SLASHINGS: 
16 -# 2**1 (= 2) -MAX_ATTESTER_SLASHINGS: 2 -# 2**7 (= 128) -MAX_ATTESTATIONS: 128 -# 2**4 (= 16) -MAX_DEPOSITS: 16 -# 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 -# Signature domains +# Deposit contract # --------------------------------------------------------------- -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_SELECTION_PROOF: 0x05000000 -DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 +# Ethereum PoW Mainnet +DEPOSIT_CHAIN_ID: 1 +DEPOSIT_NETWORK_ID: 1 +DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa diff --git a/common/eth2_network_config/built_in_network_configs/medalla/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/medalla/boot_enr.yaml deleted file mode 100644 index 835451842da..00000000000 --- a/common/eth2_network_config/built_in_network_configs/medalla/boot_enr.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# lighthouse Node -- enr:-LK4QCGFeQXjpQkgOfLHsbTjD65IOtSqV7Qo-Qdqv6SrL8lqFY7INPMMGP5uGKkVDcJkeXimSeNeypaZV3MHkcJgr9QCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDnp11aAAAAAf__________gmlkgnY0gmlwhA37LMaJc2VjcDI1NmsxoQJ7k0mKtTd_kdEq251flOjD1HKpqgMmIETDoD-Msy_O-4N0Y3CCIyiDdWRwgiMo -# Lighthouse node -- enr:-LK4QCpyWmMLYwC2umMJ_g0c9VY7YOFwZyaR80_tuQNTWOzJbaR82DDhVQYqmE_0gvN6Du5jwnxzIaaNRZQlVXzfIK0Dh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDnp11aAAAAAf__________gmlkgnY0gmlwhCLR2xuJc2VjcDI1NmsxoQOYiWqrQtQksTEtS3qY6idxJE5wkm0t9wKqpzv2gCR21oN0Y3CCIyiDdWRwgiMo -# Prysm -- enr:-Ku4QOnVSyvzS3VbF87J8MubaRuTyfPi6B67XQg6-5eAV_uILAhn9geTTQmfqDIOcIeAxWHUUajQp6lYniAXPWncp6UBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAYrkzLAAAAAf__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQKekYKqUtwbaJKKCct_srE5-g7tBUm68mj_jpeSb7CCqYN1ZHCCC7g -# Prysm -- 
enr:-Ku4QHWezvidY_m0dWEwERrNrqjEQWrlIx7b8K4EIxGgTrLmUxHCZPW5-t8PsS8nFxAJ8k8YacKP5zPRk5gbsTSsRTQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAYrkzLAAAAAf__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMypP_ODwTuBq2v0oIdjPGCEyu9Hb_jHDbuIX_iNvBRGoN1ZHCCGWQ -# Cat-dog -- enr:-Ku4QJmPsyq4lmDdFebMKXk7vdt8WsLWkArYT2K8eN057oFudm2tITrZJD9sq1x92-bRmXTyAJgb2FD4ior-KHIU3KcDh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDaNQiCAAAAA___________gmlkgnY0gmlwhBK4vdCJc2VjcDI1NmsxoQMWAsR84_ETgq4-14FV2x00ptmI-YU3tdkZV9CUgYPEnIN1ZHCCI1s diff --git a/common/eth2_network_config/built_in_network_configs/medalla/config.yaml b/common/eth2_network_config/built_in_network_configs/medalla/config.yaml deleted file mode 100644 index 7b0c8a3ffb9..00000000000 --- a/common/eth2_network_config/built_in_network_configs/medalla/config.yaml +++ /dev/null @@ -1,60 +0,0 @@ -CONFIG_NAME: "medalla" -MAX_COMMITTEES_PER_SLOT: 64 -TARGET_COMMITTEE_SIZE: 128 -MAX_VALIDATORS_PER_COMMITTEE: 2048 -MIN_PER_EPOCH_CHURN_LIMIT: 4 -CHURN_LIMIT_QUOTIENT: 65536 -SHUFFLE_ROUND_COUNT: 90 -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 -MIN_GENESIS_TIME: 1596546000 -HYSTERESIS_QUOTIENT: 4 -HYSTERESIS_DOWNWARD_MULTIPLIER: 1 -HYSTERESIS_UPWARD_MULTIPLIER: 5 -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 -ETH1_FOLLOW_DISTANCE: 1024 -TARGET_AGGREGATORS_PER_COMMITTEE: 16 -RANDOM_SUBNETS_PER_VALIDATOR: 1 -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 -SECONDS_PER_ETH1_BLOCK: 14 -DEPOSIT_CONTRACT_ADDRESS: 0x07b39F4fDE4A38bACe212b546dAc87C58DfE3fDC -MIN_DEPOSIT_AMOUNT: 1000000000 -MAX_EFFECTIVE_BALANCE: 32000000000 -EJECTION_BALANCE: 16000000000 -EFFECTIVE_BALANCE_INCREMENT: 1000000000 -GENESIS_FORK_VERSION: 0x00000001 -BLS_WITHDRAWAL_PREFIX: 0x00 -GENESIS_DELAY: 172800 -SECONDS_PER_SLOT: 12 -MIN_ATTESTATION_INCLUSION_DELAY: 1 -SLOTS_PER_EPOCH: 32 -MIN_SEED_LOOKAHEAD: 1 -MAX_SEED_LOOKAHEAD: 4 -EPOCHS_PER_ETH1_VOTING_PERIOD: 32 -SLOTS_PER_HISTORICAL_ROOT: 8192 -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -SHARD_COMMITTEE_PERIOD: 256 -MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 
-EPOCHS_PER_HISTORICAL_VECTOR: 65536 -EPOCHS_PER_SLASHINGS_VECTOR: 8192 -HISTORICAL_ROOTS_LIMIT: 16777216 -VALIDATOR_REGISTRY_LIMIT: 1099511627776 -BASE_REWARD_FACTOR: 64 -WHISTLEBLOWER_REWARD_QUOTIENT: 512 -PROPOSER_REWARD_QUOTIENT: 8 -INACTIVITY_PENALTY_QUOTIENT: 16777216 -MIN_SLASHING_PENALTY_QUOTIENT: 32 -MAX_PROPOSER_SLASHINGS: 16 -MAX_ATTESTER_SLASHINGS: 2 -MAX_ATTESTATIONS: 128 -MAX_DEPOSITS: 16 -MAX_VOLUNTARY_EXITS: 16 -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_SELECTION_PROOF: 0x05000000 -DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 -DEPOSIT_CHAIN_ID: 5 -DEPOSIT_NETWORK_ID: 5 -PROPORTIONAL_SLASHING_MULTIPLIER: 3 diff --git a/common/eth2_network_config/built_in_network_configs/medalla/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/medalla/deploy_block.txt deleted file mode 100644 index a5ea0ed2edf..00000000000 --- a/common/eth2_network_config/built_in_network_configs/medalla/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -3085928 diff --git a/common/eth2_network_config/built_in_network_configs/medalla/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/medalla/genesis.ssz.zip deleted file mode 100644 index 761b72c0d44..00000000000 Binary files a/common/eth2_network_config/built_in_network_configs/medalla/genesis.ssz.zip and /dev/null differ diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index 6328a92ddca..e99939cabd2 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -1,155 +1,70 @@ -# Prater preset +# Prater config -CONFIG_NAME: "prater" +# Extends the mainnet preset +PRESET_BASE: 'mainnet' -# Misc +# Genesis # --------------------------------------------------------------- -# 
2**6 (= 64) -MAX_COMMITTEES_PER_SLOT: 64 -# 2**7 (= 128) -TARGET_COMMITTEE_SIZE: 128 -# 2**11 (= 2,048) -MAX_VALIDATORS_PER_COMMITTEE: 2048 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 -# See issue 563 -SHUFFLE_ROUND_COUNT: 90 # `2**14` (= 16,384) MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 # Mar-01-2021 08:53:32 AM +UTC MIN_GENESIS_TIME: 1614588812 -# 4 -HYSTERESIS_QUOTIENT: 4 -# 1 (minus 0.25) -HYSTERESIS_DOWNWARD_MULTIPLIER: 1 -# 5 (plus 1.25) -HYSTERESIS_UPWARD_MULTIPLIER: 5 - - -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - -# Validator -# --------------------------------------------------------------- -# 2**11 (= 2,048) -ETH1_FOLLOW_DISTANCE: 2048 -# 2**4 (= 16) -TARGET_AGGREGATORS_PER_COMMITTEE: 16 -# 2**0 (= 1) -RANDOM_SUBNETS_PER_VALIDATOR: 1 -# 2**8 (= 256) -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 -# 14 (estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 - - -# Deposit contract -# --------------------------------------------------------------- -# Ethereum Goerli testnet -DEPOSIT_CHAIN_ID: 5 -DEPOSIT_NETWORK_ID: 5 -# Prater test deposit contract on Goerli Testnet -DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b - +# Prater area code (Vienna) +GENESIS_FORK_VERSION: 0x00001020 +# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC) +GENESIS_DELAY: 1919188 -# Gwei values +# Forking # --------------------------------------------------------------- -# 2**0 * 10**9 (= 1,000,000,000) Gwei -MIN_DEPOSIT_AMOUNT: 1000000000 -# 2**5 * 10**9 (= 32,000,000,000) Gwei -MAX_EFFECTIVE_BALANCE: 32000000000 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**0 * 10**9 (= 1,000,000,000) Gwei -EFFECTIVE_BALANCE_INCREMENT: 1000000000 +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 
1 +# Altair +ALTAIR_FORK_VERSION: 0x01000000 +ALTAIR_FORK_EPOCH: 18446744073709551615 +# Merge +MERGE_FORK_VERSION: 0x02000000 +MERGE_FORK_EPOCH: 18446744073709551615 +# Sharding +SHARDING_FORK_VERSION: 0x03000000 +SHARDING_FORK_EPOCH: 18446744073709551615 -# Initial values -# --------------------------------------------------------------- -# Prater area code (Vienna) -GENESIS_FORK_VERSION: 0x00001020 -BLS_WITHDRAWAL_PREFIX: 0x00 +# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. +TRANSITION_TOTAL_DIFFICULTY: 4294967296 # Time parameters # --------------------------------------------------------------- -# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC) -GENESIS_DELAY: 1919188 # 12 seconds SECONDS_PER_SLOT: 12 -# 2**0 (= 1) slots 12 seconds -MIN_ATTESTATION_INCLUSION_DELAY: 1 -# 2**5 (= 32) slots 6.4 minutes -SLOTS_PER_EPOCH: 32 -# 2**0 (= 1) epochs 6.4 minutes -MIN_SEED_LOOKAHEAD: 1 -# 2**2 (= 4) epochs 25.6 minutes -MAX_SEED_LOOKAHEAD: 4 -# 2**6 (= 64) epochs ~6.8 hours -EPOCHS_PER_ETH1_VOTING_PERIOD: 64 -# 2**13 (= 8,192) slots ~13 hours -SLOTS_PER_HISTORICAL_ROOT: 8192 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 # 2**8 (= 256) epochs ~27 hours MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**8 (= 256) epochs ~27 hours SHARD_COMMITTEE_PERIOD: 256 -# 2**2 (= 4) epochs 25.6 minutes -MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 - - -# State vector lengths -# --------------------------------------------------------------- -# 2**16 (= 65,536) epochs ~0.8 years -EPOCHS_PER_HISTORICAL_VECTOR: 65536 -# 2**13 (= 8,192) epochs ~36 days -EPOCHS_PER_SLASHINGS_VECTOR: 8192 -# 2**24 (= 16,777,216) historical roots, ~26,131 years -HISTORICAL_ROOTS_LIMIT: 16777216 -# 2**40 (= 1,099,511,627,776) validator spots -VALIDATOR_REGISTRY_LIMIT: 1099511627776 - - -# Reward and penalty quotients -# --------------------------------------------------------------- -# 2**6 (= 64) -BASE_REWARD_FACTOR: 64 -# 2**9 (= 512) 
-WHISTLEBLOWER_REWARD_QUOTIENT: 512 -# 2**3 (= 8) -PROPOSER_REWARD_QUOTIENT: 8 -# 2**26 (= 67,108,864) -INACTIVITY_PENALTY_QUOTIENT: 67108864 -# 2**7 (= 128) (lower safety margin at Phase 0 genesis) -MIN_SLASHING_PENALTY_QUOTIENT: 128 -# 1 (lower safety margin at Phase 0 genesis) -PROPORTIONAL_SLASHING_MULTIPLIER: 1 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 -# Max operations per block +# Validator cycle # --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 # 2**4 (= 16) -MAX_PROPOSER_SLASHINGS: 16 -# 2**1 (= 2) -MAX_ATTESTER_SLASHINGS: 2 -# 2**7 (= 128) -MAX_ATTESTATIONS: 128 -# 2**4 (= 16) -MAX_DEPOSITS: 16 -# 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 - +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 -# Signature domains +# Deposit contract # --------------------------------------------------------------- -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_SELECTION_PROOF: 0x05000000 -DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 +# Ethereum Goerli testnet +DEPOSIT_CHAIN_ID: 5 +DEPOSIT_NETWORK_ID: 5 +# Prater test deposit contract on Goerli Testnet +DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b diff --git a/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml b/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml index f9d1b92be4c..2cdca808d85 100644 --- a/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml @@ -1,154 +1,70 @@ -# Pyrmont preset -CONFIG_NAME: "pyrmont" +# Pyrmont config -# Misc +# Extends the mainnet preset +PRESET_BASE: 'mainnet' + +# Genesis # 
--------------------------------------------------------------- -# 2**6 (= 64) -MAX_COMMITTEES_PER_SLOT: 64 -# 2**7 (= 128) -TARGET_COMMITTEE_SIZE: 128 -# 2**11 (= 2,048) -MAX_VALIDATORS_PER_COMMITTEE: 2048 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 -# See issue 563 -SHUFFLE_ROUND_COUNT: 90 # `2**14` (= 16,384) MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 # Nov 18, 2020, 12pm UTC MIN_GENESIS_TIME: 1605700800 -# 4 -HYSTERESIS_QUOTIENT: 4 -# 1 (minus 0.25) -HYSTERESIS_DOWNWARD_MULTIPLIER: 1 -# 5 (plus 1.25) -HYSTERESIS_UPWARD_MULTIPLIER: 5 - - -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - -# Validator -# --------------------------------------------------------------- -# 2**11 (= 2,048) -ETH1_FOLLOW_DISTANCE: 2048 -# 2**4 (= 16) -TARGET_AGGREGATORS_PER_COMMITTEE: 16 -# 2**0 (= 1) -RANDOM_SUBNETS_PER_VALIDATOR: 1 -# 2**8 (= 256) -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 -# 14 (estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 - +# Pyrmont area code +GENESIS_FORK_VERSION: 0x00002009 +# Customized for Pyrmont: 432000 seconds (5 days) +GENESIS_DELAY: 432000 -# Deposit contract +# Forking # --------------------------------------------------------------- -# Ethereum Goerli testnet -DEPOSIT_CHAIN_ID: 5 -DEPOSIT_NETWORK_ID: 5 -# Pyrmont test deposit contract on Goerli (2nd edition, 0x00002009 fork version) -DEPOSIT_CONTRACT_ADDRESS: 0x8c5fecdC472E27Bc447696F431E425D02dd46a8c +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 +# Altair +ALTAIR_FORK_VERSION: 0x01000000 +ALTAIR_FORK_EPOCH: 18446744073709551615 +# Merge +MERGE_FORK_VERSION: 0x02000000 +MERGE_FORK_EPOCH: 18446744073709551615 +# Sharding +SHARDING_FORK_VERSION: 0x03000000 +SHARDING_FORK_EPOCH: 18446744073709551615 -# Gwei values -# 
--------------------------------------------------------------- -# 2**0 * 10**9 (= 1,000,000,000) Gwei -MIN_DEPOSIT_AMOUNT: 1000000000 -# 2**5 * 10**9 (= 32,000,000,000) Gwei -MAX_EFFECTIVE_BALANCE: 32000000000 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**0 * 10**9 (= 1,000,000,000) Gwei -EFFECTIVE_BALANCE_INCREMENT: 1000000000 - - -# Initial values -# --------------------------------------------------------------- -# Pyrmont area code -GENESIS_FORK_VERSION: 0x00002009 -BLS_WITHDRAWAL_PREFIX: 0x00 +# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. +TRANSITION_TOTAL_DIFFICULTY: 4294967296 # Time parameters # --------------------------------------------------------------- -# Customized for Pyrmont: 432000 seconds (5 days) -GENESIS_DELAY: 432000 # 12 seconds SECONDS_PER_SLOT: 12 -# 2**0 (= 1) slots 12 seconds -MIN_ATTESTATION_INCLUSION_DELAY: 1 -# 2**5 (= 32) slots 6.4 minutes -SLOTS_PER_EPOCH: 32 -# 2**0 (= 1) epochs 6.4 minutes -MIN_SEED_LOOKAHEAD: 1 -# 2**2 (= 4) epochs 25.6 minutes -MAX_SEED_LOOKAHEAD: 4 -# 2**6 (= 64) epochs ~6.8 hours -EPOCHS_PER_ETH1_VOTING_PERIOD: 64 -# 2**13 (= 8,192) slots ~13 hours -SLOTS_PER_HISTORICAL_ROOT: 8192 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 # 2**8 (= 256) epochs ~27 hours MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**8 (= 256) epochs ~27 hours SHARD_COMMITTEE_PERIOD: 256 -# 2**2 (= 4) epochs 25.6 minutes -MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 - - -# State vector lengths -# --------------------------------------------------------------- -# 2**16 (= 65,536) epochs ~0.8 years -EPOCHS_PER_HISTORICAL_VECTOR: 65536 -# 2**13 (= 8,192) epochs ~36 days -EPOCHS_PER_SLASHINGS_VECTOR: 8192 -# 2**24 (= 16,777,216) historical roots, ~26,131 years -HISTORICAL_ROOTS_LIMIT: 16777216 -# 2**40 (= 1,099,511,627,776) validator spots -VALIDATOR_REGISTRY_LIMIT: 1099511627776 - - -# Reward and penalty quotients -# 
--------------------------------------------------------------- -# 2**6 (= 64) -BASE_REWARD_FACTOR: 64 -# 2**9 (= 512) -WHISTLEBLOWER_REWARD_QUOTIENT: 512 -# 2**3 (= 8) -PROPOSER_REWARD_QUOTIENT: 8 -# 2**26 (= 67,108,864) -INACTIVITY_PENALTY_QUOTIENT: 67108864 -# 2**7 (= 128) (lower safety margin at Phase 0 genesis) -MIN_SLASHING_PENALTY_QUOTIENT: 128 -# 1 (lower safety margin at Phase 0 genesis) -PROPORTIONAL_SLASHING_MULTIPLIER: 1 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 -# Max operations per block +# Validator cycle # --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 # 2**4 (= 16) -MAX_PROPOSER_SLASHINGS: 16 -# 2**1 (= 2) -MAX_ATTESTER_SLASHINGS: 2 -# 2**7 (= 128) -MAX_ATTESTATIONS: 128 -# 2**4 (= 16) -MAX_DEPOSITS: 16 -# 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 - +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 -# Signature domains +# Deposit contract # --------------------------------------------------------------- -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_SELECTION_PROOF: 0x05000000 -DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 +# Ethereum Goerli testnet +DEPOSIT_CHAIN_ID: 5 +DEPOSIT_NETWORK_ID: 5 +# Pyrmont test deposit contract on Goerli (2nd edition, 0x00002009 fork version) +DEPOSIT_CONTRACT_ADDRESS: 0x8c5fecdC472E27Bc447696F431E425D02dd46a8c diff --git a/common/eth2_network_config/built_in_network_configs/spadina/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/spadina/boot_enr.yaml deleted file mode 100644 index e5da1c73c20..00000000000 --- a/common/eth2_network_config/built_in_network_configs/spadina/boot_enr.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# Lighthouse -- 
enr:-KG4QEPVpcw8HLNsDuqNPIx4sXLCUsuDOHVtPcNmgSewWcDgSkd6s-vGCXlac86BTYIU8sYqhvD-ZeTW1uG5OtEBm-QDhGV0aDKQCfsKEgAAAAL__________4JpZIJ2NIJpcIQ0ECjWiXNlY3AyNTZrMaEDCavdC37lb2fgBgKrvrLRZ-ZvL6JFNeUHHc5TXZ_BYqmDdGNwgiMog3VkcIIjKA -# teku -- enr:-KG4QA-EcFfXQsL2dcneG8vp8HTWLrpwHQ5HhfyIytfpeKOISzROy2kYSsf_v-BZKnIx5XHDjqJ-ttz0hoz6qJA7tasEhGV0aDKQxKgkDQAAAAL__________4JpZIJ2NIJpcIQDFt-UiXNlY3AyNTZrMaECkR4C5DVO_9rB48eHTY4kdyOHsguTEDlvb7Ce0_mvghSDdGNwgiMog3VkcIIjKA -# prysm -- enr:-Ku4QGQJf2bcDAwVGvbvtq3AB4KKwAvStTenY-i_QnW2ABNRRBncIU_5qR_e_um-9t3s9g-Y5ZfFATj1nhtzq6lvgc4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDEqCQNAAAAAv__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQNoed9JnQh7ltcAacHEGOjwocL1BhMQbYTgaPX0kFuXtIN1ZHCCE4g -# proto -- enr:-Ku4QFW1SLbtzJ_ghQQC8-8xezvZ1Mx95J-zer9IPmDE2BKeD_SM7j4vH6xmroUFVuyK-54n2Ey2ueB-Lf-fkbcLwAQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDEqCQNAAAAAv__________gmlkgnY0gmlwhGQZkSyJc2VjcDI1NmsxoQJMcbZhTCEKYSH5-qPQPgYfSHHUMLGBAKU-f-96yYKFMIN1ZHCCIyg diff --git a/common/eth2_network_config/built_in_network_configs/spadina/config.yaml b/common/eth2_network_config/built_in_network_configs/spadina/config.yaml deleted file mode 100644 index 5cc153174e4..00000000000 --- a/common/eth2_network_config/built_in_network_configs/spadina/config.yaml +++ /dev/null @@ -1,60 +0,0 @@ -CONFIG_NAME: "spadina" -MAX_COMMITTEES_PER_SLOT: 64 -TARGET_COMMITTEE_SIZE: 128 -MAX_VALIDATORS_PER_COMMITTEE: 2048 -MIN_PER_EPOCH_CHURN_LIMIT: 4 -CHURN_LIMIT_QUOTIENT: 65536 -SHUFFLE_ROUND_COUNT: 90 -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 1024 -MIN_GENESIS_TIME: 1601380800 -HYSTERESIS_QUOTIENT: 4 -HYSTERESIS_DOWNWARD_MULTIPLIER: 1 -HYSTERESIS_UPWARD_MULTIPLIER: 5 -PROPORTIONAL_SLASHING_MULTIPLIER: 3 -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 -ETH1_FOLLOW_DISTANCE: 1024 -TARGET_AGGREGATORS_PER_COMMITTEE: 16 -RANDOM_SUBNETS_PER_VALIDATOR: 1 -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 -SECONDS_PER_ETH1_BLOCK: 14 -DEPOSIT_CHAIN_ID: 5 -DEPOSIT_NETWORK_ID: 5 -DEPOSIT_CONTRACT_ADDRESS: 0x48B597F4b53C21B48AD95c7256B49D1779Bd5890 
-MIN_DEPOSIT_AMOUNT: 1000000000 -MAX_EFFECTIVE_BALANCE: 32000000000 -EJECTION_BALANCE: 16000000000 -EFFECTIVE_BALANCE_INCREMENT: 1000000000 -GENESIS_FORK_VERSION: 0x00000002 -BLS_WITHDRAWAL_PREFIX: 0x00 -GENESIS_DELAY: 172800 -SECONDS_PER_SLOT: 12 -MIN_ATTESTATION_INCLUSION_DELAY: 1 -SLOTS_PER_EPOCH: 32 -MIN_SEED_LOOKAHEAD: 1 -MAX_SEED_LOOKAHEAD: 4 -EPOCHS_PER_ETH1_VOTING_PERIOD: 32 -SLOTS_PER_HISTORICAL_ROOT: 8192 -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -SHARD_COMMITTEE_PERIOD: 256 -MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 -EPOCHS_PER_HISTORICAL_VECTOR: 65536 -EPOCHS_PER_SLASHINGS_VECTOR: 8192 -HISTORICAL_ROOTS_LIMIT: 16777216 -VALIDATOR_REGISTRY_LIMIT: 1099511627776 -BASE_REWARD_FACTOR: 64 -WHISTLEBLOWER_REWARD_QUOTIENT: 512 -PROPOSER_REWARD_QUOTIENT: 8 -INACTIVITY_PENALTY_QUOTIENT: 16777216 -MIN_SLASHING_PENALTY_QUOTIENT: 32 -MAX_PROPOSER_SLASHINGS: 16 -MAX_ATTESTER_SLASHINGS: 2 -MAX_ATTESTATIONS: 128 -MAX_DEPOSITS: 16 -MAX_VOLUNTARY_EXITS: 16 -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_SELECTION_PROOF: 0x05000000 -DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 diff --git a/common/eth2_network_config/built_in_network_configs/spadina/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/spadina/deploy_block.txt deleted file mode 100644 index afa03d34ee5..00000000000 --- a/common/eth2_network_config/built_in_network_configs/spadina/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -3384340 diff --git a/common/eth2_network_config/built_in_network_configs/spadina/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/spadina/genesis.ssz.zip deleted file mode 100644 index 3b97893c544..00000000000 Binary files a/common/eth2_network_config/built_in_network_configs/spadina/genesis.ssz.zip and /dev/null differ diff --git a/common/eth2_network_config/built_in_network_configs/toledo/boot_enr.yaml 
b/common/eth2_network_config/built_in_network_configs/toledo/boot_enr.yaml deleted file mode 100644 index 9b3f564b8e3..00000000000 --- a/common/eth2_network_config/built_in_network_configs/toledo/boot_enr.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# discv5.1-only bootnode @protolambda -- enr:-Ku4QL5E378NT4-vqP6v1mZ7kHxiTHJvuBvQixQsuTTCffa0PJNWMBlG3Mduvsvd6T2YP1U3l5tBKO5H-9wyX2SCtPkBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC4EvfsAHAe0P__________gmlkgnY0gmlwhDaetEeJc2VjcDI1NmsxoQKtGC2CAuba7goLLdle899M3esUmoWRvzi7GBVhq6ViCYN1ZHCCIyg - -# lighthouse (Canada) @protolambda -- enr:-LK4QHLujdDjOwm2siyFJ2XGz19_ip-qTtozG3ceZ3_56G-LMWb4um67gTSYRJg0WsSkyvRMBEpz8uuIYl-7HfWvktgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhCO3C5OJc2VjcDI1NmsxoQKXw9BLDY6YwmqTtfkzUnlJQb82UrlX4lIAnSSYWHFRlYN0Y3CCIyiDdWRwgiMo - -# lighthouse (Sao Paulo) @protolambda -- enr:-LK4QMxmk7obupScBebKFaasSH3QmYUg-HaEmMAljfmGQCLbKwdOhszzx-VfVPvlH7bZZbOmg3-SNWbJsFfytdjD7a4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhBLkdWuJc2VjcDI1NmsxoQOwYsJyLOjJcDIqiQSSZtDi_EwwSaUjPBSnLVY_PYu-HoN0Y3CCIyiDdWRwgiMo - -# Teku @protolambda -- enr:-KG4QKqo0mG4C35ntJg8icO54wd973aZ7aBiAnC2t1XkGvgqNDOEHwNe2ykxYVUj9AWjm_lKD7brlhXKCZEskGbie2cDhGV0aDKQl5uvZwBwHtD__________4JpZIJ2NIJpcIQNOThwiXNlY3AyNTZrMaECn1dwC8MRt8rk2VUT8RjzEBaceF09d4CEQI20O_SWYcqDdGNwgiMog3VkcIIjKA - -# Prysm @protolambda -- enr:-LK4QAhU5smiLgU0AgrdFv8eCKmDPCBkXCMCIy8Aktaci5qvCYOsW98xVqJS6OoPWt4Sz_YoTdLQBWxd-RZ756vmGPMBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhDTTDL2Jc2VjcDI1NmsxoQOmSJ0mKsQjab7Zralm1Hi0AEReZ2SEqYdKoOPmoA98DoN0Y3CCIyiDdWRwgiMo - -# Lighthouse: @sigp -- enr:-LK4QBsu_4I-tmA5WgxkJWRuVUCj2_QE2mmrwX0sFvAc3NR_YPrub4kpvPCb_OjKLwEefxey81SAcvQ7mr2Vvh8xhbgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhA3UHZWJc2VjcDI1NmsxoQL9FPylFeunleHuPXlbB938eIMd3X9y9cJ8ZI8y3Li0u4N0Y3CCIyiDdWRwgiMo - -# Lighthouse: @sigp -- 
enr:-LK4QEfW9TCASUUy8L5xamlTVs3JbgT8iYOUspJkbh3rj-BuUndLjtonockiN2K_0g-cBQGq-wvsgAiz5Q3-ic-Wz_ABh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCXm69nAHAe0P__________gmlkgnY0gmlwhCLV8-OJc2VjcDI1NmsxoQKYJuiXbqPzkbT0NAKIJneNWiX0136HiYI9qtx5NF1IloN0Y3CCIyiDdWRwgiMo diff --git a/common/eth2_network_config/built_in_network_configs/toledo/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/toledo/deploy_block.txt deleted file mode 100644 index 99798b05602..00000000000 --- a/common/eth2_network_config/built_in_network_configs/toledo/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -3702432 diff --git a/common/eth2_network_config/built_in_network_configs/toledo/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/toledo/genesis.ssz.zip deleted file mode 100644 index 842591737af..00000000000 Binary files a/common/eth2_network_config/built_in_network_configs/toledo/genesis.ssz.zip and /dev/null differ diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 18fd7ef9608..4f38905931c 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -1,23 +1,21 @@ use eth2_config::{predefined_networks_dir, *}; use enr::{CombinedKey, Enr}; -use ssz::Decode; use std::fs::{create_dir_all, File}; use std::io::{Read, Write}; use std::path::PathBuf; -use types::{BeaconState, EthSpec, EthSpecId, YamlConfig}; +use types::{BeaconState, ChainSpec, Config, EthSpec, EthSpecId}; -pub const ADDRESS_FILE: &str = "deposit_contract.txt"; pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt"; pub const BOOT_ENR_FILE: &str = "boot_enr.yaml"; pub const GENESIS_STATE_FILE: &str = "genesis.ssz"; -pub const YAML_CONFIG_FILE: &str = "config.yaml"; +pub const BASE_CONFIG_FILE: &str = "config.yaml"; #[derive(Copy, Clone, Debug, PartialEq)] pub struct HardcodedNet { pub name: &'static str, pub genesis_is_known: bool, - pub yaml_config: &'static [u8], + pub config: &'static [u8], pub deploy_block: &'static [u8], pub 
boot_enr: &'static [u8], pub genesis_state_bytes: &'static [u8], @@ -30,7 +28,7 @@ macro_rules! define_net { HardcodedNet { name: ETH2_NET_DIR.name, genesis_is_known: ETH2_NET_DIR.genesis_is_known, - yaml_config: $include_file!("../", "config.yaml"), + config: $include_file!("../", "config.yaml"), deploy_block: $include_file!("../", "deploy_block.txt"), boot_enr: $include_file!("../", "boot_enr.yaml"), genesis_state_bytes: $include_file!("../", "genesis.ssz"), @@ -38,16 +36,11 @@ macro_rules! define_net { }}; } -const ALTONA: HardcodedNet = define_net!(altona, include_altona_file); -const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file); -const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file); const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file); const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file); -const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file); const PRATER: HardcodedNet = define_net!(prater, include_prater_file); -const HARDCODED_NETS: &[HardcodedNet] = - &[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO, PRATER]; +const HARDCODED_NETS: &[HardcodedNet] = &[PYRMONT, MAINNET, PRATER]; pub const DEFAULT_HARDCODED_NETWORK: &str = "mainnet"; /// Specifies an Eth2 network. 
@@ -60,7 +53,7 @@ pub struct Eth2NetworkConfig { pub deposit_contract_deploy_block: u64, pub boot_enr: Option>>, pub genesis_state_bytes: Option>, - pub yaml_config: Option, + pub config: Config, } impl Eth2NetworkConfig { @@ -85,24 +78,17 @@ impl Eth2NetworkConfig { ), genesis_state_bytes: Some(net.genesis_state_bytes.to_vec()) .filter(|bytes| !bytes.is_empty()), - yaml_config: Some( - serde_yaml::from_reader(net.yaml_config) - .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?, - ), + config: serde_yaml::from_reader(net.config) + .map_err(|e| format!("Unable to parse yaml config: {:?}", e))?, }) } /// Returns an identifier that should be used for selecting an `EthSpec` instance for this /// network configuration. pub fn eth_spec_id(&self) -> Result { - self.yaml_config - .as_ref() - .ok_or_else(|| "YAML specification file missing".to_string()) - .and_then(|config| { - config - .eth_spec_id() - .ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name)) - }) + self.config + .eth_spec_id() + .ok_or_else(|| "Config does not match any known preset".to_string()) } /// Returns `true` if this configuration contains a `BeaconState`. @@ -110,14 +96,25 @@ impl Eth2NetworkConfig { self.genesis_state_bytes.is_some() } + /// Construct a consolidated `ChainSpec` from the YAML config. + pub fn chain_spec(&self) -> Result { + ChainSpec::from_config::(&self.config).ok_or_else(|| { + format!( + "YAML configuration incompatible with spec constants for {}", + E::spec_name() + ) + }) + } + /// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid. 
pub fn beacon_state(&self) -> Result, String> { + let spec = self.chain_spec::()?; let genesis_state_bytes = self .genesis_state_bytes .as_ref() .ok_or("Genesis state is unknown")?; - BeaconState::from_ssz_bytes(genesis_state_bytes) + BeaconState::from_ssz_bytes(genesis_state_bytes, &spec) .map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e)) } @@ -167,9 +164,7 @@ impl Eth2NetworkConfig { write_to_yaml_file!(BOOT_ENR_FILE, boot_enr); } - if let Some(yaml_config) = &self.yaml_config { - write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config); - } + write_to_yaml_file!(BASE_CONFIG_FILE, &self.config); // The genesis state is a special case because it uses SSZ, not YAML. if let Some(genesis_state_bytes) = &self.genesis_state_bytes { @@ -210,7 +205,7 @@ impl Eth2NetworkConfig { let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE); let boot_enr = optional_load_from_file!(BOOT_ENR_FILE); - let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE); + let config = load_from_file!(BASE_CONFIG_FILE); // The genesis state is a special case because it uses SSZ, not YAML. 
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE); @@ -232,7 +227,7 @@ impl Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr, genesis_state_bytes, - yaml_config, + config, }) } } @@ -242,9 +237,16 @@ mod tests { use super::*; use ssz::Encode; use tempfile::Builder as TempBuilder; - use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig}; + use types::{Config, Eth1Data, Hash256, MainnetEthSpec}; + + type E = MainnetEthSpec; - type E = V012LegacyEthSpec; + #[test] + fn mainnet_config_eq_chain_spec() { + let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap(); + let spec = ChainSpec::mainnet(); + assert_eq!(spec, config.chain_spec::().unwrap()); + } #[test] fn hard_coded_nets_work() { @@ -252,27 +254,8 @@ mod tests { let config = Eth2NetworkConfig::from_hardcoded_net(net) .unwrap_or_else(|_| panic!("{:?}", net.name)); - if net.name == "mainnet" - || net.name == "toledo" - || net.name == "pyrmont" - || net.name == "prater" - { - // Ensure we can parse the YAML config to a chain spec. - config - .yaml_config - .as_ref() - .unwrap() - .apply_to_chain_spec::(&E::default_spec()) - .unwrap(); - } else { - // Ensure we can parse the YAML config to a chain spec. - config - .yaml_config - .as_ref() - .unwrap() - .apply_to_chain_spec::(&E::default_spec()) - .unwrap(); - } + // Ensure we can parse the YAML config to a chain spec. + config.chain_spec::().unwrap(); assert_eq!( config.genesis_state_bytes.is_some(), @@ -296,16 +279,16 @@ mod tests { // TODO: figure out how to generate ENR and add some here. 
let boot_enr = None; let genesis_state = Some(BeaconState::new(42, eth1_data, spec)); - let yaml_config = Some(YamlConfig::from_spec::(spec)); + let config = Config::from_chain_spec::(spec); - do_test::(boot_enr, genesis_state, yaml_config); - do_test::(None, None, None); + do_test::(boot_enr, genesis_state, config.clone()); + do_test::(None, None, config); } fn do_test( boot_enr: Option>>, genesis_state: Option>, - yaml_config: Option, + config: Config, ) { let temp_dir = TempBuilder::new() .prefix("eth2_testnet_test") @@ -318,7 +301,7 @@ mod tests { deposit_contract_deploy_block, boot_enr, genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes), - yaml_config, + config, }; testnet diff --git a/common/fallback/Cargo.toml b/common/fallback/Cargo.toml index 04b2c5ea280..31a701d16eb 100644 --- a/common/fallback/Cargo.toml +++ b/common/fallback/Cargo.toml @@ -7,4 +7,4 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -itertools = "0.9.0" +itertools = "0.10.0" diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 8acc55b055a..2c672d870cc 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -244,8 +244,8 @@ where genesis_block: &BeaconBlock, genesis_state: &BeaconState, ) -> Result> { - let finalized_block_slot = genesis_block.slot; - let finalized_block_state_root = genesis_block.state_root; + let finalized_block_slot = genesis_block.slot(); + let finalized_block_state_root = genesis_block.state_root(); let current_epoch_shuffling_id = AttestationShufflingId::new(genesis_block_root, genesis_state, RelativeEpoch::Current) .map_err(Error::BeaconStateError)?; @@ -370,7 +370,7 @@ where ) -> Result> { self.update_time(current_slot)?; - let new_justified_checkpoint = &state.current_justified_checkpoint; + let new_justified_checkpoint = &state.current_justified_checkpoint(); if 
compute_slots_since_epoch_start::(self.fc_store.get_current_slot()) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED @@ -382,10 +382,10 @@ where compute_start_slot_at_epoch::(self.fc_store.justified_checkpoint().epoch); // This sanity check is not in the spec, but the invariant is implied. - if justified_slot >= state.slot { + if justified_slot >= state.slot() { return Err(Error::AttemptToRevertJustification { store: justified_slot, - state: state.slot, + state: state.slot(), }); } @@ -434,9 +434,9 @@ where let current_slot = self.update_time(current_slot)?; // Parent block must be known. - if !self.proto_array.contains_block(&block.parent_root) { + if !self.proto_array.contains_block(&block.parent_root()) { return Err(Error::InvalidBlock(InvalidBlock::UnknownParent( - block.parent_root, + block.parent_root(), ))); } @@ -444,10 +444,10 @@ where // the are in the past. // // Note: presently, we do not delay consideration. We just drop the block. - if block.slot > current_slot { + if block.slot() > current_slot { return Err(Error::InvalidBlock(InvalidBlock::FutureSlot { current_slot, - block_slot: block.slot, + block_slot: block.slot(), })); } @@ -455,10 +455,10 @@ where // get_ancestor). let finalized_slot = compute_start_slot_at_epoch::(self.fc_store.finalized_checkpoint().epoch); - if block.slot <= finalized_slot { + if block.slot() <= finalized_slot { return Err(Error::InvalidBlock(InvalidBlock::FinalizedSlot { finalized_slot, - block_slot: block.slot, + block_slot: block.slot(), })); } @@ -471,7 +471,7 @@ where // `self.proto_array` to do this search. 
See: // // https://github.com/ethereum/eth2.0-specs/pull/1884 - let block_ancestor = self.get_ancestor(block.parent_root, finalized_slot)?; + let block_ancestor = self.get_ancestor(block.parent_root(), finalized_slot)?; let finalized_root = self.fc_store.finalized_checkpoint().root; if block_ancestor != Some(finalized_root) { return Err(Error::InvalidBlock(InvalidBlock::NotFinalizedDescendant { @@ -481,24 +481,24 @@ where } // Update justified checkpoint. - if state.current_justified_checkpoint.epoch > self.fc_store.justified_checkpoint().epoch { - if state.current_justified_checkpoint.epoch + if state.current_justified_checkpoint().epoch > self.fc_store.justified_checkpoint().epoch { + if state.current_justified_checkpoint().epoch > self.fc_store.best_justified_checkpoint().epoch { self.fc_store - .set_best_justified_checkpoint(state.current_justified_checkpoint); + .set_best_justified_checkpoint(state.current_justified_checkpoint()); } if self.should_update_justified_checkpoint(current_slot, state)? { self.fc_store - .set_justified_checkpoint(state.current_justified_checkpoint) + .set_justified_checkpoint(state.current_justified_checkpoint()) .map_err(Error::UnableToSetJustifiedCheckpoint)?; } } // Update finalized checkpoint. 
- if state.finalized_checkpoint.epoch > self.fc_store.finalized_checkpoint().epoch { + if state.finalized_checkpoint().epoch > self.fc_store.finalized_checkpoint().epoch { self.fc_store - .set_finalized_checkpoint(state.finalized_checkpoint); + .set_finalized_checkpoint(state.finalized_checkpoint()); let finalized_slot = compute_start_slot_at_epoch::(self.fc_store.finalized_checkpoint().epoch); @@ -507,24 +507,24 @@ where // information: // // https://github.com/ethereum/eth2.0-specs/pull/1880 - if *self.fc_store.justified_checkpoint() != state.current_justified_checkpoint - && (state.current_justified_checkpoint.epoch + if *self.fc_store.justified_checkpoint() != state.current_justified_checkpoint() + && (state.current_justified_checkpoint().epoch > self.fc_store.justified_checkpoint().epoch || self .get_ancestor(self.fc_store.justified_checkpoint().root, finalized_slot)? != Some(self.fc_store.finalized_checkpoint().root)) { self.fc_store - .set_justified_checkpoint(state.current_justified_checkpoint) + .set_justified_checkpoint(state.current_justified_checkpoint()) .map_err(Error::UnableToSetJustifiedCheckpoint)?; } } let target_slot = block - .slot + .slot() .epoch(E::slots_per_epoch()) .start_slot(E::slots_per_epoch()); - let target_root = if block.slot == target_slot { + let target_root = if block.slot() == target_slot { block_root } else { *state @@ -539,9 +539,9 @@ where // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it doesn't have any votes. 
self.proto_array.process_block(ProtoBlock { - slot: block.slot, + slot: block.slot(), root: block_root, - parent_root: Some(block.parent_root), + parent_root: Some(block.parent_root()), target_root, current_epoch_shuffling_id: AttestationShufflingId::new( block_root, @@ -555,9 +555,9 @@ where RelativeEpoch::Next, ) .map_err(Error::BeaconStateError)?, - state_root: block.state_root, - justified_epoch: state.current_justified_checkpoint.epoch, - finalized_epoch: state.finalized_checkpoint.epoch, + state_root: block.state_root(), + justified_epoch: state.current_justified_checkpoint().epoch, + finalized_epoch: state.finalized_checkpoint().epoch, })?; Ok(()) diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ec0b1277432..21bb6341838 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1,7 +1,12 @@ #![cfg(not(debug_assertions))] +use std::fmt; +use std::sync::Mutex; + +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +}; use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError, StateSkipConfig, WhenSlotSkipped, }; @@ -9,14 +14,12 @@ use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, QueuedAttestation, SAFE_SLOTS_TO_UPDATE_JUSTIFIED, }; -use std::fmt; -use std::sync::Mutex; use store::{MemoryStore, StoreConfig}; use types::{ test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, - Checkpoint, Epoch, EthSpec, IndexedAttestation, MainnetEthSpec, Slot, SubnetId, + BeaconBlock, BeaconBlockRef, BeaconState, Checkpoint, Epoch, EthSpec, Hash256, + IndexedAttestation, MainnetEthSpec, Slot, SubnetId, }; -use types::{BeaconBlock, BeaconState, Hash256, SignedBeaconBlock}; pub type E = MainnetEthSpec; @@ -47,6 +50,7 @@ impl ForkChoiceTest { pub fn new() 
-> Self { let harness = BeaconChainHarness::new_with_target_aggregators( MainnetEthSpec, + None, generate_deterministic_keypairs(VALIDATOR_COUNT), // Ensure we always have an aggregator for each slot. u64::max_value(), @@ -60,6 +64,7 @@ impl ForkChoiceTest { pub fn new_with_chain_config(chain_config: ChainConfig) -> Self { let harness = BeaconChainHarness::new_with_chain_config( MainnetEthSpec, + None, generate_deterministic_keypairs(VALIDATOR_COUNT), // Ensure we always have an aggregator for each slot. u64::max_value(), @@ -170,7 +175,7 @@ impl ForkChoiceTest { /// Build the chain whilst `predicate` returns `true` and `process_block_result` does not error. pub fn apply_blocks_while(self, mut predicate: F) -> Result where - F: FnMut(&BeaconBlock, &BeaconState) -> bool, + F: FnMut(BeaconBlockRef<'_, E>, &BeaconState) -> bool, { self.harness.advance_slot(); let mut state = self.harness.get_current_state(); @@ -179,7 +184,7 @@ impl ForkChoiceTest { let slot = self.harness.get_current_slot(); let (block, state_) = self.harness.make_block(state, slot); state = state_; - if !predicate(&block.message, &state) { + if !predicate(block.message(), &state) { break; } if let Ok(block_hash) = self.harness.process_block_result(block.clone()) { @@ -264,14 +269,15 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (mut block, mut state) = self.harness.make_block(state, slot); - func(&mut block.message, &mut state); + let (signed_block, mut state) = self.harness.make_block(state, slot); + let (mut block, _) = signed_block.deconstruct(); + func(&mut block, &mut state); let current_slot = self.harness.get_current_slot(); self.harness .chain .fork_choice .write() - .on_block(current_slot, &block.message, block.canonical_root(), &state) + .on_block(current_slot, &block, block.canonical_root(), &state) .unwrap(); self } @@ -297,15 +303,16 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (mut block, mut state) = 
self.harness.make_block(state, slot); - mutation_func(&mut block.message, &mut state); + let (signed_block, mut state) = self.harness.make_block(state, slot); + let (mut block, _) = signed_block.deconstruct(); + mutation_func(&mut block, &mut state); let current_slot = self.harness.get_current_slot(); let err = self .harness .chain .fork_choice .write() - .on_block(current_slot, &block.message, block.canonical_root(), &state) + .on_block(current_slot, &block, block.canonical_root(), &state) .err() .expect("on_block did not return an error"); comparison_func(err); @@ -321,11 +328,11 @@ impl ForkChoiceTest { let state_root = harness .chain .store - .get_item::>(&fc.fc_store().justified_checkpoint().root) + .get_block(&fc.fc_store().justified_checkpoint().root) .unwrap() .unwrap() - .message - .state_root; + .message() + .state_root(); let state = harness .chain .store @@ -333,7 +340,7 @@ impl ForkChoiceTest { .unwrap() .unwrap(); let balances = state - .validators + .validators() .into_iter() .map(|v| { if v.is_active_at(state.current_epoch()) { @@ -401,7 +408,7 @@ impl ForkChoiceTest { .sign( &validator_sk, validator_committee_index, - &head.beacon_state.fork, + &head.beacon_state.fork(), self.harness.chain.genesis_validators_root, &self.harness.chain.spec, ) @@ -467,7 +474,7 @@ fn is_safe_to_update(slot: Slot) -> bool { #[test] fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .unwrap() .move_inside_safe_to_update() .assert_justified_epoch(0) @@ -481,7 +488,7 @@ fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { #[test] fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch <= 2) + .apply_blocks_while(|_, state| 
state.current_justified_checkpoint().epoch <= 2) .unwrap() .move_outside_safe_to_update() .assert_justified_epoch(2) @@ -496,7 +503,7 @@ fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { #[test] fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .unwrap() .move_to_next_unsafe_period() .assert_justified_epoch(0) @@ -512,19 +519,19 @@ fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { #[test] fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .move_inside_safe_to_update() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { // The finalized checkpoint should not change. - state.finalized_checkpoint.epoch = Epoch::new(0); + state.finalized_checkpoint().epoch = Epoch::new(0); // The justified checkpoint has changed. - state.current_justified_checkpoint.epoch = Epoch::new(3); + state.current_justified_checkpoint_mut().epoch = Epoch::new(3); // The new block should **not** include the current justified block as an ancestor. 
- state.current_justified_checkpoint.root = *state + state.current_justified_checkpoint_mut().root = *state .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) @@ -538,19 +545,19 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi #[test] fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { // The finalized checkpoint should not change. - state.finalized_checkpoint.epoch = Epoch::new(0); + state.finalized_checkpoint().epoch = Epoch::new(0); // The justified checkpoint has changed. - state.current_justified_checkpoint.epoch = Epoch::new(3); + state.current_justified_checkpoint_mut().epoch = Epoch::new(3); // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint.root = *state + state.current_justified_checkpoint_mut().root = *state .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) @@ -564,19 +571,19 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f #[test] fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { // The finalized checkpoint should change. 
- state.finalized_checkpoint.epoch = Epoch::new(1); + state.finalized_checkpoint_mut().epoch = Epoch::new(1); // The justified checkpoint has changed. - state.current_justified_checkpoint.epoch = Epoch::new(3); + state.current_justified_checkpoint_mut().epoch = Epoch::new(3); // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint.root = *state + state.current_justified_checkpoint_mut().root = *state .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) @@ -588,7 +595,7 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_fina #[test] fn justified_balances() { ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .assert_justified_epoch(2) @@ -617,7 +624,7 @@ fn invalid_block_unknown_parent() { .apply_blocks(2) .apply_invalid_block_directly_to_fork_choice( |block, _| { - block.parent_root = junk; + *block.parent_root_mut() = junk; }, |err| { assert_invalid_block!( @@ -638,7 +645,7 @@ fn invalid_block_future_slot() { .apply_blocks(2) .apply_invalid_block_directly_to_fork_choice( |block, _| { - block.slot = block.slot + 1; + *block.slot_mut() += 1; }, |err| assert_invalid_block!(err, InvalidBlock::FutureSlot { .. 
}), ); @@ -650,12 +657,12 @@ fn invalid_block_future_slot() { #[test] fn invalid_block_finalized_slot() { ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .apply_invalid_block_directly_to_fork_choice( |block, _| { - block.slot = Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; + *block.slot_mut() = Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; }, |err| { assert_invalid_block!( @@ -670,7 +677,7 @@ fn invalid_block_finalized_slot() { /// Specification v0.12.1 /// /// assert get_ancestor(store, hash_tree_root(block), finalized_slot) == -/// store.finalized_checkpoint.root +/// store.finalized_checkpoint().root /// /// Note: we technically don't do this exact check, but an equivalent check. Reference: /// @@ -680,16 +687,16 @@ fn invalid_block_finalized_descendant() { let invalid_ancestor = Mutex::new(Hash256::zero()); ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .assert_finalized_epoch(2) .apply_invalid_block_directly_to_fork_choice( |block, state| { - block.parent_root = *state + *block.parent_root_mut() = *state .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); - *invalid_ancestor.lock().unwrap() = block.parent_root; + *invalid_ancestor.lock().unwrap() = block.parent_root(); }, |err| { assert_invalid_block!( @@ -966,7 +973,7 @@ fn valid_attestation_skip_across_epoch() { #[test] fn can_read_finalized_block() { ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .check_finalized_block_is_accessible(); @@ -1004,7 +1011,7 @@ fn weak_subjectivity_pass_on_startup() { #[test] fn 
weak_subjectivity_check_passes() { let setup_harness = ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .assert_finalized_epoch(2); @@ -1022,7 +1029,7 @@ fn weak_subjectivity_check_passes() { }; ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .assert_finalized_epoch(2) @@ -1032,7 +1039,7 @@ fn weak_subjectivity_check_passes() { #[test] fn weak_subjectivity_check_fails_early_epoch() { let setup_harness = ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .assert_finalized_epoch(2); @@ -1052,7 +1059,7 @@ fn weak_subjectivity_check_fails_early_epoch() { }; ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 3) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); @@ -1061,7 +1068,7 @@ fn weak_subjectivity_check_fails_early_epoch() { #[test] fn weak_subjectivity_check_fails_late_epoch() { let setup_harness = ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .assert_finalized_epoch(2); @@ -1081,7 +1088,7 @@ fn weak_subjectivity_check_fails_late_epoch() { }; ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 4) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 4) 
.unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); @@ -1090,7 +1097,7 @@ fn weak_subjectivity_check_fails_late_epoch() { #[test] fn weak_subjectivity_check_fails_incorrect_root() { let setup_harness = ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() .apply_blocks(1) .assert_finalized_epoch(2); @@ -1110,7 +1117,7 @@ fn weak_subjectivity_check_fails_incorrect_root() { }; ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 3) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); @@ -1120,7 +1127,7 @@ fn weak_subjectivity_check_fails_incorrect_root() { fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { let setup_harness = ForkChoiceTest::new() // first two epochs - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap(); // get the head, it will become the finalized root of epoch 4 @@ -1129,7 +1136,7 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { setup_harness // epoch 3 will be entirely skip slots .skip_slots(E::slots_per_epoch() as usize) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 5) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) .unwrap() .apply_blocks(1) .assert_finalized_epoch(5); @@ -1147,10 +1154,10 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { // recreate the chain exactly ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() 
.skip_slots(E::slots_per_epoch() as usize) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 5) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) .unwrap() .apply_blocks(1) .assert_finalized_epoch(5) @@ -1161,7 +1168,7 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { let setup_harness = ForkChoiceTest::new() // first two epochs - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap(); // get the head, it will become the finalized root of epoch 4 @@ -1170,7 +1177,7 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { setup_harness // epoch 3 will be entirely skip slots .skip_slots(E::slots_per_epoch() as usize) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 5) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) .unwrap() .apply_blocks(1) .assert_finalized_epoch(5); @@ -1188,10 +1195,10 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { // recreate the chain exactly ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch == 0) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .unwrap() .skip_slots(E::slots_per_epoch() as usize) - .apply_blocks_while(|_, state| state.finalized_checkpoint.epoch < 6) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 6) .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs index 24edf1ebee2..5c3fa0f0aa5 100644 --- a/consensus/serde_utils/src/quoted_int.rs +++ b/consensus/serde_utils/src/quoted_int.rs @@ -70,6 +70,17 @@ macro_rules! 
define_mod { pub value: T, } + /// Compositional wrapper type that allows quotes or no quotes. + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] + #[serde(transparent)] + pub struct MaybeQuoted + where + T: From<$int> + Into<$int> + Copy + TryFrom, + { + #[serde(with = "self")] + pub value: T, + } + /// Serialize with quotes. pub fn serialize(value: &T, serializer: S) -> Result where diff --git a/consensus/ssz/src/decode.rs b/consensus/ssz/src/decode.rs index 38cd7c5fdcd..52ff6d35cb1 100644 --- a/consensus/ssz/src/decode.rs +++ b/consensus/ssz/src/decode.rs @@ -145,6 +145,31 @@ impl<'a> SszDecoderBuilder<'a> { } } + /// Registers a variable-length object as the next item in `bytes`, without specifying the + /// actual type. + /// + /// ## Notes + /// + /// Use of this function is generally discouraged since it cannot detect if some type changes + /// from variable to fixed length. + /// + /// Use `Self::register_type` wherever possible. + pub fn register_anonymous_variable_length_item(&mut self) -> Result<(), DecodeError> { + struct Anonymous; + + impl Decode for Anonymous { + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(_bytes: &[u8]) -> Result { + unreachable!("Anonymous should never be decoded") + } + } + + self.register_type::() + } + /// Declares that some type `T` is the next item in `bytes`. pub fn register_type(&mut self) -> Result<(), DecodeError> { if T::is_ssz_fixed_len() { @@ -277,6 +302,14 @@ impl<'a> SszDecoder<'a> { pub fn decode_next(&mut self) -> Result { T::from_ssz_bytes(self.items.remove(0)) } + + /// Decodes the next item using the provided function. 
+ pub fn decode_next_with(&mut self, f: F) -> Result + where + F: FnOnce(&'a [u8]) -> Result, + { + f(self.items.remove(0)) + } } /// Reads a `BYTES_PER_LENGTH_OFFSET`-byte union index from `bytes`, where `bytes.len() >= diff --git a/consensus/ssz_derive/src/lib.rs b/consensus/ssz_derive/src/lib.rs index 2170251aee4..86327d94fff 100644 --- a/consensus/ssz_derive/src/lib.rs +++ b/consensus/ssz_derive/src/lib.rs @@ -3,11 +3,9 @@ //! //! Supports field attributes, see each derive macro for more information. -extern crate proc_macro; - use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, DeriveInput}; +use syn::{parse_macro_input, DataEnum, DataStruct, DeriveInput}; /// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields /// that should not be serialized. @@ -57,7 +55,7 @@ fn should_skip_serializing(field: &syn::Field) -> bool { }) } -/// Implements `ssz::Encode` for some `struct`. +/// Implements `ssz::Encode` for some `struct` or `enum`. /// /// Fields are encoded in the order they are defined. 
/// @@ -68,17 +66,20 @@ fn should_skip_serializing(field: &syn::Field) -> bool { pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); + match &item.data { + syn::Data::Struct(s) => ssz_encode_derive_struct(&item, s), + syn::Data::Enum(s) => ssz_encode_derive_enum(&item, s), + _ => panic!("ssz_derive only supports structs and enums"), + } +} - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("ssz_derive only supports structs."), - }; +fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct) -> TokenStream { + let name = &derive_input.ident; + let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - let field_idents = get_serializable_named_field_idents(&struct_data); - let field_idents_a = get_serializable_named_field_idents(&struct_data); - let field_types_a = get_serializable_field_types(&struct_data); + let field_idents = get_serializable_named_field_idents(struct_data); + let field_idents_a = get_serializable_named_field_idents(struct_data); + let field_types_a = get_serializable_field_types(struct_data); let field_types_b = field_types_a.clone(); let field_types_d = field_types_a.clone(); let field_types_e = field_types_a.clone(); @@ -152,6 +153,72 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { output.into() } +/// Derive `Encode` for a restricted subset of all possible enum types. +/// +/// Only supports: +/// - Enums with a single field per variant, where +/// - All fields are variably sized from an SSZ-perspective (not fixed size). +/// +/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run +/// time* if the variable-size requirement isn't met. 
+fn ssz_encode_derive_enum(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { + let name = &derive_input.ident; + let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); + + let (patterns, assert_exprs): (Vec<_>, Vec<_>) = enum_data + .variants + .iter() + .map(|variant| { + let variant_name = &variant.ident; + + if variant.fields.len() != 1 { + panic!("ssz::Encode can only be derived for enums with 1 field per variant"); + } + + let pattern = quote! { + #name::#variant_name(ref inner) + }; + + let ty = &(&variant.fields).into_iter().next().unwrap().ty; + let type_assert = quote! { + !<#ty as ssz::Encode>::is_ssz_fixed_len() + }; + (pattern, type_assert) + }) + .unzip(); + + let output = quote! { + impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { + fn is_ssz_fixed_len() -> bool { + assert!( + #( + #assert_exprs && + )* true, + "not all enum variants are variably-sized" + ); + false + } + + fn ssz_bytes_len(&self) -> usize { + match self { + #( + #patterns => inner.ssz_bytes_len(), + )* + } + } + + fn ssz_append(&self, buf: &mut Vec) { + match self { + #( + #patterns => inner.ssz_append(buf), + )* + } + } + } + }; + output.into() +} + /// Returns true if some field has an attribute declaring it should not be deserialized. 
/// /// The field attribute is: `#[ssz(skip_deserializing)]` diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index 0d7664c1dcc..a60102aa88c 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -2,7 +2,7 @@ use crate::tree_hash::vec_tree_hash_root; use crate::Error; use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; -use std::ops::{Deref, Index, IndexMut}; +use std::ops::{Deref, DerefMut, Index, IndexMut}; use std::slice::SliceIndex; use tree_hash::Hash256; use typenum::Unsigned; @@ -147,6 +147,16 @@ impl Deref for FixedVector { } } +// This implementation is required to use `get_mut` to access elements. +// +// It's safe because none of the methods on mutable slices allow changing the length +// of the backing vec. +impl DerefMut for FixedVector { + fn deref_mut(&mut self) -> &mut [T] { + &mut self.vec[..] + } +} + impl tree_hash::TreeHash for FixedVector where T: tree_hash::TreeHash, @@ -210,7 +220,7 @@ where impl ssz::Decode for FixedVector where - T: ssz::Decode + Default, + T: ssz::Decode, { fn is_ssz_fixed_len() -> bool { T::is_ssz_fixed_len() @@ -250,18 +260,21 @@ where .map(|chunk| T::from_ssz_bytes(chunk)) .collect::, _>>() .and_then(|vec| { - if vec.len() == fixed_len { - Ok(vec.into()) - } else { - Err(ssz::DecodeError::BytesInvalid(format!( - "Wrong number of FixedVector elements, got: {}, expected: {}", - vec.len(), - N::to_usize() - ))) - } + Self::new(vec).map_err(|e| { + ssz::DecodeError::BytesInvalid(format!( + "Wrong number of FixedVector elements: {:?}", + e + )) + }) }) } else { - ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len)).map(|vec| vec.into()) + let vec = ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len))?; + Self::new(vec).map_err(|e| { + ssz::DecodeError::BytesInvalid(format!( + "Wrong number of FixedVector elements: {:?}", + e + )) + }) } } } diff --git 
a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 166e3ab5bd9..63f8b448686 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -4,22 +4,18 @@ version = "0.2.0" authors = ["Paul Hauner ", "Michael Sproul "] edition = "2018" -[[bench]] -name = "benches" -harness = false - [dev-dependencies] -criterion = "0.3.3" env_logger = "0.8.2" serde = "1.0.116" serde_derive = "1.0.116" lazy_static = "1.4.0" serde_yaml = "0.8.13" +beacon_chain = { path = "../../beacon_node/beacon_chain" } [dependencies] bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" -itertools = "0.9.0" +itertools = "0.10.0" eth2_ssz = "0.1.2" eth2_ssz_types = { path = "../ssz_types" } merkle_proof = { path = "../merkle_proof" } @@ -31,6 +27,7 @@ types = { path = "../types", default-features = false } rayon = "1.4.1" eth2_hashing = "0.1.0" int_to_bytes = { path = "../int_to_bytes" } +smallvec = "1.6.1" arbitrary = { version = "0.4.6", features = ["derive"], optional = true } [features] diff --git a/consensus/state_processing/benches/benches.rs b/consensus/state_processing/benches/benches.rs deleted file mode 100644 index 771b47099ac..00000000000 --- a/consensus/state_processing/benches/benches.rs +++ /dev/null @@ -1,431 +0,0 @@ -#![allow(deprecated)] - -extern crate env_logger; - -use criterion::Criterion; -use criterion::{black_box, criterion_group, criterion_main, Benchmark}; -use ssz::Encode; -use state_processing::{test_utils::BlockBuilder, BlockSignatureStrategy, VerifySignatures}; -use types::{ - BeaconState, ChainSpec, EthSpec, MainnetEthSpec, MinimalEthSpec, SignedBeaconBlock, Slot, -}; - -pub const VALIDATORS_LOW: usize = 32_768; -pub const VALIDATORS_HIGH: usize = 300_032; - -fn all_benches(c: &mut Criterion) { - env_logger::init(); - - average_bench::(c, "minimal", VALIDATORS_LOW); - average_bench::(c, "mainnet", VALIDATORS_LOW); - average_bench::(c, "mainnet", VALIDATORS_HIGH); - - worst_bench::(c, "minimal", 
VALIDATORS_LOW); - worst_bench::(c, "mainnet", VALIDATORS_LOW); - worst_bench::(c, "mainnet", VALIDATORS_HIGH); -} - -/// Run a bench with a average complexity block. -fn average_bench(c: &mut Criterion, spec_desc: &str, validator_count: usize) { - let spec = &T::default_spec(); - - let (block, state) = get_average_block(validator_count, spec); - bench_block::(c, block, state, spec, spec_desc, "average_complexity_block"); -} - -/// Run a bench with a highly complex block. -fn worst_bench(c: &mut Criterion, spec_desc: &str, validator_count: usize) { - let mut spec = &mut T::default_spec(); - - // Allows the exits to be processed sucessfully. - spec.shard_committee_period = 0; - - let (block, state) = get_worst_block(validator_count, spec); - bench_block::(c, block, state, spec, spec_desc, "high_complexity_block"); -} - -/// Return a block and state where the block has "average" complexity. I.e., the number of -/// operations we'd generally expect to see. -fn get_average_block( - validator_count: usize, - spec: &ChainSpec, -) -> (SignedBeaconBlock, BeaconState) { - let mut builder: BlockBuilder = BlockBuilder::new(validator_count, &spec); - // builder.num_attestations = T::MaxAttestations::to_usize(); - builder.num_attestations = 16; - builder.set_slot(Slot::from(T::slots_per_epoch() * 3 - 2)); - builder.build_caches(&spec); - builder.build(&spec) -} - -/// Return a block and state where the block has the "worst" complexity. The block is not -/// _guaranteed_ to be the worst possible complexity, it just has the max possible operations. -fn get_worst_block( - validator_count: usize, - spec: &ChainSpec, -) -> (SignedBeaconBlock, BeaconState) { - let mut builder: BlockBuilder = BlockBuilder::new(validator_count, &spec); - builder.maximize_block_operations(); - - // FIXME: enable deposits once we can generate them with valid proofs. 
- builder.num_deposits = 0; - - builder.set_slot(Slot::from(T::slots_per_epoch() * 3 - 2)); - builder.build_caches(&spec); - builder.build(&spec) -} - -#[allow(clippy::unit_arg)] -fn bench_block( - c: &mut Criterion, - block: SignedBeaconBlock, - state: BeaconState, - spec: &ChainSpec, - spec_desc: &str, - block_desc: &str, -) { - let validator_count = state.validators.len(); - - let title = &format!( - "{}/{}_validators/{}", - spec_desc, validator_count, block_desc - ); - - let local_block = block.clone(); - let local_state = state.clone(); - let local_spec = spec.clone(); - c.bench( - &title, - Benchmark::new( - "per_block_processing/individual_signature_verification", - move |b| { - b.iter_batched_ref( - || (local_spec.clone(), local_state.clone(), local_block.clone()), - |(spec, ref mut state, block)| { - black_box( - state_processing::per_block_processing::( - state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ) - .expect("block processing should succeed"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }, - ) - .sample_size(10), - ); - - let local_block = block.clone(); - let local_state = state.clone(); - let local_spec = spec.clone(); - c.bench( - &title, - Benchmark::new( - "per_block_processing/bulk_signature_verification", - move |b| { - b.iter_batched_ref( - || (local_spec.clone(), local_state.clone(), local_block.clone()), - |(spec, ref mut state, block)| { - black_box( - state_processing::per_block_processing::( - state, - &block, - None, - BlockSignatureStrategy::VerifyBulk, - &spec, - ) - .expect("block processing should succeed"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }, - ) - .sample_size(10), - ); - - let local_block = block.clone(); - let local_state = state.clone(); - let local_spec = spec.clone(); - c.bench( - &title, - Benchmark::new("per_block_processing/no_signature_verification", move |b| { - b.iter_batched_ref( - || (local_spec.clone(), local_state.clone(), local_block.clone()), - |(spec, ref 
mut state, block)| { - black_box( - state_processing::per_block_processing::( - state, - &block, - None, - BlockSignatureStrategy::NoVerification, - &spec, - ) - .expect("block processing should succeed"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let local_block = block.clone(); - let local_state = state.clone(); - let local_spec = spec.clone(); - c.bench( - &title, - Benchmark::new("process_block_header", move |b| { - b.iter_batched_ref( - || (local_spec.clone(), local_state.clone(), local_block.clone()), - |(spec, ref mut state, block)| { - black_box( - state_processing::per_block_processing::process_block_header::( - state, - &block.message, - &spec, - ) - .expect("process_block_header should succeed"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let local_block = block.clone(); - let local_state = state.clone(); - let local_spec = spec.clone(); - c.bench( - &title, - Benchmark::new("verify_block_signature", move |b| { - b.iter_batched_ref( - || (local_spec.clone(), local_state.clone(), local_block.clone()), - |(spec, ref mut state, block)| { - black_box( - state_processing::per_block_processing::verify_block_signature::( - state, &block, None, &spec, - ) - .expect("verify_block_signature should succeed"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let local_block = block.clone(); - let local_state = state.clone(); - let local_spec = spec.clone(); - c.bench( - &title, - Benchmark::new("process_attestations", move |b| { - b.iter_batched_ref( - || (local_spec.clone(), local_state.clone(), local_block.clone()), - |(spec, ref mut state, block)| { - black_box( - state_processing::per_block_processing::process_attestations::( - state, - &block.message.body.attestations, - VerifySignatures::True, - &spec, - ) - .expect("attestation processing should succeed"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let 
local_block = block.clone(); - let local_state = state.clone(); - let local_spec = spec.clone(); - c.bench( - &title, - Benchmark::new("verify_attestation", move |b| { - b.iter_batched_ref( - || { - let attestation = &local_block.message.body.attestations[0]; - - (local_spec.clone(), local_state.clone(), attestation.clone()) - }, - |(spec, ref mut state, attestation)| { - black_box( - state_processing::per_block_processing::verify_attestation_for_block_inclusion( - state, - &attestation, - VerifySignatures::True, - spec, - ) - .expect("should verify attestation"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let local_block = block.clone(); - let local_state = state.clone(); - c.bench( - &title, - Benchmark::new("get_indexed_attestation", move |b| { - b.iter_batched_ref( - || { - let attestation = &local_block.message.body.attestations[0]; - let committee = local_state - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .unwrap(); - (committee.committee, attestation.clone()) - }, - |(committee, attestation)| { - black_box( - state_processing::common::get_indexed_attestation(committee, &attestation) - .expect("should get indexed attestation"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let local_block = block.clone(); - let local_state = state.clone(); - let local_spec = spec.clone(); - c.bench( - &title, - Benchmark::new("is_valid_indexed_attestation_with_signature", move |b| { - b.iter_batched_ref( - || { - let attestation = &local_block.message.body.attestations[0]; - let committee = local_state - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .unwrap(); - let indexed_attestation = state_processing::common::get_indexed_attestation( - &committee.committee, - &attestation, - ) - .expect("should get indexed attestation"); - - (local_spec.clone(), local_state.clone(), indexed_attestation) - }, - |(spec, ref mut state, indexed_attestation)| { - 
black_box( - state_processing::per_block_processing::is_valid_indexed_attestation( - state, - &indexed_attestation, - VerifySignatures::True, - spec, - ) - .expect("should run is_valid_indexed_attestation"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let local_block = block.clone(); - let local_state = state.clone(); - let local_spec = spec.clone(); - c.bench( - &title, - Benchmark::new("is_valid_indexed_attestation_without_signature", move |b| { - b.iter_batched_ref( - || { - let attestation = &local_block.message.body.attestations[0]; - let committee = local_state - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .unwrap(); - let indexed_attestation = state_processing::common::get_indexed_attestation( - &committee.committee, - &attestation, - ) - .expect("should get indexed attestation"); - - (local_spec.clone(), local_state.clone(), indexed_attestation) - }, - |(spec, ref mut state, indexed_attestation)| { - black_box( - state_processing::per_block_processing::is_valid_indexed_attestation( - state, - &indexed_attestation, - VerifySignatures::False, - spec, - ) - .expect("should run is_valid_indexed_attestation_without_signature"), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let local_block = block.clone(); - let local_state = state; - c.bench( - &title, - Benchmark::new("get_attesting_indices", move |b| { - b.iter_batched_ref( - || { - let attestation = &local_block.message.body.attestations[0]; - let committee = local_state - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .unwrap(); - - (committee.committee, attestation.clone()) - }, - |(committee, attestation)| { - black_box(state_processing::common::get_attesting_indices::( - committee, - &attestation.aggregation_bits, - )) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let local_block = block.clone(); - c.bench( - &title, - 
Benchmark::new("ssz_serialize_block", move |b| { - b.iter_batched_ref( - || (), - |_| black_box(local_block.as_ssz_bytes()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let local_block = block; - c.bench( - &title, - Benchmark::new("ssz_block_len", move |b| { - b.iter_batched_ref( - || (), - |_| black_box(local_block.ssz_bytes_len()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); -} - -criterion_group!(benches, all_benches,); -criterion_main!(benches); diff --git a/consensus/state_processing/src/common/altair.rs b/consensus/state_processing/src/common/altair.rs new file mode 100644 index 00000000000..6cf80bdd9ed --- /dev/null +++ b/consensus/state_processing/src/common/altair.rs @@ -0,0 +1,32 @@ +use integer_sqrt::IntegerSquareRoot; +use safe_arith::{ArithError, SafeArith}; +use types::*; + +/// Returns the base reward for some validator. +/// +/// Spec v1.1.0 +pub fn get_base_reward( + state: &BeaconState, + index: usize, + // Should be == get_total_active_balance(state, spec) + total_active_balance: u64, + spec: &ChainSpec, +) -> Result { + state + .get_effective_balance(index)? + .safe_div(spec.effective_balance_increment)? + .safe_mul(get_base_reward_per_increment(total_active_balance, spec)?) + .map_err(Into::into) +} + +/// Returns the base reward for some validator. +/// +/// Spec v1.1.0 +pub fn get_base_reward_per_increment( + total_active_balance: u64, + spec: &ChainSpec, +) -> Result { + spec.effective_balance_increment + .safe_mul(spec.base_reward_factor)? 
+ .safe_div(total_active_balance.integer_sqrt()) +} diff --git a/consensus/state_processing/src/common/get_base_reward.rs b/consensus/state_processing/src/common/base.rs similarity index 53% rename from consensus/state_processing/src/common/get_base_reward.rs rename to consensus/state_processing/src/common/base.rs index 2586c2c69e1..b5cb382721f 100644 --- a/consensus/state_processing/src/common/get_base_reward.rs +++ b/consensus/state_processing/src/common/base.rs @@ -3,8 +3,6 @@ use safe_arith::SafeArith; use types::*; /// Returns the base reward for some validator. -/// -/// Spec v0.12.1 pub fn get_base_reward( state: &BeaconState, index: usize, @@ -12,13 +10,10 @@ pub fn get_base_reward( total_active_balance: u64, spec: &ChainSpec, ) -> Result { - if total_active_balance == 0 { - Ok(0) - } else { - Ok(state - .get_effective_balance(index, spec)? - .safe_mul(spec.base_reward_factor)? - .safe_div(total_active_balance.integer_sqrt())? - .safe_div(spec.base_rewards_per_epoch)?) - } + state + .get_effective_balance(index)? + .safe_mul(spec.base_reward_factor)? + .safe_div(total_active_balance.integer_sqrt())? + .safe_div(spec.base_rewards_per_epoch) + .map_err(Into::into) } diff --git a/consensus/state_processing/src/common/get_attestation_participation.rs b/consensus/state_processing/src/common/get_attestation_participation.rs new file mode 100644 index 00000000000..499d8fa8f86 --- /dev/null +++ b/consensus/state_processing/src/common/get_attestation_participation.rs @@ -0,0 +1,54 @@ +use integer_sqrt::IntegerSquareRoot; +use smallvec::SmallVec; +use types::{ + consts::altair::{ + NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, + TIMELY_TARGET_FLAG_INDEX, + }, + BeaconStateError as Error, +}; +use types::{AttestationData, BeaconState, ChainSpec, EthSpec}; + +/// Get the participation flags for a valid attestation. 
+/// +/// You should have called `verify_attestation_for_block_inclusion` or similar before +/// calling this function, in order to ensure that the attestation's source is correct. +/// +/// This function will return an error if the source of the attestation doesn't match the +/// state's relevant justified checkpoint. +pub fn get_attestation_participation_flag_indices( + state: &BeaconState, + data: &AttestationData, + inclusion_delay: u64, + spec: &ChainSpec, +) -> Result, Error> { + let justified_checkpoint = if data.target.epoch == state.current_epoch() { + state.current_justified_checkpoint() + } else { + state.previous_justified_checkpoint() + }; + + // Matching roots. + let is_matching_source = data.source == justified_checkpoint; + let is_matching_target = is_matching_source + && data.target.root == *state.get_block_root_at_epoch(data.target.epoch)?; + let is_matching_head = + is_matching_target && data.beacon_block_root == *state.get_block_root(data.slot)?; + + if !is_matching_source { + return Err(Error::IncorrectAttestationSource); + } + + // Participation flag indices + let mut participation_flag_indices = SmallVec::new(); + if is_matching_source && inclusion_delay <= T::slots_per_epoch().integer_sqrt() { + participation_flag_indices.push(TIMELY_SOURCE_FLAG_INDEX); + } + if is_matching_target && inclusion_delay <= T::slots_per_epoch() { + participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); + } + if is_matching_head && inclusion_delay == spec.min_attestation_inclusion_delay { + participation_flag_indices.push(TIMELY_HEAD_FLAG_INDEX); + } + Ok(participation_flag_indices) +} diff --git a/consensus/state_processing/src/common/initiate_validator_exit.rs b/consensus/state_processing/src/common/initiate_validator_exit.rs index 3d2638a35a7..85e5e1df1db 100644 --- a/consensus/state_processing/src/common/initiate_validator_exit.rs +++ b/consensus/state_processing/src/common/initiate_validator_exit.rs @@ -3,40 +3,36 @@ use std::cmp::max; use 
types::{BeaconStateError as Error, *}; /// Initiate the exit of the validator of the given `index`. -/// -/// Spec v0.12.1 pub fn initiate_validator_exit( state: &mut BeaconState, index: usize, spec: &ChainSpec, ) -> Result<(), Error> { - if index >= state.validators.len() { - return Err(Error::UnknownValidator(index as u64)); - } - // Return if the validator already initiated exit - if state.validators[index].exit_epoch != spec.far_future_epoch { + if state.get_validator(index)?.exit_epoch != spec.far_future_epoch { return Ok(()); } // Ensure the exit cache is built. - state.exit_cache.build(&state.validators, spec)?; + state.build_exit_cache(spec)?; // Compute exit queue epoch let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec)?; let mut exit_queue_epoch = state - .exit_cache + .exit_cache() .max_epoch()? .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch)); - let exit_queue_churn = state.exit_cache.get_churn_at(exit_queue_epoch)?; + let exit_queue_churn = state.exit_cache().get_churn_at(exit_queue_epoch)?; if exit_queue_churn >= state.get_churn_limit(spec)? 
{ exit_queue_epoch.safe_add_assign(1)?; } - state.exit_cache.record_validator_exit(exit_queue_epoch)?; - state.validators[index].exit_epoch = exit_queue_epoch; - state.validators[index].withdrawable_epoch = + state + .exit_cache_mut() + .record_validator_exit(exit_queue_epoch)?; + state.get_validator_mut(index)?.exit_epoch = exit_queue_epoch; + state.get_validator_mut(index)?.withdrawable_epoch = exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; Ok(()) diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index cdcab4c48d3..334a293ed51 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -1,34 +1,40 @@ mod deposit_data_tree; +mod get_attestation_participation; mod get_attesting_indices; -mod get_base_reward; mod get_indexed_attestation; mod initiate_validator_exit; mod slash_validator; +pub mod altair; +pub mod base; + pub use deposit_data_tree::DepositDataTree; +pub use get_attestation_participation::get_attestation_participation_flag_indices; pub use get_attesting_indices::get_attesting_indices; -pub use get_base_reward::get_base_reward; pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; -use safe_arith::{ArithError, SafeArith}; -use types::{BeaconState, EthSpec}; +use safe_arith::SafeArith; +use types::{BeaconState, BeaconStateError, EthSpec}; /// Increase the balance of a validator, erroring upon overflow, as per the spec. -/// -/// Spec v0.12.1 pub fn increase_balance( state: &mut BeaconState, index: usize, delta: u64, -) -> Result<(), ArithError> { - state.balances[index].safe_add_assign(delta) +) -> Result<(), BeaconStateError> { + state.get_balance_mut(index)?.safe_add_assign(delta)?; + Ok(()) } /// Decrease the balance of a validator, saturating upon overflow, as per the spec. 
-/// -/// Spec v0.12.1 -pub fn decrease_balance(state: &mut BeaconState, index: usize, delta: u64) { - state.balances[index] = state.balances[index].saturating_sub(delta); +pub fn decrease_balance( + state: &mut BeaconState, + index: usize, + delta: u64, +) -> Result<(), BeaconStateError> { + let balance = state.get_balance_mut(index)?; + *balance = balance.saturating_sub(delta); + Ok(()) } diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index a5e5604e8ff..7643043bab2 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -1,55 +1,61 @@ use crate::common::{decrease_balance, increase_balance, initiate_validator_exit}; use safe_arith::SafeArith; use std::cmp; -use types::{BeaconStateError as Error, *}; +use types::{ + consts::altair::{PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, + BeaconStateError as Error, *, +}; -/// Slash the validator with index ``index``. -/// -/// Spec v0.12.1 +/// Slash the validator with index `slashed_index`. 
pub fn slash_validator( state: &mut BeaconState, slashed_index: usize, opt_whistleblower_index: Option, spec: &ChainSpec, ) -> Result<(), Error> { - if slashed_index >= state.validators.len() || slashed_index >= state.balances.len() { - return Err(BeaconStateError::UnknownValidator(slashed_index as u64)); - } - let epoch = state.current_epoch(); initiate_validator_exit(state, slashed_index, spec)?; - state.validators[slashed_index].slashed = true; - state.validators[slashed_index].withdrawable_epoch = cmp::max( - state.validators[slashed_index].withdrawable_epoch, + let validator = state.get_validator_mut(slashed_index)?; + validator.slashed = true; + validator.withdrawable_epoch = cmp::max( + validator.withdrawable_epoch, epoch.safe_add(T::EpochsPerSlashingsVector::to_u64())?, ); - let validator_effective_balance = state.get_effective_balance(slashed_index, spec)?; + let validator_effective_balance = validator.effective_balance; state.set_slashings( epoch, state .get_slashings(epoch)? .safe_add(validator_effective_balance)?, )?; + + let min_slashing_penalty_quotient = match state { + BeaconState::Base(_) => spec.min_slashing_penalty_quotient, + BeaconState::Altair(_) => spec.min_slashing_penalty_quotient_altair, + }; decrease_balance( state, slashed_index, - validator_effective_balance.safe_div(spec.min_slashing_penalty_quotient)?, - ); + validator_effective_balance.safe_div(min_slashing_penalty_quotient)?, + )?; // Apply proposer and whistleblower rewards - let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?; + let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)?; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); let whistleblower_reward = validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; - let proposer_reward = whistleblower_reward.safe_div(spec.proposer_reward_quotient)?; + let proposer_reward = match state { + BeaconState::Base(_) => 
whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, + BeaconState::Altair(_) => whistleblower_reward + .safe_mul(PROPOSER_WEIGHT)? + .safe_div(WEIGHT_DENOMINATOR)?, + }; // Ensure the whistleblower index is in the validator registry. - if state.validators.get(whistleblower_index).is_none() { - return Err(BeaconStateError::UnknownValidator( - whistleblower_index as u64, - )); + if state.validators().get(whistleblower_index).is_none() { + return Err(BeaconStateError::UnknownValidator(whistleblower_index)); } increase_balance(state, proposer_index, proposer_reward)?; diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 0f4369a86f3..bbc24534081 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -1,14 +1,14 @@ -use super::per_block_processing::{errors::BlockProcessingError, process_deposit}; +use super::per_block_processing::{ + errors::BlockProcessingError, process_operations::process_deposit, +}; use crate::common::DepositDataTree; +use crate::upgrade::upgrade_to_altair; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; use types::*; /// Initialize a `BeaconState` from genesis data. -/// -/// Spec v0.12.1 -// TODO: this is quite inefficient and we probably want to rethink how we do this pub fn initialize_beacon_state_from_eth1( eth1_block_hash: Hash256, eth1_timestamp: u64, @@ -33,42 +33,53 @@ pub fn initialize_beacon_state_from_eth1( deposit_tree .push_leaf(deposit.data.tree_hash_root()) .map_err(BlockProcessingError::MerkleTreeError)?; - state.eth1_data.deposit_root = deposit_tree.root(); + state.eth1_data_mut().deposit_root = deposit_tree.root(); process_deposit(&mut state, &deposit, spec, true)?; } process_activations(&mut state, spec)?; + // To support testnets with Altair enabled from genesis, perform a possible state upgrade here. 
+ // This must happen *after* deposits and activations are processed or the calculation of sync + // committees during the upgrade will fail. It's a bit cheeky to do this instead of having + // separate Altair genesis initialization logic, but it turns out that our + // use of `BeaconBlock::empty` in `BeaconState::new` is sufficient to correctly initialise + // the `latest_block_header` as per: + // https://github.com/ethereum/eth2.0-specs/pull/2323 + if spec.fork_name_at_epoch(state.current_epoch()) == ForkName::Altair { + upgrade_to_altair(&mut state, spec)?; + } + // Now that we have our validators, initialize the caches (including the committees) state.build_all_caches(spec)?; // Set genesis validators root for domain separation and chain versioning - state.genesis_validators_root = state.update_validators_tree_hash_cache()?; + *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache()?; Ok(state) } /// Determine whether a candidate genesis state is suitable for starting the chain. -/// -/// Spec v0.12.1 pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSpec) -> bool { state .get_active_validator_indices(T::genesis_epoch(), spec) .map_or(false, |active_validators| { - state.genesis_time >= spec.min_genesis_time + state.genesis_time() >= spec.min_genesis_time && active_validators.len() as u64 >= spec.min_genesis_active_validator_count }) } /// Activate genesis validators, if their balance is acceptable. 
-/// -/// Spec v0.12.1 pub fn process_activations( state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { - for (index, validator) in state.validators.iter_mut().enumerate() { - let balance = state.balances[index]; + let (validators, balances) = state.validators_and_balances_mut(); + for (index, validator) in validators.iter_mut().enumerate() { + let balance = balances + .get(index) + .copied() + .ok_or(Error::BalancesOutOfBounds(index))?; validator.effective_balance = std::cmp::min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index b0a75528110..91959cd866b 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -1,5 +1,16 @@ -#![deny(clippy::integer_arithmetic)] -#![deny(clippy::disallowed_method)] +// Clippy lint set-up (disabled in tests) +#![cfg_attr( + not(test), + deny( + clippy::integer_arithmetic, + clippy::disallowed_method, + clippy::indexing_slicing, + clippy::unwrap_used, + clippy::expect_used, + clippy::panic, + clippy::let_underscore_must_use + ) +)] #[macro_use] mod macros; @@ -10,7 +21,7 @@ pub mod per_block_processing; pub mod per_epoch_processing; pub mod per_slot_processing; pub mod state_advance; -pub mod test_utils; +pub mod upgrade; pub mod verify_operation; pub use genesis::{ @@ -21,6 +32,8 @@ pub use per_block_processing::{ block_signature_verifier, errors::BlockProcessingError, per_block_processing, signature_sets, BlockSignatureStrategy, BlockSignatureVerifier, VerifySignatures, }; -pub use per_epoch_processing::{errors::EpochProcessingError, per_epoch_processing}; +pub use per_epoch_processing::{ + errors::EpochProcessingError, process_epoch as per_epoch_processing, +}; pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError}; pub use verify_operation::{SigVerifiedOp, VerifyOperation}; diff --git 
a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index d1db65d26f0..41f85a88957 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -1,5 +1,4 @@ -use crate::common::{increase_balance, initiate_validator_exit, slash_validator}; -use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid, IntoWithIndex}; +use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid}; use rayon::prelude::*; use safe_arith::{ArithError, SafeArith}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; @@ -10,8 +9,10 @@ pub use self::verify_attester_slashing::{ get_slashable_indices, get_slashable_indices_modular, verify_attester_slashing, }; pub use self::verify_proposer_slashing::verify_proposer_slashing; +pub use altair::sync_committee::process_sync_aggregate; pub use block_signature_verifier::BlockSignatureVerifier; pub use is_valid_indexed_attestation::is_valid_indexed_attestation; +pub use process_operations::process_operations; pub use verify_attestation::{ verify_attestation_for_block_inclusion, verify_attestation_for_state, }; @@ -20,10 +21,11 @@ pub use verify_deposit::{ }; pub use verify_exit::{verify_exit, verify_exit_time_independent_only}; -pub mod block_processing_builder; +pub mod altair; pub mod block_signature_verifier; pub mod errors; mod is_valid_indexed_attestation; +pub mod process_operations; pub mod signature_sets; pub mod tests; mod verify_attestation; @@ -74,16 +76,25 @@ impl VerifySignatures { /// re-calculating the root when it is already known. Note `block_root` should be equal to the /// tree hash root of the block, NOT the signing root of the block. This function takes /// care of mixing in the domain. 
-/// -/// Spec v0.12.1 pub fn per_block_processing( - mut state: &mut BeaconState, + state: &mut BeaconState, signed_block: &SignedBeaconBlock, block_root: Option, block_signature_strategy: BlockSignatureStrategy, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - let block = &signed_block.message; + let block = signed_block.message(); + + // Verify that the `SignedBeaconBlock` instantiation matches the fork at `signed_block.slot()`. + signed_block + .fork_name(spec) + .map_err(BlockProcessingError::InconsistentBlockFork)?; + + // Verify that the `BeaconState` instantiation matches the fork at `state.slot()`. + state + .fork_name(spec) + .map_err(BlockProcessingError::InconsistentStateFork)?; + let verify_signatures = match block_signature_strategy { BlockSignatureStrategy::VerifyBulk => { // Verify all signatures in the block at once. @@ -104,70 +115,51 @@ pub fn per_block_processing( BlockSignatureStrategy::NoVerification => VerifySignatures::False, }; - process_block_header(state, block, spec)?; + let proposer_index = process_block_header(state, block, spec)?; if verify_signatures.is_true() { - verify_block_signature(&state, signed_block, block_root, &spec)?; + verify_block_signature(state, signed_block, block_root, spec)?; } // Ensure the current and previous epoch caches are built. 
state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; - process_randao(&mut state, &block, verify_signatures, &spec)?; - process_eth1_data(&mut state, &block.body.eth1_data)?; - process_proposer_slashings( - &mut state, - &block.body.proposer_slashings, - verify_signatures, - spec, - )?; - process_attester_slashings( - &mut state, - &block.body.attester_slashings, - verify_signatures, - spec, - )?; - process_attestations( - &mut state, - &block.body.attestations, - verify_signatures, - spec, - )?; - process_deposits(&mut state, &block.body.deposits, spec)?; - process_exits( - &mut state, - &block.body.voluntary_exits, - verify_signatures, - spec, - )?; + process_randao(state, block, verify_signatures, spec)?; + process_eth1_data(state, block.body().eth1_data())?; + process_operations(state, block.body(), verify_signatures, spec)?; + + if let BeaconBlockRef::Altair(inner) = block { + process_sync_aggregate(state, &inner.body.sync_aggregate, proposer_index, spec)?; + } Ok(()) } -/// Processes the block header. -/// -/// Spec v0.12.1 +/// Processes the block header, returning the proposer index. 
pub fn process_block_header( state: &mut BeaconState, - block: &BeaconBlock, + block: BeaconBlockRef<'_, T>, spec: &ChainSpec, -) -> Result<(), BlockOperationError> { +) -> Result> { // Verify that the slots match - verify!(block.slot == state.slot, HeaderInvalid::StateSlotMismatch); + verify!( + block.slot() == state.slot(), + HeaderInvalid::StateSlotMismatch + ); // Verify that the block is newer than the latest block header verify!( - block.slot > state.latest_block_header.slot, + block.slot() > state.latest_block_header().slot, HeaderInvalid::OlderThanLatestBlockHeader { - block_slot: block.slot, - latest_block_header_slot: state.latest_block_header.slot, + block_slot: block.slot(), + latest_block_header_slot: state.latest_block_header().slot, } ); // Verify that proposer index is the correct index - let proposer_index = block.proposer_index as usize; - let state_proposer_index = state.get_beacon_proposer_index(block.slot, spec)?; + let proposer_index = block.proposer_index() as usize; + let state_proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; verify!( proposer_index == state_proposer_index, HeaderInvalid::ProposerIndexMismatch { @@ -176,25 +168,24 @@ pub fn process_block_header( } ); - let expected_previous_block_root = state.latest_block_header.tree_hash_root(); + let expected_previous_block_root = state.latest_block_header().tree_hash_root(); verify!( - block.parent_root == expected_previous_block_root, + block.parent_root() == expected_previous_block_root, HeaderInvalid::ParentBlockRootMismatch { state: expected_previous_block_root, - block: block.parent_root, + block: block.parent_root(), } ); - state.latest_block_header = block.temporary_block_header(); + *state.latest_block_header_mut() = block.temporary_block_header(); // Verify proposer is not slashed - let proposer = &state.validators[proposer_index]; verify!( - !proposer.slashed, + !state.get_validator(proposer_index)?.slashed, HeaderInvalid::ProposerSlashed(proposer_index) ); 
- Ok(()) + Ok(block.proposer_index()) } /// Verifies the signature of a block. @@ -223,11 +214,9 @@ pub fn verify_block_signature( /// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// `state.latest_randao_mixes`. -/// -/// Spec v0.12.1 pub fn process_randao( state: &mut BeaconState, - block: &BeaconBlock, + block: BeaconBlockRef<'_, T>, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { @@ -240,37 +229,33 @@ pub fn process_randao( } // Update the current epoch RANDAO mix. - state.update_randao_mix(state.current_epoch(), &block.body.randao_reveal)?; + state.update_randao_mix(state.current_epoch(), block.body().randao_reveal())?; Ok(()) } /// Update the `state.eth1_data_votes` based upon the `eth1_data` provided. -/// -/// Spec v0.12.1 pub fn process_eth1_data( state: &mut BeaconState, eth1_data: &Eth1Data, ) -> Result<(), Error> { if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data)? { - state.eth1_data = new_eth1_data; + *state.eth1_data_mut() = new_eth1_data; } - state.eth1_data_votes.push(eth1_data.clone())?; + state.eth1_data_votes_mut().push(eth1_data.clone())?; Ok(()) } /// Returns `Ok(Some(eth1_data))` if adding the given `eth1_data` to `state.eth1_data_votes` would /// result in a change to `state.eth1_data`. -/// -/// Spec v0.12.1 pub fn get_new_eth1_data( state: &BeaconState, eth1_data: &Eth1Data, ) -> Result, ArithError> { let num_votes = state - .eth1_data_votes + .eth1_data_votes() .iter() .filter(|vote| *vote == eth1_data) .count(); @@ -282,226 +267,3 @@ pub fn get_new_eth1_data( Ok(None) } } - -/// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object. -/// -/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns -/// an `Err` describing the invalid object or cause of failure. 
-/// -/// Spec v0.12.1 -pub fn process_proposer_slashings( - state: &mut BeaconState, - proposer_slashings: &[ProposerSlashing], - verify_signatures: VerifySignatures, - spec: &ChainSpec, -) -> Result<(), BlockProcessingError> { - // Verify and apply proposer slashings in series. - // We have to verify in series because an invalid block may contain multiple slashings - // for the same validator, and we need to correctly detect and reject that. - proposer_slashings - .iter() - .enumerate() - .try_for_each(|(i, proposer_slashing)| { - verify_proposer_slashing(proposer_slashing, &state, verify_signatures, spec) - .map_err(|e| e.into_with_index(i))?; - - slash_validator( - state, - proposer_slashing.signed_header_1.message.proposer_index as usize, - None, - spec, - )?; - - Ok(()) - }) -} - -/// Validates each `AttesterSlashing` and updates the state, short-circuiting on an invalid object. -/// -/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns -/// an `Err` describing the invalid object or cause of failure. -/// -/// Spec v0.12.1 -pub fn process_attester_slashings( - state: &mut BeaconState, - attester_slashings: &[AttesterSlashing], - verify_signatures: VerifySignatures, - spec: &ChainSpec, -) -> Result<(), BlockProcessingError> { - for (i, attester_slashing) in attester_slashings.iter().enumerate() { - verify_attester_slashing(&state, &attester_slashing, verify_signatures, spec) - .map_err(|e| e.into_with_index(i))?; - - let slashable_indices = - get_slashable_indices(&state, &attester_slashing).map_err(|e| e.into_with_index(i))?; - - for i in slashable_indices { - slash_validator(state, i as usize, None, spec)?; - } - } - - Ok(()) -} - -/// Validates each `Attestation` and updates the state, short-circuiting on an invalid object. -/// -/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns -/// an `Err` describing the invalid object or cause of failure. 
-/// -/// Spec v0.12.1 -pub fn process_attestations( - state: &mut BeaconState, - attestations: &[Attestation], - verify_signatures: VerifySignatures, - spec: &ChainSpec, -) -> Result<(), BlockProcessingError> { - // Ensure the previous epoch cache exists. - state.build_committee_cache(RelativeEpoch::Previous, spec)?; - - let proposer_index = state.get_beacon_proposer_index(state.slot, spec)? as u64; - - // Verify and apply each attestation. - for (i, attestation) in attestations.iter().enumerate() { - verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec) - .map_err(|e| e.into_with_index(i))?; - - let pending_attestation = PendingAttestation { - aggregation_bits: attestation.aggregation_bits.clone(), - data: attestation.data.clone(), - inclusion_delay: state.slot.safe_sub(attestation.data.slot)?.as_u64(), - proposer_index, - }; - - if attestation.data.target.epoch == state.current_epoch() { - state.current_epoch_attestations.push(pending_attestation)?; - } else { - state - .previous_epoch_attestations - .push(pending_attestation)?; - } - } - - Ok(()) -} - -/// Validates each `Deposit` and updates the state, short-circuiting on an invalid object. -/// -/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns -/// an `Err` describing the invalid object or cause of failure. -/// -/// Spec v0.12.1 -pub fn process_deposits( - state: &mut BeaconState, - deposits: &[Deposit], - spec: &ChainSpec, -) -> Result<(), BlockProcessingError> { - let expected_deposit_len = std::cmp::min( - T::MaxDeposits::to_u64(), - state.get_outstanding_deposit_len()?, - ); - block_verify!( - deposits.len() as u64 == expected_deposit_len, - BlockProcessingError::DepositCountInvalid { - expected: expected_deposit_len as usize, - found: deposits.len(), - } - ); - - // Verify merkle proofs in parallel. 
- deposits - .par_iter() - .enumerate() - .try_for_each(|(i, deposit)| { - verify_deposit_merkle_proof( - state, - deposit, - state.eth1_deposit_index.safe_add(i as u64)?, - spec, - ) - .map_err(|e| e.into_with_index(i)) - })?; - - // Update the state in series. - for deposit in deposits { - process_deposit(state, deposit, spec, false)?; - } - - Ok(()) -} - -/// Process a single deposit, optionally verifying its merkle proof. -/// -/// Spec v0.12.1 -pub fn process_deposit( - state: &mut BeaconState, - deposit: &Deposit, - spec: &ChainSpec, - verify_merkle_proof: bool, -) -> Result<(), BlockProcessingError> { - let deposit_index = state.eth1_deposit_index as usize; - if verify_merkle_proof { - verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index, spec) - .map_err(|e| e.into_with_index(deposit_index))?; - } - - state.eth1_deposit_index.safe_add_assign(1)?; - - // Get an `Option` where `u64` is the validator index if this deposit public key - // already exists in the beacon_state. - let validator_index = get_existing_validator_index(state, &deposit.data.pubkey) - .map_err(|e| e.into_with_index(deposit_index))?; - - let amount = deposit.data.amount; - - if let Some(index) = validator_index { - // Update the existing validator balance. - increase_balance(state, index as usize, amount)?; - } else { - // The signature should be checked for new validators. Return early for a bad - // signature. - if verify_deposit_signature(&deposit.data, spec).is_err() { - return Ok(()); - } - - // Create a new validator. 
- let validator = Validator { - pubkey: deposit.data.pubkey, - withdrawal_credentials: deposit.data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ), - slashed: false, - }; - state.validators.push(validator)?; - state.balances.push(deposit.data.amount)?; - } - - Ok(()) -} - -/// Validates each `Exit` and updates the state, short-circuiting on an invalid object. -/// -/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns -/// an `Err` describing the invalid object or cause of failure. -/// -/// Spec v0.12.1 -pub fn process_exits( - state: &mut BeaconState, - voluntary_exits: &[SignedVoluntaryExit], - verify_signatures: VerifySignatures, - spec: &ChainSpec, -) -> Result<(), BlockProcessingError> { - // Verify and apply each exit in series. We iterate in series because higher-index exits may - // become invalid due to the application of lower-index ones. 
-    for (i, exit) in voluntary_exits.iter().enumerate() {
-        verify_exit(&state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?;
-
-        initiate_validator_exit(state, exit.message.validator_index as usize, spec)?;
-    }
-    Ok(())
-}
diff --git a/consensus/state_processing/src/per_block_processing/altair.rs b/consensus/state_processing/src/per_block_processing/altair.rs
new file mode 100644
index 00000000000..1f649b71645
--- /dev/null
+++ b/consensus/state_processing/src/per_block_processing/altair.rs
@@ -0,0 +1 @@
+pub mod sync_committee;
diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs
new file mode 100644
index 00000000000..7c8714386c3
--- /dev/null
+++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs
@@ -0,0 +1,86 @@
+use crate::common::{altair::get_base_reward_per_increment, decrease_balance, increase_balance};
+use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid};
+use safe_arith::SafeArith;
+use tree_hash::TreeHash;
+use types::consts::altair::{PROPOSER_WEIGHT, SYNC_REWARD_WEIGHT, WEIGHT_DENOMINATOR};
+use types::{BeaconState, ChainSpec, Domain, EthSpec, SigningData, SyncAggregate, Unsigned};
+
+pub fn process_sync_aggregate<T: EthSpec>(
+    state: &mut BeaconState<T>,
+    aggregate: &SyncAggregate<T>,
+    proposer_index: u64,
+    spec: &ChainSpec,
+) -> Result<(), BlockProcessingError> {
+    // Verify sync committee aggregate signature signing over the previous slot block root
+    let previous_slot = state.slot().saturating_sub(1u64);
+
+    let current_sync_committee = state.current_sync_committee()?.clone();
+    let committee_pubkeys = &current_sync_committee.pubkeys;
+
+    let participant_pubkeys = committee_pubkeys
+        .iter()
+        .zip(aggregate.sync_committee_bits.iter())
+        .flat_map(|(pubkey, bit)| {
+            if bit {
+                // FIXME(altair): accelerate pubkey decompression with a cache
+                Some(pubkey.decompress())
+            } else {
+                None
+            }
+        })
+        .collect::<Result<Vec<_>, _>>()
+        .map_err(|_| SyncAggregateInvalid::PubkeyInvalid)?;
+
+    let domain = spec.get_domain(
+        previous_slot.epoch(T::slots_per_epoch()),
+        Domain::SyncCommittee,
+        &state.fork(),
+        state.genesis_validators_root(),
+    );
+
+    let signing_root = SigningData {
+        object_root: *state.get_block_root(previous_slot)?,
+        domain,
+    }
+    .tree_hash_root();
+
+    let pubkey_refs = participant_pubkeys.iter().collect::<Vec<_>>();
+    if !aggregate
+        .sync_committee_signature
+        .eth2_fast_aggregate_verify(signing_root, &pubkey_refs)
+    {
+        return Err(SyncAggregateInvalid::SignatureInvalid.into());
+    }
+
+    // Compute participant and proposer rewards
+    let total_active_balance = state.get_total_active_balance(spec)?;
+    let total_active_increments =
+        total_active_balance.safe_div(spec.effective_balance_increment)?;
+    let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)?
+        .safe_mul(total_active_increments)?;
+    let max_participant_rewards = total_base_rewards
+        .safe_mul(SYNC_REWARD_WEIGHT)?
+        .safe_div(WEIGHT_DENOMINATOR)?
+        .safe_div(T::slots_per_epoch())?;
+    let participant_reward = max_participant_rewards.safe_div(T::SyncCommitteeSize::to_u64())?;
+    let proposer_reward = participant_reward
+        .safe_mul(PROPOSER_WEIGHT)?
+        .safe_div(WEIGHT_DENOMINATOR.safe_sub(PROPOSER_WEIGHT)?)?;
+
+    // Apply participant and proposer rewards
+    let committee_indices = state.get_sync_committee_indices(&current_sync_committee)?;
+
+    for (participant_index, participation_bit) in committee_indices
+        .into_iter()
+        .zip(aggregate.sync_committee_bits.iter())
+    {
+        if participation_bit {
+            increase_balance(state, participant_index as usize, participant_reward)?;
+            increase_balance(state, proposer_index as usize, proposer_reward)?;
+        } else {
+            decrease_balance(state, participant_index as usize, participant_reward)?;
+        }
+    }
+
+    Ok(())
+}
diff --git a/consensus/state_processing/src/per_block_processing/block_processing_builder.rs b/consensus/state_processing/src/per_block_processing/block_processing_builder.rs
deleted file mode 100644
index 8ef2a18d22a..00000000000
--- a/consensus/state_processing/src/per_block_processing/block_processing_builder.rs
+++ /dev/null
@@ -1,380 +0,0 @@
-use tree_hash::TreeHash;
-use types::test_utils::{
-    AttestationTestTask, AttesterSlashingTestTask, DepositTestTask, ProposerSlashingTestTask,
-    TestingAttestationDataBuilder, TestingBeaconBlockBuilder, TestingBeaconStateBuilder,
-};
-use types::*;
-
-pub struct BlockProcessingBuilder<'a, T: EthSpec> {
-    pub state: BeaconState<T>,
-    pub keypairs: Vec<Keypair>,
-    pub block_builder: TestingBeaconBlockBuilder<T>,
-    pub spec: &'a ChainSpec,
-}
-
-impl<'a, T: EthSpec> BlockProcessingBuilder<'a, T> {
-    pub fn new(num_validators: usize, state_slot: Slot, spec: &'a ChainSpec) -> Self {
-        let mut state_builder =
-            TestingBeaconStateBuilder::from_deterministic_keypairs(num_validators, &spec);
-        state_builder.teleport_to_slot(state_slot);
-        let (state, keypairs) = state_builder.build();
-        let block_builder = TestingBeaconBlockBuilder::new(spec);
-
-        Self {
-            state,
-            keypairs,
-            block_builder,
-            spec,
-        }
-    }
-
-    pub fn build_caches(mut self) -> Self {
-        self.state
-            .build_all_caches(self.spec)
-            .expect("caches build OK");
-        self
-    }
-
-    pub fn 
build_with_n_deposits( - mut self, - num_deposits: u64, - test_task: DepositTestTask, - randao_sk: Option, - previous_block_root: Option, - spec: &ChainSpec, - ) -> (SignedBeaconBlock, BeaconState) { - let (mut state, keypairs) = (self.state, self.keypairs); - - let builder = &mut self.block_builder; - - builder.set_slot(state.slot); - - match previous_block_root { - Some(root) => builder.set_parent_root(root), - None => builder.set_parent_root(state.latest_block_header.tree_hash_root()), - } - - let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); - let keypair = &keypairs[proposer_index]; - - builder.set_proposer_index(proposer_index as u64); - - match randao_sk { - Some(sk) => { - builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec) - } - None => builder.set_randao_reveal( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ), - } - - self.block_builder.insert_deposits( - spec.max_effective_balance, - test_task, - 1, - num_deposits, - &mut state, - spec, - ); - - let block = self.block_builder.build( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ); - - (block, state) - } - - /// Insert a signed `VoluntaryIndex` for the given validator at the given `exit_epoch`. - pub fn insert_exit(mut self, validator_index: u64, exit_epoch: Epoch) -> Self { - self.block_builder.insert_exit( - validator_index, - exit_epoch, - &self.keypairs[validator_index as usize].sk, - &self.state, - self.spec, - ); - self - } - - /// Insert an attestation for the given slot and index. - /// - /// It will be signed by all validators for which `should_sign` returns `true` - /// when called with `(committee_position, validator_index)`. 
- // TODO: consider using this pattern to replace the TestingAttestationBuilder - pub fn insert_attestation( - mut self, - slot: Slot, - index: u64, - mut should_sign: impl FnMut(usize, usize) -> bool, - ) -> Self { - let committee = self.state.get_beacon_committee(slot, index).unwrap(); - let data = TestingAttestationDataBuilder::new( - AttestationTestTask::Valid, - &self.state, - index, - slot, - self.spec, - ) - .build(); - - let mut attestation = Attestation { - aggregation_bits: BitList::with_capacity(committee.committee.len()).unwrap(), - data, - signature: AggregateSignature::empty(), - }; - - for (i, &validator_index) in committee.committee.iter().enumerate() { - if should_sign(i, validator_index) { - attestation - .sign( - &self.keypairs[validator_index].sk, - i, - &self.state.fork, - self.state.genesis_validators_root, - self.spec, - ) - .unwrap(); - } - } - - self.block_builder - .block - .body - .attestations - .push(attestation) - .unwrap(); - - self - } - - /// Apply a mutation to the `BeaconBlock` before signing. 
- pub fn modify(mut self, f: impl FnOnce(&mut BeaconBlock)) -> Self { - self.block_builder.modify(f); - self - } - - pub fn build_with_n_attestations( - mut self, - test_task: AttestationTestTask, - num_attestations: u64, - randao_sk: Option, - previous_block_root: Option, - spec: &ChainSpec, - ) -> (SignedBeaconBlock, BeaconState) { - let (state, keypairs) = (self.state, self.keypairs); - let builder = &mut self.block_builder; - - builder.set_slot(state.slot); - - match previous_block_root { - Some(root) => builder.set_parent_root(root), - None => builder.set_parent_root(state.latest_block_header.tree_hash_root()), - } - - let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); - let keypair = &keypairs[proposer_index]; - - builder.set_proposer_index(proposer_index as u64); - - match randao_sk { - Some(sk) => { - builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec) - } - None => builder.set_randao_reveal( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ), - } - - let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); - self.block_builder - .insert_attestations( - test_task, - &state, - &all_secret_keys, - num_attestations as usize, - spec, - ) - .unwrap(); - let block = self.block_builder.build( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ); - - (block, state) - } - - pub fn build_with_attester_slashing( - mut self, - test_task: AttesterSlashingTestTask, - num_attester_slashings: u64, - randao_sk: Option, - previous_block_root: Option, - spec: &ChainSpec, - ) -> (SignedBeaconBlock, BeaconState) { - let (state, keypairs) = (self.state, self.keypairs); - let builder = &mut self.block_builder; - - builder.set_slot(state.slot); - - match previous_block_root { - Some(root) => builder.set_parent_root(root), - None => builder.set_parent_root(state.latest_block_header.tree_hash_root()), - } - - let proposer_index = 
state.get_beacon_proposer_index(state.slot, spec).unwrap(); - let keypair = &keypairs[proposer_index]; - - builder.set_proposer_index(proposer_index as u64); - - match randao_sk { - Some(sk) => { - builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec) - } - None => builder.set_randao_reveal( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ), - } - - let mut validator_indices = vec![]; - let mut secret_keys = vec![]; - for i in 0..num_attester_slashings { - validator_indices.push(i); - secret_keys.push(&keypairs[i as usize].sk); - } - - for _ in 0..num_attester_slashings { - self.block_builder.insert_attester_slashing( - test_task, - &validator_indices, - &secret_keys, - &state.fork, - state.genesis_validators_root, - spec, - ); - } - let block = self.block_builder.build( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ); - - (block, state) - } - - pub fn build_with_proposer_slashing( - mut self, - test_task: ProposerSlashingTestTask, - num_proposer_slashings: u64, - randao_sk: Option, - previous_block_root: Option, - spec: &ChainSpec, - ) -> (SignedBeaconBlock, BeaconState) { - let (state, keypairs) = (self.state, self.keypairs); - let builder = &mut self.block_builder; - - builder.set_slot(state.slot); - - match previous_block_root { - Some(root) => builder.set_parent_root(root), - None => builder.set_parent_root(state.latest_block_header.tree_hash_root()), - } - - let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); - let keypair = &keypairs[proposer_index]; - - builder.set_proposer_index(proposer_index as u64); - - match randao_sk { - Some(sk) => { - builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec) - } - None => builder.set_randao_reveal( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ), - } - - for i in 0..num_proposer_slashings { - let validator_indices = i; - let secret_keys = &keypairs[i as 
usize].sk; - self.block_builder.insert_proposer_slashing( - test_task, - validator_indices, - &secret_keys, - &state.fork, - state.genesis_validators_root, - spec, - ); - } - let block = self.block_builder.build( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ); - - (block, state) - } - - // NOTE: could remove optional args - // NOTE: could return keypairs as well - pub fn build( - mut self, - randao_sk: Option, - previous_block_root: Option, - ) -> (SignedBeaconBlock, BeaconState) { - let (state, keypairs) = (self.state, self.keypairs); - let spec = self.spec; - let builder = &mut self.block_builder; - - builder.set_slot(state.slot); - - match previous_block_root { - Some(root) => builder.set_parent_root(root), - None => builder.set_parent_root(state.latest_block_header.tree_hash_root()), - } - - let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); - let keypair = &keypairs[proposer_index]; - - builder.set_proposer_index(proposer_index as u64); - - match randao_sk { - Some(sk) => { - builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec) - } - None => builder.set_randao_reveal( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ), - } - - let block = self.block_builder.build( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ); - - (block, state) - } -} diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 63c4fb7f4c3..3a1f6002c3b 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -194,7 +194,7 @@ where let set = randao_signature_set( self.state, self.get_pubkey.clone(), - &block.message, + block.message(), self.spec, )?; self.sets.push(set); @@ -204,12 +204,12 @@ where /// Includes all 
signatures in `self.block.body.proposer_slashings` for verification. pub fn include_proposer_slashings(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { self.sets - .reserve(block.message.body.proposer_slashings.len() * 2); + .reserve(block.message().body().proposer_slashings().len() * 2); block - .message - .body - .proposer_slashings + .message() + .body() + .proposer_slashings() .iter() .try_for_each(|proposer_slashing| { let (set_1, set_2) = proposer_slashing_signature_set( @@ -229,12 +229,12 @@ where /// Includes all signatures in `self.block.body.attester_slashings` for verification. pub fn include_attester_slashings(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { self.sets - .reserve(block.message.body.attester_slashings.len() * 2); + .reserve(block.message().body().attester_slashings().len() * 2); block - .message - .body - .attester_slashings + .message() + .body() + .attester_slashings() .iter() .try_for_each(|attester_slashing| { let (set_1, set_2) = attester_slashing_signature_sets( @@ -256,15 +256,16 @@ where &mut self, block: &'a SignedBeaconBlock, ) -> Result>> { - self.sets.reserve(block.message.body.attestations.len()); + self.sets + .reserve(block.message().body().attestations().len()); block - .message - .body - .attestations + .message() + .body() + .attestations() .iter() .try_fold( - Vec::with_capacity(block.message.body.attestations.len()), + Vec::with_capacity(block.message().body().attestations().len()), |mut vec, attestation| { let committee = self .state @@ -290,12 +291,13 @@ where /// Includes all signatures in `self.block.body.voluntary_exits` for verification. 
pub fn include_exits(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { - self.sets.reserve(block.message.body.voluntary_exits.len()); + self.sets + .reserve(block.message().body().voluntary_exits().len()); block - .message - .body - .voluntary_exits + .message() + .body() + .voluntary_exits() .iter() .try_for_each(|exit| { let exit = diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 81f06a52621..4ebf2a644a8 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -11,6 +11,8 @@ use types::*; /// (e.g., when processing attestations instead of when processing deposits). #[derive(Debug, PartialEq, Clone)] pub enum BlockProcessingError { + /// Logic error indicating that the wrong state type was provided. + IncorrectStateType, RandaoSignatureInvalid, BulkSignatureVerificationFailed, StateRootMismatch, @@ -45,11 +47,16 @@ pub enum BlockProcessingError { index: usize, reason: ExitInvalid, }, + SyncAggregateInvalid { + reason: SyncAggregateInvalid, + }, BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), MerkleTreeError(MerkleTreeError), ArithError(ArithError), + InconsistentBlockFork(InconsistentFork), + InconsistentStateFork(InconsistentFork), } impl From for BlockProcessingError { @@ -76,6 +83,12 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(reason: SyncAggregateInvalid) -> Self { + BlockProcessingError::SyncAggregateInvalid { reason } + } +} + impl From> for BlockProcessingError { fn from(e: BlockOperationError) -> BlockProcessingError { match e { @@ -339,3 +352,11 @@ pub enum ExitInvalid { /// been invalid or an internal error occurred. 
SignatureSetError(SignatureSetError), } + +#[derive(Debug, PartialEq, Clone)] +pub enum SyncAggregateInvalid { + /// One or more of the aggregate public keys is invalid. + PubkeyInvalid, + /// The signature is invalid. + SignatureInvalid, +} diff --git a/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs b/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs index 66e364723bd..c52abf31198 100644 --- a/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs @@ -1,6 +1,7 @@ use super::errors::{BlockOperationError, IndexedAttestationInvalid as Invalid}; use super::signature_sets::{get_pubkey_from_state, indexed_attestation_signature_set}; use crate::VerifySignatures; +use itertools::Itertools; use types::*; type Result = std::result::Result>; @@ -10,8 +11,6 @@ fn error(reason: Invalid) -> BlockOperationError { } /// Verify an `IndexedAttestation`. 
-/// -/// Spec v0.12.1 pub fn is_valid_indexed_attestation( state: &BeaconState, indexed_attestation: &IndexedAttestation, @@ -25,13 +24,16 @@ pub fn is_valid_indexed_attestation( // Check that indices are sorted and unique let check_sorted = |list: &[u64]| -> Result<()> { - list.windows(2).enumerate().try_for_each(|(i, pair)| { - if pair[0] < pair[1] { - Ok(()) - } else { - Err(error(Invalid::BadValidatorIndicesOrdering(i))) - } - })?; + list.iter() + .tuple_windows() + .enumerate() + .try_for_each(|(i, (x, y))| { + if x < y { + Ok(()) + } else { + Err(error(Invalid::BadValidatorIndicesOrdering(i))) + } + })?; Ok(()) }; check_sorted(indices)?; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs new file mode 100644 index 00000000000..d576396fb83 --- /dev/null +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -0,0 +1,359 @@ +use super::*; +use crate::common::{ + altair::get_base_reward, get_attestation_participation_flag_indices, increase_balance, + initiate_validator_exit, slash_validator, +}; +use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; +use crate::VerifySignatures; +use safe_arith::SafeArith; +use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; + +pub fn process_operations<'a, T: EthSpec>( + state: &mut BeaconState, + block_body: BeaconBlockBodyRef<'a, T>, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + process_proposer_slashings( + state, + block_body.proposer_slashings(), + verify_signatures, + spec, + )?; + process_attester_slashings( + state, + block_body.attester_slashings(), + verify_signatures, + spec, + )?; + process_attestations(state, block_body, verify_signatures, spec)?; + process_deposits(state, block_body.deposits(), spec)?; + process_exits(state, block_body.voluntary_exits(), 
verify_signatures, spec)?; + Ok(()) +} + +pub mod base { + use super::*; + + /// Validates each `Attestation` and updates the state, short-circuiting on an invalid object. + /// + /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns + /// an `Err` describing the invalid object or cause of failure. + pub fn process_attestations( + state: &mut BeaconState, + attestations: &[Attestation], + verify_signatures: VerifySignatures, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + // Ensure the previous epoch cache exists. + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + + let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64; + + // Verify and apply each attestation. + for (i, attestation) in attestations.iter().enumerate() { + verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; + + let pending_attestation = PendingAttestation { + aggregation_bits: attestation.aggregation_bits.clone(), + data: attestation.data.clone(), + inclusion_delay: state.slot().safe_sub(attestation.data.slot)?.as_u64(), + proposer_index, + }; + + if attestation.data.target.epoch == state.current_epoch() { + state + .as_base_mut()? + .current_epoch_attestations + .push(pending_attestation)?; + } else { + state + .as_base_mut()? 
+ .previous_epoch_attestations + .push(pending_attestation)?; + } + } + + Ok(()) + } +} + +pub mod altair { + use super::*; + + pub fn process_attestations( + state: &mut BeaconState, + attestations: &[Attestation], + verify_signatures: VerifySignatures, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + attestations + .iter() + .enumerate() + .try_for_each(|(i, attestation)| { + process_attestation(state, attestation, i, verify_signatures, spec) + }) + } + + pub fn process_attestation( + state: &mut BeaconState, + attestation: &Attestation, + att_index: usize, + verify_signatures: VerifySignatures, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + state.build_committee_cache(RelativeEpoch::Current, spec)?; + + let indexed_attestation = + verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec) + .map_err(|e| e.into_with_index(att_index))?; + + // Matching roots, participation flag indices + let data = &attestation.data; + let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64(); + let participation_flag_indices = + get_attestation_participation_flag_indices(state, data, inclusion_delay, spec)?; + + // Update epoch participation flags. + let total_active_balance = state.get_total_active_balance(spec)?; + let mut proposer_reward_numerator = 0; + for index in &indexed_attestation.attesting_indices { + let index = *index as usize; + + for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { + let epoch_participation = state.get_epoch_participation_mut(data.target.epoch)?; + let validator_participation = epoch_participation + .get_mut(index) + .ok_or(BeaconStateError::ParticipationOutOfBounds(index))?; + + if participation_flag_indices.contains(&flag_index) + && !validator_participation.has_flag(flag_index)? 
+ { + validator_participation.add_flag(flag_index)?; + proposer_reward_numerator.safe_add_assign( + get_base_reward(state, index, total_active_balance, spec)? + .safe_mul(weight)?, + )?; + } + } + } + + let proposer_reward_denominator = WEIGHT_DENOMINATOR + .safe_sub(PROPOSER_WEIGHT)? + .safe_mul(WEIGHT_DENOMINATOR)? + .safe_div(PROPOSER_WEIGHT)?; + let proposer_reward = proposer_reward_numerator.safe_div(proposer_reward_denominator)?; + // FIXME(altair): optimise by passing in proposer_index + let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)?; + increase_balance(state, proposer_index, proposer_reward)?; + Ok(()) + } +} + +/// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object. +/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. +pub fn process_proposer_slashings( + state: &mut BeaconState, + proposer_slashings: &[ProposerSlashing], + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + // Verify and apply proposer slashings in series. + // We have to verify in series because an invalid block may contain multiple slashings + // for the same validator, and we need to correctly detect and reject that. + proposer_slashings + .iter() + .enumerate() + .try_for_each(|(i, proposer_slashing)| { + verify_proposer_slashing(proposer_slashing, &state, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; + + slash_validator( + state, + proposer_slashing.signed_header_1.message.proposer_index as usize, + None, + spec, + )?; + + Ok(()) + }) +} + +/// Validates each `AttesterSlashing` and updates the state, short-circuiting on an invalid object. +/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. 
+pub fn process_attester_slashings( + state: &mut BeaconState, + attester_slashings: &[AttesterSlashing], + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + for (i, attester_slashing) in attester_slashings.iter().enumerate() { + verify_attester_slashing(&state, &attester_slashing, verify_signatures, spec) + .map_err(|e| e.into_with_index(i))?; + + let slashable_indices = + get_slashable_indices(&state, &attester_slashing).map_err(|e| e.into_with_index(i))?; + + for i in slashable_indices { + slash_validator(state, i as usize, None, spec)?; + } + } + + Ok(()) +} +/// Wrapper function to handle calling the correct version of `process_attestations` based on +/// the fork. +pub fn process_attestations<'a, T: EthSpec>( + state: &mut BeaconState, + block_body: BeaconBlockBodyRef<'a, T>, + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + match block_body { + BeaconBlockBodyRef::Base(_) => { + base::process_attestations(state, block_body.attestations(), verify_signatures, spec)?; + } + BeaconBlockBodyRef::Altair(_) => { + altair::process_attestations( + state, + block_body.attestations(), + verify_signatures, + spec, + )?; + } + } + Ok(()) +} + +/// Validates each `Exit` and updates the state, short-circuiting on an invalid object. +/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. +pub fn process_exits( + state: &mut BeaconState, + voluntary_exits: &[SignedVoluntaryExit], + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + // Verify and apply each exit in series. We iterate in series because higher-index exits may + // become invalid due to the application of lower-index ones. 
+ for (i, exit) in voluntary_exits.iter().enumerate() { + verify_exit(&state, exit, verify_signatures, spec).map_err(|e| e.into_with_index(i))?; + + initiate_validator_exit(state, exit.message.validator_index as usize, spec)?; + } + Ok(()) +} + +/// Validates each `Deposit` and updates the state, short-circuiting on an invalid object. +/// +/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns +/// an `Err` describing the invalid object or cause of failure. +pub fn process_deposits( + state: &mut BeaconState, + deposits: &[Deposit], + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + let expected_deposit_len = std::cmp::min( + T::MaxDeposits::to_u64(), + state.get_outstanding_deposit_len()?, + ); + block_verify!( + deposits.len() as u64 == expected_deposit_len, + BlockProcessingError::DepositCountInvalid { + expected: expected_deposit_len as usize, + found: deposits.len(), + } + ); + + // Verify merkle proofs in parallel. + deposits + .par_iter() + .enumerate() + .try_for_each(|(i, deposit)| { + verify_deposit_merkle_proof( + state, + deposit, + state.eth1_deposit_index().safe_add(i as u64)?, + spec, + ) + .map_err(|e| e.into_with_index(i)) + })?; + + // Update the state in series. + for deposit in deposits { + process_deposit(state, deposit, spec, false)?; + } + + Ok(()) +} + +/// Process a single deposit, optionally verifying its merkle proof. +pub fn process_deposit( + state: &mut BeaconState, + deposit: &Deposit, + spec: &ChainSpec, + verify_merkle_proof: bool, +) -> Result<(), BlockProcessingError> { + let deposit_index = state.eth1_deposit_index() as usize; + if verify_merkle_proof { + verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index(), spec) + .map_err(|e| e.into_with_index(deposit_index))?; + } + + state.eth1_deposit_index_mut().safe_add_assign(1)?; + + // Get an `Option` where `u64` is the validator index if this deposit public key + // already exists in the beacon_state. 
+ let validator_index = get_existing_validator_index(state, &deposit.data.pubkey) + .map_err(|e| e.into_with_index(deposit_index))?; + + let amount = deposit.data.amount; + + if let Some(index) = validator_index { + // Update the existing validator balance. + increase_balance(state, index as usize, amount)?; + } else { + // The signature should be checked for new validators. Return early for a bad + // signature. + if verify_deposit_signature(&deposit.data, spec).is_err() { + return Ok(()); + } + + // Create a new validator. + let validator = Validator { + pubkey: deposit.data.pubkey, + withdrawal_credentials: deposit.data.withdrawal_credentials, + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: std::cmp::min( + amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, + spec.max_effective_balance, + ), + slashed: false, + }; + state.validators_mut().push(validator)?; + state.balances_mut().push(deposit.data.amount)?; + + // Altair-specific initializations. 
+ if let BeaconState::Altair(altair_state) = state { + altair_state + .previous_epoch_participation + .push(ParticipationFlags::default())?; + altair_state + .current_epoch_participation + .push(ParticipationFlags::default())?; + altair_state.inactivity_scores.push(0)?; + } + } + + Ok(()) +} diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 6dd0c660bc8..df8513e0ed8 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -7,10 +7,10 @@ use ssz::DecodeError; use std::borrow::Cow; use tree_hash::TreeHash; use types::{ - AggregateSignature, AttesterSlashing, BeaconBlock, BeaconState, BeaconStateError, ChainSpec, - DepositData, Domain, EthSpec, Fork, Hash256, IndexedAttestation, ProposerSlashing, PublicKey, - Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, SignedRoot, - SignedVoluntaryExit, SigningData, + AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, + DepositData, Domain, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, + ProposerSlashing, PublicKey, Signature, SignedAggregateAndProof, SignedBeaconBlock, + SignedBeaconBlockHeader, SignedRoot, SignedVoluntaryExit, SigningData, }; pub type Result = std::result::Result; @@ -35,6 +35,8 @@ pub enum Error { /// The public key bytes stored in the `BeaconState` were not valid. This is a serious internal /// error. BadBlsBytes { validator_index: u64 }, + /// The block structure is not appropriate for the fork at `block.slot()`. 
+ InconsistentBlockFork(InconsistentFork), } impl From for Error { @@ -52,7 +54,7 @@ where T: EthSpec, { state - .validators + .validators() .get(validator_index) .and_then(|v| { let pk: Option = v.pubkey.decompress().ok(); @@ -73,21 +75,26 @@ where T: EthSpec, F: Fn(usize) -> Option>, { - let block = &signed_block.message; - let proposer_index = state.get_beacon_proposer_index(block.slot, spec)?; + // Verify that the `SignedBeaconBlock` instantiation matches the fork at `signed_block.slot()`. + signed_block + .fork_name(spec) + .map_err(Error::InconsistentBlockFork)?; - if proposer_index as u64 != block.proposer_index { + let block = signed_block.message(); + let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; + + if proposer_index as u64 != block.proposer_index() { return Err(Error::IncorrectBlockProposer { - block: block.proposer_index, + block: block.proposer_index(), local_shuffling: proposer_index as u64, }); } let domain = spec.get_domain( - block.slot.epoch(T::slots_per_epoch()), + block.slot().epoch(T::slots_per_epoch()), Domain::BeaconProposer, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), ); let message = if let Some(root) = block_root { @@ -101,7 +108,7 @@ where }; Ok(SignatureSet::single_pubkey( - &signed_block.signature, + signed_block.signature(), get_pubkey(proposer_index).ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?, message, )) @@ -111,26 +118,29 @@ where pub fn randao_signature_set<'a, T, F>( state: &'a BeaconState, get_pubkey: F, - block: &'a BeaconBlock, + block: BeaconBlockRef<'a, T>, spec: &'a ChainSpec, ) -> Result> where T: EthSpec, F: Fn(usize) -> Option>, { - let proposer_index = state.get_beacon_proposer_index(block.slot, spec)?; + let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?; let domain = spec.get_domain( - block.slot.epoch(T::slots_per_epoch()), + block.slot().epoch(T::slots_per_epoch()), Domain::Randao, - 
&state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), ); - let message = block.slot.epoch(T::slots_per_epoch()).signing_root(domain); + let message = block + .slot() + .epoch(T::slots_per_epoch()) + .signing_root(domain); Ok(SignatureSet::single_pubkey( - &block.body.randao_reveal, + block.body().randao_reveal(), get_pubkey(proposer_index).ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?, message, )) @@ -177,8 +187,8 @@ fn block_header_signature_set<'a, T: EthSpec>( let domain = spec.get_domain( signed_header.message.slot.epoch(T::slots_per_epoch()), Domain::BeaconProposer, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), ); let message = signed_header.message.signing_root(domain); @@ -208,8 +218,8 @@ where let domain = spec.get_domain( indexed_attestation.data.target.epoch, Domain::BeaconAttester, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), ); let message = indexed_attestation.data.signing_root(domain); @@ -309,8 +319,8 @@ where let domain = spec.get_domain( exit.epoch, Domain::VoluntaryExit, - &state.fork, - state.genesis_validators_root, + &state.fork(), + state.genesis_validators_root(), ); let message = exit.signing_root(domain); diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 471ca011dda..63e57bddc6a 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1,25 +1,66 @@ #![cfg(all(test, not(feature = "fake_crypto")))] -use super::block_processing_builder::BlockProcessingBuilder; -use super::errors::*; -use crate::{per_block_processing, BlockSignatureStrategy}; -use types::test_utils::{ - AttestationTestTask, AttesterSlashingTestTask, DepositTestTask, ProposerSlashingTestTask, +use crate::per_block_processing; +use 
crate::per_block_processing::errors::{ + AttestationInvalid, AttesterSlashingInvalid, BlockOperationError, BlockProcessingError, + DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, + ProposerSlashingInvalid, }; +use crate::{per_block_processing::process_operations, BlockSignatureStrategy, VerifySignatures}; +use beacon_chain::store::StoreConfig; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use lazy_static::lazy_static; +use ssz_types::Bitfield; +use test_utils::generate_deterministic_keypairs; use types::*; +pub const MAX_VALIDATOR_COUNT: usize = 97; pub const NUM_DEPOSITS: u64 = 1; pub const VALIDATOR_COUNT: usize = 64; pub const EPOCH_OFFSET: u64 = 4; pub const NUM_ATTESTATIONS: u64 = 1; -type E = MainnetEthSpec; +lazy_static! { + /// A cached set of keys. + static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); +} + +fn get_harness( + epoch_offset: u64, + num_validators: usize, +) -> BeaconChainHarness> { + // Set the state and block to be in the last slot of the `epoch_offset`th epoch. 
+ let last_slot_of_epoch = + (MainnetEthSpec::genesis_epoch() + epoch_offset).end_slot(E::slots_per_epoch()); + let harness = BeaconChainHarness::new_with_store_config( + E::default(), + None, + KEYPAIRS[0..num_validators].to_vec(), + StoreConfig::default(), + ); + let state = harness.get_current_state(); + if last_slot_of_epoch > Slot::new(0) { + harness.add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..last_slot_of_epoch.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..num_validators).collect::>().as_slice(), + ); + } + harness +} #[test] fn valid_block_ok() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let (block, mut state) = builder.build(None, None); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let state = harness.get_current_state(); + + let slot = state.slot(); + let (block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); let result = per_block_processing( &mut state, @@ -29,21 +70,24 @@ fn valid_block_ok() { &spec, ); - assert_eq!(result, Ok(())); + assert!(result.is_ok()); } #[test] fn invalid_block_header_state_slot() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let (mut block, mut state) = builder.build(None, None); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - state.slot = Slot::new(133_713); - block.message.slot = Slot::new(424_242); + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot); + let (mut block, signature) = signed_block.deconstruct(); + *block.slot_mut() = slot + Slot::new(1); let result = per_block_processing( &mut state, - &block, + &SignedBeaconBlock::from_block(block, signature), None, BlockSignatureStrategy::VerifyIndividual, &spec, @@ -60,13 +104,18 @@ fn invalid_block_header_state_slot() { 
#[test] fn invalid_parent_block_root() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let invalid_parent_root = Hash256::from([0xAA; 32]); - let (block, mut state) = builder.build(None, Some(invalid_parent_root)); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + + let state = harness.get_current_state(); + let slot = state.slot(); + + let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (mut block, signature) = signed_block.deconstruct(); + *block.parent_root_mut() = Hash256::from([0xAA; 32]); let result = per_block_processing( &mut state, - &block, + &SignedBeaconBlock::from_block(block, signature), None, BlockSignatureStrategy::VerifyIndividual, &spec, @@ -76,8 +125,8 @@ fn invalid_parent_block_root() { result, Err(BlockProcessingError::HeaderInvalid { reason: HeaderInvalid::ParentBlockRootMismatch { - state: state.latest_block_header.canonical_root(), - block: block.parent_root() + state: state.latest_block_header().canonical_root(), + block: Hash256::from([0xAA; 32]) } }) ); @@ -86,22 +135,16 @@ fn invalid_parent_block_root() { #[test] fn invalid_block_signature() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let (block, mut state) = builder.build(None, None); - - // sign the block with a keypair that is not the expected proposer - let keypair = Keypair::random(); - let block = block.message.sign( - &keypair.sk, - &state.fork, - state.genesis_validators_root, - &spec, - ); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + + let state = harness.get_current_state(); + let slot = state.slot(); + let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (block, _) = signed_block.deconstruct(); - // process block with invalid block signature let result = per_block_processing( &mut state, - &block, + 
&SignedBeaconBlock::from_block(block, Signature::empty()), None, BlockSignatureStrategy::VerifyIndividual, &spec, @@ -119,15 +162,18 @@ fn invalid_block_signature() { #[test] fn invalid_randao_reveal_signature() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + + let state = harness.get_current_state(); + let slot = state.slot(); - // sign randao reveal with random keypair - let keypair = Keypair::random(); - let (block, mut state) = builder.build(Some(keypair.sk), None); + let (signed_block, mut state) = harness.make_block_with_modifier(state, slot + 1, |block| { + *block.body_mut().randao_reveal_mut() = Signature::empty(); + }); let result = per_block_processing( &mut state, - &block, + &signed_block, None, BlockSignatureStrategy::VerifyIndividual, &spec, @@ -140,18 +186,17 @@ fn invalid_randao_reveal_signature() { #[test] fn valid_4_deposits() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = DepositTestTask::Valid; + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let mut state = harness.get_current_state(); - let (block, mut state) = builder.build_with_n_deposits(4, test_task, None, None, &spec); + let (deposits, mut state) = harness.make_deposits(&mut state, 4, None, None); + let deposits = VariableList::from(deposits); - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + *head_block.to_mut().body_mut().deposits_mut() = deposits; + + let result = + process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); // Expecting Ok because these are valid deposits. 
assert_eq!(result, Ok(())); @@ -160,22 +205,19 @@ fn valid_4_deposits() { #[test] fn invalid_deposit_deposit_count_too_big() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = DepositTestTask::Valid; + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let mut state = harness.get_current_state(); - let (block, mut state) = - builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None); + let deposits = VariableList::from(deposits); - let big_deposit_count = NUM_DEPOSITS + 1; - state.eth1_data.deposit_count = big_deposit_count; + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + *head_block.to_mut().body_mut().deposits_mut() = deposits; - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); + let big_deposit_count = NUM_DEPOSITS + 1; + state.eth1_data_mut().deposit_count = big_deposit_count; + let result = + process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); // Expecting DepositCountInvalid because we incremented the deposit_count assert_eq!( @@ -190,22 +232,19 @@ fn invalid_deposit_deposit_count_too_big() { #[test] fn invalid_deposit_count_too_small() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = DepositTestTask::Valid; + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let mut state = harness.get_current_state(); - let (block, mut state) = - builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None); + let deposits = VariableList::from(deposits); - let small_deposit_count = NUM_DEPOSITS - 1; - state.eth1_data.deposit_count = small_deposit_count; + let mut 
head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + *head_block.to_mut().body_mut().deposits_mut() = deposits; - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); + let small_deposit_count = NUM_DEPOSITS - 1; + state.eth1_data_mut().deposit_count = small_deposit_count; + let result = + process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); // Expecting DepositCountInvalid because we decremented the deposit_count assert_eq!( @@ -220,24 +259,21 @@ fn invalid_deposit_count_too_small() { #[test] fn invalid_deposit_bad_merkle_proof() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = DepositTestTask::Valid; + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let mut state = harness.get_current_state(); - let (block, mut state) = - builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None); + let deposits = VariableList::from(deposits); - let bad_index = state.eth1_deposit_index as usize; + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + *head_block.to_mut().body_mut().deposits_mut() = deposits; + let bad_index = state.eth1_deposit_index() as usize; // Manually offsetting deposit count and index to trigger bad merkle proof - state.eth1_data.deposit_count += 1; - state.eth1_deposit_index += 1; - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); + state.eth1_data_mut().deposit_count += 1; + *state.eth1_deposit_index_mut() += 1; + let result = + process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); // Expecting BadMerkleProof because the proofs were created with different indices assert_eq!( @@ -249,44 +285,21 @@ fn 
invalid_deposit_bad_merkle_proof() { ); } -#[test] -fn invalid_deposit_wrong_pubkey() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = DepositTestTask::BadPubKey; - - let (block, mut state) = - builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); - - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); - - // Expecting Ok(()) even though the public key provided does not correspond to the correct public key - assert_eq!(result, Ok(())); -} - #[test] fn invalid_deposit_wrong_sig() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = DepositTestTask::BadSig; + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let mut state = harness.get_current_state(); - let (block, mut state) = - builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + let (deposits, mut state) = + harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty())); + let deposits = VariableList::from(deposits); - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + *head_block.to_mut().body_mut().deposits_mut() = deposits; + let result = + process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); // Expecting Ok(()) even though the block signature does not correspond to the correct public key assert_eq!(result, Ok(())); } @@ -294,62 +307,37 @@ fn invalid_deposit_wrong_sig() { #[test] fn invalid_deposit_invalid_pub_key() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = DepositTestTask::InvalidPubKey; - - let (block, mut state) = - 
builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); - - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let mut state = harness.get_current_state(); - // Expecting Ok(()) even though we passed in invalid publickeybytes in the public key field of the deposit data. - assert_eq!(result, Ok(())); -} + let (deposits, mut state) = + harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None); + let deposits = VariableList::from(deposits); -#[test] -fn valid_attestations() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::Valid; - let (block, mut state) = - builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + *head_block.to_mut().body_mut().deposits_mut() = deposits; - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); + let result = + process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); - // Expecting Ok(()) because these are valid attestations + // Expecting Ok(()) even though we passed in invalid publickeybytes in the public key field of the deposit data. 
assert_eq!(result, Ok(())); } #[test] fn invalid_attestation_no_committee_for_index() { let spec = MainnetEthSpec::default_spec(); - let slot = Epoch::new(EPOCH_OFFSET).start_slot(E::slots_per_epoch()); - let builder = - get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT).insert_attestation(slot, 0, |_, _| true); - let committee_index = builder.state.get_committee_count_at_slot(slot).unwrap(); - let (block, mut state) = builder - .modify(|block| { - block.body.attestations[0].data.index = committee_index; - }) - .build(None, None); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let mut state = harness.get_current_state(); + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + head_block.to_mut().body_mut().attestations_mut()[0] + .data + .index += 1; + let result = process_operations::process_attestations( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + head_block.body(), + VerifySignatures::True, &spec, ); @@ -366,16 +354,21 @@ fn invalid_attestation_no_committee_for_index() { #[test] fn invalid_attestation_wrong_justified_checkpoint() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::WrongJustifiedCheckpoint; - let (block, mut state) = - builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let mut state = harness.get_current_state(); + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let old_justified_checkpoint = head_block.body().attestations()[0].data.source; + let mut new_justified_checkpoint = old_justified_checkpoint; + new_justified_checkpoint.epoch += Epoch::new(1); + head_block.to_mut().body_mut().attestations_mut()[0] + .data + .source = new_justified_checkpoint; + + let result = 
process_operations::process_attestations( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + head_block.body(), + VerifySignatures::True, &spec, ); @@ -386,61 +379,28 @@ fn invalid_attestation_wrong_justified_checkpoint() { Err(BlockProcessingError::AttestationInvalid { index: 0, reason: AttestationInvalid::WrongJustifiedCheckpoint { - state: Checkpoint { - epoch: Epoch::from(2_u64), - root: Hash256::zero(), - }, - attestation: Checkpoint { - epoch: Epoch::from(0_u64), - root: Hash256::zero(), - }, + state: old_justified_checkpoint, + attestation: new_justified_checkpoint, is_current: true, } }) ); } -#[test] -fn invalid_attestation_bad_indexed_attestation_bad_signature() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::BadIndexedAttestationBadSignature; - let (block, mut state) = - builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); - - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); - - // Expecting BadIndexedAttestation(BadSignature) because we ommitted the aggregation bits in the attestation - assert_eq!( - result, - Err(BlockProcessingError::AttestationInvalid { - index: 0, - reason: AttestationInvalid::BadIndexedAttestation( - IndexedAttestationInvalid::BadSignature - ) - }) - ); -} - #[test] fn invalid_attestation_bad_aggregation_bitfield_len() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::BadAggregationBitfieldLen; - let (block, mut state) = - builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let mut state = harness.get_current_state(); + let mut head_block = 
harness.chain.head_beacon_block().unwrap().deconstruct().0; + head_block.to_mut().body_mut().attestations_mut()[0].aggregation_bits = + Bitfield::with_capacity(spec.target_committee_size).unwrap(); + + let result = process_operations::process_attestations( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + head_block.body(), + VerifySignatures::True, &spec, ); @@ -456,18 +416,18 @@ fn invalid_attestation_bad_aggregation_bitfield_len() { #[test] fn invalid_attestation_bad_signature() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, 97); // minimal number of required validators for this test - let test_task = AttestationTestTask::BadSignature; - let (block, mut state) = - builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); - let result = per_block_processing( + let harness = get_harness::(EPOCH_OFFSET, 97); // minimal number of required validators for this test + + let mut state = harness.get_current_state(); + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + head_block.to_mut().body_mut().attestations_mut()[0].signature = AggregateSignature::empty(); + + let result = process_operations::process_attestations( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + head_block.body(), + VerifySignatures::True, &spec, ); - // Expecting BadSignature because we're signing with invalid secret_keys assert_eq!( result, @@ -483,16 +443,20 @@ fn invalid_attestation_bad_signature() { #[test] fn invalid_attestation_included_too_early() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::IncludedTooEarly; - let (block, mut state) = - builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let mut state = 
harness.get_current_state(); + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let new_attesation_slot = head_block.body().attestations()[0].data.slot + + Slot::new(MainnetEthSpec::slots_per_epoch()); + head_block.to_mut().body_mut().attestations_mut()[0] + .data + .slot = new_attesation_slot; + + let result = process_operations::process_attestations( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + head_block.body(), + VerifySignatures::True, &spec, ); @@ -502,9 +466,9 @@ fn invalid_attestation_included_too_early() { Err(BlockProcessingError::AttestationInvalid { index: 0, reason: AttestationInvalid::IncludedTooEarly { - state: state.slot, + state: state.slot(), delay: spec.min_attestation_inclusion_delay, - attestation: block.message.body.attestations[0].data.slot, + attestation: new_attesation_slot, } }) ); @@ -514,26 +478,29 @@ fn invalid_attestation_included_too_early() { fn invalid_attestation_included_too_late() { let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::IncludedTooLate; - let (block, mut state) = - builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let mut state = harness.get_current_state(); + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let new_attesation_slot = head_block.body().attestations()[0].data.slot + - Slot::new(MainnetEthSpec::slots_per_epoch()); + head_block.to_mut().body_mut().attestations_mut()[0] + .data + .slot = new_attesation_slot; + + let result = process_operations::process_attestations( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + head_block.body(), + VerifySignatures::True, &spec, ); - 
assert_eq!( result, Err(BlockProcessingError::AttestationInvalid { index: 0, reason: AttestationInvalid::IncludedTooLate { - state: state.slot, - attestation: block.message.body.attestations[0].data.slot, + state: state.slot(), + attestation: new_attesation_slot, } }) ); @@ -543,27 +510,28 @@ fn invalid_attestation_included_too_late() { fn invalid_attestation_target_epoch_slot_mismatch() { let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::TargetEpochSlotMismatch; - let (block, mut state) = - builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let mut state = harness.get_current_state(); + let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + head_block.to_mut().body_mut().attestations_mut()[0] + .data + .target + .epoch += Epoch::new(1); + + let result = process_operations::process_attestations( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + head_block.body(), + VerifySignatures::True, &spec, ); - - let attestation = &block.message.body.attestations[0].data; assert_eq!( result, Err(BlockProcessingError::AttestationInvalid { index: 0, reason: AttestationInvalid::TargetEpochSlotMismatch { - target_epoch: attestation.target.epoch, - slot_epoch: attestation.slot.epoch(E::slots_per_epoch()), + target_epoch: Epoch::new(EPOCH_OFFSET + 1), + slot_epoch: Epoch::new(EPOCH_OFFSET), } }) ); @@ -572,17 +540,15 @@ fn invalid_attestation_target_epoch_slot_mismatch() { #[test] fn valid_insert_attester_slashing() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttesterSlashingTestTask::Valid; - let num_attester_slashings = 1; - let 
(block, mut state) = - builder.build_with_attester_slashing(test_task, num_attester_slashings, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let attester_slashing = harness.make_attester_slashing(vec![1, 2]); + + let mut state = harness.get_current_state(); + let result = process_operations::process_attester_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[attester_slashing], + VerifySignatures::True, &spec, ); @@ -593,16 +559,16 @@ fn valid_insert_attester_slashing() { #[test] fn invalid_attester_slashing_not_slashable() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttesterSlashingTestTask::NotSlashable; - let num_attester_slashings = 1; - let (block, mut state) = - builder.build_with_attester_slashing(test_task, num_attester_slashings, None, None, &spec); - let result = per_block_processing( + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + + let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); + attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); + + let mut state = harness.get_current_state(); + let result = process_operations::process_attester_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[attester_slashing], + VerifySignatures::True, &spec, ); @@ -619,17 +585,16 @@ fn invalid_attester_slashing_not_slashable() { #[test] fn invalid_attester_slashing_1_invalid() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttesterSlashingTestTask::IndexedAttestation1Invalid; - let num_attester_slashings = 1; - let (block, mut state) = - builder.build_with_attester_slashing(test_task, num_attester_slashings, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = 
per_block_processing( + let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); + attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); + + let mut state = harness.get_current_state(); + let result = process_operations::process_attester_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[attester_slashing], + VerifySignatures::True, &spec, ); @@ -649,17 +614,16 @@ fn invalid_attester_slashing_1_invalid() { #[test] fn invalid_attester_slashing_2_invalid() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = AttesterSlashingTestTask::IndexedAttestation2Invalid; - let num_attester_slashings = 1; - let (block, mut state) = - builder.build_with_attester_slashing(test_task, num_attester_slashings, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); + attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); + + let mut state = harness.get_current_state(); + let result = process_operations::process_attester_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[attester_slashing], + VerifySignatures::True, &spec, ); @@ -679,36 +643,35 @@ fn invalid_attester_slashing_2_invalid() { #[test] fn valid_insert_proposer_slashing() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = ProposerSlashingTestTask::Valid; - let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); - - let result = per_block_processing( + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let proposer_slashing = harness.make_proposer_slashing(1); + let mut state = harness.get_current_state(); + let result = 
process_operations::process_proposer_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[proposer_slashing], + VerifySignatures::True, &spec, ); - - // Expecting Ok(()) because we inserted a valid proposer slashing - assert_eq!(result, Ok(())); + // Expecting Ok(_) because we inserted a valid proposer slashing + assert!(result.is_ok()); } #[test] fn invalid_proposer_slashing_proposals_identical() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = ProposerSlashingTestTask::ProposalsIdentical; - let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let mut proposer_slashing = harness.make_proposer_slashing(1); + proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone(); + + let mut state = harness.get_current_state(); + let result = process_operations::process_proposer_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[proposer_slashing], + VerifySignatures::True, &spec, ); + // Expecting ProposalsIdentical because we the two headers are identical assert_eq!( result, @@ -722,15 +685,17 @@ fn invalid_proposer_slashing_proposals_identical() { #[test] fn invalid_proposer_slashing_proposer_unknown() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = ProposerSlashingTestTask::ProposerUnknown; - let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - let result = per_block_processing( + let mut proposer_slashing = harness.make_proposer_slashing(1); + proposer_slashing.signed_header_1.message.proposer_index = 3_141_592; + 
proposer_slashing.signed_header_2.message.proposer_index = 3_141_592; + + let mut state = harness.get_current_state(); + let result = process_operations::process_proposer_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[proposer_slashing], + VerifySignatures::True, &spec, ); @@ -745,63 +710,32 @@ fn invalid_proposer_slashing_proposer_unknown() { } #[test] -fn invalid_proposer_slashing_not_slashable() { +fn invalid_proposer_slashing_duplicate_slashing() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = ProposerSlashingTestTask::ProposerNotSlashable; - let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); - state.validators[0].slashed = true; - let result = per_block_processing( + let proposer_slashing = harness.make_proposer_slashing(1); + let mut state = harness.get_current_state(); + let result_1 = process_operations::process_proposer_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[proposer_slashing.clone()], + VerifySignatures::False, &spec, ); + assert!(result_1.is_ok()); - // Expecting ProposerNotSlashable because we've already slashed the validator - assert_eq!( - result, - Err(BlockProcessingError::ProposerSlashingInvalid { - index: 0, - reason: ProposerSlashingInvalid::ProposerNotSlashable(0) - }) - ); -} - -#[test] -fn invalid_proposer_slashing_duplicate_slashing() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = ProposerSlashingTestTask::Valid; - let (mut block, mut state) = - builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); - - let slashing = block.message.body.proposer_slashings[0].clone(); - let slashed_proposer = slashing.signed_header_1.message.proposer_index; - block - .message - .body - 
.proposer_slashings - .push(slashing) - .expect("should push slashing"); - - let result = per_block_processing( + let result_2 = process_operations::process_proposer_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::NoVerification, + &[proposer_slashing], + VerifySignatures::False, &spec, ); - - // Expecting ProposerNotSlashable for the 2nd slashing because the validator has been - // slashed by the 1st slashing. + // Expecting ProposerNotSlashable because we've already slashed the validator assert_eq!( - result, + result_2, Err(BlockProcessingError::ProposerSlashingInvalid { - index: 1, - reason: ProposerSlashingInvalid::ProposerNotSlashable(slashed_proposer) + index: 0, + reason: ProposerSlashingInvalid::ProposerNotSlashable(1) }) ); } @@ -809,15 +743,14 @@ fn invalid_proposer_slashing_duplicate_slashing() { #[test] fn invalid_bad_proposal_1_signature() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = ProposerSlashingTestTask::BadProposal1Signature; - let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); - - let result = per_block_processing( + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let mut proposer_slashing = harness.make_proposer_slashing(1); + proposer_slashing.signed_header_1.signature = Signature::empty(); + let mut state = harness.get_current_state(); + let result = process_operations::process_proposer_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[proposer_slashing], + VerifySignatures::True, &spec, ); @@ -834,15 +767,14 @@ fn invalid_bad_proposal_1_signature() { #[test] fn invalid_bad_proposal_2_signature() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = ProposerSlashingTestTask::BadProposal2Signature; - let (block, mut state) = 
builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); - - let result = per_block_processing( + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let mut proposer_slashing = harness.make_proposer_slashing(1); + proposer_slashing.signed_header_2.signature = Signature::empty(); + let mut state = harness.get_current_state(); + let result = process_operations::process_proposer_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[proposer_slashing], + VerifySignatures::True, &spec, ); @@ -859,15 +791,15 @@ fn invalid_bad_proposal_2_signature() { #[test] fn invalid_proposer_slashing_proposal_epoch_mismatch() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, EPOCH_OFFSET, VALIDATOR_COUNT); - let test_task = ProposerSlashingTestTask::ProposalEpochMismatch; - let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); - - let result = per_block_processing( + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); + let mut proposer_slashing = harness.make_proposer_slashing(1); + proposer_slashing.signed_header_1.message.slot = Slot::new(0); + proposer_slashing.signed_header_2.message.slot = Slot::new(128); + let mut state = harness.get_current_state(); + let result = process_operations::process_proposer_slashings( &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, + &[proposer_slashing], + VerifySignatures::False, &spec, ); @@ -883,14 +815,3 @@ fn invalid_proposer_slashing_proposal_epoch_mismatch() { }) ); } - -fn get_builder( - spec: &ChainSpec, - epoch_offset: u64, - num_validators: usize, -) -> BlockProcessingBuilder { - // Set the state and block to be in the last slot of the `epoch_offset`th epoch. 
- let last_slot_of_epoch = (MainnetEthSpec::genesis_epoch() + epoch_offset) - .end_slot(MainnetEthSpec::slots_per_epoch()); - BlockProcessingBuilder::new(num_validators, last_slot_of_epoch, &spec).build_caches() -} diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 678ba28e160..5d8113af4f0 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -15,28 +15,26 @@ fn error(reason: Invalid) -> BlockOperationError { /// to `state`. Otherwise, returns a descriptive `Err`. /// /// Optionally verifies the aggregate signature, depending on `verify_signatures`. -/// -/// Spec v0.12.1 pub fn verify_attestation_for_block_inclusion( state: &BeaconState, attestation: &Attestation, verify_signatures: VerifySignatures, spec: &ChainSpec, -) -> Result<()> { +) -> Result> { let data = &attestation.data; verify!( - data.slot.safe_add(spec.min_attestation_inclusion_delay)? <= state.slot, + data.slot.safe_add(spec.min_attestation_inclusion_delay)? 
<= state.slot(), Invalid::IncludedTooEarly { - state: state.slot, + state: state.slot(), delay: spec.min_attestation_inclusion_delay, attestation: data.slot, } ); verify!( - state.slot <= data.slot.safe_add(T::slots_per_epoch())?, + state.slot() <= data.slot.safe_add(T::slots_per_epoch())?, Invalid::IncludedTooLate { - state: state.slot, + state: state.slot(), attestation: data.slot, } ); @@ -56,7 +54,7 @@ pub fn verify_attestation_for_state( attestation: &Attestation, verify_signatures: VerifySignatures, spec: &ChainSpec, -) -> Result<()> { +) -> Result> { let data = &attestation.data; verify!( @@ -72,7 +70,7 @@ pub fn verify_attestation_for_state( let indexed_attestation = get_indexed_attestation(committee.committee, attestation)?; is_valid_indexed_attestation(state, &indexed_attestation, verify_signatures, spec)?; - Ok(()) + Ok(indexed_attestation) } /// Check target epoch and source checkpoint. @@ -92,9 +90,9 @@ fn verify_casper_ffg_vote( ); if data.target.epoch == state.current_epoch() { verify!( - data.source == state.current_justified_checkpoint, + data.source == state.current_justified_checkpoint(), Invalid::WrongJustifiedCheckpoint { - state: state.current_justified_checkpoint, + state: state.current_justified_checkpoint(), attestation: data.source, is_current: true, } @@ -102,9 +100,9 @@ fn verify_casper_ffg_vote( Ok(()) } else if data.target.epoch == state.previous_epoch() { verify!( - data.source == state.previous_justified_checkpoint, + data.source == state.previous_justified_checkpoint(), Invalid::WrongJustifiedCheckpoint { - state: state.previous_justified_checkpoint, + state: state.previous_justified_checkpoint(), attestation: data.source, is_current: false, } diff --git a/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs b/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs index e28007429e3..e4a46c98054 100644 --- 
a/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -83,7 +83,7 @@ where for index in &attesting_indices_1 & &attesting_indices_2 { let validator = state - .validators + .validators() .get(index as usize) .ok_or_else(|| error(Invalid::UnknownValidator(index)))?; diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs index 7290df1a091..0cedc564b2e 100644 --- a/consensus/state_processing/src/per_block_processing/verify_deposit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs @@ -60,7 +60,7 @@ pub fn verify_deposit_merkle_proof( &deposit.proof[..], spec.deposit_contract_tree_depth.safe_add(1)? as usize, deposit_index as usize, - state.eth1_data.deposit_root, + state.eth1_data().deposit_root, ), DepositInvalid::BadMerkleProof ); diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index 16c4db221d6..efaf57f6d93 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -52,7 +52,7 @@ fn verify_exit_parametric( let exit = &signed_exit.message; let validator = state - .validators + .validators() .get(exit.validator_index as usize) .ok_or_else(|| error(ExitInvalid::ValidatorUnknown(exit.validator_index)))?; diff --git a/consensus/state_processing/src/per_block_processing/verify_proposer_slashing.rs b/consensus/state_processing/src/per_block_processing/verify_proposer_slashing.rs index ffc9ccbd8e2..9b290a47e19 100644 --- a/consensus/state_processing/src/per_block_processing/verify_proposer_slashing.rs +++ b/consensus/state_processing/src/per_block_processing/verify_proposer_slashing.rs @@ -41,7 +41,7 @@ pub fn verify_proposer_slashing( // 
Check proposer is slashable let proposer = state - .validators + .validators() .get(header_1.proposer_index as usize) .ok_or_else(|| error(Invalid::ProposerUnknown(header_1.proposer_index)))?; diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index cc1464eef93..4c659cfff83 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -1,21 +1,28 @@ +#![deny(clippy::wildcard_imports)] + +// FIXME(altair): refactor to remove phase0/base structs, including `EpochProcessingSummary` +pub use base::{TotalBalances, ValidatorStatus, ValidatorStatuses}; use errors::EpochProcessingError as Error; +pub use registry_updates::process_registry_updates; use safe_arith::SafeArith; -use tree_hash::TreeHash; -use types::*; +pub use slashings::process_slashings; +use types::{BeaconState, ChainSpec, EthSpec}; +pub use weigh_justification_and_finalization::weigh_justification_and_finalization; -pub mod apply_rewards; +pub mod altair; +pub mod base; +pub mod effective_balance_updates; pub mod errors; -pub mod process_slashings; +pub mod historical_roots_update; pub mod registry_updates; +pub mod resets; +pub mod slashings; pub mod tests; pub mod validator_statuses; - -pub use apply_rewards::process_rewards_and_penalties; -pub use process_slashings::process_slashings; -pub use registry_updates::process_registry_updates; -pub use validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; +pub mod weigh_justification_and_finalization; /// Provides a summary of validator participation during the epoch. +#[derive(PartialEq, Debug)] pub struct EpochProcessingSummary { pub total_balances: TotalBalances, pub statuses: Vec, @@ -25,195 +32,44 @@ pub struct EpochProcessingSummary { /// /// Mutates the given `BeaconState`, returning early if an error is encountered. 
If an error is /// returned, a state might be "half-processed" and therefore in an invalid state. -/// -/// Spec v0.12.1 -pub fn per_epoch_processing( +pub fn process_epoch( state: &mut BeaconState, spec: &ChainSpec, ) -> Result { - // Ensure the committee caches are built. - state.build_committee_cache(RelativeEpoch::Previous, spec)?; - state.build_committee_cache(RelativeEpoch::Current, spec)?; - state.build_committee_cache(RelativeEpoch::Next, spec)?; - - // Load the struct we use to assign validators into sets based on their participation. - // - // E.g., attestation in the previous epoch, attested to the head, etc. - let mut validator_statuses = ValidatorStatuses::new(state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - - // Justification and finalization. - process_justification_and_finalization(state, &validator_statuses.total_balances)?; - - // Rewards and Penalties. - process_rewards_and_penalties(state, &mut validator_statuses, spec)?; - - // Registry Updates. - process_registry_updates(state, spec)?; - - // Slashings. - process_slashings( - state, - validator_statuses.total_balances.current_epoch(), - spec, - )?; - - // Final updates. - process_final_updates(state, spec)?; - - // Rotate the epoch caches to suit the epoch transition. - state.advance_caches(); - - Ok(EpochProcessingSummary { - total_balances: validator_statuses.total_balances, - statuses: validator_statuses.statuses, - }) -} - -/// Update the following fields on the `BeaconState`: -/// -/// - `justification_bitfield`. -/// - `previous_justified_epoch` -/// - `previous_justified_root` -/// - `current_justified_epoch` -/// - `current_justified_root` -/// - `finalized_epoch` -/// - `finalized_root` -/// -/// Spec v0.12.1 -#[allow(clippy::if_same_then_else)] // For readability and consistency with spec. 
-pub fn process_justification_and_finalization( - state: &mut BeaconState, - total_balances: &TotalBalances, -) -> Result<(), Error> { - if state.current_epoch() <= T::genesis_epoch().safe_add(1)? { - return Ok(()); - } - - let previous_epoch = state.previous_epoch(); - let current_epoch = state.current_epoch(); - - let old_previous_justified_checkpoint = state.previous_justified_checkpoint; - let old_current_justified_checkpoint = state.current_justified_checkpoint; - - // Process justifications - state.previous_justified_checkpoint = state.current_justified_checkpoint; - state.justification_bits.shift_up(1)?; - - if total_balances - .previous_epoch_target_attesters() - .safe_mul(3)? - >= total_balances.current_epoch().safe_mul(2)? - { - state.current_justified_checkpoint = Checkpoint { - epoch: previous_epoch, - root: *state.get_block_root_at_epoch(previous_epoch)?, - }; - state.justification_bits.set(1, true)?; - } - // If the current epoch gets justified, fill the last bit. - if total_balances - .current_epoch_target_attesters() - .safe_mul(3)? - >= total_balances.current_epoch().safe_mul(2)? - { - state.current_justified_checkpoint = Checkpoint { - epoch: current_epoch, - root: *state.get_block_root_at_epoch(current_epoch)?, - }; - state.justification_bits.set(0, true)?; - } - - let bits = &state.justification_bits; - - // The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source. - if (1..4).all(|i| bits.get(i).unwrap_or(false)) - && old_previous_justified_checkpoint.epoch.safe_add(3)? == current_epoch - { - state.finalized_checkpoint = old_previous_justified_checkpoint; - } - // The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source. - else if (1..3).all(|i| bits.get(i).unwrap_or(false)) - && old_previous_justified_checkpoint.epoch.safe_add(2)? 
== current_epoch - { - state.finalized_checkpoint = old_previous_justified_checkpoint; - } - // The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3nd as source. - if (0..3).all(|i| bits.get(i).unwrap_or(false)) - && old_current_justified_checkpoint.epoch.safe_add(2)? == current_epoch - { - state.finalized_checkpoint = old_current_justified_checkpoint; - } - // The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source. - else if (0..2).all(|i| bits.get(i).unwrap_or(false)) - && old_current_justified_checkpoint.epoch.safe_add(1)? == current_epoch - { - state.finalized_checkpoint = old_current_justified_checkpoint; + // Verify that the `BeaconState` instantiation matches the fork at `state.slot()`. + state + .fork_name(spec) + .map_err(Error::InconsistentStateFork)?; + + match state { + BeaconState::Base(_) => base::process_epoch(state, spec), + BeaconState::Altair(_) => altair::process_epoch(state, spec), } - - Ok(()) } -/// Finish up an epoch update. -/// -/// Spec v0.12.1 -pub fn process_final_updates( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result<(), Error> { - let current_epoch = state.current_epoch(); - let next_epoch = state.next_epoch()?; +/// Used to track the changes to a validator's balance. +#[derive(Default, Clone)] +pub struct Delta { + pub rewards: u64, + pub penalties: u64, +} - // Reset eth1 data votes. - if state - .slot - .safe_add(1)? - .safe_rem(T::SlotsPerEth1VotingPeriod::to_u64())? - == 0 - { - state.eth1_data_votes = VariableList::empty(); +impl Delta { + /// Reward the validator with the `reward`. + pub fn reward(&mut self, reward: u64) -> Result<(), Error> { + self.rewards = self.rewards.safe_add(reward)?; + Ok(()) } - // Update effective balances with hysteresis (lag). 
- let hysteresis_increment = spec - .effective_balance_increment - .safe_div(spec.hysteresis_quotient)?; - let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; - let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; - for (index, validator) in state.validators.iter_mut().enumerate() { - let balance = state.balances[index]; - - if balance.safe_add(downward_threshold)? < validator.effective_balance - || validator.effective_balance.safe_add(upward_threshold)? < balance - { - validator.effective_balance = std::cmp::min( - balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ); - } + /// Penalize the validator with the `penalty`. + pub fn penalize(&mut self, penalty: u64) -> Result<(), Error> { + self.penalties = self.penalties.safe_add(penalty)?; + Ok(()) } - // Reset slashings - state.set_slashings(next_epoch, 0)?; - - // Set randao mix - state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?; - - // Set historical root accumulator - if next_epoch - .as_u64() - .safe_rem(T::SlotsPerHistoricalRoot::to_u64().safe_div(T::slots_per_epoch())?)? - == 0 - { - let historical_batch = state.historical_batch(); - state - .historical_roots - .push(historical_batch.tree_hash_root())?; + /// Combine two deltas. 
+ fn combine(&mut self, other: Delta) -> Result<(), Error> { + self.reward(other.rewards)?; + self.penalize(other.penalties) } - - // Rotate current/previous epoch attestations - state.previous_epoch_attestations = - std::mem::replace(&mut state.current_epoch_attestations, VariableList::empty()); - - Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs new file mode 100644 index 00000000000..79a72118cba --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -0,0 +1,82 @@ +use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; +use crate::per_epoch_processing::{ + effective_balance_updates::process_effective_balance_updates, + historical_roots_update::process_historical_roots_update, + resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, + validator_statuses::ValidatorStatuses, +}; +pub use inactivity_updates::process_inactivity_updates; +pub use justification_and_finalization::process_justification_and_finalization; +pub use participation_flag_updates::process_participation_flag_updates; +pub use rewards_and_penalties::process_rewards_and_penalties; +pub use sync_committee_updates::process_sync_committee_updates; +use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; + +pub mod inactivity_updates; +pub mod justification_and_finalization; +pub mod participation_flag_updates; +pub mod rewards_and_penalties; +pub mod sync_committee_updates; + +pub fn process_epoch( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result { + // Ensure the committee caches are built. + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.build_committee_cache(RelativeEpoch::Next, spec)?; + + // Justification and finalization. 
+ process_justification_and_finalization(state, spec)?; + + process_inactivity_updates(state, spec)?; + + // Rewards and Penalties. + process_rewards_and_penalties(state, spec)?; + + // Registry Updates. + process_registry_updates(state, spec)?; + + // Slashings. + process_slashings( + state, + state.get_total_active_balance(spec)?, + spec.proportional_slashing_multiplier_altair, + spec, + )?; + + // Reset eth1 data votes. + process_eth1_data_reset(state)?; + + // Update effective balances with hysteresis (lag). + process_effective_balance_updates(state, spec)?; + + // Reset slashings + process_slashings_reset(state)?; + + // Set randao mix + process_randao_mixes_reset(state)?; + + // Set historical root accumulator + process_historical_roots_update(state)?; + + // Rotate current/previous epoch participation + process_participation_flag_updates(state)?; + + process_sync_committee_updates(state, spec)?; + + // Rotate the epoch caches to suit the epoch transition. + state.advance_caches()?; + + // FIXME(altair): this is an incorrect dummy value, we should think harder + // about how we want to unify validator statuses between phase0 & altair. + // We should benchmark the new state transition and work out whether Altair could + // be accelerated by some similar cache. 
+ let validator_statuses = ValidatorStatuses::new(state, spec)?; + + Ok(EpochProcessingSummary { + total_balances: validator_statuses.total_balances, + statuses: validator_statuses.statuses, + }) +} diff --git a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs new file mode 100644 index 00000000000..cc629c1ef09 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs @@ -0,0 +1,44 @@ +use crate::EpochProcessingError; +use core::result::Result; +use core::result::Result::Ok; +use safe_arith::SafeArith; +use std::cmp::min; +use types::beacon_state::BeaconState; +use types::chain_spec::ChainSpec; +use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; +use types::eth_spec::EthSpec; + +pub fn process_inactivity_updates( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), EpochProcessingError> { + // Score updates based on previous epoch participation, skip genesis epoch + if state.current_epoch() == T::genesis_epoch() { + return Ok(()); + } + + let unslashed_indices = state.get_unslashed_participating_indices( + TIMELY_TARGET_FLAG_INDEX, + state.previous_epoch(), + spec, + )?; + + for index in state.get_eligible_validator_indices()? { + // Increase inactivity score of inactive validators + if unslashed_indices.contains(&index) { + let inactivity_score = state.get_inactivity_score_mut(index)?; + inactivity_score.safe_sub_assign(min(1, *inactivity_score))?; + } else { + state + .get_inactivity_score_mut(index)? 
+ .safe_add_assign(spec.inactivity_score_bias)?; + } + // Decrease the score of all validators for forgiveness when not during a leak + if !state.is_in_inactivity_leak(spec) { + let inactivity_score = state.get_inactivity_score_mut(index)?; + inactivity_score + .safe_sub_assign(min(spec.inactivity_score_recovery_rate, *inactivity_score))?; + } + } + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs new file mode 100644 index 00000000000..13e14d4d8cd --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs @@ -0,0 +1,39 @@ +use crate::per_epoch_processing::weigh_justification_and_finalization; +use crate::per_epoch_processing::Error; +use safe_arith::SafeArith; +use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; +use types::{BeaconState, ChainSpec, EthSpec}; + +/// Update the justified and finalized checkpoints for matching target attestations. +pub fn process_justification_and_finalization( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + if state.current_epoch() <= T::genesis_epoch().safe_add(1)? { + return Ok(()); + } + + let previous_epoch = state.previous_epoch(); + let current_epoch = state.current_epoch(); + let previous_indices = state.get_unslashed_participating_indices( + TIMELY_TARGET_FLAG_INDEX, + previous_epoch, + spec, + )?; + let current_indices = + state.get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, current_epoch, spec)?; + let total_active_balance = state.get_total_balance( + state + .get_active_validator_indices(current_epoch, spec)? 
+ .as_slice(),
+ spec,
+ )?;
+ let previous_target_balance = state.get_total_balance(&previous_indices, spec)?;
+ let current_target_balance = state.get_total_balance(&current_indices, spec)?;
+ weigh_justification_and_finalization(
+ state,
+ total_active_balance,
+ previous_target_balance,
+ current_target_balance,
+ )
+} diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs new file mode 100644 index 00000000000..7162fa7f4af --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs @@ -0,0 +1,20 @@ +use crate::EpochProcessingError; +use core::result::Result; +use core::result::Result::Ok; +use types::beacon_state::BeaconState; +use types::eth_spec::EthSpec; +use types::participation_flags::ParticipationFlags; +use types::VariableList; + +pub fn process_participation_flag_updates( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + *state.previous_epoch_participation_mut()? = + std::mem::take(state.current_epoch_participation_mut()?); + *state.current_epoch_participation_mut()?
= VariableList::new(vec![ + ParticipationFlags::default( + ); + state.validators().len() + ])?; + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs new file mode 100644 index 00000000000..6e1475d06d0 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -0,0 +1,124 @@ +use safe_arith::SafeArith; +use types::consts::altair::{ + PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, + WEIGHT_DENOMINATOR, +}; +use types::{BeaconState, ChainSpec, EthSpec}; + +use crate::common::{altair::get_base_reward, decrease_balance, increase_balance}; +use crate::per_epoch_processing::{Delta, Error}; + +/// Apply attester and proposer rewards. +/// +/// Spec v1.1.0 +pub fn process_rewards_and_penalties( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + if state.current_epoch() == T::genesis_epoch() { + return Ok(()); + } + + let mut deltas = vec![Delta::default(); state.validators().len()]; + + let total_active_balance = state.get_total_active_balance(spec)?; + + for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { + get_flag_index_deltas(&mut deltas, state, flag_index, total_active_balance, spec)?; + } + + get_inactivity_penalty_deltas(&mut deltas, state, spec)?; + + // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 + // instead). + for (i, delta) in deltas.into_iter().enumerate() { + increase_balance(state, i, delta.rewards)?; + decrease_balance(state, i, delta.penalties)?; + } + + Ok(()) +} + +/// Return the deltas for a given flag index by scanning through the participation flags. 
+/// +/// Spec v1.1.0 +pub fn get_flag_index_deltas( + deltas: &mut Vec, + state: &BeaconState, + flag_index: usize, + total_active_balance: u64, + spec: &ChainSpec, +) -> Result<(), Error> { + let previous_epoch = state.previous_epoch(); + let unslashed_participating_indices = + state.get_unslashed_participating_indices(flag_index, previous_epoch, spec)?; + let weight = get_flag_weight(flag_index)?; + let unslashed_participating_balance = + state.get_total_balance(&unslashed_participating_indices, spec)?; + let unslashed_participating_increments = + unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; + let active_increments = total_active_balance.safe_div(spec.effective_balance_increment)?; + + for index in state.get_eligible_validator_indices()? { + let base_reward = get_base_reward(state, index, total_active_balance, spec)?; + let mut delta = Delta::default(); + + if unslashed_participating_indices.contains(&(index as usize)) { + if !state.is_in_inactivity_leak(spec) { + let reward_numerator = base_reward + .safe_mul(weight)? + .safe_mul(unslashed_participating_increments)?; + delta.reward( + reward_numerator.safe_div(active_increments.safe_mul(WEIGHT_DENOMINATOR)?)?, + )?; + } + } else if flag_index != TIMELY_HEAD_FLAG_INDEX { + delta.penalize(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)?)?; + } + deltas + .get_mut(index as usize) + .ok_or(Error::DeltaOutOfBounds(index as usize))? + .combine(delta)?; + } + Ok(()) +} + +/// Get the weight for a `flag_index` from the constant list of all weights. 
+pub fn get_flag_weight(flag_index: usize) -> Result { + PARTICIPATION_FLAG_WEIGHTS + .get(flag_index) + .copied() + .ok_or(Error::InvalidFlagIndex(flag_index)) +} + +pub fn get_inactivity_penalty_deltas( + deltas: &mut Vec, + state: &BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let previous_epoch = state.previous_epoch(); + let matching_target_indices = state.get_unslashed_participating_indices( + TIMELY_TARGET_FLAG_INDEX, + previous_epoch, + spec, + )?; + for index in state.get_eligible_validator_indices()? { + let mut delta = Delta::default(); + + if !matching_target_indices.contains(&index) { + let penalty_numerator = state + .get_validator(index)? + .effective_balance + .safe_mul(state.get_inactivity_score(index)?)?; + let penalty_denominator = spec + .inactivity_score_bias + .safe_mul(spec.inactivity_penalty_quotient_altair)?; + delta.penalize(penalty_numerator.safe_div(penalty_denominator)?)?; + } + deltas + .get_mut(index) + .ok_or(Error::DeltaOutOfBounds(index))? + .combine(delta)?; + } + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/altair/sync_committee_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/sync_committee_updates.rs new file mode 100644 index 00000000000..1edc845cb4e --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/altair/sync_committee_updates.rs @@ -0,0 +1,18 @@ +use crate::EpochProcessingError; +use safe_arith::SafeArith; +use types::beacon_state::BeaconState; +use types::chain_spec::ChainSpec; +use types::eth_spec::EthSpec; + +pub fn process_sync_committee_updates( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), EpochProcessingError> { + let next_epoch = state.next_epoch()?; + if next_epoch.safe_rem(spec.epochs_per_sync_committee_period)? == 0 { + *state.current_sync_committee_mut()? = state.next_sync_committee()?.clone(); + + *state.next_sync_committee_mut()? 
= state.get_next_sync_committee(spec)?; + } + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs new file mode 100644 index 00000000000..c28d4b17803 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -0,0 +1,76 @@ +use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; +pub use crate::per_epoch_processing::validator_statuses::{ + TotalBalances, ValidatorStatus, ValidatorStatuses, +}; +use crate::per_epoch_processing::{ + effective_balance_updates::process_effective_balance_updates, + historical_roots_update::process_historical_roots_update, + resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, +}; +pub use justification_and_finalization::process_justification_and_finalization; +pub use participation_record_updates::process_participation_record_updates; +pub use rewards_and_penalties::process_rewards_and_penalties; +use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; + +pub mod justification_and_finalization; +pub mod participation_record_updates; +pub mod rewards_and_penalties; + +pub fn process_epoch( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result { + // Ensure the committee caches are built. + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.build_committee_cache(RelativeEpoch::Next, spec)?; + + // Load the struct we use to assign validators into sets based on their participation. + // + // E.g., attestation in the previous epoch, attested to the head, etc. + let mut validator_statuses = ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(&state)?; + + // Justification and finalization. + process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; + + // Rewards and Penalties. 
+ process_rewards_and_penalties(state, &mut validator_statuses, spec)?; + + // Registry Updates. + process_registry_updates(state, spec)?; + + // Slashings. + process_slashings( + state, + validator_statuses.total_balances.current_epoch(), + spec.proportional_slashing_multiplier, + spec, + )?; + + // Reset eth1 data votes. + process_eth1_data_reset(state)?; + + // Update effective balances with hysteresis (lag). + process_effective_balance_updates(state, spec)?; + + // Reset slashings + process_slashings_reset(state)?; + + // Set randao mix + process_randao_mixes_reset(state)?; + + // Set historical root accumulator + process_historical_roots_update(state)?; + + // Rotate current/previous epoch attestations + process_participation_record_updates(state)?; + + // Rotate the epoch caches to suit the epoch transition. + state.advance_caches()?; + + Ok(EpochProcessingSummary { + total_balances: validator_statuses.total_balances, + statuses: validator_statuses.statuses, + }) +} diff --git a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs new file mode 100644 index 00000000000..89fb506eecd --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs @@ -0,0 +1,23 @@ +use crate::per_epoch_processing::base::TotalBalances; +use crate::per_epoch_processing::weigh_justification_and_finalization; +use crate::per_epoch_processing::Error; +use safe_arith::SafeArith; +use types::{BeaconState, ChainSpec, EthSpec}; + +/// Update the justified and finalized checkpoints for matching target attestations. +pub fn process_justification_and_finalization( + state: &mut BeaconState, + total_balances: &TotalBalances, + _spec: &ChainSpec, +) -> Result<(), Error> { + if state.current_epoch() <= T::genesis_epoch().safe_add(1)? 
{ + return Ok(()); + } + + weigh_justification_and_finalization( + state, + total_balances.current_epoch(), + total_balances.previous_epoch_target_attesters(), + total_balances.current_epoch_target_attesters(), + ) +} diff --git a/consensus/state_processing/src/per_epoch_processing/base/participation_record_updates.rs b/consensus/state_processing/src/per_epoch_processing/base/participation_record_updates.rs new file mode 100644 index 00000000000..2cb82d187df --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/base/participation_record_updates.rs @@ -0,0 +1,12 @@ +use crate::EpochProcessingError; +use types::beacon_state::BeaconState; +use types::eth_spec::EthSpec; + +pub fn process_participation_record_updates( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + let base_state = state.as_base_mut()?; + base_state.previous_epoch_attestations = + std::mem::take(&mut base_state.current_epoch_attestations); + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/apply_rewards.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs similarity index 75% rename from consensus/state_processing/src/per_epoch_processing/apply_rewards.rs rename to consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index 4115bfef3bb..d0983a20fb8 100644 --- a/consensus/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -1,40 +1,49 @@ -use super::super::common::get_base_reward; -use super::validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; -use super::Error; +use crate::common::{base::get_base_reward, decrease_balance, increase_balance}; +use crate::per_epoch_processing::validator_statuses::{ + TotalBalances, ValidatorStatus, ValidatorStatuses, +}; +use crate::per_epoch_processing::{Delta, Error}; use safe_arith::SafeArith; +use std::array::IntoIter as 
ArrayIter; +use types::{BeaconState, ChainSpec, EthSpec}; -use types::*; - -/// Use to track the changes to a validators balance. +/// Combination of several deltas for different components of an attestation reward. +/// +/// Exists only for compatibility with EF rewards tests. #[derive(Default, Clone)] -pub struct Delta { - rewards: u64, - penalties: u64, +pub struct AttestationDelta { + pub source_delta: Delta, + pub target_delta: Delta, + pub head_delta: Delta, + pub inclusion_delay_delta: Delta, + pub inactivity_penalty_delta: Delta, } -impl Delta { - /// Reward the validator with the `reward`. - pub fn reward(&mut self, reward: u64) -> Result<(), Error> { - self.rewards = self.rewards.safe_add(reward)?; - Ok(()) - } - - /// Penalize the validator with the `penalty`. - pub fn penalize(&mut self, penalty: u64) -> Result<(), Error> { - self.penalties = self.penalties.safe_add(penalty)?; - Ok(()) - } - - /// Combine two deltas. - fn combine(&mut self, other: Delta) -> Result<(), Error> { - self.reward(other.rewards)?; - self.penalize(other.penalties) +impl AttestationDelta { + /// Flatten into a single delta. + pub fn flatten(self) -> Result { + let AttestationDelta { + source_delta, + target_delta, + head_delta, + inclusion_delay_delta, + inactivity_penalty_delta, + } = self; + let mut result = Delta::default(); + for delta in ArrayIter::new([ + source_delta, + target_delta, + head_delta, + inclusion_delay_delta, + inactivity_penalty_delta, + ]) { + result.combine(delta)?; + } + Ok(result) } } /// Apply attester and proposer rewards. -/// -/// Spec v0.12.1 pub fn process_rewards_and_penalties( state: &mut BeaconState, validator_statuses: &mut ValidatorStatuses, @@ -45,8 +54,8 @@ pub fn process_rewards_and_penalties( } // Guard against an out-of-bounds during the validator balance update. 
- if validator_statuses.statuses.len() != state.balances.len() - || validator_statuses.statuses.len() != state.validators.len() + if validator_statuses.statuses.len() != state.balances().len() + || validator_statuses.statuses.len() != state.validators().len() { return Err(Error::ValidatorStatusesInconsistent); } @@ -55,28 +64,27 @@ pub fn process_rewards_and_penalties( // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 // instead). - for (i, delta) in deltas.iter().enumerate() { - state.balances[i] = state.balances[i].safe_add(delta.rewards)?; - state.balances[i] = state.balances[i].saturating_sub(delta.penalties); + for (i, delta) in deltas.into_iter().enumerate() { + let combined_delta = delta.flatten()?; + increase_balance(state, i, combined_delta.rewards)?; + decrease_balance(state, i, combined_delta.penalties)?; } Ok(()) } /// Apply rewards for participation in attestations during the previous epoch. -/// -/// Spec v0.12.1 -fn get_attestation_deltas( +pub fn get_attestation_deltas( state: &BeaconState, validator_statuses: &ValidatorStatuses, spec: &ChainSpec, -) -> Result, Error> { +) -> Result, Error> { let finality_delay = state .previous_epoch() - .safe_sub(state.finalized_checkpoint.epoch)? + .safe_sub(state.finalized_checkpoint().epoch)? 
.as_u64(); - let mut deltas = vec![Delta::default(); state.validators.len()]; + let mut deltas = vec![AttestationDelta::default(); state.validators().len()]; let total_balances = &validator_statuses.total_balances; @@ -102,18 +110,23 @@ fn get_attestation_deltas( let inactivity_penalty_delta = get_inactivity_penalty_delta(validator, base_reward, finality_delay, spec)?; - deltas[index].combine(source_delta)?; - deltas[index].combine(target_delta)?; - deltas[index].combine(head_delta)?; - deltas[index].combine(inclusion_delay_delta)?; - deltas[index].combine(inactivity_penalty_delta)?; + let delta = deltas + .get_mut(index) + .ok_or(Error::DeltaOutOfBounds(index))?; + delta.source_delta.combine(source_delta)?; + delta.target_delta.combine(target_delta)?; + delta.head_delta.combine(head_delta)?; + delta.inclusion_delay_delta.combine(inclusion_delay_delta)?; + delta + .inactivity_penalty_delta + .combine(inactivity_penalty_delta)?; if let Some((proposer_index, proposer_delta)) = proposer_delta { - if proposer_index >= deltas.len() { - return Err(Error::ValidatorStatusesInconsistent); - } - - deltas[proposer_index].combine(proposer_delta)?; + deltas + .get_mut(proposer_index) + .ok_or(Error::ValidatorStatusesInconsistent)? 
+ .inclusion_delay_delta + .combine(proposer_delta)?; } } @@ -219,7 +232,6 @@ fn get_inclusion_delay_delta( let proposer_reward = get_proposer_reward(base_reward, spec)?; proposer_delta.reward(proposer_reward)?; - let max_attester_reward = base_reward.safe_sub(proposer_reward)?; delta.reward(max_attester_reward.safe_div(inclusion_info.delay)?)?; diff --git a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs new file mode 100644 index 00000000000..c166667b5a9 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs @@ -0,0 +1,33 @@ +use super::errors::EpochProcessingError; +use safe_arith::SafeArith; +use types::beacon_state::BeaconState; +use types::chain_spec::ChainSpec; +use types::{BeaconStateError, EthSpec}; + +pub fn process_effective_balance_updates( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), EpochProcessingError> { + let hysteresis_increment = spec + .effective_balance_increment + .safe_div(spec.hysteresis_quotient)?; + let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; + let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; + let (validators, balances) = state.validators_and_balances_mut(); + for (index, validator) in validators.iter_mut().enumerate() { + let balance = balances + .get(index) + .copied() + .ok_or(BeaconStateError::BalancesOutOfBounds(index))?; + + if balance.safe_add(downward_threshold)? < validator.effective_balance + || validator.effective_balance.safe_add(upward_threshold)? 
< balance + { + validator.effective_balance = std::cmp::min( + balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, + spec.max_effective_balance, + ); + } + } + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index 245935c1d7a..651bf41ca26 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -1,4 +1,4 @@ -use types::*; +use types::{BeaconStateError, InconsistentFork}; #[derive(Debug, PartialEq)] pub enum EpochProcessingError { @@ -10,6 +10,7 @@ pub enum EpochProcessingError { InclusionDistanceZero, ValidatorStatusesInconsistent, DeltasInconsistent, + DeltaOutOfBounds(usize), /// Unable to get the inclusion distance for a validator that should have an inclusion /// distance. This indicates an internal inconsistency. /// @@ -19,6 +20,9 @@ pub enum EpochProcessingError { InclusionError(InclusionError), SszTypesError(ssz_types::Error), ArithError(safe_arith::ArithError), + InconsistentStateFork(InconsistentFork), + InvalidJustificationBit(ssz_types::Error), + InvalidFlagIndex(usize), } impl From for EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs new file mode 100644 index 00000000000..8466104aa53 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs @@ -0,0 +1,25 @@ +use super::errors::EpochProcessingError; +use core::result::Result; +use core::result::Result::Ok; +use safe_arith::SafeArith; +use tree_hash::TreeHash; +use types::beacon_state::BeaconState; +use types::eth_spec::EthSpec; +use types::Unsigned; + +pub fn process_historical_roots_update( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + let next_epoch = state.next_epoch()?; + if 
next_epoch + .as_u64() + .safe_rem(T::SlotsPerHistoricalRoot::to_u64().safe_div(T::slots_per_epoch())?)? + == 0 + { + let historical_batch = state.historical_batch(); + state + .historical_roots_mut() + .push(historical_batch.tree_hash_root())?; + } + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index 26f055ba4f7..4fd2d685867 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -1,11 +1,11 @@ use crate::{common::initiate_validator_exit, per_epoch_processing::Error}; use itertools::Itertools; use safe_arith::SafeArith; -use types::*; +use types::{BeaconState, ChainSpec, EthSpec, Validator}; /// Performs a validator registry update, if required. /// -/// Spec v0.12.1 +/// NOTE: unchanged in Altair pub fn process_registry_updates( state: &mut BeaconState, spec: &ChainSpec, @@ -20,7 +20,7 @@ pub fn process_registry_updates( && validator.effective_balance <= spec.ejection_balance }; let indices_to_update: Vec<_> = state - .validators + .validators() .iter() .enumerate() .filter(|(_, validator)| { @@ -30,17 +30,18 @@ pub fn process_registry_updates( .collect(); for index in indices_to_update { - if state.validators[index].is_eligible_for_activation_queue(spec) { - state.validators[index].activation_eligibility_epoch = current_epoch.safe_add(1)?; + let validator = state.get_validator_mut(index)?; + if validator.is_eligible_for_activation_queue(spec) { + validator.activation_eligibility_epoch = current_epoch.safe_add(1)?; } - if is_ejectable(&state.validators[index]) { + if is_ejectable(validator) { initiate_validator_exit(state, index, spec)?; } } // Queue validators eligible for activation and not dequeued for activation prior to finalized epoch let activation_queue = state - .validators + .validators() .iter() .enumerate() .filter(|(_, 
validator)| validator.is_eligible_for_activation(state, spec)) @@ -52,8 +53,7 @@ pub fn process_registry_updates( let churn_limit = state.get_churn_limit(spec)? as usize; let delayed_activation_epoch = state.compute_activation_exit_epoch(current_epoch, spec)?; for index in activation_queue.into_iter().take(churn_limit) { - let validator = &mut state.validators[index]; - validator.activation_epoch = delayed_activation_epoch; + state.get_validator_mut(index)?.activation_epoch = delayed_activation_epoch; } Ok(()) diff --git a/consensus/state_processing/src/per_epoch_processing/resets.rs b/consensus/state_processing/src/per_epoch_processing/resets.rs new file mode 100644 index 00000000000..dc3c9f07c06 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/resets.rs @@ -0,0 +1,38 @@ +use super::errors::EpochProcessingError; +use core::result::Result; +use core::result::Result::Ok; +use safe_arith::SafeArith; +use types::beacon_state::BeaconState; +use types::eth_spec::EthSpec; +use types::{Unsigned, VariableList}; + +pub fn process_eth1_data_reset( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + if state + .slot() + .safe_add(1)? + .safe_rem(T::SlotsPerEth1VotingPeriod::to_u64())? 
+ == 0 + { + *state.eth1_data_votes_mut() = VariableList::empty(); + } + Ok(()) +} + +pub fn process_slashings_reset( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + let next_epoch = state.next_epoch()?; + state.set_slashings(next_epoch, 0)?; + Ok(()) +} + +pub fn process_randao_mixes_reset( + state: &mut BeaconState, +) -> Result<(), EpochProcessingError> { + let current_epoch = state.current_epoch(); + let next_epoch = state.next_epoch()?; + state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?; + Ok(()) +} diff --git a/consensus/state_processing/src/per_epoch_processing/process_slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs similarity index 62% rename from consensus/state_processing/src/per_epoch_processing/process_slashings.rs rename to consensus/state_processing/src/per_epoch_processing/slashings.rs index 40d96f30ccd..ed77018e2d4 100644 --- a/consensus/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -1,22 +1,22 @@ +use crate::per_epoch_processing::Error; use safe_arith::{SafeArith, SafeArithIter}; -use types::{BeaconStateError as Error, *}; +use types::{BeaconState, BeaconStateError, ChainSpec, EthSpec, Unsigned}; /// Process slashings. 
-/// -/// Spec v0.12.1 pub fn process_slashings( state: &mut BeaconState, total_balance: u64, + slashing_multiplier: u64, spec: &ChainSpec, ) -> Result<(), Error> { let epoch = state.current_epoch(); let sum_slashings = state.get_all_slashings().iter().copied().safe_sum()?; - let adjusted_total_slashing_balance = std::cmp::min( - sum_slashings.safe_mul(spec.proportional_slashing_multiplier)?, - total_balance, - ); - for (index, validator) in state.validators.iter().enumerate() { + let adjusted_total_slashing_balance = + std::cmp::min(sum_slashings.safe_mul(slashing_multiplier)?, total_balance); + + let (validators, balances) = state.validators_and_balances_mut(); + for (index, validator) in validators.iter().enumerate() { if validator.slashed && epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)? == validator.withdrawable_epoch @@ -31,7 +31,10 @@ pub fn process_slashings( .safe_mul(increment)?; // Equivalent to `decrease_balance(state, index, penalty)`, but avoids borrowing `state`. 
- state.balances[index] = state.balances[index].saturating_sub(penalty); + let balance = balances + .get_mut(index) + .ok_or(BeaconStateError::BalancesOutOfBounds(index))?; + *balance = balance.saturating_sub(penalty); } } diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index 9fdc82c6f31..ac1ce6b0194 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -1,23 +1,165 @@ #![cfg(test)] -use crate::per_epoch_processing::per_epoch_processing; +use crate::per_epoch_processing::process_epoch; +use beacon_chain::store::StoreConfig; +use beacon_chain::test_utils::BeaconChainHarness; +use beacon_chain::types::{EthSpec, MinimalEthSpec}; +use bls::Hash256; use env_logger::{Builder, Env}; -use types::test_utils::TestingBeaconStateBuilder; -use types::*; +use types::Slot; #[test] fn runs_without_error() { Builder::from_env(Env::default().default_filter_or("error")).init(); - let spec = MinimalEthSpec::default_spec(); - - let mut builder: TestingBeaconStateBuilder = - TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec); + let harness = BeaconChainHarness::new_with_store_config( + MinimalEthSpec, + None, + types::test_utils::generate_deterministic_keypairs(8), + StoreConfig::default(), + ); + harness.advance_slot(); + let spec = MinimalEthSpec::default_spec(); let target_slot = (MinimalEthSpec::genesis_epoch() + 4).end_slot(MinimalEthSpec::slots_per_epoch()); - builder.teleport_to_slot(target_slot); - let (mut state, _keypairs) = builder.build(); + let state = harness.get_current_state(); + harness.add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..target_slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..8).collect::>().as_slice(), + ); + let mut new_head_state = harness.get_current_state(); + + process_epoch(&mut new_head_state, &spec).unwrap(); +} + 
+#[cfg(not(debug_assertions))] +mod release_tests { + use super::*; + use crate::{ + per_slot_processing::per_slot_processing, EpochProcessingError, SlotProcessingError, + }; + use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; + use types::{Epoch, ForkName, InconsistentFork, MainnetEthSpec}; + + #[test] + fn altair_state_on_base_fork() { + let mut spec = MainnetEthSpec::default_spec(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + // The Altair fork happens at epoch 1. + spec.altair_fork_epoch = Some(Epoch::new(1)); + + let altair_state = { + let harness = BeaconChainHarness::new( + MainnetEthSpec, + Some(spec.clone()), + types::test_utils::generate_deterministic_keypairs(8), + ); + + harness.advance_slot(); + + harness.extend_chain( + // Build out enough blocks so we get an Altair block at the very end of an epoch. + (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + harness.get_current_state() + }; + + // Pre-conditions for a valid test. + assert_eq!(altair_state.fork_name(&spec).unwrap(), ForkName::Altair); + assert_eq!( + altair_state.slot(), + altair_state.current_epoch().end_slot(slots_per_epoch) + ); + + // Check the state is valid before starting this test. + process_epoch(&mut altair_state.clone(), &spec) + .expect("state passes intial epoch processing"); + per_slot_processing(&mut altair_state.clone(), None, &spec) + .expect("state passes intial slot processing"); + + // Modify the spec so altair never happens. 
+ spec.altair_fork_epoch = None; + + let expected_err = InconsistentFork { + fork_at_slot: ForkName::Base, + object_fork: ForkName::Altair, + }; + + assert_eq!(altair_state.fork_name(&spec), Err(expected_err)); + assert_eq!( + process_epoch(&mut altair_state.clone(), &spec), + Err(EpochProcessingError::InconsistentStateFork(expected_err)) + ); + assert_eq!( + per_slot_processing(&mut altair_state.clone(), None, &spec), + Err(SlotProcessingError::InconsistentStateFork(expected_err)) + ); + } + + #[test] + fn base_state_on_altair_fork() { + let mut spec = MainnetEthSpec::default_spec(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + // The Altair fork never happens. + spec.altair_fork_epoch = None; + + let base_state = { + let harness = BeaconChainHarness::new( + MainnetEthSpec, + Some(spec.clone()), + types::test_utils::generate_deterministic_keypairs(8), + ); + + harness.advance_slot(); + + harness.extend_chain( + // Build out enough blocks so we get a block at the very end of an epoch. + (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + harness.get_current_state() + }; + + // Pre-conditions for a valid test. + assert_eq!(base_state.fork_name(&spec).unwrap(), ForkName::Base); + assert_eq!( + base_state.slot(), + base_state.current_epoch().end_slot(slots_per_epoch) + ); + + // Check the state is valid before starting this test. + process_epoch(&mut base_state.clone(), &spec) + .expect("state passes intial epoch processing"); + per_slot_processing(&mut base_state.clone(), None, &spec) + .expect("state passes intial slot processing"); + + // Modify the spec so Altair happens at the first epoch. 
+ spec.altair_fork_epoch = Some(Epoch::new(1)); + + let expected_err = InconsistentFork { + fork_at_slot: ForkName::Altair, + object_fork: ForkName::Base, + }; - per_epoch_processing(&mut state, &spec).unwrap(); + assert_eq!(base_state.fork_name(&spec), Err(expected_err)); + assert_eq!( + process_epoch(&mut base_state.clone(), &spec), + Err(EpochProcessingError::InconsistentStateFork(expected_err)) + ); + assert_eq!( + per_slot_processing(&mut base_state.clone(), None, &spec), + Err(SlotProcessingError::InconsistentStateFork(expected_err)) + ); + } } diff --git a/consensus/state_processing/src/per_epoch_processing/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/validator_statuses.rs index 6c62c2cd4cf..b40f91ce5a1 100644 --- a/consensus/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -1,6 +1,6 @@ use crate::common::get_attesting_indices; use safe_arith::SafeArith; -use types::*; +use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, PendingAttestation}; #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -17,7 +17,7 @@ macro_rules! set_self_if_other_is_true { /// The information required to reward a block producer for including an attestation in a block. #[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq)] pub struct InclusionInfo { /// The distance between the attestation slot and the slot that attestation was included in a /// block. @@ -49,7 +49,7 @@ impl InclusionInfo { /// Information required to reward some validator during the current and previous epoch. #[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct ValidatorStatus { /// True if the validator has been slashed, ever. 
pub is_slashed: bool, @@ -114,7 +114,7 @@ impl ValidatorStatus { /// The total effective balances for different sets of validators during the previous and current /// epochs. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] pub struct TotalBalances { /// The effective balance increment from the spec. @@ -192,11 +192,11 @@ impl ValidatorStatuses { state: &BeaconState, spec: &ChainSpec, ) -> Result { - let mut statuses = Vec::with_capacity(state.validators.len()); + let mut statuses = Vec::with_capacity(state.validators().len()); let mut total_balances = TotalBalances::new(spec); - for (i, validator) in state.validators.iter().enumerate() { - let effective_balance = state.get_effective_balance(i, spec)?; + for (i, validator) in state.validators().iter().enumerate() { + let effective_balance = state.get_effective_balance(i)?; let mut status = ValidatorStatus { is_slashed: validator.slashed, is_withdrawable_in_current_epoch: validator @@ -235,12 +235,12 @@ impl ValidatorStatuses { pub fn process_attestations( &mut self, state: &BeaconState, - spec: &ChainSpec, ) -> Result<(), BeaconStateError> { - for a in state + let base_state = state.as_base()?; + for a in base_state .previous_epoch_attestations .iter() - .chain(state.current_epoch_attestations.iter()) + .chain(base_state.current_epoch_attestations.iter()) { let committee = state.get_beacon_committee(a.data.slot, a.data.index)?; let attesting_indices = @@ -277,7 +277,10 @@ impl ValidatorStatuses { // Loop through the participating validator indices and update the status vec. for validator_index in attesting_indices { - self.statuses[validator_index].update(&status); + self.statuses + .get_mut(validator_index) + .ok_or(BeaconStateError::UnknownValidator(validator_index))? 
+ .update(&status); } } @@ -285,7 +288,7 @@ impl ValidatorStatuses { for (index, v) in self.statuses.iter().enumerate() { // According to the spec, we only count unslashed validators towards the totals. if !v.is_slashed { - let validator_balance = state.get_effective_balance(index, spec)?; + let validator_balance = state.get_effective_balance(index)?; if v.is_current_epoch_attester { self.total_balances diff --git a/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs new file mode 100644 index 00000000000..6e90ee8f374 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs @@ -0,0 +1,70 @@ +use crate::per_epoch_processing::Error; +use safe_arith::SafeArith; +use std::ops::Range; +use types::{BeaconState, Checkpoint, EthSpec}; + +/// Update the justified and finalized checkpoints for matching target attestations. +#[allow(clippy::if_same_then_else)] // For readability and consistency with spec. +pub fn weigh_justification_and_finalization( + state: &mut BeaconState, + total_active_balance: u64, + previous_target_balance: u64, + current_target_balance: u64, +) -> Result<(), Error> { + let previous_epoch = state.previous_epoch(); + let current_epoch = state.current_epoch(); + + let old_previous_justified_checkpoint = state.previous_justified_checkpoint(); + let old_current_justified_checkpoint = state.current_justified_checkpoint(); + + // Process justifications + *state.previous_justified_checkpoint_mut() = state.current_justified_checkpoint(); + state.justification_bits_mut().shift_up(1)?; + + if previous_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? 
{ + *state.current_justified_checkpoint_mut() = Checkpoint { + epoch: previous_epoch, + root: *state.get_block_root_at_epoch(previous_epoch)?, + }; + state.justification_bits_mut().set(1, true)?; + } + // If the current epoch gets justified, fill the last bit. + if current_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? { + *state.current_justified_checkpoint_mut() = Checkpoint { + epoch: current_epoch, + root: *state.get_block_root_at_epoch(current_epoch)?, + }; + state.justification_bits_mut().set(0, true)?; + } + + let bits = state.justification_bits().clone(); + let all_bits_set = |range: Range| -> Result { + for i in range { + if !bits.get(i).map_err(Error::InvalidJustificationBit)? { + return Ok(false); + } + } + Ok(true) + }; + + // The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source. + if all_bits_set(1..4)? && old_previous_justified_checkpoint.epoch.safe_add(3)? == current_epoch + { + *state.finalized_checkpoint_mut() = old_previous_justified_checkpoint; + } + // The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source. + if all_bits_set(1..3)? && old_previous_justified_checkpoint.epoch.safe_add(2)? == current_epoch + { + *state.finalized_checkpoint_mut() = old_previous_justified_checkpoint; + } + // The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3rd as source. + if all_bits_set(0..3)? && old_current_justified_checkpoint.epoch.safe_add(2)? == current_epoch { + *state.finalized_checkpoint_mut() = old_current_justified_checkpoint; + } + // The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source. + if all_bits_set(0..2)? && old_current_justified_checkpoint.epoch.safe_add(1)? 
== current_epoch { + *state.finalized_checkpoint_mut() = old_current_justified_checkpoint; + } + + Ok(()) +} diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 82cf5abfe19..6bb38fa393f 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,3 +1,4 @@ +use crate::upgrade::upgrade_to_altair; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -7,6 +8,7 @@ pub enum Error { BeaconStateError(BeaconStateError), EpochProcessingError(EpochProcessingError), ArithError(ArithError), + InconsistentStateFork(InconsistentFork), } impl From for Error { @@ -20,24 +22,34 @@ impl From for Error { /// If the root of the supplied `state` is known, then it can be passed as `state_root`. If /// `state_root` is `None`, the root of `state` will be computed using a cached tree hash. /// Providing the `state_root` makes this function several orders of magniude faster. -/// -/// Spec v0.12.1 pub fn per_slot_processing( state: &mut BeaconState, state_root: Option, spec: &ChainSpec, ) -> Result, Error> { + // Verify that the `BeaconState` instantiation matches the fork at `state.slot()`. + state + .fork_name(spec) + .map_err(Error::InconsistentStateFork)?; + cache_state(state, state_root)?; - let summary = if state.slot > spec.genesis_slot - && state.slot.safe_add(1)?.safe_rem(T::slots_per_epoch())? == 0 + let summary = if state.slot() > spec.genesis_slot + && state.slot().safe_add(1)?.safe_rem(T::slots_per_epoch())? == 0 { Some(per_epoch_processing(state, spec)?) } else { None }; - state.slot.safe_add_assign(1)?; + state.slot_mut().safe_add_assign(1)?; + + // If the Altair fork epoch is reached, perform an irregular state upgrade. + if state.slot().safe_rem(T::slots_per_epoch())? 
== 0 + && spec.altair_fork_epoch == Some(state.current_epoch()) + { + upgrade_to_altair(state, spec)?; + } Ok(summary) } @@ -56,23 +68,23 @@ fn cache_state( // getter/setter functions. // // This is a bit hacky, however it gets the job done safely without lots of code. - let previous_slot = state.slot; - state.slot.safe_add_assign(1)?; + let previous_slot = state.slot(); + state.slot_mut().safe_add_assign(1)?; // Store the previous slot's post state transition root. state.set_state_root(previous_slot, previous_state_root)?; // Cache latest block header state root - if state.latest_block_header.state_root == Hash256::zero() { - state.latest_block_header.state_root = previous_state_root; + if state.latest_block_header().state_root == Hash256::zero() { + state.latest_block_header_mut().state_root = previous_state_root; } // Cache block root - let latest_block_root = state.latest_block_header.canonical_root(); + let latest_block_root = state.latest_block_header().canonical_root(); state.set_block_root(previous_slot, latest_block_root)?; // Set the state slot back to what it should be. - state.slot.safe_sub_assign(1)?; + state.slot_mut().safe_sub_assign(1)?; Ok(()) } diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs index 24cf990196f..c3911be2145 100644 --- a/consensus/state_processing/src/state_advance.rs +++ b/consensus/state_processing/src/state_advance.rs @@ -31,9 +31,9 @@ pub fn complete_state_advance( target_slot: Slot, spec: &ChainSpec, ) -> Result<(), Error> { - check_target_slot(state.slot, target_slot)?; + check_target_slot(state.slot(), target_slot)?; - while state.slot < target_slot { + while state.slot() < target_slot { // Use the initial state root on the first iteration of the loop, then use `None` for any // future iterations. 
let state_root_opt = state_root_opt.take(); @@ -64,7 +64,7 @@ pub fn partial_state_advance( target_slot: Slot, spec: &ChainSpec, ) -> Result<(), Error> { - check_target_slot(state.slot, target_slot)?; + check_target_slot(state.slot(), target_slot)?; // The only time that a state root is mandatory is if a block has been applied to the state // without it yet being advanced another slot. @@ -72,13 +72,13 @@ pub fn partial_state_advance( // Failing to provide a state root in this scenario would result in corrupting the // `state.block_roots` array, since the `state.latest_block_header` would contain an invalid // (all-zeros) state root. - let mut initial_state_root = Some(if state.slot > state.latest_block_header.slot { + let mut initial_state_root = Some(if state.slot() > state.latest_block_header().slot { state_root_opt.unwrap_or_else(Hash256::zero) } else { state_root_opt.ok_or(Error::StateRootNotProvided)? }); - while state.slot < target_slot { + while state.slot() < target_slot { // Use the initial state root on the first iteration of the loop, then use `[0; 32]` for any // later iterations. 
// diff --git a/consensus/state_processing/src/test_utils.rs b/consensus/state_processing/src/test_utils.rs deleted file mode 100644 index e54a936ed97..00000000000 --- a/consensus/state_processing/src/test_utils.rs +++ /dev/null @@ -1,184 +0,0 @@ -use log::info; -use types::test_utils::{ - AttestationTestTask, AttesterSlashingTestTask, DepositTestTask, ProposerSlashingTestTask, - TestingBeaconBlockBuilder, TestingBeaconStateBuilder, -}; -use types::{EthSpec, *}; - -pub use crate::per_block_processing::block_processing_builder::BlockProcessingBuilder; - -pub struct BlockBuilder { - pub state_builder: TestingBeaconStateBuilder, - pub block_builder: TestingBeaconBlockBuilder, - - pub num_validators: usize, - pub num_proposer_slashings: usize, - pub num_attester_slashings: usize, - pub num_attestations: usize, - pub num_deposits: usize, - pub num_exits: usize, -} - -impl BlockBuilder { - pub fn new(num_validators: usize, spec: &ChainSpec) -> Self { - let state_builder = - TestingBeaconStateBuilder::from_deterministic_keypairs(num_validators, &spec); - let block_builder = TestingBeaconBlockBuilder::new(spec); - - Self { - state_builder, - block_builder, - num_validators: 0, - num_proposer_slashings: 0, - num_attester_slashings: 0, - num_attestations: 0, - num_deposits: 0, - num_exits: 0, - } - } - - pub fn maximize_block_operations(&mut self) { - self.num_proposer_slashings = T::MaxProposerSlashings::to_usize(); - self.num_attester_slashings = T::MaxAttesterSlashings::to_usize(); - self.num_attestations = T::MaxAttestations::to_usize(); - self.num_deposits = T::MaxDeposits::to_usize(); - self.num_exits = T::MaxVoluntaryExits::to_usize(); - } - - pub fn set_slot(&mut self, slot: Slot) { - self.state_builder.teleport_to_slot(slot); - } - - pub fn build_caches(&mut self, spec: &ChainSpec) { - // Builds all caches; benches will not contain shuffling/committee building times. 
- self.state_builder.build_caches(&spec).unwrap(); - } - - pub fn build(mut self, spec: &ChainSpec) -> (SignedBeaconBlock, BeaconState) { - let (mut state, keypairs) = self.state_builder.build(); - let builder = &mut self.block_builder; - - builder.set_slot(state.slot); - - let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); - - let proposer_keypair = &keypairs[proposer_index]; - - builder.set_proposer_index(proposer_index as u64); - - builder.set_randao_reveal( - &proposer_keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ); - - let parent_root = state.latest_block_header.canonical_root(); - builder.set_parent_root(parent_root); - - // Used as a stream of validator indices for use in slashings, exits, etc. - let mut validators_iter = 0..keypairs.len() as u64; - - // Insert `ProposerSlashing` objects. - for _ in 0..self.num_proposer_slashings { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - builder.insert_proposer_slashing( - ProposerSlashingTestTask::Valid, - validator_index, - &keypairs[validator_index as usize].sk, - &state.fork, - state.genesis_validators_root, - spec, - ); - } - info!( - "Inserted {} proposer slashings.", - builder.block.body.proposer_slashings.len() - ); - - // Insert `AttesterSlashing` objects - for _ in 0..self.num_attester_slashings { - let mut attesters: Vec = vec![]; - let mut secret_keys: Vec<&SecretKey> = vec![]; - - const NUM_SLASHED_INDICES: usize = 12; - - for _ in 0..NUM_SLASHED_INDICES { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - attesters.push(validator_index); - secret_keys.push(&keypairs[validator_index as usize].sk); - } - - builder.insert_attester_slashing( - AttesterSlashingTestTask::Valid, - &attesters, - &secret_keys, - &state.fork, - state.genesis_validators_root, - spec, - ); - } - info!( - "Inserted {} attester slashings.", - builder.block.body.attester_slashings.len() - ); - - // Insert 
`Attestation` objects. - let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); - builder - .insert_attestations( - AttestationTestTask::Valid, - &state, - &all_secret_keys, - self.num_attestations as usize, - spec, - ) - .unwrap(); - info!( - "Inserted {} attestations.", - builder.block.body.attestations.len() - ); - - // Insert `Deposit` objects. - builder.insert_deposits( - 32_000_000_000, - DepositTestTask::NoReset, - state.eth1_data.deposit_count, - self.num_deposits as u64, - &mut state, - spec, - ); - info!("Inserted {} deposits.", builder.block.body.deposits.len()); - - // Insert the maximum possible number of `Exit` objects. - for _ in 0..self.num_exits { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - builder.insert_exit( - validator_index, - state.current_epoch(), - &keypairs[validator_index as usize].sk, - &state, - spec, - ); - } - info!( - "Inserted {} exits.", - builder.block.body.voluntary_exits.len() - ); - - // Set the eth1 data to be different from the state. 
- self.block_builder.block.body.eth1_data.block_hash = Hash256::from_slice(&[42; 32]); - - let block = self.block_builder.build( - &proposer_keypair.sk, - &state.fork, - state.genesis_validators_root, - spec, - ); - - (block, state) - } -} diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs new file mode 100644 index 00000000000..ca8e515967e --- /dev/null +++ b/consensus/state_processing/src/upgrade.rs @@ -0,0 +1,3 @@ +pub mod altair; + +pub use altair::upgrade_to_altair; diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs new file mode 100644 index 00000000000..34ccc9e0b0d --- /dev/null +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -0,0 +1,119 @@ +use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices}; +use std::mem; +use types::{ + BeaconState, BeaconStateAltair, BeaconStateError as Error, ChainSpec, EthSpec, Fork, + ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, VariableList, +}; + +/// Translate the participation information from the epoch prior to the fork into Altair's format. +pub fn translate_participation( + state: &mut BeaconState, + pending_attestations: &VariableList, E::MaxPendingAttestations>, + spec: &ChainSpec, +) -> Result<(), Error> { + // Previous epoch committee cache is required for `get_attesting_indices`. + state.build_committee_cache(RelativeEpoch::Previous, spec)?; + + for attestation in pending_attestations { + let data = &attestation.data; + let inclusion_delay = attestation.inclusion_delay; + + // Translate attestation inclusion info to flag indices. + let participation_flag_indices = + get_attestation_participation_flag_indices(state, data, inclusion_delay, spec)?; + + // Apply flags to all attesting validators. 
+ let committee = state.get_beacon_committee(data.slot, data.index)?; + let attesting_indices = + get_attesting_indices::(&committee.committee, &attestation.aggregation_bits)?; + let epoch_participation = state.previous_epoch_participation_mut()?; + + for index in attesting_indices { + for flag_index in &participation_flag_indices { + epoch_participation + .get_mut(index) + .ok_or(Error::UnknownValidator(index))? + .add_flag(*flag_index)?; + } + } + } + Ok(()) +} + +/// Transform a `Base` state into an `Altair` state. +pub fn upgrade_to_altair( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_base_mut()?; + + let default_epoch_participation = + VariableList::new(vec![ParticipationFlags::default(); pre.validators.len()])?; + let inactivity_scores = VariableList::new(vec![0; pre.validators.len()])?; + + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. + // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. 
+ let mut post = BeaconState::Altair(BeaconStateAltair { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: pre.fork.current_version, + current_version: spec.altair_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // `Participation + previous_epoch_participation: default_epoch_participation.clone(), + current_epoch_participation: default_epoch_participation, + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores, + // Sync committees + current_sync_committee: SyncCommittee::temporary()?, // not read + next_sync_committee: SyncCommittee::temporary()?, // not read + // Caches + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + tree_hash_cache: mem::take(&mut pre.tree_hash_cache), + }); + + // Fill in previous epoch participation from the pre state's pending attestations. 
+ translate_participation(&mut post, &pre.previous_epoch_attestations, spec)?; + + // Fill in sync committees + // Note: A duplicate committee is assigned for the current and next committee at the fork + // boundary + let sync_committee = post.get_next_sync_committee(spec)?; + post.as_altair_mut()?.current_sync_committee = sync_committee.clone(); + post.as_altair_mut()?.next_sync_committee = sync_committee; + + *pre_state = post; + + Ok(()) +} diff --git a/consensus/state_processing/tests/tests.rs b/consensus/state_processing/tests/tests.rs deleted file mode 100644 index cc136850e04..00000000000 --- a/consensus/state_processing/tests/tests.rs +++ /dev/null @@ -1,228 +0,0 @@ -// #![cfg(not(feature = "fake_crypto"))] - -use state_processing::{ - per_block_processing, test_utils::BlockBuilder, BlockProcessingError, BlockSignatureStrategy, -}; -use types::{ - AggregateSignature, BeaconState, ChainSpec, EthSpec, Hash256, Keypair, MinimalEthSpec, - Signature, SignedBeaconBlock, Slot, -}; - -const VALIDATOR_COUNT: usize = 64; - -fn get_block(mut mutate_builder: F) -> (SignedBeaconBlock, BeaconState) -where - T: EthSpec, - F: FnMut(&mut BlockBuilder), -{ - let spec = T::default_spec(); - let mut builder: BlockBuilder = BlockBuilder::new(VALIDATOR_COUNT, &spec); - builder.set_slot(Slot::from(T::slots_per_epoch() * 3 - 2)); - builder.build_caches(&spec); - mutate_builder(&mut builder); - builder.build(&spec) -} - -fn test_scenario(mutate_builder: F, mut invalidate_block: G, spec: &ChainSpec) -where - T: EthSpec, - F: FnMut(&mut BlockBuilder), - G: FnMut(&mut SignedBeaconBlock), -{ - let (mut block, mut state) = get_block::(mutate_builder); - - /* - * Control check to ensure the valid block should pass verification. 
- */ - - assert_eq!( - per_block_processing( - &mut state.clone(), - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - spec - ), - Ok(()), - "valid block should pass with verify individual" - ); - - assert_eq!( - per_block_processing( - &mut state.clone(), - &block, - None, - BlockSignatureStrategy::VerifyBulk, - spec - ), - Ok(()), - "valid block should pass with verify bulk" - ); - - invalidate_block(&mut block); - - /* - * Check to ensure the invalid block fails. - */ - - assert!( - per_block_processing( - &mut state.clone(), - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - spec - ) - .is_err(), - "invalid block should fail with verify individual" - ); - - assert_eq!( - per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyBulk, - spec - ), - Err(BlockProcessingError::BulkSignatureVerificationFailed), - "invalid block should fail with verify bulk" - ); -} - -// TODO: use lazy static -fn agg_sig() -> AggregateSignature { - let mut agg_sig = AggregateSignature::infinity(); - agg_sig.add_assign(&sig()); - agg_sig -} - -// TODO: use lazy static -fn sig() -> Signature { - let keypair = Keypair::random(); - keypair.sk.sign(Hash256::from_low_u64_be(42)) -} - -type TestEthSpec = MinimalEthSpec; - -mod signatures_minimal { - use super::*; - - #[test] - fn block_proposal() { - let spec = &TestEthSpec::default_spec(); - - test_scenario::(|_| {}, |block| block.signature = sig(), spec); - } - - #[test] - fn randao() { - let spec = &TestEthSpec::default_spec(); - - test_scenario::( - |_| {}, - |block| block.message.body.randao_reveal = sig(), - spec, - ); - } - - #[test] - fn proposer_slashing() { - let spec = &TestEthSpec::default_spec(); - - test_scenario::( - |mut builder| { - builder.num_proposer_slashings = 1; - }, - |block| { - block.message.body.proposer_slashings[0] - .signed_header_1 - .signature = sig() - }, - spec, - ); - test_scenario::( - |mut builder| { - builder.num_proposer_slashings = 1; - }, - 
|block| { - block.message.body.proposer_slashings[0] - .signed_header_2 - .signature = sig() - }, - spec, - ); - } - - #[test] - fn attester_slashing() { - let spec = &TestEthSpec::default_spec(); - - test_scenario::( - |mut builder| { - builder.num_attester_slashings = 1; - }, - |block| { - block.message.body.attester_slashings[0] - .attestation_1 - .signature = agg_sig() - }, - spec, - ); - test_scenario::( - |mut builder| { - builder.num_attester_slashings = 1; - }, - |block| { - block.message.body.attester_slashings[0] - .attestation_2 - .signature = agg_sig() - }, - spec, - ); - } - - #[test] - fn attestation() { - let spec = &TestEthSpec::default_spec(); - - test_scenario::( - |mut builder| { - builder.num_attestations = 1; - }, - |block| block.message.body.attestations[0].signature = agg_sig(), - spec, - ); - } - - #[test] - // TODO: fix fail by making valid merkle proofs. - #[should_panic] - fn deposit() { - let spec = &TestEthSpec::default_spec(); - - test_scenario::( - |mut builder| { - builder.num_deposits = 1; - }, - |block| block.message.body.deposits[0].data.signature = sig().into(), - spec, - ); - } - - #[test] - fn exit() { - let mut spec = &mut TestEthSpec::default_spec(); - - // Allows the test to pass. 
- spec.shard_committee_period = 0; - - test_scenario::( - |mut builder| { - builder.num_exits = 1; - }, - |block| block.message.body.voluntary_exits[0].signature = sig(), - spec, - ); - } -} diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml index d968ff172bd..8251fe795dd 100644 --- a/consensus/tree_hash/Cargo.toml +++ b/consensus/tree_hash/Cargo.toml @@ -6,16 +6,12 @@ edition = "2018" license = "Apache-2.0" description = "Efficient Merkle-hashing as used in Ethereum 2.0" -[[bench]] -name = "benches" -harness = false - [dev-dependencies] -criterion = "0.3.3" rand = "0.7.3" tree_hash_derive = "0.2.0" types = { path = "../types" } lazy_static = "1.4.0" +beacon_chain = { path = "../../beacon_node/beacon_chain" } [dependencies] ethereum-types = "0.9.2" diff --git a/consensus/tree_hash/benches/benches.rs b/consensus/tree_hash/benches/benches.rs deleted file mode 100644 index 4728dc47910..00000000000 --- a/consensus/tree_hash/benches/benches.rs +++ /dev/null @@ -1,96 +0,0 @@ -#![allow(deprecated)] - -use criterion::Criterion; -use criterion::{black_box, criterion_group, criterion_main, Benchmark}; -use lazy_static::lazy_static; -use types::test_utils::{generate_deterministic_keypairs, TestingBeaconStateBuilder}; -use types::{BeaconState, EthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; - -lazy_static! 
{ - static ref KEYPAIRS: Vec = generate_deterministic_keypairs(300_000); -} - -fn build_state(validator_count: usize) -> BeaconState { - let (state, _keypairs) = TestingBeaconStateBuilder::from_keypairs( - KEYPAIRS[0..validator_count].to_vec(), - &T::default_spec(), - ) - .build(); - - assert_eq!(state.validators.len(), validator_count); - assert_eq!(state.balances.len(), validator_count); - assert!(state.previous_epoch_attestations.is_empty()); - assert!(state.current_epoch_attestations.is_empty()); - assert!(state.eth1_data_votes.is_empty()); - assert!(state.historical_roots.is_empty()); - - state -} - -// Note: `state.canonical_root()` uses whatever `tree_hash` that the `types` crate -// uses, which is not necessarily this crate. If you want to ensure that types is -// using this local version of `tree_hash`, ensure you add a workspace-level -// [dependency -// patch](https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section). -fn bench_suite(c: &mut Criterion, spec_desc: &str, validator_count: usize) { - let state1 = build_state::(validator_count); - let state2 = state1.clone(); - let mut state3 = state1.clone(); - state3.update_tree_hash_cache().unwrap(); - - c.bench( - &format!("{}/{}_validators/no_cache", spec_desc, validator_count), - Benchmark::new("genesis_state", move |b| { - b.iter_batched_ref( - || state1.clone(), - |state| black_box(state.canonical_root()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - c.bench( - &format!("{}/{}_validators/empty_cache", spec_desc, validator_count), - Benchmark::new("genesis_state", move |b| { - b.iter_batched_ref( - || state2.clone(), - |state| { - assert!(state.tree_hash_cache.is_none()); - black_box(state.update_tree_hash_cache().unwrap()) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - c.bench( - &format!( - "{}/{}_validators/up_to_date_cache", - spec_desc, validator_count - ), - Benchmark::new("genesis_state", move |b| { - 
b.iter_batched_ref( - || state3.clone(), - |state| { - assert!(state.tree_hash_cache.is_some()); - black_box(state.update_tree_hash_cache().unwrap()) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); -} - -fn all_benches(c: &mut Criterion) { - bench_suite::(c, "minimal", 100_000); - bench_suite::(c, "minimal", 300_000); - - bench_suite::(c, "mainnet", 100_000); - bench_suite::(c, "mainnet", 300_000); -} - -criterion_group!(benches, all_benches,); -criterion_main!(benches); diff --git a/consensus/tree_hash/examples/flamegraph_beacon_state.rs b/consensus/tree_hash/examples/flamegraph_beacon_state.rs index f4934ec83c1..309c2a2cc19 100644 --- a/consensus/tree_hash/examples/flamegraph_beacon_state.rs +++ b/consensus/tree_hash/examples/flamegraph_beacon_state.rs @@ -1,26 +1,46 @@ -use types::test_utils::TestingBeaconStateBuilder; +use beacon_chain::store::StoreConfig; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use types::{BeaconState, EthSpec, MainnetEthSpec}; const TREE_HASH_LOOPS: usize = 1_000; const VALIDATOR_COUNT: usize = 1_000; -fn build_state(validator_count: usize) -> BeaconState { - let (state, _keypairs) = - TestingBeaconStateBuilder::from_deterministic_keypairs(validator_count, &T::default_spec()) - .build(); +fn get_harness() -> BeaconChainHarness> { + let harness = BeaconChainHarness::new_with_store_config( + T::default(), + None, + types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT), + StoreConfig::default(), + ); - assert_eq!(state.validators.len(), validator_count); - assert_eq!(state.balances.len(), validator_count); - assert!(state.previous_epoch_attestations.is_empty()); - assert!(state.current_epoch_attestations.is_empty()); - assert!(state.eth1_data_votes.is_empty()); - assert!(state.historical_roots.is_empty()); + harness.advance_slot(); + + harness +} + +fn build_state() -> BeaconState { + let state = get_harness::().chain.head_beacon_state().unwrap(); + + 
assert_eq!(state.as_base().unwrap().validators.len(), VALIDATOR_COUNT); + assert_eq!(state.as_base().unwrap().balances.len(), VALIDATOR_COUNT); + assert!(state + .as_base() + .unwrap() + .previous_epoch_attestations + .is_empty()); + assert!(state + .as_base() + .unwrap() + .current_epoch_attestations + .is_empty()); + assert!(state.as_base().unwrap().eth1_data_votes.is_empty()); + assert!(state.as_base().unwrap().historical_roots.is_empty()); state } fn main() { - let state = build_state::(VALIDATOR_COUNT); + let state = build_state::(); // This vec is an attempt to ensure the compiler doesn't optimize-out the hashing. let mut vec = Vec::with_capacity(TREE_HASH_LOOPS); diff --git a/consensus/tree_hash/src/lib.rs b/consensus/tree_hash/src/lib.rs index 7008e0068dd..f44208564db 100644 --- a/consensus/tree_hash/src/lib.rs +++ b/consensus/tree_hash/src/lib.rs @@ -95,6 +95,28 @@ pub trait TreeHash { fn tree_hash_root(&self) -> Hash256; } +/// Punch through references. +impl<'a, T> TreeHash for &'a T +where + T: TreeHash, +{ + fn tree_hash_type() -> TreeHashType { + T::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + T::tree_hash_packed_encoding(*self) + } + + fn tree_hash_packing_factor() -> usize { + T::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> Hash256 { + T::tree_hash_root(*self) + } +} + #[macro_export] macro_rules! 
tree_hash_ssz_encoding_as_vector { ($type: ident) => { diff --git a/consensus/tree_hash_derive/src/lib.rs b/consensus/tree_hash_derive/src/lib.rs index 97a33239430..1317e56e86e 100644 --- a/consensus/tree_hash_derive/src/lib.rs +++ b/consensus/tree_hash_derive/src/lib.rs @@ -1,9 +1,7 @@ #![recursion_limit = "256"] -extern crate proc_macro; - use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, Attribute, DeriveInput, Meta}; +use syn::{parse_macro_input, Attribute, DataEnum, DataStruct, DeriveInput, Meta}; /// Return a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields /// that should not be hashed. @@ -85,14 +83,17 @@ fn should_skip_hashing(field: &syn::Field) -> bool { pub fn tree_hash_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); + match &item.data { + syn::Data::Struct(s) => tree_hash_derive_struct(&item, s), + syn::Data::Enum(e) => tree_hash_derive_enum(&item, e), + _ => panic!("tree_hash_derive only supports structs."), + } +} + +fn tree_hash_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> TokenStream { let name = &item.ident; let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("tree_hash_derive only supports structs."), - }; - let idents = get_hashable_fields(&struct_data); let num_leaves = idents.len(); @@ -124,3 +125,70 @@ pub fn tree_hash_derive(input: TokenStream) -> TokenStream { }; output.into() } + +/// Derive `TreeHash` for a restricted subset of all possible enum types. +/// +/// Only supports: +/// - Enums with a single field per variant, where +/// - All fields are "container" types. +/// +/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run +/// time* if the container type requirement isn't met. 
+fn tree_hash_derive_enum(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { + let name = &derive_input.ident; + let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); + + let (patterns, type_exprs): (Vec<_>, Vec<_>) = enum_data + .variants + .iter() + .map(|variant| { + let variant_name = &variant.ident; + + if variant.fields.len() != 1 { + panic!("TreeHash can only be derived for enums with 1 field per variant"); + } + + let pattern = quote! { + #name::#variant_name(ref inner) + }; + + let ty = &(&variant.fields).into_iter().next().unwrap().ty; + let type_expr = quote! { + <#ty as tree_hash::TreeHash>::tree_hash_type() + }; + (pattern, type_expr) + }) + .unzip(); + + let output = quote! { + impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { + fn tree_hash_type() -> tree_hash::TreeHashType { + #( + assert_eq!( + #type_exprs, + tree_hash::TreeHashType::Container, + "all variants must be of container type" + ); + )* + tree_hash::TreeHashType::Container + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("Enum should never be packed") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Enum should never be packed") + } + + fn tree_hash_root(&self) -> Hash256 { + match self { + #( + #patterns => inner.tree_hash_root(), + )* + } + } + } + }; + output.into() +} diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 397ee7e1695..bf79d378690 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -42,10 +42,14 @@ arbitrary = { version = "0.4.6", features = ["derive"], optional = true } serde_utils = { path = "../serde_utils" } regex = "1.3.9" lazy_static = "1.4.0" +parking_lot = "0.11.1" +itertools = "0.10.0" +superstruct = "0.2.0" [dev-dependencies] serde_json = "1.0.58" criterion = "0.3.3" +beacon_chain = { path = "../../beacon_node/beacon_chain" } [features] default = ["sqlite", "legacy-arith"] diff --git 
a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index aa3b191c81e..28f57e70804 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -3,7 +3,8 @@ use criterion::Criterion; use criterion::{black_box, criterion_group, criterion_main, Benchmark}; use rayon::prelude::*; -use ssz::{Decode, Encode}; +use ssz::Encode; +use std::sync::Arc; use types::{ test_utils::generate_deterministic_keypair, BeaconState, Epoch, Eth1Data, EthSpec, Hash256, MainnetEthSpec, Validator, @@ -20,10 +21,13 @@ fn get_state(validator_count: usize) -> BeaconState { let mut state = BeaconState::new(0, eth1_data, spec); for i in 0..validator_count { - state.balances.push(i as u64).expect("should add balance"); + state + .balances_mut() + .push(i as u64) + .expect("should add balance"); } - state.validators = (0..validator_count) + *state.validators_mut() = (0..validator_count) .collect::>() .par_iter() .map(|&i| Validator { @@ -44,10 +48,10 @@ fn get_state(validator_count: usize) -> BeaconState { fn all_benches(c: &mut Criterion) { let validator_count = 16_384; - let spec = &MainnetEthSpec::default_spec(); + let spec = Arc::new(MainnetEthSpec::default_spec()); let mut state = get_state::(validator_count); - state.build_all_caches(spec).expect("should build caches"); + state.build_all_caches(&spec).expect("should build caches"); let state_bytes = state.as_ssz_bytes(); let inner_state = state.clone(); @@ -67,10 +71,10 @@ fn all_benches(c: &mut Criterion) { &format!("{}_validators", validator_count), Benchmark::new("decode/beacon_state", move |b| { b.iter_batched_ref( - || state_bytes.clone(), - |bytes| { + || (state_bytes.clone(), spec.clone()), + |(bytes, spec)| { let state: BeaconState = - BeaconState::from_ssz_bytes(&bytes).expect("should decode"); + BeaconState::from_ssz_bytes(&bytes, &spec).expect("should decode"); black_box(state) }, criterion::BatchSize::SmallInput, @@ -98,7 +102,7 @@ fn all_benches(c: &mut Criterion) { 
Benchmark::new("clone/tree_hash_cache", move |b| { b.iter_batched_ref( || inner_state.clone(), - |state| black_box(state.tree_hash_cache.clone()), + |state| black_box(state.tree_hash_cache().clone()), criterion::BatchSize::SmallInput, ) }) @@ -122,7 +126,7 @@ fn all_benches(c: &mut Criterion) { ); let mut inner_state = state.clone(); - inner_state.drop_all_caches(); + inner_state.drop_all_caches().unwrap(); c.bench( &format!("{}_validators", validator_count), Benchmark::new("non_initialized_cached_tree_hash/beacon_state", move |b| { @@ -152,11 +156,11 @@ fn all_benches(c: &mut Criterion) { let mut state = inner_state.clone(); for _ in 0..16 { state - .validators + .validators_mut() .push(Validator::default()) .expect("should push validatorj"); state - .balances + .balances_mut() .push(32_000_000_000) .expect("should push balance"); } diff --git a/consensus/types/examples/clone_state.rs b/consensus/types/examples/clone_state.rs index 73f601bce0d..a7e80cf4078 100644 --- a/consensus/types/examples/clone_state.rs +++ b/consensus/types/examples/clone_state.rs @@ -19,9 +19,12 @@ fn get_state(validator_count: usize) -> BeaconState { let mut state = BeaconState::new(0, eth1_data, spec); for i in 0..validator_count { - state.balances.push(i as u64).expect("should add balance"); state - .validators + .balances_mut() + .push(i as u64) + .expect("should add balance"); + state + .validators_mut() .push(Validator { pubkey: generate_deterministic_keypair(i).pk.into(), withdrawal_credentials: Hash256::from_low_u64_le(i as u64), diff --git a/consensus/types/examples/ssz_encode_state.rs b/consensus/types/examples/ssz_encode_state.rs index 826835306e3..5d0a2db17c7 100644 --- a/consensus/types/examples/ssz_encode_state.rs +++ b/consensus/types/examples/ssz_encode_state.rs @@ -1,7 +1,7 @@ //! These examples only really exist so we can use them for flamegraph. If they get annoying to //! maintain, feel free to delete. 
-use ssz::{Decode, Encode}; +use ssz::Encode; use types::{ test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, MinimalEthSpec, Validator, @@ -20,9 +20,12 @@ fn get_state(validator_count: usize) -> BeaconState { let mut state = BeaconState::new(0, eth1_data, spec); for i in 0..validator_count { - state.balances.push(i as u64).expect("should add balance"); state - .validators + .balances_mut() + .push(i as u64) + .expect("should add balance"); + state + .validators_mut() .push(Validator { pubkey: generate_deterministic_keypair(i).pk.into(), withdrawal_credentials: Hash256::from_low_u64_le(i as u64), @@ -45,6 +48,7 @@ fn main() { for _ in 0..1_024 { let state_bytes = state.as_ssz_bytes(); - let _: BeaconState = BeaconState::from_ssz_bytes(&state_bytes).expect("should decode"); + let _: BeaconState = + BeaconState::from_ssz_bytes(&state_bytes, &E::default_spec()).expect("should decode"); } } diff --git a/consensus/types/examples/tree_hash_state.rs b/consensus/types/examples/tree_hash_state.rs index 03811c3083f..a421a23ad5a 100644 --- a/consensus/types/examples/tree_hash_state.rs +++ b/consensus/types/examples/tree_hash_state.rs @@ -19,9 +19,12 @@ fn get_state(validator_count: usize) -> BeaconState { let mut state = BeaconState::new(0, eth1_data, spec); for i in 0..validator_count { - state.balances.push(i as u64).expect("should add balance"); state - .validators + .balances_mut() + .push(i as u64) + .expect("should add balance"); + state + .validators_mut() .push(Validator { pubkey: generate_deterministic_keypair(i).pk.into(), withdrawal_credentials: Hash256::from_low_u64_le(i as u64), diff --git a/consensus/types/presets/mainnet/altair.yaml b/consensus/types/presets/mainnet/altair.yaml new file mode 100644 index 00000000000..9a17b780327 --- /dev/null +++ b/consensus/types/presets/mainnet/altair.yaml @@ -0,0 +1,24 @@ +# Mainnet preset - Altair + +# Updated penalty values +# --------------------------------------------------------------- 
+# 3 * 2**24 (= 50,331,648) +INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648 +# 2**6 (= 64) +MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64 +# 2 +PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2 + + +# Sync committee +# --------------------------------------------------------------- +# 2**9 (= 512) +SYNC_COMMITTEE_SIZE: 512 +# 2**8 (= 256) +EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256 + + +# Sync protocol +# --------------------------------------------------------------- +# 1 +MIN_SYNC_COMMITTEE_PARTICIPANTS: 1 diff --git a/common/eth2_network_config/built_in_network_configs/toledo/config.yaml b/consensus/types/presets/mainnet/phase0.yaml similarity index 58% rename from common/eth2_network_config/built_in_network_configs/toledo/config.yaml rename to consensus/types/presets/mainnet/phase0.yaml index 99a9012d732..89bb97d6a87 100644 --- a/common/eth2_network_config/built_in_network_configs/toledo/config.yaml +++ b/consensus/types/presets/mainnet/phase0.yaml @@ -1,6 +1,4 @@ -# Toledo preset, variant of mainnet - -CONFIG_NAME: "toledo" +# Mainnet preset - Phase0 # Misc # --------------------------------------------------------------- @@ -10,16 +8,8 @@ MAX_COMMITTEES_PER_SLOT: 64 TARGET_COMMITTEE_SIZE: 128 # 2**11 (= 2,048) MAX_VALIDATORS_PER_COMMITTEE: 2048 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 # See issue 563 SHUFFLE_ROUND_COUNT: 90 -# `2**14` (= 16,384) -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 -# Nov 10, 2020, 12pm UTC -MIN_GENESIS_TIME: 1605009600 # 4 HYSTERESIS_QUOTIENT: 4 # 1 (minus 0.25) @@ -34,53 +24,18 @@ HYSTERESIS_UPWARD_MULTIPLIER: 5 SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 -# Validator -# --------------------------------------------------------------- -# 2**11 (= 2,048) -ETH1_FOLLOW_DISTANCE: 2048 -# 2**4 (= 16) -TARGET_AGGREGATORS_PER_COMMITTEE: 16 -# 2**0 (= 1) -RANDOM_SUBNETS_PER_VALIDATOR: 1 -# 2**8 (= 256) -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 -# 14 (estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 - - -# 
Deposit contract -# --------------------------------------------------------------- -# Ethereum Goerli testnet -DEPOSIT_CHAIN_ID: 5 -DEPOSIT_NETWORK_ID: 5 -# Toledo permissioned test deposit contract on Goerli -DEPOSIT_CONTRACT_ADDRESS: 0x47709dC7a8c18688a1f051761fc34ac253970bC0 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei MIN_DEPOSIT_AMOUNT: 1000000000 # 2**5 * 10**9 (= 32,000,000,000) Gwei MAX_EFFECTIVE_BALANCE: 32000000000 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 # 2**0 * 10**9 (= 1,000,000,000) Gwei EFFECTIVE_BALANCE_INCREMENT: 1000000000 -# Initial values -# --------------------------------------------------------------- -GENESIS_FORK_VERSION: 0x00701ED0 -BLS_WITHDRAWAL_PREFIX: 0x00 - - # Time parameters # --------------------------------------------------------------- -# 86400 seconds (1 day) -GENESIS_DELAY: 86400 -# 12 seconds -SECONDS_PER_SLOT: 12 # 2**0 (= 1) slots 12 seconds MIN_ATTESTATION_INCLUSION_DELAY: 1 # 2**5 (= 32) slots 6.4 minutes @@ -91,17 +46,13 @@ MIN_SEED_LOOKAHEAD: 1 MAX_SEED_LOOKAHEAD: 4 # 2**6 (= 64) epochs ~6.8 hours EPOCHS_PER_ETH1_VOTING_PERIOD: 64 -# 2**13 (= 8,192) slots ~13 hours +# 2**13 (= 8,192) slots ~27 hours SLOTS_PER_HISTORICAL_ROOT: 8192 -# 2**8 (= 256) epochs ~27 hours -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours -SHARD_COMMITTEE_PERIOD: 256 # 2**2 (= 4) epochs 25.6 minutes MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 -# State vector lengths +# State list lengths # --------------------------------------------------------------- # 2**16 (= 65,536) epochs ~0.8 years EPOCHS_PER_HISTORICAL_VECTOR: 65536 @@ -141,14 +92,3 @@ MAX_ATTESTATIONS: 128 MAX_DEPOSITS: 16 # 2**4 (= 16) MAX_VOLUNTARY_EXITS: 16 - - -# Signature domains -# --------------------------------------------------------------- -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 
0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_SELECTION_PROOF: 0x05000000 -DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 diff --git a/consensus/types/presets/minimal/altair.yaml b/consensus/types/presets/minimal/altair.yaml new file mode 100644 index 00000000000..88d78bea365 --- /dev/null +++ b/consensus/types/presets/minimal/altair.yaml @@ -0,0 +1,24 @@ +# Minimal preset - Altair + +# Updated penalty values +# --------------------------------------------------------------- +# 3 * 2**24 (= 50,331,648) +INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648 +# 2**6 (= 64) +MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64 +# 2 +PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2 + + +# Sync committee +# --------------------------------------------------------------- +# [customized] +SYNC_COMMITTEE_SIZE: 32 +# [customized] +EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8 + + +# Sync protocol +# --------------------------------------------------------------- +# 1 +MIN_SYNC_COMMITTEE_PARTICIPANTS: 1 diff --git a/consensus/types/presets/minimal/phase0.yaml b/consensus/types/presets/minimal/phase0.yaml new file mode 100644 index 00000000000..c9c81325f1b --- /dev/null +++ b/consensus/types/presets/minimal/phase0.yaml @@ -0,0 +1,94 @@ +# Minimal preset - Phase0 + +# Misc +# --------------------------------------------------------------- +# [customized] Just 4 committees for slot for testing purposes +MAX_COMMITTEES_PER_SLOT: 4 +# [customized] unsecure, but fast +TARGET_COMMITTEE_SIZE: 4 +# 2**11 (= 2,048) +MAX_VALIDATORS_PER_COMMITTEE: 2048 +# [customized] Faster, but unsecure. 
+SHUFFLE_ROUND_COUNT: 10
+# 4
+HYSTERESIS_QUOTIENT: 4
+# 1 (minus 0.25)
+HYSTERESIS_DOWNWARD_MULTIPLIER: 1
+# 5 (plus 1.25)
+HYSTERESIS_UPWARD_MULTIPLIER: 5
+
+
+# Fork Choice
+# ---------------------------------------------------------------
+# 2**1 (= 2)
+SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2
+
+
+# Gwei values
+# ---------------------------------------------------------------
+# 2**0 * 10**9 (= 1,000,000,000) Gwei
+MIN_DEPOSIT_AMOUNT: 1000000000
+# 2**5 * 10**9 (= 32,000,000,000) Gwei
+MAX_EFFECTIVE_BALANCE: 32000000000
+# 2**0 * 10**9 (= 1,000,000,000) Gwei
+EFFECTIVE_BALANCE_INCREMENT: 1000000000
+
+
+# Time parameters
+# ---------------------------------------------------------------
+# 2**0 (= 1) slots 6 seconds
+MIN_ATTESTATION_INCLUSION_DELAY: 1
+# [customized] fast epochs
+SLOTS_PER_EPOCH: 8
+# 2**0 (= 1) epochs
+MIN_SEED_LOOKAHEAD: 1
+# 2**2 (= 4) epochs
+MAX_SEED_LOOKAHEAD: 4
+# [customized] higher frequency new deposits from eth1 for testing
+EPOCHS_PER_ETH1_VOTING_PERIOD: 4
+# [customized] smaller state
+SLOTS_PER_HISTORICAL_ROOT: 64
+# 2**2 (= 4) epochs
+MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
+
+
+# State list lengths
+# ---------------------------------------------------------------
+# [customized] smaller state
+EPOCHS_PER_HISTORICAL_VECTOR: 64
+# [customized] smaller state
+EPOCHS_PER_SLASHINGS_VECTOR: 64
+# 2**24 (= 16,777,216) historical roots
+HISTORICAL_ROOTS_LIMIT: 16777216
+# 2**40 (= 1,099,511,627,776) validator spots
+VALIDATOR_REGISTRY_LIMIT: 1099511627776
+
+
+# Reward and penalty quotients
+# ---------------------------------------------------------------
+# 2**6 (= 64)
+BASE_REWARD_FACTOR: 64
+# 2**9 (= 512)
+WHISTLEBLOWER_REWARD_QUOTIENT: 512
+# 2**3 (= 8)
+PROPOSER_REWARD_QUOTIENT: 8
+# [customized] 2**25 (= 33,554,432)
+INACTIVITY_PENALTY_QUOTIENT: 33554432
+# [customized] 2**6 (= 64)
+MIN_SLASHING_PENALTY_QUOTIENT: 64
+# [customized] 2 (lower safety margin than Phase 0 genesis but different than mainnet config for testing)
+PROPORTIONAL_SLASHING_MULTIPLIER: 2 + + +# Max operations per block +# --------------------------------------------------------------- +# 2**4 (= 16) +MAX_PROPOSER_SLASHINGS: 16 +# 2**1 (= 2) +MAX_ATTESTER_SLASHINGS: 2 +# 2**7 (= 128) +MAX_ATTESTATIONS: 128 +# 2**4 (= 16) +MAX_DEPOSITS: 16 +# 2**4 (= 16) +MAX_VOLUNTARY_EXITS: 16 diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 71b0f9545fe..67caabd570f 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,58 +1,71 @@ +use crate::beacon_block_body::{ + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyRef, BeaconBlockBodyRefMut, +}; use crate::test_utils::TestRandom; use crate::*; use bls::Signature; - use serde_derive::{Deserialize, Serialize}; +use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. 
-/// -/// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[superstruct( + variants(Base, Altair), + variant_attributes( + derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom + ), + serde(bound = "T: EthSpec", deny_unknown_fields), + cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + ), + ref_attributes(derive(Debug, PartialEq, TreeHash)) +)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash)] +#[serde(untagged)] #[serde(bound = "T: EthSpec")] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct BeaconBlock { + #[superstruct(getter(copy))] pub slot: Slot, + #[superstruct(getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, + #[superstruct(getter(copy))] pub parent_root: Hash256, + #[superstruct(getter(copy))] pub state_root: Hash256, - pub body: BeaconBlockBody, + #[superstruct(only(Base), partial_getter(rename = "body_base"))] + pub body: BeaconBlockBodyBase, + #[superstruct(only(Altair), partial_getter(rename = "body_altair"))] + pub body: BeaconBlockBodyAltair, } impl SignedRoot for BeaconBlock {} +impl<'a, T: EthSpec> SignedRoot for BeaconBlockRef<'a, T> {} impl BeaconBlock { /// Returns an empty block to be used during genesis. 
- /// - /// Spec v0.12.1 pub fn empty(spec: &ChainSpec) -> Self { - BeaconBlock { - slot: spec.genesis_slot, - proposer_index: 0, - parent_root: Hash256::zero(), - state_root: Hash256::zero(), - body: BeaconBlockBody { - randao_reveal: Signature::empty(), - eth1_data: Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - deposit_count: 0, - }, - graffiti: Graffiti::default(), - proposer_slashings: VariableList::empty(), - attester_slashings: VariableList::empty(), - attestations: VariableList::empty(), - deposits: VariableList::empty(), - voluntary_exits: VariableList::empty(), - }, + if spec.altair_fork_epoch == Some(T::genesis_epoch()) { + Self::Altair(BeaconBlockAltair::empty(spec)) + } else { + Self::Base(BeaconBlockBase::empty(spec)) } } - /// Return a block where the block has the max possible operations. + /// Return a block where the block has maximum size. pub fn full(spec: &ChainSpec) -> BeaconBlock { let header = BeaconBlockHeader { slot: Slot::new(1), @@ -114,7 +127,8 @@ impl BeaconBlock { signature: Signature::empty(), }; - let mut block: BeaconBlock = BeaconBlock::empty(spec); + // FIXME(altair): use an Altair block (they're bigger) + let mut block = BeaconBlockBase::::empty(spec); for _ in 0..T::MaxProposerSlashings::to_usize() { block .body @@ -143,19 +157,50 @@ impl BeaconBlock { for _ in 0..T::MaxAttestations::to_usize() { block.body.attestations.push(attestation.clone()).unwrap(); } - block + BeaconBlock::Base(block) + } + + /// Custom SSZ decoder that takes a `ChainSpec` as context. 
+ pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + let slot_len = ::ssz_fixed_len(); + let slot_bytes = bytes + .get(0..slot_len) + .ok_or(DecodeError::InvalidByteLength { + len: bytes.len(), + expected: slot_len, + })?; + + let slot = Slot::from_ssz_bytes(slot_bytes)?; + let epoch = slot.epoch(T::slots_per_epoch()); + + if spec + .altair_fork_epoch + .map_or(true, |altair_epoch| epoch < altair_epoch) + { + BeaconBlockBase::from_ssz_bytes(bytes).map(Self::Base) + } else { + BeaconBlockAltair::from_ssz_bytes(bytes).map(Self::Altair) + } + } + + /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. + pub fn body(&self) -> BeaconBlockBodyRef<'_, T> { + self.to_ref().body() + } + + /// Convenience accessor for the `body` as a `BeaconBlockBodyRefMut`. + pub fn body_mut(&mut self) -> BeaconBlockBodyRefMut<'_, T> { + self.to_mut().body_mut() } - /// Returns the epoch corresponding to `self.slot`. + /// Returns the epoch corresponding to `self.slot()`. pub fn epoch(&self) -> Epoch { - self.slot.epoch(T::slots_per_epoch()) + self.slot().epoch(T::slots_per_epoch()) } /// Returns the `tree_hash_root` of the block. - /// - /// Spec v0.12.1 pub fn canonical_root(&self) -> Hash256 { - Hash256::from_slice(&self.tree_hash_root()[..]) + self.tree_hash_root() } /// Returns a full `BeaconBlockHeader` of this block. @@ -164,26 +209,18 @@ impl BeaconBlock { /// when you want to have the block _and_ the header. /// /// Note: performs a full tree-hash of `self.body`. - /// - /// Spec v0.12.1 pub fn block_header(&self) -> BeaconBlockHeader { - BeaconBlockHeader { - slot: self.slot, - proposer_index: self.proposer_index, - parent_root: self.parent_root, - state_root: self.state_root, - body_root: Hash256::from_slice(&self.body.tree_hash_root()[..]), - } + self.to_ref().block_header() } /// Returns a "temporary" header, where the `state_root` is `Hash256::zero()`. 
- /// - /// Spec v0.12.1 pub fn temporary_block_header(&self) -> BeaconBlockHeader { - BeaconBlockHeader { - state_root: Hash256::zero(), - ..self.block_header() - } + self.to_ref().temporary_block_header() + } + + /// Return the tree hash root of the block's body. + pub fn body_root(&self) -> Hash256 { + self.to_ref().body_root() } /// Signs `self`, producing a `SignedBeaconBlock`. @@ -202,9 +239,106 @@ impl BeaconBlock { ); let message = self.signing_root(domain); let signature = secret_key.sign(message); - SignedBeaconBlock { - message: self, - signature, + SignedBeaconBlock::from_block(self, signature) + } +} + +impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { + /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. + pub fn body(&self) -> BeaconBlockBodyRef<'a, T> { + match self { + BeaconBlockRef::Base(block) => BeaconBlockBodyRef::Base(&block.body), + BeaconBlockRef::Altair(block) => BeaconBlockBodyRef::Altair(&block.body), + } + } + + /// Return the tree hash root of the block's body. + pub fn body_root(&self) -> Hash256 { + match self { + BeaconBlockRef::Base(block) => block.body.tree_hash_root(), + BeaconBlockRef::Altair(block) => block.body.tree_hash_root(), + } + } + + /// Returns a full `BeaconBlockHeader` of this block. + pub fn block_header(&self) -> BeaconBlockHeader { + BeaconBlockHeader { + slot: self.slot(), + proposer_index: self.proposer_index(), + parent_root: self.parent_root(), + state_root: self.state_root(), + body_root: self.body_root(), + } + } + + /// Returns a "temporary" header, where the `state_root` is `Hash256::zero()`. + pub fn temporary_block_header(self) -> BeaconBlockHeader { + BeaconBlockHeader { + state_root: Hash256::zero(), + ..self.block_header() + } + } +} + +impl<'a, T: EthSpec> BeaconBlockRefMut<'a, T> { + /// Convert a mutable reference to a beacon block to a mutable ref to its body. 
+ pub fn body_mut(self) -> BeaconBlockBodyRefMut<'a, T> { + match self { + BeaconBlockRefMut::Base(block) => BeaconBlockBodyRefMut::Base(&mut block.body), + BeaconBlockRefMut::Altair(block) => BeaconBlockBodyRefMut::Altair(&mut block.body), + } + } +} + +impl BeaconBlockBase { + /// Returns an empty block to be used during genesis. + pub fn empty(spec: &ChainSpec) -> Self { + BeaconBlockBase { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyBase { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + }, + } + } +} + +impl BeaconBlockAltair { + /// Returns an empty block to be used during genesis. 
+ pub fn empty(spec: &ChainSpec) -> Self { + BeaconBlockAltair { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + }, } } } @@ -212,6 +346,110 @@ impl BeaconBlock { #[cfg(test)] mod tests { use super::*; + use crate::test_utils::{test_ssz_tree_hash_pair_with, SeedableRng, TestRandom, XorShiftRng}; + use crate::{ForkName, MainnetEthSpec}; + use ssz::Encode; + + type BeaconBlock = super::BeaconBlock; + type BeaconBlockBase = super::BeaconBlockBase; + type BeaconBlockAltair = super::BeaconBlockAltair; + + #[test] + fn roundtrip_base_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Base.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockBase { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyBase::random_for_test(rng), + }; + let block = BeaconBlock::Base(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } - ssz_and_tree_hash_tests!(BeaconBlock); + #[test] + fn roundtrip_altair_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Altair.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockAltair { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: 
Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyAltair::random_for_test(rng), + }; + let block = BeaconBlock::Altair(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + + #[test] + fn decode_base_and_altair() { + type E = MainnetEthSpec; + + let rng = &mut XorShiftRng::from_seed([42; 16]); + + let fork_epoch = Epoch::from_ssz_bytes(&[7, 6, 5, 4, 3, 2, 1, 0]).unwrap(); + + let base_epoch = fork_epoch.saturating_sub(1_u64); + let base_slot = base_epoch.end_slot(E::slots_per_epoch()); + let altair_epoch = fork_epoch; + let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(fork_epoch); + + // BeaconBlockBase + { + let good_base_block = BeaconBlock::Base(BeaconBlockBase { + slot: base_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have a base block with a slot higher than the fork epoch. + let bad_base_block = { + let mut bad = good_base_block.clone(); + *bad.slot_mut() = altair_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_base_block.as_ssz_bytes(), &spec) + .expect("good base block can be decoded"), + good_base_block + ); + BeaconBlock::from_ssz_bytes(&bad_base_block.as_ssz_bytes(), &spec) + .expect_err("bad base block cannot be decoded"); + } + + // BeaconBlockAltair + { + let good_altair_block = BeaconBlock::Altair(BeaconBlockAltair { + slot: altair_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have an Altair block with a epoch lower than the fork epoch. 
+ let bad_altair_block = { + let mut bad = good_altair_block.clone(); + *bad.slot_mut() = base_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_altair_block.as_ssz_bytes(), &spec) + .expect("good altair block can be decoded"), + good_altair_block + ); + BeaconBlock::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec) + .expect_err("bad altair block cannot be decoded"); + } + } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index ef28307edcc..1924ca14f4d 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,18 +1,37 @@ use crate::test_utils::TestRandom; use crate::*; - use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; /// The body of a `BeaconChain` block, containing operations. /// -/// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +/// This *superstruct* abstracts over the hard-fork. 
+#[superstruct( + variants(Base, Altair), + variant_attributes( + derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom + ), + serde(bound = "T: EthSpec", deny_unknown_fields), + cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + ) +)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(untagged)] #[serde(bound = "T: EthSpec")] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct BeaconBlockBody { pub randao_reveal: Signature, pub eth1_data: Eth1Data, @@ -22,11 +41,18 @@ pub struct BeaconBlockBody { pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, + #[superstruct(only(Altair))] + pub sync_aggregate: SyncAggregate, } #[cfg(test)] mod tests { - use super::*; - - ssz_and_tree_hash_tests!(BeaconBlockBody); + mod base { + use super::super::*; + ssz_and_tree_hash_tests!(BeaconBlockBodyBase); + } + mod altair { + use super::super::*; + ssz_and_tree_hash_tests!(BeaconBlockBodyAltair); + } } diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index 82222b03589..64fbaef8659 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -33,19 +33,6 @@ impl BeaconBlockHeader { Hash256::from_slice(&self.tree_hash_root()[..]) } - /// Given a `body`, consumes `self` and returns a complete `BeaconBlock`. - /// - /// Spec v0.12.1 - pub fn into_block(self, body: BeaconBlockBody) -> BeaconBlock { - BeaconBlock { - slot: self.slot, - proposer_index: self.proposer_index, - parent_root: self.parent_root, - state_root: self.state_root, - body, - } - } - /// Signs `self`, producing a `SignedBeaconBlockHeader`. 
pub fn sign( self, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index fd1dba881b9..dcdaf059c39 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -2,19 +2,21 @@ use self::committee_cache::get_active_validator_indices; use self::exit_cache::ExitCache; use crate::test_utils::TestRandom; use crate::*; - -use cached_tree_hash::{CacheArena, CachedTreeHash}; +use compare_fields::CompareFields; use compare_fields_derive::CompareFields; +use derivative::Derivative; use eth2_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; -use ssz::{ssz_encode, Encode}; +use ssz::{ssz_encode, Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; +use std::collections::HashSet; use std::convert::TryInto; -use std::fmt; +use std::{fmt, mem}; +use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -40,13 +42,25 @@ const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; #[derive(Debug, PartialEq, Clone)] pub enum Error { + /// A state for a different hard-fork was required -- a severe logic error. 
+ IncorrectStateVariant, EpochOutOfBounds, SlotOutOfBounds, - UnknownValidator(u64), + UnknownValidator(usize), UnableToDetermineProducer, InvalidBitfield, ValidatorIsWithdrawable, UnableToShuffle, + ShuffleIndexOutOfBounds(usize), + IsAggregatorOutOfBounds, + BlockRootsOutOfBounds(usize), + StateRootsOutOfBounds(usize), + SlashingsOutOfBounds(usize), + BalancesOutOfBounds(usize), + RandaoMixesOutOfBounds(usize), + CommitteeCachesOutOfBounds(usize), + ParticipationOutOfBounds(usize), + InactivityScoresOutOfBounds(usize), TooManyValidators, InsufficientValidators, InsufficientRandaoMixes, @@ -70,6 +84,8 @@ pub enum Error { RelativeEpochError(RelativeEpochError), ExitCacheUninitialized, CommitteeCacheUninitialized(Option), + SyncCommitteeCacheUninitialized, + BlsError(bls::Error), SszTypesError(ssz_types::Error), TreeHashCacheNotInitialized, NonLinearTreeHashCacheHistory, @@ -86,6 +102,8 @@ pub enum Error { deposit_count: u64, deposit_index: u64, }, + /// Attestation slipped through block processing with a non-matching source. + IncorrectAttestationSource, /// An arithmetic operation occurred which would have overflowed or divided by 0. /// /// This represents a serious bug in either the spec or Lighthouse! @@ -139,30 +157,45 @@ impl From for Hash256 { } /// The state of the `BeaconChain` at some slot. 
-/// -/// Spec v0.12.1 -#[derive( - Debug, - PartialEq, - Clone, - Serialize, - Deserialize, - TestRandom, - Encode, - Decode, - TreeHash, - CompareFields, +#[superstruct( + variants(Base, Altair), + variant_attributes( + derive( + Derivative, + Debug, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + CompareFields, + ), + serde(bound = "T: EthSpec", deny_unknown_fields), + derivative(Clone), + cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] +#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash)] +#[serde(untagged)] #[serde(bound = "T: EthSpec")] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct BeaconState where T: EthSpec, { // Versioning + #[superstruct(getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, + #[superstruct(getter(copy))] pub genesis_validators_root: Hash256, + #[superstruct(getter(copy))] pub slot: Slot, + #[superstruct(getter(copy))] pub fork: Fork, // History @@ -176,6 +209,7 @@ where // Ethereum 1.0 chain data pub eth1_data: Eth1Data, pub eth1_data_votes: VariableList, + #[superstruct(getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, @@ -193,52 +227,81 @@ where #[serde(with = "ssz_types::serde_utils::quoted_u64_fixed_vec")] pub slashings: FixedVector, - // Attestations + // Attestations (genesis fork only) + #[superstruct(only(Base))] pub previous_epoch_attestations: VariableList, T::MaxPendingAttestations>, + #[superstruct(only(Base))] pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, + // Participation (Altair and later) + #[superstruct(only(Altair))] + pub previous_epoch_participation: VariableList, + #[superstruct(only(Altair))] + pub current_epoch_participation: VariableList, + // Finality 
#[test_random(default)] pub justification_bits: BitVector, + #[superstruct(getter(copy))] pub previous_justified_checkpoint: Checkpoint, + #[superstruct(getter(copy))] pub current_justified_checkpoint: Checkpoint, + #[superstruct(getter(copy))] pub finalized_checkpoint: Checkpoint, + // Inactivity + #[superstruct(only(Altair))] + pub inactivity_scores: VariableList, + + // Light-client sync committees + #[superstruct(only(Altair))] + pub current_sync_committee: SyncCommittee, + #[superstruct(only(Altair))] + pub next_sync_committee: SyncCommittee, + // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] + #[derivative(Clone(clone_with = "clone_default"))] pub committee_caches: [CommitteeCache; CACHED_EPOCHS], #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] + #[derivative(Clone(clone_with = "clone_default"))] pub pubkey_cache: PubkeyCache, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] + #[derivative(Clone(clone_with = "clone_default"))] pub exit_cache: ExitCache, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - pub tree_hash_cache: Option>, + #[derivative(Clone(clone_with = "clone_default"))] + pub tree_hash_cache: BeaconTreeHashCache, +} + +impl Clone for BeaconState { + fn clone(&self) -> Self { + self.clone_with(CloneConfig::all()) + } } impl BeaconState { /// Create a new BeaconState suitable for genesis. /// /// Not a complete genesis state, see `initialize_beacon_state_from_eth1`. 
- /// - /// Spec v0.12.1 pub fn new(genesis_time: u64, eth1_data: Eth1Data, spec: &ChainSpec) -> Self { - BeaconState { + BeaconState::Base(BeaconStateBase { // Versioning genesis_time, genesis_validators_root: Hash256::zero(), // Set later. @@ -288,7 +351,55 @@ impl BeaconState { ], pubkey_cache: PubkeyCache::default(), exit_cache: ExitCache::default(), - tree_hash_cache: None, + tree_hash_cache: <_>::default(), + }) + } + + /// Returns the name of the fork pertaining to `self`. + /// + /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork + /// dictated by `self.slot()`. + pub fn fork_name(&self, spec: &ChainSpec) -> Result { + let fork_at_slot = spec.fork_name_at_epoch(self.current_epoch()); + let object_fork = match self { + BeaconState::Base { .. } => ForkName::Base, + BeaconState::Altair { .. } => ForkName::Altair, + }; + + if fork_at_slot == object_fork { + Ok(object_fork) + } else { + Err(InconsistentFork { + fork_at_slot, + object_fork, + }) + } + } + + /// Specialised deserialisation method that uses the `ChainSpec` as context. + #[allow(clippy::integer_arithmetic)] + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). 
+ let slot_start = ::ssz_fixed_len() + ::ssz_fixed_len(); + let slot_end = slot_start + ::ssz_fixed_len(); + + let slot_bytes = bytes + .get(slot_start..slot_end) + .ok_or(DecodeError::InvalidByteLength { + len: bytes.len(), + expected: slot_end, + })?; + + let slot = Slot::from_ssz_bytes(slot_bytes)?; + let epoch = slot.epoch(T::slots_per_epoch()); + + if spec + .altair_fork_epoch + .map_or(true, |altair_epoch| epoch < altair_epoch) + { + BeaconStateBase::from_ssz_bytes(bytes).map(Self::Base) + } else { + BeaconStateAltair::from_ssz_bytes(bytes).map(Self::Altair) } } @@ -301,8 +412,8 @@ impl BeaconState { pub fn historical_batch(&self) -> HistoricalBatch { HistoricalBatch { - block_roots: self.block_roots.clone(), - state_roots: self.state_roots.clone(), + block_roots: self.block_roots().clone(), + state_roots: self.state_roots().clone(), } } @@ -311,21 +422,17 @@ impl BeaconState { /// otherwise returns `None`. pub fn get_validator_index(&mut self, pubkey: &PublicKeyBytes) -> Result, Error> { self.update_pubkey_cache()?; - Ok(self.pubkey_cache.get(pubkey)) + Ok(self.pubkey_cache().get(pubkey)) } - /// The epoch corresponding to `self.slot`. - /// - /// Spec v0.12.1 + /// The epoch corresponding to `self.slot()`. pub fn current_epoch(&self) -> Epoch { - self.slot.epoch(T::slots_per_epoch()) + self.slot().epoch(T::slots_per_epoch()) } /// The epoch prior to `self.current_epoch()`. /// /// If the current epoch is the genesis epoch, the genesis_epoch is returned. - /// - /// Spec v0.12.1 pub fn previous_epoch(&self) -> Epoch { let current_epoch = self.current_epoch(); if current_epoch > T::genesis_epoch() { @@ -379,8 +486,6 @@ impl BeaconState { /// Returns the active validator indices for the given epoch. /// /// Does not utilize the cache, performs a full iteration over the validator registry. 
- /// - /// Spec v0.12.1 pub fn get_active_validator_indices( &self, epoch: Epoch, @@ -389,7 +494,7 @@ impl BeaconState { if epoch >= self.compute_activation_exit_epoch(self.current_epoch(), spec)? { Err(BeaconStateError::EpochOutOfBounds) } else { - Ok(get_active_validator_indices(&self.validators, epoch)) + Ok(get_active_validator_indices(self.validators(), epoch)) } } @@ -455,7 +560,7 @@ impl BeaconState { /// shuffling. It should be set to the latest block applied to `self` or the genesis block root. pub fn proposer_shuffling_decision_root(&self, block_root: Hash256) -> Result { let decision_slot = self.proposer_shuffling_decision_slot(); - if self.slot == decision_slot { + if self.slot() == decision_slot { Ok(block_root) } else { self.get_block_root(decision_slot).map(|root| *root) @@ -483,7 +588,7 @@ impl BeaconState { relative_epoch: RelativeEpoch, ) -> Result { let decision_slot = self.attester_shuffling_decision_slot(relative_epoch); - if self.slot == decision_slot { + if self.slot() == decision_slot { Ok(block_root) } else { self.get_block_root(decision_slot).map(|root| *root) @@ -503,9 +608,6 @@ impl BeaconState { } /// Compute the proposer (not necessarily for the Beacon chain) from a list of indices. - /// - /// Spec v0.12.1 - // NOTE: be sure to test this bad boy. pub fn compute_proposer_index( &self, indices: &[usize], @@ -518,20 +620,18 @@ impl BeaconState { let mut i = 0; loop { - let candidate_index = indices[compute_shuffled_index( + let shuffled_index = compute_shuffled_index( i.safe_rem(indices.len())?, indices.len(), seed, spec.shuffle_round_count, ) - .ok_or(Error::UnableToShuffle)?]; - let random_byte = { - let mut preimage = seed.to_vec(); - preimage.append(&mut int_to_bytes8(i.safe_div(32)? as u64)); - let hash = hash(&preimage); - hash[i.safe_rem(32)?] 
- }; - let effective_balance = self.validators[candidate_index].effective_balance; + .ok_or(Error::UnableToShuffle)?; + let candidate_index = *indices + .get(shuffled_index) + .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + let random_byte = Self::shuffling_random_byte(i, seed)?; + let effective_balance = self.get_effective_balance(candidate_index)?; if effective_balance.safe_mul(MAX_RANDOM_BYTE)? >= spec .max_effective_balance @@ -543,6 +643,19 @@ impl BeaconState { } } + /// Get a random byte from the given `seed`. + /// + /// Used by the proposer & sync committee selection functions. + fn shuffling_random_byte(i: usize, seed: &[u8]) -> Result { + let mut preimage = seed.to_vec(); + preimage.append(&mut int_to_bytes8(i.safe_div(32)? as u64)); + let index = i.safe_rem(32)?; + hash(&preimage) + .get(index) + .copied() + .ok_or(Error::ShuffleIndexOutOfBounds(index)) + } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. /// /// Spec v0.12.1 @@ -560,9 +673,10 @@ impl BeaconState { ); let signature_hash = hash(&slot_signature.as_ssz_bytes()); let signature_hash_int = u64::from_le_bytes( - signature_hash[0..8] - .try_into() - .expect("first 8 bytes of signature should always convert to fixed array"), + signature_hash + .get(0..8) + .and_then(|bytes| bytes.try_into().ok()) + .ok_or(Error::IsAggregatorOutOfBounds)?, ); Ok(signature_hash_int.safe_rem(modulo)? == 0) @@ -606,7 +720,7 @@ impl BeaconState { /// Compute the seed to use for the beacon proposer selection at the given `slot`. /// /// Spec v0.12.1 - fn get_beacon_proposer_seed(&self, slot: Slot, spec: &ChainSpec) -> Result, Error> { + pub fn get_beacon_proposer_seed(&self, slot: Slot, spec: &ChainSpec) -> Result, Error> { let epoch = slot.epoch(T::slots_per_epoch()); let mut preimage = self .get_seed(epoch, Domain::BeaconProposer, spec)? 
@@ -616,18 +730,94 @@ impl BeaconState { Ok(hash(&preimage)) } + /// Get the validator indices of all validators from `sync_committee`. + pub fn get_sync_committee_indices( + &mut self, + sync_committee: &SyncCommittee, + ) -> Result, Error> { + sync_committee + .pubkeys + .iter() + .map(|pubkey| { + self.get_validator_index(&pubkey)? + .ok_or(Error::PubkeyCacheInconsistent) + }) + .collect() + } + + /// Compute the sync committee indices for the next sync committee. + fn get_next_sync_committee_indices(&self, spec: &ChainSpec) -> Result, Error> { + let epoch = self.current_epoch().safe_add(1)?; + + let active_validator_indices = self.get_active_validator_indices(epoch, spec)?; + let active_validator_count = active_validator_indices.len(); + + let seed = self.get_seed(epoch, Domain::SyncCommittee, spec)?; + + let mut i = 0; + let mut sync_committee_indices = Vec::with_capacity(T::SyncCommitteeSize::to_usize()); + while sync_committee_indices.len() < T::SyncCommitteeSize::to_usize() { + let shuffled_index = compute_shuffled_index( + i.safe_rem(active_validator_count)?, + active_validator_count, + seed.as_bytes(), + spec.shuffle_round_count, + ) + .ok_or(Error::UnableToShuffle)?; + let candidate_index = *active_validator_indices + .get(shuffled_index) + .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + let random_byte = Self::shuffling_random_byte(i, seed.as_bytes())?; + let effective_balance = self.get_validator(candidate_index)?.effective_balance; + if effective_balance.safe_mul(MAX_RANDOM_BYTE)? + >= spec + .max_effective_balance + .safe_mul(u64::from(random_byte))? + { + sync_committee_indices.push(candidate_index); + } + i.safe_add_assign(1)?; + } + Ok(sync_committee_indices) + } + + /// Compute the next sync committee. 
+ pub fn get_next_sync_committee(&self, spec: &ChainSpec) -> Result, Error> { + let sync_committee_indices = self.get_next_sync_committee_indices(spec)?; + + let pubkeys = sync_committee_indices + .iter() + .map(|&index| { + self.validators() + .get(index) + .map(|v| v.pubkey) + .ok_or(Error::UnknownValidator(index)) + }) + .collect::, _>>()?; + let decompressed_pubkeys = pubkeys + .iter() + .map(|pk| pk.decompress()) + .collect::, _>>()?; + let aggregate_pubkey = AggregatePublicKey::aggregate(&decompressed_pubkeys)?; + + Ok(SyncCommittee { + pubkeys: FixedVector::new(pubkeys)?, + aggregate_pubkey: aggregate_pubkey.to_public_key().compress(), + }) + } + /// Get the canonical root of the `latest_block_header`, filling in its state root if necessary. /// /// It needs filling in on all slots where there isn't a skip. /// /// Spec v0.12.1 pub fn get_latest_block_root(&self, current_state_root: Hash256) -> Hash256 { - if self.latest_block_header.state_root.is_zero() { - let mut latest_block_header = self.latest_block_header.clone(); + if self.latest_block_header().state_root.is_zero() { + let mut latest_block_header = self.latest_block_header().clone(); latest_block_header.state_root = current_state_root; latest_block_header.canonical_root() } else { - self.latest_block_header.canonical_root() + self.latest_block_header().canonical_root() } } @@ -635,8 +825,8 @@ impl BeaconState { /// /// Spec v0.12.1 fn get_latest_block_roots_index(&self, slot: Slot) -> Result { - if slot < self.slot && self.slot <= slot.safe_add(self.block_roots.len() as u64)? { - Ok(slot.as_usize().safe_rem(self.block_roots.len())?) + if slot < self.slot() && self.slot() <= slot.safe_add(self.block_roots().len() as u64)? { + Ok(slot.as_usize().safe_rem(self.block_roots().len())?) } else { Err(BeaconStateError::SlotOutOfBounds) } @@ -650,37 +840,37 @@ impl BeaconState { } /// Return the block root at a recent `slot`. 
- /// - /// Spec v0.12.1 pub fn get_block_root(&self, slot: Slot) -> Result<&Hash256, BeaconStateError> { let i = self.get_latest_block_roots_index(slot)?; - Ok(&self.block_roots[i]) + self.block_roots() + .get(i) + .ok_or(Error::BlockRootsOutOfBounds(i)) } /// Return the block root at a recent `epoch`. /// - /// Spec v0.12.1 - // NOTE: the spec calls this get_block_root + /// Note that the spec calls this `get_block_root`. pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> { self.get_block_root(epoch.start_slot(T::slots_per_epoch())) } /// Sets the block root for some given slot. - /// - /// Spec v0.12.1 pub fn set_block_root( &mut self, slot: Slot, block_root: Hash256, ) -> Result<(), BeaconStateError> { let i = self.get_latest_block_roots_index(slot)?; - self.block_roots[i] = block_root; + *self + .block_roots_mut() + .get_mut(i) + .ok_or(Error::BlockRootsOutOfBounds(i))? = block_root; Ok(()) } /// Fill `randao_mixes` with pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) { - self.randao_mixes = FixedVector::from_elem(index_root); + *self.randao_mixes_mut() = FixedVector::from_elem(index_root); } /// Safely obtains the index for `randao_mixes` @@ -708,8 +898,6 @@ impl BeaconState { /// # Errors: /// /// See `Self::get_randao_mix`. - /// - /// Spec v0.12.1 pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> { let i = epoch .as_usize() @@ -717,17 +905,21 @@ impl BeaconState { let signature_hash = Hash256::from_slice(&hash(&ssz_encode(signature))); - self.randao_mixes[i] = *self.get_randao_mix(epoch)? ^ signature_hash; + *self + .randao_mixes_mut() + .get_mut(i) + .ok_or(Error::RandaoMixesOutOfBounds(i))? = + *self.get_randao_mix(epoch)? ^ signature_hash; Ok(()) } /// Return the randao mix at a recent ``epoch``. 
- /// - /// Spec v0.12.1 pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::False)?; - Ok(&self.randao_mixes[i]) + self.randao_mixes() + .get(i) + .ok_or(Error::RandaoMixesOutOfBounds(i)) } /// Set the randao mix at a recent ``epoch``. @@ -735,7 +927,10 @@ impl BeaconState { /// Spec v0.12.1 pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::True)?; - self.randao_mixes[i] = mix; + *self + .randao_mixes_mut() + .get_mut(i) + .ok_or(Error::RandaoMixesOutOfBounds(i))? = mix; Ok(()) } @@ -743,60 +938,44 @@ impl BeaconState { /// /// Spec v0.12.1 fn get_latest_state_roots_index(&self, slot: Slot) -> Result { - if slot < self.slot && self.slot <= slot.safe_add(self.state_roots.len() as u64)? { - Ok(slot.as_usize().safe_rem(self.state_roots.len())?) + if slot < self.slot() && self.slot() <= slot.safe_add(self.state_roots().len() as u64)? { + Ok(slot.as_usize().safe_rem(self.state_roots().len())?) } else { Err(BeaconStateError::SlotOutOfBounds) } } /// Gets the state root for some slot. - /// - /// Spec v0.12.1 pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, Error> { let i = self.get_latest_state_roots_index(slot)?; - Ok(&self.state_roots[i]) + self.state_roots() + .get(i) + .ok_or(Error::StateRootsOutOfBounds(i)) } /// Gets the oldest (earliest slot) state root. - /// - /// Spec v0.12.1 pub fn get_oldest_state_root(&self) -> Result<&Hash256, Error> { - let i = - self.get_latest_state_roots_index(self.slot.saturating_sub(self.state_roots.len()))?; - Ok(&self.state_roots[i]) + let oldest_slot = self.slot().saturating_sub(self.state_roots().len()); + self.get_state_root(oldest_slot) } /// Gets the oldest (earliest slot) block root. 
- /// - /// Spec v0.12.1 pub fn get_oldest_block_root(&self) -> Result<&Hash256, Error> { - let i = self.get_latest_block_roots_index( - self.slot.saturating_sub(self.block_roots.len() as u64), - )?; - Ok(&self.block_roots[i]) - } - - pub fn get_block_state_roots( - &self, - slot: Slot, - ) -> Result<(SignedBeaconBlockHash, BeaconStateHash), Error> { - let i = self.get_latest_block_roots_index(slot)?; - Ok((self.block_roots[i].into(), self.state_roots[i].into())) + let oldest_slot = self.slot().saturating_sub(self.block_roots().len()); + self.get_block_root(oldest_slot) } /// Sets the latest state root for slot. - /// - /// Spec v0.12.1 pub fn set_state_root(&mut self, slot: Slot, state_root: Hash256) -> Result<(), Error> { let i = self.get_latest_state_roots_index(slot)?; - self.state_roots[i] = state_root; + *self + .state_roots_mut() + .get_mut(i) + .ok_or(Error::StateRootsOutOfBounds(i))? = state_root; Ok(()) } /// Safely obtain the index for `slashings`, given some `epoch`. - /// - /// Spec v0.12.1 fn get_slashings_index( &self, epoch: Epoch, @@ -817,48 +996,38 @@ impl BeaconState { } /// Get a reference to the entire `slashings` vector. - /// - /// Spec v0.12.1 pub fn get_all_slashings(&self) -> &[u64] { - &self.slashings + self.slashings() } /// Get the total slashed balances for some epoch. - /// - /// Spec v0.12.1 pub fn get_slashings(&self, epoch: Epoch) -> Result { let i = self.get_slashings_index(epoch, AllowNextEpoch::False)?; - Ok(self.slashings[i]) + self.slashings() + .get(i) + .copied() + .ok_or(Error::SlashingsOutOfBounds(i)) } /// Set the total slashed balances for some epoch. - /// - /// Spec v0.12.1 pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> { let i = self.get_slashings_index(epoch, AllowNextEpoch::True)?; - self.slashings[i] = value; + *self + .slashings_mut() + .get_mut(i) + .ok_or(Error::SlashingsOutOfBounds(i))? = value; Ok(()) } - /// Get the attestations from the current or previous epoch. 
- /// - /// Spec v0.12.1 - pub fn get_matching_source_attestations( - &self, - epoch: Epoch, - ) -> Result<&[PendingAttestation], Error> { - if epoch == self.current_epoch() { - Ok(&self.current_epoch_attestations) - } else if epoch == self.previous_epoch() { - Ok(&self.previous_epoch_attestations) - } else { - Err(Error::EpochOutOfBounds) + /// Convenience accessor for validators and balances simultaneously. + pub fn validators_and_balances_mut(&mut self) -> (&mut [Validator], &mut [u64]) { + match self { + BeaconState::Base(state) => (&mut state.validators, &mut state.balances), + BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), } } /// Generate a seed for the given `epoch`. - /// - /// Spec v0.12.1 pub fn get_seed( &self, epoch: Epoch, @@ -872,7 +1041,10 @@ impl BeaconState { .safe_add(T::EpochsPerHistoricalVector::to_u64())? .safe_sub(spec.min_seed_lookahead)? .safe_sub(1)?; - self.randao_mixes[i.as_usize().safe_rem(self.randao_mixes.len())?] + let i_mod = i.as_usize().safe_rem(self.randao_mixes().len())?; + self.randao_mixes() + .get(i_mod) + .ok_or(Error::RandaoMixesOutOfBounds(i_mod))? }; let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type)); let epoch_bytes = int_to_bytes8(epoch.as_u64()); @@ -890,18 +1062,50 @@ impl BeaconState { Ok(Hash256::from_slice(&hash(&preimage))) } - /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. - /// - /// Spec v0.12.1 - pub fn get_effective_balance( - &self, - validator_index: usize, - _spec: &ChainSpec, - ) -> Result { - self.validators + /// Safe indexer for the `validators` list. + pub fn get_validator(&self, validator_index: usize) -> Result<&Validator, Error> { + self.validators() .get(validator_index) + .ok_or(Error::UnknownValidator(validator_index)) + } + + /// Safe mutator for the `validators` list. 
+ pub fn get_validator_mut(&mut self, validator_index: usize) -> Result<&mut Validator, Error> { + self.validators_mut() + .get_mut(validator_index) + .ok_or(Error::UnknownValidator(validator_index)) + } + + /// Return the effective balance for a validator with the given `validator_index`. + pub fn get_effective_balance(&self, validator_index: usize) -> Result { + self.get_validator(validator_index) .map(|v| v.effective_balance) - .ok_or_else(|| Error::UnknownValidator(validator_index as u64)) + } + + /// Get the inactivity score for a single validator. + /// + /// Will error if the state lacks an `inactivity_scores` field. + pub fn get_inactivity_score(&self, validator_index: usize) -> Result { + self.inactivity_scores()? + .get(validator_index) + .copied() + .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + } + + /// Get a mutable reference to the inactivity score for a single validator. + /// + /// Will error if the state lacks an `inactivity_scores` field. + pub fn get_inactivity_score_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + self.inactivity_scores_mut()? + .get_mut(validator_index) + .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + } + + /// Get a mutable reference to the balance of a single validator. + pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + self.balances_mut() + .get_mut(validator_index) + .ok_or(Error::BalancesOutOfBounds(validator_index)) } /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. @@ -946,38 +1150,71 @@ impl BeaconState { Ok(cache.get_attestation_duties(validator_index)) } - /// Return the combined effective balance of an array of validators. + /// Implementation of `get_total_balance`, matching the spec. /// - /// Spec v0.12.1 - pub fn get_total_balance( - &self, - validator_indices: &[usize], + /// Returns minimum `EFFECTIVE_BALANCE_INCREMENT`, to avoid div by 0. 
+ pub fn get_total_balance<'a, I: IntoIterator>( + &'a self, + validator_indices: I, spec: &ChainSpec, ) -> Result { - validator_indices.iter().try_fold(0_u64, |acc, i| { - self.get_effective_balance(*i, spec) + let total_balance = validator_indices.into_iter().try_fold(0_u64, |acc, i| { + self.get_effective_balance(*i) .and_then(|bal| Ok(acc.safe_add(bal)?)) - }) + })?; + Ok(std::cmp::max( + total_balance, + spec.effective_balance_increment, + )) + } + + /// Implementation of `get_total_active_balance`, matching the spec. + pub fn get_total_active_balance(&self, spec: &ChainSpec) -> Result { + // Order is irrelevant, so use the cached indices. + self.get_total_balance( + self.get_cached_active_validator_indices(RelativeEpoch::Current)?, + spec, + ) + } + + /// Get a mutable reference to the epoch participation flags for `epoch`. + pub fn get_epoch_participation_mut( + &mut self, + epoch: Epoch, + ) -> Result<&mut VariableList, Error> { + if epoch == self.current_epoch() { + match self { + BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), + BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), + } + } else if epoch == self.previous_epoch() { + match self { + BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), + BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), + } + } else { + Err(BeaconStateError::EpochOutOfBounds) + } } /// Get the number of outstanding deposits. /// /// Returns `Err` if the state is invalid. pub fn get_outstanding_deposit_len(&self) -> Result { - self.eth1_data + self.eth1_data() .deposit_count - .checked_sub(self.eth1_deposit_index) + .checked_sub(self.eth1_deposit_index()) .ok_or(Error::InvalidDepositState { - deposit_count: self.eth1_data.deposit_count, - deposit_index: self.eth1_deposit_index, + deposit_count: self.eth1_data().deposit_count, + deposit_index: self.eth1_deposit_index(), }) } - /// Build all the caches, if they need to be built. 
+ /// Build all caches (except the tree hash cache), if they need to be built. pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { self.build_all_committee_caches(spec)?; self.update_pubkey_cache()?; - self.exit_cache.build(&self.validators, spec)?; + self.build_exit_cache(spec)?; Ok(()) } @@ -990,21 +1227,32 @@ impl BeaconState { Ok(()) } + /// Build the exit cache, if it needs to be built. + pub fn build_exit_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { + if self.exit_cache().check_initialized().is_err() { + *self.exit_cache_mut() = ExitCache::new(self.validators(), spec)?; + } + Ok(()) + } + /// Drop all caches on the state. - pub fn drop_all_caches(&mut self) { - self.drop_committee_cache(RelativeEpoch::Previous); - self.drop_committee_cache(RelativeEpoch::Current); - self.drop_committee_cache(RelativeEpoch::Next); + pub fn drop_all_caches(&mut self) -> Result<(), Error> { + self.drop_committee_cache(RelativeEpoch::Previous)?; + self.drop_committee_cache(RelativeEpoch::Current)?; + self.drop_committee_cache(RelativeEpoch::Next)?; self.drop_pubkey_cache(); self.drop_tree_hash_cache(); - self.exit_cache = ExitCache::default(); + *self.exit_cache_mut() = ExitCache::default(); + Ok(()) } /// Returns `true` if the committee cache for `relative_epoch` is built and ready to use. pub fn committee_cache_is_initialized(&self, relative_epoch: RelativeEpoch) -> bool { let i = Self::committee_cache_index(relative_epoch); - self.committee_caches[i].is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) + self.committee_cache_at_index(i).map_or(false, |cache| { + cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) + }) } /// Build an epoch cache, unless it is has already been built. @@ -1014,10 +1262,11 @@ impl BeaconState { spec: &ChainSpec, ) -> Result<(), Error> { let i = Self::committee_cache_index(relative_epoch); + let is_initialized = self + .committee_cache_at_index(i)? 
+ .is_initialized_at(relative_epoch.into_epoch(self.current_epoch())); - if self.committee_caches[i] - .is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) - { + if is_initialized { Ok(()) } else { self.force_build_committee_cache(relative_epoch, spec) @@ -1031,9 +1280,9 @@ impl BeaconState { spec: &ChainSpec, ) -> Result<(), Error> { let epoch = relative_epoch.into_epoch(self.current_epoch()); + let i = Self::committee_cache_index(relative_epoch); - self.committee_caches[Self::committee_cache_index(relative_epoch)] = - CommitteeCache::initialized(&self, epoch, spec)?; + *self.committee_cache_at_index_mut(i)? = CommitteeCache::initialized(&self, epoch, spec)?; Ok(()) } @@ -1042,12 +1291,12 @@ impl BeaconState { /// This should be used if the `slot` of this state is advanced beyond an epoch boundary. /// /// Note: whilst this function will preserve already-built caches, it will not build any. - pub fn advance_caches(&mut self) { - let caches = &mut self.committee_caches[..]; - caches.rotate_left(1); + pub fn advance_caches(&mut self) -> Result<(), Error> { + self.committee_caches_mut().rotate_left(1); let next = Self::committee_cache_index(RelativeEpoch::Next); - caches[next] = CommitteeCache::default(); + *self.committee_cache_at_index_mut(next)? = CommitteeCache::default(); + Ok(()) } fn committee_cache_index(relative_epoch: RelativeEpoch) -> usize { @@ -1067,10 +1316,25 @@ impl BeaconState { self.committee_cache(relative_epoch) } + /// Get the committee cache at a given index. + fn committee_cache_at_index(&self, index: usize) -> Result<&CommitteeCache, Error> { + self.committee_caches() + .get(index) + .ok_or(Error::CommitteeCachesOutOfBounds(index)) + } + + /// Get a mutable reference to the committee cache at a given index. 
+ fn committee_cache_at_index_mut(&mut self, index: usize) -> Result<&mut CommitteeCache, Error> { + self.committee_caches_mut() + .get_mut(index) + .ok_or(Error::CommitteeCachesOutOfBounds(index)) + } + /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been /// initialized. pub fn committee_cache(&self, relative_epoch: RelativeEpoch) -> Result<&CommitteeCache, Error> { - let cache = &self.committee_caches[Self::committee_cache_index(relative_epoch)]; + let i = Self::committee_cache_index(relative_epoch); + let cache = self.committee_cache_at_index(i)?; if cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) { Ok(cache) @@ -1080,9 +1344,10 @@ impl BeaconState { } /// Drops the cache, leaving it in an uninitialized state. - fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) { - self.committee_caches[Self::committee_cache_index(relative_epoch)] = + pub fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) -> Result<(), Error> { + *self.committee_cache_at_index_mut(Self::committee_cache_index(relative_epoch))? = CommitteeCache::default(); + Ok(()) } /// Updates the pubkey cache, if required. @@ -1090,30 +1355,32 @@ impl BeaconState { /// Adds all `pubkeys` from the `validators` which are not already in the cache. Will /// never re-add a pubkey. pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { + let mut pubkey_cache = mem::take(self.pubkey_cache_mut()); for (i, validator) in self - .validators + .validators() .iter() .enumerate() - .skip(self.pubkey_cache.len()) + .skip(pubkey_cache.len()) { - let success = self.pubkey_cache.insert(validator.pubkey, i); + let success = pubkey_cache.insert(validator.pubkey, i); if !success { return Err(Error::PubkeyCacheInconsistent); } } + *self.pubkey_cache_mut() = pubkey_cache; Ok(()) } /// Completely drops the `pubkey_cache`, replacing it with a new, empty cache. 
pub fn drop_pubkey_cache(&mut self) { - self.pubkey_cache = PubkeyCache::default() + *self.pubkey_cache_mut() = PubkeyCache::default() } /// Initialize but don't fill the tree hash cache, if it isn't already initialized. pub fn initialize_tree_hash_cache(&mut self) { - if self.tree_hash_cache.is_none() { - self.tree_hash_cache = Some(BeaconTreeHashCache::new(self)) + if !self.tree_hash_cache().is_initialized() { + *self.tree_hash_cache_mut() = BeaconTreeHashCache::new(self) } } @@ -1123,13 +1390,13 @@ impl BeaconState { pub fn update_tree_hash_cache(&mut self) -> Result { self.initialize_tree_hash_cache(); - let cache = self.tree_hash_cache.take(); + let cache = self.tree_hash_cache_mut().take(); if let Some(mut cache) = cache { // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as // None. There's no need to keep a cache that fails. let root = cache.recalculate_tree_hash_root(&self)?; - self.tree_hash_cache = Some(cache); + self.tree_hash_cache_mut().restore(cache); Ok(root) } else { Err(Error::TreeHashCacheNotInitialized) @@ -1142,13 +1409,13 @@ impl BeaconState { pub fn update_validators_tree_hash_cache(&mut self) -> Result { self.initialize_tree_hash_cache(); - let cache = self.tree_hash_cache.take(); + let cache = self.tree_hash_cache_mut().take(); if let Some(mut cache) = cache { // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as // None. There's no need to keep a cache that fails. - let root = cache.recalculate_validators_tree_hash_root(&self.validators)?; - self.tree_hash_cache = Some(cache); + let root = cache.recalculate_validators_tree_hash_root(self.validators())?; + self.tree_hash_cache_mut().restore(cache); Ok(root) } else { Err(Error::TreeHashCacheNotInitialized) @@ -1157,80 +1424,95 @@ impl BeaconState { /// Completely drops the tree hash cache, replacing it with a new, empty cache. 
pub fn drop_tree_hash_cache(&mut self) { - self.tree_hash_cache = None; + self.tree_hash_cache_mut().uninitialize(); } /// Clone the state whilst preserving only the selected caches. pub fn clone_with(&self, config: CloneConfig) -> Self { - BeaconState { - genesis_time: self.genesis_time, - genesis_validators_root: self.genesis_validators_root, - slot: self.slot, - fork: self.fork, - latest_block_header: self.latest_block_header.clone(), - block_roots: self.block_roots.clone(), - state_roots: self.state_roots.clone(), - historical_roots: self.historical_roots.clone(), - eth1_data: self.eth1_data.clone(), - eth1_data_votes: self.eth1_data_votes.clone(), - eth1_deposit_index: self.eth1_deposit_index, - validators: self.validators.clone(), - balances: self.balances.clone(), - randao_mixes: self.randao_mixes.clone(), - slashings: self.slashings.clone(), - previous_epoch_attestations: self.previous_epoch_attestations.clone(), - current_epoch_attestations: self.current_epoch_attestations.clone(), - justification_bits: self.justification_bits.clone(), - previous_justified_checkpoint: self.previous_justified_checkpoint, - current_justified_checkpoint: self.current_justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - committee_caches: if config.committee_caches { - self.committee_caches.clone() - } else { - [ - CommitteeCache::default(), - CommitteeCache::default(), - CommitteeCache::default(), - ] - }, - pubkey_cache: if config.pubkey_cache { - self.pubkey_cache.clone() - } else { - PubkeyCache::default() - }, - exit_cache: if config.exit_cache { - self.exit_cache.clone() - } else { - ExitCache::default() - }, - tree_hash_cache: if config.tree_hash_cache { - self.tree_hash_cache.clone() - } else { - None - }, + let mut res = match self { + BeaconState::Base(inner) => BeaconState::Base(inner.clone()), + BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), + }; + if config.committee_caches { + *res.committee_caches_mut() = 
self.committee_caches().clone(); + } + if config.pubkey_cache { + *res.pubkey_cache_mut() = self.pubkey_cache().clone(); } + if config.exit_cache { + *res.exit_cache_mut() = self.exit_cache().clone(); + } + if config.tree_hash_cache { + *res.tree_hash_cache_mut() = self.tree_hash_cache().clone(); + } + res } pub fn clone_with_only_committee_caches(&self) -> Self { self.clone_with(CloneConfig::committee_caches_only()) } -} -/// This implementation primarily exists to satisfy some testing requirements (ef_tests). It is -/// recommended to use the methods directly on the beacon state instead. -impl CachedTreeHash> for BeaconState { - fn new_tree_hash_cache(&self, _arena: &mut CacheArena) -> BeaconTreeHashCache { - BeaconTreeHashCache::new(self) + /// Get the unslashed participating indices for a given `flag_index`. + /// + /// The `self` state must be Altair or later. + pub fn get_unslashed_participating_indices( + &self, + flag_index: usize, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result, Error> { + let epoch_participation = if epoch == self.current_epoch() { + self.current_epoch_participation()? + } else if epoch == self.previous_epoch() { + self.previous_epoch_participation()? + } else { + return Err(Error::EpochOutOfBounds); + }; + let active_validator_indices = self.get_active_validator_indices(epoch, spec)?; + itertools::process_results( + active_validator_indices.into_iter().map(|val_index| { + let has_flag = epoch_participation + .get(val_index) + .ok_or(Error::ParticipationOutOfBounds(val_index))? 
+ .has_flag(flag_index)?; + let not_slashed = !self.get_validator(val_index)?.slashed; + Ok((val_index, has_flag && not_slashed)) + }), + |iter| { + iter.filter(|(_, eligible)| *eligible) + .map(|(validator_index, _)| validator_index) + .collect() + }, + ) } - fn recalculate_tree_hash_root( - &self, - _arena: &mut CacheArena, - cache: &mut BeaconTreeHashCache, - ) -> Result { - cache - .recalculate_tree_hash_root(self) - .map_err(|_| cached_tree_hash::Error::CacheInconsistent) + pub fn get_eligible_validator_indices(&self) -> Result, Error> { + match self { + BeaconState::Base(_) => Err(Error::IncorrectStateVariant), + BeaconState::Altair(_) => { + let previous_epoch = self.previous_epoch(); + Ok(self + .validators() + .iter() + .enumerate() + .filter_map(|(i, val)| { + if val.is_active_at(previous_epoch) + || (val.slashed + && previous_epoch + Epoch::new(1) < val.withdrawable_epoch) + { + Some(i) + } else { + None + } + }) + .collect()) + } + } + } + + pub fn is_in_inactivity_leak(&self, spec: &ChainSpec) -> bool { + (self.previous_epoch() - self.finalized_checkpoint().epoch) + > spec.min_epochs_to_inactivity_penalty } } @@ -1246,6 +1528,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: bls::Error) -> Error { + Error::BlsError(e) + } +} + impl From for Error { fn from(e: cached_tree_hash::Error) -> Error { Error::CachedTreeHashError(e) @@ -1264,45 +1552,17 @@ impl From for Error { } } -#[cfg(feature = "arbitrary-fuzz")] -impl arbitrary::Arbitrary for BeaconState { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - Ok(Self { - genesis_time: u64::arbitrary(u)?, - genesis_validators_root: Hash256::arbitrary(u)?, - slot: Slot::arbitrary(u)?, - fork: Fork::arbitrary(u)?, - latest_block_header: BeaconBlockHeader::arbitrary(u)?, - block_roots: >::arbitrary(u)?, - state_roots: >::arbitrary(u)?, - historical_roots: >::arbitrary(u)?, - eth1_data: Eth1Data::arbitrary(u)?, - eth1_data_votes: >::arbitrary(u)?, - eth1_deposit_index: 
u64::arbitrary(u)?, - validators: >::arbitrary(u)?, - balances: >::arbitrary(u)?, - randao_mixes: >::arbitrary(u)?, - slashings: >::arbitrary(u)?, - previous_epoch_attestations: , - T::MaxPendingAttestations, - >>::arbitrary(u)?, - current_epoch_attestations: , - T::MaxPendingAttestations, - >>::arbitrary(u)?, - justification_bits: >::arbitrary(u)?, - previous_justified_checkpoint: Checkpoint::arbitrary(u)?, - current_justified_checkpoint: Checkpoint::arbitrary(u)?, - finalized_checkpoint: Checkpoint::arbitrary(u)?, - committee_caches: [ - CommitteeCache::arbitrary(u)?, - CommitteeCache::arbitrary(u)?, - CommitteeCache::arbitrary(u)?, - ], - pubkey_cache: PubkeyCache::arbitrary(u)?, - exit_cache: ExitCache::arbitrary(u)?, - tree_hash_cache: None, - }) +/// Helper function for "cloning" a field by using its default value. +fn clone_default(_value: &T) -> T { + T::default() +} + +impl CompareFields for BeaconState { + fn compare_fields(&self, other: &Self) -> Vec { + match (self, other) { + (BeaconState::Base(x), BeaconState::Base(y)) => x.compare_fields(y), + (BeaconState::Altair(x), BeaconState::Altair(y)) => x.compare_fields(y), + _ => panic!("compare_fields: mismatched state variants"), + } } } diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 0d435f16839..9c8f428d83e 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -39,7 +39,7 @@ impl CommitteeCache { return Err(Error::ZeroSlotsPerEpoch); } - let active_validator_indices = get_active_validator_indices(&state.validators, epoch); + let active_validator_indices = get_active_validator_indices(state.validators(), epoch); if active_validator_indices.is_empty() { return Err(Error::InsufficientValidators); @@ -59,13 +59,15 @@ impl CommitteeCache { .ok_or(Error::UnableToShuffle)?; // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. 
- if state.validators.len() == usize::max_value() { + if state.validators().len() == usize::max_value() { return Err(Error::TooManyValidators); } - let mut shuffling_positions = vec![None; state.validators.len()]; - for (i, v) in shuffling.iter().enumerate() { - shuffling_positions[*v] = NonZeroUsize::new(i + 1); + let mut shuffling_positions = vec![None; state.validators().len()]; + for (i, &v) in shuffling.iter().enumerate() { + *shuffling_positions + .get_mut(v) + .ok_or(Error::ShuffleIndexOutOfBounds(v))? = NonZeroUsize::new(i + 1); } Ok(CommitteeCache { @@ -229,7 +231,7 @@ impl CommitteeCache { /// /// Spec v0.12.1 fn compute_committee(&self, index: usize) -> Option<&[usize]> { - Some(&self.shuffling[self.compute_committee_range(index)?]) + self.shuffling.get(self.compute_committee_range(index)?) } /// Returns a range of `self.shuffling` that represents the `index`'th committee in the epoch. @@ -255,7 +257,7 @@ impl CommitteeCache { /// Returns the index of some validator in `self.shuffling`. /// /// Always returns `None` for a non-initialized epoch. - fn shuffled_position(&self, validator_index: usize) -> Option { + pub fn shuffled_position(&self, validator_index: usize) -> Option { self.shuffling_positions .get(validator_index)? .and_then(|p| Some(p.get() - 1)) diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index 94e6c2c8493..e4a7ccf4616 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -1,6 +1,27 @@ #![cfg(test)] -use super::*; -use crate::{test_utils::*, *}; +use crate::test_utils::*; +use beacon_chain::store::StoreConfig; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use beacon_chain::types::*; +use swap_or_not_shuffle::shuffle_list; + +pub const VALIDATOR_COUNT: usize = 16; + +lazy_static! { + /// A cached set of keys. 
+ static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); +} + +fn get_harness(validator_count: usize) -> BeaconChainHarness> { + let harness = BeaconChainHarness::new_with_store_config( + E::default(), + None, + KEYPAIRS[0..validator_count].to_vec(), + StoreConfig::default(), + ); + harness.advance_slot(); + harness +} #[test] fn default_values() { @@ -16,27 +37,26 @@ fn default_values() { } fn new_state(validator_count: usize, slot: Slot) -> BeaconState { - let spec = &T::default_spec(); - - let mut builder = - TestingBeaconStateBuilder::from_single_keypair(validator_count, &Keypair::random(), spec); - - builder.teleport_to_slot(slot); - - let (state, _keypairs) = builder.build(); - - state + let harness = get_harness(validator_count); + let head_state = harness.get_current_state(); + if slot > Slot::new(0) { + harness.add_attested_blocks_at_slots( + head_state, + Hash256::zero(), + (1..slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..validator_count).collect::>().as_slice(), + ); + } + harness.get_current_state() } #[test] +#[should_panic] fn fails_without_validators() { - let state = new_state::(0, Slot::new(0)); - let spec = &MinimalEthSpec::default_spec(); - - assert_eq!( - CommitteeCache::initialized(&state, state.current_epoch(), &spec), - Err(BeaconStateError::InsufficientValidators) - ); + new_state::(0, Slot::new(0)); } #[test] @@ -45,24 +65,22 @@ fn initializes_with_the_right_epoch() { let spec = &MinimalEthSpec::default_spec(); let cache = CommitteeCache::default(); - assert_eq!(cache.initialized_epoch, None); + assert!(!cache.is_initialized_at(state.current_epoch())); let cache = CommitteeCache::initialized(&state, state.current_epoch(), &spec).unwrap(); - assert_eq!(cache.initialized_epoch, Some(state.current_epoch())); + assert!(cache.is_initialized_at(state.current_epoch())); let cache = CommitteeCache::initialized(&state, state.previous_epoch(), &spec).unwrap(); - assert_eq!(cache.initialized_epoch, 
Some(state.previous_epoch())); + assert!(cache.is_initialized_at(state.previous_epoch())); let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), &spec).unwrap(); - assert_eq!(cache.initialized_epoch, Some(state.next_epoch().unwrap())); + assert!(cache.is_initialized_at(state.next_epoch().unwrap())); } #[test] fn shuffles_for_the_right_epoch() { - use crate::EthSpec; - let num_validators = MinimalEthSpec::minimum_validator_count() * 2; - let epoch = Epoch::new(100_000_000); + let epoch = Epoch::new(6); let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch()); let mut state = new_state::(num_validators, slot); @@ -72,7 +90,7 @@ fn shuffles_for_the_right_epoch() { .map(|i| Hash256::from_low_u64_be(i as u64)) .collect(); - state.randao_mixes = FixedVector::from(distinct_hashes); + *state.randao_mixes_mut() = FixedVector::from(distinct_hashes); let previous_seed = state .get_seed(state.previous_epoch(), Domain::BeaconAttester, spec) @@ -97,9 +115,9 @@ fn shuffles_for_the_right_epoch() { }; let assert_shuffling_positions_accurate = |cache: &CommitteeCache| { - for (i, v) in cache.shuffling.iter().enumerate() { + for (i, v) in cache.shuffling().iter().enumerate() { assert_eq!( - cache.shuffling_positions[*v].unwrap().get() - 1, + cache.shuffled_position(*v).unwrap(), i, "Shuffling position inaccurate" ); @@ -107,14 +125,14 @@ fn shuffles_for_the_right_epoch() { }; let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling, shuffling_with_seed(current_seed)); + assert_eq!(cache.shuffling(), shuffling_with_seed(current_seed)); assert_shuffling_positions_accurate(&cache); let cache = CommitteeCache::initialized(&state, state.previous_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling, shuffling_with_seed(previous_seed)); + assert_eq!(cache.shuffling(), shuffling_with_seed(previous_seed)); assert_shuffling_positions_accurate(&cache); let cache = CommitteeCache::initialized(&state, 
state.next_epoch().unwrap(), spec).unwrap(); - assert_eq!(cache.shuffling, shuffling_with_seed(next_seed)); + assert_eq!(cache.shuffling(), shuffling_with_seed(next_seed)); assert_shuffling_positions_accurate(&cache); } diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index 364c1daf0d5..f1d1b498b74 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -11,22 +11,18 @@ pub struct ExitCache { } impl ExitCache { - /// Build the cache if not initialized. - pub fn build( - &mut self, - validators: &[Validator], - spec: &ChainSpec, - ) -> Result<(), BeaconStateError> { - if self.initialized { - return Ok(()); - } - - self.initialized = true; + /// Initialize a new cache for the given list of validators. + pub fn new(validators: &[Validator], spec: &ChainSpec) -> Result { + let mut exit_cache = ExitCache { + initialized: true, + ..ExitCache::default() + }; // Add all validators with a non-default exit epoch to the cache. validators .iter() .filter(|validator| validator.exit_epoch != spec.far_future_epoch) - .try_for_each(|validator| self.record_validator_exit(validator.exit_epoch)) + .try_for_each(|validator| exit_cache.record_validator_exit(validator.exit_epoch))?; + Ok(exit_cache) } /// Check that the cache is initialized and return an error if it is not. diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/beacon_state/iter.rs index 5f858dd8af8..2c00913ce96 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/beacon_state/iter.rs @@ -4,7 +4,7 @@ use crate::*; /// /// The iterator has the following characteristics: /// -/// - Will only return *at most* `state.block_roots.len()` entries. +/// - Will only return *at most* `state.block_roots().len()` entries. /// - Will not return slots prior to the genesis_slot. /// - Each call to next will result in a slot one less than the prior one (or `None`). 
/// - Skipped slots will contain the block root from the prior non-skipped slot. @@ -22,7 +22,7 @@ impl<'a, T: EthSpec> BlockRootsIter<'a, T> { Self { state, genesis_slot, - prev: state.slot, + prev: state.slot(), } } } @@ -35,8 +35,8 @@ impl<'a, T: EthSpec> Iterator for BlockRootsIter<'a, T> { && self.prev > self .state - .slot - .saturating_sub(self.state.block_roots.len() as u64) + .slot() + .saturating_sub(self.state.block_roots().len() as u64) { self.prev = self.prev.saturating_sub(1_u64); Some( @@ -73,12 +73,13 @@ mod test { let mut state: BeaconState = BeaconState::new(0, <_>::default(), &spec); - for i in 0..state.block_roots.len() { - state.block_roots[i] = root_slot(i).1; + for i in 0..state.block_roots().len() { + state.block_roots_mut()[i] = root_slot(i).1; } assert_eq!( - state.slot, spec.genesis_slot, + state.slot(), + spec.genesis_slot, "test assume a genesis slot state" ); assert_eq!( @@ -87,22 +88,22 @@ mod test { "state at genesis slot has no history" ); - state.slot = Slot::new(1); + *state.slot_mut() = Slot::new(1); assert_eq!( all_roots(&state, &spec), vec![root_slot(0)], "first slot after genesis has one slot history" ); - state.slot = Slot::new(2); + *state.slot_mut() = Slot::new(2); assert_eq!( all_roots(&state, &spec), vec![root_slot(1), root_slot(0)], "second slot after genesis has two slot history" ); - state.slot = Slot::from(state.block_roots.len() + 2); - let expected = (2..state.block_roots.len() + 2) + *state.slot_mut() = Slot::from(state.block_roots().len() + 2); + let expected = (2..state.block_roots().len() + 2) .rev() .map(|i| (Slot::from(i), *state.get_block_root(Slot::from(i)).unwrap())) .collect::>(); @@ -120,12 +121,13 @@ mod test { let mut state: BeaconState = BeaconState::new(0, <_>::default(), &spec); - for i in 0..state.block_roots.len() { - state.block_roots[i] = root_slot(i).1; + for i in 0..state.block_roots().len() { + state.block_roots_mut()[i] = root_slot(i).1; } assert_eq!( - state.slot, spec.genesis_slot, + 
state.slot(), + spec.genesis_slot, "test assume a genesis slot state" ); assert_eq!( @@ -134,14 +136,14 @@ mod test { "state at genesis slot has no history" ); - state.slot = Slot::new(5); + *state.slot_mut() = Slot::new(5); assert_eq!( all_roots(&state, &spec), vec![root_slot(4)], "first slot after genesis has one slot history" ); - state.slot = Slot::new(6); + *state.slot_mut() = Slot::new(6); assert_eq!( all_roots(&state, &spec), vec![root_slot(5), root_slot(4)], diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index c14bd94fcd0..ff7c503f249 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -1,23 +1,60 @@ #![cfg(test)] -use super::*; use crate::test_utils::*; +use beacon_chain::store::config::StoreConfig; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use beacon_chain::types::{ + test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, + ChainSpec, CloneConfig, Domain, Epoch, EthSpec, FixedVector, Hash256, Keypair, MainnetEthSpec, + MinimalEthSpec, RelativeEpoch, Slot, +}; +use ssz::{Decode, Encode}; use std::ops::Mul; +use swap_or_not_shuffle::compute_shuffled_index; -ssz_and_tree_hash_tests!(FoundationBeaconState); +pub const MAX_VALIDATOR_COUNT: usize = 129; +pub const SLOT_OFFSET: Slot = Slot::new(1); -fn test_beacon_proposer_index() { - let spec = T::default_spec(); - let relative_epoch = RelativeEpoch::Current; +lazy_static! { + /// A cached set of keys. + static ref KEYPAIRS: Vec = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); +} - // Build a state for testing. 
- let build_state = |validator_count: usize| -> BeaconState { - let builder: TestingBeaconStateBuilder = - TestingBeaconStateBuilder::from_deterministic_keypairs(validator_count, &spec); - let (mut state, _keypairs) = builder.build(); - state.build_committee_cache(relative_epoch, &spec).unwrap(); +fn get_harness( + validator_count: usize, + slot: Slot, +) -> BeaconChainHarness> { + let harness = BeaconChainHarness::new_with_store_config( + E::default(), + None, + KEYPAIRS[0..validator_count].to_vec(), + StoreConfig::default(), + ); - state - }; + let skip_to_slot = slot - SLOT_OFFSET; + if skip_to_slot > Slot::new(0) { + let slots = (skip_to_slot.as_u64()..=slot.as_u64()) + .map(Slot::new) + .collect::>(); + let state = harness.get_current_state(); + harness.add_attested_blocks_at_slots( + state, + Hash256::zero(), + slots.as_slice(), + (0..validator_count).collect::>().as_slice(), + ); + } + harness +} + +fn build_state(validator_count: usize) -> BeaconState { + get_harness(validator_count, Slot::new(0)) + .chain + .head_beacon_state() + .unwrap() +} + +fn test_beacon_proposer_index() { + let spec = T::default_spec(); // Get the i'th candidate proposer for the given state and slot let ith_candidate = |state: &BeaconState, slot: Slot, i: usize, spec: &ChainSpec| { @@ -56,9 +93,9 @@ fn test_beacon_proposer_index() { } // Test with two validators per slot, first validator has zero balance. 
- let mut state = build_state((T::slots_per_epoch() as usize).mul(2)); + let mut state = build_state::((T::slots_per_epoch() as usize).mul(2)); let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec); - state.validators[slot0_candidate0].effective_balance = 0; + state.validators_mut()[slot0_candidate0].effective_balance = 0; test(&state, Slot::new(0), 1); for i in 1..T::slots_per_epoch() { test(&state, Slot::from(i), 0); @@ -81,17 +118,9 @@ fn test_cache_initialization( spec: &ChainSpec, ) { let slot = relative_epoch - .into_epoch(state.slot.epoch(T::slots_per_epoch())) + .into_epoch(state.slot().epoch(T::slots_per_epoch())) .start_slot(T::slots_per_epoch()); - // Assuming the cache isn't already built, assert that a call to a cache-using function fails. - assert_eq!( - state.get_attestation_duties(0, relative_epoch), - Err(BeaconStateError::CommitteeCacheUninitialized(Some( - relative_epoch - ))) - ); - // Build the cache. state.build_committee_cache(relative_epoch, spec).unwrap(); @@ -99,7 +128,7 @@ fn test_cache_initialization( state.get_beacon_committee(slot, 0).unwrap(); // Drop the cache. - state.drop_committee_cache(relative_epoch); + state.drop_committee_cache(relative_epoch).unwrap(); // Assert a call to a cache-using function fail. 
assert_eq!( @@ -114,11 +143,9 @@ fn test_cache_initialization( fn cache_initialization() { let spec = MinimalEthSpec::default_spec(); - let builder: TestingBeaconStateBuilder = - TestingBeaconStateBuilder::from_deterministic_keypairs(16, &spec); - let (mut state, _keypairs) = builder.build(); + let mut state = build_state::(16); - state.slot = + *state.slot_mut() = (MinimalEthSpec::genesis_epoch() + 1).start_slot(MinimalEthSpec::slots_per_epoch()); test_cache_initialization(&mut state, RelativeEpoch::Previous, &spec); @@ -150,25 +177,29 @@ fn test_clone_config(base_state: &BeaconState, clone_config: Clon .expect_err("shouldn't exist"); } if clone_config.pubkey_cache { - assert_ne!(state.pubkey_cache.len(), 0); + assert_ne!(state.pubkey_cache().len(), 0); } else { - assert_eq!(state.pubkey_cache.len(), 0); + assert_eq!(state.pubkey_cache().len(), 0); } if clone_config.exit_cache { state - .exit_cache + .exit_cache() .check_initialized() .expect("exit cache exists"); } else { state - .exit_cache + .exit_cache() .check_initialized() .expect_err("exit cache doesn't exist"); } if clone_config.tree_hash_cache { - assert!(state.tree_hash_cache.is_some()); + assert!(state.tree_hash_cache().is_initialized()); } else { - assert!(state.tree_hash_cache.is_none(), "{:?}", clone_config); + assert!( + !state.tree_hash_cache().is_initialized(), + "{:?}", + clone_config + ); } } @@ -176,9 +207,7 @@ fn test_clone_config(base_state: &BeaconState, clone_config: Clon fn clone_config() { let spec = MinimalEthSpec::default_spec(); - let builder: TestingBeaconStateBuilder = - TestingBeaconStateBuilder::from_deterministic_keypairs(16, &spec); - let (mut state, _keypairs) = builder.build(); + let mut state = build_state::(16); state.build_all_caches(&spec).unwrap(); state @@ -198,69 +227,10 @@ fn clone_config() { } } -#[test] -fn tree_hash_cache() { - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use tree_hash::TreeHash; - - let mut rng = XorShiftRng::from_seed([42; 16]); 
- - let mut state: FoundationBeaconState = BeaconState::random_for_test(&mut rng); - - let root = state.update_tree_hash_cache().unwrap(); - - assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]); - - /* - * A cache should hash twice without updating the slot. - */ - - assert_eq!( - state.update_tree_hash_cache().unwrap(), - root, - "tree hash result should be identical on the same slot" - ); - - /* - * A cache should not hash after updating the slot but not updating the state roots. - */ - - // The tree hash cache needs to be rebuilt since it was dropped when it failed. - state - .update_tree_hash_cache() - .expect("should rebuild cache"); - - state.slot += 1; - - assert_eq!( - state.update_tree_hash_cache(), - Err(BeaconStateError::NonLinearTreeHashCacheHistory), - "should not build hash without updating the state root" - ); - - /* - * The cache should update if the slot and state root are updated. - */ - - // The tree hash cache needs to be rebuilt since it was dropped when it failed. 
- let root = state - .update_tree_hash_cache() - .expect("should rebuild cache"); - - state.slot += 1; - state - .set_state_root(state.slot - 1, root) - .expect("should set state root"); - - let root = state.update_tree_hash_cache().unwrap(); - assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]); -} - /// Tests committee-specific components #[cfg(test)] mod committees { use super::*; - use crate::beacon_state::MinimalEthSpec; use std::ops::{Add, Div}; use swap_or_not_shuffle::shuffle_list; @@ -343,35 +313,33 @@ mod committees { ) { let spec = &T::default_spec(); - let mut builder = TestingBeaconStateBuilder::from_single_keypair( - validator_count, - &Keypair::random(), - spec, - ); - let slot = state_epoch.start_slot(T::slots_per_epoch()); - builder.teleport_to_slot(slot); - - let (mut state, _keypairs): (BeaconState, _) = builder.build(); + let harness = get_harness::(validator_count, slot); + let mut new_head_state = harness.get_current_state(); let distinct_hashes: Vec = (0..T::epochs_per_historical_vector()) .map(|i| Hash256::from_low_u64_be(i as u64)) .collect(); - state.randao_mixes = FixedVector::from(distinct_hashes); + *new_head_state.randao_mixes_mut() = FixedVector::from(distinct_hashes); - state - .build_committee_cache(RelativeEpoch::Previous, spec) + new_head_state + .force_build_committee_cache(RelativeEpoch::Previous, spec) .unwrap(); - state - .build_committee_cache(RelativeEpoch::Current, spec) + new_head_state + .force_build_committee_cache(RelativeEpoch::Current, spec) .unwrap(); - state - .build_committee_cache(RelativeEpoch::Next, spec) + new_head_state + .force_build_committee_cache(RelativeEpoch::Next, spec) .unwrap(); let cache_epoch = cache_epoch.into_epoch(state_epoch); - execute_committee_consistency_test(state, cache_epoch, validator_count as usize, &spec); + execute_committee_consistency_test( + new_head_state, + cache_epoch, + validator_count as usize, + &spec, + ); } fn committee_consistency_test_suite(cached_epoch: 
RelativeEpoch) { @@ -419,16 +387,12 @@ mod committees { mod get_outstanding_deposit_len { use super::*; - use crate::test_utils::TestingBeaconStateBuilder; - use crate::MinimalEthSpec; fn state() -> BeaconState { - let spec = MinimalEthSpec::default_spec(); - let builder: TestingBeaconStateBuilder = - TestingBeaconStateBuilder::from_deterministic_keypairs(16, &spec); - let (state, _keypairs) = builder.build(); - - state + get_harness(16, Slot::new(0)) + .chain + .head_beacon_state() + .unwrap() } #[test] @@ -436,8 +400,8 @@ mod get_outstanding_deposit_len { let mut state = state(); assert_eq!(state.get_outstanding_deposit_len(), Ok(0)); - state.eth1_data.deposit_count = 17; - state.eth1_deposit_index = 16; + state.eth1_data_mut().deposit_count = 17; + *state.eth1_deposit_index_mut() = 16; assert_eq!(state.get_outstanding_deposit_len(), Ok(1)); } @@ -445,8 +409,8 @@ mod get_outstanding_deposit_len { fn returns_err_if_the_state_is_invalid() { let mut state = state(); // The state is invalid, deposit count is lower than deposit index. 
- state.eth1_data.deposit_count = 16; - state.eth1_deposit_index = 17; + state.eth1_data_mut().deposit_count = 16; + *state.eth1_deposit_index_mut() = 17; assert_eq!( state.get_outstanding_deposit_len(), @@ -457,3 +421,124 @@ mod get_outstanding_deposit_len { ); } } + +#[test] +fn decode_base_and_altair() { + type E = MainnetEthSpec; + + let rng = &mut XorShiftRng::from_seed([42; 16]); + + let fork_epoch = Epoch::from_ssz_bytes(&[7, 6, 5, 4, 3, 2, 1, 0]).unwrap(); + + let base_epoch = fork_epoch.saturating_sub(1_u64); + let base_slot = base_epoch.end_slot(E::slots_per_epoch()); + let altair_epoch = fork_epoch; + let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_epoch); + + // BeaconStateBase + { + let good_base_block: BeaconState = BeaconState::Base(BeaconStateBase { + slot: base_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have a base block with a slot higher than the fork slot. + let bad_base_block = { + let mut bad = good_base_block.clone(); + *bad.slot_mut() = altair_slot; + bad + }; + + assert_eq!( + BeaconState::from_ssz_bytes(&good_base_block.as_ssz_bytes(), &spec) + .expect("good base block can be decoded"), + good_base_block + ); + >::from_ssz_bytes(&bad_base_block.as_ssz_bytes(), &spec) + .expect_err("bad base block cannot be decoded"); + } + + // BeaconStateAltair + { + let good_altair_block: BeaconState = + BeaconState::Altair(BeaconStateAltair { + slot: altair_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have an Altair block with a slot lower than the fork slot. 
+ let bad_altair_block = { + let mut bad = good_altair_block.clone(); + *bad.slot_mut() = base_slot; + bad + }; + + assert_eq!( + BeaconState::from_ssz_bytes(&good_altair_block.as_ssz_bytes(), &spec) + .expect("good altair block can be decoded"), + good_altair_block + ); + >::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec) + .expect_err("bad altair block cannot be decoded"); + } +} + +#[test] +fn tree_hash_cache_linear_history() { + use crate::test_utils::{SeedableRng, XorShiftRng}; + use tree_hash::TreeHash; + + let mut rng = XorShiftRng::from_seed([42; 16]); + + let mut state: BeaconState = + BeaconState::Base(BeaconStateBase::random_for_test(&mut rng)); + + let root = state.update_tree_hash_cache().unwrap(); + + assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]); + + /* + * A cache should hash twice without updating the slot. + */ + + assert_eq!( + state.update_tree_hash_cache().unwrap(), + root, + "tree hash result should be identical on the same slot" + ); + + /* + * A cache should not hash after updating the slot but not updating the state roots. + */ + + // The tree hash cache needs to be rebuilt since it was dropped when it failed. + state + .update_tree_hash_cache() + .expect("should rebuild cache"); + + *state.slot_mut() += 1; + + assert_eq!( + state.update_tree_hash_cache(), + Err(BeaconStateError::NonLinearTreeHashCacheHistory), + "should not build hash without updating the state root" + ); + + /* + * The cache should update if the slot and state root are updated. + */ + + // The tree hash cache needs to be rebuilt since it was dropped when it failed. 
+ let root = state + .update_tree_hash_cache() + .expect("should rebuild cache"); + + *state.slot_mut() += 1; + state + .set_state_root(state.slot() - 1, root) + .expect("should set state root"); + + let root = state.update_tree_hash_cache().unwrap(); + assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]); +} diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index ee8cfb2dd55..22b6ace21e6 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -1,5 +1,6 @@ #![allow(clippy::integer_arithmetic)] #![allow(clippy::disallowed_method)] +#![allow(clippy::indexing_slicing)] use super::Error; use crate::{BeaconState, EthSpec, Hash256, Slot, Unsigned, Validator}; @@ -11,8 +12,13 @@ use std::cmp::Ordering; use std::iter::ExactSizeIterator; use tree_hash::{mix_in_length, MerkleHasher, TreeHash}; -/// The number of fields on a beacon state. -const NUM_BEACON_STATE_HASHING_FIELDS: usize = 20; +/// The number of leaves (including padding) on the `BeaconState` Merkle tree. +/// +/// ## Note +/// +/// This constant is set with the assumption that there are `> 16` and `<= 32` fields on the +/// `BeaconState`. **Tree hashing will fail if this value is set incorrectly.** +const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; /// The number of nodes in the Merkle tree of a validator record. 
const NODES_PER_VALIDATOR: usize = 15; @@ -41,7 +47,7 @@ impl Eth1DataVotesTreeHashCache { pub fn new(state: &BeaconState) -> Self { let mut arena = CacheArena::default(); let roots: VariableList<_, _> = state - .eth1_data_votes + .eth1_data_votes() .iter() .map(|eth1_data| eth1_data.tree_hash_root()) .collect::>() @@ -51,7 +57,7 @@ impl Eth1DataVotesTreeHashCache { Self { arena, tree_hash_cache, - voting_period: Self::voting_period(state.slot), + voting_period: Self::voting_period(state.slot()), roots, } } @@ -61,14 +67,14 @@ impl Eth1DataVotesTreeHashCache { } pub fn recalculate_tree_hash_root(&mut self, state: &BeaconState) -> Result { - if state.eth1_data_votes.len() < self.roots.len() - || Self::voting_period(state.slot) != self.voting_period + if state.eth1_data_votes().len() < self.roots.len() + || Self::voting_period(state.slot()) != self.voting_period { *self = Self::new(state); } state - .eth1_data_votes + .eth1_data_votes() .iter() .skip(self.roots.len()) .try_for_each(|eth1_data| self.roots.push(eth1_data.tree_hash_root()))?; @@ -80,8 +86,42 @@ impl Eth1DataVotesTreeHashCache { } /// A cache that performs a caching tree hash of the entire `BeaconState` struct. -#[derive(Debug, PartialEq, Clone, Encode, Decode)] +/// +/// This type is a wrapper around the inner cache, which does all the work. +#[derive(Debug, Default, PartialEq, Clone)] pub struct BeaconTreeHashCache { + inner: Option>, +} + +impl BeaconTreeHashCache { + pub fn new(state: &BeaconState) -> Self { + Self { + inner: Some(BeaconTreeHashCacheInner::new(state)), + } + } + + pub fn is_initialized(&self) -> bool { + self.inner.is_some() + } + + /// Move the inner cache out so that the containing `BeaconState` can be borrowed. + pub fn take(&mut self) -> Option> { + self.inner.take() + } + + /// Restore the inner cache after using `take`. + pub fn restore(&mut self, inner: BeaconTreeHashCacheInner) { + self.inner = Some(inner); + } + + /// Make the cache empty. 
+ pub fn uninitialize(&mut self) { + self.inner = None; + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct BeaconTreeHashCacheInner { /// Tracks the previously generated state root to ensure the next state root provided descends /// directly from this state. previous_state: Option<(Hash256, Slot)>, @@ -101,25 +141,27 @@ pub struct BeaconTreeHashCache { eth1_data_votes: Eth1DataVotesTreeHashCache, } -impl BeaconTreeHashCache { +impl BeaconTreeHashCacheInner { /// Instantiates a new cache. /// /// Allocates the necessary memory to store all of the cached Merkle trees. Only the leaves are /// hashed, leaving the internal nodes as all-zeros. pub fn new(state: &BeaconState) -> Self { let mut fixed_arena = CacheArena::default(); - let block_roots = state.block_roots.new_tree_hash_cache(&mut fixed_arena); - let state_roots = state.state_roots.new_tree_hash_cache(&mut fixed_arena); - let historical_roots = state.historical_roots.new_tree_hash_cache(&mut fixed_arena); - let randao_mixes = state.randao_mixes.new_tree_hash_cache(&mut fixed_arena); + let block_roots = state.block_roots().new_tree_hash_cache(&mut fixed_arena); + let state_roots = state.state_roots().new_tree_hash_cache(&mut fixed_arena); + let historical_roots = state + .historical_roots() + .new_tree_hash_cache(&mut fixed_arena); + let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena); - let validators = ValidatorsListTreeHashCache::new::(&state.validators[..]); + let validators = ValidatorsListTreeHashCache::new::(state.validators()); let mut balances_arena = CacheArena::default(); - let balances = state.balances.new_tree_hash_cache(&mut balances_arena); + let balances = state.balances().new_tree_hash_cache(&mut balances_arena); let mut slashings_arena = CacheArena::default(); - let slashings = state.slashings.new_tree_hash_cache(&mut slashings_arena); + let slashings = state.slashings().new_tree_hash_cache(&mut slashings_arena); Self { previous_state: None, @@ -150,100 +192,132 
@@ impl BeaconTreeHashCache { // efficient algorithm. if let Some((previous_root, previous_slot)) = self.previous_state { // The previously-hashed state must not be newer than `state`. - if previous_slot > state.slot { + if previous_slot > state.slot() { return Err(Error::TreeHashCacheSkippedSlot { cache: previous_slot, - state: state.slot, + state: state.slot(), }); } // If the state is newer, the previous root must be in the history of the given state. - if previous_slot < state.slot && *state.get_state_root(previous_slot)? != previous_root + if previous_slot < state.slot() + && *state.get_state_root(previous_slot)? != previous_root { return Err(Error::NonLinearTreeHashCacheHistory); } } - let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASHING_FIELDS); + let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES); - hasher.write(state.genesis_time.tree_hash_root().as_bytes())?; - hasher.write(state.genesis_validators_root.tree_hash_root().as_bytes())?; - hasher.write(state.slot.tree_hash_root().as_bytes())?; - hasher.write(state.fork.tree_hash_root().as_bytes())?; - hasher.write(state.latest_block_header.tree_hash_root().as_bytes())?; + hasher.write(state.genesis_time().tree_hash_root().as_bytes())?; + hasher.write(state.genesis_validators_root().tree_hash_root().as_bytes())?; + hasher.write(state.slot().tree_hash_root().as_bytes())?; + hasher.write(state.fork().tree_hash_root().as_bytes())?; + hasher.write(state.latest_block_header().tree_hash_root().as_bytes())?; hasher.write( state - .block_roots + .block_roots() .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)? .as_bytes(), )?; hasher.write( state - .state_roots + .state_roots() .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)? .as_bytes(), )?; hasher.write( state - .historical_roots + .historical_roots() .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)? 
.as_bytes(), )?; - hasher.write(state.eth1_data.tree_hash_root().as_bytes())?; + hasher.write(state.eth1_data().tree_hash_root().as_bytes())?; hasher.write( self.eth1_data_votes .recalculate_tree_hash_root(&state)? .as_bytes(), )?; - hasher.write(state.eth1_deposit_index.tree_hash_root().as_bytes())?; + hasher.write(state.eth1_deposit_index().tree_hash_root().as_bytes())?; hasher.write( self.validators - .recalculate_tree_hash_root(&state.validators[..])? + .recalculate_tree_hash_root(state.validators())? .as_bytes(), )?; hasher.write( state - .balances + .balances() .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)? .as_bytes(), )?; hasher.write( state - .randao_mixes + .randao_mixes() .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)? .as_bytes(), )?; hasher.write( state - .slashings + .slashings() .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)? .as_bytes(), )?; + + // Participation + match state { + BeaconState::Base(state) => { + hasher.write( + state + .previous_epoch_attestations + .tree_hash_root() + .as_bytes(), + )?; + hasher.write(state.current_epoch_attestations.tree_hash_root().as_bytes())?; + } + // FIXME(altair): add a cache to accelerate hashing of these fields + BeaconState::Altair(state) => { + hasher.write( + state + .previous_epoch_participation + .tree_hash_root() + .as_bytes(), + )?; + hasher.write( + state + .current_epoch_participation + .tree_hash_root() + .as_bytes(), + )?; + } + } + + hasher.write(state.justification_bits().tree_hash_root().as_bytes())?; hasher.write( state - .previous_epoch_attestations - .tree_hash_root() - .as_bytes(), - )?; - hasher.write(state.current_epoch_attestations.tree_hash_root().as_bytes())?; - hasher.write(state.justification_bits.tree_hash_root().as_bytes())?; - hasher.write( - state - .previous_justified_checkpoint + .previous_justified_checkpoint() .tree_hash_root() .as_bytes(), )?; hasher.write( state - 
.current_justified_checkpoint + .current_justified_checkpoint() .tree_hash_root() .as_bytes(), )?; - hasher.write(state.finalized_checkpoint.tree_hash_root().as_bytes())?; + hasher.write(state.finalized_checkpoint().tree_hash_root().as_bytes())?; + + // Inactivity & light-client sync committees + if let BeaconState::Altair(ref state) = state { + // FIXME(altair): add cache for this field + hasher.write(state.inactivity_scores.tree_hash_root().as_bytes())?; + + hasher.write(state.current_sync_committee.tree_hash_root().as_bytes())?; + hasher.write(state.next_sync_committee.tree_hash_root().as_bytes())?; + } let root = hasher.finish()?; - self.previous_state = Some((root, state.slot)); + self.previous_state = Some((root, state.slot())); Ok(root) } @@ -432,6 +506,13 @@ impl ParallelValidatorTreeHash { } } +#[cfg(feature = "arbitrary-fuzz")] +impl arbitrary::Arbitrary for BeaconTreeHashCache { + fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(Self::default()) + } +} + #[cfg(test)] mod test { use super::*; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b6cb5142f44..4ce57f3ad74 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,14 +1,12 @@ use crate::*; use int_to_bytes::int_to_bytes4; use serde_derive::{Deserialize, Serialize}; -use std::collections::HashMap; +use serde_utils::quoted_u64::MaybeQuoted; use std::fs::File; use std::path::Path; use tree_hash::TreeHash; /// Each of the BLS signature domains. -/// -/// Spec v0.12.1 #[derive(Debug, PartialEq, Clone, Copy)] pub enum Domain { BeaconProposer, @@ -18,11 +16,12 @@ pub enum Domain { VoluntaryExit, SelectionProof, AggregateAndProof, + SyncCommittee, } -/// Holds all the "constants" for a BeaconChain. +/// Lighthouse's internal configuration struct. /// -/// Spec v0.12.1 +/// Contains a mixture of "preset" and "config" values w.r.t to the EF definitions. 
#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone)] pub struct ChainSpec { @@ -87,13 +86,13 @@ pub struct ChainSpec { /* * Signature domains */ - domain_beacon_proposer: u32, - domain_beacon_attester: u32, - domain_randao: u32, - domain_deposit: u32, - domain_voluntary_exit: u32, - domain_selection_proof: u32, - domain_aggregate_and_proof: u32, + pub(crate) domain_beacon_proposer: u32, + pub(crate) domain_beacon_attester: u32, + pub(crate) domain_randao: u32, + pub(crate) domain_deposit: u32, + pub(crate) domain_voluntary_exit: u32, + pub(crate) domain_selection_proof: u32, + pub(crate) domain_aggregate_and_proof: u32, /* * Fork choice @@ -109,6 +108,23 @@ pub struct ChainSpec { pub deposit_network_id: u64, pub deposit_contract_address: Address, + /* + * Altair hard fork params + */ + pub inactivity_penalty_quotient_altair: u64, + pub min_slashing_penalty_quotient_altair: u64, + pub proportional_slashing_multiplier_altair: u64, + pub epochs_per_sync_committee_period: Epoch, + pub inactivity_score_bias: u64, + pub inactivity_score_recovery_rate: u64, + pub min_sync_committee_participants: u64, + pub(crate) domain_sync_committee: u32, + pub(crate) domain_sync_committee_selection_proof: u32, + pub(crate) domain_contribution_and_proof: u32, + pub altair_fork_version: [u8; 4], + /// The Altair fork epoch is optional, with `None` representing "Altair never happens". + pub altair_fork_epoch: Option, + /* * Networking */ @@ -123,6 +139,12 @@ pub struct ChainSpec { } impl ChainSpec { + /// Construct a `ChainSpec` from a standard config. + pub fn from_config(config: &Config) -> Option { + let spec = T::default_spec(); + config.apply_to_chain_spec::(&spec) + } + /// Returns an `EnrForkId` for the given `slot`. /// /// Presently, we don't have any forks so we just ignore the slot. In the future this function @@ -146,6 +168,19 @@ impl ChainSpec { None } + /// Returns the name of the fork which is active at `slot`. 
+ pub fn fork_name_at_slot(&self, slot: Slot) -> ForkName { + self.fork_name_at_epoch(slot.epoch(E::slots_per_epoch())) + } + + /// Returns the name of the fork which is active at `epoch`. + pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { + match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + } + } + /// Get the domain number, unmodified by the fork. /// /// Spec v0.12.1 @@ -158,6 +193,7 @@ impl ChainSpec { Domain::VoluntaryExit => self.domain_voluntary_exit, Domain::SelectionProof => self.domain_selection_proof, Domain::AggregateAndProof => self.domain_aggregate_and_proof, + Domain::SyncCommittee => self.domain_sync_committee, } } @@ -211,13 +247,15 @@ impl ChainSpec { ) -> [u8; 4] { let mut result = [0; 4]; let root = Self::compute_fork_data_root(current_version, genesis_validators_root); - result.copy_from_slice(&root.as_bytes()[0..4]); + result.copy_from_slice( + root.as_bytes() + .get(0..4) + .expect("root hash is at least 4 bytes"), + ); result } /// Compute a domain by applying the given `fork_version`. 
- /// - /// Spec v0.12.1 pub fn compute_domain( &self, domain: Domain, @@ -229,7 +267,10 @@ impl ChainSpec { let mut domain = [0; 32]; domain[0..4].copy_from_slice(&int_to_bytes4(domain_constant)); domain[4..].copy_from_slice( - &Self::compute_fork_data_root(fork_version, genesis_validators_root)[..28], + Self::compute_fork_data_root(fork_version, genesis_validators_root) + .as_bytes() + .get(..28) + .expect("fork has is 32 bytes so first 28 bytes should exist"), ); Hash256::from(domain) @@ -265,10 +306,22 @@ impl ChainSpec { /* * Gwei values */ - min_deposit_amount: u64::pow(2, 0).saturating_mul(u64::pow(10, 9)), - max_effective_balance: u64::pow(2, 5).saturating_mul(u64::pow(10, 9)), - ejection_balance: u64::pow(2, 4).saturating_mul(u64::pow(10, 9)), - effective_balance_increment: u64::pow(2, 0).saturating_mul(u64::pow(10, 9)), + min_deposit_amount: option_wrapper(|| { + u64::checked_pow(2, 0)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + max_effective_balance: option_wrapper(|| { + u64::checked_pow(2, 5)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + ejection_balance: option_wrapper(|| { + u64::checked_pow(2, 4)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + effective_balance_increment: option_wrapper(|| { + u64::checked_pow(2, 0)?.checked_mul(u64::checked_pow(10, 9)?) 
+ }) + .expect("calculation does not overflow"), /* * Initial Values @@ -294,7 +347,7 @@ impl ChainSpec { base_reward_factor: 64, whistleblower_reward_quotient: 512, proposer_reward_quotient: 8, - inactivity_penalty_quotient: u64::pow(2, 26), + inactivity_penalty_quotient: u64::checked_pow(2, 26).expect("pow does not overflow"), min_slashing_penalty_quotient: 128, proportional_slashing_multiplier: 1, @@ -325,6 +378,26 @@ impl ChainSpec { .parse() .expect("chain spec deposit contract address"), + /* + * Altair hard fork params + */ + inactivity_penalty_quotient_altair: option_wrapper(|| { + u64::checked_pow(2, 24)?.checked_mul(3) + }) + .expect("calculation does not overflow"), + min_slashing_penalty_quotient_altair: u64::checked_pow(2, 6) + .expect("pow does not overflow"), + proportional_slashing_multiplier_altair: 2, + inactivity_score_bias: 4, + inactivity_score_recovery_rate: 16, + min_sync_committee_participants: 1, + epochs_per_sync_committee_period: Epoch::new(256), + domain_sync_committee: 7, + domain_sync_committee_selection_proof: 8, + domain_contribution_and_proof: 9, + altair_fork_version: [0x01, 0x00, 0x00, 0x00], + altair_fork_epoch: Some(Epoch::new(u64::MAX)), + /* * Network specific */ @@ -340,8 +413,6 @@ impl ChainSpec { } /// Ethereum Foundation minimal spec, as defined in the eth2.0-specs repo. - /// - /// Spec v0.12.1 pub fn minimal() -> Self { // Note: bootnodes to be updated when static nodes exist. 
let boot_nodes = vec![]; @@ -357,10 +428,15 @@ impl ChainSpec { shard_committee_period: 64, genesis_delay: 300, seconds_per_slot: 6, - inactivity_penalty_quotient: u64::pow(2, 25), + inactivity_penalty_quotient: u64::checked_pow(2, 25).expect("pow does not overflow"), min_slashing_penalty_quotient: 64, proportional_slashing_multiplier: 2, safe_slots_to_update_justified: 2, + // Altair + epochs_per_sync_committee_period: Epoch::new(8), + altair_fork_version: [0x01, 0x00, 0x00, 0x01], + altair_fork_epoch: Some(Epoch::new(u64::MAX)), + // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, deposit_network_id: 5, @@ -371,24 +447,6 @@ impl ChainSpec { ..ChainSpec::mainnet() } } - - /// Suits the `v0.12.3` version of the eth2 spec: - /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/configs/mainnet/phase0.yaml - /// - /// This method only needs to exist whilst we provide support for "legacy" testnets prior to v1.0.0 - /// (e.g., Medalla, Pyrmont, Spadina, Altona, etc.). 
- pub fn v012_legacy() -> Self { - let boot_nodes = vec![]; - - Self { - genesis_delay: 172_800, // 2 days - inactivity_penalty_quotient: u64::pow(2, 24), - min_slashing_penalty_quotient: 32, - eth1_follow_distance: 1024, - boot_nodes, - ..ChainSpec::mainnet() - } - } } impl Default for ChainSpec { @@ -397,285 +455,103 @@ impl Default for ChainSpec { } } -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_mainnet_spec_can_be_constructed() { - let _ = ChainSpec::mainnet(); - } - - #[allow(clippy::useless_vec)] - fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) { - let previous_version = [0, 0, 0, 1]; - let current_version = [0, 0, 0, 2]; - let genesis_validators_root = Hash256::from_low_u64_le(77); - let fork_epoch = Epoch::new(1024); - let fork = Fork { - previous_version, - current_version, - epoch: fork_epoch, - }; - - for (epoch, version) in vec![ - (fork_epoch - 1, previous_version), - (fork_epoch, current_version), - (fork_epoch + 1, current_version), - ] { - let domain1 = spec.get_domain(epoch, domain_type, &fork, genesis_validators_root); - let domain2 = spec.compute_domain(domain_type, version, genesis_validators_root); - - assert_eq!(domain1, domain2); - assert_eq!(&domain1.as_bytes()[0..4], &int_to_bytes4(raw_domain)[..]); - } - } - - #[test] - fn test_get_domain() { - let spec = ChainSpec::mainnet(); - - test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); - test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec); - test_domain(Domain::Randao, spec.domain_randao, &spec); - test_domain(Domain::Deposit, spec.domain_deposit, &spec); - test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec); - test_domain(Domain::SelectionProof, spec.domain_selection_proof, &spec); - test_domain( - Domain::AggregateAndProof, - spec.domain_aggregate_and_proof, - &spec, - ); - } -} - -/// YAML config file as defined by the spec. 
+/// Exact implementation of the *config* object from the Ethereum spec (YAML/JSON). #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] #[serde(rename_all = "UPPERCASE")] -pub struct YamlConfig { - pub config_name: String, - // ChainSpec - #[serde(with = "serde_utils::quoted_u64")] - max_committees_per_slot: u64, - #[serde(with = "serde_utils::quoted_u64")] - target_committee_size: u64, - #[serde(with = "serde_utils::quoted_u64")] - min_per_epoch_churn_limit: u64, - #[serde(with = "serde_utils::quoted_u64")] - churn_limit_quotient: u64, - #[serde(with = "serde_utils::quoted_u8")] - shuffle_round_count: u8, +pub struct Config { + #[serde(default)] + pub preset_base: String, + #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, #[serde(with = "serde_utils::quoted_u64")] min_genesis_time: u64, + #[serde(with = "serde_utils::bytes_4_hex")] + genesis_fork_version: [u8; 4], #[serde(with = "serde_utils::quoted_u64")] genesis_delay: u64, - #[serde(with = "serde_utils::quoted_u64")] - min_deposit_amount: u64, - #[serde(with = "serde_utils::quoted_u64")] - max_effective_balance: u64, - #[serde(with = "serde_utils::quoted_u64")] - ejection_balance: u64, - #[serde(with = "serde_utils::quoted_u64")] - effective_balance_increment: u64, - #[serde(with = "serde_utils::quoted_u64")] - hysteresis_quotient: u64, - #[serde(with = "serde_utils::quoted_u64")] - hysteresis_downward_multiplier: u64, - #[serde(with = "serde_utils::quoted_u64")] - hysteresis_upward_multiplier: u64, + #[serde(with = "serde_utils::bytes_4_hex")] - genesis_fork_version: [u8; 4], - #[serde(with = "serde_utils::u8_hex")] - bls_withdrawal_prefix: u8, + altair_fork_version: [u8; 4], + altair_fork_epoch: Option>, + #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, #[serde(with = "serde_utils::quoted_u64")] - min_attestation_inclusion_delay: u64, - #[serde(with = "serde_utils::quoted_u64")] - min_seed_lookahead: u64, - #[serde(with = "serde_utils::quoted_u64")] 
- max_seed_lookahead: u64, - #[serde(with = "serde_utils::quoted_u64")] - min_epochs_to_inactivity_penalty: u64, + seconds_per_eth1_block: u64, #[serde(with = "serde_utils::quoted_u64")] - min_validator_withdrawability_delay: u64, + min_validator_withdrawability_delay: Epoch, #[serde(with = "serde_utils::quoted_u64")] shard_committee_period: u64, #[serde(with = "serde_utils::quoted_u64")] - base_reward_factor: u64, - #[serde(with = "serde_utils::quoted_u64")] - whistleblower_reward_quotient: u64, - #[serde(with = "serde_utils::quoted_u64")] - proposer_reward_quotient: u64, - #[serde(with = "serde_utils::quoted_u64")] - inactivity_penalty_quotient: u64, - #[serde(with = "serde_utils::quoted_u64")] - min_slashing_penalty_quotient: u64, - #[serde(with = "serde_utils::quoted_u64")] - proportional_slashing_multiplier: u64, - #[serde(with = "serde_utils::quoted_u64")] - safe_slots_to_update_justified: u64, - - #[serde(with = "serde_utils::u32_hex")] - domain_beacon_proposer: u32, - #[serde(with = "serde_utils::u32_hex")] - domain_beacon_attester: u32, - #[serde(with = "serde_utils::u32_hex")] - domain_randao: u32, - #[serde(with = "serde_utils::u32_hex")] - domain_deposit: u32, - #[serde(with = "serde_utils::u32_hex")] - domain_voluntary_exit: u32, - #[serde(with = "serde_utils::u32_hex")] - domain_selection_proof: u32, - #[serde(with = "serde_utils::u32_hex")] - domain_aggregate_and_proof: u32, - // EthSpec - #[serde(with = "serde_utils::quoted_u32")] - max_validators_per_committee: u32, - #[serde(with = "serde_utils::quoted_u64")] - slots_per_epoch: u64, - #[serde(with = "serde_utils::quoted_u64")] - epochs_per_eth1_voting_period: u64, - #[serde(with = "serde_utils::quoted_u64")] - slots_per_historical_root: u64, - #[serde(with = "serde_utils::quoted_u64")] - epochs_per_historical_vector: u64, - #[serde(with = "serde_utils::quoted_u64")] - epochs_per_slashings_vector: u64, - #[serde(with = "serde_utils::quoted_u64")] - historical_roots_limit: u64, - #[serde(with = 
"serde_utils::quoted_u64")] - validator_registry_limit: u64, - #[serde(with = "serde_utils::quoted_u32")] - max_proposer_slashings: u32, - #[serde(with = "serde_utils::quoted_u32")] - max_attester_slashings: u32, - #[serde(with = "serde_utils::quoted_u32")] - max_attestations: u32, - #[serde(with = "serde_utils::quoted_u32")] - max_deposits: u32, - #[serde(with = "serde_utils::quoted_u32")] - max_voluntary_exits: u32, - // Validator - #[serde(with = "serde_utils::quoted_u64")] eth1_follow_distance: u64, + #[serde(with = "serde_utils::quoted_u64")] - target_aggregators_per_committee: u64, + inactivity_score_bias: u64, #[serde(with = "serde_utils::quoted_u64")] - random_subnets_per_validator: u64, + inactivity_score_recovery_rate: u64, #[serde(with = "serde_utils::quoted_u64")] - epochs_per_random_subnet_subscription: u64, + ejection_balance: u64, #[serde(with = "serde_utils::quoted_u64")] - seconds_per_eth1_block: u64, + min_per_epoch_churn_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] + churn_limit_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] deposit_chain_id: u64, #[serde(with = "serde_utils::quoted_u64")] deposit_network_id: u64, deposit_contract_address: Address, - - // Extra fields (could be from a future hard-fork that we don't yet know). - #[serde(flatten)] - pub extra_fields: HashMap, } -impl Default for YamlConfig { +impl Default for Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); - YamlConfig::from_spec::(&chain_spec) + Config::from_chain_spec::(&chain_spec) } } -/// Spec v0.12.1 -impl YamlConfig { - /// Maps `self.config_name` to an identifier for an `EthSpec` instance. +impl Config { + /// Maps `self` to an identifier for an `EthSpec` instance. /// /// Returns `None` if there is no match. 
pub fn eth_spec_id(&self) -> Option { - Some(match self.config_name.as_str() { - "mainnet" => EthSpecId::Mainnet, - "minimal" => EthSpecId::Minimal, - "toledo" => EthSpecId::Mainnet, - "prater" => EthSpecId::Mainnet, - "pyrmont" => EthSpecId::Mainnet, - "spadina" => EthSpecId::V012Legacy, - "medalla" => EthSpecId::V012Legacy, - "altona" => EthSpecId::V012Legacy, - _ => return None, - }) + match self.preset_base.as_str() { + "minimal" => Some(EthSpecId::Minimal), + "mainnet" => Some(EthSpecId::Mainnet), + _ => None, + } } - pub fn from_spec(spec: &ChainSpec) -> Self { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { Self { - config_name: T::spec_name().to_string(), - // ChainSpec - max_committees_per_slot: spec.max_committees_per_slot as u64, - target_committee_size: spec.target_committee_size as u64, - min_per_epoch_churn_limit: spec.min_per_epoch_churn_limit, - churn_limit_quotient: spec.churn_limit_quotient, - shuffle_round_count: spec.shuffle_round_count, + preset_base: T::spec_name().to_string(), + min_genesis_active_validator_count: spec.min_genesis_active_validator_count, min_genesis_time: spec.min_genesis_time, + genesis_fork_version: spec.genesis_fork_version, genesis_delay: spec.genesis_delay, - min_deposit_amount: spec.min_deposit_amount, - max_effective_balance: spec.max_effective_balance, - ejection_balance: spec.ejection_balance, - effective_balance_increment: spec.effective_balance_increment, - hysteresis_quotient: spec.hysteresis_quotient, - hysteresis_downward_multiplier: spec.hysteresis_downward_multiplier, - hysteresis_upward_multiplier: spec.hysteresis_upward_multiplier, - proportional_slashing_multiplier: spec.proportional_slashing_multiplier, - bls_withdrawal_prefix: spec.bls_withdrawal_prefix_byte, + + altair_fork_version: spec.altair_fork_version, + altair_fork_epoch: spec + .altair_fork_epoch + .map(|slot| MaybeQuoted { value: slot }), + seconds_per_slot: spec.seconds_per_slot, - min_attestation_inclusion_delay: 
spec.min_attestation_inclusion_delay, - min_seed_lookahead: spec.min_seed_lookahead.into(), - max_seed_lookahead: spec.max_seed_lookahead.into(), - min_validator_withdrawability_delay: spec.min_validator_withdrawability_delay.into(), + seconds_per_eth1_block: spec.seconds_per_eth1_block, + min_validator_withdrawability_delay: spec.min_validator_withdrawability_delay, shard_committee_period: spec.shard_committee_period, - min_epochs_to_inactivity_penalty: spec.min_epochs_to_inactivity_penalty, - base_reward_factor: spec.base_reward_factor, - whistleblower_reward_quotient: spec.whistleblower_reward_quotient, - proposer_reward_quotient: spec.proposer_reward_quotient, - inactivity_penalty_quotient: spec.inactivity_penalty_quotient, - min_slashing_penalty_quotient: spec.min_slashing_penalty_quotient, - genesis_fork_version: spec.genesis_fork_version, - safe_slots_to_update_justified: spec.safe_slots_to_update_justified, - domain_beacon_proposer: spec.domain_beacon_proposer, - domain_beacon_attester: spec.domain_beacon_attester, - domain_randao: spec.domain_randao, - domain_deposit: spec.domain_deposit, - domain_voluntary_exit: spec.domain_voluntary_exit, - domain_selection_proof: spec.domain_selection_proof, - domain_aggregate_and_proof: spec.domain_aggregate_and_proof, - - // EthSpec - max_validators_per_committee: T::MaxValidatorsPerCommittee::to_u32(), - slots_per_epoch: T::slots_per_epoch(), - epochs_per_eth1_voting_period: T::EpochsPerEth1VotingPeriod::to_u64(), - slots_per_historical_root: T::slots_per_historical_root() as u64, - epochs_per_historical_vector: T::epochs_per_historical_vector() as u64, - epochs_per_slashings_vector: T::EpochsPerSlashingsVector::to_u64(), - historical_roots_limit: T::HistoricalRootsLimit::to_u64(), - validator_registry_limit: T::ValidatorRegistryLimit::to_u64(), - max_proposer_slashings: T::MaxProposerSlashings::to_u32(), - max_attester_slashings: T::MaxAttesterSlashings::to_u32(), - max_attestations: T::MaxAttestations::to_u32(), - 
max_deposits: T::MaxDeposits::to_u32(), - max_voluntary_exits: T::MaxVoluntaryExits::to_u32(), - - // Validator eth1_follow_distance: spec.eth1_follow_distance, - target_aggregators_per_committee: spec.target_aggregators_per_committee, - random_subnets_per_validator: spec.random_subnets_per_validator, - epochs_per_random_subnet_subscription: spec.epochs_per_random_subnet_subscription, - seconds_per_eth1_block: spec.seconds_per_eth1_block, + + inactivity_score_bias: spec.inactivity_score_bias, + inactivity_score_recovery_rate: spec.inactivity_score_recovery_rate, + ejection_balance: spec.ejection_balance, + churn_limit_quotient: spec.churn_limit_quotient, + min_per_epoch_churn_limit: spec.min_per_epoch_churn_limit, + deposit_chain_id: spec.deposit_chain_id, deposit_network_id: spec.deposit_network_id, deposit_contract_address: spec.deposit_contract_address, - - extra_fields: HashMap::new(), } } @@ -687,121 +563,120 @@ impl YamlConfig { } pub fn apply_to_chain_spec(&self, chain_spec: &ChainSpec) -> Option { - // Check that YAML values match type-level EthSpec constants - if self.max_validators_per_committee != T::MaxValidatorsPerCommittee::to_u32() - || self.slots_per_epoch != T::slots_per_epoch() - || self.epochs_per_eth1_voting_period != T::EpochsPerEth1VotingPeriod::to_u64() - || self.slots_per_historical_root != T::slots_per_historical_root() as u64 - || self.epochs_per_historical_vector != T::epochs_per_historical_vector() as u64 - || self.epochs_per_slashings_vector != T::EpochsPerSlashingsVector::to_u64() - || self.historical_roots_limit != T::HistoricalRootsLimit::to_u64() - || self.validator_registry_limit != T::ValidatorRegistryLimit::to_u64() - || self.max_proposer_slashings != T::MaxProposerSlashings::to_u32() - || self.max_attester_slashings != T::MaxAttesterSlashings::to_u32() - || self.max_attestations != T::MaxAttestations::to_u32() - || self.max_deposits != T::MaxDeposits::to_u32() - || self.max_voluntary_exits != T::MaxVoluntaryExits::to_u32() - { + 
// Pattern match here to avoid missing any fields. + let &Config { + ref preset_base, + min_genesis_active_validator_count, + min_genesis_time, + genesis_fork_version, + genesis_delay, + altair_fork_version, + altair_fork_epoch, + seconds_per_slot, + seconds_per_eth1_block, + min_validator_withdrawability_delay, + shard_committee_period, + eth1_follow_distance, + inactivity_score_bias, + inactivity_score_recovery_rate, + ejection_balance, + min_per_epoch_churn_limit, + churn_limit_quotient, + deposit_chain_id, + deposit_network_id, + deposit_contract_address, + } = self; + + if preset_base != T::spec_name().to_string().as_str() { return None; } - // Create a ChainSpec from the yaml config Some(ChainSpec { - /* - * Misc - */ - max_committees_per_slot: self.max_committees_per_slot as usize, - target_committee_size: self.target_committee_size as usize, - min_per_epoch_churn_limit: self.min_per_epoch_churn_limit, - churn_limit_quotient: self.churn_limit_quotient, - shuffle_round_count: self.shuffle_round_count, - min_genesis_active_validator_count: self.min_genesis_active_validator_count, - min_genesis_time: self.min_genesis_time, - hysteresis_quotient: self.hysteresis_quotient, - hysteresis_downward_multiplier: self.hysteresis_downward_multiplier, - hysteresis_upward_multiplier: self.hysteresis_upward_multiplier, - proportional_slashing_multiplier: self.proportional_slashing_multiplier, - /* - * Fork Choice - */ - safe_slots_to_update_justified: self.safe_slots_to_update_justified, - /* - * Validator - */ - eth1_follow_distance: self.eth1_follow_distance, - target_aggregators_per_committee: self.target_aggregators_per_committee, - random_subnets_per_validator: self.random_subnets_per_validator, - epochs_per_random_subnet_subscription: self.epochs_per_random_subnet_subscription, - seconds_per_eth1_block: self.seconds_per_eth1_block, - deposit_chain_id: self.deposit_chain_id, - deposit_network_id: self.deposit_network_id, - deposit_contract_address: 
self.deposit_contract_address, - /* - * Gwei values - */ - min_deposit_amount: self.min_deposit_amount, - max_effective_balance: self.max_effective_balance, - ejection_balance: self.ejection_balance, - effective_balance_increment: self.effective_balance_increment, - /* - * Initial values - */ - genesis_fork_version: self.genesis_fork_version, - bls_withdrawal_prefix_byte: self.bls_withdrawal_prefix, - /* - * Time parameters - */ - genesis_delay: self.genesis_delay, - seconds_per_slot: self.seconds_per_slot, - min_attestation_inclusion_delay: self.min_attestation_inclusion_delay, - min_seed_lookahead: Epoch::from(self.min_seed_lookahead), - max_seed_lookahead: Epoch::from(self.max_seed_lookahead), - min_validator_withdrawability_delay: Epoch::from( - self.min_validator_withdrawability_delay, - ), - shard_committee_period: self.shard_committee_period, - min_epochs_to_inactivity_penalty: self.min_epochs_to_inactivity_penalty, - /* - * Reward and penalty quotients - */ - base_reward_factor: self.base_reward_factor, - whistleblower_reward_quotient: self.whistleblower_reward_quotient, - proposer_reward_quotient: self.proposer_reward_quotient, - inactivity_penalty_quotient: self.inactivity_penalty_quotient, - min_slashing_penalty_quotient: self.min_slashing_penalty_quotient, - /* - * Signature domains - */ - domain_beacon_proposer: self.domain_beacon_proposer, - domain_beacon_attester: self.domain_beacon_attester, - domain_randao: self.domain_randao, - domain_deposit: self.domain_deposit, - domain_voluntary_exit: self.domain_voluntary_exit, - domain_selection_proof: self.domain_selection_proof, - domain_aggregate_and_proof: self.domain_aggregate_and_proof, - /* - * Lighthouse-specific parameters - * - * These are paramaters that are present in the chain spec but aren't part of the YAML - * config. We avoid using `..chain_spec` so that changes to the set of fields don't - * accidentally get forgotten (explicit better than implicit, yada yada). 
- */ - boot_nodes: chain_spec.boot_nodes.clone(), - network_id: chain_spec.network_id, - attestation_propagation_slot_range: chain_spec.attestation_propagation_slot_range, - maximum_gossip_clock_disparity_millis: chain_spec.maximum_gossip_clock_disparity_millis, - attestation_subnet_count: chain_spec.attestation_subnet_count, - /* - * Constants, not configurable. - */ - genesis_slot: chain_spec.genesis_slot, - far_future_epoch: chain_spec.far_future_epoch, - base_rewards_per_epoch: chain_spec.base_rewards_per_epoch, - deposit_contract_tree_depth: chain_spec.deposit_contract_tree_depth, + min_genesis_active_validator_count, + min_genesis_time, + genesis_fork_version, + genesis_delay, + altair_fork_version, + altair_fork_epoch: altair_fork_epoch.map(|q| q.value), + seconds_per_slot, + seconds_per_eth1_block, + min_validator_withdrawability_delay, + shard_committee_period, + eth1_follow_distance, + inactivity_score_bias, + inactivity_score_recovery_rate, + ejection_balance, + min_per_epoch_churn_limit, + churn_limit_quotient, + deposit_chain_id, + deposit_network_id, + deposit_contract_address, + ..chain_spec.clone() }) } } +/// A simple wrapper to permit the in-line use of `?`. 
+fn option_wrapper(f: F) -> Option +where + F: Fn() -> Option, +{ + f() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_mainnet_spec_can_be_constructed() { + let _ = ChainSpec::mainnet(); + } + + #[allow(clippy::useless_vec)] + fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) { + let previous_version = [0, 0, 0, 1]; + let current_version = [0, 0, 0, 2]; + let genesis_validators_root = Hash256::from_low_u64_le(77); + let fork_epoch = Epoch::new(1024); + let fork = Fork { + previous_version, + current_version, + epoch: fork_epoch, + }; + + for (epoch, version) in vec![ + (fork_epoch - 1, previous_version), + (fork_epoch, current_version), + (fork_epoch + 1, current_version), + ] { + let domain1 = spec.get_domain(epoch, domain_type, &fork, genesis_validators_root); + let domain2 = spec.compute_domain(domain_type, version, genesis_validators_root); + + assert_eq!(domain1, domain2); + assert_eq!(&domain1.as_bytes()[0..4], &int_to_bytes4(raw_domain)[..]); + } + } + + #[test] + fn test_get_domain() { + let spec = ChainSpec::mainnet(); + + test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); + test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec); + test_domain(Domain::Randao, spec.domain_randao, &spec); + test_domain(Domain::Deposit, spec.domain_deposit, &spec); + test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec); + test_domain(Domain::SelectionProof, spec.domain_selection_proof, &spec); + test_domain( + Domain::AggregateAndProof, + spec.domain_aggregate_and_proof, + &spec, + ); + test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); + } +} + #[cfg(test)] mod yaml_tests { use super::*; @@ -819,7 +694,7 @@ mod yaml_tests { .expect("error opening file"); let minimal_spec = ChainSpec::minimal(); - let yamlconfig = YamlConfig::from_spec::(&minimal_spec); + let yamlconfig = Config::from_chain_spec::(&minimal_spec); // write fresh minimal config to file 
serde_yaml::to_writer(writer, &yamlconfig).expect("failed to write or serialize"); @@ -829,7 +704,7 @@ mod yaml_tests { .open(tmp_file.as_ref()) .expect("error while opening the file"); // deserialize minimal config from file - let from: YamlConfig = serde_yaml::from_reader(reader).expect("error while deserializing"); + let from: Config = serde_yaml::from_reader(reader).expect("error while deserializing"); assert_eq!(from, yamlconfig); } @@ -842,32 +717,7 @@ mod yaml_tests { .open(tmp_file.as_ref()) .expect("error opening file"); let mainnet_spec = ChainSpec::mainnet(); - let yamlconfig = YamlConfig::from_spec::(&mainnet_spec); - serde_yaml::to_writer(writer, &yamlconfig).expect("failed to write or serialize"); - - let reader = OpenOptions::new() - .read(true) - .write(false) - .open(tmp_file.as_ref()) - .expect("error while opening the file"); - let from: YamlConfig = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(from, yamlconfig); - } - - #[test] - fn extra_fields_round_trip() { - let tmp_file = NamedTempFile::new().expect("failed to create temp file"); - let writer = OpenOptions::new() - .read(false) - .write(true) - .open(tmp_file.as_ref()) - .expect("error opening file"); - let mainnet_spec = ChainSpec::mainnet(); - let mut yamlconfig = YamlConfig::from_spec::(&mainnet_spec); - let (k1, v1) = ("SAMPLE_HARDFORK_KEY1", "123456789"); - let (k2, v2) = ("SAMPLE_HARDFORK_KEY2", "987654321"); - yamlconfig.extra_fields.insert(k1.into(), v1.into()); - yamlconfig.extra_fields.insert(k2.into(), v2.into()); + let yamlconfig = Config::from_chain_spec::(&mainnet_spec); serde_yaml::to_writer(writer, &yamlconfig).expect("failed to write or serialize"); let reader = OpenOptions::new() @@ -875,17 +725,17 @@ mod yaml_tests { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: YamlConfig = serde_yaml::from_reader(reader).expect("error while deserializing"); + let from: Config = 
serde_yaml::from_reader(reader).expect("error while deserializing"); assert_eq!(from, yamlconfig); } #[test] fn apply_to_spec() { let mut spec = ChainSpec::minimal(); - let yamlconfig = YamlConfig::from_spec::(&spec); + let yamlconfig = Config::from_chain_spec::(&spec); // modifying the original spec - spec.max_committees_per_slot += 1; + spec.min_genesis_active_validator_count += 1; spec.deposit_chain_id += 1; spec.deposit_network_id += 1; // Applying a yaml config with incorrect EthSpec should fail diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs new file mode 100644 index 00000000000..16d36c850c3 --- /dev/null +++ b/consensus/types/src/config_and_preset.rs @@ -0,0 +1,119 @@ +use crate::{AltairPreset, BasePreset, ChainSpec, Config, EthSpec}; +use serde_derive::{Deserialize, Serialize}; +use std::collections::HashMap; + +/// Fusion of a runtime-config with the compile-time preset values. +/// +/// Mostly useful for the API. +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct ConfigAndPreset { + #[serde(flatten)] + pub config: Config, + + #[serde(flatten)] + pub base_preset: BasePreset, + #[serde(flatten)] + pub altair_preset: AltairPreset, + + /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. + #[serde(flatten)] + pub extra_fields: HashMap, +} + +impl ConfigAndPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + let config = Config::from_chain_spec::(spec); + let base_preset = BasePreset::from_chain_spec::(spec); + let altair_preset = AltairPreset::from_chain_spec::(spec); + let extra_fields = HashMap::new(); + + Self { + config, + base_preset, + altair_preset, + extra_fields, + } + } + + /// Add fields that were previously part of the config but are now constants. 
+ pub fn make_backwards_compat(&mut self, spec: &ChainSpec) { + let hex_string = |value: &[u8]| format!("0x{}", hex::encode(&value)); + let u32_hex = |v: u32| hex_string(&v.to_le_bytes()); + let u8_hex = |v: u8| hex_string(&v.to_le_bytes()); + let fields = vec![ + ("config_name", self.config.preset_base.clone()), + ( + "bls_withdrawal_prefix", + u8_hex(spec.bls_withdrawal_prefix_byte), + ), + ( + "domain_beacon_proposer", + u32_hex(spec.domain_beacon_proposer), + ), + ( + "domain_beacon_attester", + u32_hex(spec.domain_beacon_attester), + ), + ("domain_randao", u32_hex(spec.domain_randao)), + ("domain_deposit", u32_hex(spec.domain_deposit)), + ("domain_voluntary_exit", u32_hex(spec.domain_voluntary_exit)), + ( + "domain_selection_proof", + u32_hex(spec.domain_selection_proof), + ), + ( + "domain_aggregate_and_proof", + u32_hex(spec.domain_aggregate_and_proof), + ), + ( + "target_aggregators_per_committee", + spec.target_aggregators_per_committee.to_string(), + ), + ( + "random_subnets_per_validator", + spec.random_subnets_per_validator.to_string(), + ), + ( + "epochs_per_random_subnet_subscription", + spec.epochs_per_random_subnet_subscription.to_string(), + ), + ]; + for (key, value) in fields { + self.extra_fields.insert(key.to_uppercase(), value); + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::MainnetEthSpec; + use std::fs::OpenOptions; + use tempfile::NamedTempFile; + + #[test] + fn extra_fields_round_trip() { + let tmp_file = NamedTempFile::new().expect("failed to create temp file"); + let writer = OpenOptions::new() + .read(false) + .write(true) + .open(tmp_file.as_ref()) + .expect("error opening file"); + let mainnet_spec = ChainSpec::mainnet(); + let mut yamlconfig = ConfigAndPreset::from_chain_spec::(&mainnet_spec); + let (k1, v1) = ("SAMPLE_HARDFORK_KEY1", "123456789"); + let (k2, v2) = ("SAMPLE_HARDFORK_KEY2", "987654321"); + yamlconfig.extra_fields.insert(k1.into(), v1.into()); + yamlconfig.extra_fields.insert(k2.into(), v2.into()); 
+ serde_yaml::to_writer(writer, &yamlconfig).expect("failed to write or serialize"); + + let reader = OpenOptions::new() + .read(true) + .write(false) + .open(tmp_file.as_ref()) + .expect("error while opening the file"); + let from: ConfigAndPreset = + serde_yaml::from_reader(reader).expect("error while deserializing"); + assert_eq!(from, yamlconfig); + } +} diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs new file mode 100644 index 00000000000..1001d702a7c --- /dev/null +++ b/consensus/types/src/consts.rs @@ -0,0 +1,19 @@ +pub mod altair { + pub const TIMELY_SOURCE_FLAG_INDEX: usize = 0; + pub const TIMELY_TARGET_FLAG_INDEX: usize = 1; + pub const TIMELY_HEAD_FLAG_INDEX: usize = 2; + pub const TIMELY_SOURCE_WEIGHT: u64 = 14; + pub const TIMELY_TARGET_WEIGHT: u64 = 26; + pub const TIMELY_HEAD_WEIGHT: u64 = 14; + pub const SYNC_REWARD_WEIGHT: u64 = 2; + pub const PROPOSER_WEIGHT: u64 = 8; + pub const WEIGHT_DENOMINATOR: u64 = 64; + + pub const PARTICIPATION_FLAG_WEIGHTS: [u64; NUM_FLAG_INDICES] = [ + TIMELY_SOURCE_WEIGHT, + TIMELY_TARGET_WEIGHT, + TIMELY_HEAD_WEIGHT, + ]; + + pub const NUM_FLAG_INDICES: usize = 3; +} diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 3cd5555a2c2..dbf70f78e32 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -3,15 +3,14 @@ use crate::*; use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use ssz_types::typenum::{ - Unsigned, U0, U1024, U1099511627776, U128, U16, U16777216, U2, U2048, U32, U4, U4096, U64, - U65536, U8, U8192, + Unsigned, U0, U1024, U1099511627776, U128, U16, U16777216, U2, U2048, U32, U4, U4096, U512, + U64, U65536, U8, U8192, }; use std::fmt::{self, Debug}; use std::str::FromStr; const MAINNET: &str = "mainnet"; const MINIMAL: &str = "minimal"; -const LEGACY: &str = "v0.12-legacy"; /// Used to identify one of the `EthSpec` instances defined here. 
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -19,7 +18,6 @@ const LEGACY: &str = "v0.12-legacy"; pub enum EthSpecId { Mainnet, Minimal, - V012Legacy, } impl FromStr for EthSpecId { @@ -29,7 +27,6 @@ impl FromStr for EthSpecId { match s { MAINNET => Ok(EthSpecId::Mainnet), MINIMAL => Ok(EthSpecId::Minimal), - LEGACY => Ok(EthSpecId::V012Legacy), _ => Err(format!("Unknown eth spec: {}", s)), } } @@ -40,7 +37,6 @@ impl fmt::Display for EthSpecId { let s = match self { EthSpecId::Mainnet => MAINNET, EthSpecId::Minimal => MINIMAL, - EthSpecId::V012Legacy => LEGACY, }; write!(f, "{}", s) } @@ -78,6 +74,10 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type MaxAttestations: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxDeposits: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxVoluntaryExits: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Altair + */ + type SyncCommitteeSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -182,8 +182,6 @@ macro_rules! params_from_eth_spec { } /// Ethereum Foundation specifications. -/// -/// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] pub struct MainnetEthSpec; @@ -205,6 +203,7 @@ impl EthSpec for MainnetEthSpec { type MaxAttestations = U128; type MaxDeposits = U16; type MaxVoluntaryExits = U16; + type SyncCommitteeSize = U512; type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -217,11 +216,7 @@ impl EthSpec for MainnetEthSpec { } } -pub type FoundationBeaconState = BeaconState; - /// Ethereum Foundation minimal spec, as defined in the eth2.0-specs repo. 
-/// -/// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] pub struct MinimalEthSpec; @@ -232,6 +227,7 @@ impl EthSpec for MinimalEthSpec { type SlotsPerHistoricalRoot = U64; type EpochsPerHistoricalVector = U64; type EpochsPerSlashingsVector = U64; + type SyncCommitteeSize = U32; type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch @@ -257,46 +253,3 @@ impl EthSpec for MinimalEthSpec { EthSpecId::Minimal } } - -pub type MinimalBeaconState = BeaconState; - -/// Suits the `v0.12.3` version of the eth2 spec: -/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/configs/mainnet/phase0.yaml -/// -/// This struct only needs to exist whilst we provide support for "legacy" testnets prior to v1.0.0 -/// (e.g., Medalla, Pyrmont, Spadina, Altona, etc.). -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Clone, PartialEq, Eq, Debug, Default, Serialize, Deserialize)] -pub struct V012LegacyEthSpec; - -impl EthSpec for V012LegacyEthSpec { - type EpochsPerEth1VotingPeriod = U32; - type SlotsPerEth1VotingPeriod = U1024; // 32 epochs * 32 slots per epoch - - params_from_eth_spec!(MainnetEthSpec { - SlotsPerEpoch, - SlotsPerHistoricalRoot, - EpochsPerHistoricalVector, - EpochsPerSlashingsVector, - MaxPendingAttestations, - JustificationBitsLength, - SubnetBitfieldLength, - MaxValidatorsPerCommittee, - GenesisEpoch, - HistoricalRootsLimit, - ValidatorRegistryLimit, - MaxProposerSlashings, - MaxAttesterSlashings, - MaxAttestations, - MaxDeposits, - MaxVoluntaryExits - }); - - fn default_spec() -> ChainSpec { - ChainSpec::v012_legacy() - } - - fn spec_name() -> EthSpecId { - EthSpecId::V012Legacy - } -} diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs new file mode 100644 index 00000000000..b6c939709ae --- 
/dev/null +++ b/consensus/types/src/fork_name.rs @@ -0,0 +1,47 @@ +use crate::{ChainSpec, Epoch}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum ForkName { + Base, + Altair, +} + +impl ForkName { + pub fn list_all() -> Vec { + vec![ForkName::Base, ForkName::Altair] + } + + /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` + /// is the only fork in effect from genesis. + pub fn make_genesis_spec(&self, mut spec: ChainSpec) -> ChainSpec { + // Assumes GENESIS_EPOCH = 0, which is safe because it's a constant. + match self { + ForkName::Base => { + spec.altair_fork_epoch = None; + spec + } + ForkName::Altair => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec + } + } + } +} + +impl std::str::FromStr for ForkName { + type Err = (); + + fn from_str(fork_name: &str) -> Result { + Ok(match fork_name { + "phase0" | "base" => ForkName::Base, + "altair" => ForkName::Altair, + _ => return Err(()), + }) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InconsistentFork { + pub fork_at_slot: ForkName, + pub object_fork: ForkName, +} diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index c30ec647be6..86e45699b06 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -74,14 +74,17 @@ impl<'de> Deserialize<'de> for GraffitiString { impl Into for GraffitiString { fn into(self) -> Graffiti { let graffiti_bytes = self.0.as_bytes(); - let mut graffiti = [0; 32]; + let mut graffiti = [0; GRAFFITI_BYTES_LEN]; - let graffiti_len = std::cmp::min(graffiti_bytes.len(), 32); + let graffiti_len = std::cmp::min(graffiti_bytes.len(), GRAFFITI_BYTES_LEN); // Copy the provided bytes over. // // Panic-free because `graffiti_bytes.len()` <= `GRAFFITI_BYTES_LEN`. 
- graffiti[..graffiti_len].copy_from_slice(&graffiti_bytes); + graffiti + .get_mut(..graffiti_len) + .expect("graffiti_len <= GRAFFITI_BYTES_LEN") + .copy_from_slice(&graffiti_bytes); graffiti.into() } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 5016b6f1305..b30ca63d8a5 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -3,8 +3,14 @@ // Required for big type-level numbers #![recursion_limit = "128"] // Clippy lint set up -#![deny(clippy::integer_arithmetic)] -#![deny(clippy::disallowed_method)] +#![cfg_attr( + not(test), + deny( + clippy::integer_arithmetic, + clippy::disallowed_method, + clippy::indexing_slicing + ) +)] #[macro_use] extern crate lazy_static; @@ -23,6 +29,7 @@ pub mod beacon_committee; pub mod beacon_state; pub mod chain_spec; pub mod checkpoint; +pub mod consts; pub mod deposit; pub mod deposit_data; pub mod deposit_message; @@ -31,6 +38,7 @@ pub mod eth1_data; pub mod eth_spec; pub mod fork; pub mod fork_data; +pub mod fork_name; pub mod free_attestation; pub mod graffiti; pub mod historical_batch; @@ -50,8 +58,13 @@ pub mod validator_subscription; pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; +pub mod config_and_preset; +pub mod participation_flags; +pub mod preset; pub mod slot_epoch; pub mod subnet_id; +pub mod sync_aggregate; +pub mod sync_committee; mod tree_hash_impls; #[cfg(feature = "sqlite")] @@ -64,13 +77,19 @@ pub use crate::attestation::{Attestation, Error as AttestationError}; pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; -pub use crate::beacon_block::BeaconBlock; -pub use crate::beacon_block_body::BeaconBlockBody; +pub use crate::beacon_block::{ + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockRef, BeaconBlockRefMut, +}; +pub use crate::beacon_block_body::{ + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyRef, + 
BeaconBlockBodyRefMut, +}; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; -pub use crate::chain_spec::{ChainSpec, Domain, YamlConfig}; +pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; +pub use crate::config_and_preset::ConfigAndPreset; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub use crate::deposit_message::DepositMessage; @@ -79,22 +98,29 @@ pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; pub use crate::fork::Fork; pub use crate::fork_data::ForkData; +pub use crate::fork_name::{ForkName, InconsistentFork}; pub use crate::free_attestation::FreeAttestation; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; +pub use crate::participation_flags::ParticipationFlags; pub use crate::pending_attestation::PendingAttestation; +pub use crate::preset::{AltairPreset, BasePreset}; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; -pub use crate::signed_beacon_block::{SignedBeaconBlock, SignedBeaconBlockHash}; +pub use crate::signed_beacon_block::{ + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, +}; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_data::{SignedRoot, SigningData}; pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::subnet_id::SubnetId; +pub use 
crate::sync_aggregate::SyncAggregate; +pub use crate::sync_committee::SyncCommittee; pub use crate::validator::Validator; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; @@ -105,6 +131,8 @@ pub type Address = H160; pub type ForkVersion = [u8; 4]; pub use bls::{ - AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, + AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, + Signature, SignatureBytes, }; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; +pub use superstruct::superstruct; diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs new file mode 100644 index 00000000000..c0ccb6db2c5 --- /dev/null +++ b/consensus/types/src/participation_flags.rs @@ -0,0 +1,87 @@ +use crate::{consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom, Hash256}; +use safe_arith::{ArithError, SafeArith}; +use serde_derive::{Deserialize, Serialize}; +use ssz::{Decode, DecodeError, Encode}; +use test_random_derive::TestRandom; +use tree_hash::{TreeHash, TreeHashType}; + +#[derive(Debug, Default, Clone, Copy, PartialEq, Deserialize, Serialize, TestRandom)] +#[serde(transparent)] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +pub struct ParticipationFlags { + bits: u8, +} + +impl ParticipationFlags { + pub fn add_flag(&mut self, flag_index: usize) -> Result<(), ArithError> { + if flag_index >= NUM_FLAG_INDICES { + return Err(ArithError::Overflow); + } + self.bits |= 1u8.safe_shl(flag_index as u32)?; + Ok(()) + } + + pub fn has_flag(&self, flag_index: usize) -> Result { + if flag_index >= NUM_FLAG_INDICES { + return Err(ArithError::Overflow); + } + let mask = 1u8.safe_shl(flag_index as u32)?; + Ok(self.bits & mask == mask) + } +} + +/// Decode implementation that transparently behaves like the inner `u8`. 
+impl Decode for ParticipationFlags { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + u8::from_ssz_bytes(bytes).map(|bits| Self { bits }) + } +} + +/// Encode implementation that transparently behaves like the inner `u8`. +impl Encode for ParticipationFlags { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.bits.ssz_append(buf); + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.bits.ssz_bytes_len() + } + + fn as_ssz_bytes(&self) -> Vec { + self.bits.as_ssz_bytes() + } +} + +impl TreeHash for ParticipationFlags { + fn tree_hash_type() -> TreeHashType { + u8::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + self.bits.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + u8::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> Hash256 { + self.bits.tree_hash_root() + } +} diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs new file mode 100644 index 00000000000..fcb550b81ca --- /dev/null +++ b/consensus/types/src/preset.rs @@ -0,0 +1,196 @@ +use crate::{ChainSpec, Epoch, EthSpec, Unsigned}; +use serde_derive::{Deserialize, Serialize}; + +/// Value-level representation of an Ethereum consensus "preset". +/// +/// This should only be used to check consistency of the compile-time constants +/// with a preset YAML file, or to make preset values available to the API. Prefer +/// the constants on `EthSpec` or the fields on `ChainSpec` to constructing and using +/// one of these structs. 
+/// +/// https://github.com/ethereum/eth2.0-specs/blob/dev/presets/mainnet/phase0.yaml +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct BasePreset { + #[serde(with = "serde_utils::quoted_u64")] + pub max_committees_per_slot: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub target_committee_size: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_validators_per_committee: u64, + #[serde(with = "serde_utils::quoted_u8")] + pub shuffle_round_count: u8, + #[serde(with = "serde_utils::quoted_u64")] + pub hysteresis_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub hysteresis_downward_multiplier: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub hysteresis_upward_multiplier: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub safe_slots_to_update_justified: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub min_deposit_amount: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_effective_balance: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub effective_balance_increment: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub min_attestation_inclusion_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub slots_per_epoch: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub min_seed_lookahead: Epoch, + #[serde(with = "serde_utils::quoted_u64")] + pub max_seed_lookahead: Epoch, + #[serde(with = "serde_utils::quoted_u64")] + pub epochs_per_eth1_voting_period: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub slots_per_historical_root: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub min_epochs_to_inactivity_penalty: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub epochs_per_historical_vector: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub epochs_per_slashings_vector: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub historical_roots_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub 
validator_registry_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub base_reward_factor: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub whistleblower_reward_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub proposer_reward_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub inactivity_penalty_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub min_slashing_penalty_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub proportional_slashing_multiplier: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_proposer_slashings: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_attester_slashings: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_attestations: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_deposits: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_voluntary_exits: u64, +} + +impl BasePreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + max_committees_per_slot: spec.max_committees_per_slot as u64, + target_committee_size: spec.target_committee_size as u64, + max_validators_per_committee: T::MaxValidatorsPerCommittee::to_u64(), + shuffle_round_count: spec.shuffle_round_count, + hysteresis_quotient: spec.hysteresis_quotient, + hysteresis_downward_multiplier: spec.hysteresis_downward_multiplier, + hysteresis_upward_multiplier: spec.hysteresis_upward_multiplier, + safe_slots_to_update_justified: spec.safe_slots_to_update_justified, + min_deposit_amount: spec.min_deposit_amount, + max_effective_balance: spec.max_effective_balance, + effective_balance_increment: spec.effective_balance_increment, + min_attestation_inclusion_delay: spec.min_attestation_inclusion_delay, + slots_per_epoch: T::SlotsPerEpoch::to_u64(), + min_seed_lookahead: spec.min_seed_lookahead, + max_seed_lookahead: spec.max_seed_lookahead, + epochs_per_eth1_voting_period: T::EpochsPerEth1VotingPeriod::to_u64(), + slots_per_historical_root: 
T::SlotsPerHistoricalRoot::to_u64(), + min_epochs_to_inactivity_penalty: spec.min_epochs_to_inactivity_penalty, + epochs_per_historical_vector: T::EpochsPerHistoricalVector::to_u64(), + epochs_per_slashings_vector: T::EpochsPerSlashingsVector::to_u64(), + historical_roots_limit: T::HistoricalRootsLimit::to_u64(), + validator_registry_limit: T::ValidatorRegistryLimit::to_u64(), + base_reward_factor: spec.base_reward_factor, + whistleblower_reward_quotient: spec.whistleblower_reward_quotient, + proposer_reward_quotient: spec.proposer_reward_quotient, + inactivity_penalty_quotient: spec.inactivity_penalty_quotient, + min_slashing_penalty_quotient: spec.min_slashing_penalty_quotient, + proportional_slashing_multiplier: spec.proportional_slashing_multiplier, + max_proposer_slashings: T::MaxProposerSlashings::to_u64(), + max_attester_slashings: T::MaxAttesterSlashings::to_u64(), + max_attestations: T::MaxAttestations::to_u64(), + max_deposits: T::MaxDeposits::to_u64(), + max_voluntary_exits: T::MaxVoluntaryExits::to_u64(), + } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct AltairPreset { + #[serde(with = "serde_utils::quoted_u64")] + pub inactivity_penalty_quotient_altair: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub min_slashing_penalty_quotient_altair: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub proportional_slashing_multiplier_altair: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub sync_committee_size: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub epochs_per_sync_committee_period: Epoch, + #[serde(with = "serde_utils::quoted_u64")] + pub min_sync_committee_participants: u64, +} + +impl AltairPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + inactivity_penalty_quotient_altair: spec.inactivity_penalty_quotient_altair, + min_slashing_penalty_quotient_altair: spec.min_slashing_penalty_quotient_altair, + 
proportional_slashing_multiplier_altair: spec.proportional_slashing_multiplier_altair, + sync_committee_size: T::SyncCommitteeSize::to_u64(), + epochs_per_sync_committee_period: spec.epochs_per_sync_committee_period, + min_sync_committee_participants: spec.min_sync_committee_participants, + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{MainnetEthSpec, MinimalEthSpec}; + use serde::de::DeserializeOwned; + use std::env; + use std::fs::File; + use std::path::PathBuf; + + fn presets_base_path() -> PathBuf { + env::var("CARGO_MANIFEST_DIR") + .expect("should know manifest dir") + .parse::() + .expect("should parse manifest dir as path") + .join("presets") + } + + fn preset_from_file(preset_name: &str, filename: &str) -> T { + let f = File::open(&presets_base_path().join(preset_name).join(filename)) + .expect("preset file exists"); + serde_yaml::from_reader(f).unwrap() + } + + fn preset_test() { + let preset_name = E::spec_name().to_string(); + let spec = E::default_spec(); + + let phase0: BasePreset = preset_from_file(&preset_name, "phase0.yaml"); + assert_eq!(phase0, BasePreset::from_chain_spec::(&spec)); + + let altair: AltairPreset = preset_from_file(&preset_name, "altair.yaml"); + assert_eq!(altair, AltairPreset::from_chain_spec::(&spec)); + } + + #[test] + fn mainnet_presets_consistent() { + preset_test::(); + } + + #[test] + fn minimal_presets_consistent() { + preset_test::(); + } +} diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs index 508b261de8a..0a360b01554 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/selection_proof.rs @@ -49,8 +49,9 @@ impl SelectionProof { pub fn is_aggregator_from_modulo(&self, modulo: u64) -> Result { let signature_hash = hash(&self.0.as_ssz_bytes()); let signature_hash_int = u64::from_le_bytes( - signature_hash[0..8] - .as_ref() + signature_hash + .get(0..8) + .expect("hash is 32 bytes") .try_into() .expect("first 8 bytes of signature 
should always convert to fixed array"), ); diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index cd2e8507221..1a9e93b8e2d 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,12 +1,9 @@ -use crate::{ - test_utils::TestRandom, BeaconBlock, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, - SignedBeaconBlockHeader, SignedRoot, SigningData, Slot, -}; +use crate::*; use bls::Signature; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::fmt; -use test_random_derive::TestRandom; +use superstruct::superstruct; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; @@ -39,17 +36,116 @@ impl From for Hash256 { } /// A `BeaconBlock` and a signature from its proposer. -/// -/// Spec v0.12.1 -#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[superstruct( + variants(Base, Altair), + variant_attributes( + derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash + ), + cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), + serde(bound = "E: EthSpec") + ) +)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash)] +#[serde(untagged)] #[serde(bound = "E: EthSpec")] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct SignedBeaconBlock { - pub message: BeaconBlock, + #[superstruct(only(Base), partial_getter(rename = "message_base"))] + pub message: BeaconBlockBase, + #[superstruct(only(Altair), partial_getter(rename = "message_altair"))] + pub message: BeaconBlockAltair, pub signature: Signature, } impl SignedBeaconBlock { + /// Returns the name of the fork pertaining to `self`. 
+ /// + /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork + /// dictated by `self.slot()`. + pub fn fork_name(&self, spec: &ChainSpec) -> Result { + let fork_at_slot = spec.fork_name_at_slot::(self.slot()); + let object_fork = match self { + SignedBeaconBlock::Base { .. } => ForkName::Base, + SignedBeaconBlock::Altair { .. } => ForkName::Altair, + }; + + if fork_at_slot == object_fork { + Ok(object_fork) + } else { + Err(InconsistentFork { + fork_at_slot, + object_fork, + }) + } + } + + /// SSZ decode. + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + // We need to use the slot-switching `from_ssz_bytes` of `BeaconBlock`, which doesn't + // compose with the other SSZ utils, so we duplicate some parts of `ssz_derive` here. + let mut builder = ssz::SszDecoderBuilder::new(bytes); + + builder.register_anonymous_variable_length_item()?; + builder.register_type::()?; + + let mut decoder = builder.build()?; + + // Read the first item as a `BeaconBlock`. + let message = decoder.decode_next_with(|bytes| BeaconBlock::from_ssz_bytes(bytes, spec))?; + let signature = decoder.decode_next()?; + + Ok(Self::from_block(message, signature)) + } + + /// Create a new `SignedBeaconBlock` from a `BeaconBlock` and `Signature`. + pub fn from_block(block: BeaconBlock, signature: Signature) -> Self { + match block { + BeaconBlock::Base(message) => { + SignedBeaconBlock::Base(SignedBeaconBlockBase { message, signature }) + } + BeaconBlock::Altair(message) => { + SignedBeaconBlock::Altair(SignedBeaconBlockAltair { message, signature }) + } + } + } + + /// Deconstruct the `SignedBeaconBlock` into a `BeaconBlock` and `Signature`. + /// + /// This is necessary to get a `&BeaconBlock` from a `SignedBeaconBlock` because + /// `SignedBeaconBlock` only contains a `BeaconBlock` _variant_. 
+ pub fn deconstruct(self) -> (BeaconBlock, Signature) { + match self { + SignedBeaconBlock::Base(block) => (BeaconBlock::Base(block.message), block.signature), + SignedBeaconBlock::Altair(block) => { + (BeaconBlock::Altair(block.message), block.signature) + } + } + } + + /// Accessor for the block's `message` field as a ref. + pub fn message(&self) -> BeaconBlockRef<'_, E> { + match self { + SignedBeaconBlock::Base(inner) => BeaconBlockRef::Base(&inner.message), + SignedBeaconBlock::Altair(inner) => BeaconBlockRef::Altair(&inner.message), + } + } + + /// Accessor for the block's `message` as a mutable reference (for testing only). + pub fn message_mut(&mut self) -> BeaconBlockRefMut<'_, E> { + match self { + SignedBeaconBlock::Base(inner) => BeaconBlockRefMut::Base(&mut inner.message), + SignedBeaconBlock::Altair(inner) => BeaconBlockRefMut::Altair(&mut inner.message), + } + } + /// Verify `self.signature`. /// /// If the root of `block.message` is already known it can be passed in via `object_root_opt`. @@ -62,8 +158,14 @@ impl SignedBeaconBlock { genesis_validators_root: Hash256, spec: &ChainSpec, ) -> bool { + // Refuse to verify the signature of a block if its structure does not match the fork at + // `self.slot()`. + if self.fork_name(spec).is_err() { + return false; + } + let domain = spec.get_domain( - self.message.slot.epoch(E::slots_per_epoch()), + self.slot().epoch(E::slots_per_epoch()), Domain::BeaconProposer, fork, genesis_validators_root, @@ -76,47 +178,37 @@ impl SignedBeaconBlock { } .tree_hash_root() } else { - self.message.signing_root(domain) + self.message().signing_root(domain) }; - self.signature.verify(pubkey, message) + self.signature().verify(pubkey, message) } /// Produce a signed beacon block header corresponding to this block. 
pub fn signed_block_header(&self) -> SignedBeaconBlockHeader { SignedBeaconBlockHeader { - message: self.message.block_header(), - signature: self.signature.clone(), + message: self.message().block_header(), + signature: self.signature().clone(), } } /// Convenience accessor for the block's slot. pub fn slot(&self) -> Slot { - self.message.slot + self.message().slot() } /// Convenience accessor for the block's parent root. pub fn parent_root(&self) -> Hash256 { - self.message.parent_root + self.message().parent_root() } /// Convenience accessor for the block's state root. pub fn state_root(&self) -> Hash256 { - self.message.state_root + self.message().state_root() } /// Returns the `tree_hash_root` of the block. - /// - /// Spec v0.12.1 pub fn canonical_root(&self) -> Hash256 { - Hash256::from_slice(&self.message.tree_hash_root()[..]) + self.message().tree_hash_root() } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::MainnetEthSpec; - - ssz_tests!(SignedBeaconBlock); -} diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs new file mode 100644 index 00000000000..85547e1b0ec --- /dev/null +++ b/consensus/types/src/sync_aggregate.rs @@ -0,0 +1,36 @@ +use crate::test_utils::TestRandom; +use crate::{AggregateSignature, BitVector, EthSpec}; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct SyncAggregate { + pub sync_committee_bits: BitVector, + pub sync_committee_signature: AggregateSignature, +} + +impl SyncAggregate { + /// New aggregate to be used as the seed for aggregating other signatures. 
+ #[allow(clippy::new_without_default)] + pub fn new() -> Self { + Self { + sync_committee_bits: BitVector::default(), + sync_committee_signature: AggregateSignature::infinity(), + } + } + + /// Empty aggregate to be used at genesis. + /// + /// Contains an empty signature and should *not* be used as the starting point for aggregation, + /// use `new` instead. + pub fn empty() -> Self { + Self { + sync_committee_bits: BitVector::default(), + sync_committee_signature: AggregateSignature::empty(), + } + } +} diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs new file mode 100644 index 00000000000..085f0bc04fe --- /dev/null +++ b/consensus/types/src/sync_committee.rs @@ -0,0 +1,29 @@ +use crate::test_utils::TestRandom; +use crate::typenum::Unsigned; +use crate::{EthSpec, FixedVector}; +use bls::PublicKeyBytes; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[serde(bound = "T: EthSpec")] +pub struct SyncCommittee { + pub pubkeys: FixedVector, + pub aggregate_pubkey: PublicKeyBytes, +} + +impl SyncCommittee { + /// Create a temporary sync committee that should *never* be included in a legitimate consensus object. 
+ pub fn temporary() -> Result { + Ok(Self { + pubkeys: FixedVector::new(vec![ + PublicKeyBytes::empty(); + T::SyncCommitteeSize::to_usize() + ])?, + aggregate_pubkey: PublicKeyBytes::empty(), + }) + } +} diff --git a/consensus/types/src/test_utils/builders.rs b/consensus/types/src/test_utils/builders.rs deleted file mode 100644 index 5bbe7b76949..00000000000 --- a/consensus/types/src/test_utils/builders.rs +++ /dev/null @@ -1,19 +0,0 @@ -mod testing_attestation_builder; -mod testing_attestation_data_builder; -mod testing_attester_slashing_builder; -mod testing_beacon_block_builder; -mod testing_beacon_state_builder; -mod testing_deposit_builder; -mod testing_pending_attestation_builder; -mod testing_proposer_slashing_builder; -mod testing_voluntary_exit_builder; - -pub use testing_attestation_builder::*; -pub use testing_attestation_data_builder::*; -pub use testing_attester_slashing_builder::*; -pub use testing_beacon_block_builder::*; -pub use testing_beacon_state_builder::*; -pub use testing_deposit_builder::*; -pub use testing_pending_attestation_builder::*; -pub use testing_proposer_slashing_builder::*; -pub use testing_voluntary_exit_builder::*; diff --git a/consensus/types/src/test_utils/builders/testing_attestation_builder.rs b/consensus/types/src/test_utils/builders/testing_attestation_builder.rs deleted file mode 100644 index a2e5f5f536a..00000000000 --- a/consensus/types/src/test_utils/builders/testing_attestation_builder.rs +++ /dev/null @@ -1,112 +0,0 @@ -use crate::test_utils::{AttestationTestTask, TestingAttestationDataBuilder}; -use crate::*; - -/// Builds an attestation to be used for testing purposes. -/// -/// This struct should **never be used for production purposes.** -pub struct TestingAttestationBuilder { - committee: Vec, - attestation: Attestation, -} - -impl TestingAttestationBuilder { - /// Create a new attestation builder. 
- pub fn new( - test_task: AttestationTestTask, - state: &BeaconState, - committee: &[usize], - slot: Slot, - index: u64, - spec: &ChainSpec, - ) -> Self { - let data_builder = TestingAttestationDataBuilder::new(test_task, state, index, slot, spec); - - let mut aggregation_bits_len = committee.len(); - - if test_task == AttestationTestTask::BadAggregationBitfieldLen { - aggregation_bits_len += 1 - } - - let mut aggregation_bits = BitList::with_capacity(aggregation_bits_len).unwrap(); - - for i in 0..committee.len() { - aggregation_bits.set(i, false).unwrap(); - } - - let attestation = Attestation { - aggregation_bits, - data: data_builder.build(), - signature: AggregateSignature::empty(), - }; - - Self { - attestation, - committee: committee.to_vec(), - } - } - - /// Signs the attestation with a subset (or all) committee members. - /// - /// `secret_keys` must be supplied in the same order as `signing_validators`. I.e., the first - /// keypair must be that of the first signing validator. 
- pub fn sign( - &mut self, - test_task: AttestationTestTask, - signing_validators: &[usize], - secret_keys: &[&SecretKey], - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> &mut Self { - assert_eq!( - signing_validators.len(), - secret_keys.len(), - "Must be a key for each validator" - ); - - for (key_index, validator_index) in signing_validators.iter().enumerate() { - let committee_index = self - .committee - .iter() - .position(|v| *v == *validator_index) - .expect("Signing validator not in attestation committee"); - - let index = if test_task == AttestationTestTask::BadSignature { - 0 - } else { - key_index - }; - - self.attestation - .sign( - secret_keys[index], - committee_index, - fork, - genesis_validators_root, - spec, - ) - .expect("can sign attestation"); - - self.attestation - .aggregation_bits - .set(committee_index, true) - .unwrap(); - } - - if test_task == AttestationTestTask::BadIndexedAttestationBadSignature { - // Flip an aggregation bit, to make the aggregate invalid - // (We also want to avoid making it completely empty) - self.attestation - .aggregation_bits - .set(0, !self.attestation.aggregation_bits.get(0).unwrap()) - .unwrap(); - } - - self - } - - /// Consume the builder and return the attestation. - pub fn build(self) -> Attestation { - self.attestation - } -} diff --git a/consensus/types/src/test_utils/builders/testing_attestation_data_builder.rs b/consensus/types/src/test_utils/builders/testing_attestation_data_builder.rs deleted file mode 100644 index a704374eee0..00000000000 --- a/consensus/types/src/test_utils/builders/testing_attestation_data_builder.rs +++ /dev/null @@ -1,98 +0,0 @@ -use crate::test_utils::AttestationTestTask; -use crate::*; -use safe_arith::SafeArith; - -/// Builds an `AttestationData` to be used for testing purposes. 
-/// -/// This struct should **never be used for production purposes.** -pub struct TestingAttestationDataBuilder { - data: AttestationData, -} - -impl TestingAttestationDataBuilder { - /// Configures a new `AttestationData` which attests to all of the same parameters as the - /// state. - pub fn new( - test_task: AttestationTestTask, - state: &BeaconState, - index: u64, - mut slot: Slot, - spec: &ChainSpec, - ) -> Self { - let current_epoch = state.current_epoch(); - let previous_epoch = state.previous_epoch(); - - let is_previous_epoch = slot.epoch(T::slots_per_epoch()) != current_epoch; - - let mut source = if is_previous_epoch { - state.previous_justified_checkpoint - } else { - state.current_justified_checkpoint - }; - - let mut target = if is_previous_epoch { - Checkpoint { - epoch: previous_epoch, - root: *state - .get_block_root(previous_epoch.start_slot(T::slots_per_epoch())) - .unwrap(), - } - } else { - Checkpoint { - epoch: current_epoch, - root: *state - .get_block_root(current_epoch.start_slot(T::slots_per_epoch())) - .unwrap(), - } - }; - - let beacon_block_root = *state.get_block_root(slot).unwrap(); - - match test_task { - AttestationTestTask::IncludedTooEarly => { - slot = state - .slot - .safe_sub(spec.min_attestation_inclusion_delay) - .unwrap() - .safe_add(1u64) - .unwrap(); - } - AttestationTestTask::IncludedTooLate => slot - .safe_sub_assign(Slot::new(T::SlotsPerEpoch::to_u64())) - .unwrap(), - AttestationTestTask::TargetEpochSlotMismatch => { - target = Checkpoint { - epoch: current_epoch.safe_add(1u64).unwrap(), - root: Hash256::zero(), - }; - assert_ne!(target.epoch, slot.epoch(T::slots_per_epoch())); - } - AttestationTestTask::WrongJustifiedCheckpoint => { - source = Checkpoint { - epoch: Epoch::from(0_u64), - root: Hash256::zero(), - } - } - _ => (), - } - - let data = AttestationData { - slot, - index, - - // LMD GHOST vote - beacon_block_root, - - // FFG Vote - source, - target, - }; - - Self { data } - } - - /// Returns the 
`AttestationData`, consuming the builder. - pub fn build(self) -> AttestationData { - self.data - } -} diff --git a/consensus/types/src/test_utils/builders/testing_attester_slashing_builder.rs b/consensus/types/src/test_utils/builders/testing_attester_slashing_builder.rs deleted file mode 100644 index c9c358998cf..00000000000 --- a/consensus/types/src/test_utils/builders/testing_attester_slashing_builder.rs +++ /dev/null @@ -1,134 +0,0 @@ -use crate::test_utils::AttesterSlashingTestTask; -use crate::*; - -/// Builds an `AttesterSlashing`. -/// -/// This struct should **never be used for production purposes.** -pub struct TestingAttesterSlashingBuilder(); - -impl TestingAttesterSlashingBuilder { - /// Builds an `AttesterSlashing` that is a double vote. - /// - /// The `signer` function is used to sign the double-vote and accepts: - /// - /// - `validator_index: u64` - /// - `message: &[u8]` - pub fn double_vote( - test_task: AttesterSlashingTestTask, - validator_indices: &[u64], - signer: F, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> AttesterSlashing - where - F: Fn(u64, &[u8]) -> Signature, - { - TestingAttesterSlashingBuilder::double_vote_with_additional_indices( - test_task, - validator_indices, - None, - signer, - fork, - genesis_validators_root, - spec, - ) - } - - pub fn double_vote_with_additional_indices( - test_task: AttesterSlashingTestTask, - validator_indices: &[u64], - additional_validator_indices: Option<&[u64]>, - signer: F, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> AttesterSlashing - where - F: Fn(u64, &[u8]) -> Signature, - { - let slot = Slot::new(1); - let index = 0; - let epoch_1 = Epoch::new(1); - let hash_1 = Hash256::from_low_u64_le(1); - let hash_2 = Hash256::from_low_u64_le(2); - let checkpoint_1 = Checkpoint { - epoch: epoch_1, - root: hash_1, - }; - let checkpoint_2 = Checkpoint { - epoch: epoch_1, - root: hash_2, - }; - - let data_1 = AttestationData { - slot, - index, 
- beacon_block_root: hash_1, - source: checkpoint_1, - target: checkpoint_1, - }; - - let data_2 = if test_task == AttesterSlashingTestTask::NotSlashable { - data_1.clone() - } else { - AttestationData { - target: checkpoint_2, - ..data_1 - } - }; - - let mut attestation_1 = IndexedAttestation { - attesting_indices: if test_task == AttesterSlashingTestTask::IndexedAttestation1Invalid - { - // Trigger bad validator indices ordering error. - vec![1, 0].into() - } else { - validator_indices.to_vec().into() - }, - data: data_1, - signature: AggregateSignature::empty(), - }; - - let mut attestation_2 = IndexedAttestation { - attesting_indices: if test_task == AttesterSlashingTestTask::IndexedAttestation2Invalid - { - // Trigger bad validator indices ordering error. - vec![1, 0].into() - } else { - match additional_validator_indices { - Some(x) => x.to_vec().into(), - None => validator_indices.to_vec().into(), - } - }, - data: data_2, - signature: AggregateSignature::empty(), - }; - - let add_signatures = |attestation: &mut IndexedAttestation, indices_to_sign: &[u64]| { - let domain = spec.get_domain( - attestation.data.target.epoch, - Domain::BeaconAttester, - fork, - genesis_validators_root, - ); - let message = attestation.data.signing_root(domain); - - for validator_index in indices_to_sign { - let signature = signer(*validator_index, message.as_bytes()); - attestation.signature.add_assign(&signature); - } - }; - - add_signatures(&mut attestation_1, validator_indices); - add_signatures( - &mut attestation_2, - additional_validator_indices.unwrap_or(validator_indices), - ); - - AttesterSlashing { - attestation_1, - attestation_2, - } - } -} diff --git a/consensus/types/src/test_utils/builders/testing_beacon_block_builder.rs b/consensus/types/src/test_utils/builders/testing_beacon_block_builder.rs deleted file mode 100644 index 97fe62780d8..00000000000 --- a/consensus/types/src/test_utils/builders/testing_beacon_block_builder.rs +++ /dev/null @@ -1,428 +0,0 @@ -use 
crate::{ - test_utils::{ - TestingAttestationBuilder, TestingAttesterSlashingBuilder, TestingDepositBuilder, - TestingProposerSlashingBuilder, TestingVoluntaryExitBuilder, - }, - typenum::U4294967296, - *, -}; -use int_to_bytes::int_to_bytes32; -use merkle_proof::MerkleTree; -use rayon::prelude::*; -use safe_arith::SafeArith; -use tree_hash::TreeHash; - -/// Builds a beacon block to be used for testing purposes. -/// -/// This struct should **never be used for production purposes.** -pub struct TestingBeaconBlockBuilder { - pub block: BeaconBlock, -} - -/// Enum used for passing test options to builder -#[derive(PartialEq, Clone, Copy)] -pub enum DepositTestTask { - Valid, - BadPubKey, - BadSig, - InvalidPubKey, - NoReset, -} - -/// Enum used for passing test options to builder -#[derive(PartialEq, Clone, Copy)] -pub enum AttestationTestTask { - Valid, - WrongJustifiedCheckpoint, - BadIndexedAttestationBadSignature, - BadAggregationBitfieldLen, - BadSignature, - ValidatorUnknown, - IncludedTooEarly, - IncludedTooLate, - TargetEpochSlotMismatch, - // Note: BadTargetEpoch is unreachable in block processing due to valid inclusion window and - // slot check -} - -/// Enum used for passing test options to builder -#[derive(PartialEq, Clone, Copy)] -pub enum AttesterSlashingTestTask { - Valid, - NotSlashable, - IndexedAttestation1Invalid, - IndexedAttestation2Invalid, -} - -/// Enum used for passing test options to builder -#[derive(PartialEq, Clone, Copy)] -pub enum ProposerSlashingTestTask { - Valid, - ProposerUnknown, - ProposalEpochMismatch, - ProposalsIdentical, - ProposerNotSlashable, - BadProposal1Signature, - BadProposal2Signature, -} - -impl TestingBeaconBlockBuilder { - /// Create a new builder from genesis. - pub fn new(spec: &ChainSpec) -> Self { - Self { - block: BeaconBlock::empty(spec), - } - } - - /// Set the previous block root - pub fn set_parent_root(&mut self, root: Hash256) { - self.block.parent_root = root; - } - - /// Set the slot of the block. 
- pub fn set_slot(&mut self, slot: Slot) { - self.block.slot = slot; - } - - /// Set the proposer index of the block. - pub fn set_proposer_index(&mut self, proposer_index: u64) { - self.block.proposer_index = proposer_index; - } - - /// Sets the randao to be a signature across the blocks epoch. - /// - /// Modifying the block's slot after signing may invalidate the signature. - pub fn set_randao_reveal( - &mut self, - sk: &SecretKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) { - let epoch = self.block.slot.epoch(T::slots_per_epoch()); - let domain = spec.get_domain(epoch, Domain::Randao, fork, genesis_validators_root); - let message = epoch.signing_root(domain); - self.block.body.randao_reveal = sk.sign(message); - } - - /// Has the randao reveal been set? - pub fn randao_reveal_not_set(&mut self) -> bool { - self.block.body.randao_reveal.is_empty() - } - - /// Inserts a signed, valid `ProposerSlashing` for the validator. - pub fn insert_proposer_slashing( - &mut self, - test_task: ProposerSlashingTestTask, - validator_index: u64, - secret_key: &SecretKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) { - let proposer_slashing = build_proposer_slashing::( - test_task, - validator_index, - secret_key, - fork, - genesis_validators_root, - spec, - ); - self.block - .body - .proposer_slashings - .push(proposer_slashing) - .unwrap(); - } - - /// Inserts a signed, valid `AttesterSlashing` for each validator index in `validator_indices`. 
- pub fn insert_attester_slashing( - &mut self, - test_task: AttesterSlashingTestTask, - validator_indices: &[u64], - secret_keys: &[&SecretKey], - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) { - let attester_slashing = build_double_vote_attester_slashing( - test_task, - validator_indices, - secret_keys, - fork, - genesis_validators_root, - spec, - ); - let _ = self.block.body.attester_slashings.push(attester_slashing); - } - - /// Fills the block with `num_attestations` attestations. - /// - /// It will first go and get each committee that is able to include an attestation in this - /// block. If there _are_ enough committees, it will produce an attestation for each. If there - /// _are not_ enough committees, it will start splitting the committees in half until it - /// achieves the target. It will then produce separate attestations for each split committee. - /// - /// Note: the signed messages of the split committees will be identical -- it would be possible - /// to aggregate these split attestations. - pub fn insert_attestations( - &mut self, - test_task: AttestationTestTask, - state: &BeaconState, - secret_keys: &[&SecretKey], - num_attestations: usize, - spec: &ChainSpec, - ) -> Result<(), BeaconStateError> { - let mut slot = self - .block - .slot - .safe_sub(spec.min_attestation_inclusion_delay)?; - let mut attestations_added = 0; - - // Stores the following (in order): - // - // - The slot of the committee. - // - A list of all validators in the committee. - // - A list of all validators in the committee that should sign the attestation. - // - The index of the committee. - let mut committees: Vec<(Slot, Vec, Vec, u64)> = vec![]; - - if slot < T::slots_per_epoch() { - panic!("slot is too low, will get stuck in loop") - } - - // Loop backwards through slots gathering each committee, until: - // - // - The slot is too old to be included in a block at this slot. - // - The `MAX_ATTESTATIONS`. 
- loop { - if state.slot >= slot.safe_add(T::slots_per_epoch())? { - break; - } - - for beacon_committee in state.get_beacon_committees_at_slot(slot)? { - if attestations_added >= num_attestations { - break; - } - - committees.push(( - slot, - beacon_committee.committee.to_vec(), - beacon_committee.committee.to_vec(), - beacon_committee.index, - )); - - attestations_added += 1; - } - - slot.safe_sub_assign(1u64)?; - } - - // Loop through all the committees, splitting each one in half until we have - // `MAX_ATTESTATIONS` committees. - loop { - if committees.len() >= num_attestations as usize { - break; - } - - for i in 0..committees.len() { - if committees.len() >= num_attestations as usize { - break; - } - - let (slot, committee, mut signing_validators, index) = committees[i].clone(); - - let new_signing_validators = - signing_validators.split_off(signing_validators.len() / 2); - - committees[i] = (slot, committee.clone(), signing_validators, index); - committees.push((slot, committee, new_signing_validators, index)); - } - } - - let attestations: Vec<_> = committees - .par_iter() - .map(|(slot, committee, signing_validators, index)| { - let mut builder = TestingAttestationBuilder::new( - test_task, state, committee, *slot, *index, spec, - ); - - let signing_secret_keys: Vec<&SecretKey> = signing_validators - .iter() - .map(|validator_index| secret_keys[*validator_index]) - .collect(); - builder.sign( - test_task, - signing_validators, - &signing_secret_keys, - &state.fork, - state.genesis_validators_root, - spec, - ); - - builder.build() - }) - .collect(); - - for attestation in attestations { - self.block.body.attestations.push(attestation).unwrap(); - } - - Ok(()) - } - - /// Insert a `Valid` deposit into the state. 
- pub fn insert_deposits( - &mut self, - amount: u64, - test_task: DepositTestTask, - // TODO: deal with the fact deposits no longer have explicit indices - _index: u64, - num_deposits: u64, - state: &mut BeaconState, - spec: &ChainSpec, - ) { - // Vector containing deposits' data - let mut datas = vec![]; - for _ in 0..num_deposits { - let keypair = Keypair::random(); - - let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount); - builder.sign(test_task, &keypair, spec); - datas.push(builder.build().data); - } - - // Vector containing all leaves - let leaves = datas - .iter() - .map(|data| data.tree_hash_root()) - .collect::>(); - - // Building a VarList from leaves - let deposit_data_list = VariableList::<_, U4294967296>::from(leaves.clone()); - - // Setting the deposit_root to be the tree_hash_root of the VarList - state.eth1_data.deposit_root = deposit_data_list.tree_hash_root(); - - // Building the merkle tree used for generating proofs - let tree = MerkleTree::create(&leaves[..], spec.deposit_contract_tree_depth as usize); - - // Building proofs - let mut proofs = vec![]; - for i in 0..leaves.len() { - let (_, mut proof) = tree.generate_proof(i, spec.deposit_contract_tree_depth as usize); - proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64))); - proofs.push(proof); - } - - // Building deposits - let deposits = datas - .into_par_iter() - .zip(proofs.into_par_iter()) - .map(|(data, proof)| (data, proof.into())) - .map(|(data, proof)| Deposit { proof, data }) - .collect::>(); - - // Pushing deposits to block body - for deposit in deposits { - let _ = self.block.body.deposits.push(deposit); - } - - // Manually setting the deposit_count to process deposits - // This is for test purposes only - if test_task == DepositTestTask::NoReset { - state.eth1_data.deposit_count += num_deposits; - } else { - state.eth1_deposit_index = 0; - state.eth1_data.deposit_count = num_deposits; - } - } - - /// Insert an exit for the given validator at 
the given epoch into the block. - pub fn insert_exit( - &mut self, - validator_index: u64, - exit_epoch: Epoch, - secret_key: &SecretKey, - state: &BeaconState, - spec: &ChainSpec, - ) { - let builder = TestingVoluntaryExitBuilder::new(exit_epoch, validator_index); - let exit = builder.build(secret_key, &state.fork, state.genesis_validators_root, spec); - self.block.body.voluntary_exits.push(exit).unwrap(); - } - - /// Mutate the block before signing. - pub fn modify(&mut self, f: impl FnOnce(&mut BeaconBlock)) { - f(&mut self.block) - } - - /// Signs and returns the block, consuming the builder. - pub fn build( - self, - sk: &SecretKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> SignedBeaconBlock { - self.block.sign(sk, fork, genesis_validators_root, spec) - } - - /// Returns the block, consuming the builder. - pub fn build_without_signing(self) -> SignedBeaconBlock { - SignedBeaconBlock { - message: self.block, - signature: Signature::empty(), - } - } -} - -/// Builds an `ProposerSlashing` for some `validator_index`. -/// -/// Signs the message using a `BeaconChainHarness`. -pub fn build_proposer_slashing( - test_task: ProposerSlashingTestTask, - validator_index: u64, - secret_key: &SecretKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, -) -> ProposerSlashing { - TestingProposerSlashingBuilder::double_vote::( - test_task, - validator_index, - secret_key, - fork, - genesis_validators_root, - spec, - ) -} - -/// Builds an `AttesterSlashing` for some `validator_indices`. -/// -/// Signs the message using a `BeaconChainHarness`. 
-pub fn build_double_vote_attester_slashing( - test_task: AttesterSlashingTestTask, - validator_indices: &[u64], - secret_keys: &[&SecretKey], - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, -) -> AttesterSlashing { - let signer = |validator_index: u64, message: &[u8]| { - let key_index = validator_indices - .iter() - .position(|&i| i == validator_index) - .expect("Unable to find attester slashing key"); - secret_keys[key_index].sign(Hash256::from_slice(message)) - }; - - TestingAttesterSlashingBuilder::double_vote( - test_task, - validator_indices, - signer, - fork, - genesis_validators_root, - spec, - ) -} diff --git a/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs b/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs deleted file mode 100644 index 922d4017fea..00000000000 --- a/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ /dev/null @@ -1,194 +0,0 @@ -use super::super::generate_deterministic_keypairs; -use crate::test_utils::{AttestationTestTask, TestingPendingAttestationBuilder}; -use crate::*; -use bls::get_withdrawal_credentials; -use log::debug; -use rayon::prelude::*; - -pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs"; - -/// Builds a beacon state to be used for testing purposes. -/// -/// This struct should **never be used for production purposes.** -#[derive(Clone)] -pub struct TestingBeaconStateBuilder { - state: BeaconState, - keypairs: Vec, -} - -impl TestingBeaconStateBuilder { - /// Generates the validator keypairs deterministically. - pub fn from_deterministic_keypairs(validator_count: usize, spec: &ChainSpec) -> Self { - debug!("Generating {} deterministic keypairs...", validator_count); - let keypairs = generate_deterministic_keypairs(validator_count); - TestingBeaconStateBuilder::from_keypairs(keypairs, spec) - } - - /// Uses the given keypair for all validators. 
- pub fn from_single_keypair( - validator_count: usize, - keypair: &Keypair, - spec: &ChainSpec, - ) -> Self { - debug!("Generating {} cloned keypairs...", validator_count); - - let mut keypairs = Vec::with_capacity(validator_count); - for _ in 0..validator_count { - keypairs.push(keypair.clone()) - } - - TestingBeaconStateBuilder::from_keypairs(keypairs, spec) - } - - /// Creates the builder from an existing set of keypairs. - pub fn from_keypairs(keypairs: Vec, spec: &ChainSpec) -> Self { - let validator_count = keypairs.len(); - let starting_balance = spec.max_effective_balance; - - debug!( - "Building {} Validator objects from keypairs...", - validator_count - ); - let validators = keypairs - .par_iter() - .map(|keypair| { - let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( - &keypair.pk, - spec.bls_withdrawal_prefix_byte, - )); - - Validator { - pubkey: keypair.pk.clone().into(), - withdrawal_credentials, - // All validators start active. - activation_eligibility_epoch: T::genesis_epoch(), - activation_epoch: T::genesis_epoch(), - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - slashed: false, - effective_balance: starting_balance, - } - }) - .collect::>() - .into(); - - let genesis_time = 1_567_052_589; // 29 August, 2019; - - let mut state = BeaconState::new( - genesis_time, - Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }, - spec, - ); - - state.eth1_data.deposit_count = validator_count as u64; - state.eth1_deposit_index = validator_count as u64; - - let balances = vec![starting_balance; validator_count].into(); - - debug!("Importing {} existing validators...", validator_count); - state.validators = validators; - state.balances = balances; - - debug!("BeaconState initialized."); - - Self { state, keypairs } - } - - /// Consume the builder and return the `BeaconState` and the keypairs for each validator. 
- pub fn build(self) -> (BeaconState, Vec) { - (self.state, self.keypairs) - } - - /// Ensures that the state returned from `Self::build(..)` has all caches pre-built. - /// - /// Note: this performs the build when called. Ensure that no changes are made that would - /// invalidate this cache. - pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { - self.state.build_all_caches(spec).unwrap(); - - Ok(()) - } - - /// Sets the `BeaconState` to be in a slot, calling `teleport_to_epoch` to update the epoch. - pub fn teleport_to_slot(&mut self, slot: Slot) -> &mut Self { - self.teleport_to_epoch(slot.epoch(T::slots_per_epoch())); - self.state.slot = slot; - self - } - - /// Sets the `BeaconState` to be in the first slot of the given epoch. - /// - /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e., - /// highest justified and finalized slots, full justification bitfield, etc). - fn teleport_to_epoch(&mut self, epoch: Epoch) { - let state = &mut self.state; - - let slot = epoch.start_slot(T::slots_per_epoch()); - - state.slot = slot; - - state.previous_justified_checkpoint.epoch = epoch.saturating_sub(3u64); - state.current_justified_checkpoint.epoch = epoch.saturating_sub(2u64); - state.justification_bits = BitVector::from_bytes(vec![0b0000_1111]).unwrap(); - - state.finalized_checkpoint.epoch = state.previous_justified_checkpoint.epoch; - } - - /// Creates a full set of attestations for the `BeaconState`. Each attestation has full - /// participation from its committee and references the expected beacon_block hashes. - /// - /// These attestations should be fully conducive to justification and finalization. 
- pub fn insert_attestations(&mut self, spec: &ChainSpec) { - let state = &mut self.state; - - state - .build_committee_cache(RelativeEpoch::Previous, spec) - .unwrap(); - state - .build_committee_cache(RelativeEpoch::Current, spec) - .unwrap(); - - let current_epoch = state.current_epoch(); - let previous_epoch = state.previous_epoch(); - - let first_slot = previous_epoch.start_slot(T::slots_per_epoch()).as_u64(); - let last_slot = current_epoch.end_slot(T::slots_per_epoch()).as_u64() - - spec.min_attestation_inclusion_delay; - let last_slot = std::cmp::min(state.slot.as_u64(), last_slot); - - for slot in first_slot..=last_slot { - let slot = Slot::from(slot); - - let committees: Vec = state - .get_beacon_committees_at_slot(slot) - .unwrap() - .into_iter() - .map(|c| c.clone().into_owned()) - .collect(); - - for beacon_committee in committees { - let mut builder = TestingPendingAttestationBuilder::new( - AttestationTestTask::Valid, - state, - beacon_committee.index, - slot, - spec, - ); - // The entire committee should have signed the pending attestation. - let signers = vec![true; beacon_committee.committee.len()]; - builder.add_committee_participation(signers); - let attestation = builder.build(); - - if attestation.data.target.epoch < state.current_epoch() { - state.previous_epoch_attestations.push(attestation).unwrap() - } else { - state.current_epoch_attestations.push(attestation).unwrap() - } - } - } - } -} diff --git a/consensus/types/src/test_utils/builders/testing_deposit_builder.rs b/consensus/types/src/test_utils/builders/testing_deposit_builder.rs deleted file mode 100644 index 2ece83f7f9d..00000000000 --- a/consensus/types/src/test_utils/builders/testing_deposit_builder.rs +++ /dev/null @@ -1,64 +0,0 @@ -use crate::test_utils::DepositTestTask; -use crate::*; -use bls::{get_withdrawal_credentials, PublicKeyBytes, SignatureBytes}; - -/// Builds an deposit to be used for testing purposes. 
-/// -/// This struct should **never be used for production purposes.** -pub struct TestingDepositBuilder { - deposit: Deposit, -} - -impl TestingDepositBuilder { - /// Instantiates a new builder. - pub fn new(pubkey: PublicKey, amount: u64) -> Self { - let deposit = Deposit { - proof: vec![].into(), - data: DepositData { - pubkey: PublicKeyBytes::from(pubkey), - withdrawal_credentials: Hash256::zero(), - amount, - signature: SignatureBytes::empty(), - }, - }; - - Self { deposit } - } - - /// Signs the deposit, also setting the following values: - /// - /// - `pubkey` to the signing pubkey. - /// - `withdrawal_credentials` to the signing pubkey. - /// - `proof_of_possession` - pub fn sign(&mut self, test_task: DepositTestTask, keypair: &Keypair, spec: &ChainSpec) { - let new_key = Keypair::random(); - let mut pubkeybytes = PublicKeyBytes::from(keypair.pk.clone()); - let mut secret_key = keypair.sk.clone(); - - match test_task { - DepositTestTask::BadPubKey => pubkeybytes = PublicKeyBytes::from(new_key.pk), - DepositTestTask::InvalidPubKey => { - // Creating invalid public key bytes - let mut public_key_bytes: Vec = vec![0; 48]; - public_key_bytes[0] = 255; - pubkeybytes = PublicKeyBytes::deserialize(&public_key_bytes).unwrap(); - } - DepositTestTask::BadSig => secret_key = new_key.sk, - _ => (), - } - - let withdrawal_credentials = Hash256::from_slice( - &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], - ); - - // Building the data and signing it - self.deposit.data.pubkey = pubkeybytes; - self.deposit.data.withdrawal_credentials = withdrawal_credentials; - self.deposit.data.signature = self.deposit.data.create_signature(&secret_key, spec); - } - - /// Builds the deposit, consuming the builder. 
- pub fn build(self) -> Deposit { - self.deposit - } -} diff --git a/consensus/types/src/test_utils/builders/testing_pending_attestation_builder.rs b/consensus/types/src/test_utils/builders/testing_pending_attestation_builder.rs deleted file mode 100644 index c56deb647d7..00000000000 --- a/consensus/types/src/test_utils/builders/testing_pending_attestation_builder.rs +++ /dev/null @@ -1,60 +0,0 @@ -use crate::test_utils::{AttestationTestTask, TestingAttestationDataBuilder}; -use crate::*; - -/// Builds an `AttesterSlashing` to be used for testing purposes. -/// -/// This struct should **never be used for production purposes.** -pub struct TestingPendingAttestationBuilder { - pending_attestation: PendingAttestation, -} - -impl TestingPendingAttestationBuilder { - /// Create a new valid* `PendingAttestation` for the given parameters. - /// - /// The `inclusion_delay` will be set to `MIN_ATTESTATION_INCLUSION_DELAY`. - /// - /// * The aggregation bitfield will be empty, it needs to be set with - /// `Self::add_committee_participation`. - pub fn new( - test_task: AttestationTestTask, - state: &BeaconState, - index: u64, - slot: Slot, - spec: &ChainSpec, - ) -> Self { - let data_builder = TestingAttestationDataBuilder::new(test_task, state, index, slot, spec); - - let proposer_index = state.get_beacon_proposer_index(slot, spec).unwrap() as u64; - - let pending_attestation = PendingAttestation { - aggregation_bits: BitList::with_capacity(T::MaxValidatorsPerCommittee::to_usize()) - .unwrap(), - data: data_builder.build(), - inclusion_delay: spec.min_attestation_inclusion_delay, - proposer_index, - }; - - Self { - pending_attestation, - } - } - - /// Sets the committee participation in the `PendingAttestation`. - /// - /// The `PendingAttestation` will appear to be signed by each committee member who's value in - /// `signers` is true. 
- pub fn add_committee_participation(&mut self, signers: Vec) { - let mut aggregation_bits = BitList::with_capacity(signers.len()).unwrap(); - - for (i, signed) in signers.iter().enumerate() { - aggregation_bits.set(i, *signed).unwrap(); - } - - self.pending_attestation.aggregation_bits = aggregation_bits; - } - - /// Returns the `PendingAttestation`, consuming the builder. - pub fn build(self) -> PendingAttestation { - self.pending_attestation - } -} diff --git a/consensus/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/consensus/types/src/test_utils/builders/testing_proposer_slashing_builder.rs deleted file mode 100644 index 51c2aeaf496..00000000000 --- a/consensus/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ /dev/null @@ -1,82 +0,0 @@ -use crate::test_utils::ProposerSlashingTestTask; -use crate::*; - -/// Builds a `ProposerSlashing`. -/// -/// This struct should **never be used for production purposes.** -pub struct TestingProposerSlashingBuilder; - -impl TestingProposerSlashingBuilder { - /// Builds a `ProposerSlashing` that is a double vote. - /// - /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). 
- pub fn double_vote( - test_task: ProposerSlashingTestTask, - proposer_index: u64, - secret_key: &SecretKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> ProposerSlashing - where - T: EthSpec, - { - let slot = Slot::new(0); - let hash_1 = Hash256::from([1; 32]); - let hash_2 = if test_task == ProposerSlashingTestTask::ProposalsIdentical { - hash_1 - } else { - Hash256::from([2; 32]) - }; - - let mut signed_header_1 = SignedBeaconBlockHeader { - message: BeaconBlockHeader { - slot, - proposer_index, - parent_root: hash_1, - state_root: hash_1, - body_root: hash_1, - }, - signature: Signature::empty(), - }; - - let slot_2 = if test_task == ProposerSlashingTestTask::ProposalEpochMismatch { - Slot::new(128) - } else { - Slot::new(0) - }; - - let mut signed_header_2 = SignedBeaconBlockHeader { - message: BeaconBlockHeader { - parent_root: hash_2, - slot: slot_2, - ..signed_header_1.message - }, - signature: Signature::empty(), - }; - - if test_task != ProposerSlashingTestTask::BadProposal1Signature { - signed_header_1 = - signed_header_1 - .message - .sign::(secret_key, fork, genesis_validators_root, spec); - } - - if test_task != ProposerSlashingTestTask::BadProposal2Signature { - signed_header_2 = - signed_header_2 - .message - .sign::(secret_key, fork, genesis_validators_root, spec); - } - - if test_task == ProposerSlashingTestTask::ProposerUnknown { - signed_header_1.message.proposer_index = 3_141_592; - signed_header_2.message.proposer_index = 3_141_592; - } - - ProposerSlashing { - signed_header_1, - signed_header_2, - } - } -} diff --git a/consensus/types/src/test_utils/builders/testing_voluntary_exit_builder.rs b/consensus/types/src/test_utils/builders/testing_voluntary_exit_builder.rs deleted file mode 100644 index 330b48ad5fa..00000000000 --- a/consensus/types/src/test_utils/builders/testing_voluntary_exit_builder.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::*; - -/// Builds an exit to be used for testing purposes. 
-/// -/// This struct should **never be used for production purposes.** -pub struct TestingVoluntaryExitBuilder { - exit: VoluntaryExit, -} - -impl TestingVoluntaryExitBuilder { - /// Instantiates a new builder. - pub fn new(epoch: Epoch, validator_index: u64) -> Self { - let exit = VoluntaryExit { - epoch, - validator_index, - }; - - Self { exit } - } - - /// Build and sign the exit. - /// - /// The signing secret key must match that of the exiting validator. - pub fn build( - self, - secret_key: &SecretKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> SignedVoluntaryExit { - self.exit - .sign(secret_key, fork, genesis_validators_root, spec) - } -} diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index 63d90a83f47..c0333bcfd66 100644 --- a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -1,15 +1,50 @@ #![allow(clippy::integer_arithmetic)] -#[macro_use] -mod macros; -mod builders; -mod generate_deterministic_keypairs; -mod test_random; +use std::fmt::Debug; + +pub use rand::{RngCore, SeedableRng}; +pub use rand_xorshift::XorShiftRng; -pub use builders::*; pub use generate_deterministic_keypairs::generate_deterministic_keypair; pub use generate_deterministic_keypairs::generate_deterministic_keypairs; pub use generate_deterministic_keypairs::load_keypairs_from_yaml; -pub use rand::{RngCore, SeedableRng}; -pub use rand_xorshift::XorShiftRng; +use ssz::{ssz_encode, Decode, Encode}; pub use test_random::{test_random_instance, TestRandom}; +use tree_hash::TreeHash; + +#[macro_use] +mod macros; +mod generate_deterministic_keypairs; +mod test_random; + +pub fn test_ssz_tree_hash_pair(v1: &T, v2: &U) +where + T: TreeHash + Encode + Decode + Debug + PartialEq, + U: TreeHash + Encode + Decode + Debug + PartialEq, +{ + test_ssz_tree_hash_pair_with(v1, v2, T::from_ssz_bytes) +} + +pub fn test_ssz_tree_hash_pair_with( + v1: &T, + v2: &U, + t_decoder: impl FnOnce(&[u8]) 
-> Result, +) where + T: TreeHash + Encode + Debug + PartialEq, + U: TreeHash + Encode + Decode + Debug + PartialEq, +{ + // SSZ encoding should agree between the two types. + let encoding1 = ssz_encode(v1); + let encoding2 = ssz_encode(v2); + assert_eq!(encoding1, encoding2); + + // Decoding the encoding should yield either value. + let decoded1 = t_decoder(&encoding1).unwrap(); + assert_eq!(&decoded1, v1); + + let decoded2 = U::from_ssz_bytes(&encoding1).unwrap(); + assert_eq!(&decoded2, v2); + + // Tree hashing should agree. + assert_eq!(v1.tree_hash_root(), v2.tree_hash_root()); +} diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index f92d267178a..5a88c166308 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -41,6 +41,12 @@ impl TestRandom for u32 { } } +impl TestRandom for u8 { + fn random_for_test(rng: &mut impl RngCore) -> Self { + rng.next_u32().to_be_bytes()[0] + } +} + impl TestRandom for usize { fn random_for_test(rng: &mut impl RngCore) -> Self { rng.next_u32() as usize @@ -64,16 +70,15 @@ where impl TestRandom for FixedVector where - T: TestRandom + Default, + T: TestRandom, { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut output = vec![]; - - for _ in 0..(usize::random_for_test(rng) % std::cmp::min(4, N::to_usize())) { - output.push(::random_for_test(rng)); - } - - output.into() + Self::new( + (0..N::to_usize()) + .map(|_| T::random_for_test(rng)) + .collect(), + ) + .expect("N items provided") } } diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 87ffa8ada9d..b27bc90e3ad 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -61,7 +61,7 @@ impl Validator { spec: &ChainSpec, ) -> bool { // Placement in queue is finalized - self.activation_eligibility_epoch <= state.finalized_checkpoint.epoch + self.activation_eligibility_epoch <= 
state.finalized_checkpoint().epoch // Has not yet been activated && self.activation_epoch == spec.far_future_epoch } diff --git a/crypto/bls/src/generic_aggregate_public_key.rs b/crypto/bls/src/generic_aggregate_public_key.rs index ad25a59e8f9..5d998f4bb1d 100644 --- a/crypto/bls/src/generic_aggregate_public_key.rs +++ b/crypto/bls/src/generic_aggregate_public_key.rs @@ -1,6 +1,37 @@ +use crate::{generic_public_key::GenericPublicKey, Error}; +use std::marker::PhantomData; + /// Implemented on some struct from a BLS library so it may be used internally in this crate. -pub trait TAggregatePublicKey: Sized + Clone {} +pub trait TAggregatePublicKey: Sized + Clone { + fn to_public_key(&self) -> GenericPublicKey; + + // NOTE: this API *could* take a `&[&Pub]` as that's what the underlying library needs, + // but it seems that this type would rarely occur due to our use of wrapper structs + fn aggregate(pubkeys: &[GenericPublicKey]) -> Result; +} + +/// A BLS aggregate public key that is generic across some BLS point (`AggPub`). +/// +/// Provides generic functionality whilst deferring all serious cryptographic operations to `AggPub`. +#[derive(Clone)] +pub struct GenericAggregatePublicKey { + /// The underlying point which performs *actual* cryptographic operations. + point: AggPub, + _phantom: PhantomData, +} + +impl GenericAggregatePublicKey +where + AggPub: TAggregatePublicKey, +{ + pub fn to_public_key(&self) -> GenericPublicKey { + self.point.to_public_key() + } -/* - * Note: there is no immediate need for a `GenericAggregatePublicKey` struct. 
- */ + pub fn aggregate(pubkeys: &[GenericPublicKey]) -> Result { + Ok(Self { + point: AggPub::aggregate(pubkeys)?, + _phantom: PhantomData, + }) + } +} diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index d0cfac848f9..7569c2f7931 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -173,7 +173,7 @@ where impl GenericAggregateSignature where Pub: TPublicKey + Clone, - AggPub: TAggregatePublicKey + Clone, + AggPub: TAggregatePublicKey + Clone, Sig: TSignature, AggSig: TAggregateSignature, { @@ -189,6 +189,18 @@ where } } + /// Wrapper to `fast_aggregate_verify` accepting the infinity signature when `pubkeys` is empty. + pub fn eth2_fast_aggregate_verify( + &self, + msg: Hash256, + pubkeys: &[&GenericPublicKey], + ) -> bool { + if pubkeys.is_empty() && self.is_infinity { + return true; + } + self.fast_aggregate_verify(msg, pubkeys) + } + /// Verify that `self` represents an aggregate signature where all `pubkeys` have signed their /// corresponding message in `msgs`. /// diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index face79a4bfd..0976f5c82ba 100644 --- a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -27,7 +27,7 @@ pub trait TPublicKey: Sized + Clone { fn deserialize(bytes: &[u8]) -> Result; } -/// A BLS aggregate public key that is generic across some BLS point (`Pub`). +/// A BLS public key that is generic across some BLS point (`Pub`). /// /// Provides generic functionality whilst deferring all serious cryptographic operations to `Pub`. 
#[derive(Clone)] diff --git a/crypto/bls/src/generic_signature_set.rs b/crypto/bls/src/generic_signature_set.rs index 16f845fc4f5..a64db7adef4 100644 --- a/crypto/bls/src/generic_signature_set.rs +++ b/crypto/bls/src/generic_signature_set.rs @@ -74,7 +74,7 @@ where impl<'a, Pub, AggPub, Sig, AggSig> GenericSignatureSet<'a, Pub, AggPub, Sig, AggSig> where Pub: TPublicKey + Clone, - AggPub: TAggregatePublicKey + Clone, + AggPub: TAggregatePublicKey + Clone, Sig: TSignature + Clone, AggSig: TAggregateSignature + Clone, { diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index 70b7c90edf0..e94f5a9abd0 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -153,7 +153,19 @@ impl PartialEq for BlstAggregatePublicKey { } } -impl TAggregatePublicKey for BlstAggregatePublicKey {} +impl TAggregatePublicKey for BlstAggregatePublicKey { + fn to_public_key(&self) -> GenericPublicKey { + GenericPublicKey::from_point(self.0.to_public_key()) + } + + fn aggregate(pubkeys: &[GenericPublicKey]) -> Result { + let pubkey_refs = pubkeys.iter().map(|pk| pk.point()).collect::>(); + + // Public keys have already been checked for subgroup and infinity + let agg_pub = blst_core::AggregatePublicKey::aggregate(&pubkey_refs, false)?; + Ok(BlstAggregatePublicKey(agg_pub)) + } +} impl TSignature for blst_core::Signature { fn serialize(&self) -> [u8; SIGNATURE_BYTES_LEN] { diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index 72cc641aa14..1004dc20034 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -6,6 +6,7 @@ use crate::{ generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, Error, Hash256, ZeroizeHash, INFINITY_PUBLIC_KEY, INFINITY_SIGNATURE, }; + /// Provides the externally-facing, core BLS types. 
pub mod types { pub use super::verify_signature_sets; @@ -63,7 +64,15 @@ impl PartialEq for PublicKey { #[derive(Clone)] pub struct AggregatePublicKey([u8; PUBLIC_KEY_BYTES_LEN]); -impl TAggregatePublicKey for AggregatePublicKey {} +impl TAggregatePublicKey for AggregatePublicKey { + fn to_public_key(&self) -> GenericPublicKey { + GenericPublicKey::from_point(PublicKey(self.0)) + } + + fn aggregate(_pubkeys: &[GenericPublicKey]) -> Result { + Ok(Self(INFINITY_PUBLIC_KEY)) + } +} impl Eq for AggregatePublicKey {} diff --git a/crypto/bls/src/impls/milagro.rs b/crypto/bls/src/impls/milagro.rs index 829c5aa3d89..7eaa9ad105e 100644 --- a/crypto/bls/src/impls/milagro.rs +++ b/crypto/bls/src/impls/milagro.rs @@ -86,7 +86,18 @@ impl TPublicKey for milagro::PublicKey { } } -impl TAggregatePublicKey for milagro::AggregatePublicKey {} +impl TAggregatePublicKey for milagro::AggregatePublicKey { + fn to_public_key(&self) -> GenericPublicKey { + GenericPublicKey::from_point(milagro::PublicKey { + point: self.point.clone(), + }) + } + + fn aggregate(pubkeys: &[GenericPublicKey]) -> Result { + let pubkey_refs = pubkeys.iter().map(|pk| pk.point()).collect::>(); + Ok(milagro::AggregatePublicKey::aggregate(&pubkey_refs)?) + } +} impl TSignature for milagro::Signature { fn serialize(&self) -> [u8; SIGNATURE_BYTES_LEN] { diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index 5aaa4f8c818..8a31a90a148 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -79,6 +79,7 @@ impl From for Error { /// Generic implementations which are only generally useful for docs. pub mod generics { + pub use crate::generic_aggregate_public_key::GenericAggregatePublicKey; pub use crate::generic_aggregate_signature::GenericAggregateSignature; pub use crate::generic_keypair::GenericKeypair; pub use crate::generic_public_key::GenericPublicKey; @@ -102,6 +103,8 @@ macro_rules! 
define_mod { pub type PublicKey = GenericPublicKey; pub type PublicKeyBytes = GenericPublicKeyBytes; + pub type AggregatePublicKey = + GenericAggregatePublicKey; pub type Signature = GenericSignature; pub type AggregateSignature = GenericAggregateSignature< bls_variant::PublicKey, diff --git a/lcli/src/change_genesis_time.rs b/lcli/src/change_genesis_time.rs index 94ee6b2cf31..6b7b812e878 100644 --- a/lcli/src/change_genesis_time.rs +++ b/lcli/src/change_genesis_time.rs @@ -1,11 +1,12 @@ use clap::ArgMatches; -use ssz::{Decode, Encode}; +use eth2_network_config::Eth2NetworkConfig; +use ssz::Encode; use std::fs::File; use std::io::{Read, Write}; use std::path::PathBuf; use types::{BeaconState, EthSpec}; -pub fn run(matches: &ArgMatches) -> Result<(), String> { +pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { let path = matches .value_of("ssz-state") .ok_or("ssz-state not specified")? @@ -18,6 +19,9 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { .parse::() .map_err(|e| format!("Unable to parse genesis-time: {}", e))?; + let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; + let spec = ð2_network_config.chain_spec::()?; + let mut state: BeaconState = { let mut file = File::open(&path).map_err(|e| format!("Unable to open file: {}", e))?; @@ -26,10 +30,11 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { file.read_to_end(&mut ssz) .map_err(|e| format!("Unable to read file: {}", e))?; - BeaconState::from_ssz_bytes(&ssz).map_err(|e| format!("Unable to decode SSZ: {:?}", e))? + BeaconState::from_ssz_bytes(&ssz, spec) + .map_err(|e| format!("Unable to decode SSZ: {:?}", e))? 
}; - state.genesis_time = genesis_time; + *state.genesis_time_mut() = genesis_time; let mut file = File::create(path).map_err(|e| format!("Unable to create file: {}", e))?; diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index d929b1784c4..689107228e8 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -12,7 +12,11 @@ use types::EthSpec; /// Interval between polling the eth1 node for genesis information. pub const ETH1_GENESIS_UPDATE_INTERVAL: Duration = Duration::from_millis(7_000); -pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { +pub fn run( + mut env: Environment, + testnet_dir: PathBuf, + matches: &ArgMatches<'_>, +) -> Result<(), String> { let endpoints = matches .value_of("eth1-endpoint") .map(|e| { @@ -25,29 +29,9 @@ pub fn run(mut env: Environment, matches: &ArgMatches<'_>) -> Res .map(|s| s.split(',').map(String::from).collect()) }); - let testnet_dir = matches - .value_of("testnet-dir") - .ok_or(()) - .and_then(|dir| dir.parse::().map_err(|_| ())) - .unwrap_or_else(|_| { - dirs::home_dir() - .map(|home| home.join(directory::DEFAULT_ROOT_DIR).join("testnet")) - .expect("should locate home directory") - }); - let mut eth2_network_config = Eth2NetworkConfig::load(testnet_dir.clone())?; - let spec = eth2_network_config - .yaml_config - .as_ref() - .ok_or("The testnet directory must contain a spec config")? 
- .apply_to_chain_spec::(&env.core_context().eth2_config.spec) - .ok_or_else(|| { - format!( - "The loaded config is not compatible with the {} spec", - &env.core_context().eth2_config.eth_spec_id - ) - })?; + let spec = eth2_network_config.chain_spec::()?; let mut config = Eth1Config::default(); if let Some(v) = endpoints.clone() { diff --git a/lcli/src/helpers.rs b/lcli/src/helpers.rs deleted file mode 100644 index 441059cd191..00000000000 --- a/lcli/src/helpers.rs +++ /dev/null @@ -1,106 +0,0 @@ -use clap::ArgMatches; -use hex; -use std::path::PathBuf; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::Address; - -pub fn time_now() -> Result { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map(|duration| duration.as_secs()) - .map_err(|e| format!("Unable to get time: {:?}", e)) -} - -pub fn parse_path_with_default_in_home_dir( - matches: &ArgMatches, - name: &'static str, - default: PathBuf, -) -> Result { - matches - .value_of(name) - .map(|dir| { - dir.parse::() - .map_err(|e| format!("Unable to parse {}: {}", name, e)) - }) - .unwrap_or_else(|| { - dirs::home_dir() - .map(|home| home.join(default)) - .ok_or_else(|| format!("Unable to locate home directory. Try specifying {}", name)) - }) -} - -pub fn parse_path(matches: &ArgMatches, name: &'static str) -> Result { - matches - .value_of(name) - .ok_or_else(|| format!("{} not specified", name))? - .parse::() - .map_err(|e| format!("Unable to parse {}: {}", name, e)) -} - -pub fn parse_u64(matches: &ArgMatches, name: &'static str) -> Result { - matches - .value_of(name) - .ok_or_else(|| format!("{} not specified", name))? 
- .parse::() - .map_err(|e| format!("Unable to parse {}: {}", name, e)) -} - -pub fn parse_u64_opt(matches: &ArgMatches, name: &'static str) -> Result, String> { - matches - .value_of(name) - .map(|val| { - val.parse::() - .map_err(|e| format!("Unable to parse {}: {}", name, e)) - }) - .transpose() -} - -pub fn parse_address(matches: &ArgMatches, name: &'static str) -> Result { - matches - .value_of(name) - .ok_or_else(|| format!("{} not specified", name)) - .and_then(|val| { - if val.starts_with("0x") { - val[2..] - .parse() - .map_err(|e| format!("Unable to parse {}: {:?}", name, e)) - } else { - Err(format!("Unable to parse {}, must have 0x prefix", name)) - } - }) -} - -pub fn parse_fork_opt(matches: &ArgMatches, name: &'static str) -> Result, String> { - matches - .value_of(name) - .map(|val| { - if val.starts_with("0x") { - let vec = hex::decode(&val[2..]) - .map_err(|e| format!("Unable to parse {} as hex: {:?}", name, e))?; - - if vec.len() != 4 { - Err(format!("{} must be exactly 4 bytes", name)) - } else { - let mut arr = [0; 4]; - arr.copy_from_slice(&vec); - Ok(arr) - } - } else { - Err(format!("Unable to parse {}, must have 0x prefix", name)) - } - }) - .transpose() -} - -pub fn parse_hex_bytes(matches: &ArgMatches, name: &'static str) -> Result, String> { - matches - .value_of(name) - .ok_or_else(|| format!("{} not specified", name)) - .and_then(|val| { - if val.starts_with("0x") { - hex::decode(&val[2..]).map_err(|e| format!("Unable to parse {}: {:?}", name, e)) - } else { - Err(format!("Unable to parse {}, must have 0x prefix", name)) - } - }) -} diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs index 744dd444715..6f35699fcae 100644 --- a/lcli/src/interop_genesis.rs +++ b/lcli/src/interop_genesis.rs @@ -1,6 +1,5 @@ use clap::ArgMatches; use clap_utils::parse_ssz_optional; -use environment::Environment; use eth2_network_config::Eth2NetworkConfig; use genesis::interop_genesis_state; use ssz::Encode; @@ -8,7 +7,7 @@ use 
std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; use types::{test_utils::generate_deterministic_keypairs, EthSpec}; -pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { +pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { let validator_count = matches .value_of("validator-count") .ok_or("validator-count not specified")? @@ -26,29 +25,9 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< .as_secs() }; - let testnet_dir = matches - .value_of("testnet-dir") - .ok_or(()) - .and_then(|dir| dir.parse::().map_err(|_| ())) - .unwrap_or_else(|_| { - dirs::home_dir() - .map(|home| home.join(directory::DEFAULT_ROOT_DIR).join("testnet")) - .expect("should locate home directory") - }); - let mut eth2_network_config = Eth2NetworkConfig::load(testnet_dir.clone())?; - let mut spec = eth2_network_config - .yaml_config - .as_ref() - .ok_or("The testnet directory must contain a spec config")? - .apply_to_chain_spec::(&env.core_context().eth2_config.spec) - .ok_or_else(|| { - format!( - "The loaded config is not compatible with the {} spec", - &env.core_context().eth2_config.eth_spec_id - ) - })?; + let mut spec = eth2_network_config.chain_spec::()?; if let Some(v) = parse_ssz_optional(matches, "genesis-fork-version")? 
{ spec.genesis_fork_version = v; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 0444efa5dd5..48ca0338dd2 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -8,24 +8,21 @@ mod generate_bootnode_enr; mod insecure_validators; mod interop_genesis; mod new_testnet; -mod parse_hex; +mod parse_ssz; mod replace_state_pubkeys; mod skip_slots; mod transition_blocks; use clap::{App, Arg, ArgMatches, SubCommand}; +use clap_utils::parse_path_with_default_in_home_dir; use environment::EnvironmentBuilder; use log::LevelFilter; -use parse_hex::run_parse_hex; -use std::fs::File; +use parse_ssz::run_parse_ssz; use std::path::PathBuf; use std::process; use std::str::FromStr; -use std::time::{SystemTime, UNIX_EPOCH}; use transition_blocks::run_transition_blocks; -use types::{ - test_utils::TestingBeaconStateBuilder, EthSpec, EthSpecId, MainnetEthSpec, MinimalEthSpec, -}; +use types::{EthSpec, EthSpecId}; fn main() { simple_logger::SimpleLogger::new() @@ -55,34 +52,6 @@ fn main() { .global(true) .help("The testnet dir. 
Defaults to ~/.lighthouse/testnet"), ) - .subcommand( - SubCommand::with_name("genesis_yaml") - .about("Generates a genesis YAML file") - .arg( - Arg::with_name("num_validators") - .short("n") - .value_name("INTEGER") - .takes_value(true) - .required(true) - .help("Number of initial validators."), - ) - .arg( - Arg::with_name("genesis_time") - .short("g") - .value_name("INTEGER") - .takes_value(true) - .required(false) - .help("Eth2 genesis time (seconds since UNIX epoch)."), - ) - .arg( - Arg::with_name("output_file") - .short("f") - .value_name("PATH") - .takes_value(true) - .default_value("./genesis_state.yaml") - .help("Output file for generated state."), - ), - ) .subcommand( SubCommand::with_name("skip-slots") .about( @@ -138,22 +107,21 @@ fn main() { ), ) .subcommand( - SubCommand::with_name("pretty-hex") - .about("Parses SSZ encoded as ASCII 0x-prefixed hex") + SubCommand::with_name("pretty-ssz") + .about("Parses SSZ-encoded data from a file") .arg( Arg::with_name("type") .value_name("TYPE") .takes_value(true) .required(true) - .possible_values(&["block"]) - .help("The schema of the supplied SSZ."), + .help("Type to decode"), ) .arg( - Arg::with_name("hex_ssz") - .value_name("HEX") + Arg::with_name("ssz-file") + .value_name("FILE") .takes_value(true) .required(true) - .help("SSZ encoded as 0x-prefixed hex"), + .help("Path to SSZ bytes"), ), ) .subcommand( @@ -408,7 +376,16 @@ fn main() { "The block the deposit contract was deployed. 
Setting this is a huge optimization for nodes, please do it.", ), - ), + ) + .arg( + Arg::with_name("altair-fork-epoch") + .long("altair-fork-epoch") + .value_name("EPOCH") + .takes_value(true) + .help( + "The epoch at which to enable the Altair hard fork", + ), + ) ) .subcommand( SubCommand::with_name("check-deposit-data") @@ -516,7 +493,6 @@ fn main() { .and_then(|eth_spec_id| match eth_spec_id { EthSpecId::Minimal => run(EnvironmentBuilder::minimal(), &matches), EthSpecId::Mainnet => run(EnvironmentBuilder::mainnet(), &matches), - EthSpecId::V012Legacy => run(EnvironmentBuilder::v012_legacy(), &matches), }); match result { @@ -540,66 +516,37 @@ fn run( .build() .map_err(|e| format!("should build env: {:?}", e))?; - match matches.subcommand() { - ("genesis_yaml", Some(matches)) => { - let num_validators = matches - .value_of("num_validators") - .expect("slog requires num_validators") - .parse::() - .expect("num_validators must be a valid integer"); - - let genesis_time = if let Some(string) = matches.value_of("genesis_time") { - string - .parse::() - .expect("genesis_time must be a valid integer") - } else { - warn!("No genesis time supplied via CLI, using the current time."); - SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("should obtain time since unix epoch") - .as_secs() - }; - - let file = matches - .value_of("output_file") - .expect("slog requires output file") - .parse::() - .expect("output_file must be a valid path"); - - info!( - "Creating genesis state with {} validators and genesis time {}.", - num_validators, genesis_time - ); + let testnet_dir = parse_path_with_default_in_home_dir( + matches, + "testnet-dir", + PathBuf::from(directory::DEFAULT_ROOT_DIR).join("testnet"), + )?; - match matches.value_of("spec").expect("spec is required by slog") { - "minimal" => genesis_yaml::(num_validators, genesis_time, file), - "mainnet" => genesis_yaml::(num_validators, genesis_time, file), - _ => unreachable!("guarded by slog possible_values"), - }; - 
info!("Genesis state YAML file created. Exiting successfully."); - Ok(()) - } - ("transition-blocks", Some(matches)) => run_transition_blocks::(matches) + match matches.subcommand() { + ("transition-blocks", Some(matches)) => run_transition_blocks::(testnet_dir, matches) .map_err(|e| format!("Failed to transition blocks: {}", e)), - ("skip-slots", Some(matches)) => { - skip_slots::run::(matches).map_err(|e| format!("Failed to skip slots: {}", e)) - } - ("pretty-hex", Some(matches)) => { - run_parse_hex::(matches).map_err(|e| format!("Failed to pretty print hex: {}", e)) + ("skip-slots", Some(matches)) => skip_slots::run::(testnet_dir, matches) + .map_err(|e| format!("Failed to skip slots: {}", e)), + ("pretty-ssz", Some(matches)) => { + run_parse_ssz::(matches).map_err(|e| format!("Failed to pretty print hex: {}", e)) } ("deploy-deposit-contract", Some(matches)) => { deploy_deposit_contract::run::(env, matches) .map_err(|e| format!("Failed to run deploy-deposit-contract command: {}", e)) } - ("eth1-genesis", Some(matches)) => eth1_genesis::run::(env, matches) + ("eth1-genesis", Some(matches)) => eth1_genesis::run::(env, testnet_dir, matches) .map_err(|e| format!("Failed to run eth1-genesis command: {}", e)), - ("interop-genesis", Some(matches)) => interop_genesis::run::(env, matches) + ("interop-genesis", Some(matches)) => interop_genesis::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run interop-genesis command: {}", e)), - ("change-genesis-time", Some(matches)) => change_genesis_time::run::(matches) - .map_err(|e| format!("Failed to run change-genesis-time command: {}", e)), - ("replace-state-pubkeys", Some(matches)) => replace_state_pubkeys::run::(matches) - .map_err(|e| format!("Failed to run replace-state-pubkeys command: {}", e)), - ("new-testnet", Some(matches)) => new_testnet::run::(matches) + ("change-genesis-time", Some(matches)) => { + change_genesis_time::run::(testnet_dir, matches) + .map_err(|e| format!("Failed to run change-genesis-time 
command: {}", e)) + } + ("replace-state-pubkeys", Some(matches)) => { + replace_state_pubkeys::run::(testnet_dir, matches) + .map_err(|e| format!("Failed to run replace-state-pubkeys command: {}", e)) + } + ("new-testnet", Some(matches)) => new_testnet::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run new_testnet command: {}", e)), ("check-deposit-data", Some(matches)) => check_deposit_data::run::(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), @@ -610,22 +557,3 @@ fn run( (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), } } - -/// Creates a genesis state and writes it to a YAML file. -fn genesis_yaml(validator_count: usize, genesis_time: u64, output: PathBuf) { - let spec = &T::default_spec(); - - let builder: TestingBeaconStateBuilder = - TestingBeaconStateBuilder::from_deterministic_keypairs(validator_count, spec); - - let (mut state, _keypairs) = builder.build(); - state.genesis_time = genesis_time; - - info!("Generated state root: {:?}", state.canonical_root()); - - info!("Writing genesis state to {:?}", output); - - let file = File::create(output.clone()) - .unwrap_or_else(|e| panic!("unable to create file: {:?}. 
Error: {:?}", output, e)); - serde_yaml::to_writer(file, &state).expect("should be able to serialize BeaconState"); -} diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index af086102d7a..777633ca821 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,17 +1,10 @@ use clap::ArgMatches; -use clap_utils::{ - parse_optional, parse_path_with_default_in_home_dir, parse_required, parse_ssz_optional, -}; +use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; use eth2_network_config::Eth2NetworkConfig; use std::path::PathBuf; -use types::{Address, EthSpec, YamlConfig}; +use types::{Address, Config, EthSpec}; -pub fn run(matches: &ArgMatches) -> Result<(), String> { - let testnet_dir_path = parse_path_with_default_in_home_dir( - matches, - "testnet-dir", - PathBuf::from(directory::DEFAULT_ROOT_DIR).join("testnet"), - )?; +pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; let deposit_contract_deploy_block = parse_required(matches, "deposit-contract-deploy-block")?; @@ -56,11 +49,15 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { spec.genesis_fork_version = v; } + if let Some(fork_epoch) = parse_optional(matches, "altair-fork-epoch")? 
{ + spec.altair_fork_epoch = Some(fork_epoch); + } + let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr: Some(vec![]), genesis_state_bytes: None, - yaml_config: Some(YamlConfig::from_spec::(&spec)), + config: Config::from_chain_spec::(&spec), }; testnet.write_to_file(testnet_dir_path, overwrite_files) diff --git a/lcli/src/parse_hex.rs b/lcli/src/parse_hex.rs deleted file mode 100644 index 992cbf91d20..00000000000 --- a/lcli/src/parse_hex.rs +++ /dev/null @@ -1,41 +0,0 @@ -use clap::ArgMatches; -use serde::Serialize; -use ssz::Decode; -use types::{BeaconBlock, BeaconState, EthSpec}; - -pub fn run_parse_hex(matches: &ArgMatches) -> Result<(), String> { - let type_str = matches.value_of("type").ok_or("No type supplied")?; - let mut hex: String = matches - .value_of("hex_ssz") - .ok_or("No hex ssz supplied")? - .to_string(); - - if hex.starts_with("0x") { - hex = hex[2..].to_string(); - } - - let hex = hex::decode(&hex).map_err(|e| format!("Failed to parse hex: {:?}", e))?; - - info!("Using {} spec", T::spec_name()); - info!("Type: {:?}", type_str); - - match type_str { - "block" => decode_and_print::>(&hex)?, - "state" => decode_and_print::>(&hex)?, - other => return Err(format!("Unknown type: {}", other)), - }; - - Ok(()) -} - -fn decode_and_print(bytes: &[u8]) -> Result<(), String> { - let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e))?; - - println!( - "{}", - serde_yaml::to_string(&item) - .map_err(|e| format!("Unable to write object to YAML: {:?}", e))? 
- ); - - Ok(()) -} diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index aba94beb0d8..676eb6294ac 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -1,26 +1,28 @@ -use crate::helpers::parse_path; use clap::ArgMatches; use serde::Serialize; use ssz::Decode; use std::fs::File; use std::io::Read; -use types::{EthSpec, SignedBeaconBlock}; +use types::*; -pub fn run(matches: &ArgMatches) -> Result<(), String> { - let type_str = matches - .value_of("type") - .ok_or("No type supplied")?; - let path = parse_path(matches, "path")?; - - info!("Type: {:?}", type_str); +pub fn run_parse_ssz(matches: &ArgMatches) -> Result<(), String> { + let type_str = matches.value_of("type").ok_or("No type supplied")?; + let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; let mut bytes = vec![]; - let mut file = File::open(&path).map_err(|e| format!("Unable to open {:?}: {}", path, e))?; + let mut file = + File::open(filename).map_err(|e| format!("Unable to open {}: {}", filename, e))?; file.read_to_end(&mut bytes) - .map_err(|e| format!("Unable to read {:?}: {}", path, e))?; + .map_err(|e| format!("Unable to read {}: {}", filename, e))?; + + info!("Using {} spec", T::spec_name()); + info!("Type: {:?}", type_str); match type_str { - "SignedBeaconBlock" => decode_and_print::>(&bytes)?, + "block_base" => decode_and_print::>(&bytes)?, + "block_altair" => decode_and_print::>(&bytes)?, + "state_base" => decode_and_print::>(&bytes)?, + "state_altair" => decode_and_print::>(&bytes)?, other => return Err(format!("Unknown type: {}", other)), }; @@ -28,7 +30,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { } fn decode_and_print(bytes: &[u8]) -> Result<(), String> { - let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e))?; + let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("SSZ decode failed: {:?}", e))?; println!( "{}", diff --git a/lcli/src/replace_state_pubkeys.rs b/lcli/src/replace_state_pubkeys.rs 
index e1513b4c2ef..8e85f76aed2 100644 --- a/lcli/src/replace_state_pubkeys.rs +++ b/lcli/src/replace_state_pubkeys.rs @@ -1,14 +1,15 @@ use account_utils::{eth2_keystore::keypair_from_secret, mnemonic_from_phrase}; use clap::ArgMatches; +use eth2_network_config::Eth2NetworkConfig; use eth2_wallet::bip39::Seed; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; -use ssz::{Decode, Encode}; +use ssz::Encode; use std::fs::File; use std::io::{Read, Write}; use std::path::PathBuf; use types::{BeaconState, EthSpec}; -pub fn run(matches: &ArgMatches) -> Result<(), String> { +pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { let path = matches .value_of("ssz-state") .ok_or("ssz-state not specified")? @@ -19,6 +20,9 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { .value_of("mnemonic") .ok_or("mnemonic not specified")?; + let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; + let spec = ð2_network_config.chain_spec::()?; + let mut state: BeaconState = { let mut file = File::open(&path).map_err(|e| format!("Unable to open file: {}", e))?; @@ -27,13 +31,14 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { file.read_to_end(&mut ssz) .map_err(|e| format!("Unable to read file: {}", e))?; - BeaconState::from_ssz_bytes(&ssz).map_err(|e| format!("Unable to decode SSZ: {:?}", e))? + BeaconState::from_ssz_bytes(&ssz, spec) + .map_err(|e| format!("Unable to decode SSZ: {:?}", e))? 
}; let mnemonic = mnemonic_from_phrase(mnemonic_phrase)?; let seed = Seed::new(&mnemonic, ""); - for (index, validator) in state.validators.iter_mut().enumerate() { + for (index, validator) in state.validators_mut().iter_mut().enumerate() { let (secret, _) = recover_validator_secret_from_mnemonic(seed.as_bytes(), index as u32, KeyType::Voting) .map_err(|e| format!("Unable to generate validator key: {:?}", e))?; diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index 7ca0891dc14..cb502d37ae1 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -1,5 +1,6 @@ -use crate::transition_blocks::load_from_ssz; +use crate::transition_blocks::load_from_ssz_with; use clap::ArgMatches; +use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; use state_processing::per_slot_processing; use std::fs::File; @@ -7,7 +8,7 @@ use std::io::prelude::*; use std::path::PathBuf; use types::{BeaconState, EthSpec}; -pub fn run(matches: &ArgMatches) -> Result<(), String> { +pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { let pre_state_path = matches .value_of("pre-state") .ok_or("No pre-state file supplied")? 
@@ -30,9 +31,11 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { info!("Pre-state path: {:?}", pre_state_path); info!("Slots: {:?}", slots); - let mut state: BeaconState = load_from_ssz(pre_state_path)?; + let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; + let spec = ð2_network_config.chain_spec::()?; - let spec = &T::default_spec(); + let mut state: BeaconState = + load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; state .build_all_caches(spec) diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index e6845cb08b5..04d15f5a11e 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -1,12 +1,16 @@ use clap::ArgMatches; -use ssz::{Decode, Encode}; +use eth2_network_config::Eth2NetworkConfig; +use ssz::Encode; use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy}; use std::fs::File; use std::io::prelude::*; -use std::path::PathBuf; -use types::{BeaconState, EthSpec, SignedBeaconBlock}; +use std::path::{Path, PathBuf}; +use types::{BeaconState, ChainSpec, EthSpec, SignedBeaconBlock}; -pub fn run_transition_blocks(matches: &ArgMatches) -> Result<(), String> { +pub fn run_transition_blocks( + testnet_dir: PathBuf, + matches: &ArgMatches, +) -> Result<(), String> { let pre_state_path = matches .value_of("pre-state") .ok_or("No pre-state file supplied")? 
@@ -29,10 +33,15 @@ pub fn run_transition_blocks(matches: &ArgMatches) -> Result<(), Str info!("Pre-state path: {:?}", pre_state_path); info!("Block path: {:?}", block_path); - let pre_state: BeaconState = load_from_ssz(pre_state_path)?; - let block: SignedBeaconBlock = load_from_ssz(block_path)?; + let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; + let spec = ð2_network_config.chain_spec::()?; - let post_state = do_transition(pre_state, block)?; + let pre_state: BeaconState = + load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; + let block: SignedBeaconBlock = + load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?; + + let post_state = do_transition(pre_state, block, spec)?; let mut output_file = File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?; @@ -47,15 +56,14 @@ pub fn run_transition_blocks(matches: &ArgMatches) -> Result<(), Str fn do_transition( mut pre_state: BeaconState, block: SignedBeaconBlock, + spec: &ChainSpec, ) -> Result, String> { - let spec = &T::default_spec(); - pre_state .build_all_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; // Transition the parent state to the block slot. 
- for i in pre_state.slot.as_u64()..block.slot().as_u64() { + for i in pre_state.slot().as_u64()..block.slot().as_u64() { per_slot_processing(&mut pre_state, None, spec) .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; } @@ -76,11 +84,15 @@ fn do_transition( Ok(pre_state) } -pub fn load_from_ssz(path: PathBuf) -> Result { +pub fn load_from_ssz_with( + path: &Path, + spec: &ChainSpec, + decoder: impl FnOnce(&[u8], &ChainSpec) -> Result, +) -> Result { let mut file = - File::open(path.clone()).map_err(|e| format!("Unable to open file {:?}: {:?}", path, e))?; + File::open(path).map_err(|e| format!("Unable to open file {:?}: {:?}", path, e))?; let mut bytes = vec![]; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read from file {:?}: {:?}", path, e))?; - T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e)) + decoder(&bytes, spec).map_err(|e| format!("Ssz decode failed: {:?}", e)) } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 7cde85991d2..4e00785540e 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -25,9 +25,8 @@ use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; -use types::{EthSpec, MainnetEthSpec, MinimalEthSpec, V012LegacyEthSpec}; +use types::{EthSpec, MainnetEthSpec, MinimalEthSpec}; -pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; const LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. const MAXIMUM_SHUTDOWN_TIME: u64 = 15; @@ -67,19 +66,6 @@ impl EnvironmentBuilder { } } -impl EnvironmentBuilder { - /// Creates a new builder using the v0.12.x eth2 specification. 
- pub fn v012_legacy() -> Self { - Self { - runtime: None, - log: None, - eth_spec_instance: V012LegacyEthSpec, - eth2_config: Eth2Config::v012_legacy(), - testnet: None, - } - } -} - impl EnvironmentBuilder { /// Specifies that a multi-threaded tokio runtime should be used. Ideal for production uses. /// @@ -226,18 +212,7 @@ impl EnvironmentBuilder { eth2_network_config: Eth2NetworkConfig, ) -> Result { // Create a new chain spec from the default configuration. - self.eth2_config.spec = eth2_network_config - .yaml_config - .as_ref() - .ok_or("The testnet directory must contain a spec config")? - .apply_to_chain_spec::(&self.eth2_config.spec) - .ok_or_else(|| { - format!( - "The loaded config is not compatible with the {} spec", - &self.eth2_config.eth_spec_id - ) - })?; - + self.eth2_config.spec = eth2_network_config.chain_spec::()?; self.testnet = Some(eth2_network_config); Ok(self) diff --git a/lighthouse/environment/tests/environment_builder.rs b/lighthouse/environment/tests/environment_builder.rs index ce03e686cf3..ad775c99f5e 100644 --- a/lighthouse/environment/tests/environment_builder.rs +++ b/lighthouse/environment/tests/environment_builder.rs @@ -3,10 +3,10 @@ use environment::EnvironmentBuilder; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; use std::path::PathBuf; -use types::{V012LegacyEthSpec, YamlConfig}; +use types::{Config, MainnetEthSpec}; -fn builder() -> EnvironmentBuilder { - EnvironmentBuilder::v012_legacy() +fn builder() -> EnvironmentBuilder { + EnvironmentBuilder::mainnet() .multi_threaded_tokio_runtime() .expect("should set runtime") .null_logger() @@ -23,11 +23,11 @@ mod setup_eth2_config { #[test] fn update_spec_with_yaml_config() { if let Some(mut eth2_network_config) = eth2_network_config() { - let config_yaml = PathBuf::from("./tests/testnet_dir/config.yaml"); + let testnet_dir = PathBuf::from("./tests/testnet_dir"); + let config = testnet_dir.join("config.yaml"); - eth2_network_config.yaml_config = Some( - 
YamlConfig::from_file(config_yaml.as_path()).expect("should load yaml config"), - ); + eth2_network_config.config = + Config::from_file(config.as_path()).expect("should load yaml config"); let environment = builder() .eth2_network_config(eth2_network_config) @@ -36,8 +36,15 @@ mod setup_eth2_config { .expect("should build environment"); assert_eq!( - environment.eth2_config.spec.max_committees_per_slot, - 128 // see testnet_dir/config.yaml + environment + .eth2_config + .spec + .min_genesis_active_validator_count, + 100000 // see testnet_dir/config.yaml + ); + assert_eq!( + environment.eth2_config.spec.inactivity_score_bias, + 2 // see testnet_dir/config.yaml ); } } diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 493827f2987..7d0105cca8e 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -1,156 +1,71 @@ -# Mainnet preset -# Note: the intention of this file (for now) is to illustrate what a mainnet configuration could look like. -# Some of these constants may still change before the launch of Phase 0. 
+# Mainnet config -CONFIG_NAME: "mainnet" +# Extends the mainnet preset +PRESET_BASE: 'mainnet' -# Misc +# Genesis # --------------------------------------------------------------- -MAX_COMMITTEES_PER_SLOT: 128 # MODIFIED FOR TESTING -# 2**7 (= 128) -TARGET_COMMITTEE_SIZE: 128 -# 2**11 (= 2,048) -MAX_VALIDATORS_PER_COMMITTEE: 2048 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 -# See issue 563 -SHUFFLE_ROUND_COUNT: 90 -# `2**14` (= 16,384) -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 -# Jan 3, 2020 -MIN_GENESIS_TIME: 1578009600 -# 4 -HYSTERESIS_QUOTIENT: 4 -# 1 (minus 0.25) -HYSTERESIS_DOWNWARD_MULTIPLIER: 1 -# 5 (plus 1.25) -HYSTERESIS_UPWARD_MULTIPLIER: 5 -# 3 -PROPORTIONAL_SLASHING_MULTIPLIER: 3 - - -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - -# Validator -# --------------------------------------------------------------- -# 2**10 (= 1,024) -ETH1_FOLLOW_DISTANCE: 1024 -# 2**4 (= 16) -TARGET_AGGREGATORS_PER_COMMITTEE: 16 -# 2**0 (= 1) -RANDOM_SUBNETS_PER_VALIDATOR: 1 -# 2**8 (= 256) -EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: 256 -# 14 (estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 +# CUSTOMISED FOR TEST +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 100000 +# Dec 1, 2020, 12pm UTC +MIN_GENESIS_TIME: 1606824000 +# Mainnet initial fork version, recommend altering for testnets +GENESIS_FORK_VERSION: 0x00000000 +# 604800 seconds (7 days) +GENESIS_DELAY: 604800 -# Deposit contract +# Forking # --------------------------------------------------------------- -# Ethereum PoW Mainnet -DEPOSIT_CHAIN_ID: 1 -DEPOSIT_NETWORK_ID: 1 -# **TBD** -DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 +# Altair +ALTAIR_FORK_VERSION: 0x01000000 +ALTAIR_FORK_EPOCH: 18446744073709551615 +# Merge 
+MERGE_FORK_VERSION: 0x02000000 +MERGE_FORK_EPOCH: 18446744073709551615 +# Sharding +SHARDING_FORK_VERSION: 0x03000000 +SHARDING_FORK_EPOCH: 18446744073709551615 -# Gwei values -# --------------------------------------------------------------- -# 2**0 * 10**9 (= 1,000,000,000) Gwei -MIN_DEPOSIT_AMOUNT: 1000000000 -# 2**5 * 10**9 (= 32,000,000,000) Gwei -MAX_EFFECTIVE_BALANCE: 32000000000 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**0 * 10**9 (= 1,000,000,000) Gwei -EFFECTIVE_BALANCE_INCREMENT: 1000000000 - - -# Initial values -# --------------------------------------------------------------- -# Mainnet initial fork version, recommend altering for testnets -GENESIS_FORK_VERSION: 0x00000000 -BLS_WITHDRAWAL_PREFIX: 0x00 +# TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. +TRANSITION_TOTAL_DIFFICULTY: 4294967296 # Time parameters # --------------------------------------------------------------- -# 172800 seconds (2 days) -GENESIS_DELAY: 172800 # 12 seconds SECONDS_PER_SLOT: 12 -# 2**0 (= 1) slots 12 seconds -MIN_ATTESTATION_INCLUSION_DELAY: 1 -# 2**5 (= 32) slots 6.4 minutes -SLOTS_PER_EPOCH: 32 -# 2**0 (= 1) epochs 6.4 minutes -MIN_SEED_LOOKAHEAD: 1 -# 2**2 (= 4) epochs 25.6 minutes -MAX_SEED_LOOKAHEAD: 4 -# 2**5 (= 32) epochs ~3.4 hours -EPOCHS_PER_ETH1_VOTING_PERIOD: 32 -# 2**13 (= 8,192) slots ~13 hours -SLOTS_PER_HISTORICAL_ROOT: 8192 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 # 2**8 (= 256) epochs ~27 hours MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**8 (= 256) epochs ~27 hours SHARD_COMMITTEE_PERIOD: 256 -# 2**2 (= 4) epochs 25.6 minutes -MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4 - - -# State vector lengths -# --------------------------------------------------------------- -# 2**16 (= 65,536) epochs ~0.8 years -EPOCHS_PER_HISTORICAL_VECTOR: 65536 -# 2**13 (= 8,192) epochs ~36 days -EPOCHS_PER_SLASHINGS_VECTOR: 8192 -# 2**24 (= 16,777,216) historical roots, ~26,131 years 
-HISTORICAL_ROOTS_LIMIT: 16777216 -# 2**40 (= 1,099,511,627,776) validator spots -VALIDATOR_REGISTRY_LIMIT: 1099511627776 - - -# Reward and penalty quotients -# --------------------------------------------------------------- -# 2**6 (= 64) -BASE_REWARD_FACTOR: 64 -# 2**9 (= 512) -WHISTLEBLOWER_REWARD_QUOTIENT: 512 -# 2**3 (= 8) -PROPOSER_REWARD_QUOTIENT: 8 -# 2**24 (= 16,777,216) -INACTIVITY_PENALTY_QUOTIENT: 16777216 -# 2**5 (= 32) -MIN_SLASHING_PENALTY_QUOTIENT: 32 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 -# Max operations per block +# Validator cycle # --------------------------------------------------------------- +# CUSTOMISED FOR TEST +INACTIVITY_SCORE_BIAS: 2 # 2**4 (= 16) -MAX_PROPOSER_SLASHINGS: 16 -# 2**1 (= 2) -MAX_ATTESTER_SLASHINGS: 2 -# 2**7 (= 128) -MAX_ATTESTATIONS: 128 -# 2**4 (= 16) -MAX_DEPOSITS: 16 -# 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 -# Signature domains +# Deposit contract # --------------------------------------------------------------- -DOMAIN_BEACON_PROPOSER: 0x00000000 -DOMAIN_BEACON_ATTESTER: 0x01000000 -DOMAIN_RANDAO: 0x02000000 -DOMAIN_DEPOSIT: 0x03000000 -DOMAIN_VOLUNTARY_EXIT: 0x04000000 -DOMAIN_SELECTION_PROOF: 0x05000000 -DOMAIN_AGGREGATE_AND_PROOF: 0x06000000 +# Ethereum PoW Mainnet +DEPOSIT_CHAIN_ID: 1 +DEPOSIT_NETWORK_ID: 1 +DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 2282455969d..5dccb7ab22b 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -18,8 +18,6 @@ use task_executor::ShutdownReason; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; -pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; - fn bls_library_name() -> &'static str { if cfg!(feature = 
"portable") { "blst-portable" @@ -125,7 +123,7 @@ fn main() { .long("network") .value_name("network") .help("Name of the Eth2 chain Lighthouse will sync and follow.") - .possible_values(&["medalla", "altona", "spadina", "pyrmont", "mainnet", "toledo", "prater"]) + .possible_values(&["pyrmont", "mainnet", "prater"]) .conflicts_with("testnet-dir") .takes_value(true) .global(true) @@ -207,11 +205,7 @@ fn main() { EthSpecId::Mainnet => run(EnvironmentBuilder::mainnet(), &matches, testnet_config), #[cfg(feature = "spec-minimal")] EthSpecId::Minimal => run(EnvironmentBuilder::minimal(), &matches, testnet_config), - #[cfg(feature = "spec-v12")] - EthSpecId::V012Legacy => { - run(EnvironmentBuilder::v012_legacy(), &matches, testnet_config) - } - #[cfg(any(not(feature = "spec-minimal"), not(feature = "spec-v12")))] + #[cfg(any(not(feature = "spec-minimal")))] other => { eprintln!( "Eth spec `{}` is not supported by this build of Lighthouse", diff --git a/remote_signer/tests/sign_attestation.rs b/remote_signer/tests/sign_attestation.rs index 11968b4bb91..bebefa44276 100644 --- a/remote_signer/tests/sign_attestation.rs +++ b/remote_signer/tests/sign_attestation.rs @@ -30,7 +30,7 @@ mod sign_attestation { testcase( "\"beacon_proposer\"", - "Unable to parse block from JSON: Error(\"missing field `proposer_index`\", line: 0, column: 0)", + "Unable to parse block from JSON: Error(\"data did not match any variant of untagged enum BeaconBlock\", line: 0, column: 0)" ); testcase( "\"randao\"", diff --git a/remote_signer/tests/sign_block.rs b/remote_signer/tests/sign_block.rs index 509557cad92..3ae1327211f 100644 --- a/remote_signer/tests/sign_block.rs +++ b/remote_signer/tests/sign_block.rs @@ -57,23 +57,23 @@ mod sign_block { testcase( "\"data\":{\"slot\":\"\",\"proposer_index\":\"0\"", - "Unable to parse block from JSON: Error(\"cannot parse integer from empty string\", line: 0, column: 0)" + "Unable to parse block from JSON: Error(\"data did not match any variant of untagged enum 
BeaconBlock\", line: 0, column: 0)" ); testcase( "\"data\":{\"slot\":\"-1\",\"proposer_index\":\"0\"", - "Unable to parse block from JSON: Error(\"invalid digit found in string\", line: 0, column: 0)" + "Unable to parse block from JSON: Error(\"data did not match any variant of untagged enum BeaconBlock\", line: 0, column: 0)" ); testcase( "\"data\":{\"proposer_index\":\"0\"", - "Unable to parse block from JSON: Error(\"missing field `slot`\", line: 0, column: 0)", + "Unable to parse block from JSON: Error(\"data did not match any variant of untagged enum BeaconBlock\", line: 0, column: 0)" ); testcase( "\"data\":{\"slot\":\"49463\"", - "Unable to parse block from JSON: Error(\"missing field `proposer_index`\", line: 0, column: 0)" + "Unable to parse block from JSON: Error(\"data did not match any variant of untagged enum BeaconBlock\", line: 0, column: 0)" ); testcase( "\"data\":{\"slot\":\"49463\",\"proposer_index\":\"\"", - "Unable to parse block from JSON: Error(\"cannot parse integer from empty string\", line: 0, column: 0)", + "Unable to parse block from JSON: Error(\"data did not match any variant of untagged enum BeaconBlock\", line: 0, column: 0)" ); test_signer.shutdown(); diff --git a/remote_signer/tests/sign_randao.rs b/remote_signer/tests/sign_randao.rs index 738c382420d..030c5b9ca49 100644 --- a/remote_signer/tests/sign_randao.rs +++ b/remote_signer/tests/sign_randao.rs @@ -30,7 +30,7 @@ mod sign_randao { testcase( "\"beacon_proposer\"", - "Unable to parse block from JSON: Error(\"invalid type: string \"49463\", expected struct BeaconBlock\", line: 0, column: 0)" + "Unable to parse block from JSON: Error(\"data did not match any variant of untagged enum BeaconBlock\", line: 0, column: 0)", ); testcase( "\"beacon_attester\"", diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index ad0e0537527..4e86ec88064 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -5,7 +5,7 @@ # Produces a testnet 
specification and a genesis state where the genesis time # is now + $GENESIS_DELAY. # -# Generates datadirs for multiple validator keys according to the +# Generates datadirs for multiple validator keys according to the # $VALIDATOR_COUNT and $NODE_COUNT variables. # @@ -30,6 +30,7 @@ lcli \ --min-genesis-time $GENESIS_TIME \ --genesis-delay $GENESIS_DELAY \ --genesis-fork-version $GENESIS_FORK_VERSION \ + --altair-fork-epoch $ALTAIR_FORK_EPOCH \ --eth1-id $NETWORK_ID \ --eth1-follow-distance 1 \ --seconds-per-eth1-block 1 \ diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 356ddc9d98a..3152dd49f47 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -24,4 +24,7 @@ GENESIS_DELAY=180 BOOTNODE_PORT=4242 # Network ID and Chain ID of local eth1 test network -NETWORK_ID=4242 +NETWORK_ID=4242 + +# Hard fork configuration +ALTAIR_FORK_EPOCH=18446744073709551615 diff --git a/testing/ef_tests/.gitignore b/testing/ef_tests/.gitignore index a83c5aa9612..c088fca6b20 100644 --- a/testing/ef_tests/.gitignore +++ b/testing/ef_tests/.gitignore @@ -1 +1,2 @@ /eth2.0-spec-tests +.accessed_file_log.txt diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 0c701b9cd67..76fc735524a 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -13,6 +13,8 @@ fake_crypto = ["bls/fake_crypto"] [dependencies] bls = { path = "../../crypto/bls", default-features = false } compare_fields = { path = "../../common/compare_fields" } +compare_fields_derive = { path = "../../common/compare_fields_derive" } +derivative = "2.1.1" ethereum-types = "0.9.2" hex = "0.4.2" rayon = "1.4.1" @@ -28,3 +30,6 @@ cached_tree_hash = { path = "../../consensus/cached_tree_hash" } state_processing = { path = "../../consensus/state_processing" } swap_or_not_shuffle = { path = "../../consensus/swap_or_not_shuffle" } types = { path = "../../consensus/types" } +snap = "1.0.1" +parking_lot = "0.11.0" +fs2 = "0.4.3" 
diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index d0123def270..b24d4b8686b 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.0.1 +TESTS_TAG := v1.1.0-alpha.7 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py new file mode 100755 index 00000000000..5c3275135bc --- /dev/null +++ b/testing/ef_tests/check_all_files_accessed.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 + +# The purpose of this script is to compare a list of file names that were accessed during testing +# against all the file names in the eth2.0-spec-tests repository. It then checks to see which files +# were not accessed and returns an error if any non-intentionally-ignored files are detected. +# +# The ultimate goal is to detect any accidentally-missed spec tests. + +import os +import sys + +# First argument should the path to a file which contains a list of accessed file names. +accessed_files_filename = sys.argv[1] + +# Second argument should be the path to the eth2.0-spec-tests directory. +tests_dir_filename = sys.argv[2] + +# If any of the file names found in the eth2.0-spec-tests directory *starts with* one of the +# following strings, we will assume they are to be ignored (i.e., we are purposefully *not* running +# the spec tests). 
+excluded_paths = [ + # Configs from future phases + "tests/mainnet/config/custody_game.yaml", + "tests/mainnet/config/sharding.yaml", + "tests/mainnet/config/merge.yaml", + "tests/minimal/config/custody_game.yaml", + "tests/minimal/config/sharding.yaml", + "tests/minimal/config/merge.yaml", + # Merge tests + "tests/minimal/merge", + "tests/mainnet/merge", + # Eth1Block + # + # Intentionally omitted, as per https://github.com/sigp/lighthouse/issues/1835 + "tests/minimal/phase0/ssz_static/Eth1Block/", + "tests/mainnet/phase0/ssz_static/Eth1Block/", + "tests/minimal/altair/ssz_static/Eth1Block/", + "tests/mainnet/altair/ssz_static/Eth1Block/", + # LightClientStore + "tests/minimal/altair/ssz_static/LightClientStore", + "tests/mainnet/altair/ssz_static/LightClientStore", + # LightClientUpdate + "tests/minimal/altair/ssz_static/LightClientUpdate", + "tests/mainnet/altair/ssz_static/LightClientUpdate", + # LightClientSnapshot + "tests/minimal/altair/ssz_static/LightClientSnapshot", + "tests/mainnet/altair/ssz_static/LightClientSnapshot", + # ContributionAndProof + "tests/minimal/altair/ssz_static/ContributionAndProof", + "tests/mainnet/altair/ssz_static/ContributionAndProof", + # SignedContributionAndProof + "tests/minimal/altair/ssz_static/SignedContributionAndProof", + "tests/mainnet/altair/ssz_static/SignedContributionAndProof", + # SyncCommitteeContribution + "tests/minimal/altair/ssz_static/SyncCommitteeContribution", + "tests/mainnet/altair/ssz_static/SyncCommitteeContribution", + # SyncCommitteeMessage + "tests/minimal/altair/ssz_static/SyncCommitteeMessage", + "tests/mainnet/altair/ssz_static/SyncCommitteeMessage", + # SyncCommitteeSigningData + "tests/minimal/altair/ssz_static/SyncCommitteeSigningData", + "tests/mainnet/altair/ssz_static/SyncCommitteeSigningData", + # SyncAggregatorSelectionData + "tests/minimal/altair/ssz_static/SyncAggregatorSelectionData", + "tests/mainnet/altair/ssz_static/SyncAggregatorSelectionData", + # Fork choice + 
"tests/mainnet/phase0/fork_choice", + "tests/minimal/phase0/fork_choice", + "tests/mainnet/altair/fork_choice", + "tests/minimal/altair/fork_choice", +] + +def normalize_path(path): + return path.split("eth2.0-spec-tests/", )[1] + +# Determine the list of filenames which were accessed during tests. +passed = set() +for line in open(accessed_files_filename, 'r').readlines(): + file = normalize_path(line.strip().strip('"')) + passed.add(file) + +missed = set() +accessed_files = 0 +excluded_files = 0 + +# Iterate all files in the tests directory, ensure that all files were either accessed +# or intentionally missed. +for root, dirs, files in os.walk(tests_dir_filename): + for name in files: + name = normalize_path(os.path.join(root, name)) + if name not in passed: + excluded = False + for excluded_path in excluded_paths: + if name.startswith(excluded_path): + excluded = True + break + if excluded: + excluded_files += 1 + else: + print(name) + missed.add(name) + else: + accessed_files += 1 + +# Exit with an error if there were any files missed. +assert len(missed) == 0, "{} missed files".format(len(missed)) + +print("Accessed {} files ({} intentionally excluded)".format(accessed_files, excluded_files)) diff --git a/testing/ef_tests/src/case_result.rs b/testing/ef_tests/src/case_result.rs index 9df60f402ce..f20d14836bd 100644 --- a/testing/ef_tests/src/case_result.rs +++ b/testing/ef_tests/src/case_result.rs @@ -37,8 +37,8 @@ pub fn compare_beacon_state_results_without_caches( expected: &mut Option>, ) -> Result<(), Error> { if let (Ok(ref mut result), Some(ref mut expected)) = (result.as_mut(), expected.as_mut()) { - result.drop_all_caches(); - expected.drop_all_caches(); + result.drop_all_caches().unwrap(); + expected.drop_all_caches().unwrap(); } compare_result_detailed(&result, &expected) @@ -94,7 +94,7 @@ where (Err(_), None) => Ok(()), // Fail: The test failed when it should have produced a result (fail). 
(Err(e), Some(expected)) => Err(Error::NotEqual(format!( - "Got {:?} | Expected {:?}", + "Got {:?} | Expected {}", e, fmt_val(expected) ))), @@ -106,7 +106,7 @@ where Ok(()) } else { Err(Error::NotEqual(format!( - "Got {:?} | Expected {:?}", + "Got {} | Expected {}", fmt_val(result), fmt_val(expected) ))) diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 818af1a6eca..2e52d8c2b57 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -2,6 +2,7 @@ use super::*; use rayon::prelude::*; use std::fmt::Debug; use std::path::{Path, PathBuf}; +use types::ForkName; mod bls_aggregate_sigs; mod bls_aggregate_verify; @@ -10,14 +11,17 @@ mod bls_sign_msg; mod bls_verify_msg; mod common; mod epoch_processing; +mod fork; mod genesis_initialization; mod genesis_validity; mod operations; +mod rewards; mod sanity_blocks; mod sanity_slots; mod shuffling; mod ssz_generic; mod ssz_static; +mod transition; pub use bls_aggregate_sigs::*; pub use bls_aggregate_verify::*; @@ -26,18 +30,21 @@ pub use bls_sign_msg::*; pub use bls_verify_msg::*; pub use common::SszStaticType; pub use epoch_processing::*; +pub use fork::ForkTest; pub use genesis_initialization::*; pub use genesis_validity::*; pub use operations::*; +pub use rewards::RewardsTest; pub use sanity_blocks::*; pub use sanity_slots::*; pub use shuffling::*; pub use ssz_generic::*; pub use ssz_static::*; +pub use transition::TransitionTest; pub trait LoadCase: Sized { /// Load the test case from a test case directory. - fn load_from_dir(_path: &Path) -> Result; + fn load_from_dir(_path: &Path, _fork_name: ForkName) -> Result; } pub trait Case: Debug + Sync { @@ -48,11 +55,18 @@ pub trait Case: Debug + Sync { "no description".to_string() } + /// Whether or not this test exists for the given `fork_name`. + /// + /// Returns `true` by default. + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + true + } + /// Execute a test and return the result. 
/// /// `case_index` reports the index of the case in the set of test cases. It is not strictly /// necessary, but it's useful when troubleshooting specific failing tests. - fn result(&self, case_index: usize) -> Result<(), Error>; + fn result(&self, case_index: usize, fork_name: ForkName) -> Result<(), Error>; } #[derive(Debug)] @@ -61,11 +75,11 @@ pub struct Cases { } impl Cases { - pub fn test_results(&self) -> Vec { + pub fn test_results(&self, fork_name: ForkName) -> Vec { self.test_cases .into_par_iter() .enumerate() - .map(|(i, (ref path, ref tc))| CaseResult::new(i, path, tc, tc.result(i))) + .map(|(i, (ref path, ref tc))| CaseResult::new(i, path, tc, tc.result(i, fork_name))) .collect() } } diff --git a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs index 776e4107183..dfe6fe528a3 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -13,7 +13,7 @@ pub struct BlsAggregateSigs { impl BlsCase for BlsAggregateSigs {} impl Case for BlsAggregateSigs { - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let mut aggregate_signature = AggregateSignature::infinity(); for key_str in &self.input { diff --git a/testing/ef_tests/src/cases/bls_aggregate_verify.rs b/testing/ef_tests/src/cases/bls_aggregate_verify.rs index 4f6df9981bb..3650e8a0d7f 100644 --- a/testing/ef_tests/src/cases/bls_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_aggregate_verify.rs @@ -21,7 +21,7 @@ pub struct BlsAggregateVerify { impl BlsCase for BlsAggregateVerify {} impl Case for BlsAggregateVerify { - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let messages = self .input .messages diff --git a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs 
b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs index 7a0d870e04c..71743ad99b6 100644 --- a/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs +++ b/testing/ef_tests/src/cases/bls_fast_aggregate_verify.rs @@ -23,7 +23,7 @@ pub struct BlsFastAggregateVerify { impl BlsCase for BlsFastAggregateVerify {} impl Case for BlsFastAggregateVerify { - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let message = Hash256::from_slice( &hex::decode(&self.input.message[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?, diff --git a/testing/ef_tests/src/cases/bls_sign_msg.rs b/testing/ef_tests/src/cases/bls_sign_msg.rs index 6687dda2280..77d30281d11 100644 --- a/testing/ef_tests/src/cases/bls_sign_msg.rs +++ b/testing/ef_tests/src/cases/bls_sign_msg.rs @@ -20,7 +20,7 @@ pub struct BlsSign { impl BlsCase for BlsSign {} impl Case for BlsSign { - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { // Convert private_key and message to required types let sk = hex::decode(&self.input.privkey[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; diff --git a/testing/ef_tests/src/cases/bls_verify_msg.rs b/testing/ef_tests/src/cases/bls_verify_msg.rs index 0684d76989a..83fd949684c 100644 --- a/testing/ef_tests/src/cases/bls_verify_msg.rs +++ b/testing/ef_tests/src/cases/bls_verify_msg.rs @@ -22,7 +22,7 @@ pub struct BlsVerify { impl BlsCase for BlsVerify {} impl Case for BlsVerify { - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let message = hex::decode(&self.input.message[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index e648ef6ec51..175ad113b61 100644 
--- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -2,18 +2,19 @@ use crate::cases::LoadCase; use crate::decode::yaml_decode_file; use crate::error::Error; use serde_derive::Deserialize; -use ssz::{Decode, Encode}; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::convert::TryFrom; use std::fmt::Debug; use std::path::Path; use tree_hash::TreeHash; +use types::ForkName; /// Trait for all BLS cases to eliminate some boilerplate. pub trait BlsCase: serde::de::DeserializeOwned {} impl LoadCase for T { - fn load_from_dir(path: &Path) -> Result { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { yaml_decode_file(&path.join("data.yaml")) } } @@ -60,13 +61,21 @@ macro_rules! uint_wrapper { uint_wrapper!(TestU128, ethereum_types::U128); uint_wrapper!(TestU256, ethereum_types::U256); -/// Trait alias for all deez bounds +/// Trait for types that can be used in SSZ static tests. pub trait SszStaticType: - serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + Sync + serde::de::DeserializeOwned + Encode + TreeHash + Clone + PartialEq + Debug + Sync { } impl SszStaticType for T where - T: serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + Sync + T: serde::de::DeserializeOwned + Encode + TreeHash + Clone + PartialEq + Debug + Sync { } + +/// Return the fork immediately prior to a fork. 
+pub fn previous_fork(fork_name: ForkName) -> ForkName { + match fork_name { + ForkName::Base => ForkName::Base, + ForkName::Altair => ForkName::Base, + } +} diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index fe19d7bd018..8ca3775f06d 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -1,18 +1,22 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_state, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; use serde_derive::Deserialize; +use state_processing::per_epoch_processing::validator_statuses::ValidatorStatuses; use state_processing::per_epoch_processing::{ - errors::EpochProcessingError, process_final_updates, process_justification_and_finalization, - process_registry_updates, process_rewards_and_penalties, process_slashings, - validator_statuses::ValidatorStatuses, + altair, base, + effective_balance_updates::process_effective_balance_updates, + historical_roots_update::process_historical_roots_update, + process_registry_updates, process_slashings, + resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, }; +use state_processing::EpochProcessingError; use std::marker::PhantomData; use std::path::{Path, PathBuf}; -use types::{BeaconState, ChainSpec, EthSpec}; +use types::{BeaconState, ChainSpec, EthSpec, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] pub struct Metadata { @@ -44,7 +48,23 @@ pub struct RegistryUpdates; #[derive(Debug)] pub struct Slashings; #[derive(Debug)] -pub struct FinalUpdates; +pub struct Eth1DataReset; +#[derive(Debug)] +pub struct EffectiveBalanceUpdates; +#[derive(Debug)] +pub struct SlashingsReset; +#[derive(Debug)] +pub struct RandaoMixesReset; +#[derive(Debug)] +pub struct 
HistoricalRootsUpdate; +#[derive(Debug)] +pub struct ParticipationRecordUpdates; +#[derive(Debug)] +pub struct SyncCommitteeUpdates; +#[derive(Debug)] +pub struct InactivityUpdates; +#[derive(Debug)] +pub struct ParticipationFlagUpdates; type_name!( JustificationAndFinalization, @@ -53,21 +73,43 @@ type_name!( type_name!(RewardsAndPenalties, "rewards_and_penalties"); type_name!(RegistryUpdates, "registry_updates"); type_name!(Slashings, "slashings"); -type_name!(FinalUpdates, "final_updates"); +type_name!(Eth1DataReset, "eth1_data_reset"); +type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); +type_name!(SlashingsReset, "slashings_reset"); +type_name!(RandaoMixesReset, "randao_mixes_reset"); +type_name!(HistoricalRootsUpdate, "historical_roots_update"); +type_name!(ParticipationRecordUpdates, "participation_record_updates"); +type_name!(SyncCommitteeUpdates, "sync_committee_updates"); +type_name!(InactivityUpdates, "inactivity_updates"); +type_name!(ParticipationFlagUpdates, "participation_flag_updates"); impl EpochTransition for JustificationAndFinalization { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - let mut validator_statuses = ValidatorStatuses::new(state, spec)?; - validator_statuses.process_attestations(state, spec)?; - process_justification_and_finalization(state, &validator_statuses.total_balances) + match state { + BeaconState::Base(_) => { + let mut validator_statuses = ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state)?; + base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + ) + } + BeaconState::Altair(_) => altair::process_justification_and_finalization(state, spec), + } } } impl EpochTransition for RewardsAndPenalties { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - let mut validator_statuses = ValidatorStatuses::new(state, spec)?; - 
validator_statuses.process_attestations(state, spec)?; - process_rewards_and_penalties(state, &mut validator_statuses, spec) + match state { + BeaconState::Base(_) => { + let mut validator_statuses = ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state)?; + base::process_rewards_and_penalties(state, &mut validator_statuses, spec) + } + BeaconState::Altair(_) => altair::process_rewards_and_penalties(state, spec), + } } } @@ -79,35 +121,110 @@ impl EpochTransition for RegistryUpdates { impl EpochTransition for Slashings { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - process_slashings( - state, - validator_statuses.total_balances.current_epoch(), - spec, - )?; + match state { + BeaconState::Base(_) => { + let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; + validator_statuses.process_attestations(&state)?; + process_slashings( + state, + validator_statuses.total_balances.current_epoch(), + spec.proportional_slashing_multiplier, + spec, + )?; + } + BeaconState::Altair(_) => { + process_slashings( + state, + state.get_total_active_balance(spec)?, + spec.proportional_slashing_multiplier_altair, + spec, + )?; + } + }; Ok(()) } } -impl EpochTransition for FinalUpdates { +impl EpochTransition for Eth1DataReset { + fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_eth1_data_reset(state) + } +} + +impl EpochTransition for EffectiveBalanceUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - process_final_updates(state, spec) + process_effective_balance_updates(state, spec) + } +} + +impl EpochTransition for SlashingsReset { + fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_slashings_reset(state) + } +} + +impl 
EpochTransition for RandaoMixesReset { + fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_randao_mixes_reset(state) + } +} + +impl EpochTransition for HistoricalRootsUpdate { + fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_historical_roots_update(state) + } +} + +impl EpochTransition for ParticipationRecordUpdates { + fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { + if let BeaconState::Base(_) = state { + base::process_participation_record_updates(state) + } else { + Ok(()) + } + } +} + +impl EpochTransition for SyncCommitteeUpdates { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + match state { + BeaconState::Base(_) => Ok(()), + BeaconState::Altair(_) => altair::process_sync_committee_updates(state, spec), + } + } +} + +impl EpochTransition for InactivityUpdates { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + match state { + BeaconState::Base(_) => Ok(()), + BeaconState::Altair(_) => altair::process_inactivity_updates(state, spec), + } + } +} + +impl EpochTransition for ParticipationFlagUpdates { + fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { + match state { + BeaconState::Base(_) => Ok(()), + BeaconState::Altair(_) => altair::process_participation_flag_updates(state), + } } } impl> LoadCase for EpochProcessing { - fn load_from_dir(path: &Path) -> Result { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let spec = &testing_spec::(fork_name); let metadata_path = path.join("meta.yaml"); let metadata: Metadata = if metadata_path.is_file() { yaml_decode_file(&metadata_path)? 
} else { Metadata::default() }; - let pre = ssz_decode_file(&path.join("pre.ssz"))?; - let post_file = path.join("post.ssz"); + let pre = ssz_decode_state(&path.join("pre.ssz_snappy"), spec)?; + let post_file = path.join("post.ssz_snappy"); let post = if post_file.is_file() { - Some(ssz_decode_file(&post_file)?) + Some(ssz_decode_state(&post_file, spec)?) } else { None }; @@ -130,11 +247,25 @@ impl> Case for EpochProcessing { .unwrap_or_else(String::new) } - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + match fork_name { + // No Altair tests for genesis fork. + ForkName::Base => { + T::name() != "sync_committee_updates" + && T::name() != "inactivity_updates" + && T::name() != "participation_flag_updates" + } + ForkName::Altair => true, + } + } + + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + self.metadata.bls_setting.unwrap_or_default().check()?; + let mut state = self.pre.clone(); let mut expected = self.post.clone(); - let spec = &E::default_spec(); + let spec = &testing_spec::(fork_name); let mut result = (|| { // Processing requires the committee caches. 
diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs new file mode 100644 index 00000000000..f3591bee729 --- /dev/null +++ b/testing/ef_tests/src/cases/fork.rs @@ -0,0 +1,67 @@ +use super::*; +use crate::case_result::compare_beacon_state_results_without_caches; +use crate::cases::common::previous_fork; +use crate::decode::{ssz_decode_state, yaml_decode_file}; +use serde_derive::Deserialize; +use state_processing::upgrade::upgrade_to_altair; +use types::{BeaconState, ForkName}; + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct Metadata { + pub fork: String, +} + +impl Metadata { + fn fork_name(&self) -> ForkName { + self.fork.parse().unwrap() + } +} + +#[derive(Debug)] +pub struct ForkTest { + pub metadata: Metadata, + pub pre: BeaconState, + pub post: BeaconState, +} + +impl LoadCase for ForkTest { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let metadata: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; + assert_eq!(metadata.fork_name(), fork_name); + + // Decode pre-state with previous fork. + let pre_spec = &previous_fork(fork_name).make_genesis_spec(E::default_spec()); + let pre = ssz_decode_state(&path.join("pre.ssz_snappy"), pre_spec)?; + + // Decode post-state with target fork. + let post_spec = &fork_name.make_genesis_spec(E::default_spec()); + let post = ssz_decode_state(&path.join("post.ssz_snappy"), post_spec)?; + + Ok(Self { + metadata, + pre, + post, + }) + } +} + +impl Case for ForkTest { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + // Upgrades exist targeting all forks except phase0/base. + // Fork tests also need BLS. 
+ cfg!(not(feature = "fake_crypto")) && fork_name != ForkName::Base + } + + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let mut result_state = self.pre.clone(); + let mut expected = Some(self.post.clone()); + let spec = &E::default_spec(); + + let mut result = match fork_name { + ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| result_state), + _ => panic!("unknown fork: {:?}", fork_name), + }; + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index 0fb64ccb379..2a9323c96a2 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -1,16 +1,22 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; -use types::{BeaconState, Deposit, EthSpec, Hash256}; +use types::{BeaconState, Deposit, EthSpec, ForkName, Hash256}; #[derive(Debug, Clone, Deserialize)] struct Metadata { deposits_count: usize, } +#[derive(Debug, Clone, Deserialize)] +struct Eth1 { + eth1_block_hash: Hash256, + eth1_timestamp: u64, +} + #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct GenesisInitialization { @@ -22,17 +28,20 @@ pub struct GenesisInitialization { } impl LoadCase for GenesisInitialization { - fn load_from_dir(path: &Path) -> Result { - let eth1_block_hash = ssz_decode_file(&path.join("eth1_block_hash.ssz"))?; - let eth1_timestamp = yaml_decode_file(&path.join("eth1_timestamp.yaml"))?; + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let Eth1 { + eth1_block_hash, + eth1_timestamp, + } = 
yaml_decode_file(&path.join("eth1.yaml"))?; let meta: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; let deposits: Vec = (0..meta.deposits_count) .map(|i| { - let filename = format!("deposits_{}.ssz", i); + let filename = format!("deposits_{}.ssz_snappy", i); ssz_decode_file(&path.join(filename)) }) .collect::>()?; - let state = ssz_decode_file(&path.join("state.ssz"))?; + let spec = &testing_spec::(fork_name); + let state = ssz_decode_state(&path.join("state.ssz_snappy"), spec)?; Ok(Self { path: path.into(), @@ -45,8 +54,13 @@ impl LoadCase for GenesisInitialization { } impl Case for GenesisInitialization { - fn result(&self, _case_index: usize) -> Result<(), Error> { - let spec = &E::default_spec(); + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + // Altair genesis and later requires real crypto. + fork_name == ForkName::Base || cfg!(not(feature = "fake_crypto")) + } + + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = &testing_spec::(fork_name); let mut result = initialize_beacon_state_from_eth1( self.eth1_block_hash, diff --git a/testing/ef_tests/src/cases/genesis_validity.rs b/testing/ef_tests/src/cases/genesis_validity.rs index f72ac4c3e66..e645d69adc0 100644 --- a/testing/ef_tests/src/cases/genesis_validity.rs +++ b/testing/ef_tests/src/cases/genesis_validity.rs @@ -1,29 +1,46 @@ use super::*; -use crate::decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::is_valid_genesis_state; use std::path::Path; -use types::{BeaconState, EthSpec}; +use types::{BeaconState, EthSpec, ForkName}; + +#[derive(Debug, Clone, Deserialize)] +pub struct Metadata { + description: String, +} #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct GenesisValidity { + pub metadata: Option, pub genesis: BeaconState, pub is_valid: bool, } impl LoadCase for GenesisValidity { - fn load_from_dir(path: 
&Path) -> Result { - let genesis = ssz_decode_file(&path.join("genesis.ssz"))?; + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let spec = &testing_spec::(fork_name); + let genesis = ssz_decode_state(&path.join("genesis.ssz_snappy"), spec)?; let is_valid = yaml_decode_file(&path.join("is_valid.yaml"))?; + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) + } else { + None + }; - Ok(Self { genesis, is_valid }) + Ok(Self { + metadata, + genesis, + is_valid, + }) } } impl Case for GenesisValidity { - fn result(&self, _case_index: usize) -> Result<(), Error> { - let spec = &E::default_spec(); + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = &testing_spec::(fork_name); let is_valid = is_valid_genesis_state(&self.genesis, spec); diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 9f39a46c684..0f63d4eb0b8 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -1,20 +1,24 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; +use crate::testing_spec; use crate::type_name::TypeName; use serde_derive::Deserialize; -use ssz::Decode; use state_processing::per_block_processing::{ - errors::BlockProcessingError, process_attestations, process_attester_slashings, - process_block_header, process_deposits, process_exits, process_proposer_slashings, - VerifySignatures, + errors::BlockProcessingError, + process_block_header, + process_operations::{ + altair, base, process_attester_slashings, process_deposits, process_exits, + process_proposer_slashings, + }, + process_sync_aggregate, VerifySignatures, }; use std::fmt::Debug; use 
std::path::Path; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, - ProposerSlashing, SignedVoluntaryExit, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, ForkName, + ProposerSlashing, SignedVoluntaryExit, SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -31,15 +35,21 @@ pub struct Operations> { pub post: Option>, } -pub trait Operation: Decode + TypeName + Debug + Sync { +pub trait Operation: TypeName + Debug + Sync + Sized { fn handler_name() -> String { Self::name().to_lowercase() } fn filename() -> String { - format!("{}.ssz", Self::handler_name()) + format!("{}.ssz_snappy", Self::handler_name()) } + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + true + } + + fn decode(path: &Path, spec: &ChainSpec) -> Result; + fn apply_to( &self, state: &mut BeaconState, @@ -48,12 +58,23 @@ pub trait Operation: Decode + TypeName + Debug + Sync { } impl Operation for Attestation { + fn decode(path: &Path, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + fn apply_to( &self, state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - process_attestations(state, &[self.clone()], VerifySignatures::True, spec) + match state { + BeaconState::Base(_) => { + base::process_attestations(state, &[self.clone()], VerifySignatures::True, spec) + } + BeaconState::Altair(_) => { + altair::process_attestation(state, self, 0, VerifySignatures::True, spec) + } + } } } @@ -62,6 +83,10 @@ impl Operation for AttesterSlashing { "attester_slashing".into() } + fn decode(path: &Path, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + fn apply_to( &self, state: &mut BeaconState, @@ -72,6 +97,10 @@ impl Operation for AttesterSlashing { } impl Operation for Deposit { + fn decode(path: &Path, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + fn apply_to( &self, state: &mut BeaconState, @@ -86,6 +115,10 @@ impl Operation for 
ProposerSlashing { "proposer_slashing".into() } + fn decode(path: &Path, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + fn apply_to( &self, state: &mut BeaconState, @@ -100,6 +133,10 @@ impl Operation for SignedVoluntaryExit { "voluntary_exit".into() } + fn decode(path: &Path, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + fn apply_to( &self, state: &mut BeaconState, @@ -115,7 +152,11 @@ impl Operation for BeaconBlock { } fn filename() -> String { - "block.ssz".into() + "block.ssz_snappy".into() + } + + fn decode(path: &Path, spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) } fn apply_to( @@ -123,12 +164,41 @@ impl Operation for BeaconBlock { state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - Ok(process_block_header(state, self, spec)?) + process_block_header(state, self.to_ref(), spec)?; + Ok(()) + } +} + +impl Operation for SyncAggregate { + fn handler_name() -> String { + "sync_aggregate".into() + } + + fn filename() -> String { + "sync_aggregate.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base + } + + fn decode(path: &Path, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64; + process_sync_aggregate(state, self, proposer_index, spec) } } impl> LoadCase for Operations { - fn load_from_dir(path: &Path) -> Result { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let spec = &testing_spec::(fork_name); let metadata_path = path.join("meta.yaml"); let metadata: Metadata = if metadata_path.is_file() { yaml_decode_file(&metadata_path)? 
@@ -136,12 +206,12 @@ impl> LoadCase for Operations { Metadata::default() }; - let pre = ssz_decode_file(&path.join("pre.ssz"))?; + let pre = ssz_decode_state(&path.join("pre.ssz_snappy"), spec)?; // Check BLS setting here before SSZ deserialization, as most types require signatures // to be valid. let (operation, bls_error) = if metadata.bls_setting.unwrap_or_default().check().is_ok() { - match ssz_decode_file(&path.join(O::filename())) { + match O::decode(&path.join(O::filename()), spec) { Ok(op) => (Some(op), None), Err(Error::InvalidBLSInput(error)) => (None, Some(error)), Err(e) => return Err(e), @@ -149,12 +219,12 @@ impl> LoadCase for Operations { } else { (None, None) }; - let post_filename = path.join("post.ssz"); + let post_filename = path.join("post.ssz_snappy"); let post = if post_filename.is_file() { if let Some(bls_error) = bls_error { panic!("input is unexpectedly invalid: {}", bls_error); } - Some(ssz_decode_file(&post_filename)?) + Some(ssz_decode_state(&post_filename, spec)?) 
} else { None }; @@ -176,8 +246,12 @@ impl> Case for Operations { .unwrap_or_else(String::new) } - fn result(&self, _case_index: usize) -> Result<(), Error> { - let spec = &E::default_spec(); + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + O::is_enabled_for_fork(fork_name) + } + + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = &testing_spec::(fork_name); let mut state = self.pre.clone(); let mut expected = self.post.clone(); diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs new file mode 100644 index 00000000000..df9c1766199 --- /dev/null +++ b/testing/ef_tests/src/cases/rewards.rs @@ -0,0 +1,206 @@ +use super::*; +use crate::case_result::compare_result_detailed; +use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; +use compare_fields_derive::CompareFields; +use serde_derive::Deserialize; +use ssz_derive::{Decode, Encode}; +use state_processing::per_epoch_processing::validator_statuses::ValidatorStatuses; +use state_processing::{ + per_epoch_processing::{ + altair::{self, rewards_and_penalties::get_flag_index_deltas}, + base::{self, rewards_and_penalties::AttestationDelta}, + Delta, + }, + EpochProcessingError, +}; +use std::path::{Path, PathBuf}; +use types::{ + consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, + BeaconState, EthSpec, ForkName, +}; + +#[derive(Debug, Clone, PartialEq, Decode, Encode, CompareFields)] +pub struct Deltas { + #[compare_fields(as_slice)] + rewards: Vec, + #[compare_fields(as_slice)] + penalties: Vec, +} + +#[derive(Debug, Clone, PartialEq, Decode, Encode, CompareFields)] +pub struct AllDeltas { + source_deltas: Deltas, + target_deltas: Deltas, + head_deltas: Deltas, + inclusion_delay_deltas: Option, + inactivity_penalty_deltas: Deltas, +} + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct Metadata { + pub description: Option, +} + +#[derive(Debug, Clone)] +pub 
struct RewardsTest { + pub path: PathBuf, + pub metadata: Metadata, + pub pre: BeaconState, + pub deltas: AllDeltas, +} + +/// Function that extracts a delta for a single component from an `AttestationDelta`. +type Accessor = fn(&AttestationDelta) -> Δ + +fn load_optional_deltas_file(path: &Path) -> Result, Error> { + let deltas = if path.is_file() { + Some(ssz_decode_file(&path)?) + } else { + None + }; + Ok(deltas) +} + +impl LoadCase for RewardsTest { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let spec = &testing_spec::(fork_name); + let metadata_path = path.join("meta.yaml"); + let metadata: Metadata = if metadata_path.is_file() { + yaml_decode_file(&metadata_path)? + } else { + Metadata::default() + }; + let pre = ssz_decode_state(&path.join("pre.ssz_snappy"), spec)?; + let source_deltas = ssz_decode_file(&path.join("source_deltas.ssz_snappy"))?; + let target_deltas = ssz_decode_file(&path.join("target_deltas.ssz_snappy"))?; + let head_deltas = ssz_decode_file(&path.join("head_deltas.ssz_snappy"))?; + let inclusion_delay_deltas = + load_optional_deltas_file(&path.join("inclusion_delay_deltas.ssz_snappy"))?; + let inactivity_penalty_deltas = + ssz_decode_file(&path.join("inactivity_penalty_deltas.ssz_snappy"))?; + + let deltas = AllDeltas { + source_deltas, + target_deltas, + head_deltas, + inclusion_delay_deltas, + inactivity_penalty_deltas, + }; + + Ok(Self { + path: path.into(), + metadata, + pre, + deltas, + }) + } +} + +impl Case for RewardsTest { + fn description(&self) -> String { + self.metadata + .description + .clone() + .unwrap_or_else(String::new) + } + + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let mut state = self.pre.clone(); + let spec = &testing_spec::(fork_name); + + let deltas: Result = (|| { + // Processing requires the committee caches. 
+ state.build_all_committee_caches(spec)?; + + if let BeaconState::Base(_) = state { + let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; + validator_statuses.process_attestations(&state)?; + + let deltas = base::rewards_and_penalties::get_attestation_deltas( + &state, + &validator_statuses, + spec, + )?; + + Ok(convert_all_base_deltas(&deltas)) + } else { + let total_active_balance = state.get_total_active_balance(spec)?; + + let source_deltas = compute_altair_flag_deltas( + &state, + TIMELY_SOURCE_FLAG_INDEX, + total_active_balance, + spec, + )?; + let target_deltas = compute_altair_flag_deltas( + &state, + TIMELY_TARGET_FLAG_INDEX, + total_active_balance, + spec, + )?; + let head_deltas = compute_altair_flag_deltas( + &state, + TIMELY_HEAD_FLAG_INDEX, + total_active_balance, + spec, + )?; + let inactivity_penalty_deltas = compute_altair_inactivity_deltas(&state, spec)?; + Ok(AllDeltas { + source_deltas, + target_deltas, + head_deltas, + inclusion_delay_deltas: None, + inactivity_penalty_deltas, + }) + } + })(); + + compare_result_detailed(&deltas, &Some(self.deltas.clone()))?; + + Ok(()) + } +} + +fn convert_all_base_deltas(ad: &[AttestationDelta]) -> AllDeltas { + AllDeltas { + source_deltas: convert_base_deltas(ad, |d| &d.source_delta), + target_deltas: convert_base_deltas(ad, |d| &d.target_delta), + head_deltas: convert_base_deltas(ad, |d| &d.head_delta), + inclusion_delay_deltas: Some(convert_base_deltas(ad, |d| &d.inclusion_delay_delta)), + inactivity_penalty_deltas: convert_base_deltas(ad, |d| &d.inactivity_penalty_delta), + } +} + +fn convert_base_deltas(attestation_deltas: &[AttestationDelta], accessor: Accessor) -> Deltas { + let (rewards, penalties) = attestation_deltas + .iter() + .map(accessor) + .map(|delta| (delta.rewards, delta.penalties)) + .unzip(); + Deltas { rewards, penalties } +} + +fn compute_altair_flag_deltas( + state: &BeaconState, + flag_index: usize, + total_active_balance: u64, + spec: &ChainSpec, +) -> Result { + let 
mut deltas = vec![Delta::default(); state.validators().len()]; + get_flag_index_deltas(&mut deltas, state, flag_index, total_active_balance, spec)?; + Ok(convert_altair_deltas(deltas)) +} + +fn compute_altair_inactivity_deltas( + state: &BeaconState, + spec: &ChainSpec, +) -> Result { + let mut deltas = vec![Delta::default(); state.validators().len()]; + altair::rewards_and_penalties::get_inactivity_penalty_deltas(&mut deltas, state, spec)?; + Ok(convert_altair_deltas(deltas)) +} + +fn convert_altair_deltas(deltas: Vec) -> Deltas { + let (rewards, penalties) = deltas.into_iter().map(|d| (d.rewards, d.penalties)).unzip(); + Deltas { rewards, penalties } +} diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index 0cc7a02d631..cb5708b12e1 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -1,12 +1,12 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, }; -use types::{BeaconState, EthSpec, RelativeEpoch, SignedBeaconBlock}; +use types::{BeaconState, EthSpec, ForkName, RelativeEpoch, SignedBeaconBlock}; #[derive(Debug, Clone, Deserialize)] pub struct Metadata { @@ -25,18 +25,21 @@ pub struct SanityBlocks { } impl LoadCase for SanityBlocks { - fn load_from_dir(path: &Path) -> Result { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let spec = &testing_spec::(fork_name); let metadata: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; - let pre = ssz_decode_file(&path.join("pre.ssz"))?; - let blocks: Vec> = (0..metadata.blocks_count) + let pre = ssz_decode_state(&path.join("pre.ssz_snappy"), 
spec)?; + let blocks = (0..metadata.blocks_count) .map(|i| { - let filename = format!("blocks_{}.ssz", i); - ssz_decode_file(&path.join(filename)) + let filename = format!("blocks_{}.ssz_snappy", i); + ssz_decode_file_with(&path.join(filename), |bytes| { + SignedBeaconBlock::from_ssz_bytes(bytes, spec) + }) }) - .collect::>()?; - let post_file = path.join("post.ssz"); + .collect::, _>>()?; + let post_file = path.join("post.ssz_snappy"); let post = if post_file.is_file() { - Some(ssz_decode_file(&post_file)?) + Some(ssz_decode_state(&post_file, spec)?) } else { None }; @@ -58,12 +61,12 @@ impl Case for SanityBlocks { .unwrap_or_else(String::new) } - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { self.metadata.bls_setting.unwrap_or_default().check()?; let mut bulk_state = self.pre.clone(); let mut expected = self.post.clone(); - let spec = &E::default_spec(); + let spec = &testing_spec::(fork_name); // Processing requires the epoch cache. 
bulk_state.build_all_caches(spec).unwrap(); @@ -76,8 +79,8 @@ impl Case for SanityBlocks { .blocks .iter() .try_for_each(|signed_block| { - let block = &signed_block.message; - while bulk_state.slot < block.slot { + let block = signed_block.message(); + while bulk_state.slot() < block.slot() { per_slot_processing(&mut bulk_state, None, spec).unwrap(); per_slot_processing(&mut indiv_state, None, spec).unwrap(); } @@ -106,8 +109,8 @@ impl Case for SanityBlocks { spec, )?; - if block.state_root == bulk_state.canonical_root() - && block.state_root == indiv_state.canonical_root() + if block.state_root() == bulk_state.canonical_root() + && block.state_root() == indiv_state.canonical_root() { Ok(()) } else { diff --git a/testing/ef_tests/src/cases/sanity_slots.rs b/testing/ef_tests/src/cases/sanity_slots.rs index ead9fb5a82d..93a05b3641b 100644 --- a/testing/ef_tests/src/cases/sanity_slots.rs +++ b/testing/ef_tests/src/cases/sanity_slots.rs @@ -1,10 +1,10 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::per_slot_processing; -use types::{BeaconState, EthSpec}; +use types::{BeaconState, EthSpec, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] pub struct Metadata { @@ -22,18 +22,19 @@ pub struct SanitySlots { } impl LoadCase for SanitySlots { - fn load_from_dir(path: &Path) -> Result { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let spec = &testing_spec::(fork_name); let metadata_path = path.join("meta.yaml"); let metadata: Metadata = if metadata_path.is_file() { yaml_decode_file(&metadata_path)? 
} else { Metadata::default() }; - let pre = ssz_decode_file(&path.join("pre.ssz"))?; + let pre = ssz_decode_state(&path.join("pre.ssz_snappy"), spec)?; let slots: u64 = yaml_decode_file(&path.join("slots.yaml"))?; - let post_file = path.join("post.ssz"); + let post_file = path.join("post.ssz_snappy"); let post = if post_file.is_file() { - Some(ssz_decode_file(&post_file)?) + Some(ssz_decode_state(&post_file, spec)?) } else { None }; @@ -55,12 +56,12 @@ impl Case for SanitySlots { .unwrap_or_else(String::new) } - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { self.metadata.bls_setting.unwrap_or_default().check()?; let mut state = self.pre.clone(); let mut expected = self.post.clone(); - let spec = &E::default_spec(); + let spec = &testing_spec::(fork_name); // Processing requires the epoch cache. state.build_all_caches(spec).unwrap(); diff --git a/testing/ef_tests/src/cases/shuffling.rs b/testing/ef_tests/src/cases/shuffling.rs index 2ed5c0bd464..b5ce019f5ca 100644 --- a/testing/ef_tests/src/cases/shuffling.rs +++ b/testing/ef_tests/src/cases/shuffling.rs @@ -4,6 +4,7 @@ use crate::decode::yaml_decode_file; use serde_derive::Deserialize; use std::marker::PhantomData; use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list}; +use types::ForkName; #[derive(Debug, Clone, Deserialize)] pub struct Shuffling { @@ -15,13 +16,13 @@ pub struct Shuffling { } impl LoadCase for Shuffling { - fn load_from_dir(path: &Path) -> Result { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { yaml_decode_file(&path.join("mapping.yaml")) } } impl Case for Shuffling { - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { if self.count == 0 { compare_result::<_, Error>(&Ok(vec![]), &Some(self.mapping.clone()))?; } else { diff --git a/testing/ef_tests/src/cases/ssz_generic.rs 
b/testing/ef_tests/src/cases/ssz_generic.rs index 3a7131bbe08..9b46001f97b 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -3,15 +3,14 @@ use super::*; use crate::cases::common::{SszStaticType, TestU128, TestU256}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; -use crate::decode::yaml_decode_file; +use crate::decode::{snappy_decode_file, yaml_decode_file}; use serde::{de::Error as SerdeError, Deserializer}; use serde_derive::Deserialize; use ssz_derive::{Decode, Encode}; -use std::fs; use std::path::{Path, PathBuf}; use tree_hash_derive::TreeHash; use types::typenum::*; -use types::{BitList, BitVector, FixedVector, VariableList}; +use types::{BitList, BitVector, FixedVector, ForkName, VariableList}; #[derive(Debug, Clone, Deserialize)] struct Metadata { @@ -27,7 +26,7 @@ pub struct SszGeneric { } impl LoadCase for SszGeneric { - fn load_from_dir(path: &Path) -> Result { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { let components = path .components() .map(|c| c.as_os_str().to_string_lossy().into_owned()) @@ -119,7 +118,7 @@ macro_rules! type_dispatch { } impl Case for SszGeneric { - fn result(&self, _case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parts = self.case_name.split('_').collect::>(); match self.handler_name.as_str() { @@ -195,7 +194,7 @@ impl Case for SszGeneric { } } -fn ssz_generic_test(path: &Path) -> Result<(), Error> { +fn ssz_generic_test(path: &Path) -> Result<(), Error> { let meta_path = path.join("meta.yaml"); let meta: Option = if meta_path.is_file() { Some(yaml_decode_file(&meta_path)?) 
@@ -203,7 +202,8 @@ fn ssz_generic_test(path: &Path) -> Result<(), Error> { None }; - let serialized = fs::read(&path.join("serialized.ssz")).expect("serialized.ssz exists"); + let serialized = snappy_decode_file(&path.join("serialized.ssz_snappy")) + .expect("serialized.ssz_snappy exists"); let value_path = path.join("value.yaml"); let value: Option = if value_path.is_file() { @@ -215,7 +215,7 @@ fn ssz_generic_test(path: &Path) -> Result<(), Error> { // Valid // TODO: signing root (annoying because of traits) if let Some(value) = value { - check_serialization(&value, &serialized)?; + check_serialization(&value, &serialized, T::from_ssz_bytes)?; if let Some(ref meta) = meta { check_tree_hash(&meta.root, value.tree_hash_root().as_bytes())?; diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index 88afea770ad..732a7d851ff 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -1,12 +1,11 @@ use super::*; use crate::case_result::compare_result; use crate::cases::common::SszStaticType; -use crate::decode::yaml_decode_file; -use cached_tree_hash::{CacheArena, CachedTreeHash}; +use crate::decode::{snappy_decode_file, yaml_decode_file}; use serde_derive::Deserialize; -use std::fs; -use std::marker::PhantomData; -use types::Hash256; +use ssz::Decode; +use tree_hash::TreeHash; +use types::{BeaconBlock, BeaconState, ForkName, Hash256, SignedBeaconBlock}; #[derive(Debug, Clone, Deserialize)] struct SszStaticRoots { @@ -14,6 +13,7 @@ struct SszStaticRoots { signing_root: Option, } +/// Runner for types that implement `ssz::Decode`. #[derive(Debug, Clone)] pub struct SszStatic { roots: SszStaticRoots, @@ -21,24 +21,43 @@ pub struct SszStatic { value: T, } +/// Runner for `BeaconState` (with tree hash cache). 
#[derive(Debug, Clone)] -pub struct SszStaticTHC { +pub struct SszStaticTHC { + roots: SszStaticRoots, + serialized: Vec, + value: T, +} + +/// Runner for types that require a `ChainSpec` to be decoded (`BeaconBlock`, etc). +#[derive(Debug, Clone)] +pub struct SszStaticWithSpec { roots: SszStaticRoots, serialized: Vec, value: T, - _phantom: PhantomData, } fn load_from_dir(path: &Path) -> Result<(SszStaticRoots, Vec, T), Error> { let roots = yaml_decode_file(&path.join("roots.yaml"))?; - let serialized = fs::read(&path.join("serialized.ssz")).expect("serialized.ssz exists"); + let serialized = snappy_decode_file(&path.join("serialized.ssz_snappy")) + .expect("serialized.ssz_snappy exists"); let value = yaml_decode_file(&path.join("value.yaml"))?; Ok((roots, serialized, value)) } impl LoadCase for SszStatic { - fn load_from_dir(path: &Path) -> Result { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + load_from_dir(path).map(|(roots, serialized, value)| Self { + roots, + serialized, + value, + }) + } +} + +impl LoadCase for SszStaticTHC { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { load_from_dir(path).map(|(roots, serialized, value)| Self { roots, serialized, @@ -47,25 +66,28 @@ impl LoadCase for SszStatic { } } -impl, C: Debug + Sync> LoadCase for SszStaticTHC { - fn load_from_dir(path: &Path) -> Result { +impl LoadCase for SszStaticWithSpec { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { load_from_dir(path).map(|(roots, serialized, value)| Self { roots, serialized, value, - _phantom: PhantomData, }) } } -pub fn check_serialization(value: &T, serialized: &[u8]) -> Result<(), Error> { +pub fn check_serialization( + value: &T, + serialized: &[u8], + deserializer: impl FnOnce(&[u8]) -> Result, +) -> Result<(), Error> { // Check serialization let serialized_result = value.as_ssz_bytes(); compare_result::(&Ok(value.ssz_bytes_len()), &Some(serialized.len()))?; compare_result::, Error>(&Ok(serialized_result), 
&Some(serialized.to_vec()))?; // Check deserialization - let deserialized_result = T::from_ssz_bytes(serialized); + let deserialized_result = deserializer(serialized); compare_result(&deserialized_result, &Some(value.clone()))?; Ok(()) @@ -79,27 +101,49 @@ pub fn check_tree_hash(expected_str: &str, actual_root: &[u8]) -> Result<(), Err compare_result::(&Ok(tree_hash_root), &Some(expected_root)) } -impl Case for SszStatic { - fn result(&self, _case_index: usize) -> Result<(), Error> { - check_serialization(&self.value, &self.serialized)?; +impl Case for SszStatic { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + check_serialization(&self.value, &self.serialized, T::from_ssz_bytes)?; check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; Ok(()) } } -impl, C: Debug + Sync> Case for SszStaticTHC { - fn result(&self, _case_index: usize) -> Result<(), Error> { - check_serialization(&self.value, &self.serialized)?; +impl Case for SszStaticTHC> { + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = &testing_spec::(fork_name); + check_serialization(&self.value, &self.serialized, |bytes| { + BeaconState::from_ssz_bytes(bytes, spec) + })?; check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; - let arena = &mut CacheArena::default(); - let mut cache = self.value.new_tree_hash_cache(arena); - let cached_tree_hash_root = self - .value - .recalculate_tree_hash_root(arena, &mut cache) - .unwrap(); + let mut state = self.value.clone(); + state.initialize_tree_hash_cache(); + let cached_tree_hash_root = state.update_tree_hash_cache().unwrap(); check_tree_hash(&self.roots.root, cached_tree_hash_root.as_bytes())?; Ok(()) } } + +impl Case for SszStaticWithSpec> { + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = &testing_spec::(fork_name); + check_serialization(&self.value, &self.serialized, |bytes| { + 
BeaconBlock::from_ssz_bytes(bytes, spec) + })?; + check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; + Ok(()) + } +} + +impl Case for SszStaticWithSpec> { + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = &testing_spec::(fork_name); + check_serialization(&self.value, &self.serialized, |bytes| { + SignedBeaconBlock::from_ssz_bytes(bytes, spec) + })?; + check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; + Ok(()) + } +} diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs new file mode 100644 index 00000000000..d41a52d52ff --- /dev/null +++ b/testing/ef_tests/src/cases/transition.rs @@ -0,0 +1,114 @@ +use super::*; +use crate::case_result::compare_beacon_state_results_without_caches; +use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; +use serde_derive::Deserialize; +use state_processing::{ + per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, +}; +use std::str::FromStr; +use types::{BeaconState, Epoch, ForkName, SignedBeaconBlock}; + +#[derive(Debug, Clone, Deserialize)] +pub struct Metadata { + pub post_fork: String, + pub fork_epoch: Epoch, + pub fork_block: Option, + pub blocks_count: usize, +} + +#[derive(Debug)] +pub struct TransitionTest { + pub metadata: Metadata, + pub pre: BeaconState, + pub blocks: Vec>, + pub post: BeaconState, + pub spec: ChainSpec, +} + +impl LoadCase for TransitionTest { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let metadata: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; + assert_eq!(ForkName::from_str(&metadata.post_fork).unwrap(), fork_name); + + // Make spec with appropriate fork block. 
+ let mut spec = E::default_spec(); + match fork_name { + ForkName::Base => panic!("cannot fork to base/phase0"), + ForkName::Altair => { + spec.altair_fork_epoch = Some(metadata.fork_epoch); + } + } + + // Load blocks + let blocks = (0..metadata.blocks_count) + .map(|i| { + let filename = format!("blocks_{}.ssz_snappy", i); + ssz_decode_file_with(&path.join(filename), |bytes| { + SignedBeaconBlock::from_ssz_bytes(bytes, &spec) + }) + }) + .collect::, _>>()?; + + // Decode pre-state. + let pre = ssz_decode_state(&path.join("pre.ssz_snappy"), &spec)?; + + // Decode post-state. + let post = ssz_decode_state(&path.join("post.ssz_snappy"), &spec)?; + + Ok(Self { + metadata, + pre, + blocks, + post, + spec, + }) + } +} + +impl Case for TransitionTest { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + // Upgrades exist targeting all forks except phase0/base. + // Transition tests also need BLS. + cfg!(not(feature = "fake_crypto")) && fork_name != ForkName::Base + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let mut state = self.pre.clone(); + let mut expected = Some(self.post.clone()); + let spec = &self.spec; + + let mut result: Result<_, String> = self + .blocks + .iter() + .try_for_each(|block| { + // Advance to block slot. + complete_state_advance(&mut state, None, block.slot(), spec) + .map_err(|e| format!("Failed to advance: {:?}", e))?; + + // Apply block. 
+ per_block_processing( + &mut state, + block, + None, + BlockSignatureStrategy::VerifyBulk, + spec, + ) + .map_err(|e| format!("Block processing failed: {:?}", e))?; + + let state_root = state.update_tree_hash_cache().unwrap(); + if block.state_root() != state_root { + return Err(format!( + "Mismatched state root at slot {}, got: {:?}, expected: {:?}", + block.slot(), + state_root, + block.state_root() + )); + } + + Ok(()) + }) + .map(move |()| state); + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index 8d6486bb846..d0c9e0d9696 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -1,12 +1,42 @@ use super::*; -use std::fs; +use fs2::FileExt; +use snap::raw::Decoder; +use std::fs::{self}; +use std::io::Write; use std::path::Path; +use std::path::PathBuf; +use types::{BeaconState, EthSpec}; + +/// See `log_file_access` for details. +const ACCESSED_FILE_LOG_FILENAME: &str = ".accessed_file_log.txt"; + +/// Writes `path` to a file that contains a log of all files accessed during testing. +/// +/// That log file might later be used to ensure that all spec tests were accessed and none were +/// accidentally missed. 
+pub fn log_file_access>(file_accessed: P) { + let passed_test_list_path = + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(ACCESSED_FILE_LOG_FILENAME); + + let mut file = fs::OpenOptions::new() + .append(true) + .create(true) + .open(passed_test_list_path) + .expect("should open file"); + + file.lock_exclusive().expect("unable to lock file"); + + writeln!(&mut file, "{:?}", file_accessed.as_ref()).expect("should write to file"); + + file.unlock().expect("unable to unlock file"); +} pub fn yaml_decode(string: &str) -> Result { serde_yaml::from_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) } pub fn yaml_decode_file(path: &Path) -> Result { + log_file_access(path); fs::read_to_string(path) .map_err(|e| { Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) @@ -14,26 +44,56 @@ pub fn yaml_decode_file(path: &Path) -> Result Result, Error> { + log_file_access(path); + let bytes = fs::read(path).map_err(|e| { + Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) + })?; + let mut decoder = Decoder::new(); + decoder.decompress_vec(&bytes).map_err(|e| { + Error::FailedToParseTest(format!( + "Error decoding snappy encoding for {}: {:?}", + path.display(), + e + )) + }) +} + +pub fn ssz_decode_file_with(path: &Path, f: F) -> Result +where + F: FnOnce(&[u8]) -> Result, +{ + log_file_access(path); + let bytes = snappy_decode_file(path)?; + f(&bytes).map_err(|e| { + match e { + // NOTE: this is a bit hacky, but seemingly better than the alternatives + ssz::DecodeError::BytesInvalid(message) + if message.contains("Blst") || message.contains("Milagro") => + { + Error::InvalidBLSInput(message) + } + e => Error::FailedToParseTest(format!( + "Unable to parse SSZ at {}: {:?}", + path.display(), + e + )), + } + }) +} + pub fn ssz_decode_file(path: &Path) -> Result { - fs::read(path) - .map_err(|e| { - Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) - }) - .and_then(|s| { - 
T::from_ssz_bytes(&s).map_err(|e| { - match e { - // NOTE: this is a bit hacky, but seemingly better than the alternatives - ssz::DecodeError::BytesInvalid(message) - if message.contains("Blst") || message.contains("Milagro") => - { - Error::InvalidBLSInput(message) - } - e => Error::FailedToParseTest(format!( - "Unable to parse SSZ at {}: {:?}", - path.display(), - e - )), - } - }) - }) + log_file_access(path); + ssz_decode_file_with(path, T::from_ssz_bytes) +} + +pub fn ssz_decode_state( + path: &Path, + spec: &ChainSpec, +) -> Result, Error> { + log_file_access(path); + ssz_decode_file_with(path, |bytes| BeaconState::from_ssz_bytes(bytes, spec)) } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 2613c25b984..6c89f70ad44 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -1,12 +1,11 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; use crate::type_name; use crate::type_name::TypeName; -use cached_tree_hash::CachedTreeHash; -use std::fmt::Debug; +use derivative::Derivative; use std::fs; use std::marker::PhantomData; use std::path::PathBuf; -use types::EthSpec; +use types::{BeaconState, EthSpec, ForkName}; pub trait Handler { type Case: Case + LoadCase; @@ -15,22 +14,35 @@ pub trait Handler { "general" } - fn fork_name() -> &'static str { - "phase0" + fn runner_name() -> &'static str; + + fn handler_name(&self) -> String; + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + Self::Case::is_enabled_for_fork(fork_name) } - fn runner_name() -> &'static str; + fn run(&self) { + for fork_name in ForkName::list_all() { + if self.is_enabled_for_fork(fork_name) { + self.run_for_fork(fork_name) + } + } + } - fn handler_name() -> String; + fn run_for_fork(&self, fork_name: ForkName) { + let fork_name_str = match fork_name { + ForkName::Base => "phase0", + ForkName::Altair => "altair", + }; - fn run() { let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) 
.join("eth2.0-spec-tests") .join("tests") .join(Self::config_name()) - .join(Self::fork_name()) + .join(fork_name_str) .join(Self::runner_name()) - .join(Self::handler_name()); + .join(self.handler_name()); // Iterate through test suites let test_cases = fs::read_dir(&handler_path) @@ -44,30 +56,41 @@ pub trait Handler { .flat_map(Result::ok) .map(|test_case_dir| { let path = test_case_dir.path(); - let case = Self::Case::load_from_dir(&path).expect("test should load"); + let case = Self::Case::load_from_dir(&path, fork_name).expect("test should load"); (path, case) }) .collect(); - let results = Cases { test_cases }.test_results(); + let results = Cases { test_cases }.test_results(fork_name); - let name = format!("{}/{}", Self::runner_name(), Self::handler_name()); + let name = format!( + "{}/{}/{}", + fork_name_str, + Self::runner_name(), + self.handler_name() + ); crate::results::assert_tests_pass(&name, &handler_path, &results); } } macro_rules! bls_handler { ($runner_name: ident, $case_name:ident, $handler_name:expr) => { + #[derive(Derivative)] + #[derivative(Default(bound = ""))] pub struct $runner_name; impl Handler for $runner_name { type Case = cases::$case_name; + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name == ForkName::Base + } + fn runner_name() -> &'static str { "bls" } - fn handler_name() -> String { + fn handler_name(&self) -> String { $handler_name.into() } } @@ -89,14 +112,47 @@ bls_handler!( ); /// Handler for SSZ types. 
-pub struct SszStaticHandler(PhantomData<(T, E)>); +pub struct SszStaticHandler { + supported_forks: Vec, + _phantom: PhantomData<(T, E)>, +} + +impl Default for SszStaticHandler { + fn default() -> Self { + Self::for_forks(ForkName::list_all()) + } +} + +impl SszStaticHandler { + pub fn for_forks(supported_forks: Vec) -> Self { + SszStaticHandler { + supported_forks, + _phantom: PhantomData, + } + } + + pub fn base_only() -> Self { + Self::for_forks(vec![ForkName::Base]) + } + + pub fn altair_only() -> Self { + Self::for_forks(vec![ForkName::Altair]) + } +} /// Handler for SSZ types that implement `CachedTreeHash`. -pub struct SszStaticTHCHandler(PhantomData<(T, C, E)>); +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct SszStaticTHCHandler(PhantomData<(T, E)>); + +/// Handler for SSZ types that don't implement `ssz::Decode`. +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct SszStaticWithSpecHandler(PhantomData<(T, E)>); impl Handler for SszStaticHandler where - T: cases::SszStaticType + TypeName, + T: cases::SszStaticType + ssz::Decode + TypeName, E: TypeName, { type Case = cases::SszStatic; @@ -109,18 +165,41 @@ where "ssz_static" } - fn handler_name() -> String { + fn handler_name(&self) -> String { T::name().into() } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + self.supported_forks.contains(&fork_name) + } } -impl Handler for SszStaticTHCHandler +impl Handler for SszStaticTHCHandler, E> where - T: cases::SszStaticType + CachedTreeHash + TypeName, - C: Debug + Sync, - E: TypeName, + E: EthSpec + TypeName, +{ + type Case = cases::SszStaticTHC>; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "ssz_static" + } + + fn handler_name(&self) -> String { + BeaconState::::name().into() + } +} + +impl Handler for SszStaticWithSpecHandler +where + T: TypeName, + E: EthSpec + TypeName, + cases::SszStaticWithSpec: Case + LoadCase, { - type Case = 
cases::SszStaticTHC; + type Case = cases::SszStaticWithSpec; fn config_name() -> &'static str { E::name() @@ -130,11 +209,13 @@ where "ssz_static" } - fn handler_name() -> String { + fn handler_name(&self) -> String { T::name().into() } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] pub struct ShufflingHandler(PhantomData); impl Handler for ShufflingHandler { @@ -148,11 +229,17 @@ impl Handler for ShufflingHandler { "shuffling" } - fn handler_name() -> String { + fn handler_name(&self) -> String { "core".into() } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name == ForkName::Base + } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] pub struct SanityBlocksHandler(PhantomData); impl Handler for SanityBlocksHandler { @@ -166,11 +253,19 @@ impl Handler for SanityBlocksHandler { "sanity" } - fn handler_name() -> String { + fn handler_name(&self) -> String { "blocks".into() } + + fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool { + // FIXME(altair): v1.1.0-alpha.3 doesn't mark the historical blocks test as + // requiring real crypto, so only run these tests with real crypto for now. 
+ cfg!(not(feature = "fake_crypto")) + } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] pub struct SanitySlotsHandler(PhantomData); impl Handler for SanitySlotsHandler { @@ -184,11 +279,13 @@ impl Handler for SanitySlotsHandler { "sanity" } - fn handler_name() -> String { + fn handler_name(&self) -> String { "slots".into() } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] pub struct EpochProcessingHandler(PhantomData<(E, T)>); impl> Handler for EpochProcessingHandler { @@ -202,11 +299,83 @@ impl> Handler for EpochProcessingHa "epoch_processing" } - fn handler_name() -> String { + fn handler_name(&self) -> String { T::name().into() } } +pub struct RewardsHandler { + handler_name: &'static str, + _phantom: PhantomData, +} + +impl RewardsHandler { + pub fn new(handler_name: &'static str) -> Self { + Self { + handler_name, + _phantom: PhantomData, + } + } +} + +impl Handler for RewardsHandler { + type Case = cases::RewardsTest; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "rewards" + } + + fn handler_name(&self) -> String { + self.handler_name.to_string() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct ForkHandler(PhantomData); + +impl Handler for ForkHandler { + type Case = cases::ForkTest; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "fork" + } + + fn handler_name(&self) -> String { + "fork".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct TransitionHandler(PhantomData); + +impl Handler for TransitionHandler { + type Case = cases::TransitionTest; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "transition" + } + + fn handler_name(&self) -> String { + "core".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] pub struct FinalityHandler(PhantomData); impl Handler for FinalityHandler { @@ -221,11 +390,13 
@@ impl Handler for FinalityHandler { "finality" } - fn handler_name() -> String { + fn handler_name(&self) -> String { "finality".into() } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] pub struct GenesisValidityHandler(PhantomData); impl Handler for GenesisValidityHandler { @@ -239,11 +410,13 @@ impl Handler for GenesisValidityHandler { "genesis" } - fn handler_name() -> String { + fn handler_name(&self) -> String { "validity".into() } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] pub struct GenesisInitializationHandler(PhantomData); impl Handler for GenesisInitializationHandler { @@ -257,11 +430,13 @@ impl Handler for GenesisInitializationHandler { "genesis" } - fn handler_name() -> String { + fn handler_name(&self) -> String { "initialization".into() } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] pub struct OperationsHandler(PhantomData<(E, O)>); impl> Handler for OperationsHandler { @@ -275,11 +450,13 @@ impl> Handler for OperationsHandler "operations" } - fn handler_name() -> String { + fn handler_name(&self) -> String { O::handler_name() } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] pub struct SszGenericHandler(PhantomData); impl Handler for SszGenericHandler { @@ -293,7 +470,12 @@ impl Handler for SszGenericHandler { "ssz_generic" } - fn handler_name() -> String { + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // SSZ generic tests are genesis only + fork_name == ForkName::Base + } + + fn handler_name(&self) -> String { H::name().into() } } diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index ffc847a9c06..5c2ca3fb55e 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -1,13 +1,16 @@ -use types::EthSpec; - pub use case_result::CaseResult; pub use cases::Case; pub use cases::{ - FinalUpdates, JustificationAndFinalization, RegistryUpdates, RewardsAndPenalties, Slashings, + EffectiveBalanceUpdates, Eth1DataReset, 
HistoricalRootsUpdate, InactivityUpdates, + JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, + RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, + SyncCommitteeUpdates, }; +pub use decode::log_file_access; pub use error::Error; pub use handler::*; pub use type_name::TypeName; +use types::{ChainSpec, EthSpec, ForkName}; mod bls_setting; mod case_result; @@ -17,3 +20,7 @@ mod error; mod handler; mod results; mod type_name; + +pub fn testing_spec(fork_name: ForkName) -> ChainSpec { + fork_name.make_genesis_spec(E::default_spec()) +} diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 9cf7fa40b73..ed5ef8d1fdc 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -41,9 +41,15 @@ type_name_generic!(Attestation); type_name!(AttestationData); type_name_generic!(AttesterSlashing); type_name_generic!(BeaconBlock); +type_name_generic!(BeaconBlockBase, "BeaconBlock"); +type_name_generic!(BeaconBlockAltair, "BeaconBlock"); type_name_generic!(BeaconBlockBody); +type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); +type_name_generic!(BeaconStateBase, "BeaconState"); +type_name_generic!(BeaconStateAltair, "BeaconState"); type_name!(Checkpoint); type_name!(Deposit); type_name!(DepositData); @@ -60,5 +66,7 @@ type_name_generic!(SignedBeaconBlock); type_name!(SignedBeaconBlockHeader); type_name!(SignedVoluntaryExit); type_name!(SigningData); +type_name_generic!(SyncAggregate); +type_name_generic!(SyncCommittee); type_name!(Validator); type_name!(VoluntaryExit); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index c1a50ffc43c..84168eb5a4d 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,40 +1,8 @@ #![cfg(feature = "ef_tests")] use ef_tests::*; 
-use std::collections::HashMap; -use std::path::PathBuf; use types::*; -// Check that the config from the Eth2.0 spec tests matches our minimal/mainnet config. -fn config_test() { - let config_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .join("eth2.0-spec-tests") - .join("tests") - .join(E::name()) - .join("config") - .join("phase0.yaml"); - let yaml_config = YamlConfig::from_file(&config_path).expect("config file loads OK"); - let spec = E::default_spec(); - let yaml_from_spec = YamlConfig::from_spec::(&spec); - assert_eq!(yaml_config.apply_to_chain_spec::(&spec), Some(spec)); - assert_eq!(yaml_from_spec, yaml_config); - assert_eq!( - yaml_config.extra_fields, - HashMap::new(), - "not all config fields read" - ); -} - -#[test] -fn mainnet_config_ok() { - config_test::(); -} - -#[test] -fn minimal_config_ok() { - config_test::(); -} - // Check that the hand-computed multiplications on EthSpec are correctly computed. // This test lives here because one is most likely to muck these up during a spec update. 
fn check_typenum_values() { @@ -56,118 +24,141 @@ fn derived_typenum_values() { #[test] fn shuffling() { - ShufflingHandler::::run(); - ShufflingHandler::::run(); + ShufflingHandler::::default().run(); + ShufflingHandler::::default().run(); } #[test] fn operations_deposit() { - OperationsHandler::::run(); - OperationsHandler::::run(); + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); } #[test] fn operations_exit() { - OperationsHandler::::run(); - OperationsHandler::::run(); + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); } #[test] fn operations_proposer_slashing() { - OperationsHandler::::run(); - OperationsHandler::::run(); + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); } #[test] fn operations_attester_slashing() { - OperationsHandler::>::run(); - OperationsHandler::>::run(); + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); } #[test] fn operations_attestation() { - OperationsHandler::>::run(); - OperationsHandler::>::run(); + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); } #[test] fn operations_block_header() { - OperationsHandler::>::run(); - OperationsHandler::>::run(); + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); +} + +#[test] +fn operations_sync_aggregate() { + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); } #[test] fn sanity_blocks() { - SanityBlocksHandler::::run(); - SanityBlocksHandler::::run(); + SanityBlocksHandler::::default().run(); + SanityBlocksHandler::::default().run(); } #[test] fn sanity_slots() { - SanitySlotsHandler::::run(); - SanitySlotsHandler::::run(); + SanitySlotsHandler::::default().run(); + SanitySlotsHandler::::default().run(); } #[test] #[cfg(not(feature = "fake_crypto"))] fn bls_aggregate() { - BlsAggregateSigsHandler::run(); + BlsAggregateSigsHandler::default().run(); } #[test] #[cfg(not(feature = 
"fake_crypto"))] fn bls_sign() { - BlsSignMsgHandler::run(); + BlsSignMsgHandler::default().run(); } #[test] #[cfg(not(feature = "fake_crypto"))] fn bls_verify() { - BlsVerifyMsgHandler::run(); + BlsVerifyMsgHandler::default().run(); } #[test] #[cfg(not(feature = "fake_crypto"))] fn bls_aggregate_verify() { - BlsAggregateVerifyHandler::run(); + BlsAggregateVerifyHandler::default().run(); } #[test] #[cfg(not(feature = "fake_crypto"))] fn bls_fast_aggregate_verify() { - BlsFastAggregateVerifyHandler::run(); + BlsFastAggregateVerifyHandler::default().run(); } +/// As for `ssz_static_test_no_run` (below), but also executes the function as a test. #[cfg(feature = "fake_crypto")] macro_rules! ssz_static_test { - // Non-tree hash caching - ($test_name:ident, $typ:ident$(<$generics:tt>)?) => { - ssz_static_test!($test_name, SszStaticHandler, $typ$(<$generics>)?); + ($($args:tt)*) => { + ssz_static_test_no_run!(#[test] $($args)*); + }; +} + +/// Generate a function to run the SSZ static tests for a type. +/// +/// Quite complex in order to support an optional #[test] attrib, generics, and the two EthSpecs. +#[cfg(feature = "fake_crypto")] +macro_rules! ssz_static_test_no_run { + // Top-level + ($(#[$test:meta])? $test_name:ident, $typ:ident$(<$generics:tt>)?) => { + ssz_static_test_no_run!($(#[$test])? $test_name, SszStaticHandler, $typ$(<$generics>)?); }; // Generic - ($test_name:ident, $handler:ident, $typ:ident<_>) => { - ssz_static_test!( - $test_name, $handler, { + ($(#[$test:meta])? $test_name:ident, $handler:ident, $typ:ident<_>) => { + ssz_static_test_no_run!( + $(#[$test])? + $test_name, + $handler, + { ($typ, MinimalEthSpec), ($typ, MainnetEthSpec) } ); }; // Non-generic - ($test_name:ident, $handler:ident, $typ:ident) => { - ssz_static_test!( - $test_name, $handler, { + ($(#[$test:meta])? $test_name:ident, $handler:ident, $typ:ident) => { + ssz_static_test_no_run!( + $(#[$test])? 
+ $test_name, + $handler, + { ($typ, MinimalEthSpec), ($typ, MainnetEthSpec) } ); }; // Base case - ($test_name:ident, $handler:ident, { $(($($typ:ty),+)),+ }) => { - #[test] + ($(#[$test:meta])? $test_name:ident, $handler:ident, { $(($($typ:ty),+)),+ }) => { + $(#[$test])? fn $test_name() { $( - $handler::<$($typ),+>::run(); + $handler::<$($typ),+>::default().run(); )+ } }; @@ -175,101 +166,190 @@ macro_rules! ssz_static_test { #[cfg(feature = "fake_crypto")] mod ssz_static { - use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler}; + use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; use types::*; ssz_static_test!(aggregate_and_proof, AggregateAndProof<_>); ssz_static_test!(attestation, Attestation<_>); ssz_static_test!(attestation_data, AttestationData); ssz_static_test!(attester_slashing, AttesterSlashing<_>); - ssz_static_test!(beacon_block, BeaconBlock<_>); - ssz_static_test!(beacon_block_body, BeaconBlockBody<_>); + ssz_static_test!(beacon_block, SszStaticWithSpecHandler, BeaconBlock<_>); ssz_static_test!(beacon_block_header, BeaconBlockHeader); - ssz_static_test!( - beacon_state, - SszStaticTHCHandler, { - (BeaconState, BeaconTreeHashCache<_>, MinimalEthSpec), - (BeaconState, BeaconTreeHashCache<_>, MainnetEthSpec) - } - ); + ssz_static_test!(beacon_state, SszStaticTHCHandler, BeaconState<_>); ssz_static_test!(checkpoint, Checkpoint); + // FIXME(altair): add ContributionAndProof ssz_static_test!(deposit, Deposit); ssz_static_test!(deposit_data, DepositData); ssz_static_test!(deposit_message, DepositMessage); - // FIXME(sproul): move Eth1Block to consensus/types - // - // Tracked at: https://github.com/sigp/lighthouse/issues/1835 - // - // ssz_static_test!(eth1_block, Eth1Block); + // NOTE: Eth1Block intentionally omitted, see: https://github.com/sigp/lighthouse/issues/1835 ssz_static_test!(eth1_data, Eth1Data); ssz_static_test!(fork, Fork); ssz_static_test!(fork_data, ForkData); 
ssz_static_test!(historical_batch, HistoricalBatch<_>); ssz_static_test!(indexed_attestation, IndexedAttestation<_>); + // NOTE: LightClient* intentionally omitted ssz_static_test!(pending_attestation, PendingAttestation<_>); ssz_static_test!(proposer_slashing, ProposerSlashing); ssz_static_test!(signed_aggregate_and_proof, SignedAggregateAndProof<_>); - ssz_static_test!(signed_beacon_block, SignedBeaconBlock<_>); + ssz_static_test!( + signed_beacon_block, + SszStaticWithSpecHandler, + SignedBeaconBlock<_> + ); ssz_static_test!(signed_beacon_block_header, SignedBeaconBlockHeader); + // FIXME(altair): add SignedContributionAndProof ssz_static_test!(signed_voluntary_exit, SignedVoluntaryExit); ssz_static_test!(signing_data, SigningData); + // FIXME(altair): add SyncCommitteeContribution/Signature/SigningData ssz_static_test!(validator, Validator); ssz_static_test!(voluntary_exit, VoluntaryExit); + + // BeaconBlockBody has no internal indicator of which fork it is for, so we test it separately. 
+ #[test] + fn beacon_block_body() { + SszStaticHandler::, MinimalEthSpec>::base_only().run(); + SszStaticHandler::, MainnetEthSpec>::base_only().run(); + SszStaticHandler::, MinimalEthSpec>::altair_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::altair_only() + .run(); + } + + // Altair-only + #[test] + fn sync_aggregate() { + SszStaticHandler::, MinimalEthSpec>::altair_only().run(); + SszStaticHandler::, MainnetEthSpec>::altair_only().run(); + } + + #[test] + fn sync_committee() { + SszStaticHandler::, MinimalEthSpec>::altair_only().run(); + SszStaticHandler::, MainnetEthSpec>::altair_only().run(); + } } #[test] fn ssz_generic() { - SszGenericHandler::::run(); - SszGenericHandler::::run(); - SszGenericHandler::::run(); - SszGenericHandler::::run(); - SszGenericHandler::::run(); - SszGenericHandler::::run(); + SszGenericHandler::::default().run(); + SszGenericHandler::::default().run(); + SszGenericHandler::::default().run(); + SszGenericHandler::::default().run(); + SszGenericHandler::::default().run(); + SszGenericHandler::::default().run(); } #[test] fn epoch_processing_justification_and_finalization() { - EpochProcessingHandler::::run(); - EpochProcessingHandler::::run(); + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); } #[test] fn epoch_processing_rewards_and_penalties() { - EpochProcessingHandler::::run(); - EpochProcessingHandler::::run(); + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); } #[test] fn epoch_processing_registry_updates() { - EpochProcessingHandler::::run(); - EpochProcessingHandler::::run(); + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); } #[test] fn epoch_processing_slashings() { - EpochProcessingHandler::::run(); - EpochProcessingHandler::::run(); + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_eth1_data_reset() { + 
EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_effective_balance_updates() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_slashings_reset() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_randao_mixes_reset() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_historical_roots_update() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_participation_record_updates() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_sync_committee_updates() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_inactivity_updates() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_participation_flag_updates() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn fork_upgrade() { + ForkHandler::::default().run(); + ForkHandler::::default().run(); } #[test] -fn epoch_processing_final_updates() { - EpochProcessingHandler::::run(); - EpochProcessingHandler::::run(); +fn transition() { + TransitionHandler::::default().run(); + TransitionHandler::::default().run(); } #[test] fn finality() { - FinalityHandler::::run(); - FinalityHandler::::run(); + FinalityHandler::::default().run(); + FinalityHandler::::default().run(); } #[test] fn genesis_initialization() { - GenesisInitializationHandler::::run(); + GenesisInitializationHandler::::default().run(); } #[test] fn genesis_validity() { - GenesisValidityHandler::::run(); + 
GenesisValidityHandler::::default().run(); // Note: there are no genesis validity tests for mainnet } + +#[test] +fn rewards() { + for handler in &["basic", "leak", "random"] { + RewardsHandler::::new(handler).run(); + RewardsHandler::::new(handler).run(); + } +} diff --git a/testing/remote_signer_test/src/local_signer_test_data.rs b/testing/remote_signer_test/src/local_signer_test_data.rs index 60d650a5433..69692c6a250 100644 --- a/testing/remote_signer_test/src/local_signer_test_data.rs +++ b/testing/remote_signer_test/src/local_signer_test_data.rs @@ -42,7 +42,7 @@ impl LocalSignerTestData> { &self.spec, ); - signed_block.signature.to_string() + signed_block.signature().to_string() } } diff --git a/testing/remote_signer_test/src/utils.rs b/testing/remote_signer_test/src/utils.rs index e1e04f11003..18bbdc53878 100644 --- a/testing/remote_signer_test/src/utils.rs +++ b/testing/remote_signer_test/src/utils.rs @@ -239,31 +239,39 @@ pub fn get_block(seed: u64) -> BeaconBlock { let mut block: BeaconBlock = BeaconBlock::empty(spec); for _ in 0..E::MaxProposerSlashings::to_usize() { block - .body - .proposer_slashings + .body_mut() + .proposer_slashings_mut() .push(proposer_slashing.clone()) .unwrap(); } for _ in 0..E::MaxDeposits::to_usize() { - block.body.deposits.push(deposit.clone()).unwrap(); + block + .body_mut() + .deposits_mut() + .push(deposit.clone()) + .unwrap(); } for _ in 0..E::MaxVoluntaryExits::to_usize() { block - .body - .voluntary_exits + .body_mut() + .voluntary_exits_mut() .push(signed_voluntary_exit.clone()) .unwrap(); } for _ in 0..E::MaxAttesterSlashings::to_usize() { block - .body - .attester_slashings + .body_mut() + .attester_slashings_mut() .push(attester_slashing.clone()) .unwrap(); } for _ in 0..E::MaxAttestations::to_usize() { - block.body.attestations.push(attestation.clone()).unwrap(); + block + .body_mut() + .attestations_mut() + .push(attestation.clone()) + .unwrap(); } block } diff --git a/testing/simulator/src/checks.rs 
b/testing/simulator/src/checks.rs index 236d356633a..11be8781a94 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -99,7 +99,7 @@ async fn verify_validator_count( .await .map(|body| body.unwrap().data) .map_err(|e| format!("Get state root via http failed: {:?}", e))? - .validators + .validators() .len(); validator_counts.push(vc); } diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index fbe7c17bd8f..2fd5e91022b 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -10,3 +10,5 @@ edition = "2018" state_processing = { path = "../../consensus/state_processing" } types = { path = "../../consensus/types" } eth2_ssz = "0.1.2" +beacon_chain = { path = "../../beacon_node/beacon_chain" } +lazy_static = "1.4.0" diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 99b8122accc..a52ccf420d7 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -1,7 +1,8 @@ use super::*; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use state_processing::{ - per_block_processing, per_block_processing::errors::ExitInvalid, - test_utils::BlockProcessingBuilder, BlockProcessingError, BlockSignatureStrategy, + per_block_processing, per_block_processing::errors::ExitInvalid, BlockProcessingError, + BlockSignatureStrategy, }; use types::{BeaconBlock, BeaconState, Epoch, EthSpec, SignedBeaconBlock}; @@ -14,8 +15,10 @@ struct ExitTest { validator_index: u64, exit_epoch: Epoch, state_epoch: Epoch, - block_modifier: Box)>, - builder_modifier: Box) -> BlockProcessingBuilder>, + state_modifier: Box)>, + #[allow(clippy::type_complexity)] + block_modifier: + Box>, &mut BeaconBlock)>, #[allow(dead_code)] expected: Result<(), BlockProcessingError>, } @@ -26,8 +29,8 @@ impl Default for ExitTest { validator_index: 
VALIDATOR_INDEX, exit_epoch: STATE_EPOCH, state_epoch: STATE_EPOCH, - block_modifier: Box::new(|_| ()), - builder_modifier: Box::new(|x| x), + state_modifier: Box::new(|_| ()), + block_modifier: Box::new(|_, _| ()), expected: Ok(()), } } @@ -35,14 +38,23 @@ impl Default for ExitTest { impl ExitTest { fn block_and_pre_state(self) -> (SignedBeaconBlock, BeaconState) { - let spec = &E::default_spec(); + let harness = get_harness::( + self.state_epoch.start_slot(E::slots_per_epoch()), + VALIDATOR_COUNT, + ); + let mut state = harness.get_current_state(); + (self.state_modifier)(&mut state); - (self.builder_modifier)( - get_builder(spec, self.state_epoch.as_u64(), VALIDATOR_COUNT) - .insert_exit(self.validator_index, self.exit_epoch) - .modify(self.block_modifier), - ) - .build(None, None) + let block_modifier = self.block_modifier; + let validator_index = self.validator_index; + let exit_epoch = self.exit_epoch; + + let (signed_block, state) = + harness.make_block_with_modifier(state.clone(), state.slot() + 1, |block| { + harness.add_voluntary_exit(block, validator_index, exit_epoch); + block_modifier(&harness, block); + }); + (signed_block, state) } fn process( @@ -58,7 +70,7 @@ impl ExitTest { ) } - #[cfg(test)] + #[cfg(all(test, not(debug_assertions)))] fn run(self) -> BeaconState { let spec = &E::default_spec(); let expected = self.expected.clone(); @@ -95,23 +107,22 @@ vectors_and_tests!( // Ensures we can process a valid exit, valid_single_exit, ExitTest::default(), - // Tests three exists in the same block. + // Tests three exits in the same block. valid_three_exits, ExitTest { - builder_modifier: Box::new(|builder| { - builder - .insert_exit(1, STATE_EPOCH) - .insert_exit(2, STATE_EPOCH) + block_modifier: Box::new(|harness, block| { + harness.add_voluntary_exit(block, 1, STATE_EPOCH); + harness.add_voluntary_exit(block, 2, STATE_EPOCH); }), ..ExitTest::default() }, // Ensures that a validator cannot be exited twice in the same block. 
invalid_duplicate, ExitTest { - block_modifier: Box::new(|block| { + block_modifier: Box::new(|_, block| { // Duplicate the exit - let exit = block.body.voluntary_exits[0].clone(); - block.body.voluntary_exits.push(exit).unwrap(); + let exit = block.body().voluntary_exits()[0].clone(); + block.body_mut().voluntary_exits_mut().push(exit).unwrap(); }), expected: Err(BlockProcessingError::ExitInvalid { index: 1, @@ -128,8 +139,10 @@ vectors_and_tests!( // ``` invalid_validator_unknown, ExitTest { - block_modifier: Box::new(|block| { - block.body.voluntary_exits[0].message.validator_index = VALIDATOR_COUNT as u64; + block_modifier: Box::new(|_, block| { + block.body_mut().voluntary_exits_mut()[0] + .message + .validator_index = VALIDATOR_COUNT as u64; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -147,9 +160,8 @@ vectors_and_tests!( // ``` invalid_exit_already_initiated, ExitTest { - builder_modifier: Box::new(|mut builder| { - builder.state.validators[0].exit_epoch = STATE_EPOCH + 1; - builder + state_modifier: Box::new(|state| { + state.validators_mut()[0].exit_epoch = STATE_EPOCH + 1; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -167,9 +179,8 @@ vectors_and_tests!( // ``` invalid_not_active_before_activation_epoch, ExitTest { - builder_modifier: Box::new(|mut builder| { - builder.state.validators[0].activation_epoch = builder.spec.far_future_epoch; - builder + state_modifier: Box::new(|state| { + state.validators_mut()[0].activation_epoch = E::default_spec().far_future_epoch; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -187,9 +198,8 @@ vectors_and_tests!( // ``` invalid_not_active_after_exit_epoch, ExitTest { - builder_modifier: Box::new(|mut builder| { - builder.state.validators[0].exit_epoch = STATE_EPOCH; - builder + state_modifier: Box::new(|state| { + state.validators_mut()[0].exit_epoch = STATE_EPOCH; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -286,10 +296,12 @@ 
vectors_and_tests!( // ``` invalid_bad_signature, ExitTest { - block_modifier: Box::new(|block| { + block_modifier: Box::new(|_, block| { // Shift the validator index by 1 so that it's mismatched from the key that was // used to sign. - block.body.voluntary_exits[0].message.validator_index = VALIDATOR_INDEX + 1; + block.body_mut().voluntary_exits_mut()[0] + .message + .validator_index = VALIDATOR_INDEX + 1; }), expected: Err(BlockProcessingError::ExitInvalid { index: 0, @@ -299,14 +311,14 @@ vectors_and_tests!( } ); -#[cfg(test)] +#[cfg(all(test, not(debug_assertions)))] mod custom_tests { use super::*; fn assert_exited(state: &BeaconState, validator_index: usize) { let spec = E::default_spec(); - let validator = &state.validators[validator_index]; + let validator = &state.validators()[validator_index]; assert_eq!( validator.exit_epoch, // This is correct until we exceed the churn limit. If that happens, we @@ -330,10 +342,9 @@ mod custom_tests { #[test] fn valid_three() { let state = ExitTest { - builder_modifier: Box::new(|builder| { - builder - .insert_exit(1, STATE_EPOCH) - .insert_exit(2, STATE_EPOCH) + block_modifier: Box::new(|harness, block| { + harness.add_voluntary_exit(block, 1, STATE_EPOCH); + harness.add_voluntary_exit(block, 2, STATE_EPOCH); }), ..ExitTest::default() } diff --git a/testing/state_transition_vectors/src/macros.rs b/testing/state_transition_vectors/src/macros.rs index ff70be40e5a..81f81718525 100644 --- a/testing/state_transition_vectors/src/macros.rs +++ b/testing/state_transition_vectors/src/macros.rs @@ -14,7 +14,7 @@ macro_rules! 
vectors_and_tests { vec } - #[cfg(test)] + #[cfg(all(test, not(debug_assertions)))] mod tests { use super::*; $( diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index 4f8426baebd..2512b03e5b4 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -2,15 +2,21 @@ mod macros; mod exit; +use beacon_chain::{ + store::StoreConfig, + test_utils::{BeaconChainHarness, EphemeralHarnessType}, +}; +use lazy_static::lazy_static; use ssz::Encode; -use state_processing::test_utils::BlockProcessingBuilder; use std::env; use std::fs::{self, File}; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::exit; -use types::MainnetEthSpec; -use types::{BeaconState, ChainSpec, EthSpec, SignedBeaconBlock}; +use types::{ + test_utils::generate_deterministic_keypairs, BeaconState, EthSpec, Keypair, SignedBeaconBlock, +}; +use types::{Hash256, MainnetEthSpec, Slot}; type E = MainnetEthSpec; @@ -19,6 +25,8 @@ pub const VALIDATOR_COUNT: usize = 64; /// The base output directory for test vectors. pub const BASE_VECTOR_DIR: &str = "vectors"; +pub const SLOT_OFFSET: u64 = 1; + /// Writes all known test vectors to `CARGO_MANIFEST_DIR/vectors`. fn main() { match write_all_vectors() { @@ -39,16 +47,36 @@ pub struct TestVector { pub error: Option, } -/// Gets a `BlockProcessingBuilder` to be used in testing. -fn get_builder( - spec: &ChainSpec, - epoch_offset: u64, - num_validators: usize, -) -> BlockProcessingBuilder { - // Set the state and block to be in the last slot of the `epoch_offset`th epoch. - let last_slot_of_epoch = (MainnetEthSpec::genesis_epoch() + epoch_offset) - .end_slot(MainnetEthSpec::slots_per_epoch()); - BlockProcessingBuilder::new(num_validators, last_slot_of_epoch, &spec).build_caches() +lazy_static! { + /// A cached set of keys. 
+ static ref KEYPAIRS: Vec = generate_deterministic_keypairs(VALIDATOR_COUNT); +} + +fn get_harness( + slot: Slot, + validator_count: usize, +) -> BeaconChainHarness> { + let harness = BeaconChainHarness::new_with_store_config( + E::default(), + None, + KEYPAIRS[0..validator_count].to_vec(), + StoreConfig::default(), + ); + let skip_to_slot = slot - SLOT_OFFSET; + if skip_to_slot > Slot::new(0) { + let state = harness.get_current_state(); + harness.add_attested_blocks_at_slots( + state, + Hash256::zero(), + (skip_to_slot.as_u64()..slot.as_u64()) + .map(Slot::new) + .collect::>() + .as_slice(), + (0..validator_count).collect::>().as_slice(), + ); + } + + harness } /// Writes all vectors to file. diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index ead7adbc9bb..0616eb2f423 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -209,7 +209,7 @@ impl CandidateBeaconNode { /// Checks if the node has the correct specification. async fn is_compatible(&self, spec: &ChainSpec, log: &Logger) -> Result<(), CandidateError> { - let yaml_config = self + let config_and_preset = self .beacon_node .get_config_spec() .await @@ -224,9 +224,8 @@ impl CandidateBeaconNode { })? 
.data; - let beacon_node_spec = yaml_config - .apply_to_chain_spec::(&E::default_spec()) - .ok_or_else(|| { + let beacon_node_spec = + ChainSpec::from_config::(&config_and_preset.config).ok_or_else(|| { error!( log, "The minimal/mainnet spec type of the beacon node does not match the validator \ @@ -236,11 +235,12 @@ impl CandidateBeaconNode { CandidateError::Incompatible })?; - if !yaml_config.extra_fields.is_empty() { + if !config_and_preset.extra_fields.is_empty() { debug!( log, "Beacon spec includes unknown fields"; - "fields" => ?yaml_config.extra_fields + "endpoint" => %self.beacon_node, + "fields" => ?config_and_preset.extra_fields, ); } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 48f9e74894b..36ca283f28a 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -289,8 +289,8 @@ impl BlockService { info!( log, "Successfully published block"; - "deposits" => signed_block.message.body.deposits.len(), - "attestations" => signed_block.message.body.attestations.len(), + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), "slot" => signed_block.slot().as_u64(), ); diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index ccf465f53f5..bc820ce44e5 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -16,7 +16,7 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::path::PathBuf; use std::sync::{Arc, Weak}; use tokio::runtime::Runtime; -use types::{ChainSpec, EthSpec, YamlConfig}; +use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; use warp::{ http::{ @@ -191,9 +191,9 @@ pub fn serve( .and(signer.clone()) .and_then(|spec: Arc<_>, signer| { blocking_signed_json_task(signer, move || { - 
Ok(api_types::GenericResponse::from( - YamlConfig::from_spec::(&spec), - )) + let mut config = ConfigAndPreset::from_chain_spec::(&spec); + config.make_backwards_compat(&spec); + Ok(api_types::GenericResponse::from(config)) }) }); diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 2a748059c7f..cf2618bba11 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -150,7 +150,8 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self.client.get_lighthouse_spec().await.unwrap().data; - let expected = YamlConfig::from_spec::(&E::default_spec()); + let mut expected = ConfigAndPreset::from_chain_spec::(&E::default_spec()); + expected.make_backwards_compat(&E::default_spec()); assert_eq!(result, expected); diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 760f4ce49ac..96024990e67 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -168,11 +168,11 @@ impl ValidatorStore { current_slot: Slot, ) -> Option> { // Make sure the block slot is not higher than the current slot to avoid potential attacks. - if block.slot > current_slot { + if block.slot() > current_slot { warn!( self.log, "Not signing block with slot greater than current slot"; - "block_slot" => block.slot.as_u64(), + "block_slot" => block.slot().as_u64(), "current_slot" => current_slot.as_u64() ); return None;