27 changes: 27 additions & 0 deletions .env.example
@@ -50,3 +50,30 @@ TIPS_INGRESS_WRITER_KAFKA_BROKERS=localhost:9092
 TIPS_INGRESS_WRITER_KAFKA_TOPIC=tips-ingress-rpc
 TIPS_INGRESS_WRITER_KAFKA_GROUP_ID=local-writer
 TIPS_INGRESS_WRITER_LOG_LEVEL=info
+
+# OP Node (Consensus Layer) - Common configuration for simulator-cl and shadow-builder-cl
+OP_NODE_NETWORK=
+OP_NODE_ROLLUP_CONFIG=/data/rollup.json
+OP_NODE_ROLLUP_LOAD_PROTOCOL_VERSIONS=true
+OP_NODE_SYNCMODE=consensus-layer
+OP_NODE_L1_ETH_RPC=http://host.docker.internal:8545
+OP_NODE_L1_BEACON=http://host.docker.internal:3500
+OP_NODE_L1_RPC_KIND=debug_geth
+OP_NODE_L1_TRUST_RPC=false
+OP_NODE_L2_ENGINE_KIND=reth
+OP_NODE_L2_ENGINE_AUTH=/data/jwtsecret
+OP_NODE_P2P_LISTEN_IP=0.0.0.0
+OP_NODE_P2P_LISTEN_TCP_PORT=9222
+OP_NODE_P2P_LISTEN_UDP_PORT=9222
+OP_NODE_P2P_INTERNAL_IP=true
+OP_NODE_P2P_ADVERTISE_IP=host.docker.internal
+OP_NODE_P2P_NO_DISCOVERY=true
+OP_NODE_RPC_ADDR=0.0.0.0
+OP_NODE_RPC_PORT=8545
+OP_NODE_LOG_LEVEL=debug
+OP_NODE_LOG_FORMAT=json
+OP_NODE_SNAPSHOT_LOG=/tmp/op-node-snapshot-log
+OP_NODE_METRICS_ENABLED=true
+OP_NODE_METRICS_ADDR=0.0.0.0
+OP_NODE_METRICS_PORT=7300
+STATSD_ADDRESS=172.17.0.1
55 changes: 49 additions & 6 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,5 +1,5 @@
 [workspace]
-members = ["crates/datastore", "crates/audit", "crates/ingress-rpc", "crates/maintenance", "crates/ingress-writer", "crates/simulator"]
+members = ["crates/datastore", "crates/audit", "crates/ingress-rpc", "crates/maintenance", "crates/ingress-writer", "crates/simulator", "crates/shadow-boost"]
 resolver = "2"
 
 [workspace.dependencies]
38 changes: 38 additions & 0 deletions README.md
@@ -30,3 +30,41 @@ A service that consumes bundles from Kafka and persists them to the datastore.
 
 ### 🖥️ UI (`ui`)
 A debug UI for viewing the state of the bundle store and S3.
+
+### 🧪 Simulator (`crates/simulator`)
+A Reth-based execution client that:
+- Simulates bundles to estimate resource usage (e.g. execution time)
+- Provides transaction tracing and simulation capabilities
+- Syncs from the production sequencer via an op-node instance (simulator-cl)
+- Is used by the block builder stack to throttle transactions based on resource consumption
+
+## 🏗️ Shadow Builder Stack
+
+The shadow builder stack enables production-ready block building with TIPS bundle integration. It consists of:
+
+**shadow-builder-cl**: An op-node instance running in sequencer mode that:
+- Syncs from the production sequencer via P2P
+- Drives block building through Engine API calls
+- Uses a placeholder sequencer key, so built blocks are rejected by the network
+- Does not submit blocks to L1 (shadow sequencer mode)
+
+**shadow-builder**: A modified op-rbuilder instance that:
+- Receives Engine API calls from shadow-builder-cl
+- Queries the TIPS datastore for bundles with resource usage estimates from the simulator
+- Builds blocks that include eligible bundles while respecting resource constraints (see the sketch below)
+- Runs in parallel with the production builder for testing and validation
+
+**Prerequisites**:
+- [builder-playground](https://github.com/flashbots/builder-playground) running locally with the `niran:authorize-signers` branch
+- op-rbuilder Docker image built using `just build-rbuilder`
+
+**Quick Start**:
+```bash
+# Build op-rbuilder (optionally from a specific branch)
+just build-rbuilder
+
+# Start the shadow builder stack (requires builder-playground running)
+just start-builder
+```
+
+The shadow-builder-cl syncs from the production sequencer via P2P while shadow-builder builds blocks with TIPS bundles in parallel with the production builder. The shadow builder's blocks are never broadcast to the network because of the placeholder sequencer key, and there is no batcher service to submit them to L1, making this safe for testing and validation without affecting production.
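
To make that concrete, here is a minimal Rust sketch of the selection pass shadow-builder performs conceptually: fetch bundles paired with their latest simulations, then greedily spend a per-block execution-time budget. It is illustrative rather than code from this PR; the commented import paths, the `valid_for_block` filter method, and the `execution_time_us` field name are assumptions extrapolated from the tests and SQL in this diff.

```rust
use anyhow::Result;

// Illustrative only: the real types live in crates/datastore; the
// commented paths below are assumptions.
// use datastore::postgres::{BundleFilter, BundleWithMetadata};
// use datastore::traits::BundleDatastore;

/// Greedily pick bundles whose latest simulated execution time fits in a
/// per-block budget (microseconds).
async fn pick_bundles(
    store: &impl BundleDatastore,
    block_number: u64,
    budget_us: i64,
) -> Result<Vec<BundleWithMetadata>> {
    // Assumed builder method, by analogy with `valid_for_timestamp`
    // used in the datastore tests.
    let filter = BundleFilter::new().valid_for_block(block_number);

    let mut picked = Vec::new();
    let mut spent_us: i64 = 0;
    // The PR's new return type: (BundleWithMetadata, Simulation) pairs.
    for (bundle, sim) in store.select_bundles_with_latest_simulation(filter).await? {
        // `execution_time_us` mirrors the simulations column of that name.
        if spent_us + sim.execution_time_us <= budget_us {
            spent_us += sim.execution_time_us;
            picked.push(bundle);
        }
    }
    Ok(picked)
}
```

The greedy budget check here stands in for whatever resource accounting the production builder actually applies.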
33 changes: 14 additions & 19 deletions crates/datastore/src/postgres.rs
@@ -26,6 +26,7 @@ pub enum BundleState {
 
 #[derive(sqlx::FromRow, Debug)]
 struct BundleRow {
+    id: Uuid,
     senders: Option<Vec<String>>,
     minimum_base_fee: Option<i64>,
     txn_hashes: Option<Vec<String>>,
@@ -81,20 +82,14 @@ impl BundleFilter {
 /// Extended bundle data that includes the original bundle plus extracted metadata
 #[derive(Debug, Clone)]
 pub struct BundleWithMetadata {
+    pub id: Uuid,
     pub bundle: EthSendBundle,
     pub txn_hashes: Vec<TxHash>,
     pub senders: Vec<Address>,
     pub min_base_fee: i64,
     pub state: BundleState,
 }
 
-/// Bundle with its latest simulation
-#[derive(Debug, Clone)]
-pub struct BundleWithLatestSimulation {
-    pub bundle_with_metadata: BundleWithMetadata,
-    pub latest_simulation: Simulation,
-}
-
 /// State diff type: maps account addresses to storage slot mappings
 pub type StateDiff = HashMap<Address, HashMap<StorageKey, StorageValue>>;
 
@@ -204,6 +199,7 @@ impl PostgresDatastore {
             .collect();
 
         Ok(BundleWithMetadata {
+            id: row.id,
             bundle,
             txn_hashes: parsed_txn_hashes?,
             senders: parsed_senders?,
@@ -312,9 +308,9 @@ impl BundleDatastore for PostgresDatastore {
     async fn get_bundle(&self, id: Uuid) -> Result<Option<BundleWithMetadata>> {
         let result = sqlx::query_as::<_, BundleRow>(
             r#"
-            SELECT senders, minimum_base_fee, txn_hashes, txs, reverting_tx_hashes,
+            SELECT id, senders, minimum_base_fee, txn_hashes, txs, reverting_tx_hashes,
                    dropping_tx_hashes, block_number, min_timestamp, max_timestamp, "state"
-            FROM bundles
+            FROM bundles
             WHERE id = $1
             "#,
         )
@@ -352,9 +348,9 @@ impl BundleDatastore for PostgresDatastore {
 
         let rows = sqlx::query_as::<_, BundleRow>(
             r#"
-            SELECT senders, minimum_base_fee, txn_hashes, txs, reverting_tx_hashes,
+            SELECT id, senders, minimum_base_fee, txn_hashes, txs, reverting_tx_hashes,
                    dropping_tx_hashes, block_number, min_timestamp, max_timestamp, "state"
-            FROM bundles
+            FROM bundles
             WHERE minimum_base_fee >= $1
             AND (block_number = $2 OR block_number IS NULL OR block_number = 0 OR $2 = 0)
             AND (min_timestamp <= $3 OR min_timestamp IS NULL)
@@ -463,7 +459,7 @@ impl BundleDatastore for PostgresDatastore {
     async fn select_bundles_with_latest_simulation(
         &self,
         filter: BundleFilter,
-    ) -> Result<Vec<BundleWithLatestSimulation>> {
+    ) -> Result<Vec<(BundleWithMetadata, Simulation)>> {
         let base_fee = filter.base_fee.unwrap_or(0);
         let block_number = filter.block_number.unwrap_or(0) as i64;
 
@@ -487,9 +483,9 @@
                     ROW_NUMBER() OVER (PARTITION BY s.bundle_id ORDER BY s.block_number DESC) as rn
                 FROM simulations s
             )
-            SELECT
-                b.senders, b.minimum_base_fee, b.txn_hashes, b.txs,
-                b.reverting_tx_hashes, b.dropping_tx_hashes,
+            SELECT
+                b.id, b.senders, b.minimum_base_fee, b.txn_hashes, b.txs,
+                b.reverting_tx_hashes, b.dropping_tx_hashes,
                 b.block_number, b.min_timestamp, b.max_timestamp, b."state",
                 ls.sim_id, ls.bundle_id as sim_bundle_id, ls.sim_block_number,
                 ls.block_hash, ls.execution_time_us, ls.gas_used, ls.state_diff
@@ -505,6 +501,7 @@
         #[derive(sqlx::FromRow)]
         struct BundleWithSimulationRow {
             // Bundle fields
+            id: Uuid,
             senders: Option<Vec<String>>,
             minimum_base_fee: Option<i64>,
             txn_hashes: Option<Vec<String>>,
@@ -537,6 +534,7 @@
         for row in rows {
             // Convert bundle part
             let bundle_row = BundleRow {
+                id: row.id,
                 senders: row.senders,
                 minimum_base_fee: row.minimum_base_fee,
                 txn_hashes: row.txn_hashes,
@@ -562,10 +560,7 @@
             };
             let simulation = self.row_to_simulation(simulation_row)?;
 
-            results.push(BundleWithLatestSimulation {
-                bundle_with_metadata,
-                latest_simulation: simulation,
-            });
+            results.push((bundle_with_metadata, simulation));
         }
 
         Ok(results)
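
The `select_bundles_with_latest_simulation` query above leans on `ROW_NUMBER() OVER (PARTITION BY s.bundle_id ORDER BY s.block_number DESC)` to keep exactly one simulation per bundle. As a reference for that window-function rule, the same latest-per-bundle selection in plain Rust over a minimal stand-in type (`Sim` is hypothetical, not the crate's `Simulation`):

```rust
use std::collections::HashMap;
use uuid::Uuid;

/// Minimal stand-in for the datastore's Simulation type; only the field
/// the selection rule needs.
#[derive(Debug, Clone)]
struct Sim {
    block_number: i64,
}

/// Keep the simulation with the highest block_number for each bundle,
/// i.e. the rows the SQL keeps with `rn = 1`.
fn latest_per_bundle(rows: Vec<(Uuid, Sim)>) -> HashMap<Uuid, Sim> {
    let mut latest: HashMap<Uuid, Sim> = HashMap::new();
    for (bundle_id, sim) in rows {
        let newer = latest
            .get(&bundle_id)
            .map_or(true, |cur| sim.block_number > cur.block_number);
        if newer {
            latest.insert(bundle_id, sim);
        }
    }
    latest
}
```

Doing this server-side means the datastore ships only one simulation per bundle to callers.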
6 changes: 2 additions & 4 deletions crates/datastore/src/traits.rs
@@ -1,6 +1,4 @@
-use crate::postgres::{
-    BundleFilter, BundleWithLatestSimulation, BundleWithMetadata, Simulation, StateDiff,
-};
+use crate::postgres::{BundleFilter, BundleWithMetadata, Simulation, StateDiff};
 use alloy_primitives::TxHash;
 use alloy_rpc_types_mev::EthSendBundle;
 use anyhow::Result;
@@ -46,5 +44,5 @@ pub trait BundleDatastore: Send + Sync {
     async fn select_bundles_with_latest_simulation(
         &self,
         filter: BundleFilter,
-    ) -> Result<Vec<BundleWithLatestSimulation>>;
+    ) -> Result<Vec<(BundleWithMetadata, Simulation)>>;
 }
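
For trait consumers, this change means replacing field access on the removed `BundleWithLatestSimulation` with tuple destructuring, the same pattern the updated tests below use. A sketch under assumed import paths:

```rust
use anyhow::Result;

// Illustrative only; adjust the commented paths to the datastore crate's
// real module layout.
// use datastore::postgres::BundleFilter;
// use datastore::traits::BundleDatastore;

async fn log_latest_simulations(store: &impl BundleDatastore) -> Result<()> {
    let results = store
        .select_bundles_with_latest_simulation(BundleFilter::new())
        .await?;
    // Old shape: r.bundle_with_metadata and r.latest_simulation fields.
    // New shape: destructure the (BundleWithMetadata, Simulation) tuple.
    for (bundle, sim) in &results {
        println!(
            "bundle {}: latest sim at block {} used {} gas",
            bundle.id, sim.block_number, sim.gas_used
        );
    }
    Ok(())
}
```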
38 changes: 15 additions & 23 deletions crates/datastore/tests/datastore.rs
@@ -503,8 +503,7 @@ async fn multiple_simulations_latest_selection() -> eyre::Result<()> {
     // Should return exactly one bundle
     assert_eq!(results.len(), 1, "Should return exactly one bundle");
 
-    let bundle_with_sim = &results[0];
-    let latest_sim = &bundle_with_sim.latest_simulation;
+    let (_bundle_meta, latest_sim) = &results[0];
 
     // Verify it's the latest simulation (highest block number)
     let expected_latest_block = base_block + 4; // Last iteration was i=4
@@ -637,26 +636,26 @@ async fn select_bundles_with_latest_simulation() -> eyre::Result<()> {
     // Verify the results contain the correct bundles and latest simulations
     let bundle1_result = results
         .iter()
-        .find(|r| r.bundle_with_metadata.bundle.block_number == 100);
+        .find(|(bundle_meta, _)| bundle_meta.bundle.block_number == 100);
     let bundle2_result = results
         .iter()
-        .find(|r| r.bundle_with_metadata.bundle.block_number == 200);
+        .find(|(bundle_meta, _)| bundle_meta.bundle.block_number == 200);
 
     assert!(bundle1_result.is_some(), "Bundle1 should be in results");
     assert!(bundle2_result.is_some(), "Bundle2 should be in results");
 
-    let bundle1_result = bundle1_result.unwrap();
-    let bundle2_result = bundle2_result.unwrap();
+    let (_bundle1_meta, sim1) = bundle1_result.unwrap();
+    let (_bundle2_meta, sim2) = bundle2_result.unwrap();
 
     // Check that bundle1 has the latest simulation (block 18500001)
-    assert_eq!(bundle1_result.latest_simulation.id, latest_sim1_id);
-    assert_eq!(bundle1_result.latest_simulation.block_number, 18500001);
-    assert_eq!(bundle1_result.latest_simulation.gas_used, 22000);
+    assert_eq!(sim1.id, latest_sim1_id);
+    assert_eq!(sim1.block_number, 18500001);
+    assert_eq!(sim1.gas_used, 22000);
 
     // Check that bundle2 has its simulation
-    assert_eq!(bundle2_result.latest_simulation.id, sim2_id);
-    assert_eq!(bundle2_result.latest_simulation.block_number, 18500002);
-    assert_eq!(bundle2_result.latest_simulation.gas_used, 19000);
+    assert_eq!(sim2.id, sim2_id);
+    assert_eq!(sim2.block_number, 18500002);
+    assert_eq!(sim2.gas_used, 19000);
 
     Ok(())
 }
@@ -720,10 +719,8 @@ async fn select_bundles_with_latest_simulation_filtered() -> eyre::Result<()> {
         1,
         "Should return 1 bundle valid for block 200"
     );
-    assert_eq!(
-        filtered_results[0].bundle_with_metadata.bundle.block_number,
-        200
-    );
+    let (bundle_meta, _sim) = &filtered_results[0];
+    assert_eq!(bundle_meta.bundle.block_number, 200);
 
     // Test filtering by timestamp
     let timestamp_filter = BundleFilter::new().valid_for_timestamp(1200);
@@ -738,13 +735,8 @@
         1,
         "Should return 1 bundle valid for timestamp 1200"
     );
-    assert_eq!(
-        timestamp_results[0]
-            .bundle_with_metadata
-            .bundle
-            .block_number,
-        100
-    );
+    let (bundle_meta, _sim) = &timestamp_results[0];
+    assert_eq!(bundle_meta.bundle.block_number, 100);
 
     Ok(())
 }