diff --git a/book/api/websocket.md b/book/api/websocket.md index 31368c97a2..26a70ce246 100644 --- a/book/api/websocket.md +++ b/book/api/websocket.md @@ -450,11 +450,13 @@ before it is ready. The phases form a state machine, and the validator can progress through them in interesting ways, +```txt +--+ +------------------------------+ | v | v joining_gossip -> loading_full_snapshot -> catching_up -> running v ^ ^ loading_incremental_snapshot --+ +``` Some interesting transitions are, @@ -516,21 +518,21 @@ Some interesting transitions are, ::: **`BootProgress`** -| Field | Type | Description | -|----------------------------------------------------------------------|-----------------|-------------| -| phase | `string` | One of `joining_gossip`, `loading_full_snapshot`, `loading_incremental_snapshot`, `catching_up`, or `running`. This indicates the current phase of the boot process | -| joining_gossip_elapsed_seconds | `number` | If the phase is `joining_gossip`, this is the duration, in seconds, spent joining the gossip network | -| loading_{full|incremental}_snapshot_elapsed_seconds | `number` | If the phase is at least `loading_{full|incremental}_snapshot`, this is the elapsed time, in seconds, spent reading (either downloading or reading from disk) the snapshot since the last reset | -| loading_{full|incremental}_snapshot_reset_count | `number\|null` | If the phase is at least `loading_{full|incremental}_snapshot` or later, this is the number of times the load for the snapshot failed and the phase was restarted from scratch. A snapshot load may fail due to an unreliable or underperforming network connection. Otherwise, `null` | -| loading_{full|incremental}_snapshot_slot | `number\|null` | If the phase is at least `loading_{full|incremental}_snapshot` or later, this is the slot of the snapshot being loaded. Otherwise, `null` | -| loading_{full|incremental}_snapshot_total_bytes_compressed | `number\|null` | If the phase is at least `loading_{full|incremental}_snapshot`, this is the (compressed) total size of the snapshot being loaded, in bytes. Otherwise, `null` | -| loading_{full|incremental}_snapshot_read_bytes_compressed | `number\|null` | If the phase is at least `loading_{full|incremental}_snapshot`, this is the (compressed) total number of bytes read from disk for the snapshot. Otherwise, `null` | -| loading_{full|incremental}_snapshot_read_path | `string\|null` | If the phase is at least `loading_{full|incremental}_snapshot`, this is either the remote url or local file path from which the snapshot is being read. Otherwise, `null` | -| loading_{full|incremental}_snapshot_decompress_bytes_decompressed | `number\|null` | If the phase is at least `loading_{full|incremental}_snapshot`, this is the (decompressed) number of bytes processed by decompress from the snapshot so far. Otherwise, `null` | -| loading_{full|incremental}_snapshot_decompress_bytes_compressed | `number\|null` | If the phase is at least `loading_{full|incremental}_snapshot`, this is the (compressed) number of bytes processed by decompress from the snapshot so far. Otherwise, `null` | -| loading_{full|incremental}_snapshot_insert_bytes_decompressed | `number\|null` | If the phase is at least `loading_{full|incremental}_snapshot`, this is the (decompressed) number of bytes processed from the snapshot by the snapshot insert time so far. 
Otherwise, `null` | -| loading_{full|incremental}_snapshot_insert_accounts | `number\|null` | If the phase is at least `loading_{full|incremental}_snapshot`, this is the current number of inserted accounts from the snapshot into the validator's accounts database. Otherwise, `null` | -| catching_up_elapsed_seconds | `number` | If the phase is `catching_up`, this is the duration, in seconds, the validator has spent catching up to the current slot | +| Field | Type | Description | +|-----------------------------------------------------------------------|-----------------|-------------| +| phase | `string` | One of `joining_gossip`, `loading_full_snapshot`, `loading_incremental_snapshot`, `catching_up`, or `running`. This indicates the current phase of the boot process | +| joining_gossip_elapsed_seconds | `number` | If the phase is `joining_gossip`, this is the duration, in seconds, spent joining the gossip network | +| loading_{full\|incremental}_snapshot_elapsed_seconds | `number` | If the phase is at least `loading_{full\|incremental}_snapshot`, this is the elapsed time, in seconds, spent reading (either downloading or reading from disk) the snapshot since the last reset | +| loading_{full\|incremental}_snapshot_reset_count | `number\|null` | If the phase is at least `loading_{full\|incremental}_snapshot` or later, this is the number of times the load for the snapshot failed and the phase was restarted from scratch. A snapshot load may fail due to an unreliable or underperforming network connection. Otherwise, `null` | +| loading_{full\|incremental}_snapshot_slot | `number\|null` | If the phase is at least `loading_{full\|incremental}_snapshot` or later, this is the slot of the snapshot being loaded. Otherwise, `null` | +| loading_{full\|incremental}_snapshot_total_bytes_compressed | `number\|null` | If the phase is at least `loading_{full\|incremental}_snapshot`, this is the (compressed) total size of the snapshot being loaded, in bytes. Otherwise, `null` | +| loading_{full\|incremental}_snapshot_read_bytes_compressed | `number\|null` | If the phase is at least `loading_{full\|incremental}_snapshot`, this is the (compressed) total number of bytes read from disk for the snapshot. Otherwise, `null` | +| loading_{full\|incremental}_snapshot_read_path | `string\|null` | If the phase is at least `loading_{full\|incremental}_snapshot`, this is either the remote url or local file path from which the snapshot is being read. Otherwise, `null` | +| loading_{full\|incremental}_snapshot_decompress_bytes_decompressed | `number\|null` | If the phase is at least `loading_{full\|incremental}_snapshot`, this is the (decompressed) number of bytes processed by decompress from the snapshot so far. Otherwise, `null` | +| loading_{full\|incremental}_snapshot_decompress_bytes_compressed | `number\|null` | If the phase is at least `loading_{full\|incremental}_snapshot`, this is the (compressed) number of bytes processed by decompress from the snapshot so far. Otherwise, `null` | +| loading_{full\|incremental}_snapshot_insert_bytes_decompressed | `number\|null` | If the phase is at least `loading_{full\|incremental}_snapshot`, this is the (decompressed) number of bytes processed from the snapshot by the snapshot insert time so far. Otherwise, `null` | +| loading_{full\|incremental}_snapshot_insert_accounts | `number\|null` | If the phase is at least `loading_{full\|incremental}_snapshot`, this is the current number of inserted accounts from the snapshot into the validator's accounts database. 
Otherwise, `null` | +| catching_up_elapsed_seconds | `number` | If the phase is `catching_up`, this is the duration, in seconds, the validator has spent catching up to the current slot | #### `summary.schedule_strategy` @@ -1141,21 +1143,21 @@ sort_key: | health | `GossipNetworkStake` | Aggregate statistics related to the health of the gossip network and the amount of connected peers / stake | | ingress | `GossipNetworkTraffic` | Ingress network traffic and peer metrics | | egress | `GossipNetworkTraffic` | Egress network traffic and peer metrics | -| storage | `GossipStorageStats` | Storage statistics showing the storage utilization for the Gossip Table. Inner arrays are ordered according to the following `tables_entries` array ["ContactInfoV1","Vote","LowestSlot","SnapshotHashes","AccountsHashes","EpochSlots","VersionV1","VersionV2","NodeInstance","DuplicateShred","IncrementalSnapshotHashes","ContactInfoV2","RestartLastVotedForkSlots","RestartHeaviestFork"] | +| storage | `GossipStorageStats` | Storage statistics showing the storage utilization for the Gossip Table. Inner arrays are ordered according to the following `tables_entries` array `["ContactInfoV1","Vote","LowestSlot","SnapshotHashes","AccountsHashes","EpochSlots","VersionV1","VersionV2","NodeInstance","DuplicateShred","IncrementalSnapshotHashes","ContactInfoV2","RestartLastVotedForkSlots","RestartHeaviestFork"]` | | messages | `GossipMessageStats` | Message statistics showing the message traffic for the Gossip Table. Inner arrays are ordered according to the following `message_types` array ["pull_request","pull_response","push","ping","pong","prune"] | **`GossipNetworkHealth`** -| Field | Type | Description | -|-----------------------------------------------------------------|----------|-------------| -| num_{push|pull_response}_entries_rx_{success|failure|duplicate} | `number` | The number of Gossip Table entries that this node has ever received. `success` means only entries that were fully received and included in the Table are counted. `failure` means only entries that was dropped for any reason, including parsing failures or invariant violations, are counted. `duplicate` refers to entries that were dropped as duplicates. {push|pull_request} means that only entries received via Gossip {push|pull_request} messages are counted | -| num_{push|pull_response}_messages_rx_{success|failure} | `number` | The number of Gossip messages that this node has ever received. `success` means only messages that were fully valid, even if any entries they contain were dropped. `failure` means only messages that was dropped for any reason, including parsing failures or invariant violations, are counted. `duplicate` refers to messages that were dropped as duplicates. {`push`|`pull_request`} is the type of Gossip message counted | -| total_stake | `number` | The total active stake on the Solana network for the current epoch. The information is derived from the getLeaderSchedule rpc call at startup and is fixed for the duration of the epoch | -| total_staked_peers | `number` | The total number of peers on the current epoch leader schedule also active on Gossip. This information is derived from `getClusterNodes` and `getLeaderSchedule` rpc calls at startup | -| total_unstaked_peers | `number` | The total number of peers active on gossip, not including peers on the leader schedule. 
This information is derived from `getClusterNodes` and `getLeaderSchedule` rpc calls at startup | -| connected_stake | `number` | The sum of active stake across all peers with a ContactInfo entry in the Gossip Table. The stake quantity is taken from the leader schedule, and reflects the activate stake at the start of the current epoch | -| connected_staked_peers | `number` | The number of currently connected peers that have nonzero active stake | -| connected_unstaked_peers | `number` | The number of currently connected peers without any stake currently active | +| Field | Type | Description | +|--------------------------------------------------------------------|----------|-------------| +| num_{push\|pull_response}\_entries_rx_{success\|failure\|duplicate} | `number` | The number of Gossip Table entries that this node has ever received. `success` means only entries that were fully received and included in the Table are counted. `failure` means only entries that were dropped for any reason, including parsing failures or invariant violations, are counted. `duplicate` refers to entries that were dropped as duplicates. {push\|pull_response} means that only entries received via Gossip {push\|pull_response} messages are counted | +| num_{push\|pull_response}\_messages_rx_{success\|failure} | `number` | The number of Gossip messages that this node has ever received. `success` means only messages that were fully valid are counted, even if any entries they contain were dropped. `failure` means only messages that were dropped for any reason, including parsing failures or invariant violations, are counted. `duplicate` refers to messages that were dropped as duplicates. {push\|pull_response} is the type of Gossip message counted | +| total_stake | `number` | The total active stake on the Solana network for the current epoch. The information is derived from the getLeaderSchedule rpc call at startup and is fixed for the duration of the epoch | +| total_staked_peers | `number` | The total number of peers on the current epoch leader schedule that are also active on Gossip. This information is derived from `getClusterNodes` and `getLeaderSchedule` rpc calls at startup | +| total_unstaked_peers | `number` | The total number of peers active on gossip, not including peers on the leader schedule. This information is derived from `getClusterNodes` and `getLeaderSchedule` rpc calls at startup | +| connected_stake | `number` | The sum of active stake across all peers with a ContactInfo entry in the Gossip Table. The stake quantity is taken from the leader schedule, and reflects the active stake at the start of the current epoch | +| connected_staked_peers | `number` | The number of currently connected peers that have nonzero active stake | +| connected_unstaked_peers | `number` | The number of currently connected peers without any stake currently active | **`GossipNetworkTraffic`** | Field | Type | Description | @@ -1166,7 +1168,7 @@ | peer_throughput | `number[]` | A list of network throughputs in bytes per second. The peer name for each entry is the corresponding entry in `peer_names` | **`GossipStorageStats`** -| Field | Type | Description |!
+| Field | Type | Description | |---------------|------------|-------------| | capacity | `number` | The total number of entries that can be stored in the Gossip Table before old entries start being evicted | | expired_total | `number` | The cumulative number of Gossip Table entries that have expired and been removed | @@ -1417,7 +1419,7 @@ identity is no longer in these three data sources, it will be removed. | shred_version | `number` | A `u16` representing the shred version the validator is configured to use. The shred version is changed when the cluster restarts, and is used to make sure the validator is talking to nodes that have participated in the same cluster restart | | version | `string\|null` | Software version being advertised by the validator. Might be `null` if the validator is not gossiping a version, or we have received the contact information but not the version yet. The version string, if not null, will always be formatted like `major`.`minor`.`patch` where `major`, `minor`, and `patch` are `u16`s | | feature_set | `number\|null` | First four bytes of the `FeatureSet` hash interpreted as a little endian `u32`. Might be `null` if the validator is not gossiping a feature set, or we have received the contact information but not the feature set yet | -| sockets | `[key: string]: string` | A dictionary of sockets that are advertised by the validator. `key` will be one of `gossip`, `repair`, `rpc`, `rpc_pubsub`, `serve_repair`, `serve_repair_quic`, `tpu`, `tpu_forwards`, `tpu_forwards_quic`, `tpu_quic`, `tpu_vote`, `tvu`, or `tvu_forwards`. The value is an address like `:`: the location to send traffic to for this validator with the given protocol. Address might be either an IPv4 or an IPv6 address | +| sockets | `[key: string]: string` | A dictionary of sockets that are advertised by the validator. `key` will be one of `gossip`, `serve_repair_quic`, `rpc`, `rpc_pubsub`, `serve_repair`, `tpu`, `tpu_forwards`, `tpu_forwards_quic`, `tpu_quic`, `tpu_vote`, `tvu`, `tvu_quic`, `tpu_vote_quic`, or `alpenglow`. The value is an address like `:`: the location to send traffic to for this validator with the given protocol. Address might be either an IPv4 or an IPv6 address | **`PeerUpdateVoteAccount`** | Field | Type | Description | @@ -1731,12 +1733,12 @@ response value will be `null`.
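+As a worked illustration of the templated field names in the `SlotRankings` table below (a sketch, assuming the `{slots|vals}` and `{smallest|largest}` placeholders expand independently, consistent with the four associated arrays noted after the table), a single metric such as `tips` corresponds to four concrete fields:
+
+```txt
+slots_smallest_tips
+slots_largest_tips
+vals_smallest_tips
+vals_largest_tips
+```
+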
**`SlotRankings`** | Field | Type | Description | |-----------------------------------------------|------------|-------------| -| {slots|vals}_{smallest|largest}_tips | `number[]` | Rankings for the {smallest|largest} tips this epoch | -| {slots|vals}_{smallest|largest}_fees | `number[]` | Rankings for the {smallest|largest} fees this epoch | -| {slots|vals}_{smallest|largest}_rewards | `number[]` | Rankings for the {smallest|largest} rewards this epoch | -| {slots|vals}_{smallest|largest}_duration | `number[]` | Rankings for the {smallest|largest} slot durations this epoch | -| {slots|vals}_{smallest|largest}_compute_units | `number[]` | Rankings for the {smallest|largest} compute units this epoch | -| {slots|vals}_{smallest|largest}_skipped | `number[]` | Rankings for the {earliest|latest} skipped slots this epoch | +| {slots\|vals}_{smallest\|largest}_tips | `number[]` | Rankings for the {smallest\|largest} tips this epoch | +| {slots\|vals}_{smallest\|largest}_fees | `number[]` | Rankings for the {smallest\|largest} fees this epoch | +| {slots\|vals}_{smallest\|largest}_rewards | `number[]` | Rankings for the {smallest\|largest} rewards this epoch | +| {slots\|vals}_{smallest\|largest}_duration | `number[]` | Rankings for the {smallest\|largest} slot durations this epoch | +| {slots\|vals}_{smallest\|largest}_compute_units | `number[]` | Rankings for the {smallest\|largest} compute units this epoch | +| {slots\|vals}_{smallest\|largest}_skipped | `number[]` | Rankings for the {earliest\|latest} skipped slots this epoch | Each metric in this message will have four associated arrays. diff --git a/src/app/firedancer/topology.c b/src/app/firedancer/topology.c index c3df3c6ab8..c891d4b9ff 100644 --- a/src/app/firedancer/topology.c +++ b/src/app/firedancer/topology.c @@ -258,6 +258,9 @@ fd_topo_initialize( config_t * config ) { fd_topob_wksp( topo, "shred_out" ); fd_topob_wksp( topo, "replay_stake" ); + if( FD_LIKELY( config->tiles.gui.enabled ) ) { /* the gui, which is optional, is the only consumer of replay_votes */ + fd_topob_wksp( topo, "replay_votes" ); + } fd_topob_wksp( topo, "replay_exec" ); fd_topob_wksp( topo, "replay_out" ); fd_topob_wksp( topo, "tower_out" ); @@ -345,6 +348,9 @@ fd_topo_initialize( config_t * config ) { /**/ fd_topob_link( topo, "dedup_resolv", "dedup_resolv", 65536UL, FD_TPU_PARSED_MTU, 1UL ); FOR(resolv_tile_cnt) fd_topob_link( topo, "resolv_pack", "resolv_pack", 65536UL, FD_TPU_RESOLVED_MTU, 1UL ); /**/ fd_topob_link( topo, "replay_stake", "replay_stake", 128UL, FD_STAKE_OUT_MTU, 1UL ); /* TODO: This should be 2 but requires fixing STEM_BURST */ + if( FD_LIKELY( config->tiles.gui.enabled ) ) { /* the gui, which is optional, is the only consumer of replay_votes */ + fd_topob_link( topo, "replay_votes", "replay_votes", 128UL, FD_RUNTIME_MAX_VOTE_ACCOUNTS*sizeof(fd_replay_vote_t), 1UL ); + } /**/ fd_topob_link( topo, "replay_out", "replay_out", 8192UL, sizeof(fd_replay_message_t), 1UL ); /**/ fd_topob_link( topo, "pack_poh", "pack_poh", 128UL, sizeof(fd_done_packing_t), 1UL ); /* pack_bank is shared across all banks, so if one bank stalls due to complex transactions, the buffer neeeds to be large so that @@ -502,6 +508,9 @@ fd_topo_initialize( config_t * config ) { /**/ fd_topob_tile_in ( topo, "replay", 0UL, "metric_in", "genesi_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED ); /**/ fd_topob_tile_out( topo, "replay", 0UL, "replay_out", 0UL ); /**/ fd_topob_tile_out( topo, "replay", 0UL, "replay_stake", 0UL ); + if( FD_LIKELY( config->tiles.gui.enabled ) ) { /* 
the gui, which is optional, is the only consumer of replay_votes */ + fd_topob_tile_out( topo, "replay", 0UL, "replay_votes", 0UL ); + } /**/ fd_topob_tile_out( topo, "replay", 0UL, "executed_txn", 0UL ); /**/ fd_topob_tile_out( topo, "replay", 0UL, "replay_exec", 0UL ); /**/ fd_topob_tile_in ( topo, "replay", 0UL, "metric_in", "tower_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED ); @@ -654,6 +663,8 @@ fd_topo_initialize( config_t * config ) { /**/ fd_topob_tile_in( topo, "gui", 0UL, "metric_in", "gossip_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED ); /**/ fd_topob_tile_in( topo, "gui", 0UL, "metric_in", "tower_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED ); /**/ fd_topob_tile_in( topo, "gui", 0UL, "metric_in", "replay_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED ); + /**/ fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "replay_stake", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED ); + /**/ fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "replay_votes", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED ); if( FD_LIKELY( snapshots_enabled ) ) { fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "snaprd_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED ); diff --git a/src/disco/gui/fd_gui.c b/src/disco/gui/fd_gui.c index 11899f61e4..ca8529b916 100644 --- a/src/disco/gui/fd_gui.c +++ b/src/disco/gui/fd_gui.c @@ -229,6 +229,20 @@ fd_gui_ws_open( fd_gui_t * gui, FD_TEST( !fd_http_server_ws_send( gui->http, ws_conn_id ) ); } + /* todo .. temporary workaround to skip the blur until frontend boot + screen lands */ + if( FD_UNLIKELY( gui->summary.is_full_client ) ) { + ulong real_mls = fd_ulong_if( gui->summary.catch_up_repair_sz>0UL, gui->summary.catch_up_repair[ 0 ], 0UL ); + uchar prev_phase = gui->summary.startup_progress.phase; + ulong prev_mls = gui->summary.startup_progress.startup_ledger_max_slot; + gui->summary.startup_progress.phase = FD_GUI_START_PROGRESS_TYPE_RUNNING; + gui->summary.startup_progress.startup_ledger_max_slot = real_mls; + fd_gui_printf_startup_progress( gui ); + FD_TEST( !fd_http_server_ws_send( gui->http, ws_conn_id ) ); + gui->summary.startup_progress.phase = prev_phase; + gui->summary.startup_progress.startup_ledger_max_slot = prev_mls; + } + if( FD_LIKELY( gui->block_engine.has_block_engine ) ) { fd_gui_printf_block_engine( gui ); FD_TEST( !fd_http_server_ws_send( gui->http, ws_conn_id ) ); @@ -1093,6 +1107,15 @@ fd_gui_slot_duration( fd_gui_t const * gui, fd_gui_slot_t const * cur ) { return (ulong)(cur->completed_time - prev->completed_time); } +/* All rankings are initialized / reset to ULONG_MAX. These sentinels + sort AFTER non-sentinel ranking entries. Equal slots are sorted by + oldest slot AFTER. Otherwise sort by value according to ranking + type. 
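+   For example: with equal values, {slot=7,value=9} sorts BEFORE
+   {slot=5,value=9} (the newer slot first, the oldest slot last), and both
+   sort BEFORE any {slot=ULONG_MAX} sentinel.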
*/ +#define SORT_NAME fd_gui_slot_ranking_sort +#define SORT_KEY_T fd_gui_slot_ranking_t +#define SORT_BEFORE(a,b) fd_int_if( (a).slot==ULONG_MAX, 0, fd_int_if( (b).slot==ULONG_MAX, 1, fd_int_if( (a).value==(b).value, (a).slot>(b).slot, fd_int_if( (a).type==FD_GUI_SLOT_RANKING_TYPE_DESC, (a).value>(b).value, (a).value<(b).value ) ) ) ) +#include "../../util/tmpl/fd_sort.c" + static inline void fd_gui_try_insert_ranking( fd_gui_t * gui, fd_gui_slot_rankings_t * rankings, @@ -1353,55 +1376,48 @@ fd_gui_clear_slot( fd_gui_t * gui, return slot; } -static void -fd_gui_handle_leader_schedule( fd_gui_t * gui, - ulong const * msg, - long now ) { - ulong epoch = msg[ 0 ]; - ulong staked_cnt = msg[ 1 ]; - ulong start_slot = msg[ 2 ]; - ulong slot_cnt = msg[ 3 ]; - ulong excluded_stake = msg[ 4 ]; - ulong vote_keyed_lsched = msg[ 5 ]; - - FD_TEST( staked_cnt<=MAX_STAKED_LEADERS ); - FD_TEST( slot_cnt<=MAX_SLOTS_PER_EPOCH ); - - ulong idx = epoch % 2UL; +void +fd_gui_handle_leader_schedule( fd_gui_t * gui, + fd_stake_weight_msg_t const * leader_schedule, + long now ) { + FD_TEST( leader_schedule->staked_cnt<=MAX_STAKED_LEADERS ); + FD_TEST( leader_schedule->slot_cnt<=MAX_SLOTS_PER_EPOCH ); + + ulong idx = leader_schedule->epoch % 2UL; gui->epoch.has_epoch[ idx ] = 1; - gui->epoch.epochs[ idx ].epoch = epoch; - gui->epoch.epochs[ idx ].start_slot = start_slot; - gui->epoch.epochs[ idx ].end_slot = start_slot + slot_cnt - 1; // end_slot is inclusive. - gui->epoch.epochs[ idx ].excluded_stake = excluded_stake; + gui->epoch.epochs[ idx ].epoch = leader_schedule->epoch; + gui->epoch.epochs[ idx ].start_slot = leader_schedule->start_slot; + gui->epoch.epochs[ idx ].end_slot = leader_schedule->start_slot + leader_schedule->slot_cnt - 1; // end_slot is inclusive. + gui->epoch.epochs[ idx ].excluded_stake = leader_schedule->excluded_stake; gui->epoch.epochs[ idx ].my_total_slots = 0UL; gui->epoch.epochs[ idx ].my_skipped_slots = 0UL; memset( gui->epoch.epochs[ idx ].rankings, (int)(UINT_MAX), sizeof(gui->epoch.epochs[ idx ].rankings) ); memset( gui->epoch.epochs[ idx ].my_rankings, (int)(UINT_MAX), sizeof(gui->epoch.epochs[ idx ].my_rankings) ); - gui->epoch.epochs[ idx ].rankings_slot = start_slot; + gui->epoch.epochs[ idx ].rankings_slot = leader_schedule->start_slot; - fd_vote_stake_weight_t const * stake_weights = fd_type_pun_const( msg+6UL ); - memcpy( gui->epoch.epochs[ idx ].stakes, stake_weights, staked_cnt*sizeof(fd_vote_stake_weight_t) ); + fd_vote_stake_weight_t const * stake_weights = leader_schedule->weights; + fd_memcpy( gui->epoch.epochs[ idx ].stakes, stake_weights, leader_schedule->staked_cnt*sizeof(fd_vote_stake_weight_t) ); fd_epoch_leaders_delete( fd_epoch_leaders_leave( gui->epoch.epochs[ idx ].lsched ) ); gui->epoch.epochs[idx].lsched = fd_epoch_leaders_join( fd_epoch_leaders_new( gui->epoch.epochs[ idx ]._lsched, - epoch, + leader_schedule->epoch, gui->epoch.epochs[ idx ].start_slot, - slot_cnt, - staked_cnt, + leader_schedule->slot_cnt, + leader_schedule->staked_cnt, gui->epoch.epochs[ idx ].stakes, - excluded_stake, - vote_keyed_lsched ) ); + leader_schedule->excluded_stake, + leader_schedule->vote_keyed_lsched ) ); - if( FD_UNLIKELY( start_slot==0UL ) ) { + if( FD_UNLIKELY( leader_schedule->start_slot==0UL ) ) { gui->epoch.epochs[ 0 ].start_time = now; } else { gui->epoch.epochs[ idx ].start_time = LONG_MAX; - for( ulong i=0UL; istart_slot-1UL, FD_GUI_SLOTS_CNT ); i++ ) { + fd_gui_slot_t const * slot = fd_gui_get_slot_const( gui, leader_schedule->start_slot-i ); if( FD_UNLIKELY( !slot 
) ) break; else if( FD_UNLIKELY( slot->skipped ) ) continue; @@ -1477,6 +1493,11 @@ fd_gui_handle_slot_end( fd_gui_t * gui, fd_gui_tile_stats_snap( gui, slot->waterfall_end, slot->tile_stats_end, now ); } +#define SORT_NAME fd_gui_ephemeral_slot_sort +#define SORT_KEY_T fd_gui_ephemeral_slot_t +#define SORT_BEFORE(a,b) fd_int_if( (a).slot==ULONG_MAX, 0, fd_int_if( (b).slot==ULONG_MAX, 1, fd_int_if( (a).slot==(b).slot, (a).timestamp_arrival_nanos>(b).timestamp_arrival_nanos, (a).slot>(b).slot ) ) ) +#include "../../util/tmpl/fd_sort.c" + static inline void fd_gui_try_insert_ephemeral_slot( fd_gui_ephemeral_slot_t * slots, ulong slots_sz, ulong slot, long now ) { int already_present = 0; @@ -2072,6 +2093,11 @@ fd_gui_handle_reset_slot( fd_gui_t * gui, ulong reset_slot, long now ) { ulong prev_slot_completed = gui->summary.slot_completed; gui->summary.slot_completed = reset_slot; + if( FD_LIKELY( fd_gui_get_slot( gui, gui->summary.slot_completed ) ) ) { + fd_gui_printf_slot( gui, gui->summary.slot_completed ); + fd_http_server_ws_broadcast( gui->http ); + } + fd_gui_printf_completed_slot( gui ); fd_http_server_ws_broadcast( gui->http ); @@ -2181,9 +2207,18 @@ fd_gui_handle_reset_slot( fd_gui_t * gui, ulong reset_slot, long now ) { } } +#define SORT_NAME fd_gui_slot_staged_shred_event_evict_sort +#define SORT_KEY_T fd_gui_slot_staged_shred_event_t +#define SORT_BEFORE(a,b) (__extension__({ (void)(b); (a).slot==ULONG_MAX; })) +#include "../../util/tmpl/fd_sort.c" + +#define SORT_NAME fd_gui_slot_staged_shred_event_slot_sort +#define SORT_KEY_T fd_gui_slot_staged_shred_event_t +#define SORT_BEFORE(a,b) ((a).slot<(b).slot) +#include "../../util/tmpl/fd_sort.c" + static void fd_gui_handle_rooted_slot( fd_gui_t * gui, ulong root_slot ) { - // ulong unstaged_cnt = 0UL; for( ulong i=0UL; ilevel>=FD_GUI_SLOT_LEVEL_ROOTED ) ) break; - /* TODO: commented out due to being too slow */ - // /* archive root shred events */ - // slot->shreds.start_offset = gui->shreds.history_tail; - // for( ulong i=gui->shreds.staged_head; ishreds.staged_tail; i++ ) { - // if( FD_UNLIKELY( gui->shreds.staged[ i ].slot==slot->slot ) ) { - // /* move event to history */ - // gui->shreds.history[ gui->shreds.history_tail ].timestamp = gui->shreds.staged[ i ].timestamp; - // gui->shreds.history[ gui->shreds.history_tail ].shred_idx = gui->shreds.staged[ i ].shred_idx; - // gui->shreds.history[ gui->shreds.history_tail ].event = gui->shreds.staged[ i ].event; - // gui->shreds.history_tail++; - - // gui->shreds.staged[ i ].slot = ULONG_MAX; - // unstaged_cnt++; - // } - - // /* evict older slots staged also */ - // if( FD_UNLIKELY( gui->shreds.staged[ i ].slotslot ) ) { - // gui->shreds.staged[ i ].slot = ULONG_MAX; - // unstaged_cnt++; - // } - // } - // slot->shreds.end_offset = gui->shreds.history_tail; - - // /* change notarization levels and rebroadcast */ - // slot->level = FD_GUI_SLOT_LEVEL_ROOTED; - // fd_gui_printf_slot( gui, parent_slot ); - // fd_http_server_ws_broadcast( gui->http ); + /* change notarization levels and rebroadcast */ + slot->level = FD_GUI_SLOT_LEVEL_ROOTED; + fd_gui_printf_slot( gui, parent_slot ); + fd_http_server_ws_broadcast( gui->http ); + } + + /* archive root shred events. We want to avoid n^2 iteration here + since it can significantly slow things down. Instead, we copy + over all rooted shreds to a scratch space, stable sort by slot, + copy the sorted arrays to the shred history. 
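+     This keeps each root update at O(n log n) in the number of staged
+     events (one linear pass, two stable sorts, and a linear copy into
+     history) instead of rescanning the staging buffer once per rooted
+     slot.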
*/ + ulong evicted_cnt = 0UL; /* the total number evicted, including ignored */ + ulong archive_cnt = 0UL; /* the total number evicted, NOT including ignored */ + for( ulong i=gui->shreds.staged_head; ishreds.staged_tail; i++ ) { + /* ignore new shred events that came in after their slot was rooted */ + if( FD_UNLIKELY( gui->shreds.history_slot!=ULONG_MAX && gui->shreds.staged[ i ].slot<=gui->shreds.history_slot ) ) { + gui->shreds.staged[ i ].slot = ULONG_MAX; + evicted_cnt++; + } + + if( FD_UNLIKELY( gui->shreds.staged[ i ].slot<=root_slot ) ) { + /* move to scratch */ + fd_memcpy( gui->shreds._staged_scratch, &gui->shreds.staged[ i ], sizeof(fd_gui_slot_staged_shred_event_t) ); + archive_cnt++; + + /* evict from staged */ + gui->shreds.staged[ i ].slot = ULONG_MAX; + evicted_cnt++; + } } /* The entries from the staging area are evicted by setting their @@ -2229,15 +2265,37 @@ fd_gui_handle_rooted_slot( fd_gui_t * gui, ulong root_slot ) { IMPORTANT: this sort needs to be stable since we always keep valid un-broadcast events at the end of the ring buffer */ - // if( FD_LIKELY( unstaged_cnt ) ) { - // fd_gui_slot_staged_shred_event_sort_insert( &gui->shreds.staged[ gui->shreds.staged_head ], gui->shreds.staged_tail-gui->shreds.staged_head ); - // gui->shreds.staged_head += unstaged_cnt; - // } + if( FD_LIKELY( evicted_cnt ) ) { + fd_gui_slot_staged_shred_event_evict_sort_stable( &gui->shreds.staged[ gui->shreds.staged_head ], gui->shreds.staged_tail-gui->shreds.staged_head, gui->shreds._staged_scratch2 ); + gui->shreds.staged_head += evicted_cnt; + + /* In the rare case that we are archiving any shred events that have + not yet been broadcast, we'll increment + gui->shreds.staged_next_broadcast to keep it in bounds. */ + gui->shreds.staged_next_broadcast = fd_ulong_max( gui->shreds.staged_head, gui->shreds.staged_next_broadcast ); + + /* sort scratch by slot increasing */ + fd_gui_slot_staged_shred_event_slot_sort_stable( gui->shreds._staged_scratch, archive_cnt, gui->shreds._staged_scratch2 ); - // /* In the rare case that we are archiving any shred events that have - // not yet been broadcast, we'll increment - // gui->shreds.staged_next_broadcast to keep it in bounds. 
*/ - // gui->shreds.staged_next_broadcast = fd_ulong_max( gui->shreds.staged_head, gui->shreds.staged_next_broadcast ); + /* copy shred events to archive */ + for( ulong i=0UL; ishreds._staged_scratch[ i ].slot!=gui->shreds.history_slot ) ) { + fd_gui_slot_t * prev_slot = fd_gui_get_slot( gui, gui->shreds.history_slot ); + if( FD_LIKELY( prev_slot ) ) prev_slot->shreds.end_offset = gui->shreds.history_tail; + + gui->shreds.history_slot = gui->shreds._staged_scratch[ i ].slot; + + fd_gui_slot_t * next_slot = fd_gui_get_slot( gui, gui->shreds.history_slot ); + if( FD_LIKELY( next_slot ) ) next_slot->shreds.start_offset = gui->shreds.history_tail; + } + + gui->shreds.history[ gui->shreds.history_tail ].timestamp = gui->shreds._staged_scratch[ i ].timestamp; + gui->shreds.history[ gui->shreds.history_tail ].shred_idx = gui->shreds._staged_scratch[ i ].shred_idx; + gui->shreds.history[ gui->shreds.history_tail ].event = gui->shreds._staged_scratch[ i ].event; + + gui->shreds.history_tail++; + } + } gui->summary.slot_rooted = root_slot; fd_gui_printf_root_slot( gui ); @@ -2365,7 +2423,8 @@ fd_gui_plugin_message( fd_gui_t * gui, break; } case FD_PLUGIN_MSG_LEADER_SCHEDULE: { - fd_gui_handle_leader_schedule( gui, (ulong const *)msg, now ); + FD_STATIC_ASSERT( sizeof(fd_stake_weight_msg_t)==6*sizeof(ulong), "new fields breaks things" ); + fd_gui_handle_leader_schedule( gui, (fd_stake_weight_msg_t *)msg, now ); break; } case FD_PLUGIN_MSG_SLOT_START: { diff --git a/src/disco/gui/fd_gui.h b/src/disco/gui/fd_gui.h index 6790984a56..1d3f44ff67 100644 --- a/src/disco/gui/fd_gui.h +++ b/src/disco/gui/fd_gui.h @@ -348,11 +348,6 @@ struct fd_gui_slot_staged_shred_event { typedef struct fd_gui_slot_staged_shred_event fd_gui_slot_staged_shred_event_t; -#define SORT_NAME fd_gui_slot_staged_shred_event_sort -#define SORT_KEY_T fd_gui_slot_staged_shred_event_t -#define SORT_BEFORE(a,b) (__extension__({ (void)(b); (a).slot==ULONG_MAX; })) -#include "../../util/tmpl/fd_sort.c" - struct __attribute__((packed)) fd_gui_slot_history_shred_event { long timestamp; ushort shred_idx; @@ -368,15 +363,6 @@ struct fd_gui_slot_ranking { }; typedef struct fd_gui_slot_ranking fd_gui_slot_ranking_t; -/* All rankings are initialized / reset to ULONG_MAX. These sentinels - sort AFTER non-sentinel ranking entries. Equal slots are sorted by - oldest slot AFTER. Otherwise sort by value according to ranking - type. 
*/ -#define SORT_NAME fd_gui_slot_ranking_sort -#define SORT_KEY_T fd_gui_slot_ranking_t -#define SORT_BEFORE(a,b) fd_int_if( (a).slot==ULONG_MAX, 0, fd_int_if( (b).slot==ULONG_MAX, 1, fd_int_if( (a).value==(b).value, (a).slot>(b).slot, fd_int_if( (a).type==FD_GUI_SLOT_RANKING_TYPE_DESC, (a).value>(b).value, (a).value<(b).value ) ) ) ) -#include "../../util/tmpl/fd_sort.c" - struct fd_gui_slot_rankings { fd_gui_slot_ranking_t largest_tips [ FD_GUI_SLOT_RANKINGS_SZ+1UL ]; fd_gui_slot_ranking_t largest_fees [ FD_GUI_SLOT_RANKINGS_SZ+1UL ]; @@ -400,11 +386,6 @@ struct fd_gui_ephemeral_slot { }; typedef struct fd_gui_ephemeral_slot fd_gui_ephemeral_slot_t; -#define SORT_NAME fd_gui_ephemeral_slot_sort -#define SORT_KEY_T fd_gui_ephemeral_slot_t -#define SORT_BEFORE(a,b) fd_int_if( (a).slot==ULONG_MAX, 0, fd_int_if( (b).slot==ULONG_MAX, 1, fd_int_if( (a).slot==(b).slot, (a).timestamp_arrival_nanos>(b).timestamp_arrival_nanos, (a).slot>(b).slot ) ) ) -#include "../../util/tmpl/fd_sort.c" - struct __attribute__((packed)) fd_gui_txn { uchar signature[ FD_TXN_SIGNATURE_SZ ]; ulong transaction_fee; @@ -735,6 +716,10 @@ struct fd_gui { ulong history_slot; /* the largest slot store in history */ ulong history_tail; /* history_tail % FD_GUI_SHREDS_STAGING_SZ is the last valid event in history +1 */ fd_gui_slot_history_shred_event_t history[ FD_GUI_SHREDS_HISTORY_SZ ]; + + /* scratch space for stable sorts */ + fd_gui_slot_staged_shred_event_t _staged_scratch [ FD_GUI_SHREDS_STAGING_SZ ]; + fd_gui_slot_staged_shred_event_t _staged_scratch2[ FD_GUI_SHREDS_STAGING_SZ ]; } shreds; /* full client */ }; @@ -840,6 +825,11 @@ void fd_gui_handle_snapshot_update( fd_gui_t * gui, fd_snaprd_update_t const * msg ); +void +fd_gui_handle_leader_schedule( fd_gui_t * gui, + fd_stake_weight_msg_t const * leader_schedule, + long now ); + void fd_gui_handle_tower_update( fd_gui_t * gui, fd_tower_slot_done_t const * msg, diff --git a/src/disco/gui/fd_gui_live_table_tmpl.c b/src/disco/gui/fd_gui_live_table_tmpl.c index 844a7e1060..ba65c23200 100644 --- a/src/disco/gui/fd_gui_live_table_tmpl.c +++ b/src/disco/gui/fd_gui_live_table_tmpl.c @@ -677,7 +677,7 @@ LIVE_TABLE_STATIC FD_FN_PURE LIVE_TABLE_(fwd_iter_t) LIVE_TABLE_(fwd_iter_init)( LIVE_TABLE_(t) * join, LIVE_TABLE_(sort_key_t) const * sort_key, LIVE_TABLE_ROW_T * pool ) { ulong sort_key_idx = LIVE_TABLE_(private_query_sort_key)( join, sort_key ); if( FD_UNLIKELY( sort_key_idx==ULONG_MAX ) ) { - for( ulong i=0UL; itreaps_is_active[ i ] ) ) continue; sort_key_idx = i; LIVE_TABLE_(private_sort_key_create)( join, i, sort_key, pool ); diff --git a/src/disco/gui/fd_gui_peers.c b/src/disco/gui/fd_gui_peers.c index bd7ea87798..542e64fc24 100644 --- a/src/disco/gui/fd_gui_peers.c +++ b/src/disco/gui/fd_gui_peers.c @@ -3,7 +3,6 @@ #include "../../ballet/json/cJSON.h" #include "../../flamenco/gossip/fd_gossip_private.h" -#include "../../flamenco/gossip/fd_gossip_types.h" #define LOGGING 0 @@ -187,7 +186,7 @@ fd_gui_peers_gossip_stats_snap( fd_gui_peers_ctx_t * peers, fd_gui_peers_node_t * cur = fd_gui_peers_bandwidth_tracking_fwd_iter_ele( iter, peers->contact_info_table ); if( FD_UNLIKELY( jnetwork_ingress_peer_sz ) ) { - if( FD_LIKELY( cur->has_node_info ) ) FD_TEST( fd_cstr_printf_check( gossip_stats->network_ingress_peer_names[ j ], sizeof(gossip_stats->network_ingress_peer_names[ j ]), NULL, "%s", cur->name ) ); + if( FD_LIKELY( cur->has_val_info ) ) FD_TEST( fd_cstr_printf_check( gossip_stats->network_ingress_peer_names[ j ], sizeof(gossip_stats->network_ingress_peer_names[ 
j ]), NULL, "%s", cur->name ) ); else gossip_stats->network_ingress_peer_names[ j ][ 0 ] = '\0'; gossip_stats->network_ingress_peer_bytes_per_sec[ j ] = cur->gossvf_rx_sum.rate; fd_memcpy( &gossip_stats->network_ingress_peer_identities[ j ], cur->contact_info.pubkey.uc, 32UL ); @@ -231,7 +230,7 @@ fd_gui_peers_gossip_stats_snap( fd_gui_peers_ctx_t * peers, fd_gui_peers_node_t * cur = fd_gui_peers_bandwidth_tracking_fwd_iter_ele( iter, peers->contact_info_table ); if( FD_UNLIKELY( jnetwork_egress_peer_sz ) ) { - if( FD_LIKELY( cur->has_node_info ) ) FD_TEST( fd_cstr_printf_check( gossip_stats->network_egress_peer_names[ j ], sizeof(gossip_stats->network_egress_peer_names[ j ]), NULL, "%s", cur->name ) ); + if( FD_LIKELY( cur->has_val_info ) ) FD_TEST( fd_cstr_printf_check( gossip_stats->network_egress_peer_names[ j ], sizeof(gossip_stats->network_egress_peer_names[ j ]), NULL, "%s", cur->name ) ); else gossip_stats->network_egress_peer_names[ j ][ 0 ] = '\0'; gossip_stats->network_egress_peer_bytes_per_sec[ j ] = cur->gossip_tx_sum.rate; fd_memcpy( &gossip_stats->network_egress_peer_identities[ j ], cur->contact_info.pubkey.uc, 32UL ); @@ -452,7 +451,7 @@ fd_gui_peers_handle_gossip_message( fd_gui_peers_ctx_t * peers, #endif } -int +void fd_gui_peers_handle_gossip_update( fd_gui_peers_ctx_t * peers, fd_gossip_update_message_t const * update, long now ) { @@ -500,7 +499,7 @@ fd_gui_peers_handle_gossip_update( fd_gui_peers_ctx_t * peers, /* update does nothing */ if( FD_UNLIKELY( fd_gui_peers_contact_info_eq( &peer->contact_info, update->contact_info.contact_info ) ) ) { peer->contact_info.wallclock_nanos = update->contact_info.contact_info->wallclock_nanos; - return FD_GUI_PEERS_NODE_NOP; + break; } fd_gui_peers_node_sock_map_idx_remove_fast( peers->node_sock_map, update->contact_info.idx, peers->contact_info_table ); @@ -514,7 +513,9 @@ fd_gui_peers_handle_gossip_update( fd_gui_peers_ctx_t * peers, fd_gui_peers_live_table_idx_insert ( peers->live_table, update->contact_info.idx, peers->contact_info_table ); fd_gui_peers_node_sock_map_idx_insert ( peers->node_sock_map, update->contact_info.idx, peers->contact_info_table ); - return FD_GUI_PEERS_NODE_UPDATE; + /* broadcast update to WebSocket clients */ + fd_gui_peers_printf_nodes( peers, (int[]){ FD_GUI_PEERS_NODE_UPDATE }, (ulong[]){ update->contact_info.idx }, 1UL ); + fd_http_server_ws_broadcast( peers->http ); } else { FD_TEST( !fd_gui_peers_node_pubkey_map_ele_query_const( peers->node_pubkey_map, &update->contact_info.contact_info->pubkey, NULL, peers->contact_info_table ) ); #if LOGGING @@ -526,7 +527,8 @@ fd_gui_peers_handle_gossip_update( fd_gui_peers_ctx_t * peers, memset( &peer->gossip_tx, 0, sizeof(peer->gossip_tx) ); memset( &peer->gossvf_rx_sum, 0, sizeof(peer->gossvf_rx_sum) ); memset( &peer->gossip_tx_sum, 0, sizeof(peer->gossip_tx_sum) ); - peer->has_node_info = 0; + peer->has_val_info = 0; + peer->has_vote_info = 0; peer->valid = 1; peer->update_time_nanos = now; fd_memcpy( &peer->contact_info, update->contact_info.contact_info, sizeof(peer->contact_info) ); @@ -540,11 +542,12 @@ fd_gui_peers_handle_gossip_update( fd_gui_peers_ctx_t * peers, fd_gui_peers_bandwidth_tracking_idx_insert( peers->bw_tracking, update->contact_info.idx, peers->contact_info_table ); fd_gui_printf_peers_view_resize( peers, fd_gui_peers_live_table_ele_cnt( peers->live_table ) ); - FD_TEST( !fd_http_server_ws_broadcast( peers->http ) ); + fd_http_server_ws_broadcast( peers->http ); - return FD_GUI_PEERS_NODE_ADD; + /* broadcast update to WebSocket 
clients */ + fd_gui_peers_printf_nodes( peers, (int[]){ FD_GUI_PEERS_NODE_ADD }, (ulong[]){ update->contact_info.idx }, 1UL ); + fd_http_server_ws_broadcast( peers->http ); } - break; } case FD_GOSSIP_UPDATE_TAG_CONTACT_INFO_REMOVE: { @@ -577,13 +580,123 @@ fd_gui_peers_handle_gossip_update( fd_gui_peers_ctx_t * peers, peer->valid = 0; fd_gui_printf_peers_view_resize( peers, fd_gui_peers_live_table_ele_cnt( peers->live_table ) ); - FD_TEST( !fd_http_server_ws_broadcast( peers->http ) ); + fd_http_server_ws_broadcast( peers->http ); + + /* broadcast update to WebSocket clients */ + fd_gui_peers_printf_nodes( peers, (int[]){ FD_GUI_PEERS_NODE_DELETE }, (ulong[]){ update->contact_info.idx }, 1UL ); + fd_http_server_ws_broadcast( peers->http ); break; } default: break; } +} + +#define SORT_NAME fd_gui_peers_votes_slot_sort +#define SORT_KEY_T fd_replay_vote_t +#define SORT_BEFORE(a,b) ((a).last_vote_slot>(b).last_vote_slot) +#include "../../util/tmpl/fd_sort.c" + +#define SORT_NAME fd_gui_peers_votes_stake_sort +#define SORT_KEY_T fd_replay_vote_t +#define SORT_BEFORE(a,b) ((a).stake>(b).stake) +#include "../../util/tmpl/fd_sort.c" + +#define SORT_NAME fd_gui_peers_votes_pkey_sort +#define SORT_KEY_T fd_replay_vote_t +#define SORT_BEFORE(a,b) ( memcmp((a).node_account.uc, (b).node_account.uc, sizeof(fd_pubkey_t) ) < 0 ) +#include "../../util/tmpl/fd_sort.c" + +void +fd_gui_peers_handle_vote_update( fd_gui_peers_ctx_t * peers, + fd_replay_vote_t const * votes, + ulong vote_cnt, + long now ) { + (void)now; + fd_replay_vote_t * votes_sorted = peers->_scratch[ 0 ]; + fd_replay_vote_t * votes_scratch = peers->_scratch[ 1 ]; + fd_memcpy( votes_sorted, votes, vote_cnt*sizeof(fd_replay_vote_t) ); + + /* deduplicate node accounts, keeping the vote accounts with largest stake */ + fd_gui_peers_votes_stake_sort_inplace( votes_sorted, vote_cnt ); + fd_gui_peers_votes_pkey_sort_stable( votes_sorted, vote_cnt, votes_scratch ); + + ulong total_stake = 0UL; + fd_pubkey_t prev_peer = { 0 }; + for( ulong i=0UL; i2*total_stake ) ) { + last_vote_slot_p67 = votes_sorted[ i ].last_vote_slot; + } + } - return FD_GUI_PEERS_NODE_NOP; + /* resuse scratch to for publish state */ + int * actions = (void *)votes_scratch; + ulong * idxs = (ulong *)((uchar *)votes_scratch + FD_RUNTIME_MAX_VOTE_ACCOUNTS*sizeof(int)); + FD_STATIC_ASSERT( sizeof(peers->_scratch[ 1 ])>=(FD_RUNTIME_MAX_VOTE_ACCOUNTS*(sizeof(int) + sizeof(ulong))), "scratch too small" ); + + ulong count = 0UL; + for( ulong i=0UL; inode_pubkey_map, &votes_sorted[ i ].node_account, ULONG_MAX, peers->contact_info_table ); + if( FD_UNLIKELY( peer_idx==ULONG_MAX ) ) continue; /* peer not on gossip */ + + fd_gui_peers_node_t * peer = peers->contact_info_table + peer_idx; + + /* TODO: we only publish updates when stake changes, otherwise we'd + have to republish for every peer every slot, which ends up being + too much bandwidth because we republish all the peer info. + Ideally, we decouple the vote updates from the reset of the peer + info which would let us make updates quickly. 
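+     Note: below, a peer is considered delinquent when its last vote
+     trails last_vote_slot_p67 (computed above as roughly the highest
+     vote slot already reached by two-thirds of stake) by more than 150
+     slots.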
*/ + int is_delinquent = ((long)last_vote_slot_p67 - (long)votes_sorted[ i ].last_vote_slot) > 150L; + int vote_eq = peer->has_vote_info + && !memcmp( peer->vote_account.uc, votes_sorted[ i ].vote_account.uc, sizeof(fd_pubkey_t) ) + && peer->stake ==votes_sorted[ i ].stake + // && peer->last_vote_slot ==votes_sorted[ i ].last_vote_slot + // && peer->last_vote_timestamp ==votes_sorted[ i ].last_vote_timestamp + // && peer->epoch_credits ==votes_sorted[ i ].epoch_credits + && peer->commission ==votes_sorted[ i ].commission + && peer->epoch ==votes_sorted[ i ].epoch + && peer->delinquent ==is_delinquent; + + if( FD_LIKELY( vote_eq ) ) continue; /* nop */ + + /* todo .. when we include any of these stats in the peer metrics + table, we need to properly handle table updates */ + peer->has_vote_info = 1; + peer->vote_account = votes_sorted[ i ].vote_account; + peer->stake = votes_sorted[ i ].stake; + peer->last_vote_slot = votes_sorted[ i ].last_vote_slot; + peer->last_vote_timestamp = votes_sorted[ i ].last_vote_timestamp; + peer->epoch_credits = votes_sorted[ i ].epoch_credits; + peer->commission = votes_sorted[ i ].commission; + peer->epoch = votes_sorted[ i ].epoch; + peer->delinquent = is_delinquent; + + actions[ count ] = FD_GUI_PEERS_NODE_UPDATE; + idxs [ count ] = peer_idx; + count++; + } + + if( FD_UNLIKELY( count ) ) { + fd_gui_peers_printf_nodes( peers, actions, idxs, count ); + fd_http_server_ws_broadcast( peers->http ); + } } static void @@ -602,17 +715,18 @@ fd_gui_peers_viewport_snap( fd_gui_peers_ctx_t * peers, ulong ws_conn_id ) { } } FD_TEST( oldest_ws_conn_id!=ULONG_MAX ); - fd_gui_peers_live_table_sort_key_remove( peers->live_table, peers->client_viewports[ oldest_ws_conn_id ].sort_key ); + fd_gui_peers_live_table_sort_key_remove( peers->live_table, &peers->client_viewports[ oldest_ws_conn_id ].sort_key ); FD_TEST( fd_gui_peers_live_table_active_sort_key_cnt( peers->live_table )==FD_GUI_PEERS_CI_TABLE_SORT_KEY_CNT-1UL ); } - for( fd_gui_peers_live_table_fwd_iter_t iter = fd_gui_peers_live_table_fwd_iter_init( peers->live_table, peers->client_viewports[ ws_conn_id ].sort_key, peers->contact_info_table ), j = 0; + for( fd_gui_peers_live_table_fwd_iter_t iter = fd_gui_peers_live_table_fwd_iter_init( peers->live_table, &peers->client_viewports[ ws_conn_id ].sort_key, peers->contact_info_table ), j = 0; !fd_gui_peers_live_table_fwd_iter_done( iter ) && jclient_viewports[ ws_conn_id ].start_row+peers->client_viewports[ ws_conn_id ].row_cnt; iter = fd_gui_peers_live_table_fwd_iter_next( iter, peers->contact_info_table ), j++ ) { if( FD_LIKELY( jclient_viewports[ ws_conn_id ].start_row ) ) continue; fd_gui_peers_node_t const * cur = fd_gui_peers_live_table_fwd_iter_ele_const( iter, peers->contact_info_table ); - fd_gui_peers_node_t * ref = &peers->client_viewports[ ws_conn_id ].viewport[ j ]; + fd_gui_peers_node_t * ref = &peers->client_viewports[ ws_conn_id ].viewport[ j-peers->client_viewports[ ws_conn_id ].start_row ]; + FD_TEST( jvalueulong; - if( FD_UNLIKELY( _row_cnt==0 || _row_cnt > FD_GUI_PEERS_WS_VIEWPORT_MAX_SZ ) ) { + if( FD_UNLIKELY( _row_cnt==0 || _row_cnt > FD_GUI_PEERS_WS_VIEWPORT_MAX_SZ || _start_row > fd_gui_peers_live_table_ele_cnt( peers->live_table )-_row_cnt ) ) { fd_gui_printf_null_query_response( peers->http, "gossip", "query_scroll", request_id ); FD_TEST( !fd_http_server_ws_send( peers->http, ws_conn_id ) ); return 0; @@ -666,7 +780,7 @@ fd_gui_peers_request_sort( fd_gui_peers_ctx_t * peers, if( FD_UNLIKELY( _col_idx==ULONG_MAX) ) return 
FD_HTTP_SERVER_CONNECTION_CLOSE_BAD_REQUEST; ulong sort_idx = ULONG_MAX; for( ulong i=0UL; iclient_viewports[ ws_conn_id ].sort_key->col[ i ] ) ) { + if( FD_UNLIKELY( _col_idx==peers->client_viewports[ ws_conn_id ].sort_key.col[ i ] ) ) { sort_idx = i; break; } @@ -679,15 +793,15 @@ fd_gui_peers_request_sort( fd_gui_peers_ctx_t * peers, if( FD_UNLIKELY( _dir > 1 || _dir < -1 ) ) return FD_HTTP_SERVER_CONNECTION_CLOSE_BAD_REQUEST; - if( FD_UNLIKELY( peers->client_viewports[ ws_conn_id ].sort_key->dir[ sort_idx ]==_dir ) ) return 0; /* NOP, sort_key hasn't changed */ + if( FD_UNLIKELY( peers->client_viewports[ ws_conn_id ].sort_key.dir[ sort_idx ]==_dir ) ) return 0; /* NOP, sort_key hasn't changed */ /* shift the column to the front of the sort key */ for( ulong i=sort_idx; i>0; i-- ) { - peers->client_viewports[ ws_conn_id ].sort_key->col[ i ] = peers->client_viewports[ ws_conn_id ].sort_key->col[ i-1UL ]; - peers->client_viewports[ ws_conn_id ].sort_key->dir[ i ] = peers->client_viewports[ ws_conn_id ].sort_key->dir[ i-1UL ]; + peers->client_viewports[ ws_conn_id ].sort_key.col[ i ] = peers->client_viewports[ ws_conn_id ].sort_key.col[ i-1UL ]; + peers->client_viewports[ ws_conn_id ].sort_key.dir[ i ] = peers->client_viewports[ ws_conn_id ].sort_key.dir[ i-1UL ]; } - peers->client_viewports[ ws_conn_id ].sort_key->col[ 0 ] = _col_idx; - peers->client_viewports[ ws_conn_id ].sort_key->dir[ 0 ] = _dir; + peers->client_viewports[ ws_conn_id ].sort_key.col[ 0 ] = _col_idx; + peers->client_viewports[ ws_conn_id ].sort_key.dir[ 0 ] = _dir; if( FD_UNLIKELY( peers->client_viewports[ ws_conn_id ].row_cnt==0 )) return 0; /* NOP */ @@ -771,7 +885,7 @@ fd_gui_peers_viewport_log( fd_gui_peers_ctx_t * peers, fd_gui_peers_live_table_ele_cnt( peers->live_table ), peers->client_viewports[ ws_conn_id ].row_cnt ); FD_TEST( peers->client_viewports[ ws_conn_id ].connected ); - for( fd_gui_peers_live_table_fwd_iter_t iter = fd_gui_peers_live_table_fwd_iter_init( peers->live_table, peers->client_viewports[ ws_conn_id ].sort_key, peers->contact_info_table ), j = 0UL; + for( fd_gui_peers_live_table_fwd_iter_t iter = fd_gui_peers_live_table_fwd_iter_init( peers->live_table, &peers->client_viewports[ ws_conn_id ].sort_key, peers->contact_info_table ), j = 0UL; !fd_gui_peers_live_table_fwd_iter_done(iter) && j < peers->client_viewports[ ws_conn_id ].start_row + peers->client_viewports[ ws_conn_id ].row_cnt; iter = fd_gui_peers_live_table_fwd_iter_next(iter, peers->contact_info_table), j++ ) { if( FD_LIKELY( j < peers->client_viewports[ ws_conn_id ].start_row ) ) continue; @@ -816,7 +930,7 @@ fd_gui_peers_ws_conn_rr_shrink( fd_gui_peers_ctx_t * peers, ulong ws_conn_id ) { peers->open_ws_conn_cnt--; if( FD_UNLIKELY( peers->open_ws_conn_cnt && peers->active_ws_conn_id==ws_conn_id ) ) { - for( ulong i=0UL; imax_ws_conn_cnt; i++ ) { + for( ulong i=1UL; imax_ws_conn_cnt+1UL; i++ ) { ulong next_ws_conn_id = (ws_conn_id + i) % peers->max_ws_conn_cnt; if( FD_UNLIKELY( peers->client_viewports[ next_ws_conn_id ].connected ) ) { peers->active_ws_conn_id = next_ws_conn_id; @@ -830,7 +944,7 @@ static int fd_gui_peers_ws_conn_rr_advance( fd_gui_peers_ctx_t * peers, long now ) { if( FD_LIKELY( !peers->open_ws_conn_cnt || now <= peers->next_client_nanos ) ) return 0; - for( ulong i=1UL; imax_ws_conn_cnt; i++ ) { + for( ulong i=1UL; imax_ws_conn_cnt+1UL; i++ ) { ulong next_ws_conn_id = (peers->active_ws_conn_id + i) % peers->max_ws_conn_cnt; if( FD_UNLIKELY( peers->client_viewports[ next_ws_conn_id ].connected ) ) { 
peers->active_ws_conn_id = next_ws_conn_id; @@ -852,8 +966,10 @@ fd_gui_peers_poll( fd_gui_peers_ctx_t * peers, long now ) { fd_gui_printf_peers_viewport_update( peers, peers->active_ws_conn_id ); FD_TEST( !fd_http_server_ws_send( peers->http, peers->active_ws_conn_id ) ); +#if LOGGING /* log the diff */ fd_gui_peers_viewport_log( peers, peers->active_ws_conn_id ); +#endif (void)fd_gui_peers_viewport_log; /* update client state to the latest viewport */ @@ -938,17 +1054,20 @@ fd_gui_peers_poll( fd_gui_peers_ctx_t * peers, long now ) { void fd_gui_peers_ws_open( fd_gui_peers_ctx_t * peers, ulong ws_conn_id, long now ) { - fd_gui_peers_ws_conn_rr_grow( peers, ws_conn_id ); peers->client_viewports[ ws_conn_id ].connected = 1; peers->client_viewports[ ws_conn_id ].connected_time = now; peers->client_viewports[ ws_conn_id ].start_row = 0; peers->client_viewports[ ws_conn_id ].row_cnt = 0; - fd_memcpy( peers->client_viewports[ ws_conn_id ].sort_key, &FD_GUI_PEERS_LIVE_TABLE_DEFAULT_SORT_KEY, sizeof(fd_gui_peers_live_table_sort_key_t) ); + peers->client_viewports[ ws_conn_id ].sort_key = FD_GUI_PEERS_LIVE_TABLE_DEFAULT_SORT_KEY; + fd_gui_peers_ws_conn_rr_grow( peers, ws_conn_id ); + + fd_gui_peers_printf_node_all( peers ); + FD_TEST( !fd_http_server_ws_send( peers->http, ws_conn_id ) ); } void fd_gui_peers_ws_close( fd_gui_peers_ctx_t * peers, ulong ws_conn_id ) { - fd_gui_peers_live_table_sort_key_remove( peers->live_table, peers->client_viewports[ ws_conn_id ].sort_key ); + fd_gui_peers_live_table_sort_key_remove( peers->live_table, &peers->client_viewports[ ws_conn_id ].sort_key ); peers->client_viewports[ ws_conn_id ].connected = 0; fd_gui_peers_ws_conn_rr_shrink( peers, ws_conn_id ); } diff --git a/src/disco/gui/fd_gui_peers.h b/src/disco/gui/fd_gui_peers.h index f4a3739eca..2135db2508 100644 --- a/src/disco/gui/fd_gui_peers.h +++ b/src/disco/gui/fd_gui_peers.h @@ -17,6 +17,9 @@ #include "../../util/net/fd_net_headers.h" #include "../../disco/metrics/fd_metrics.h" #include "../../flamenco/gossip/fd_gossip_types.h" +#include "../../discof/replay/fd_replay_tile.h" +#include "../../flamenco/runtime/fd_runtime_const.h" + #include "../../waltz/http/fd_http_server.h" #include "../topo/fd_topo.h" @@ -57,12 +60,22 @@ struct fd_gui_peers_node { fd_gui_peers_metric_rate_t gossvf_rx_sum; /* sum of gossvf_rx */ fd_gui_peers_metric_rate_t gossip_tx_sum; /* sum of gossip_tx */ - int has_node_info; + int has_val_info; char name [ FD_GUI_PEERS_VALIDATOR_INFO_NAME_SZ ]; char website [ FD_GUI_PEERS_VALIDATOR_INFO_WEBSITE_SZ ]; char details [ FD_GUI_PEERS_VALIDATOR_INFO_DETAILS_SZ ]; char icon_uri[ FD_GUI_PEERS_VALIDATOR_INFO_ICON_URI_SZ ]; + int has_vote_info; + fd_pubkey_t vote_account; + ulong stake; + ulong last_vote_slot; + long last_vote_timestamp; + uchar commission; + ulong epoch; + ulong epoch_credits; + int delinquent; + struct { ulong next; ulong prev; @@ -220,7 +233,7 @@ struct fd_gui_peers_ws_conn { ulong start_row; ulong row_cnt; fd_gui_peers_node_t viewport[ FD_GUI_PEERS_WS_VIEWPORT_MAX_SZ ]; - fd_gui_peers_live_table_sort_key_t sort_key[ 1 ]; + fd_gui_peers_live_table_sort_key_t sort_key; }; typedef struct fd_gui_peers_ws_conn fd_gui_peers_ws_conn_t; struct fd_gui_peers_ctx { @@ -243,6 +256,8 @@ struct fd_gui_peers_ctx { fd_gui_peers_gossip_stats_t gossip_stats [ 1 ]; fd_gui_peers_node_t contact_info_table[ FD_CONTACT_INFO_TABLE_SIZE ]; + + fd_replay_vote_t _scratch[ 2 ][ FD_RUNTIME_MAX_VOTE_ACCOUNTS ]; /* for fast stable sort */ }; typedef struct fd_gui_peers_ctx fd_gui_peers_ctx_t; @@ -286,11 
+301,17 @@ fd_gui_peers_handle_gossip_message( fd_gui_peers_ctx_t * peers,
 /* fd_gui_peers_handle_gossip_message_tx parses frags on the
    gossip_out link and uses the contact info update to build up the
    peer table. */

-int
+void
 fd_gui_peers_handle_gossip_update( fd_gui_peers_ctx_t *               peers,
                                    fd_gossip_update_message_t const * update,
                                    long                               now );

+void
+fd_gui_peers_handle_vote_update( fd_gui_peers_ctx_t *     peers,
+                                 fd_replay_vote_t const * votes,
+                                 ulong                    vote_cnt,
+                                 long                     now );
+
 /* fd_gui_peers_ws_message handles incoming websocket request payloads
    requesting peer-related responses.  ws_conn_id is the connection id
    of the requester.  data is a pointer to the start of the
diff --git a/src/disco/gui/fd_gui_printf.c b/src/disco/gui/fd_gui_printf.c
index da60845856..98344035b1 100644
--- a/src/disco/gui/fd_gui_printf.c
+++ b/src/disco/gui/fd_gui_printf.c
@@ -913,6 +913,137 @@ fd_gui_printf_peer( fd_gui_t * gui,
   jsonp_close_object( gui->http );
 }

+static void
+peers_printf_node( fd_gui_peers_ctx_t * peers,
+                   ulong                contact_info_table_idx ) {
+  fd_gui_peers_node_t * peer = &peers->contact_info_table[ contact_info_table_idx ];
+
+  jsonp_open_object( peers->http, NULL );
+
+  char identity_base58[ FD_BASE58_ENCODED_32_SZ ];
+  fd_base58_encode_32( peer->contact_info.pubkey.uc, NULL, identity_base58 );
+  jsonp_string( peers->http, "identity_pubkey", identity_base58 );
+
+  jsonp_open_object( peers->http, "gossip" );
+
+  char version[ 32 ];
+  FD_TEST( fd_cstr_printf( version, sizeof( version ), NULL, "%u.%u.%u", peer->contact_info.version.major, peer->contact_info.version.minor, peer->contact_info.version.patch ) );
+  jsonp_string( peers->http, "version", version );
+  jsonp_ulong( peers->http, "feature_set", peer->contact_info.version.feature_set );
+  jsonp_long( peers->http, "wallclock", peer->contact_info.wallclock_nanos );
+  jsonp_ulong( peers->http, "shred_version", peer->contact_info.shred_version );
+  jsonp_open_object( peers->http, "sockets" );
+  for( ulong j=0UL; j<FD_CONTACT_INFO_SOCKET_CNT; j++ ) {
+    if( FD_UNLIKELY( !peer->contact_info.sockets[ FD_CONTACT_INFO_SOCKET_GOSSIP ].addr && !peer->contact_info.sockets[ FD_CONTACT_INFO_SOCKET_GOSSIP ].port ) ) continue;
+    char const * tag;
+    switch( j ) {
+      case FD_CONTACT_INFO_SOCKET_GOSSIP:            tag = "gossip";            break;
+      case FD_CONTACT_INFO_SOCKET_SERVE_REPAIR_QUIC: tag = "serve_repair_quic"; break;
+      case FD_CONTACT_INFO_SOCKET_RPC:               tag = "rpc";               break;
+      case FD_CONTACT_INFO_SOCKET_RPC_PUBSUB:        tag = "rpc_pubsub";        break;
+      case FD_CONTACT_INFO_SOCKET_SERVE_REPAIR:      tag = "serve_repair";      break;
+      case FD_CONTACT_INFO_SOCKET_TPU:               tag = "tpu";               break;
+      case FD_CONTACT_INFO_SOCKET_TPU_FORWARDS:      tag = "tpu_forwards";      break;
+      case FD_CONTACT_INFO_SOCKET_TPU_FORWARDS_QUIC: tag = "tpu_forwards_quic"; break;
+      case FD_CONTACT_INFO_SOCKET_TPU_QUIC:          tag = "tpu_quic";          break;
+      case FD_CONTACT_INFO_SOCKET_TPU_VOTE:          tag = "tpu_vote";          break;
+      case FD_CONTACT_INFO_SOCKET_TVU:               tag = "tvu";               break;
+      case FD_CONTACT_INFO_SOCKET_TVU_QUIC:          tag = "tvu_quic";          break;
+      case FD_CONTACT_INFO_SOCKET_TPU_VOTE_QUIC:     tag = "tpu_vote_quic";     break;
+      case FD_CONTACT_INFO_SOCKET_ALPENGLOW:         tag = "alpenglow";         break;
+    }
+    char line[ 64 ];
+    FD_TEST( fd_cstr_printf( line, sizeof( line ), NULL, FD_IP4_ADDR_FMT ":%hu", FD_IP4_ADDR_FMT_ARGS( peer->contact_info.sockets[ FD_CONTACT_INFO_SOCKET_GOSSIP ].addr ), fd_ushort_bswap( peer->contact_info.sockets[ FD_CONTACT_INFO_SOCKET_GOSSIP ].port ) ) );
+    jsonp_string( peers->http, tag, line );
+  }
+  jsonp_close_object( peers->http );
+
+  jsonp_close_object( peers->http );
+
+  if( FD_LIKELY( !peer->has_vote_info ) ) {
+    jsonp_open_array( peers->http, "vote" );
+    jsonp_close_array( peers->http );
+  } else {
+    jsonp_open_array( peers->http, "vote" );
+    jsonp_open_object( peers->http, NULL );
+    char vote_account_base58[ FD_BASE58_ENCODED_32_SZ ];
+    fd_base58_encode_32( peer->vote_account.uc, NULL, vote_account_base58 );
+    jsonp_string( peers->http, "vote_account", vote_account_base58 );
+    jsonp_ulong_as_str( peers->http, "activated_stake", peer->stake );
+    jsonp_ulong( peers->http, "last_vote", peer->last_vote_slot );
+    jsonp_ulong( peers->http, "epoch_credits", peer->epoch_credits );
+    jsonp_ulong( peers->http, "commission", peer->commission );
+    jsonp_ulong( peers->http, "root_slot", 0UL );
+    jsonp_bool( peers->http, "delinquent", peer->delinquent );
+    jsonp_close_object( peers->http );
+    jsonp_close_array( peers->http );
+  }
+
+  if( FD_UNLIKELY( !peer->has_val_info ) ) {
+    jsonp_string( peers->http, "info", NULL );
+  } else {
+    jsonp_open_object( peers->http, "info" );
+    jsonp_string( peers->http, "name", peer->name );
+    jsonp_string( peers->http, "details", peer->details );
+    jsonp_string( peers->http, "website", peer->website );
+    jsonp_string( peers->http, "icon_url", peer->icon_uri );
+    jsonp_close_object( peers->http );
+  }
+
+  jsonp_close_object( peers->http );
+}
+
+void
+fd_gui_peers_printf_nodes( fd_gui_peers_ctx_t * peers,
+                           int *                actions,
+                           ulong *              idxs,
+                           ulong                count ) {
+  jsonp_open_envelope( peers->http, "peers", "update" );
+  jsonp_open_object( peers->http, "value" );
+  jsonp_open_array( peers->http, "add" );
+  for( ulong i=0UL; i<count; i++ ) { /* ... */ }
+  jsonp_close_array( peers->http );
+
+  jsonp_open_array( peers->http, "update" );
+  for( ulong i=0UL; i<count; i++ ) { /* ... */ }
+  jsonp_close_array( peers->http );
+
+  jsonp_open_array( peers->http, "remove" );
+  for( ulong i=0UL; i<count; i++ ) {
+    if( /* ... */ ) {
+      jsonp_open_object( peers->http, NULL );
+      char identity_base58[ FD_BASE58_ENCODED_32_SZ ];
+      fd_base58_encode_32( peers->contact_info_table[ idxs[ i ] ].contact_info.pubkey.uc, NULL, identity_base58 );
+      jsonp_string( peers->http, "identity_pubkey", identity_base58 );
+      jsonp_close_object( peers->http );
+    }
+  }
+  jsonp_close_array( peers->http );
+  jsonp_close_object( peers->http );
+  jsonp_close_envelope( peers->http );
+}
+
+void
+fd_gui_peers_printf_node_all( fd_gui_peers_ctx_t * peers ) {
+  jsonp_open_envelope( peers->http, "peers", "update" );
+  jsonp_open_object( peers->http, "value" );
+  jsonp_open_array( peers->http, "add" );
+  /* We can iter through the bandwidth tracking table since it will always be populated */
+  for( fd_gui_peers_bandwidth_tracking_fwd_iter_t iter = fd_gui_peers_bandwidth_tracking_fwd_iter_init( peers->bw_tracking, &FD_GUI_PEERS_BW_TRACKING_INGRESS_SORT_KEY, peers->contact_info_table ), j = 0UL;
+       !fd_gui_peers_bandwidth_tracking_fwd_iter_done( iter );
+       iter = fd_gui_peers_bandwidth_tracking_fwd_iter_next( iter, peers->contact_info_table ), j++ ) {
+    ulong contact_info_table_idx = fd_gui_peers_bandwidth_tracking_fwd_iter_idx( iter );
+    peers_printf_node( peers, contact_info_table_idx );
+  }
+  jsonp_close_array( peers->http );
+  jsonp_open_array( peers->http, "update" );
+  jsonp_close_array( peers->http );
+  jsonp_open_array( peers->http, "remove" );
+  jsonp_close_array( peers->http );
+  jsonp_close_object( peers->http );
+  jsonp_close_envelope( peers->http );
+}
+
 void
 fd_gui_printf_peers_gossip_update( fd_gui_t *    gui,
                                    ulong const * updated,
@@ -1644,20 +1775,20 @@ fd_gui_printf_peers_viewport_update( fd_gui_peers_ctx_t * peers,

   /* loop over latest viewport */
   FD_TEST( peers->client_viewports[ ws_conn_id ].connected );
-  if( !(peers->client_viewports[ ws_conn_id ].row_cnt && peers->client_viewports[ ws_conn_id ].row_cnt< /* ... */ ) ) FD_LOG_ERR(( /* ... */ peers->client_viewports[ ws_conn_id ].row_cnt ));
+  if( !(peers->client_viewports[ ws_conn_id ].row_cnt && peers->client_viewports[ ws_conn_id ].row_cnt< /* ... */ ) ) FD_LOG_ERR(( "row_cnt=%lu ws_conn_id=%lu peers->active_ws_conn_id=%lu", peers->client_viewports[ ws_conn_id ].row_cnt, ws_conn_id, peers->active_ws_conn_id ));

-  for( fd_gui_peers_live_table_fwd_iter_t iter = fd_gui_peers_live_table_fwd_iter_init( peers->live_table, peers->client_viewports[ ws_conn_id ].sort_key, peers->contact_info_table ), j = 0;
+  for( fd_gui_peers_live_table_fwd_iter_t iter = fd_gui_peers_live_table_fwd_iter_init( peers->live_table, &peers->client_viewports[ ws_conn_id ].sort_key, peers->contact_info_table ), j = 0;
       !fd_gui_peers_live_table_fwd_iter_done( iter ) && j<peers->client_viewports[ ws_conn_id ].start_row+peers->client_viewports[ ws_conn_id ].row_cnt;
       iter = fd_gui_peers_live_table_fwd_iter_next( iter, peers->contact_info_table ), j++ ) {
     if( FD_LIKELY( j<peers->client_viewports[ ws_conn_id ].start_row ) ) continue;

     fd_gui_peers_node_t const * cur = fd_gui_peers_live_table_fwd_iter_ele_const( iter, peers->contact_info_table );
-    fd_gui_peers_node_t * ref = &peers->client_viewports[ ws_conn_id ].viewport[ j ];
+    fd_gui_peers_node_t * ref = &peers->client_viewports[ ws_conn_id ].viewport[ j-peers->client_viewports[ ws_conn_id ].start_row ];

     /* This code should be kept in sync with updates to fd_gui_peers_live_table */
     if( FD_UNLIKELY( memcmp( cur->contact_info.pubkey.uc, ref->contact_info.pubkey.uc, 32UL ) ) ) {
       jsonp_open_object( peers->http, NULL );
-      jsonp_ulong ( peers->http, "row_index", peers->client_viewports[ ws_conn_id ].start_row + j );
+      jsonp_ulong ( peers->http, "row_index", j );
       jsonp_string( peers->http, "column_name", "Pubkey" );

       char pubkey_base58[ FD_BASE58_ENCODED_32_SZ ];
@@ -1668,7 +1799,7 @@ fd_gui_printf_peers_viewport_update( fd_gui_peers_ctx_t * peers,

     if( FD_UNLIKELY( cur->contact_info.sockets[ FD_CONTACT_INFO_SOCKET_GOSSIP ].addr!=ref->contact_info.sockets[ FD_CONTACT_INFO_SOCKET_GOSSIP ].addr ) ) {
       jsonp_open_object( peers->http, NULL );
-      jsonp_ulong ( peers->http, "row_index", peers->client_viewports[ ws_conn_id ].start_row + j );
+      jsonp_ulong ( peers->http, "row_index", j );
       jsonp_string( peers->http, "column_name", "IP Addr" );

       char peer_addr[ 16 ]; /* 255.255.255.255 + '\0' */
@@ -1688,7 +1819,7 @@ fd_gui_printf_peers_viewport_update( fd_gui_peers_ctx_t * peers,

     if( FD_UNLIKELY( ref->valid && cur_ingress_pull_response_kbps!=ref_ingress_pull_response_kbps ) ) {
       jsonp_open_object( peers->http, NULL );
-      jsonp_ulong ( peers->http, "row_index", peers->client_viewports[ ws_conn_id ].start_row + j );
+      jsonp_ulong ( peers->http, "row_index", j );
       jsonp_string( peers->http, "column_name", "Ingress Pull" );
       jsonp_long  ( peers->http, "new_value", cur_ingress_pull_response_kbps );
       jsonp_close_object( peers->http );
@@ -1696,7 +1827,7 @@ fd_gui_printf_peers_viewport_update( fd_gui_peers_ctx_t * peers,

     if( FD_UNLIKELY( ref->valid && cur_ingress_push_kbps!=ref_ingress_push_kbps ) ) {
       jsonp_open_object( peers->http, NULL );
-      jsonp_ulong ( peers->http, "row_index", peers->client_viewports[ ws_conn_id ].start_row + j );
+      jsonp_ulong ( peers->http, "row_index", j );
       jsonp_string( peers->http, "column_name", "Ingress Push" );
       jsonp_long  ( peers->http, "new_value", cur_ingress_push_kbps );
       jsonp_close_object( peers->http );
@@ -1704,7 +1835,7 @@ fd_gui_printf_peers_viewport_update( fd_gui_peers_ctx_t * peers,

     if( FD_UNLIKELY( ref->valid && cur_egress_pull_response_kbps!=ref_egress_pull_response_kbps ) ) {
       jsonp_open_object( peers->http, NULL );
-      jsonp_ulong ( peers->http, "row_index", peers->client_viewports[ ws_conn_id ].start_row + j );
+      jsonp_ulong ( peers->http, "row_index", j );
       jsonp_string( peers->http, "column_name", "Egress Pull" );
       jsonp_long  ( peers->http, "new_value", cur_egress_pull_response_kbps );
       jsonp_close_object( peers->http );
@@ -1712,7 +1843,7 @@ fd_gui_printf_peers_viewport_update( fd_gui_peers_ctx_t * peers,

     if( FD_UNLIKELY( ref->valid && cur_egress_push_kbps!=ref_egress_push_kbps ) ) {
       jsonp_open_object( peers->http, NULL );
-      jsonp_ulong ( peers->http, "row_index", peers->client_viewports[ ws_conn_id ].start_row + j );
+      jsonp_ulong ( peers->http, "row_index", j );
       jsonp_string( peers->http, "column_name", "Egress Push" );
       jsonp_long  ( peers->http, "new_value", cur_egress_push_kbps );
       jsonp_close_object( peers->http );
@@ -1734,15 +1865,15 @@ fd_gui_printf_peers_viewport_request( fd_gui_peers_ctx_t * peers,

   jsonp_open_object( peers->http, "value" );
   FD_TEST( peers->client_viewports[ ws_conn_id ].connected );
-  if( !(peers->client_viewports[ ws_conn_id ].row_cnt && peers->client_viewports[ ws_conn_id ].row_cnt< /* ... */ ) ) FD_LOG_ERR(( /* ... */ peers->client_viewports[ ws_conn_id ].row_cnt ));
-  for( fd_gui_peers_live_table_fwd_iter_t iter = fd_gui_peers_live_table_fwd_iter_init( peers->live_table, peers->client_viewports[ ws_conn_id ].sort_key, peers->contact_info_table ), j = 0;
+  if( !(peers->client_viewports[ ws_conn_id ].row_cnt && peers->client_viewports[ ws_conn_id ].row_cnt< /* ... */ ) ) FD_LOG_ERR(( "row_cnt=%lu ws_conn_id=%lu peers->active_ws_conn_id=%lu", peers->client_viewports[ ws_conn_id ].row_cnt, ws_conn_id, peers->active_ws_conn_id ));
+  for( fd_gui_peers_live_table_fwd_iter_t iter = fd_gui_peers_live_table_fwd_iter_init( peers->live_table, &peers->client_viewports[ ws_conn_id ].sort_key, peers->contact_info_table ), j = 0;
       !fd_gui_peers_live_table_fwd_iter_done( iter ) && j<peers->client_viewports[ ws_conn_id ].start_row+peers->client_viewports[ ws_conn_id ].row_cnt;
       iter = fd_gui_peers_live_table_fwd_iter_next( iter, peers->contact_info_table ), j++ ) {
     if( FD_LIKELY( j<peers->client_viewports[ ws_conn_id ].start_row ) ) continue;
     fd_gui_peers_node_t const * cur = fd_gui_peers_live_table_fwd_iter_ele_const( iter, peers->contact_info_table );

     char row_index_cstr[ 32 ];
-    FD_TEST( fd_cstr_printf_check( row_index_cstr, sizeof(row_index_cstr), NULL, "%lu", peers->client_viewports[ ws_conn_id ].start_row + j ) );
+    FD_TEST( fd_cstr_printf_check( row_index_cstr, sizeof(row_index_cstr), NULL, "%lu", j ) );
     jsonp_open_object( peers->http, row_index_cstr );

     /* This code should be kept in sync with updates to fd_gui_peers_live_table */
diff --git a/src/disco/gui/fd_gui_printf.h b/src/disco/gui/fd_gui_printf.h
index 45a0a15b10..051e2f28f6 100644
--- a/src/disco/gui/fd_gui_printf.h
+++ b/src/disco/gui/fd_gui_printf.h
@@ -51,6 +51,15 @@ void
 fd_gui_printf_epoch( fd_gui_t * gui,
                      ulong      epoch_idx );

+void
+fd_gui_peers_printf_nodes( fd_gui_peers_ctx_t * peers,
+                           int *                actions,
+                           ulong *              idxs,
+                           ulong                count );
+
+void
+fd_gui_peers_printf_node_all( fd_gui_peers_ctx_t * peers );
+
 void
 fd_gui_printf_peers_gossip_update( fd_gui_t *    gui,
                                    ulong const * updated,
diff --git a/src/disco/gui/fd_gui_tile.c b/src/disco/gui/fd_gui_tile.c
index 52963b4583..9b34c3b4a3 100644
--- a/src/disco/gui/fd_gui_tile.c
+++ b/src/disco/gui/fd_gui_tile.c
@@ -54,6 +54,8 @@ static fd_http_static_file_t * STATIC_FILES;
 #define IN_KIND_REPAIR_NET  (10UL) /* firedancer only */
 #define IN_KIND_TOWER_OUT   (11UL) /* firedancer only */
 #define IN_KIND_REPLAY_OUT  (12UL) /* firedancer only */
+#define IN_KIND_REPLAY_STAKE (13UL) /* firedancer only */
+#define IN_KIND_REPLAY_VOTES (14UL) /* firedancer only */

 FD_IMPORT_BINARY( firedancer_svg, "book/public/fire.svg" );

@@ -227,11 +229,25 @@ during_frag( fd_gui_ctx_t * ctx,
     }
   }

+  if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_REPLAY_STAKE ) ) {
+    fd_stake_weight_msg_t * leader_schedule = (fd_stake_weight_msg_t *)src;
+    FD_TEST( sz==(ushort)(sizeof(fd_stake_weight_msg_t)+(leader_schedule->staked_cnt*sizeof(fd_vote_stake_weight_t))) );
+    sz = fd_stake_weight_msg_sz( leader_schedule->staked_cnt );
+  }
+
+  if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_REPLAY_OUT ) ) {
+    if( FD_LIKELY( sig!=REPLAY_SIG_SLOT_COMPLETED && sig!=REPLAY_SIG_BECAME_LEADER ) ) return;
+  }
+
   if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_SHRED_OUT ) ) {
     /* There are multiple frags types sent on this link, the currently
        the only way to distinguish them is to check sz.  We dont
        actually read from the dcache. */
     return;
+  } else if( FD_UNLIKELY( ctx->in_kind[ in_idx ]==IN_KIND_REPLAY_VOTES ) ) {
+    FD_TEST( sig<=FD_RUNTIME_MAX_VOTE_ACCOUNTS );
+    FD_TEST( sz==(ushort)(sig*sizeof(fd_replay_vote_t)) );
+    sz = sig*sizeof(fd_replay_vote_t);
   }

   if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>ctx->in[ in_idx ].mtu ) )
@@ -272,6 +288,24 @@ after_frag( fd_gui_ctx_t * ctx,
       }
       break;
     }
+    case IN_KIND_REPLAY_STAKE: {
+      FD_TEST( ctx->is_full_client );
+
+      fd_stake_weight_msg_t * leader_schedule = (fd_stake_weight_msg_t *)ctx->buf;
+      for( ulong i=0UL; i<leader_schedule->staked_cnt; i++ ) {
+        if( !leader_schedule->weights[ i ].stake ) {
+          FD_LOG_ERR(("i=%lu sz=%lu", i, sz));
+        }
+      }
+      fd_gui_handle_leader_schedule( ctx->gui, leader_schedule, fd_clock_now( ctx->clock ) );
+      break;
+    }
+    case IN_KIND_REPLAY_VOTES: {
+      FD_TEST( ctx->is_full_client );
+      fd_replay_vote_t * votes = (fd_replay_vote_t *)ctx->buf;
+      fd_gui_peers_handle_vote_update( ctx->peers, votes, sig, fd_clock_now( ctx->clock ) );
+      break;
+    }
     case IN_KIND_TOWER_OUT: {
       FD_TEST( ctx->is_full_client );
       fd_tower_slot_done_t const * tower = (fd_tower_slot_done_t const *)ctx->buf;
@@ -347,6 +381,7 @@ after_frag( fd_gui_ctx_t * ctx,
       break;
     }
     case IN_KIND_POH_PACK: {
+      FD_TEST( !ctx->is_full_client );
       FD_TEST( fd_disco_poh_sig_pkt_type( sig )==POH_PKT_TYPE_BECAME_LEADER );
       fd_became_leader_t * became_leader = (fd_became_leader_t *)ctx->buf;
       fd_gui_became_leader( ctx->gui, fd_disco_poh_sig_slot( sig ), became_leader->slot_start_ns, became_leader->slot_end_ns, became_leader->limits.slot_max_cost, became_leader->max_microblocks_in_slot );
@@ -629,19 +664,21 @@ unprivileged_init( fd_topo_t * topo,
     fd_topo_link_t * link      = &topo->links[ tile->in_link_id[ i ] ];
     fd_topo_wksp_t * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];

-    if(      FD_LIKELY( !strcmp( link->name, "plugin_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_PLUGIN;
-    else if( FD_LIKELY( !strcmp( link->name, "poh_pack"   ) ) ) ctx->in_kind[ i ] = IN_KIND_POH_PACK;
-    else if( FD_LIKELY( !strcmp( link->name, "pack_bank"  ) ) ) ctx->in_kind[ i ] = IN_KIND_PACK_BANK;
-    else if( FD_LIKELY( !strcmp( link->name, "pack_poh"   ) ) ) ctx->in_kind[ i ] = IN_KIND_PACK_POH;
-    else if( FD_LIKELY( !strcmp( link->name, "bank_poh"   ) ) ) ctx->in_kind[ i ] = IN_KIND_BANK_POH;
-    else if( FD_LIKELY( !strcmp( link->name, "shred_out"  ) ) ) ctx->in_kind[ i ] = IN_KIND_SHRED_OUT;  /* full client only */
-    else if( FD_LIKELY( !strcmp( link->name, "net_gossvf" ) ) ) ctx->in_kind[ i ] = IN_KIND_NET_GOSSVF; /* full client only */
-    else if( FD_LIKELY( !strcmp( link->name, "gossip_net" ) ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP_NET; /* full client only */
-    else if( FD_LIKELY( !strcmp( link->name, "gossip_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP_OUT; /* full client only */
-    else if( FD_LIKELY( !strcmp( link->name, "snaprd_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_SNAPRD;     /* full client only */
-    else if( FD_LIKELY( !strcmp( link->name, "repair_net" ) ) ) ctx->in_kind[ i ] = IN_KIND_REPAIR_NET; /* full client only */
-    else if( FD_LIKELY( !strcmp( link->name, "tower_out"  ) ) ) ctx->in_kind[ i ] = IN_KIND_TOWER_OUT;  /* full client only */
-    else if( FD_LIKELY( !strcmp( link->name, "replay_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_REPLAY_OUT; /* full client only */
+    if(      FD_LIKELY( !strcmp( link->name, "plugin_out"   ) ) ) ctx->in_kind[ i ] = IN_KIND_PLUGIN;
+    else if( FD_LIKELY( !strcmp( link->name, "poh_pack"     ) ) ) ctx->in_kind[ i ] = IN_KIND_POH_PACK;
+    else if( FD_LIKELY( !strcmp( link->name, "pack_bank"    ) ) ) ctx->in_kind[ i ] = IN_KIND_PACK_BANK;
+    else if( FD_LIKELY( !strcmp( link->name, "pack_poh"     ) ) ) ctx->in_kind[ i ] = IN_KIND_PACK_POH;
+    else if( FD_LIKELY( !strcmp( link->name, "bank_poh"     ) ) ) ctx->in_kind[ i ] = IN_KIND_BANK_POH;
+    else if( FD_LIKELY( !strcmp( link->name, "shred_out"    ) ) ) ctx->in_kind[ i ] = IN_KIND_SHRED_OUT;    /* full client only */
+    else if( FD_LIKELY( !strcmp( link->name, "net_gossvf"   ) ) ) ctx->in_kind[ i ] = IN_KIND_NET_GOSSVF;   /* full client only */
+    else if( FD_LIKELY( !strcmp( link->name, "gossip_net"   ) ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP_NET;   /* full client only */
+    else if( FD_LIKELY( !strcmp( link->name, "gossip_out"   ) ) ) ctx->in_kind[ i ] = IN_KIND_GOSSIP_OUT;   /* full client only */
+    else if( FD_LIKELY( !strcmp( link->name, "snaprd_out"   ) ) ) ctx->in_kind[ i ] = IN_KIND_SNAPRD;       /* full client only */
+    else if( FD_LIKELY( !strcmp( link->name, "repair_net"   ) ) ) ctx->in_kind[ i ] = IN_KIND_REPAIR_NET;   /* full client only */
+    else if( FD_LIKELY( !strcmp( link->name, "tower_out"    ) ) ) ctx->in_kind[ i ] = IN_KIND_TOWER_OUT;    /* full client only */
+    else if( FD_LIKELY( !strcmp( link->name, "replay_out"   ) ) ) ctx->in_kind[ i ] = IN_KIND_REPLAY_OUT;   /* full client only */
+    else if( FD_LIKELY( !strcmp( link->name, "replay_stake" ) ) ) ctx->in_kind[ i ] = IN_KIND_REPLAY_STAKE; /* full client only */
+    else if( FD_LIKELY( !strcmp( link->name, "replay_votes" ) ) ) ctx->in_kind[ i ] = IN_KIND_REPLAY_VOTES; /* full client only */
     else FD_LOG_ERR(( "gui tile has unexpected input link %lu %s", i, link->name ));

     if( FD_LIKELY( !strcmp( link->name, "bank_poh" ) ) ) {
diff --git a/src/discof/replay/fd_replay_tile.c b/src/discof/replay/fd_replay_tile.c
index fe69462469..e858d87bee 100644
--- a/src/discof/replay/fd_replay_tile.c
+++ b/src/discof/replay/fd_replay_tile.c
@@ -357,6 +357,7 @@ struct fd_replay_tile {

   fd_replay_out_link_t replay_out[1];
   fd_replay_out_link_t stake_out[1];
+  fd_replay_out_link_t replay_votes[1];

   struct {
     fd_histf_t store_read_wait[ 1 ];
@@ -592,6 +593,46 @@ buffer_vote_towers( fd_replay_tile_t * ctx,
   fd_bank_vote_states_prev_end_locking_query( bank );
 }

+/* This function creates and publishes a snapshot of all the vote
+   account states at the end of this slot, which can be consumed for
+   monitoring purposes.
+
+   This function should be called at the end of a slot, before any epoch
+   boundary processing. */
+static void
+publish_vote_states( fd_replay_tile_t *  ctx,
+                     fd_stem_context_t * stem,
+                     fd_bank_t *         bank ) {
+  fd_replay_vote_t * votes = fd_chunk_to_laddr( ctx->replay_votes->mem, ctx->replay_votes->chunk );
+  fd_vote_states_t const * vote_states = fd_bank_vote_states_locking_query( bank );
+  fd_vote_states_iter_t iter_[1];
+  ulong count = 0UL;
+  for( fd_vote_states_iter_t * iter = fd_vote_states_iter_init( iter_, vote_states );
+       !fd_vote_states_iter_done( iter );
+       fd_vote_states_iter_next( iter ) ) {
+    if( FD_UNLIKELY( count==FD_RUNTIME_MAX_VOTE_ACCOUNTS ) ) {
+      FD_LOG_WARNING(("on-chain vote accounts surpassed FD_RUNTIME_MAX_VOTE_ACCOUNTS"));
+      break;
+    }
+    fd_vote_state_ele_t const * vote_state = fd_vote_states_iter_ele( iter );
+
+    votes[ count ].vote_account        = vote_state->vote_account;
+    votes[ count ].node_account        = vote_state->node_account;
+    votes[ count ].stake               = vote_state->stake;
+    votes[ count ].last_vote_slot      = vote_state->last_vote_slot;
+    votes[ count ].last_vote_timestamp = vote_state->last_vote_timestamp;
+    votes[ count ].commission          = vote_state->commission;
+    votes[ count ].epoch               = fd_ulong_if( !vote_state->credits_cnt, ULONG_MAX, vote_state->epoch[ 0 ] );
+    votes[ count ].epoch_credits       = fd_ulong_if( !vote_state->credits_cnt, ULONG_MAX, vote_state->credits[ 0 ] );
+
+    count++;
+  }
+  fd_bank_vote_states_end_locking_query( bank );
+
+  fd_stem_publish( stem, ctx->replay_votes->idx, count, ctx->replay_votes->chunk, count*sizeof(fd_replay_vote_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
+  ctx->replay_votes->chunk = fd_dcache_compact_next( ctx->replay_votes->chunk, count*sizeof(fd_replay_vote_t), ctx->replay_votes->chunk0, ctx->replay_votes->wmark );
+}
+
 /* This function publishes the next vote tower in the
    ctx->vote_tower_out buffer to the tower tile.
@@ -740,6 +781,8 @@ publish_slot_completed( fd_replay_tile_t * ctx,
   ctx->metrics.slots_total++;
   ctx->metrics.transactions_total = fd_bank_txn_count_get( bank );

+  fd_bank_t * parent_bank = fd_banks_get_parent( ctx->banks, bank );
+
   fd_replay_slot_completed_t * slot_info = fd_chunk_to_laddr( ctx->replay_out->mem, ctx->replay_out->chunk );
   slot_info->slot = slot;
   slot_info->root_slot = ctx->consensus_root_slot;
@@ -751,11 +794,10 @@ publish_slot_completed( fd_replay_tile_t * ctx,
   slot_info->parent_block_id = parent_block_id;
   slot_info->bank_hash = *bank_hash;
   slot_info->block_hash = *block_hash;
-
-  slot_info->transaction_count = fd_bank_txn_count_get( bank );
-  slot_info->nonvote_txn_count = fd_bank_nonvote_txn_count_get( bank );
-  slot_info->failed_txn_count = fd_bank_failed_txn_count_get( bank );
-  slot_info->nonvote_failed_txn_count = fd_bank_nonvote_failed_txn_count_get( bank );
+  slot_info->transaction_count = fd_bank_txn_count_get( bank ) - ( !!parent_bank ? fd_bank_txn_count_get( parent_bank ) : 0UL );
+  slot_info->nonvote_txn_count = fd_bank_nonvote_txn_count_get( bank ) - ( !!parent_bank ? fd_bank_nonvote_txn_count_get( parent_bank ) : 0UL );
+  slot_info->failed_txn_count = fd_bank_failed_txn_count_get( bank ) - ( !!parent_bank ? fd_bank_failed_txn_count_get( parent_bank ) : 0UL );
+  slot_info->nonvote_failed_txn_count = fd_bank_nonvote_failed_txn_count_get( bank ) - ( !!parent_bank ? fd_bank_nonvote_failed_txn_count_get( parent_bank ) : 0UL );
   slot_info->total_compute_units_used = fd_bank_total_compute_units_used_get( bank );
   slot_info->execution_fees = fd_bank_execution_fees_get( bank );
   slot_info->priority_fees = fd_bank_priority_fees_get( bank );
@@ -807,6 +849,11 @@ replay_block_finalize( fd_replay_tile_t * ctx,
      which will be published in after_credit. */
   buffer_vote_towers( ctx, &xid, bank );

+  /* Take a snapshot of vote states and publish them to consumers */
+  if( FD_LIKELY( ctx->replay_votes->idx!=ULONG_MAX ) ) {
+    publish_vote_states( ctx, stem, bank );
+  }
+
   /**********************************************************************/
   /* Bank hash comparison, and halt if there's a mismatch after replay  */
   /**********************************************************************/
@@ -1171,7 +1218,7 @@ maybe_become_leader( fd_replay_tile_t * ctx,
     FD_LOG_ERR(( "too many skipped ticks %lu for slot %lu, chain must halt", msg->ticks_per_slot+msg->total_skipped_ticks, ctx->next_leader_slot ));
   }

-  fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_BECAME_LEADER, ctx->replay_out->chunk, sizeof(fd_became_leader_t), 0UL, 0UL, 0UL );
+  fd_stem_publish( stem, ctx->replay_out->idx, REPLAY_SIG_BECAME_LEADER, ctx->replay_out->chunk, sizeof(fd_became_leader_t), 0UL, 0UL, fd_frag_meta_ts_comp( fd_tickcount() ) );
   ctx->replay_out->chunk = fd_dcache_compact_next( ctx->replay_out->chunk, sizeof(fd_became_leader_t), ctx->replay_out->chunk0, ctx->replay_out->wmark );

   ctx->next_leader_slot = ULONG_MAX;
@@ -2273,6 +2320,7 @@ unprivileged_init( fd_topo_t * topo,

   *ctx->stake_out  = out1( topo, tile, "replay_stake" ); FD_TEST( ctx->stake_out->idx!=ULONG_MAX );
   *ctx->replay_out = out1( topo, tile, "replay_out" );   FD_TEST( ctx->replay_out->idx!=ULONG_MAX );
+  *ctx->replay_votes = out1( topo, tile, "replay_votes" );

   ulong idx = fd_topo_find_tile_out_link( topo, tile, "replay_exec", 0UL );
   FD_TEST( idx!=ULONG_MAX );
diff --git a/src/discof/replay/fd_replay_tile.h b/src/discof/replay/fd_replay_tile.h
index 699c6a6f25..96d36e7202 100644
--- a/src/discof/replay/fd_replay_tile.h
+++ b/src/discof/replay/fd_replay_tile.h
@@ -71,6 +71,19 @@ struct fd_replay_tower {

 typedef struct fd_replay_tower fd_replay_tower_t;

+struct fd_replay_vote {
+  fd_pubkey_t vote_account;
+  fd_pubkey_t node_account;
+  ulong       stake;
+  ulong       last_vote_slot;
+  long        last_vote_timestamp;
+  uchar       commission;
+  ulong       epoch;
+  ulong       epoch_credits;
+};
+
+typedef struct fd_replay_vote fd_replay_vote_t;
+
 union fd_replay_message {
   fd_replay_slot_completed_t slot_completed;
   fd_replay_root_advanced_t  root_advanced;