Skip to content

Commit 28c1766

Browse files
committed
gui: leader schedule + peer vote info
1 parent 4d6fdc7 commit 28c1766

File tree

11 files changed

+590
-153
lines changed

11 files changed

+590
-153
lines changed

book/api/websocket.md

Lines changed: 36 additions & 34 deletions
Large diffs are not rendered by default.

src/app/firedancer/topology.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -258,6 +258,9 @@ fd_topo_initialize( config_t * config ) {
258258

259259
fd_topob_wksp( topo, "shred_out" );
260260
fd_topob_wksp( topo, "replay_stake" );
261+
if( FD_LIKELY( config->tiles.gui.enabled ) ) { /* the gui, which is optional, is the only consumer of replay_votes */
262+
fd_topob_wksp( topo, "replay_votes" );
263+
}
261264
fd_topob_wksp( topo, "replay_exec" );
262265
fd_topob_wksp( topo, "replay_out" );
263266
fd_topob_wksp( topo, "tower_out" );
@@ -345,6 +348,9 @@ fd_topo_initialize( config_t * config ) {
345348
/**/ fd_topob_link( topo, "dedup_resolv", "dedup_resolv", 65536UL, FD_TPU_PARSED_MTU, 1UL );
346349
FOR(resolv_tile_cnt) fd_topob_link( topo, "resolv_pack", "resolv_pack", 65536UL, FD_TPU_RESOLVED_MTU, 1UL );
347350
/**/ fd_topob_link( topo, "replay_stake", "replay_stake", 128UL, FD_STAKE_OUT_MTU, 1UL ); /* TODO: This should be 2 but requires fixing STEM_BURST */
351+
if( FD_LIKELY( config->tiles.gui.enabled ) ) { /* the gui, which is optional, is the only consumer of replay_votes */
352+
fd_topob_link( topo, "replay_votes", "replay_votes", 128UL, FD_RUNTIME_MAX_VOTE_ACCOUNTS*sizeof(fd_replay_vote_t), 1UL );
353+
}
348354
/**/ fd_topob_link( topo, "replay_out", "replay_out", 8192UL, sizeof(fd_replay_message_t), 1UL );
349355
/**/ fd_topob_link( topo, "pack_poh", "pack_poh", 128UL, sizeof(fd_done_packing_t), 1UL );
350356
/* pack_bank is shared across all banks, so if one bank stalls due to complex transactions, the buffer needs to be large so that
@@ -502,6 +508,9 @@ fd_topo_initialize( config_t * config ) {
502508
/**/ fd_topob_tile_in ( topo, "replay", 0UL, "metric_in", "genesi_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
503509
/**/ fd_topob_tile_out( topo, "replay", 0UL, "replay_out", 0UL );
504510
/**/ fd_topob_tile_out( topo, "replay", 0UL, "replay_stake", 0UL );
511+
if( FD_LIKELY( config->tiles.gui.enabled ) ) { /* the gui, which is optional, is the only consumer of replay_votes */
512+
fd_topob_tile_out( topo, "replay", 0UL, "replay_votes", 0UL );
513+
}
505514
/**/ fd_topob_tile_out( topo, "replay", 0UL, "executed_txn", 0UL );
506515
/**/ fd_topob_tile_out( topo, "replay", 0UL, "replay_exec", 0UL );
507516
/**/ fd_topob_tile_in ( topo, "replay", 0UL, "metric_in", "tower_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
@@ -654,6 +663,8 @@ fd_topo_initialize( config_t * config ) {
654663
/**/ fd_topob_tile_in( topo, "gui", 0UL, "metric_in", "gossip_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
655664
/**/ fd_topob_tile_in( topo, "gui", 0UL, "metric_in", "tower_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
656665
/**/ fd_topob_tile_in( topo, "gui", 0UL, "metric_in", "replay_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
666+
/**/ fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "replay_stake", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
667+
/**/ fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "replay_votes", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
657668

658669
if( FD_LIKELY( snapshots_enabled ) ) {
659670
fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "snaprd_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );

src/disco/gui/fd_gui.c

Lines changed: 126 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -229,6 +229,20 @@ fd_gui_ws_open( fd_gui_t * gui,
229229
FD_TEST( !fd_http_server_ws_send( gui->http, ws_conn_id ) );
230230
}
231231

232+
/* todo .. temporary workaround to skip the blur until frontend boot
233+
screen lands */
234+
if( FD_UNLIKELY( gui->summary.is_full_client ) ) {
235+
ulong real_mls = fd_ulong_if( gui->summary.catch_up_repair_sz>0UL, gui->summary.catch_up_repair[ 0 ], 0UL );
236+
uchar prev_phase = gui->summary.startup_progress.phase;
237+
ulong prev_mls = gui->summary.startup_progress.startup_ledger_max_slot;
238+
gui->summary.startup_progress.phase = FD_GUI_START_PROGRESS_TYPE_RUNNING;
239+
gui->summary.startup_progress.startup_ledger_max_slot = real_mls;
240+
fd_gui_printf_startup_progress( gui );
241+
FD_TEST( !fd_http_server_ws_send( gui->http, ws_conn_id ) );
242+
gui->summary.startup_progress.phase = prev_phase;
243+
gui->summary.startup_progress.startup_ledger_max_slot = prev_mls;
244+
}
245+
232246
if( FD_LIKELY( gui->block_engine.has_block_engine ) ) {
233247
fd_gui_printf_block_engine( gui );
234248
FD_TEST( !fd_http_server_ws_send( gui->http, ws_conn_id ) );
@@ -1093,6 +1107,15 @@ fd_gui_slot_duration( fd_gui_t const * gui, fd_gui_slot_t const * cur ) {
10931107
return (ulong)(cur->completed_time - prev->completed_time);
10941108
}
10951109

1110+
/* All rankings are initialized / reset to ULONG_MAX. These sentinels
1111+
sort AFTER non-sentinel ranking entries. Entries with equal values are sorted by
1112+
oldest slot AFTER. Otherwise sort by value according to ranking
1113+
type. */
1114+
#define SORT_NAME fd_gui_slot_ranking_sort
1115+
#define SORT_KEY_T fd_gui_slot_ranking_t
1116+
#define SORT_BEFORE(a,b) fd_int_if( (a).slot==ULONG_MAX, 0, fd_int_if( (b).slot==ULONG_MAX, 1, fd_int_if( (a).value==(b).value, (a).slot>(b).slot, fd_int_if( (a).type==FD_GUI_SLOT_RANKING_TYPE_DESC, (a).value>(b).value, (a).value<(b).value ) ) ) )
1117+
#include "../../util/tmpl/fd_sort.c"
1118+
10961119
static inline void
10971120
fd_gui_try_insert_ranking( fd_gui_t * gui,
10981121
fd_gui_slot_rankings_t * rankings,
@@ -1353,55 +1376,48 @@ fd_gui_clear_slot( fd_gui_t * gui,
13531376
return slot;
13541377
}
13551378

1356-
static void
1357-
fd_gui_handle_leader_schedule( fd_gui_t * gui,
1358-
ulong const * msg,
1359-
long now ) {
1360-
ulong epoch = msg[ 0 ];
1361-
ulong staked_cnt = msg[ 1 ];
1362-
ulong start_slot = msg[ 2 ];
1363-
ulong slot_cnt = msg[ 3 ];
1364-
ulong excluded_stake = msg[ 4 ];
1365-
ulong vote_keyed_lsched = msg[ 5 ];
1366-
1367-
FD_TEST( staked_cnt<=MAX_STAKED_LEADERS );
1368-
FD_TEST( slot_cnt<=MAX_SLOTS_PER_EPOCH );
1369-
1370-
ulong idx = epoch % 2UL;
1379+
void
1380+
fd_gui_handle_leader_schedule( fd_gui_t * gui,
1381+
fd_stake_weight_msg_t const * leader_schedule,
1382+
long now ) {
1383+
FD_TEST( leader_schedule->staked_cnt<=MAX_STAKED_LEADERS );
1384+
FD_TEST( leader_schedule->slot_cnt<=MAX_SLOTS_PER_EPOCH );
1385+
1386+
ulong idx = leader_schedule->epoch % 2UL;
13711387
gui->epoch.has_epoch[ idx ] = 1;
13721388

1373-
gui->epoch.epochs[ idx ].epoch = epoch;
1374-
gui->epoch.epochs[ idx ].start_slot = start_slot;
1375-
gui->epoch.epochs[ idx ].end_slot = start_slot + slot_cnt - 1; // end_slot is inclusive.
1376-
gui->epoch.epochs[ idx ].excluded_stake = excluded_stake;
1389+
gui->epoch.epochs[ idx ].epoch = leader_schedule->epoch;
1390+
gui->epoch.epochs[ idx ].start_slot = leader_schedule->start_slot;
1391+
gui->epoch.epochs[ idx ].end_slot = leader_schedule->start_slot + leader_schedule->slot_cnt - 1; // end_slot is inclusive.
1392+
gui->epoch.epochs[ idx ].excluded_stake = leader_schedule->excluded_stake;
13771393
gui->epoch.epochs[ idx ].my_total_slots = 0UL;
13781394
gui->epoch.epochs[ idx ].my_skipped_slots = 0UL;
13791395

13801396
memset( gui->epoch.epochs[ idx ].rankings, (int)(UINT_MAX), sizeof(gui->epoch.epochs[ idx ].rankings) );
13811397
memset( gui->epoch.epochs[ idx ].my_rankings, (int)(UINT_MAX), sizeof(gui->epoch.epochs[ idx ].my_rankings) );
13821398

1383-
gui->epoch.epochs[ idx ].rankings_slot = start_slot;
1399+
gui->epoch.epochs[ idx ].rankings_slot = leader_schedule->start_slot;
13841400

1385-
fd_vote_stake_weight_t const * stake_weights = fd_type_pun_const( msg+6UL );
1386-
memcpy( gui->epoch.epochs[ idx ].stakes, stake_weights, staked_cnt*sizeof(fd_vote_stake_weight_t) );
1401+
fd_vote_stake_weight_t const * stake_weights = leader_schedule->weights;
1402+
fd_memcpy( gui->epoch.epochs[ idx ].stakes, stake_weights, leader_schedule->staked_cnt*sizeof(fd_vote_stake_weight_t) );
13871403

13881404
fd_epoch_leaders_delete( fd_epoch_leaders_leave( gui->epoch.epochs[ idx ].lsched ) );
13891405
gui->epoch.epochs[idx].lsched = fd_epoch_leaders_join( fd_epoch_leaders_new( gui->epoch.epochs[ idx ]._lsched,
1390-
epoch,
1406+
leader_schedule->epoch,
13911407
gui->epoch.epochs[ idx ].start_slot,
1392-
slot_cnt,
1393-
staked_cnt,
1408+
leader_schedule->slot_cnt,
1409+
leader_schedule->staked_cnt,
13941410
gui->epoch.epochs[ idx ].stakes,
1395-
excluded_stake,
1396-
vote_keyed_lsched ) );
1411+
leader_schedule->excluded_stake,
1412+
leader_schedule->vote_keyed_lsched ) );
13971413

1398-
if( FD_UNLIKELY( start_slot==0UL ) ) {
1414+
if( FD_UNLIKELY( leader_schedule->start_slot==0UL ) ) {
13991415
gui->epoch.epochs[ 0 ].start_time = now;
14001416
} else {
14011417
gui->epoch.epochs[ idx ].start_time = LONG_MAX;
14021418

1403-
for( ulong i=0UL; i<fd_ulong_min( start_slot-1UL, FD_GUI_SLOTS_CNT ); i++ ) {
1404-
fd_gui_slot_t const * slot = fd_gui_get_slot_const( gui, start_slot-i );
1419+
for( ulong i=0UL; i<fd_ulong_min( leader_schedule->start_slot-1UL, FD_GUI_SLOTS_CNT ); i++ ) {
1420+
fd_gui_slot_t const * slot = fd_gui_get_slot_const( gui, leader_schedule->start_slot-i );
14051421
if( FD_UNLIKELY( !slot ) ) break;
14061422
else if( FD_UNLIKELY( slot->skipped ) ) continue;
14071423

@@ -1477,6 +1493,11 @@ fd_gui_handle_slot_end( fd_gui_t * gui,
14771493
fd_gui_tile_stats_snap( gui, slot->waterfall_end, slot->tile_stats_end, now );
14781494
}
14791495

1496+
#define SORT_NAME fd_gui_ephemeral_slot_sort
1497+
#define SORT_KEY_T fd_gui_ephemeral_slot_t
1498+
#define SORT_BEFORE(a,b) fd_int_if( (a).slot==ULONG_MAX, 0, fd_int_if( (b).slot==ULONG_MAX, 1, fd_int_if( (a).slot==(b).slot, (a).timestamp_arrival_nanos>(b).timestamp_arrival_nanos, (a).slot>(b).slot ) ) )
1499+
#include "../../util/tmpl/fd_sort.c"
1500+
14801501
static inline void
14811502
fd_gui_try_insert_ephemeral_slot( fd_gui_ephemeral_slot_t * slots, ulong slots_sz, ulong slot, long now ) {
14821503
int already_present = 0;
@@ -2072,6 +2093,11 @@ fd_gui_handle_reset_slot( fd_gui_t * gui, ulong reset_slot, long now ) {
20722093
ulong prev_slot_completed = gui->summary.slot_completed;
20732094
gui->summary.slot_completed = reset_slot;
20742095

2096+
if( FD_LIKELY( fd_gui_get_slot( gui, gui->summary.slot_completed ) ) ) {
2097+
fd_gui_printf_slot( gui, gui->summary.slot_completed );
2098+
fd_http_server_ws_broadcast( gui->http );
2099+
}
2100+
20752101
fd_gui_printf_completed_slot( gui );
20762102
fd_http_server_ws_broadcast( gui->http );
20772103

@@ -2181,9 +2207,18 @@ fd_gui_handle_reset_slot( fd_gui_t * gui, ulong reset_slot, long now ) {
21812207
}
21822208
}
21832209

2210+
#define SORT_NAME fd_gui_slot_staged_shred_event_evict_sort
2211+
#define SORT_KEY_T fd_gui_slot_staged_shred_event_t
2212+
#define SORT_BEFORE(a,b) (__extension__({ (void)(b); (a).slot==ULONG_MAX; }))
2213+
#include "../../util/tmpl/fd_sort.c"
2214+
2215+
#define SORT_NAME fd_gui_slot_staged_shred_event_slot_sort
2216+
#define SORT_KEY_T fd_gui_slot_staged_shred_event_t
2217+
#define SORT_BEFORE(a,b) ((a).slot<(b).slot)
2218+
#include "../../util/tmpl/fd_sort.c"
2219+
21842220
static void
21852221
fd_gui_handle_rooted_slot( fd_gui_t * gui, ulong root_slot ) {
2186-
// ulong unstaged_cnt = 0UL;
21872222
for( ulong i=0UL; i<fd_ulong_min( root_slot, FD_GUI_SLOTS_CNT ); i++ ) {
21882223
ulong parent_slot = root_slot - i;
21892224

@@ -2195,49 +2230,72 @@ fd_gui_handle_rooted_slot( fd_gui_t * gui, ulong root_slot ) {
21952230
}
21962231
if( FD_UNLIKELY( slot->level>=FD_GUI_SLOT_LEVEL_ROOTED ) ) break;
21972232

2198-
/* TODO: commented out due to being too slow */
2199-
// /* archive root shred events */
2200-
// slot->shreds.start_offset = gui->shreds.history_tail;
2201-
// for( ulong i=gui->shreds.staged_head; i<gui->shreds.staged_tail; i++ ) {
2202-
// if( FD_UNLIKELY( gui->shreds.staged[ i ].slot==slot->slot ) ) {
2203-
// /* move event to history */
2204-
// gui->shreds.history[ gui->shreds.history_tail ].timestamp = gui->shreds.staged[ i ].timestamp;
2205-
// gui->shreds.history[ gui->shreds.history_tail ].shred_idx = gui->shreds.staged[ i ].shred_idx;
2206-
// gui->shreds.history[ gui->shreds.history_tail ].event = gui->shreds.staged[ i ].event;
2207-
// gui->shreds.history_tail++;
2208-
2209-
// gui->shreds.staged[ i ].slot = ULONG_MAX;
2210-
// unstaged_cnt++;
2211-
// }
2212-
2213-
// /* evict older slots staged also */
2214-
// if( FD_UNLIKELY( gui->shreds.staged[ i ].slot<slot->slot ) ) {
2215-
// gui->shreds.staged[ i ].slot = ULONG_MAX;
2216-
// unstaged_cnt++;
2217-
// }
2218-
// }
2219-
// slot->shreds.end_offset = gui->shreds.history_tail;
2220-
2221-
// /* change notarization levels and rebroadcast */
2222-
// slot->level = FD_GUI_SLOT_LEVEL_ROOTED;
2223-
// fd_gui_printf_slot( gui, parent_slot );
2224-
// fd_http_server_ws_broadcast( gui->http );
2233+
/* change notarization levels and rebroadcast */
2234+
slot->level = FD_GUI_SLOT_LEVEL_ROOTED;
2235+
fd_gui_printf_slot( gui, parent_slot );
2236+
fd_http_server_ws_broadcast( gui->http );
2237+
}
2238+
2239+
/* archive root shred events. We want to avoid n^2 iteration here
2240+
since it can significantly slow things down. Instead, we copy
2241+
over all rooted shreds to a scratch space, stable sort by slot,
2242+
then copy the sorted events into the shred history. */
2243+
ulong evicted_cnt = 0UL; /* the total number evicted, including ignored */
2244+
ulong archive_cnt = 0UL; /* the total number archived (evicted but not ignored) */
2245+
for( ulong i=gui->shreds.staged_head; i<gui->shreds.staged_tail; i++ ) {
2246+
/* ignore new shred events that came in after their slot was rooted */
2247+
if( FD_UNLIKELY( gui->shreds.history_slot!=ULONG_MAX && gui->shreds.staged[ i ].slot<=gui->shreds.history_slot ) ) {
2248+
gui->shreds.staged[ i ].slot = ULONG_MAX;
2249+
evicted_cnt++;
2250+
}
2251+
2252+
if( FD_UNLIKELY( gui->shreds.staged[ i ].slot<=root_slot ) ) {
2253+
/* move to scratch */
2254+
fd_memcpy( gui->shreds._staged_scratch, &gui->shreds.staged[ i ], sizeof(fd_gui_slot_staged_shred_event_t) );
2255+
archive_cnt++;
2256+
2257+
/* evict from staged */
2258+
gui->shreds.staged[ i ].slot = ULONG_MAX;
2259+
evicted_cnt++;
2260+
}
22252261
}
22262262

22272263
/* The entries from the staging area are evicted by setting their
22282264
slot field to ULONG MAX, then sorting the staging area.
22292265
22302266
IMPORTANT: this sort needs to be stable since we always keep
22312267
valid un-broadcast events at the end of the ring buffer */
2232-
// if( FD_LIKELY( unstaged_cnt ) ) {
2233-
// fd_gui_slot_staged_shred_event_sort_insert( &gui->shreds.staged[ gui->shreds.staged_head ], gui->shreds.staged_tail-gui->shreds.staged_head );
2234-
// gui->shreds.staged_head += unstaged_cnt;
2235-
// }
2268+
if( FD_LIKELY( evicted_cnt ) ) {
2269+
fd_gui_slot_staged_shred_event_evict_sort_stable( &gui->shreds.staged[ gui->shreds.staged_head ], gui->shreds.staged_tail-gui->shreds.staged_head, gui->shreds._staged_scratch2 );
2270+
gui->shreds.staged_head += evicted_cnt;
2271+
2272+
/* In the rare case that we are archiving any shred events that have
2273+
not yet been broadcast, we'll increment
2274+
gui->shreds.staged_next_broadcast to keep it in bounds. */
2275+
gui->shreds.staged_next_broadcast = fd_ulong_max( gui->shreds.staged_head, gui->shreds.staged_next_broadcast );
2276+
2277+
/* sort scratch by slot increasing */
2278+
fd_gui_slot_staged_shred_event_slot_sort_stable( gui->shreds._staged_scratch, archive_cnt, gui->shreds._staged_scratch2 );
22362279

2237-
// /* In the rare case that we are archiving any shred events that have
2238-
// not yet been broadcast, we'll increment
2239-
// gui->shreds.staged_next_broadcast to keep it in bounds. */
2240-
// gui->shreds.staged_next_broadcast = fd_ulong_max( gui->shreds.staged_head, gui->shreds.staged_next_broadcast );
2280+
/* copy shred events to archive */
2281+
for( ulong i=0UL; i<archive_cnt; i++ ) {
2282+
if( FD_UNLIKELY( gui->shreds._staged_scratch[ i ].slot!=gui->shreds.history_slot ) ) {
2283+
fd_gui_slot_t * prev_slot = fd_gui_get_slot( gui, gui->shreds.history_slot );
2284+
if( FD_LIKELY( prev_slot ) ) prev_slot->shreds.end_offset = gui->shreds.history_tail;
2285+
2286+
gui->shreds.history_slot = gui->shreds._staged_scratch[ i ].slot;
2287+
2288+
fd_gui_slot_t * next_slot = fd_gui_get_slot( gui, gui->shreds.history_slot );
2289+
if( FD_LIKELY( next_slot ) ) next_slot->shreds.start_offset = gui->shreds.history_tail;
2290+
}
2291+
2292+
gui->shreds.history[ gui->shreds.history_tail ].timestamp = gui->shreds._staged_scratch[ i ].timestamp;
2293+
gui->shreds.history[ gui->shreds.history_tail ].shred_idx = gui->shreds._staged_scratch[ i ].shred_idx;
2294+
gui->shreds.history[ gui->shreds.history_tail ].event = gui->shreds._staged_scratch[ i ].event;
2295+
2296+
gui->shreds.history_tail++;
2297+
}
2298+
}
22412299

22422300
gui->summary.slot_rooted = root_slot;
22432301
fd_gui_printf_root_slot( gui );
@@ -2365,7 +2423,8 @@ fd_gui_plugin_message( fd_gui_t * gui,
23652423
break;
23662424
}
23672425
case FD_PLUGIN_MSG_LEADER_SCHEDULE: {
2368-
fd_gui_handle_leader_schedule( gui, (ulong const *)msg, now );
2426+
FD_STATIC_ASSERT( sizeof(fd_stake_weight_msg_t)==6*sizeof(ulong), "new fields breaks things" );
2427+
fd_gui_handle_leader_schedule( gui, (fd_stake_weight_msg_t *)msg, now );
23692428
break;
23702429
}
23712430
case FD_PLUGIN_MSG_SLOT_START: {

0 commit comments

Comments
 (0)