70 changes: 36 additions & 34 deletions book/api/websocket.md

Large diffs are not rendered by default.

11 changes: 11 additions & 0 deletions src/app/firedancer/topology.c
@@ -258,6 +258,9 @@ fd_topo_initialize( config_t * config ) {

fd_topob_wksp( topo, "shred_out" );
fd_topob_wksp( topo, "replay_stake" );
if( FD_LIKELY( config->tiles.gui.enabled ) ) { /* the gui, which is optional, is the only consumer of replay_votes */
fd_topob_wksp( topo, "replay_votes" );
}
fd_topob_wksp( topo, "replay_exec" );
fd_topob_wksp( topo, "replay_out" );
fd_topob_wksp( topo, "tower_out" );
@@ -345,6 +348,9 @@ fd_topo_initialize( config_t * config ) {
/**/ fd_topob_link( topo, "dedup_resolv", "dedup_resolv", 65536UL, FD_TPU_PARSED_MTU, 1UL );
FOR(resolv_tile_cnt) fd_topob_link( topo, "resolv_pack", "resolv_pack", 65536UL, FD_TPU_RESOLVED_MTU, 1UL );
/**/ fd_topob_link( topo, "replay_stake", "replay_stake", 128UL, FD_STAKE_OUT_MTU, 1UL ); /* TODO: This should be 2 but requires fixing STEM_BURST */
if( FD_LIKELY( config->tiles.gui.enabled ) ) { /* the gui, which is optional, is the only consumer of replay_votes */
fd_topob_link( topo, "replay_votes", "replay_votes", 128UL, FD_RUNTIME_MAX_VOTE_ACCOUNTS*sizeof(fd_replay_vote_t), 1UL );
}
/**/ fd_topob_link( topo, "replay_out", "replay_out", 8192UL, sizeof(fd_replay_message_t), 1UL );
/**/ fd_topob_link( topo, "pack_poh", "pack_poh", 128UL, sizeof(fd_done_packing_t), 1UL );
/* pack_bank is shared across all banks, so if one bank stalls due to complex transactions, the buffer needs to be large so that
@@ -502,6 +508,9 @@ fd_topo_initialize( config_t * config ) {
/**/ fd_topob_tile_in ( topo, "replay", 0UL, "metric_in", "genesi_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
/**/ fd_topob_tile_out( topo, "replay", 0UL, "replay_out", 0UL );
/**/ fd_topob_tile_out( topo, "replay", 0UL, "replay_stake", 0UL );
if( FD_LIKELY( config->tiles.gui.enabled ) ) { /* the gui, which is optional, is the only consumer of replay_votes */
fd_topob_tile_out( topo, "replay", 0UL, "replay_votes", 0UL );
}
/**/ fd_topob_tile_out( topo, "replay", 0UL, "executed_txn", 0UL );
/**/ fd_topob_tile_out( topo, "replay", 0UL, "replay_exec", 0UL );
/**/ fd_topob_tile_in ( topo, "replay", 0UL, "metric_in", "tower_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
@@ -654,6 +663,8 @@ fd_topo_initialize( config_t * config ) {
/**/ fd_topob_tile_in( topo, "gui", 0UL, "metric_in", "gossip_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
/**/ fd_topob_tile_in( topo, "gui", 0UL, "metric_in", "tower_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
/**/ fd_topob_tile_in( topo, "gui", 0UL, "metric_in", "replay_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
/**/ fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "replay_stake", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
/**/ fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "replay_votes", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );

if( FD_LIKELY( snapshots_enabled ) ) {
fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "snaprd_out", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
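Taken together, the topology changes gate every replay_votes object on the GUI tile being enabled. A consolidated sketch of the pattern (the calls are copied from the hunks above; only the grouping into one block is illustrative):

if( FD_LIKELY( config->tiles.gui.enabled ) ) {
  /* The gui tile is the only consumer of replay_votes, so the
     workspace, the link (MTU sized for a full vote account table),
     the replay producer, and the gui consumer are created together. */
  fd_topob_wksp( topo, "replay_votes" );
  fd_topob_link( topo, "replay_votes", "replay_votes", 128UL, FD_RUNTIME_MAX_VOTE_ACCOUNTS*sizeof(fd_replay_vote_t), 1UL );
  fd_topob_tile_out( topo, "replay", 0UL, "replay_votes", 0UL );
  fd_topob_tile_in ( topo, "gui", 0UL, "metric_in", "replay_votes", 0UL, FD_TOPOB_RELIABLE, FD_TOPOB_POLLED );
}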
193 changes: 126 additions & 67 deletions src/disco/gui/fd_gui.c
@@ -229,6 +229,20 @@ fd_gui_ws_open( fd_gui_t * gui,
FD_TEST( !fd_http_server_ws_send( gui->http, ws_conn_id ) );
}

/* TODO: temporary workaround to skip the blur until the frontend boot
   screen lands */
if( FD_UNLIKELY( gui->summary.is_full_client ) ) {
ulong real_mls = fd_ulong_if( gui->summary.catch_up_repair_sz>0UL, gui->summary.catch_up_repair[ 0 ], 0UL );
uchar prev_phase = gui->summary.startup_progress.phase;
ulong prev_mls = gui->summary.startup_progress.startup_ledger_max_slot;
gui->summary.startup_progress.phase = FD_GUI_START_PROGRESS_TYPE_RUNNING;
gui->summary.startup_progress.startup_ledger_max_slot = real_mls;
fd_gui_printf_startup_progress( gui );
FD_TEST( !fd_http_server_ws_send( gui->http, ws_conn_id ) );
gui->summary.startup_progress.phase = prev_phase;
gui->summary.startup_progress.startup_ledger_max_slot = prev_mls;
}

if( FD_LIKELY( gui->block_engine.has_block_engine ) ) {
fd_gui_printf_block_engine( gui );
FD_TEST( !fd_http_server_ws_send( gui->http, ws_conn_id ) );
@@ -1093,6 +1107,15 @@ fd_gui_slot_duration( fd_gui_t const * gui, fd_gui_slot_t const * cur ) {
return (ulong)(cur->completed_time - prev->completed_time);
}

/* All rankings are initialized / reset to ULONG_MAX. These sentinels
   sort AFTER non-sentinel ranking entries. Entries with equal values
   are ordered newest slot first (oldest slot AFTER). Otherwise sort
   by value according to the ranking type (ascending or descending). */
#define SORT_NAME fd_gui_slot_ranking_sort
#define SORT_KEY_T fd_gui_slot_ranking_t
#define SORT_BEFORE(a,b) fd_int_if( (a).slot==ULONG_MAX, 0, fd_int_if( (b).slot==ULONG_MAX, 1, fd_int_if( (a).value==(b).value, (a).slot>(b).slot, fd_int_if( (a).type==FD_GUI_SLOT_RANKING_TYPE_DESC, (a).value>(b).value, (a).value<(b).value ) ) ) )
#include "../../util/tmpl/fd_sort.c"
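A minimal usage sketch of the generated sorter (assuming fd_sort.c emits the usual fd_gui_slot_ranking_sort_insert( key, cnt ) entry point, consistent with the staged-shred sorters used later in this file):

fd_gui_slot_ranking_t r[ 4 ] = {
  { .slot=10UL,      .value=7UL, .type=FD_GUI_SLOT_RANKING_TYPE_DESC },
  { .slot=ULONG_MAX                                                  }, /* sentinel */
  { .slot=12UL,      .value=7UL, .type=FD_GUI_SLOT_RANKING_TYPE_DESC },
  { .slot=11UL,      .value=9UL, .type=FD_GUI_SLOT_RANKING_TYPE_DESC },
};
fd_gui_slot_ranking_sort_insert( r, 4UL );
/* Result: slot 11 (value 9), slot 12 (value 7), slot 10 (value 7),
   sentinel last: descending by value, ties broken newest slot first. */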

static inline void
fd_gui_try_insert_ranking( fd_gui_t * gui,
fd_gui_slot_rankings_t * rankings,
@@ -1353,55 +1376,48 @@ fd_gui_clear_slot( fd_gui_t * gui,
return slot;
}

static void
fd_gui_handle_leader_schedule( fd_gui_t * gui,
ulong const * msg,
long now ) {
ulong epoch = msg[ 0 ];
ulong staked_cnt = msg[ 1 ];
ulong start_slot = msg[ 2 ];
ulong slot_cnt = msg[ 3 ];
ulong excluded_stake = msg[ 4 ];
ulong vote_keyed_lsched = msg[ 5 ];

FD_TEST( staked_cnt<=MAX_STAKED_LEADERS );
FD_TEST( slot_cnt<=MAX_SLOTS_PER_EPOCH );

ulong idx = epoch % 2UL;
void
fd_gui_handle_leader_schedule( fd_gui_t * gui,
fd_stake_weight_msg_t const * leader_schedule,
long now ) {
FD_TEST( leader_schedule->staked_cnt<=MAX_STAKED_LEADERS );
FD_TEST( leader_schedule->slot_cnt<=MAX_SLOTS_PER_EPOCH );

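/* Epoch state is double buffered: epoch N lands in index N%2, so the
   prior epoch remains queryable while the new one is populated. */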
ulong idx = leader_schedule->epoch % 2UL;
gui->epoch.has_epoch[ idx ] = 1;

gui->epoch.epochs[ idx ].epoch = epoch;
gui->epoch.epochs[ idx ].start_slot = start_slot;
gui->epoch.epochs[ idx ].end_slot = start_slot + slot_cnt - 1; // end_slot is inclusive.
gui->epoch.epochs[ idx ].excluded_stake = excluded_stake;
gui->epoch.epochs[ idx ].epoch = leader_schedule->epoch;
gui->epoch.epochs[ idx ].start_slot = leader_schedule->start_slot;
gui->epoch.epochs[ idx ].end_slot = leader_schedule->start_slot + leader_schedule->slot_cnt - 1; // end_slot is inclusive.
gui->epoch.epochs[ idx ].excluded_stake = leader_schedule->excluded_stake;
gui->epoch.epochs[ idx ].my_total_slots = 0UL;
gui->epoch.epochs[ idx ].my_skipped_slots = 0UL;

memset( gui->epoch.epochs[ idx ].rankings, (int)(UINT_MAX), sizeof(gui->epoch.epochs[ idx ].rankings) );
memset( gui->epoch.epochs[ idx ].my_rankings, (int)(UINT_MAX), sizeof(gui->epoch.epochs[ idx ].my_rankings) );

gui->epoch.epochs[ idx ].rankings_slot = start_slot;
gui->epoch.epochs[ idx ].rankings_slot = leader_schedule->start_slot;

fd_vote_stake_weight_t const * stake_weights = fd_type_pun_const( msg+6UL );
memcpy( gui->epoch.epochs[ idx ].stakes, stake_weights, staked_cnt*sizeof(fd_vote_stake_weight_t) );
fd_vote_stake_weight_t const * stake_weights = leader_schedule->weights;
fd_memcpy( gui->epoch.epochs[ idx ].stakes, stake_weights, leader_schedule->staked_cnt*sizeof(fd_vote_stake_weight_t) );

fd_epoch_leaders_delete( fd_epoch_leaders_leave( gui->epoch.epochs[ idx ].lsched ) );
gui->epoch.epochs[idx].lsched = fd_epoch_leaders_join( fd_epoch_leaders_new( gui->epoch.epochs[ idx ]._lsched,
epoch,
leader_schedule->epoch,
gui->epoch.epochs[ idx ].start_slot,
slot_cnt,
staked_cnt,
leader_schedule->slot_cnt,
leader_schedule->staked_cnt,
gui->epoch.epochs[ idx ].stakes,
excluded_stake,
vote_keyed_lsched ) );
leader_schedule->excluded_stake,
leader_schedule->vote_keyed_lsched ) );

if( FD_UNLIKELY( start_slot==0UL ) ) {
if( FD_UNLIKELY( leader_schedule->start_slot==0UL ) ) {
gui->epoch.epochs[ 0 ].start_time = now;
} else {
gui->epoch.epochs[ idx ].start_time = LONG_MAX;

for( ulong i=0UL; i<fd_ulong_min( start_slot-1UL, FD_GUI_SLOTS_CNT ); i++ ) {
fd_gui_slot_t const * slot = fd_gui_get_slot_const( gui, start_slot-i );
for( ulong i=0UL; i<fd_ulong_min( leader_schedule->start_slot-1UL, FD_GUI_SLOTS_CNT ); i++ ) {
fd_gui_slot_t const * slot = fd_gui_get_slot_const( gui, leader_schedule->start_slot-i );
if( FD_UNLIKELY( !slot ) ) break;
else if( FD_UNLIKELY( slot->skipped ) ) continue;

@@ -1477,6 +1493,11 @@ fd_gui_handle_slot_end( fd_gui_t * gui,
fd_gui_tile_stats_snap( gui, slot->waterfall_end, slot->tile_stats_end, now );
}

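/* Ephemeral slots sort newest (highest) slot first; within the same
   slot, the most recently arrived event sorts first. ULONG_MAX
   sentinels sort AFTER all non-sentinel entries. */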
#define SORT_NAME fd_gui_ephemeral_slot_sort
#define SORT_KEY_T fd_gui_ephemeral_slot_t
#define SORT_BEFORE(a,b) fd_int_if( (a).slot==ULONG_MAX, 0, fd_int_if( (b).slot==ULONG_MAX, 1, fd_int_if( (a).slot==(b).slot, (a).timestamp_arrival_nanos>(b).timestamp_arrival_nanos, (a).slot>(b).slot ) ) )
#include "../../util/tmpl/fd_sort.c"

static inline void
fd_gui_try_insert_ephemeral_slot( fd_gui_ephemeral_slot_t * slots, ulong slots_sz, ulong slot, long now ) {
int already_present = 0;
@@ -2072,6 +2093,11 @@ fd_gui_handle_reset_slot( fd_gui_t * gui, ulong reset_slot, long now ) {
ulong prev_slot_completed = gui->summary.slot_completed;
gui->summary.slot_completed = reset_slot;

if( FD_LIKELY( fd_gui_get_slot( gui, gui->summary.slot_completed ) ) ) {
fd_gui_printf_slot( gui, gui->summary.slot_completed );
fd_http_server_ws_broadcast( gui->http );
}

fd_gui_printf_completed_slot( gui );
fd_http_server_ws_broadcast( gui->http );

@@ -2181,9 +2207,18 @@ fd_gui_handle_reset_slot( fd_gui_t * gui, ulong reset_slot, long now ) {
}
}

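/* Evict sort: entries marked with slot==ULONG_MAX sort BEFORE live
   entries, so a stable sort compacts them at the head of the staging
   region and staged_head can simply be advanced past them. */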
#define SORT_NAME fd_gui_slot_staged_shred_event_evict_sort
#define SORT_KEY_T fd_gui_slot_staged_shred_event_t
#define SORT_BEFORE(a,b) (__extension__({ (void)(b); (a).slot==ULONG_MAX; }))
#include "../../util/tmpl/fd_sort.c"

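/* Slot sort: orders archived events by slot ascending so that each
   slot's events land contiguously in the shred history. */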
#define SORT_NAME fd_gui_slot_staged_shred_event_slot_sort
#define SORT_KEY_T fd_gui_slot_staged_shred_event_t
#define SORT_BEFORE(a,b) ((a).slot<(b).slot)
#include "../../util/tmpl/fd_sort.c"

static void
fd_gui_handle_rooted_slot( fd_gui_t * gui, ulong root_slot ) {
// ulong unstaged_cnt = 0UL;
for( ulong i=0UL; i<fd_ulong_min( root_slot, FD_GUI_SLOTS_CNT ); i++ ) {
ulong parent_slot = root_slot - i;

@@ -2195,49 +2230,72 @@ fd_gui_handle_rooted_slot( fd_gui_t * gui, ulong root_slot ) {
}
if( FD_UNLIKELY( slot->level>=FD_GUI_SLOT_LEVEL_ROOTED ) ) break;

/* TODO: commented out due to being too slow */
// /* archive root shred events */
// slot->shreds.start_offset = gui->shreds.history_tail;
// for( ulong i=gui->shreds.staged_head; i<gui->shreds.staged_tail; i++ ) {
// if( FD_UNLIKELY( gui->shreds.staged[ i ].slot==slot->slot ) ) {
// /* move event to history */
// gui->shreds.history[ gui->shreds.history_tail ].timestamp = gui->shreds.staged[ i ].timestamp;
// gui->shreds.history[ gui->shreds.history_tail ].shred_idx = gui->shreds.staged[ i ].shred_idx;
// gui->shreds.history[ gui->shreds.history_tail ].event = gui->shreds.staged[ i ].event;
// gui->shreds.history_tail++;

// gui->shreds.staged[ i ].slot = ULONG_MAX;
// unstaged_cnt++;
// }

// /* evict older slots staged also */
// if( FD_UNLIKELY( gui->shreds.staged[ i ].slot<slot->slot ) ) {
// gui->shreds.staged[ i ].slot = ULONG_MAX;
// unstaged_cnt++;
// }
// }
// slot->shreds.end_offset = gui->shreds.history_tail;

// /* change notarization levels and rebroadcast */
// slot->level = FD_GUI_SLOT_LEVEL_ROOTED;
// fd_gui_printf_slot( gui, parent_slot );
// fd_http_server_ws_broadcast( gui->http );
/* change notarization levels and rebroadcast */
slot->level = FD_GUI_SLOT_LEVEL_ROOTED;
fd_gui_printf_slot( gui, parent_slot );
fd_http_server_ws_broadcast( gui->http );
}

/* Archive rooted shred events. We want to avoid n^2 iteration here
   since it can significantly slow things down. Instead, we copy
   all rooted shred events to a scratch space, stable sort them by
   slot, and copy the sorted array into the shred history. */
ulong evicted_cnt = 0UL; /* the total number evicted, including ignored */
ulong archive_cnt = 0UL; /* the number archived (evicted and copied to scratch, NOT including ignored) */
for( ulong i=gui->shreds.staged_head; i<gui->shreds.staged_tail; i++ ) {
/* ignore new shred events that came in after their slot was rooted */
if( FD_UNLIKELY( gui->shreds.history_slot!=ULONG_MAX && gui->shreds.staged[ i ].slot<=gui->shreds.history_slot ) ) {
gui->shreds.staged[ i ].slot = ULONG_MAX;
evicted_cnt++;
}

if( FD_UNLIKELY( gui->shreds.staged[ i ].slot<=root_slot ) ) {
/* move to scratch */
fd_memcpy( &gui->shreds._staged_scratch[ archive_cnt ], &gui->shreds.staged[ i ], sizeof(fd_gui_slot_staged_shred_event_t) );
archive_cnt++;

/* evict from staged */
gui->shreds.staged[ i ].slot = ULONG_MAX;
evicted_cnt++;
}
}

/* The entries from the staging area are evicted by setting their
   slot field to ULONG_MAX, then sorting the staging area.

   IMPORTANT: this sort needs to be stable since we always keep
   valid un-broadcast events at the end of the ring buffer */
// if( FD_LIKELY( unstaged_cnt ) ) {
// fd_gui_slot_staged_shred_event_sort_insert( &gui->shreds.staged[ gui->shreds.staged_head ], gui->shreds.staged_tail-gui->shreds.staged_head );
// gui->shreds.staged_head += unstaged_cnt;
// }
if( FD_LIKELY( evicted_cnt ) ) {
fd_gui_slot_staged_shred_event_evict_sort_stable( &gui->shreds.staged[ gui->shreds.staged_head ], gui->shreds.staged_tail-gui->shreds.staged_head, gui->shreds._staged_scratch2 );
gui->shreds.staged_head += evicted_cnt;

/* In the rare case that we are archiving any shred events that have
not yet been broadcast, we'll increment
gui->shreds.staged_next_broadcast to keep it in bounds. */
gui->shreds.staged_next_broadcast = fd_ulong_max( gui->shreds.staged_head, gui->shreds.staged_next_broadcast );

/* sort scratch by slot increasing */
fd_gui_slot_staged_shred_event_slot_sort_stable( gui->shreds._staged_scratch, archive_cnt, gui->shreds._staged_scratch2 );

// /* In the rare case that we are archiving any shred events that have
// not yet been broadcast, we'll increment
// gui->shreds.staged_next_broadcast to keep it in bounds. */
// gui->shreds.staged_next_broadcast = fd_ulong_max( gui->shreds.staged_head, gui->shreds.staged_next_broadcast );
/* copy shred events to archive */
for( ulong i=0UL; i<archive_cnt; i++ ) {
if( FD_UNLIKELY( gui->shreds._staged_scratch[ i ].slot!=gui->shreds.history_slot ) ) {
fd_gui_slot_t * prev_slot = fd_gui_get_slot( gui, gui->shreds.history_slot );
if( FD_LIKELY( prev_slot ) ) prev_slot->shreds.end_offset = gui->shreds.history_tail;

gui->shreds.history_slot = gui->shreds._staged_scratch[ i ].slot;

fd_gui_slot_t * next_slot = fd_gui_get_slot( gui, gui->shreds.history_slot );
if( FD_LIKELY( next_slot ) ) next_slot->shreds.start_offset = gui->shreds.history_tail;
}

gui->shreds.history[ gui->shreds.history_tail ].timestamp = gui->shreds._staged_scratch[ i ].timestamp;
gui->shreds.history[ gui->shreds.history_tail ].shred_idx = gui->shreds._staged_scratch[ i ].shred_idx;
gui->shreds.history[ gui->shreds.history_tail ].event = gui->shreds._staged_scratch[ i ].event;

gui->shreds.history_tail++;
}
}
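/* Archive layout note: after this pass an archived slot's events
   occupy [ slot->shreds.start_offset, slot->shreds.end_offset ) of
   gui->shreds.history; a slot's end_offset is only finalized once
   the next slot begins archiving. */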

gui->summary.slot_rooted = root_slot;
fd_gui_printf_root_slot( gui );
@@ -2365,7 +2423,8 @@ fd_gui_plugin_message( fd_gui_t * gui,
break;
}
case FD_PLUGIN_MSG_LEADER_SCHEDULE: {
fd_gui_handle_leader_schedule( gui, (ulong const *)msg, now );
FD_STATIC_ASSERT( sizeof(fd_stake_weight_msg_t)==6*sizeof(ulong), "new fields break things" );
fd_gui_handle_leader_schedule( gui, (fd_stake_weight_msg_t const *)msg, now );
break;
}
case FD_PLUGIN_MSG_SLOT_START: {
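For reference, the 6-ulong layout checked by the FD_STATIC_ASSERT above, reconstructed from the msg[ 0 ]..msg[ 5 ] parsing this diff removes (a sketch of the implied shape, not the canonical header definition):

typedef struct {
  ulong epoch;
  ulong staked_cnt;       /* number of entries in weights */
  ulong start_slot;
  ulong slot_cnt;
  ulong excluded_stake;
  ulong vote_keyed_lsched;
  fd_vote_stake_weight_t weights[]; /* staked_cnt entries follow the
                                       6-ulong header (flexible array
                                       member, excluded from sizeof) */
} fd_stake_weight_msg_t;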