@@ -231,7 +231,7 @@ static void sigdie_handler(int sig, siginfo_t *info, void *context)
     uv_tty_reset_mode();
     if (sig == SIGILL)
         jl_show_sigill(context);
-    jl_critical_error(sig, jl_to_bt_context(context));
+    jl_critical_error(sig, jl_to_bt_context(context), jl_get_current_task());
     if (sig != SIGSEGV &&
         sig != SIGBUS &&
         sig != SIGILL) {
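Both changed call sites in this commit thread the current task through to jl_critical_error explicitly. Reading only the call sites here, the function presumably gained a task parameter; the following is a hedged sketch of the implied declaration, not the authoritative prototype (which lives in the headers, outside this diff):

    /* Sketch inferred from the call sites in this diff; parameter
     * names are guesses. */
    void jl_critical_error(int sig, bt_context_t *context, jl_task_t *ct);

In the asynchronous sigdie handler the task is fetched with the jl_get_current_task() accessor, while the exit path in the next hunk passes the cached jl_current_task.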
@@ -410,7 +410,7 @@ CFI_NORETURN
     // (unavoidable due to its async nature).
     // Try harder to exit each time if we get multiple exit requests.
     if (thread0_exit_count <= 1) {
-        jl_critical_error(thread0_exit_state - 128, NULL);
+        jl_critical_error(thread0_exit_state - 128, NULL, jl_current_task);
         jl_exit(thread0_exit_state);
     }
     else if (thread0_exit_count == 2) {
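Aside from the new task argument, this hunk sits inside an escalation policy: each repeated exit request (tracked in thread0_exit_count) tears down less gracefully, so a wedged first attempt cannot block a later Ctrl-C. A minimal standalone sketch of the same idea in plain libc, with hypothetical names; Julia's real path also records the critical error first, as shown above:

    #include <stdlib.h>
    #include <unistd.h>

    /* Hypothetical escalation: the first request exits normally
     * (flushing streams and running atexit hooks); any further
     * request bypasses all cleanup so it cannot hang the same way. */
    static int exit_requests = 0;

    static void request_exit(int state)
    {
        if (++exit_requests == 1)
            exit(state);   /* graceful teardown; may itself get stuck */
        _exit(state);      /* immediate exit, no cleanup at all */
    }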
@@ -747,71 +747,72 @@ static void *signal_listener(void *arg)
         unw_context_t *signal_context;
         // sample each thread, round-robin style in reverse order
         // (so that thread zero gets notified last)
-        if (critical || profile)
+        if (critical || profile) {
             jl_lock_profile();
-        jl_shuffle_int_array_inplace(profile_round_robin_thread_order, jl_n_threads, &profile_cong_rng_seed);
-        for (int idx = jl_n_threads; idx-- > 0; ) {
-            // Stop the threads in the random round-robin order.
-            int i = profile_round_robin_thread_order[idx];
-            // notify thread to stop
-            jl_thread_suspend_and_get_state(i, &signal_context);
-
-            // do backtrace on thread contexts for critical signals
-            // this part must be signal-handler safe
-            if (critical) {
-                bt_size += rec_backtrace_ctx(bt_data + bt_size,
-                                             JL_MAX_BT_SIZE / jl_n_threads - 1,
-                                             signal_context, NULL);
-                bt_data[bt_size++].uintptr = 0;
-            }
-
-            // do backtrace for profiler
-            if (profile && running) {
-                if (jl_profile_is_buffer_full()) {
-                    // Buffer full: Delete the timer
-                    jl_profile_stop_timer();
+            if (!critical)
+                jl_shuffle_int_array_inplace(profile_round_robin_thread_order, jl_n_threads, &profile_cong_rng_seed);
+            for (int idx = jl_n_threads; idx-- > 0; ) {
+                // Stop the threads in the random round-robin order.
+                int i = critical ? idx : profile_round_robin_thread_order[idx];
+                // notify thread to stop
+                jl_thread_suspend_and_get_state(i, &signal_context);
+
+                // do backtrace on thread contexts for critical signals
+                // this part must be signal-handler safe
+                if (critical) {
+                    bt_size += rec_backtrace_ctx(bt_data + bt_size,
+                                                 JL_MAX_BT_SIZE / jl_n_threads - 1,
+                                                 signal_context, NULL);
+                    bt_data[bt_size++].uintptr = 0;
                 }
-                else {
-                    // unwinding can fail, so keep track of the current state
-                    // and restore from the SEGV handler if anything happens.
-                    jl_jmp_buf *old_buf = jl_get_safe_restore();
-                    jl_jmp_buf buf;
-
-                    jl_set_safe_restore(&buf);
-                    if (jl_setjmp(buf, 0)) {
-                        jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n");
-                    } else {
-                        // Get backtrace data
-                        bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)bt_data_prof + bt_size_cur,
-                                bt_size_max - bt_size_cur - 1, signal_context, NULL);
-                    }
-                    jl_set_safe_restore(old_buf);
-
-                    jl_ptls_t ptls = jl_all_tls_states[i];
-
-                    // store threadid but add 1 as 0 is preserved to indicate end of block
-                    bt_data_prof[bt_size_cur++].uintptr = ptls->tid + 1;
-
-                    // store task id
-                    bt_data_prof[bt_size_cur++].jlvalue = (jl_value_t*)jl_atomic_load_relaxed(&ptls->current_task);
-
-                    // store cpu cycle clock
-                    bt_data_prof[bt_size_cur++].uintptr = cycleclock();
 
-                    // store whether thread is sleeping but add 1 as 0 is preserved to indicate end of block
-                    bt_data_prof[bt_size_cur++].uintptr = jl_atomic_load_relaxed(&ptls->sleep_check_state) + 1;
-
-                    // Mark the end of this block with two 0's
-                    bt_data_prof[bt_size_cur++].uintptr = 0;
-                    bt_data_prof[bt_size_cur++].uintptr = 0;
+                // do backtrace for profiler
+                if (profile && running) {
+                    if (jl_profile_is_buffer_full()) {
+                        // Buffer full: Delete the timer
+                        jl_profile_stop_timer();
+                    }
+                    else {
+                        // unwinding can fail, so keep track of the current state
+                        // and restore from the SEGV handler if anything happens.
+                        jl_jmp_buf *old_buf = jl_get_safe_restore();
+                        jl_jmp_buf buf;
+
+                        jl_set_safe_restore(&buf);
+                        if (jl_setjmp(buf, 0)) {
+                            jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n");
+                        } else {
+                            // Get backtrace data
+                            bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)bt_data_prof + bt_size_cur,
+                                    bt_size_max - bt_size_cur - 1, signal_context, NULL);
+                        }
+                        jl_set_safe_restore(old_buf);
+
+                        jl_ptls_t ptls = jl_all_tls_states[i];
+
+                        // store threadid but add 1 as 0 is preserved to indicate end of block
+                        bt_data_prof[bt_size_cur++].uintptr = ptls->tid + 1;
+
+                        // store task id
+                        bt_data_prof[bt_size_cur++].jlvalue = (jl_value_t*)jl_atomic_load_relaxed(&ptls->current_task);
+
+                        // store cpu cycle clock
+                        bt_data_prof[bt_size_cur++].uintptr = cycleclock();
+
+                        // store whether thread is sleeping but add 1 as 0 is preserved to indicate end of block
+                        bt_data_prof[bt_size_cur++].uintptr = jl_atomic_load_relaxed(&ptls->sleep_check_state) + 1;
+
+                        // Mark the end of this block with two 0's
+                        bt_data_prof[bt_size_cur++].uintptr = 0;
+                        bt_data_prof[bt_size_cur++].uintptr = 0;
+                    }
                 }
-            }
 
-            // notify thread to resume
-            jl_thread_resume(i, sig);
-        }
-        if (critical || profile)
+                // notify thread to resume
+                jl_thread_resume(i, sig);
+            }
             jl_unlock_profile();
+        }
 #ifndef HAVE_MACH
         if (profile && running) {
 #if defined(HAVE_TIMER)
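Two details of the restructured loop deserve a note. First, suspension order: when profiling, the thread order is reshuffled each round so no thread is systematically sampled first, but for critical signals the shuffle is skipped and plain reverse index order is used, keeping crash backtraces deterministic. Second, the jl_set_safe_restore/jl_setjmp pair guards the unwinder: if walking a stopped thread's stack faults, the SEGV handler jumps back here instead of killing the process. Below is a minimal standalone sketch of that guard pattern with POSIX sigsetjmp; names are hypothetical, and Julia routes this through its per-thread safe-restore slot rather than a global:

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static sigjmp_buf unwind_guard;

    static void segv_handler(int sig)
    {
        /* return control to walk_stack_guarded instead of crashing */
        siglongjmp(unwind_guard, 1);
    }

    static void walk_stack_guarded(void)
    {
        struct sigaction sa;
        sa.sa_handler = segv_handler;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = 0;
        sigaction(SIGSEGV, &sa, NULL);

        /* savemask=1 so the blocked SIGSEGV is unblocked after the jump */
        if (sigsetjmp(unwind_guard, 1)) {
            fprintf(stderr, "WARNING: unwind touched an invalid memory location\n");
            return;
        }
        /* ...the potentially-faulting stack walk would run here... */
    }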
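For reference, each profiler sample appended above has a fixed shape: the backtrace frames for one thread, then four metadata words (thread id + 1, the current task pointer, the CPU cycle clock, and the sleep state + 1), then two zero words as a terminator; the + 1 offsets keep real values distinct from the terminating zeros. A hedged sketch of a consumer walking that layout, using a standalone stand-in for jl_bt_element_t and assuming every frame word is nonzero (the extended-frame encoding in the real buffer is more involved):

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for jl_bt_element_t: each word is either an
     * address-sized integer or a pointer. */
    typedef union {
        uintptr_t uintptr;
        void *jlvalue;
    } bt_element_t;

    /* Walk blocks of the form
     * [frames..., tid+1, task, cycles, sleep+1, 0, 0]. */
    static void decode_samples(const bt_element_t *buf, size_t len)
    {
        size_t i = 0;
        while (i < len && buf[i].uintptr != 0) {
            size_t start = i;
            while (i < len && buf[i].uintptr != 0)
                i++;                      /* stop at the first terminator zero */
            size_t meta = i - 4;          /* last four words are metadata */
            printf("tid=%" PRIuPTR " task=%p cycles=%" PRIuPTR
                   " sleeping=%" PRIuPTR " frames=%zu\n",
                   buf[meta].uintptr - 1, buf[meta + 1].jlvalue,
                   buf[meta + 2].uintptr, buf[meta + 3].uintptr - 1,
                   meta - start);
            i += 2;                       /* skip the two-zero terminator */
        }
    }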