@@ -1995,7 +1995,7 @@ function visitAsyncNode(
           owner: node.owner,
           stack: stack,
         });
-        advanceTaskTime(request, task, endTime);
+        markOperationEndTime(request, task, endTime);
       }
     }
   }
@@ -2035,7 +2035,7 @@ function emitAsyncSequence(
       awaited: ((awaitedNode: any): ReactIOInfo), // This is deduped by this reference.
       env: env,
     });
-    advanceTaskTime(request, task, awaitedNode.end);
+    markOperationEndTime(request, task, awaitedNode.end);
   }
 }

@@ -4255,7 +4255,7 @@ function forwardDebugInfo(
       // When forwarding time we need to ensure to convert it to the time space of the payload.
       // We clamp the time to the starting render of the current component. It's as if it took
       // no time to render and await if we reuse cached content.
-      advanceTaskTime(request, task, info.time);
+      markOperationEndTime(request, task, info.time);
     } else {
       if (typeof info.name === 'string') {
         // We outline this model eagerly so that we can refer to by reference as an owner.
@@ -4352,6 +4352,20 @@ function advanceTaskTime(
   task.timed = true;
 }

+function markOperationEndTime(request: Request, task: Task, timestamp: number) {
+  if (!enableProfilerTimer || !enableComponentPerformanceTrack) {
+    return;
+  }
+  // This is like advanceTaskTime() but always emits a timing chunk even if it doesn't advance.
+  // This ensures that the end time of the previous entry isn't implied to be the start of the next one.
+  if (timestamp > task.time) {
+    emitTimingChunk(request, task.id, timestamp);
+    task.time = timestamp;
+  } else {
+    emitTimingChunk(request, task.id, task.time);
+  }
+}
+
 function emitChunk(
   request: Request,
   task: Task,
@@ -4443,7 +4457,7 @@ function emitChunk(
 function erroredTask(request: Request, task: Task, error: mixed): void {
   if (enableProfilerTimer && enableComponentPerformanceTrack) {
     if (task.timed) {
-      advanceTaskTime(request, task, performance.now());
+      markOperationEndTime(request, task, performance.now());
     }
   }
   task.status = ERRORED;
@@ -4526,7 +4540,7 @@ function retryTask(request: Request, task: Task): void {
     // We've finished rendering. Log the end time.
     if (enableProfilerTimer && enableComponentPerformanceTrack) {
       if (task.timed) {
-        advanceTaskTime(request, task, performance.now());
+        markOperationEndTime(request, task, performance.now());
       }
     }

@@ -4653,7 +4667,7 @@ function abortTask(task: Task, request: Request, errorId: number): void {
   // Track when we aborted this task as its end time.
   if (enableProfilerTimer && enableComponentPerformanceTrack) {
     if (task.timed) {
-      advanceTaskTime(request, task, performance.now());
+      markOperationEndTime(request, task, performance.now());
     }
   }
   // Instead of emitting an error per task.id, we emit a model that only
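
For context, only the tail of advanceTaskTime() appears in the hunk above. A minimal sketch of its presumed shape, reconstructed from that visible tail and from the comment in markOperationEndTime() (the feature-flag guard and the emitTimingChunk call mirror the new helper and are assumptions, not the verbatim source):

function advanceTaskTime(request: Request, task: Task, timestamp: number) {
  if (!enableProfilerTimer || !enableComponentPerformanceTrack) {
    return;
  }
  // Unlike markOperationEndTime(), this only emits a timing chunk when the
  // timestamp actually advances, so an operation that ends at or before
  // task.time leaves no explicit end marker of its own.
  if (timestamp > task.time) {
    emitTimingChunk(request, task.id, timestamp);
    task.time = timestamp;
  }
  task.timed = true;
}

That difference is why the call sites above switch to markOperationEndTime(): each operation's end is recorded even when the clamped timestamp does not move past task.time.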