@@ -58,6 +58,7 @@ volatile u64 layer_refresh_seq_avgruntime;
 
 /* Flag to enable or disable antistall feature */
 const volatile bool enable_antistall = true;
+const volatile bool enable_tickless = true;
 const volatile bool enable_match_debug = false;
 const volatile bool enable_gpu_support = false;
 /* Delay permitted, in seconds, before antistall activates */
@@ -184,6 +185,32 @@ static __always_inline bool is_scheduler_task(struct task_struct *p)
 	return (u32)p->tgid == layered_root_tgid;
 }
 
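+/*
+ * If the CPU's current task is running with an infinite (tickless) slice,
+ * trim the slice so it can be preempted: unowned CPUs get a minimal slice,
+ * otherwise the owning layer's slice_ns is restored.
+ */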
+static void preempt_tickless(struct cpu_ctx *cpuc)
+{
+	struct task_struct *curr;
+	struct layer *layer;
+
+	if (!enable_tickless)
+		return;
+
+	bpf_rcu_read_lock();
+	curr = scx_bpf_cpu_rq(cpuc->cpu)->curr;
+	if (curr->scx.slice == SCX_SLICE_INF) {
+		// If a tickless task is running on an unowned layer, preempt it.
+		if (cpuc->layer_id >= nr_layers) {
+			curr->scx.slice = 1;
+			bpf_rcu_read_unlock();
+			return;
+		}
+		if (!(layer = lookup_layer(cpuc->layer_id))) {
+			bpf_rcu_read_unlock();
+			return;
+		}
+		curr->scx.slice = layer->slice_ns;
+	}
+	bpf_rcu_read_unlock();
+}
+
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 	__type(key, u32);
@@ -1204,7 +1231,8 @@ s32 BPF_STRUCT_OPS(layered_select_cpu, struct task_struct *p, s32 prev_cpu, u64
 	if (cpu >= 0) {
 		lstat_inc(LSTAT_SEL_LOCAL, layer, cpuc);
 		taskc->dsq_id = SCX_DSQ_LOCAL;
-		scx_bpf_dsq_insert(p, taskc->dsq_id, layer->slice_ns, 0);
+		u64 slice_ns = layer->tickless ? SCX_SLICE_INF : layer->slice_ns;
+		scx_bpf_dsq_insert(p, taskc->dsq_id, slice_ns, 0);
 		return cpu;
 	}
 
@@ -1387,6 +1415,8 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 	if (!(cpuc = lookup_cpu_ctx(-1)) || !(taskc = lookup_task_ctx(p)))
 		return;
 
+	preempt_tickless(cpuc);
+
 	layer_id = taskc->layer_id;
 	if (!(layer = lookup_layer(layer_id)))
 		return;
@@ -1602,10 +1632,11 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 		lstats[LLC_LSTAT_CNT]++;
 
 	taskc->dsq_id = layer_dsq_id(layer_id, llc_id);
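+	/* tickless layers run with an infinite slice, trimmed in preempt_tickless() */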
+	u64 slice_ns = layer->tickless ? SCX_SLICE_INF : layer->slice_ns;
 	if (layer->fifo)
-		scx_bpf_dsq_insert(p, taskc->dsq_id, layer->slice_ns, enq_flags);
+		scx_bpf_dsq_insert(p, taskc->dsq_id, slice_ns, enq_flags);
 	else
-		scx_bpf_dsq_insert_vtime(p, taskc->dsq_id, layer->slice_ns, vtime, enq_flags);
+		scx_bpf_dsq_insert_vtime(p, taskc->dsq_id, slice_ns, vtime, enq_flags);
 	lstat_inc(LSTAT_ENQ_DSQ, layer, cpuc);
 
 	/*
@@ -3191,6 +3222,7 @@ void BPF_STRUCT_OPS(layered_dump, struct scx_dump_ctx *dctx)
  */
 struct layered_timer layered_timers[MAX_TIMERS] = {
 	{15LLU * NSEC_PER_SEC, CLOCK_BOOTTIME, 0},
+	{1LLU * NSEC_PER_MSEC, CLOCK_BOOTTIME, 0},
 };
 
 /**