
Commit 759119b

scx_layered: Add tickless layer support

Add an option to enable tickless scheduling on a layer. This may improve
throughput in certain scenarios by reducing the number of context switches.

Signed-off-by: Daniel Hodges <[email protected]>
1 parent ebf1a26 commit 759119b
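The mechanism is small: a task enqueued from a tickless layer is handed SCX_SLICE_INF instead of the layer's normal slice, so the periodic tick never forces a context switch, and when new work arrives on a CPU the currently running infinite-slice task has its slice trimmed so it can be preempted normally. A condensed sketch of the two halves, using the names from the diff below (an excerpt, not a standalone program):

	/* Enqueue side: tickless layers request an effectively infinite slice. */
	u64 slice_ns = layer->tickless ? SCX_SLICE_INF : layer->slice_ns;
	scx_bpf_dsq_insert(p, taskc->dsq_id, slice_ns, enq_flags);

	/*
	 * Preemption side (preempt_tickless()): if the CPU's current task is
	 * running with SCX_SLICE_INF, reset its slice to the owning layer's
	 * slice_ns, or to 1 ns when the CPU is not owned by any layer, so the
	 * newly enqueued work can get onto the CPU.
	 */
	preempt_tickless(cpuc);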

File tree: 5 files changed, +48 -3 lines

scheds/rust/scx_layered/src/bpf/intf.h (1 addition, 0 deletions)

@@ -338,6 +338,7 @@ struct layer {
 	bool allow_node_aligned;
 	bool skip_remote_node;
 	bool prev_over_idle_core;
+	bool tickless;
 	int growth_algo;
 
 	u64 nr_tasks;

scheds/rust/scx_layered/src/bpf/main.bpf.c (35 additions, 3 deletions)

@@ -58,6 +58,7 @@ volatile u64 layer_refresh_seq_avgruntime;
 
 /* Flag to enable or disable antistall feature */
 const volatile bool enable_antistall = true;
+const volatile bool enable_tickless = true;
 const volatile bool enable_match_debug = false;
 const volatile bool enable_gpu_support = false;
 /* Delay permitted, in seconds, before antistall activates */
@@ -184,6 +185,32 @@ static __always_inline bool is_scheduler_task(struct task_struct *p)
 	return (u32)p->tgid == layered_root_tgid;
 }
 
+static void preempt_tickless(struct cpu_ctx *cpuc)
+{
+	struct task_struct *curr;
+	struct layer *layer;
+
+	if (!enable_tickless)
+		return;
+
+	bpf_rcu_read_lock();
+	curr = scx_bpf_cpu_rq(cpuc->cpu)->curr;
+	if (curr->scx.slice == SCX_SLICE_INF) {
+		// If a tickless task is running on an unowned layer then preempt it.
+		if (cpuc->layer_id >= nr_layers) {
+			curr->scx.slice = 1;
+			bpf_rcu_read_unlock();
+			return;
+		}
+		if (!(layer = lookup_layer(cpuc->layer_id))) {
+			bpf_rcu_read_unlock();
+			return;
+		}
+		curr->scx.slice = layer->slice_ns;
+	}
+	bpf_rcu_read_unlock();
+}
+
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 	__type(key, u32);
@@ -1204,7 +1231,8 @@ s32 BPF_STRUCT_OPS(layered_select_cpu, struct task_struct *p, s32 prev_cpu, u64
 	if (cpu >= 0) {
 		lstat_inc(LSTAT_SEL_LOCAL, layer, cpuc);
 		taskc->dsq_id = SCX_DSQ_LOCAL;
-		scx_bpf_dsq_insert(p, taskc->dsq_id, layer->slice_ns, 0);
+		u64 slice_ns = layer->tickless ? SCX_SLICE_INF : layer->slice_ns;
+		scx_bpf_dsq_insert(p, taskc->dsq_id, slice_ns, 0);
 		return cpu;
 	}
 
@@ -1387,6 +1415,8 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 	if (!(cpuc = lookup_cpu_ctx(-1)) || !(taskc = lookup_task_ctx(p)))
 		return;
 
+	preempt_tickless(cpuc);
+
 	layer_id = taskc->layer_id;
 	if (!(layer = lookup_layer(layer_id)))
 		return;
@@ -1602,10 +1632,11 @@
 	lstats[LLC_LSTAT_CNT]++;
 
 	taskc->dsq_id = layer_dsq_id(layer_id, llc_id);
+	u64 slice_ns = layer->tickless ? SCX_SLICE_INF : layer->slice_ns;
 	if (layer->fifo)
-		scx_bpf_dsq_insert(p, taskc->dsq_id, layer->slice_ns, enq_flags);
+		scx_bpf_dsq_insert(p, taskc->dsq_id, slice_ns, enq_flags);
 	else
-		scx_bpf_dsq_insert_vtime(p, taskc->dsq_id, layer->slice_ns, vtime, enq_flags);
+		scx_bpf_dsq_insert_vtime(p, taskc->dsq_id, slice_ns, vtime, enq_flags);
 	lstat_inc(LSTAT_ENQ_DSQ, layer, cpuc);
 
 	/*
@@ -3191,6 +3222,7 @@ void BPF_STRUCT_OPS(layered_dump, struct scx_dump_ctx *dctx)
  */
 struct layered_timer layered_timers[MAX_TIMERS] = {
 	{15LLU * NSEC_PER_SEC, CLOCK_BOOTTIME, 0},
+	{1LLU * NSEC_PER_MSEC, CLOCK_BOOTTIME, 0},
 };
 
 /**

scheds/rust/scx_layered/src/bpf/timer.bpf.h (1 addition, 0 deletions)

@@ -29,6 +29,7 @@ struct layered_timer {
 
 enum layer_timer_callbacks {
 	ANTISTALL_TIMER,
+	TICKLESS_TIMER,
 	MAX_TIMERS,
 };
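TICKLESS_TIMER pairs with the new 1 ms CLOCK_BOOTTIME entry added to layered_timers in main.bpf.c above; the callback itself is not wired up in this diff. Purely as an illustration, such a handler could walk every CPU and let preempt_tickless() trim any task still running on an infinite slice. The sketch below is an assumption, not code from this commit: the callback name, its signature, and the per-CPU walk via scx_bpf_nr_cpu_ids()/lookup_cpu_ctx() are hypothetical.

	/* Hypothetical TICKLESS_TIMER handler -- not part of this commit. */
	static void tickless_timer_cb(void)
	{
		struct cpu_ctx *cpuc;
		s32 cpu;

		/* Every 1 ms, trim any task overrunning with SCX_SLICE_INF. */
		bpf_for(cpu, 0, scx_bpf_nr_cpu_ids()) {
			if (!(cpuc = lookup_cpu_ctx(cpu)))
				continue;
			preempt_tickless(cpuc);
		}
	}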

scheds/rust/scx_layered/src/config.rs (2 additions, 0 deletions)

@@ -142,6 +142,8 @@ pub struct LayerCommon {
     pub llcs: Vec<usize>,
     #[serde(default)]
     pub placement: LayerPlacement,
+    #[serde(default)]
+    pub tickless: bool,
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]

scheds/rust/scx_layered/src/main.rs (9 additions, 0 deletions)

@@ -130,6 +130,7 @@ lazy_static! {
                 nodes: vec![],
                 llcs: vec![],
                 placement: LayerPlacement::Standard,
+                tickless: false,
             },
         },
     },
@@ -165,6 +166,7 @@ lazy_static! {
                 nodes: vec![],
                 llcs: vec![],
                 placement: LayerPlacement::Standard,
+                tickless: false,
             },
         },
     },
@@ -204,6 +206,7 @@ lazy_static! {
                 nodes: vec![],
                 llcs: vec![],
                 placement: LayerPlacement::Standard,
+                tickless: false,
             },
         },
     },
@@ -241,6 +244,7 @@ lazy_static! {
                 nodes: vec![],
                 llcs: vec![],
                 placement: LayerPlacement::Standard,
+                tickless: false,
             },
         },
     },
@@ -1413,6 +1417,7 @@ impl<'a> Scheduler<'a> {
             disallow_preempt_after_us,
             xllc_mig_min_us,
             placement,
+            tickless,
             ..
         } = spec.kind.common();
 
@@ -1435,6 +1440,10 @@ impl<'a> Scheduler<'a> {
         layer.allow_node_aligned.write(*allow_node_aligned);
         layer.skip_remote_node.write(*skip_remote_node);
         layer.prev_over_idle_core.write(*prev_over_idle_core);
+        layer.tickless.write(*tickless);
+        if *tickless {
+            skel.maps.rodata_data.enable_tickless = *tickless;
+        }
         layer.growth_algo = growth_algo.as_bpf_enum();
         layer.weight = *weight;
         layer.disallow_open_after_ns = match disallow_open_after_us.unwrap() {
