This repository was archived by the owner on Aug 19, 2021. It is now read-only.

Commit 84d006b

Fixed drift in periodic events
Before this patch, the target calculation for periodic events occurred after the event was dispatched, but did not account for the overhead of the callback itself. Now the new target is calculated from the previous target alone, ignoring any drift introduced by the callback. This may lead to the queue never sleeping if a callback takes longer than its period, but the fairness of the scheduler should avoid any problems this may cause. Additionally, equeue_enqueue was restructured to avoid calling equeue_tick redundantly (a system call in some implementations).
1 parent 847b2f9 commit 84d006b
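
To make the drift concrete, here is a small standalone sketch (not equeue code; the period and callback cost below are made-up numbers) contrasting the old calculation, which measures the next target from the tick after the callback finishes, with the new one, which advances the previous target by the period:

#include <stdio.h>

int main(void) {
    const unsigned period = 10;        // requested period in ticks
    const unsigned callback_cost = 3;  // pretend every callback takes 3 ticks

    unsigned old_target = 0;  // old scheme: next = (tick after callback) + period
    unsigned new_target = 0;  // new scheme: next = previous target + period

    for (int i = 0; i < 5; i++) {
        printf("run %d: old fires at %2u, new fires at %2u\n",
               i, old_target, new_target);
        old_target = (old_target + callback_cost) + period;  // drift accumulates
        new_target = new_target + period;                    // fixed cadence
    }
    return 0;
}

With these numbers the old scheme fires at ticks 0, 13, 26, 39, 52 while the new one fires at 0, 10, 20, 30, 40, which is exactly the drift this commit removes.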

File tree

1 file changed: 9 additions, 6 deletions

equeue.c

Lines changed: 9 additions & 6 deletions
@@ -197,10 +197,10 @@ void equeue_dealloc(equeue_t *q, void *p) {
 
 
 // equeue scheduling functions
-static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned ms) {
+static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) {
     // setup event and hash local id with buffer offset for unique id
     int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer);
-    e->target = equeue_tick() + ms;
+    e->target = tick + equeue_clampdiff(e->target, tick);
     e->generation = q->generation;
 
     equeue_mutex_lock(&q->queuelock);
@@ -235,7 +235,8 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned ms) {
     // notify background timer
     if ((q->background.update && q->background.active) &&
             (q->queue == e && !e->sibling)) {
-        q->background.update(q->background.timer, ms);
+        q->background.update(q->background.timer,
+                equeue_clampdiff(e->target, tick));
     }
 
     equeue_mutex_unlock(&q->queuelock);
@@ -332,10 +333,11 @@ static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target) {
 
 int equeue_post(equeue_t *q, void (*cb)(void*), void *p) {
     struct equeue_event *e = (struct equeue_event*)p - 1;
+    unsigned tick = equeue_tick();
     e->cb = cb;
-    e->target = equeue_clampdiff(e->target, 0);
+    e->target = tick + e->target;
 
-    int id = equeue_enqueue(q, e, e->target);
+    int id = equeue_enqueue(q, e, tick);
     equeue_sema_signal(&q->eventsema);
     return id;
 }
@@ -380,7 +382,8 @@ void equeue_dispatch(equeue_t *q, int ms) {
 
             // reenqueue periodic events or deallocate
             if (e->period >= 0) {
-                equeue_enqueue(q, e, e->period);
+                e->target += e->period;
+                equeue_enqueue(q, e, equeue_tick());
             } else {
                 equeue_incid(q, e);
                 equeue_dealloc(q, e+1);
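
For context, the re-enqueue path changed above is the one exercised by periodic events. The following usage sketch assumes the library's public equeue.h API (equeue_create, equeue_call_every, equeue_dispatch, equeue_destroy, and the EQUEUE_EVENT_SIZE macro) and is only meant to show where the patched code runs, not to reproduce the project's tests:

#include <stdio.h>
#include "equeue.h"

// Hypothetical example: a periodic callback registered with equeue_call_every.
// After this patch, the callback's cadence follows the original target plus
// multiples of the period, even if the callback itself takes noticeable time.
static void tick(void *p) {
    unsigned *count = p;
    printf("tick %u\n", (*count)++);
}

int main(void) {
    equeue_t q;
    if (equeue_create(&q, 32 * EQUEUE_EVENT_SIZE) < 0) {
        return 1;
    }

    unsigned count = 0;
    equeue_call_every(&q, 100, tick, &count);  // post a 100 ms periodic event

    equeue_dispatch(&q, 1000);                 // dispatch events for about 1 s

    equeue_destroy(&q);
    return 0;
}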
