2 changes: 2 additions & 0 deletions scheds/include/scx/common.bpf.h
@@ -104,6 +104,8 @@ s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct rq *scx_bpf_rq_locked(void) __ksym;
struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym __weak;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
16 changes: 16 additions & 0 deletions scheds/include/scx/compat.bpf.h
@@ -197,6 +197,22 @@ static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
#endif /* HAVE_SCX_ENQ_CPU_SELECTED */
}

/*
* v6.18: Add a helper to retrieve the current task running on a CPU.
*
* Keep this helper available until v6.20 for compatibility.
*/
static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
{
struct rq *rq;

if (bpf_ksym_exists(scx_bpf_cpu_curr))
return scx_bpf_cpu_curr(cpu);

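/*
 * Fallback for kernels without the scx_bpf_cpu_curr() kfunc: read
 * rq->curr directly from the CPU's run queue.
 */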
rq = scx_bpf_cpu_rq(cpu);

return rq ? rq->curr : NULL;
}

#define scx_bpf_now() \
(bpf_ksym_exists(scx_bpf_now) ? \
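A minimal usage sketch of the new compat helper, assuming scx_bpf_cpu_curr() must be called under an RCU read lock (as every caller below does); the wrapper name ask_cpu_to_yield() is illustrative and not part of this change:

static inline void ask_cpu_to_yield(s32 cpu)
{
	struct task_struct *p;

	bpf_rcu_read_lock();
	p = __COMPAT_scx_bpf_cpu_curr(cpu);

	/* Skip the idle task; otherwise zero the slice so the task yields at the next scheduling point. */
	if (p && !(p->flags & PF_IDLE))
		WRITE_ONCE(p->scx.slice, 0);

	bpf_rcu_read_unlock();
}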
15 changes: 11 additions & 4 deletions scheds/rust/scx_cosmos/src/bpf/main.bpf.c
@@ -332,13 +332,20 @@ static inline const struct cpumask *get_idle_smtmask(s32 cpu)
*/
static inline bool is_cpu_idle(s32 cpu)
{
struct rq *rq = scx_bpf_cpu_rq(cpu);
struct task_struct *p;
bool idle;

if (!rq) {
scx_bpf_error("Failed to access rq %d", cpu);
bpf_rcu_read_lock();
p = __COMPAT_scx_bpf_cpu_curr(cpu);

if (!p) {
bpf_rcu_read_unlock();
scx_bpf_error("Failed to access rq->curr %d", cpu);
return false;
}
return rq->curr->flags & PF_IDLE;
idle = p->flags & PF_IDLE;
bpf_rcu_read_unlock();
return idle;
}

/*
8 changes: 2 additions & 6 deletions scheds/rust/scx_flash/src/bpf/main.bpf.c
@@ -1991,16 +1991,12 @@ static int tickless_timerfn(void *map, int *key, struct bpf_timer *timer)
*/
bpf_rcu_read_lock();
bpf_for(cpu, 0, nr_cpu_ids) {
struct task_struct *p;
struct rq *rq = scx_bpf_cpu_rq(cpu);
struct task_struct *p = __COMPAT_scx_bpf_cpu_curr(cpu);

if (!rq)
continue;
/*
* Ignore CPU if idle task is running.
*/
p = rq->curr;
if (p->flags & PF_IDLE)
if (!p || p->flags & PF_IDLE)
continue;

/*
12 changes: 7 additions & 5 deletions scheds/rust/scx_lavd/src/bpf/preempt.bpf.c
@@ -186,6 +186,7 @@ static struct cpu_ctx *find_victim_cpu(const struct cpumask *cpumask,

static void ask_cpu_yield_after(struct cpu_ctx *victim_cpuc, u64 new_slice)
{
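/*
 * rq->curr is an RCU-protected pointer, so hold the RCU read lock
 * for as long as the victim task is dereferenced below.
 */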
bpf_rcu_read_lock();
/*
* Note that we avoid using scx_bpf_kick_cpu() on purpose.
* While scx_bpf_kick_cpu() can trigger a task preemption immediately,
@@ -196,11 +197,9 @@ static void ask_cpu_yield_after(struct cpu_ctx *victim_cpuc, u64 new_slice)
* set the victim task's time slice to zero so the victim task yields
* the CPU in the next scheduling point.
*/
struct rq *victim_rq;
struct task_struct *victim_p;
struct task_struct *victim_p = __COMPAT_scx_bpf_cpu_curr(victim_cpuc->cpu_id);

victim_rq = scx_bpf_cpu_rq(victim_cpuc->cpu_id);
if (victim_rq && (victim_p = victim_rq->curr)) {
if (victim_p) {
/*
* Finding a victim is racy, but we do not coordinate. Thus,
* two different CPUs can choose the same victim CPU. We do not
@@ -220,8 +219,10 @@ static void ask_cpu_yield_after(struct cpu_ctx *victim_cpuc, u64 new_slice)
* (SCX_SLICE_DFL, 20 msec).
*/
u64 old = victim_cpuc->est_stopping_clk;
if (!old)
if (!old) {
bpf_rcu_read_unlock();
return;
}

/*
* If the new slice is one, this is the last time to be kicked,
@@ -240,6 +241,7 @@ static void ask_cpu_yield_after(struct cpu_ctx *victim_cpuc, u64 new_slice)
WRITE_ONCE(victim_p->scx.slice, new_slice);
}
}
bpf_rcu_read_unlock();
}

__hidden
11 changes: 7 additions & 4 deletions scheds/rust/scx_layered/src/bpf/main.bpf.c
@@ -1419,7 +1419,6 @@ static bool try_preempt_cpu(s32 cand, struct task_struct *p, struct task_ctx *ta
struct layer *layer, u64 flags)
{
struct cpu_ctx *cpuc, *cand_cpuc, *sib_cpuc = NULL;
struct rq *rq;
struct task_struct *curr;
const struct cpumask *idle_cpumask;
bool cand_idle;
@@ -1446,19 +1445,23 @@ static bool try_preempt_cpu(s32 cand, struct task_struct *p, struct task_ctx *ta
if (scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cand))
return false;

rq = scx_bpf_cpu_rq(cand);
if (!rq)
bpf_rcu_read_lock();
curr = __COMPAT_scx_bpf_cpu_curr(cand);
if (!curr) {
bpf_rcu_read_unlock();
return false;
curr = rq->curr;
}

if (ext_sched_class_addr && idle_sched_class_addr &&
((u64)curr->sched_class != ext_sched_class_addr) &&
((u64)curr->sched_class != idle_sched_class_addr)) {
bpf_rcu_read_unlock();
if (!(cpuc = lookup_cpu_ctx(-1)))
return false;
gstat_inc(GSTAT_SKIP_PREEMPT, cpuc);
return false;
}
bpf_rcu_read_unlock();

/*
* Don't preempt if protection against is in effect. However, open
7 changes: 4 additions & 3 deletions scheds/rust/scx_tickless/src/bpf/main.bpf.c
@@ -412,14 +412,14 @@ static int sched_timerfn(void *map, int *key, struct bpf_timer *timer)
/*
* Check if we need to preempt the running tasks.
*/
bpf_rcu_read_lock();
bpf_for(cpu, 0, nr_cpu_ids) {
struct task_struct *p;
struct task_struct *p = __COMPAT_scx_bpf_cpu_curr(cpu);

/*
* Ignore CPU if idle task is running.
*/
p = scx_bpf_cpu_rq(cpu)->curr;
if (p->flags & PF_IDLE)
if (!p || p->flags & PF_IDLE)
continue;

/*
@@ -438,6 +438,7 @@ static int sched_timerfn(void *map, int *key, struct bpf_timer *timer)
__sync_fetch_and_add(&nr_preemptions, 1);
}
}
bpf_rcu_read_unlock();

bpf_timer_start(timer, tick_interval_ns(), 0);
