author      Tejun Heo <tj@kernel.org>    2024-09-27 00:56:46 +0200
committer   Tejun Heo <tj@kernel.org>    2024-09-27 00:56:46 +0200
commit      63fb3ec80516b256e9fc91de48567f5eda61d135 (patch)
tree        8bba42f2f1cff6afa80867431589714f5736ae00 /kernel
parent      scx_flatcg: Use a user DSQ for fallback instead of SCX_DSQ_GLOBAL (diff)
sched_ext: Allow only user DSQs for scx_bpf_consume(), scx_bpf_dsq_nr_queued() and bpf_iter_scx_dsq_new()
SCX_DSQ_GLOBAL is special in that it can't be used as a priority queue and is consumed implicitly, but all BPF DSQ related kfuncs could be used on it. SCX_DSQ_GLOBAL will be split per-node for scalability and those operations won't make sense anymore.

Disallow SCX_DSQ_GLOBAL on scx_bpf_consume(), scx_bpf_dsq_nr_queued() and bpf_iter_scx_dsq_new(). This means that SCX_DSQ_GLOBAL can only be used as a dispatch target from BPF schedulers.

With scx_flatcg, which was using SCX_DSQ_GLOBAL as its fallback DSQ, already updated, this shouldn't affect any schedulers.

This leaves find_dsq_for_dispatch() as the only user of find_non_local_dsq(). Open-code it and remove find_non_local_dsq().

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
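For reference, a minimal sketch (not part of this patch) of the BPF-side pattern that remains valid after this change: the scheduler creates its own user DSQ and uses it as the fallback instead of SCX_DSQ_GLOBAL, mirroring the scx_flatcg update referenced above. FALLBACK_DSQ_ID and the callback names are made up for illustration, and the BPF_STRUCT_OPS()/SCX_OPS_DEFINE() helpers are assumed to come from the in-tree tools/sched_ext headers.

    /* Minimal sketch, assuming the tools/sched_ext BPF helpers. */
    #include <scx/common.bpf.h>

    #define FALLBACK_DSQ_ID 0               /* hypothetical user DSQ ID */

    s32 BPF_STRUCT_OPS_SLEEPABLE(fallback_init)
    {
            /* Create the user DSQ once at load time; -1 == any NUMA node. */
            return scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
    }

    void BPF_STRUCT_OPS(fallback_enqueue, struct task_struct *p, u64 enq_flags)
    {
            /* Queue on the user DSQ rather than SCX_DSQ_GLOBAL. */
            scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_DFL, enq_flags);
    }

    void BPF_STRUCT_OPS(fallback_dispatch, s32 cpu, struct task_struct *prev)
    {
            /* scx_bpf_consume() now accepts only user DSQs such as this one. */
            scx_bpf_consume(FALLBACK_DSQ_ID);
    }

    SCX_OPS_DEFINE(fallback_ops,
                   .enqueue  = (void *)fallback_enqueue,
                   .dispatch = (void *)fallback_dispatch,
                   .init     = (void *)fallback_init,
                   .name     = "fallback");

    char _license[] SEC("license") = "GPL";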
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/ext.c | 22
1 file changed, 8 insertions, 14 deletions
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index d74d1fe06999..ed2b914c42d1 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1808,16 +1808,6 @@ static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
         return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
 }
 
-static struct scx_dispatch_q *find_non_local_dsq(u64 dsq_id)
-{
-        lockdep_assert(rcu_read_lock_any_held());
-
-        if (dsq_id == SCX_DSQ_GLOBAL)
-                return &scx_dsq_global;
-        else
-                return find_user_dsq(dsq_id);
-}
-
 static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
                                                     struct task_struct *p)
 {
@@ -1835,7 +1825,11 @@ static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
                 return &cpu_rq(cpu)->scx.local_dsq;
         }
 
-        dsq = find_non_local_dsq(dsq_id);
+        if (dsq_id == SCX_DSQ_GLOBAL)
+                dsq = &scx_dsq_global;
+        else
+                dsq = find_user_dsq(dsq_id);
+
         if (unlikely(!dsq)) {
                 scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
                               dsq_id, p->comm, p->pid);
@@ -6176,7 +6170,7 @@ __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
 
         flush_dispatch_buf(dspc->rq);
 
-        dsq = find_non_local_dsq(dsq_id);
+        dsq = find_user_dsq(dsq_id);
         if (unlikely(!dsq)) {
                 scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
                 return false;
@@ -6497,7 +6491,7 @@ __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
                         goto out;
                 }
         } else {
-                dsq = find_non_local_dsq(dsq_id);
+                dsq = find_user_dsq(dsq_id);
                 if (dsq) {
                         ret = READ_ONCE(dsq->nr);
                         goto out;
@@ -6546,7 +6540,7 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
         if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
                 return -EINVAL;
 
-        kit->dsq = find_non_local_dsq(dsq_id);
+        kit->dsq = find_user_dsq(dsq_id);
         if (!kit->dsq)
                 return -ENOENT;
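
As a closing illustration (again not from the patch), the query-side kfuncs touched above would now be pointed at a user DSQ in the same way. The helper below is hypothetical, reuses FALLBACK_DSQ_ID from the earlier sketch, and assumes the iterator kfunc declarations from the tools/sched_ext headers; with this change, passing SCX_DSQ_GLOBAL to either call simply fails the find_user_dsq() lookup.

    /* Hypothetical helper; FALLBACK_DSQ_ID as in the sketch above. */
    static void report_fallback_dsq(void)
    {
            struct bpf_iter_scx_dsq it;
            struct task_struct *p;

            /* Number of tasks currently queued on the user DSQ. */
            bpf_printk("fallback nr_queued=%d",
                       scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID));

            /* Walk the queued tasks with the DSQ iterator kfuncs. */
            bpf_iter_scx_dsq_new(&it, FALLBACK_DSQ_ID, 0);
            while ((p = bpf_iter_scx_dsq_next(&it)))
                    bpf_printk("queued pid=%d", p->pid);
            bpf_iter_scx_dsq_destroy(&it);
    }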