sched_ext: Factor out reenq_local() from scx_bpf_reenqueue_local()
author     Tejun Heo <tj@kernel.org>
           Sat, 25 Oct 2025 00:18:48 +0000 (14:18 -1000)
committer  Tejun Heo <tj@kernel.org>
           Wed, 29 Oct 2025 15:29:04 +0000 (05:29 -1000)
Factor out the core re-enqueue logic from scx_bpf_reenqueue_local() into a
new reenq_local() helper function. scx_bpf_reenqueue_local() now handles the
BPF kfunc checks and calls reenq_local() to perform the actual work.

This is a prep patch to allow reenq_local() to be called from other contexts.

Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
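
For illustration only, a hypothetical sketch (not part of this commit) of the kind of
caller the split enables: a path that already holds the rq lock could invoke
reenq_local() directly, while scx_bpf_reenqueue_local() keeps the RCU and
SCX_KF_CPU_RELEASE checks for BPF callers. The function name below is made up
for the example.

	/* Hypothetical future caller; the rq lock must already be held. */
	static u32 reenq_local_example(struct rq *rq)
	{
		lockdep_assert_rq_held(rq);	/* reenq_local() requires the rq lock */
		return reenq_local(rq);		/* number of tasks re-enqueued in the BPF scheduler */
	}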
kernel/sched/ext.c

index 93ee196841d80c5260edf2058ea65ceeec40c344..d13ce92c3f0187579e806511d83f342039349316 100644
@@ -5879,32 +5879,12 @@ static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
        .set                    = &scx_kfunc_ids_dispatch,
 };
 
-__bpf_kfunc_start_defs();
-
-/**
- * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
- *
- * Iterate over all of the tasks currently enqueued on the local DSQ of the
- * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
- * processed tasks. Can only be called from ops.cpu_release().
- */
-__bpf_kfunc u32 scx_bpf_reenqueue_local(void)
+static u32 reenq_local(struct rq *rq)
 {
-       struct scx_sched *sch;
        LIST_HEAD(tasks);
        u32 nr_enqueued = 0;
-       struct rq *rq;
        struct task_struct *p, *n;
 
-       guard(rcu)();
-       sch = rcu_dereference(scx_root);
-       if (unlikely(!sch))
-               return 0;
-
-       if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE))
-               return 0;
-
-       rq = cpu_rq(smp_processor_id());
        lockdep_assert_rq_held(rq);
 
        /*
@@ -5941,6 +5921,34 @@ __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
        return nr_enqueued;
 }
 
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
+ *
+ * Iterate over all of the tasks currently enqueued on the local DSQ of the
+ * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
+ * processed tasks. Can only be called from ops.cpu_release().
+ */
+__bpf_kfunc u32 scx_bpf_reenqueue_local(void)
+{
+       struct scx_sched *sch;
+       struct rq *rq;
+
+       guard(rcu)();
+       sch = rcu_dereference(scx_root);
+       if (unlikely(!sch))
+               return 0;
+
+       if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE))
+               return 0;
+
+       rq = cpu_rq(smp_processor_id());
+       lockdep_assert_rq_held(rq);
+
+       return reenq_local(rq);
+}
+
 __bpf_kfunc_end_defs();
 
 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)