tools/sched_ext: Strip compatibility macros for cgroup and dispatch APIs
author	Tejun Heo <tj@kernel.org>
Tue, 7 Oct 2025 01:51:44 +0000 (15:51 -1000)
committer	Tejun Heo <tj@kernel.org>
Mon, 13 Oct 2025 18:49:29 +0000 (08:49 -1000)
Enough time has passed since the introduction of scx_bpf_task_cgroup() and
the scx_bpf_dispatch* -> scx_bpf_dsq* kfunc renaming. Strip the compatibility
macros.
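
With the wrappers gone, schedulers call the kfuncs directly. The following is a
minimal illustrative sketch, not code from this patch: the ops names and the
EXAMPLE_DSQ id are hypothetical, while the kfunc names and signatures are the
ones this patch leaves in place (scx_bpf_task_cgroup(), scx_bpf_dsq_insert(),
scx_bpf_dsq_move*(), scx_bpf_dsq_move_to_local()).

	/* Illustrative only; assumes the usual scx common.bpf.h includes. */
	#define EXAMPLE_DSQ	0	/* hypothetical custom DSQ, created in ops.init() */

	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
	{
		struct cgroup *cgrp;

		/* was __COMPAT_scx_bpf_task_cgroup(p) */
		cgrp = scx_bpf_task_cgroup(p);
		/* ... look up per-cgroup state ... */
		bpf_cgroup_release(cgrp);

		/* was scx_bpf_dispatch(), renamed to scx_bpf_dsq_insert() */
		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
	}

	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
	{
		struct task_struct *p;

		bpf_for_each(scx_dsq, p, EXAMPLE_DSQ, 0) {
			/* was the __COMPAT_scx_bpf_dsq_move_set_slice()/_move() pair */
			scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, SCX_SLICE_DFL);
			if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL, 0))
				break;
		}

		/* was scx_bpf_consume(), renamed to scx_bpf_dsq_move_to_local() */
		scx_bpf_dsq_move_to_local(EXAMPLE_DSQ);
	}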

Acked-by: Changwoo Min <changwoo@igalia.com>
Acked-by: Andrea Righi <arighi@nvidia.com>
Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
tools/sched_ext/include/scx/compat.bpf.h
tools/sched_ext/scx_flatcg.bpf.c
tools/sched_ext/scx_qmap.bpf.c

index dd9144624dc99e5489739563c4b9ab02cd2117ae..d979f16a3ae2be0746ac50d11506630b3103fc7e 100644 (file)
        __ret;                                                                  \
 })
 
-/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
-#define __COMPAT_scx_bpf_task_cgroup(p)                                                \
-       (bpf_ksym_exists(scx_bpf_task_cgroup) ?                                 \
-        scx_bpf_task_cgroup((p)) : NULL)
-
 /*
- * v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are
- * renamed to unload the verb.
- *
- * Build error is triggered if old names are used. New binaries work with both
- * new and old names. The compat macros will be removed on v6.15 release.
+ * v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits")
  *
- * scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
- * 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
- * Preserve __COMPAT macros until v6.15.
+ * Compat macro will be dropped on v6.19 release.
  */
-void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
-void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
-bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
-void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
-void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
-bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
-bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
 int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;
 
-#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags)                                \
-       (bpf_ksym_exists(scx_bpf_dsq_insert) ?                                  \
-        scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) :              \
-        scx_bpf_dispatch___compat((p), (dsq_id), (slice), (enq_flags)))
-
-#define scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags)           \
-       (bpf_ksym_exists(scx_bpf_dsq_insert_vtime) ?                            \
-        scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \
-        scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags)))
-
-#define scx_bpf_dsq_move_to_local(dsq_id)                                      \
-       (bpf_ksym_exists(scx_bpf_dsq_move_to_local) ?                           \
-        scx_bpf_dsq_move_to_local((dsq_id)) :                                  \
-        scx_bpf_consume___compat((dsq_id)))
-
-#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice)                   \
-       (bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ?                          \
-        scx_bpf_dsq_move_set_slice((it__iter), (slice)) :                      \
-        (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ?       \
-         scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) :   \
-         (void)0))
-
-#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime)                   \
-       (bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ?                          \
-        scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) :                      \
-        (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ?       \
-         scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) :   \
-         (void) 0))
-
-#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags)              \
-       (bpf_ksym_exists(scx_bpf_dsq_move) ?                                    \
-        scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) :             \
-        (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ?                 \
-         scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
-         false))
-
-#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags)                \
-       (bpf_ksym_exists(scx_bpf_dsq_move_vtime) ?                              \
-        scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) :       \
-        (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ?           \
-         scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
-         false))
-
 #define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz)          \
        (bpf_ksym_exists(bpf_cpumask_populate) ?                        \
         (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)
 
-#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags)                          \
-       _Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")
-
-#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags)             \
-       _Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()")
-
-#define scx_bpf_consume(dsq_id) ({                                             \
-       _Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \
-       false;                                                                  \
-})
-
-#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice)           \
-       _Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()")
-
-#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime)           \
-       _Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()")
-
-#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({   \
-       _Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \
-       false;                                                                  \
-})
-
-#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({  \
-       _Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \
-       false;                                                                  \
-})
-
-#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice)          \
-       _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()")
-
-#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime)          \
-       _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()")
-
-#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({  \
-       _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move()"); \
-       false;                                                                  \
-})
-
-#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({  \
-       _Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \
-       false;                                                                  \
-})
-
 /**
  * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
  * in a compatible way. We will preserve this __COMPAT helper until v6.16.
index 2c720e3ecad59369eef61b98613c3670a8bcae33..43126858b8e4c5b300e78f842ebdd03f60888fa6 100644 (file)
@@ -382,7 +382,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
                return;
        }
 
-       cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+       cgrp = scx_bpf_task_cgroup(p);
        cgc = find_cgrp_ctx(cgrp);
        if (!cgc)
                goto out_release;
@@ -508,7 +508,7 @@ void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
 {
        struct cgroup *cgrp;
 
-       cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+       cgrp = scx_bpf_task_cgroup(p);
        update_active_weight_sums(cgrp, true);
        bpf_cgroup_release(cgrp);
 }
@@ -521,7 +521,7 @@ void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
        if (fifo_sched)
                return;
 
-       cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+       cgrp = scx_bpf_task_cgroup(p);
        cgc = find_cgrp_ctx(cgrp);
        if (cgc) {
                /*
@@ -564,7 +564,7 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
        if (!taskc->bypassed_at)
                return;
 
-       cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+       cgrp = scx_bpf_task_cgroup(p);
        cgc = find_cgrp_ctx(cgrp);
        if (cgc) {
                __sync_fetch_and_add(&cgc->cvtime_delta,
@@ -578,7 +578,7 @@ void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
 {
        struct cgroup *cgrp;
 
-       cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+       cgrp = scx_bpf_task_cgroup(p);
        update_active_weight_sums(cgrp, false);
        bpf_cgroup_release(cgrp);
 }
index 3072b593f89816139dfec80908736a7bf56972bb..c67dac78a4c6518ea3c5b86e8c303b61f7e8538b 100644 (file)
@@ -320,12 +320,9 @@ static bool dispatch_highpri(bool from_timer)
 
                if (tctx->highpri) {
                        /* exercise the set_*() and vtime interface too */
-                       __COMPAT_scx_bpf_dsq_move_set_slice(
-                               BPF_FOR_EACH_ITER, slice_ns * 2);
-                       __COMPAT_scx_bpf_dsq_move_set_vtime(
-                               BPF_FOR_EACH_ITER, highpri_seq++);
-                       __COMPAT_scx_bpf_dsq_move_vtime(
-                               BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
+                       scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, slice_ns * 2);
+                       scx_bpf_dsq_move_set_vtime(BPF_FOR_EACH_ITER, highpri_seq++);
+                       scx_bpf_dsq_move_vtime(BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
                }
        }
 
@@ -342,9 +339,8 @@ static bool dispatch_highpri(bool from_timer)
                else
                        cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
 
-               if (__COMPAT_scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
-                                             SCX_DSQ_LOCAL_ON | cpu,
-                                             SCX_ENQ_PREEMPT)) {
+               if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL_ON | cpu,
+                                    SCX_ENQ_PREEMPT)) {
                        if (cpu == this_cpu) {
                                dispatched = true;
                                __sync_fetch_and_add(&nr_expedited_local, 1);