sched_ext: Merge branch 'sched/core' of git://git.kernel.org/pub/scm/linux/kernel...
author    Tejun Heo <tj@kernel.org>
          Thu, 16 Oct 2025 18:45:38 +0000 (08:45 -1000)
committer Tejun Heo <tj@kernel.org>
          Thu, 16 Oct 2025 18:45:38 +0000 (08:45 -1000)
Pull in tip/sched/core to receive:

 50653216e4ff ("sched: Add support to pick functions to take rf")
 4c95380701f5 ("sched/ext: Fold balance_scx() into pick_task_scx()")

which will enable clean integration of DL server support, among other things.
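
For context, the interface change from 50653216e4ff means ->pick_task() now
receives the rq_flags, so a class can drop and re-take the rq lock while
picking. Roughly (illustrative sketch, not copied verbatim from the tree;
the exact sched_class declaration is assumed):

  /* sched_class callback shape after 50653216e4ff (assumed declaration) */
  struct task_struct *(*pick_task)(struct rq *rq, struct rq_flags *rf);

  /* sched_ext's implementation, as seen in the diff below */
  static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf);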

This conflicts with the following from sched_ext/for-6.18-fixes:

 a8ad873113d3 ("sched_ext: defer queue_balance_callback() until after ops.dispatch")

which adds a maybe_queue_balance_callback() invocation to balance_scx(); that
function is removed by 50653216e4ff. Resolve the conflict by moving the
invocation to the equivalent location in pick_task_scx().
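
Condensed view of the resolved flow at the top of pick_task_scx() (the
combined diff below is authoritative; the remaining pick logic is omitted
here):

  static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
  {
          struct task_struct *prev = rq->curr;
          bool keep_prev, kick_idle = false;
          struct task_struct *p;

          rq_modified_clear(rq);

          /* balance with the rq lock droppable, as balance_scx() used to do */
          rq_unpin_lock(rq, rf);
          balance_one(rq, prev);
          rq_repin_lock(rq, rf);

          /* moved here from balance_scx() by this merge resolution */
          maybe_queue_balance_callback(rq);

          if (rq_modified_above(rq, &ext_sched_class))
                  return RETRY_TASK;

          keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
          ...
  }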

Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/sched/ext.c
kernel/sched/sched.h

index 5d6fdb31f18a9b2247cc258b17a8a430f3f6bdae,49f4a9e763486cf5f5dc6da25abd4d28497a2b90..adff739b396ce000832d1cd3e046f1ef40c05880
@@@ -2387,41 -2298,22 +2351,26 @@@ static struct task_struct *first_local_
                                        struct task_struct, scx.dsq_list.node);
  }
  
- static struct task_struct *pick_task_scx(struct rq *rq)
+ static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
  {
        struct task_struct *prev = rq->curr;
+       bool keep_prev, kick_idle = false;
        struct task_struct *p;
-       bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
-       bool kick_idle = false;
  
-       /*
-        * WORKAROUND:
-        *
-        * %SCX_RQ_BAL_KEEP should be set iff $prev is on SCX as it must just
-        * have gone through balance_scx(). Unfortunately, there currently is a
-        * bug where fair could say yes on balance() but no on pick_task(),
-        * which then ends up calling pick_task_scx() without preceding
-        * balance_scx().
-        *
-        * Keep running @prev if possible and avoid stalling from entering idle
-        * without balancing.
-        *
-        * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
-        * if pick_task_scx() is called without preceding balance_scx().
-        */
-       if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
-               if (prev->scx.flags & SCX_TASK_QUEUED) {
-                       keep_prev = true;
-               } else {
-                       keep_prev = false;
-                       kick_idle = true;
-               }
-       } else if (unlikely(keep_prev &&
-                           prev->sched_class != &ext_sched_class)) {
-               /*
-                * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
-                * conditional on scx_enabled() and may have been skipped.
-                */
+       rq_modified_clear(rq);
++
+       rq_unpin_lock(rq, rf);
+       balance_one(rq, prev);
+       rq_repin_lock(rq, rf);
++
++      maybe_queue_balance_callback(rq);
++
+       if (rq_modified_above(rq, &ext_sched_class))
+               return RETRY_TASK;
+       keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
+       if (unlikely(keep_prev &&
+                    prev->sched_class != &ext_sched_class)) {
                WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED);
                keep_prev = false;
        }
Simple merge