workqueue: Let DISASSOCIATED workers follow unbound wq cpumask changes
author Lai Jiangshan <jiangshan.ljs@antgroup.com>
Mon, 17 Nov 2025 03:09:12 +0000 (11:09 +0800)
committer Tejun Heo <tj@kernel.org>
Thu, 20 Nov 2025 20:27:55 +0000 (10:27 -1000)
When workqueue cpumask changes are committed, the affinity of DISASSOCIATED
workers is not touched. This can be a problem down the line for isolated
setups when the DISASSOCIATED pools still have work items to run after the
CPU goes offline.

Make sure the workers' affinity is updated every time the workqueue cpumask
changes, so that these workers cannot break isolation.

Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Waiman Long <longman@redhat.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/workqueue.c

index bc673ceaac554cf5df0339c99d781896b5c9e33e..5916342ba6e3a1db60caae21c2904705f62d7f4d 100644
@@ -6926,6 +6926,10 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
        }
 
        if (!ret) {
+               int cpu;
+               struct worker_pool *pool;
+               struct worker *worker;
+
                mutex_lock(&wq_pool_attach_mutex);
                cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
                /* rescuer needs to respect cpumask changes when it is not attached */
@@ -6933,6 +6937,15 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
                        if (wq->rescuer && !wq->rescuer->pool)
                                unbind_worker(wq->rescuer);
                }
+               /* DISASSOCIATED worker needs to respect wq_unbound_cpumask */
+               for_each_possible_cpu(cpu) {
+                       for_each_cpu_worker_pool(pool, cpu) {
+                               if (!(pool->flags & POOL_DISASSOCIATED))
+                                       continue;
+                               for_each_pool_worker(worker, pool)
+                                       unbind_worker(worker);
+                       }
+               }
                mutex_unlock(&wq_pool_attach_mutex);
        }
        return ret;
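
For context, here is a minimal sketch of the unbind_worker() helper that the
new loop calls for each worker of a POOL_DISASSOCIATED per-CPU pool. It is
simplified from kernel/workqueue.c and not the exact upstream source; details
vary by kernel version. The caller holds wq_pool_attach_mutex, which keeps
wq_unbound_cpumask stable while the affinity is updated.

/*
 * Sketch (simplified): detach a worker from any per-CPU binding and make it
 * follow wq_unbound_cpumask.  Called with wq_pool_attach_mutex held.
 */
static void unbind_worker(struct worker *worker)
{
	lockdep_assert_held(&wq_pool_attach_mutex);

	/* Drop the strict per-CPU binding so the scheduler may migrate the task. */
	kthread_set_per_cpu(worker->task, -1);

	/* Follow the unbound/isolation mask while it still has active CPUs ... */
	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
						  wq_unbound_cpumask) < 0);
	else	/* ... otherwise fall back to any possible CPU. */
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
						  cpu_possible_mask) < 0);
}

With the hunk above, this path now runs for every worker of a DISASSOCIATED
per-CPU pool whenever wq_unbound_cpumask is updated, rather than only for
rescuers that are not attached to a pool.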