This patch introduces a new function schedule_on_each_cpu() that uses keventd
to run the LRU draining on each processor.  Processors disable preemption when
dealing with the LRU caches (these are per processor) and thus executing LRU
draining from another process is safe.

Signed-off-by: Christoph Lameter

Index: linux-2.6.15-rc5/include/linux/workqueue.h
===================================================================
--- linux-2.6.15-rc5.orig/include/linux/workqueue.h	2005-12-03 21:10:42.000000000 -0800
+++ linux-2.6.15-rc5/include/linux/workqueue.h	2005-12-07 13:11:09.000000000 -0800
@@ -65,6 +65,7 @@ extern int FASTCALL(schedule_work(struct
 extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
 
 extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
Index: linux-2.6.15-rc5/kernel/workqueue.c
===================================================================
--- linux-2.6.15-rc5.orig/kernel/workqueue.c	2005-12-03 21:10:42.000000000 -0800
+++ linux-2.6.15-rc5/kernel/workqueue.c	2005-12-07 13:11:09.000000000 -0800
@@ -419,6 +419,33 @@ int schedule_delayed_work_on(int cpu,
 	return ret;
 }
 
+/*
+ * schedule_on_each_cpu - call a function via keventd on each online CPU
+ * @func: the function to call
+ * @info: a pointer passed through to @func
+ *
+ * Queues one work item per online CPU on that CPU's keventd workqueue and
+ * waits (flush_workqueue) until every one has completed before returning.
+ *
+ * Returns 0 on success or -ENOMEM if the work items could not be allocated.
+ */
+int schedule_on_each_cpu(void (*func)(void *info), void *info)
+{
+	int cpu;
+	struct work_struct *works;
+
+	works = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+	if (!works)
+		return -ENOMEM;
+
+	for_each_online_cpu(cpu) {
+		INIT_WORK(works + cpu, func, info);
+		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
+			     works + cpu);
+	}
+	flush_workqueue(keventd_wq);
+	kfree(works);
+	return 0;
+}
+
 void flush_scheduled_work(void)
 {
 	flush_workqueue(keventd_wq);