void sched_ext_dead(struct task_struct *p);
void print_scx_info(const char *log_lvl, struct task_struct *p);
void scx_softlockup(u32 dur_s);
+bool scx_hardlockup(void);
bool scx_rcu_cpu_stall(void);
#else /* !CONFIG_SCHED_CLASS_EXT */
static inline void sched_ext_dead(struct task_struct *p) {}
static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
static inline void scx_softlockup(u32 dur_s) {}
+static inline bool scx_hardlockup(void) { return false; }
static inline bool scx_rcu_cpu_stall(void) { return false; }
#endif /* CONFIG_SCHED_CLASS_EXT */
smp_processor_id(), dur_s);
}
+/**
+ * scx_hardlockup - sched_ext hardlockup handler
+ *
+ * A poorly behaving BPF scheduler can trigger a hard lockup by, e.g.,
+ * putting numerous affinitized tasks in a single queue and directing all
+ * CPUs at it. Try kicking out the current scheduler in an attempt to
+ * recover the system to a good state before taking more drastic actions.
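+ *
+ * Return: %true if a BPF scheduler was active and is being aborted, %false
+ * otherwise.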
+ */
+bool scx_hardlockup(void)
+{
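+	/*
+	 * Bail out unless an scx scheduler is active and this lockup hasn't
+	 * already been handled; handle_lockup() also initiates the abort.
+	 */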
+	if (!handle_lockup("hard lockup - CPU %d", smp_processor_id()))
+		return false;
+
+	printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n",
+			smp_processor_id());
+	return true;
+}
+
/**
* scx_bypass - [Un]bypass scx_ops and guarantee forward progress
* @bypass: true for bypass, false for unbypass
#ifdef CONFIG_SYSFS
++hardlockup_count;
#endif
+		/*
+		 * A poorly behaving BPF scheduler can trigger a hard lockup
+		 * by, e.g., putting numerous affinitized tasks in a single
+		 * queue and directing all CPUs at it. The following call
+		 * returns true at most once while sched_ext is enabled; when
+		 * it does, it immediately aborts the BPF scheduler and prints
+		 * a warning message.
+		 */
+		if (scx_hardlockup())
+			return;
/* Only print hardlockups once. */
if (per_cpu(watchdog_hardlockup_warned, cpu))