Gentoo Git Trees - linux/.git/commitdiff
printk: Introduce console_flush_one_record
author: Andrew Murray <amurray@thegoodpenguin.co.uk>
Mon, 20 Oct 2025 15:38:05 +0000 (16:38 +0100)
committer: Petr Mladek <pmladek@suse.com>
Thu, 23 Oct 2025 15:08:41 +0000 (17:08 +0200)
console_flush_all prints all remaining records to all usable consoles
whilst its caller holds console_lock. This can result in large waiting
times for those waiting for console_lock especially where there is a
large volume of records or where the console is slow (e.g. serial).

Let's extract the parts of this function which print a single record
into a new function named console_flush_one_record. This can later
be used for functions that will release and reacquire console_lock
between records.

This commit should not change existing functionality.

Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Andrew Murray <amurray@thegoodpenguin.co.uk>
Reviewed-by: John Ogness <john.ogness@linutronix.de>
Link: https://patch.msgid.link/20251020-printk_legacy_thread_console_lock-v3-1-00f1f0ac055a@thegoodpenguin.co.uk
Signed-off-by: Petr Mladek <pmladek@suse.com>
kernel/printk/printk.c

index 5aee9ffb16b9a5e7bfadb0266a77bfa569e50e51..1c048c66d09919967e57326e1732bd17c10f3c76 100644 (file)
@@ -3134,6 +3134,99 @@ static inline void printk_kthreads_check_locked(void) { }
 
 #endif /* CONFIG_PRINTK */
 
+
+/*
+ * Print out one record for each console.
+ *
+ * @do_cond_resched is set by the caller. It can be true only in schedulable
+ * context.
+ *
+ * @next_seq is set to the sequence number after the last available record.
+ * The value is valid only when there is at least one usable console and all
+ * usable consoles were flushed.
+ *
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock. Otherwise it is set to false.
+ *
+ * @any_usable will be set to true if there are any usable consoles.
+ *
+ * Returns true when there was at least one usable console and a record was
+ * flushed. A returned false indicates there were no records to flush for any
+ * of the consoles. It may also indicate that there were no usable consoles,
+ * the context has been lost, or there is a panic situation. Regardless of
+ * the reason, the caller should assume it is not useful to immediately try again.
+ *
+ * Requires the console_lock.
+ */
+static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover,
+                                    bool *any_usable)
+{
+       struct console_flush_type ft;
+       bool any_progress = false;
+       struct console *con;
+       int cookie;
+
+       printk_get_console_flush_type(&ft);
+
+       cookie = console_srcu_read_lock();
+       for_each_console_srcu(con) {
+               short flags = console_srcu_read_flags(con);
+               u64 printk_seq;
+               bool progress;
+
+               /*
+                * console_flush_one_record() is only responsible for
+                * nbcon consoles when the nbcon consoles cannot print via
+                * their atomic or threaded flushing.
+                */
+               if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
+                       continue;
+
+               if (!console_is_usable(con, flags, !do_cond_resched))
+                       continue;
+               *any_usable = true;
+
+               if (flags & CON_NBCON) {
+                       progress = nbcon_legacy_emit_next_record(con, handover, cookie,
+                                                                !do_cond_resched);
+                       printk_seq = nbcon_seq_read(con);
+               } else {
+                       progress = console_emit_next_record(con, handover, cookie);
+                       printk_seq = con->seq;
+               }
+
+               /*
+                * If a handover has occurred, the SRCU read lock
+                * is already released.
+                */
+               if (*handover)
+                       return false;
+
+               /* Track the next of the highest seq flushed. */
+               if (printk_seq > *next_seq)
+                       *next_seq = printk_seq;
+
+               if (!progress)
+                       continue;
+               any_progress = true;
+
+               /* Allow panic_cpu to take over the consoles safely. */
+               if (panic_on_other_cpu())
+                       goto abandon;
+
+               if (do_cond_resched)
+                       cond_resched();
+       }
+       console_srcu_read_unlock(cookie);
+
+       return any_progress;
+
+abandon:
+       console_srcu_read_unlock(cookie);
+       return false;
+}
+
 /*
  * Print out all remaining records to all consoles.
  *
@@ -3159,77 +3252,24 @@ static inline void printk_kthreads_check_locked(void) { }
  */
 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
 {
-       struct console_flush_type ft;
        bool any_usable = false;
-       struct console *con;
        bool any_progress;
-       int cookie;
 
        *next_seq = 0;
        *handover = false;
 
        do {
-               any_progress = false;
+               any_progress = console_flush_one_record(do_cond_resched, next_seq, handover,
+                                                       &any_usable);
 
-               printk_get_console_flush_type(&ft);
-
-               cookie = console_srcu_read_lock();
-               for_each_console_srcu(con) {
-                       short flags = console_srcu_read_flags(con);
-                       u64 printk_seq;
-                       bool progress;
+               if (*handover)
+                       return false;
 
-                       /*
-                        * console_flush_all() is only responsible for nbcon
-                        * consoles when the nbcon consoles cannot print via
-                        * their atomic or threaded flushing.
-                        */
-                       if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
-                               continue;
-
-                       if (!console_is_usable(con, flags, !do_cond_resched))
-                               continue;
-                       any_usable = true;
-
-                       if (flags & CON_NBCON) {
-                               progress = nbcon_legacy_emit_next_record(con, handover, cookie,
-                                                                        !do_cond_resched);
-                               printk_seq = nbcon_seq_read(con);
-                       } else {
-                               progress = console_emit_next_record(con, handover, cookie);
-                               printk_seq = con->seq;
-                       }
-
-                       /*
-                        * If a handover has occurred, the SRCU read lock
-                        * is already released.
-                        */
-                       if (*handover)
-                               return false;
-
-                       /* Track the next of the highest seq flushed. */
-                       if (printk_seq > *next_seq)
-                               *next_seq = printk_seq;
-
-                       if (!progress)
-                               continue;
-                       any_progress = true;
-
-                       /* Allow panic_cpu to take over the consoles safely. */
-                       if (panic_on_other_cpu())
-                               goto abandon;
-
-                       if (do_cond_resched)
-                               cond_resched();
-               }
-               console_srcu_read_unlock(cookie);
+               if (panic_on_other_cpu())
+                       return false;
        } while (any_progress);
 
        return any_usable;
-
-abandon:
-       console_srcu_read_unlock(cookie);
-       return false;
 }
 
 static void __console_flush_and_unlock(void)