tracing: Add bulk garbage collection of freeing event_trigger_data
author Steven Rostedt <rostedt@goodmis.org>
Tue, 25 Nov 2025 21:40:06 +0000 (16:40 -0500)
committer Steven Rostedt (Google) <rostedt@goodmis.org>
Wed, 26 Nov 2025 20:13:30 +0000 (15:13 -0500)
The event trigger data requires a full tracepoint_synchronize_unregister()
call before freeing. That call can take 100s of milliseconds to complete.
In order to allow for bulk freeing of the trigger data, it cannot call
tracepoint_synchronize_unregister() for every individual trigger data
being freed.
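
For reference, the per-element pattern this replaces is the pair of calls
removed from trigger_data_free() in the diff below; freeing N triggers
that way pays the synchronization wait N times:

  /* old path: one full synchronization per freed trigger */
  tracepoint_synchronize_unregister();    /* can take 100s of ms */
  kfree(data);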

Create a kthread the first time a trigger data is freed, and have it use a
lockless llist to collect the data to free, run
tracepoint_synchronize_unregister(), and then free everything in the list.
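
A minimal, self-contained sketch of this producer/consumer pattern, using
only the generic llist, kthread, and scheduler APIs. All of the gc_* names
are made up for illustration and the thread creation (kthread_create()) is
left out, so this is not the actual patch code, which follows below:

  #include <linux/kthread.h>
  #include <linux/llist.h>
  #include <linux/sched.h>
  #include <linux/slab.h>
  #include <linux/tracepoint.h>

  struct gc_item {
          struct llist_node       llist;
          /* payload ... */
  };

  static LLIST_HEAD(gc_list);
  static struct task_struct *gc_task;   /* created once with kthread_create() */

  /* Producer side: queue the item lock-free and kick the collector */
  static void gc_queue(struct gc_item *item)
  {
          llist_add(&item->llist, &gc_list);
          wake_up_process(gc_task);
  }

  /* Consumer side: grab the whole batch, pay the expensive wait once,
   * then free every element that was queued before the wait.
   */
  static int gc_thread_fn(void *unused)
  {
          struct gc_item *item, *tmp;
          struct llist_node *batch;

          for (;;) {
                  set_current_state(TASK_INTERRUPTIBLE);
                  if (llist_empty(&gc_list))
                          schedule();
                  __set_current_state(TASK_RUNNING);

                  batch = llist_del_all(&gc_list);
                  tracepoint_synchronize_unregister();
                  llist_for_each_entry_safe(item, tmp, batch, llist)
                          kfree(item);
          }
          return 0;
  }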

By freeing hundreds of event_trigger_data elements together, only two runs
of the synchronization function are required instead of hundreds. This
speeds up the operation by orders of magnitude (milliseconds instead of
several seconds).
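
Rough arithmetic, using the ~100 ms per-call figure above and an arbitrary
count N of queued triggers:

  individual frees:  N * ~100 ms   (several seconds already for a few dozen triggers)
  bulk free:         2 * ~100 ms   (a couple hundred milliseconds, independent of N)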

Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Tom Zanussi <zanussi@kernel.org>
Link: https://patch.msgid.link/20251125214032.151674992@kernel.org
Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
kernel/trace/trace.h
kernel/trace/trace_events_trigger.c

index 901aad30099bdc2b2b0118532562ba715dc36d46..a3aa225ed50a10bdaef1703ee373197b55c5b75f 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ctype.h>
 #include <linux/once_lite.h>
 #include <linux/ftrace_regs.h>
+#include <linux/llist.h>
 
 #include "pid_list.h"
 
@@ -1808,6 +1809,7 @@ struct event_trigger_data {
        char                            *name;
        struct list_head                named_list;
        struct event_trigger_data       *named_data;
+       struct llist_node               llist;
 };
 
 /* Avoid typos */
index e5dcfcbb2cd585d6aaa3a8b3b3043488d1bf413b..3b97c242b7954f0409552df6a12b8163d4d24feb 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/security.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/ctype.h>
 #include <linux/mutex.h>
 static LIST_HEAD(trigger_commands);
 static DEFINE_MUTEX(trigger_cmd_mutex);
 
+static struct task_struct *trigger_kthread;
+static struct llist_head trigger_data_free_list;
+static DEFINE_MUTEX(trigger_data_kthread_mutex);
+
+/* Bulk garbage collection of event_trigger_data elements */
+static int trigger_kthread_fn(void *ignore)
+{
+       struct event_trigger_data *data, *tmp;
+       struct llist_node *llnodes;
+
+       /* Once this task starts, it lives forever */
+       for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (llist_empty(&trigger_data_free_list))
+                       schedule();
+
+               __set_current_state(TASK_RUNNING);
+
+               llnodes = llist_del_all(&trigger_data_free_list);
+
+               /* make sure current triggers exit before free */
+               tracepoint_synchronize_unregister();
+
+               llist_for_each_entry_safe(data, tmp, llnodes, llist)
+                       kfree(data);
+       }
+
+       return 0;
+}
+
 void trigger_data_free(struct event_trigger_data *data)
 {
        if (data->cmd_ops->set_filter)
                data->cmd_ops->set_filter(NULL, data, NULL);
 
-       /* make sure current triggers exit before free */
-       tracepoint_synchronize_unregister();
+       if (unlikely(!trigger_kthread)) {
+               guard(mutex)(&trigger_data_kthread_mutex);
+               /* Check again after taking mutex */
+               if (!trigger_kthread) {
+                       struct task_struct *kthread;
+
+                       kthread = kthread_create(trigger_kthread_fn, NULL,
+                                                "trigger_data_free");
+                       if (!IS_ERR(kthread))
+                               WRITE_ONCE(trigger_kthread, kthread);
+               }
+       }
+
+       if (!trigger_kthread) {
+               /* Do it the slow way */
+               tracepoint_synchronize_unregister();
+               kfree(data);
+               return;
+       }
 
-       kfree(data);
+       llist_add(&data->llist, &trigger_data_free_list);
+       wake_up_process(trigger_kthread);
 }
 
 static inline void data_ops_trigger(struct event_trigger_data *data,