powerpc/64s/slb: Add no_slb_preload early cmdline param
author    Ritesh Harjani (IBM) <ritesh.list@gmail.com>
          Thu, 30 Oct 2025 14:57:36 +0000 (20:27 +0530)
committer Madhavan Srinivasan <maddy@linux.ibm.com>
          Tue, 18 Nov 2025 07:05:53 +0000 (12:35 +0530)
The no_slb_preload cmdline option is useful for quickly disabling and/or
testing the performance impact of userspace slb preloads. Recently there
was an slb multi-hit issue caused by the slb preload cache which was
very difficult to triage. This cmdline option makes it possible to
quickly disable preloads and verify whether the issue lies in the
preload cache or somewhere else. It is also a useful option for
observing the effect of slb preloads on any application workload, e.g.
the number of slb faults with or without slb preloads:

with slb_preload:
slb_faults (minimal initrd boot): 15
slb_faults (full systemd boot): 300

with no_slb_preload:
slb_faults (minimal initrd boot): 33
slb_faults (full systemd boot): 138180

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/de484b55c45d831bc2db63945f455153c89a9a65.1761834163.git.ritesh.list@gmail.com
Documentation/admin-guide/kernel-parameters.txt
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/mm/book3s64/internal.h
arch/powerpc/mm/book3s64/slb.c
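
As background for the hunks that follow: the patch uses the kernel's
standard early_param + static key pattern, where an __initdata flag is
set while the early command line is parsed, a static branch is flipped
once during hash MMU init, and the hot paths then test the key. Below is
a minimal self-contained sketch of that pattern, not code from the
patch; every *_example identifier is hypothetical.

/*
 * Sketch of the early_param + static-key pattern (hypothetical names,
 * for illustration only; not part of the patch).
 */
#include <linux/init.h>
#include <linux/jump_label.h>

static bool opt_example __initdata;

/* Runs while the early kernel command line is scanned. */
static int __init parse_opt_example(char *p)
{
	opt_example = true;
	return 0;
}
early_param("opt_example", parse_opt_example);

/* Defaults to "branch not taken"; the code is patched if enabled. */
__ro_after_init DEFINE_STATIC_KEY_FALSE(opt_example_key);

static void __init opt_example_init(void)
{
	if (opt_example)
		static_branch_enable(&opt_example_key);
}

/* Hot path: costs only a patched nop when the option is unused. */
static bool opt_example_active(void)
{
	return static_branch_unlikely(&opt_example_key);
}

Because the check compiles down to a nop that is patched once at boot,
preload_add() and switch_slb() pay no per-call cost on kernels booted
without no_slb_preload.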

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1c10190d583d4ab4adf4c4745ba3b470162a9d0c..d962d275dac7c70ee5a19053b92e5b0f9cbb69b3 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
                        them frequently to increase the rate of SLB faults
                        on kernel addresses.
 
+       no_slb_preload  [PPC,EARLY]
+                       Disables slb preloading for userspace.
+
        sunrpc.min_resvport=
        sunrpc.max_resvport=
                        [NFS,SUNRPC]
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 31162dbad05c05976571ed6f002e598e22f9caef..9dc5889d6ecb57cc39950ddbaad81caac748fb13 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1329,6 +1329,9 @@ static void __init htab_initialize(void)
        if (stress_slb_enabled)
                static_branch_enable(&stress_slb_key);
 
+       if (no_slb_preload)
+               static_branch_enable(&no_slb_preload_key);
+
        if (stress_hpt_enabled) {
                unsigned long tmp;
                static_branch_enable(&stress_hpt_key);
diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h
index c26a6f0c90fcd24110195c293a5f591f6e12aa7a..cad08d83369cf201e718a120fa7c6c4274847e17 100644
--- a/arch/powerpc/mm/book3s64/internal.h
+++ b/arch/powerpc/mm/book3s64/internal.h
@@ -22,6 +22,13 @@ static inline bool stress_hpt(void)
        return static_branch_unlikely(&stress_hpt_key);
 }
 
+extern bool no_slb_preload;
+DECLARE_STATIC_KEY_FALSE(no_slb_preload_key);
+static inline bool slb_preload_disabled(void)
+{
+       return static_branch_unlikely(&no_slb_preload_key);
+}
+
 void hpt_do_stress(unsigned long ea, unsigned long hpte_group);
 
 void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush);
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 042b762fc0d21c56fcb12e081d7e3668100de798..15f73abd150694c34c903c5f9f3379c0cbc5835b 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -42,6 +42,15 @@ early_param("stress_slb", parse_stress_slb);
 
 __ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key);
 
+bool no_slb_preload __initdata;
+static int __init parse_no_slb_preload(char *p)
+{
+       no_slb_preload = true;
+       return 0;
+}
+early_param("no_slb_preload", parse_no_slb_preload);
+__ro_after_init DEFINE_STATIC_KEY_FALSE(no_slb_preload_key);
+
 static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
@@ -299,6 +308,9 @@ static void preload_add(struct thread_info *ti, unsigned long ea)
        unsigned char idx;
        unsigned long esid;
 
+       if (slb_preload_disabled())
+               return;
+
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
                /* EAs are stored >> 28 so 256MB segments don't need clearing */
                if (ea & ESID_MASK_1T)
@@ -412,6 +424,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
        copy_mm_to_paca(mm);
 
+       if (slb_preload_disabled())
+               return;
+
        /*
         * We gradually age out SLBs after a number of context switches to
         * reduce reload overhead of unused entries (like we do with FP/VEC