ARM64: Convert kernel flushes
author	Christoph Lameter (Ampere) <cl@linux.com>
	Thu, 7 Dec 2023 04:11:49 +0000 (20:11 -0800)
committer	Christoph Lameter (Ampere) <cl@linux.com>
	Wed, 20 Dec 2023 15:20:52 +0000 (07:20 -0800)
Move the kernel TLB flush functions local_flush_tlb_all(), flush_tlb_all(),
flush_tlb_kernel_range() and __flush_tlb_kernel_pgtable() out of line, from
arch/arm64/include/asm/tlbflush.h into arch/arm64/mm/context.c, so that they
can share the common flush_tlb_pre()/flush_tlb_post() barrier helpers.
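
The moved functions now bracket their TLBI operations with flush_tlb_pre()
and flush_tlb_post(), passing a TLB_LOCAL or TLB_BROADCAST scope. Those
helpers are not part of this diff; purely for illustration, here is a
minimal sketch of what they may look like, inferred from the inline
barriers removed below (non-shareable for local flushes, inner-shareable
for broadcast). The enum name is a placeholder, and the real definitions
elsewhere in this series may differ:

	/* Sketch only -- the real helpers are defined outside this diff. */
	enum tlb_flush_scope { TLB_LOCAL, TLB_BROADCAST };	/* placeholder name */

	static inline void flush_tlb_pre(enum tlb_flush_scope scope)
	{
		/* Order prior page table updates before the TLBI. */
		if (scope == TLB_LOCAL)
			dsb(nshst);	/* non-shareable: this CPU only */
		else
			dsb(ishst);	/* inner shareable: broadcast to all CPUs */
	}

	static inline void flush_tlb_post(enum tlb_flush_scope scope)
	{
		/* Wait for the TLBI to complete before continuing. */
		if (scope == TLB_LOCAL)
			dsb(nsh);
		else
			dsb(ish);
	}

flush_tlb_pre_kernel()/flush_tlb_post_kernel() below simply wrap the
broadcast case, with the post variant adding the isb() that the kernel
flush paths require.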

Signed-off-by: Christoph Lameter (Ampere) <cl@linux.com>
arch/arm64/include/asm/tlbflush.h
arch/arm64/mm/context.c

arch/arm64/include/asm/tlbflush.h
index f22e70098c546199b8b39ae20f9cee2679f027aa..48065f6ce965aa0de16213b27122f08af5437a45 100644
@@ -232,23 +232,9 @@ static inline unsigned long get_trans_granule(void)
  *     on top of these routines, since that is our interface to the mmu_gather
  *     API as used by munmap() and friends.
  */
-static inline void local_flush_tlb_all(void)
-{
-       dsb(nshst);
-       __tlbi(vmalle1);
-       dsb(nsh);
-       isb();
-       _count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-}
+void local_flush_tlb_all(void);
 
-static inline void flush_tlb_all(void)
-{
-       dsb(ishst);
-       __tlbi(vmalle1is);
-       dsb(ish);
-       isb();
-       _count_vm_tlb_event(NR_TLB_FLUSH_ALL);
-}
+void flush_tlb_all(void);
 
 void flush_tlb_mm(struct mm_struct *mm);
 
@@ -409,38 +395,13 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
        __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
 }
 
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-       unsigned long addr;
-
-       if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
-               flush_tlb_all();
-               return;
-       }
-
-       start = __TLBI_VADDR(start, 0);
-       end = __TLBI_VADDR(end, 0);
-
-       dsb(ishst);
-       for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-               __tlbi(vaale1is, addr);
-       dsb(ish);
-       isb();
-}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 /*
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
  */
-static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
-{
-       unsigned long addr = __TLBI_VADDR(kaddr, 0);
-
-       dsb(ishst);
-       __tlbi(vaae1is, addr);
-       dsb(ish);
-       isb();
-}
+void __flush_tlb_kernel_pgtable(unsigned long kaddr);
 #endif
 
 #endif
arch/arm64/mm/context.c
index 4028d7930d2fda35a76e6d97da9ff7201e1821f3..5ae6f93ca86a70c2beb6c3fa050d7b78693644c5 100644
@@ -735,6 +735,67 @@ void __flush_tlb_range(struct vm_area_struct *vma,
        mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
 }
 
+static inline void flush_tlb_pre_kernel(void)
+{
+       flush_tlb_pre(TLB_BROADCAST);
+}
+
+static inline void flush_tlb_post_kernel(void)
+{
+       flush_tlb_post(TLB_BROADCAST);
+       isb();
+}
+
+void local_flush_tlb_all(void)
+{
+       flush_tlb_pre(TLB_LOCAL);
+       __tlbi(vmalle1);
+       flush_tlb_post(TLB_LOCAL);
+       isb();
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+}
+
+void flush_tlb_all(void)
+{
+       flush_tlb_pre_kernel();
+       __tlbi(vmalle1is);
+       flush_tlb_post_kernel();
+       count_vm_tlb_event(NR_TLB_FLUSH_ALL);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       unsigned long addr;
+
+       if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
+               flush_tlb_all();
+               return;
+       }
+
+       start = __TLBI_VADDR(start, 0);
+       end = __TLBI_VADDR(end, 0);
+
+       flush_tlb_pre_kernel();
+       for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+               __tlbi(vaale1is, addr);
+       }
+       flush_tlb_post_kernel();
+}
+
+/*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+void __flush_tlb_kernel_pgtable(unsigned long kaddr)
+{
+       unsigned long addr = __TLBI_VADDR(kaddr, 0);
+
+       flush_tlb_pre_kernel();
+       __tlbi(vaae1is, addr);
+       flush_tlb_post_kernel();
+}
+
 static ssize_t tlb_mode_read_file(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
 {