Gentwo Git Trees - linux/.git/commitdiff
ARM64: Convert TLB page flushing
authorChristoph Lameter (Ampere) <cl@linux.com>
Thu, 7 Dec 2023 04:11:49 +0000 (20:11 -0800)
committerChristoph Lameter (Ampere) <cl@linux.com>
Wed, 20 Dec 2023 15:20:52 +0000 (07:20 -0800)
Move the functions related to individual page flushing
and convert them to use the new infrastructure.

Signed-off-by: Christoph Lameter (Ampere) <cl@linux.com>
arch/arm64/include/asm/tlbflush.h
arch/arm64/mm/context.c

index 8f0d4b48758bc296cd5cb677cfcea14b213b283d..8b06c7ec1fe536d0e3d4631676ef3bf1d47ae161 100644 (file)
@@ -250,20 +250,21 @@ static inline void flush_tlb_all(void)
        _count_vm_tlb_event(NR_TLB_FLUSH_ALL);
 }
 
-extern void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm(struct mm_struct *mm);
+
+void __flush_tlb_page(struct mm_struct *mm,
+                               unsigned long uaddr, bool sync);
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+                               unsigned long uaddr)
+{
+       __flush_tlb_page(vma->vm_mm, uaddr, true);
+}
 
 static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
                                           unsigned long uaddr)
 {
-       unsigned long addr;
-
-       dsb(ishst);
-       addr = __TLBI_VADDR(uaddr, ASID(mm));
-       __tlbi(vale1is, addr);
-       __tlbi_user(vale1is, addr);
-       mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
-                                               (uaddr & PAGE_MASK) + PAGE_SIZE);
-       _count_vm_tlb_event(NR_TLB_FLUSH_ONE);
+       __flush_tlb_page(mm, uaddr, false);
 }
 
 static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
@@ -272,12 +273,6 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
        return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
 }
 
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-                                 unsigned long uaddr)
-{
-       flush_tlb_page_nosync(vma, uaddr);
-       dsb(ish);
-}
 
 static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 {
@@ -293,6 +288,8 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
        return true;
 }
 
+void __tlbbatch_flush(void);
+
 static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
                                             struct mm_struct *mm,
                                             unsigned long uaddr)
@@ -307,7 +304,7 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
  */
 static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
 {
-       dsb(ish);
+       __tlbbatch_flush();
 }
 
 /*
@@ -322,7 +319,7 @@ static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
  */
 static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-       dsb(ish);
+       __tlbbatch_flush();
 }
 
 /*
index d11213f82d8e9adaf794ce7e7e49e90109dc39e7..418d9931ae53031fcc6e350c629f85ed04ba6458 100644 (file)
@@ -598,6 +598,51 @@ void flush_tlb_mm(struct mm_struct *mm)
        mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
 
+struct ipi_flush_tlb_page_param {
+       unsigned long uaddr;
+       struct mm_struct *mm;
+};
+
+static inline void ipi_flush_tlb_page(void *p)
+{
+       struct ipi_flush_tlb_page_param *i = p;
+
+       flush_tlb_pre(TLB_LOCAL);
+       flush_tlb_addr(TLB_LOCAL, i->mm, i->uaddr);
+       flush_tlb_post(TLB_LOCAL);
+       count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+}
+
+void __flush_tlb_page(struct mm_struct *mm,
+                                 unsigned long uaddr, bool sync)
+{
+       struct ipi_flush_tlb_page_param i = { uaddr, mm };
+       enum tlb_state ts = tlbstat_mm(i.mm);
+
+       if (ts == TLB_IPI) {
+
+               on_each_cpu_mask(mm_cpumask(i.mm), ipi_flush_tlb_page, &i, true);
+               count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+
+       } else {
+
+               flush_tlb_pre(ts);
+               flush_tlb_addr(ts, i.mm, uaddr);
+
+               if (sync)
+                       flush_tlb_post(ts);
+
+       }
+
+       mmu_notifier_arch_invalidate_secondary_tlbs(i.mm, uaddr & PAGE_MASK,
+                                               (uaddr & PAGE_MASK) + PAGE_SIZE);
+}
+
+void __tlbbatch_flush(void)
+{
+       flush_tlb_post(TLB_BROADCAST);
+}
+
 static ssize_t tlb_mode_read_file(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
 {