Convert tlb_flush_range
authorChristoph Lameter (Ampere) <cl@linux.com>
Thu, 7 Dec 2023 04:11:49 +0000 (20:11 -0800)
committerChristoph Lameter (Ampere) <cl@linux.com>
Wed, 20 Dec 2023 15:20:52 +0000 (07:20 -0800)
Move the tlb_flush_range related functionality and convert it to the
new framework.

Signed-off-by: Christoph Lameter (Ampere) <cl@linux.com>
arch/arm64/include/asm/tlbflush.h
arch/arm64/mm/context.c
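
For orientation, the hunks below rely on helpers introduced earlier in this series: tlbstat_mm() classifies an mm as TLB_NONE, TLB_LOCAL, TLB_IPI or TLB_BROADCAST, and flush_tlb_pre()/flush_tlb_post() bracket the actual TLBI instructions. The following standalone sketch only models that dispatch so the control flow is easy to follow; the stubbed helpers are illustrative stand-ins (mmu_notifier calls and vm event accounting are omitted), not the kernel implementation.

/*
 * Minimal userspace sketch of the dispatch performed by the converted
 * __flush_tlb_range().  The tlb_state values mirror the TLB_NONE,
 * TLB_LOCAL, TLB_IPI and TLB_BROADCAST states used in this series; the
 * helpers below are stand-ins for flush_tlb_pre()/flush_tlb_post(), the
 * TLBI range ops and on_each_cpu_mask(), and exist only to show control
 * flow.
 */
#include <stdbool.h>
#include <stdio.h>

enum tlb_state { TLB_NONE, TLB_LOCAL, TLB_IPI, TLB_BROADCAST };

static void flush_pre(enum tlb_state ts)  { printf("pre-barrier (%d)\n", ts); }
static void flush_post(enum tlb_state ts) { printf("post-barrier (%d)\n", ts); }

static void range_op(bool local, bool last_level)
{
	/* local => non-broadcast vale1/vae1, otherwise broadcast vale1is/vae1is */
	printf("tlbi %s, %s\n", local ? "vale1/vae1" : "vale1is/vae1is",
	       last_level ? "last level only" : "all levels");
}

static void send_ipis(bool last_level)
{
	/* Stand-in for on_each_cpu_mask(..., ipi_flush_tlb_range, &i, true):
	 * each CPU in mm_cpumask runs the local pre/op/post sequence itself. */
	printf("IPI every CPU in mm_cpumask, each runs: ");
	range_op(true, last_level);
}

static void flush_range(enum tlb_state ts, bool last_level)
{
	if (ts == TLB_NONE)		/* nothing to do for this mm */
		return;

	if (ts == TLB_IPI) {		/* flush remote CPUs with IPIs */
		send_ipis(last_level);
		return;
	}

	flush_pre(ts);			/* barriers before the TLBI */
	range_op(ts == TLB_LOCAL, last_level);
	flush_post(ts);			/* barriers after the TLBI */
}

int main(void)
{
	flush_range(TLB_BROADCAST, true);
	flush_range(TLB_LOCAL, false);
	flush_range(TLB_IPI, true);
	flush_range(TLB_NONE, true);
	return 0;
}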

index 8b06c7ec1fe536d0e3d4631676ef3bf1d47ae161..f22e70098c546199b8b39ae20f9cee2679f027aa 100644 (file)
@@ -393,42 +393,10 @@ do {                                                                      \
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
        __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
+void __flush_tlb_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end,
                                     unsigned long stride, bool last_level,
-                                    int tlb_level)
-{
-       unsigned long asid, pages;
-
-       start = round_down(start, stride);
-       end = round_up(end, stride);
-       pages = (end - start) >> PAGE_SHIFT;
-
-       /*
-        * When not uses TLB range ops, we can handle up to
-        * (MAX_DVM_OPS - 1) pages;
-        * When uses TLB range ops, we can handle up to
-        * (MAX_TLBI_RANGE_PAGES - 1) pages.
-        */
-       if ((!system_supports_tlb_range() &&
-            (end - start) >= (MAX_DVM_OPS * stride)) ||
-           pages >= MAX_TLBI_RANGE_PAGES) {
-               flush_tlb_mm(vma->vm_mm);
-               return;
-       }
-
-       dsb(ishst);
-       asid = ASID(vma->vm_mm);
-
-       if (last_level)
-               __flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
-       else
-               __flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);
-
-       dsb(ish);
-       mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
-       _count_vm_tlb_event(NR_TLB_FLUSH_RANGE);
-}
+                                    int tlb_level);
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
index 418d9931ae53031fcc6e350c629f85ed04ba6458..4028d7930d2fda35a76e6d97da9ff7201e1821f3 100644 (file)
@@ -643,6 +643,98 @@ void __tlbbatch_flush(void)
        flush_tlb_post(TLB_BROADCAST);
 }
 
+struct ipi_flush_tlb_range_param {
+       unsigned long start;
+       unsigned long pages;
+       unsigned long stride;
+       bool last_level;
+       int tlb_level;
+       unsigned long asid;
+};
+
+static inline void ipi_flush_tlb_range(void *p)
+{
+       struct ipi_flush_tlb_range_param *i = p;
+
+       flush_tlb_pre(TLB_LOCAL);
+
+       if (i->last_level)
+               __flush_tlb_range_op(vale1, i->start, i->pages, i->stride, i->asid, i->tlb_level, true);
+       else
+               __flush_tlb_range_op(vae1, i->start, i->pages, i->stride, i->asid, i->tlb_level, true);
+
+       flush_tlb_post(TLB_LOCAL);
+       count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+}
+
+void __flush_tlb_range(struct vm_area_struct *vma,
+               unsigned long start, unsigned long end,
+               unsigned long stride, bool last_level,
+               int tlb_level)
+{
+       struct ipi_flush_tlb_range_param i = {
+               .stride = stride,
+               .last_level = last_level,
+               .tlb_level = tlb_level,
+               .asid = ASID(vma->vm_mm),
+       };
+       enum tlb_state ts = tlbstat_mm(vma->vm_mm);
+
+       if (ts == TLB_NONE) {
+               count_vm_tlb_event(NR_TLB_SKIPPED);
+               goto out;
+       }
+
+       i.start = round_down(start, stride);
+       end = round_up(end, stride);
+       i.pages = (end - i.start) >> PAGE_SHIFT;
+
+       /*
+        * When not using TLB range ops, we can handle up to
+        * (MAX_DVM_OPS - 1) pages;
+        * When using TLB range ops, we can handle up to
+        * (MAX_TLBI_RANGE_PAGES - 1) pages.
+        */
+       if ((!(tlb_mode & TLB_MODE_RANGE) &&
+            (end - i.start) >= (MAX_DVM_OPS * stride)) ||
+           i.pages >= MAX_TLBI_RANGE_PAGES) {
+               flush_tlb_mm(vma->vm_mm);
+               return;
+       }
+
+       if (ts == TLB_IPI) {
+               on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &i, true);
+               count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+       } else {
+               flush_tlb_pre(ts);
+
+               if (last_level) {
+                       if (ts == TLB_LOCAL) {
+                               __flush_tlb_range_op(vale1, i.start, i.pages, i.stride, i.asid, i.tlb_level, true);
+                               count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_RANGE);
+                       } else {
+                               __flush_tlb_range_op(vale1is, i.start, i.pages, i.stride, i.asid, i.tlb_level, true);
+                               count_vm_tlb_event(NR_TLB_FLUSH_RANGE);
+                       }
+               } else {
+                       if (ts == TLB_LOCAL) {
+                               __flush_tlb_range_op(vae1, i.start, i.pages, i.stride, i.asid, i.tlb_level, true);
+                               count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_RANGE);
+                       } else {
+                               __flush_tlb_range_op(vae1is, i.start, i.pages, i.stride, i.asid, i.tlb_level, true);
+                               count_vm_tlb_event(NR_TLB_FLUSH_RANGE);
+                       }
+               }
+
+               flush_tlb_post(ts);
+       }
+out:
+       mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
+}
+
 static ssize_t tlb_mode_read_file(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
 {