From: Christoph Lameter (Ampere)
Date: Thu, 7 Dec 2023 04:11:49 +0000 (-0800)
Subject: Convert tlb_flush_range
X-Git-Url: https://gentwo.org/gitweb/?a=commitdiff_plain;h=981b020054c5e56d08261ff1c57f714b36f6cdb3;p=linux%2F.git

Convert tlb_flush_range

Move the tlb_flush_range related functionality out of line into
arch/arm64/mm/context.c and convert it to the new framework: the flush
now consults the per-mm TLB state and is either skipped entirely,
performed with local TLBI instructions, carried out via IPIs to the
CPUs in mm_cpumask(), or broadcast as before.

Signed-off-by: Christoph Lameter (Ampere)
---

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 8b06c7ec1fe5..f22e70098c54 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -393,42 +393,10 @@ do { \
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
 	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
+void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
 				     unsigned long stride, bool last_level,
-				     int tlb_level)
-{
-	unsigned long asid, pages;
-
-	start = round_down(start, stride);
-	end = round_up(end, stride);
-	pages = (end - start) >> PAGE_SHIFT;
-
-	/*
-	 * When not uses TLB range ops, we can handle up to
-	 * (MAX_DVM_OPS - 1) pages;
-	 * When uses TLB range ops, we can handle up to
-	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
-	 */
-	if ((!system_supports_tlb_range() &&
-	     (end - start) >= (MAX_DVM_OPS * stride)) ||
-	    pages >= MAX_TLBI_RANGE_PAGES) {
-		flush_tlb_mm(vma->vm_mm);
-		return;
-	}
-
-	dsb(ishst);
-	asid = ASID(vma->vm_mm);
-
-	if (last_level)
-		__flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
-	else
-		__flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);
-
-	dsb(ish);
-	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
-	_count_vm_tlb_event(NR_TLB_FLUSH_RANGE);
-}
+				     int tlb_level);
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 418d9931ae53..4028d7930d2f 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -643,6 +643,98 @@ void __tlbbatch_flush(void)
 	flush_tlb_post(TLB_BROADCAST);
 }
 
+struct ipi_flush_tlb_range_param {
+	unsigned long start;
+	unsigned long pages;
+	unsigned long stride;
+	bool last_level;
+	int tlb_level;
+	unsigned long asid;
+};
+
+static inline void ipi_flush_tlb_range(void *p)
+{
+	struct ipi_flush_tlb_range_param *i = p;
+
+	flush_tlb_pre(TLB_LOCAL);
+
+	if (i->last_level)
+
+		__flush_tlb_range_op(vale1, i->start, i->pages, i->stride, i->asid, i->tlb_level, true);
+
+	else
+
+		__flush_tlb_range_op(vae1, i->start, i->pages, i->stride, i->asid, i->tlb_level, true);
+
+	flush_tlb_post(TLB_LOCAL);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+}
+
+void __flush_tlb_range(struct vm_area_struct *vma,
+		       unsigned long start, unsigned long end,
+		       unsigned long stride, bool last_level,
+		       int tlb_level)
+{
+	struct ipi_flush_tlb_range_param i = { 0, 0, stride, last_level, tlb_level, ASID(vma->vm_mm) };
+	enum tlb_state ts = tlbstat_mm(vma->vm_mm);
+
+	if (ts == TLB_NONE) {
+		count_vm_tlb_event(NR_TLB_SKIPPED);
+		goto out;
+	}
+
+	i.start = round_down(start, stride);
+	end = round_up(end, stride);
+	i.pages = (end - i.start) >> PAGE_SHIFT;
+
+	/*
+	 * When not using TLB range ops, we can handle up to
+	 * (MAX_DVM_OPS - 1) pages;
+	 * When using TLB range ops, we can handle up to
+	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
+	 */
+	if ((!(tlb_mode & TLB_MODE_RANGE) && (end - i.start) >= (MAX_DVM_OPS * stride)) ||
+	    i.pages >= MAX_TLBI_RANGE_PAGES) {
+
+		flush_tlb_mm(vma->vm_mm);
+		return;
+
+	}
+
+	if (ts == TLB_IPI) {
+
+		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &i, true);
+		count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+
+	} else {
+
+		flush_tlb_pre(ts);
+
+		if (last_level) {
+			if (ts == TLB_LOCAL) {
+				__flush_tlb_range_op(vale1, i.start, i.pages, i.stride, i.asid, i.tlb_level, true);
+				count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_RANGE);
+			} else {
+				__flush_tlb_range_op(vale1is, i.start, i.pages, i.stride, i.asid, i.tlb_level, true);
+				count_vm_tlb_event(NR_TLB_FLUSH_RANGE);
+			}
+
+		} else {
+			if (ts == TLB_LOCAL) {
+				__flush_tlb_range_op(vae1, i.start, i.pages, i.stride, i.asid, i.tlb_level, true);
+				count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_RANGE);
+			} else {
+				__flush_tlb_range_op(vae1is, i.start, i.pages, i.stride, i.asid, i.tlb_level, true);
+				count_vm_tlb_event(NR_TLB_FLUSH_RANGE);
+			}
+		}
+
+		flush_tlb_post(ts);
+	}
+out:
+	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
+}
+
 static ssize_t tlb_mode_read_file(struct file *file, char __user *user_buf,
 				  size_t count, loff_t *ppos)
 {
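
For reference, generic code still reaches the now out-of-line __flush_tlb_range()
through the flush_tlb_range() wrapper kept in tlbflush.h, whose body lies outside
the hunk above. A minimal sketch of that wrapper, assuming the existing mainline
arm64 definition is left untouched by this series:

	/*
	 * Sketch of the unchanged header wrapper (not part of this patch).
	 * Leaf-only invalidation cannot be used here because table entries
	 * may also be torn down (hugepage collapse, page table moves), so
	 * last_level is false and the level hint is left at 0 (unknown).
	 */
	static inline void flush_tlb_range(struct vm_area_struct *vma,
					   unsigned long start, unsigned long end)
	{
		__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
	}

Only the callee this wrapper funnels into moves to arch/arm64/mm/context.c and
gains the per-mm TLB-state dispatch.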