From: Christoph Lameter (Ampere) Date: Thu, 7 Dec 2023 04:11:49 +0000 (-0800) Subject: ARM64: Convert kernel flushes X-Git-Url: https://gentwo.org/gitweb/?a=commitdiff_plain;h=70517cc2ffc8148c319e3da7b85411ba21c9c99f;p=linux%2F.git ARM64: Convert kernel flushes Move the kernel flush functions. Signed-off-by: Christoph Lameter (Ampere) --- diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index f22e70098c54..48065f6ce965 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -232,23 +232,9 @@ static inline unsigned long get_trans_granule(void) * on top of these routines, since that is our interface to the mmu_gather * API as used by munmap() and friends. */ -static inline void local_flush_tlb_all(void) -{ - dsb(nshst); - __tlbi(vmalle1); - dsb(nsh); - isb(); - _count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); -} +void local_flush_tlb_all(void); -static inline void flush_tlb_all(void) -{ - dsb(ishst); - __tlbi(vmalle1is); - dsb(ish); - isb(); - _count_vm_tlb_event(NR_TLB_FLUSH_ALL); -} +void flush_tlb_all(void); void flush_tlb_mm(struct mm_struct *mm); @@ -409,38 +395,13 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0); } -static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) -{ - unsigned long addr; - - if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) { - flush_tlb_all(); - return; - } - - start = __TLBI_VADDR(start, 0); - end = __TLBI_VADDR(end, 0); - - dsb(ishst); - for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) - __tlbi(vaale1is, addr); - dsb(ish); - isb(); -} +void flush_tlb_kernel_range(unsigned long start, unsigned long end); /* * Used to invalidate the TLB (walk caches) corresponding to intermediate page * table levels (pgd/pud/pmd). 
 */
-static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
-{
-	unsigned long addr = __TLBI_VADDR(kaddr, 0);
-
-	dsb(ishst);
-	__tlbi(vaae1is, addr);
-	dsb(ish);
-	isb();
-}
+void __flush_tlb_kernel_pgtable(unsigned long kaddr);
 #endif
 
 #endif
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 4028d7930d2f..5ae6f93ca86a 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -735,6 +735,66 @@ void __flush_tlb_range(struct vm_area_struct *vma,
 	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
 }
 
+static inline void flush_tlb_pre_kernel(void)
+{
+	flush_tlb_pre(TLB_BROADCAST);
+}
+
+static inline void flush_tlb_post_kernel(void)
+{
+	flush_tlb_post(TLB_BROADCAST);
+	isb();
+}
+
+void local_flush_tlb_all(void)
+{
+	flush_tlb_pre(TLB_LOCAL);
+	__tlbi(vmalle1);
+	flush_tlb_post(TLB_LOCAL);
+	isb();
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+}
+
+void flush_tlb_all(void)
+{
+	flush_tlb_pre_kernel();
+	__tlbi(vmalle1is);
+	flush_tlb_post_kernel();
+	count_vm_tlb_event(NR_TLB_FLUSH_ALL);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
+		flush_tlb_all();
+		return;
+	}
+
+	start = __TLBI_VADDR(start, 0);
+	end = __TLBI_VADDR(end, 0);
+
+	flush_tlb_pre_kernel();
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+		__tlbi(vaale1is, addr);
+	}
+	flush_tlb_post_kernel();
+}
+
+/*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+void __flush_tlb_kernel_pgtable(unsigned long kaddr)
+{
+	unsigned long addr = __TLBI_VADDR(kaddr, 0);
+
+	flush_tlb_pre_kernel();
+	__tlbi(vaae1is, addr);
+	flush_tlb_post_kernel();
+}
+
 static ssize_t tlb_mode_read_file(struct file *file, char __user *user_buf,
 				  size_t count, loff_t *ppos)
 {