_count_vm_tlb_event(NR_TLB_FLUSH_ALL);
}
-extern void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm(struct mm_struct *mm);
+
+void __flush_tlb_page(struct mm_struct *mm,
+ unsigned long uaddr, bool sync);
+
+/*
+ * Fully synchronous single-address flush: delegates to the common
+ * __flush_tlb_page() helper with sync=true so the flush has completed
+ * before this returns (replaces the old nosync-call-plus-dsb pair).
+ */
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long uaddr)
+{
+ __flush_tlb_page(vma->vm_mm, uaddr, true);
+}
static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
 unsigned long uaddr)
{
- unsigned long addr;
-
- dsb(ishst);
- addr = __TLBI_VADDR(uaddr, ASID(mm));
- __tlbi(vale1is, addr);
- __tlbi_user(vale1is, addr);
- mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
- (uaddr & PAGE_MASK) + PAGE_SIZE);
- _count_vm_tlb_event(NR_TLB_FLUSH_ONE);
+ /*
+  * Open-coded TLBI sequence replaced by the common helper; sync=false
+  * preserves the "nosync" contract (no trailing completion barrier).
+  * NOTE(review): the removed _count_vm_tlb_event(NR_TLB_FLUSH_ONE)
+  * has no visible replacement in __flush_tlb_page() — confirm the
+  * accounting moved into flush_tlb_addr() or was dropped on purpose.
+  */
+ __flush_tlb_page(mm, uaddr, false);
}
static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
}
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long uaddr)
-{
- flush_tlb_page_nosync(vma, uaddr);
- dsb(ish);
-}
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
return true;
}
+void __tlbbatch_flush(void);
+
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm,
unsigned long uaddr)
*/
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
- dsb(ish);
+ /* Barrier choice is now centralized in __tlbbatch_flush(). */
+ __tlbbatch_flush();
}
/*
*/
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
- dsb(ish);
+ /* Wait for all batched/broadcast flushes to complete. */
+ __tlbbatch_flush();
}
/*
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
+/* Argument bundle handed to ipi_flush_tlb_page() via on_each_cpu_mask(). */
+struct ipi_flush_tlb_page_param {
+ unsigned long uaddr;
+ struct mm_struct *mm;
+};
+
+/*
+ * IPI handler: flush the single user address locally on the receiving
+ * CPU (TLB_LOCAL scope) and account the received remote flush.
+ */
+static inline void ipi_flush_tlb_page(void *p)
+{
+ struct ipi_flush_tlb_page_param *i = p;
+
+ flush_tlb_pre(TLB_LOCAL);
+ flush_tlb_addr(TLB_LOCAL, i->mm, i->uaddr);
+ flush_tlb_post(TLB_LOCAL);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+}
+
+/*
+ * Flush one user address from the TLB for @mm, using the flush mechanism
+ * selected by tlbstat_mm() (IPI fan-out vs. local/broadcast TLBI).
+ *
+ * @mm:    address space to flush
+ * @uaddr: user virtual address; the containing page is invalidated and
+ *         secondary (IOMMU/notifier) TLBs are told about that page range
+ * @sync:  true  - complete the flush before returning
+ *         false - skip the trailing flush_tlb_post() barrier; the caller
+ *                 must provide the completion barrier later
+ *
+ * NOTE(review): in the TLB_IPI path @sync is ignored — on_each_cpu_mask()
+ * is called with wait=true, so nosync callers still block for the full
+ * synchronous IPI round-trip. Confirm this pessimization is intended.
+ * NOTE(review): the old inline path counted NR_TLB_FLUSH_ONE; no such
+ * accounting is visible in the non-IPI branch here — verify whether
+ * flush_tlb_addr() performs it.
+ */
+void __flush_tlb_page(struct mm_struct *mm,
+ unsigned long uaddr, bool sync)
+{
+ struct ipi_flush_tlb_page_param i = { uaddr, mm };
+ enum tlb_state ts = tlbstat_mm(i.mm);
+
+ if (ts == TLB_IPI) {
+
+ on_each_cpu_mask(mm_cpumask(i.mm), ipi_flush_tlb_page, &i, true);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+
+ } else {
+
+ flush_tlb_pre(ts);
+ flush_tlb_addr(ts, i.mm, uaddr);
+
+ if (sync)
+ flush_tlb_post(ts);
+
+ }
+
+ mmu_notifier_arch_invalidate_secondary_tlbs(i.mm, uaddr & PAGE_MASK,
+ (uaddr & PAGE_MASK) + PAGE_SIZE);
+}
+
+/*
+ * Completion barrier for batched TLB flushes: waits for all previously
+ * issued broadcast-scope invalidations to finish.
+ */
+void __tlbbatch_flush(void)
+{
+ flush_tlb_post(TLB_BROADCAST);
+}
+
static ssize_t tlb_mode_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{