#define _ARCH_ARM64_TLBBATCH_H
struct arch_tlbflush_unmap_batch {
- /*
- * For arm64, HW can do tlb shootdown, so we don't
- * need to record cpumask for sending IPI
- */
+ /*
+ * Each bit set is a CPU that potentially has a TLB entry for one of
+ * the PFNs being flushed.
+ */
+ struct cpumask cpumask;
};
#endif /* _ARCH_ARM64_TLBBATCH_H */
init_bootcpu_ops();
smp_init_cpus();
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(&init_mm));
smp_build_mpidr_hash();
/* Init percpu seeds for random tags after cpus are set up. */
*/
mmgrab(mm);
current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
*/
irq_migrate_all_off_this_cpu();
+ /*
+ * Remove this CPU from the vm mask set of all processes
+ */
+ clear_tasks_mm_cpumask(cpu);
+
return 0;
}
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
+ cpumask_clear(mm_cpumask(mm));
return asid2ctxid(asid, generation);
}
switch_mm_fastpath:
arm64_apply_bp_hardening();
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
/*
* Defer TTBR0_EL1 setting for user threads to uaccess_enable() when