#include <linux/sched/clock.h>
#include <linux/sched/idle.h>
+ #include <linux/timex.h>
+ #include <clocksource/arm_arch_timer.h>
-#ifdef CONFIG_ARM64
-/*
- * POLL_IDLE_RELAX_COUNT determines how often we check for timeout
- * while polling for TIF_NEED_RESCHED in thread_info->flags.
- *
- * Set this to a low value since arm64, instead of polling, uses a
- * event based mechanism.
- */
-#define POLL_IDLE_RELAX_COUNT 1
-#else
-#define POLL_IDLE_RELAX_COUNT 200
-#endif
-
static int __cpuidle poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
u64 time_start;
- time_start = local_clock_noinstr();
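+ /* Record the start time in counter cycles; on arm64 get_cycles() reads the architected timer counter */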
+ time_start = get_cycles();
dev->poll_time_limit = false;
raw_local_irq_enable();
if (!current_set_polling_and_test()) {
- u64 limit;
+ u64 end;
- limit = cpuidle_poll_time(drv, dev);
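+ /* cpuidle_poll_time() returns the poll budget in ns; convert it to counter cycles for comparison with get_cycles() */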
+ end = time_start + NSECS_TO_CYCLES(cpuidle_poll_time(drv, dev));
while (!need_resched()) {
- unsigned int loop_count = 0;
- if (local_clock_noinstr() - time_start > limit) {
- dev->poll_time_limit = true;
- break;
+
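+ /* Pick the cheapest wait available: WFET with an absolute deadline, WFE paced by the timer event stream, or plain polling */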
+ if (alternative_has_cap_unlikely(ARM64_HAS_WFXT)) {
+
+ /* We can power down until an event arrives or the deadline 'end' expires */
+ while (!need_resched() && get_cycles() < end)
+ wfet(end);
+
+ } else if (arch_timer_evtstrm_available()) {
+
+ /* We can power down for up to ARCH_TIMER_EVT_STREAM_PERIOD_US at a time; the event stream wakes us to recheck */
+ while (!need_resched() && get_cycles() < end)
+ wfe();
+ } else {
+
+ /* No hardware wait mechanism; fall back to a polling loop */
+ while (!need_resched() && get_cycles() < end)
+ cpu_relax();
}
+
+ /* The poll budget expired without work to do; record it and stop polling */
+ if (get_cycles() >= end) {
+ dev->poll_time_limit = true;
+ break;
+ }
- smp_cond_load_relaxed(&current_thread_info()->flags,
- VAL & _TIF_NEED_RESCHED ||
- loop_count++ >= POLL_IDLE_RELAX_COUNT);
}
}
raw_local_irq_disable();