 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
 {
-	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) &&
-	    !guest_cpu_cap_has(vcpu, X86_FEATURE_MWAIT))
+	bool enabled;
+
+	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS))
+		goto emulate_as_nop;
+
+	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT))
+		enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_MWAIT);
+	else
+		enabled = vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT;
+
+	if (!enabled)
 		return kvm_handle_invalid_op(vcpu);
 
+emulate_as_nop:
 	pr_warn_once("%s instruction emulated as NOP!\n", insn);
 	return kvm_emulate_as_nop(vcpu);
 }
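
The reworked helper only consults MISC_ENABLE when userspace has disabled both quirks. A minimal userspace sketch of how a VMM would do that via the documented KVM_CAP_DISABLE_QUIRKS2 capability; vm_fd is an assumed, already-created VM file descriptor:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int disable_mwait_quirks(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_DISABLE_QUIRKS2;
	cap.args[0] = KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS |
		      KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT;

	/*
	 * With both quirks disabled, MONITOR/MWAIT #UD unless the guest has
	 * set MISC_ENABLE.MWAIT, i.e. the new 'enabled' path above is taken
	 * instead of unconditionally emulating the instructions as NOPs.
	 */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}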
 	smp_wmb();
 
 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-	vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
-
-	if (vcpu->pvclock_set_guest_stopped_request) {
-		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
-		vcpu->pvclock_set_guest_stopped_request = false;
-	}
-
-	memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
-
-	if (force_tsc_unstable)
-		guest_hv_clock->flags &= ~PVCLOCK_TSC_STABLE_BIT;
+	hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+
+	memcpy(guest_hv_clock, &hv_clock, sizeof(*guest_hv_clock));
 
 	smp_wmb();
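
For context, the two smp_wmb() calls bracket the copy-out so the guest's seqcount-style reader never observes a torn update. A minimal guest-side sketch of the matching read protocol, not KVM code; it assumes the shared page is already mapped and uses an illustrative lfence-based rmb():

#include <stdint.h>

/* Layout per the pvclock ABI (include/uapi/asm/pvclock-abi.h). */
struct pvclock_vcpu_time_info {
	uint32_t version;
	uint32_t pad0;
	uint64_t tsc_timestamp;
	uint64_t system_time;
	uint32_t tsc_to_system_mul;
	int8_t   tsc_shift;
	uint8_t  flags;
	uint8_t  pad[2];
};

#define rmb()	__asm__ volatile("lfence" ::: "memory")

static uint8_t pvclock_read_flags(const volatile struct pvclock_vcpu_time_info *ti)
{
	uint32_t version;
	uint8_t flags;

	do {
		version = ti->version;		/* odd => host mid-update */
		rmb();				/* pairs with the first smp_wmb() */
		flags = ti->flags;
		rmb();				/* pairs with the second smp_wmb() */
	} while ((version & 1) || version != ti->version);

	return flags;
}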
 	vcpu->last_guest_tsc = tsc_timestamp;
 
 	/* If the host uses TSC clocksource, then it is stable */
-	pvclock_flags = 0;
+	hv_clock.flags = 0;
 	if (use_master_clock)
-		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
-
-	vcpu->hv_clock.flags = pvclock_flags;
+		hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT;
+
+	if (vcpu->pv_time.active) {
+		/*
+		 * GUEST_STOPPED is only supported by kvmclock, and KVM's
+		 * historic behavior is to only process the request if kvmclock
+		 * is active/enabled.
+		 */
+		if (vcpu->pvclock_set_guest_stopped_request) {
+			hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
+			vcpu->pvclock_set_guest_stopped_request = false;
+		}
+		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0);
+
+		hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED;
+	}
+
+	kvm_hv_setup_tsc_page(v->kvm, &hv_clock);
 
-	if (vcpu->pv_time.active)
-		kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0, false);
 #ifdef CONFIG_KVM_XEN
+	/*
+	 * For Xen guests we may need to override PVCLOCK_TSC_STABLE_BIT as unless
+	 * explicitly told to use TSC as its clocksource Xen will not set this bit.
+	 * This default behaviour led to bugs in some guest kernels which cause
+	 * problems if they observe PVCLOCK_TSC_STABLE_BIT in the pvclock flags.
+	 *
+	 * Note! Clear TSC_STABLE only for Xen clocks, i.e. the order matters!
+	 */
+	if (ka->xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
+		hv_clock.flags &= ~PVCLOCK_TSC_STABLE_BIT;
+
 	if (vcpu->xen.vcpu_info_cache.active)
-		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
-					offsetof(struct compat_vcpu_info, time),
-					xen_pvclock_tsc_unstable);
+		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_info_cache,
+					offsetof(struct compat_vcpu_info, time));
 	if (vcpu->xen.vcpu_time_info_cache.active)
-		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0,
-					xen_pvclock_tsc_unstable);
+		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0);
 #endif
-	kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
 	return 0;
 }
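
A sketch of how a VMM would opt in to the override described in the comment above, assuming an existing VM file descriptor (vm_fd); real VMMs typically also set the hypercall MSR and other fields in the same call:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int xen_mark_pvclock_tsc_unstable(int vm_fd)
{
	struct kvm_xen_hvm_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.flags = KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;

	/*
	 * KVM will clear PVCLOCK_TSC_STABLE_BIT in the Xen vcpu_info and
	 * vcpu_time_info clocks only; kvmclock and the Hyper-V TSC page
	 * keep the flag, hence the ordering note above.
	 */
	return ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);
}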
 	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
 	    ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
-		if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
+		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XMM3))
 			return 1;
 		vcpu->arch.ia32_misc_enable_msr = data;
-		kvm_update_cpuid_runtime(vcpu);
+		vcpu->arch.cpuid_dynamic_bits_dirty = true;
 	} else {
 		vcpu->arch.ia32_misc_enable_msr = data;
 	}
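
With KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT disabled, a guest write that toggles MISC_ENABLE.MWAIT is expected to flip CPUID.01H:ECX[3] (MONITOR) the next time CPUID is emulated, which is the behavior the deferred cpuid_dynamic_bits_dirty flag preserves. A hedged guest-side sketch; the ring-0 rdmsr/wrmsr helpers are illustrative, not a kernel API:

#include <cpuid.h>
#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_MISC_ENABLE	0x000001a0
#define MISC_ENABLE_MWAIT	(1ULL << 18)	/* ENABLE MONITOR FSM */

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	__asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr(uint32_t msr, uint64_t val)
{
	__asm__ volatile("wrmsr" :: "c"(msr), "a"((uint32_t)val),
			 "d"((uint32_t)(val >> 32)));
}

static bool monitor_enumerated(void)
{
	uint32_t eax, ebx, ecx, edx;

	__get_cpuid(1, &eax, &ebx, &ecx, &edx);
	return ecx & (1u << 3);		/* CPUID.01H:ECX.MONITOR */
}

static void hide_monitor_mwait(void)
{
	wrmsr(MSR_IA32_MISC_ENABLE,
	      rdmsr(MSR_IA32_MISC_ENABLE) & ~MISC_ENABLE_MWAIT);

	/*
	 * The next CPUID exit performs the deferred runtime update, so
	 * monitor_enumerated() should return false from here on.
	 */
}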
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
-	int ret = 0;
-
-	mutex_lock(&kvm->lock);
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (!vcpu->arch.pv_time.active)
-			continue;
-
-		ret = kvm_set_guest_paused(vcpu);
-		if (ret) {
-			kvm_err("Failed to pause guest VCPU%d: %d\n",
-				vcpu->vcpu_id, ret);
-			break;
-		}
-	}
-	mutex_unlock(&kvm->lock);
 
-	return ret ? NOTIFY_BAD : NOTIFY_DONE;
+	/*
+	 * Ignore the return, marking the guest paused only "fails" if the vCPU
+	 * isn't using kvmclock; continuing on is correct and desirable.
+	 */
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		(void)kvm_set_guest_paused(vcpu);
+
+	return NOTIFY_DONE;
 }
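
kvm_set_guest_paused() is the same helper backing the documented KVM_KVMCLOCK_CTRL vCPU ioctl, and it fails only when the vCPU isn't using kvmclock, which is exactly the return the notifier above now ignores. A userspace sketch, assuming an already-created vCPU file descriptor (vcpu_fd):

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int mark_vcpu_paused(int vcpu_fd)
{
	/* EINVAL just means this vCPU has no active kvmclock; not an error. */
	if (ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0) < 0 && errno != EINVAL)
		return -errno;

	/*
	 * PVCLOCK_GUEST_STOPPED will be set in the vCPU's kvmclock on the
	 * next clock update, telling the guest to touch its watchdogs.
	 */
	return 0;
}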
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)