KVM: s390: Use generic VIRT_XFER_TO_GUEST_WORK functions
author    Andrew Donnellan <ajd@linux.ibm.com>
          Wed, 26 Nov 2025 05:33:12 +0000 (16:33 +1100)
committer Janosch Frank <frankja@linux.ibm.com>
          Fri, 28 Nov 2025 09:11:14 +0000 (10:11 +0100)
Switch to using the generic infrastructure to check for and handle pending
work before transitioning into guest mode.

xfer_to_guest_mode_handle_work() does a few more things than the current
code when deciding whether or not to exit the __vcpu_run() loop. In my
testing, the exittime tests from kvm-unit-tests scored within a few
percent of the results from before this series, which is within noise
tolerance.
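
For context, the pattern being adopted looks roughly like the sketch
below. This is a minimal sketch, not verbatim kernel code: the enclosing
function name is made up for illustration, while the helpers are the ones
used in the hunks that follow (<linux/entry-virt.h> provides the
xfer_to_guest_mode_* calls):

/*
 * Sketch: re-check for pending work with IRQs disabled and only enter
 * the guest once none is left, replacing the open-coded
 * signal_pending()/need_resched() checks.
 */
static int sketch_enter_guest(struct kvm_vcpu *vcpu)
{
	int rc;

check_work:
	local_irq_disable();
	xfer_to_guest_mode_prepare();
	if (xfer_to_guest_mode_work_pending()) {
		/* Work (signal, resched, ...) is handled with IRQs on. */
		local_irq_enable();
		rc = kvm_xfer_to_guest_mode_handle_work(vcpu);
		if (rc)
			return rc;	/* e.g. a pending signal */
		goto check_work;	/* re-check before entering */
	}
	/* No work left: enter the guest with interrupts still disabled. */
	return 0;
}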

Co-developed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
Acked-by: Janosch Frank <frankja@linux.ibm.com>
[frankja@linux.ibm.com: Removed semicolon]
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
arch/s390/kvm/Kconfig
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c

diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index cae908d645501ef7eb4edbe87b8431f6499370a4..0ca9d6587243c98034d086c0ebd4ef085e504faf 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -30,6 +30,7 @@ config KVM
        select HAVE_KVM_NO_POLL
        select KVM_VFIO
        select MMU_NOTIFIER
+       select VIRT_XFER_TO_GUEST_WORK
        help
          Support hosting paravirtualized guest machines using the SIE
          virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4d13601ec2178596c5ba2fcb75ff73b99cf11601..d31155e371df58e4de39949f5e004734dcb8dc7b 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/compiler.h>
+#include <linux/entry-virt.h>
 #include <linux/export.h>
 #include <linux/err.h>
 #include <linux/fs.h>
@@ -4675,9 +4676,6 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
        vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
        vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
 
-       if (need_resched())
-               schedule();
-
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                rc = kvm_s390_deliver_pending_interrupts(vcpu);
                if (rc || guestdbg_exit_pending(vcpu))
@@ -4982,12 +4980,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
         */
        kvm_vcpu_srcu_read_lock(vcpu);
 
-       do {
+       while (true) {
                rc = vcpu_pre_run(vcpu);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (rc || guestdbg_exit_pending(vcpu))
                        break;
 
-               kvm_vcpu_srcu_read_unlock(vcpu);
                /*
                 * As PF_VCPU will be used in fault handler, between
                 * guest_timing_enter_irqoff and guest_timing_exit_irqoff
@@ -4999,7 +4997,17 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                               sizeof(sie_page->pv_grregs));
                }
 
+xfer_to_guest_mode_check:
                local_irq_disable();
+               xfer_to_guest_mode_prepare();
+               if (xfer_to_guest_mode_work_pending()) {
+                       local_irq_enable();
+                       rc = kvm_xfer_to_guest_mode_handle_work(vcpu);
+                       if (rc)
+                               break;
+                       goto xfer_to_guest_mode_check;
+               }
+
                guest_timing_enter_irqoff();
                __disable_cpu_timer_accounting(vcpu);
 
@@ -5029,9 +5037,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                kvm_vcpu_srcu_read_lock(vcpu);
 
                rc = vcpu_post_run(vcpu, exit_reason);
-       } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
+               if (rc || guestdbg_exit_pending(vcpu)) {
+                       kvm_vcpu_srcu_read_unlock(vcpu);
+                       break;
+               }
+       }
 
-       kvm_vcpu_srcu_read_unlock(vcpu);
        return rc;
 }
 
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d23ab51208880418407a61fd900e6de319e153a8..b526621d2a1b0a00cd63afd7a96b5c8da81984a7 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1180,12 +1180,23 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        current->thread.gmap_int_code = 0;
        barrier();
        if (!kvm_s390_vcpu_sie_inhibited(vcpu)) {
+xfer_to_guest_mode_check:
                local_irq_disable();
+               xfer_to_guest_mode_prepare();
+               if (xfer_to_guest_mode_work_pending()) {
+                       local_irq_enable();
+                       rc = kvm_xfer_to_guest_mode_handle_work(vcpu);
+                       if (rc)
+                               goto skip_sie;
+                       goto xfer_to_guest_mode_check;
+               }
                guest_timing_enter_irqoff();
                rc = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, vsie_page->gmap->asce);
                guest_timing_exit_irqoff();
                local_irq_enable();
        }
+
+skip_sie:
        barrier();
        vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
 
@@ -1345,13 +1356,11 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                 * but rewind the PSW to re-enter SIE once that's completed
                 * instead of passing a "no action" intercept to the guest.
                 */
-               if (signal_pending(current) ||
-                   kvm_s390_vcpu_has_irq(vcpu, 0) ||
+               if (kvm_s390_vcpu_has_irq(vcpu, 0) ||
                    kvm_s390_vcpu_sie_inhibited(vcpu)) {
                        kvm_s390_rewind_psw(vcpu, 4);
                        break;
                }
-               cond_resched();
        }
 
        if (rc == -EFAULT) {
@@ -1483,8 +1492,7 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
        if (unlikely(scb_addr & 0x1ffUL))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-       if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
-           kvm_s390_vcpu_sie_inhibited(vcpu)) {
+       if (kvm_s390_vcpu_has_irq(vcpu, 0) || kvm_s390_vcpu_sie_inhibited(vcpu)) {
                kvm_s390_rewind_psw(vcpu, 4);
                return 0;
        }