From f59922b47e0a202386c8e8dcf9f0235b8a028ae0 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Wed, 16 Sep 2015 12:14:52 +0200 Subject: KVM: s390: remove unused variable in __inject_vm The float int structure is no longer used in __inject_vm. Acked-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 5c2c169395c3..ab9f525aa7cd 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1390,12 +1390,9 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type) static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) { - struct kvm_s390_float_interrupt *fi; u64 type = READ_ONCE(inti->type); int rc; - fi = &kvm->arch.float_int; - switch (type) { case KVM_S390_MCHK: rc = __inject_float_mchk(kvm, inti); -- cgit v1.2.3 From fee0e0fdb2b9c221a3621bede722aa9f9c9f0d39 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 28 Sep 2015 13:32:38 +0200 Subject: KVM: s390: disabled wait cares about machine checks, not PER When checking for disabled wait, we care about asynchronous irqs, not about program event recording irqs (which are synchronous program irqs). Machine checks were missing from the check. Let's switch directly to the functions we already have for that purpose instead of testing the magic bits once again. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index ab9f525aa7cd..1058240b3db3 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -51,11 +51,9 @@ static int psw_mchk_disabled(struct kvm_vcpu *vcpu) static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) { - if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || - (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) || - (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT)) - return 0; - return 1; + return psw_extint_disabled(vcpu) && + psw_ioint_disabled(vcpu) && + psw_mchk_disabled(vcpu); } static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 5f94c58ed0a6db016528d8555f1b655ad354f7bb Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 28 Sep 2015 14:27:51 +0200 Subject: KVM: s390: set interception requests for all floating irqs No need to separate local and floating irqs when setting interception requests. Let's handle them all equally.
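The combined-mask idea is easy to demonstrate outside the kernel. The following standalone C sketch (invented mask values and names, not the kernel's definitions) shows how one OR-combined view of the two pending bitmaps is enough to test a whole interrupt category:

#include <stdio.h>

#define PEND_IO_MASK 0x0fUL  /* hypothetical stand-in for IRQ_PEND_IO_MASK */

/* one combined view of floating and local pending irqs, as in pending_irqs() */
static unsigned long pending_irqs(unsigned long floating, unsigned long local)
{
        return floating | local;
}

int main(void)
{
        unsigned long floating = 0x01UL; /* say, a floating I/O irq */
        unsigned long local = 0x10UL;    /* say, a local external irq */

        if (pending_irqs(floating, local) & PEND_IO_MASK)
                printf("request I/O interception\n");
        return 0;
}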
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 1058240b3db3..4f05520efbae 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -107,14 +107,10 @@ static inline u8 int_word_to_isc(u32 int_word) return (int_word & 0x38000000) >> 27; } -static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu) +static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) { - return vcpu->kvm->arch.float_int.pending_irqs; -} - -static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.local_int.pending_irqs; + return vcpu->kvm->arch.float_int.pending_irqs | + vcpu->arch.local_int.pending_irqs; } static unsigned long disable_iscs(struct kvm_vcpu *vcpu, @@ -133,8 +129,7 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) { unsigned long active_mask; - active_mask = pending_local_irqs(vcpu); - active_mask |= pending_floating_irqs(vcpu); + active_mask = pending_irqs(vcpu); if (!active_mask) return 0; @@ -202,7 +197,7 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) { - if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK)) + if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK)) return; else if (psw_ioint_disabled(vcpu)) __set_cpuflag(vcpu, CPUSTAT_IO_INT); @@ -212,7 +207,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) { - if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK)) + if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK)) return; if (psw_extint_disabled(vcpu)) __set_cpuflag(vcpu, CPUSTAT_EXT_INT); @@ -222,7 +217,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) { - if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) + if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) return; if (psw_mchk_disabled(vcpu)) vcpu->arch.sie_block->ictl |= ICTL_LPSW; -- cgit v1.2.3 From 118b862b153190f92415ece4cb97a896929c5ab8 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 23 Sep 2015 12:25:15 +0200 Subject: KVM: s390: kvm_arch_vcpu_runnable already cares about timer interrupts We can remove that double check. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 4f05520efbae..1260f8c18df9 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -839,7 +839,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) vcpu->stat.exit_wait_state++; /* fast path */ - if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu)) + if (kvm_arch_vcpu_runnable(vcpu)) return 0; if (psw_interrupts_disabled(vcpu)) { -- cgit v1.2.3 From 4d32ad6becf0baf09f38707f0aff42c0f4367a99 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 6 May 2015 13:51:29 +0200 Subject: KVM: s390: drop out early in kvm_s390_has_irq() Let's get rid of the local variable and exit directly if we find any pending interrupt. This is not only faster, but also more readable.
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 1260f8c18df9..10a0e8beb9e1 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -808,23 +808,21 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) { - int rc; - - rc = !!deliverable_irqs(vcpu); + if (deliverable_irqs(vcpu)) + return 1; - if (!rc && kvm_cpu_has_pending_timer(vcpu)) - rc = 1; + if (kvm_cpu_has_pending_timer(vcpu)) + return 1; /* external call pending and deliverable */ - if (!rc && kvm_s390_ext_call_pending(vcpu) && + if (kvm_s390_ext_call_pending(vcpu) && !psw_extint_disabled(vcpu) && (vcpu->arch.sie_block->gcr[0] & 0x2000ul)) - rc = 1; - - if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) - rc = 1; + return 1; - return rc; + if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) + return 1; + return 0; } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 66933b78e3204057bfc26343afcd0d463c0e8e55 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 20 Nov 2014 13:49:32 +0100 Subject: KVM: s390: simplify in-kernel program irq injection The main reason to keep program injection separated in the kernel until now was that it let us check whether really only the owning thread injects program interrupts (via waitqueue_active(li->wq)). This BUG_ON was never triggered, and the chances of really hitting it, had another thread injected a program irq into another vcpu, were very small. Let's drop this check and turn kvm_s390_inject_program_int() and kvm_s390_inject_prog_irq() into simple inline functions that make use of kvm_s390_inject_vcpu(). __must_check can be dropped, as it is implicitly given by kvm_s390_inject_vcpu(), to avoid ugly long function prototypes.
Reviewed-by: Jens Freimann Acked-by: Cornelia Huck Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 28 ---------------------------- arch/s390/kvm/kvm-s390.h | 24 ++++++++++++++++++++---- 2 files changed, 20 insertions(+), 32 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 10a0e8beb9e1..f603bacf6ac9 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -977,34 +977,6 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) return 0; } -int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) -{ - struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_irq irq; - - spin_lock(&li->lock); - irq.u.pgm.code = code; - __inject_prog(vcpu, &irq); - BUG_ON(waitqueue_active(li->wq)); - spin_unlock(&li->lock); - return 0; -} - -int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, - struct kvm_s390_pgm_info *pgm_info) -{ - struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_irq irq; - int rc; - - spin_lock(&li->lock); - irq.u.pgm = *pgm_info; - rc = __inject_prog(vcpu, &irq); - BUG_ON(waitqueue_active(li->wq)); - spin_unlock(&li->lock); - return rc; -} - static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c446aabf60d3..3a368d2a6114 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -175,6 +175,7 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) return kvm->arch.user_cpu_state_ctrl != 0; } +/* implemented in interrupt.c */ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu); enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); @@ -185,7 +186,25 @@ int __must_check kvm_s390_inject_vm(struct kvm *kvm, struct kvm_s390_interrupt *s390int); int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq); -int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); +static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, + struct kvm_s390_pgm_info *pgm_info) +{ + struct kvm_s390_irq irq = { + .type = KVM_S390_PROGRAM_INT, + .u.pgm = *pgm_info, + }; + + return kvm_s390_inject_vcpu(vcpu, &irq); +} +static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) +{ + struct kvm_s390_irq irq = { + .type = KVM_S390_PROGRAM_INT, + .u.pgm.code = code, + }; + + return kvm_s390_inject_vcpu(vcpu, &irq); +} struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, u64 isc_mask, u32 schid); int kvm_s390_reinject_io_int(struct kvm *kvm, @@ -231,9 +250,6 @@ extern unsigned long kvm_s390_fac_list_mask[]; /* implemented in diag.c */ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); -/* implemented in interrupt.c */ -int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, - struct kvm_s390_pgm_info *pgm_info); static inline void kvm_s390_vcpu_block_all(struct kvm *kvm) { -- cgit v1.2.3 From 238293b14d9b1f5689e2aa68710000b0f25aa612 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 4 May 2015 12:38:48 +0200 Subject: KVM: s390: correctly handle injection of pgm irqs and per events PER events can always co-exist with other program interrupts. 
Until now, we have always overwritten all program interrupt parameters when injecting any type of program interrupt. Let's handle that correctly by only overwriting the relevant portion of the program interrupt parameters. Therefore, we can now inject PER events and ordinary program interrupts concurrently, resulting in no loss of program interrupts. This will be especially helpful when manually detecting PER events later, as both types might be triggered during one SIE exit. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index f603bacf6ac9..a8be542b9cb0 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -972,7 +972,26 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, irq->u.pgm.code, 0); - li->irq.pgm = irq->u.pgm; + if (irq->u.pgm.code == PGM_PER) { + li->irq.pgm.code |= PGM_PER; + /* only modify PER related information */ + li->irq.pgm.per_address = irq->u.pgm.per_address; + li->irq.pgm.per_code = irq->u.pgm.per_code; + li->irq.pgm.per_atmid = irq->u.pgm.per_atmid; + li->irq.pgm.per_access_id = irq->u.pgm.per_access_id; + } else if (!(irq->u.pgm.code & PGM_PER)) { + li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) | + irq->u.pgm.code; + /* only modify non-PER information */ + li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code; + li->irq.pgm.mon_code = irq->u.pgm.mon_code; + li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code; + li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr; + li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id; + li->irq.pgm.op_access_id = irq->u.pgm.op_access_id; + } else { + li->irq.pgm = irq->u.pgm; + } set_bit(IRQ_PEND_PROG, &li->pending_irqs); return 0; } -- cgit v1.2.3 From 5a3d883a59b3fe8dc8775c7a79200a5b11a6761e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 29 Sep 2015 16:27:24 +0200 Subject: KVM: s390: switch to get_tod_clock() and fix STP sync races Nobody except early.c makes use of store_tod_clock() to handle the cc. And if we ever got a cc != 0, we would be in deeper trouble anyway. Let's replace all users with get_tod_clock(). Returning a cc on an ioctl seemed strange anyway. We can now also easily move the get_tod_clock() call into the preempt_disable() section. This is in fact necessary to make the STP sync work as expected. Otherwise the host TOD could change and we would end up with a wrong epoch calculation.
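The race being fixed is a classic read-outside-the-critical-section bug. A simplified, self-contained C sketch of the corrected pattern (userspace stand-ins, not the kernel implementation; CLOCK_MONOTONIC plays the role of the host TOD):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t get_clock(void) /* stand-in for get_tod_clock() */
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static uint64_t epoch;  /* guest time = host time + epoch */

static void set_guest_clock(uint64_t gtod)
{
        /* In the kernel this region runs under preempt_disable(); the
         * point of the patch is that the host clock is read inside it,
         * so the epoch matches the clock value actually applied. */
        epoch = gtod - get_clock();
}

int main(void)
{
        set_guest_clock(12345);
        printf("epoch = 0x%llx\n", (unsigned long long)epoch);
        return 0;
}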
Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 18 ++++--------------- arch/s390/kvm/priv.c | 8 ++------ 2 files changed, 6 insertions(+), 20 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0a67c40eece9..a0907795f31d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -523,19 +523,14 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) { struct kvm_vcpu *cur_vcpu; unsigned int vcpu_idx; - u64 host_tod, gtod; - int r; + u64 gtod; if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) return -EFAULT; - r = store_tod_clock(&host_tod); - if (r) - return r; - mutex_lock(&kvm->lock); preempt_disable(); - kvm->arch.epoch = gtod - host_tod; + kvm->arch.epoch = gtod - get_tod_clock(); kvm_s390_vcpu_block_all(kvm); kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; @@ -581,15 +576,10 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) { - u64 host_tod, gtod; - int r; - - r = store_tod_clock(&host_tod); - if (r) - return r; + u64 gtod; preempt_disable(); - gtod = host_tod + kvm->arch.epoch; + gtod = get_tod_clock() + kvm->arch.epoch; preempt_enable(); if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) return -EFAULT; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 4d21dc4d1a84..b253de5b8945 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -34,7 +34,7 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) { struct kvm_vcpu *cpup; - s64 hostclk, val; + s64 val; int i, rc; ar_t ar; u64 op2; @@ -49,15 +49,11 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - if (store_tod_clock(&hostclk)) { - kvm_s390_set_psw_cc(vcpu, 3); - return 0; - } VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); - val = (val - hostclk) & ~0x3fUL; mutex_lock(&vcpu->kvm->lock); preempt_disable(); + val = (val - get_tod_clock()) & ~0x3fUL; kvm_for_each_vcpu(i, cpup, vcpu->kvm) cpup->arch.sie_block->epoch = val; preempt_enable(); -- cgit v1.2.3 From 25ed16759660cdfccd4a3cb7d30cce8a797b542a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 12 May 2015 09:49:14 +0200 Subject: KVM: s390: factor out and fix setting of guest TOD clock Let's move that whole logic into one function. We now always use unsigned values when calculating the epoch, as over/underflow of unsigned values is well defined (unlike for signed values). Also, we always have to get all VCPUs out of SIE before doing the update to avoid running differing VCPUs with different TODs.
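The remark about unsigned values refers to C semantics: u64 over/underflow wraps modulo 2^64 and is well defined, whereas signed overflow is undefined behaviour. A tiny standalone demonstration of why the epoch arithmetic is safe with unsigned types:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t gtod = 5, host_tod = 10;
        uint64_t epoch = gtod - host_tod;       /* underflows, wraps predictably */

        /* adding the host TOD back recovers the guest TOD exactly */
        printf("epoch = 0x%llx, host + epoch = %llu\n",
               (unsigned long long)epoch,
               (unsigned long long)(host_tod + epoch));
        return 0;
}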
Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 28 +++++++++++++++++----------- arch/s390/kvm/kvm-s390.h | 1 + arch/s390/kvm/priv.c | 15 +++------------ 3 files changed, 21 insertions(+), 23 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index a0907795f31d..87bd602f326c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -521,22 +521,12 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) { - struct kvm_vcpu *cur_vcpu; - unsigned int vcpu_idx; u64 gtod; if (copy_from_user(>od, (void __user *)attr->addr, sizeof(gtod))) return -EFAULT; - mutex_lock(&kvm->lock); - preempt_disable(); - kvm->arch.epoch = gtod - get_tod_clock(); - kvm_s390_vcpu_block_all(kvm); - kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) - cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; - kvm_s390_vcpu_unblock_all(kvm); - preempt_enable(); - mutex_unlock(&kvm->lock); + kvm_s390_set_tod_clock(kvm, gtod); VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod); return 0; } @@ -1906,6 +1896,22 @@ retry: return 0; } +void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod) +{ + struct kvm_vcpu *vcpu; + int i; + + mutex_lock(&kvm->lock); + preempt_disable(); + kvm->arch.epoch = tod - get_tod_clock(); + kvm_s390_vcpu_block_all(kvm); + kvm_for_each_vcpu(i, vcpu, kvm) + vcpu->arch.sie_block->epoch = kvm->arch.epoch; + kvm_s390_vcpu_unblock_all(kvm); + preempt_enable(); + mutex_unlock(&kvm->lock); +} + /** * kvm_arch_fault_in_page - fault-in guest page if necessary * @vcpu: The corresponding virtual cpu diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 3a368d2a6114..cc15ea3a150e 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -231,6 +231,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ +void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod); long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index b253de5b8945..77191b85ea7a 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -33,11 +33,9 @@ /* Handle SCK (SET CLOCK) interception */ static int handle_set_clock(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *cpup; - s64 val; - int i, rc; + int rc; ar_t ar; - u64 op2; + u64 op2, val; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -50,14 +48,7 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) return kvm_s390_inject_prog_cond(vcpu, rc); VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); - - mutex_lock(&vcpu->kvm->lock); - preempt_disable(); - val = (val - get_tod_clock()) & ~0x3fUL; - kvm_for_each_vcpu(i, cpup, vcpu->kvm) - cpup->arch.sie_block->epoch = val; - preempt_enable(); - mutex_unlock(&vcpu->kvm->lock); + kvm_s390_set_tod_clock(vcpu->kvm, val); kvm_s390_set_psw_cc(vcpu, 0); return 0; -- cgit v1.2.3 From 60417fcc2b0235dfe3dcd589c56dbe3ea1a64c54 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 29 Sep 2015 16:20:36 +0200 Subject: KVM: s390: factor out reading of the guest TOD clock Let's factor this out and always use 
get_tod_clock_fast() when reading the guest TOD. STORE CLOCK FAST does not do serialization and, therefore, might result in some fuzziness between different processors, such that a later call on another CPU can return an earlier time stamp. These semantics are fine for all KVM use cases, though. To make it obvious that the new function has STORE CLOCK FAST semantics, we name it kvm_s390_get_tod_clock_fast. With this patch, we only have a handful of places where we have to care about STP sync (using preempt_disable() logic). Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 15 +++------------ arch/s390/kvm/kvm-s390.c | 4 +--- arch/s390/kvm/kvm-s390.h | 10 ++++++++++ 3 files changed, 14 insertions(+), 15 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index a8be542b9cb0..373e32346d68 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -69,13 +69,8 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) static int ckc_irq_pending(struct kvm_vcpu *vcpu) { - preempt_disable(); - if (!(vcpu->arch.sie_block->ckc < - get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) { - preempt_enable(); + if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm)) return 0; - } - preempt_enable(); return ckc_interrupts_enabled(vcpu); } @@ -851,9 +846,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) goto no_timer; } - preempt_disable(); - now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; - preempt_enable(); + now = kvm_s390_get_tod_clock_fast(vcpu->kvm); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); /* underflow */ @@ -892,9 +885,7 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) u64 now, sltime; vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); - preempt_disable(); - now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; - preempt_enable(); + now = kvm_s390_get_tod_clock_fast(vcpu->kvm); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); /* diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 87bd602f326c..618c85411a51 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -568,9 +568,7 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) { u64 gtod; - preempt_disable(); - gtod = get_tod_clock() + kvm->arch.epoch; - preempt_enable(); + gtod = kvm_s390_get_tod_clock_fast(kvm); if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) return -EFAULT; VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index cc15ea3a150e..1e70e00d3c5e 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -271,6 +271,16 @@ static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm) kvm_s390_vcpu_unblock(vcpu); } +static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm) +{ + u64 rc; + + preempt_disable(); + rc = get_tod_clock_fast() + kvm->arch.epoch; + preempt_enable(); + return rc; +} + /** * kvm_s390_inject_prog_cond - conditionally inject a program check * @vcpu: virtual cpu -- cgit v1.2.3 From b5510d9b68c33964abd938148f407ad3789e369f Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Tue, 29 Sep 2015 10:04:41 +0200 Subject: s390/fpu: always enable the vector facility if it is available If the kernel detects that the s390 hardware supports the vector facility, it is enabled by default at an early stage.
To force it off, use the novx kernel parameter. Note that there is a small time window where the vector facility is enabled before it is forced off. With the vector facility enabled by default, the FPU save and restore functions can be improved: they no longer need to manage expensive control register updates to enable or disable the vector enablement control for particular processes. Signed-off-by: Hendrik Brueckner Reviewed-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/ctl_reg.h | 2 - arch/s390/include/asm/fpu-internal.h | 14 +---- arch/s390/kernel/asm-offsets.c | 1 - arch/s390/kernel/compat_signal.c | 7 ++- arch/s390/kernel/early.c | 12 ++++- arch/s390/kernel/entry.S | 102 ++++++++--------------------- arch/s390/kernel/entry.h | 2 - arch/s390/kernel/process.c | 32 ++++++----- arch/s390/kernel/ptrace.c | 44 ++++++--------- arch/s390/kernel/s390_ksyms.c | 1 - arch/s390/kernel/signal.c | 7 ++- arch/s390/kernel/traps.c | 39 -------------- arch/s390/kvm/kvm-s390.c | 6 --- 13 files changed, 70 insertions(+), 199 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 17a373576868..d7697ab802f6 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -46,8 +46,6 @@ static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) __ctl_load(reg, cr, cr); } -void __ctl_set_vx(void); - void smp_ctl_set_bit(int cr, int bit); void smp_ctl_clear_bit(int cr, int bit); diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu-internal.h index 55dc2c0fb40a..cd79a3355b2a 100644 --- a/arch/s390/include/asm/fpu-internal.h +++ b/arch/s390/include/asm/fpu-internal.h @@ -8,10 +8,6 @@ #ifndef _ASM_S390_FPU_INTERNAL_H #define _ASM_S390_FPU_INTERNAL_H -#define FPU_USE_VX 1 /* Vector extension is active */ - -#ifndef __ASSEMBLY__ - #include #include #include @@ -20,7 +16,6 @@ struct fpu { __u32 fpc; /* Floating-point control */ - __u32 flags; union { void *regs; freg_t *fprs; /* Floating-point register save area */ @@ -30,9 +25,6 @@ struct fpu { void save_fpu_regs(void); -#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX)) -#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX)) - /* VX array structure for address operand constraints in inline assemblies */ struct vx_array { __vector128 _[__NUM_VXRS]; }; @@ -89,7 +81,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs) static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) { fpregs->pad = 0; - if (is_vx_fpu(fpu)) + if (MACHINE_HAS_VX) convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); else memcpy((freg_t *)&fpregs->fprs, fpu->fprs, @@ -98,13 +90,11 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) { - if (is_vx_fpu(fpu)) + if (MACHINE_HAS_VX) convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); else memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, sizeof(fpregs->fprs)); } -#endif - #endif /* _ASM_S390_FPU_INTERNAL_H */ diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 3aeeb1b562c0..f77834a498a9 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -29,7 +29,6 @@ int main(void) BLANK(); DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp)); DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc)); - DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags));
DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs)); DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause)); DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address)); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index e0f9d270b30f..66c94417c0ba 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -249,7 +249,7 @@ static int save_sigregs_ext32(struct pt_regs *regs, return -EFAULT; /* Save vector registers to signal stack */ - if (is_vx_task(current)) { + if (MACHINE_HAS_VX) { for (i = 0; i < __NUM_VXRS_LOW; i++) vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, @@ -277,7 +277,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs, *(__u32 *)®s->gprs[i] = gprs_high[i]; /* Restore vector registers from signal stack */ - if (is_vx_task(current)) { + if (MACHINE_HAS_VX) { if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, sizeof(sregs_ext->vxrs_low)) || __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, @@ -470,8 +470,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, */ uc_flags = UC_GPRS_HIGH; if (MACHINE_HAS_VX) { - if (is_vx_task(current)) - uc_flags |= UC_VXRS; + uc_flags |= UC_VXRS; } else frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) + sizeof(frame->uc.uc_mcontext_ext.vxrs_high); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 311a2d6d48e4..3c31609df959 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -329,9 +329,19 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_TE; if (test_facility(51)) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; - if (test_facility(129)) + if (test_facility(129)) { S390_lowcore.machine_flags |= MACHINE_FLAG_VX; + __ctl_set_bit(0, 17); + } +} + +static int __init disable_vector_extension(char *str) +{ + S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; + __ctl_clear_bit(0, 17); + return 1; } +early_param("novx", disable_vector_extension); static int __init cad_setup(char *str) { diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 582fe44ab07c..b78babf9a417 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -20,7 +20,6 @@ #include #include #include -#include #include __PT_R0 = __PT_GPRS @@ -748,15 +747,12 @@ ENTRY(psw_idle) br %r14 .Lpsw_idle_end: -/* Store floating-point controls and floating-point or vector extension - * registers instead. A critical section cleanup assures that the registers - * are stored even if interrupted for some other work. The register %r2 - * designates a struct fpu to store register contents. If the specified - * structure does not contain a register save area, the register store is - * omitted (see also comments in arch_dup_task_struct()). - * - * The CIF_FPU flag is set in any case. The CIF_FPU triggers a lazy restore - * of the register contents at system call or io return. +/* + * Store floating-point controls and floating-point or vector register + * depending whether the vector facility is available. A critical section + * cleanup assures that the registers are stored even if interrupted for + * some other work. The CIF_FPU flag is set to trigger a lazy restore + * of the register contents at return from io or a system call. 
*/ ENTRY(save_fpu_regs) lg %r2,__LC_CURRENT @@ -768,7 +764,7 @@ ENTRY(save_fpu_regs) lg %r3,__THREAD_FPU_regs(%r2) ltgr %r3,%r3 jz .Lsave_fpu_regs_done # no save area -> set CIF_FPU - tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX + tm __LC_MACHINE_FLAGS+5,4 # MACHINE_HAS_VX jz .Lsave_fpu_regs_fp # no -> store FP regs .Lsave_fpu_regs_vx_low: VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) @@ -797,15 +793,15 @@ ENTRY(save_fpu_regs) br %r14 .Lsave_fpu_regs_end: -/* Load floating-point controls and floating-point or vector extension - * registers. A critical section cleanup assures that the register contents - * are loaded even if interrupted for some other work. Depending on the saved - * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared. +/* + * Load floating-point controls and floating-point or vector registers. + * A critical section cleanup assures that the register contents are + * loaded even if interrupted for some other work. * * There are special calling conventions to fit into sysc and io return work: * %r15: * The function requires: - * %r4 and __SF_EMPTY+32(%r15) + * %r4 */ load_fpu_regs: lg %r4,__LC_CURRENT @@ -813,25 +809,14 @@ load_fpu_regs: tm __LC_CPU_FLAGS+7,_CIF_FPU bnor %r14 lfpc __THREAD_FPU_fpc(%r4) - stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 - tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? + tm __LC_MACHINE_FLAGS+5,4 # MACHINE_HAS_VX lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area - jz .Lload_fpu_regs_fp_ctl # -> no VX, load FP regs -.Lload_fpu_regs_vx_ctl: - tm __SF_EMPTY+32+5(%r15),2 # test VX control - jo .Lload_fpu_regs_vx - oi __SF_EMPTY+32+5(%r15),2 # set VX control - lctlg %c0,%c0,__SF_EMPTY+32(%r15) + jz .Lload_fpu_regs_fp # -> no VX, load FP regs .Lload_fpu_regs_vx: VLM %v0,%v15,0,%r4 .Lload_fpu_regs_vx_high: VLM %v16,%v31,256,%r4 j .Lload_fpu_regs_done -.Lload_fpu_regs_fp_ctl: - tm __SF_EMPTY+32+5(%r15),2 # test VX control - jz .Lload_fpu_regs_fp - ni __SF_EMPTY+32+5(%r15),253 # clear VX control - lctlg %c0,%c0,__SF_EMPTY+32(%r15) .Lload_fpu_regs_fp: ld 0,0(%r4) ld 1,8(%r4) @@ -854,16 +839,6 @@ load_fpu_regs: br %r14 .Lload_fpu_regs_end: -/* Test and set the vector enablement control in CR0.46 */ -ENTRY(__ctl_set_vx) - stctg %c0,%c0,__SF_EMPTY(%r15) - tm __SF_EMPTY+5(%r15),2 - bor %r14 - oi __SF_EMPTY+5(%r15),2 - lctlg %c0,%c0,__SF_EMPTY(%r15) - br %r14 -.L__ctl_set_vx_end: - .L__critical_end: /* @@ -1019,10 +994,6 @@ cleanup_critical: jl 0f clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end jl .Lcleanup_load_fpu_regs - clg %r9,BASED(.Lcleanup_table+112) # __ctl_set_vx - jl 0f - clg %r9,BASED(.Lcleanup_table+120) # .L__ctl_set_vx_end - jl .Lcleanup___ctl_set_vx 0: br %r14 .align 8 @@ -1041,8 +1012,6 @@ cleanup_critical: .quad .Lsave_fpu_regs_end .quad load_fpu_regs .quad .Lload_fpu_regs_end - .quad __ctl_set_vx - .quad .L__ctl_set_vx_end #if IS_ENABLED(CONFIG_KVM) .Lcleanup_table_sie: @@ -1226,7 +1195,7 @@ cleanup_critical: lg %r3,__THREAD_FPU_regs(%r2) ltgr %r3,%r3 jz 5f # no save area -> set CIF_FPU - tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX + tm __LC_MACHINE_FLAGS+5,4 # MACHINE_HAS_VX jz 4f # no VX -> store FP regs 2: # Store vector registers (V0-V15) VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) @@ -1272,37 +1241,21 @@ cleanup_critical: jhe 1f clg %r9,BASED(.Lcleanup_load_fpu_regs_fp) jhe 2f - clg %r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl) - jhe 3f clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high) - jhe 4f + jhe 3f clg %r9,BASED(.Lcleanup_load_fpu_regs_vx) - jhe 5f - clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl) - jhe 6f + jhe 4f lg 
%r4,__LC_CURRENT aghi %r4,__TASK_thread lfpc __THREAD_FPU_fpc(%r4) - tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? + tm __LC_MACHINE_FLAGS+5,4 # MACHINE_HAS_VX lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area - jz 3f # -> no VX, load FP regs -6: # Set VX-enablement control - stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 - tm __SF_EMPTY+32+5(%r15),2 # test VX control - jo 5f - oi __SF_EMPTY+32+5(%r15),2 # set VX control - lctlg %c0,%c0,__SF_EMPTY+32(%r15) -5: # Load V0 ..V15 registers + jz 2f # -> no VX, load FP regs +4: # Load V0 ..V15 registers VLM %v0,%v15,0,%r4 -4: # Load V16..V31 registers +3: # Load V16..V31 registers VLM %v16,%v31,256,%r4 j 1f -3: # Clear VX-enablement control for FP - stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 - tm __SF_EMPTY+32+5(%r15),2 # test VX control - jz 2f - ni __SF_EMPTY+32+5(%r15),253 # clear VX control - lctlg %c0,%c0,__SF_EMPTY+32(%r15) 2: # Load floating-point registers ld 0,0(%r4) ld 1,8(%r4) @@ -1324,28 +1277,15 @@ cleanup_critical: ni __LC_CPU_FLAGS+7,255-_CIF_FPU lg %r9,48(%r11) # return from load_fpu_regs br %r14 -.Lcleanup_load_fpu_regs_vx_ctl: - .quad .Lload_fpu_regs_vx_ctl .Lcleanup_load_fpu_regs_vx: .quad .Lload_fpu_regs_vx .Lcleanup_load_fpu_regs_vx_high: .quad .Lload_fpu_regs_vx_high -.Lcleanup_load_fpu_regs_fp_ctl: - .quad .Lload_fpu_regs_fp_ctl .Lcleanup_load_fpu_regs_fp: .quad .Lload_fpu_regs_fp .Lcleanup_load_fpu_regs_done: .quad .Lload_fpu_regs_done -.Lcleanup___ctl_set_vx: - stctg %c0,%c0,__SF_EMPTY(%r15) - tm __SF_EMPTY+5(%r15),2 - bor %r14 - oi __SF_EMPTY+5(%r15),2 - lctlg %c0,%c0,__SF_EMPTY(%r15) - lg %r9,48(%r11) # return from __ctl_set_vx - br %r14 - /* * Integer constants */ diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 834df047d35f..3eecd0f7adb2 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -21,8 +21,6 @@ void psw_idle(struct s390_idle_data *, unsigned long); asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); -int alloc_vector_registers(struct task_struct *tsk); - void do_protection_exception(struct pt_regs *regs); void do_dat_exception(struct pt_regs *regs); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index f2dac9f0799d..b63787156a19 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -87,31 +87,29 @@ void arch_release_task_struct(struct task_struct *tsk) int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { + size_t fpu_regs_size; + *dst = *src; - /* Set up a new floating-point register save area */ - dst->thread.fpu.fpc = 0; - dst->thread.fpu.flags = 0; /* Always start with VX disabled */ - dst->thread.fpu.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, - GFP_KERNEL|__GFP_REPEAT); - if (!dst->thread.fpu.fprs) + /* + * If the vector extension is available, it is enabled for all tasks, + * and, thus, the FPU register save area must be allocated accordingly. + */ + fpu_regs_size = MACHINE_HAS_VX ? sizeof(__vector128) * __NUM_VXRS + : sizeof(freg_t) * __NUM_FPRS; + dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT); + if (!dst->thread.fpu.regs) return -ENOMEM; /* * Save the floating-point or vector register state of the current - * task. The state is not saved for early kernel threads, for example, - * the init_task, which do not have an allocated save area. - * The CIF_FPU flag is set in any case to lazy clear or restore a saved - * state when switching to a different task or returning to user space. 
+ * task and set the CIF_FPU flag to lazy restore the FPU register + * state when returning to user space. */ save_fpu_regs(); dst->thread.fpu.fpc = current->thread.fpu.fpc; - if (is_vx_task(current)) - convert_vx_to_fp(dst->thread.fpu.fprs, - current->thread.fpu.vxrs); - else - memcpy(dst->thread.fpu.fprs, current->thread.fpu.fprs, - sizeof(freg_t) * __NUM_FPRS); + memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size); + return 0; } @@ -199,7 +197,7 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) save_fpu_regs(); fpregs->fpc = current->thread.fpu.fpc; fpregs->pad = 0; - if (is_vx_task(current)) + if (MACHINE_HAS_VX) convert_vx_to_fp((freg_t *)&fpregs->fprs, current->thread.fpu.vxrs); else diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 8b1c8e33f184..3ccd9008a4dc 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -239,7 +239,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; - if (is_vx_task(child)) + if (MACHINE_HAS_VX) tmp = *(addr_t *) ((addr_t) child->thread.fpu.vxrs + 2*offset); else @@ -383,7 +383,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; - if (is_vx_task(child)) + if (MACHINE_HAS_VX) *(addr_t *)((addr_t) child->thread.fpu.vxrs + 2*offset) = data; else @@ -617,7 +617,7 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; - if (is_vx_task(child)) + if (MACHINE_HAS_VX) tmp = *(__u32 *) ((addr_t) child->thread.fpu.vxrs + 2*offset); else @@ -742,7 +742,7 @@ static int __poke_user_compat(struct task_struct *child, * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; - if (is_vx_task(child)) + if (MACHINE_HAS_VX) *(__u32 *)((addr_t) child->thread.fpu.vxrs + 2*offset) = tmp; else @@ -981,7 +981,7 @@ static int s390_fpregs_set(struct task_struct *target, if (rc) return rc; - if (is_vx_task(target)) + if (MACHINE_HAS_VX) convert_fp_to_vx(target->thread.fpu.vxrs, fprs); else memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); @@ -1047,13 +1047,10 @@ static int s390_vxrs_low_get(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (is_vx_task(target)) { - if (target == current) - save_fpu_regs(); - for (i = 0; i < __NUM_VXRS_LOW; i++) - vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); - } else - memset(vxrs, 0, sizeof(vxrs)); + if (target == current) + save_fpu_regs(); + for (i = 0; i < __NUM_VXRS_LOW; i++) + vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); } @@ -1067,11 +1064,7 @@ static int s390_vxrs_low_set(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (!is_vx_task(target)) { - rc = alloc_vector_registers(target); - if (rc) - return rc; - } else if (target == current) + if (target == current) save_fpu_regs(); rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); @@ -1091,13 +1084,10 @@ static int s390_vxrs_high_get(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (is_vx_task(target)) { - if (target == current) - save_fpu_regs(); - memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, - sizeof(vxrs)); - } else - memset(vxrs, 0, sizeof(vxrs)); + if (target 
== current) + save_fpu_regs(); + memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs)); + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); } @@ -1110,11 +1100,7 @@ static int s390_vxrs_high_set(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (!is_vx_task(target)) { - rc = alloc_vector_registers(target); - if (rc) - return rc; - } else if (target == current) + if (target == current) save_fpu_regs(); rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 5090d3dad10b..d9325b760596 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -10,7 +10,6 @@ EXPORT_SYMBOL(_mcount); EXPORT_SYMBOL(sie64a); EXPORT_SYMBOL(sie_exit); EXPORT_SYMBOL(save_fpu_regs); -EXPORT_SYMBOL(__ctl_set_vx); #endif EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 9549af102d75..028cc46cb82a 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -179,7 +179,7 @@ static int save_sigregs_ext(struct pt_regs *regs, int i; /* Save vector registers to signal stack */ - if (is_vx_task(current)) { + if (MACHINE_HAS_VX) { for (i = 0; i < __NUM_VXRS_LOW; i++) vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, @@ -199,7 +199,7 @@ static int restore_sigregs_ext(struct pt_regs *regs, int i; /* Restore vector registers from signal stack */ - if (is_vx_task(current)) { + if (MACHINE_HAS_VX) { if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, sizeof(sregs_ext->vxrs_low)) || __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, @@ -381,8 +381,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, uc_flags = 0; if (MACHINE_HAS_VX) { frame_size += sizeof(_sigregs_ext); - if (is_vx_task(current)) - uc_flags |= UC_VXRS; + uc_flags |= UC_VXRS; } frame = get_sigframe(&ksig->ka, regs, frame_size); if (frame == (void __user *) -1UL) diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 9861613fb35a..d9c45318f19b 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -224,29 +224,6 @@ NOKPROBE_SYMBOL(illegal_op); DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, "specification exception"); -int alloc_vector_registers(struct task_struct *tsk) -{ - __vector128 *vxrs; - freg_t *fprs; - - /* Allocate vector register save area. 
*/ - vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS, - GFP_KERNEL|__GFP_REPEAT); - if (!vxrs) - return -ENOMEM; - preempt_disable(); - if (tsk == current) - save_fpu_regs(); - /* Copy the 16 floating point registers */ - convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs); - fprs = tsk->thread.fpu.fprs; - tsk->thread.fpu.vxrs = vxrs; - tsk->thread.fpu.flags |= FPU_USE_VX; - kfree(fprs); - preempt_enable(); - return 0; -} - void vector_exception(struct pt_regs *regs) { int si_code, vic; @@ -281,13 +258,6 @@ void vector_exception(struct pt_regs *regs) do_trap(regs, SIGFPE, si_code, "vector exception"); } -static int __init disable_vector_extension(char *str) -{ - S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; - return 1; -} -__setup("novx", disable_vector_extension); - void data_exception(struct pt_regs *regs) { __u16 __user *location; @@ -296,15 +266,6 @@ void data_exception(struct pt_regs *regs) location = get_trap_ip(regs); save_fpu_regs(); - /* Check for vector register enablement */ - if (MACHINE_HAS_VX && !is_vx_task(current) && - (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) { - alloc_vector_registers(current); - /* Vector data exception is suppressing, rewind psw. */ - regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); - clear_pt_regs_flag(regs, PIF_PER_TRAP); - return; - } if (current->thread.fpu.fpc & FPC_DXC_MASK) signal = SIGFPE; else diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0a67c40eece9..c6b4063fce29 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1292,7 +1292,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) static inline void save_fpu_to(struct fpu *dst) { dst->fpc = current->thread.fpu.fpc; - dst->flags = current->thread.fpu.flags; dst->regs = current->thread.fpu.regs; } @@ -1303,7 +1302,6 @@ static inline void save_fpu_to(struct fpu *dst) static inline void load_fpu_from(struct fpu *from) { current->thread.fpu.fpc = from->fpc; - current->thread.fpu.flags = from->flags; current->thread.fpu.regs = from->regs; } @@ -1315,15 +1313,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (test_kvm_facility(vcpu->kvm, 129)) { current->thread.fpu.fpc = vcpu->run->s.regs.fpc; - current->thread.fpu.flags = FPU_USE_VX; /* * Use the register save area in the SIE-control block * for register restore and save in kvm_arch_vcpu_put() */ current->thread.fpu.vxrs = (__vector128 *)&vcpu->run->s.regs.vrs; - /* Always enable the vector extension for KVM */ - __ctl_set_vx(); } else load_fpu_from(&vcpu->arch.guest_fpregs); @@ -2326,7 +2321,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) * registers and the FPC value and store them in the * guest_fpregs structure. */ - WARN_ON(!is_vx_task(current)); /* XXX remove later */ vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc; convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs, current->thread.fpu.vxrs); -- cgit v1.2.3 From c5c2c393468576bad6d10b2b5fefff8cd25df3f4 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 26 Oct 2015 08:41:29 +0100 Subject: KVM: s390: SCA must not cross page boundaries We seem to have missed a few corner cases in commit f6c137ff00a4 ("KVM: s390: randomize sca address"). The SCA has a maximum size of 2112 bytes. By setting the sca_offset to some unlucky numbers, we exceed the page:
0x7c0 (1984) -> Fits exactly
0x7d0 (2000) -> 16 bytes out
0x7e0 (2016) -> 32 bytes out
0x7f0 (2032) -> 48 bytes out
One VCPU entry is 32 bytes long. For the last two cases, we actually write data to the other page:
1. The address of the VCPU.
2. Injection/delivery/clearing of SIGP external calls via SIGP IF.
Especially the second happens regularly. So this could produce two problems:
1. The guest losing/getting external calls.
2. Random memory overwrites in the host.
So with 64 VCPUs, this problem occurs for every 127th and 128th created VM. Cc: stable@vger.kernel.org # v3.15+ Acked-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 618c85411a51..35596177fad2 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1098,7 +1098,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (!kvm->arch.sca) goto out_err; spin_lock(&kvm_lock); - sca_offset = (sca_offset + 16) & 0x7f0; + sca_offset += 16; + if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE) + sca_offset = 0; kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset); spin_unlock(&kvm_lock); -- cgit v1.2.3 From 58c383c62e1a4379cee531b56e4293211f2d5ded Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 12 Oct 2015 13:27:29 +0200 Subject: KVM: s390: drop useless newline in debugging data The s390 debug feature does not need newlines; they just result in empty lines in the log. Get rid of 4 leftovers. Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck --- arch/s390/kvm/kvm-s390.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 35596177fad2..07a6aa896c12 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -514,7 +514,7 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) if (gtod_high != 0) return -EINVAL; - VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high); + VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high); return 0; } @@ -527,7 +527,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) return -EFAULT; kvm_s390_set_tod_clock(kvm, gtod); - VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod); + VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod); return 0; } @@ -559,7 +559,7 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) if (copy_to_user((void __user *)attr->addr, &gtod_high, sizeof(gtod_high))) return -EFAULT; - VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high); + VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); return 0; } @@ -571,7 +571,7 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) gtod = kvm_s390_get_tod_clock_fast(kvm); if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) return -EFAULT; - VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod); + VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); return 0; } -- cgit v1.2.3 From 46b708ea875f14f5496109df053624199f3aae87 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 23 Jul 2015 15:24:03 +0200 Subject: KVM: s390: use simple switch statement as multiplexer We currently do some magic shifting (by exploiting that exit codes are always a multiple of 4) and a table lookup to jump into the exit handlers. This causes some calculations and checks, just to do a potentially expensive function call.
Changing that to a switch statement gives the compiler the chance to inline and dynamically decide between jump tables or inline compare and branches. In addition it makes the code more readable. bloat-o-meter gives me a small reduction in code size:
add/remove: 0/7 grow/shrink: 1/1 up/down: 986/-1334 (-348)
function                      old     new   delta
kvm_handle_sie_intercept       72    1058    +986
handle_prog                   704     696      -8
handle_noop                    54       -     -54
handle_partial_execution       60       -     -60
intercept_funcs               120       -    -120
handle_instruction            198       -    -198
handle_validity               210       -    -210
handle_stop                   316       -    -316
handle_external_interrupt     368       -    -368
Right now my gcc does conditional branches instead of jump tables. The inlining seems to win back enough cycles: some micro-benchmarking shows minimal improvements, but still within noise. Signed-off-by: Christian Borntraeger Reviewed-by: Cornelia Huck --- arch/s390/kvm/intercept.c | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 7365e8a46032..b4a5aa110cec 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -336,28 +336,28 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } -static const intercept_handler_t intercept_funcs[] = { - [0x00 >> 2] = handle_noop, - [0x04 >> 2] = handle_instruction, - [0x08 >> 2] = handle_prog, - [0x10 >> 2] = handle_noop, - [0x14 >> 2] = handle_external_interrupt, - [0x18 >> 2] = handle_noop, - [0x1C >> 2] = kvm_s390_handle_wait, - [0x20 >> 2] = handle_validity, - [0x28 >> 2] = handle_stop, - [0x38 >> 2] = handle_partial_execution, -}; - int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) { - intercept_handler_t func; - u8 code = vcpu->arch.sie_block->icptcode; - - if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) + switch (vcpu->arch.sie_block->icptcode) { + case 0x00: + case 0x10: + case 0x18: + return handle_noop(vcpu); + case 0x04: + return handle_instruction(vcpu); + case 0x08: + return handle_prog(vcpu); + case 0x14: + return handle_external_interrupt(vcpu); + case 0x1c: + return kvm_s390_handle_wait(vcpu); + case 0x20: + return handle_validity(vcpu); + case 0x28: + return handle_stop(vcpu); + case 0x38: + return handle_partial_execution(vcpu); + default: return -EOPNOTSUPP; - func = intercept_funcs[code >> 2]; - if (func) - return func(vcpu); - return -EOPNOTSUPP; + } } -- cgit v1.2.3 From 5967c17b118a2bd1dd1d554cc4eee16233e52bec Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 6 Nov 2015 12:08:48 +0100 Subject: KVM: s390: enable SIMD only when no VCPUs were created We should never allow enabling/disabling any facilities for the guest once VCPUs have been created. kvm_arch_vcpu_(load|put) relies on SIMD not changing during runtime. If somebody created and ran VCPUs and then decided to enable SIMD, undefined behaviour would be possible (e.g. vector save area not being set up).
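The -EBUSY path below is an instance of a general pattern: freeze a configuration knob once consumers exist, with both the check and the update done under the same lock. A generic standalone sketch of the pattern (POSIX threads and invented names, not the KVM code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int online_vcpus;        /* would be incremented under the same lock */
static int simd_enabled;

static int enable_simd(void)
{
        int r = 0;

        pthread_mutex_lock(&lock);
        if (online_vcpus)
                r = -EBUSY;     /* too late: VCPUs rely on the current setting */
        else
                simd_enabled = 1;
        pthread_mutex_unlock(&lock);
        return r;
}

int main(void)
{
        printf("before VCPU creation: %d\n", enable_simd());
        online_vcpus = 1;
        printf("after VCPU creation: %d\n", enable_simd());
        return 0;
}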
Acked-by: Christian Borntraeger Acked-by: Cornelia Huck Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger Cc: stable@vger.kernel.org # 4.1+ --- arch/s390/kvm/kvm-s390.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8fe2f1c722dc..846589281b04 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) r = 0; break; case KVM_CAP_S390_VECTOR_REGISTERS: - if (MACHINE_HAS_VX) { + mutex_lock(&kvm->lock); + if (atomic_read(&kvm->online_vcpus)) { + r = -EBUSY; + } else if (MACHINE_HAS_VX) { set_kvm_facility(kvm->arch.model.fac->mask, 129); set_kvm_facility(kvm->arch.model.fac->list, 129); r = 0; } else r = -EINVAL; + mutex_unlock(&kvm->lock); VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", r ? "(not available)" : "(success)"); break; -- cgit v1.2.3 From 03c02807e25ef0f44767f28e939efc2c5deb0f3d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 13 Nov 2015 13:31:58 +0100 Subject: KVM: s390: fix pfmf intercept handler The pfmf intercept handler should check if the EDAT 1 facility is installed in the guest, not if it is installed in the host. Signed-off-by: Heiko Carstens Signed-off-by: Christian Borntraeger --- arch/s390/kvm/priv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 77191b85ea7a..d76b51cb4b62 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) kvm_s390_get_regs_rre(vcpu, ®1, ®2); - if (!MACHINE_HAS_PFMF) + if (!test_kvm_facility(vcpu->kvm, 8)) return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) -- cgit v1.2.3 From b85de33a1a3433487b6a721cfdce25ec8673e622 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 5 Nov 2015 09:38:15 +0100 Subject: KVM: s390: avoid memory overwrites on emergency signal injection Commit 383d0b050106 ("KVM: s390: handle pending local interrupts via bitmap") introduced a possible memory overwrite from user space. User space could pass an invalid emergency signal code (sending VCPU) and therefore exceed the bitmap. Let's take care of this case and check that the id is in the valid range. 
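The overwrite mechanism is easy to see in a self-contained C model: the emergency-signal bitmap is sized for the maximum number of VCPUs, and setting a bit for an out-of-range sender id lands in whatever field follows the bitmap. This sketch uses a plain bit array rather than the kernel's bitmap API (and assumes 64-bit longs):

#include <stdio.h>

#define MAX_VCPUS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct local_int {
        unsigned long sigp_emerg_pending[MAX_VCPUS / BITS_PER_LONG];
        unsigned long cpuflags; /* innocent neighbouring field */
};

static void set_bit_raw(unsigned long nr, unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
        struct local_int li = { { 0 }, 0 };

        /* an unvalidated id from user space, as before the fix ... */
        set_bit_raw(64, li.sigp_emerg_pending);
        /* ... flips a bit one past the bitmap and corrupts the neighbour */
        printf("cpuflags = %#lx\n", li.cpuflags);
        return 0;
}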
Reviewed-by: Dominik Dingel Cc: stable@vger.kernel.org # v3.19+ db27a7a KVM: Provide function for VCPU lookup by id Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 373e32346d68..7242c2da4009 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1110,6 +1110,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, irq->u.emerg.code, 0); + /* sending vcpu invalid */ + if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL) + return -EINVAL; + set_bit(irq->u.emerg.code, li->sigp_emerg_pending); set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); atomic_or(CPUSTAT_EXT_INT, li->cpuflags); -- cgit v1.2.3 From 152e9f65d66f0a3891efc3869440becc0e7ff53f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 5 Nov 2015 09:06:06 +0100 Subject: KVM: s390: fix wrong lookup of VCPUs by array index Until now, VCPUs have always been created sequentially with incrementing VCPU ids, so the index in the VCPUs array matched the id. As sequential creation might change with cpu hotplug, let's use the correct lookup function to find a VCPU by id, not array index. Let's also use kvm_get_vcpu_by_id() for validation of the sending VCPU on external call injection. Reviewed-by: Christian Borntraeger Signed-off-by: David Hildenbrand Signed-off-by: Christian Borntraeger Cc: stable@vger.kernel.org # db27a7a KVM: Provide function for VCPU lookup by id --- arch/s390/kvm/interrupt.c | 3 +-- arch/s390/kvm/sigp.c | 8 ++------ 2 files changed, 3 insertions(+), 8 deletions(-) (limited to 'arch/s390/kvm') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 7242c2da4009..6a75352f453c 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) src_id, 0); /* sending vcpu invalid */ - if (src_id >= KVM_MAX_VCPUS || - kvm_get_vcpu(vcpu->kvm, src_id) == NULL) + if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) return -EINVAL; if (sclp.has_sigpif) diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index da690b69f9fe..77c22d685c7a 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, u16 cpu_addr, u32 parameter, u64 *status_reg) { int rc; - struct kvm_vcpu *dst_vcpu; + struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); - if (cpu_addr >= KVM_MAX_VCPUS) - return SIGP_CC_NOT_OPERATIONAL; - - dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); if (!dst_vcpu) return SIGP_CC_NOT_OPERATIONAL; @@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); if (order_code == SIGP_EXTERNAL_CALL) { - dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); + dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); BUG_ON(dest_vcpu == NULL); kvm_s390_vcpu_wakeup(dest_vcpu); -- cgit v1.2.3
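For reference, the behavioural difference between the two lookups can be modelled in a few lines of standalone C (simplified structures, not the kernel implementation): indexing trusts the array position, while lookup-by-id searches for a matching id and copes with sparse ids.

#include <stdio.h>

struct vcpu { int id; };

/* two VCPUs with non-sequential ids, as cpu hotplug could produce */
static struct vcpu vcpus[] = { { .id = 0 }, { .id = 4 } };
#define NR_VCPUS (int)(sizeof(vcpus) / sizeof(vcpus[0]))

static struct vcpu *get_by_index(int idx)       /* kvm_get_vcpu() analogue */
{
        return (idx >= 0 && idx < NR_VCPUS) ? &vcpus[idx] : NULL;
}

static struct vcpu *get_by_id(int id)   /* kvm_get_vcpu_by_id() analogue */
{
        for (int i = 0; i < NR_VCPUS; i++)
                if (vcpus[i].id == id)
                        return &vcpus[i];
        return NULL;
}

int main(void)
{
        printf("by index 4: %p\n", (void *)get_by_index(4));    /* NULL */
        printf("by id 4:    %p\n", (void *)get_by_id(4));       /* found */
        return 0;
}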