author     Linus Torvalds <torvalds@linux-foundation.org>   2021-10-01 21:08:07 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-10-01 21:08:07 +0300
commit     b2626f1e3245ddd810b69df86514774d6cb655ee
tree       6bf8c29e62f3c6bfb5269c6f971942ba1766b0ff
parent     24f67d82c43c9c594821ee1bc4367a23d89d9f8b
parent     7b0035eaa7dab9fd33d6658ad6a755024bdce26c
download   linux-b2626f1e3245ddd810b69df86514774d6cb655ee.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more kvm fixes from Paolo Bonzini:
"Small x86 fixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: selftests: Ensure all migrations are performed when test is affined
KVM: x86: Swap order of CPUID entry "index" vs. "significant flag" checks
ptp: Fix ptp_kvm_getcrosststamp issue for x86 ptp_kvm
x86/kvmclock: Move this_cpu_pvti into kvmclock.h
selftests: KVM: Don't clobber XMM register when read
KVM: VMX: Fix a TSX_CTRL_CPUID_CLEAR field mask issue
 arch/x86/include/asm/kvmclock.h                         | 14
 arch/x86/kernel/kvmclock.c                              | 13
 arch/x86/kvm/cpuid.c                                    |  4
 arch/x86/kvm/vmx/vmx.c                                  |  2
 drivers/ptp/ptp_kvm_x86.c                               |  9
 tools/testing/selftests/kvm/include/x86_64/processor.h  |  2
 tools/testing/selftests/kvm/rseq_test.c                 | 69
 7 files changed, 81 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/kvmclock.h b/arch/x86/include/asm/kvmclock.h
index eceea9299097..6c5765192102 100644
--- a/arch/x86/include/asm/kvmclock.h
+++ b/arch/x86/include/asm/kvmclock.h
@@ -2,6 +2,20 @@
 #ifndef _ASM_X86_KVM_CLOCK_H
 #define _ASM_X86_KVM_CLOCK_H
 
+#include <linux/percpu.h>
+
 extern struct clocksource kvm_clock;
 
+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+	return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+	return this_cpu_read(hv_clock_per_cpu);
+}
+
 #endif /* _ASM_X86_KVM_CLOCK_H */
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ad273e5861c1..73c74b961d0f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 static struct pvclock_vsyscall_time_info
 			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-	return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-	return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index fe03bd978761..751aa85a3001 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
 	for (i = 0; i < nent; i++) {
 		e = &entries[i];
 
-		if (e->function == function && (e->index == index ||
-		    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+		if (e->function == function &&
+		    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
 			return e;
 	}
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 9ecfcf13a046..116b08904ac3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6848,7 +6848,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 		 */
 		tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
 		if (tsx_ctrl)
-			vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+			tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
 	}
 
 	err = alloc_loaded_vmcs(&vmx->vmcs01);
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
index 3dd519dfc473..d0096cd7096a 100644
--- a/drivers/ptp/ptp_kvm_x86.c
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -15,8 +15,6 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_kvm.h>
 
-struct pvclock_vsyscall_time_info *hv_clock;
-
 static phys_addr_t clock_pair_gpa;
 static struct kvm_clock_pairing clock_pair;
 
@@ -28,8 +26,7 @@ int kvm_arch_ptp_init(void)
 		return -ENODEV;
 
 	clock_pair_gpa = slow_virt_to_phys(&clock_pair);
-	hv_clock = pvclock_get_pvti_cpu0_va();
-	if (!hv_clock)
+	if (!pvclock_get_pvti_cpu0_va())
 		return -ENODEV;
 
 	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
 	struct pvclock_vcpu_time_info *src;
 	unsigned int version;
 	long ret;
-	int cpu;
 
-	cpu = smp_processor_id();
-	src = &hv_clock[cpu].pvti;
+	src = this_cpu_pvti();
 
 	do {
 		/*
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index eba8bd08293e..05e65ca1c30c 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -315,7 +315,7 @@ static inline void set_xmm(int n, unsigned long val)
 #define GET_XMM(__xmm)							\
 ({									\
 	unsigned long __val;						\
-	asm volatile("movq %%"#__xmm", %0" : "=r"(__val) : : #__xmm);	\
+	asm volatile("movq %%"#__xmm", %0" : "=r"(__val));		\
 	__val;								\
 })
 
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index c5e0dd664a7b..4158da0da2bb 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -10,6 +10,7 @@
 #include <signal.h>
 #include <syscall.h>
 #include <sys/ioctl.h>
+#include <sys/sysinfo.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 #include <linux/rseq.h>
@@ -39,6 +40,7 @@ static __thread volatile struct rseq __rseq = {
 
 static pthread_t migration_thread;
 static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
 static bool done;
 
 static atomic_t seq_cnt;
@@ -57,20 +59,37 @@ static void sys_rseq(int flags)
 	TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
 }
 
+static int next_cpu(int cpu)
+{
+	/*
+	 * Advance to the next CPU, skipping those that weren't in the original
+	 * affinity set.  Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+	 * data storage is considered as opaque.  Note, if this task is pinned
+	 * to a small set of discontigous CPUs, e.g. 2 and 1023, this loop will
+	 * burn a lot cycles and the test will take longer than normal to
+	 * complete.
+	 */
+	do {
+		cpu++;
+		if (cpu > max_cpu) {
+			cpu = min_cpu;
+			TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+				    "Min CPU = %d must always be usable", cpu);
+			break;
+		}
+	} while (!CPU_ISSET(cpu, &possible_mask));
+
+	return cpu;
+}
+
 static void *migration_worker(void *ign)
 {
 	cpu_set_t allowed_mask;
-	int r, i, nr_cpus, cpu;
+	int r, i, cpu;
 
 	CPU_ZERO(&allowed_mask);
 
-	nr_cpus = CPU_COUNT(&possible_mask);
-
-	for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
-		cpu = i % nr_cpus;
-		if (!CPU_ISSET(cpu, &possible_mask))
-			continue;
-
+	for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
 		CPU_SET(cpu, &allowed_mask);
 
 		/*
@@ -154,6 +173,36 @@ static void *migration_worker(void *ign)
 	return NULL;
 }
 
+static int calc_min_max_cpu(void)
+{
+	int i, cnt, nproc;
+
+	if (CPU_COUNT(&possible_mask) < 2)
+		return -EINVAL;
+
+	/*
+	 * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
+	 * this task is affined to in order to reduce the time spent querying
+	 * unusable CPUs, e.g. if this task is pinned to a small percentage of
+	 * total CPUs.
+	 */
+	nproc = get_nprocs_conf();
+	min_cpu = -1;
+	max_cpu = -1;
+	cnt = 0;
+
+	for (i = 0; i < nproc; i++) {
+		if (!CPU_ISSET(i, &possible_mask))
+			continue;
+		if (min_cpu == -1)
+			min_cpu = i;
+		max_cpu = i;
+		cnt++;
+	}
+
+	return (cnt < 2) ? -EINVAL : 0;
+}
+
 int main(int argc, char *argv[])
 {
 	int r, i, snapshot;
@@ -167,8 +216,8 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
 		    strerror(errno));
 
-	if (CPU_COUNT(&possible_mask) < 2) {
-		print_skip("Only one CPU, task migration not possible\n");
+	if (calc_min_max_cpu()) {
+		print_skip("Only one usable CPU, task migration not possible");
 		exit(KSFT_SKIP);
 	}
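
As an aside on the rseq_test.c change above: the sketch below is a minimal, self-contained userspace illustration (not part of this merge; the helper names find_min_max_cpu() and next_usable_cpu() are made up for the example) of the same technique the selftest now uses, i.e. recording the min/max CPUs of the task's affinity mask once, then walking only the usable CPUs instead of querying every possible CPU on each migration.

/* Illustrative sketch only; mirrors the cpu_set_t scan used by rseq_test.c. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/sysinfo.h>

static cpu_set_t possible_mask;
static int min_cpu = -1, max_cpu = -1;

/* Record the smallest and largest CPU present in the task's affinity mask. */
static int find_min_max_cpu(void)
{
	int i, cnt = 0, nproc = get_nprocs_conf();

	for (i = 0; i < nproc; i++) {
		if (!CPU_ISSET(i, &possible_mask))
			continue;
		if (min_cpu == -1)
			min_cpu = i;
		max_cpu = i;
		cnt++;
	}
	return cnt < 2 ? -1 : 0;
}

/* Advance to the next usable CPU, wrapping back to min_cpu past max_cpu. */
static int next_usable_cpu(int cpu)
{
	do {
		cpu++;
		if (cpu > max_cpu)
			return min_cpu;
	} while (!CPU_ISSET(cpu, &possible_mask));
	return cpu;
}

int main(void)
{
	int i, cpu;

	if (sched_getaffinity(0, sizeof(possible_mask), &possible_mask) ||
	    find_min_max_cpu()) {
		fprintf(stderr, "need an affinity mask with at least two CPUs\n");
		return 1;
	}

	/* Print the first ten CPUs a migration loop would target. */
	for (i = 0, cpu = min_cpu; i < 10; i++, cpu = next_usable_cpu(cpu))
		printf("migration %d -> cpu %d\n", i, cpu);
	return 0;
}

The design trade-off is the same one the patch comment notes: a sparse affinity mask (e.g. CPUs 2 and 1023) makes the wrap-around scan slower, but it avoids peeking into cpu_set_t's opaque storage.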