author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-06 21:19:10 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-06 21:19:10 +0300
commit | 541efb7632642cab55361178d73d544f025b593c (patch)
tree | eee019bef6c95f50561ba4194ba0dba7b66a252b /arch/x86/xen
parent | 6218590bcb452c3da7517d02b588d4d0a8628f73 (diff)
parent | a6a198bc60e6c980a56eca24d33dc7f29139f8ea (diff)
download | linux-541efb7632642cab55361178d73d544f025b593c.tar.xz
Merge tag 'for-linus-4.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from David Vrabel:
"xen features and fixes for 4.9:
- switch to new CPU hotplug mechanism
- support driver_override in pciback
- require vector callback for HVM guests (the alternate mechanism via
the platform device has been broken for ages)"
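What the first item means in practice: the old notifier_block registered with register_cpu_notifier() is replaced by callbacks attached to the CPU hotplug state machine. The sketch below mirrors the xen_cpuhp_setup() helper added in the diff further down; it is a minimal illustration, and the example_* names are placeholders rather than kernel symbols.

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int example_cpu_up_prepare(unsigned int cpu)
{
	/* Runs on a control CPU before @cpu is brought up. */
	return 0;
}

static int example_cpu_dead(unsigned int cpu)
{
	/* Runs after @cpu has gone down; the teardown pair of the prepare step. */
	return 0;
}

static int example_cpuhp_setup(void)
{
	int rc;

	/*
	 * The _nocalls variant registers the callbacks without invoking them
	 * for CPUs that are already online; CPUHP_XEN_PREPARE is the state
	 * the Xen code uses in the diff below.
	 */
	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
				       "XEN_HVM_GUEST_PREPARE",
				       example_cpu_up_prepare, example_cpu_dead);
	return rc >= 0 ? 0 : rc;
}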
* tag 'for-linus-4.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen/x86: Update topology map for PV VCPUs
xen/x86: Initialize per_cpu(xen_vcpu, 0) a little earlier
xen/pciback: support driver_override
xen/pciback: avoid multiple entries in slot list
xen/pciback: simplify pcistub device handling
xen: Remove event channel notification through Xen PCI platform device
xen/events: Convert to hotplug state machine
xen/x86: Convert to hotplug state machine
x86/xen: add missing \n at end of printk warning message
xen/grant-table: Use kmalloc_array() in arch_gnttab_valloc()
xen: Make VPMU init message look less scary
xen: rename xen_pmu_init() in sys-hypervisor.c
hotplug: Prevent alloc/free of irq descriptors during cpu up/down (again)
xen/x86: Move irq allocation from Xen smp_op.cpu_up()
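The kmalloc_array() entry above is a one-line hardening change, visible in the grant-table.c hunk below: the open-coded sizeof() * count multiplication can overflow silently, while kmalloc_array() checks the product and returns NULL on overflow. A minimal sketch of the idiom; the helper name is illustrative, not a kernel symbol.

#include <linux/slab.h>

/* Allocate an array of nr pointers. kmalloc_array() fails cleanly (returns
 * NULL) if nr * sizeof(void *) would overflow, whereas the equivalent
 * kmalloc(sizeof(void *) * nr, gfp) would silently wrap around. */
static void **alloc_ptr_array(size_t nr, gfp_t gfp)
{
	return kmalloc_array(nr, sizeof(void *), gfp);
}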
Diffstat (limited to 'arch/x86/xen')
-rw-r--r-- | arch/x86/xen/enlighten.c | 94
-rw-r--r-- | arch/x86/xen/grant-table.c | 2
-rw-r--r-- | arch/x86/xen/platform-pci-unplug.c | 2
-rw-r--r-- | arch/x86/xen/pmu.c | 7
-rw-r--r-- | arch/x86/xen/smp.c | 53
-rw-r--r-- | arch/x86/xen/smp.h | 13
-rw-r--r-- | arch/x86/xen/time.c | 5
7 files changed, 96 insertions, 80 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f1d2182e071f..c0fdd57da7aa 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -137,8 +137,10 @@ struct shared_info xen_dummy_shared_info;
 void *xen_initial_gdt;
 
 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
-__read_mostly int xen_have_vector_callback;
-EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+
+static int xen_cpu_up_prepare(unsigned int cpu);
+static int xen_cpu_up_online(unsigned int cpu);
+static int xen_cpu_dead(unsigned int cpu);
 
 /*
  * Point at some empty memory to start with. We map the real shared_info
@@ -1519,10 +1521,7 @@ static void __init xen_pvh_early_guest_init(void)
 	if (!xen_feature(XENFEAT_auto_translated_physmap))
 		return;
 
-	if (!xen_feature(XENFEAT_hvm_callback_vector))
-		return;
-
-	xen_have_vector_callback = 1;
+	BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
 
 	xen_pvh_early_cpu_init(0, false);
 	xen_pvh_set_cr_flags(0);
@@ -1538,6 +1537,24 @@ static void __init xen_dom0_set_legacy_features(void)
 	x86_platform.legacy.rtc = 1;
 }
 
+static int xen_cpuhp_setup(void)
+{
+	int rc;
+
+	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
+				       "XEN_HVM_GUEST_PREPARE",
+				       xen_cpu_up_prepare, xen_cpu_dead);
+	if (rc >= 0) {
+		rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					       "XEN_HVM_GUEST_ONLINE",
+					       xen_cpu_up_online, NULL);
+		if (rc < 0)
+			cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
+	}
+
+	return rc >= 0 ? 0 : rc;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1639,6 +1656,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	   possible map and a non-dummy shared_info. */
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
+	WARN_ON(xen_cpuhp_setup());
+
 	local_irq_disable();
 	early_boot_irqs_disabled = true;
@@ -1819,31 +1838,54 @@ static void __init init_hvm_pv_info(void)
 	xen_domain_type = XEN_HVM_DOMAIN;
 }
 
-static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
-			      void *hcpu)
+static int xen_cpu_up_prepare(unsigned int cpu)
 {
-	int cpu = (long)hcpu;
-	switch (action) {
-	case CPU_UP_PREPARE:
+	int rc;
+
+	if (xen_hvm_domain()) {
+		/*
+		 * This can happen if CPU was offlined earlier and
+		 * offlining timed out in common_cpu_die().
+		 */
+		if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+			xen_smp_intr_free(cpu);
+			xen_uninit_lock_cpu(cpu);
+		}
+
 		if (cpu_acpi_id(cpu) != U32_MAX)
 			per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
 		else
 			per_cpu(xen_vcpu_id, cpu) = cpu;
 		xen_vcpu_setup(cpu);
-		if (xen_have_vector_callback) {
-			if (xen_feature(XENFEAT_hvm_safe_pvclock))
-				xen_setup_timer(cpu);
-		}
-		break;
-	default:
-		break;
 	}
-	return NOTIFY_OK;
+
+	if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+		xen_setup_timer(cpu);
+
+	rc = xen_smp_intr_init(cpu);
+	if (rc) {
+		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
+		     cpu, rc);
+		return rc;
+	}
+	return 0;
 }
 
-static struct notifier_block xen_hvm_cpu_notifier = {
-	.notifier_call = xen_hvm_cpu_notify,
-};
+static int xen_cpu_dead(unsigned int cpu)
+{
+	xen_smp_intr_free(cpu);
+
+	if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+		xen_teardown_timer(cpu);
+
+	return 0;
+}
+
+static int xen_cpu_up_online(unsigned int cpu)
+{
+	xen_init_lock_cpu(cpu);
+	return 0;
+}
 
 #ifdef CONFIG_KEXEC_CORE
 static void xen_hvm_shutdown(void)
@@ -1871,10 +1913,10 @@ static void __init xen_hvm_guest_init(void)
 
 	xen_panic_handler_init();
 
-	if (xen_feature(XENFEAT_hvm_callback_vector))
-		xen_have_vector_callback = 1;
+	BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
+
 	xen_hvm_smp_init();
-	register_cpu_notifier(&xen_hvm_cpu_notifier);
+	WARN_ON(xen_cpuhp_setup());
 	xen_unplug_emulated_devices();
 	x86_init.irqs.intr_init = xen_init_IRQ;
 	xen_hvm_init_time_ops();
@@ -1910,7 +1952,7 @@ bool xen_hvm_need_lapic(void)
 		return false;
 	if (!xen_hvm_domain())
 		return false;
-	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
+	if (xen_feature(XENFEAT_hvm_pirqs))
 		return false;
 	return true;
 }
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index de4144c24f1c..809b6c812654 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -89,7 +89,7 @@ void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 
 static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
 {
-	area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+	area->ptes = kmalloc_array(nr_frames, sizeof(*area->ptes), GFP_KERNEL);
 	if (area->ptes == NULL)
 		return -ENOMEM;
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index d37a0c7f82cb..90d1b83cf35f 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -61,7 +61,7 @@ static int check_platform_magic(void)
 		}
 		break;
 	default:
-		printk(KERN_WARNING "Xen Platform PCI: unknown I/O protocol version");
+		printk(KERN_WARNING "Xen Platform PCI: unknown I/O protocol version\n");
 		return XEN_PLATFORM_ERR_PROTOCOL;
 	}
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 32bdc2c90297..b9fc52556bcc 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -547,8 +547,11 @@ void xen_pmu_init(int cpu)
 	return;
 
 fail:
-	pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
-		cpu, err);
+	if (err == -EOPNOTSUPP || err == -ENOSYS)
+		pr_info_once("VPMU disabled by hypervisor.\n");
+	else
+		pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
+			cpu, err);
 	free_pages((unsigned long)xenpmu_data, 0);
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 0b4d04c8ab4d..9fa27ceeecfd 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -87,6 +87,12 @@ static void cpu_bringup(void)
 	cpu_data(cpu).x86_max_cores = 1;
 	set_cpu_sibling_map(cpu);
 
+	/*
+	 * identify_cpu() may have set logical_pkg_id to -1 due
+	 * to incorrect phys_proc_id. Let's re-comupte it.
+	 */
+	topology_update_package_map(apic->cpu_present_to_apicid(cpu), cpu);
+
 	xen_setup_cpu_clockevents();
 
 	notify_cpu_starting(cpu);
@@ -115,7 +121,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
-static void xen_smp_intr_free(unsigned int cpu)
+void xen_smp_intr_free(unsigned int cpu)
 {
 	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
@@ -159,7 +165,7 @@ static void xen_smp_intr_free(unsigned int cpu)
 		per_cpu(xen_pmu_irq, cpu).name = NULL;
 	}
 };
-static int xen_smp_intr_init(unsigned int cpu)
+int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
 	char *resched_name, *callfunc_name, *debug_name, *pmu_name;
@@ -475,8 +481,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	common_cpu_up(cpu, idle);
 
 	xen_setup_runstate_info(cpu);
-	xen_setup_timer(cpu);
-	xen_init_lock_cpu(cpu);
 
 	/*
 	 * PV VCPUs are always successfully taken down (see 'while' loop
@@ -495,10 +499,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 
 	xen_pmu_init(cpu);
 
-	rc = xen_smp_intr_init(cpu);
-	if (rc)
-		return rc;
-
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
 	BUG_ON(rc);
 
@@ -769,47 +769,10 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 	xen_init_lock_cpu(0);
 }
 
-static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
-	int rc;
-
-	/*
-	 * This can happen if CPU was offlined earlier and
-	 * offlining timed out in common_cpu_die().
-	 */
-	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
-		xen_smp_intr_free(cpu);
-		xen_uninit_lock_cpu(cpu);
-	}
-
-	/*
-	 * xen_smp_intr_init() needs to run before native_cpu_up()
-	 * so that IPI vectors are set up on the booting CPU before
-	 * it is marked online in native_cpu_up().
-	 */
-	rc = xen_smp_intr_init(cpu);
-	WARN_ON(rc);
-	if (!rc)
-		rc = native_cpu_up(cpu, tidle);
-
-	/*
-	 * We must initialize the slowpath CPU kicker _after_ the native
-	 * path has executed. If we initialized it before none of the
-	 * unlocker IPI kicks would reach the booting CPU as the booting
-	 * CPU had not set itself 'online' in cpu_online_mask. That mask
-	 * is checked when IPIs are sent (on HVM at least).
-	 */
-	xen_init_lock_cpu(cpu);
-	return rc;
-}
-
 void __init xen_hvm_smp_init(void)
 {
-	if (!xen_have_vector_callback)
-		return;
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
-	smp_ops.cpu_up = xen_hvm_cpu_up;
 	smp_ops.cpu_die = xen_cpu_die;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 963d62a35c82..c5c16dc4f694 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -1,5 +1,6 @@
 #ifndef _XEN_SMP_H
 
+#ifdef CONFIG_SMP
 extern void xen_send_IPI_mask(const struct cpumask *mask,
 			      int vector);
 extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
@@ -8,6 +9,18 @@ extern void xen_send_IPI_allbutself(int vector);
 extern void xen_send_IPI_all(int vector);
 extern void xen_send_IPI_self(int vector);
 
+extern int xen_smp_intr_init(unsigned int cpu);
+extern void xen_smp_intr_free(unsigned int cpu);
+
+#else /* CONFIG_SMP */
+
+static inline int xen_smp_intr_init(unsigned int cpu)
+{
+	return 0;
+}
+static inline void xen_smp_intr_free(unsigned int cpu) {}
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_XEN_PVH
 extern void xen_pvh_early_cpu_init(int cpu, bool entry);
 #else
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 67356d29d74d..33d8f6a7829d 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -432,11 +432,6 @@ static void xen_hvm_setup_cpu_clockevents(void)
 
 void __init xen_hvm_init_time_ops(void)
 {
-	/* vector callback is needed otherwise we cannot receive interrupts
-	 * on cpu > 0 and at this point we don't know how many cpus are
-	 * available */
-	if (!xen_have_vector_callback)
-		return;
 	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
 		printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
 				"disable pv timer\n");