author     Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 19:16:51 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 19:16:51 +0400
commit     9d2da7af909e1cf529f3cac582aaae05b107aa1e (patch)
tree       d3845268b2f6ee22e928ad1ec751d74e121dd797
parent     c1be5a5b1b355d40e6cf79cc979eb66dafa24ad1 (diff)
parent     18c0025b692a293e3e4aecb34264563c0a442448 (diff)
download   linux-9d2da7af909e1cf529f3cac582aaae05b107aa1e.tar.xz
Merge tag 'stable/for-linus-3.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen updates from Konrad Rzeszutek Wilk:
"Features:
- Populate the boot_params with EDD data.
- Cleanups in the IRQ code.
Bug-fixes:
- CPU hotplug offline/online in PVHVM mode.
- Re-upload processor PM data after ACPI S3 suspend/resume cycle."
And Konrad gets a gold star for sending the pull request early when he
thought he'd be away for the first week of the merge window (but because
of 3.9 dragging out to -rc8 he then re-sent the reminder on the first
day of the merge window anyway)
* tag 'stable/for-linus-3.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen: resolve section mismatch warnings in xen-acpi-processor
xen: Re-upload processor PM data to hypervisor after S3 resume (v2)
xen/smp: Unifiy some of the PVs and PVHVM offline CPU path
xen/smp/pvhvm: Don't initialize IRQ_WORKER as we are using the native one.
xen/spinlock: Disable IRQ spinlock (PV) allocation on PVHVM
xen/spinlock: Check against default value of -1 for IRQ line.
xen/time: Add default value of -1 for IRQ and check for that.
xen/events: Check that IRQ value passed in is valid.
xen/time: Fix kasprintf splat when allocating timer%d IRQ line.
xen/smp/spinlock: Fix leakage of the spinlock interrupt line for every CPU online/offline
xen/smp: Fix leakage of timer interrupt line for every CPU online/offline.
xen kconfig: fix select INPUT_XEN_KBDDEV_FRONTEND
xen: drop tracking of IRQ vector
x86/xen: populate boot_params with EDD data
-rw-r--r--  arch/x86/pci/xen.c                |  6
-rw-r--r--  arch/x86/xen/enlighten.c          | 57
-rw-r--r--  arch/x86/xen/smp.c                | 21
-rw-r--r--  arch/x86/xen/spinlock.c           | 25
-rw-r--r--  arch/x86/xen/time.c               | 13
-rw-r--r--  drivers/video/Kconfig             |  2
-rw-r--r--  drivers/xen/events.c              | 33
-rw-r--r--  drivers/xen/xen-acpi-processor.c  | 82
-rw-r--r--  include/xen/events.h              |  3
9 files changed, 188 insertions(+), 54 deletions(-)
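The ACPI S3 bug-fix in drivers/xen/xen-acpi-processor.c below works by hooking the kernel's syscore layer: it registers a resume callback that zeroes the "already processed" ACPI ID bitmap and re-runs the same PM-data upload helper used at module init, so the hypervisor sees fresh Cx/Px data after every suspend/resume cycle. The minimal sketch that follows shows the register_syscore_ops() pattern in isolation; the module and function names here are hypothetical stand-ins for illustration, not code from the patch (the patch uses xap_syscore_ops and xen_acpi_processor_resume).

/*
 * Minimal sketch (assumed example, not from this merge): re-upload cached
 * state after an ACPI S3 resume via a syscore resume hook, the same
 * mechanism xen-acpi-processor.c adopts below.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>

/* Hypothetical stand-in for xen_upload_processor_pm_data(). */
static void example_reupload_state(void)
{
        pr_info("example: re-uploading cached PM state after resume\n");
}

static void example_resume(void)
{
        /* syscore callbacks run with a single CPU online and IRQs disabled. */
        example_reupload_state();
}

static struct syscore_ops example_syscore_ops = {
        .resume = example_resume,
};

static int __init example_init(void)
{
        register_syscore_ops(&example_syscore_ops);
        return 0;
}

static void __exit example_exit(void)
{
        unregister_syscore_ops(&example_syscore_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because a syscore resume hook runs with only one CPU online and interrupts disabled, the real driver keeps its callback small: it only clears a bitmap and calls an upload routine it already has.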
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 94e76620460f..4a9be6ddf054 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
         goto error;
     i = 0;
     list_for_each_entry(msidesc, &dev->msi_list, list) {
-        irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
+        irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
                        (type == PCI_CAP_ID_MSIX) ?
                        "pcifront-msi-x" :
                        "pcifront-msi",
@@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
             dev_dbg(&dev->dev,
                 "xen: msi already bound to pirq=%d\n", pirq);
         }
-        irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
+        irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
                        (type == PCI_CAP_ID_MSIX) ?
                        "msi-x" : "msi",
                        DOMID_SELF);
@@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
         }
         ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
-                       map_irq.pirq, map_irq.index,
+                       map_irq.pirq,
                        (type == PCI_CAP_ID_MSIX) ?
                        "msi-x" : "msi",
                        domid);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c8e1c7b95c3b..ddbd54a9b845 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,6 +31,7 @@
 #include <linux/pci.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/edd.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
@@ -1306,6 +1307,55 @@ static const struct machine_ops xen_machine_ops __initconst = {
     .emergency_restart = xen_emergency_restart,
 };
 
+static void __init xen_boot_params_init_edd(void)
+{
+#if IS_ENABLED(CONFIG_EDD)
+    struct xen_platform_op op;
+    struct edd_info *edd_info;
+    u32 *mbr_signature;
+    unsigned nr;
+    int ret;
+
+    edd_info = boot_params.eddbuf;
+    mbr_signature = boot_params.edd_mbr_sig_buffer;
+
+    op.cmd = XENPF_firmware_info;
+
+    op.u.firmware_info.type = XEN_FW_DISK_INFO;
+    for (nr = 0; nr < EDDMAXNR; nr++) {
+        struct edd_info *info = edd_info + nr;
+
+        op.u.firmware_info.index = nr;
+        info->params.length = sizeof(info->params);
+        set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
+                     &info->params);
+        ret = HYPERVISOR_dom0_op(&op);
+        if (ret)
+            break;
+
+#define C(x) info->x = op.u.firmware_info.u.disk_info.x
+        C(device);
+        C(version);
+        C(interface_support);
+        C(legacy_max_cylinder);
+        C(legacy_max_head);
+        C(legacy_sectors_per_track);
+#undef C
+    }
+    boot_params.eddbuf_entries = nr;
+
+    op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
+    for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
+        op.u.firmware_info.index = nr;
+        ret = HYPERVISOR_dom0_op(&op);
+        if (ret)
+            break;
+        mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
+    }
+    boot_params.edd_mbr_sig_buf_entries = nr;
+#endif
+}
+
 /*
  * Set up the GDT and segment registers for -fstack-protector. Until
  * we do this, we have to be careful not to call any stack-protected
@@ -1508,6 +1558,8 @@ asmlinkage void __init xen_start_kernel(void)
         /* Avoid searching for BIOS MP tables */
         x86_init.mpparse.find_smp_config = x86_init_noop;
         x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+
+        xen_boot_params_init_edd();
     }
 #ifdef CONFIG_PCI
     /* PCI BIOS service won't work from a PV guest. */
@@ -1589,8 +1641,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
     switch (action) {
     case CPU_UP_PREPARE:
         xen_vcpu_setup(cpu);
-        if (xen_have_vector_callback)
+        if (xen_have_vector_callback) {
             xen_init_lock_cpu(cpu);
+            if (xen_feature(XENFEAT_hvm_safe_pvclock))
+                xen_setup_timer(cpu);
+        }
         break;
     default:
         break;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 09ea61d2e02f..0d466d7c7175 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu)
         goto fail;
     per_cpu(xen_callfuncsingle_irq, cpu) = rc;
 
+    /*
+     * The IRQ worker on PVHVM goes through the native path and uses the
+     * IPI mechanism.
+     */
+    if (xen_hvm_domain())
+        return 0;
+
     callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
     rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                     cpu,
@@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu)
     if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
         unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
                        NULL);
+    if (xen_hvm_domain())
+        return rc;
+
     if (per_cpu(xen_irq_work, cpu) >= 0)
         unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
@@ -418,7 +428,7 @@ static int xen_cpu_disable(void)
 
 static void xen_cpu_die(unsigned int cpu)
 {
-    while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+    while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
         current->state = TASK_UNINTERRUPTIBLE;
         schedule_timeout(HZ/10);
     }
@@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu)
     unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
     unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
     unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
-    unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+    if (!xen_hvm_domain())
+        unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
     xen_uninit_lock_cpu(cpu);
     xen_teardown_timer(cpu);
 }
@@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 static void xen_hvm_cpu_die(unsigned int cpu)
 {
-    unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
-    unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
-    unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
-    unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
-    unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+    xen_cpu_die(cpu);
     native_cpu_die(cpu);
 }
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index f7a080ef0354..8b54603ce816 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu)
     int irq;
     const char *name;
 
+    WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
+         cpu, per_cpu(lock_kicker_irq, cpu));
+
+    /*
+     * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+     * (xen: disable PV spinlocks on HVM)
+     */
+    if (xen_hvm_domain())
+        return;
+
     name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
     irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
                      cpu,
@@ -382,11 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 
 void xen_uninit_lock_cpu(int cpu)
 {
+    /*
+     * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+     * (xen: disable PV spinlocks on HVM)
+     */
+    if (xen_hvm_domain())
+        return;
+
     unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+    per_cpu(lock_kicker_irq, cpu) = -1;
 }
 
 void __init xen_init_spinlocks(void)
 {
+    /*
+     * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+     * (xen: disable PV spinlocks on HVM)
+     */
+    if (xen_hvm_domain())
+        return;
+
     BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
 
     pv_lock_ops.spin_is_locked = xen_spin_is_locked;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 0296a9522501..3d88bfdf9e1c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
 
 static const struct clock_event_device *xen_clockevent =
     &xen_timerop_clockevent;
-static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);
+static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
 
 static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
 {
@@ -401,6 +401,9 @@ void xen_setup_timer(int cpu)
     struct clock_event_device *evt;
     int irq;
 
+    evt = &per_cpu(xen_clock_events, cpu);
+    WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
+
     printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
     name = kasprintf(GFP_KERNEL, "timer%d", cpu);
@@ -413,7 +416,6 @@ void xen_setup_timer(int cpu)
                       IRQF_FORCE_RESUME,
                       name, NULL);
 
-    evt = &per_cpu(xen_clock_events, cpu);
     memcpy(evt, xen_clockevent, sizeof(*evt));
 
     evt->cpumask = cpumask_of(cpu);
@@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu)
     BUG_ON(cpu == 0);
     evt = &per_cpu(xen_clock_events, cpu);
     unbind_from_irqhandler(evt->irq, NULL);
+    evt->irq = -1;
 }
 
 void xen_setup_cpu_clockevents(void)
@@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
 {
     int cpu = smp_processor_id();
     xen_setup_runstate_info(cpu);
-    xen_setup_timer(cpu);
+    /*
+     * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
+     * doing it xen_hvm_cpu_notify (which gets called by smp_init during
+     * early bootup and also during CPU hotplug events).
+     */
     xen_setup_cpu_clockevents();
 }
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 4c1546f71d56..09394657926e 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2277,7 +2277,7 @@ config XEN_FBDEV_FRONTEND
     select FB_SYS_IMAGEBLIT
     select FB_SYS_FOPS
     select FB_DEFERRED_IO
-    select INPUT_XEN_KBDDEV_FRONTEND
+    select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
     select XEN_XENBUS_FRONTEND
     default y
     help
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 2647ad8e1f19..d8cc8127f19c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -85,8 +85,7 @@ enum xen_irq_type {
  *    event channel - irq->event channel mapping
  *    cpu - cpu this event channel is bound to
  *    index - type-specific information:
- *       PIRQ - vector, with MSB being "needs EIO", or physical IRQ of the HVM
- *           guest, or GSI (real passthrough IRQ) of the device.
+ *       PIRQ - physical IRQ, GSI, flags, and owner domain
  *    VIRQ - virq number
  *    IPI - IPI vector
 *    EVTCHN -
@@ -105,7 +104,6 @@ struct irq_info {
         struct {
             unsigned short pirq;
             unsigned short gsi;
-            unsigned char vector;
             unsigned char flags;
             uint16_t domid;
         } pirq;
@@ -211,7 +209,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
                    unsigned short evtchn,
                    unsigned short pirq,
                    unsigned short gsi,
-                   unsigned short vector,
                    uint16_t domid,
                    unsigned char flags)
 {
@@ -221,7 +218,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
 
     info->u.pirq.pirq = pirq;
     info->u.pirq.gsi = gsi;
-    info->u.pirq.vector = vector;
     info->u.pirq.domid = domid;
     info->u.pirq.flags = flags;
 }
@@ -519,6 +515,9 @@ static void xen_free_irq(unsigned irq)
 {
     struct irq_info *info = irq_get_handler_data(irq);
 
+    if (WARN_ON(!info))
+        return;
+
     list_del(&info->list);
 
     irq_set_handler_data(irq, NULL);
@@ -714,7 +713,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
         goto out;
     }
 
-    xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
+    xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
                    shareable ? PIRQ_SHAREABLE : 0);
 
     pirq_query_unmask(irq);
@@ -762,8 +761,7 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
 }
 
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-                 int pirq, int vector, const char *name,
-                 domid_t domid)
+                 int pirq, const char *name, domid_t domid)
 {
     int irq, ret;
@@ -776,7 +774,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
     irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
             name);
 
-    xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
+    xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
     ret = irq_set_msi_desc(irq, msidesc);
     if (ret < 0)
         goto error_irq;
@@ -1008,6 +1006,9 @@ static void unbind_from_irq(unsigned int irq)
     int evtchn = evtchn_from_irq(irq);
     struct irq_info *info = irq_get_handler_data(irq);
 
+    if (WARN_ON(!info))
+        return;
+
     mutex_lock(&irq_mapping_update_lock);
 
     if (info->refcnt > 0) {
@@ -1135,6 +1136,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
 
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
+    struct irq_info *info = irq_get_handler_data(irq);
+
+    if (WARN_ON(!info))
+        return;
     free_irq(irq, dev_id);
     unbind_from_irq(irq);
 }
@@ -1457,6 +1462,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
 {
     struct irq_info *info = info_for_irq(irq);
 
+    if (WARN_ON(!info))
+        return;
+
     /* Make sure the irq is masked, since the new event channel
        will also be masked. */
     disable_irq(irq);
@@ -1730,7 +1738,12 @@ void xen_poll_irq(int irq)
 int xen_test_irq_shared(int irq)
 {
     struct irq_info *info = info_for_irq(irq);
-    struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
+    struct physdev_irq_status_query irq_status;
+
+    if (WARN_ON(!info))
+        return -ENOENT;
+
+    irq_status.irq = info->u.pirq.pirq;
 
     if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
         return 0;
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 90e34ac7e522..8abd7d579037 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -25,6 +25,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/syscore_ops.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include <acpi/processor.h>
@@ -51,9 +52,9 @@ static DEFINE_MUTEX(acpi_ids_mutex);
 /* Which ACPI ID we have processed from 'struct acpi_processor'. */
 static unsigned long *acpi_ids_done;
 /* Which ACPI ID exist in the SSDT/DSDT processor definitions. */
-static unsigned long __initdata *acpi_id_present;
+static unsigned long *acpi_id_present;
 /* And if there is an _CST definition (or a PBLK) for the ACPI IDs */
-static unsigned long __initdata *acpi_id_cst_present;
+static unsigned long *acpi_id_cst_present;
 
 static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
 {
@@ -329,7 +330,7 @@ static unsigned int __init get_max_acpi_id(void)
  * for_each_[present|online]_cpu macros which are banded to the virtual
  * CPU amount.
  */
-static acpi_status __init
+static acpi_status
 read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
     u32 acpi_id;
@@ -384,12 +385,16 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
     return AE_OK;
 }
 
-static int __init check_acpi_ids(struct acpi_processor *pr_backup)
+static int check_acpi_ids(struct acpi_processor *pr_backup)
 {
     if (!pr_backup)
         return -ENODEV;
 
+    if (acpi_id_present && acpi_id_cst_present)
+        /* OK, done this once .. skip to uploading */
+        goto upload;
+
     /* All online CPUs have been processed at this stage. Now verify
      * whether in fact "online CPUs" == physical CPUs.
      */
@@ -408,6 +413,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
             read_acpi_id, NULL, NULL, NULL);
     acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);
 
+upload:
     if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
         unsigned int i;
         for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
@@ -417,10 +423,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
             (void)upload_pm_data(pr_backup);
         }
     }
-    kfree(acpi_id_present);
-    acpi_id_present = NULL;
-    kfree(acpi_id_cst_present);
-    acpi_id_cst_present = NULL;
+
     return 0;
 }
 static int __init check_prereq(void)
@@ -467,10 +470,47 @@ static void free_acpi_perf_data(void)
     free_percpu(acpi_perf_data);
 }
 
-static int __init xen_acpi_processor_init(void)
+static int xen_upload_processor_pm_data(void)
 {
     struct acpi_processor *pr_backup = NULL;
     unsigned int i;
+    int rc = 0;
+
+    pr_info(DRV_NAME "Uploading Xen processor PM info\n");
+
+    for_each_possible_cpu(i) {
+        struct acpi_processor *_pr;
+        _pr = per_cpu(processors, i /* APIC ID */);
+        if (!_pr)
+            continue;
+
+        if (!pr_backup) {
+            pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
+            if (pr_backup)
+                memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
+        }
+        (void)upload_pm_data(_pr);
+    }
+
+    rc = check_acpi_ids(pr_backup);
+    kfree(pr_backup);
+
+    return rc;
+}
+
+static void xen_acpi_processor_resume(void)
+{
+    bitmap_zero(acpi_ids_done, nr_acpi_bits);
+    xen_upload_processor_pm_data();
+}
+
+static struct syscore_ops xap_syscore_ops = {
+    .resume = xen_acpi_processor_resume,
+};
+
+static int __init xen_acpi_processor_init(void)
+{
+    unsigned int i;
     int rc = check_prereq();
 
     if (rc)
@@ -514,27 +554,12 @@ static int __init xen_acpi_processor_init(void)
         goto err_out;
     }
 
-    for_each_possible_cpu(i) {
-        struct acpi_processor *_pr;
-        _pr = per_cpu(processors, i /* APIC ID */);
-        if (!_pr)
-            continue;
-
-        if (!pr_backup) {
-            pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
-            if (pr_backup)
-                memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
-        }
-        (void)upload_pm_data(_pr);
-    }
-    rc = check_acpi_ids(pr_backup);
-
-    kfree(pr_backup);
-    pr_backup = NULL;
-
+    rc = xen_upload_processor_pm_data();
     if (rc)
         goto err_unregister;
 
+    register_syscore_ops(&xap_syscore_ops);
+
     return 0;
 err_unregister:
     for_each_possible_cpu(i) {
@@ -552,7 +577,10 @@
 static void __exit xen_acpi_processor_exit(void)
 {
     int i;
+    unregister_syscore_ops(&xap_syscore_ops);
     kfree(acpi_ids_done);
+    kfree(acpi_id_present);
+    kfree(acpi_id_cst_present);
     for_each_possible_cpu(i) {
         struct acpi_processor_performance *perf;
         perf = per_cpu_ptr(acpi_perf_data, i);
diff --git a/include/xen/events.h b/include/xen/events.h
index c6bfe01acf6b..b2b27c6a0f7b 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -90,8 +90,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
 /* Bind an PSI pirq to an irq. */
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-                 int pirq, int vector, const char *name,
-                 domid_t domid);
+                 int pirq, const char *name, domid_t domid);
 #endif
 /* De-allocates the above mentioned physical interrupt. */
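Several of the IRQ-line fixes above (arch/x86/xen/time.c and arch/x86/xen/spinlock.c) converge on one convention: a per-CPU IRQ field starts out at -1, teardown resets it to -1 after unbinding, and setup warns if it finds anything other than -1. That is what stops repeated CPU offline/online cycles from either leaking an interrupt line or freeing one twice. The short standalone sketch below illustrates that sentinel pattern with hypothetical names; it is an illustration under those assumptions, not code from the patches.

/*
 * Illustrative sketch of the "-1 means unallocated" convention used by
 * xen_setup_timer()/xen_teardown_timer() above. All names are made up.
 */
#include <stdio.h>

#define NR_CPUS 4

/* -1 marks "no IRQ bound", mirroring { .irq = -1 } in xen_clock_events. */
static int timer_irq[NR_CPUS] = { -1, -1, -1, -1 };

/* Stand-ins for the real bind/unbind helpers. */
static int bind_timer_irq(int cpu) { return 100 + cpu; }
static void unbind_timer_irq(int irq) { printf("freed IRQ %d\n", irq); }

static void setup_timer(int cpu)
{
        /* Mirrors the WARN() added to xen_setup_timer(). */
        if (timer_irq[cpu] >= 0) {
                fprintf(stderr, "IRQ%d for CPU%d is already allocated\n",
                        timer_irq[cpu], cpu);
                return;
        }
        timer_irq[cpu] = bind_timer_irq(cpu);
}

static void teardown_timer(int cpu)
{
        if (timer_irq[cpu] < 0)         /* nothing bound: nothing to free */
                return;
        unbind_timer_irq(timer_irq[cpu]);
        timer_irq[cpu] = -1;            /* mirrors evt->irq = -1 in xen_teardown_timer() */
}

int main(void)
{
        /* Simulate a CPU going offline and back online twice. */
        setup_timer(1);
        teardown_timer(1);
        setup_timer(1);
        teardown_timer(1);
        return 0;
}

Without the sentinel, a second online/offline cycle would bind a fresh IRQ while the stale number was still recorded, which is exactly the leak the "Fix leakage of the spinlock/timer interrupt line" commits in this pull address.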