Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--  drivers/xen/events.c | 178
1 file changed, 122 insertions(+), 56 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 97612f548a8e..74681478100a 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -105,7 +105,6 @@ struct irq_info
static struct irq_info *irq_info;
static int *pirq_to_irq;
-static int nr_pirqs;
static int *evtchn_to_irq;
struct cpu_evtchn_s {
@@ -171,6 +170,9 @@ static struct irq_info *info_for_irq(unsigned irq)
static unsigned int evtchn_from_irq(unsigned irq)
{
+ if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
+ return 0;
+
return info_for_irq(irq)->evtchn;
}
@@ -278,17 +280,17 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif
- __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
- __set_bit(chn, cpu_evtchn_mask(cpu));
+ clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
+ set_bit(chn, cpu_evtchn_mask(cpu));
irq_info[irq].cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
+ int i;
#ifdef CONFIG_SMP
struct irq_desc *desc;
- int i;
/* By default all event channels notify CPU#0. */
for_each_irq_desc(i, desc) {
@@ -296,7 +298,10 @@ static void init_evtchn_cpu_bindings(void)
}
#endif
- memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
+ for_each_possible_cpu(i)
+ memset(cpu_evtchn_mask(i),
+ (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
+
}
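The hunk above changes init_evtchn_cpu_bindings() to clear every CPU's event-channel mask explicitly instead of memsetting only CPU0's. A minimal userspace sketch of that idea follows (not part of the patch; the array sizes and names are invented): every event channel starts out bound to CPU0, so CPU0's mask is filled with ones and every other CPU's mask with zeroes.

/* Standalone illustration only; not kernel code. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS   4
#define NR_EVENTS 64                      /* one 64-bit word per CPU here */

static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENTS / 64];

static void init_bindings(void)
{
	int cpu;

	/* CPU0 owns all event channels at start; the rest own none. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		memset(cpu_evtchn_mask[cpu],
		       (cpu == 0) ? ~0 : 0, sizeof(cpu_evtchn_mask[cpu]));
}

int main(void)
{
	init_bindings();
	printf("cpu0 mask word0: %lx\n", cpu_evtchn_mask[0][0]);
	printf("cpu1 mask word0: %lx\n", cpu_evtchn_mask[1][0]);
	return 0;
}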
static inline void clear_evtchn(int port)
@@ -353,7 +358,7 @@ static void unmask_evtchn(int port)
struct evtchn_unmask unmask = { .port = port };
(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
} else {
- struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
sync_clear_bit(port, &s->evtchn_mask[0]);
@@ -382,12 +387,17 @@ static int get_nr_hw_irqs(void)
return ret;
}
-/* callers of this function should make sure that PHYSDEVOP_get_nr_pirqs
- * succeeded otherwise nr_pirqs won't hold the right value */
-static int find_unbound_pirq(void)
+static int find_unbound_pirq(int type)
{
- int i;
- for (i = nr_pirqs-1; i >= 0; i--) {
+ int rc, i;
+ struct physdev_get_free_pirq op_get_free_pirq;
+ op_get_free_pirq.type = type;
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
+ if (!rc)
+ return op_get_free_pirq.pirq;
+
+ for (i = 0; i < nr_irqs; i++) {
if (pirq_to_irq[i] < 0)
return i;
}
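The new find_unbound_pirq() above first asks Xen for a free pirq via PHYSDEVOP_get_free_pirq and only falls back to scanning the local pirq_to_irq table when that hypercall does not succeed. A standalone sketch of the same fallback pattern, with the hypercall stubbed out and the table size invented for illustration:

/* Standalone illustration only; not kernel code. */
#include <stdio.h>

#define NR_IRQS 32

static int pirq_to_irq[NR_IRQS];

/* Stand-in for HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, ...). */
static int fake_get_free_pirq(int *pirq)
{
	(void)pirq;
	return -1;              /* pretend the hypercall is unavailable */
}

static int find_unbound_pirq(void)
{
	int pirq, i;

	if (fake_get_free_pirq(&pirq) == 0)
		return pirq;    /* hypervisor picked one for us */

	for (i = 0; i < NR_IRQS; i++)
		if (pirq_to_irq[i] < 0)
			return i;       /* local table says this slot is free */

	return -1;
}

int main(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++)
		pirq_to_irq[i] = -1;
	pirq_to_irq[0] = 5;     /* pretend pirq 0 is already bound to irq 5 */

	printf("free pirq: %d\n", find_unbound_pirq());  /* prints 1 */
	return 0;
}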
@@ -398,15 +408,21 @@ static int find_unbound_irq(void)
{
struct irq_data *data;
int irq, res;
- int start = get_nr_hw_irqs();
+ int bottom = get_nr_hw_irqs();
+ int top = nr_irqs-1;
- if (start == nr_irqs)
+ if (bottom == nr_irqs)
goto no_irqs;
- /* nr_irqs is a magic value. Must not use it.*/
- for (irq = nr_irqs-1; irq > start; irq--) {
+ /* This loop starts from the top of IRQ space and goes down.
+ * We need this b/c if we have a PCI device in a Xen PV guest
+ * we do not have an IO-APIC (though the backend might have them)
+ * mapped in. To not have a collision of physical IRQs with the Xen
+ * event channels start at the top of the IRQ space for virtual IRQs.
+ */
+ for (irq = top; irq > bottom; irq--) {
data = irq_get_irq_data(irq);
- /* only 0->15 have init'd desc; handle irq > 16 */
+ /* only 15->0 have init'd desc; handle irq > 16 */
if (!data)
break;
if (data->chip == &no_irq_chip)
@@ -417,10 +433,10 @@ static int find_unbound_irq(void)
return irq;
}
- if (irq == start)
+ if (irq == bottom)
goto no_irqs;
- res = irq_alloc_desc_at(irq, 0);
+ res = irq_alloc_desc_at(irq, -1);
if (WARN_ON(res != irq))
return -1;
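The comment added to find_unbound_irq() explains why dynamic IRQs are handed out from the top of the IRQ space: the physical IRQs of passed-through PCI devices occupy the bottom, so allocating downwards from nr_irqs-1 avoids collisions. A small userspace sketch of that search order (the sizes and the "used" table are invented):

/* Standalone illustration only; not kernel code. */
#include <stdio.h>
#include <stdbool.h>

#define NR_IRQS    64
#define NR_HW_IRQS 16           /* bottom of the space: hardware/legacy IRQs */

static bool irq_used[NR_IRQS];

static int find_unbound_irq(void)
{
	int irq;

	/* Walk from the top down so we never hand out a hardware IRQ number. */
	for (irq = NR_IRQS - 1; irq > NR_HW_IRQS; irq--)
		if (!irq_used[irq])
			return irq;

	return -1;              /* no free dynamic IRQ left */
}

int main(void)
{
	int a = find_unbound_irq();

	irq_used[a] = true;
	printf("first dynamic irq: %d, next: %d\n", a, find_unbound_irq());
	return 0;
}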
@@ -608,10 +624,10 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
spin_lock(&irq_mapping_update_lock);
- if ((pirq > nr_pirqs) || (gsi > nr_irqs)) {
+ if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
- pirq > nr_pirqs ? "nr_pirqs" :"",
- gsi > nr_irqs ? "nr_irqs" : "");
+ pirq > nr_irqs ? "pirq" :"",
+ gsi > nr_irqs ? "gsi" : "");
goto out;
}
@@ -627,7 +643,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
xen_pv_domain())) {
irq = gsi;
- irq_alloc_desc_at(irq, 0);
+ irq_alloc_desc_at(irq, -1);
} else
irq = find_unbound_irq();
@@ -661,17 +677,21 @@ out:
#include <linux/msi.h>
#include "../pci/msi.h"
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq)
+void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
{
spin_lock(&irq_mapping_update_lock);
- *irq = find_unbound_irq();
- if (*irq == -1)
- goto out;
+ if (alloc & XEN_ALLOC_IRQ) {
+ *irq = find_unbound_irq();
+ if (*irq == -1)
+ goto out;
+ }
- *pirq = find_unbound_pirq();
- if (*pirq == -1)
- goto out;
+ if (alloc & XEN_ALLOC_PIRQ) {
+ *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
+ if (*pirq == -1)
+ goto out;
+ }
set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
handle_level_irq, name);
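xen_allocate_pirq_msi() now takes an alloc mask so callers can request just the Linux irq, just the pirq, or both. A toy sketch of that flag gating (the flag values and allocator stubs below are invented for the illustration; the real flags are XEN_ALLOC_IRQ and XEN_ALLOC_PIRQ):

/* Standalone illustration only; not kernel code. */
#include <stdio.h>

#define ALLOC_IRQ  (1 << 0)
#define ALLOC_PIRQ (1 << 1)

static void allocate(int *irq, int *pirq, int alloc)
{
	if (alloc & ALLOC_IRQ)
		*irq = 42;      /* would come from find_unbound_irq() */
	if (alloc & ALLOC_PIRQ)
		*pirq = 7;      /* would come from find_unbound_pirq(...) */
}

int main(void)
{
	int irq = -1, pirq = -1;

	/* Ask for both; a caller could also request only one of the two. */
	allocate(&irq, &pirq, ALLOC_IRQ | ALLOC_PIRQ);
	printf("irq=%d pirq=%d\n", irq, pirq);
	return 0;
}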
@@ -752,13 +772,14 @@ int xen_destroy_irq(int irq)
goto out;
if (xen_initial_domain()) {
- unmap_irq.pirq = info->u.pirq.gsi;
+ unmap_irq.pirq = info->u.pirq.pirq;
unmap_irq.domid = DOMID_SELF;
rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
if (rc) {
printk(KERN_WARNING "unmap irq failed %d\n", rc);
goto out;
}
+ pirq_to_irq[info->u.pirq.pirq] = -1;
}
irq_info[irq] = mk_unbound_info();
@@ -779,6 +800,11 @@ int xen_gsi_from_irq(unsigned irq)
return gsi_from_irq(irq);
}
+int xen_irq_from_pirq(unsigned pirq)
+{
+ return pirq_to_irq[pirq];
+}
+
int bind_evtchn_to_irq(unsigned int evtchn)
{
int irq;
@@ -1084,7 +1110,7 @@ static void __xen_evtchn_do_upcall(void)
{
int cpu = get_cpu();
struct shared_info *s = HYPERVISOR_shared_info;
- struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
unsigned count;
do {
@@ -1092,7 +1118,7 @@ static void __xen_evtchn_do_upcall(void)
vcpu_info->evtchn_upcall_pending = 0;
- if (__get_cpu_var(xed_nesting_count)++)
+ if (__this_cpu_inc_return(xed_nesting_count) - 1)
goto out;
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -1124,8 +1150,8 @@ static void __xen_evtchn_do_upcall(void)
BUG_ON(!irqs_disabled());
- count = __get_cpu_var(xed_nesting_count);
- __get_cpu_var(xed_nesting_count) = 0;
+ count = __this_cpu_read(xed_nesting_count);
+ __this_cpu_write(xed_nesting_count, 0);
} while (count != 1 || vcpu_info->evtchn_upcall_pending);
out:
@@ -1276,6 +1302,42 @@ static int retrigger_dynirq(unsigned int irq)
return ret;
}
+static void restore_cpu_pirqs(void)
+{
+ int pirq, rc, irq, gsi;
+ struct physdev_map_pirq map_irq;
+
+ for (pirq = 0; pirq < nr_irqs; pirq++) {
+ irq = pirq_to_irq[pirq];
+ if (irq == -1)
+ continue;
+
+ /* save/restore of PT devices doesn't work, so at this point the
+ * only devices present are GSI based emulated devices */
+ gsi = gsi_from_irq(irq);
+ if (!gsi)
+ continue;
+
+ map_irq.domid = DOMID_SELF;
+ map_irq.type = MAP_PIRQ_TYPE_GSI;
+ map_irq.index = gsi;
+ map_irq.pirq = pirq;
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ if (rc) {
+ printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
+ gsi, irq, pirq, rc);
+ irq_info[irq] = mk_unbound_info();
+ pirq_to_irq[pirq] = -1;
+ continue;
+ }
+
+ printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
+
+ startup_pirq(irq);
+ }
+}
+
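restore_cpu_pirqs() above walks the pirq_to_irq table after resume, skips entries without a GSI (only GSI-based emulated devices survive save/restore), remaps the rest through PHYSDEVOP_map_pirq, and invalidates any entry the hypervisor refuses. A standalone sketch of that loop with the map operation stubbed out (names and sizes are invented):

/* Standalone illustration only; not kernel code. */
#include <stdio.h>

#define NR_IRQS 32

static int pirq_to_irq[NR_IRQS];
static int gsi_of_irq[NR_IRQS]; /* 0 means "no GSI" in this sketch */

/* Stand-in for HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, ...). */
static int fake_map_pirq(int gsi, int pirq)
{
	(void)pirq;
	return (gsi > 0) ? 0 : -1;
}

static void restore_pirqs(void)
{
	int pirq, irq, gsi;

	for (pirq = 0; pirq < NR_IRQS; pirq++) {
		irq = pirq_to_irq[pirq];
		if (irq == -1)
			continue;       /* nothing bound to this pirq */

		gsi = gsi_of_irq[irq];
		if (!gsi)
			continue;       /* not a GSI-based device; skip */

		if (fake_map_pirq(gsi, pirq)) {
			pirq_to_irq[pirq] = -1; /* remap failed: drop entry */
			continue;
		}
		printf("restored irq=%d pirq=%d gsi=%d\n", irq, pirq, gsi);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++)
		pirq_to_irq[i] = -1;
	pirq_to_irq[9] = 20;    /* pretend pirq 9 is bound to irq 20, gsi 9 */
	gsi_of_irq[20] = 9;

	restore_pirqs();
	return 0;
}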
static void restore_cpu_virqs(unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
@@ -1299,9 +1361,6 @@ static void restore_cpu_virqs(unsigned int cpu)
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_virq_info(evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
-
- /* Ready for use. */
- unmask_evtchn(evtchn);
}
}
@@ -1327,10 +1386,6 @@ static void restore_cpu_ipis(unsigned int cpu)
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_ipi_info(evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
-
- /* Ready for use. */
- unmask_evtchn(evtchn);
-
}
}
@@ -1390,6 +1445,7 @@ void xen_poll_irq(int irq)
void xen_irq_resume(void)
{
unsigned int cpu, irq, evtchn;
+ struct irq_desc *desc;
init_evtchn_cpu_bindings();
@@ -1408,6 +1464,25 @@ void xen_irq_resume(void)
restore_cpu_virqs(cpu);
restore_cpu_ipis(cpu);
}
+
+ /*
+ * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
+ * are not handled by the IRQ core.
+ */
+ for_each_irq_desc(irq, desc) {
+ if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
+ continue;
+ if (desc->status & IRQ_DISABLED)
+ continue;
+
+ evtchn = evtchn_from_irq(irq);
+ if (evtchn == -1)
+ continue;
+
+ unmask_evtchn(evtchn);
+ }
+
+ restore_cpu_pirqs();
}
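The pass added to xen_irq_resume() above unmasks the event channels of handlers registered with IRQF_NO_SUSPEND, since those stay enabled across suspend and the IRQ core does not touch them. A self-contained sketch of the same scan over a made-up descriptor table:

/* Standalone illustration only; not kernel code. */
#include <stdio.h>
#include <stdbool.h>

#define NR_IRQS 8
#define FLAG_NO_SUSPEND (1 << 0)

struct fake_desc {
	bool has_action;        /* a handler is installed */
	int  flags;             /* handler flags */
	bool disabled;          /* IRQ currently disabled */
	int  evtchn;            /* bound event channel, -1 if none */
};

static struct fake_desc descs[NR_IRQS];

static void unmask_evtchn(int port)
{
	printf("unmask event channel %d\n", port);
}

static void unmask_no_suspend_irqs(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct fake_desc *d = &descs[irq];

		if (!d->has_action || !(d->flags & FLAG_NO_SUSPEND))
			continue;
		if (d->disabled || d->evtchn == -1)
			continue;
		unmask_evtchn(d->evtchn);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++)
		descs[i].evtchn = -1;
	descs[3] = (struct fake_desc){ true, FLAG_NO_SUSPEND, false, 12 };

	unmask_no_suspend_irqs();
	return 0;
}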
static struct irq_chip xen_dynamic_chip __read_mostly = {
@@ -1492,26 +1567,17 @@ void xen_callback_vector(void) {}
void __init xen_init_IRQ(void)
{
- int i, rc;
- struct physdev_nr_pirqs op_nr_pirqs;
+ int i;
cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
GFP_KERNEL);
irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_nr_pirqs, &op_nr_pirqs);
- if (rc < 0) {
- nr_pirqs = nr_irqs;
- if (rc != -ENOSYS)
- printk(KERN_WARNING "PHYSDEVOP_get_nr_pirqs returned rc=%d\n", rc);
- } else {
- if (xen_pv_domain() && !xen_initial_domain())
- nr_pirqs = max((int)op_nr_pirqs.nr_pirqs, nr_irqs);
- else
- nr_pirqs = op_nr_pirqs.nr_pirqs;
- }
- pirq_to_irq = kcalloc(nr_pirqs, sizeof(*pirq_to_irq), GFP_KERNEL);
- for (i = 0; i < nr_pirqs; i++)
+ /* We are using nr_irqs as the maximum number of pirq available but
+ * that number is actually chosen by Xen and we don't know exactly
+ * what it is. Be careful choosing high pirq numbers. */
+ pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
+ for (i = 0; i < nr_irqs; i++)
pirq_to_irq[i] = -1;
evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),