author | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-09-17 18:13:38 +0400
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-09-17 18:14:07 +0400
commit | 7f63037c8fb204f666367c6469f5a9b9d6888877 (patch)
tree | a026c999c7166aa1004e111b2e9afd7b663acdce | /drivers/irqchip
parent | 272b98c6455f00884f0350f775c5342358ebb73f (diff)
parent | b22537c682671de97c932d5addb6b7d087352aa1 (diff)
download | linux-7f63037c8fb204f666367c6469f5a9b9d6888877.tar.xz
Merge branch 'iks_for_rmk' of git://git.linaro.org/people/nico/linux into devel-stable
Nicolas Pitre writes:
This is the first part of the patch series adding IKS (In-Kernel
Switcher) support for big.LITTLE system architectures. This consists of
the core patches only. Extra patches to come later will introduce
various optimizations and tracing support.
Those patches were posted on the list a while ago here:
http://news.gmane.org/group/gmane.linux.ports.arm.kernel/thread=253942
Diffstat (limited to 'drivers/irqchip')
-rw-r--r-- | drivers/irqchip/irq-gic.c | 108 |
1 file changed, 105 insertions, 3 deletions
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d0e948084eaf..6365b59181ee 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
+	raw_spin_lock(&irq_controller_lock);
 	mask = 0xff << shift;
 	bit = gic_cpu_map[cpu] << shift;
-
-	raw_spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
 	raw_spin_unlock(&irq_controller_lock);
@@ -652,7 +651,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
 	int cpu;
-	unsigned long map = 0;
+	unsigned long flags, map = 0;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -666,6 +667,107 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
 	/* this always happens on GIC0 */
 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+	unsigned int cpu_bit;
+
+	if (cpu >= NR_GIC_CPU_IF)
+		return -1;
+	cpu_bit = gic_cpu_map[cpu];
+	if (cpu_bit & (cpu_bit - 1))
+		return -1;
+	return __ffs(cpu_bit);
+}
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
+ * is also updated.  Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+	void __iomem *dist_base;
+	int i, ror_val, cpu = smp_processor_id();
+	u32 val, cur_target_mask, active_mask;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	if (!dist_base)
+		return;
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+
+	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+	cur_target_mask = 0x01010101 << cur_cpu_id;
+	ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+	raw_spin_lock(&irq_controller_lock);
+
+	/* Update the target interface for this logical CPU */
+	gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+	/*
+	 * Find all the peripheral interrupts targetting the current
+	 * CPU interface and migrate them to the new CPU interface.
+	 * We skip DIST_TARGET 0 to 7 as they are read-only.
+	 */
+	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+		active_mask = val & cur_target_mask;
+		if (active_mask) {
+			val &= ~active_mask;
+			val |= ror32(active_mask, ror_val);
+			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+		}
+	}
+
+	raw_spin_unlock(&irq_controller_lock);
+
+	/*
+	 * Now let's migrate and clear any potential SGIs that might be
+	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
+	 * is a banked register, we can only forward the SGI using
+	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
+	 * doesn't use that information anyway.
+	 *
+	 * For the same reason we do not adjust SGI source information
+	 * for previously sent SGIs by us to other CPUs either.
+	 */
+	for (i = 0; i < 16; i += 4) {
+		int j;
+		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+		if (!val)
+			continue;
+		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+		for (j = i; j < i + 4; j++) {
+			if (val & 0xff)
+				writel_relaxed((1 << (new_cpu_id + 16)) | j,
+					       dist_base + GIC_DIST_SOFTINT);
+			val >>= 8;
+		}
+	}
 }
 #endif
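For reference, the core of the migration above is the byte-wise rotation of GIC_DIST_TARGET values: each 32-bit register holds four one-byte target fields, so rotating the bytes that match the current interface by (cur_cpu_id - new_cpu_id) & 31 bits moves every matching target bit from the old CPU interface to the new one in a single operation. The following is a minimal standalone sketch of that trick (hosted C, not kernel code: the kernel's ror32() is replaced by a local helper, and the register value is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's ror32(): rotate a 32-bit word right by 'shift' bits. */
static uint32_t ror32_local(uint32_t word, unsigned int shift)
{
        return (word >> shift) | (word << ((32 - shift) & 31));
}

int main(void)
{
        /*
         * One GIC_DIST_TARGET register covers four interrupts, one target
         * byte each.  Hypothetical value: IRQs 0 and 1 target CPU interface 1
         * (bit 1 set in their bytes), IRQ 2 targets CPU interface 3.
         */
        uint32_t val = 0x00080202;
        unsigned int cur_cpu_id = 1, new_cpu_id = 2;

        uint32_t cur_target_mask = 0x01010101u << cur_cpu_id;  /* 0x02020202 */
        unsigned int ror_val = (cur_cpu_id - new_cpu_id) & 31; /* 31 == rotate left by 1 */

        uint32_t active_mask = val & cur_target_mask;  /* bytes targeting interface 1  */
        val &= ~active_mask;                           /* drop the old target bits     */
        val |= ror32_local(active_mask, ror_val);      /* re-add them for interface 2  */

        printf("new GIC_DIST_TARGET value: 0x%08x\n", val);  /* prints 0x00080404 */
        return 0;
}

Running this prints 0x00080404: the two bytes that targeted CPU interface 1 now target interface 2, while the byte targeting interface 3 is untouched, mirroring what the loop over GIC_DIST_TARGET does for all peripheral interrupts.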