path: root/drivers/irqchip/irq-gic.c
author     Olof Johansson <olof@lixom.net>    2015-04-03 23:38:43 +0300
committer  Olof Johansson <olof@lixom.net>    2015-04-03 23:38:43 +0300
commit     47f36e4921ae11dccf3163f75f70bb55686780f1 (patch)
tree       770560b54cbf2f3b8ae64aa5f202d25aa6c33c56 /drivers/irqchip/irq-gic.c
parent     63fad06a2733f78f7565a436a97848aee8099600 (diff)
parent     874c571414d5617f4042298986b6a826816ee885 (diff)
download   linux-47f36e4921ae11dccf3163f75f70bb55686780f1.tar.xz
Merge tag 'arm-perf-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into next/drivers
Merge "arm-cci PMU updates for 4.1" from Will Deacon: CCI-400 PMU updates This series reworks some of the CCI-400 PMU code so that it can be used on both ARM and ARM64-based systems, without the need to boot in secure mode on the latter. This paves the way for CCI-500 support in future. * tag 'arm-perf-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux: arm-cci: Fix CCI PMU event validation arm-cci: Split the code for PMU vs driver support arm-cci: Get rid of secure transactions for PMU driver arm-cci: Abstract the CCI400 PMU specific definitions arm-cci: Rearrange code for splitting PMU vs driver code drivers: cci: reject groups spanning multiple HW PMUs + Linux 4.0-rc4 Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'drivers/irqchip/irq-gic.c')
-rw-r--r--  drivers/irqchip/irq-gic.c  20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4634cf7d0ec3..471e1cdc1933 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d)
static void gic_mask_irq(struct irq_data *d)
{
u32 mask = 1 << (gic_irq(d) % 32);
+ unsigned long flags;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
if (gic_arch_extn.irq_mask)
gic_arch_extn.irq_mask(d);
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_unmask_irq(struct irq_data *d)
{
u32 mask = 1 << (gic_irq(d) % 32);
+ unsigned long flags;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
if (gic_arch_extn.irq_unmask)
gic_arch_extn.irq_unmask(d);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_eoi_irq(struct irq_data *d)
@@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = gic_dist_base(d);
unsigned int gicirq = gic_irq(d);
+ unsigned long flags;
int ret;
/* Interrupt configuration for SGIs can't be changed */
@@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
if (gic_arch_extn.irq_set_type)
gic_arch_extn.irq_set_type(d, type);
ret = gic_configure_irq(gicirq, type, base, NULL);
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
return ret;
}
@@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
u32 val, mask, bit;
+ unsigned long flags;
if (!force)
cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
mask = 0xff << shift;
bit = gic_cpu_map[cpu] << shift;
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
return IRQ_SET_MASK_OK;
}
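
The change above consistently replaces raw_spin_lock()/raw_spin_unlock() on irq_controller_lock with the _irqsave/_irqrestore variants, so the lock can be taken safely regardless of whether local interrupts are already disabled in the calling context. A minimal standalone sketch of that pattern, using illustrative names (example_lock, example_set_bit) rather than anything from the patch:

#include <linux/spinlock.h>
#include <linux/io.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_set_bit(void __iomem *reg, u32 bit)
{
	unsigned long flags;

	/* Disable local interrupts and remember their previous state... */
	raw_spin_lock_irqsave(&example_lock, flags);
	writel_relaxed(readl_relaxed(reg) | bit, reg);	/* read-modify-write under the lock */
	/* ...then restore exactly that state on unlock. */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}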