39 files changed, 1017 insertions(+), 370 deletions(-)
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt index 7d9d3c2286b2..369a4f48eb0d 100644 --- a/Documentation/arm64/booting.txt +++ b/Documentation/arm64/booting.txt @@ -173,13 +173,22 @@ Before jumping into the kernel, the following conditions must be met: the kernel image will be entered must be initialised by software at a higher exception level to prevent execution in an UNKNOWN state. - For systems with a GICv3 interrupt controller: + For systems with a GICv3 interrupt controller to be used in v3 mode: - If EL3 is present: ICC_SRE_EL3.Enable (bit 3) must be initialiased to 0b1. ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1. - If the kernel is entered at EL1: ICC.SRE_EL2.Enable (bit 3) must be initialised to 0b1 ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1. + - The DT or ACPI tables must describe a GICv3 interrupt controller. + + For systems with a GICv3 interrupt controller to be used in + compatibility (v2) mode: + - If EL3 is present: + ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b0. + - If the kernel is entered at EL1: + ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b0. + - The DT or ACPI tables must describe a GICv2 interrupt controller. The requirements described above for CPU mode, caches, MMUs, architected timers, coherency and system registers apply to all CPUs. All CPUs must diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt index 2da059a4790c..cc56021eb60b 100644 --- a/Documentation/devicetree/bindings/arm/gic.txt +++ b/Documentation/devicetree/bindings/arm/gic.txt @@ -11,13 +11,14 @@ have PPIs or SGIs. Main node required properties: - compatible : should be one of: - "arm,gic-400" + "arm,arm1176jzf-devchip-gic" + "arm,arm11mp-gic" "arm,cortex-a15-gic" - "arm,cortex-a9-gic" "arm,cortex-a7-gic" - "arm,arm11mp-gic" + "arm,cortex-a9-gic" + "arm,gic-400" + "arm,pl390" "brcm,brahma-b15-gic" - "arm,arm1176jzf-devchip-gic" "qcom,msm-8660-qgic" "qcom,msm-qgic2" - interrupt-controller : Identifies the node as an interrupt controller @@ -58,6 +59,21 @@ Optional regions, used when the GIC doesn't have banked registers. The offset is cpu-offset * cpu-nr. +- clocks : List of phandle and clock-specific pairs, one for each entry + in clock-names. +- clock-names : List of names for the GIC clock input(s). Valid clock names + depend on the GIC variant: + "ic_clk" (for "arm,arm11mp-gic") + "PERIPHCLKEN" (for "arm,cortex-a15-gic") + "PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic") + "clk" (for "arm,gic-400") + "gclk" (for "arm,pl390") + +- power-domains : A phandle and PM domain specifier as defined by bindings of + the power controller specified by phandle, used when the GIC + is part of a Power or Clock Domain. + + Example: intc: interrupt-controller@fff11000 { diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt index 63633bdea7e4..ae5054c27c99 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt @@ -10,6 +10,7 @@ Required properties: - "renesas,irqc-r8a7792" (R-Car V2H) - "renesas,irqc-r8a7793" (R-Car M2-N) - "renesas,irqc-r8a7794" (R-Car E2) + - "renesas,intc-ex-r8a7795" (R-Car H3) - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in interrupts.txt in this directory - clocks: Must contain a reference to the functional clock. 
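The GICv3 "v3 mode" boot requirement above (ICC_SRE_ELx.SRE initialised to 0b1) has a runtime counterpart later in this series: gic_enable_sre() tries to set ICC_SRE_EL1.SRE and checks whether the bit sticks, so the kernel can fall back to the GICv2-compatible path when a higher exception level keeps SRE disabled. A minimal sketch of that probe, using the accessors this series introduces (the function name here is illustrative, not the final helper):

	static bool example_probe_sre(void)
	{
		u32 val = gic_read_sre();

		if (val & ICC_SRE_EL1_SRE)
			return true;		/* already in system-register mode */

		/* Try to enable SRE; if EL2/EL3 forbids it, the write won't stick. */
		gic_write_sre(val | ICC_SRE_EL1_SRE);

		return !!(gic_read_sre() & ICC_SRE_EL1_SRE);
	}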
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 72ad724c67ae..0d72535ed01d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -819,6 +819,7 @@ config ARCH_VIRT bool "Dummy Virtual Machine" if ARCH_MULTI_V7 select ARM_AMBA select ARM_GIC + select ARM_GIC_V3 select ARM_PSCI select HAVE_ARM_ARCH_TIMER diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h new file mode 100644 index 000000000000..6607d976e07d --- /dev/null +++ b/arch/arm/include/asm/arch_gicv3.h @@ -0,0 +1,188 @@ +/* + * arch/arm/include/asm/arch_gicv3.h + * + * Copyright (C) 2015 ARM Ltd. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_ARCH_GICV3_H +#define __ASM_ARCH_GICV3_H + +#ifndef __ASSEMBLY__ + +#include <linux/io.h> + +#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2 +#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm + +#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1) +#define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1) +#define ICC_IAR1 __ACCESS_CP15(c12, 0, c12, 0) +#define ICC_SGI1R __ACCESS_CP15_64(0, c12) +#define ICC_PMR __ACCESS_CP15(c4, 0, c6, 0) +#define ICC_CTLR __ACCESS_CP15(c12, 0, c12, 4) +#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5) +#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7) + +#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5) + +#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4) +#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0) +#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1) +#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2) +#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3) +#define ICH_ELSR __ACCESS_CP15(c12, 4, c11, 5) +#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7) + +#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x) +#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x) + +#define ICH_LR0 __LR0(0) +#define ICH_LR1 __LR0(1) +#define ICH_LR2 __LR0(2) +#define ICH_LR3 __LR0(3) +#define ICH_LR4 __LR0(4) +#define ICH_LR5 __LR0(5) +#define ICH_LR6 __LR0(6) +#define ICH_LR7 __LR0(7) +#define ICH_LR8 __LR8(0) +#define ICH_LR9 __LR8(1) +#define ICH_LR10 __LR8(2) +#define ICH_LR11 __LR8(3) +#define ICH_LR12 __LR8(4) +#define ICH_LR13 __LR8(5) +#define ICH_LR14 __LR8(6) +#define ICH_LR15 __LR8(7) + +/* LR top half */ +#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x) +#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x) + +#define ICH_LRC0 __LRC0(0) +#define ICH_LRC1 __LRC0(1) +#define ICH_LRC2 __LRC0(2) +#define ICH_LRC3 __LRC0(3) +#define ICH_LRC4 __LRC0(4) +#define ICH_LRC5 __LRC0(5) +#define ICH_LRC6 __LRC0(6) +#define ICH_LRC7 __LRC0(7) +#define ICH_LRC8 __LRC8(0) +#define ICH_LRC9 __LRC8(1) +#define ICH_LRC10 __LRC8(2) +#define ICH_LRC11 __LRC8(3) +#define ICH_LRC12 __LRC8(4) +#define ICH_LRC13 __LRC8(5) +#define ICH_LRC14 __LRC8(6) +#define ICH_LRC15 __LRC8(7) + +#define __AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x) +#define ICH_AP0R0 __AP0Rx(0) +#define ICH_AP0R1 __AP0Rx(1) +#define ICH_AP0R2 __AP0Rx(2) +#define ICH_AP0R3 __AP0Rx(3) + +#define __AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x) +#define ICH_AP1R0 __AP1Rx(0) 
+#define ICH_AP1R1 __AP1Rx(1) +#define ICH_AP1R2 __AP1Rx(2) +#define ICH_AP1R3 __AP1Rx(3) + +/* Low-level accessors */ + +static inline void gic_write_eoir(u32 irq) +{ + asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq)); + isb(); +} + +static inline void gic_write_dir(u32 val) +{ + asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val)); + isb(); +} + +static inline u32 gic_read_iar(void) +{ + u32 irqstat; + + asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat)); + return irqstat; +} + +static inline void gic_write_pmr(u32 val) +{ + asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val)); +} + +static inline void gic_write_ctlr(u32 val) +{ + asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val)); + isb(); +} + +static inline void gic_write_grpen1(u32 val) +{ + asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val)); + isb(); +} + +static inline void gic_write_sgi1r(u64 val) +{ + asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val)); +} + +static inline u32 gic_read_sre(void) +{ + u32 val; + + asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val)); + return val; +} + +static inline void gic_write_sre(u32 val) +{ + asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val)); + isb(); +} + +/* + * Even in 32bit systems that use LPAE, there is no guarantee that the I/O + * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't + * make much sense. + * Moreover, 64bit I/O emulation is extremely difficult to implement on + * AArch32, since the syndrome register doesn't provide any information for + * them. + * Consequently, the following IO helpers use 32bit accesses. + * + * There are only two registers that need 64bit accesses in this driver: + * - GICD_IROUTERn, contain the affinity values associated to each interrupt. + * The upper-word (aff3) will always be 0, so there is no need for a lock. + * - GICR_TYPER is an ID register and doesn't need atomicity. + */ +static inline void gic_write_irouter(u64 val, volatile void __iomem *addr) +{ + writel_relaxed((u32)val, addr); + writel_relaxed((u32)(val >> 32), addr + 4); +} + +static inline u64 gic_read_typer(const volatile void __iomem *addr) +{ + u64 val; + + val = readl_relaxed(addr); + val |= (u64)readl_relaxed(addr + 4) << 32; + return val; +} + +#endif /* !__ASSEMBLY__ */ +#endif /* !__ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 07d1811aa03f..440d906429de 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -348,6 +348,33 @@ config ARM64_ERRATUM_843419 If unsure, say Y. +config CAVIUM_ERRATUM_22375 + bool "Cavium erratum 22375, 24313" + default y + help + Enable workaround for erratum 22375, 24313. + + This implements two gicv3-its errata workarounds for ThunderX. Both + with small impact affecting only ITS table allocation. + + erratum 22375: only alloc 8MB table size + erratum 24313: ignore memory access type + + The fixes are in ITS initialization and basically ignore memory access + type and table size provided by the TYPER and BASER registers. + + If unsure, say Y. + +config CAVIUM_ERRATUM_23154 + bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed" + default y + help + The gicv3 of ThunderX requires a modified version for + reading the IAR status to ensure data synchronization + (access to icc_iar1_el1 is not sync'ed before and after). + + If unsure, say Y. 
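To put the CAVIUM_ERRATUM_22375 help text above into numbers: clamping the DeviceID width to 20 bits caps the ITS device table at 8MB, assuming an 8-byte device-table entry size (the entry size is an assumption here for illustration; the real value is read from GITS_BASER):

	#define EXAMPLE_DEVBITS		20	/* 0x14, as used by the workaround */
	#define EXAMPLE_ENTRY_SIZE	8	/* bytes per device-table entry (assumed) */
	#define EXAMPLE_TABLE_SIZE	((1UL << EXAMPLE_DEVBITS) * EXAMPLE_ENTRY_SIZE)	/* = 8 MB */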
+ endmenu diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h new file mode 100644 index 000000000000..030cdcb46c6b --- /dev/null +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -0,0 +1,170 @@ +/* + * arch/arm64/include/asm/arch_gicv3.h + * + * Copyright (C) 2015 ARM Ltd. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_ARCH_GICV3_H +#define __ASM_ARCH_GICV3_H + +#include <asm/sysreg.h> + +#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) +#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) +#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) +#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) +#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) +#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) +#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) +#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) + +#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) + +/* + * System register definitions + */ +#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) +#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) +#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) +#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) +#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) +#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) +#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) + +#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x) +#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x) + +#define ICH_LR0_EL2 __LR0_EL2(0) +#define ICH_LR1_EL2 __LR0_EL2(1) +#define ICH_LR2_EL2 __LR0_EL2(2) +#define ICH_LR3_EL2 __LR0_EL2(3) +#define ICH_LR4_EL2 __LR0_EL2(4) +#define ICH_LR5_EL2 __LR0_EL2(5) +#define ICH_LR6_EL2 __LR0_EL2(6) +#define ICH_LR7_EL2 __LR0_EL2(7) +#define ICH_LR8_EL2 __LR8_EL2(0) +#define ICH_LR9_EL2 __LR8_EL2(1) +#define ICH_LR10_EL2 __LR8_EL2(2) +#define ICH_LR11_EL2 __LR8_EL2(3) +#define ICH_LR12_EL2 __LR8_EL2(4) +#define ICH_LR13_EL2 __LR8_EL2(5) +#define ICH_LR14_EL2 __LR8_EL2(6) +#define ICH_LR15_EL2 __LR8_EL2(7) + +#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) +#define ICH_AP0R0_EL2 __AP0Rx_EL2(0) +#define ICH_AP0R1_EL2 __AP0Rx_EL2(1) +#define ICH_AP0R2_EL2 __AP0Rx_EL2(2) +#define ICH_AP0R3_EL2 __AP0Rx_EL2(3) + +#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) +#define ICH_AP1R0_EL2 __AP1Rx_EL2(0) +#define ICH_AP1R1_EL2 __AP1Rx_EL2(1) +#define ICH_AP1R2_EL2 __AP1Rx_EL2(2) +#define ICH_AP1R3_EL2 __AP1Rx_EL2(3) + +#ifndef __ASSEMBLY__ + +#include <linux/stringify.h> + +/* + * Low-level accessors + * + * These system registers are 32 bits, but we make sure that the compiler + * sets the GP register's most significant bits to 0 with an explicit cast. 
+ */ + +static inline void gic_write_eoir(u32 irq) +{ + asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" ((u64)irq)); + isb(); +} + +static inline void gic_write_dir(u32 irq) +{ + asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" ((u64)irq)); + isb(); +} + +static inline u64 gic_read_iar_common(void) +{ + u64 irqstat; + + asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat)); + return irqstat; +} + +/* + * Cavium ThunderX erratum 23154 + * + * The gicv3 of ThunderX requires a modified version for reading the + * IAR status to ensure data synchronization (access to icc_iar1_el1 + * is not sync'ed before and after). + */ +static inline u64 gic_read_iar_cavium_thunderx(void) +{ + u64 irqstat; + + asm volatile( + "nop;nop;nop;nop\n\t" + "nop;nop;nop;nop\n\t" + "mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t" + "nop;nop;nop;nop" + : "=r" (irqstat)); + mb(); + + return irqstat; +} + +static inline void gic_write_pmr(u32 val) +{ + asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val)); +} + +static inline void gic_write_ctlr(u32 val) +{ + asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" ((u64)val)); + isb(); +} + +static inline void gic_write_grpen1(u32 val) +{ + asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" ((u64)val)); + isb(); +} + +static inline void gic_write_sgi1r(u64 val) +{ + asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val)); +} + +static inline u32 gic_read_sre(void) +{ + u64 val; + + asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); + return val; +} + +static inline void gic_write_sre(u32 val) +{ + asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((u64)val)); + isb(); +} + +#define gic_read_typer(c) readq_relaxed(c) +#define gic_write_irouter(v, c) writeq_relaxed(v, c) + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 171570702bb8..dbc78d2b8cc6 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -27,8 +27,9 @@ #define ARM64_HAS_SYSREG_GIC_CPUIF 3 #define ARM64_HAS_PAN 4 #define ARM64_HAS_LSE_ATOMICS 5 +#define ARM64_WORKAROUND_CAVIUM_23154 6 -#define ARM64_NCAPS 6 +#define ARM64_NCAPS 7 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index ee6403df9fe4..100a3d1b17c8 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -62,15 +62,18 @@ (0xf << MIDR_ARCHITECTURE_SHIFT) | \ ((partnum) << MIDR_PARTNUM_SHIFT)) -#define ARM_CPU_IMP_ARM 0x41 -#define ARM_CPU_IMP_APM 0x50 +#define ARM_CPU_IMP_ARM 0x41 +#define ARM_CPU_IMP_APM 0x50 +#define ARM_CPU_IMP_CAVIUM 0x43 -#define ARM_CPU_PART_AEM_V8 0xD0F -#define ARM_CPU_PART_FOUNDATION 0xD00 -#define ARM_CPU_PART_CORTEX_A57 0xD07 -#define ARM_CPU_PART_CORTEX_A53 0xD03 +#define ARM_CPU_PART_AEM_V8 0xD0F +#define ARM_CPU_PART_FOUNDATION 0xD00 +#define ARM_CPU_PART_CORTEX_A57 0xD07 +#define ARM_CPU_PART_CORTEX_A53 0xD03 -#define APM_CPU_PART_POTENZA 0x000 +#define APM_CPU_PART_POTENZA 0x000 + +#define CAVIUM_CPU_PART_THUNDERX 0x0A1 #define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 #define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 6ffd91438560..574450c257a4 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -23,6 +23,7 @@ #define MIDR_CORTEX_A53 
MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) +#define MIDR_THUNDERX MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ MIDR_ARCHITECTURE_MASK) @@ -82,6 +83,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), }, #endif +#ifdef CONFIG_CAVIUM_ERRATUM_23154 + { + /* Cavium ThunderX, pass 1.x */ + .desc = "Cavium erratum 23154", + .capability = ARM64_WORKAROUND_CAVIUM_23154, + MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01), + }, +#endif { } }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 3c9aed32f70b..305f30dc9e63 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -23,6 +23,8 @@ #include <asm/cpufeature.h> #include <asm/processor.h> +#include <linux/irqchip/arm-gic-v3.h> + static bool feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) { @@ -45,11 +47,26 @@ __ID_FEAT_CHK(id_aa64pfr0); __ID_FEAT_CHK(id_aa64mmfr1); __ID_FEAT_CHK(id_aa64isar0); +static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry) +{ + bool has_sre; + + if (!has_id_aa64pfr0_feature(entry)) + return false; + + has_sre = gic_enable_sre(); + if (!has_sre) + pr_warn_once("%s present but disabled by higher exception level\n", + entry->desc); + + return has_sre; +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", .capability = ARM64_HAS_SYSREG_GIC_CPUIF, - .matches = has_id_aa64pfr0_feature, + .matches = has_useable_gicv3_cpuif, .field_pos = 24, .min_field_value = 1, }, diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 90d09eddd5b2..351a4de1b1e2 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -498,6 +498,8 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 msr_s ICC_SRE_EL2, x0 isb // Make sure SRE is now set + mrs_s x0, ICC_SRE_EL2 // Read SRE back, + tbz x0, #0, 3f // and check that it sticks msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults 3: diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 5c7e920e4861..ff5292c6277c 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -16,6 +16,9 @@ menuconfig VIRTUALIZATION if VIRTUALIZATION +config KVM_ARM_VGIC_V3 + bool + config KVM bool "Kernel-based Virtual Machine (KVM) support" depends on OF @@ -31,6 +34,7 @@ config KVM select KVM_VFIO select HAVE_KVM_EVENTFD select HAVE_KVM_IRQFD + select KVM_ARM_VGIC_V3 ---help--- Support hosting virtualized guest machines. 
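For reference, the MIDR value the new Cavium entry in arm64_errata[] matches against works out as follows, using the cputype.h macros shown earlier in this diff (the EXAMPLE_ prefix is only to avoid clashing with the real definitions):

	/* Implementer 0x43 (Cavium), architecture field 0xf, part 0x0A1 (ThunderX) */
	#define EXAMPLE_MIDR_THUNDERX	((0x43 << 24) | (0xf << 16) | (0x0A1 << 4))	/* 0x430f0a10 */

MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01) then restricts the workaround to parts whose combined variant/revision value is 0x00 or 0x01, i.e. pass 1.x silicon, matching the "Cavium ThunderX, pass 1.x" comment.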
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 27b52c8729cd..67d802706be9 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -123,6 +123,7 @@ config RENESAS_INTC_IRQPIN config RENESAS_IRQC bool + select GENERIC_IRQ_CHIP select IRQ_DOMAIN config ST_IRQCHIP diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index f6d680485bee..62bb840c613f 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -70,16 +70,15 @@ static struct irq_domain *aic5_domain; static asmlinkage void __exception_irq_entry aic5_handle(struct pt_regs *regs) { - struct irq_domain_chip_generic *dgc = aic5_domain->gc; - struct irq_chip_generic *gc = dgc->gc[0]; + struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0); u32 irqnr; u32 irqstat; - irqnr = irq_reg_readl(gc, AT91_AIC5_IVR); - irqstat = irq_reg_readl(gc, AT91_AIC5_ISR); + irqnr = irq_reg_readl(bgc, AT91_AIC5_IVR); + irqstat = irq_reg_readl(bgc, AT91_AIC5_ISR); if (!irqstat) - irq_reg_writel(gc, 0, AT91_AIC5_EOICR); + irq_reg_writel(bgc, 0, AT91_AIC5_EOICR); else handle_domain_irq(aic5_domain, irqnr, regs); } @@ -87,8 +86,7 @@ aic5_handle(struct pt_regs *regs) static void aic5_mask(struct irq_data *d) { struct irq_domain *domain = d->domain; - struct irq_domain_chip_generic *dgc = domain->gc; - struct irq_chip_generic *bgc = dgc->gc[0]; + struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); /* @@ -105,8 +103,7 @@ static void aic5_mask(struct irq_data *d) static void aic5_unmask(struct irq_data *d) { struct irq_domain *domain = d->domain; - struct irq_domain_chip_generic *dgc = domain->gc; - struct irq_chip_generic *bgc = dgc->gc[0]; + struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); /* @@ -123,14 +120,13 @@ static void aic5_unmask(struct irq_data *d) static int aic5_retrigger(struct irq_data *d) { struct irq_domain *domain = d->domain; - struct irq_domain_chip_generic *dgc = domain->gc; - struct irq_chip_generic *gc = dgc->gc[0]; + struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); - irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); - irq_reg_writel(gc, 1, AT91_AIC5_ISCR); - irq_gc_unlock(gc); + irq_gc_lock(bgc); + irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); + irq_reg_writel(bgc, 1, AT91_AIC5_ISCR); + irq_gc_unlock(bgc); return 0; } @@ -138,18 +134,17 @@ static int aic5_retrigger(struct irq_data *d) static int aic5_set_type(struct irq_data *d, unsigned type) { struct irq_domain *domain = d->domain; - struct irq_domain_chip_generic *dgc = domain->gc; - struct irq_chip_generic *gc = dgc->gc[0]; + struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); unsigned int smr; int ret; - irq_gc_lock(gc); - irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); - smr = irq_reg_readl(gc, AT91_AIC5_SMR); + irq_gc_lock(bgc); + irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); + smr = irq_reg_readl(bgc, AT91_AIC5_SMR); ret = aic_common_set_type(d, type, &smr); if (!ret) - irq_reg_writel(gc, smr, AT91_AIC5_SMR); - irq_gc_unlock(gc); + irq_reg_writel(bgc, smr, AT91_AIC5_SMR); + irq_gc_unlock(bgc); return ret; } @@ -159,7 +154,7 @@ static void aic5_suspend(struct irq_data *d) { struct irq_domain *domain = d->domain; struct irq_domain_chip_generic *dgc = domain->gc; - struct irq_chip_generic *bgc = dgc->gc[0]; + struct irq_chip_generic 
*bgc = irq_get_domain_generic_chip(domain, 0); struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); int i; u32 mask; @@ -183,7 +178,7 @@ static void aic5_resume(struct irq_data *d) { struct irq_domain *domain = d->domain; struct irq_domain_chip_generic *dgc = domain->gc; - struct irq_chip_generic *bgc = dgc->gc[0]; + struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); int i; u32 mask; @@ -207,7 +202,7 @@ static void aic5_pm_shutdown(struct irq_data *d) { struct irq_domain *domain = d->domain; struct irq_domain_chip_generic *dgc = domain->gc; - struct irq_chip_generic *bgc = dgc->gc[0]; + struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); int i; @@ -262,12 +257,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, irq_hw_number_t *out_hwirq, unsigned int *out_type) { - struct irq_domain_chip_generic *dgc = d->gc; - struct irq_chip_generic *gc; + struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0); unsigned smr; int ret; - if (!dgc) + if (!bgc) return -EINVAL; ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize, @@ -275,15 +269,13 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, if (ret) return ret; - gc = dgc->gc[0]; - - irq_gc_lock(gc); - irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR); - smr = irq_reg_readl(gc, AT91_AIC5_SMR); + irq_gc_lock(bgc); + irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); + smr = irq_reg_readl(bgc, AT91_AIC5_SMR); ret = aic_common_set_priority(intspec[2], &smr); if (!ret) - irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR); - irq_gc_unlock(gc); + irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR); + irq_gc_unlock(bgc); return ret; } diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index 9448e391cb71..44a077f3a4a2 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -21,6 +21,17 @@ #include "irq-gic-common.h" +void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, + void *data) +{ + for (; quirks->desc; quirks++) { + if (quirks->iidr != (quirks->mask & iidr)) + continue; + quirks->init(data); + pr_info("GIC: enabling workaround for %s\n", quirks->desc); + } +} + int gic_configure_irq(unsigned int irq, unsigned int type, void __iomem *base, void (*sync_access)(void)) { diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 35a9884778bd..fff697db8e22 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h @@ -20,10 +20,19 @@ #include <linux/of.h> #include <linux/irqdomain.h> +struct gic_quirk { + const char *desc; + void (*init)(void *data); + u32 iidr; + u32 mask; +}; + int gic_configure_irq(unsigned int irq, unsigned int type, void __iomem *base, void (*sync_access)(void)); void gic_dist_config(void __iomem *base, int gic_irqs, void (*sync_access)(void)); void gic_cpu_config(void __iomem *base, void (*sync_access)(void)); +void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, + void *data); #endif /* _IRQ_GIC_COMMON_H */ diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 12985daa66ab..9a36ab0b544c 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -37,12 +37,19 @@ #define V2M_MSI_SETSPI_NS 0x040 #define V2M_MIN_SPI 32 #define V2M_MAX_SPI 1019 +#define V2M_MSI_IIDR 0xFCC #define V2M_MSI_TYPER_BASE_SPI(x) \ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & 
V2M_MSI_TYPER_BASE_MASK) #define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) +/* APM X-Gene with GICv2m MSI_IIDR register value */ +#define XGENE_GICV2M_MSI_IIDR 0x06000170 + +/* List of flags for specific v2m implementation */ +#define GICV2M_NEEDS_SPI_OFFSET 0x00000001 + struct v2m_data { spinlock_t msi_cnt_lock; struct resource res; /* GICv2m resource */ @@ -50,6 +57,7 @@ struct v2m_data { u32 spi_start; /* The SPI number that MSIs start */ u32 nr_spis; /* The number of SPIs for MSIs */ unsigned long *bm; /* MSI vector bitmap */ + u32 flags; /* v2m flags for specific implementation */ }; static void gicv2m_mask_msi_irq(struct irq_data *d) @@ -98,6 +106,9 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) msg->address_hi = upper_32_bits(addr); msg->address_lo = lower_32_bits(addr); msg->data = data->hwirq; + + if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) + msg->data -= v2m->spi_start; } static struct irq_chip gicv2m_irq_chip = { @@ -266,6 +277,17 @@ static int __init gicv2m_init_one(struct device_node *node, goto err_iounmap; } + /* + * APM X-Gene GICv2m implementation has an erratum where + * the MSI data needs to be the offset from the spi_start + * in order to trigger the correct MSI interrupt. This is + * different from the standard GICv2m implementation where + * the MSI data is the absolute value within the range from + * spi_start to (spi_start + num_spis). + */ + if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR) + v2m->flags |= GICV2M_NEEDS_SPI_OFFSET; + v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), GFP_KERNEL); if (!v2m->bm) { diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 25ceae9f7348..5f11898b8e39 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -37,7 +37,10 @@ #include <asm/cputype.h> #include <asm/exception.h> -#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0) +#include "irq-gic-common.h" + +#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) +#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) @@ -817,7 +820,22 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) int i; int psz = SZ_64K; u64 shr = GITS_BASER_InnerShareable; - u64 cache = GITS_BASER_WaWb; + u64 cache; + u64 typer; + u32 ids; + + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { + /* + * erratum 22375: only alloc 8MB table size + * erratum 24313: ignore memory access type + */ + cache = 0; + ids = 0x14; /* 20 bits, 8MB */ + } else { + cache = GITS_BASER_WaWb; + typer = readq_relaxed(its->base + GITS_TYPER); + ids = GITS_TYPER_DEVBITS(typer); + } for (i = 0; i < GITS_BASER_NR_REGS; i++) { u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); @@ -825,6 +843,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) u64 entry_size = GITS_BASER_ENTRY_SIZE(val); int order = get_order(psz); int alloc_size; + int alloc_pages; u64 tmp; void *base; @@ -840,9 +859,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) * For other tables, only allocate a single page. */ if (type == GITS_BASER_TYPE_DEVICE) { - u64 typer = readq_relaxed(its->base + GITS_TYPER); - u32 ids = GITS_TYPER_DEVBITS(typer); - /* * 'order' was initialized earlier to the default page * granule of the the ITS. 
We can't have an allocation @@ -859,6 +875,14 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) } alloc_size = (1 << order) * PAGE_SIZE; + alloc_pages = (alloc_size / psz); + if (alloc_pages > GITS_BASER_PAGES_MAX) { + alloc_pages = GITS_BASER_PAGES_MAX; + order = get_order(GITS_BASER_PAGES_MAX * psz); + pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n", + node_name, order, alloc_pages); + } + base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!base) { err = -ENOMEM; @@ -887,7 +911,7 @@ retry_baser: break; } - val |= (alloc_size / psz) - 1; + val |= alloc_pages - 1; writeq_relaxed(val, its->base + GITS_BASER + i * 8); tmp = readq_relaxed(its->base + GITS_BASER + i * 8); @@ -1370,6 +1394,33 @@ static int its_force_quiescent(void __iomem *base) } } +static void __maybe_unused its_enable_quirk_cavium_22375(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; +} + +static const struct gic_quirk its_quirks[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_22375 + { + .desc = "ITS: Cavium errata 22375, 24313", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_22375, + }, +#endif + { + } +}; + +static void its_enable_quirks(struct its_node *its) +{ + u32 iidr = readl_relaxed(its->base + GITS_IIDR); + + gic_enable_quirks(iidr, its_quirks, its); +} + static int its_probe(struct device_node *node, struct irq_domain *parent) { struct resource res; @@ -1428,6 +1479,8 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) } its->cmd_write = its->cmd_base; + its_enable_quirks(its); + err = its_alloc_tables(node->full_name, its); if (err) goto out_free_cmd; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 36ecfc870e5a..222f9cc0deae 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -108,57 +108,17 @@ static void gic_redist_wait_for_rwp(void) gic_do_wait_for_rwp(gic_data_rdist_rd_base()); } -/* Low level accessors */ -static u64 __maybe_unused gic_read_iar(void) -{ - u64 irqstat; - - asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat)); - return irqstat; -} - -static void __maybe_unused gic_write_pmr(u64 val) -{ - asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val)); -} - -static void __maybe_unused gic_write_ctlr(u64 val) -{ - asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val)); - isb(); -} +#ifdef CONFIG_ARM64 +static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx); -static void __maybe_unused gic_write_grpen1(u64 val) -{ - asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val)); - isb(); -} - -static void __maybe_unused gic_write_sgi1r(u64 val) -{ - asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val)); -} - -static void gic_enable_sre(void) +static u64 __maybe_unused gic_read_iar(void) { - u64 val; - - asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); - val |= ICC_SRE_EL1_SRE; - asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val)); - isb(); - - /* - * Need to check that the SRE bit has actually been set. If - * not, it means that SRE is disabled at EL2. We're going to - * die painfully, and there is nothing we can do about it. - * - * Kindly inform the luser. 
- */ - asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); - if (!(val & ICC_SRE_EL1_SRE)) - pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); + if (static_branch_unlikely(&is_cavium_thunderx)) + return gic_read_iar_cavium_thunderx(); + else + return gic_read_iar_common(); } +#endif static void gic_enable_redist(bool enable) { @@ -359,11 +319,11 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) return 0; } -static u64 gic_mpidr_to_affinity(u64 mpidr) +static u64 gic_mpidr_to_affinity(unsigned long mpidr) { u64 aff; - aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | + aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); @@ -373,7 +333,7 @@ static u64 gic_mpidr_to_affinity(u64 mpidr) static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { - u64 irqnr; + u32 irqnr; do { irqnr = gic_read_iar(); @@ -432,12 +392,12 @@ static void __init gic_dist_init(void) */ affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); for (i = 32; i < gic_data.irq_nr; i++) - writeq_relaxed(affinity, base + GICD_IROUTER + i * 8); + gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); } static int gic_populate_rdist(void) { - u64 mpidr = cpu_logical_map(smp_processor_id()); + unsigned long mpidr = cpu_logical_map(smp_processor_id()); u64 typer; u32 aff; int i; @@ -463,15 +423,14 @@ static int gic_populate_rdist(void) } do { - typer = readq_relaxed(ptr + GICR_TYPER); + typer = gic_read_typer(ptr + GICR_TYPER); if ((typer >> 32) == aff) { u64 offset = ptr - gic_data.redist_regions[i].redist_base; gic_data_rdist_rd_base() = ptr; gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset; - pr_info("CPU%d: found redistributor %llx region %d:%pa\n", - smp_processor_id(), - (unsigned long long)mpidr, - i, &gic_data_rdist()->phys_base); + pr_info("CPU%d: found redistributor %lx region %d:%pa\n", + smp_processor_id(), mpidr, i, + &gic_data_rdist()->phys_base); return 0; } @@ -486,15 +445,22 @@ static int gic_populate_rdist(void) } /* We couldn't even deal with ourselves... */ - WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n", - smp_processor_id(), (unsigned long long)mpidr); + WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", + smp_processor_id(), mpidr); return -ENODEV; } static void gic_cpu_sys_reg_init(void) { - /* Enable system registers */ - gic_enable_sre(); + /* + * Need to check that the SRE bit has actually been set. If + * not, it means that SRE is disabled at EL2. We're going to + * die painfully, and there is nothing we can do about it. + * + * Kindly inform the luser. 
+ */ + if (!gic_enable_sre()) + pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); /* Set priority mask register */ gic_write_pmr(DEFAULT_PMR_VALUE); @@ -557,10 +523,10 @@ static struct notifier_block gic_cpu_notifier = { }; static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, - u64 cluster_id) + unsigned long cluster_id) { int cpu = *base_cpu; - u64 mpidr = cpu_logical_map(cpu); + unsigned long mpidr = cpu_logical_map(cpu); u16 tlist = 0; while (cpu < nr_cpu_ids) { @@ -621,7 +587,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) smp_wmb(); for_each_cpu(cpu, mask) { - u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; + unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL; u16 tlist; tlist = gic_compute_target_list(&cpu, mask, cluster_id); @@ -657,7 +623,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); - writeq_relaxed(val, reg); + gic_write_irouter(val, reg); /* * If the interrupt was enabled, enabled it again. Otherwise, @@ -836,6 +802,14 @@ static const struct irq_domain_ops gic_irq_domain_ops = { .free = gic_irq_domain_free, }; +static void gicv3_enable_quirks(void) +{ +#ifdef CONFIG_ARM64 + if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154)) + static_branch_enable(&is_cavium_thunderx); +#endif +} + static int __init gic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *dist_base; @@ -901,6 +875,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare gic_data.nr_redist_regions = nr_redist_regions; gic_data.redist_stride = redist_stride; + gicv3_enable_quirks(); + /* * Find out how many interrupts are supported. * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 982c09c2d791..a9f23cfa9c96 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -51,6 +51,19 @@ #include "irq-gic-common.h" +#ifdef CONFIG_ARM64 +#include <asm/cpufeature.h> + +static void gic_check_cpu_features(void) +{ + WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF), + TAINT_CPU_OUT_OF_SPEC, + "GICv3 system registers enabled, broken firmware!\n"); +} +#else +#define gic_check_cpu_features() do { } while(0) +#endif + union gic_base { void __iomem *common_base; void __percpu * __iomem *percpu_base; @@ -987,6 +1000,8 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, BUG_ON(gic_nr >= MAX_GIC_NR); + gic_check_cpu_features(); + gic = &gic_data[gic_nr]; #ifdef CONFIG_GIC_NON_BANKED if (percpu_offset) { /* Frankein-GIC without banked registers... 
*/ @@ -1191,6 +1206,7 @@ IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); +IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init); #endif diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index e484fd255321..6b304eb39bd2 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -377,8 +377,8 @@ int __init i8259_of_init(struct device_node *node, struct device_node *parent) } domain = __init_i8259_irqs(node); - irq_set_handler_data(parent_irq, domain); - irq_set_chained_handler(parent_irq, i8259_irq_dispatch); + irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch, + domain); return 0; } IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init); diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 9525335723f6..c325806561be 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -361,14 +361,16 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = { .xlate = irq_domain_xlate_twocell, }; -static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a7779 = { +static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a777x = { .irlm_bit = 23, /* ICR0.IRLM0 */ }; static const struct of_device_id intc_irqpin_dt_ids[] = { { .compatible = "renesas,intc-irqpin", }, + { .compatible = "renesas,intc-irqpin-r8a7778", + .data = &intc_irqpin_irlm_r8a777x }, { .compatible = "renesas,intc-irqpin-r8a7779", - .data = &intc_irqpin_irlm_r8a7779 }, + .data = &intc_irqpin_irlm_r8a777x }, {}, }; MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids); diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index 35bf97ba4a3d..52304b139aa4 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -62,33 +62,20 @@ struct irqc_priv { struct irqc_irq irq[IRQC_IRQ_MAX]; unsigned int number_of_irqs; struct platform_device *pdev; - struct irq_chip irq_chip; + struct irq_chip_generic *gc; struct irq_domain *irq_domain; struct clk *clk; }; -static void irqc_dbg(struct irqc_irq *i, char *str) -{ - dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n", - str, i->requested_irq, i->hw_irq); -} - -static void irqc_irq_enable(struct irq_data *d) +static struct irqc_priv *irq_data_to_priv(struct irq_data *data) { - struct irqc_priv *p = irq_data_get_irq_chip_data(d); - int hw_irq = irqd_to_hwirq(d); - - irqc_dbg(&p->irq[hw_irq], "enable"); - iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_SET); + return data->domain->host_data; } -static void irqc_irq_disable(struct irq_data *d) +static void irqc_dbg(struct irqc_irq *i, char *str) { - struct irqc_priv *p = irq_data_get_irq_chip_data(d); - int hw_irq = irqd_to_hwirq(d); - - irqc_dbg(&p->irq[hw_irq], "disable"); - iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_STS); + dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n", + str, i->requested_irq, i->hw_irq); } static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = { @@ -101,7 +88,7 @@ static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = { static int irqc_irq_set_type(struct irq_data *d, unsigned int type) { - struct irqc_priv *p = irq_data_get_irq_chip_data(d); + struct irqc_priv *p = irq_data_to_priv(d); int hw_irq = irqd_to_hwirq(d); unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK]; u32 tmp; @@ -120,7 +107,7 @@ 
static int irqc_irq_set_type(struct irq_data *d, unsigned int type) static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) { - struct irqc_priv *p = irq_data_get_irq_chip_data(d); + struct irqc_priv *p = irq_data_to_priv(d); int hw_irq = irqd_to_hwirq(d); irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); @@ -153,35 +140,11 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id) return IRQ_NONE; } -/* - * This lock class tells lockdep that IRQC irqs are in a different - * category than their parents, so it won't report false recursion. - */ -static struct lock_class_key irqc_irq_lock_class; - -static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - struct irqc_priv *p = h->host_data; - - irqc_dbg(&p->irq[hw], "map"); - irq_set_chip_data(virq, h->host_data); - irq_set_lockdep_class(virq, &irqc_irq_lock_class); - irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); - return 0; -} - -static const struct irq_domain_ops irqc_irq_domain_ops = { - .map = irqc_irq_domain_map, - .xlate = irq_domain_xlate_twocell, -}; - static int irqc_probe(struct platform_device *pdev) { struct irqc_priv *p; struct resource *io; struct resource *irq; - struct irq_chip *irq_chip; const char *name = dev_name(&pdev->dev); int ret; int k; @@ -241,40 +204,51 @@ static int irqc_probe(struct platform_device *pdev) p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */ - irq_chip = &p->irq_chip; - irq_chip->name = name; - irq_chip->irq_mask = irqc_irq_disable; - irq_chip->irq_unmask = irqc_irq_enable; - irq_chip->irq_set_type = irqc_irq_set_type; - irq_chip->irq_set_wake = irqc_irq_set_wake; - irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND; - p->irq_domain = irq_domain_add_linear(pdev->dev.of_node, p->number_of_irqs, - &irqc_irq_domain_ops, p); + &irq_generic_chip_ops, p); if (!p->irq_domain) { ret = -ENXIO; dev_err(&pdev->dev, "cannot initialize irq domain\n"); goto err2; } + ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs, + 1, name, handle_level_irq, + 0, 0, IRQ_GC_INIT_NESTED_LOCK); + if (ret) { + dev_err(&pdev->dev, "cannot allocate generic chip\n"); + goto err3; + } + + p->gc = irq_get_domain_generic_chip(p->irq_domain, 0); + p->gc->reg_base = p->cpu_int_base; + p->gc->chip_types[0].regs.enable = IRQC_EN_SET; + p->gc->chip_types[0].regs.disable = IRQC_EN_STS; + p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; + p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; + p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type; + p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake; + p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND; + /* request interrupts one by one */ for (k = 0; k < p->number_of_irqs; k++) { if (request_irq(p->irq[k].requested_irq, irqc_irq_handler, 0, name, &p->irq[k])) { dev_err(&pdev->dev, "failed to request IRQ\n"); ret = -ENOENT; - goto err3; + goto err4; } } dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); return 0; -err3: +err4: while (--k >= 0) free_irq(p->irq[k].requested_irq, &p->irq[k]); +err3: irq_domain_remove(p->irq_domain); err2: iounmap(p->iomem); diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index c143dd58410c..4ef178078e5b 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -8,6 +8,9 @@ * warranty of any kind, whether express or implied. 
*/ +#define DRV_NAME "sunxi-nmi" +#define pr_fmt(fmt) DRV_NAME ": " fmt + #include <linux/bitops.h> #include <linux/device.h> #include <linux/io.h> @@ -96,8 +99,8 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) break; default: irq_gc_unlock(gc); - pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", - __func__, data->irq); + pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", + data->irq); return -EBADR; } @@ -130,30 +133,29 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node, domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); if (!domain) { - pr_err("%s: Could not register interrupt domain.\n", node->name); + pr_err("Could not register interrupt domain.\n"); return -ENOMEM; } - ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name, + ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME, handle_fasteoi_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); if (ret) { - pr_err("%s: Could not allocate generic interrupt chip.\n", - node->name); - goto fail_irqd_remove; + pr_err("Could not allocate generic interrupt chip.\n"); + goto fail_irqd_remove; } irq = irq_of_parse_and_map(node, 0); if (irq <= 0) { - pr_err("%s: unable to parse irq\n", node->name); + pr_err("unable to parse irq\n"); ret = -EINVAL; goto fail_irqd_remove; } gc = irq_get_domain_generic_chip(domain, 0); - gc->reg_base = of_iomap(node, 0); + gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node)); if (!gc->reg_base) { - pr_err("%s: unable to map resource\n", node->name); + pr_err("unable to map resource\n"); ret = -ENOMEM; goto fail_irqd_remove; } diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4e14dac282bb..6a3538ef7275 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -282,7 +282,7 @@ struct vgic_v2_cpu_if { }; struct vgic_v3_cpu_if { -#ifdef CONFIG_ARM_GIC_V3 +#ifdef CONFIG_KVM_ARM_VGIC_V3 u32 vgic_hcr; u32 vgic_vmcr; u32 vgic_sre; /* Restored only, change ignored */ @@ -364,7 +364,7 @@ void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active); int vgic_v2_probe(struct device_node *vgic_node, const struct vgic_ops **ops, const struct vgic_params **params); -#ifdef CONFIG_ARM_GIC_V3 +#ifdef CONFIG_KVM_ARM_VGIC_V3 int vgic_v3_probe(struct device_node *vgic_node, const struct vgic_ops **ops, const struct vgic_params **params); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index be7e75c945e9..ad16809c8596 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -102,6 +102,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); * @flags: flags (see IRQF_* above) * @thread_fn: interrupt handler function for threaded interrupts * @thread: thread pointer for threaded interrupts + * @secondary: pointer to secondary irqaction (force threading) * @thread_flags: flags related to @thread * @thread_mask: bitmask for keeping track of @thread activity * @dir: pointer to the proc/irq/NN/name entry @@ -113,6 +114,7 @@ struct irqaction { struct irqaction *next; irq_handler_t thread_fn; struct task_struct *thread; + struct irqaction *secondary; unsigned int irq; unsigned int flags; unsigned long thread_flags; diff --git a/include/linux/irq.h b/include/linux/irq.h index 11bf09288ddb..3c1c96786248 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -67,11 +67,12 @@ enum irqchip_irq_state; * request/setup_irq() * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context - * 
IRQ_NESTED_TRHEAD - Interrupt nests into another thread + * IRQ_NESTED_THREAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable * IRQ_IS_POLLED - Always polled by another interrupt. Exclude * it from the spurious interrupt detection * mechanism and from core side polling. + * IRQ_DISABLE_UNLAZY - Disable lazy irq disable */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -97,13 +98,14 @@ enum { IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), IRQ_IS_POLLED = (1 << 18), + IRQ_DISABLE_UNLAZY = (1 << 19), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ - IRQ_IS_POLLED) + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) @@ -297,21 +299,6 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; } -/* - * Functions for chained handlers which can be enabled/disabled by the - * standard disable_irq/enable_irq calls. Must be called with - * irq_desc->lock held. - */ -static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) -{ - __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS; -} - -static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) -{ - __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS; -} - static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) { return d->hwirq; @@ -452,6 +439,8 @@ extern int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask, bool force); extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); +extern void irq_migrate_all_off_this_cpu(void); + #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) void irq_move_irq(struct irq_data *data); void irq_move_masked_irq(struct irq_data *data); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 9eeeb9589acf..c9ae0c6ec050 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -18,8 +18,6 @@ #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H #define __LINUX_IRQCHIP_ARM_GIC_V3_H -#include <asm/sysreg.h> - /* * Distributor registers. We assume we're running non-secure, with ARE * being set. Secure-only and non-ARE registers are not described. 
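Regarding the new IRQ_DISABLE_UNLAZY flag added to include/linux/irq.h above: a driver whose device cannot be quiesced at the source can opt out of lazy disabling per interrupt line, so that disable_irq() masks the line at the irq chip immediately instead of only marking it disabled. A short usage sketch (example_irq is a placeholder for whatever IRQ number the driver obtained):

	irq_set_status_flags(example_irq, IRQ_DISABLE_UNLAZY);
	disable_irq(example_irq);	/* masked at the chip right away, not lazily */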
@@ -231,6 +229,7 @@ #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGES_MAX 256 #define GITS_BASER_TYPE_NONE 0 #define GITS_BASER_TYPE_DEVICE 1 @@ -266,16 +265,16 @@ /* * Hypervisor interface registers (SRE only) */ -#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1) - -#define ICH_LR_EOI (1UL << 41) -#define ICH_LR_GROUP (1UL << 60) -#define ICH_LR_HW (1UL << 61) -#define ICH_LR_STATE (3UL << 62) -#define ICH_LR_PENDING_BIT (1UL << 62) -#define ICH_LR_ACTIVE_BIT (1UL << 63) +#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) + +#define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_GROUP (1ULL << 60) +#define ICH_LR_HW (1ULL << 61) +#define ICH_LR_STATE (3ULL << 62) +#define ICH_LR_PENDING_BIT (1ULL << 62) +#define ICH_LR_ACTIVE_BIT (1ULL << 63) #define ICH_LR_PHYS_ID_SHIFT 32 -#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) #define ICH_MISR_EOI (1 << 0) #define ICH_MISR_U (1 << 1) @@ -292,19 +291,8 @@ #define ICH_VMCR_PMR_SHIFT 24 #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) -#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) -#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) -#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) -#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) -#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) -#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) -#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) -#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) - #define ICC_IAR1_EL1_SPURIOUS 0x3ff -#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) - #define ICC_SRE_EL2_SRE (1 << 0) #define ICC_SRE_EL2_ENABLE (1 << 3) @@ -320,54 +308,10 @@ #define ICC_SGI1R_AFFINITY_3_SHIFT 48 #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) -/* - * System register definitions - */ -#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) -#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) -#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) -#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) -#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) -#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) -#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) - -#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x) -#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x) - -#define ICH_LR0_EL2 __LR0_EL2(0) -#define ICH_LR1_EL2 __LR0_EL2(1) -#define ICH_LR2_EL2 __LR0_EL2(2) -#define ICH_LR3_EL2 __LR0_EL2(3) -#define ICH_LR4_EL2 __LR0_EL2(4) -#define ICH_LR5_EL2 __LR0_EL2(5) -#define ICH_LR6_EL2 __LR0_EL2(6) -#define ICH_LR7_EL2 __LR0_EL2(7) -#define ICH_LR8_EL2 __LR8_EL2(0) -#define ICH_LR9_EL2 __LR8_EL2(1) -#define ICH_LR10_EL2 __LR8_EL2(2) -#define ICH_LR11_EL2 __LR8_EL2(3) -#define ICH_LR12_EL2 __LR8_EL2(4) -#define ICH_LR13_EL2 __LR8_EL2(5) -#define ICH_LR14_EL2 __LR8_EL2(6) -#define ICH_LR15_EL2 __LR8_EL2(7) - -#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) -#define ICH_AP0R0_EL2 __AP0Rx_EL2(0) -#define ICH_AP0R1_EL2 __AP0Rx_EL2(1) -#define ICH_AP0R2_EL2 __AP0Rx_EL2(2) -#define ICH_AP0R3_EL2 __AP0Rx_EL2(3) - -#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) -#define ICH_AP1R0_EL2 __AP1Rx_EL2(0) -#define ICH_AP1R1_EL2 __AP1Rx_EL2(1) -#define ICH_AP1R2_EL2 __AP1Rx_EL2(2) -#define ICH_AP1R3_EL2 __AP1Rx_EL2(3) +#include <asm/arch_gicv3.h> #ifndef __ASSEMBLY__ -#include <linux/stringify.h> -#include <asm/msi.h> - /* * We need a value to serve as a irq-type for LPIs. 
Choose one that will * hopefully pique the interest of the reviewer. @@ -385,23 +329,26 @@ struct rdists { u64 flags; }; -static inline void gic_write_eoir(u64 irq) -{ - asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); - isb(); -} - -static inline void gic_write_dir(u64 irq) -{ - asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq)); - isb(); -} - struct irq_domain; int its_cpu_init(void); int its_init(struct device_node *node, struct rdists *rdists, struct irq_domain *domain); +static inline bool gic_enable_sre(void) +{ + u32 val; + + val = gic_read_sre(); + if (val & ICC_SRE_EL1_SRE) + return true; + + val |= ICC_SRE_EL1_SRE; + gic_write_sre(val); + val = gic_read_sre(); + + return !!(val & ICC_SRE_EL1_SRE); +} + #endif #endif diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 9a76e3beda54..3b48dab80164 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -30,6 +30,10 @@ config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ config GENERIC_PENDING_IRQ bool +# Support for generic irq migrating off cpu before the cpu is offline. +config GENERIC_IRQ_MIGRATION + bool + # Alpha specific irq affinity mechanism config AUTO_IRQ_AFFINITY bool diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index d12123526e2b..2fc9cbdf35b6 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o +obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o obj-$(CONFIG_PM_SLEEP) += pm.o obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index e28169dd1c36..15206453b12a 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -21,6 +21,20 @@ #include "internals.h" +static irqreturn_t bad_chained_irq(int irq, void *dev_id) +{ + WARN_ONCE(1, "Chained irq %d should not call an action\n", irq); + return IRQ_NONE; +} + +/* + * Chained handlers should never call action on their IRQ. This default + * action will emit warning if such thing happens. + */ +struct irqaction chained_action = { + .handler = bad_chained_irq, +}; + /** * irq_set_chip - set the irq chip for an irq * @irq: irq number @@ -227,6 +241,13 @@ void irq_enable(struct irq_desc *desc) * disabled. If an interrupt happens, then the interrupt flow * handler masks the line at the hardware level and marks it * pending. + * + * If the interrupt chip does not implement the irq_disable callback, + * a driver can disable the lazy approach for a particular irq line by + * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can + * be used for devices which cannot disable the interrupt at the + * device level under certain circumstances and have to use + * disable_irq[_nosync] instead. 
*/ void irq_disable(struct irq_desc *desc) { @@ -234,6 +255,8 @@ void irq_disable(struct irq_desc *desc) if (desc->irq_data.chip->irq_disable) { desc->irq_data.chip->irq_disable(&desc->irq_data); irq_state_set_masked(desc); + } else if (irq_settings_disable_unlazy(desc)) { + mask_irq(desc); } } @@ -669,7 +692,7 @@ void handle_percpu_irq(struct irq_desc *desc) if (chip->irq_ack) chip->irq_ack(&desc->irq_data); - handle_irq_event_percpu(desc, desc->action); + handle_irq_event_percpu(desc); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); @@ -746,6 +769,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, if (desc->irq_data.chip != &no_irq_chip) mask_ack_irq(desc); irq_state_set_disabled(desc); + if (is_chained) + desc->action = NULL; desc->depth = 1; } desc->handle_irq = handle; @@ -755,6 +780,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, irq_settings_set_noprobe(desc); irq_settings_set_norequest(desc); irq_settings_set_nothread(desc); + desc->action = &chained_action; irq_startup(desc, true); } } diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c new file mode 100644 index 000000000000..80f4f4e56fed --- /dev/null +++ b/kernel/irq/cpuhotplug.c @@ -0,0 +1,82 @@ +/* + * Generic cpu hotunplug interrupt migration code copied from the + * arch/arm implementation + * + * Copyright (C) Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/interrupt.h> +#include <linux/ratelimit.h> +#include <linux/irq.h> + +#include "internals.h" + +static bool migrate_one_irq(struct irq_desc *desc) +{ + struct irq_data *d = irq_desc_get_irq_data(desc); + const struct cpumask *affinity = d->common->affinity; + struct irq_chip *c; + bool ret = false; + + /* + * If this is a per-CPU interrupt, or the affinity does not + * include this CPU, then we have nothing to do. + */ + if (irqd_is_per_cpu(d) || + !cpumask_test_cpu(smp_processor_id(), affinity)) + return false; + + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { + affinity = cpu_online_mask; + ret = true; + } + + c = irq_data_get_irq_chip(d); + if (!c->irq_set_affinity) { + pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq); + } else { + int r = irq_do_set_affinity(d, affinity, false); + if (r) + pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n", + d->irq, r); + } + + return ret; +} + +/** + * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu + * + * The current CPU has been marked offline. Migrate IRQs off this CPU. + * If the affinity settings do not allow other CPUs, force them onto any + * available CPU. + * + * Note: we must iterate over all IRQs, whether they have an attached + * action structure or not, as we need to get chained interrupts too. 
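For context, a sketch of how an architecture port might use the helper documented above, assuming it selects GENERIC_IRQ_MIGRATION and picks up the prototype from <linux/irq.h>; this is illustrative, not part of the patch:

#include <linux/irq.h>

int __cpu_disable(void)
{
        /* ... architecture specific teardown for the dying CPU ... */

        /*
         * Route every interrupt that currently targets this CPU to a
         * CPU that stays online; runs with interrupts disabled.
         */
        irq_migrate_all_off_this_cpu();

        return 0;
}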
+ */ +void irq_migrate_all_off_this_cpu(void) +{ + unsigned int irq; + struct irq_desc *desc; + unsigned long flags; + + local_irq_save(flags); + + for_each_active_irq(irq) { + bool affinity_broken; + + desc = irq_to_desc(irq); + raw_spin_lock(&desc->lock); + affinity_broken = migrate_one_irq(desc); + raw_spin_unlock(&desc->lock); + + if (affinity_broken) + pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", + irq, smp_processor_id()); + } + + local_irq_restore(flags); +} diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index e25a83b67cce..a302cf9a2126 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -132,11 +132,11 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action) wake_up_process(action->thread); } -irqreturn_t -handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) +irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) { irqreturn_t retval = IRQ_NONE; unsigned int flags = 0, irq = desc->irq_data.irq; + struct irqaction *action = desc->action; do { irqreturn_t res; @@ -184,14 +184,13 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) irqreturn_t handle_irq_event(struct irq_desc *desc) { - struct irqaction *action = desc->action; irqreturn_t ret; desc->istate &= ~IRQS_PENDING; irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); raw_spin_unlock(&desc->lock); - ret = handle_irq_event_percpu(desc, action); + ret = handle_irq_event_percpu(desc); raw_spin_lock(&desc->lock); irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 5ef0c2dbe930..05c2188271b8 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -18,6 +18,8 @@ extern bool noirqdebug; +extern struct irqaction chained_action; + /* * Bits used by threaded handlers: * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run @@ -81,7 +83,7 @@ extern void irq_mark_irq(unsigned int irq); extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); -irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action); +irqreturn_t handle_irq_event_percpu(struct irq_desc *desc); irqreturn_t handle_irq_event(struct irq_desc *desc); /* Resending of interrupts :*/ diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index f9a59f6cabd2..a71175ff98d5 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -258,37 +258,6 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) } EXPORT_SYMBOL_GPL(irq_set_affinity_hint); -/** - * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt - * @irq: interrupt number to set affinity - * @vcpu_info: vCPU specific data - * - * This function uses the vCPU specific data to set the vCPU - * affinity for an irq. The vCPU specific data is passed from - * outside, such as KVM. One example code path is as below: - * KVM -> IOMMU -> irq_set_vcpu_affinity(). 
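Note that irq_set_vcpu_affinity() is not being removed here, only moved below setup_affinity() further down in this file. For reference, a hedged sketch of the irqchip side it depends on, with invented foo_* names:

#include <linux/irq.h>

static void foo_mask(struct irq_data *d)   { /* mask at the chip */ }
static void foo_unmask(struct irq_data *d) { /* unmask at the chip */ }

static int foo_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
        /*
         * vcpu_info is opaque to the core; its layout is a contract
         * between the caller (e.g. KVM via the IOMMU) and this chip.
         */
        return 0;
}

static struct irq_chip foo_chip = {
        .name                  = "foo",
        .irq_mask              = foo_mask,
        .irq_unmask            = foo_unmask,
        .irq_set_vcpu_affinity = foo_set_vcpu_affinity,
};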
- */ -int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) -{ - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - struct irq_data *data; - struct irq_chip *chip; - int ret = -ENOSYS; - - if (!desc) - return -EINVAL; - - data = irq_desc_get_irq_data(desc); - chip = irq_data_get_irq_chip(data); - if (chip && chip->irq_set_vcpu_affinity) - ret = chip->irq_set_vcpu_affinity(data, vcpu_info); - irq_put_desc_unlock(desc, flags); - - return ret; -} -EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); - static void irq_affinity_notify(struct work_struct *work) { struct irq_affinity_notify *notify = @@ -424,6 +393,37 @@ setup_affinity(struct irq_desc *desc, struct cpumask *mask) } #endif +/** + * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt + * @irq: interrupt number to set affinity + * @vcpu_info: vCPU specific data + * + * This function uses the vCPU specific data to set the vCPU + * affinity for an irq. The vCPU specific data is passed from + * outside, such as KVM. One example code path is as below: + * KVM -> IOMMU -> irq_set_vcpu_affinity(). + */ +int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) +{ + unsigned long flags; + struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); + struct irq_data *data; + struct irq_chip *chip; + int ret = -ENOSYS; + + if (!desc) + return -EINVAL; + + data = irq_desc_get_irq_data(desc); + chip = irq_data_get_irq_chip(data); + if (chip && chip->irq_set_vcpu_affinity) + ret = chip->irq_set_vcpu_affinity(data, vcpu_info); + irq_put_desc_unlock(desc, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); + void __disable_irq(struct irq_desc *desc) { if (!desc->depth++) @@ -730,6 +730,12 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) return IRQ_NONE; } +static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id) +{ + WARN(1, "Secondary action handler called for irq %d\n", irq); + return IRQ_NONE; +} + static int irq_wait_for_interrupt(struct irqaction *action) { set_current_state(TASK_INTERRUPTIBLE); @@ -756,7 +762,8 @@ static int irq_wait_for_interrupt(struct irqaction *action) static void irq_finalize_oneshot(struct irq_desc *desc, struct irqaction *action) { - if (!(desc->istate & IRQS_ONESHOT)) + if (!(desc->istate & IRQS_ONESHOT) || + action->handler == irq_forced_secondary_handler) return; again: chip_bus_lock(desc); @@ -910,6 +917,18 @@ static void irq_thread_dtor(struct callback_head *unused) irq_finalize_oneshot(desc, action); } +static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) +{ + struct irqaction *secondary = action->secondary; + + if (WARN_ON_ONCE(!secondary)) + return; + + raw_spin_lock_irq(&desc->lock); + __irq_wake_thread(desc, secondary); + raw_spin_unlock_irq(&desc->lock); +} + /* * Interrupt handler thread */ @@ -940,6 +959,8 @@ static int irq_thread(void *data) action_ret = handler_fn(desc, action); if (action_ret == IRQ_HANDLED) atomic_inc(&desc->threads_handled); + if (action_ret == IRQ_WAKE_THREAD) + irq_wake_secondary(desc, action); wake_threads_waitq(desc); } @@ -984,20 +1005,36 @@ void irq_wake_thread(unsigned int irq, void *dev_id) } EXPORT_SYMBOL_GPL(irq_wake_thread); -static void irq_setup_forced_threading(struct irqaction *new) +static int irq_setup_forced_threading(struct irqaction *new) { if (!force_irqthreads) - return; + return 0; if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) - return; + return 0; new->flags |= IRQF_ONESHOT; - if (!new->thread_fn) { - 
set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); - new->thread_fn = new->handler; - new->handler = irq_default_primary_handler; + /* + * Handle the case where we have a real primary handler and a + * thread handler. We force thread them as well by creating a + * secondary action. + */ + if (new->handler != irq_default_primary_handler && new->thread_fn) { + /* Allocate the secondary action */ + new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); + if (!new->secondary) + return -ENOMEM; + new->secondary->handler = irq_forced_secondary_handler; + new->secondary->thread_fn = new->thread_fn; + new->secondary->dev_id = new->dev_id; + new->secondary->irq = new->irq; + new->secondary->name = new->name; } + /* Deal with the primary handler */ + set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); + new->thread_fn = new->handler; + new->handler = irq_default_primary_handler; + return 0; } static int irq_request_resources(struct irq_desc *desc) @@ -1017,6 +1054,48 @@ static void irq_release_resources(struct irq_desc *desc) c->irq_release_resources(d); } +static int +setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) +{ + struct task_struct *t; + struct sched_param param = { + .sched_priority = MAX_USER_RT_PRIO/2, + }; + + if (!secondary) { + t = kthread_create(irq_thread, new, "irq/%d-%s", irq, + new->name); + } else { + t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, + new->name); + param.sched_priority -= 1; + } + + if (IS_ERR(t)) + return PTR_ERR(t); + + sched_setscheduler_nocheck(t, SCHED_FIFO, &param); + + /* + * We keep the reference to the task struct even if + * the thread dies to avoid that the interrupt code + * references an already freed task_struct. + */ + get_task_struct(t); + new->thread = t; + /* + * Tell the thread to set its affinity. This is + * important for shared interrupt handlers as we do + * not invoke setup_affinity() for the secondary + * handlers as everything is already set up. Even for + * interrupts marked with IRQF_NO_BALANCE this is + * correct as we want the thread to move to the cpu(s) + * on which the requesting code placed the interrupt. + */ + set_bit(IRQTF_AFFINITY, &new->thread_flags); + return 0; +} + /* * Internal function to register an irqaction - typically used to * allocate special interrupts that are part of the architecture. @@ -1037,6 +1116,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (!try_module_get(desc->owner)) return -ENODEV; + new->irq = irq; + /* * Check whether the interrupt nests into another interrupt * thread.
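The effect of the reworked forced-threading path above is easiest to see from a driver that registers both a real primary handler and a thread function. A hypothetical example (foo_* names invented); when booted with threadirqs, the primary handler gets its own irq thread and the thread function runs from the newly allocated secondary action:

#include <linux/interrupt.h>

static irqreturn_t foo_quick(int irq, void *dev_id)
{
        /* silence the device, defer the heavy work */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_slow(int irq, void *dev_id)
{
        /* may sleep: e.g. talk to the device over a slow bus */
        return IRQ_HANDLED;
}

static int foo_request(unsigned int irq, void *dev)
{
        /*
         * No IRQF_ONESHOT/IRQF_NO_THREAD here, so with threadirqs the
         * core force-threads this request: foo_quick() runs from
         * "irq/N-foo" and foo_slow() from the secondary "irq/N-s-foo".
         */
        return request_threaded_irq(irq, foo_quick, foo_slow, 0,
                                    "foo", dev);
}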
*/ if (new->thread_fn && !nested) { - struct task_struct *t; - static const struct sched_param param = { - .sched_priority = MAX_USER_RT_PRIO/2, - }; - - t = kthread_create(irq_thread, new, "irq/%d-%s", irq, - new->name); - if (IS_ERR(t)) { - ret = PTR_ERR(t); + ret = setup_irq_thread(new, irq, false); + if (ret) goto out_mput; + if (new->secondary) { + ret = setup_irq_thread(new->secondary, irq, true); + if (ret) + goto out_thread; } - - sched_setscheduler_nocheck(t, SCHED_FIFO, &param); - - /* - * We keep the reference to the task struct even if - * the thread dies to avoid that the interrupt code - * references an already freed task_struct. - */ - get_task_struct(t); - new->thread = t; - /* - * Tell the thread to set its affinity. This is - * important for shared interrupt handlers as we do - * not invoke setup_affinity() for the secondary - * handlers as everything is already set up. Even for - * interrupts marked with IRQF_NO_BALANCE this is - * correct as we want the thread to move to the cpu(s) - * on which the requesting code placed the interrupt. - */ - set_bit(IRQTF_AFFINITY, &new->thread_flags); } if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { @@ -1267,7 +1328,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irq, nmsk, omsk); } - new->irq = irq; *old_ptr = new; irq_pm_install_action(desc, new); @@ -1293,6 +1353,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) */ if (new->thread) wake_up_process(new->thread); + if (new->secondary) + wake_up_process(new->secondary->thread); register_irq_proc(irq, desc); new->dir = NULL; @@ -1323,6 +1385,13 @@ out_thread: kthread_stop(t); put_task_struct(t); } + if (new->secondary && new->secondary->thread) { + struct task_struct *t = new->secondary->thread; + + new->secondary->thread = NULL; + kthread_stop(t); + put_task_struct(t); + } out_mput: module_put(desc->owner); return ret; @@ -1394,6 +1463,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) /* If this was the last handler, shut down the IRQ line: */ if (!desc->action) { + irq_settings_clr_disable_unlazy(desc); irq_shutdown(desc); irq_release_resources(desc); } @@ -1430,9 +1500,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) if (action->thread) { kthread_stop(action->thread); put_task_struct(action->thread); + if (action->secondary && action->secondary->thread) { + kthread_stop(action->secondary->thread); + put_task_struct(action->secondary->thread); + } } module_put(desc->owner); + kfree(action->secondary); return action; } @@ -1576,8 +1651,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, retval = __setup_irq(irq, desc, action); chip_bus_sync_unlock(desc); - if (retval) + if (retval) { + kfree(action->secondary); kfree(action); + } #ifdef CONFIG_DEBUG_SHIRQ_FIXME if (!retval && (irqflags & IRQF_SHARED)) { diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index a50ddc9417ff..a916cf144b65 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -475,7 +475,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; - if (!action && !any_count) + if ((!action || action == &chained_action) && !any_count) goto out; seq_printf(p, "%*d: ", prec, i); diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 3320b84cc60f..320579d89091 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h @@ -15,6 +15,7 @@ enum { _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, _IRQ_PER_CPU_DEVID =
IRQ_PER_CPU_DEVID, _IRQ_IS_POLLED = IRQ_IS_POLLED, + _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, }; @@ -28,6 +29,7 @@ enum { #define IRQ_NESTED_THREAD GOT_YOU_MORON #define IRQ_PER_CPU_DEVID GOT_YOU_MORON #define IRQ_IS_POLLED GOT_YOU_MORON +#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON #undef IRQF_MODIFY_MASK #define IRQF_MODIFY_MASK GOT_YOU_MORON @@ -154,3 +156,13 @@ static inline bool irq_settings_is_polled(struct irq_desc *desc) { return desc->status_use_accessors & _IRQ_IS_POLLED; } + +static inline bool irq_settings_disable_unlazy(struct irq_desc *desc) +{ + return desc->status_use_accessors & _IRQ_DISABLE_UNLAZY; +} + +static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc) +{ + desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY; +} diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 6bd1c9bf7ae7..77b0176b0484 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -2122,7 +2122,7 @@ static int init_vgic_model(struct kvm *kvm, int type) case KVM_DEV_TYPE_ARM_VGIC_V2: vgic_v2_init_emulation(kvm); break; -#ifdef CONFIG_ARM_GIC_V3 +#ifdef CONFIG_KVM_ARM_VGIC_V3 case KVM_DEV_TYPE_ARM_VGIC_V3: vgic_v3_init_emulation(kvm); break; @@ -2284,7 +2284,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) block_size = KVM_VGIC_V2_CPU_SIZE; alignment = SZ_4K; break; -#ifdef CONFIG_ARM_GIC_V3 +#ifdef CONFIG_KVM_ARM_VGIC_V3 case KVM_VGIC_V3_ADDR_TYPE_DIST: type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; addr_ptr = &vgic->vgic_dist_base; |
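To tie the chained_action changes together: a chained (demultiplexing) handler never carries a normal action, which is why show_interrupts() above now also skips descriptors whose action is the dummy chained_action. A sketch of such a cascade, with invented foo_* names:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static void foo_gpio_demux(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        chained_irq_enter(chip, desc);
        /* read the bank status and generic_handle_irq() each set bit */
        chained_irq_exit(chip, desc);
}

static void foo_gpio_init(unsigned int parent_irq, void *bank)
{
        /* installs the dummy chained_action on parent_irq internally */
        irq_set_chained_handler_and_data(parent_irq, foo_gpio_demux, bank);
}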