Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/Kbuild           |   1
-rw-r--r--  arch/arm64/include/asm/acpi.h           |   8
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h     |  81
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h    |   9
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h   |   1
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h     |   4
-rw-r--r--  arch/arm64/include/asm/barrier.h        |  20
-rw-r--r--  arch/arm64/include/asm/bug.h            |  32
-rw-r--r--  arch/arm64/include/asm/cache.h          |  38
-rw-r--r--  arch/arm64/include/asm/cacheflush.h     |   5
-rw-r--r--  arch/arm64/include/asm/cachetype.h      | 100
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h        |   2
-rw-r--r--  arch/arm64/include/asm/cpufeature.h     |  16
-rw-r--r--  arch/arm64/include/asm/device.h         |   3
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h    |  13
-rw-r--r--  arch/arm64/include/asm/esr.h            |   4
-rw-r--r--  arch/arm64/include/asm/hardirq.h        |   2
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h  |   4
-rw-r--r--  arch/arm64/include/asm/io.h             |  10
-rw-r--r--  arch/arm64/include/asm/kexec.h          |  52
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h        |   5
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h    |   6
-rw-r--r--  arch/arm64/include/asm/kvm_host.h       |  18
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h        |  14
-rw-r--r--  arch/arm64/include/asm/mmu.h            |   1
-rw-r--r--  arch/arm64/include/asm/module.h         |  14
-rw-r--r--  arch/arm64/include/asm/pci.h            |   2
-rw-r--r--  arch/arm64/include/asm/pgtable.h        |  10
-rw-r--r--  arch/arm64/include/asm/processor.h      |   2
-rw-r--r--  arch/arm64/include/asm/sections.h       |   2
-rw-r--r--  arch/arm64/include/asm/smp.h            |   3
-rw-r--r--  arch/arm64/include/asm/sysreg.h         | 167
-rw-r--r--  arch/arm64/include/asm/uaccess.h        |  13
-rw-r--r--  arch/arm64/include/asm/virt.h           |  31
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild      |  18
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h     |   3
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h       |  10
37 files changed, 437 insertions, 287 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index a12f1afc95a3..a7a97a608033 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -29,6 +29,7 @@ generic-y += rwsem.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
+generic-y += set_memory.h
generic-y += shmbuf.h
generic-y += simd.h
generic-y += sizes.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index c1976c0adca7..59cca1d6ec54 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -23,9 +23,9 @@
#define ACPI_MADT_GICC_LENGTH \
(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
-#define BAD_MADT_GICC_ENTRY(entry, end) \
- (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
- (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end) \
+ (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+ (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
@@ -85,6 +85,8 @@ static inline bool acpi_has_cpu_in_madt(void)
return true;
}
+struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
+
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
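
The reordered BAD_MADT_GICC_ENTRY() above now validates the entry's advertised length against ACPI_MADT_GICC_LENGTH before using that length as the overflow bound, so a truncated or corrupt MADT can no longer slip past the end-of-table check on the strength of a bogus length field. A minimal usage sketch (the table-walk variables here are illustrative, not the kernel's exact parser):

    struct acpi_madt_generic_interrupt *gicc =
            (struct acpi_madt_generic_interrupt *)subtable;
    unsigned long end = (unsigned long)madt + madt->header.length;

    if (BAD_MADT_GICC_ENTRY(gicc, end))
            return -EINVAL;         /* malformed or truncated entry */
    /* gicc's fixed-size fields are safe to dereference from here */
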
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index f37e3a21f6e7..1a98bc8602a2 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -20,69 +20,14 @@
#include <asm/sysreg.h>
-#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
-#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
-#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
-#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
-#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
-#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
-#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
-#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
-#define ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
-
-#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
-
-/*
- * System register definitions
- */
-#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
-#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
-#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
-#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
-#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
-#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
-#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
-
-#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
-#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
-
-#define ICH_LR0_EL2 __LR0_EL2(0)
-#define ICH_LR1_EL2 __LR0_EL2(1)
-#define ICH_LR2_EL2 __LR0_EL2(2)
-#define ICH_LR3_EL2 __LR0_EL2(3)
-#define ICH_LR4_EL2 __LR0_EL2(4)
-#define ICH_LR5_EL2 __LR0_EL2(5)
-#define ICH_LR6_EL2 __LR0_EL2(6)
-#define ICH_LR7_EL2 __LR0_EL2(7)
-#define ICH_LR8_EL2 __LR8_EL2(0)
-#define ICH_LR9_EL2 __LR8_EL2(1)
-#define ICH_LR10_EL2 __LR8_EL2(2)
-#define ICH_LR11_EL2 __LR8_EL2(3)
-#define ICH_LR12_EL2 __LR8_EL2(4)
-#define ICH_LR13_EL2 __LR8_EL2(5)
-#define ICH_LR14_EL2 __LR8_EL2(6)
-#define ICH_LR15_EL2 __LR8_EL2(7)
-
-#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
-#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
-#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
-#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
-#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
-
-#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
-#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
-#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
-#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
-#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
-
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
-#define read_gicreg read_sysreg_s
-#define write_gicreg write_sysreg_s
+#define read_gicreg(r) read_sysreg_s(SYS_ ## r)
+#define write_gicreg(v, r) write_sysreg_s(v, SYS_ ## r)
/*
* Low-level accessors
@@ -93,13 +38,13 @@
static inline void gic_write_eoir(u32 irq)
{
- write_sysreg_s(irq, ICC_EOIR1_EL1);
+ write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
isb();
}
static inline void gic_write_dir(u32 irq)
{
- write_sysreg_s(irq, ICC_DIR_EL1);
+ write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
}
@@ -107,7 +52,7 @@ static inline u64 gic_read_iar_common(void)
{
u64 irqstat;
- irqstat = read_sysreg_s(ICC_IAR1_EL1);
+ irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
dsb(sy);
return irqstat;
}
@@ -124,7 +69,7 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
u64 irqstat;
nops(8);
- irqstat = read_sysreg_s(ICC_IAR1_EL1);
+ irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
nops(4);
mb();
@@ -133,40 +78,40 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
static inline void gic_write_pmr(u32 val)
{
- write_sysreg_s(val, ICC_PMR_EL1);
+ write_sysreg_s(val, SYS_ICC_PMR_EL1);
}
static inline void gic_write_ctlr(u32 val)
{
- write_sysreg_s(val, ICC_CTLR_EL1);
+ write_sysreg_s(val, SYS_ICC_CTLR_EL1);
isb();
}
static inline void gic_write_grpen1(u32 val)
{
- write_sysreg_s(val, ICC_GRPEN1_EL1);
+ write_sysreg_s(val, SYS_ICC_GRPEN1_EL1);
isb();
}
static inline void gic_write_sgi1r(u64 val)
{
- write_sysreg_s(val, ICC_SGI1R_EL1);
+ write_sysreg_s(val, SYS_ICC_SGI1R_EL1);
}
static inline u32 gic_read_sre(void)
{
- return read_sysreg_s(ICC_SRE_EL1);
+ return read_sysreg_s(SYS_ICC_SRE_EL1);
}
static inline void gic_write_sre(u32 val)
{
- write_sysreg_s(val, ICC_SRE_EL1);
+ write_sysreg_s(val, SYS_ICC_SRE_EL1);
isb();
}
static inline void gic_write_bpr1(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_BPR1_EL1) ", %0" : : "r" (val));
+ write_sysreg_s(val, SYS_ICC_BPR1_EL1);
}
#define gic_read_typer(c) readq_relaxed(c)
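
With the GIC system-register encodings now shared via sysreg.h, read_gicreg()/write_gicreg() paste the SYS_ prefix onto the bare architectural name, so existing callers stay unchanged. A sketch of the expansion:

    /* read_gicreg(ICH_VTR_EL2) becomes read_sysreg_s(SYS_ICH_VTR_EL2) */
    u64 vtr = read_gicreg(ICH_VTR_EL2);
    int nr_lr = (vtr & 0x1f) + 1;   /* ListRegs, bits [4:0], is (#LRs - 1) */
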
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index df411f3e083c..ecd9788cd298 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -62,4 +62,13 @@ alternative_if ARM64_ALT_PAN_NOT_UAO
alternative_else_nop_endif
.endm
+/*
+ * Remove the address tag from a virtual address, if present.
+ */
+ .macro clear_address_tag, dst, addr
+ tst \addr, #(1 << 55)
+ bic \dst, \addr, #(0xff << 56)
+ csel \dst, \dst, \addr, eq
+ .endm
+
#endif
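
clear_address_tag only strips the tag from user (TTBR0) pointers: bit 55 selects the half of the address space, and the csel keeps the original value for kernel addresses. A rough C equivalent of the assembly (a sketch, not kernel code):

    static inline unsigned long clear_address_tag(unsigned long addr)
    {
            /* bit 55 clear => TTBR0 (user) address: drop the tag byte */
            if (!(addr & (1UL << 55)))
                    return addr & ~(0xffUL << 56);
            return addr;            /* kernel addresses left untouched */
    }
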
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f819fdcff1ac..f5a2d09afb38 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
" st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
- " mov %" #w "[oldval], %" #w "[old]\n" \
"2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(unsigned long *)ptr) \
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 7457ce082b5f..99fa69c9c3cf 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -322,7 +322,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
- register long x0 asm ("w0") = i; \
+ register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
@@ -394,7 +394,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
- register long x0 asm ("w0") = i; \
+ register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 4e0497f581a0..0fe7e43b7fbc 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -42,25 +42,35 @@
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
-#define __smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u8 *)__u.__c) \
+ : "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u16 *)__u.__c) \
+ : "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u32 *)__u.__c) \
+ : "memory"); \
break; \
case 8: \
asm volatile ("stlr %1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u64 *)__u.__c) \
+ : "memory"); \
break; \
} \
} while (0)
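
Routing the value through the anonymous union lets __smp_store_release() take pointer as well as integer types: the bytes of v are reloaded at the width the store instruction needs, without an implicit conversion that would warn (or mangle) on pointers. Callers are unchanged; a sketch (shared_ptr and alloc_and_fill() are illustrative):

    struct msg *m = alloc_and_fill();

    /* all writes to *m are ordered before the pointer becomes visible */
    smp_store_release(&shared_ptr, m);
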
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index a9be1072933c..366448eb0fb7 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -20,9 +20,6 @@
#include <asm/brk-imm.h>
-#ifdef CONFIG_GENERIC_BUG
-#define HAVE_ARCH_BUG
-
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
#define __BUGVERBOSE_LOCATION(file, line) \
@@ -36,28 +33,35 @@
#define _BUGVERBOSE_LOCATION(file, line)
#endif
-#define _BUG_FLAGS(flags) __BUG_FLAGS(flags)
+#ifdef CONFIG_GENERIC_BUG
-#define __BUG_FLAGS(flags) asm volatile ( \
+#define __BUG_ENTRY(flags) \
".pushsection __bug_table,\"a\"\n\t" \
".align 2\n\t" \
"0: .long 1f - 0b\n\t" \
_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
".short " #flags "\n\t" \
".popsection\n" \
- \
- "1: brk %[imm]" \
- :: [imm] "i" (BUG_BRK_IMM) \
-)
+ "1: "
+#else
+#define __BUG_ENTRY(flags) ""
+#endif
+
+#define __BUG_FLAGS(flags) \
+ asm volatile ( \
+ __BUG_ENTRY(flags) \
+ "brk %[imm]" :: [imm] "i" (BUG_BRK_IMM) \
+ );
-#define BUG() do { \
- _BUG_FLAGS(0); \
- unreachable(); \
+
+#define BUG() do { \
+ __BUG_FLAGS(0); \
+ unreachable(); \
} while (0)
-#define __WARN_FLAGS(flags) _BUG_FLAGS(BUGFLAG_WARNING|(flags))
+#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
-#endif /* ! CONFIG_GENERIC_BUG */
+#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
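
After this rework the BRK instruction is emitted unconditionally and only the __bug_table record depends on CONFIG_GENERIC_BUG, so BUG() still traps (just without file/line metadata) when the generic bug table is disabled. With CONFIG_GENERIC_BUG=n, BUG() reduces to roughly:

    /* __BUG_ENTRY(0) is the empty string, leaving only the trap */
    asm volatile ("brk %[imm]" :: [imm] "i" (BUG_BRK_IMM));
    unreachable();
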
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5082b30bc2c0..ea9bb4e0e9bb 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -16,7 +16,18 @@
#ifndef __ASM_CACHE_H
#define __ASM_CACHE_H
-#include <asm/cachetype.h>
+#include <asm/cputype.h>
+
+#define CTR_L1IP_SHIFT 14
+#define CTR_L1IP_MASK 3
+#define CTR_CWG_SHIFT 24
+#define CTR_CWG_MASK 15
+
+#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+#define ICACHE_POLICY_VPIPT 0
+#define ICACHE_POLICY_VIPT 2
+#define ICACHE_POLICY_PIPT 3
#define L1_CACHE_SHIFT 7
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
@@ -32,6 +43,31 @@
#ifndef __ASSEMBLY__
+#include <linux/bitops.h>
+
+#define ICACHEF_ALIASING 0
+#define ICACHEF_VPIPT 1
+extern unsigned long __icache_flags;
+
+/*
+ * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
+ * permitted in the I-cache.
+ */
+static inline int icache_is_aliasing(void)
+{
+ return test_bit(ICACHEF_ALIASING, &__icache_flags);
+}
+
+static inline int icache_is_vpipt(void)
+{
+ return test_bit(ICACHEF_VPIPT, &__icache_flags);
+}
+
+static inline u32 cache_type_cwg(void)
+{
+ return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+}
+
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
static inline int cache_line_size(void)
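
CTR_EL0.CWG encodes the cache writeback granule as log2 of the number of words, so the granule in bytes is 4 << cwg. The cache_line_size() body (cut off by the hunk above) derives from cache_type_cwg() along these lines, with L1_CACHE_BYTES as the fallback when the field reads as zero:

    static inline int cache_line_size(void)
    {
            u32 cwg = cache_type_cwg();
            /* CWG == 0 means "not provided"; assume the worst case */
            return cwg ? 4 << cwg : L1_CACHE_BYTES;
    }
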
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 5a2a6ee65f65..d74a284abdc2 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -150,9 +150,6 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
-int set_memory_ro(unsigned long addr, int numpages);
-int set_memory_rw(unsigned long addr, int numpages);
-int set_memory_x(unsigned long addr, int numpages);
-int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_valid(unsigned long addr, unsigned long size, int enable);
#endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
deleted file mode 100644
index f5588692f1d4..000000000000
--- a/arch/arm64/include/asm/cachetype.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_CACHETYPE_H
-#define __ASM_CACHETYPE_H
-
-#include <asm/cputype.h>
-
-#define CTR_L1IP_SHIFT 14
-#define CTR_L1IP_MASK 3
-#define CTR_CWG_SHIFT 24
-#define CTR_CWG_MASK 15
-
-#define ICACHE_POLICY_RESERVED 0
-#define ICACHE_POLICY_AIVIVT 1
-#define ICACHE_POLICY_VIPT 2
-#define ICACHE_POLICY_PIPT 3
-
-#ifndef __ASSEMBLY__
-
-#include <linux/bitops.h>
-
-#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
-
-#define ICACHEF_ALIASING 0
-#define ICACHEF_AIVIVT 1
-
-extern unsigned long __icache_flags;
-
-/*
- * NumSets, bits[27:13] - (Number of sets in cache) - 1
- * Associativity, bits[12:3] - (Associativity of cache) - 1
- * LineSize, bits[2:0] - (Log2(Number of words in cache line)) - 2
- */
-#define CCSIDR_EL1_WRITE_THROUGH BIT(31)
-#define CCSIDR_EL1_WRITE_BACK BIT(30)
-#define CCSIDR_EL1_READ_ALLOCATE BIT(29)
-#define CCSIDR_EL1_WRITE_ALLOCATE BIT(28)
-#define CCSIDR_EL1_LINESIZE_MASK 0x7
-#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK)
-#define CCSIDR_EL1_ASSOCIATIVITY_SHIFT 3
-#define CCSIDR_EL1_ASSOCIATIVITY_MASK 0x3ff
-#define CCSIDR_EL1_ASSOCIATIVITY(x) \
- (((x) >> CCSIDR_EL1_ASSOCIATIVITY_SHIFT) & CCSIDR_EL1_ASSOCIATIVITY_MASK)
-#define CCSIDR_EL1_NUMSETS_SHIFT 13
-#define CCSIDR_EL1_NUMSETS_MASK 0x7fff
-#define CCSIDR_EL1_NUMSETS(x) \
- (((x) >> CCSIDR_EL1_NUMSETS_SHIFT) & CCSIDR_EL1_NUMSETS_MASK)
-
-#define CACHE_LINESIZE(x) (16 << CCSIDR_EL1_LINESIZE(x))
-#define CACHE_NUMSETS(x) (CCSIDR_EL1_NUMSETS(x) + 1)
-#define CACHE_ASSOCIATIVITY(x) (CCSIDR_EL1_ASSOCIATIVITY(x) + 1)
-
-extern u64 __attribute_const__ cache_get_ccsidr(u64 csselr);
-
-/* Helpers for Level 1 Instruction cache csselr = 1L */
-static inline int icache_get_linesize(void)
-{
- return CACHE_LINESIZE(cache_get_ccsidr(1L));
-}
-
-static inline int icache_get_numsets(void)
-{
- return CACHE_NUMSETS(cache_get_ccsidr(1L));
-}
-
-/*
- * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
- * permitted in the I-cache.
- */
-static inline int icache_is_aliasing(void)
-{
- return test_bit(ICACHEF_ALIASING, &__icache_flags);
-}
-
-static inline int icache_is_aivivt(void)
-{
- return test_bit(ICACHEF_AIVIVT, &__icache_flags);
-}
-
-static inline u32 cache_type_cwg(void)
-{
- return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_CACHETYPE_H */
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 91b26d26af8a..ae852add053d 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -46,7 +46,7 @@ static inline unsigned long __xchg_case_##name(unsigned long x, \
" swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
__nops(3) \
" " #nop_lse) \
- : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
: "r" (x) \
: cl); \
\
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index f31c48d0cd68..428ee1f2468c 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
bool this_cpu_has_cap(unsigned int cap);
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
}
/* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static inline bool __cpus_have_const_cap(int num)
{
if (num >= ARM64_NCAPS)
return false;
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
return test_bit(num, cpu_hwcaps);
}
+static inline bool cpus_have_const_cap(int num)
+{
+ if (static_branch_likely(&arm64_const_caps_ready))
+ return __cpus_have_const_cap(num);
+ else
+ return cpus_have_cap(num);
+}
+
static inline void cpus_set_cap(unsigned int num)
{
if (num >= ARM64_NCAPS) {
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
num, ARM64_NCAPS);
} else {
__set_bit(num, cpu_hwcaps);
- static_branch_enable(&cpu_hwcap_keys[num]);
}
}
@@ -226,7 +234,7 @@ void update_cpu_errata_workarounds(void);
void __init enable_errata_workarounds(void);
void verify_local_cpu_errata_workarounds(void);
-u64 read_system_reg(u32 id);
+u64 read_sanitised_ftr_reg(u32 id);
static inline bool cpu_supports_mixed_endian_el0(void)
{
@@ -240,7 +248,7 @@ static inline bool system_supports_32bit_el0(void)
static inline bool system_supports_mixed_endian_el0(void)
{
- return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
+ return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}
static inline bool system_supports_fpsimd(void)
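
Until arm64_const_caps_ready is flipped (once the boot-time capabilities are finalized), cpus_have_const_cap() transparently falls back to the bitmap test in cpus_have_cap(), so early callers no longer hit unpatched static keys. Call sites are unaffected; a sketch (the helper called inside is a placeholder):

    if (cpus_have_const_cap(ARM64_HAS_PAN))
            configure_pan();        /* static branch once caps are ready */
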
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 73d5bab015eb..5a5fa47a6b18 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -20,6 +20,9 @@ struct dev_archdata {
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
+#ifdef CONFIG_XEN
+ const struct dma_map_ops *dev_dma_ops;
+#endif
bool dma_coherent;
};
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 505756cdc67a..5392dbeffa45 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -27,11 +27,8 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern const struct dma_map_ops dummy_dma_ops;
-static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- if (dev && dev->dma_ops)
- return dev->dma_ops;
-
/*
* We expect no ISA devices, and all other DMA masters are expected to
* have someone call arch_setup_dma_ops at device creation time.
@@ -39,14 +36,6 @@ static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
return &dummy_dma_ops;
}
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- if (xen_initial_domain())
- return xen_dma_ops;
- else
- return __generic_dma_ops(NULL);
-}
-
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent);
#define arch_setup_dma_ops arch_setup_dma_ops
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index ad42e79a5d4d..85997c0e5443 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -177,6 +177,10 @@
#define ESR_ELx_SYS64_ISS_SYS_CNTVCT (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \
ESR_ELx_SYS64_ISS_DIR_READ)
+
+#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
+ ESR_ELx_SYS64_ISS_DIR_READ)
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 8740297dac77..1473fc2f7ab7 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index b6b167ac082b..41770766d964 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -149,7 +149,7 @@ static inline void ptrace_hw_copy_thread(struct task_struct *task)
/* Determine number of BRP registers available. */
static inline int get_num_brps(void)
{
- u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
+ u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
return 1 +
cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_BRPS_SHIFT);
@@ -158,7 +158,7 @@ static inline int get_num_brps(void)
/* Determine number of WRP registers available. */
static inline int get_num_wrps(void)
{
- u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
+ u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
return 1 +
cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_WRPS_SHIFT);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 0c00c87bb9dd..35b2e50f17fb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -173,6 +173,16 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define iounmap __iounmap
/*
+ * PCI configuration space mapping function.
+ *
+ * The PCI specification disallows posted write configuration transactions.
+ * Add an arch specific pci_remap_cfgspace() definition that is implemented
+ * through nGnRnE device memory attribute as recommended by the ARM v8
+ * Architecture reference manual Issue A.k B2.8.2 "Device memory".
+ */
+#define pci_remap_cfgspace(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+
+/*
* io{read,write}{16,32,64}be() macros
*/
#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
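
A PCI host-controller driver would use the new helper instead of plain ioremap() when mapping config space, so the mapping picks up the non-posted nGnRnE attribute. A sketch (the resource variable is illustrative):

    void __iomem *cfg;

    cfg = pci_remap_cfgspace(res->start, resource_size(res));
    if (!cfg)
            return -ENOMEM;
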
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 04744dc5fb61..e17f0529a882 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -40,9 +40,59 @@
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
- /* Empty routine needed to avoid build errors. */
+ if (oldregs) {
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ } else {
+ u64 tmp1, tmp2;
+
+ __asm__ __volatile__ (
+ "stp x0, x1, [%2, #16 * 0]\n"
+ "stp x2, x3, [%2, #16 * 1]\n"
+ "stp x4, x5, [%2, #16 * 2]\n"
+ "stp x6, x7, [%2, #16 * 3]\n"
+ "stp x8, x9, [%2, #16 * 4]\n"
+ "stp x10, x11, [%2, #16 * 5]\n"
+ "stp x12, x13, [%2, #16 * 6]\n"
+ "stp x14, x15, [%2, #16 * 7]\n"
+ "stp x16, x17, [%2, #16 * 8]\n"
+ "stp x18, x19, [%2, #16 * 9]\n"
+ "stp x20, x21, [%2, #16 * 10]\n"
+ "stp x22, x23, [%2, #16 * 11]\n"
+ "stp x24, x25, [%2, #16 * 12]\n"
+ "stp x26, x27, [%2, #16 * 13]\n"
+ "stp x28, x29, [%2, #16 * 14]\n"
+ "mov %0, sp\n"
+ "stp x30, %0, [%2, #16 * 15]\n"
+
+ "/* faked current PSTATE */\n"
+ "mrs %0, CurrentEL\n"
+ "mrs %1, SPSEL\n"
+ "orr %0, %0, %1\n"
+ "mrs %1, DAIF\n"
+ "orr %0, %0, %1\n"
+ "mrs %1, NZCV\n"
+ "orr %0, %0, %1\n"
+ /* pc */
+ "adr %1, 1f\n"
+ "1:\n"
+ "stp %1, %0, [%2, #16 * 16]\n"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (newregs)
+ : "memory"
+ );
+ }
}
+#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_HIBERNATION)
+extern bool crash_is_nosave(unsigned long pfn);
+extern void crash_prepare_suspend(void);
+extern void crash_post_resume(void);
+#else
+static inline bool crash_is_nosave(unsigned long pfn) {return false; }
+static inline void crash_prepare_suspend(void) {}
+static inline void crash_post_resume(void) {}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
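
crash_setup_regs() now snapshots the live CPU state when no exception-time regs are available: x0-x29 stored as stp pairs into pt_regs, then x30 and sp, a PSTATE value reassembled from CurrentEL, SPSel, DAIF and NZCV, and the pc taken from a local label. The kdump path invokes it roughly as (a sketch):

    struct pt_regs regs;

    crash_setup_regs(&regs, NULL);  /* NULL: capture this CPU's state */
    crash_save_cpu(&regs, smp_processor_id());
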
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ec3553eb9349..26a64d0f9ab9 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -28,7 +28,7 @@
#define ARM_EXCEPTION_EL1_SERROR 1
#define ARM_EXCEPTION_TRAP 2
/* The hyp-stub will return this for any kvm_call_hyp() call */
-#define ARM_EXCEPTION_HYP_GONE 3
+#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
@@ -47,7 +47,6 @@ struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
-extern char __kvm_hyp_reset[];
extern char __kvm_hyp_vector[];
@@ -59,6 +58,8 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u64 __vgic_v3_read_vmcr(void);
+extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f5ea0ba70f07..fe39e6841326 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -240,6 +240,12 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
+static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+{
+ u32 esr = kvm_vcpu_get_hsr(vcpu);
+ return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+}
+
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
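
kvm_vcpu_sys_get_rt() centralizes extraction of the Rt field from the trapped instruction's ISS, so sysreg trap handlers stop open-coding the shift and mask. A handler sketch (the handler name and host_cntfrq are illustrative):

    static void handle_cntfrq_read(struct kvm_vcpu *vcpu)
    {
            int rt = kvm_vcpu_sys_get_rt(vcpu);

            /* place the emulated counter frequency in the guest's Xt */
            vcpu_set_reg(vcpu, rt, host_cntfrq);
    }
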
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e7705e7bb07b..1f252a95bc02 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -31,7 +32,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
#define KVM_USER_MEM_SLOTS 512
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000
#include <kvm/arm_vgic.h>
@@ -42,7 +42,7 @@
#define KVM_VCPU_MAX_FEATURES 4
-#define KVM_REQ_VCPU_EXIT 8
+#define KVM_REQ_VCPU_EXIT (8 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -356,19 +356,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long vector_ptr)
{
/*
- * Call initialization code, and switch to the full blown
- * HYP code.
+ * Call initialization code, and switch to the full blown HYP code.
+ * If the cpucaps haven't been finalized yet, something has gone very
+ * wrong, and hyp will crash and burn when it uses any
+ * cpus_have_const_cap() wrapper.
*/
+ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}
-void __kvm_hyp_teardown(void);
-static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
- phys_addr_t phys_idmap_start)
-{
- kvm_call_hyp(__kvm_hyp_teardown, phys_idmap_start);
-}
-
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index ed1246014901..a89cc22abadc 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -108,7 +108,7 @@ alternative_else_nop_endif
#else
#include <asm/pgalloc.h>
-#include <asm/cachetype.h>
+#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
@@ -155,7 +155,6 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
-phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
@@ -242,12 +241,13 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
kvm_flush_dcache_to_poc(va, size);
- if (!icache_is_aliasing()) { /* PIPT */
- flush_icache_range((unsigned long)va,
- (unsigned long)va + size);
- } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
+ if (icache_is_aliasing()) {
/* any kind of VIPT cache */
__flush_icache_all();
+ } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
+ /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
+ flush_icache_range((unsigned long)va,
+ (unsigned long)va + size);
}
}
@@ -307,7 +307,7 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
static inline unsigned int kvm_get_vmid_bits(void)
{
- int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
+ int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 47619411f0ff..5468c834b072 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -37,5 +37,6 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+extern void mark_linear_text_alias_ro(void);
#endif
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 06ff7fd9e81f..d57693f5d4ec 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,26 +17,26 @@
#define __ASM_MODULE_H
#include <asm-generic/module.h>
-#include <asm/memory.h>
#define MODULE_ARCH_VERMAGIC "aarch64"
#ifdef CONFIG_ARM64_MODULE_PLTS
-struct mod_arch_specific {
+struct mod_plt_sec {
struct elf64_shdr *plt;
int plt_num_entries;
int plt_max_entries;
};
+
+struct mod_arch_specific {
+ struct mod_plt_sec core;
+ struct mod_plt_sec init;
+};
#endif
-u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym);
#ifdef CONFIG_RANDOMIZE_BASE
-#ifdef CONFIG_MODVERSIONS
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start (kimage_vaddr - KIMAGE_VADDR)
-#endif
extern u64 module_alloc_base;
#else
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index b9a7ba9ca44c..1fc19744ffe9 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -22,6 +22,8 @@
*/
#define PCI_DMA_BUS_IS_PHYS (0)
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0eef6064bf3b..c213fdbd056c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -74,6 +74,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
+#define pte_cont_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \
+ (__boundary - 1 < (end) - 1) ? __boundary : (end); \
+})
+
+#define pmd_cont_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK; \
+ (__boundary - 1 < (end) - 1) ? __boundary : (end); \
+})
+
#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
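
pte_cont_addr_end() steps addr to the next contiguous-PTE boundary, clamped to end; comparing `x - 1` keeps the arithmetic correct even when end sits at the very top of the address space. A worked example, assuming 4K pages (CONT_PTE_SIZE == 0x10000, CONT_PTE_MASK == ~0xffff):

    addr       = 0x12345000, end = 0x12400000
    __boundary = (0x12345000 + 0x10000) & ~0xffff = 0x12350000
    result     = 0x12350000   /* boundary below end: step to it */
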
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index c97b8bd2acba..9428b93fefb2 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -50,6 +50,7 @@ extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
struct debug_info {
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
/* Have we suspended stepping by a debugger? */
int suspended_step;
/* Allow breakpoints and watchpoints to be disabled for this thread. */
@@ -58,6 +59,7 @@ struct debug_info {
/* Hardware breakpoints pinned to this task. */
struct perf_event *hbp_break[ARM_MAX_BRP];
struct perf_event *hbp_watch[ARM_MAX_WRP];
+#endif
};
struct cpu_context {
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 4e7e7067afdb..941267caa39c 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -24,6 +24,8 @@ extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[];
extern char __idmap_text_start[], __idmap_text_end[];
+extern char __initdata_begin[], __initdata_end[];
+extern char __inittext_begin[], __inittext_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index d050d720a1b4..55f08c5acfad 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -148,6 +148,9 @@ static inline void cpu_panic_kernel(void)
*/
bool cpus_are_stuck_in_kernel(void);
+extern void smp_send_crash_stop(void);
+extern bool smp_crash_stop_failed(void);
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index ac24b6e798b1..15c142ce991c 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -48,6 +48,8 @@
((crn) << CRn_shift) | ((crm) << CRm_shift) | \
((op2) << Op2_shift))
+#define sys_insn sys_reg
+
#define sys_reg_Op0(id) (((id) >> Op0_shift) & Op0_mask)
#define sys_reg_Op1(id) (((id) >> Op1_shift) & Op1_mask)
#define sys_reg_CRn(id) (((id) >> CRn_shift) & CRn_mask)
@@ -81,6 +83,41 @@
#endif /* CONFIG_BROKEN_GAS_INST */
+#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
+#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
+
+#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
+ (!!x)<<8 | 0x1f)
+#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
+ (!!x)<<8 | 0x1f)
+
+#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
+
+#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
+#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
+#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2)
+#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2)
+#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2)
+#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4)
+#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5)
+#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6)
+#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7)
+#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0)
+#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4)
+#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4)
+#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4)
+#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4)
+#define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6)
+#define SYS_DBGCLAIMCLR_EL1 sys_reg(2, 0, 7, 9, 6)
+#define SYS_DBGAUTHSTATUS_EL1 sys_reg(2, 0, 7, 14, 6)
+#define SYS_MDCCSR_EL0 sys_reg(2, 3, 0, 1, 0)
+#define SYS_DBGDTR_EL0 sys_reg(2, 3, 0, 4, 0)
+#define SYS_DBGDTRRX_EL0 sys_reg(2, 3, 0, 5, 0)
+#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0)
+#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
+
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -88,6 +125,7 @@
#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0)
#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1)
#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2)
+#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3)
#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4)
#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5)
#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6)
@@ -118,17 +156,127 @@
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
-#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
+#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0)
+#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
+#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
+
+#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
+#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
+#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
+
+#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
+
+#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
+#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1)
+#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0)
+#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
+#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
+
+#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
+#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
+
+#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0)
+#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
+
+#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
+
+#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
+#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
+#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
+#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define SYS_ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
+#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
+#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
+#define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+
+#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1)
+#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
+
+#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
+
+#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
+#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
+
+#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
+
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
-#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
-#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
+#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
+#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
+#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
+#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3)
+#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4)
+#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5)
+#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6)
+#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
+#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0)
+#define SYS_PMXEVTYPER_EL0 sys_reg(3, 3, 9, 13, 1)
+#define SYS_PMXEVCNTR_EL0 sys_reg(3, 3, 9, 13, 2)
+#define SYS_PMUSERENR_EL0 sys_reg(3, 3, 9, 14, 0)
+#define SYS_PMOVSSET_EL0 sys_reg(3, 3, 9, 14, 3)
+
+#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2)
+#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3)
-#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
- (!!x)<<8 | 0x1f)
-#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
- (!!x)<<8 | 0x1f)
+#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
+
+#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0)
+#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
+#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
+
+#define __PMEV_op2(n) ((n) & 0x7)
+#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
+#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
+#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3))
+#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n))
+
+#define SYS_PMCCFILTR_EL0 sys_reg (3, 3, 14, 15, 7)
+
+#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
+#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
+#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
+
+#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
+#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0)
+#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1)
+#define SYS_ICH_AP0R2_EL2 __SYS__AP0Rx_EL2(2)
+#define SYS_ICH_AP0R3_EL2 __SYS__AP0Rx_EL2(3)
+
+#define __SYS__AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
+#define SYS_ICH_AP1R0_EL2 __SYS__AP1Rx_EL2(0)
+#define SYS_ICH_AP1R1_EL2 __SYS__AP1Rx_EL2(1)
+#define SYS_ICH_AP1R2_EL2 __SYS__AP1Rx_EL2(2)
+#define SYS_ICH_AP1R3_EL2 __SYS__AP1Rx_EL2(3)
+
+#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
+#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
+#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
+#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
+#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
+#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
+#define SYS_ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
+#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
+
+#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
+#define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0)
+#define SYS_ICH_LR1_EL2 __SYS__LR0_EL2(1)
+#define SYS_ICH_LR2_EL2 __SYS__LR0_EL2(2)
+#define SYS_ICH_LR3_EL2 __SYS__LR0_EL2(3)
+#define SYS_ICH_LR4_EL2 __SYS__LR0_EL2(4)
+#define SYS_ICH_LR5_EL2 __SYS__LR0_EL2(5)
+#define SYS_ICH_LR6_EL2 __SYS__LR0_EL2(6)
+#define SYS_ICH_LR7_EL2 __SYS__LR0_EL2(7)
+
+#define __SYS__LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
+#define SYS_ICH_LR8_EL2 __SYS__LR8_EL2(0)
+#define SYS_ICH_LR9_EL2 __SYS__LR8_EL2(1)
+#define SYS_ICH_LR10_EL2 __SYS__LR8_EL2(2)
+#define SYS_ICH_LR11_EL2 __SYS__LR8_EL2(3)
+#define SYS_ICH_LR12_EL2 __SYS__LR8_EL2(4)
+#define SYS_ICH_LR13_EL2 __SYS__LR8_EL2(5)
+#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6)
+#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_EE (1 << 25)
@@ -156,6 +304,11 @@
#define ID_AA64ISAR0_SHA1_SHIFT 8
#define ID_AA64ISAR0_AES_SHIFT 4
+/* id_aa64isar1 */
+#define ID_AA64ISAR1_LRCPC_SHIFT 20
+#define ID_AA64ISAR1_FCMA_SHIFT 16
+#define ID_AA64ISAR1_JSCVT_SHIFT 12
+
/* id_aa64pfr0 */
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
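
The PMEVCNTRn/PMEVTYPERn helpers spread the event-counter index n across the encoding: n[4:3] lands in the low bits of CRm and n[2:0] in op2, matching the architected PMEVCNTR<n>_EL0 layout. Worked example for n = 10:

    __CNTR_CRm(10) = 0x8 | ((10 >> 3) & 0x3) = 0x9
    __PMEV_op2(10) = 10 & 0x7                = 0x2
    SYS_PMEVCNTRn_EL0(10) == sys_reg(3, 3, 14, 0x9, 0x2)  /* PMEVCNTR10_EL0 */
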
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index ba497172610d..7b8a04789cef 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -69,20 +69,21 @@ static inline void set_fs(mm_segment_t fs)
*/
#define __range_ok(addr, size) \
({ \
+ unsigned long __addr = (unsigned long __force)(addr); \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
: "=&r" (flag), "=&r" (roksum) \
- : "1" (addr), "Ir" (size), \
+ : "1" (__addr), "Ir" (size), \
"r" (current_thread_info()->addr_limit) \
: "cc"); \
flag; \
})
/*
- * When dealing with data aborts or instruction traps we may end up with
- * a tagged userland pointer. Clear the tag to get a sane pointer to pass
- * on to access_ok(), for instance.
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
@@ -230,7 +231,7 @@ do { \
(err), ARM64_HAS_UAO); \
break; \
case 8: \
- __get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
+ __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
(err), ARM64_HAS_UAO); \
break; \
default: \
@@ -297,7 +298,7 @@ do { \
(err), ARM64_HAS_UAO); \
break; \
case 8: \
- __put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
+ __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
(err), ARM64_HAS_UAO); \
break; \
default: \
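
__range_ok() now snapshots the (possibly tagged, possibly pointer-typed) address into an unsigned long before feeding it to the asm. The adds/ccmp/cset sequence is an overflow-proof bound check; in plain C it amounts to roughly (a sketch, ignoring the flag plumbing):

    static inline int range_ok(unsigned long addr, unsigned long size,
                               unsigned long limit)
    {
            unsigned long sum = addr + size;

            /* reject wraparound, then require sum <= addr_limit */
            return sum >= addr && sum <= limit;
    }
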
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 439f6b5d31f6..c5f89442785c 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -19,25 +19,38 @@
#define __ASM__VIRT_H
/*
- * The arm64 hcall implementation uses x0 to specify the hcall type. A value
- * less than 0xfff indicates a special hcall, such as get/set vector.
- * Any other value is used as a pointer to the function to call.
+ * The arm64 hcall implementation uses x0 to specify the hcall
+ * number. A value less than HVC_STUB_HCALL_NR indicates a special
+ * hcall, such as set vector. Any other value is handled in a
+ * hypervisor specific way.
+ *
+ * The hypercall is allowed to clobber any of the caller-saved
+ * registers (x0-x18), so it is advisable to use it through the
+ * indirection of a function call (as implemented in hyp-stub.S).
*/
-/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */
-#define HVC_GET_VECTORS 0
-
/*
* HVC_SET_VECTORS - Set the value of the vbar_el2 register.
*
* @x1: Physical address of the new vector table.
*/
-#define HVC_SET_VECTORS 1
+#define HVC_SET_VECTORS 0
/*
* HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
*/
-#define HVC_SOFT_RESTART 2
+#define HVC_SOFT_RESTART 1
+
+/*
+ * HVC_RESET_VECTORS - Restore the vectors to the original HYP stubs
+ */
+#define HVC_RESET_VECTORS 2
+
+/* Max number of HYP stub hypercalls */
+#define HVC_STUB_HCALL_NR 3
+
+/* Error returned when an invalid stub number is passed into x0 */
+#define HVC_STUB_ERR 0xbadca11
#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)
@@ -61,7 +74,7 @@
extern u32 __boot_cpu_mode[2];
void __hyp_set_vectors(phys_addr_t phys_vector_base);
-phys_addr_t __hyp_get_vectors(void);
+void __hyp_reset_vectors(void);
/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
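
A stub hypercall passes its function number in x0, and anything at or above HVC_STUB_HCALL_NR comes back as HVC_STUB_ERR. Since the stub may clobber all of x0-x18, C code goes through the small asm wrappers rather than issuing hvc directly; a sketch (new_vectors is illustrative):

    phys_addr_t vectors_phys = virt_to_phys(new_vectors);

    /* wrapper issues: x0 = HVC_SET_VECTORS, x1 = vectors_phys, hvc #0 */
    __hyp_set_vectors(vectors_phys);
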
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 825b0fe51c2b..13a97aa2285f 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -2,21 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += fcntl.h
-header-y += hwcap.h
-header-y += kvm_para.h
-header-y += perf_regs.h
-header-y += param.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += stat.h
-header-y += statfs.h
-header-y += ucontext.h
-header-y += unistd.h
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 61c263cba272..4e187ce2a811 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -32,5 +32,8 @@
#define HWCAP_ASIMDHP (1 << 10)
#define HWCAP_CPUID (1 << 11)
#define HWCAP_ASIMDRDM (1 << 12)
+#define HWCAP_JSCVT (1 << 13)
+#define HWCAP_FCMA (1 << 14)
+#define HWCAP_LRCPC (1 << 15)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index c2860358ae3e..70eea2ecc663 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -39,6 +39,8 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -143,6 +145,8 @@ struct kvm_debug_exit_arch {
#define KVM_GUESTDBG_USE_HW (1 << 17)
struct kvm_sync_regs {
+ /* Used with KVM_CAP_ARM_USER_IRQ */
+ __u64 device_irq_level;
};
struct kvm_arch_memory_slot {
@@ -212,13 +216,17 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
+#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
-#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
+#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
+#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0