Diffstat (limited to 'arch/arm64')
 arch/arm64/Kconfig                        |   7
 arch/arm64/Kconfig.debug                  |  11
 arch/arm64/Makefile                       |   1
 arch/arm64/boot/dts/apm-storm.dtsi        |  21
 arch/arm64/include/asm/cacheflush.h       |   4
 arch/arm64/include/asm/cachetype.h        |  20
 arch/arm64/include/asm/cpu_ops.h          |   7
 arch/arm64/include/asm/cpuidle.h          |  13
 arch/arm64/include/asm/debug-monitors.h   |  30
 arch/arm64/include/asm/dma-mapping.h      |   7
 arch/arm64/include/asm/insn.h             | 249
 arch/arm64/include/asm/io.h               |   2
 arch/arm64/include/asm/kgdb.h             |   2
 arch/arm64/include/asm/kvm_arm.h          |  13
 arch/arm64/include/asm/kvm_emulate.h      |   5
 arch/arm64/include/asm/kvm_host.h         |  24
 arch/arm64/include/asm/kvm_mmu.h          |  18
 arch/arm64/include/asm/percpu.h           |   4
 arch/arm64/include/asm/pgtable.h          |  33
 arch/arm64/include/asm/proc-fns.h         |   2
 arch/arm64/include/asm/suspend.h          |   1
 arch/arm64/include/asm/thread_info.h      |   9
 arch/arm64/include/uapi/asm/kvm.h         |   2
 arch/arm64/kernel/Makefile                |   1
 arch/arm64/kernel/cpuidle.c               |  31
 arch/arm64/kernel/cpuinfo.c               |  28
 arch/arm64/kernel/efi-stub.c              |  16
 arch/arm64/kernel/entry.S                 |   1
 arch/arm64/kernel/ftrace.c                |  10
 arch/arm64/kernel/head.S                  |   6
 arch/arm64/kernel/insn.c                  | 671
 arch/arm64/kernel/irq.c                   |  12
 arch/arm64/kernel/kgdb.c                  |   4
 arch/arm64/kernel/perf_event.c            |   2
 arch/arm64/kernel/process.c               |  48
 arch/arm64/kernel/psci.c                  | 104
 arch/arm64/kernel/return_address.c        |   3
 arch/arm64/kernel/setup.c                 |  11
 arch/arm64/kernel/sleep.S                 |  47
 arch/arm64/kernel/smp_spin_table.c        |  22
 arch/arm64/kernel/stacktrace.c            |   3
 arch/arm64/kernel/suspend.c               |  48
 arch/arm64/kernel/sys_compat.c            |   6
 arch/arm64/kernel/traps.c                 |   3
 arch/arm64/kernel/vmlinux.lds.S           |   2
 arch/arm64/kvm/guest.c                    |   2
 arch/arm64/kvm/handle_exit.c              |   2
 arch/arm64/kvm/hyp-init.S                 |   4
 arch/arm64/kvm/sys_regs.c                 |   2
 arch/arm64/mm/Makefile                    |   2
 arch/arm64/mm/dma-mapping.c               |  33
 arch/arm64/mm/init.c                      |  13
 arch/arm64/mm/mmap.c                      |   2
 arch/arm64/mm/mmu.c                       |   2
 arch/arm64/mm/pageattr.c                  |  97
 arch/arm64/mm/proc.S                      |  15
 arch/arm64/net/Makefile                   |   4
 arch/arm64/net/bpf_jit.h                  | 169
 arch/arm64/net/bpf_jit_comp.c             | 679
 59 files changed, 2374 insertions(+), 216 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd4e81a4e1ce..f0d3a2d85a5b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -35,6 +35,7 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
+ select HAVE_BPF_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_CC_STACKPROTECTOR
select HAVE_DEBUG_BUGVERBOSE
@@ -252,11 +253,11 @@ config SCHED_SMT
places. If unsure say N here.
config NR_CPUS
- int "Maximum number of CPUs (2-32)"
- range 2 32
+ int "Maximum number of CPUs (2-64)"
+ range 2 64
depends on SMP
# These have to remain sorted largest to smallest
- default "8"
+ default "64"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 4ee8e90b7a45..0a12933e50ed 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -43,4 +43,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
of TEXT_OFFSET and platforms must not require a specific
value.
+config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+ depends on MODULES
+ help
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+ of module data. Such protection may interfere with run-time code
+ patching and dynamic kernel tracing - and they should also be
+ disabled if these features are used.
+ If in doubt, say "N".
+
endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2df5e5daeebe..59c86b6b3052 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -47,6 +47,7 @@ endif
export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/
+core-$(CONFIG_NET) += arch/arm64/net/
core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index c0aceef7f5b3..f391972ad135 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -269,6 +269,19 @@
enable-mask = <0x2>;
clock-output-names = "rtcclk";
};
+
+ rngpkaclk: rngpkaclk@17000000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x17000000 0x0 0x2000>;
+ reg-names = "csr-reg";
+ csr-offset = <0xc>;
+ csr-mask = <0x10>;
+ enable-offset = <0x10>;
+ enable-mask = <0x10>;
+ clock-output-names = "rngpkaclk";
+ };
};
serial0: serial@1c020000 {
@@ -421,5 +434,13 @@
};
};
+
+ rng: rng@10520000 {
+ compatible = "apm,xgene-rng";
+ reg = <0x0 0x10520000 0x0 0x100>;
+ interrupts = <0x0 0x41 0x4>;
+ clocks = <&rngpkaclk 0>;
+ };
+
};
};
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index f2defe1c380c..689b6379188c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
#endif
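
These four prototypes are the interface that DEBUG_SET_MODULE_RONX and the new page-attribute code (arch/arm64/mm/pageattr.c in this series) revolve around: change the protection of a page-aligned region given its start address and a page count. A minimal, hedged sketch of a caller; the function and its arguments are hypothetical, kernel context assumed:

/* Hedged sketch: mark a page-aligned JIT or module image read-only and
 * executable once patching is finished. 'image' and 'pages' are
 * hypothetical; real callers include the module loader and the BPF JIT.
 */
static void protect_image(void *image, int pages)
{
        set_memory_ro((unsigned long)image, pages);     /* no further writes */
        set_memory_x((unsigned long)image, pages);      /* allow execution */
}
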
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 7a2e0762cb40..4c631a0a3609 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -39,6 +39,26 @@
extern unsigned long __icache_flags;
+#define CCSIDR_EL1_LINESIZE_MASK 0x7
+#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK)
+
+#define CCSIDR_EL1_NUMSETS_SHIFT 13
+#define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
+#define CCSIDR_EL1_NUMSETS(x) \
+ (((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
+
+extern u64 __attribute_const__ icache_get_ccsidr(void);
+
+static inline int icache_get_linesize(void)
+{
+ return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
+}
+
+static inline int icache_get_numsets(void)
+{
+ return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
+}
+
/*
* Whilst the D-side always behaves as PIPT on AArch64, aliasing is
* permitted in the I-cache.
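
The helpers above decode CCSIDR_EL1 for whichever cache CSSELR_EL1 selects: the LineSize field encodes log2(words per line) minus two, hence 16 << LineSize bytes per line, and NumSets holds the number of sets minus one. A small stand-alone program repeating that arithmetic on an assumed register value (not read from hardware):

#include <stdio.h>
#include <stdint.h>

#define CCSIDR_EL1_LINESIZE_MASK        0x7
#define CCSIDR_EL1_LINESIZE(x)          ((x) & CCSIDR_EL1_LINESIZE_MASK)
#define CCSIDR_EL1_NUMSETS_SHIFT        13
#define CCSIDR_EL1_NUMSETS_MASK         (0x7fffULL << CCSIDR_EL1_NUMSETS_SHIFT)
#define CCSIDR_EL1_NUMSETS(x)           (((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)

int main(void)
{
        /* Hypothetical value: LineSize=2 (64 bytes/line), NumSets field=127
         * (128 sets). On real hardware this comes from icache_get_ccsidr().
         */
        uint64_t ccsidr = (127ULL << CCSIDR_EL1_NUMSETS_SHIFT) | 2;

        printf("line size: %d bytes\n", 16 << CCSIDR_EL1_LINESIZE(ccsidr));
        printf("sets:      %d\n", 1 + (int)CCSIDR_EL1_NUMSETS(ccsidr));
        return 0;
}
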
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index d7b4b38a8e86..6f8e2ef9094a 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -28,6 +28,8 @@ struct device_node;
* enable-method property.
* @cpu_init: Reads any data necessary for a specific enable-method from the
* devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+ * devicetree, for a given cpu node and proposed logical id.
* @cpu_prepare: Early one-time preparation step for a cpu. If there is a
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
@@ -47,6 +49,7 @@ struct device_node;
struct cpu_operations {
const char *name;
int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_prepare)(unsigned int);
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
@@ -61,7 +64,7 @@ struct cpu_operations {
};
extern const struct cpu_operations *cpu_ops[NR_CPUS];
-extern int __init cpu_read_ops(struct device_node *dn, int cpu);
-extern void __init cpu_read_bootcpu_ops(void);
+int __init cpu_read_ops(struct device_node *dn, int cpu);
+void __init cpu_read_bootcpu_ops(void);
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
new file mode 100644
index 000000000000..b52a9932e2b1
--- /dev/null
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_CPUIDLE_H
+#define __ASM_CPUIDLE_H
+
+#ifdef CONFIG_CPU_IDLE
+extern int cpu_init_idle(unsigned int cpu);
+#else
+static inline int cpu_init_idle(unsigned int cpu)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7fb343779498..40ec68aa6870 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -48,11 +48,13 @@
/*
* #imm16 values used for BRK instruction generation
* Allowed values for kgbd are 0x400 - 0x7ff
+ * 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
*/
-#define KGDB_DYN_DGB_BRK_IMM 0x400
-#define KDBG_COMPILED_DBG_BRK_IMM 0x401
+#define FAULT_BRK_IMM 0x100
+#define KGDB_DYN_DBG_BRK_IMM 0x400
+#define KGDB_COMPILED_DBG_BRK_IMM 0x401
/*
* BRK instruction encoding
@@ -61,24 +63,30 @@
#define AARCH64_BREAK_MON 0xd4200000
/*
+ * BRK instruction for provoking a fault on purpose
+ * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
+ */
+#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
+
+/*
* Extract byte from BRK instruction
*/
-#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
+#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
/*
* Extract byte from BRK #imm16
*/
-#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
- (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
+ (((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
-#define KGDB_DYN_DGB_BRK_BYTE(x) \
- (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
+#define KGDB_DYN_DBG_BRK_BYTE(x) \
+ (KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
-#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
-#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
-#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
-#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
+#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DBG_BRK_BYTE(0)
+#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DBG_BRK_BYTE(1)
+#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DBG_BRK_BYTE(2)
+#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DBG_BRK_BYTE(3)
#define CACHE_FLUSH_IS_SAFE 1
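
The BRK byte macros above slice the little-endian encoding of BRK #imm16 (base opcode 0xd4200000 with the immediate in bits [20:5]) into the four bytes that kgdb registers as its breakpoint instruction. A runnable check of that arithmetic for the dynamic breakpoint immediate 0x400:

#include <stdio.h>
#include <stdint.h>

#define AARCH64_BREAK_MON       0xd4200000u
#define KGDB_DYN_DBG_BRK_IMM    0x400

int main(void)
{
        /* Full encoding: monitor BRK opcode with #imm16 placed at bits [20:5]. */
        uint32_t insn = AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5);
        int i;

        printf("BRK #0x%x = 0x%08x, little-endian bytes:",
               KGDB_DYN_DBG_BRK_IMM, insn);
        for (i = 0; i < 4; i++)         /* matches KGDB_DYN_BRK_INS_BYTE0..3 */
                printf(" %02x", (insn >> (i * 8)) & 0xff);
        printf("\n");                   /* expected: 00 80 20 d4 */
        return 0;
}
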
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index dc82e52acdb3..adeae3f6f0fc 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -52,6 +52,13 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
+static inline int set_arch_dma_coherent_ops(struct device *dev)
+{
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+ return 0;
+}
+#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
+
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index dc1f73b13e74..56a9e63b6c33 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -64,12 +66,155 @@ enum aarch64_insn_imm_type {
AARCH64_INSN_IMM_14,
AARCH64_INSN_IMM_12,
AARCH64_INSN_IMM_9,
+ AARCH64_INSN_IMM_7,
+ AARCH64_INSN_IMM_6,
+ AARCH64_INSN_IMM_S,
+ AARCH64_INSN_IMM_R,
AARCH64_INSN_IMM_MAX
};
+enum aarch64_insn_register_type {
+ AARCH64_INSN_REGTYPE_RT,
+ AARCH64_INSN_REGTYPE_RN,
+ AARCH64_INSN_REGTYPE_RT2,
+ AARCH64_INSN_REGTYPE_RM,
+ AARCH64_INSN_REGTYPE_RD,
+ AARCH64_INSN_REGTYPE_RA,
+};
+
+enum aarch64_insn_register {
+ AARCH64_INSN_REG_0 = 0,
+ AARCH64_INSN_REG_1 = 1,
+ AARCH64_INSN_REG_2 = 2,
+ AARCH64_INSN_REG_3 = 3,
+ AARCH64_INSN_REG_4 = 4,
+ AARCH64_INSN_REG_5 = 5,
+ AARCH64_INSN_REG_6 = 6,
+ AARCH64_INSN_REG_7 = 7,
+ AARCH64_INSN_REG_8 = 8,
+ AARCH64_INSN_REG_9 = 9,
+ AARCH64_INSN_REG_10 = 10,
+ AARCH64_INSN_REG_11 = 11,
+ AARCH64_INSN_REG_12 = 12,
+ AARCH64_INSN_REG_13 = 13,
+ AARCH64_INSN_REG_14 = 14,
+ AARCH64_INSN_REG_15 = 15,
+ AARCH64_INSN_REG_16 = 16,
+ AARCH64_INSN_REG_17 = 17,
+ AARCH64_INSN_REG_18 = 18,
+ AARCH64_INSN_REG_19 = 19,
+ AARCH64_INSN_REG_20 = 20,
+ AARCH64_INSN_REG_21 = 21,
+ AARCH64_INSN_REG_22 = 22,
+ AARCH64_INSN_REG_23 = 23,
+ AARCH64_INSN_REG_24 = 24,
+ AARCH64_INSN_REG_25 = 25,
+ AARCH64_INSN_REG_26 = 26,
+ AARCH64_INSN_REG_27 = 27,
+ AARCH64_INSN_REG_28 = 28,
+ AARCH64_INSN_REG_29 = 29,
+ AARCH64_INSN_REG_FP = 29, /* Frame pointer */
+ AARCH64_INSN_REG_30 = 30,
+ AARCH64_INSN_REG_LR = 30, /* Link register */
+ AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
+ AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */
+};
+
+enum aarch64_insn_variant {
+ AARCH64_INSN_VARIANT_32BIT,
+ AARCH64_INSN_VARIANT_64BIT
+};
+
+enum aarch64_insn_condition {
+ AARCH64_INSN_COND_EQ = 0x0, /* == */
+ AARCH64_INSN_COND_NE = 0x1, /* != */
+ AARCH64_INSN_COND_CS = 0x2, /* unsigned >= */
+ AARCH64_INSN_COND_CC = 0x3, /* unsigned < */
+ AARCH64_INSN_COND_MI = 0x4, /* < 0 */
+ AARCH64_INSN_COND_PL = 0x5, /* >= 0 */
+ AARCH64_INSN_COND_VS = 0x6, /* overflow */
+ AARCH64_INSN_COND_VC = 0x7, /* no overflow */
+ AARCH64_INSN_COND_HI = 0x8, /* unsigned > */
+ AARCH64_INSN_COND_LS = 0x9, /* unsigned <= */
+ AARCH64_INSN_COND_GE = 0xa, /* signed >= */
+ AARCH64_INSN_COND_LT = 0xb, /* signed < */
+ AARCH64_INSN_COND_GT = 0xc, /* signed > */
+ AARCH64_INSN_COND_LE = 0xd, /* signed <= */
+ AARCH64_INSN_COND_AL = 0xe, /* always */
+};
+
enum aarch64_insn_branch_type {
AARCH64_INSN_BRANCH_NOLINK,
AARCH64_INSN_BRANCH_LINK,
+ AARCH64_INSN_BRANCH_RETURN,
+ AARCH64_INSN_BRANCH_COMP_ZERO,
+ AARCH64_INSN_BRANCH_COMP_NONZERO,
+};
+
+enum aarch64_insn_size_type {
+ AARCH64_INSN_SIZE_8,
+ AARCH64_INSN_SIZE_16,
+ AARCH64_INSN_SIZE_32,
+ AARCH64_INSN_SIZE_64,
+};
+
+enum aarch64_insn_ldst_type {
+ AARCH64_INSN_LDST_LOAD_REG_OFFSET,
+ AARCH64_INSN_LDST_STORE_REG_OFFSET,
+ AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX,
+ AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
+ AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
+ AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
+};
+
+enum aarch64_insn_adsb_type {
+ AARCH64_INSN_ADSB_ADD,
+ AARCH64_INSN_ADSB_SUB,
+ AARCH64_INSN_ADSB_ADD_SETFLAGS,
+ AARCH64_INSN_ADSB_SUB_SETFLAGS
+};
+
+enum aarch64_insn_movewide_type {
+ AARCH64_INSN_MOVEWIDE_ZERO,
+ AARCH64_INSN_MOVEWIDE_KEEP,
+ AARCH64_INSN_MOVEWIDE_INVERSE
+};
+
+enum aarch64_insn_bitfield_type {
+ AARCH64_INSN_BITFIELD_MOVE,
+ AARCH64_INSN_BITFIELD_MOVE_UNSIGNED,
+ AARCH64_INSN_BITFIELD_MOVE_SIGNED
+};
+
+enum aarch64_insn_data1_type {
+ AARCH64_INSN_DATA1_REVERSE_16,
+ AARCH64_INSN_DATA1_REVERSE_32,
+ AARCH64_INSN_DATA1_REVERSE_64,
+};
+
+enum aarch64_insn_data2_type {
+ AARCH64_INSN_DATA2_UDIV,
+ AARCH64_INSN_DATA2_SDIV,
+ AARCH64_INSN_DATA2_LSLV,
+ AARCH64_INSN_DATA2_LSRV,
+ AARCH64_INSN_DATA2_ASRV,
+ AARCH64_INSN_DATA2_RORV,
+};
+
+enum aarch64_insn_data3_type {
+ AARCH64_INSN_DATA3_MADD,
+ AARCH64_INSN_DATA3_MSUB,
+};
+
+enum aarch64_insn_logic_type {
+ AARCH64_INSN_LOGIC_AND,
+ AARCH64_INSN_LOGIC_BIC,
+ AARCH64_INSN_LOGIC_ORR,
+ AARCH64_INSN_LOGIC_ORN,
+ AARCH64_INSN_LOGIC_EOR,
+ AARCH64_INSN_LOGIC_EON,
+ AARCH64_INSN_LOGIC_AND_SETFLAGS,
+ AARCH64_INSN_LOGIC_BIC_SETFLAGS
};
#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
@@ -78,13 +223,58 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }
+__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
+__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
+__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000)
+__AARCH64_INSN_FUNCS(ldp_pre, 0x7FC00000, 0x29C00000)
+__AARCH64_INSN_FUNCS(add_imm, 0x7F000000, 0x11000000)
+__AARCH64_INSN_FUNCS(adds_imm, 0x7F000000, 0x31000000)
+__AARCH64_INSN_FUNCS(sub_imm, 0x7F000000, 0x51000000)
+__AARCH64_INSN_FUNCS(subs_imm, 0x7F000000, 0x71000000)
+__AARCH64_INSN_FUNCS(movn, 0x7F800000, 0x12800000)
+__AARCH64_INSN_FUNCS(sbfm, 0x7F800000, 0x13000000)
+__AARCH64_INSN_FUNCS(bfm, 0x7F800000, 0x33000000)
+__AARCH64_INSN_FUNCS(movz, 0x7F800000, 0x52800000)
+__AARCH64_INSN_FUNCS(ubfm, 0x7F800000, 0x53000000)
+__AARCH64_INSN_FUNCS(movk, 0x7F800000, 0x72800000)
+__AARCH64_INSN_FUNCS(add, 0x7F200000, 0x0B000000)
+__AARCH64_INSN_FUNCS(adds, 0x7F200000, 0x2B000000)
+__AARCH64_INSN_FUNCS(sub, 0x7F200000, 0x4B000000)
+__AARCH64_INSN_FUNCS(subs, 0x7F200000, 0x6B000000)
+__AARCH64_INSN_FUNCS(madd, 0x7FE08000, 0x1B000000)
+__AARCH64_INSN_FUNCS(msub, 0x7FE08000, 0x1B008000)
+__AARCH64_INSN_FUNCS(udiv, 0x7FE0FC00, 0x1AC00800)
+__AARCH64_INSN_FUNCS(sdiv, 0x7FE0FC00, 0x1AC00C00)
+__AARCH64_INSN_FUNCS(lslv, 0x7FE0FC00, 0x1AC02000)
+__AARCH64_INSN_FUNCS(lsrv, 0x7FE0FC00, 0x1AC02400)
+__AARCH64_INSN_FUNCS(asrv, 0x7FE0FC00, 0x1AC02800)
+__AARCH64_INSN_FUNCS(rorv, 0x7FE0FC00, 0x1AC02C00)
+__AARCH64_INSN_FUNCS(rev16, 0x7FFFFC00, 0x5AC00400)
+__AARCH64_INSN_FUNCS(rev32, 0x7FFFFC00, 0x5AC00800)
+__AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00)
+__AARCH64_INSN_FUNCS(and, 0x7F200000, 0x0A000000)
+__AARCH64_INSN_FUNCS(bic, 0x7F200000, 0x0A200000)
+__AARCH64_INSN_FUNCS(orr, 0x7F200000, 0x2A000000)
+__AARCH64_INSN_FUNCS(orn, 0x7F200000, 0x2A200000)
+__AARCH64_INSN_FUNCS(eor, 0x7F200000, 0x4A000000)
+__AARCH64_INSN_FUNCS(eon, 0x7F200000, 0x4A200000)
+__AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000)
+__AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000)
__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000)
+__AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000)
__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
+__AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000)
+__AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000)
+__AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
#undef __AARCH64_INSN_FUNCS
@@ -97,8 +287,67 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm);
u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_condition cond);
u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
u32 aarch64_insn_gen_nop(void);
+u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_register offset,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_register base,
+ int offset,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int imm, enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int immr, int imms,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_bitfield_type type);
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+ int imm, int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_movewide_type type);
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data1_type type);
+u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data2_type type);
+u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data3_type type);
+u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_logic_type type);
bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
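
Each generator declared above returns a ready-to-emit 32-bit opcode (or AARCH64_BREAK_FAULT on bad input), and this is the encoder layer the new eBPF JIT in this series is built on. A hedged usage sketch, kernel context assumed, with 'pc' and 'target' supplied by the caller:

/* Hedged sketch (kernel context only): encode two example instructions
 * into 'buf', which is assumed to correspond to address 'pc'.
 */
static void example_emit(unsigned long pc, unsigned long target, u32 *buf)
{
        /* add x0, x1, x2 */
        buf[0] = aarch64_insn_gen_add_sub_shifted_reg(AARCH64_INSN_REG_0,
                                                      AARCH64_INSN_REG_1,
                                                      AARCH64_INSN_REG_2, 0,
                                                      AARCH64_INSN_VARIANT_64BIT,
                                                      AARCH64_INSN_ADSB_ADD);

        /* cbz w3, target: 'target' must lie within +/-1MB of the branch */
        buf[1] = aarch64_insn_gen_comp_branch_imm(pc + 4, target,
                                                  AARCH64_INSN_REG_3,
                                                  AARCH64_INSN_VARIANT_32BIT,
                                                  AARCH64_INSN_BRANCH_COMP_ZERO);
}
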
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index e0ecdcf6632d..f771e8bcad4a 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -243,7 +243,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
* (PHYS_OFFSET and PHYS_MASK taken into account).
*/
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range(unsigned long addr, size_t size);
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index 3c8aafc1082f..f69f69c8120c 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -29,7 +29,7 @@
static inline void arch_kgdb_breakpoint(void)
{
- asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
+ asm ("brk %0" : : "I" (KGDB_COMPILED_DBG_BRK_IMM));
}
extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index cc83520459ed..7fd3e27e3ccc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -122,6 +122,17 @@
#define VTCR_EL2_T0SZ_MASK 0x3f
#define VTCR_EL2_T0SZ_40B 24
+/*
+ * We configure the Stage-2 page tables to always restrict the IPA space to be
+ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
+ * not known to exist and will break with this configuration.
+ *
+ * Note that when using 4K pages, we concatenate two first level page tables
+ * together.
+ *
+ * The magic numbers used for VTTBR_X in this patch can be found in Tables
+ * D4-23 and D4-25 in ARM DDI 0487A.b.
+ */
#ifdef CONFIG_ARM64_64K_PAGES
/*
* Stage2 translation configuration:
@@ -149,7 +160,7 @@
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
#define VTTBR_VMID_SHIFT (48LLU)
#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
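
PHYS_MASK_SHIFT is 48 on arm64, so VTTBR_BADDR_MASK now tracks the architectural physical-address range instead of a hard-coded 40 bits. A runnable comparison of the old and new definitions; the VTTBR_X value for the 4K-page, T0SZ=24 configuration is defined elsewhere in this header and is only assumed here:

#include <stdio.h>

int main(void)
{
        int vttbr_x = 13;                       /* assumed 4K-page value */
        int shift = vttbr_x - 1;                /* VTTBR_BADDR_SHIFT */
        unsigned long long old_mask, new_mask;

        old_mask = ((1ULL << (40 - vttbr_x)) - 1) << shift;     /* before */
        new_mask = ((1ULL << (48 - vttbr_x)) - 1) << shift;     /* PHYS_MASK_SHIFT = 48 */

        printf("old VTTBR_BADDR_MASK = 0x%016llx\n", old_mask);
        printf("new VTTBR_BADDR_MASK = 0x%016llx\n", new_mask);
        return 0;
}
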
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fdc3e21abd8d..5674a55b5518 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -174,6 +174,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
+ return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+{
return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
}
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e10c45a578e3..2012c4ba8d67 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -22,6 +22,8 @@
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__
+#include <linux/types.h>
+#include <linux/kvm_types.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -41,8 +43,7 @@
#define KVM_VCPU_MAX_FEATURES 3
-struct kvm_vcpu;
-int kvm_target_cpu(void);
+int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -164,25 +165,23 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-struct kvm_one_reg;
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-struct kvm;
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
/* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
+ unsigned long end)
{
return 0;
}
@@ -192,8 +191,13 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
return 0;
}
+static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
+ unsigned long address)
+{
+}
+
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
u64 kvm_call_hyp(void *hypfn, ...);
@@ -244,4 +248,10 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
}
}
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 8e138c7c53ac..a030d163840b 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -59,10 +59,9 @@
#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
/*
- * Align KVM with the kernel's view of physical memory. Should be
- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
+ * We currently only support a 40bit IPA.
*/
-#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
+#define KVM_PHYS_SHIFT (40)
#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
@@ -93,19 +92,6 @@ void kvm_clear_hyp_idmap(void);
#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
-static inline bool kvm_is_write_fault(unsigned long esr)
-{
- unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
-
- if (esr_ec == ESR_EL2_EC_IABT)
- return false;
-
- if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
- return false;
-
- return true;
-}
-
static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 453a179469a3..5279e5733386 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -26,13 +26,13 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
- register unsigned long *sp asm ("sp");
/*
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
+ asm("mrs %0, tpidr_el1" : "=r" (off) :
+ "Q" (*(const unsigned long *)current_stack_pointer));
return off;
}
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ffe1ba0506d1..d58e40cde88e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -149,46 +149,51 @@ extern struct page *empty_zero_page;
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-static inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
- pte_val(pte) &= ~PTE_WRITE;
+ pte_val(pte) &= ~pgprot_val(prot);
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
- pte_val(pte) |= PTE_WRITE;
+ pte_val(pte) |= pgprot_val(prot);
return pte;
}
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return clear_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return set_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
static inline pte_t pte_mkclean(pte_t pte)
{
- pte_val(pte) &= ~PTE_DIRTY;
- return pte;
+ return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}
static inline pte_t pte_mkdirty(pte_t pte)
{
- pte_val(pte) |= PTE_DIRTY;
- return pte;
+ return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}
static inline pte_t pte_mkold(pte_t pte)
{
- pte_val(pte) &= ~PTE_AF;
- return pte;
+ return clear_pte_bit(pte, __pgprot(PTE_AF));
}
static inline pte_t pte_mkyoung(pte_t pte)
{
- pte_val(pte) |= PTE_AF;
- return pte;
+ return set_pte_bit(pte, __pgprot(PTE_AF));
}
static inline pte_t pte_mkspecial(pte_t pte)
{
- pte_val(pte) |= PTE_SPECIAL;
- return pte;
+ return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}
static inline void set_pte(pte_t *ptep, pte_t pte)
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 0c657bb54597..9a8fd84f8fb2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -32,6 +32,8 @@ extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+ unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index e9c149c042e0..456d67c1f0fa 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -21,6 +21,7 @@ struct sleep_save_sp {
phys_addr_t save_ptr_stash_phys;
};
+extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
extern int cpu_suspend(unsigned long);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 45108d802f5e..459bf8e53208 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -69,14 +69,19 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+/*
* how to get the thread information struct from C
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
static inline struct thread_info *current_thread_info(void)
{
- register unsigned long sp asm ("sp");
- return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+ return (struct thread_info *)
+ (current_stack_pointer & ~(THREAD_SIZE - 1));
}
#define thread_saved_pc(tsk) \
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index e633ff8cdec8..8e38878c87c6 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -37,6 +37,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -159,6 +160,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index df7ef8768fc2..6e9538c2d28a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -26,6 +26,7 @@ arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
new file mode 100644
index 000000000000..19d17f51db37
--- /dev/null
+++ b/arch/arm64/kernel/cpuidle.c
@@ -0,0 +1,31 @@
+/*
+ * ARM64 CPU idle arch support
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cpu_ops.h>
+
+int cpu_init_idle(unsigned int cpu)
+{
+ int ret = -EOPNOTSUPP;
+ struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+
+ if (!cpu_node)
+ return -ENODEV;
+
+ if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
+ ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
+
+ of_node_put(cpu_node);
+ return ret;
+}
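
cpu_init_idle() is the arch hook a generic cpuidle driver is expected to call once per CPU before registering idle states; it looks up the CPU's device-tree node and defers to the enable-method's new cpu_init_idle operation (PSCI gains one elsewhere in this series). A hedged sketch of the calling side; the driver init function below is hypothetical, not part of this patch:

/* Hedged sketch (kernel context only). */
static int __init example_idle_driver_init(void)
{
        int cpu, ret;

        for_each_possible_cpu(cpu) {
                ret = cpu_init_idle(cpu);
                if (ret) {
                        pr_err("CPU %d: idle state init failed (%d)\n", cpu, ret);
                        return ret;
                }
        }
        /* ... register the cpuidle driver and its states here ... */
        return 0;
}
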
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 177169623026..504fdaa8367e 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -20,8 +20,10 @@
#include <asm/cputype.h>
#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>
@@ -47,8 +49,18 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
unsigned int cpu = smp_processor_id();
u32 l1ip = CTR_L1IP(info->reg_ctr);
- if (l1ip != ICACHE_POLICY_PIPT)
- set_bit(ICACHEF_ALIASING, &__icache_flags);
+ if (l1ip != ICACHE_POLICY_PIPT) {
+ /*
+ * VIPT caches are non-aliasing if the VA always equals the PA
+ * in all bit positions that are covered by the index. This is
+ * the case if the size of a way (# of sets * line size) does
+ * not exceed PAGE_SIZE.
+ */
+ u32 waysize = icache_get_numsets() * icache_get_linesize();
+
+ if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
+ set_bit(ICACHEF_ALIASING, &__icache_flags);
+ }
if (l1ip == ICACHE_POLICY_AIVIVT)
set_bit(ICACHEF_AIVIVT, &__icache_flags);
@@ -190,3 +202,15 @@ void __init cpuinfo_store_boot_cpu(void)
boot_cpu_data = *info;
}
+
+u64 __attribute_const__ icache_get_ccsidr(void)
+{
+ u64 ccsidr;
+
+ WARN_ON(preemptible());
+
+ /* Select L1 I-cache and read its size ID register */
+ asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1"
+ : "=r"(ccsidr) : "r"(1L));
+ return ccsidr;
+}
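
A worked example of the aliasing test above, assuming 4 KB pages: a VIPT I-cache with 64-byte lines and 128 sets has an 8 KB way, so the set index uses virtual-address bits above the page offset and ICACHEF_ALIASING must be set; with 64 sets the way shrinks to 4 KB, every index bit is fixed by the page offset, and the cache is treated as non-aliasing. The same arithmetic as a runnable snippet:

#include <stdio.h>

int main(void)
{
        long page_size = 4096;                  /* 4 KB pages assumed */
        long linesize = 64, numsets = 128;      /* hypothetical VIPT I-cache */
        long waysize = numsets * linesize;      /* bytes indexed within one way */

        printf("way size = %ld bytes: %s\n", waysize,
               waysize > page_size ? "aliasing possible" : "non-aliasing");
        return 0;
}
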
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 1317fef8dde9..d27dd982ff26 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -28,20 +28,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
kernel_size = _edata - _text;
if (*image_addr != (dram_base + TEXT_OFFSET)) {
kernel_memsize = kernel_size + (_end - _edata);
- status = efi_relocate_kernel(sys_table, image_addr,
- kernel_size, kernel_memsize,
- dram_base + TEXT_OFFSET,
- PAGE_SIZE);
+ status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
+ SZ_2M, reserve_addr);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to relocate kernel\n");
return status;
}
- if (*image_addr != (dram_base + TEXT_OFFSET)) {
- pr_efi_err(sys_table, "Failed to alloc kernel memory\n");
- efi_free(sys_table, kernel_memsize, *image_addr);
- return EFI_LOAD_ERROR;
- }
- *image_size = kernel_memsize;
+ memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
+ kernel_size);
+ *image_addr = *reserve_addr + TEXT_OFFSET;
+ *reserve_size = kernel_memsize + TEXT_OFFSET;
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f0b5e5120a87..726b910fe6ec 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -324,7 +324,6 @@ el1_dbg:
mrs x0, far_el1
mov x2, sp // struct pt_regs
bl do_debug_exception
- enable_dbg
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 7924d73b6476..cf8556ae09d0 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -58,7 +58,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
u32 new;
pc = (unsigned long)&ftrace_call;
- new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+ new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
+ AARCH64_INSN_BRANCH_LINK);
return ftrace_modify_code(pc, 0, new, false);
}
@@ -72,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
u32 old, new;
old = aarch64_insn_gen_nop();
- new = aarch64_insn_gen_branch_imm(pc, addr, true);
+ new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
return ftrace_modify_code(pc, old, new, true);
}
@@ -86,7 +87,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
u32 old, new;
- old = aarch64_insn_gen_branch_imm(pc, addr, true);
+ old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, true);
@@ -154,7 +155,8 @@ static int ftrace_modify_graph_caller(bool enable)
u32 branch, nop;
branch = aarch64_insn_gen_branch_imm(pc,
- (unsigned long)ftrace_graph_caller, false);
+ (unsigned long)ftrace_graph_caller,
+ AARCH64_INSN_BRANCH_LINK);
nop = aarch64_insn_gen_nop();
if (enable)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 873069056229..0a6e4f924df8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -151,7 +151,7 @@ optional_header:
.short 0x20b // PE32+ format
.byte 0x02 // MajorLinkerVersion
.byte 0x14 // MinorLinkerVersion
- .long _edata - stext // SizeOfCode
+ .long _end - stext // SizeOfCode
.long 0 // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
.long efi_stub_entry - efi_head // AddressOfEntryPoint
@@ -169,7 +169,7 @@ extra_header_fields:
.short 0 // MinorSubsystemVersion
.long 0 // Win32VersionValue
- .long _edata - efi_head // SizeOfImage
+ .long _end - efi_head // SizeOfImage
// Everything before the kernel image is considered part of the header
.long stext - efi_head // SizeOfHeaders
@@ -216,7 +216,7 @@ section_table:
.byte 0
.byte 0
.byte 0 // end of 0 padding of section name
- .long _edata - stext // VirtualSize
+ .long _end - stext // VirtualSize
.long stext - efi_head // VirtualAddress
.long _edata - stext // SizeOfRawData
.long stext - efi_head // PointerToRawData
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 92f36835486b..e007714ded04 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -2,6 +2,8 @@
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -20,9 +22,14 @@
#include <linux/smp.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>
+
#include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
#include <asm/insn.h>
+#define AARCH64_INSN_SF_BIT BIT(31)
+#define AARCH64_INSN_N_BIT BIT(22)
+
static int aarch64_insn_encoding_class[] = {
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
@@ -251,6 +258,19 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
mask = BIT(9) - 1;
shift = 12;
break;
+ case AARCH64_INSN_IMM_7:
+ mask = BIT(7) - 1;
+ shift = 15;
+ break;
+ case AARCH64_INSN_IMM_6:
+ case AARCH64_INSN_IMM_S:
+ mask = BIT(6) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_R:
+ mask = BIT(6) - 1;
+ shift = 16;
+ break;
default:
pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
type);
@@ -264,10 +284,76 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
return insn;
}
-u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
- enum aarch64_insn_branch_type type)
+static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
+ u32 insn,
+ enum aarch64_insn_register reg)
+{
+ int shift;
+
+ if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
+ pr_err("%s: unknown register encoding %d\n", __func__, reg);
+ return 0;
+ }
+
+ switch (type) {
+ case AARCH64_INSN_REGTYPE_RT:
+ case AARCH64_INSN_REGTYPE_RD:
+ shift = 0;
+ break;
+ case AARCH64_INSN_REGTYPE_RN:
+ shift = 5;
+ break;
+ case AARCH64_INSN_REGTYPE_RT2:
+ case AARCH64_INSN_REGTYPE_RA:
+ shift = 10;
+ break;
+ case AARCH64_INSN_REGTYPE_RM:
+ shift = 16;
+ break;
+ default:
+ pr_err("%s: unknown register type encoding %d\n", __func__,
+ type);
+ return 0;
+ }
+
+ insn &= ~(GENMASK(4, 0) << shift);
+ insn |= reg << shift;
+
+ return insn;
+}
+
+static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
+ u32 insn)
+{
+ u32 size;
+
+ switch (type) {
+ case AARCH64_INSN_SIZE_8:
+ size = 0;
+ break;
+ case AARCH64_INSN_SIZE_16:
+ size = 1;
+ break;
+ case AARCH64_INSN_SIZE_32:
+ size = 2;
+ break;
+ case AARCH64_INSN_SIZE_64:
+ size = 3;
+ break;
+ default:
+ pr_err("%s: unknown size encoding %d\n", __func__, type);
+ return 0;
+ }
+
+ insn &= ~GENMASK(31, 30);
+ insn |= size << 30;
+
+ return insn;
+}
+
+static inline long branch_imm_common(unsigned long pc, unsigned long addr,
+ long range)
{
- u32 insn;
long offset;
/*
@@ -276,23 +362,97 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
*/
BUG_ON((pc & 0x3) || (addr & 0x3));
+ offset = ((long)addr - (long)pc);
+ BUG_ON(offset < -range || offset >= range);
+
+ return offset;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
/*
* B/BL support [-128M, 128M) offset
* ARM64 virtual address arrangement guarantees all kernel and module
* texts are within +/-128M.
*/
- offset = ((long)addr - (long)pc);
- BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+ offset = branch_imm_common(pc, addr, SZ_128M);
- if (type == AARCH64_INSN_BRANCH_LINK)
+ switch (type) {
+ case AARCH64_INSN_BRANCH_LINK:
insn = aarch64_insn_get_bl_value();
- else
+ break;
+ case AARCH64_INSN_BRANCH_NOLINK:
insn = aarch64_insn_get_b_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
offset >> 2);
}
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
+ offset = branch_imm_common(pc, addr, SZ_1M);
+
+ switch (type) {
+ case AARCH64_INSN_BRANCH_COMP_ZERO:
+ insn = aarch64_insn_get_cbz_value();
+ break;
+ case AARCH64_INSN_BRANCH_COMP_NONZERO:
+ insn = aarch64_insn_get_cbnz_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
+}
+
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_condition cond)
+{
+ u32 insn;
+ long offset;
+
+ offset = branch_imm_common(pc, addr, SZ_1M);
+
+ insn = aarch64_insn_get_bcond_value();
+
+ BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
+ insn |= cond;
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
+}
+
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
return aarch64_insn_get_hint_value() | op;
@@ -302,3 +462,500 @@ u32 __kprobes aarch64_insn_gen_nop(void)
{
return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
+
+u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_BRANCH_NOLINK:
+ insn = aarch64_insn_get_br_value();
+ break;
+ case AARCH64_INSN_BRANCH_LINK:
+ insn = aarch64_insn_get_blr_value();
+ break;
+ case AARCH64_INSN_BRANCH_RETURN:
+ insn = aarch64_insn_get_ret_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
+}
+
+u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_register offset,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
+ insn = aarch64_insn_get_ldr_reg_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_REG_OFFSET:
+ insn = aarch64_insn_get_str_reg_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
+ offset);
+}
+
+u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_register base,
+ int offset,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_ldst_type type)
+{
+ u32 insn;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
+ insn = aarch64_insn_get_ldp_pre_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
+ insn = aarch64_insn_get_stp_pre_value();
+ break;
+ case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
+ insn = aarch64_insn_get_ldp_post_value();
+ break;
+ case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
+ insn = aarch64_insn_get_stp_post_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ /* offset must be multiples of 4 in the range [-256, 252] */
+ BUG_ON(offset & 0x3);
+ BUG_ON(offset < -256 || offset > 252);
+ shift = 2;
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ /* offset must be multiples of 8 in the range [-512, 504] */
+ BUG_ON(offset & 0x7);
+ BUG_ON(offset < -512 || offset > 504);
+ shift = 3;
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ reg1);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
+ reg2);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ base);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
+ offset >> shift);
+}
+
+u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int imm, enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_ADSB_ADD:
+ insn = aarch64_insn_get_add_imm_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB:
+ insn = aarch64_insn_get_sub_imm_value();
+ break;
+ case AARCH64_INSN_ADSB_ADD_SETFLAGS:
+ insn = aarch64_insn_get_adds_imm_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB_SETFLAGS:
+ insn = aarch64_insn_get_subs_imm_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ BUG_ON(imm & ~(SZ_4K - 1));
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
+}
+
+u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ int immr, int imms,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_bitfield_type type)
+{
+ u32 insn;
+ u32 mask;
+
+ switch (type) {
+ case AARCH64_INSN_BITFIELD_MOVE:
+ insn = aarch64_insn_get_bfm_value();
+ break;
+ case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
+ insn = aarch64_insn_get_ubfm_value();
+ break;
+ case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
+ insn = aarch64_insn_get_sbfm_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ mask = GENMASK(4, 0);
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
+ mask = GENMASK(5, 0);
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ BUG_ON(immr & ~mask);
+ BUG_ON(imms & ~mask);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
+}
+
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+ int imm, int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_movewide_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_MOVEWIDE_ZERO:
+ insn = aarch64_insn_get_movz_value();
+ break;
+ case AARCH64_INSN_MOVEWIDE_KEEP:
+ insn = aarch64_insn_get_movk_value();
+ break;
+ case AARCH64_INSN_MOVEWIDE_INVERSE:
+ insn = aarch64_insn_get_movn_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ BUG_ON(imm & ~(SZ_64K - 1));
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ BUG_ON(shift != 0 && shift != 16);
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
+ shift != 48);
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn |= (shift >> 4) << 21;
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
+}
+
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_adsb_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_ADSB_ADD:
+ insn = aarch64_insn_get_add_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB:
+ insn = aarch64_insn_get_sub_value();
+ break;
+ case AARCH64_INSN_ADSB_ADD_SETFLAGS:
+ insn = aarch64_insn_get_adds_value();
+ break;
+ case AARCH64_INSN_ADSB_SUB_SETFLAGS:
+ insn = aarch64_insn_get_subs_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ BUG_ON(shift & ~(SZ_32 - 1));
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ BUG_ON(shift & ~(SZ_64 - 1));
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
+}
+
+u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data1_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_DATA1_REVERSE_16:
+ insn = aarch64_insn_get_rev16_value();
+ break;
+ case AARCH64_INSN_DATA1_REVERSE_32:
+ insn = aarch64_insn_get_rev32_value();
+ break;
+ case AARCH64_INSN_DATA1_REVERSE_64:
+ BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
+ insn = aarch64_insn_get_rev64_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+}
+
+u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data2_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_DATA2_UDIV:
+ insn = aarch64_insn_get_udiv_value();
+ break;
+ case AARCH64_INSN_DATA2_SDIV:
+ insn = aarch64_insn_get_sdiv_value();
+ break;
+ case AARCH64_INSN_DATA2_LSLV:
+ insn = aarch64_insn_get_lslv_value();
+ break;
+ case AARCH64_INSN_DATA2_LSRV:
+ insn = aarch64_insn_get_lsrv_value();
+ break;
+ case AARCH64_INSN_DATA2_ASRV:
+ insn = aarch64_insn_get_asrv_value();
+ break;
+ case AARCH64_INSN_DATA2_RORV:
+ insn = aarch64_insn_get_rorv_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+}
+
+u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg1,
+ enum aarch64_insn_register reg2,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_data3_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_DATA3_MADD:
+ insn = aarch64_insn_get_madd_value();
+ break;
+ case AARCH64_INSN_DATA3_MSUB:
+ insn = aarch64_insn_get_msub_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ reg1);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
+ reg2);
+}
+
+u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
+ enum aarch64_insn_register src,
+ enum aarch64_insn_register reg,
+ int shift,
+ enum aarch64_insn_variant variant,
+ enum aarch64_insn_logic_type type)
+{
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_LOGIC_AND:
+ insn = aarch64_insn_get_and_value();
+ break;
+ case AARCH64_INSN_LOGIC_BIC:
+ insn = aarch64_insn_get_bic_value();
+ break;
+ case AARCH64_INSN_LOGIC_ORR:
+ insn = aarch64_insn_get_orr_value();
+ break;
+ case AARCH64_INSN_LOGIC_ORN:
+ insn = aarch64_insn_get_orn_value();
+ break;
+ case AARCH64_INSN_LOGIC_EOR:
+ insn = aarch64_insn_get_eor_value();
+ break;
+ case AARCH64_INSN_LOGIC_EON:
+ insn = aarch64_insn_get_eon_value();
+ break;
+ case AARCH64_INSN_LOGIC_AND_SETFLAGS:
+ insn = aarch64_insn_get_ands_value();
+ break;
+ case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
+ insn = aarch64_insn_get_bics_value();
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ switch (variant) {
+ case AARCH64_INSN_VARIANT_32BIT:
+ BUG_ON(shift & ~(SZ_32 - 1));
+ break;
+ case AARCH64_INSN_VARIANT_64BIT:
+ insn |= AARCH64_INSN_SF_BIT;
+ BUG_ON(shift & ~(SZ_64 - 1));
+ break;
+ default:
+ BUG_ON(1);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
+}
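
The generator functions above are the building blocks that the BPF JIT later in this series wraps in its A64_* macros. As a minimal sketch, assuming only the enums and helpers declared in asm/insn.h, a 64-bit multiply can be synthesized as an MADD with the zero register as the addend; the returned word can then be written into kernel text with aarch64_insn_patch_text() or emitted into a JIT image as the BPF compiler below does:

	/* Sketch: encode "mul x0, x1, x2" as "madd x0, x1, x2, xzr". */
	u32 insn = aarch64_insn_gen_data3(AARCH64_INSN_REG_0,  /* Rd */
					  AARCH64_INSN_REG_ZR, /* Ra */
					  AARCH64_INSN_REG_1,  /* Rn */
					  AARCH64_INSN_REG_2,  /* Rm */
					  AARCH64_INSN_VARIANT_64BIT,
					  AARCH64_INSN_DATA3_MADD);
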
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0f08dfd69ebc..dfa6e3e74fdd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
return false;
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
ret = true;
+ }
- /*
- * when using forced irq_set_affinity we must ensure that the cpu
- * being offlined is not present in the affinity mask, it may be
- * selected as the target CPU otherwise
- */
- affinity = cpu_online_mask;
c = irq_data_get_irq_chip(d);
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
cpumask_copy(d->affinity, affinity);
return ret;
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 75c9cf1aafee..a0d10c55f307 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -235,13 +235,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
- .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
+ .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM),
.fn = kgdb_brk_fn
};
static struct break_hook kgdb_compiled_brkpt_hook = {
.esr_mask = 0xffffffff,
- .esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
+ .esr_val = DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM),
.fn = kgdb_compiled_brk_fn
};
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index baf5afb7e6a0..aa29ecb4f800 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1276,7 +1276,7 @@ arch_initcall(cpu_pmu_reset);
/*
* PMU platform driver and devicetree bindings.
*/
-static struct of_device_id armpmu_of_device_ids[] = {
+static const struct of_device_id armpmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3"},
{},
};
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1309d64aa926..89f41f7d27dd 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -57,36 +57,10 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
-static void setup_restart(void)
-{
- /*
- * Tell the mm system that we are going to reboot -
- * we may need it to insert some 1:1 mappings so that
- * soft boot works.
- */
- setup_mm_for_reboot();
-
- /* Clean and invalidate caches */
- flush_cache_all();
-
- /* Turn D-cache off */
- cpu_cache_off();
-
- /* Push out any further dirty data, and ensure cache is empty */
- flush_cache_all();
-}
-
void soft_restart(unsigned long addr)
{
- typedef void (*phys_reset_t)(unsigned long);
- phys_reset_t phys_reset;
-
- setup_restart();
-
- /* Switch to the identity mapping */
- phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
- phys_reset(addr);
-
+ setup_mm_for_reboot();
+ cpu_soft_restart(virt_to_phys(cpu_reset), addr);
/* Should never get here */
BUG();
}
@@ -230,9 +204,27 @@ void exit_thread(void)
{
}
+static void tls_thread_flush(void)
+{
+ asm ("msr tpidr_el0, xzr");
+
+ if (is_compat_task()) {
+ current->thread.tp_value = 0;
+
+ /*
+ * We need to ensure ordering between the shadow state and the
+ * hardware state, so that we don't corrupt the hardware state
+ * with a stale shadow state during context switch.
+ */
+ barrier();
+ asm ("msr tpidrro_el0, xzr");
+ }
+}
+
void flush_thread(void)
{
fpsimd_flush_thread();
+ tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 553954771a67..866c1c821860 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -21,6 +21,7 @@
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <uapi/linux/psci.h>
#include <asm/compiler.h>
@@ -28,6 +29,7 @@
#include <asm/errno.h>
#include <asm/psci.h>
#include <asm/smp_plat.h>
+#include <asm/suspend.h>
#include <asm/system_misc.h>
#define PSCI_POWER_STATE_TYPE_STANDBY 0
@@ -65,6 +67,8 @@ enum psci_function {
PSCI_FN_MAX,
};
+static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
+
static u32 psci_function_id[PSCI_FN_MAX];
static int psci_to_linux_errno(int errno)
@@ -93,6 +97,18 @@ static u32 psci_power_state_pack(struct psci_power_state state)
& PSCI_0_2_POWER_STATE_AFFL_MASK);
}
+static void psci_power_state_unpack(u32 power_state,
+ struct psci_power_state *state)
+{
+ state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
+ PSCI_0_2_POWER_STATE_ID_SHIFT;
+ state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
+ PSCI_0_2_POWER_STATE_TYPE_SHIFT;
+ state->affinity_level =
+ (power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
+ PSCI_0_2_POWER_STATE_AFFL_SHIFT;
+}
+
/*
* The following two functions are invoked via the invoke_psci_fn pointer
* and will not be inlined, allowing us to piggyback on the AAPCS.
@@ -199,6 +215,63 @@ static int psci_migrate_info_type(void)
return err;
}
+static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
+ unsigned int cpu)
+{
+ int i, ret, count = 0;
+ struct psci_power_state *psci_states;
+ struct device_node *state_node;
+
+ /*
+	 * If the PSCI cpu_suspend function hook has not been initialized,
+	 * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ /* Count idle states */
+ while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+ count))) {
+ count++;
+ of_node_put(state_node);
+ }
+
+ if (!count)
+ return -ENODEV;
+
+ psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ if (!psci_states)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ u32 psci_power_state;
+
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+ ret = of_property_read_u32(state_node,
+ "arm,psci-suspend-param",
+ &psci_power_state);
+ if (ret) {
+ pr_warn(" * %s missing arm,psci-suspend-param property\n",
+ state_node->full_name);
+ of_node_put(state_node);
+ goto free_mem;
+ }
+
+ of_node_put(state_node);
+ pr_debug("psci-power-state %#x index %d\n", psci_power_state,
+ i);
+ psci_power_state_unpack(psci_power_state, &psci_states[i]);
+ }
+ /* Idle states parsed correctly, initialize per-cpu pointer */
+ per_cpu(psci_power_state, cpu) = psci_states;
+ return 0;
+
+free_mem:
+ kfree(psci_states);
+ return ret;
+}
+
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
@@ -436,8 +509,39 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
#endif
#endif
+static int psci_suspend_finisher(unsigned long index)
+{
+ struct psci_power_state *state = __get_cpu_var(psci_power_state);
+
+ return psci_ops.cpu_suspend(state[index - 1],
+ virt_to_phys(cpu_resume));
+}
+
+static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
+{
+ int ret;
+ struct psci_power_state *state = __get_cpu_var(psci_power_state);
+ /*
+	 * idle state index 0 corresponds to wfi and should never be passed
+	 * to the cpu_suspend operations
+ */
+ if (WARN_ON_ONCE(!index))
+ return -EINVAL;
+
+	if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
+ ret = psci_ops.cpu_suspend(state[index - 1], 0);
+ else
+ ret = __cpu_suspend(index, psci_suspend_finisher);
+
+ return ret;
+}
+
const struct cpu_operations cpu_psci_ops = {
.name = "psci",
+#ifdef CONFIG_CPU_IDLE
+ .cpu_init_idle = cpu_psci_cpu_init_idle,
+ .cpu_suspend = cpu_psci_cpu_suspend,
+#endif
#ifdef CONFIG_SMP
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,
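
For illustration, here is how psci_power_state_unpack() above decomposes a typical "arm,psci-suspend-param" value (the example value is hypothetical; the field layout follows the PSCI 0.2 masks in uapi/linux/psci.h):

	/* power_state = 0x1010000 (example value only):
	 *   id             = (0x1010000 & 0xffff)             = 0x0
	 *   type           = (0x1010000 & (0x1 << 16)) >> 16   = 1  -> POWER_DOWN
	 *   affinity_level = (0x1010000 & (0x3 << 24)) >> 24   = 1  -> e.g. cluster
	 */
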
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 89102a6ffad5..6c4fd2810ecb 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -36,13 +36,12 @@ void *return_address(unsigned int level)
{
struct return_address_data data;
struct stackframe frame;
- register unsigned long current_sp asm ("sp");
data.level = level + 2;
data.addr = NULL;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.pc = (unsigned long)return_address; /* dummy */
walk_stackframe(&frame, save_return_addr, &data);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index edb146d01857..2437196cc5d4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -365,11 +365,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
{
- /*
- * Unmask asynchronous aborts early to catch possible system errors.
- */
- local_async_enable();
-
setup_processor();
setup_machine_fdt(__fdt_pointer);
@@ -385,6 +380,12 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
+ /*
+ * Unmask asynchronous aborts after bringing up possible earlycon.
+	 * (so that possible System Errors are reported as soon as we are
+	 * able to report them)
+ */
+ local_async_enable();
+
efi_init();
arm64_memblock_init();
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index b1925729c692..a564b440416a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -49,28 +49,39 @@
orr \dst, \dst, \mask // dst|=(aff3>>rs3)
.endm
/*
- * Save CPU state for a suspend. This saves callee registers, and allocates
- * space on the kernel stack to save the CPU specific registers + some
- * other data for resume.
+ * Save CPU state for a suspend and execute the suspend finisher.
+ * On success it will return 0 through cpu_resume - ie through a CPU
+ * soft/hard reboot from the reset vector.
+ * On failure it returns the suspend finisher return value, or forces
+ * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
+ * is not allowed to return; if it does, this must be considered a failure).
+ * It saves callee registers, and allocates space on the kernel stack
+ * to save the CPU specific registers + some other data for resume.
*
* x0 = suspend finisher argument
+ * x1 = suspend finisher function pointer
*/
-ENTRY(__cpu_suspend)
+ENTRY(__cpu_suspend_enter)
stp x29, lr, [sp, #-96]!
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
+ /*
+ * Stash suspend finisher and its argument in x20 and x19
+ */
+ mov x19, x0
+ mov x20, x1
mov x2, sp
sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
- mov x1, sp
+ mov x0, sp
/*
- * x1 now points to struct cpu_suspend_ctx allocated on the stack
+ * x0 now points to struct cpu_suspend_ctx allocated on the stack
*/
- str x2, [x1, #CPU_CTX_SP]
- ldr x2, =sleep_save_sp
- ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
+ str x2, [x0, #CPU_CTX_SP]
+ ldr x1, =sleep_save_sp
+ ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
#ifdef CONFIG_SMP
mrs x7, mpidr_el1
ldr x9, =mpidr_hash
@@ -82,11 +93,21 @@ ENTRY(__cpu_suspend)
ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
- add x2, x2, x8, lsl #3
+ add x1, x1, x8, lsl #3
#endif
- bl __cpu_suspend_finisher
+ bl __cpu_suspend_save
+ /*
+ * Grab suspend finisher in x20 and its argument in x19
+ */
+ mov x0, x19
+ mov x1, x20
+ /*
+ * We are ready for power down, fire off the suspend finisher
+ * in x1, with argument in x0
+ */
+ blr x1
/*
- * Never gets here, unless suspend fails.
+ * Never gets here, unless suspend finisher fails.
* Successful cpu_suspend should return from cpu_resume, returning
* through this code path is considered an error
* If the return value is set to 0 force x0 = -EOPNOTSUPP
@@ -103,7 +124,7 @@ ENTRY(__cpu_suspend)
ldp x27, x28, [sp, #80]
ldp x29, lr, [sp], #96
ret
-ENDPROC(__cpu_suspend)
+ENDPROC(__cpu_suspend_enter)
.ltorg
/*
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 0347d38eea29..4f93c67e63de 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
+#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
@@ -65,12 +66,21 @@ static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
- void **release_addr;
+ __le64 __iomem *release_addr;
if (!cpu_release_addr[cpu])
return -ENODEV;
- release_addr = __va(cpu_release_addr[cpu]);
+ /*
+ * The cpu-release-addr may or may not be inside the linear mapping.
+ * As ioremap_cache will either give us a new mapping or reuse the
+ * existing linear mapping, we can use it to cover both cases. In
+ * either case the memory will be MT_NORMAL.
+ */
+ release_addr = ioremap_cache(cpu_release_addr[cpu],
+ sizeof(*release_addr));
+ if (!release_addr)
+ return -ENOMEM;
/*
* We write the release address as LE regardless of the native
@@ -79,15 +89,17 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
	 * boot-loader's endianness before jumping. This is mandated by
* the boot protocol.
*/
- release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
-
- __flush_dcache_area(release_addr, sizeof(release_addr[0]));
+ writeq_relaxed(__pa(secondary_holding_pen), release_addr);
+ __flush_dcache_area((__force void *)release_addr,
+ sizeof(*release_addr));
/*
* Send an event to wake up the secondary CPU.
*/
sev();
+ iounmap(release_addr);
+
return 0;
}
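
The writeq_relaxed() used above is an I/O accessor and therefore always stores little-endian on arm64, which is what the boot protocol requires for the release address. As a sketch, the explicit equivalent would be something like:

	/* Sketch of the equivalent explicit form of the store above: */
	__raw_writeq((__force u64)cpu_to_le64(__pa(secondary_holding_pen)),
		     release_addr);
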
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 55437ba1f5a4..407991bf79f5 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -111,10 +111,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
- register unsigned long current_sp asm("sp");
data.no_sched_functions = 0;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 55a99b9a97e0..13ad4dbb1615 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -9,22 +9,19 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
-extern int __cpu_suspend(unsigned long);
+extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
/*
- * This is called by __cpu_suspend() to save the state, and do whatever
+ * This is called by __cpu_suspend_enter() to save the state, and do whatever
* flushing is required to ensure that when the CPU goes to sleep we have
* the necessary data available when the caches are not searched.
*
- * @arg: Argument to pass to suspend operations
- * @ptr: CPU context virtual address
- * @save_ptr: address of the location where the context physical address
- * must be saved
+ * ptr: CPU context virtual address
+ * save_ptr: address of the location where the context physical address
+ * must be saved
*/
-int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
- phys_addr_t *save_ptr)
+void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
+ phys_addr_t *save_ptr)
{
- int cpu = smp_processor_id();
-
*save_ptr = virt_to_phys(ptr);
cpu_do_suspend(ptr);
@@ -35,8 +32,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
*/
__flush_dcache_area(ptr, sizeof(*ptr));
__flush_dcache_area(save_ptr, sizeof(*save_ptr));
-
- return cpu_ops[cpu]->cpu_suspend(arg);
}
/*
@@ -56,15 +51,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
}
/**
- * cpu_suspend
+ * cpu_suspend() - function to enter a low-power state
+ * @arg: argument to pass to CPU suspend operations
*
- * @arg: argument to pass to the finisher function
+ * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
+ * operations back-end error code otherwise.
*/
int cpu_suspend(unsigned long arg)
{
- struct mm_struct *mm = current->active_mm;
- int ret, cpu = smp_processor_id();
- unsigned long flags;
+ int cpu = smp_processor_id();
/*
* If cpu_ops have not been registered or suspend
@@ -72,6 +67,21 @@ int cpu_suspend(unsigned long arg)
*/
if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
return -EOPNOTSUPP;
+ return cpu_ops[cpu]->cpu_suspend(arg);
+}
+
+/*
+ * __cpu_suspend
+ *
+ * arg: argument to pass to the finisher function
+ * fn: finisher function pointer
+ *
+ */
+int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+ struct mm_struct *mm = current->active_mm;
+ int ret;
+ unsigned long flags;
/*
* From this point debug exceptions are disabled to prevent
@@ -86,7 +96,7 @@ int cpu_suspend(unsigned long arg)
* page tables, so that the thread address space is properly
* set-up on function return.
*/
- ret = __cpu_suspend(arg);
+ ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all();
@@ -95,7 +105,7 @@ int cpu_suspend(unsigned long arg)
* Restore per-cpu offset before any kernel
* subsystem relying on it has a chance to run.
*/
- set_my_cpu_offset(per_cpu_offset(cpu));
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
/*
* Restore HW breakpoint registers to sane values
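
Taken together, cpu_suspend() now only dispatches to the per-CPU cpu_ops back-end, while __cpu_suspend() wraps __cpu_suspend_enter() around an arbitrary finisher that must not return on success. A minimal sketch of a back-end built on this API follows; my_firmware_enter_idle() is a hypothetical firmware call used only for illustration:

	static int my_suspend_finisher(unsigned long index)
	{
		/* Hypothetical firmware entry; passes the physical resume address. */
		return my_firmware_enter_idle(index, virt_to_phys(cpu_resume));
	}

	static int my_cpu_suspend(unsigned long index)
	{
		/* Context save/restore is handled by __cpu_suspend() around the call. */
		return __cpu_suspend(index, my_suspend_finisher);
	}
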
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index de2b0226e06d..dc47e53e9e28 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
case __ARM_NR_compat_set_tls:
current->thread.tp_value = regs->regs[0];
+
+ /*
+ * Protect against register corruption from context switch.
+ * See comment in tls_thread_flush.
+ */
+ barrier();
asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
return 0;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 02cd3f023e9a..de1b085e7963 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -132,7 +132,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- const register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -145,7 +144,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.pc = regs->pc;
} else if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_sp;
+ frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
} else {
/*
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 97f0c0429dfa..edf8715ba39b 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -97,9 +97,9 @@ SECTIONS
PERCPU_SECTION(64)
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
- . = ALIGN(PAGE_SIZE);
_data = .;
_sdata = .;
RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 8d1ec2887a26..76794692c20b 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -174,7 +174,7 @@ static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
if (ret != 0)
- return ret;
+ return -EFAULT;
return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index e28be510380c..34b8bd0711e9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -66,6 +66,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
else
kvm_vcpu_block(vcpu);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
return 1;
}
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index d968796f4b2d..c3191168a994 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -80,6 +80,10 @@ __do_hyp_init:
msr mair_el2, x4
isb
+ /* Invalidate the stale TLBs from Bootloader */
+ tlbi alle2
+ dsb sy
+
mrs x4, sctlr_el2
and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
ldr x5, =SCTLR_EL2_FLAGS
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 5805e7c4a4dd..4cc3b719208e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1218,7 +1218,7 @@ static bool is_valid_cache(u32 val)
u32 level, ctype;
if (val >= CSSELR_MAX)
- return -ENOENT;
+ return false;
/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
level = (val >> 1);
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 3ecb56c624d3..c56179ed2c09 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,5 +1,5 @@
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
- context.o proc.o
+ context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4164c5ace9f8..2c71077cacfd 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -22,11 +22,8 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
-#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
@@ -125,7 +122,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
no_map:
__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
- *dma_handle = ~0;
+ *dma_handle = DMA_ERROR_CODE;
return NULL;
}
@@ -308,40 +305,12 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
-static int dma_bus_notifier(struct notifier_block *nb,
- unsigned long event, void *_dev)
-{
- struct device *dev = _dev;
-
- if (event != BUS_NOTIFY_ADD_DEVICE)
- return NOTIFY_DONE;
-
- if (of_property_read_bool(dev->of_node, "dma-coherent"))
- set_dma_ops(dev, &coherent_swiotlb_dma_ops);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block platform_bus_nb = {
- .notifier_call = dma_bus_notifier,
-};
-
-static struct notifier_block amba_bus_nb = {
- .notifier_call = dma_bus_notifier,
-};
-
extern int swiotlb_late_init_with_default_size(size_t default_size);
static int __init swiotlb_late_init(void)
{
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
- /*
- * These must be registered before of_platform_populate().
- */
- bus_register_notifier(&platform_bus_type, &platform_bus_nb);
- bus_register_notifier(&amba_bustype, &amba_bus_nb);
-
dma_ops = &noncoherent_swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 5472c2401876..494297c698ca 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -149,8 +149,7 @@ void __init arm64_memblock_init(void)
memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif
- if (!efi_enabled(EFI_MEMMAP))
- early_init_fdt_scan_reserved_mem();
+ early_init_fdt_scan_reserved_mem();
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA))
@@ -256,7 +255,7 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
- max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+ set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
#ifndef CONFIG_SPARSEMEM_VMEMMAP
free_unused_memmap();
@@ -334,8 +333,14 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd)
+ if (!keep_initrd) {
+ if (start == initrd_start)
+ start = round_down(start, PAGE_SIZE);
+ if (end == initrd_end)
+ end = round_up(end, PAGE_SIZE);
+
free_reserved_area((void *)start, (void *)end, 0, "initrd");
+ }
}
static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 8ed6cb1a900f..1d73662f00ff 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
* You really shouldn't be using read() or write() on /dev/mem. This might go
* away in the future.
*/
-int valid_phys_addr_range(unsigned long addr, size_t size)
+int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
if (addr < PHYS_OFFSET)
return 0;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c55567283cde..6894ef3e6234 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -94,7 +94,7 @@ static int __init early_cachepolicy(char *p)
*/
asm volatile(
" mrs %0, mair_el1\n"
- " bfi %0, %1, #%2, #8\n"
+ " bfi %0, %1, %2, #8\n"
" msr mair_el1, %0\n"
" isb\n"
: "=&r" (tmp)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
new file mode 100644
index 000000000000..bb0ea94c4ba1
--- /dev/null
+++ b/arch/arm64/mm/pageattr.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page_change_data *cdata = data;
+ pte_t pte = *ptep;
+
+ pte = clear_pte_bit(pte, cdata->clear_mask);
+ pte = set_pte_bit(pte, cdata->set_mask);
+
+ set_pte(ptep, pte);
+ return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+ pgprot_t set_mask, pgprot_t clear_mask)
+{
+ unsigned long start = addr;
+ unsigned long size = PAGE_SIZE*numpages;
+ unsigned long end = start + size;
+ int ret;
+ struct page_change_data data;
+
+ if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+ start &= PAGE_MASK;
+ end = start + size;
+ WARN_ON_ONCE(1);
+ }
+
+ if (!is_module_address(start) || !is_module_address(end - 1))
+ return -EINVAL;
+
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+
+ flush_tlb_kernel_range(start, end);
+ return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(PTE_RDONLY),
+ __pgprot(PTE_WRITE));
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(PTE_WRITE),
+ __pgprot(PTE_RDONLY));
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(PTE_PXN),
+ __pgprot(0));
+}
+EXPORT_SYMBOL_GPL(set_memory_nx);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(0),
+ __pgprot(PTE_PXN));
+}
+EXPORT_SYMBOL_GPL(set_memory_x);
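
These helpers refuse ranges outside the module area and expect page-aligned, page-counted regions. A minimal usage sketch, where text/data and their page counts are placeholders for addresses obtained from module_alloc():

	/* Sketch: make a module text region read-only and executable,
	 * and a module data region writable but non-executable. */
	set_memory_ro((unsigned long)text, text_pages);
	set_memory_x((unsigned long)text, text_pages);

	set_memory_rw((unsigned long)data, data_pages);
	set_memory_nx((unsigned long)data, data_pages);
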
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 7736779c9809..4e778b13291b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -76,6 +76,21 @@ ENTRY(cpu_reset)
ret x0
ENDPROC(cpu_reset)
+ENTRY(cpu_soft_restart)
+ /* Save address of cpu_reset() and reset address */
+ mov x19, x0
+ mov x20, x1
+
+ /* Turn D-cache off */
+ bl cpu_cache_off
+
+ /* Push out all dirty data, and ensure cache is empty */
+ bl flush_cache_all
+
+ mov x0, x20
+ ret x19
+ENDPROC(cpu_soft_restart)
+
/*
* cpu_do_idle()
*
diff --git a/arch/arm64/net/Makefile b/arch/arm64/net/Makefile
new file mode 100644
index 000000000000..da9763378284
--- /dev/null
+++ b/arch/arm64/net/Makefile
@@ -0,0 +1,4 @@
+#
+# ARM64 networking code
+#
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
new file mode 100644
index 000000000000..2134f7e6c288
--- /dev/null
+++ b/arch/arm64/net/bpf_jit.h
@@ -0,0 +1,169 @@
+/*
+ * BPF JIT compiler for ARM64
+ *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+#include <asm/insn.h>
+
+/* 5-bit Register Operand */
+#define A64_R(x) AARCH64_INSN_REG_##x
+#define A64_FP AARCH64_INSN_REG_FP
+#define A64_LR AARCH64_INSN_REG_LR
+#define A64_ZR AARCH64_INSN_REG_ZR
+#define A64_SP AARCH64_INSN_REG_SP
+
+#define A64_VARIANT(sf) \
+ ((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)
+
+/* Compare & branch (immediate) */
+#define A64_COMP_BRANCH(sf, Rt, offset, type) \
+ aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
+ AARCH64_INSN_BRANCH_COMP_##type)
+#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
+
+/* Conditional branch (immediate) */
+#define A64_COND_BRANCH(cond, offset) \
+ aarch64_insn_gen_cond_branch_imm(0, offset, cond)
+#define A64_COND_EQ AARCH64_INSN_COND_EQ /* == */
+#define A64_COND_NE AARCH64_INSN_COND_NE /* != */
+#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */
+#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */
+#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */
+#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */
+#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
+
+/* Unconditional branch (immediate) */
+#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
+ AARCH64_INSN_BRANCH_##type)
+#define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK)
+#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
+
+/* Unconditional branch (register) */
+#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
+#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
+
+/* Load/store register (register offset) */
+#define A64_LS_REG(Rt, Rn, Rm, size, type) \
+ aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
+ AARCH64_INSN_SIZE_##size, \
+ AARCH64_INSN_LDST_##type##_REG_OFFSET)
+#define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE)
+#define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
+#define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE)
+#define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
+#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
+#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
+#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
+#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
+
+/* Load/store register pair */
+#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
+ aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
+ AARCH64_INSN_VARIANT_64BIT, \
+ AARCH64_INSN_LDST_##ls##_PAIR_##type)
+/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
+#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
+/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
+#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
+
+/* Add/subtract (immediate) */
+#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
+ aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
+ A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
+/* Rd = Rn OP imm12 */
+#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
+#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
+/* Rd = Rn */
+#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
+
+/* Bitfield move */
+#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
+ aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
+ A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
+/* Signed, with sign replication to left and zeros to right */
+#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
+/* Unsigned, with zeros to left and right */
+#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)
+
+/* Rd = Rn << shift */
+#define A64_LSL(sf, Rd, Rn, shift) ({ \
+ int sz = (sf) ? 64 : 32; \
+ A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
+})
+/* Rd = Rn >> shift */
+#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
+/* Rd = Rn >> shift; signed */
+#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
+
+/* Move wide (immediate) */
+#define A64_MOVEW(sf, Rd, imm16, shift, type) \
+ aarch64_insn_gen_movewide(Rd, imm16, shift, \
+ A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
+/* Rd = Zeros (for MOVZ);
+ * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
+ * Rd = ~Rd; (for MOVN); */
+#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
+#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
+#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
+
+/* Add/subtract (shifted register) */
+#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
+ aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
+ A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
+/* Rd = Rn OP Rm */
+#define A64_ADD(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
+#define A64_SUB(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
+#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
+/* Rd = -Rm */
+#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
+/* Rn - Rm; set condition flags */
+#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)
+
+/* Data-processing (1 source) */
+#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
+ A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
+/* Rd = BSWAPx(Rn) */
+#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
+#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
+#define A64_REV64(Rd, Rn) A64_DATA1(1, Rd, Rn, REVERSE_64)
+
+/* Data-processing (2 source) */
+/* Rd = Rn OP Rm */
+#define A64_UDIV(sf, Rd, Rn, Rm) aarch64_insn_gen_data2(Rd, Rn, Rm, \
+ A64_VARIANT(sf), AARCH64_INSN_DATA2_UDIV)
+
+/* Data-processing (3 source) */
+/* Rd = Ra + Rn * Rm */
+#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
+ A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
+/* Rd = Rn * Rm */
+#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
+
+/* Logical (shifted register) */
+#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
+ aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
+ A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
+/* Rd = Rn OP Rm */
+#define A64_AND(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
+#define A64_ORR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
+#define A64_EOR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
+#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
+/* Rn & Rm; set condition flags */
+#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
+
+#endif /* _BPF_JIT_H */
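
As an example of how these macros compose, a 64-bit constant is loaded with one MOVZ followed by MOVKs for the remaining non-zero 16-bit chunks; this is what emit_a64_mov_i64() in the companion bpf_jit_comp.c below generates:

	/* Sketch: load 0x1122334455667788 into register rd. Each macro
	 * evaluates to a u32 instruction word for the JIT to emit. */
	A64_MOVZ(1, rd, 0x7788, 0);	/* rd = 0x0000000000007788 */
	A64_MOVK(1, rd, 0x5566, 16);	/* rd = 0x0000000055667788 */
	A64_MOVK(1, rd, 0x3344, 32);	/* rd = 0x0000334455667788 */
	A64_MOVK(1, rd, 0x1122, 48);	/* rd = 0x1122334455667788 */
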
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..7ae33545535b
--- /dev/null
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -0,0 +1,679 @@
+/*
+ * BPF JIT compiler for ARM64
+ *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "bpf_jit: " fmt
+
+#include <linux/filter.h>
+#include <linux/moduleloader.h>
+#include <linux/printk.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+
+#include "bpf_jit.h"
+
+int bpf_jit_enable __read_mostly;
+
+#define TMP_REG_1 (MAX_BPF_REG + 0)
+#define TMP_REG_2 (MAX_BPF_REG + 1)
+
+/* Map BPF registers to A64 registers */
+static const int bpf2a64[] = {
+ /* return value from in-kernel function, and exit value from eBPF */
+ [BPF_REG_0] = A64_R(7),
+ /* arguments from eBPF program to in-kernel function */
+ [BPF_REG_1] = A64_R(0),
+ [BPF_REG_2] = A64_R(1),
+ [BPF_REG_3] = A64_R(2),
+ [BPF_REG_4] = A64_R(3),
+ [BPF_REG_5] = A64_R(4),
+ /* callee saved registers that in-kernel function will preserve */
+ [BPF_REG_6] = A64_R(19),
+ [BPF_REG_7] = A64_R(20),
+ [BPF_REG_8] = A64_R(21),
+ [BPF_REG_9] = A64_R(22),
+ /* read-only frame pointer to access stack */
+ [BPF_REG_FP] = A64_FP,
+ /* temporary register for internal BPF JIT */
+ [TMP_REG_1] = A64_R(23),
+ [TMP_REG_2] = A64_R(24),
+};
+
+struct jit_ctx {
+ const struct bpf_prog *prog;
+ int idx;
+ int tmp_used;
+ int body_offset;
+ int *offset;
+ u32 *image;
+};
+
+static inline void emit(const u32 insn, struct jit_ctx *ctx)
+{
+ if (ctx->image != NULL)
+ ctx->image[ctx->idx] = cpu_to_le32(insn);
+
+ ctx->idx++;
+}
+
+static inline void emit_a64_mov_i64(const int reg, const u64 val,
+ struct jit_ctx *ctx)
+{
+ u64 tmp = val;
+ int shift = 0;
+
+ emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
+ tmp >>= 16;
+ shift += 16;
+ while (tmp) {
+ if (tmp & 0xffff)
+ emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
+ tmp >>= 16;
+ shift += 16;
+ }
+}
+
+static inline void emit_a64_mov_i(const int is64, const int reg,
+ const s32 val, struct jit_ctx *ctx)
+{
+ u16 hi = val >> 16;
+ u16 lo = val & 0xffff;
+
+ if (hi & 0x8000) {
+ if (hi == 0xffff) {
+ emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
+ } else {
+ emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
+ emit(A64_MOVK(is64, reg, lo, 0), ctx);
+ }
+ } else {
+ emit(A64_MOVZ(is64, reg, lo, 0), ctx);
+ if (hi)
+ emit(A64_MOVK(is64, reg, hi, 16), ctx);
+ }
+}
+
+static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+ const struct jit_ctx *ctx)
+{
+ int to = ctx->offset[bpf_to + 1];
+ /* -1 to account for the Branch instruction */
+ int from = ctx->offset[bpf_from + 1] - 1;
+
+ return to - from;
+}
+
+static inline int epilogue_offset(const struct jit_ctx *ctx)
+{
+ int to = ctx->offset[ctx->prog->len - 1];
+ int from = ctx->idx - ctx->body_offset;
+
+ return to - from;
+}
+
+/* Stack must be a multiple of 16B */
+#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
+
+static void build_prologue(struct jit_ctx *ctx)
+{
+ const u8 r6 = bpf2a64[BPF_REG_6];
+ const u8 r7 = bpf2a64[BPF_REG_7];
+ const u8 r8 = bpf2a64[BPF_REG_8];
+ const u8 r9 = bpf2a64[BPF_REG_9];
+ const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 ra = bpf2a64[BPF_REG_A];
+ const u8 rx = bpf2a64[BPF_REG_X];
+ const u8 tmp1 = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ int stack_size = MAX_BPF_STACK;
+
+ stack_size += 4; /* extra for skb_copy_bits buffer */
+ stack_size = STACK_ALIGN(stack_size);
+
+ /* Save callee-saved register */
+ emit(A64_PUSH(r6, r7, A64_SP), ctx);
+ emit(A64_PUSH(r8, r9, A64_SP), ctx);
+ if (ctx->tmp_used)
+ emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
+
+ /* Set up BPF stack */
+ emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+
+ /* Set up frame pointer */
+ emit(A64_MOV(1, fp, A64_SP), ctx);
+
+ /* Clear registers A and X */
+ emit_a64_mov_i64(ra, 0, ctx);
+ emit_a64_mov_i64(rx, 0, ctx);
+}
+
+static void build_epilogue(struct jit_ctx *ctx)
+{
+ const u8 r0 = bpf2a64[BPF_REG_0];
+ const u8 r6 = bpf2a64[BPF_REG_6];
+ const u8 r7 = bpf2a64[BPF_REG_7];
+ const u8 r8 = bpf2a64[BPF_REG_8];
+ const u8 r9 = bpf2a64[BPF_REG_9];
+ const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 tmp1 = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ int stack_size = MAX_BPF_STACK;
+
+ stack_size += 4; /* extra for skb_copy_bits buffer */
+ stack_size = STACK_ALIGN(stack_size);
+
+ /* We're done with BPF stack */
+ emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
+
+ /* Restore callee-saved register */
+ if (ctx->tmp_used)
+ emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
+ emit(A64_POP(r8, r9, A64_SP), ctx);
+ emit(A64_POP(r6, r7, A64_SP), ctx);
+
+ /* Restore frame pointer */
+ emit(A64_MOV(1, fp, A64_SP), ctx);
+
+ /* Set return value */
+ emit(A64_MOV(1, A64_R(0), r0), ctx);
+
+ emit(A64_RET(A64_LR), ctx);
+}
+
+static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ const u8 code = insn->code;
+ const u8 dst = bpf2a64[insn->dst_reg];
+ const u8 src = bpf2a64[insn->src_reg];
+ const u8 tmp = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const s16 off = insn->off;
+ const s32 imm = insn->imm;
+ const int i = insn - ctx->prog->insnsi;
+ const bool is64 = BPF_CLASS(code) == BPF_ALU64;
+ u8 jmp_cond;
+ s32 jmp_offset;
+
+ switch (code) {
+ /* dst = src */
+ case BPF_ALU | BPF_MOV | BPF_X:
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+ emit(A64_MOV(is64, dst, src), ctx);
+ break;
+ /* dst = dst OP src */
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ emit(A64_ADD(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ emit(A64_SUB(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ emit(A64_AND(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ emit(A64_ORR(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+ emit(A64_EOR(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ emit(A64_MUL(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ emit(A64_UDIV(is64, dst, dst, src), ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ ctx->tmp_used = 1;
+ emit(A64_UDIV(is64, tmp, dst, src), ctx);
+ emit(A64_MUL(is64, tmp, tmp, src), ctx);
+ emit(A64_SUB(is64, dst, dst, tmp), ctx);
+ break;
+ /* dst = -dst */
+ case BPF_ALU | BPF_NEG:
+ case BPF_ALU64 | BPF_NEG:
+ emit(A64_NEG(is64, dst, dst), ctx);
+ break;
+ /* dst = BSWAP##imm(dst) */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ if (BPF_SRC(code) == BPF_FROM_BE)
+ break;
+#else /* !CONFIG_CPU_BIG_ENDIAN */
+ if (BPF_SRC(code) == BPF_FROM_LE)
+ break;
+#endif
+ switch (imm) {
+ case 16:
+ emit(A64_REV16(is64, dst, dst), ctx);
+ break;
+ case 32:
+ emit(A64_REV32(is64, dst, dst), ctx);
+ break;
+ case 64:
+ emit(A64_REV64(dst, dst), ctx);
+ break;
+ }
+ break;
+ /* dst = imm */
+ case BPF_ALU | BPF_MOV | BPF_K:
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ emit_a64_mov_i(is64, dst, imm, ctx);
+ break;
+ /* dst = dst OP imm */
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_ADD(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_SUB(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_AND(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_ORR(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_EOR(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_MUL(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K:
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp, imm, ctx);
+ emit(A64_UDIV(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(is64, tmp2, imm, ctx);
+ emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
+ emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
+ emit(A64_SUB(is64, dst, dst, tmp), ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ emit(A64_LSL(is64, dst, dst, imm), ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ emit(A64_LSR(is64, dst, dst, imm), ctx);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ emit(A64_ASR(is64, dst, dst, imm), ctx);
+ break;
+
+#define check_imm(bits, imm) do { \
+ if ((((imm) > 0) && ((imm) >> (bits))) || \
+ (((imm) < 0) && (~(imm) >> (bits)))) { \
+ pr_info("[%2d] imm=%d(0x%x) out of range\n", \
+ i, imm, imm); \
+ return -EINVAL; \
+ } \
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
+ /* JUMP off */
+ case BPF_JMP | BPF_JA:
+ jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ check_imm26(jmp_offset);
+ emit(A64_B(jmp_offset), ctx);
+ break;
+ /* IF (dst COND src) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ emit(A64_CMP(1, dst, src), ctx);
+emit_cond_jmp:
+ jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ check_imm19(jmp_offset);
+ switch (BPF_OP(code)) {
+ case BPF_JEQ:
+ jmp_cond = A64_COND_EQ;
+ break;
+ case BPF_JGT:
+ jmp_cond = A64_COND_HI;
+ break;
+ case BPF_JGE:
+ jmp_cond = A64_COND_CS;
+ break;
+ case BPF_JNE:
+ jmp_cond = A64_COND_NE;
+ break;
+ case BPF_JSGT:
+ jmp_cond = A64_COND_GT;
+ break;
+ case BPF_JSGE:
+ jmp_cond = A64_COND_GE;
+ break;
+ default:
+ return -EFAULT;
+ }
+ emit(A64_B_(jmp_cond, jmp_offset), ctx);
+ break;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ emit(A64_TST(1, dst, src), ctx);
+ goto emit_cond_jmp;
+ /* IF (dst COND imm) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(1, tmp, imm, ctx);
+ emit(A64_CMP(1, dst, tmp), ctx);
+ goto emit_cond_jmp;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(1, tmp, imm, ctx);
+ emit(A64_TST(1, dst, tmp), ctx);
+ goto emit_cond_jmp;
+ /* function call */
+ case BPF_JMP | BPF_CALL:
+ {
+ const u8 r0 = bpf2a64[BPF_REG_0];
+ const u64 func = (u64)__bpf_call_base + imm;
+
+ ctx->tmp_used = 1;
+ emit_a64_mov_i64(tmp, func, ctx);
+ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+ emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+ emit(A64_BLR(tmp), ctx);
+ emit(A64_MOV(1, r0, A64_R(0)), ctx);
+ emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
+ break;
+ }
+ /* function return */
+ case BPF_JMP | BPF_EXIT:
+ if (i == ctx->prog->len - 1)
+ break;
+ jmp_offset = epilogue_offset(ctx);
+ check_imm26(jmp_offset);
+ emit(A64_B(jmp_offset), ctx);
+ break;
+
+ /* LDX: dst = *(size *)(src + off) */
+ case BPF_LDX | BPF_MEM | BPF_W:
+ case BPF_LDX | BPF_MEM | BPF_H:
+ case BPF_LDX | BPF_MEM | BPF_B:
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(1, tmp, off, ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ emit(A64_LDR32(dst, src, tmp), ctx);
+ break;
+ case BPF_H:
+ emit(A64_LDRH(dst, src, tmp), ctx);
+ break;
+ case BPF_B:
+ emit(A64_LDRB(dst, src, tmp), ctx);
+ break;
+ case BPF_DW:
+ emit(A64_LDR64(dst, src, tmp), ctx);
+ break;
+ }
+ break;
+
+ /* ST: *(size *)(dst + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_H:
+ case BPF_ST | BPF_MEM | BPF_B:
+ case BPF_ST | BPF_MEM | BPF_DW:
+ goto notyet;
+
+ /* STX: *(size *)(dst + off) = src */
+ case BPF_STX | BPF_MEM | BPF_W:
+ case BPF_STX | BPF_MEM | BPF_H:
+ case BPF_STX | BPF_MEM | BPF_B:
+ case BPF_STX | BPF_MEM | BPF_DW:
+ ctx->tmp_used = 1;
+ emit_a64_mov_i(1, tmp, off, ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ emit(A64_STR32(src, dst, tmp), ctx);
+ break;
+ case BPF_H:
+ emit(A64_STRH(src, dst, tmp), ctx);
+ break;
+ case BPF_B:
+ emit(A64_STRB(src, dst, tmp), ctx);
+ break;
+ case BPF_DW:
+ emit(A64_STR64(src, dst, tmp), ctx);
+ break;
+ }
+ break;
+ /* STX XADD: lock *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* STX XADD: lock *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ goto notyet;
+
+ /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
+ case BPF_LD | BPF_ABS | BPF_W:
+ case BPF_LD | BPF_ABS | BPF_H:
+ case BPF_LD | BPF_ABS | BPF_B:
+ /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
+ case BPF_LD | BPF_IND | BPF_W:
+ case BPF_LD | BPF_IND | BPF_H:
+ case BPF_LD | BPF_IND | BPF_B:
+ {
+ const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
+ const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
+ const u8 fp = bpf2a64[BPF_REG_FP];
+ const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
+ const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
+ const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
+ const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
+ const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
+ int size;
+
+ emit(A64_MOV(1, r1, r6), ctx);
+ emit_a64_mov_i(0, r2, imm, ctx);
+ if (BPF_MODE(code) == BPF_IND)
+ emit(A64_ADD(0, r2, r2, src), ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ size = 4;
+ break;
+ case BPF_H:
+ size = 2;
+ break;
+ case BPF_B:
+ size = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ emit_a64_mov_i64(r3, size, ctx);
+ emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
+ emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
+ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+ emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+ emit(A64_BLR(r5), ctx);
+ emit(A64_MOV(1, r0, A64_R(0)), ctx);
+ emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
+
+ jmp_offset = epilogue_offset(ctx);
+ check_imm19(jmp_offset);
+ emit(A64_CBZ(1, r0, jmp_offset), ctx);
+ emit(A64_MOV(1, r5, r0), ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ emit(A64_LDR32(r0, r5, A64_ZR), ctx);
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ emit(A64_REV32(0, r0, r0), ctx);
+#endif
+ break;
+ case BPF_H:
+ emit(A64_LDRH(r0, r5, A64_ZR), ctx);
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ emit(A64_REV16(0, r0, r0), ctx);
+#endif
+ break;
+ case BPF_B:
+ emit(A64_LDRB(r0, r5, A64_ZR), ctx);
+ break;
+ }
+ break;
+ }
+notyet:
+ pr_info_once("*** NOT YET: opcode %02x ***\n", code);
+ return -EFAULT;
+
+ default:
+ pr_err_once("unknown opcode %02x\n", code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int build_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->prog;
+ int i;
+
+ for (i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &prog->insnsi[i];
+ int ret;
+
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
+
+ ret = build_insn(insn, ctx);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+void bpf_jit_compile(struct bpf_prog *prog)
+{
+ /* Nothing to do here. We support Internal BPF. */
+}
+
+void bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ struct jit_ctx ctx;
+ int image_size;
+
+ if (!bpf_jit_enable)
+ return;
+
+ if (!prog || !prog->len)
+ return;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.prog = prog;
+
+ ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ if (ctx.offset == NULL)
+ return;
+
+ /* 1. Initial fake pass to compute ctx->idx. */
+
+ /* Fake pass to fill in ctx->offset. */
+ if (build_body(&ctx))
+ goto out;
+
+ build_prologue(&ctx);
+
+ build_epilogue(&ctx);
+
+ /* Now we know the actual image size. */
+ image_size = sizeof(u32) * ctx.idx;
+ ctx.image = module_alloc(image_size);
+ if (unlikely(ctx.image == NULL))
+ goto out;
+
+ /* 2. Now, the actual pass. */
+
+ ctx.idx = 0;
+ build_prologue(&ctx);
+
+ ctx.body_offset = ctx.idx;
+ if (build_body(&ctx)) {
+ module_free(NULL, ctx.image);
+ goto out;
+ }
+
+ build_epilogue(&ctx);
+
+ /* And we're done. */
+ if (bpf_jit_enable > 1)
+ bpf_jit_dump(prog->len, image_size, 2, ctx.image);
+
+ bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
+ prog->bpf_func = (void *)ctx.image;
+ prog->jited = 1;
+
+out:
+ kfree(ctx.offset);
+}
+
+void bpf_jit_free(struct bpf_prog *prog)
+{
+ if (prog->jited)
+ module_free(NULL, prog->bpf_func);
+
+ kfree(prog);
+}
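
The JIT is gated by the bpf_jit_enable knob declared above, exposed at run time as the net.core.bpf_jit_enable sysctl (/proc/sys/net/core/bpf_jit_enable): writing 1 enables the JIT and writing 2 additionally dumps each generated image through bpf_jit_dump(), matching the bpf_jit_enable > 1 check in bpf_int_jit_compile().
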