Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/Kbuild            |   4
-rw-r--r--  arch/arm64/include/asm/arch_timer.h      | 133
-rw-r--r--  arch/arm64/include/asm/arm_generic.h     | 100
-rw-r--r--  arch/arm64/include/asm/assembler.h       |   8
-rw-r--r--  arch/arm64/include/asm/atomic.h          | 132
-rw-r--r--  arch/arm64/include/asm/cacheflush.h      |  11
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h         |  74
-rw-r--r--  arch/arm64/include/asm/compat.h          |   6
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h     |   1
-rw-r--r--  arch/arm64/include/asm/elf.h             |  10
-rw-r--r--  arch/arm64/include/asm/fpsimd.h          |   5
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h    |  64
-rw-r--r--  arch/arm64/include/asm/futex.h           |   2
-rw-r--r--  arch/arm64/include/asm/io.h              |  13
-rw-r--r--  arch/arm64/include/asm/memory.h          |   1
-rw-r--r--  arch/arm64/include/asm/mmu.h             |   1
-rw-r--r--  arch/arm64/include/asm/mmu_context.h     |  15
-rw-r--r--  arch/arm64/include/asm/perf_event.h      |   7
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h   |   6
-rw-r--r--  arch/arm64/include/asm/pgtable.h         |  68
-rw-r--r--  arch/arm64/include/asm/processor.h       |   7
-rw-r--r--  arch/arm64/include/asm/psci.h            |  38
-rw-r--r--  arch/arm64/include/asm/ptrace.h          |  41
-rw-r--r--  arch/arm64/include/asm/smp.h             |  11
-rw-r--r--  arch/arm64/include/asm/spinlock.h        |  78
-rw-r--r--  arch/arm64/include/asm/syscalls.h        |  10
-rw-r--r--  arch/arm64/include/asm/unistd.h          |   5
-rw-r--r--  arch/arm64/include/asm/unistd32.h        |  29
-rw-r--r--  arch/arm64/include/asm/virt.h            |  54
29 files changed, 609 insertions(+), 325 deletions(-)
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index a581a2205938..e5fe4f99fe10 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -3,6 +3,7 @@
generic-y += bug.h
generic-y += bugs.h
generic-y += checksum.h
+generic-y += clkdev.h
generic-y += cputime.h
generic-y += current.h
generic-y += delay.h
@@ -18,6 +19,7 @@ generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mman.h
@@ -43,6 +45,8 @@ generic-y += swab.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
+generic-y += trace_clock.h
generic-y += types.h
generic-y += unaligned.h
generic-y += user.h
+generic-y += xor.h
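Each generic-y entry above makes Kbuild emit a one-line wrapper header at build time. As a sketch of the mechanism (build machinery, not part of this patch), the generated include/generated/asm/clkdev.h reduces to:

	#include <asm-generic/clkdev.h>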
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
new file mode 100644
index 000000000000..91e2a6a6fcd4
--- /dev/null
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -0,0 +1,133 @@
+/*
+ * arch/arm64/include/asm/arch_timer.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARCH_TIMER_H
+#define __ASM_ARCH_TIMER_H
+
+#include <asm/barrier.h>
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+static inline void arch_timer_reg_write(int access, int reg, u32 val)
+{
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
+ break;
+ default:
+ BUILD_BUG();
+ }
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("msr cntv_ctl_el0, %0" : : "r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
+ break;
+ default:
+ BUILD_BUG();
+ }
+ } else {
+ BUILD_BUG();
+ }
+
+ isb();
+}
+
+static inline u32 arch_timer_reg_read(int access, int reg)
+{
+ u32 val;
+
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
+ break;
+ default:
+ BUILD_BUG();
+ }
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mrs %0, cntv_ctl_el0" : "=r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
+ break;
+ default:
+ BUILD_BUG();
+ }
+ } else {
+ BUILD_BUG();
+ }
+
+ return val;
+}
+
+static inline u32 arch_timer_get_cntfrq(void)
+{
+ u32 val;
+ asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
+ return val;
+}
+
+static inline void __cpuinit arch_counter_set_user_access(void)
+{
+ u32 cntkctl;
+
+ /* Disable user access to the timers and the physical counter. */
+ asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
+ cntkctl &= ~((3 << 8) | (1 << 0));
+
+ /* Enable user access to the virtual counter and frequency. */
+ cntkctl |= (1 << 1);
+ asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
+}
+
+static inline u64 arch_counter_get_cntpct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
+
+ return cval;
+}
+
+static inline u64 arch_counter_get_cntvct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
+
+ return cval;
+}
+
+#endif
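A minimal usage sketch for the accessors above (not part of the patch): arming a one-shot event on the virtual timer. The ARCH_TIMER_CTRL_ENABLE and ARCH_TIMER_CTRL_IT_MASK names are assumed to come from <clocksource/arm_arch_timer.h>, mirroring the CTRL bit definitions in the deleted arm_generic.h.

	static inline void example_set_next_event(u32 cycles)
	{
		u32 ctrl = arch_timer_reg_read(ARCH_TIMER_VIRT_ACCESS,
					       ARCH_TIMER_REG_CTRL);

		ctrl |= ARCH_TIMER_CTRL_ENABLE;		/* enable the timer */
		ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;	/* unmask its interrupt */

		arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS,
				     ARCH_TIMER_REG_TVAL, cycles);
		arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS,
				     ARCH_TIMER_REG_CTRL, ctrl);
	}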
diff --git a/arch/arm64/include/asm/arm_generic.h b/arch/arm64/include/asm/arm_generic.h
deleted file mode 100644
index e4cec9d30f27..000000000000
--- a/arch/arm64/include/asm/arm_generic.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * arch/arm64/include/asm/arm_generic.h
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_ARM_GENERIC_H
-#define __ASM_ARM_GENERIC_H
-
-#include <linux/clocksource.h>
-
-#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
-#define ARCH_TIMER_CTRL_IMASK (1 << 1)
-#define ARCH_TIMER_CTRL_ISTATUS (1 << 2)
-
-#define ARCH_TIMER_REG_CTRL 0
-#define ARCH_TIMER_REG_FREQ 1
-#define ARCH_TIMER_REG_TVAL 2
-
-static inline void arch_timer_reg_write(int reg, u32 val)
-{
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
- break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
- break;
- default:
- BUILD_BUG();
- }
-
- isb();
-}
-
-static inline u32 arch_timer_reg_read(int reg)
-{
- u32 val;
-
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
- break;
- case ARCH_TIMER_REG_FREQ:
- asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
- break;
- case ARCH_TIMER_REG_TVAL:
- asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
- break;
- default:
- BUILD_BUG();
- }
-
- return val;
-}
-
-static inline void __cpuinit arch_counter_enable_user_access(void)
-{
- u32 cntkctl;
-
- /* Disable user access to the timers and the virtual counter. */
- asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- cntkctl &= ~((3 << 8) | (1 << 1));
-
- /* Enable user access to the physical counter and frequency. */
- cntkctl |= 1;
- asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
-}
-
-static inline cycle_t arch_counter_get_cntpct(void)
-{
- cycle_t cval;
-
- asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
-
- return cval;
-}
-
-static inline cycle_t arch_counter_get_cntvct(void)
-{
- cycle_t cval;
-
- asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
-
- return cval;
-}
-
-#endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index da2a13e8f1e6..c8eedc604984 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -107,3 +107,11 @@
* Register aliases.
*/
lr .req x30 // link register
+
+/*
+ * Vector entry
+ */
+ .macro ventry label
+ .align 7
+ b \label
+ .endm
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 407717ba060e..836364468571 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v)
int result;
asm volatile("// atomic_add\n"
-"1: ldxr %w0, [%3]\n"
-" add %w0, %w0, %w4\n"
-" stxr %w1, %w0, [%3]\n"
+"1: ldxr %w0, %2\n"
+" add %w0, %w0, %w3\n"
+" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
: "cc");
}
@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_add_return\n"
-"1: ldaxr %w0, [%3]\n"
-" add %w0, %w0, %w4\n"
-" stlxr %w1, %w0, [%3]\n"
+"1: ldaxr %w0, %2\n"
+" add %w0, %w0, %w3\n"
+" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
+ : "cc", "memory");
return result;
}
@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub\n"
-"1: ldxr %w0, [%3]\n"
-" sub %w0, %w0, %w4\n"
-" stxr %w1, %w0, [%3]\n"
+"1: ldxr %w0, %2\n"
+" sub %w0, %w0, %w3\n"
+" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
: "cc");
}
@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub_return\n"
-"1: ldaxr %w0, [%3]\n"
-" sub %w0, %w0, %w4\n"
-" stlxr %w1, %w0, [%3]\n"
+"1: ldaxr %w0, %2\n"
+" sub %w0, %w0, %w3\n"
+" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
+ : "cc", "memory");
return result;
}
@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
int oldval;
asm volatile("// atomic_cmpxchg\n"
-"1: ldaxr %w1, [%3]\n"
-" cmp %w1, %w4\n"
+"1: ldaxr %w1, %2\n"
+" cmp %w1, %w3\n"
" b.ne 2f\n"
-" stlxr %w0, %w5, [%3]\n"
+" stlxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
- : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
- : "r" (&ptr->counter), "Ir" (old), "r" (new)
- : "cc");
+ : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
+ : "Ir" (old), "r" (new)
+ : "cc", "memory");
return oldval;
}
@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
unsigned long tmp, tmp2;
asm volatile("// atomic_clear_mask\n"
-"1: ldxr %0, [%3]\n"
-" bic %0, %0, %4\n"
-" stxr %w1, %0, [%3]\n"
+"1: ldxr %0, %2\n"
+" bic %0, %0, %3\n"
+" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
- : "r" (addr), "Ir" (mask)
+ : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
+ : "Ir" (mask)
: "cc");
}
@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add\n"
-"1: ldxr %0, [%3]\n"
-" add %0, %0, %4\n"
-" stxr %w1, %0, [%3]\n"
+"1: ldxr %0, %2\n"
+" add %0, %0, %3\n"
+" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
: "cc");
}
@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add_return\n"
-"1: ldaxr %0, [%3]\n"
-" add %0, %0, %4\n"
-" stlxr %w1, %0, [%3]\n"
+"1: ldaxr %0, %2\n"
+" add %0, %0, %3\n"
+" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
+ : "cc", "memory");
return result;
}
@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub\n"
-"1: ldxr %0, [%3]\n"
-" sub %0, %0, %4\n"
-" stxr %w1, %0, [%3]\n"
+"1: ldxr %0, %2\n"
+" sub %0, %0, %3\n"
+" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
: "cc");
}
@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
-"1: ldaxr %0, [%3]\n"
-" sub %0, %0, %4\n"
-" stlxr %w1, %0, [%3]\n"
+"1: ldaxr %0, %2\n"
+" sub %0, %0, %3\n"
+" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ : "Ir" (i)
+ : "cc", "memory");
return result;
}
@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
unsigned long res;
asm volatile("// atomic64_cmpxchg\n"
-"1: ldaxr %1, [%3]\n"
-" cmp %1, %4\n"
+"1: ldaxr %1, %2\n"
+" cmp %1, %3\n"
" b.ne 2f\n"
-" stlxr %w0, %5, [%3]\n"
+" stlxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
- : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
- : "r" (&ptr->counter), "Ir" (old), "r" (new)
- : "cc");
+ : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
+ : "Ir" (old), "r" (new)
+ : "cc", "memory");
return oldval;
}
@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
-"1: ldaxr %0, [%3]\n"
+"1: ldaxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
-" stlxr %w1, %0, [%3]\n"
+" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
"2:"
- : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
- : "r" (&v->counter)
- : "cc");
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ :
+ : "cc", "memory");
return result;
}
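The whole conversion above follows one pattern; stripped to a standalone sketch (illustrative, not from the patch), the exclusive-access location is handed to GCC as a "+Q" memory operand referenced directly in the template, rather than as a pointer input dereferenced via [%3]:

	static inline void example_fetch_inc(int *counter)
	{
		unsigned long tmp;
		int result;

		asm volatile("// example_fetch_inc\n"
	"1:	ldxr	%w0, %2\n"		/* load-exclusive the counter */
	"	add	%w0, %w0, #1\n"
	"	stxr	%w1, %w0, %2\n"		/* %w1 is the failure flag */
	"	cbnz	%w1, 1b"		/* retry if we lost exclusivity */
		: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
		:
		: "cc");
	}

With "+Q" the compiler knows exactly which memory the exclusives touch, so the relaxed variants no longer need a blanket "memory" clobber; the patch keeps it only for the ordered (return-value and cmpxchg) forms.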
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index aa3132ab7f29..3300cbd18a89 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -70,13 +70,20 @@
* - size - region size
*/
extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __flush_cache_user_range(unsigned long start, unsigned long end);
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long pfn)
+{
+}
+
/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index e0e65b069d9e..968b5cbfc260 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) {
case 1:
asm volatile("// __xchg1\n"
- "1: ldaxrb %w0, [%3]\n"
- " stlxrb %w1, %w2, [%3]\n"
+ "1: ldaxrb %w0, %2\n"
+ " stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (ret), "=&r" (tmp)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
+ : "r" (x)
+ : "cc", "memory");
break;
case 2:
asm volatile("// __xchg2\n"
- "1: ldaxrh %w0, [%3]\n"
- " stlxrh %w1, %w2, [%3]\n"
+ "1: ldaxrh %w0, %2\n"
+ " stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (ret), "=&r" (tmp)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
+ : "r" (x)
+ : "cc", "memory");
break;
case 4:
asm volatile("// __xchg4\n"
- "1: ldaxr %w0, [%3]\n"
- " stlxr %w1, %w2, [%3]\n"
+ "1: ldaxr %w0, %2\n"
+ " stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (ret), "=&r" (tmp)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
+ : "r" (x)
+ : "cc", "memory");
break;
case 8:
asm volatile("// __xchg8\n"
- "1: ldaxr %0, [%3]\n"
- " stlxr %w1, %2, [%3]\n"
+ "1: ldaxr %0, %2\n"
+ " stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (ret), "=&r" (tmp)
- : "r" (x), "r" (ptr)
- : "memory", "cc");
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
+ : "r" (x)
+ : "cc", "memory");
break;
default:
BUILD_BUG();
@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 1:
do {
asm volatile("// __cmpxchg1\n"
- " ldxrb %w1, [%2]\n"
+ " ldxrb %w1, %2\n"
" mov %w0, #0\n"
" cmp %w1, %w3\n"
" b.ne 1f\n"
- " stxrb %w0, %w4, [%2]\n"
+ " stxrb %w0, %w4, %2\n"
"1:\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (old), "r" (new)
+ : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
+ : "Ir" (old), "r" (new)
: "cc");
} while (res);
break;
@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 2:
do {
asm volatile("// __cmpxchg2\n"
- " ldxrh %w1, [%2]\n"
+ " ldxrh %w1, %2\n"
" mov %w0, #0\n"
" cmp %w1, %w3\n"
" b.ne 1f\n"
- " stxrh %w0, %w4, [%2]\n"
+ " stxrh %w0, %w4, %2\n"
"1:\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (old), "r" (new)
- : "memory", "cc");
+ : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
+ : "Ir" (old), "r" (new)
+ : "cc");
} while (res);
break;
case 4:
do {
asm volatile("// __cmpxchg4\n"
- " ldxr %w1, [%2]\n"
+ " ldxr %w1, %2\n"
" mov %w0, #0\n"
" cmp %w1, %w3\n"
" b.ne 1f\n"
- " stxr %w0, %w4, [%2]\n"
+ " stxr %w0, %w4, %2\n"
"1:\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (old), "r" (new)
+ : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
+ : "Ir" (old), "r" (new)
: "cc");
} while (res);
break;
@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 8:
do {
asm volatile("// __cmpxchg8\n"
- " ldxr %1, [%2]\n"
+ " ldxr %1, %2\n"
" mov %w0, #0\n"
" cmp %1, %3\n"
" b.ne 1f\n"
- " stxr %w0, %4, [%2]\n"
+ " stxr %w0, %4, %2\n"
"1:\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (old), "r" (new)
+ : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
+ : "Ir" (old), "r" (new)
: "cc");
} while (res);
break;
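A hedged caller-side sketch of the cmpxchg() interface these routines back (the bounded-increment helper and its names are illustrative, not from the patch):

	/* Lock-free bounded increment built on cmpxchg(). */
	static int example_bounded_inc(int *v, int max)
	{
		int old, new;

		do {
			old = *v;
			if (old >= max)
				return -EBUSY;	/* illustrative failure path */
			new = old + 1;
		} while (cmpxchg(v, old, new) != old);

		return new;
	}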
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 37e610dc084e..618b450e5a1d 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -23,6 +23,7 @@
*/
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "armv8l\0\0"
@@ -209,10 +210,11 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
+#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
+
static inline void __user *arch_compat_alloc_user_space(long len)
{
- struct pt_regs *regs = task_pt_regs(current);
- return (void __user *)regs->compat_sp - len;
+ return (void __user *)compat_user_stack_pointer() - len;
}
struct compat_ipc64_perm {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 538f4b44db5d..994776894198 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -50,6 +50,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
+ debug_dma_mapping_error(dev, dev_addr);
return ops->mapping_error(dev, dev_addr);
}
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index cf284649dfcb..fe32c0e4ac01 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -25,12 +25,13 @@
#include <asm/user.h>
typedef unsigned long elf_greg_t;
-typedef unsigned long elf_freg_t[3];
-#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
-typedef struct user_fp elf_fpregset_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+typedef struct user_fpsimd_state elf_fpregset_t;
#define EM_AARCH64 183
@@ -87,7 +88,6 @@ typedef struct user_fp elf_fpregset_t;
#define R_AARCH64_MOVW_PREL_G2_NC 292
#define R_AARCH64_MOVW_PREL_G3 293
-
/*
* These are used to set parameters in the core dumps.
*/
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index b42fab9f62a9..c43b4ac13008 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -25,9 +25,8 @@
* - FPSR and FPCR
* - 32 128-bit data registers
*
- * Note that user_fp forms a prefix of this structure, which is relied
- * upon in the ptrace FP/SIMD accessors. struct user_fpsimd_state must
- * form a prefix of struct fpsimd_state.
+ * Note that user_fpsimd forms a prefix of this structure, which is
+ * relied upon in the ptrace FP/SIMD accessors.
*/
struct fpsimd_state {
union {
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
new file mode 100644
index 000000000000..bbec599c96bd
--- /dev/null
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -0,0 +1,64 @@
+/*
+ * FP/SIMD state saving and restoring macros
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+.macro fpsimd_save state, tmpnr
+ stp q0, q1, [\state, #16 * 0]
+ stp q2, q3, [\state, #16 * 2]
+ stp q4, q5, [\state, #16 * 4]
+ stp q6, q7, [\state, #16 * 6]
+ stp q8, q9, [\state, #16 * 8]
+ stp q10, q11, [\state, #16 * 10]
+ stp q12, q13, [\state, #16 * 12]
+ stp q14, q15, [\state, #16 * 14]
+ stp q16, q17, [\state, #16 * 16]
+ stp q18, q19, [\state, #16 * 18]
+ stp q20, q21, [\state, #16 * 20]
+ stp q22, q23, [\state, #16 * 22]
+ stp q24, q25, [\state, #16 * 24]
+ stp q26, q27, [\state, #16 * 26]
+ stp q28, q29, [\state, #16 * 28]
+ stp q30, q31, [\state, #16 * 30]!
+ mrs x\tmpnr, fpsr
+ str w\tmpnr, [\state, #16 * 2]
+ mrs x\tmpnr, fpcr
+ str w\tmpnr, [\state, #16 * 2 + 4]
+.endm
+
+.macro fpsimd_restore state, tmpnr
+ ldp q0, q1, [\state, #16 * 0]
+ ldp q2, q3, [\state, #16 * 2]
+ ldp q4, q5, [\state, #16 * 4]
+ ldp q6, q7, [\state, #16 * 6]
+ ldp q8, q9, [\state, #16 * 8]
+ ldp q10, q11, [\state, #16 * 10]
+ ldp q12, q13, [\state, #16 * 12]
+ ldp q14, q15, [\state, #16 * 14]
+ ldp q16, q17, [\state, #16 * 16]
+ ldp q18, q19, [\state, #16 * 18]
+ ldp q20, q21, [\state, #16 * 20]
+ ldp q22, q23, [\state, #16 * 22]
+ ldp q24, q25, [\state, #16 * 24]
+ ldp q26, q27, [\state, #16 * 26]
+ ldp q28, q29, [\state, #16 * 28]
+ ldp q30, q31, [\state, #16 * 30]!
+ ldr w\tmpnr, [\state, #16 * 2]
+ msr fpsr, x\tmpnr
+ ldr w\tmpnr, [\state, #16 * 2 + 4]
+ msr fpcr, x\tmpnr
+.endm
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 3468ae8439fa..c582fa316366 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -39,7 +39,7 @@
" .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "cc")
+ : "cc", "memory")
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 74a2a7d304a9..57f12c991de2 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -114,7 +114,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* I/O port access primitives.
*/
#define IO_SPACE_LIMIT 0xffff
-#define PCI_IOBASE ((void __iomem *)0xffffffbbfffe0000UL)
+#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M))
static inline u8 inb(unsigned long addr)
{
@@ -222,14 +222,17 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
extern void __iounmap(volatile void __iomem *addr);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
-#define ioremap(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
-#define ioremap_nocache(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
-#define ioremap_wc(addr, size) __ioremap((addr), (size), PROT_NORMAL_NC)
+#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
+#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+
#define ARCH_HAS_IOREMAP_WC
#include <asm-generic/iomap.h>
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 1cac16a001cb..381f556b664e 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -43,6 +43,7 @@
#define PAGE_OFFSET UL(0xffffffc000000000)
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
+#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
#define VA_BITS (39)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index d4f7fd5b9e33..2494fc01896a 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -26,5 +26,6 @@ typedef struct {
extern void paging_init(void);
extern void setup_mm_for_reboot(void);
+extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index f68465dee026..e2bc385adb6b 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -35,6 +35,21 @@ extern unsigned int cpu_last_asid;
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+static inline void contextidr_thread_switch(struct task_struct *next)
+{
+ asm(
+ " msr contextidr_el1, %0\n"
+ " isb"
+ :
+ : "r" (task_pid_nr(next)));
+}
+#else
+static inline void contextidr_thread_switch(struct task_struct *next)
+{
+}
+#endif
+
/*
* Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
*/
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index a6fffd511c5e..d26d1d53c0d7 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,11 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
-/* It's quiet around here... */
+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#endif
#endif
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 0f3b4581d925..75fd13d289b9 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -38,7 +38,8 @@
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
-#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
+#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54)
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -57,7 +58,8 @@
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */
-#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
+#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 8960239be722..e333a243bfcc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -24,7 +24,8 @@
/*
* Software defined PTE bits definition.
*/
-#define PTE_VALID (_AT(pteval_t, 1) << 0) /* pte_present() check */
+#define PTE_VALID (_AT(pteval_t, 1) << 0)
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
@@ -60,25 +61,28 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
extern pgprot_t pgprot_default;
-#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
-
-#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
-#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY)
-
-#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
-#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
-#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
-#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
+#define __pgprot_modify(prot,mask,bits) \
+ __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
+#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b)
+
+#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
+#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
+#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+
+#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
+#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#endif /* __ASSEMBLY__ */
@@ -125,16 +129,15 @@ extern struct page *empty_zero_page;
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (pte_val(pte) & PTE_VALID)
+#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & PTE_AF)
#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
-#define pte_exec(pte) (!(pte_val(pte) & PTE_XN))
+#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
-#define pte_present_exec_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
- (PTE_VALID | PTE_USER))
+#define pte_valid_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -157,8 +160,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- if (pte_present_exec_user(pte))
- __sync_icache_dcache(pte, addr);
+ if (pte_valid_user(pte)) {
+ if (pte_exec(pte))
+ __sync_icache_dcache(pte, addr);
+ if (!pte_dirty(pte))
+ pte = pte_wrprotect(pte);
+ }
+
set_pte(ptep, pte);
}
@@ -168,9 +176,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
-#define __pgprot_modify(prot,mask,bits) \
- __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
-
#define __HAVE_ARCH_PTE_SPECIAL
/*
@@ -262,7 +267,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
+ const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
+ PTE_PROT_NONE | PTE_VALID;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
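The net effect of the PTE_PROT_NONE encoding, as an illustrative sketch (not code from the patch): a PROT_NONE pte now reads as present to the core VM while staying invalid to the hardware walker, and pte_modify() preserves that distinction because its mask covers PTE_PROT_NONE and PTE_VALID.

	static inline void example_prot_none_semantics(void)
	{
		pte_t pte = pfn_pte(0, PAGE_NONE);	/* PROT_NONE mapping */

		WARN_ON(!pte_present(pte));		/* PTE_PROT_NONE is set */
		WARN_ON(pte_val(pte) & PTE_VALID);	/* yet invalid to the MMU */
	}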
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 5d810044feda..ab239b2c456f 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -43,6 +43,8 @@
#else
#define STACK_TOP STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
+
+#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
#endif /* __KERNEL__ */
struct debug_info {
@@ -126,11 +128,6 @@ unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
-/*
- * Create a new kernel thread
- */
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
new file mode 100644
index 000000000000..0604237ecd99
--- /dev/null
+++ b/arch/arm64/include/asm/psci.h
@@ -0,0 +1,38 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2013 ARM Limited
+ */
+
+#ifndef __ASM_PSCI_H
+#define __ASM_PSCI_H
+
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+extern struct psci_operations psci_ops;
+
+int psci_init(void);
+
+#endif /* __ASM_PSCI_H */
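A usage sketch for the new interface (assumption: psci_init() has parsed the firmware's device-tree node and populated psci_ops; the helper name is illustrative):

	static int example_boot_secondary(unsigned long cpuid, phys_addr_t entry)
	{
		if (!psci_ops.cpu_on)
			return -ENODEV;		/* firmware lacks CPU_ON */

		return psci_ops.cpu_on(cpuid, entry);
	}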
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index b04d3404f0d1..41a71ee4c3df 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -30,8 +30,28 @@
#define COMPAT_PTRACE_SETVFPREGS 28
#define COMPAT_PTRACE_GETHBPREGS 29
#define COMPAT_PTRACE_SETHBPREGS 30
+
+/* AArch32 CPSR bits */
+#define COMPAT_PSR_MODE_MASK 0x0000001f
#define COMPAT_PSR_MODE_USR 0x00000010
+#define COMPAT_PSR_MODE_FIQ 0x00000011
+#define COMPAT_PSR_MODE_IRQ 0x00000012
+#define COMPAT_PSR_MODE_SVC 0x00000013
+#define COMPAT_PSR_MODE_ABT 0x00000017
+#define COMPAT_PSR_MODE_HYP 0x0000001a
+#define COMPAT_PSR_MODE_UND 0x0000001b
+#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
+#define COMPAT_PSR_F_BIT 0x00000040
+#define COMPAT_PSR_I_BIT 0x00000080
+#define COMPAT_PSR_A_BIT 0x00000100
+#define COMPAT_PSR_E_BIT 0x00000200
+#define COMPAT_PSR_J_BIT 0x01000000
+#define COMPAT_PSR_Q_BIT 0x08000000
+#define COMPAT_PSR_V_BIT 0x10000000
+#define COMPAT_PSR_C_BIT 0x20000000
+#define COMPAT_PSR_Z_BIT 0x40000000
+#define COMPAT_PSR_N_BIT 0x80000000
#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
/*
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a
@@ -44,10 +64,27 @@
/* sizeof(struct user) for AArch32 */
#define COMPAT_USER_SZ 296
-/* AArch32 uses x13 as the stack pointer... */
+
+/* Architecturally defined mapping between AArch32 and AArch64 registers */
+#define compat_usr(x) regs[(x)]
#define compat_sp regs[13]
-/* ... and x14 as the link register. */
#define compat_lr regs[14]
+#define compat_sp_hyp regs[15]
+#define compat_sp_irq regs[16]
+#define compat_lr_irq regs[17]
+#define compat_sp_svc regs[18]
+#define compat_lr_svc regs[19]
+#define compat_sp_abt regs[20]
+#define compat_lr_abt regs[21]
+#define compat_sp_und regs[22]
+#define compat_lr_und regs[23]
+#define compat_r8_fiq regs[24]
+#define compat_r9_fiq regs[25]
+#define compat_r10_fiq regs[26]
+#define compat_r11_fiq regs[27]
+#define compat_r12_fiq regs[28]
+#define compat_sp_fiq regs[29]
+#define compat_lr_fiq regs[30]
/*
* This struct defines the way the registers are stored on the stack during an
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 7e34295f78e3..4b8023c5d146 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -66,4 +66,15 @@ extern volatile unsigned long secondary_holding_pen_release;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+struct device_node;
+
+struct smp_enable_ops {
+ const char *name;
+ int (*init_cpu)(struct device_node *, int);
+ int (*prepare_cpu)(int);
+};
+
+extern const struct smp_enable_ops smp_spin_table_ops;
+extern const struct smp_enable_ops smp_psci_ops;
+
#endif /* ifndef __ASM_SMP_H */
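A sketch of how a backend might instantiate smp_enable_ops (illustrative only; the real smp_spin_table_ops and smp_psci_ops implementations live under arch/arm64/kernel/):

	static int example_init_cpu(struct device_node *dn, int cpu)
	{
		/* parse the cpu node's enable-method properties here */
		return 0;
	}

	static int example_prepare_cpu(int cpu)
	{
		/* release the cpu, e.g. via a pen address or PSCI CPU_ON */
		return 0;
	}

	const struct smp_enable_ops smp_example_ops = {
		.name		= "example",
		.init_cpu	= example_init_cpu,
		.prepare_cpu	= example_prepare_cpu,
	};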
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 41112fe2f8b1..7065e920149d 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
asm volatile(
" sevl\n"
"1: wfe\n"
- "2: ldaxr %w0, [%1]\n"
+ "2: ldaxr %w0, %1\n"
" cbnz %w0, 1b\n"
- " stxr %w0, %w2, [%1]\n"
+ " stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n"
- : "=&r" (tmp)
- : "r" (&lock->lock), "r" (1)
- : "memory");
+ : "=&r" (tmp), "+Q" (lock->lock)
+ : "r" (1)
+ : "cc", "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned int tmp;
asm volatile(
- " ldaxr %w0, [%1]\n"
+ " ldaxr %w0, %1\n"
" cbnz %w0, 1f\n"
- " stxr %w0, %w2, [%1]\n"
+ " stxr %w0, %w2, %1\n"
"1:\n"
- : "=&r" (tmp)
- : "r" (&lock->lock), "r" (1)
- : "memory");
+ : "=&r" (tmp), "+Q" (lock->lock)
+ : "r" (1)
+ : "cc", "memory");
return !tmp;
}
@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
- " stlr %w1, [%0]\n"
- : : "r" (&lock->lock), "r" (0) : "memory");
+ " stlr %w1, %0\n"
+ : "=Q" (lock->lock) : "r" (0) : "memory");
}
/*
@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
asm volatile(
" sevl\n"
"1: wfe\n"
- "2: ldaxr %w0, [%1]\n"
+ "2: ldaxr %w0, %1\n"
" cbnz %w0, 1b\n"
- " stxr %w0, %w2, [%1]\n"
+ " stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "memory");
+ : "=&r" (tmp), "+Q" (rw->lock)
+ : "r" (0x80000000)
+ : "cc", "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
unsigned int tmp;
asm volatile(
- " ldaxr %w0, [%1]\n"
+ " ldaxr %w0, %1\n"
" cbnz %w0, 1f\n"
- " stxr %w0, %w2, [%1]\n"
+ " stxr %w0, %w2, %1\n"
"1:\n"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "memory");
+ : "=&r" (tmp), "+Q" (rw->lock)
+ : "r" (0x80000000)
+ : "cc", "memory");
return !tmp;
}
@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
asm volatile(
- " stlr %w1, [%0]\n"
- : : "r" (&rw->lock), "r" (0) : "memory");
+ " stlr %w1, %0\n"
+ : "=Q" (rw->lock) : "r" (0) : "memory");
}
/* write_can_lock - would write_trylock() succeed? */
@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
asm volatile(
" sevl\n"
"1: wfe\n"
- "2: ldaxr %w0, [%2]\n"
+ "2: ldaxr %w0, %2\n"
" add %w0, %w0, #1\n"
" tbnz %w0, #31, 1b\n"
- " stxr %w1, %w0, [%2]\n"
+ " stxr %w1, %w0, %2\n"
" cbnz %w1, 2b\n"
- : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&rw->lock)
- : "memory");
+ : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+ :
+ : "cc", "memory");
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
unsigned int tmp, tmp2;
asm volatile(
- "1: ldxr %w0, [%2]\n"
+ "1: ldxr %w0, %2\n"
" sub %w0, %w0, #1\n"
- " stlxr %w1, %w0, [%2]\n"
+ " stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b\n"
- : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&rw->lock)
- : "memory");
+ : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+ :
+ : "cc", "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
unsigned int tmp, tmp2 = 1;
asm volatile(
- " ldaxr %w0, [%2]\n"
+ " ldaxr %w0, %2\n"
" add %w0, %w0, #1\n"
" tbnz %w0, #31, 1f\n"
- " stxr %w1, %w0, [%2]\n"
+ " stxr %w1, %w0, %2\n"
"1:\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "memory");
+ : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+ :
+ : "cc", "memory");
return !tmp2;
}
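The unlock half of the same conversion, reduced to a standalone sketch (illustrative names): a store-release through an "=Q" output operand, which publishes the critical section's writes and lets the compiler see exactly which location the store hits:

	static inline void example_unlock(unsigned int *lock)
	{
		asm volatile(
		"	stlr	%w1, %0\n"	/* store-release of 0 */
		: "=Q" (*lock)
		: "r" (0)
		: "memory");
	}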
diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h
index 09ff33572aab..48fe7c600e98 100644
--- a/arch/arm64/include/asm/syscalls.h
+++ b/arch/arm64/include/asm/syscalls.h
@@ -23,17 +23,7 @@
/*
* System call wrappers implemented in kernel/entry.S.
*/
-asmlinkage long sys_execve_wrapper(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp);
-asmlinkage long sys_clone_wrapper(unsigned long clone_flags,
- unsigned long newsp,
- void __user *parent_tid,
- unsigned long tls_val,
- void __user *child_tid);
asmlinkage long sys_rt_sigreturn_wrapper(void);
-asmlinkage long sys_sigaltstack_wrapper(const stack_t __user *uss,
- stack_t __user *uoss);
#include <asm-generic/syscalls.h>
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 63f853f8b718..82ce217e94cf 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -14,7 +14,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef CONFIG_COMPAT
-#define __ARCH_WANT_COMPAT_IPC_PARSE_VERSION
#define __ARCH_WANT_COMPAT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
@@ -23,7 +22,9 @@
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_VFORK
#endif
+#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 6d909faebf28..12f22492df4c 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -23,7 +23,7 @@
__SYSCALL(0, sys_restart_syscall)
__SYSCALL(1, sys_exit)
-__SYSCALL(2, compat_sys_fork_wrapper)
+__SYSCALL(2, sys_fork)
__SYSCALL(3, sys_read)
__SYSCALL(4, sys_write)
__SYSCALL(5, compat_sys_open)
@@ -32,7 +32,7 @@ __SYSCALL(7, sys_ni_syscall) /* 7 was sys_waitpid */
__SYSCALL(8, sys_creat)
__SYSCALL(9, sys_link)
__SYSCALL(10, sys_unlink)
-__SYSCALL(11, compat_sys_execve_wrapper)
+__SYSCALL(11, compat_sys_execve)
__SYSCALL(12, sys_chdir)
__SYSCALL(13, sys_ni_syscall) /* 13 was sys_time */
__SYSCALL(14, sys_mknod)
@@ -40,7 +40,7 @@ __SYSCALL(15, sys_chmod)
__SYSCALL(16, sys_lchown16)
__SYSCALL(17, sys_ni_syscall) /* 17 was sys_break */
__SYSCALL(18, sys_ni_syscall) /* 18 was sys_stat */
-__SYSCALL(19, compat_sys_lseek_wrapper)
+__SYSCALL(19, compat_sys_lseek)
__SYSCALL(20, sys_getpid)
__SYSCALL(21, compat_sys_mount)
__SYSCALL(22, sys_ni_syscall) /* 22 was sys_umount */
@@ -93,7 +93,7 @@ __SYSCALL(68, sys_ni_syscall) /* 68 was sys_sgetmask */
__SYSCALL(69, sys_ni_syscall) /* 69 was sys_ssetmask */
__SYSCALL(70, sys_setreuid16)
__SYSCALL(71, sys_setregid16)
-__SYSCALL(72, compat_sys_sigsuspend)
+__SYSCALL(72, sys_sigsuspend)
__SYSCALL(73, compat_sys_sigpending)
__SYSCALL(74, sys_sethostname)
__SYSCALL(75, compat_sys_setrlimit)
@@ -113,8 +113,8 @@ __SYSCALL(88, sys_reboot)
__SYSCALL(89, sys_ni_syscall) /* 89 was sys_readdir */
__SYSCALL(90, sys_ni_syscall) /* 90 was sys_mmap */
__SYSCALL(91, sys_munmap)
-__SYSCALL(92, sys_truncate)
-__SYSCALL(93, sys_ftruncate)
+__SYSCALL(92, compat_sys_truncate)
+__SYSCALL(93, compat_sys_ftruncate)
__SYSCALL(94, sys_fchmod)
__SYSCALL(95, sys_fchown16)
__SYSCALL(96, sys_getpriority)
@@ -141,7 +141,7 @@ __SYSCALL(116, compat_sys_sysinfo)
__SYSCALL(117, sys_ni_syscall) /* 117 was sys_ipc */
__SYSCALL(118, sys_fsync)
__SYSCALL(119, compat_sys_sigreturn_wrapper)
-__SYSCALL(120, compat_sys_clone_wrapper)
+__SYSCALL(120, sys_clone)
__SYSCALL(121, sys_setdomainname)
__SYSCALL(122, sys_newuname)
__SYSCALL(123, sys_ni_syscall) /* 123 was sys_modify_ldt */
@@ -207,11 +207,11 @@ __SYSCALL(182, sys_chown16)
__SYSCALL(183, sys_getcwd)
__SYSCALL(184, sys_capget)
__SYSCALL(185, sys_capset)
-__SYSCALL(186, compat_sys_sigaltstack_wrapper)
+__SYSCALL(186, compat_sys_sigaltstack)
__SYSCALL(187, compat_sys_sendfile)
__SYSCALL(188, sys_ni_syscall) /* 188 reserved */
__SYSCALL(189, sys_ni_syscall) /* 189 reserved */
-__SYSCALL(190, compat_sys_vfork_wrapper)
+__SYSCALL(190, sys_vfork)
__SYSCALL(191, compat_sys_getrlimit) /* SuS compliant getrlimit */
__SYSCALL(192, sys_mmap_pgoff)
__SYSCALL(193, compat_sys_truncate64_wrapper)
@@ -392,11 +392,16 @@ __SYSCALL(367, sys_fanotify_init)
__SYSCALL(368, compat_sys_fanotify_mark_wrapper)
__SYSCALL(369, sys_prlimit64)
__SYSCALL(370, sys_name_to_handle_at)
-__SYSCALL(371, sys_open_by_handle_at)
-__SYSCALL(372, sys_clock_adjtime)
+__SYSCALL(371, compat_sys_open_by_handle_at)
+__SYSCALL(372, compat_sys_clock_adjtime)
__SYSCALL(373, sys_syncfs)
+__SYSCALL(374, compat_sys_sendmmsg)
+__SYSCALL(375, sys_setns)
+__SYSCALL(376, compat_sys_process_vm_readv)
+__SYSCALL(377, compat_sys_process_vm_writev)
+__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */
-#define __NR_compat_syscalls 374
+#define __NR_compat_syscalls 379
/*
* Compat syscall numbers used by the AArch64 kernel.
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
new file mode 100644
index 000000000000..439827271e3d
--- /dev/null
+++ b/arch/arm64/include/asm/virt.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM__VIRT_H
+#define __ASM__VIRT_H
+
+#define BOOT_CPU_MODE_EL2 (0x0e12b007)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * __boot_cpu_mode records what mode CPUs were booted in.
+ * A correctly-implemented bootloader must start all CPUs in the same mode:
+ * In this case, both 32bit halves of __boot_cpu_mode will contain the
+ * same value (either 0 if booted in EL1, BOOT_CPU_MODE_EL2 if booted in EL2).
+ *
+ * Should the bootloader fail to do this, the two values will be different.
+ * This allows the kernel to flag an error when the secondaries have come up.
+ */
+extern u32 __boot_cpu_mode[2];
+
+void __hyp_set_vectors(phys_addr_t phys_vector_base);
+phys_addr_t __hyp_get_vectors(void);
+
+/* Reports the availability of HYP mode */
+static inline bool is_hyp_mode_available(void)
+{
+ return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
+ __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
+}
+
+/* Check if the bootloader has booted CPUs in different modes */
+static inline bool is_hyp_mode_mismatched(void)
+{
+ return __boot_cpu_mode[0] != __boot_cpu_mode[1];
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ! __ASM__VIRT_H */
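A hedged boot-time usage sketch for the helpers above (function name and call site are assumptions):

	static void __init example_report_boot_mode(void)
	{
		if (is_hyp_mode_mismatched())
			pr_warn("CPUs booted in mismatched exception levels\n");
		else if (is_hyp_mode_available())
			pr_info("all CPUs booted at EL2, hyp mode available\n");
	}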