author    David S. Miller <davem@davemloft.net>  2015-11-01 07:15:30 +0300
committer David S. Miller <davem@davemloft.net>  2015-11-01 07:15:30 +0300
commit    b75ec3af27bf011a760e2f44eb25a99b6fbb0fb3 (patch)
tree      89f4fbab2c6194b32a46eb771c4b158585bf0bb5 /arch/arm64
parent    e7b63ff115f21ea6c609cbb08f3d489af627af6e (diff)
parent    523e13455ec9ec4457a5a1d24ff7132949742b70 (diff)
download  linux-b75ec3af27bf011a760e2f44eb25a99b6fbb0fb3.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c  16
-rw-r--r--  arch/arm64/kernel/efi-stub.c          14
-rw-r--r--  arch/arm64/kernel/stacktrace.c         6
-rw-r--r--  arch/arm64/kernel/suspend.c           22
4 files changed, 35 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index bcee7abac68e..937f5e58a4d3 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -284,21 +284,23 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
__asm__ __volatile__( \
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- " mov %w2, %w1\n" \
- "0: ldxr"B" %w1, [%3]\n" \
- "1: stxr"B" %w0, %w2, [%3]\n" \
+ "0: ldxr"B" %w2, [%3]\n" \
+ "1: stxr"B" %w0, %w1, [%3]\n" \
" cbz %w0, 2f\n" \
" mov %w0, %w4\n" \
+ " b 3f\n" \
"2:\n" \
+ " mov %w1, %w2\n" \
+ "3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
- "3: mov %w0, %w5\n" \
- " b 2b\n" \
+ "4: mov %w0, %w5\n" \
+ " b 3b\n" \
" .popsection" \
" .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
- " .quad 0b, 3b\n" \
- " .quad 1b, 3b\n" \
+ " .quad 0b, 4b\n" \
+ " .quad 1b, 4b\n" \
" .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
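
The hunk above reorders the exclusive load/store so that the caller-visible destination register is updated only after the store-exclusive has succeeded; a failed or faulting store no longer clobbers the value that must be stored again on retry. A minimal user-space sketch of that ordering, with illustrative names and __atomic builtins standing in for LDXR/STXR (this is not the kernel macro):

#include <errno.h>

/*
 * Illustrative sketch only: 'mem' stands for the user address and 'data'
 * for the Rt register of the emulated SWP.  The loaded value is kept in a
 * scratch variable and committed to *data only after the store succeeds,
 * so a failure leaves *data intact for the caller's retry loop.
 */
static int swp_emulate_once(unsigned int *mem, unsigned int *data)
{
	unsigned int newval = *data;	/* value the SWP wants to store    */
	unsigned int scratch = *mem;	/* "0: ldxr" into a scratch reg    */

	/* "1: stxr": succeeds only if *mem still holds the loaded value */
	if (!__atomic_compare_exchange_n(mem, &scratch, newval, 0,
					 __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		return -EAGAIN;		/* caller retries; *data untouched */

	*data = scratch;		/* "2: mov %w1, %w2" on success    */
	return 0;
}
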
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 816120ece6bc..78dfbd34b6bf 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -25,10 +25,20 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
unsigned long kernel_size, kernel_memsize = 0;
unsigned long nr_pages;
void *old_image_addr = (void *)*image_addr;
+ unsigned long preferred_offset;
+
+ /*
+ * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
+ * a 2 MB aligned base, which itself may be lower than dram_base, as
+ * long as the resulting offset equals or exceeds it.
+ */
+ preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+ if (preferred_offset < dram_base)
+ preferred_offset += SZ_2M;
/* Relocate the image, if required. */
kernel_size = _edata - _text;
- if (*image_addr != (dram_base + TEXT_OFFSET)) {
+ if (*image_addr != preferred_offset) {
kernel_memsize = kernel_size + (_end - _edata);
/*
@@ -42,7 +52,7 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
* Mustang), we can still place the kernel at the address
* 'dram_base + TEXT_OFFSET'.
*/
- *image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+ *image_addr = *reserve_addr = preferred_offset;
nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
EFI_PAGE_SIZE;
status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
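
The new preferred_offset computation can be checked in isolation: rounding dram_base down to a 2 MB boundary and adding TEXT_OFFSET may land below dram_base, in which case a full 2 MB is added. A standalone sketch, assuming TEXT_OFFSET is 0x80000 (the arm64 default) and using an example unaligned dram_base:

#include <stdio.h>

#define SZ_2M		0x200000UL
#define TEXT_OFFSET	0x80000UL	/* assumed arm64 default */

static unsigned long preferred_offset(unsigned long dram_base)
{
	/* round_down(dram_base, SZ_2M) + TEXT_OFFSET, as in the patch */
	unsigned long offset = (dram_base & ~(SZ_2M - 1)) + TEXT_OFFSET;

	if (offset < dram_base)		/* offset must not precede dram_base */
		offset += SZ_2M;
	return offset;
}

int main(void)
{
	/* dram_base 0x80090000: round_down -> 0x80000000, + TEXT_OFFSET ->
	 * 0x80080000, which is below dram_base, so bump to 0x80280000. */
	printf("0x%lx\n", preferred_offset(0x80090000UL));
	return 0;
}
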
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 407991bf79f5..ccb6078ed9f2 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame)
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
- /*
- * -4 here because we care about the PC at time of bl,
- * not where the return will go.
- */
- frame->pc = *(unsigned long *)(fp + 8) - 4;
+ frame->pc = *(unsigned long *)(fp + 8);
return 0;
}
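
The stacktrace change reports the saved link register as-is instead of rewinding it by 4 bytes to the bl instruction. An illustrative (non-kernel) view of the AAPCS64 frame record being walked, where the caller's frame pointer sits at [fp] and the return address at [fp + 8]:

/* Illustrative struct, not a kernel type: one arm64 frame record. */
struct frame_record {
	struct frame_record *fp;	/* caller's frame record, at [fp] */
	unsigned long lr;		/* return address, at [fp + 8]    */
};

/* One unwind step, mirroring unwind_frame() after the patch: the reported
 * pc is the saved lr itself, with no "- 4" adjustment back to the bl. */
static void unwind_once(struct frame_record **fp, unsigned long *pc)
{
	*pc = (*fp)->lr;
	*fp = (*fp)->fp;
}
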
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 8297d502217e..44ca4143b013 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -80,17 +80,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
if (ret == 0) {
/*
* We are resuming from reset with TTBR0_EL1 set to the
- * idmap to enable the MMU; restore the active_mm mappings in
- * TTBR0_EL1 unless the active_mm == &init_mm, in which case
- * the thread entered cpu_suspend with TTBR0_EL1 set to
- * reserved TTBR0 page tables and should be restored as such.
+ * idmap to enable the MMU; set the TTBR0 to the reserved
+ * page tables to prevent speculative TLB allocations, flush
+ * the local tlb and set the default tcr_el1.t0sz so that
+ * the TTBR0 address space set-up is properly restored.
+ * If the current active_mm != &init_mm we entered cpu_suspend
+ * with mappings in TTBR0 that must be restored, so we switch
+ * them back to complete the address space configuration
+ * restoration before returning.
*/
- if (mm == &init_mm)
- cpu_set_reserved_ttbr0();
- else
- cpu_switch_mm(mm->pgd, mm);
-
+ cpu_set_reserved_ttbr0();
flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
+
+ if (mm != &init_mm)
+ cpu_switch_mm(mm->pgd, mm);
/*
* Restore per-cpu offset before any kernel