author     Linus Torvalds <torvalds@linux-foundation.org>  2019-02-02 03:54:25 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-02-02 03:54:25 +0300
commit     8b050fe42d8ad1a2219201ba6d773e5b0eff1a29 (patch)
tree       9233f2331a440712a3d353cbb8a22205a8cad7c0 /arch
parent     33640d718c5ec9dccdeea49fd23bd9acfc537ce7 (diff)
parent     f7daa9c8fd191724b9ab9580a7be55cd1a67d799 (diff)
download   linux-8b050fe42d8ad1a2219201ba6d773e5b0eff1a29.tar.xz
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
"Although we're still debugging a few minor arm64-specific issues in
mainline, I didn't want to hold this lot up in the meantime.
We've got an additional KASLR fix after the previous one wasn't quite
complete, a fix for a performance regression when mapping executable
pages into userspace and some fixes for kprobe blacklisting. All
candidates for stable.
Summary:
- Fix module loading when KASLR is configured but disabled at runtime
- Fix accidental IPI when mapping user executable pages
- Ensure hyp-stub and KVM world switch code cannot be kprobed"
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: hibernate: Clean the __hyp_text to PoC after resume
arm64: hyp-stub: Forbid kprobing of the hyp-stub
arm64: kprobe: Always blacklist the KVM world-switch code
arm64: kaslr: ensure randomized quantities are clean also when kaslr is off
arm64: Do not issue IPIs for user executable ptes
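For context on the kprobe blacklisting fix: arch_within_kprobe_blacklist() (patched below) is a chain of start-inclusive, end-exclusive range tests against linker-provided section boundaries, and the change moves the __hyp_text test out of the !is_kernel_in_hyp_mode() branch so it always applies. A minimal, self-contained sketch of that range-test pattern; the addresses and helper names here are made-up stand-ins for the kernel's symbols, not the kernel's API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for the linker-provided section boundaries the
 * kernel compares against (e.g. __hyp_text_start/__hyp_text_end). The
 * values are invented for this example.
 */
static const uintptr_t hyp_text_start = 0x10000;
static const uintptr_t hyp_text_end   = 0x18000;

/* Same shape as the arm64 tests: start-inclusive, end-exclusive. */
static bool in_range(uintptr_t addr, uintptr_t start, uintptr_t end)
{
        return addr >= start && addr < end;
}

static bool within_kprobe_blacklist(uintptr_t addr)
{
        /* After the fix, the __hyp_text range is always blacklisted. */
        return in_range(addr, hyp_text_start, hyp_text_end);
}

int main(void)
{
        printf("%d\n", within_kprobe_blacklist(0x10040)); /* prints 1 */
        printf("%d\n", within_kprobe_blacklist(0x18000)); /* prints 0: end is exclusive */
        return 0;
}

The end-exclusive comparison is the point of the pattern: an address equal to the section's end symbol is not treated as part of the section.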
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/kernel/hibernate.c       | 4
-rw-r--r--  arch/arm64/kernel/hyp-stub.S        | 2
-rw-r--r--  arch/arm64/kernel/kaslr.c           | 1
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c  | 6
-rw-r--r--  arch/arm64/mm/flush.c               | 6
5 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 29cdc99688f3..9859e1178e6b 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
                 dcache_clean_range(__idmap_text_start, __idmap_text_end);
 
                 /* Clean kvm setup code to PoC? */
-                if (el2_reset_needed())
+                if (el2_reset_needed()) {
                         dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+                        dcache_clean_range(__hyp_text_start, __hyp_text_end);
+                }
 
                 /* make the crash dump kernel image protected again */
                 crash_post_resume();
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fbaa374..17f325ba831e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
 #include <asm/virt.h>
 
         .text
+        .pushsection    .hyp.text, "ax"
+
         .align 11
 
 ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index ba6b41790fcd..b09b6f75f759 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
          * we end up running with module randomization disabled.
          */
         module_alloc_base = (u64)_etext - MODULES_VSIZE;
+        __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
 
         /*
          * Try to map the FDT early. If this fails, we simply bail,
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a5b338b2542..f17afb99890c 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
             addr < (unsigned long)__entry_text_end) ||
             (addr >= (unsigned long)__idmap_text_start &&
             addr < (unsigned long)__idmap_text_end) ||
+            (addr >= (unsigned long)__hyp_text_start &&
+            addr < (unsigned long)__hyp_text_end) ||
             !!search_exception_tables(addr))
                 return true;
 
         if (!is_kernel_in_hyp_mode()) {
-                if ((addr >= (unsigned long)__hyp_text_start &&
-                    addr < (unsigned long)__hyp_text_end) ||
-                    (addr >= (unsigned long)__hyp_idmap_text_start &&
+                if ((addr >= (unsigned long)__hyp_idmap_text_start &&
                     addr < (unsigned long)__hyp_idmap_text_end))
                         return true;
         }
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 30695a868107..5c9073bace83 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
                 __clean_dcache_area_pou(kaddr, len);
                 __flush_icache_all();
         } else {
-                flush_icache_range(addr, addr + len);
+                /*
+                 * Don't issue kick_all_cpus_sync() after I-cache invalidation
+                 * for user mappings.
+                 */
+                __flush_icache_range(addr, addr + len);
         }
 }
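On the arch/arm64/mm/flush.c hunk: the new comment implies that flush_icache_range() additionally issues kick_all_cpus_sync(), an IPI broadcast that is not needed when the range being synced backs a user executable pte, so sync_icache_aliases() switches to __flush_icache_range(). A rough sketch of that wrapper/worker split with stubbed-out bodies; this illustrates the relationship implied by the comment and is not the arm64 cacheflush implementation:

#include <stdio.h>

/* Stub: stands in for the D-cache clean / I-cache invalidate of [start, end). */
static void __flush_icache_range(unsigned long start, unsigned long end)
{
        printf("clean/invalidate I-cache for [%#lx, %#lx)\n", start, end);
}

/* Stub: stands in for the cross-CPU context-synchronization IPI. */
static void kick_all_cpus_sync(void)
{
        printf("IPI all CPUs for context synchronization\n");
}

/*
 * Wrapper/worker split assumed from the hunk's comment: the wrapper adds
 * the IPI broadcast, so callers that do not need it can use the worker
 * directly.
 */
static void flush_icache_range(unsigned long start, unsigned long end)
{
        __flush_icache_range(start, end);
        kick_all_cpus_sync();
}

int main(void)
{
        flush_icache_range(0x1000, 0x2000);   /* maintenance plus IPI broadcast */
        __flush_icache_range(0x3000, 0x4000); /* maintenance only, as the hunk now does */
        return 0;
}

Per the hunk, the user-pte path in sync_icache_aliases() now calls the worker directly, which is what avoids the accidental IPI mentioned in the pull message.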