author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-26 04:41:56 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-26 04:41:56 +0300
commit    | 5694cecdb092656a822287a6691aa7ce668c8160 (patch)
tree      | 5916f2c5367d8cbe426b3f5a296b513214f6bc2f /arch/arm64/mm
parent    | 13e1ad2be3a85f5c0f76e82af9806b3d12a574d0 (diff)
parent    | 12f799c8c739518e12248bbd000eb0a246e8e5f8 (diff)
download  | linux-5694cecdb092656a822287a6691aa7ce668c8160.tar.xz
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 festive updates from Will Deacon:
"In the end, we ended up with quite a lot more than I expected:
- Support for ARMv8.3 Pointer Authentication in userspace (CRIU and
kernel-side support to come later)
- Support for per-thread stack canaries, pending an update to GCC
that is currently undergoing review
- Support for kexec_file_load(), which permits secure boot of a kexec
payload but also happens to improve the performance of kexec
dramatically because we can avoid the sucky purgatory code from
userspace. Kdump will come later (requires updates to libfdt).
- Optimisation of our dynamic CPU feature framework, so that all
detected features are enabled via a single stop_machine()
invocation
- KPTI whitelisting of Cortex-A CPUs unaffected by Meltdown, so that
they can benefit from global TLB entries when KASLR is not in use
- 52-bit virtual addressing for userspace (kernel remains 48-bit)
- Patch in LSE atomics for per-cpu atomic operations
- Custom preempt.h implementation to avoid unconditional calls to
preempt_schedule() from preempt_enable()
- Support for the new 'SB' Speculation Barrier instruction
- Vectorised implementation of XOR checksumming and CRC32
optimisations
- Workaround for Cortex-A76 erratum #1165522
- Improved compatibility with Clang/LLD
- Support for TX2 system PMUs for profiling the L3 cache and DMC
- Reflect read-only permissions in the linear map by default
- Ensure MMIO reads are ordered with subsequent calls to Xdelay()
- Initial support for memory hotplug
- Tweak the threshold when we invalidate the TLB by-ASID, so that
mremap() performance is improved for ranges spanning multiple PMDs.
- Minor refactoring and cleanups"
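The kexec_file_load() item above refers to the new in-kernel image loader; for orientation, here is a minimal userspace sketch of the call (hypothetical file paths and command line, assuming kernel headers new enough to define __NR_kexec_file_load on arm64 — an illustration, not code from this merge):

/*
 * Illustrative only: load a kernel image via kexec_file_load(). The paths and
 * command line are placeholders, and CAP_SYS_BOOT is required. The loaded
 * image is started later, e.g. via reboot(LINUX_REBOOT_CMD_KEXEC).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const char cmdline[] = "console=ttyAMA0 root=/dev/vda1";
	int kernel_fd = open("/boot/Image", O_RDONLY);
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open");
		return 1;
	}

	/* cmdline_len must count the terminating NUL byte */
	if (syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
		    sizeof(cmdline), cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}

	return 0;
}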
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (125 commits)
arm64: kaslr: print PHYS_OFFSET in dump_kernel_offset()
arm64: sysreg: Use _BITUL() when defining register bits
arm64: cpufeature: Rework ptr auth hwcaps using multi_entry_cap_matches
arm64: cpufeature: Reduce number of pointer auth CPU caps from 6 to 4
arm64: docs: document pointer authentication
arm64: ptr auth: Move per-thread keys from thread_info to thread_struct
arm64: enable pointer authentication
arm64: add prctl control for resetting ptrauth keys
arm64: perf: strip PAC when unwinding userspace
arm64: expose user PAC bit positions via ptrace
arm64: add basic pointer authentication support
arm64/cpufeature: detect pointer authentication
arm64: Don't trap host pointer auth use to EL2
arm64/kvm: hide ptrauth from guests
arm64/kvm: consistently handle host HCR_EL2 flags
arm64: add pointer authentication register bits
arm64: add comments about EC exception levels
arm64: perf: Treat EXCLUDE_EL* bit definitions as unsigned
arm64: kpti: Whitelist Cortex-A CPUs that don't implement the CSV3 field
arm64: enable per-task stack canaries
...
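For the "add prctl control for resetting ptrauth keys" change listed above, a hedged userspace sketch of the new interface follows (the PR_PAC_* constants come from an up-to-date uapi <linux/prctl.h>, and the call fails with EINVAL on CPUs or kernels without pointer authentication — illustration only, not code from this series):

#include <stdio.h>
#include <sys/prctl.h>	/* pulls in the uapi <linux/prctl.h> definitions */

int main(void)
{
	/*
	 * Ask the kernel to regenerate this thread's instruction A-key.
	 * Passing 0 instead of a bitmask resets all of the keys.
	 */
	if (prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0) != 0) {
		perror("PR_PAC_RESET_KEYS");
		return 1;
	}

	return 0;
}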
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r-- | arch/arm64/mm/cache.S       |  3
-rw-r--r-- | arch/arm64/mm/fault.c       |  2
-rw-r--r-- | arch/arm64/mm/hugetlbpage.c | 33
-rw-r--r-- | arch/arm64/mm/init.c        | 12
-rw-r--r-- | arch/arm64/mm/mmu.c         | 35
-rw-r--r-- | arch/arm64/mm/numa.c        | 10
-rw-r--r-- | arch/arm64/mm/pageattr.c    | 21
-rw-r--r-- | arch/arm64/mm/proc.S        | 14
8 files changed, 114 insertions, 16 deletions
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 0c22ede52f90..a194fd0e837f 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area)
  *	- size    - size in question
  */
 ENTRY(__clean_dcache_area_pop)
+alternative_if_not ARM64_HAS_DCPOP
+	b	__clean_dcache_area_poc
+alternative_else_nop_endif
 	dcache_by_line_op cvap, sy, x0, x1, x2, x3
 	ret
 ENDPIPROC(__clean_dcache_area_pop)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7d9571f4ae3d..5fe6d2e40e9b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -160,7 +160,7 @@ void show_pte(unsigned long addr)
 	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-		 VA_BITS, mm->pgd);
+		 mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
 	pgdp = pgd_offset(mm, addr);
 	pgd = READ_ONCE(*pgdp);
 	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index f58ea503ad01..28cbc22d7e30 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -429,6 +429,27 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
 	clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
 }
 
+static void __init add_huge_page_size(unsigned long size)
+{
+	if (size_to_hstate(size))
+		return;
+
+	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
+}
+
+static int __init hugetlbpage_init(void)
+{
+#ifdef CONFIG_ARM64_4K_PAGES
+	add_huge_page_size(PUD_SIZE);
+#endif
+	add_huge_page_size(PMD_SIZE * CONT_PMDS);
+	add_huge_page_size(PMD_SIZE);
+	add_huge_page_size(PAGE_SIZE * CONT_PTES);
+
+	return 0;
+}
+arch_initcall(hugetlbpage_init);
+
 static __init int setup_hugepagesz(char *opt)
 {
 	unsigned long ps = memparse(opt, &opt);
@@ -440,7 +461,7 @@ static __init int setup_hugepagesz(char *opt)
 	case PMD_SIZE * CONT_PMDS:
 	case PMD_SIZE:
 	case PAGE_SIZE * CONT_PTES:
-		hugetlb_add_hstate(ilog2(ps) - PAGE_SHIFT);
+		add_huge_page_size(ps);
 		return 1;
 	}
@@ -449,13 +470,3 @@ static __init int setup_hugepagesz(char *opt)
 	return 0;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-		hugetlb_add_hstate(CONT_PTE_SHIFT);
-	return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0340e45655c6..cbba537ba3d2 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -59,6 +59,8 @@
  * that cannot be mistaken for a real physical address.
  */
 s64 memstart_addr __ro_after_init = -1;
+EXPORT_SYMBOL(memstart_addr);
+
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -289,6 +291,14 @@ int pfn_valid(unsigned long pfn)
 	if ((addr >> PAGE_SHIFT) != pfn)
 		return 0;
+
+#ifdef CONFIG_SPARSEMEM
+	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+		return 0;
+
+	if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
+		return 0;
+#endif
 	return memblock_is_map_memory(addr);
 }
 EXPORT_SYMBOL(pfn_valid);
@@ -607,7 +617,7 @@ void __init mem_init(void)
 	 * detected at build time already.
 	 */
 #ifdef CONFIG_COMPAT
-	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
+	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
 #endif
 
 	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d1d6601b385d..da513a1facf4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -52,6 +52,8 @@
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+u64 vabits_user __ro_after_init;
+EXPORT_SYMBOL(vabits_user);
 
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
@@ -451,7 +453,7 @@ static void __init map_mem(pgd_t *pgdp)
 	struct memblock_region *reg;
 	int flags = 0;
 
-	if (debug_pagealloc_enabled())
+	if (rodata_full || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -552,7 +554,19 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
 static int __init parse_rodata(char *arg)
 {
-	return strtobool(arg, &rodata_enabled);
+	int ret = strtobool(arg, &rodata_enabled);
+	if (!ret) {
+		rodata_full = false;
+		return 0;
+	}
+
+	/* permit 'full' in addition to boolean options */
+	if (strcmp(arg, "full"))
+		return -EINVAL;
+
+	rodata_enabled = true;
+	rodata_full = true;
+	return 0;
 }
 early_param("rodata", parse_rodata);
@@ -1032,3 +1046,20 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
 	pmd_free(NULL, table);
 	return 1;
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+		    bool want_memblock)
+{
+	int flags = 0;
+
+	if (rodata_full || debug_pagealloc_enabled())
+		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+
+	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
+			     size, PAGE_KERNEL, pgd_pgtable_alloc, flags);
+
+	return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
+			   altmap, want_memblock);
+}
+#endif
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 27a31efd9e8e..ae34e3a1cef1 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -466,3 +466,13 @@ void __init arm64_numa_init(void)
 	numa_init(dummy_numa_init);
 }
+
+/*
+ * We hope that we will be hotplugging memory on nodes we already know about,
+ * such that acpi_get_node() succeeds and we never fall back to this...
+ */
+int memory_add_physaddr_to_nid(u64 addr)
+{
+	pr_warn("Unknown node for memory at 0x%llx, assuming node 0\n", addr);
+	return 0;
+}
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index a56359373d8b..6cd645edcf35 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -25,6 +25,8 @@ struct page_change_data {
 	pgprot_t clear_mask;
 };
 
+bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
+
 static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
 			void *data)
 {
@@ -64,6 +66,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 	unsigned long size = PAGE_SIZE*numpages;
 	unsigned long end = start + size;
 	struct vm_struct *area;
+	int i;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -93,6 +96,24 @@ static int change_memory_common(unsigned long addr, int numpages,
 	if (!numpages)
 		return 0;
 
+	/*
+	 * If we are manipulating read-only permissions, apply the same
+	 * change to the linear mapping of the pages that back this VM area.
+	 */
+	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+			    pgprot_val(clear_mask) == PTE_RDONLY)) {
+		for (i = 0; i < area->nr_pages; i++) {
+			__change_memory_common((u64)page_address(area->pages[i]),
+					       PAGE_SIZE, set_mask, clear_mask);
+		}
+	}
+
+	/*
+	 * Get rid of potentially aliasing lazily unmapped vm areas that may
+	 * have permissions set that deviate from the ones we are setting here.
+	 */
+	vm_unmap_aliases();
+
 	return __change_memory_common(start, size, set_mask, clear_mask);
 }
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 2c75b0b903ae..e05b3ce1db6b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -182,6 +182,7 @@ ENDPROC(cpu_do_switch_mm)
 .macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
 	adrp	\tmp1, empty_zero_page
 	phys_to_ttbr \tmp2, \tmp1
+	offset_ttbr1 \tmp2
 	msr	ttbr1_el1, \tmp2
 	isb
 	tlbi	vmalle1
@@ -200,6 +201,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
 	__idmap_cpu_set_reserved_ttbr1 x1, x3
 
+	offset_ttbr1 x0
 	msr	ttbr1_el1, x0
 	isb
@@ -254,6 +256,7 @@ ENTRY(idmap_kpti_install_ng_mappings)
 	pte	.req	x16
 
 	mrs	swapper_ttb, ttbr1_el1
+	restore_ttbr1	swapper_ttb
 	adr	flag_ptr, __idmap_kpti_flag
 
 	cbnz	cpu, __idmap_kpti_secondary
@@ -373,6 +376,7 @@ __idmap_kpti_secondary:
 	cbnz	w18, 1b
 
 	/* All done, act like nothing happened */
+	offset_ttbr1 swapper_ttb
 	msr	ttbr1_el1, swapper_ttb
 	isb
 	ret
@@ -446,7 +450,15 @@ ENTRY(__cpu_setup)
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
 			TCR_TBI0 | TCR_A1
-	tcr_set_idmap_t0sz	x10, x9
+
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+	ldr_l		x9, vabits_user
+	sub		x9, xzr, x9
+	add		x9, x9, #64
+#else
+	ldr_l		x9, idmap_t0sz
+#endif
+	tcr_set_t0sz	x10, x9
 
 	/*
 	 * Set the IPS bits in TCR_EL1.
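One practical consequence of the pageattr.c hunk above: with rodata_full set (the new rodata=full mode, enabled by default through CONFIG_RODATA_FULL_DEFAULT_ENABLED), read-only permission changes on a vmalloc area are mirrored onto the linear-map alias of its backing pages. A hypothetical in-kernel sketch of a caller that benefits from this behaviour (names and usage are illustrative, not taken from this merge):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void *sealed_page;	/* illustrative state for this sketch */

static int seal_page(const void *src, size_t len)
{
	if (len > PAGE_SIZE)
		return -EINVAL;

	sealed_page = vmalloc(PAGE_SIZE);
	if (!sealed_page)
		return -ENOMEM;

	memcpy(sealed_page, src, len);

	/*
	 * Make the page read-only. With rodata_full, change_memory_common()
	 * also write-protects the linear-map alias of the backing page.
	 */
	return set_memory_ro((unsigned long)sealed_page, 1);
}

static void unseal_page(void)
{
	set_memory_rw((unsigned long)sealed_page, 1);	/* restore before freeing */
	vfree(sealed_page);
}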