Diffstat (limited to 'arch/arm64')
-rw-r--r--   arch/arm64/Kconfig                       |  15
-rw-r--r--   arch/arm64/boot/dts/broadcom/ns2.dtsi    |  11
-rw-r--r--   arch/arm64/include/asm/cpufeature.h      |   2
-rw-r--r--   arch/arm64/include/asm/extable.h         |  25
-rw-r--r--   arch/arm64/include/asm/kvm_host.h        |   3
-rw-r--r--   arch/arm64/include/asm/pgtable-types.h   |   4
-rw-r--r--   arch/arm64/include/asm/uaccess.h         |  83
-rw-r--r--   arch/arm64/include/asm/unistd.h          |   2
-rw-r--r--   arch/arm64/include/asm/unistd32.h        |   8
-rw-r--r--   arch/arm64/kernel/arm64ksyms.c           |   2
-rw-r--r--   arch/arm64/kernel/cpuidle.c              |   2
-rw-r--r--   arch/arm64/kernel/kaslr.c                |  10
-rw-r--r--   arch/arm64/kernel/probes/kprobes.c       |   6
-rw-r--r--   arch/arm64/kvm/handle_exit.c             |  19
-rw-r--r--   arch/arm64/kvm/hyp/tlb.c                 |  64
-rw-r--r--   arch/arm64/lib/copy_in_user.S            |   4
-rw-r--r--   arch/arm64/mm/kasan_init.c               |   2
17 files changed, 151 insertions, 111 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a39029b5414e..3c833ff3303c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -115,6 +115,7 @@ config ARM64
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
+	select ARCH_HAS_RAW_COPY_USER
 	help
 	  ARM 64-bit (AArch64) Linux support.
@@ -508,6 +509,16 @@ config QCOM_FALKOR_ERRATUM_1009
 
 	  If unsure, say Y.
 
+config QCOM_QDF2400_ERRATUM_0065
+	bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+	default y
+	help
+	  On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+	  ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+	  been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+	  If unsure, say Y.
+
 endmenu
@@ -1063,6 +1074,10 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 endmenu
 
 menu "Power management options"
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 9f9e203c09c5..bcb03fc32665 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -114,6 +114,7 @@
 		pcie0: pcie@20020000 {
 			compatible = "brcm,iproc-pcie";
 			reg = <0 0x20020000 0 0x1000>;
+			dma-coherent;
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
@@ -144,6 +145,7 @@
 		pcie4: pcie@50020000 {
 			compatible = "brcm,iproc-pcie";
 			reg = <0 0x50020000 0 0x1000>;
+			dma-coherent;
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
@@ -174,6 +176,7 @@
 		pcie8: pcie@60c00000 {
 			compatible = "brcm,iproc-pcie-paxc";
 			reg = <0 0x60c00000 0 0x1000>;
+			dma-coherent;
 			linux,pci-domain = <8>;
 
 			bus-range = <0x0 0x1>;
@@ -203,6 +206,7 @@
 			      <0x61030000 0x100>;
 			reg-names = "amac_base", "idm_base", "nicpm_base";
 			interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 			phy-handle = <&gphy0>;
 			phy-mode = "rgmii";
 			status = "disabled";
@@ -213,6 +217,7 @@
 			reg = <0x612c0000 0x445>;	/* PDC FS0 regs */
 			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -222,6 +227,7 @@
 			reg = <0x612e0000 0x445>;	/* PDC FS1 regs */
 			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -231,6 +237,7 @@
 			reg = <0x61300000 0x445>;	/* PDC FS2 regs */
 			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -240,6 +247,7 @@
 			reg = <0x61320000 0x445>;	/* PDC FS3 regs */
 			interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -644,6 +652,7 @@
 		sata: ahci@663f2000 {
 			compatible = "brcm,iproc-ahci", "generic-ahci";
 			reg = <0x663f2000 0x1000>;
+			dma-coherent;
 			reg-names = "ahci";
 			interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
@@ -667,6 +676,7 @@
 			compatible = "brcm,sdhci-iproc-cygnus";
 			reg = <0x66420000 0x100>;
 			interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 			bus-width = <8>;
 			clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
 			status = "disabled";
@@ -676,6 +686,7 @@
 			compatible = "brcm,sdhci-iproc-cygnus";
 			reg = <0x66430000 0x100>;
 			interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 			bus-width = <8>;
 			clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
 			status = "disabled";
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 05310ad8c5ab..f31c48d0cd68 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
 static inline bool system_uses_ttbr0_pan(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-		!cpus_have_cap(ARM64_HAS_PAN);
+		!cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
 #endif /* __ASSEMBLY__ */
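The cpus_have_const_cap() conversion above is worth a note: cpus_have_cap() tests a bitmap on every call, while cpus_have_const_cap() compiles down to a jump-label branch that is patched once the CPU capabilities have been finalized, which suits a helper like system_uses_ttbr0_pan() that sits on hot uaccess paths. A rough sketch of the constant-cap helper, assuming the mainline cpu_hwcap_keys static-key array (not part of this diff):

static inline bool cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	/* patched branch, no memory load once caps are finalized */
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}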
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
new file mode 100644
index 000000000000..42f50f15a44c
--- /dev/null
+++ b/arch/arm64/include/asm/extable.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+	int insn, fixup;
+};
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+extern int fixup_exception(struct pt_regs *regs);
+#endif
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f21fd3894370..e7705e7bb07b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,8 +30,7 @@
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 512
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index 69b2fd41503c..345a072b5856 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t;
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
 #if CONFIG_PGTABLE_LEVELS == 2
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #elif CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
+#elif CONFIG_PGTABLE_LEVELS == 4
+#include <asm-generic/5level-fixup.h>
 #endif
 
 #endif	/* __ASM_PGTABLE_TYPES_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5308d696311b..ba497172610d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -28,38 +28,12 @@
 #include <linux/bitops.h>
 #include <linux/kasan-checks.h>
 #include <linux/string.h>
-#include <linux/thread_info.h>
 
 #include <asm/cpufeature.h>
 #include <asm/ptrace.h>
-#include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The exception table consists of pairs of relative offsets: the first
- * is the relative offset to an instruction that is allowed to fault,
- * and the second is the relative offset at which the program should
- * continue. No registers are modified, so it is entirely up to the
- * continuation code to figure out what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	int insn, fixup;
-};
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
 
 #define KERNEL_DS	(-1UL)
 #define get_ds()	(KERNEL_DS)
@@ -357,58 +331,13 @@ do {									\
 })
 
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define raw_copy_from_user __arch_copy_from_user
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
+#define raw_copy_to_user __arch_copy_to_user
+extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-
-static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	kasan_check_write(to, n);
-	check_object_size(to, n, false);
-	return __arch_copy_from_user(to, from, n);
-}
-
-static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	kasan_check_read(from, n);
-	check_object_size(from, n, true);
-	return __arch_copy_to_user(to, from, n);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long res = n;
-	kasan_check_write(to, n);
-	check_object_size(to, n, false);
-
-	if (access_ok(VERIFY_READ, from, n)) {
-		res = __arch_copy_from_user(to, from, n);
-	}
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
-}
-
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	kasan_check_read(from, n);
-	check_object_size(from, n, true);
-
-	if (access_ok(VERIFY_WRITE, to, n)) {
-		n = __arch_copy_to_user(to, from, n);
-	}
-	return n;
-}
-
-static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
-		n = __copy_in_user(to, from, n);
-	return n;
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
 
 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 {
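With ARCH_HAS_RAW_COPY_USER selected in the Kconfig hunk above, the access_ok() checks, KASAN annotations and short-copy zeroing that used to live in this header move into the generic linux/uaccess.h wrappers; the architecture now only supplies raw_copy_{from,to,in}_user(), each returning the number of bytes it failed to copy. A condensed sketch of the generic caller contract (paraphrased from the common code, not part of this diff):

static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);	/* bytes NOT copied */
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */
	return res;
}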
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e78ac26324bd..bdbeb06dc11e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		394
+#define __NR_compat_syscalls		398
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index b7e8ef16ff0d..c66b51aab195 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
 __SYSCALL(__NR_preadv2, compat_sys_preadv2)
 #define __NR_pwritev2 393
 __SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 394
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 395
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 396
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 397
+__SYSCALL(__NR_statx, sys_statx)
 
 /*
  * Please add new compat syscalls above this comment and update
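The numbering invariant here: the four new entries occupy slots 394-397, so __NR_compat_syscalls, which is one past the highest slot, moves from 394 to 398. unistd32.h is expanded a second time to populate the compat syscall table; roughly, the table builder in arch/arm64/kernel/sys32.c works like this (simplified sketch, not part of this diff):

#undef __SYSCALL
#define __SYSCALL(nr, sym)	[nr] = sym,

void * const compat_sys_call_table[__NR_compat_syscalls] = {
	/* every slot defaults to -ENOSYS ... */
	[0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
	/* ... and each __SYSCALL() line overrides one slot */
#include <asm/unistd32.h>
};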
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index e9c4dc9e0ada..67368c7329c0 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(__arch_copy_from_user);
 EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(raw_copy_in_user);
 
 /* physical memory */
 EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 75a0f8acef66..fd691087dc9a 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
 }
 
 /**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
  * @arg: argument to pass to CPU suspend operations
  *
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24ef628c..d7e90d97f5c4 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image.
+	 * happens, increase the KASLR offset by the size of the kernel image
+	 * rounded up by SWAPPER_BLOCK_SIZE.
 	 */
 	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
-		offset = (offset + (u64)(_end - _text)) & mask;
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+		u64 kimg_sz = _end - _text;
+		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+			& mask;
+	}
 
 	if (IS_ENABLED(CONFIG_KASAN))
 		/*
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a07aae5b8a2..c5c45942fb6e 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 	return 0;
 }
 
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-				       unsigned long val, void *data)
-{
-	return NOTIFY_DONE;
-}
-
 static void __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p, *cur_kprobe;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 1bfe30dfbfe7..fa1b18e364fc 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
+		      hsr, esr_get_class_string(hsr));
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
 	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
 	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
 	u8 hsr_ec = ESR_ELx_EC(hsr);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
-			hsr, esr_get_class_string(hsr));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
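The [0 ... ESR_ELx_EC_MAX] entry uses the GNU C range-designator extension: every slot is first filled with kvm_handle_unknown_ec, and the explicit designators that follow override individual slots. That is why kvm_get_exit_handler() can drop its bounds/NULL check, and why an unhandled exception class now injects an UNDEF into the guest instead of BUG()ing the host. A minimal standalone illustration of the idiom (hypothetical names):

typedef int (*handler_fn)(void);

static int handle_unknown(void) { return -1; }
static int handle_known(void)   { return 0; }

#define EC_KNOWN	0x01
#define EC_MAX		0x3f

/* every index defaults to handle_unknown; later designators win */
static handler_fn handlers[EC_MAX + 1] = {
	[0 ... EC_MAX]	= handle_unknown,
	[EC_KNOWN]	= handle_known,
};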
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index e8e7ba2bc11f..9e1d2b75eecd 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -18,14 +18,62 @@
 #include <asm/kvm_hyp.h>
 #include <asm/tlbflush.h>
 
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+{
+	u64 val;
+
+	/*
+	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+	 * most TLB operations target EL2/EL0. In order to affect the
+	 * guest TLBs (EL1/EL0), we need to change one of these two
+	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+	 * let's flip TGE before executing the TLB operation.
+	 */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	val = read_sysreg(hcr_el2);
+	val &= ~HCR_TGE;
+	write_sysreg(val, hcr_el2);
+	isb();
+}
+
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+{
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+}
+
+static hyp_alternate_select(__tlb_switch_to_guest,
+			    __tlb_switch_to_guest_nvhe,
+			    __tlb_switch_to_guest_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+{
+	/*
+	 * We're done with the TLB operation, let's restore the host's
+	 * view of HCR_EL2.
+	 */
+	write_sysreg(0, vttbr_el2);
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+}
+
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+{
+	write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__tlb_switch_to_host,
+			    __tlb_switch_to_host_nvhe,
+			    __tlb_switch_to_host_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
 	/* Switch to requested VMID */
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
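hyp_alternate_select() defines a function that returns a pointer to one of the two helpers, with the selection patched in at boot by the alternatives framework keyed on ARM64_HAS_VIRT_HOST_EXTN, hence the double-call syntax __tlb_switch_to_guest()(kvm). A plain-C analogue of the resulting behaviour (illustration only; the real macro patches instructions rather than branching at run time):

typedef void (*tlb_switch_fn)(struct kvm *);

static tlb_switch_fn __tlb_switch_to_guest(void)
{
	/* stands in for the boot-time code patching */
	if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
		return __tlb_switch_to_guest_vhe;
	return __tlb_switch_to_guest_nvhe;
}

/* caller: pick the helper, then invoke it */
__tlb_switch_to_guest()(kvm);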
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 47184c3a97da..b24a830419ad 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -64,14 +64,14 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_in_user)
+ENTRY(raw_copy_in_user)
 	uaccess_enable_not_uao x3, x4
 	add	end, x0, x2
#include "copy_template.S"
 	uaccess_disable_not_uao x3
 	mov	x0, #0
 	ret
-ENDPROC(__copy_in_user)
+ENDPROC(raw_copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 55d1e9205543..687a358a3733 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -162,7 +162,7 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
 	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-			 pfn_to_nid(virt_to_pfn(_text)));
+			 pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
 	/*
 	 * vmemmap_populate() has populated the shadow region that covers the
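On the kasan_init.c change: with KASLR, _text is a kernel-image address rather than a linear-map one, and virt_to_pfn() is only well defined for the latter (CONFIG_DEBUG_VIRTUAL catches the mismatch). lm_alias() re-expresses an image symbol as its linear-map alias; its definition in include/linux/mm.h is essentially:

/* map a kernel-image symbol to its linear-map address */
#define lm_alias(x)	__va(__pa_symbol(x))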