Diffstat (limited to 'arch/arm/kernel')
29 files changed, 1325 insertions, 445 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 902397dd1000..752725dcbf42 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -16,7 +16,7 @@ CFLAGS_REMOVE_return_address.o = -pg
 # Object file lists.
 
 obj-y		:= elf.o entry-common.o irq.o opcodes.o \
-		   process.o ptrace.o return_address.o \
+		   process.o ptrace.o reboot.o return_address.o \
 		   setup.o signal.o sigreturn_codes.o \
 		   stacktrace.o sys_arm.o time.o traps.o
@@ -34,7 +34,6 @@ obj-$(CONFIG_CPU_IDLE)	+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
 obj-$(CONFIG_MODULES)		+= armksyms.o module.o
-obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o
@@ -75,6 +74,7 @@ obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o
 CFLAGS_pj4-cp0.o		:= -marm
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
+obj-$(CONFIG_VDSO)		+= vdso.o
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
 obj-y		+= io.o
@@ -86,7 +86,7 @@ obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
 ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y		+= psci.o
+obj-y		+= psci.o psci-call.o
 obj-$(CONFIG_SMP)		+= psci_smp.o
 endif
diff --git a/arch/arm/kernel/arthur.c b/arch/arm/kernel/arthur.c
deleted file mode 100644
index 321c5291d05f..000000000000
--- a/arch/arm/kernel/arthur.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- *  linux/arch/arm/kernel/arthur.c
- *
- *  Copyright (C) 1998, 1999, 2000, 2001 Philip Blundell
- *
- * Arthur personality
- */
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/personality.h>
-#include <linux/stddef.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include <asm/ptrace.h>
-
-/* Arthur doesn't have many signals, and a lot of those that it does
-   have don't map easily to any Linux equivalent.  Never mind.  */
-
-#define ARTHUR_SIGABRT		1
-#define ARTHUR_SIGFPE		2
-#define ARTHUR_SIGILL		3
-#define ARTHUR_SIGINT		4
-#define ARTHUR_SIGSEGV		5
-#define ARTHUR_SIGTERM		6
-#define ARTHUR_SIGSTAK		7
-#define ARTHUR_SIGUSR1		8
-#define ARTHUR_SIGUSR2		9
-#define ARTHUR_SIGOSERROR	10
-
-static unsigned long arthur_to_linux_signals[32] = {
-	0,	1,	2,	3,	4,	5,	6,	7,
-	8,	9,	10,	11,	12,	13,	14,	15,
-	16,	17,	18,	19,	20,	21,	22,	23,
-	24,	25,	26,	27,	28,	29,	30,	31
-};
-
-static unsigned long linux_to_arthur_signals[32] = {
-	0,		-1,		ARTHUR_SIGINT,	-1,
-	ARTHUR_SIGILL,	5,		ARTHUR_SIGABRT,	7,
-	ARTHUR_SIGFPE,	9,		ARTHUR_SIGUSR1,	ARTHUR_SIGSEGV,
-	ARTHUR_SIGUSR2,	13,		14,		ARTHUR_SIGTERM,
-	16,		17,		18,		19,
-	20,		21,		22,		23,
-	24,		25,		26,		27,
-	28,		29,		30,		31
-};
-
-static void arthur_lcall7(int nr, struct pt_regs *regs)
-{
-	struct siginfo info;
-	info.si_signo = SIGSWI;
-	info.si_errno = nr;
-	/* Bounce it to the emulator */
-	send_sig_info(SIGSWI, &info, current);
-}
-
-static struct exec_domain arthur_exec_domain = {
-	.name		= "Arthur",
-	.handler	= arthur_lcall7,
-	.pers_low	= PER_RISCOS,
-	.pers_high	= PER_RISCOS,
-	.signal_map	= arthur_to_linux_signals,
-	.signal_invmap	= linux_to_arthur_signals,
-	.module		= THIS_MODULE,
-};
-/*
- * We could do with some locking to stop Arthur being removed while
- * processes are using it.
- */
-
-static int __init arthur_init(void)
-{
-	return register_exec_domain(&arthur_exec_domain);
-}
-
-static void __exit arthur_exit(void)
-{
-	unregister_exec_domain(&arthur_exec_domain);
-}
-
-module_init(arthur_init);
-module_exit(arthur_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 2d2d6087b9b1..871b8267d211 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -25,6 +25,7 @@
 #include <asm/memory.h>
 #include <asm/procinfo.h>
 #include <asm/suspend.h>
+#include <asm/vdso_datapage.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <linux/kbuild.h>
 
@@ -66,7 +67,6 @@ int main(void)
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
-  DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
   DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
   DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
@@ -190,7 +190,6 @@ int main(void)
   DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
   DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar));
   DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
-#ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
   DEFINE(VGIC_V2_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
   DEFINE(VGIC_V2_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
@@ -200,15 +199,16 @@ int main(void)
   DEFINE(VGIC_V2_CPU_APR,	offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
   DEFINE(VGIC_V2_CPU_LR,	offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
   DEFINE(VGIC_CPU_NR_LR,	offsetof(struct vgic_cpu, nr_lr));
-#ifdef CONFIG_KVM_ARM_TIMER
   DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
   DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
   DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
   DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
-#endif
   DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
-#endif
   DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
 #endif
+  BLANK();
+#ifdef CONFIG_VDSO
+  DEFINE(VDSO_DATA_SIZE,	sizeof(union vdso_data_store));
+#endif
   return 0;
 }
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index ab19b7c03423..fcbbbb1b9e95 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -618,21 +618,15 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
-	struct pci_sys_data *root = dev->sysdata;
-	unsigned long phys;
-
-	if (mmap_state == pci_mmap_io) {
+	if (mmap_state == pci_mmap_io)
 		return -EINVAL;
-	} else {
-		phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
-	}
 
 	/*
 	 * Mark this as IO
 	 */
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (remap_pfn_range(vma, vma->vm_start, phys,
+	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			     vma->vm_end - vma->vm_start,
 			     vma->vm_page_prot))
 		return -EAGAIN;
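The pci_mmap_page_range() change drops the per-bus mem_offset adjustment: vma->vm_pgoff now arrives as the physical page frame to map. For context, a minimal sketch of the same remap_pfn_range() pattern in an ordinary driver mmap handler; mydrv_mmap and MYDRV_PHYS_BASE are invented names, not part of this patch:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

#define MYDRV_PHYS_BASE	0x40000000UL	/* hypothetical device window */

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Map as uncached device memory, as pci_mmap_page_range() does. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* vm_pgoff selects the page offset inside the device window. */
	if (remap_pfn_range(vma, vma->vm_start,
			    (MYDRV_PHYS_BASE >> PAGE_SHIFT) + vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}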
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 89545f6c8403..318da33465f4 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -10,8 +10,28 @@
  */
 
 #include <linux/cpuidle.h>
-#include <asm/proc-fns.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
+#include <asm/cpuidle.h>
+
+extern struct of_cpuidle_method __cpuidle_method_of_table[];
+
+static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
+	__used __section(__cpuidle_method_of_table_end);
+
+static struct cpuidle_ops cpuidle_ops[NR_CPUS];
+
+/**
+ * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
+ * @dev: not used
+ * @drv: not used
+ * @index: not used
+ *
+ * A trivial wrapper to allow the cpu_do_idle function to be assigned as a
+ * cpuidle callback by matching the function signature.
+ *
+ * Returns the index passed as parameter
+ */
 int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
@@ -19,3 +39,114 @@ int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 
 	return index;
 }
+
+/**
+ * arm_cpuidle_suspend() - function to enter low power idle states
+ * @index: an integer used as an identifier for the low level PM callbacks
+ *
+ * This function calls the underlying arch specific low level PM code as
+ * registered at the init time.
+ *
+ * Returns -EOPNOTSUPP if no suspend callback is defined, the result of the
+ * callback otherwise.
+ */
+int arm_cpuidle_suspend(int index)
+{
+	int ret = -EOPNOTSUPP;
+	int cpu = smp_processor_id();
+
+	if (cpuidle_ops[cpu].suspend)
+		ret = cpuidle_ops[cpu].suspend(cpu, index);
+
+	return ret;
+}
+
+/**
+ * arm_cpuidle_get_ops() - find a registered cpuidle_ops by name
+ * @method: the method name
+ *
+ * Search in the __cpuidle_method_of_table array the cpuidle ops matching the
+ * method name.
+ *
+ * Returns a struct cpuidle_ops pointer, NULL if not found.
+ */
+static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
+{
+	struct of_cpuidle_method *m = __cpuidle_method_of_table;
+
+	for (; m->method; m++)
+		if (!strcmp(m->method, method))
+			return m->ops;
+
+	return NULL;
+}
+
+/**
+ * arm_cpuidle_read_ops() - Initialize the cpuidle ops with the device tree
+ * @dn: a pointer to a struct device node corresponding to a cpu node
+ * @cpu: the cpu identifier
+ *
+ * Get the method name defined in the 'enable-method' property, retrieve the
+ * associated cpuidle_ops and do a struct copy. This copy is needed because all
+ * cpuidle_ops are tagged __initdata and will be unloaded after the init
+ * process.
+ *
+ * Return 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP
+ * if no cpuidle_ops is registered for the 'enable-method'.
+ */
+static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
+{
+	const char *enable_method;
+	struct cpuidle_ops *ops;
+
+	enable_method = of_get_property(dn, "enable-method", NULL);
+	if (!enable_method)
+		return -ENOENT;
+
+	ops = arm_cpuidle_get_ops(enable_method);
+	if (!ops) {
+		pr_warn("%s: unsupported enable-method property: %s\n",
+			dn->full_name, enable_method);
+		return -EOPNOTSUPP;
+	}
+
+	cpuidle_ops[cpu] = *ops; /* structure copy */
+
+	pr_notice("cpuidle: enable-method property '%s'"
+		  " found operations\n", enable_method);
+
+	return 0;
+}
+
+/**
+ * arm_cpuidle_init() - Initialize cpuidle_ops for a specific cpu
+ * @cpu: the cpu to be initialized
+ *
+ * Initialize the cpuidle ops with the device for the cpu and then call
+ * the cpu's idle initialization callback. This may fail if the underlying HW
+ * is not operational.
+ *
+ * Returns:
+ *	0 on success,
+ *	-ENODEV if it fails to find the cpu node in the device tree,
+ *	-EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu,
+ *	-ENOENT if it fails to find an 'enable-method' property,
+ *	-ENXIO if the HW reports a failure or a misconfiguration,
+ *	-ENOMEM if the HW reports a memory allocation failure
+ */
+int __init arm_cpuidle_init(int cpu)
+{
+	struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+	int ret;
+
+	if (!cpu_node)
+		return -ENODEV;
+
+	ret = arm_cpuidle_read_ops(cpu_node, cpu);
+	if (!ret && cpuidle_ops[cpu].init)
+		ret = cpuidle_ops[cpu].init(cpu_node, cpu);
+
+	of_node_put(cpu_node);
+
+	return ret;
+}
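A platform hooks into the __cpuidle_method_of_table lookup via the CPUIDLE_METHOD_OF_DECLARE() helper from asm/cpuidle.h. A minimal sketch under stated assumptions: the names myplat_* and "vendor,myplat" are invented, and the callback signatures are inferred from the call sites in this file:

#include <linux/init.h>
#include <linux/of.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>

static int __init myplat_cpuidle_init(struct device_node *cpu_node, int cpu)
{
	/* Probe this cpu's power controller; return 0 on success, or
	 * -ENXIO/-ENOMEM as documented for arm_cpuidle_init() above. */
	return 0;
}

static int myplat_cpuidle_suspend(int cpu, unsigned long arg)
{
	/* Program the low-power state selected by 'arg', then enter it. */
	cpu_do_idle();
	return 0;
}

static struct cpuidle_ops myplat_cpuidle_ops __initdata = {
	.init		= myplat_cpuidle_init,
	.suspend	= myplat_cpuidle_suspend,
};

/* Matched against a cpu node carrying enable-method = "vendor,myplat";
 * arm_cpuidle_read_ops() copies the structure before __initdata is freed. */
CPUIDLE_METHOD_OF_DECLARE(myplat_idle, "vendor,myplat", &myplat_cpuidle_ops);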
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 672b21942fff..570306c49406 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -545,7 +545,7 @@ ENDPROC(__und_usr)
 /*
  * The out of line fixup for the ldrt instructions above.
  */
-	.pushsection .fixup, "ax"
+	.pushsection .text.fixup, "ax"
 	.align	2
 4:	str	r4, [sp, #S_PC]			@ retry current instruction
 	ret	r9
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 01963273c07a..3637973a9708 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -138,9 +138,9 @@ ENTRY(stext)
 						@ mmu has been enabled
 	adr	lr, BSYM(1f)			@ return (PIC) address
 	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
- ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
- THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
- THUMB(	ret	r12				)
+	ldr	r12, [r10, #PROCINFO_INITFUNC]
+	add	r12, r12, r10
+	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
 	.ltorg
@@ -386,10 +386,10 @@ ENTRY(secondary_startup)
 	ldr	r8, [r7, lr]			@ get secondary_data.swapper_pg_dir
 	adr	lr, BSYM(__enable_mmu)		@ return address
 	mov	r13, r12			@ __secondary_switched address
- ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
-						  @ (return control reg)
- THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
- THUMB(	ret	r12				)
+	ldr	r12, [r10, #PROCINFO_INITFUNC]
+	add	r12, r12, r10			@ initialise processor
+						@ (return control reg)
+	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index c4cc50e58c13..a71501ff6f18 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -22,6 +22,7 @@
 #include <asm/suspend.h>
 #include <asm/memory.h>
 #include <asm/sections.h>
+#include "reboot.h"
 
 int pfn_is_nosave(unsigned long pfn)
 {
@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
 
 	ret = swsusp_save();
 	if (ret == 0)
-		soft_restart(virt_to_phys(cpu_resume));
+		_soft_restart(virt_to_phys(cpu_resume), false);
 
 	return ret;
 }
@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
 	for (pbe = restore_pblist; pbe; pbe = pbe->next)
 		copy_page(pbe->orig_address, pbe->address);
 
-	soft_restart(virt_to_phys(cpu_resume));
+	_soft_restart(virt_to_phys(cpu_resume), false);
 }
 
 static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
@@ -99,7 +100,6 @@ static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
  */
 int swsusp_arch_resume(void)
 {
-	extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
 	call_with_stack(arch_restore_image, 0,
 		resume_stack + ARRAY_SIZE(resume_stack));
 	return 0;
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7fc70ae21185..dc7d0a95bd36 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -648,7 +648,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Per-cpu breakpoints are not supported by our stepping
 	 * mechanism.
 	 */
-	if (!bp->hw.bp_target)
+	if (!bp->hw.target)
 		return -EINVAL;
 
 	/*
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index de2b085ad753..8bf3b7c09888 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -46,7 +46,8 @@ int machine_kexec_prepare(struct kimage *image)
 	 * and implements CPU hotplug for the current HW. If not, we won't be
 	 * able to kexec reliably, so fail the prepare operation.
 	 */
-	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+	if (num_possible_cpus() > 1 && platform_can_secondary_boot() &&
+	    !platform_can_cpu_hotplug())
 		return -EINVAL;
 
 	/*
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2e11961f65ae..af791f4a6205 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -98,14 +98,19 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 		case R_ARM_PC24:
 		case R_ARM_CALL:
 		case R_ARM_JUMP24:
+			if (sym->st_value & 3) {
+				pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (ARM -> Thumb)\n",
+				       module->name, relindex, i, symname);
+				return -ENOEXEC;
+			}
+
 			offset = __mem_to_opcode_arm(*(u32 *)loc);
 			offset = (offset & 0x00ffffff) << 2;
 			if (offset & 0x02000000)
 				offset -= 0x04000000;
 
 			offset += sym->st_value - loc;
-			if (offset & 3 ||
-			    offset <= (s32)0xfe000000 ||
+			if (offset <= (s32)0xfe000000 ||
 			    offset >= (s32)0x02000000) {
 				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
 				       module->name, relindex, i, symname,
@@ -155,6 +160,22 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 #ifdef CONFIG_THUMB2_KERNEL
 		case R_ARM_THM_CALL:
 		case R_ARM_THM_JUMP24:
+			/*
+			 * For function symbols, only Thumb addresses are
+			 * allowed (no interworking).
+			 *
+			 * For non-function symbols, the destination
+			 * has no specific ARM/Thumb disposition, so
+			 * the branch is resolved under the assumption
+			 * that interworking is not required.
+			 */
+			if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+			    !(sym->st_value & 1)) {
+				pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (Thumb -> ARM)\n",
+				       module->name, relindex, i, symname);
+				return -ENOEXEC;
+			}
+
 			upper = __mem_to_opcode_thumb16(*(u16 *)loc);
 			lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
 
@@ -182,18 +203,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 				offset -= 0x02000000;
 			offset += sym->st_value - loc;
 
-			/*
-			 * For function symbols, only Thumb addresses are
-			 * allowed (no interworking).
-			 *
-			 * For non-function symbols, the destination
-			 * has no specific ARM/Thumb disposition, so
-			 * the branch is resolved under the assumption
-			 * that interworking is not required.
-			 */
-			if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
-				!(offset & 1)) ||
-			    offset <= (s32)0xff000000 ||
+			if (offset <= (s32)0xff000000 ||
 			    offset >= (s32)0x01000000) {
 				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
 				       module->name, relindex, i, symname,
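The interworking checks above rely on an ELF convention: for STT_FUNC symbols, bit 0 of st_value flags a Thumb entry point, while ARM-state entry points are 4-byte aligned. A small standalone illustration of that convention (the helper names are made up for this sketch):

#include <elf.h>
#include <stdbool.h>

/* Hypothetical helpers: classify a symbol the way apply_relocate() does. */
static bool is_thumb_entry(const Elf32_Sym *sym)
{
	/* Bit 0 set on a function's address means "enter in Thumb state". */
	return ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
	       (sym->st_value & 1);
}

static bool is_arm_entry(const Elf32_Sym *sym)
{
	/* ARM-state code is at least 4-byte aligned, so bits [1:0] are 0. */
	return ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
	       !(sym->st_value & 3);
}

Checking the symbol value rather than the computed branch offset is the actual fix here: the old code rejected offsets with low bits set, which misfired on legitimate same-state branches.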
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 557e128e4df0..4a86a0133ac3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -259,20 +259,29 @@ out:
 }
 
 static int
-validate_event(struct pmu_hw_events *hw_events,
-	       struct perf_event *event)
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+	       struct perf_event *event)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct arm_pmu *armpmu;
 
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
 	if (event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
+	armpmu = to_arm_pmu(event->pmu);
 	return armpmu->get_event_idx(hw_events, event) >= 0;
 }
 
@@ -288,15 +297,15 @@ validate_group(struct perf_event *event)
 	 */
 	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;
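The user-visible effect: an event group whose members resolve to different hardware PMUs is now rejected at perf_event_open() time rather than silently never scheduling. A rough user-space sketch (needs perf privileges; the "42" PMU type is hypothetical, normally read from /sys/bus/event_source/devices/<pmu>/type):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr leader_attr, sibling_attr;
	int leader, sibling;

	/* Hypothetical uncore PMU event as group leader. */
	memset(&leader_attr, 0, sizeof(leader_attr));
	leader_attr.type = 42;
	leader_attr.size = sizeof(leader_attr);
	leader_attr.config = 0x1;
	leader = sys_perf_event_open(&leader_attr, -1, 0, -1, 0);

	/* Grouping a CPU-PMU event under that leader now fails with EINVAL
	 * in validate_group() instead of creating an unschedulable group. */
	memset(&sibling_attr, 0, sizeof(sibling_attr));
	sibling_attr.type = PERF_TYPE_HARDWARE;
	sibling_attr.size = sizeof(sibling_attr);
	sibling_attr.config = PERF_COUNT_HW_CPU_CYCLES;
	sibling = sys_perf_event_open(&sibling_attr, -1, 0, leader, 0);

	printf("sibling fd: %d\n", sibling);	/* expected: -1 */
	return 0;
}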
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 61b53c46edfa..91c7ba182dcd 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -92,11 +92,16 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
-			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+			int cpu = i;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
 				continue;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq >= 0)
-				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
 }
@@ -128,32 +133,37 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
 			err = 0;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq < 0)
 				continue;
 
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
 			/*
 			 * If we have a single PMU interrupt that we can't shift,
 			 * assume that we're running on a uniprocessor machine and
 			 * continue. Otherwise, continue without this interrupt.
 			 */
-			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
 				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					irq, i);
+					irq, cpu);
 				continue;
 			}
 
 			err = request_irq(irq, handler,
 					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-					  per_cpu_ptr(&hw_events->percpu_pmu, i));
+					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 			if (err) {
 				pr_err("unable to request IRQ%d for ARM PMU counters\n",
 				       irq);
 				return err;
 			}
 
-			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
 		}
 	}
 
@@ -243,6 +253,8 @@ static const struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
 	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
 	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
+	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
+	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
 	{},
 };
 
@@ -289,6 +301,48 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 	return ret;
 }
 
+static int of_pmu_irq_cfg(struct platform_device *pdev)
+{
+	int i;
+	int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+
+	if (!irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < pdev->num_resources; ++i) {
+		struct device_node *dn;
+		int cpu;
+
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+				      i);
+		if (!dn) {
+			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+				of_node_full_name(dn), i);
+			break;
+		}
+
+		for_each_possible_cpu(cpu)
+			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+				break;
+
+		of_node_put(dn);
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			break;
+		}
+
+		irqs[i] = cpu;
+	}
+
+	if (i == pdev->num_resources)
+		cpu_pmu->irq_affinity = irqs;
+	else
+		kfree(irqs);
+
+	return 0;
+}
+
 static int cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
@@ -313,7 +367,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
 		init_fn = of_id->data;
 
-		ret = init_fn(pmu);
+		ret = of_pmu_irq_cfg(pdev);
+		if (!ret)
+			ret = init_fn(pmu);
 	} else {
 		ret = probe_current_pmu(pmu);
 	}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8993770c47de..f4207a4dcb01 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -140,6 +140,23 @@ enum krait_perf_types {
 	KRAIT_PERFCTR_L1_DTLB_ACCESS			= 0x12210,
 };
 
+/* ARMv7 Scorpion specific event types */
+enum scorpion_perf_types {
+	SCORPION_LPM0_GROUP0			= 0x4c,
+	SCORPION_LPM1_GROUP0			= 0x50,
+	SCORPION_LPM2_GROUP0			= 0x54,
+	SCORPION_L2LPM_GROUP0			= 0x58,
+	SCORPION_VLPM_GROUP0			= 0x5c,
+
+	SCORPION_ICACHE_ACCESS			= 0x10053,
+	SCORPION_ICACHE_MISS			= 0x10052,
+
+	SCORPION_DTLB_ACCESS			= 0x12013,
+	SCORPION_DTLB_MISS			= 0x12012,
+
+	SCORPION_ITLB_MISS			= 0x12021,
+};
+
 /*
  * Cortex-A8 HW events mapping
  *
@@ -482,6 +499,49 @@ static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
+ * Scorpion HW events mapping
+ */
+static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					    [PERF_COUNT_HW_CACHE_OP_MAX]
+					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+	/*
+	 * The performance counters don't differentiate between read and write
+	 * accesses/misses so this isn't strictly correct, but it's the best we
+	 * can do. Writes and reads get combined.
+	 */
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
+	/*
+	 * Only ITLB misses and DTLB refills are supported. If users want the
+	 * DTLB refill misses, a raw counter must be used.
+	 */
+	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
  * Perf Events' indices
  */
 #define ARMV7_IDX_CYCLE_COUNTER	0
@@ -976,6 +1036,12 @@ static int krait_map_event_no_branch(struct perf_event *event)
 			       &krait_perf_cache_map, 0xFFFFF);
 }
 
+static int scorpion_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &scorpion_perf_map,
+			       &scorpion_perf_cache_map, 0xFFFFF);
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
@@ -1103,6 +1169,12 @@ static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
 #define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
 #define PMRESRn_EN		BIT(31)
 
+#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
+#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
+#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
+#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
+#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
+
 static u32 krait_read_pmresrn(int n)
 {
 	u32 val;
@@ -1141,19 +1213,19 @@ static void krait_write_pmresrn(int n, u32 val)
 	}
 }
 
-static u32 krait_read_vpmresr0(void)
+static u32 venum_read_pmresr(void)
 {
 	u32 val;
 	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
 	return val;
 }
 
-static void krait_write_vpmresr0(u32 val)
+static void venum_write_pmresr(u32 val)
 {
 	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
 }
 
-static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
+static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
 {
 	u32 venum_new_val;
 	u32 fp_new_val;
@@ -1170,7 +1242,7 @@ static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
 	fmxr(FPEXC, fp_new_val);
 }
 
-static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
+static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
 {
 	BUG_ON(preemptible());
 	/* Restore FPEXC */
@@ -1193,16 +1265,11 @@ static void krait_evt_setup(int idx, u32 config_base)
 	u32 val;
 	u32 mask;
 	u32 vval, fval;
-	unsigned int region;
-	unsigned int group;
-	unsigned int code;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	unsigned int code = EVENT_CODE(config_base);
 	unsigned int group_shift;
-	bool venum_event;
-
-	venum_event = !!(config_base & VENUM_EVENT);
-	region = (config_base >> 12) & 0xf;
-	code   = (config_base >> 4) & 0xff;
-	group  = (config_base >> 0) & 0xf;
+	bool venum_event = EVENT_VENUM(config_base);
 
 	group_shift = group * 8;
 	mask = 0xff << group_shift;
@@ -1217,16 +1284,14 @@ static void krait_evt_setup(int idx, u32 config_base)
 	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
 	armv7_pmnc_write_evtsel(idx, val);
 
-	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
-
 	if (venum_event) {
-		krait_pre_vpmresr0(&vval, &fval);
-		val = krait_read_vpmresr0();
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
 		val &= ~mask;
 		val |= code << group_shift;
 		val |= PMRESRn_EN;
-		krait_write_vpmresr0(val);
-		krait_post_vpmresr0(vval, fval);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
 	} else {
 		val = krait_read_pmresrn(region);
 		val &= ~mask;
@@ -1236,7 +1301,7 @@ static void krait_evt_setup(int idx, u32 config_base)
 	}
 }
 
-static u32 krait_clear_pmresrn_group(u32 val, int group)
+static u32 clear_pmresrn_group(u32 val, int group)
 {
 	u32 mask;
 	int group_shift;
@@ -1256,23 +1321,19 @@ static void krait_clearpmu(u32 config_base)
 {
 	u32 val;
 	u32 vval, fval;
-	unsigned int region;
-	unsigned int group;
-	bool venum_event;
-
-	venum_event = !!(config_base & VENUM_EVENT);
-	region = (config_base >> 12) & 0xf;
-	group  = (config_base >> 0) & 0xf;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	bool venum_event = EVENT_VENUM(config_base);
 
 	if (venum_event) {
-		krait_pre_vpmresr0(&vval, &fval);
-		val = krait_read_vpmresr0();
-		val = krait_clear_pmresrn_group(val, group);
-		krait_write_vpmresr0(val);
-		krait_post_vpmresr0(vval, fval);
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val = clear_pmresrn_group(val, group);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
 	} else {
 		val = krait_read_pmresrn(region);
-		val = krait_clear_pmresrn_group(val, group);
+		val = clear_pmresrn_group(val, group);
 		krait_write_pmresrn(region, val);
 	}
 }
@@ -1342,6 +1403,8 @@ static void krait_pmu_enable_event(struct perf_event *event)
 static void krait_pmu_reset(void *info)
 {
 	u32 vval, fval;
+	struct arm_pmu *cpu_pmu = info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	armv7pmu_reset(info);
 
@@ -1350,9 +1413,16 @@ static void krait_pmu_reset(void *info)
 	krait_write_pmresrn(1, 0);
 	krait_write_pmresrn(2, 0);
 
-	krait_pre_vpmresr0(&vval, &fval);
-	krait_write_vpmresr0(0);
-	krait_post_vpmresr0(vval, fval);
+	venum_pre_pmresr(&vval, &fval);
+	venum_write_pmresr(0);
+	venum_post_pmresr(vval, fval);
+
+	/* Reset PMxEVNCTCR to sane default */
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv7_pmnc_select_counter(idx);
+		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+	}
+
 }
 
 static int krait_event_to_bit(struct perf_event *event, unsigned int region,
@@ -1386,26 +1456,18 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
 {
 	int idx;
 	int bit = -1;
-	unsigned int prefix;
-	unsigned int region;
-	unsigned int code;
-	unsigned int group;
-	bool krait_event;
 	struct hw_perf_event *hwc = &event->hw;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int code = EVENT_CODE(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool krait_event = EVENT_CPU(hwc->config_base);
 
-	region = (hwc->config_base >> 12) & 0xf;
-	code   = (hwc->config_base >> 4) & 0xff;
-	group  = (hwc->config_base >> 0) & 0xf;
-	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
-
-	if (krait_event) {
+	if (venum_event || krait_event) {
 		/* Ignore invalid events */
 		if (group > 3 || region > 2)
 			return -EINVAL;
-		prefix = hwc->config_base & KRAIT_EVENT_MASK;
-		if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
-			return -EINVAL;
-		if (prefix == VENUM_EVENT && (code & 0xe0))
+		if (venum_event && (code & 0xe0))
 			return -EINVAL;
 
 		bit = krait_event_to_bit(event, region, group);
@@ -1425,15 +1487,12 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 {
 	int bit;
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned int region;
-	unsigned int group;
-	bool krait_event;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool krait_event = EVENT_CPU(hwc->config_base);
 
-	region = (hwc->config_base >> 12) & 0xf;
-	group  = (hwc->config_base >> 0) & 0xf;
-	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
-
-	if (krait_event) {
+	if (venum_event || krait_event) {
 		bit = krait_event_to_bit(event, region, group);
 		clear_bit(bit, cpuc->used_mask);
 	}
@@ -1458,6 +1517,344 @@ static int krait_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
 	return 0;
 }
+
+/*
+ * Scorpion Local Performance Monitor Register (LPMn)
+ *
+ *            31   30     24     16     8      0
+ *            +--------------------------------+
+ *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
+ *            +--------------------------------+
+ *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
+ *            +--------------------------------+
+ *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
+ *            +--------------------------------+
+ *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
+ *            +--------------------------------+
+ *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
+ *            +--------------------------------+
+ *              EN | G=3  | G=2  | G=1  | G=0
+ *
+ *
+ *  Event Encoding:
+ *
+ *      hwc->config_base = 0xNRCCG
+ *
+ *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
+ *      R  = region register
+ *      CC = class of events the group G is choosing from
+ *      G  = group or particular event
+ *
+ *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
+ *
+ *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
+ *  unit, etc.) while the event code (CC) corresponds to a particular class of
+ *  events (interrupts for example). An event code is broken down into
+ *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
+ *  example).
+ */
+
+static u32 scorpion_read_pmresrn(int n)
+{
+	u32 val;
+
+	switch (n) {
+	case 0:
+		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
+		break;
+	case 1:
+		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
+		break;
+	case 2:
+		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
+		break;
+	case 3:
+		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
+		break;
+	default:
+		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+	}
+
+	return val;
+}
+
+static void scorpion_write_pmresrn(int n, u32 val)
+{
+	switch (n) {
+	case 0:
+		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
+		break;
+	case 1:
+		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
+		break;
+	case 2:
+		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
+		break;
+	case 3:
+		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
+		break;
+	default:
+		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+	}
+}
+
+static u32 scorpion_get_pmresrn_event(unsigned int region)
+{
+	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
+					     SCORPION_LPM1_GROUP0,
+					     SCORPION_LPM2_GROUP0,
+					     SCORPION_L2LPM_GROUP0 };
+	return pmresrn_table[region];
+}
+
+static void scorpion_evt_setup(int idx, u32 config_base)
+{
+	u32 val;
+	u32 mask;
+	u32 vval, fval;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	unsigned int code = EVENT_CODE(config_base);
+	unsigned int group_shift;
+	bool venum_event = EVENT_VENUM(config_base);
+
+	group_shift = group * 8;
+	mask = 0xff << group_shift;
+
+	/* Configure evtsel for the region and group */
+	if (venum_event)
+		val = SCORPION_VLPM_GROUP0;
+	else
+		val = scorpion_get_pmresrn_event(region);
+	val += group;
+	/* Mix in mode-exclusion bits */
+	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
+	armv7_pmnc_write_evtsel(idx, val);
+
+	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+
+	if (venum_event) {
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val &= ~mask;
+		val |= code << group_shift;
+		val |= PMRESRn_EN;
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
+	} else {
+		val = scorpion_read_pmresrn(region);
+		val &= ~mask;
+		val |= code << group_shift;
+		val |= PMRESRn_EN;
+		scorpion_write_pmresrn(region, val);
+	}
+}
+
+static void scorpion_clearpmu(u32 config_base)
+{
+	u32 val;
+	u32 vval, fval;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	bool venum_event = EVENT_VENUM(config_base);
+
+	if (venum_event) {
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val = clear_pmresrn_group(val, group);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
+	} else {
+		val = scorpion_read_pmresrn(region);
+		val = clear_pmresrn_group(val, group);
+		scorpion_write_pmresrn(region, val);
+	}
+}
+
+static void scorpion_pmu_disable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/* Disable counter and interrupt */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable counter */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Clear pmresr code (if destined for PMNx counters)
+	 */
+	if (hwc->config_base & KRAIT_EVENT_MASK)
+		scorpion_clearpmu(hwc->config_base);
+
+	/* Disable interrupt for this counter */
+	armv7_pmnc_disable_intens(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void scorpion_pmu_enable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/*
+	 * Enable counter and interrupt, and set the counter to count
+	 * the event that we're interested in.
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable counter */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Set event (if destined for PMNx counters)
+	 * We don't set the event for the cycle counter because we
+	 * don't have the ability to perform event filtering.
+	 */
+	if (hwc->config_base & KRAIT_EVENT_MASK)
+		scorpion_evt_setup(idx, hwc->config_base);
+	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
+		armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+	/* Enable interrupt for this counter */
+	armv7_pmnc_enable_intens(idx);
+
+	/* Enable counter */
+	armv7_pmnc_enable_counter(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void scorpion_pmu_reset(void *info)
+{
+	u32 vval, fval;
+	struct arm_pmu *cpu_pmu = info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
+
+	armv7pmu_reset(info);
+
+	/* Clear all pmresrs */
+	scorpion_write_pmresrn(0, 0);
+	scorpion_write_pmresrn(1, 0);
+	scorpion_write_pmresrn(2, 0);
+	scorpion_write_pmresrn(3, 0);
+
+	venum_pre_pmresr(&vval, &fval);
+	venum_write_pmresr(0);
+	venum_post_pmresr(vval, fval);
+
+	/* Reset PMxEVNCTCR to sane default */
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv7_pmnc_select_counter(idx);
+		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+	}
+}
+
+static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
+			      unsigned int group)
+{
+	int bit;
+	struct hw_perf_event *hwc = &event->hw;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+
+	if (hwc->config_base & VENUM_EVENT)
+		bit = SCORPION_VLPM_GROUP0;
+	else
+		bit = scorpion_get_pmresrn_event(region);
+	bit -= scorpion_get_pmresrn_event(0);
+	bit += group;
+	/*
+	 * Lower bits are reserved for use by the counters (see
+	 * armv7pmu_get_event_idx() for more info)
+	 */
+	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
+
+	return bit;
+}
+
+/*
+ * We check for column exclusion constraints here.
+ * Two events can't use the same group within a pmresr register.
+ */
+static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
+				   struct perf_event *event)
+{
+	int idx;
+	int bit = -1;
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+	if (venum_event || scorpion_event) {
+		/* Ignore invalid events */
+		if (group > 3 || region > 3)
+			return -EINVAL;
+
+		bit = scorpion_event_to_bit(event, region, group);
+		if (test_and_set_bit(bit, cpuc->used_mask))
+			return -EAGAIN;
+	}
+
+	idx = armv7pmu_get_event_idx(cpuc, event);
+	if (idx < 0 && bit >= 0)
+		clear_bit(bit, cpuc->used_mask);
+
+	return idx;
+}
+
+static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+				      struct perf_event *event)
+{
+	int bit;
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+	if (venum_event || scorpion_event) {
+		bit = scorpion_event_to_bit(event, region, group);
+		clear_bit(bit, cpuc->used_mask);
+	}
+}
+
+static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "armv7_scorpion";
+	cpu_pmu->map_event	= scorpion_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	cpu_pmu->reset		= scorpion_pmu_reset;
+	cpu_pmu->enable		= scorpion_pmu_enable_event;
+	cpu_pmu->disable	= scorpion_pmu_disable_event;
+	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+	return 0;
+}
+
+static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "armv7_scorpion_mp";
+	cpu_pmu->map_event	= scorpion_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	cpu_pmu->reset		= scorpion_pmu_reset;
+	cpu_pmu->enable		= scorpion_pmu_enable_event;
+	cpu_pmu->disable	= scorpion_pmu_disable_event;
+	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+	return 0;
+}
 #else
 static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
@@ -1498,4 +1895,14 @@ static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return -ENODEV;
 }
+
+static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
+
+static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
 #endif	/* CONFIG_CPU_V7 */
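To make the 0xNRCCG encoding concrete, here is a standalone sketch decoding SCORPION_ITLB_MISS (0x12021) with the EVENT_* macros added above. The KRAIT_EVENT/VENUM_EVENT values are assumed from the N=1/N=2 prefixes described in the block comment; the kernel's own definitions live earlier in this file:

#include <stdio.h>

#define KRAIT_EVENT	0x10000		/* N = 1: CPU (LPMn/L2LPM) */
#define VENUM_EVENT	0x20000		/* N = 2: Venum VFP (VLPM) */

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)	/* R */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)	/* CC */
#define EVENT_GROUP(event)	((event) & 0xf)		/* G */

int main(void)
{
	unsigned int event = 0x12021;	/* SCORPION_ITLB_MISS */

	/* Expected: region 2 (LPM2), code 0x2, group 1, cpu=1, venum=0,
	 * i.e. "a Scorpion CPU event in LPM2's group 1 with code 2". */
	printf("region %u code %#x group %u cpu=%d venum=%d\n",
	       EVENT_REGION(event), EVENT_CODE(event), EVENT_GROUP(event),
	       !!(event & KRAIT_EVENT), !!(event & VENUM_EVENT));
	return 0;
}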
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index fdfa3a78ec8c..f192a2a41719 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -17,12 +17,9 @@
 #include <linux/stddef.h>
 #include <linux/unistd.h>
 #include <linux/user.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
 #include <linux/init.h>
-#include <linux/cpu.h>
 #include <linux/elfcore.h>
 #include <linux/pm.h>
 #include <linux/tick.h>
@@ -31,16 +28,14 @@
 #include <linux/random.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/leds.h>
-#include <linux/reboot.h>
 
-#include <asm/cacheflush.h>
-#include <asm/idmap.h>
 #include <asm/processor.h>
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
 #include <asm/system_misc.h>
 #include <asm/mach/time.h>
 #include <asm/tls.h>
+#include <asm/vdso.h>
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #include <linux/stackprotector.h>
@@ -59,69 +54,6 @@ static const char *isa_modes[] __maybe_unused = {
   "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
-extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
-typedef void (*phys_reset_t)(unsigned long);
-
-/*
- * A temporary stack to use for CPU reset.  This is static so that we
- * don't clobber it with the identity mapping.  When running with this
- * stack, any references to the current task *will not work* so you
- * should really do as little as possible before jumping to your reset
- * code.
- */
-static u64 soft_restart_stack[16];
-
-static void __soft_restart(void *addr)
-{
-	phys_reset_t phys_reset;
-
-	/* Take out a flat memory mapping. */
-	setup_mm_for_reboot();
-
-	/* Clean and invalidate caches */
-	flush_cache_all();
-
-	/* Turn off caching */
-	cpu_proc_fin();
-
-	/* Push out any further dirty data, and ensure cache is empty */
-	flush_cache_all();
-
-	/* Switch to the identity mapping. */
-	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
-	phys_reset((unsigned long)addr);
-
-	/* Should never get here. */
-	BUG();
-}
-
-void soft_restart(unsigned long addr)
-{
-	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
-
-	/* Disable interrupts first */
-	raw_local_irq_disable();
-	local_fiq_disable();
-
-	/* Disable the L2 if we're the last man standing. */
-	if (num_online_cpus() == 1)
-		outer_disable();
-
-	/* Change to the new stack and continue with the reset. */
-	call_with_stack(__soft_restart, (void *)addr, (void *)stack);
-
-	/* Should never get here. */
-	BUG();
-}
-
-/*
- * Function pointers to optional machine specific functions
- */
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
-
-void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-
 /*
  * This is our default idle handler.
  */
@@ -166,79 +98,6 @@ void arch_cpu_idle_dead(void)
 }
 #endif
 
-/*
- * Called by kexec, immediately prior to machine_kexec().
- *
- * This must completely disable all secondary CPUs; simply causing those CPUs
- * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
- * kexec'd kernel to use any and all RAM as it sees fit, without having to
- * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
- * functionality embodied in disable_nonboot_cpus() is used to achieve this.
- */
-void machine_shutdown(void)
-{
-	disable_nonboot_cpus();
-}
-
-/*
- * Halting simply requires that the secondary CPUs stop performing any
- * activity (executing tasks, handling interrupts). smp_send_stop()
- * achieves this.
- */
-void machine_halt(void)
-{
-	local_irq_disable();
-	smp_send_stop();
-
-	local_irq_disable();
-	while (1);
-}
-
-/*
- * Power-off simply requires that the secondary CPUs stop performing any
- * activity (executing tasks, handling interrupts). smp_send_stop()
- * achieves this. When the system power is turned off, it will take all CPUs
- * with it.
- */
-void machine_power_off(void)
-{
-	local_irq_disable();
-	smp_send_stop();
-
-	if (pm_power_off)
-		pm_power_off();
-}
-
-/*
- * Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with a single CPU can
- * use soft_restart() as their machine descriptor's .restart hook, since that
- * will cause the only available CPU to reset. Systems with multiple CPUs must
- * provide a HW restart implementation, to ensure that all CPUs reset at once.
- * This is required so that any code running after reset on the primary CPU
- * doesn't have to co-ordinate with other CPUs to ensure they aren't still
- * executing pre-reset code, and using RAM that the primary CPU's code wishes
- * to use. Implementing such co-ordination would be essentially impossible.
- */
-void machine_restart(char *cmd)
-{
-	local_irq_disable();
-	smp_send_stop();
-
-	if (arm_pm_restart)
-		arm_pm_restart(reboot_mode, cmd);
-	else
-		do_kernel_restart(cmd);
-
-	/* Give a grace period for failure to restart of 1s */
-	mdelay(1000);
-
-	/* Whoops - the platform was unable to reboot. Tell the user! */
-	printk("Reboot failed -- System halted\n");
-	local_irq_disable();
-	while (1);
-}
-
 void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -475,7 +334,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 }
 
 /* If possible, provide a placement hint at a random offset from the
- * stack for the signal page.
+ * stack for the sigpage and vdso pages.
 */
 static unsigned long sigpage_addr(const struct mm_struct *mm,
				  unsigned int npages)
@@ -519,6 +378,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
+	unsigned long npages;
 	unsigned long addr;
 	unsigned long hint;
 	int ret = 0;
@@ -528,9 +388,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (!signal_page)
 		return -ENOMEM;
 
+	npages = 1; /* for sigpage */
+	npages += vdso_total_pages;
+
 	down_write(&mm->mmap_sem);
-	hint = sigpage_addr(mm, 1);
-	addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
+	hint = sigpage_addr(mm, npages);
+	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
 		ret = addr;
 		goto up_fail;
@@ -547,6 +410,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
 	mm->context.sigpage = addr;
 
+	/* Unlike the sigpage, failure to install the vdso is unlikely
+	 * to be fatal to the process, so no error check needed
+	 * here.
+	 */
+	arm_install_vdso(mm, addr + PAGE_SIZE);
+
 up_fail:
 	up_write(&mm->mmap_sem);
 	return ret;
diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S
new file mode 100644
index 000000000000..a78e9e1e206d
--- /dev/null
+++ b/arch/arm/kernel/psci-call.S
@@ -0,0 +1,31 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Mark Rutland <mark.rutland@arm.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
+ENTRY(__invoke_psci_fn_hvc)
+	__HVC(0)
+	bx	lr
+ENDPROC(__invoke_psci_fn_hvc)
+
+/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
+ENTRY(__invoke_psci_fn_smc)
+	__SMC(0)
+	bx	lr
+ENDPROC(__invoke_psci_fn_smc)
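These stubs work because the AAPCS already places the function ID and arguments in r0-r3 and the SMC32 convention returns the result in r0, so each stub is just the trap instruction plus a return. A hedged sketch of a caller, mirroring how psci.c dispatches through its function pointer (0x84000003 is PSCI_0_2_FN_CPU_ON per the PSCI 0.2 specification; example_cpu_on is an invented name):

asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);

/* Chosen at boot from the DT "method" property: "smc" or "hvc". */
static int (*invoke_psci_fn)(u32, u32, u32, u32) = __invoke_psci_fn_smc;

static int example_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
	/* 0x84000003 = PSCI_0_2_FN_CPU_ON (32-bit calling convention). */
	return invoke_psci_fn(0x84000003, cpuid, entry_point, 0);
}

Moving the trap into out-of-line assembly removes the fragile noinline-plus-__asmeq C versions that the old psci.c carried, shown deleted just below.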
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index f73891b6b730..f90fdf4ce7c7 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -23,8 +23,6 @@
 
 #include <asm/compiler.h>
 #include <asm/errno.h>
-#include <asm/opcodes-sec.h>
-#include <asm/opcodes-virt.h>
 #include <asm/psci.h>
 #include <asm/system_misc.h>
 
@@ -33,6 +31,9 @@ struct psci_operations psci_ops;
 static int (*invoke_psci_fn)(u32, u32, u32, u32);
 typedef int (*psci_initcall_t)(const struct device_node *);
 
+asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
+asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
+
 enum psci_function {
 	PSCI_FN_CPU_SUSPEND,
 	PSCI_FN_CPU_ON,
@@ -71,40 +72,6 @@ static u32 psci_power_state_pack(struct psci_power_state state)
 		 & PSCI_0_2_POWER_STATE_AFFL_MASK);
 }
 
-/*
- * The following two functions are invoked via the invoke_psci_fn pointer
- * and will not be inlined, allowing us to piggyback on the AAPCS.
- */
-static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
-					 u32 arg2)
-{
-	asm volatile(
-			__asmeq("%0", "r0")
-			__asmeq("%1", "r1")
-			__asmeq("%2", "r2")
-			__asmeq("%3", "r3")
-			__HVC(0)
-		: "+r" (function_id)
-		: "r" (arg0), "r" (arg1), "r" (arg2));
-
-	return function_id;
-}
-
-static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
-					 u32 arg2)
-{
-	asm volatile(
-			__asmeq("%0", "r0")
-			__asmeq("%1", "r1")
-			__asmeq("%2", "r2")
-			__asmeq("%3", "r3")
-			__SMC(0)
-		: "+r" (function_id)
-		: "r" (arg0), "r" (arg1), "r" (arg2));
-
-	return function_id;
-}
-
 static int psci_get_version(void)
 {
 	int err;
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
new file mode 100644
index 000000000000..1a4d232796be
--- /dev/null
+++ b/arch/arm/kernel/reboot.c
@@ -0,0 +1,155 @@
+/*
+ *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
+ *  Original Copyright (C) 1995  Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+
+#include <asm/cacheflush.h>
+#include <asm/idmap.h>
+
+#include "reboot.h"
+
+typedef void (*phys_reset_t)(unsigned long);
+
+/*
+ * Function pointers to optional machine specific functions
+ */
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+/*
+ * A temporary stack to use for CPU reset. This is static so that we
+ * don't clobber it with the identity mapping. When running with this
+ * stack, any references to the current task *will not work* so you
+ * should really do as little as possible before jumping to your reset
+ * code.
+ */
+static u64 soft_restart_stack[16];
+
+static void __soft_restart(void *addr)
+{
+	phys_reset_t phys_reset;
+
+	/* Take out a flat memory mapping. */
+	setup_mm_for_reboot();
+
+	/* Clean and invalidate caches */
+	flush_cache_all();
+
+	/* Turn off caching */
+	cpu_proc_fin();
+
+	/* Push out any further dirty data, and ensure cache is empty */
+	flush_cache_all();
+
+	/* Switch to the identity mapping. */
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset((unsigned long)addr);
+
+	/* Should never get here. */
+	BUG();
+}
+
+void _soft_restart(unsigned long addr, bool disable_l2)
+{
+	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
+	/* Disable interrupts first */
+	raw_local_irq_disable();
+	local_fiq_disable();
+
+	/* Disable the L2 if we're the last man standing. */
+	if (disable_l2)
+		outer_disable();
+
+	/* Change to the new stack and continue with the reset. */
+	call_with_stack(__soft_restart, (void *)addr, (void *)stack);
+
+	/* Should never get here. */
+	BUG();
+}
+
+void soft_restart(unsigned long addr)
+{
+	_soft_restart(addr, num_online_cpus() == 1);
+}
+
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() is used to achieve this.
+ */
+void machine_shutdown(void)
+{
+	disable_nonboot_cpus();
+}
+
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
+void machine_halt(void)
+{
+	local_irq_disable();
+	smp_send_stop();
+
+	local_irq_disable();
+	while (1);
+}
+
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
+void machine_power_off(void)
+{
+	local_irq_disable();
+	smp_send_stop();
+
+	if (pm_power_off)
+		pm_power_off();
+}
+
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
+void machine_restart(char *cmd)
+{
+	local_irq_disable();
+	smp_send_stop();
+
+	if (arm_pm_restart)
+		arm_pm_restart(reboot_mode, cmd);
+	else
+		do_kernel_restart(cmd);
+
+	/* Give a grace period for failure to restart of 1s */
+	mdelay(1000);
+
+	/* Whoops - the platform was unable to reboot. Tell the user! */
+	printk("Reboot failed -- System halted\n");
+	local_irq_disable();
+	while (1);
+}
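As the machine_restart() comment above notes, a single-CPU machine descriptor can point its .restart hook at soft_restart(). A hedged sketch of such a hook; the board name and reset address are invented for illustration:

#include <linux/reboot.h>

#include <asm/system_misc.h>

#define MYBOARD_RESET_VECTOR	0x00000000UL	/* invented: ROM reset entry */

/* Hypothetical single-CPU board: reset by jumping through the idmap. */
static void myboard_restart(enum reboot_mode mode, const char *cmd)
{
	soft_restart(MYBOARD_RESET_VECTOR);
}

Multi-CPU systems must instead implement a hardware reset, for the coordination reasons spelled out in the comment.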
diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
new file mode 100644
index 000000000000..bf7a0b1f076e
--- /dev/null
+++ b/arch/arm/kernel/reboot.h
@@ -0,0 +1,7 @@
+#ifndef REBOOT_H
+#define REBOOT_H
+
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+extern void _soft_restart(unsigned long addr, bool disable_l2);
+
+#endif
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 24b4a04846eb..36ed35073289 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -56,8 +56,6 @@ void *return_address(unsigned int level)
 	return NULL;
 }
 
-#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
-
-#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
+#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
 
 EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e55408e96559..6c777e908a24 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -246,12 +246,9 @@ static int __get_cpu_architecture(void)
 		if (cpu_arch)
 			cpu_arch += CPU_ARCH_ARMv3;
 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
-		unsigned int mmfr0;
-
 		/* Revised CPUID format. Read the Memory Model Feature
 		 * Register 0 and check for VMSAv7 or PMSAv7 */
-		asm("mrc	p15, 0, %0, c0, c1, 4"
-		    : "=r" (mmfr0));
+		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
 		    (mmfr0 & 0x000000f0) >= 0x00000030)
 			cpu_arch = CPU_ARCH_ARMv7;
@@ -375,30 +372,48 @@ void __init early_print(const char *str, ...)
 
 static void __init cpuid_init_hwcaps(void)
 {
-	unsigned int divide_instrs, vmsa;
+	int block;
+	u32 isar5;
 
 	if (cpu_architecture() < CPU_ARCH_ARMv7)
 		return;
 
-	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
-
-	switch (divide_instrs) {
-	case 2:
+	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
+	if (block >= 2)
 		elf_hwcap |= HWCAP_IDIVA;
-	case 1:
+	if (block >= 1)
 		elf_hwcap |= HWCAP_IDIVT;
-	}
 
 	/* LPAE implies atomic ldrd/strd instructions */
-	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
-	if (vmsa >= 5)
+	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
+	if (block >= 5)
 		elf_hwcap |= HWCAP_LPAE;
+
+	/* check for supported v8 Crypto instructions */
+	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
+
+	block = cpuid_feature_extract_field(isar5, 4);
+	if (block >= 2)
+		elf_hwcap2 |= HWCAP2_PMULL;
+	if (block >= 1)
+		elf_hwcap2 |= HWCAP2_AES;
+
+	block = cpuid_feature_extract_field(isar5, 8);
+	if (block >= 1)
+		elf_hwcap2 |= HWCAP2_SHA1;
+
+	block = cpuid_feature_extract_field(isar5, 12);
+	if (block >= 1)
+		elf_hwcap2 |= HWCAP2_SHA2;
+
+	block = cpuid_feature_extract_field(isar5, 16);
+	if (block >= 1)
+		elf_hwcap2 |= HWCAP2_CRC32;
 }
 
 static void __init elf_hwcap_fixup(void)
 {
 	unsigned id = read_cpuid_id();
-	unsigned sync_prim;
 
 	/*
 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
@@ -419,9 +434,9 @@ static void __init elf_hwcap_fixup(void)
 	 * avoid advertising SWP; it may not be atomic with
 	 * multiprocessing cores.
 	 */
-	sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) |
-		    ((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f);
-	if (sync_prim >= 0x13)
+	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
+	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
+	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
 		elf_hwcap &= ~HWCAP_SWP;
 }
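The cpuid_feature_extract() helpers used above read 4-bit fields from the CPUID feature registers, and those fields are architecturally signed, so 0xf means "not implemented" rather than "level 15". A standalone sketch of the assumed semantics (the real helpers live in asm/cputype.h; this version is for illustration only):

#include <stdio.h>

/* Signed 4-bit field extraction, as the CPUID feature registers define it. */
static int feature_extract_field(unsigned int reg, int field)
{
	int val = (reg >> field) & 0xf;

	return val > 7 ? val - 16 : val;	/* sign-extend: 0xf -> -1 */
}

int main(void)
{
	unsigned int isar0 = 0x02101110;	/* hypothetical ID_ISAR0 value */

	/* Bits [27:24] describe divide instructions: >= 2 means both the
	 * ARM and Thumb SDIV/UDIV encodings are present (HWCAP_IDIVA/T). */
	printf("divide support level: %d\n", feature_extract_field(isar0, 24));
	return 0;
}

Treating the field as signed is exactly why the old "switch (divide_instrs)" and ">= 0x13" comparisons were replaced: an unsigned compare misreads 0xf as a high capability level.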
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 023ac905e4c3..423663e23791 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -318,17 +318,6 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
 	return frame;
 }
 
-/*
- * translate the signal
- */
-static inline int map_sig(int sig)
-{
-	struct thread_info *thread = current_thread_info();
-	if (sig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
-		sig = thread->exec_domain->signal_invmap[sig];
-	return sig;
-}
-
 static int
 setup_return(struct pt_regs *regs, struct ksignal *ksig,
 	     unsigned long __user *rc, void __user *frame)
@@ -412,7 +401,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 		}
 	}
 
-	regs->ARM_r0 = map_sig(ksig->sig);
+	regs->ARM_r0 = ksig->sig;
 	regs->ARM_sp = (unsigned long)frame;
 	regs->ARM_lr = retcode;
 	regs->ARM_pc = handler;
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index e1e60e5a7a27..7d37bfc50830 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -116,14 +116,7 @@ cpu_resume_after_mmu:
 	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_resume_after_mmu)
 
-/*
- * Note: Yes, part of the following code is located into the .data section.
- * This is to allow sleep_save_sp to be accessed with a relative load
- * while we can't rely on any MMU translation. We could have put
- * sleep_save_sp in the .text section as well, but some setups might
- * insist on it to be truly read-only.
- */
-	.data
+	.text
 	.align
 ENTRY(cpu_resume)
ARM_BE8(setend be)			@ ensure we are in BE mode
@@ -145,6 +138,8 @@ ARM_BE8(setend be)			@ ensure we are in BE mode
 	compute_mpidr_hash	r1, r4, r5, r6, r0, r3
 1:
 	adr	r0, _sleep_save_sp
+	ldr	r2, [r0]
+	add	r0, r0, r2
 	ldr	r0, [r0, #SLEEP_SAVE_SP_PHYS]
 	ldr	r0, [r0, r1, lsl #2]
@@ -156,10 +151,12 @@ THUMB(	bx	r3 )
ENDPROC(cpu_resume)
 
 	.align 2
+_sleep_save_sp:
+	.long	sleep_save_sp - .
mpidr_hash_ptr:
 	.long	mpidr_hash - .			@ mpidr_hash struct offset
 
+	.data
 	.type	sleep_save_sp, #object
ENTRY(sleep_save_sp)
-_sleep_save_sp:
 	.space	SLEEP_SAVE_SP_SZ		@ struct sleep_save_sp
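
The two instructions added to cpu_resume implement a position-independent load: the assembler stores the link-time offset sleep_save_sp - . in the literal, and adding the literal's own run-time address back recovers sleep_save_sp's run-time address, which works with the MMU off and regardless of where the image was loaded. The same computation, sketched in C (illustrative only):

/* A PC-relative literal holds (target - address_of_literal); adding
 * the literal's run-time address yields the target's run-time address,
 * with no dependence on the link-time base. */
static unsigned long resolve_pcrel(const long *literal)
{
	return (unsigned long)literal + *literal;
}

This is what lets sleep_save_sp move back into .data while cpu_resume itself returns to .text.
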
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 86ef244c5a24..cca5b8758185 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,11 @@ void __init smp_init_cpus(void)
 		smp_ops.smp_init_cpus();
 }
 
+int platform_can_secondary_boot(void)
+{
+	return !!smp_ops.smp_boot_secondary;
+}
+
 int platform_can_cpu_hotplug(void)
 {
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index afdd51e30bec..1361756782c7 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -42,7 +42,7 @@
 	"	cmp		%0, #0\n"			\
 	"	movne		%0, %4\n"			\
 	"2:\n"							\
-	"	.section	 .fixup,\"ax\"\n"		\
+	"	.section	 .text.fixup,\"ax\"\n"		\
 	"	.align		2\n"				\
 	"3:	mov		%0, %5\n"			\
 	"	b		2b\n"				\
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 0cc7e58c47cc..a66e37e211a9 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -76,7 +76,7 @@ void timer_tick(void)
 }
 #endif
 
-static void dummy_clock_access(struct timespec *ts)
+static void dummy_clock_access(struct timespec64 *ts)
 {
 	ts->tv_sec = 0;
 	ts->tv_nsec = 0;
@@ -85,12 +85,12 @@ static void dummy_clock_access(struct timespec *ts)
 static clock_access_fn __read_persistent_clock = dummy_clock_access;
 static clock_access_fn __read_boot_clock = dummy_clock_access;
 
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
 	__read_persistent_clock(ts);
 }
 
-void read_boot_clock(struct timespec *ts)
+void read_boot_clock64(struct timespec64 *ts)
 {
 	__read_boot_clock(ts);
 }
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 788e23fe64d8..3dce1a342030 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -505,12 +505,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason)
 
 static int bad_syscall(int n, struct pt_regs *regs)
 {
-	struct thread_info *thread = current_thread_info();
 	siginfo_t info;
 
-	if ((current->personality & PER_MASK) != PER_LINUX &&
-	    thread->exec_domain->handler) {
-		thread->exec_domain->handler(n, regs);
+	if ((current->personality & PER_MASK) != PER_LINUX) {
+		send_sig(SIGSEGV, current, 1);
 		return regs->ARM_r0;
 	}
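
Returning to the time.c hunk above: the timespec64 signatures make the persistent-clock path y2038-clean, since tv_sec is now 64-bit. A sketch of what a platform-supplied hook looks like under the new signature (my_rtc_get_seconds is a hypothetical accessor; see register_persistent_clock() in time.c for the exact registration interface):

#include <linux/time64.h>

extern u64 my_rtc_get_seconds(void);	/* hypothetical RTC read */

static void my_rtc_read_persistent(struct timespec64 *ts)
{
	ts->tv_sec = my_rtc_get_seconds();	/* time64_t: safe past 2038 */
	ts->tv_nsec = 0;
}

A platform hands such a hook to the ARM core so that it replaces dummy_clock_access in __read_persistent_clock.
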
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
new file mode 100644
index 000000000000..efe17dd9b921
--- /dev/null
+++ b/arch/arm/kernel/vdso.c
@@ -0,0 +1,337 @@
+/*
+ * Adapted from arm64 version.
+ *
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2015 Mentor Graphics Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+#include <linux/vmalloc.h>
+#include <asm/arch_timer.h>
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
+#include <clocksource/arm_arch_timer.h>
+
+#define MAX_SYMNAME	64
+
+static struct page **vdso_text_pagelist;
+
+/* Total number of pages needed for the data and text portions of the VDSO. */
+unsigned int vdso_total_pages __read_mostly;
+
+/*
+ * The VDSO data page.
+ */
+static union vdso_data_store vdso_data_store __page_aligned_data;
+static struct vdso_data *vdso_data = &vdso_data_store.data;
+
+static struct page *vdso_data_page;
+static struct vm_special_mapping vdso_data_mapping = {
+	.name = "[vvar]",
+	.pages = &vdso_data_page,
+};
+
+static struct vm_special_mapping vdso_text_mapping = {
+	.name = "[vdso]",
+};
+
+struct elfinfo {
+	Elf32_Ehdr *hdr;		/* ptr to ELF */
+	Elf32_Sym *dynsym;		/* ptr to .dynsym section */
+	unsigned long dynsymsize;	/* size of .dynsym section */
+	char *dynstr;			/* ptr to .dynstr section */
+};
+
+/* Cached result of boot-time check for whether the arch timer exists,
+ * and if so, whether the virtual counter is usable.
+ */
+static bool cntvct_ok __read_mostly;
+
+static bool __init cntvct_functional(void)
+{
+	struct device_node *np;
+	bool ret = false;
+
+	if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+		goto out;
+
+	/* The arm_arch_timer core should export
+	 * arch_timer_use_virtual or similar so we don't have to do
+	 * this.
+	 */
+	np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+	if (!np)
+		goto out_put;
+
+	if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
+		goto out_put;
+
+	ret = true;
+
+out_put:
+	of_node_put(np);
+out:
+	return ret;
+}
+
+static void * __init find_section(Elf32_Ehdr *ehdr, const char *name,
+				  unsigned long *size)
+{
+	Elf32_Shdr *sechdrs;
+	unsigned int i;
+	char *secnames;
+
+	/* Grab section headers and strings so we can tell who is who */
+	sechdrs = (void *)ehdr + ehdr->e_shoff;
+	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
+
+	/* Find the section they want */
+	for (i = 1; i < ehdr->e_shnum; i++) {
+		if (strcmp(secnames + sechdrs[i].sh_name, name) == 0) {
+			if (size)
+				*size = sechdrs[i].sh_size;
+			return (void *)ehdr + sechdrs[i].sh_offset;
+		}
+	}
+
+	if (size)
+		*size = 0;
+	return NULL;
+}
+
+static Elf32_Sym * __init find_symbol(struct elfinfo *lib, const char *symname)
+{
+	unsigned int i;
+
+	for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
+		char name[MAX_SYMNAME], *c;
+
+		if (lib->dynsym[i].st_name == 0)
+			continue;
+		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
+			MAX_SYMNAME);
+		c = strchr(name, '@');
+		if (c)
+			*c = 0;
+		if (strcmp(symname, name) == 0)
+			return &lib->dynsym[i];
+	}
+	return NULL;
+}
+
+static void __init vdso_nullpatch_one(struct elfinfo *lib, const char *symname)
+{
+	Elf32_Sym *sym;
+
+	sym = find_symbol(lib, symname);
+	if (!sym)
+		return;
+
+	sym->st_name = 0;
+}
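
find_section() and find_symbol() above are a deliberately small ELF reader over the kernel's own in-memory vdso image. The arithmetic is the standard ELF32 layout: section headers at e_shoff, section names in the e_shstrndx-th section. The same walk as a self-contained userspace sketch (illustrative, not part of the patch):

#include <elf.h>
#include <stdio.h>

static void list_sections(const unsigned char *base)
{
	const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *)base;
	const Elf32_Shdr *shdr = (const Elf32_Shdr *)(base + ehdr->e_shoff);
	const char *names = (const char *)base + shdr[ehdr->e_shstrndx].sh_offset;
	unsigned int i;

	/* Entry 0 is the reserved null section, hence i = 1, as above. */
	for (i = 1; i < ehdr->e_shnum; i++)
		printf("%-24s %u bytes\n",
		       names + shdr[i].sh_name, (unsigned int)shdr[i].sh_size);
}
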
+
+static void __init patch_vdso(void *ehdr)
+{
+	struct elfinfo einfo;
+
+	einfo = (struct elfinfo) {
+		.hdr = ehdr,
+	};
+
+	einfo.dynsym = find_section(einfo.hdr, ".dynsym", &einfo.dynsymsize);
+	einfo.dynstr = find_section(einfo.hdr, ".dynstr", NULL);
+
+	/* If the virtual counter is absent or non-functional we don't
+	 * want programs to incur the slight additional overhead of
+	 * dispatching through the VDSO only to fall back to syscalls.
+	 */
+	if (!cntvct_ok) {
+		vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
+		vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
+	}
+}
+
+static int __init vdso_init(void)
+{
+	unsigned int text_pages;
+	int i;
+
+	if (memcmp(&vdso_start, "\177ELF", 4)) {
+		pr_err("VDSO is not a valid ELF object!\n");
+		return -ENOEXEC;
+	}
+
+	text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+	pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
+
+	/* Allocate the VDSO text pagelist */
+	vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
+				     GFP_KERNEL);
+	if (vdso_text_pagelist == NULL)
+		return -ENOMEM;
+
+	/* Grab the VDSO data page. */
+	vdso_data_page = virt_to_page(vdso_data);
+
+	/* Grab the VDSO text pages. */
+	for (i = 0; i < text_pages; i++) {
+		struct page *page;
+
+		page = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		vdso_text_pagelist[i] = page;
+	}
+
+	vdso_text_mapping.pages = vdso_text_pagelist;
+
+	vdso_total_pages = 1; /* for the data/vvar page */
+	vdso_total_pages += text_pages;
+
+	cntvct_ok = cntvct_functional();
+
+	patch_vdso(&vdso_start);
+
+	return 0;
+}
+arch_initcall(vdso_init);
+
+static int install_vvar(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma;
+
+	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
+				       VM_READ | VM_MAYREAD,
+				       &vdso_data_mapping);
+
+	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
+}
+
+/* assumes mmap_sem is write-locked */
+void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma;
+	unsigned long len;
+
+	mm->context.vdso = 0;
+
+	if (vdso_text_pagelist == NULL)
+		return;
+
+	if (install_vvar(mm, addr))
+		return;
+
+	/* Account for vvar page. */
+	addr += PAGE_SIZE;
+	len = (vdso_total_pages - 1) << PAGE_SHIFT;
+
+	vma = _install_special_mapping(mm, addr, len,
+		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+		&vdso_text_mapping);
+
+	if (!IS_ERR(vma))
+		mm->context.vdso = addr;
+}
+
+static void vdso_write_begin(struct vdso_data *vdata)
+{
+	++vdso_data->seq_count;
+	smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
+}
+
+static void vdso_write_end(struct vdso_data *vdata)
+{
+	smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
+	++vdso_data->seq_count;
+}
+
+static bool tk_is_cntvct(const struct timekeeper *tk)
+{
+	if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+		return false;
+
+	if (strcmp(tk->tkr_mono.clock->name, "arch_sys_counter") != 0)
+		return false;
+
+	return true;
+}
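
vdso_write_begin()/vdso_write_end() are the writer half of a classic sequence lock: an odd seq_count means an update is in flight. The reader half lives in the vdso text itself, which is not part of this file; as a kernel-style sketch of what it must do, assuming the same vdso_data layout:

/* Reader half of the seq_count protocol (sketch). Spin while a write
 * is in progress, then verify nothing changed during the copy. */
static u32 vdso_read_begin(const struct vdso_data *vd)
{
	u32 seq;

	while ((seq = READ_ONCE(vd->seq_count)) & 1)
		cpu_relax();
	smp_rmb();	/* pairs with the smp_wmb() in vdso_write_end() */
	return seq;
}

static bool vdso_read_retry(const struct vdso_data *vd, u32 start)
{
	smp_rmb();	/* pairs with the smp_wmb() in vdso_write_begin() */
	return READ_ONCE(vd->seq_count) != start;
}

A reader loops: begin, copy the fields it needs, retry until the sequence is stable.
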
+
+/**
+ * update_vsyscall - update the vdso data page
+ *
+ * Increment the sequence counter, making it odd, indicating to
+ * userspace that an update is in progress. Update the fields used
+ * for coarse clocks and, if the architected system timer is in use,
+ * the fields used for high precision clocks. Increment the sequence
+ * counter again, making it even, indicating to userspace that the
+ * update is finished.
+ *
+ * Userspace is expected to sample seq_count before reading any other
+ * fields from the data page. If seq_count is odd, userspace is
+ * expected to wait until it becomes even. After copying data from
+ * the page, userspace must sample seq_count again; if it has changed
+ * from its previous value, userspace must retry the whole sequence.
+ *
+ * Calls to update_vsyscall are serialized by the timekeeping core.
+ */
+void update_vsyscall(struct timekeeper *tk)
+{
+	struct timespec xtime_coarse;
+	struct timespec64 *wtm = &tk->wall_to_monotonic;
+
+	if (!cntvct_ok) {
+		/* The entry points have been zeroed, so there is no
+		 * point in updating the data page.
+		 */
+		return;
+	}
+
+	vdso_write_begin(vdso_data);
+
+	xtime_coarse = __current_kernel_time();
+	vdso_data->tk_is_cntvct			= tk_is_cntvct(tk);
+	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
+	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
+	vdso_data->wtm_clock_sec		= wtm->tv_sec;
+	vdso_data->wtm_clock_nsec		= wtm->tv_nsec;
+
+	if (vdso_data->tk_is_cntvct) {
+		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
+		vdso_data->xtime_clock_sec	= tk->xtime_sec;
+		vdso_data->xtime_clock_snsec	= tk->tkr_mono.xtime_nsec;
+		vdso_data->cs_mult		= tk->tkr_mono.mult;
+		vdso_data->cs_shift		= tk->tkr_mono.shift;
+		vdso_data->cs_mask		= tk->tkr_mono.mask;
+	}
+
+	vdso_write_end(vdso_data);
+
+	flush_dcache_page(virt_to_page(vdso_data));
+}
+
+void update_vsyscall_tz(void)
+{
+	vdso_data->tz_minuteswest	= sys_tz.tz_minuteswest;
+	vdso_data->tz_dsttime		= sys_tz.tz_dsttime;
+	flush_dcache_page(virt_to_page(vdso_data));
+}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index f2db429ea75d..8b60fde5ce48 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
 		ARM_EXIT_DISCARD(EXIT_DATA)
 		EXIT_CALL
 #ifndef CONFIG_MMU
-		*(.fixup)
+		*(.text.fixup)
 		*(__ex_table)
 #endif
 #ifndef CONFIG_SMP_ON_UP
@@ -100,6 +100,7 @@ SECTIONS
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
+			IDMAP_TEXT
 			__exception_text_start = .;
 			*(.exception.text)
 			__exception_text_end = .;
@@ -108,10 +109,6 @@ SECTIONS
 			SCHED_TEXT
 			LOCK_TEXT
 			KPROBES_TEXT
-			IDMAP_TEXT
-#ifdef CONFIG_MMU
-			*(.fixup)
-#endif
 			*(.gnu.warning)
 			*(.glue_7)
 			*(.glue_7t)
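
No userspace changes are needed to benefit from any of this: a libc that knows about the vdso resolves clock_gettime()/gettimeofday() through the [vdso] mapping and only enters the kernel when the symbols were null-patched. A trivial way to observe it (illustrative):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* Serviced entirely in userspace when the arch timer is usable. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
		return 1;
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

The [vdso] and [vvar] lines that arm_install_vdso() creates are visible in /proc/self/maps.
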