Diffstat (limited to 'arch')
975 files changed, 17928 insertions, 21224 deletions
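For the ARC_STAR_9000923308 hunks below (atomic.h and spinlock.h): the workaround breaks the llock/scond livelock by delaying the retry of a failed scond and doubling that delay on every further failure. A minimal C sketch of that retry policy follows; the function and helper names are invented for the sketch, and GCC __atomic builtins plus a cpu_delay() loop stand in for llock/scond and the "brne.d ... sub" busy-wait, so this is an illustration rather than kernel code:

static void cpu_delay(unsigned int n)
{
	while (n--)				/* mimics "2: brne.d %[tmp], 0, 2b ; sub ..." */
		__asm__ __volatile__("" ::: "memory");
}

static void atomic_add_backoff(int *ctr, int i)
{
	unsigned int delay = 1;			/* uncontended case never uses it */

	for (;;) {
		int old = __atomic_load_n(ctr, __ATOMIC_RELAXED);

		/* scond equivalent: fails if another core stole the exclusive line */
		if (__atomic_compare_exchange_n(ctr, &old, old + i, 0,
						__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			break;			/* "bz 4f": store went through */

		cpu_delay(delay);		/* give the coherency pipeline time to drain */
		delay *= 2;			/* "rol %[delay], %[delay]": back off harder */
	}
}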
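The bitops.h rework below applies the "nr &= 0x1f" masking unconditionally rather than only when gcc can prove nr constant; for a variable index an unmasked "1UL << nr" is undefined in C once nr reaches 32. The addressing convention shared by all the helpers, restated as a stand-alone sketch (assumes 32-bit long, as on ARC; the _sketch name is made up here):

static int test_bit_sketch(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;			/* word holding the bit: nr / 32 */
	mask = 1UL << (nr & 0x1f);		/* bit within that word: nr % 32 */

	return (*addr & mask) != 0;
}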
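futex_atomic_cmpxchg_inatomic() below changes its contract: it now returns 0 on success or -EFAULT on a faulting user access, and always reports the value actually found at *uaddr through *uval, leaving the compare against expval to the generic futex code. A user-space analogue of the success path, assuming a GCC builtin in place of the llock/scond (or ld/st) sequence; the -EFAULT exception-fixup path is not modelled:

static int futex_cmpxchg_sketch(unsigned int *uval, unsigned int *uaddr,
				unsigned int expval, unsigned int newval)
{
	unsigned int exist = expval;

	/* on mismatch the builtin writes the current *uaddr back into exist */
	__atomic_compare_exchange_n(uaddr, &exist, newval, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

	*uval = exist;				/* caller sees what was really there */
	return 0;				/* -EFAULT path (exception fixup) omitted */
}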
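The LLSC rwlock fast paths below all encode one counter convention: __ARCH_RW_LOCK_UNLOCKED__ (0x01000000) means free, every reader decrements the counter, and a writer claims the lock by forcing it to 0, hence "<= 0" meaning write-locked. The two trylock paths restated in plain C, again with GCC builtins as illustrative stand-ins for llock/scond and invented _sketch names:

#define RW_UNLOCKED	0x01000000		/* __ARCH_RW_LOCK_UNLOCKED__ */

static int read_trylock_sketch(int *counter)
{
	int val = __atomic_load_n(counter, __ATOMIC_RELAXED);

	if (val <= 0)				/* a writer holds it (counter forced to 0) */
		return 0;
	/* each reader takes one slot: counter-- */
	return __atomic_compare_exchange_n(counter, &val, val - 1, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static int write_trylock_sketch(int *counter)
{
	int val = RW_UNLOCKED;

	/* writer needs the lock completely idle: UNLOCKED -> 0 */
	return __atomic_compare_exchange_n(counter, &val, 0, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}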
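arc_pmu_event_set_period() below relies on the PCT counters counting up and raising their overflow interrupt at the value programmed into ARC_REG_PCT_INT_CNT{L,H} (max_period, see arc_pmu_add); pre-loading a counter with max_period - left therefore makes the IRQ fire after exactly "left" more qualifying events. The arithmetic restated outside the kernel, with an invented _sketch name and plain integer parameters in place of the perf_event structures:

static int set_period_sketch(unsigned long long max_period, long long period,
			     long long *left, unsigned long long *preload)
{
	int overflow = 0;

	if (*left <= -period) {			/* lagging by more than a whole period */
		*left = period;
		overflow = 1;
	} else if (*left <= 0) {		/* lagging by less than one period */
		*left += period;
		overflow = 1;
	}
	if (*left > (long long)max_period)
		*left = (long long)max_period;

	/* counter trips at max_period, so this leaves "left" events of headroom */
	*preload = max_period - (unsigned long long)*left;
	return overflow;
}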
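The clockevents conversion in time.c below keeps the same periodic programming: arc_clkevent_set_periodic() arms TIMER0 with arc_get_core_freq() / HZ cycles per tick. A worked example with assumed values (the 90 MHz clock-frequency now set in axc003.dtsi, and an illustrative HZ of 100):

#define CORE_FREQ_HZ	90000000UL		/* clock-frequency = <90000000>, assumed */
#define HZ_ASSUMED	100			/* illustrative tick rate */

/* 90,000,000 / 100 = 900,000 timer cycles per 10 ms tick */
#define TICK_CYCLES	(CORE_FREQ_HZ / HZ_ASSUMED)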
diff --git a/arch/Kconfig b/arch/Kconfig index bec6666a3cc4..8a8ea7110de8 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -221,6 +221,10 @@ config ARCH_TASK_STRUCT_ALLOCATOR config ARCH_THREAD_INFO_ALLOCATOR bool +# Select if arch wants to size task_struct dynamically via arch_task_struct_size: +config ARCH_WANTS_DYNAMIC_TASK_STRUCT + bool + config HAVE_REGS_AND_STACK_ACCESS_API bool help diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index bf9e9d3b3792..f515a4dbf7a0 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -3,6 +3,7 @@ config ALPHA default y select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO + select ARCH_USE_CMPXCHG_LOCKREF select HAVE_AOUT select HAVE_IDE select HAVE_OPROFILE diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index cde23cd03609..ffd9cf5ec8c4 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -5,6 +5,7 @@ generic-y += cputime.h generic-y += exec.h generic-y += irq_work.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h diff --git a/arch/alpha/include/asm/mm-arch-hooks.h b/arch/alpha/include/asm/mm-arch-hooks.h deleted file mode 100644 index b07fd862fec3..000000000000 --- a/arch/alpha/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_ALPHA_MM_ARCH_HOOKS_H -#define _ASM_ALPHA_MM_ARCH_HOOKS_H - -#endif /* _ASM_ALPHA_MM_ARCH_HOOKS_H */ diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index 37b570d01202..fed9c6f44c19 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -16,6 +16,11 @@ #define arch_spin_unlock_wait(x) \ do { cpu_relax(); } while ((x)->lock) +static inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + return lock.lock == 0; +} + static inline void arch_spin_unlock(arch_spinlock_t * lock) { mb(); diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 36dc91ace83a..6cc08166ff00 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1138,6 +1138,7 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; cputime_t utime, stime; + unsigned long utime_jiffies, stime_jiffies; if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) return -EINVAL; @@ -1146,14 +1147,18 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) switch (who) { case RUSAGE_SELF: task_cputime(current, &utime, &stime); - jiffies_to_timeval32(utime, &r.ru_utime); - jiffies_to_timeval32(stime, &r.ru_stime); + utime_jiffies = cputime_to_jiffies(utime); + stime_jiffies = cputime_to_jiffies(stime); + jiffies_to_timeval32(utime_jiffies, &r.ru_utime); + jiffies_to_timeval32(stime_jiffies, &r.ru_stime); r.ru_minflt = current->min_flt; r.ru_majflt = current->maj_flt; break; case RUSAGE_CHILDREN: - jiffies_to_timeval32(current->signal->cutime, &r.ru_utime); - jiffies_to_timeval32(current->signal->cstime, &r.ru_stime); + utime_jiffies = cputime_to_jiffies(current->signal->cutime); + stime_jiffies = cputime_to_jiffies(current->signal->cstime); + jiffies_to_timeval32(utime_jiffies, &r.ru_utime); + 
jiffies_to_timeval32(stime_jiffies, &r.ru_stime); r.ru_minflt = current->signal->cmin_flt; r.ru_majflt = current->signal->cmaj_flt; break; diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index 82f738e5d54c..cded02c890aa 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c @@ -242,12 +242,7 @@ pci_restore_srm_config(void) void pcibios_fixup_bus(struct pci_bus *bus) { - struct pci_dev *dev = bus->self; - - if (pci_has_flag(PCI_PROBE_ONLY) && dev && - (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { - pci_read_bridge_bases(bus); - } + struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { pdev_save_srm_config(dev); diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index e7cee0a5c56d..78c0621d5819 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -8,6 +8,7 @@ config ARC def_bool y + select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select BUILDTIME_EXTABLE_SORT select COMMON_CLK select CLONE_BACKWARDS @@ -22,6 +23,7 @@ config ARC select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK + select HAVE_FUTEX_CMPXCHG select HAVE_IOREMAP_PROT select HAVE_KPROBES select HAVE_KRETPROBES @@ -115,6 +117,7 @@ if ISA_ARCOMPACT config ARC_CPU_750D bool "ARC750D" + select ARC_CANT_LLSC help Support for ARC750 core @@ -312,11 +315,11 @@ config ARC_PAGE_SIZE_8K config ARC_PAGE_SIZE_16K bool "16KB" - depends on ARC_MMU_V3 + depends on ARC_MMU_V3 || ARC_MMU_V4 config ARC_PAGE_SIZE_4K bool "4KB" - depends on ARC_MMU_V3 + depends on ARC_MMU_V3 || ARC_MMU_V4 endchoice @@ -362,7 +365,12 @@ config ARC_CANT_LLSC config ARC_HAS_LLSC bool "Insn: LLOCK/SCOND (efficient atomic ops)" default y - depends on !ARC_CPU_750D && !ARC_CANT_LLSC + depends on !ARC_CANT_LLSC + +config ARC_STAR_9000923308 + bool "Workaround for llock/scond livelock" + default y + depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC config ARC_HAS_SWAPE bool "Insn: SWAPE (endian-swap)" @@ -378,6 +386,10 @@ config ARC_HAS_LL64 dest operands with 2 possible source operands. default y +config ARC_HAS_DIV_REM + bool "Insn: div, divu, rem, remu" + default y + config ARC_HAS_RTC bool "Local 64-bit r/o cycle counter" default n diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 6107062c0111..8a27a48304a4 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape +ifdef CONFIG_ISA_ARCV2 + ifndef CONFIG_ARC_HAS_LL64 -cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64 +cflags-y += -mno-ll64 +endif + +ifndef CONFIG_ARC_HAS_DIV_REM +cflags-y += -mno-div-rem +endif + endif cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables @@ -49,7 +57,8 @@ endif ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE # Generic build system uses -O2, we want -O3 -cflags-y += -O3 +# Note: No need to add to cflags-y as that happens anyways +ARCH_CFLAGS += -O3 endif # small data is default for elf32 tool-chain. If not usable, disable it diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index 15c8d6226c9d..846481f37eef 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi @@ -12,7 +12,7 @@ / { compatible = "snps,arc"; - clock-frequency = <75000000>; + clock-frequency = <90000000>; #address-cells = <1>; #size-cells = <1>; @@ -72,12 +72,13 @@ }; /* - * This INTC is actually connected to DW APB GPIO - * which acts as a wire between MB INTC and CPU INTC. 
- * GPIO INTC is configured in platform init code - * and here we mimic direct connection from MB INTC to - * CPU INTC, thus we set "interrupts = <7>" instead of - * "interrupts = <12>" + * The DW APB ICTL intc on MB is connected to CPU intc via a + * DT "invisible" DW APB GPIO block, configured to simply pass thru + * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c) + * + * So here we mimic a direct connection betwen them, ignoring the + * ABPG GPIO. Thus set "interrupts = <24>" (DW APB GPIO to core) + * instead of "interrupts = <12>" (DW APB ICTL to DW APB GPIO) * * This intc actually resides on MB, but we move it here to * avoid duplicating the MB dtsi file given that IRQ from diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 199d42820eca..2f0b33257db2 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi @@ -12,7 +12,7 @@ / { compatible = "snps,arc"; - clock-frequency = <75000000>; + clock-frequency = <90000000>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 1a80cc91a03b..7611b10a2d23 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -22,6 +22,7 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += msgbuf.h generic-y += param.h diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 070f58827a5c..d8023bc8d1ad 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -35,6 +35,7 @@ #define ARC_REG_RTT_BCR 0xF2 #define ARC_REG_IRQ_BCR 0xF3 #define ARC_REG_SMART_BCR 0xFF +#define ARC_REG_CLUSTER_BCR 0xcf /* status32 Bits Positions */ #define STATUS_AE_BIT 5 /* Exception active */ @@ -89,11 +90,10 @@ #define ECR_C_BIT_DTLB_LD_MISS 8 #define ECR_C_BIT_DTLB_ST_MISS 9 - /* Auxiliary registers */ #define AUX_IDENTITY 4 #define AUX_INTR_VEC_BASE 0x25 - +#define AUX_NON_VOL 0x5e /* * Floating Pt Registers @@ -240,9 +240,9 @@ struct bcr_extn_xymem { struct bcr_perip { #ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int start:8, pad2:8, sz:8, pad:8; + unsigned int start:8, pad2:8, sz:8, ver:8; #else - unsigned int pad:8, sz:8, pad2:8, start:8; + unsigned int ver:8, sz:8, pad2:8, start:8; #endif }; diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 03484cb4d16d..87d18ae53115 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -23,33 +23,60 @@ #define atomic_set(v, i) (((v)->counter) = (i)) -#ifdef CONFIG_ISA_ARCV2 -#define PREFETCHW " prefetchw [%1] \n" -#else -#define PREFETCHW +#ifdef CONFIG_ARC_STAR_9000923308 + +#define SCOND_FAIL_RETRY_VAR_DEF \ + unsigned int delay = 1, tmp; \ + +#define SCOND_FAIL_RETRY_ASM \ + " bz 4f \n" \ + " ; --- scond fail delay --- \n" \ + " mov %[tmp], %[delay] \n" /* tmp = delay */ \ + "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ + " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ + " rol %[delay], %[delay] \n" /* delay *= 2 */ \ + " b 1b \n" /* start over */ \ + "4: ; --- success --- \n" \ + +#define SCOND_FAIL_RETRY_VARS \ + ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \ + +#else /* !CONFIG_ARC_STAR_9000923308 */ + +#define SCOND_FAIL_RETRY_VAR_DEF + +#define SCOND_FAIL_RETRY_ASM \ + " bnz 1b \n" \ + +#define SCOND_FAIL_RETRY_VARS + #endif #define ATOMIC_OP(op, c_op, asm_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ - unsigned int temp; \ + unsigned int 
val; \ + SCOND_FAIL_RETRY_VAR_DEF \ \ __asm__ __volatile__( \ - "1: \n" \ - PREFETCHW \ - " llock %0, [%1] \n" \ - " " #asm_op " %0, %0, %2 \n" \ - " scond %0, [%1] \n" \ - " bnz 1b \n" \ - : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ - : "r"(&v->counter), "ir"(i) \ + "1: llock %[val], [%[ctr]] \n" \ + " " #asm_op " %[val], %[val], %[i] \n" \ + " scond %[val], [%[ctr]] \n" \ + " \n" \ + SCOND_FAIL_RETRY_ASM \ + \ + : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ + SCOND_FAIL_RETRY_VARS \ + : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ + [i] "ir" (i) \ : "cc"); \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ - unsigned int temp; \ + unsigned int val; \ + SCOND_FAIL_RETRY_VAR_DEF \ \ /* \ * Explicit full memory barrier needed before/after as \ @@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ smp_mb(); \ \ __asm__ __volatile__( \ - "1: \n" \ - PREFETCHW \ - " llock %0, [%1] \n" \ - " " #asm_op " %0, %0, %2 \n" \ - " scond %0, [%1] \n" \ - " bnz 1b \n" \ - : "=&r"(temp) \ - : "r"(&v->counter), "ir"(i) \ + "1: llock %[val], [%[ctr]] \n" \ + " " #asm_op " %[val], %[val], %[i] \n" \ + " scond %[val], [%[ctr]] \n" \ + " \n" \ + SCOND_FAIL_RETRY_ASM \ + \ + : [val] "=&r" (val) \ + SCOND_FAIL_RETRY_VARS \ + : [ctr] "r" (&v->counter), \ + [i] "ir" (i) \ : "cc"); \ \ smp_mb(); \ \ - return temp; \ + return val; \ } #else /* !CONFIG_ARC_HAS_LLSC */ @@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP +#undef SCOND_FAIL_RETRY_VAR_DEF +#undef SCOND_FAIL_RETRY_ASM +#undef SCOND_FAIL_RETRY_VARS /** * __atomic_add_unless - add unless the number is a given value diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index 99fe118d3730..57c1f33844d4 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -50,8 +50,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ * done for const @nr, but no code is generated due to gcc \ * const prop. 
\ */ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ + nr &= 0x1f; \ \ __asm__ __volatile__( \ "1: llock %0, [%1] \n" \ @@ -82,8 +81,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long * \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ + nr &= 0x1f; \ \ /* \ * Explicit full memory barrier needed before/after as \ @@ -129,16 +127,13 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ unsigned long temp, flags; \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ - \ /* \ * spin lock/unlock provide the needed smp_mb() before/after \ */ \ bitops_lock(flags); \ \ temp = *m; \ - *m = temp c_op (1UL << nr); \ + *m = temp c_op (1UL << (nr & 0x1f)); \ \ bitops_unlock(flags); \ } @@ -149,17 +144,14 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long * unsigned long old, flags; \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ - \ bitops_lock(flags); \ \ old = *m; \ - *m = old c_op (1 << nr); \ + *m = old c_op (1UL << (nr & 0x1f)); \ \ bitops_unlock(flags); \ \ - return (old & (1 << nr)) != 0; \ + return (old & (1UL << (nr & 0x1f))) != 0; \ } #endif /* CONFIG_ARC_HAS_LLSC */ @@ -174,11 +166,8 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \ unsigned long temp; \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ - \ temp = *m; \ - *m = temp c_op (1UL << nr); \ + *m = temp c_op (1UL << (nr & 0x1f)); \ } #define __TEST_N_BIT_OP(op, c_op, asm_op) \ @@ -187,13 +176,10 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long unsigned long old; \ m += nr >> 5; \ \ - if (__builtin_constant_p(nr)) \ - nr &= 0x1f; \ - \ old = *m; \ - *m = old c_op (1 << nr); \ + *m = old c_op (1UL << (nr & 0x1f)); \ \ - return (old & (1 << nr)) != 0; \ + return (old & (1UL << (nr & 0x1f))) != 0; \ } #define BIT_OPS(op, c_op, asm_op) \ @@ -224,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr) addr += nr >> 5; - if (__builtin_constant_p(nr)) - nr &= 0x1f; - - mask = 1 << nr; + mask = 1UL << (nr & 0x1f); return ((mask & *addr) != 0); } diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index d67345d3e2d4..e23ea6e7633a 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -53,6 +53,8 @@ extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern void read_decode_cache_bcr(void); +extern int ioc_exists; + #endif /* !__ASSEMBLY__ */ /* Instruction cache related Auxiliary registers */ @@ -94,4 +96,10 @@ extern void read_decode_cache_bcr(void); #define SLC_CTRL_BUSY 0x100 #define SLC_CTRL_RGN_OP_INV 0x200 +/* IO coherency related Auxiliary registers */ +#define ARC_REG_IO_COH_ENABLE 0x500 +#define ARC_REG_IO_COH_PARTIAL 0x501 +#define ARC_REG_IO_COH_AP0_BASE 0x508 +#define ARC_REG_IO_COH_AP0_SIZE 0x509 + #endif /* _ASM_CACHE_H */ diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h index 44fd531f4d7b..af7a2db139c9 100644 --- a/arch/arc/include/asm/cmpxchg.h +++ b/arch/arc/include/asm/cmpxchg.h @@ -110,18 +110,18 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, sizeof(*(ptr)))) /* - * On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need - * not require any locking. However there's a quirk. 
- * ARC lacks native CMPXCHG, thus emulated (see above), using external locking - - * incidently it "reuses" the same atomic_ops_lock used by atomic APIs. - * Now, llist code uses cmpxchg() and xchg() on same data, so xchg() needs to - * abide by same serializing rules, thus ends up using atomic_ops_lock as well. + * xchg() maps directly to ARC EX instruction which guarantees atomicity. + * However in !LLSC config, it also needs to be use @atomic_ops_lock spinlock + * due to a subtle reason: + * - For !LLSC, cmpxchg() needs to use that lock (see above) and there is lot + * of kernel code which calls xchg()/cmpxchg() on same data (see llist.h) + * Hence xchg() needs to follow same locking rules. * - * This however is only relevant if SMP and/or ARC lacks LLSC - * if (UP or LLSC) - * xchg doesn't need serialization - * else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC) - * xchg needs serialization + * Technically the lock is also needed for UP (boils down to irq save/restore) + * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to + * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg() + * Other way around, xchg is one instruction anyways, so can't be interrupted + * as such */ #if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP) diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h index 05b5aaf5b0f9..11e1b1f3acda 100644 --- a/arch/arc/include/asm/futex.h +++ b/arch/arc/include/asm/futex.h @@ -16,18 +16,22 @@ #include <linux/uaccess.h> #include <asm/errno.h> +#ifdef CONFIG_ARC_HAS_LLSC + #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\ \ + smp_mb(); \ __asm__ __volatile__( \ - "1: ld %1, [%2] \n" \ + "1: llock %1, [%2] \n" \ insn "\n" \ - "2: st %0, [%2] \n" \ + "2: scond %0, [%2] \n" \ + " bnz 1b \n" \ " mov %0, 0 \n" \ "3: \n" \ " .section .fixup,\"ax\" \n" \ " .align 4 \n" \ "4: mov %0, %4 \n" \ - " b 3b \n" \ + " j 3b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ " .align 4 \n" \ @@ -37,7 +41,37 @@ \ : "=&r" (ret), "=&r" (oldval) \ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \ - : "cc", "memory") + : "cc", "memory"); \ + smp_mb() \ + +#else /* !CONFIG_ARC_HAS_LLSC */ + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\ + \ + smp_mb(); \ + __asm__ __volatile__( \ + "1: ld %1, [%2] \n" \ + insn "\n" \ + "2: st %0, [%2] \n" \ + " mov %0, 0 \n" \ + "3: \n" \ + " .section .fixup,\"ax\" \n" \ + " .align 4 \n" \ + "4: mov %0, %4 \n" \ + " j 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " .align 4 \n" \ + " .word 1b, 4b \n" \ + " .word 2b, 4b \n" \ + " .previous \n" \ + \ + : "=&r" (ret), "=&r" (oldval) \ + : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \ + : "cc", "memory"); \ + smp_mb() \ + +#endif static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { @@ -53,6 +87,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; +#ifndef CONFIG_ARC_HAS_LLSC + preempt_disable(); /* to guarantee atomic r-m-w of futex op */ +#endif pagefault_disable(); switch (op) { @@ -60,6 +97,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ADD: + /* oldval = *uaddr; *uaddr += oparg ; ret = *uaddr */ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg); break; case FUTEX_OP_OR: @@ -76,6 +114,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, 
u32 __user *uaddr) } pagefault_enable(); +#ifndef CONFIG_ARC_HAS_LLSC + preempt_enable(); +#endif if (!ret) { switch (cmp) { @@ -104,48 +145,57 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) return ret; } -/* Compare-xchg with pagefaults disabled. - * Notes: - * -Best-Effort: Exchg happens only if compare succeeds. - * If compare fails, returns; leaving retry/looping to upper layers - * -successful cmp-xchg: return orig value in @addr (same as cmp val) - * -Compare fails: return orig value in @addr - * -user access r/w fails: return -EFAULT +/* + * cmpxchg of futex (pagefaults disabled by caller) + * Return 0 for success, -EFAULT otherwise */ static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, - u32 newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 expval, + u32 newval) { - u32 val; + int ret = 0; + u32 existval; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - pagefault_disable(); +#ifndef CONFIG_ARC_HAS_LLSC + preempt_disable(); /* to guarantee atomic r-m-w of futex op */ +#endif + smp_mb(); - /* TBD : can use llock/scond */ __asm__ __volatile__( - "1: ld %0, [%3] \n" - " brne %0, %1, 3f \n" - "2: st %2, [%3] \n" +#ifdef CONFIG_ARC_HAS_LLSC + "1: llock %1, [%4] \n" + " brne %1, %2, 3f \n" + "2: scond %3, [%4] \n" + " bnz 1b \n" +#else + "1: ld %1, [%4] \n" + " brne %1, %2, 3f \n" + "2: st %3, [%4] \n" +#endif "3: \n" " .section .fixup,\"ax\" \n" - "4: mov %0, %4 \n" - " b 3b \n" + "4: mov %0, %5 \n" + " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " .align 4 \n" " .word 1b, 4b \n" " .word 2b, 4b \n" " .previous\n" - : "=&r"(val) - : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT) + : "+&r"(ret), "=&r"(existval) + : "r"(expval), "r"(newval), "r"(uaddr), "ir"(-EFAULT) : "cc", "memory"); - pagefault_enable(); + smp_mb(); - *uval = val; - return val; +#ifndef CONFIG_ARC_HAS_LLSC + preempt_enable(); +#endif + *uval = existval; + return ret; } #endif diff --git a/arch/arc/include/asm/mm-arch-hooks.h b/arch/arc/include/asm/mm-arch-hooks.h deleted file mode 100644 index c37541c5f8ba..000000000000 --- a/arch/arc/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_ARC_MM_ARCH_HOOKS_H -#define _ASM_ARC_MM_ARCH_HOOKS_H - -#endif /* _ASM_ARC_MM_ARCH_HOOKS_H */ diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h index 2b8880e953a2..5f071762fb1c 100644 --- a/arch/arc/include/asm/perf_event.h +++ b/arch/arc/include/asm/perf_event.h @@ -1,6 +1,7 @@ /* * Linux performance counter support for ARC * + * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com) * Copyright (C) 2011-2013 Synopsys, Inc. 
(www.synopsys.com) * * This program is free software; you can redistribute it and/or modify @@ -12,8 +13,8 @@ #ifndef __ASM_PERF_EVENT_H #define __ASM_PERF_EVENT_H -/* real maximum varies per CPU, this is the maximum supported by the driver */ -#define ARC_PMU_MAX_HWEVENTS 64 +/* Max number of counters that PCT block may ever have */ +#define ARC_PERF_MAX_COUNTERS 32 #define ARC_REG_CC_BUILD 0xF6 #define ARC_REG_CC_INDEX 0x240 @@ -28,15 +29,22 @@ #define ARC_REG_PCT_CONFIG 0x254 #define ARC_REG_PCT_CONTROL 0x255 #define ARC_REG_PCT_INDEX 0x256 +#define ARC_REG_PCT_INT_CNTL 0x25C +#define ARC_REG_PCT_INT_CNTH 0x25D +#define ARC_REG_PCT_INT_CTRL 0x25E +#define ARC_REG_PCT_INT_ACT 0x25F + +#define ARC_REG_PCT_CONFIG_USER (1 << 18) /* count in user mode */ +#define ARC_REG_PCT_CONFIG_KERN (1 << 19) /* count in kernel mode */ #define ARC_REG_PCT_CONTROL_CC (1 << 16) /* clear counts */ #define ARC_REG_PCT_CONTROL_SN (1 << 17) /* snapshot */ struct arc_reg_pct_build { #ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int m:8, c:8, r:6, s:2, v:8; + unsigned int m:8, c:8, r:5, i:1, s:2, v:8; #else - unsigned int v:8, s:2, r:6, c:8, m:8; + unsigned int v:8, s:2, i:1, r:5, c:8, m:8; #endif }; @@ -95,10 +103,13 @@ static const char * const arc_pmu_ev_hw_map[] = { /* counts condition */ [PERF_COUNT_HW_INSTRUCTIONS] = "iall", - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */ [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ +#ifdef CONFIG_ISA_ARCV2 + [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", +#else [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */ - +#endif [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */ [PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */ diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 91755972b9a2..69095da1fcfd 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -20,20 +20,20 @@ struct pt_regs { /* Real registers */ - long bta; /* bta_l1, bta_l2, erbta */ + unsigned long bta; /* bta_l1, bta_l2, erbta */ - long lp_start, lp_end, lp_count; + unsigned long lp_start, lp_end, lp_count; - long status32; /* status32_l1, status32_l2, erstatus */ - long ret; /* ilink1, ilink2 or eret */ - long blink; - long fp; - long r26; /* gp */ + unsigned long status32; /* status32_l1, status32_l2, erstatus */ + unsigned long ret; /* ilink1, ilink2 or eret */ + unsigned long blink; + unsigned long fp; + unsigned long r26; /* gp */ - long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; + unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; - long sp; /* user/kernel sp depending on where we came from */ - long orig_r0; + unsigned long sp; /* User/Kernel depending on where we came from */ + unsigned long orig_r0; /* * To distinguish bet excp, syscall, irq @@ -55,13 +55,13 @@ struct pt_regs { unsigned long event; }; - long user_r25; + unsigned long user_r25; }; #else struct pt_regs { - long orig_r0; + unsigned long orig_r0; union { struct { @@ -76,26 +76,26 @@ struct pt_regs { unsigned long event; }; - long bta; /* bta_l1, bta_l2, erbta */ + unsigned long bta; /* bta_l1, bta_l2, erbta */ - long user_r25; + unsigned long user_r25; - long r26; /* gp */ - long fp; - long sp; /* user/kernel sp depending on where we came from */ + unsigned long r26; /* gp */ + unsigned long fp; + unsigned long sp; /* user/kernel sp depending on where we came from */ - long r12; + unsigned long r12; /*------- Below list auto saved by h/w 
-----------*/ - long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; + unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; - long blink; - long lp_end, lp_start, lp_count; + unsigned long blink; + unsigned long lp_end, lp_start, lp_count; - long ei, ldi, jli; + unsigned long ei, ldi, jli; - long ret; - long status32; + unsigned long ret; + unsigned long status32; }; #endif @@ -103,7 +103,7 @@ struct pt_regs { /* Callee saved registers - need to be saved only when you are scheduled out */ struct callee_regs { - long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; + unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; }; #define instruction_pointer(regs) ((regs)->ret) @@ -142,7 +142,7 @@ struct callee_regs { static inline long regs_return_value(struct pt_regs *regs) { - return regs->r0; + return (long)regs->r0; } #endif /* !__ASSEMBLY__ */ diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index e1651df6a93d..db8c59d1eaeb 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -18,9 +18,518 @@ #define arch_spin_unlock_wait(x) \ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) +#ifdef CONFIG_ARC_HAS_LLSC + +/* + * A normal LLOCK/SCOND based system, w/o need for livelock workaround + */ +#ifndef CONFIG_ARC_STAR_9000923308 + static inline void arch_spin_lock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + unsigned int val; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bnz 1b \n" + " mov %[got_it], 1 \n" + "4: \n" + " \n" + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + + lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; + + smp_mb(); +} + +/* + * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) + */ + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * zero means writer holds the lock exclusively, deny Reader. 
+ * Otherwise grant lock to first/subseq reader + * + * if (rw->counter > 0) { + * rw->counter--; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */ + " sub %[val], %[val], 1 \n" /* reader lock */ + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ + " sub %[val], %[val], 1 \n" /* counter-- */ + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" /* retry if collided with someone */ + " mov %[got_it], 1 \n" + " \n" + "4: ; --- done --- \n" + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), + * deny writer. Otherwise if unlocked grant to writer + * Hence the claim that Linux rwlocks are unfair to writers. + * (can be starved for an indefinite time by readers). + * + * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { + * rw->counter = 0; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" /* retry if collided with someone */ + " mov %[got_it], 1 \n" + " \n" + "4: ; --- done --- \n" + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter++; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " add %[val], %[val], 1 \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)) + : "memory", "cc"); + + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + smp_mb(); + + rw->counter = __ARCH_RW_LOCK_UNLOCKED__; + + smp_mb(); +} + +#else /* CONFIG_ARC_STAR_9000923308 */ + +/* + * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping + * coherency transactions in the SCU. The exclusive line state keeps rotating + * among contenting cores leading to a never ending cycle. So break the cycle + * by deferring the retry of failed exclusive access (SCOND). 
The actual delay + * needed is function of number of contending cores as well as the unrelated + * coherency traffic from other cores. To keep the code simple, start off with + * small delay of 1 which would suffice most cases and in case of contention + * double the delay. Eventually the delay is sufficient such that the coherency + * pipeline is drained, thus a subsequent exclusive access would succeed. + */ + +#define SCOND_FAIL_RETRY_VAR_DEF \ + unsigned int delay, tmp; \ + +#define SCOND_FAIL_RETRY_ASM \ + " ; --- scond fail delay --- \n" \ + " mov %[tmp], %[delay] \n" /* tmp = delay */ \ + "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ + " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ + " rol %[delay], %[delay] \n" /* delay *= 2 */ \ + " b 1b \n" /* start over */ \ + " \n" \ + "4: ; --- done --- \n" \ + +#define SCOND_FAIL_RETRY_VARS \ + ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \ + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bz 4f \n" /* done */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + + lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; + + smp_mb(); +} + +/* + * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) + */ + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + /* + * zero means writer holds the lock exclusively, deny Reader. 
+ * Otherwise grant lock to first/subseq reader + * + * if (rw->counter > 0) { + * rw->counter--; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */ + " sub %[val], %[val], 1 \n" /* reader lock */ + " scond %[val], [%[rwlock]] \n" + " bz 4f \n" /* done */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ + " sub %[val], %[val], 1 \n" /* counter-- */ + " scond %[val], [%[rwlock]] \n" + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + /* + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), + * deny writer. Otherwise if unlocked grant to writer + * Hence the claim that Linux rwlocks are unfair to writers. + * (can be starved for an indefinite time by readers). + * + * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { + * rw->counter = 0; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bz 4f \n" + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter++; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " add %[val], %[val], 1 \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)) + : "memory", "cc"); + + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter = __ARCH_RW_LOCK_UNLOCKED__; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " scond %[UNLOCKED], [%[rwlock]]\n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : 
[rwlock] "r" (&(rw->counter)), + [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +#undef SCOND_FAIL_RETRY_VAR_DEF +#undef SCOND_FAIL_RETRY_ASM +#undef SCOND_FAIL_RETRY_VARS + +#endif /* CONFIG_ARC_STAR_9000923308 */ + +#else /* !CONFIG_ARC_HAS_LLSC */ + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; /* * This smp_mb() is technically superfluous, we only need the one @@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: ex %0, [%1] \n" " breq %0, %2, 1b \n" - : "+&r" (tmp) + : "+&r" (val) : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) : "memory"); @@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) smp_mb(); } +/* 1 - lock taken successfully */ static inline int arch_spin_trylock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; smp_mb(); __asm__ __volatile__( "1: ex %0, [%1] \n" - : "+r" (tmp) + : "+r" (val) : "r"(&(lock->slock)) : "memory"); smp_mb(); - return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); + return (val == __ARCH_SPIN_LOCK_UNLOCKED__); } static inline void arch_spin_unlock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; + unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__; /* * RELEASE barrier: given the instructions avail on ARCv2, full barrier @@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) __asm__ __volatile__( " ex %0, [%1] \n" - : "+r" (tmp) + : "+r" (val) : "r"(&(lock->slock)) : "memory"); @@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) /* * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) * * The spinlock itself is contained in @counter and access to it is * serialized with @lock_mutex. - * - * Unfair locking as Writers could be starved indefinitely by Reader(s) */ -/* Would read_trylock() succeed? */ -#define arch_read_can_lock(x) ((x)->counter > 0) - -/* Would write_trylock() succeed? 
*/ -#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__) - /* 1 - lock taken successfully */ static inline int arch_read_trylock(arch_rwlock_t *rw) { @@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) arch_spin_unlock(&(rw->lock_mutex)); } +#endif + +#define arch_read_can_lock(x) ((x)->counter > 0) +#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__) + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h index 662627ced4f2..4e1ef5f650c6 100644 --- a/arch/arc/include/asm/spinlock_types.h +++ b/arch/arc/include/asm/spinlock_types.h @@ -26,7 +26,9 @@ typedef struct { */ typedef struct { volatile unsigned int counter; +#ifndef CONFIG_ARC_HAS_LLSC arch_spinlock_t lock_mutex; +#endif } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 76a7739aab1c..0b3ef63d4a03 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -32,20 +32,20 @@ */ struct user_regs_struct { - long pad; + unsigned long pad; struct { - long bta, lp_start, lp_end, lp_count; - long status32, ret, blink, fp, gp; - long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; - long sp; + unsigned long bta, lp_start, lp_end, lp_count; + unsigned long status32, ret, blink, fp, gp; + unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; + unsigned long sp; } scratch; - long pad2; + unsigned long pad2; struct { - long r25, r24, r23, r22, r21, r20; - long r19, r18, r17, r16, r15, r14, r13; + unsigned long r25, r24, r23, r22, r21, r20; + unsigned long r19, r18, r17, r16, r15, r14, r13; } callee; - long efa; /* break pt addr, for break points in delay slots */ - long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ + unsigned long efa; /* break pt addr, for break points in delay slots */ + unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ }; #endif /* !__ASSEMBLY__ */ diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index bd7105d3172f..8fa76567e402 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S @@ -57,13 +57,8 @@ VECTOR handle_interrupt ; (23) End of fixed IRQs .section .text, "ax",@progbits -res_service: ; processor restart - flag 0x1 ; not implemented - nop - nop - -reserved: ; processor restart - rtie ; jump to processor initializations +reserved: + flag 1 ; Unexpected event, halt ;##################### Interrupt Handling ############################## diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index f7a82fd4d601..589abf5172d6 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -42,7 +42,7 @@ ENTRY(ret_from_fork) ; when the forked child comes here from the __switch_to function ; r0 has the last task pointer. 
; put last task in scheduler queue - bl @schedule_tail + jl @schedule_tail ld r9, [sp, PT_status32] brne r9, 0, 1f @@ -320,7 +320,7 @@ resume_user_mode_begin: ; --- (Slow Path #1) task preemption --- bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals mov blink, resume_user_mode_begin ; tail-call to U mode ret chks - b @schedule ; BTST+Bnz causes relo error in link + j @schedule ; BTST+Bnz causes relo error in link .Lchk_pend_signals: IRQ_ENABLE r10 @@ -381,7 +381,7 @@ resume_kernel_mode: bbit0 r9, TIF_NEED_RESCHED, .Lrestore_regs ; Invoke PREEMPTION - bl preempt_schedule_irq + jl preempt_schedule_irq ; preempt_schedule_irq() always returns with IRQ disabled #endif diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 6208c630abed..26c156827479 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -12,7 +12,6 @@ #include <linux/of.h> #include <linux/irqdomain.h> #include <linux/irqchip.h> -#include "../../drivers/irqchip/irqchip.h" #include <asm/irq.h> /* diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index fcdddb631766..039fac30b5c1 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c @@ -12,7 +12,6 @@ #include <linux/of.h> #include <linux/irqdomain.h> #include <linux/irqchip.h> -#include "../../drivers/irqchip/irqchip.h" #include <asm/irq.h> /* diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index 30284e8de6ff..2fb86589054d 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -175,7 +175,6 @@ void mcip_init_early_smp(void) #include <linux/irqchip.h> #include <linux/of.h> #include <linux/of_irq.h> -#include "../../drivers/irqchip/irqchip.h" /* * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core) @@ -218,11 +217,28 @@ static void idu_irq_unmask(struct irq_data *data) raw_spin_unlock_irqrestore(&mcip_lock, flags); } +#ifdef CONFIG_SMP static int -idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f) +idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, + bool force) { + unsigned long flags; + cpumask_t online; + + /* errout if no online cpu per @cpumask */ + if (!cpumask_and(&online, cpumask, cpu_online_mask)) + return -EINVAL; + + raw_spin_lock_irqsave(&mcip_lock, flags); + + idu_set_dest(data->hwirq, cpumask_bits(&online)[0]); + idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR); + + raw_spin_unlock_irqrestore(&mcip_lock, flags); + return IRQ_SET_MASK_OK; } +#endif static struct irq_chip idu_irq_chip = { .name = "MCIP IDU Intc", @@ -330,8 +346,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent) if (!i) idu_first_irq = irq; - irq_set_handler_data(irq, domain); - irq_set_chained_handler(irq, idu_cascade_isr); + irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain); } __mcip_cmd(CMD_IDU_ENABLE, 0); diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 1287388c258a..0c08bb1ce15a 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -1,7 +1,7 @@ /* * Linux performance counter support for ARC700 series * - * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com) * * This code is inspired by the perf support of various other architectures. 
* @@ -11,6 +11,7 @@ * */ #include <linux/errno.h> +#include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/perf_event.h> @@ -20,12 +21,25 @@ struct arc_pmu { struct pmu pmu; - int counter_size; /* in bits */ + unsigned int irq; int n_counters; - unsigned long used_mask[BITS_TO_LONGS(ARC_PMU_MAX_HWEVENTS)]; + u64 max_period; int ev_hw_idx[PERF_COUNT_ARC_HW_MAX]; }; +struct arc_pmu_cpu { + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + unsigned long used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)]; + + /* + * The events that are active on the PMU for the given index. + */ + struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS]; +}; + struct arc_callchain_trace { int depth; void *perf_stuff; @@ -65,6 +79,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) } static struct arc_pmu *arc_pmu; +static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu); /* read counter #idx; note that counter# != event# on ARC! */ static uint64_t arc_pmu_read_counter(int idx) @@ -88,18 +103,15 @@ static uint64_t arc_pmu_read_counter(int idx) static void arc_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) { - uint64_t prev_raw_count, new_raw_count; - int64_t delta; - - do { - prev_raw_count = local64_read(&hwc->prev_count); - new_raw_count = arc_pmu_read_counter(idx); - } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count); - - delta = (new_raw_count - prev_raw_count) & - ((1ULL << arc_pmu->counter_size) - 1ULL); + uint64_t prev_raw_count = local64_read(&hwc->prev_count); + uint64_t new_raw_count = arc_pmu_read_counter(idx); + int64_t delta = new_raw_count - prev_raw_count; + /* + * We don't afaraid of hwc->prev_count changing beneath our feet + * because there's no way for us to re-enter this function anytime. 
+ */ + local64_set(&hwc->prev_count, new_raw_count); local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); } @@ -142,22 +154,41 @@ static int arc_pmu_event_init(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; int ret; + if (!is_sampling_event(event)) { + hwc->sample_period = arc_pmu->max_period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + hwc->config = 0; + + if (is_isa_arcv2()) { + /* "exclude user" means "count only kernel" */ + if (event->attr.exclude_user) + hwc->config |= ARC_REG_PCT_CONFIG_KERN; + + /* "exclude kernel" means "count only user" */ + if (event->attr.exclude_kernel) + hwc->config |= ARC_REG_PCT_CONFIG_USER; + } + switch (event->attr.type) { case PERF_TYPE_HARDWARE: if (event->attr.config >= PERF_COUNT_HW_MAX) return -ENOENT; if (arc_pmu->ev_hw_idx[event->attr.config] < 0) return -ENOENT; - hwc->config = arc_pmu->ev_hw_idx[event->attr.config]; + hwc->config |= arc_pmu->ev_hw_idx[event->attr.config]; pr_debug("init event %d with h/w %d \'%s\'\n", (int) event->attr.config, (int) hwc->config, arc_pmu_ev_hw_map[event->attr.config]); return 0; + case PERF_TYPE_HW_CACHE: ret = arc_pmu_cache_event(event->attr.config); if (ret < 0) return ret; - hwc->config = arc_pmu->ev_hw_idx[ret]; + hwc->config |= arc_pmu->ev_hw_idx[ret]; return 0; default: return -ENOENT; @@ -180,6 +211,47 @@ static void arc_pmu_disable(struct pmu *pmu) write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0); } +static int arc_pmu_event_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int idx = hwc->idx; + int overflow = 0; + u64 value; + + if (unlikely(left <= -period)) { + /* left underflowed by more than period. */ + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } else if (unlikely(left <= 0)) { + /* left underflowed by less than period. */ + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } + + if (left > arc_pmu->max_period) + left = arc_pmu->max_period; + + value = arc_pmu->max_period - left; + local64_set(&hwc->prev_count, value); + + /* Select counter */ + write_aux_reg(ARC_REG_PCT_INDEX, idx); + + /* Write value */ + write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value); + write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32)); + + perf_event_update_userpage(event); + + return overflow; +} + /* * Assigns hardware counter to hardware condition. 
* Note that there is no separate start/stop mechanism; @@ -194,13 +266,20 @@ static void arc_pmu_start(struct perf_event *event, int flags) return; if (flags & PERF_EF_RELOAD) - WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; - event->hw.state = 0; + arc_pmu_event_set_period(event); + + /* Enable interrupt for this counter */ + if (is_sampling_event(event)) + write_aux_reg(ARC_REG_PCT_INT_CTRL, + read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); /* enable ARC pmu here */ - write_aux_reg(ARC_REG_PCT_INDEX, idx); - write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config); + write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */ + write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config); /* condition */ } static void arc_pmu_stop(struct perf_event *event, int flags) @@ -208,6 +287,17 @@ static void arc_pmu_stop(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + /* Disable interrupt for this counter */ + if (is_sampling_event(event)) { + /* + * Reset interrupt flag by writing of 1. This is required + * to make sure pending interrupt was not left. + */ + write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); + write_aux_reg(ARC_REG_PCT_INT_CTRL, + read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx)); + } + if (!(event->hw.state & PERF_HES_STOPPED)) { /* stop ARC pmu here */ write_aux_reg(ARC_REG_PCT_INDEX, idx); @@ -227,8 +317,12 @@ static void arc_pmu_stop(struct perf_event *event, int flags) static void arc_pmu_del(struct perf_event *event, int flags) { + struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); + arc_pmu_stop(event, PERF_EF_UPDATE); - __clear_bit(event->hw.idx, arc_pmu->used_mask); + __clear_bit(event->hw.idx, pmu_cpu->used_mask); + + pmu_cpu->act_counter[event->hw.idx] = 0; perf_event_update_userpage(event); } @@ -236,20 +330,31 @@ static void arc_pmu_del(struct perf_event *event, int flags) /* allocate hardware counter and optionally start counting */ static int arc_pmu_add(struct perf_event *event, int flags) { + struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - if (__test_and_set_bit(idx, arc_pmu->used_mask)) { - idx = find_first_zero_bit(arc_pmu->used_mask, + if (__test_and_set_bit(idx, pmu_cpu->used_mask)) { + idx = find_first_zero_bit(pmu_cpu->used_mask, arc_pmu->n_counters); if (idx == arc_pmu->n_counters) return -EAGAIN; - __set_bit(idx, arc_pmu->used_mask); + __set_bit(idx, pmu_cpu->used_mask); hwc->idx = idx; } write_aux_reg(ARC_REG_PCT_INDEX, idx); + + pmu_cpu->act_counter[idx] = event; + + if (is_sampling_event(event)) { + /* Mimic full counter overflow as other arches do */ + write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period); + write_aux_reg(ARC_REG_PCT_INT_CNTH, + (arc_pmu->max_period >> 32)); + } + write_aux_reg(ARC_REG_PCT_CONFIG, 0); write_aux_reg(ARC_REG_PCT_COUNTL, 0); write_aux_reg(ARC_REG_PCT_COUNTH, 0); @@ -264,11 +369,82 @@ static int arc_pmu_add(struct perf_event *event, int flags) return 0; } +#ifdef CONFIG_ISA_ARCV2 +static irqreturn_t arc_pmu_intr(int irq, void *dev) +{ + struct perf_sample_data data; + struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); + struct pt_regs *regs; + int active_ints; + int idx; + + arc_pmu_disable(&arc_pmu->pmu); + + active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT); + + regs = get_irq_regs(); + + for (idx = 0; idx < arc_pmu->n_counters; idx++) { + struct perf_event *event = pmu_cpu->act_counter[idx]; + struct hw_perf_event *hwc; + + if 
(!(active_ints & (1 << idx))) + continue; + + /* Reset interrupt flag by writing of 1 */ + write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); + + /* + * On reset of "interrupt active" bit corresponding + * "interrupt enable" bit gets automatically reset as well. + * Now we need to re-enable interrupt for the counter. + */ + write_aux_reg(ARC_REG_PCT_INT_CTRL, + read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); + + hwc = &event->hw; + + WARN_ON_ONCE(hwc->idx != idx); + + arc_perf_event_update(event, &event->hw, event->hw.idx); + perf_sample_data_init(&data, 0, hwc->last_period); + if (!arc_pmu_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + arc_pmu_stop(event, 0); + } + + arc_pmu_enable(&arc_pmu->pmu); + + return IRQ_HANDLED; +} +#else + +static irqreturn_t arc_pmu_intr(int irq, void *dev) +{ + return IRQ_NONE; +} + +#endif /* CONFIG_ISA_ARCV2 */ + +void arc_cpu_pmu_irq_init(void) +{ + struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); + + arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr, + "ARC perf counters", pmu_cpu); + + /* Clear all pending interrupt flags */ + write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); +} + static int arc_pmu_device_probe(struct platform_device *pdev) { struct arc_reg_pct_build pct_bcr; struct arc_reg_cc_build cc_bcr; - int i, j; + int i, j, has_interrupts; + int counter_size; /* in bits */ union cc_name { struct { @@ -284,7 +460,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) pr_err("This core does not have performance counters!\n"); return -ENODEV; } - BUG_ON(pct_bcr.c > ARC_PMU_MAX_HWEVENTS); + BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS); READ_BCR(ARC_REG_CC_BUILD, cc_bcr); BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */ @@ -293,11 +469,16 @@ static int arc_pmu_device_probe(struct platform_device *pdev) if (!arc_pmu) return -ENOMEM; + has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0; + arc_pmu->n_counters = pct_bcr.c; - arc_pmu->counter_size = 32 + (pct_bcr.s << 4); + counter_size = 32 + (pct_bcr.s << 4); - pr_info("ARC perf\t: %d counters (%d bits), %d countable conditions\n", - arc_pmu->n_counters, arc_pmu->counter_size, cc_bcr.c); + arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL; + + pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n", + arc_pmu->n_counters, counter_size, cc_bcr.c, + has_interrupts ? ", [overflow IRQ support]":""); cc_name.str[8] = 0; for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++) @@ -332,8 +513,37 @@ static int arc_pmu_device_probe(struct platform_device *pdev) .read = arc_pmu_read, }; - /* ARC 700 PMU does not support sampling events */ - arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + if (has_interrupts) { + int irq = platform_get_irq(pdev, 0); + unsigned long flags; + + if (irq < 0) { + pr_err("Cannot get IRQ number for the platform\n"); + return -ENODEV; + } + + arc_pmu->irq = irq; + + /* + * arc_cpu_pmu_irq_init() needs to be called on all cores for + * their respective local PMU. + * However we use opencoded on_each_cpu() to ensure it is called + * on core0 first, so that arc_request_percpu_irq() sets up + * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable + * perf IRQ on non master cores. 
+ * see arc_request_percpu_irq() + */ + preempt_disable(); + local_irq_save(flags); + arc_cpu_pmu_irq_init(); + local_irq_restore(flags); + smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1); + preempt_enable(); + + /* Clean all pending interrupt flags */ + write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); + } else + arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW); } @@ -341,6 +551,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id arc_pmu_match[] = { { .compatible = "snps,arc700-pct" }, + { .compatible = "snps,archs-pct" }, {}, }; MODULE_DEVICE_TABLE(of, arc_pmu_match); @@ -348,7 +559,7 @@ MODULE_DEVICE_TABLE(of, arc_pmu_match); static struct platform_driver arc_pmu_driver = { .driver = { - .name = "arc700-pct", + .name = "arc-pct", .of_match_table = of_match_ptr(arc_pmu_match), }, .probe = arc_pmu_device_probe, diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 44092456776f..91d5a0f1f3f7 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -65,7 +65,7 @@ asmlinkage void ret_from_fork(void); * ------------------ * | r25 | <==== top of Stack (thread.ksp) * ~ ~ - * | --to-- | (CALLEE Regs of user mode) + * | --to-- | (CALLEE Regs of kernel mode) * | r13 | * ------------------ * | fp | diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index a3d186211ed3..cabde9dc0696 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void) struct bcr_perip uncached_space; struct bcr_generic bcr; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; + unsigned long perip_space; FIX_PTR(cpu); READ_BCR(AUX_IDENTITY, cpu->core); @@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void) cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); - BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE); + if (uncached_space.ver < 3) + perip_space = uncached_space.start << 24; + else + perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000; + + BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE); READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); @@ -142,17 +148,22 @@ static void read_arc_build_cfg_regs(void) } static const struct cpuinfo_data arc_cpu_tbl[] = { +#ifdef CONFIG_ISA_ARCOMPACT { {0x20, "ARC 600" }, 0x2F}, { {0x30, "ARC 700" }, 0x33}, { {0x34, "ARC 700 R4.10"}, 0x34}, { {0x35, "ARC 700 R4.11"}, 0x35}, - { {0x50, "ARC HS38" }, 0x51}, +#else + { {0x50, "ARC HS38 R2.0"}, 0x51}, + { {0x52, "ARC HS38 R2.1"}, 0x52}, +#endif { {0x00, NULL } } }; -#define IS_AVAIL1(v, str) ((v) ? str : "") -#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ") -#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg)) +#define IS_AVAIL1(v, s) ((v) ? s : "") +#define IS_USED_RUN(v) ((v) ? 
"" : "(not used) ") +#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg)) +#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg)) static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) { @@ -226,7 +237,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt); } n += scnprintf(buf + n, len - n, "%s", - IS_USED(CONFIG_ARC_HAS_HW_MPY)); + IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY)); } n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", @@ -325,6 +336,10 @@ static void arc_chk_core_config(void) pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); else if (!cpu->extn.fpu_dp && fpu_enabled) panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); + + if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic && + !IS_ENABLED(CONFIG_ARC_STAR_9000923308)) + panic("llock/scond livelock workaround missing\n"); } /* diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index 3364d2bbc515..4294761a2b3e 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta, return 0; } -static void arc_clkevent_set_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int arc_clkevent_set_periodic(struct clock_event_device *dev) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* - * At X Hz, 1 sec = 1000ms -> X cycles; - * 10ms -> X / 100 cycles - */ - arc_timer_event_setup(arc_get_core_freq() / HZ); - break; - case CLOCK_EVT_MODE_ONESHOT: - break; - default: - break; - } - - return; + /* + * At X Hz, 1 sec = 1000ms -> X cycles; + * 10ms -> X / 100 cycles + */ + arc_timer_event_setup(arc_get_core_freq() / HZ); + return 0; } static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { - .name = "ARC Timer0", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .mode = CLOCK_EVT_MODE_UNUSED, - .rating = 300, - .irq = TIMER0_IRQ, /* hardwired, no need for resources */ - .set_next_event = arc_clkevent_set_next_event, - .set_mode = arc_clkevent_set_mode, + .name = "ARC Timer0", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .irq = TIMER0_IRQ, /* hardwired, no need for resources */ + .set_next_event = arc_clkevent_set_next_event, + .set_state_periodic = arc_clkevent_set_periodic, }; static irqreturn_t timer_irq_handler(int irq, void *dev_id) @@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id) * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() */ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); - int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; + int irq_reenable = clockevent_state_periodic(evt); /* * Any write to CTRL reg ACks the interrupt, we rewrite the diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 807f7d61d7a7..a6f91e88ce36 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -58,7 +58,6 @@ static void show_callee_regs(struct callee_regs *cregs) static void print_task_path_n_nm(struct task_struct *tsk, char *buf) { - struct path path; char *path_nm = NULL; struct mm_struct *mm; struct file *exe_file; diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index 74db59b6f392..abd961f3e763 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c @@ -34,7 +34,7 @@ " .section .fixup,\"ax\"\n" \ " .align 4\n" \ "3: mov %0, 1\n" \ - " b 2b\n" \ + " j 
2b\n" \ " .previous\n" \ " .section __ex_table,\"a\"\n" \ " .align 4\n" \ @@ -82,7 +82,7 @@ " .section .fixup,\"ax\"\n" \ " .align 4\n" \ "4: mov %0, 1\n" \ - " b 3b\n" \ + " j 3b\n" \ " .previous\n" \ " .section __ex_table,\"a\"\n" \ " .align 4\n" \ @@ -113,7 +113,7 @@ " .section .fixup,\"ax\"\n" \ " .align 4\n" \ "6: mov %0, 1\n" \ - " b 5b\n" \ + " j 5b\n" \ " .previous\n" \ " .section __ex_table,\"a\"\n" \ " .align 4\n" \ diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index 1b2b3acfed52..0cab0b8a57c5 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S @@ -206,7 +206,7 @@ unalignedOffby3: ld.ab r6, [r1, 4] prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetch [r3, 32] ;Prefetch the next write location + prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 8) or r7, r7, r5 diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S index 92d573c734b5..365b18364815 100644 --- a/arch/arc/lib/memset-archs.S +++ b/arch/arc/lib/memset-archs.S @@ -10,12 +10,6 @@ #undef PREALLOC_NOT_AVAIL -#ifdef PREALLOC_NOT_AVAIL -#define PREWRITE(A,B) prefetchw [(A),(B)] -#else -#define PREWRITE(A,B) prealloc [(A),(B)] -#endif - ENTRY(memset) prefetchw [r0] ; Prefetch the write location mov.f 0, r2 @@ -51,9 +45,15 @@ ENTRY(memset) ;;; Convert len to Dwords, unfold x8 lsr.f lp_count, lp_count, 6 + lpnz @.Lset64bytes ;; LOOP START - PREWRITE(r3, 64) ;Prefetch the next write location +#ifdef PREALLOC_NOT_AVAIL + prefetchw [r3, 64] ;Prefetch the next write location +#else + prealloc [r3, 64] +#endif +#ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] @@ -62,16 +62,45 @@ ENTRY(memset) std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif .Lset64bytes: lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes lpnz .Lset32bytes ;; LOOP START prefetchw [r3, 32] ;Prefetch the next write location +#ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif .Lset32bytes: and.f lp_count, r2, 0x1F ;Last remaining 31 bytes diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index b29d62ed4f7e..0d1a6e96839f 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -22,15 +22,22 @@ #include <asm/setup.h> static int l2_line_sz; +int ioc_exists; +volatile int slc_enable = 1, ioc_enable = 1; void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr, unsigned long sz, const int cacheop); +void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz); +void (*__dma_cache_inv)(unsigned long start, unsigned long sz); +void (*__dma_cache_wback)(unsigned long start, unsigned long sz); + char *arc_cache_mumbojumbo(int c, char *buf, int len) { int n = 0; struct cpuinfo_arc_cache *p; +#define IS_USED_RUN(v) ((v) ? 
"" : "(disabled) ") #define PR_CACHE(p, cfg, str) \ if (!(p)->ver) \ n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \ @@ -45,10 +52,18 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len) PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache"); PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache"); + if (!is_isa_arcv2()) + return buf; + p = &cpuinfo_arc700[c].slc; if (p->ver) n += scnprintf(buf + n, len - n, - "SLC\t\t: %uK, %uB Line\n", p->sz_k, p->line_len); + "SLC\t\t: %uK, %uB Line%s\n", + p->sz_k, p->line_len, IS_USED_RUN(slc_enable)); + + if (ioc_exists) + n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n", + IS_USED_RUN(ioc_enable)); return buf; } @@ -58,18 +73,9 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len) * the cpuinfo structure for later use. * No Validation done here, simply read/convert the BCRs */ -void read_decode_cache_bcr(void) +static void read_decode_cache_bcr_arcv2(int cpu) { - struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc; - unsigned int cpu = smp_processor_id(); - struct bcr_cache { -#ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int pad:12, line_len:4, sz:4, config:4, ver:8; -#else - unsigned int ver:8, config:4, sz:4, line_len:4, pad:12; -#endif - } ibcr, dbcr; - + struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc; struct bcr_generic sbcr; struct bcr_slc_cfg { @@ -80,6 +86,39 @@ void read_decode_cache_bcr(void) #endif } slc_cfg; + struct bcr_clust_cfg { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8; +#else + unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7; +#endif + } cbcr; + + READ_BCR(ARC_REG_SLC_BCR, sbcr); + if (sbcr.ver) { + READ_BCR(ARC_REG_SLC_CFG, slc_cfg); + p_slc->ver = sbcr.ver; + p_slc->sz_k = 128 << slc_cfg.sz; + l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64; + } + + READ_BCR(ARC_REG_CLUSTER_BCR, cbcr); + if (cbcr.c && ioc_enable) + ioc_exists = 1; +} + +void read_decode_cache_bcr(void) +{ + struct cpuinfo_arc_cache *p_ic, *p_dc; + unsigned int cpu = smp_processor_id(); + struct bcr_cache { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:12, line_len:4, sz:4, config:4, ver:8; +#else + unsigned int ver:8, config:4, sz:4, line_len:4, pad:12; +#endif + } ibcr, dbcr; + p_ic = &cpuinfo_arc700[cpu].icache; READ_BCR(ARC_REG_IC_BCR, ibcr); @@ -122,17 +161,8 @@ dc_chk: p_dc->ver = dbcr.ver; slc_chk: - if (!is_isa_arcv2()) - return; - - p_slc = &cpuinfo_arc700[cpu].slc; - READ_BCR(ARC_REG_SLC_BCR, sbcr); - if (sbcr.ver) { - READ_BCR(ARC_REG_SLC_CFG, slc_cfg); - p_slc->ver = sbcr.ver; - p_slc->sz_k = 128 << slc_cfg.sz; - l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 
128 : 64; - } + if (is_isa_arcv2()) + read_decode_cache_bcr_arcv2(cpu); } /* @@ -468,10 +498,18 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, noinline void slc_op(unsigned long paddr, unsigned long sz, const int op) { #ifdef CONFIG_ISA_ARCV2 + /* + * SLC is shared between all cores and concurrent aux operations from + * multiple cores need to be serialized using a spinlock + * A concurrent operation can be silently ignored and/or the old/new + * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop + * below) + */ + static DEFINE_SPINLOCK(lock); unsigned long flags; unsigned int ctrl; - local_irq_save(flags); + spin_lock_irqsave(&lock, flags); /* * The Region Flush operation is specified by CTRL.RGN_OP[11..9] @@ -504,15 +542,10 @@ noinline void slc_op(unsigned long paddr, unsigned long sz, const int op) while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); - local_irq_restore(flags); + spin_unlock_irqrestore(&lock, flags); #endif } -static inline int need_slc_flush(void) -{ - return is_isa_arcv2() && l2_line_sz; -} - /*********************************************************** * Exported APIs */ @@ -561,30 +594,74 @@ void flush_dcache_page(struct page *page) } EXPORT_SYMBOL(flush_dcache_page); -void dma_cache_wback_inv(unsigned long start, unsigned long sz) +/* + * DMA ops for systems with L1 cache only + * Make memory coherent with L1 cache by flushing/invalidating L1 lines + */ +static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz) { __dc_line_op_k(start, sz, OP_FLUSH_N_INV); +} - if (need_slc_flush()) - slc_op(start, sz, OP_FLUSH_N_INV); +static void __dma_cache_inv_l1(unsigned long start, unsigned long sz) +{ + __dc_line_op_k(start, sz, OP_INV); } -EXPORT_SYMBOL(dma_cache_wback_inv); -void dma_cache_inv(unsigned long start, unsigned long sz) +static void __dma_cache_wback_l1(unsigned long start, unsigned long sz) +{ + __dc_line_op_k(start, sz, OP_FLUSH); +} + +/* + * DMA ops for systems with both L1 and L2 caches, but without IOC + * Both L1 and L2 lines need to be explicity flushed/invalidated + */ +static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz) +{ + __dc_line_op_k(start, sz, OP_FLUSH_N_INV); + slc_op(start, sz, OP_FLUSH_N_INV); +} + +static void __dma_cache_inv_slc(unsigned long start, unsigned long sz) { __dc_line_op_k(start, sz, OP_INV); + slc_op(start, sz, OP_INV); +} + +static void __dma_cache_wback_slc(unsigned long start, unsigned long sz) +{ + __dc_line_op_k(start, sz, OP_FLUSH); + slc_op(start, sz, OP_FLUSH); +} - if (need_slc_flush()) - slc_op(start, sz, OP_INV); +/* + * DMA ops for systems with IOC + * IOC hardware snoops all DMA traffic keeping the caches consistent with + * memory - eliding need for any explicit cache maintenance of DMA buffers + */ +static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {} +static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {} +static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {} + +/* + * Exported DMA API + */ +void dma_cache_wback_inv(unsigned long start, unsigned long sz) +{ + __dma_cache_wback_inv(start, sz); +} +EXPORT_SYMBOL(dma_cache_wback_inv); + +void dma_cache_inv(unsigned long start, unsigned long sz) +{ + __dma_cache_inv(start, sz); } EXPORT_SYMBOL(dma_cache_inv); void dma_cache_wback(unsigned long start, unsigned long sz) { - __dc_line_op_k(start, sz, OP_FLUSH); - - if (need_slc_flush()) - slc_op(start, sz, OP_FLUSH); + __dma_cache_wback(start, sz); } 
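The exported dma_cache_*() wrappers above no longer test need_slc_flush() on every call; they indirect through the __dma_cache_*() function pointers, which arc_cache_init() (further down in this patch) binds once according to the detected cache topology. Condensed here for reference while reading the wrappers (the helper name is only for illustration):

static void sketch_bind_dma_cache_ops(void)
{
        if (is_isa_arcv2() && ioc_exists) {
                /* IOC snoops all DMA traffic: maintenance ops become no-ops */
                __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
                __dma_cache_inv = __dma_cache_inv_ioc;
                __dma_cache_wback = __dma_cache_wback_ioc;
        } else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
                /* L1 + SLC: both levels flushed/invalidated explicitly */
                __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
                __dma_cache_inv = __dma_cache_inv_slc;
                __dma_cache_wback = __dma_cache_wback_slc;
        } else {
                /* L1 only */
                __dma_cache_wback_inv = __dma_cache_wback_inv_l1;
                __dma_cache_inv = __dma_cache_inv_l1;
                __dma_cache_wback = __dma_cache_wback_l1;
        }
}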
EXPORT_SYMBOL(dma_cache_wback); @@ -840,4 +917,41 @@ void arc_cache_init(void) panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); } } + + if (is_isa_arcv2() && l2_line_sz && !slc_enable) { + + /* IM set : flush before invalidate */ + write_aux_reg(ARC_REG_SLC_CTRL, + read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM); + + write_aux_reg(ARC_REG_SLC_INVALIDATE, 1); + + /* Important to wait for flush to complete */ + while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); + write_aux_reg(ARC_REG_SLC_CTRL, + read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE); + } + + if (is_isa_arcv2() && ioc_exists) { + /* IO coherency base - 0x8z */ + write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000); + /* IO coherency aperture size - 512Mb: 0x8z-0xAz */ + write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11); + /* Enable partial writes */ + write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1); + /* Enable IO coherency */ + write_aux_reg(ARC_REG_IO_COH_ENABLE, 1); + + __dma_cache_wback_inv = __dma_cache_wback_inv_ioc; + __dma_cache_inv = __dma_cache_inv_ioc; + __dma_cache_wback = __dma_cache_wback_ioc; + } else if (is_isa_arcv2() && l2_line_sz && slc_enable) { + __dma_cache_wback_inv = __dma_cache_wback_inv_slc; + __dma_cache_inv = __dma_cache_inv_slc; + __dma_cache_wback = __dma_cache_wback_slc; + } else { + __dma_cache_wback_inv = __dma_cache_wback_inv_l1; + __dma_cache_inv = __dma_cache_inv_l1; + __dma_cache_wback = __dma_cache_wback_l1; + } } diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 74a637a1cfc4..29a46bb198cc 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -19,6 +19,7 @@ #include <linux/dma-mapping.h> #include <linux/dma-debug.h> #include <linux/export.h> +#include <asm/cache.h> #include <asm/cacheflush.h> /* @@ -53,6 +54,20 @@ void *dma_alloc_coherent(struct device *dev, size_t size, { void *paddr, *kvaddr; + /* + * IOC relies on all data (even coherent DMA data) being in cache + * Thus allocate normal cached memory + * + * The gains with IOC are two pronged: + * -For streaming data, elides needs for cache maintenance, saving + * cycles in flush code, and bus bandwidth as all the lines of a + * buffer need to be flushed out to memory + * -For coherent data, Read/Write to buffers terminate early in cache + * (vs. 
always going to memory - thus are faster) + */ + if (is_isa_arcv2() && ioc_exists) + return dma_alloc_noncoherent(dev, size, dma_handle, gfp); + /* This is linear addr (0x8000_0000 based) */ paddr = alloc_pages_exact(size, gfp); if (!paddr) @@ -60,8 +75,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size, /* This is kernel Virtual address (0x7000_0000 based) */ kvaddr = ioremap_nocache((unsigned long)paddr, size); - if (kvaddr != NULL) - memset(kvaddr, 0, size); + if (kvaddr == NULL) + return NULL; /* This is bus address, platform dependent */ *dma_handle = (dma_addr_t)paddr; @@ -85,6 +100,9 @@ EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_coherent(struct device *dev, size_t size, void *kvaddr, dma_addr_t dma_handle) { + if (is_isa_arcv2() && ioc_exists) + return dma_free_noncoherent(dev, size, kvaddr, dma_handle); + iounmap((void __force __iomem *)kvaddr); free_pages_exact((void *)dma_handle, size); diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c index 99f7da513a48..ad9825d4026a 100644 --- a/arch/arc/plat-axs10x/axs10x.c +++ b/arch/arc/plat-axs10x/axs10x.c @@ -46,7 +46,7 @@ static void __init axs10x_enable_gpio_intc_wire(void) * ------------------- ------------------- * | snps,dw-apb-gpio | | snps,dw-apb-gpio | * ------------------- ------------------- - * | | + * | #12 | * | [ Debug UART on cpu card ] * | * ------------------------ @@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od) static void __init axs103_early_init(void) { + /* + * AXS103 configurations for SMP/QUAD configurations share device tree + * which defaults to 90 MHz. However recent failures of Quad config + * revealed P&R timing violations so clamp it down to safe 50 MHz + * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack + * + * This hack is really hacky as of now. Fix it properly by getting the + * number of cores as return value of platform's early SMP callback + */ +#ifdef CONFIG_ARC_MCIP + unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; + if (num_cores > 2) + arc_set_core_freq(50 * 1000000); +#endif + switch (arc_get_core_freq()/1000000) { case 33: axs103_set_freq(1, 1, 1); diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 1c5021002fe4..41cbb4a53066 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -268,7 +268,6 @@ config PHYS_OFFSET depends on !ARM_PATCH_PHYS_VIRT default DRAM_BASE if !MMU default 0x00000000 if ARCH_EBSA110 || \ - EP93XX_SDCE3_SYNC_PHYS_OFFSET || \ ARCH_FOOTBRIDGE || \ ARCH_INTEGRATOR || \ ARCH_IOP13XX || \ @@ -277,10 +276,7 @@ config PHYS_OFFSET default 0x10000000 if ARCH_OMAP1 || ARCH_RPC default 0x20000000 if ARCH_S5PV210 default 0x70000000 if REALVIEW_HIGH_PHYS_OFFSET - default 0xc0000000 if EP93XX_SDCE0_PHYS_OFFSET || ARCH_SA1100 - default 0xd0000000 if EP93XX_SDCE1_PHYS_OFFSET - default 0xe0000000 if EP93XX_SDCE2_PHYS_OFFSET - default 0xf0000000 if EP93XX_SDCE3_ASYNC_PHYS_OFFSET + default 0xc0000000 if ARCH_SA1100 help Please provide the physical address corresponding to the location of main memory in your system. @@ -418,11 +414,14 @@ config ARCH_EP93XX bool "EP93xx-based" select ARCH_HAS_HOLES_MEMORYMODEL select ARCH_REQUIRE_GPIOLIB - select ARCH_USES_GETTIMEOFFSET select ARM_AMBA + select ARM_PATCH_PHYS_VIRT select ARM_VIC + select AUTO_ZRELADDR select CLKDEV_LOOKUP + select CLKSRC_MMIO select CPU_ARM920T + select GENERIC_CLOCKEVENTS help This enables support for the Cirrus EP93xx series of CPUs. 
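Returning to the arch/arc/mm/dma.c hunk earlier in this patch: callers of the DMA API are unaffected by the new IOC branch, since the choice between cached and uncached backing is made inside dma_alloc_coherent()/dma_free_coherent(). A hedged driver-side sketch with hypothetical names, not code from this series:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Hypothetical helper, only to show the unchanged caller contract */
static int example_alloc_desc_ring(struct device *dev, size_t size,
                                   void **cpu, dma_addr_t *bus)
{
        /*
         * On IOC-equipped ARCv2 this now returns ordinary cached memory that
         * the IO coherency port keeps consistent; on other configurations it
         * is an uncached remapping, as before. Either way the buffer can be
         * used for descriptors/DMA without explicit cache maintenance.
         */
        *cpu = dma_alloc_coherent(dev, size, bus, GFP_KERNEL);
        return *cpu ? 0 : -ENOMEM;
}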
@@ -536,6 +535,7 @@ config ARCH_ORION5X select MVEBU_MBUS select PCI select PLAT_ORION_LEGACY + select MULTI_IRQ_HANDLER help Support for the following Marvell Orion 5x series SoCs: Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182), diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index a2e16f940394..0cfd7f947f6b 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -141,6 +141,12 @@ choice depends on ARCH_AT91 depends on SOC_SAMA5 + config AT91_DEBUG_LL_DBGU3 + bool "Kernel low-level debugging on sama5d2" + select DEBUG_AT91_UART + depends on ARCH_AT91 + depends on SOC_SAMA5 + config DEBUG_BCM2835 bool "Kernel low-level debugging on BCM2835 PL011 UART" depends on ARCH_BCM2835 @@ -411,6 +417,13 @@ choice Say Y here if you want kernel low-level debugging support on i.MX6SX. + config DEBUG_IMX6UL_UART + bool "i.MX6UL Debug UART" + depends on SOC_IMX6UL + help + Say Y here if you want kernel low-level debugging support + on i.MX6UL. + config DEBUG_IMX7D_UART bool "i.MX7D Debug UART" depends on SOC_IMX7D @@ -1269,6 +1282,7 @@ config DEBUG_IMX_UART_PORT DEBUG_IMX6Q_UART || \ DEBUG_IMX6SL_UART || \ DEBUG_IMX6SX_UART || \ + DEBUG_IMX6UL_UART || \ DEBUG_IMX7D_UART default 1 depends on ARCH_MXC @@ -1320,6 +1334,7 @@ config DEBUG_LL_INCLUDE DEBUG_IMX6Q_UART || \ DEBUG_IMX6SL_UART || \ DEBUG_IMX6SX_UART || \ + DEBUG_IMX6UL_UART || \ DEBUG_IMX7D_UART default "debug/ks8695.S" if DEBUG_KS8695_UART default "debug/msm.S" if DEBUG_QCOM_UARTDM diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 07ab3d203916..7451b447cc2d 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS) +bootpImage uImage: zImage +zImage: Image + $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 7a13aebacf81..3f9a9ebc77c3 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -51,10 +51,6 @@ else endif endif -ifeq ($(CONFIG_ARCH_SHMOBILE_LEGACY),y) -OBJS += head-shmobile.o -endif - # # We now have a PIC decompressor implementation. Decompressors running # from RAM should not define ZTEXTADDR. Decompressors running directly diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S deleted file mode 100644 index 22a75259faa3..000000000000 --- a/arch/arm/boot/compressed/head-shmobile.S +++ /dev/null @@ -1,71 +0,0 @@ -/* - * The head-file for SH-Mobile ARM platforms - * - * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * Simon Horman <horms@verge.net.au> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifdef CONFIG_ZBOOT_ROM - - .section ".start", "ax" - - /* load board-specific initialization code */ -#include <mach/zboot.h> - - adr r0, dtb_info - ldmia r0, {r1, r3, r4, r5, r7} - - sub r0, r0, r1 @ calculate the delta offset - add r5, r5, r0 @ _edata - - ldr lr, [r5, #0] @ check if valid DTB is present - cmp lr, r3 - bne 0f - - add r9, r7, #31 @ rounded up to a multiple - bic r9, r9, #31 @ ... of 32 bytes - - add r6, r9, r5 @ copy from _edata - add r9, r9, r4 @ to MEMORY_START - -1: ldmdb r6!, {r0 - r3, r10 - r12, lr} - cmp r6, r5 - stmdb r9!, {r0 - r3, r10 - r12, lr} - bhi 1b - - /* Success: Zero board ID, pointer to start of memory for atag/dtb */ - mov r7, #0 - mov r8, r4 - b 2f - - .align 2 -dtb_info: - .word dtb_info -#ifndef __ARMEB__ - .word 0xedfe0dd0 @ sig is 0xd00dfeed big endian -#else - .word 0xd00dfeed -#endif - .word MEMORY_START - .word _edata - .word 0x4000 @ maximum DTB size -0: - /* Failure: Zero board ID, NULL atag/dtb */ - mov r7, #0 - mov r8, #0 @ pass null pointer as atag -2 : - -#endif /* CONFIG_ZBOOT_ROM */ diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 246473a244f6..917aa318929d 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -501,11 +501,8 @@ dtb-$(CONFIG_ARCH_S5PV210) += \ s5pv210-smdkv210.dtb \ s5pv210-torbreck.dtb dtb-$(CONFIG_ARCH_SHMOBILE_LEGACY) += \ - r8a7740-armadillo800eva.dtb \ r8a7778-bockw.dtb \ - r8a7778-bockw-reference.dtb \ - r8a7779-marzen.dtb \ - sh73a0-kzm9g.dtb + r8a7778-bockw-reference.dtb dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += \ emev2-kzm9d.dtb \ r7s72100-genmai.dtb \ diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts index 0d35ab64641c..7106114c7464 100644 --- a/arch/arm/boot/dts/am335x-pepper.dts +++ b/arch/arm/boot/dts/am335x-pepper.dts @@ -74,6 +74,7 @@ audio_codec: tlv320aic3106@1b { compatible = "ti,tlv320aic3106"; reg = <0x1b>; + ai3x-micbias-vg = <0x2>; }; accel: lis331dlh@1d { @@ -153,7 +154,7 @@ ti,audio-routing = "Headphone Jack", "HPLOUT", "Headphone Jack", "HPROUT", - "LINE1L", "Line In"; + "MIC3L", "Mic3L Switch"; }; &mcasp0 { @@ -438,41 +439,50 @@ regulators { dcdc1_reg: regulator@0 { /* VDD_1V8 system supply */ + regulator-always-on; }; dcdc2_reg: regulator@1 { /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */ regulator-name = "vdd_core"; regulator-min-microvolt = <925000>; - regulator-max-microvolt = <1325000>; + regulator-max-microvolt = <1150000>; regulator-boot-on; + regulator-always-on; }; dcdc3_reg: regulator@2 { /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */ regulator-name = "vdd_mpu"; regulator-min-microvolt = <925000>; - regulator-max-microvolt = <1150000>; + regulator-max-microvolt = <1325000>; regulator-boot-on; + regulator-always-on; }; ldo1_reg: regulator@3 { /* VRTC 1.8V always-on supply */ + regulator-name = "vrtc,vdds"; regulator-always-on; }; ldo2_reg: regulator@4 { /* 3.3V rail */ + regulator-name = "vdd_3v3aux"; + regulator-always-on; }; ldo3_reg: regulator@5 { /* VDD_3V3A 3.3V rail */ + regulator-name = "vdd_3v3a"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; ldo4_reg: regulator@6 { /* VDD_3V3B 3.3V rail */ + regulator-name = "vdd_3v3b"; + regulator-always-on; }; }; }; diff --git a/arch/arm/boot/dts/am33xx.dtsi 
b/arch/arm/boot/dts/am33xx.dtsi index 21fcc440fc1a..b76f9a2ce05d 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -210,7 +210,7 @@ }; uart0: serial@44e09000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart1"; clock-frequency = <48000000>; reg = <0x44e09000 0x2000>; @@ -221,7 +221,7 @@ }; uart1: serial@48022000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart2"; clock-frequency = <48000000>; reg = <0x48022000 0x2000>; @@ -232,7 +232,7 @@ }; uart2: serial@48024000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart3"; clock-frequency = <48000000>; reg = <0x48024000 0x2000>; @@ -243,7 +243,7 @@ }; uart3: serial@481a6000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart4"; clock-frequency = <48000000>; reg = <0x481a6000 0x2000>; @@ -252,7 +252,7 @@ }; uart4: serial@481a8000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart5"; clock-frequency = <48000000>; reg = <0x481a8000 0x2000>; @@ -261,7 +261,7 @@ }; uart5: serial@481aa000 { - compatible = "ti,omap3-uart"; + compatible = "ti,am3352-uart", "ti,omap3-uart"; ti,hwmods = "uart6"; clock-frequency = <48000000>; reg = <0x481aa000 0x2000>; diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index ade28c790f4b..359a3b6daf4f 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -86,6 +86,7 @@ prcm: prcm@1f0000 { compatible = "ti,am4-prcm"; reg = <0x1f0000 0x11000>; + interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; prcm_clocks: clocks { #address-cells = <1>; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index a63bf78191ea..f9a4b317ed2f 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -693,3 +693,7 @@ }; }; }; + +&pcie1 { + gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; +}; diff --git a/arch/arm/boot/dts/cros-ec-keyboard.dtsi b/arch/arm/boot/dts/cros-ec-keyboard.dtsi index 9c7fb0acae79..4e42f30cb318 100644 --- a/arch/arm/boot/dts/cros-ec-keyboard.dtsi +++ b/arch/arm/boot/dts/cros-ec-keyboard.dtsi @@ -22,6 +22,7 @@ MATRIX_KEY(0x00, 0x02, KEY_F1) MATRIX_KEY(0x00, 0x03, KEY_B) MATRIX_KEY(0x00, 0x04, KEY_F10) + MATRIX_KEY(0x00, 0x05, KEY_RO) MATRIX_KEY(0x00, 0x06, KEY_N) MATRIX_KEY(0x00, 0x08, KEY_EQUAL) MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) @@ -34,6 +35,7 @@ MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) MATRIX_KEY(0x01, 0x09, KEY_F9) MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) + MATRIX_KEY(0x01, 0x0c, KEY_HENKAN) MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL) MATRIX_KEY(0x02, 0x01, KEY_TAB) @@ -45,6 +47,7 @@ MATRIX_KEY(0x02, 0x07, KEY_102ND) MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) MATRIX_KEY(0x02, 0x09, KEY_F8) + MATRIX_KEY(0x02, 0x0a, KEY_YEN) MATRIX_KEY(0x03, 0x01, KEY_GRAVE) MATRIX_KEY(0x03, 0x02, KEY_F2) @@ -53,6 +56,7 @@ MATRIX_KEY(0x03, 0x06, KEY_6) MATRIX_KEY(0x03, 0x08, KEY_MINUS) MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) + MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN) MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL) MATRIX_KEY(0x04, 0x01, KEY_A) diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index aa465904f6cc..096f68be99e2 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -686,7 +686,8 @@ &dcan1 { status = "ok"; - pinctrl-names = "default", "sleep"; - pinctrl-0 = <&dcan1_pins_default>; + pinctrl-names = 
"default", "sleep", "active"; + pinctrl-0 = <&dcan1_pins_sleep>; pinctrl-1 = <&dcan1_pins_sleep>; + pinctrl-2 = <&dcan1_pins_default>; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 8f1e25bcecbd..b058b3146874 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -116,7 +116,7 @@ ranges = <0 0x2000 0x2000>; scm_conf: scm_conf@0 { - compatible = "syscon"; + compatible = "syscon", "simple-bus"; reg = <0x0 0x1400>; #address-cells = <1>; #size-cells = <1>; @@ -211,7 +211,7 @@ #address-cells = <1>; ranges = <0x51000000 0x51000000 0x3000 0x0 0x20000000 0x10000000>; - pcie@51000000 { + pcie1: pcie@51000000 { compatible = "ti,dra7-pcie"; reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>; reg-names = "rc_dbics", "ti_conf", "config"; @@ -397,7 +397,7 @@ }; uart1: serial@4806a000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x4806a000 0x100>; interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart1"; @@ -408,7 +408,7 @@ }; uart2: serial@4806c000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x4806c000 0x100>; interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart2"; @@ -419,7 +419,7 @@ }; uart3: serial@48020000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48020000 0x100>; interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart3"; @@ -430,7 +430,7 @@ }; uart4: serial@4806e000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x4806e000 0x100>; interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart4"; @@ -441,7 +441,7 @@ }; uart5: serial@48066000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48066000 0x100>; interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart5"; @@ -452,7 +452,7 @@ }; uart6: serial@48068000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48068000 0x100>; interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart6"; @@ -463,7 +463,7 @@ }; uart7: serial@48420000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48420000 0x100>; interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart7"; @@ -472,7 +472,7 @@ }; uart8: serial@48422000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48422000 0x100>; interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart8"; @@ -481,7 +481,7 @@ }; uart9: serial@48424000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x48424000 0x100>; interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart9"; @@ -490,7 +490,7 @@ }; uart10: serial@4ae2b000 { - compatible = "ti,omap4-uart"; + compatible = "ti,dra742-uart", "ti,omap4-uart"; reg = <0x4ae2b000 0x100>; interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "uart10"; @@ -1140,6 +1140,7 @@ ctrl-module = <&omap_control_sata>; clocks = <&sys_clkin1>, <&sata_ref_clk>; clock-names = "sysclk", "refclk"; + syscon-pllreset = <&scm_conf 0x3fc>; #phy-cells = <0>; }; diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index 4e1b60581782..803738414086 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts @@ -587,9 +587,10 @@ &dcan1 { status = "ok"; - pinctrl-names = "default", "sleep"; - 
pinctrl-0 = <&dcan1_pins_default>; + pinctrl-names = "default", "sleep", "active"; + pinctrl-0 = <&dcan1_pins_sleep>; pinctrl-1 = <&dcan1_pins_sleep>; + pinctrl-2 = <&dcan1_pins_default>; }; &qspi { diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts index 031853b75528..baa9b2f52009 100644 --- a/arch/arm/boot/dts/exynos3250-rinato.dts +++ b/arch/arm/boot/dts/exynos3250-rinato.dts @@ -182,7 +182,7 @@ display-timings { timing-0 { - clock-frequency = <0>; + clock-frequency = <4600000>; hactive = <320>; vactive = <320>; hfront-porch = <1>; diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index d7201333e3bc..2db99433e17f 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -138,8 +138,8 @@ mipi_phy: video-phy@10020710 { compatible = "samsung,s5pv210-mipi-video-phy"; - reg = <0x10020710 8>; #phy-cells = <1>; + syscon = <&pmu_system_controller>; }; pd_cam: cam-power-domain@10023C00 { diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts index e0abfc3324d1..e050d85cdacd 100644 --- a/arch/arm/boot/dts/exynos4210-origen.dts +++ b/arch/arm/boot/dts/exynos4210-origen.dts @@ -127,6 +127,10 @@ }; }; +&cpu0 { + cpu0-supply = <&buck1_reg>; +}; + &fimd { pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index 98f3ce65cb9a..ba34886f8b65 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts @@ -188,6 +188,10 @@ }; }; +&cpu0 { + cpu0-supply = <&varm_breg>; +}; + &dsi_0 { vddcore-supply = <&vusb_reg>; vddio-supply = <&vmipi_reg>; diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts index d4f2b11319dd..775892b2cc6a 100644 --- a/arch/arm/boot/dts/exynos4210-universal_c210.dts +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts @@ -548,6 +548,10 @@ }; }; +&cpu0 { + cpu0-supply = <&vdd_arm_reg>; +}; + &pinctrl_1 { hdmi_hpd: hdmi-hpd { samsung,pins = "gpx3-7"; diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index 10d3c173396e..3e5ba665d200 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ b/arch/arm/boot/dts/exynos4210.dtsi @@ -40,6 +40,18 @@ device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0x900>; + clocks = <&clock CLK_ARM_CLK>; + clock-names = "cpu"; + clock-latency = <160000>; + + operating-points = < + 1200000 1250000 + 1000000 1150000 + 800000 1075000 + 500000 975000 + 400000 975000 + 200000 950000 + >; cooling-min-level = <4>; cooling-max-level = <2>; #cooling-cells = <2>; /* min followed by max */ diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi index c892d58e8dad..b995333ea22b 100644 --- a/arch/arm/boot/dts/imx23.dtsi +++ b/arch/arm/boot/dts/imx23.dtsi @@ -468,6 +468,7 @@ interrupts = <36 37 38 39 40 41 42 43 44>; status = "disabled"; clocks = <&clks 26>; + #io-channel-cells = <1>; }; spdif@80054000 { diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts index dd45e6971bc3..9351296356dc 100644 --- a/arch/arm/boot/dts/imx25-pdk.dts +++ b/arch/arm/boot/dts/imx25-pdk.dts @@ -10,6 +10,7 @@ */ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include <dt-bindings/input/input.h> #include "imx25.dtsi" @@ -114,8 +115,8 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; - cd-gpios = <&gpio2 1 0>; - wp-gpios = <&gpio2 0 0>; + 
cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index bc215e4b75fd..b69be5c499cf 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi @@ -108,7 +108,7 @@ }; gpt1: timer@10003000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x10003000 0x1000>; interrupts = <26>; clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>, @@ -117,7 +117,7 @@ }; gpt2: timer@10004000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x10004000 0x1000>; interrupts = <25>; clocks = <&clks IMX27_CLK_GPT2_IPG_GATE>, @@ -126,7 +126,7 @@ }; gpt3: timer@10005000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x10005000 0x1000>; interrupts = <24>; clocks = <&clks IMX27_CLK_GPT3_IPG_GATE>, @@ -376,7 +376,7 @@ }; gpt4: timer@10019000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x10019000 0x1000>; interrupts = <4>; clocks = <&clks IMX27_CLK_GPT4_IPG_GATE>, @@ -385,7 +385,7 @@ }; gpt5: timer@1001a000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x1001a000 0x1000>; interrupts = <3>; clocks = <&clks IMX27_CLK_GPT5_IPG_GATE>, @@ -436,7 +436,7 @@ }; gpt6: timer@1001f000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x1001f000 0x1000>; interrupts = <2>; clocks = <&clks IMX27_CLK_GPT6_IPG_GATE>, diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi index b6478e97d6a7..e6540b5cfa4c 100644 --- a/arch/arm/boot/dts/imx35.dtsi +++ b/arch/arm/boot/dts/imx35.dtsi @@ -286,8 +286,8 @@ can1: can@53fe4000 { compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; reg = <0x53fe4000 0x1000>; - clocks = <&clks 33>; - clock-names = "ipg"; + clocks = <&clks 33>, <&clks 33>; + clock-names = "ipg", "per"; interrupts = <43>; status = "disabled"; }; @@ -295,8 +295,8 @@ can2: can@53fe8000 { compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; reg = <0x53fe8000 0x1000>; - clocks = <&clks 34>; - clock-names = "ipg"; + clocks = <&clks 34>, <&clks 34>; + clock-names = "ipg", "per"; interrupts = <44>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts index 93d3ea12328c..0f3fe29b816e 100644 --- a/arch/arm/boot/dts/imx51-apf51dev.dts +++ b/arch/arm/boot/dts/imx51-apf51dev.dts @@ -98,7 +98,7 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; - cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>; bus-width = <4>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts index e9337ad52f59..3bc18835fb4b 100644 --- a/arch/arm/boot/dts/imx53-ard.dts +++ b/arch/arm/boot/dts/imx53-ard.dts @@ -103,8 +103,8 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; - cd-gpios = <&gpio1 1 0>; - wp-gpios = <&gpio1 9 0>; + cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts index d0e0f57eb432..53f40885c530 100644 --- a/arch/arm/boot/dts/imx53-m53evk.dts +++ b/arch/arm/boot/dts/imx53-m53evk.dts @@ -124,8 +124,8 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = 
<&pinctrl_esdhc1>; - cd-gpios = <&gpio1 1 0>; - wp-gpios = <&gpio1 9 0>; + cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi index 181ae5ebf23f..b0d5542ac829 100644 --- a/arch/arm/boot/dts/imx53-qsb-common.dtsi +++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi @@ -147,8 +147,8 @@ &esdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc3>; - cd-gpios = <&gpio3 11 0>; - wp-gpios = <&gpio3 12 0>; + cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>; bus-width = <8>; status = "okay"; }; @@ -295,9 +295,10 @@ &tve { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_vga_sync>; + ddc-i2c-bus = <&i2c2>; fsl,tve-mode = "vga"; - fsl,hsync-pin = <4>; - fsl,vsync-pin = <6>; + fsl,hsync-pin = <7>; /* IPU DI1 PIN7 via EIM_OE */ + fsl,vsync-pin = <8>; /* IPU DI1 PIN8 via EIM_RW */ status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts index 1d325576bcc0..fc89ce1e5763 100644 --- a/arch/arm/boot/dts/imx53-smd.dts +++ b/arch/arm/boot/dts/imx53-smd.dts @@ -41,8 +41,8 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; - cd-gpios = <&gpio3 13 0>; - wp-gpios = <&gpio4 11 0>; + cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi index 4f1f0e2868bf..e03373a58760 100644 --- a/arch/arm/boot/dts/imx53-tqma53.dtsi +++ b/arch/arm/boot/dts/imx53-tqma53.dtsi @@ -41,8 +41,8 @@ pinctrl-0 = <&pinctrl_esdhc2>, <&pinctrl_esdhc2_cdwp>; vmmc-supply = <®_3p3v>; - wp-gpios = <&gpio1 2 0>; - cd-gpios = <&gpio1 4 0>; + wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi index 704bd72cbfec..d3e50b22064f 100644 --- a/arch/arm/boot/dts/imx53-tx53.dtsi +++ b/arch/arm/boot/dts/imx53-tx53.dtsi @@ -183,7 +183,7 @@ }; &esdhc1 { - cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>; fsl,wp-controller; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; @@ -191,7 +191,7 @@ }; &esdhc2 { - cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>; fsl,wp-controller; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc2>; diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts index c17d3ad6dba5..fc51b87ad208 100644 --- a/arch/arm/boot/dts/imx53-voipac-bsb.dts +++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts @@ -119,8 +119,8 @@ &esdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc2>; - cd-gpios = <&gpio3 25 0>; - wp-gpios = <&gpio2 19 0>; + cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts index 43cb3fd76be7..5111f5170d53 100644 --- a/arch/arm/boot/dts/imx6dl-riotboard.dts +++ b/arch/arm/boot/dts/imx6dl-riotboard.dts @@ -305,8 +305,8 @@ &usdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; - cd-gpios = <&gpio1 4 0>; - wp-gpios = <&gpio1 2 0>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; }; @@ -314,8 +314,8 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = 
<&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 0>; - wp-gpios = <&gpio7 1 0>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts index 78df05e9d1ce..d6515f7a56c4 100644 --- a/arch/arm/boot/dts/imx6q-arm2.dts +++ b/arch/arm/boot/dts/imx6q-arm2.dts @@ -11,6 +11,7 @@ */ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include "imx6q.dtsi" / { @@ -196,8 +197,8 @@ }; &usdhc3 { - cd-gpios = <&gpio6 11 0>; - wp-gpios = <&gpio6 14 0>; + cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3 diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts index 703539cf36d3..00bd63e63d0c 100644 --- a/arch/arm/boot/dts/imx6q-gk802.dts +++ b/arch/arm/boot/dts/imx6q-gk802.dts @@ -7,6 +7,7 @@ */ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include "imx6q.dtsi" / { @@ -161,7 +162,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; bus-width = <4>; - cd-gpios = <&gpio6 11 0>; + cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts index a43abfa21e33..5645d52850a7 100644 --- a/arch/arm/boot/dts/imx6q-tbs2910.dts +++ b/arch/arm/boot/dts/imx6q-tbs2910.dts @@ -251,7 +251,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <4>; - cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; @@ -260,7 +260,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; bus-width = <4>; - cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi index e6d9195a1da7..f4d6ae564ead 100644 --- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi +++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi @@ -173,7 +173,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; vmmc-supply = <®_3p3v>; - cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>; status = "okay"; }; @@ -181,7 +181,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; vmmc-supply = <®_3p3v>; - cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi index 1d85de2befb3..a47a0399a172 100644 --- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi @@ -392,7 +392,7 @@ &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; - cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; no-1-8-v; status = "okay"; }; @@ -400,7 +400,7 @@ &usdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; - cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; no-1-8-v; status = "okay"; diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi index 59e5d15e3ec4..ff41f83551de 100644 --- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi +++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi @@ -258,6 +258,6 @@ pinctrl-names = "default"; 
pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>; vmmc-supply = <®_3p3v>; - cd-gpios = <&gpio1 4 0>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi index 2c253d6d20bd..45e7c39e80d5 100644 --- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi +++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi @@ -1,3 +1,5 @@ +#include <dt-bindings/gpio/gpio.h> + / { regulators { compatible = "simple-bus"; @@ -181,7 +183,7 @@ &usdhc2 { /* module slot */ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; - cd-gpios = <&gpio2 2 0>; + cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi index b5756c21ea1d..4493f6e99330 100644 --- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi @@ -318,7 +318,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi index 86f03c1b147c..a857d1294609 100644 --- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi @@ -324,7 +324,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi index 4a8d97f47759..1afe3385e2d2 100644 --- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi @@ -417,7 +417,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi index 62a82f3eba88..6dd0b764e036 100644 --- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi +++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi @@ -299,6 +299,6 @@ &pinctrl_hummingboard_usdhc2 >; vmmc-supply = <®_3p3v>; - cd-gpios = <&gpio1 4 0>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi index 3af16dfe417b..d7fe6672d00c 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi @@ -453,7 +453,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 0>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; @@ -461,7 +461,7 @@ &usdhc4 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4>; - cd-gpios = <&gpio2 6 0>; + cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index 1ce6133b67f5..9e6ecd99b472 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -409,8 +409,8 @@ &usdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; - cd-gpios = <&gpio1 4 0>; - wp-gpios = <&gpio1 2 0>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; status = "disabled"; 
}; @@ -418,7 +418,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3 &pinctrl_usdhc3_cdwp>; - cd-gpios = <&gpio1 27 0>; - wp-gpios = <&gpio1 29 0>; + cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi index 488a640796ac..3373fd958e95 100644 --- a/arch/arm/boot/dts/imx6qdl-rex.dtsi +++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi @@ -342,7 +342,7 @@ pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <4>; cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; - wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>; status = "okay"; }; @@ -351,6 +351,6 @@ pinctrl-0 = <&pinctrl_usdhc3>; bus-width = <4>; cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; - wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi index 3b24b12651b2..e329ca5c3322 100644 --- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi @@ -467,8 +467,8 @@ pinctrl-0 = <&pinctrl_usdhc3>; pinctrl-1 = <&pinctrl_usdhc3_100mhz>; pinctrl-2 = <&pinctrl_usdhc3_200mhz>; - cd-gpios = <&gpio6 15 0>; - wp-gpios = <&gpio1 13 0>; + cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi index e00c44f6a0df..782379320517 100644 --- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi @@ -448,8 +448,8 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 0>; - wp-gpios = <&gpio7 1 0>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; }; @@ -457,7 +457,7 @@ &usdhc4 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4>; - cd-gpios = <&gpio2 6 0>; + cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index a626e6dd8022..944eb81cb2b8 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -562,8 +562,8 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <8>; - cd-gpios = <&gpio2 2 0>; - wp-gpios = <&gpio2 3 0>; + cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>; status = "okay"; }; @@ -571,8 +571,8 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; bus-width = <8>; - cd-gpios = <&gpio2 0 0>; - wp-gpios = <&gpio2 1 0>; + cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi index f02b80b41d4f..da08de324e9e 100644 --- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi +++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi @@ -680,7 +680,7 @@ pinctrl-0 = <&pinctrl_usdhc1>; bus-width = <4>; no-1-8-v; - cd-gpios = <&gpio7 2 0>; + cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>; fsl,wp-controller; status = "okay"; }; @@ -690,7 +690,7 @@ pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <4>; no-1-8-v; - cd-gpios = <&gpio7 3 0>; + cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>; fsl,wp-controller; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi index 5fb091675582..9e096d811bed 100644 
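The many cd-gpios/wp-gpios updates in the surrounding board files replace a raw flags cell of 0 (interpreted as GPIO_ACTIVE_HIGH) with the explicit GPIO_ACTIVE_LOW/GPIO_ACTIVE_HIGH macros from dt-bindings/gpio/gpio.h, so the device tree describes the real card-detect polarity. The flag matters because gpiolib consumers read logical levels; a sketch of the consumer side, with hypothetical names, assuming the gpiod descriptor API:

#include <linux/err.h>
#include <linux/types.h>
#include <linux/gpio/consumer.h>

static bool example_card_present(struct device *dev)
{
        struct gpio_desc *cd = gpiod_get(dev, "cd", GPIOD_IN);
        bool present;

        if (IS_ERR(cd))
                return false;

        /*
         * gpiod_get_value() returns the logical state: gpiolib applies the
         * active-low inversion declared in DT, so 1 means "card present"
         * whatever the electrical polarity of the pin.
         */
        present = gpiod_get_value(cd) == 1;
        gpiod_put(cd);

        return present;
}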
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi +++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi @@ -9,6 +9,8 @@ * */ +#include <dt-bindings/gpio/gpio.h> + / { regulators { compatible = "simple-bus"; @@ -250,13 +252,13 @@ &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; - cd-gpios = <&gpio1 2 0>; + cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>; status = "okay"; }; &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio3 9 0>; + cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index e6d13592080d..10d0b26c93f1 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -181,10 +181,10 @@ interrupt-names = "msi"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; - interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6QDL_CLK_PCIE_AXI>, <&clks IMX6QDL_CLK_LVDS1_GATE>, <&clks IMX6QDL_CLK_PCIE_REF_125M>; @@ -836,10 +836,31 @@ reg = <0x02100000 0x100000>; ranges; - caam@02100000 { - reg = <0x02100000 0x40000>; - interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>, - <0 106 IRQ_TYPE_LEVEL_HIGH>; + crypto: caam@2100000 { + compatible = "fsl,sec-v4.0"; + fsl,sec-era = <4>; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x2100000 0x10000>; + ranges = <0 0x2100000 0x10000>; + interrupt-parent = <&intc>; + clocks = <&clks IMX6QDL_CLK_CAAM_MEM>, + <&clks IMX6QDL_CLK_CAAM_ACLK>, + <&clks IMX6QDL_CLK_CAAM_IPG>, + <&clks IMX6QDL_CLK_EIM_SLOW>; + clock-names = "mem", "aclk", "ipg", "emi_slow"; + + sec_jr0: jr0@1000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>; + }; + + sec_jr1: jr1@2000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; + }; }; aipstz@0217c000 { /* AIPSTZ2 */ diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index 945887d3fdb3..b84dff2e94ea 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -617,8 +617,8 @@ pinctrl-1 = <&pinctrl_usdhc1_100mhz>; pinctrl-2 = <&pinctrl_usdhc1_200mhz>; bus-width = <8>; - cd-gpios = <&gpio4 7 0>; - wp-gpios = <&gpio4 6 0>; + cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; status = "okay"; }; @@ -627,8 +627,8 @@ pinctrl-0 = <&pinctrl_usdhc2>; pinctrl-1 = <&pinctrl_usdhc2_100mhz>; pinctrl-2 = <&pinctrl_usdhc2_200mhz>; - cd-gpios = <&gpio5 0 0>; - wp-gpios = <&gpio4 29 0>; + cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>; status = "okay"; }; @@ -637,6 +637,6 @@ pinctrl-0 = <&pinctrl_usdhc3>; pinctrl-1 = <&pinctrl_usdhc3_100mhz>; pinctrl-2 = <&pinctrl_usdhc3_200mhz>; - cd-gpios = <&gpio3 22 0>; + cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts index e3c0b63c2205..115f3fd78971 100644 --- a/arch/arm/boot/dts/imx6sx-sabreauto.dts +++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts @@ -49,7 +49,7 @@ pinctrl-1 = <&pinctrl_usdhc3_100mhz>; pinctrl-2 = 
<&pinctrl_usdhc3_200mhz>; bus-width = <8>; - cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; keep-power-in-suspend; enable-sdio-wakeup; @@ -61,7 +61,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4>; bus-width = <8>; - cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>; no-1-8-v; keep-power-in-suspend; enable-sdio-wakup; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi index cef04cef3a80..ac88c3467078 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dtsi +++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi @@ -293,7 +293,7 @@ pinctrl-1 = <&pinctrl_usdhc3_100mhz>; pinctrl-2 = <&pinctrl_usdhc3_200mhz>; bus-width = <8>; - cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>; keep-power-in-suspend; enable-sdio-wakeup; @@ -304,7 +304,7 @@ &usdhc4 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4>; - cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 708175d59b9c..e6223d8e79af 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -738,6 +738,33 @@ reg = <0x02100000 0x100000>; ranges; + crypto: caam@2100000 { + compatible = "fsl,sec-v4.0"; + fsl,sec-era = <4>; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x2100000 0x10000>; + ranges = <0 0x2100000 0x10000>; + interrupt-parent = <&intc>; + clocks = <&clks IMX6SX_CLK_CAAM_MEM>, + <&clks IMX6SX_CLK_CAAM_ACLK>, + <&clks IMX6SX_CLK_CAAM_IPG>, + <&clks IMX6SX_CLK_EIM_SLOW>; + clock-names = "mem", "aclk", "ipg", "emi_slow"; + + sec_jr0: jr0@1000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x1000 0x1000>; + interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>; + }; + + sec_jr1: jr1@2000 { + compatible = "fsl,sec-v4.0-job-ring"; + reg = <0x2000 0x1000>; + interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + usbotg1: usb@02184000 { compatible = "fsl,imx6sx-usb", "fsl,imx27-usb"; reg = <0x02184000 0x200>; diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index 4d1a4b977d84..fdd1d7c9a5cc 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -234,8 +234,8 @@ &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; - cd-gpios = <&gpio5 0 0>; - wp-gpios = <&gpio5 1 0>; + cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>; enable-sdio-wakeup; keep-power-in-suspend; status = "okay"; diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi index 4773d6af66a0..d56d68fe7ffc 100644 --- a/arch/arm/boot/dts/k2e-clocks.dtsi +++ b/arch/arm/boot/dts/k2e-clocks.dtsi @@ -13,9 +13,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/k2e.dtsi b/arch/arm/boot/dts/k2e.dtsi index 50e555eab50d..675fb8e492c6 100644 --- a/arch/arm/boot/dts/k2e.dtsi +++ b/arch/arm/boot/dts/k2e.dtsi @@ -86,7 +86,7 @@ gpio,syscon-dev = <&devctrl 0x240>; }; - pcie@21020000 { + pcie1: pcie@21020000 { compatible = 
"ti,keystone-pcie","snps,dw-pcie"; clocks = <&clkpcie1>; clock-names = "pcie"; @@ -96,6 +96,7 @@ ranges = <0x81000000 0 0 0x23260000 0x4000 0x4000 0x82000000 0 0x60000000 0x60000000 0 0x10000000>; + status = "disabled"; device_type = "pci"; num-lanes = <2>; @@ -130,10 +131,17 @@ <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>; }; }; + + mdio: mdio@24200f00 { + compatible = "ti,keystone_mdio", "ti,davinci_mdio"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x24200f00 0x100>; + status = "disabled"; + clocks = <&clkcpgmac>; + clock-names = "fck"; + bus_freq = <2500000>; + }; /include/ "k2e-netcp.dtsi" }; }; - -&mdio { - reg = <0x24200f00 0x100>; -}; diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi index d5adee3c0067..af9b7190533a 100644 --- a/arch/arm/boot/dts/k2hk-clocks.dtsi +++ b/arch/arm/boot/dts/k2hk-clocks.dtsi @@ -22,9 +22,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/k2hk.dtsi b/arch/arm/boot/dts/k2hk.dtsi index ae6472407b22..d0810a5f2968 100644 --- a/arch/arm/boot/dts/k2hk.dtsi +++ b/arch/arm/boot/dts/k2hk.dtsi @@ -98,6 +98,17 @@ #gpio-cells = <2>; gpio,syscon-dev = <&devctrl 0x25c>; }; + + mdio: mdio@02090300 { + compatible = "ti,keystone_mdio", "ti,davinci_mdio"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x02090300 0x100>; + status = "disabled"; + clocks = <&clkcpgmac>; + clock-names = "fck"; + bus_freq = <2500000>; + }; /include/ "k2hk-netcp.dtsi" }; }; diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi index eb1e3e29f073..ef8464bb11ff 100644 --- a/arch/arm/boot/dts/k2l-clocks.dtsi +++ b/arch/arm/boot/dts/k2l-clocks.dtsi @@ -22,9 +22,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/k2l.dtsi b/arch/arm/boot/dts/k2l.dtsi index 0e007483615e..49fd414f680c 100644 --- a/arch/arm/boot/dts/k2l.dtsi +++ b/arch/arm/boot/dts/k2l.dtsi @@ -29,7 +29,6 @@ }; soc { - /include/ "k2l-clocks.dtsi" uart2: serial@02348400 { @@ -79,6 +78,17 @@ #gpio-cells = <2>; gpio,syscon-dev = <&devctrl 0x24c>; }; + + mdio: mdio@26200f00 { + compatible = "ti,keystone_mdio", "ti,davinci_mdio"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x26200f00 0x100>; + status = "disabled"; + clocks = <&clkcpgmac>; + clock-names = "fck"; + bus_freq = <2500000>; + }; /include/ "k2l-netcp.dtsi" }; }; @@ -96,7 +106,3 @@ /* Pin muxed. 
Enabled and configured by Bootloader */ status = "disabled"; }; - -&mdio { - reg = <0x26200f00 0x100>; -}; diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi index c06542b2c954..72816d65f7ec 100644 --- a/arch/arm/boot/dts/keystone.dtsi +++ b/arch/arm/boot/dts/keystone.dtsi @@ -267,17 +267,6 @@ 1 0 0x21000A00 0x00000100>; }; - mdio: mdio@02090300 { - compatible = "ti,keystone_mdio", "ti,davinci_mdio"; - #address-cells = <1>; - #size-cells = <0>; - reg = <0x02090300 0x100>; - status = "disabled"; - clocks = <&clkpa>; - clock-names = "fck"; - bus_freq = <2500000>; - }; - kirq0: keystone_irq@26202a0 { compatible = "ti,keystone-irq"; interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>; @@ -286,7 +275,7 @@ ti,syscon-dev = <&devctrl 0x2a0>; }; - pcie@21800000 { + pcie0: pcie@21800000 { compatible = "ti,keystone-pcie", "snps,dw-pcie"; clocks = <&clkpcie>; clock-names = "pcie"; @@ -296,6 +285,7 @@ ranges = <0x81000000 0 0 0x23250000 0 0x4000 0x82000000 0 0x50000000 0x50000000 0 0x10000000>; + status = "disabled"; device_type = "pci"; num-lanes = <2>; diff --git a/arch/arm/boot/dts/kirkwood-d2net.dts b/arch/arm/boot/dts/kirkwood-d2net.dts index 6b7856025001..e1c25c35e9ce 100644 --- a/arch/arm/boot/dts/kirkwood-d2net.dts +++ b/arch/arm/boot/dts/kirkwood-d2net.dts @@ -10,6 +10,7 @@ /dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-netxbig.dtsi" / { @@ -28,6 +29,10 @@ label = "d2net_v2:blue:sata"; slow-gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>; cmd-gpio = <&gpio0 30 GPIO_ACTIVE_HIGH>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; diff --git a/arch/arm/boot/dts/kirkwood-is2.dts b/arch/arm/boot/dts/kirkwood-is2.dts index da674bbd49a8..4121674abd1c 100644 --- a/arch/arm/boot/dts/kirkwood-is2.dts +++ b/arch/arm/boot/dts/kirkwood-is2.dts @@ -1,5 +1,6 @@ /dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-ns2-common.dtsi" / { @@ -27,6 +28,10 @@ label = "ns2:blue:sata"; slow-gpio = <&gpio0 29 0>; cmd-gpio = <&gpio0 30 0>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-ns2.dts b/arch/arm/boot/dts/kirkwood-ns2.dts index 53368d1022cc..190189d235e6 100644 --- a/arch/arm/boot/dts/kirkwood-ns2.dts +++ b/arch/arm/boot/dts/kirkwood-ns2.dts @@ -1,5 +1,6 @@ /dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-ns2-common.dtsi" / { @@ -27,6 +28,10 @@ label = "ns2:blue:sata"; slow-gpio = <&gpio0 29 0>; cmd-gpio = <&gpio0 30 0>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-ns2max.dts b/arch/arm/boot/dts/kirkwood-ns2max.dts index 72c78d0b1116..55cc41d9c80c 100644 --- a/arch/arm/boot/dts/kirkwood-ns2max.dts +++ b/arch/arm/boot/dts/kirkwood-ns2max.dts @@ -1,5 +1,6 @@ /dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-ns2-common.dtsi" / { @@ -46,6 +47,10 @@ label = "ns2:blue:sata"; slow-gpio = <&gpio0 29 0>; cmd-gpio = <&gpio0 30 0>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-ns2mini.dts b/arch/arm/boot/dts/kirkwood-ns2mini.dts index c441bf62c09f..9935f3ec29b4 100644 --- a/arch/arm/boot/dts/kirkwood-ns2mini.dts +++ b/arch/arm/boot/dts/kirkwood-ns2mini.dts @@ -1,5 +1,6 @@ /dts-v1/; +#include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-ns2-common.dtsi" / { @@ -47,6 +48,10 @@ label = 
"ns2:blue:sata"; slow-gpio = <&gpio0 29 0>; cmd-gpio = <&gpio0 30 0>; + modes-map = <NS_V2_LED_OFF 1 0 + NS_V2_LED_ON 0 1 + NS_V2_LED_ON 1 1 + NS_V2_LED_SATA 0 0>; }; }; }; diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi index 11a7963be003..2390f387c271 100644 --- a/arch/arm/boot/dts/omap2430.dtsi +++ b/arch/arm/boot/dts/omap2430.dtsi @@ -51,7 +51,8 @@ }; scm_conf: scm_conf@270 { - compatible = "syscon"; + compatible = "syscon", + "simple-bus"; reg = <0x270 0x240>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi index 233c69e50ae3..df8908adb0cb 100644 --- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi +++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi @@ -120,7 +120,7 @@ lcd0: display@0 { compatible = "lgphilips,lb035q02"; - label = "lcd"; + label = "lcd35"; reg = <1>; /* CS1 */ spi-max-frequency = <10000000>; diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi index f5395b7da912..048fd216970a 100644 --- a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi +++ b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi @@ -98,7 +98,7 @@ lcd0: display@0 { compatible = "samsung,lte430wq-f0c", "panel-dpi"; - label = "lcd"; + label = "lcd43"; pinctrl-names = "default"; pinctrl-0 = <<e430_pins>; diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index f884d6adb71e..abc4473e6f8a 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -191,7 +191,8 @@ }; omap4_padconf_global: omap4_padconf_global@5a0 { - compatible = "syscon"; + compatible = "syscon", + "simple-bus"; reg = <0x5a0 0x170>; #address-cells = <1>; #size-cells = <1>; @@ -551,6 +552,7 @@ reg = <0x4a066000 0x100>; interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "mmu_dsp"; + #iommu-cells = <0>; }; mmu_ipu: mmu@55082000 { @@ -558,6 +560,7 @@ reg = <0x55082000 0x100>; interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "mmu_ipu"; + #iommu-cells = <0>; ti,iommu-bus-err-back; }; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 7d24ae0306b5..b1a1263e6001 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -180,7 +180,8 @@ }; omap5_padconf_global: omap5_padconf_global@5a0 { - compatible = "syscon"; + compatible = "syscon", + "simple-bus"; reg = <0x5a0 0xec>; #address-cells = <1>; #size-cells = <1>; @@ -612,6 +613,7 @@ reg = <0x4a066000 0x100>; interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "mmu_dsp"; + #iommu-cells = <0>; }; mmu_ipu: mmu@55082000 { @@ -619,6 +621,7 @@ reg = <0x55082000 0x100>; interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "mmu_ipu"; + #iommu-cells = <0>; ti,iommu-bus-err-back; }; diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts index bd35b0674ff6..9bc72a3356e4 100644 --- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts +++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts @@ -17,3 +17,13 @@ status = "ok"; }; }; + +&spmi_bus { + pm8941@0 { + coincell@2800 { + status = "ok"; + qcom,rset-ohms = <2100>; + qcom,vset-millivolts = <3000>; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom-pm8941.dtsi index aa774e685018..968f1043d4f5 100644 --- a/arch/arm/boot/dts/qcom-pm8941.dtsi +++ b/arch/arm/boot/dts/qcom-pm8941.dtsi @@ -125,6 +125,12 @@ interrupts = <0x0 0x36 0x0 
IRQ_TYPE_EDGE_RISING>; qcom,external-resistor-micro-ohms = <10000>; }; + + coincell@2800 { + compatible = "qcom,pm8941-coincell"; + reg = <0x2800>; + status = "disabled"; + }; }; usid1: pm8941@1 { diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts index 71468a7eb28f..5e17fd147728 100644 --- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts +++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts @@ -60,27 +60,27 @@ rxc-skew-ps = <2000>; }; -&mmc0 { - vmmc-supply = <®ulator_3_3v>; - vqmmc-supply = <®ulator_3_3v>; -}; - -&usb1 { - status = "okay"; -}; - &gpio2 { status = "okay"; }; -&i2c1{ +&i2c1 { status = "okay"; - accel1: accel1@53{ - compatible = "adxl34x"; + accel1: accelerometer@53 { + compatible = "adi,adxl345"; reg = <0x53>; - interrupt-parent = < &portc >; + interrupt-parent = <&portc>; interrupts = <3 2>; }; }; + +&mmc0 { + vmmc-supply = <®ulator_3_3v>; + vqmmc-supply = <®ulator_3_3v>; +}; + +&usb1 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts index d42c84b1df8d..e48857249ce7 100644 --- a/arch/arm/boot/dts/spear1310-evb.dts +++ b/arch/arm/boot/dts/spear1310-evb.dts @@ -1,7 +1,7 @@ /* * DTS file for SPEAr1310 Evaluation Baord * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi index 9d342920695a..54bc6d3cf290 100644 --- a/arch/arm/boot/dts/spear1310.dtsi +++ b/arch/arm/boot/dts/spear1310.dtsi @@ -1,7 +1,7 @@ /* * DTS file for all SPEAr1310 SoCs * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts index b23e05ed1d60..c611f5606dfe 100644 --- a/arch/arm/boot/dts/spear1340-evb.dts +++ b/arch/arm/boot/dts/spear1340-evb.dts @@ -1,7 +1,7 @@ /* * DTS file for SPEAr1340 Evaluation Baord * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index 13e1aa33daa2..df2232d767ed 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi @@ -1,7 +1,7 @@ /* * DTS file for all SPEAr1340 SoCs * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index 40accc87e3a2..14594ce8c18a 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -1,7 +1,7 @@ /* * DTS file for all SPEAr13xx SoCs * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. 
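The qcom-pm8941/honami hunks above follow a pattern that recurs throughout this series (the Keystone mdio and pcie nodes and the ux500 uart@80121000 get the same treatment): the chip-level .dtsi describes the block but leaves it status = "disabled", and only boards that actually wire it up enable it and add the board-specific values. Condensed to its two halves, using the coincell properties from the patch:

/* PMIC .dtsi: the coin-cell block exists on every PM8941, but stays off by default */
coincell@2800 {
        compatible = "qcom,pm8941-coincell";
        reg = <0x2800>;
        status = "disabled";
};

/* Board .dts (Xperia Honami): enable it and describe this board's circuit */
&spmi_bus {
        pm8941@0 {
                coincell@2800 {
                        status = "ok";
                        qcom,rset-ohms = <2100>;
                        qcom,vset-millivolts = <3000>;
                };
        };
};

Both "ok" and "okay" are accepted spellings of the enabled status; most of the series uses "okay".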
You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts index 5de1431653e4..e859e8288bcd 100644 --- a/arch/arm/boot/dts/spear300-evb.dts +++ b/arch/arm/boot/dts/spear300-evb.dts @@ -1,7 +1,7 @@ /* * DTS file for SPEAr300 Evaluation Baord * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi index f79b3dfaabe6..f4e92e599729 100644 --- a/arch/arm/boot/dts/spear300.dtsi +++ b/arch/arm/boot/dts/spear300.dtsi @@ -1,7 +1,7 @@ /* * DTS file for SPEAr300 SoC * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts index b09632963d15..070f2c1b7851 100644 --- a/arch/arm/boot/dts/spear310-evb.dts +++ b/arch/arm/boot/dts/spear310-evb.dts @@ -1,7 +1,7 @@ /* * DTS file for SPEAr310 Evaluation Baord * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi index 95372080eea6..da210b454753 100644 --- a/arch/arm/boot/dts/spear310.dtsi +++ b/arch/arm/boot/dts/spear310.dtsi @@ -1,7 +1,7 @@ /* * DTS file for SPEAr310 SoC * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts index fdedbb514102..1b1034477923 100644 --- a/arch/arm/boot/dts/spear320-evb.dts +++ b/arch/arm/boot/dts/spear320-evb.dts @@ -1,7 +1,7 @@ /* * DTS file for SPEAr320 Evaluation Baord * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi index ffea342aeec9..22be6e5edaac 100644 --- a/arch/arm/boot/dts/spear320.dtsi +++ b/arch/arm/boot/dts/spear320.dtsi @@ -1,7 +1,7 @@ /* * DTS file for SPEAr320 SoC * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi index f0e3fcf8e323..118135d75899 100644 --- a/arch/arm/boot/dts/spear3xx.dtsi +++ b/arch/arm/boot/dts/spear3xx.dtsi @@ -1,7 +1,7 @@ /* * DTS file for all SPEAr3xx SoCs * - * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> + * Copyright 2012 Viresh Kumar <vireshk@kernel.org> * * The code contained herein is licensed under the GNU General Public * License. 
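The Keystone hunks a little further up move the shared mdio node out of keystone.dtsi and into k2e.dtsi, k2hk.dtsi and k2l.dtsi, each with its SoC-specific address and with its clock switched from <&clkpa> to <&clkcpgmac>; the old per-board "&mdio { reg = ...; };" overrides are dropped as redundant. Because the relocated nodes are status = "disabled", a board that uses the MACs is expected to enable the bus and list its PHYs, roughly as in the hedged sketch below (the phy label and address are illustrative, not part of this patch):

/* Board .dts sketch: turn the SoC's MDIO bus on and describe the attached PHY */
&mdio {
        status = "okay";

        ethphy0: ethernet-phy@0 {
                reg = <0>;      /* PHY address on the MDIO bus */
        };
};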
You may obtain a copy of the GNU General Public License diff --git a/arch/arm/boot/dts/ste-ccu8540.dts b/arch/arm/boot/dts/ste-ccu8540.dts index 32dd55e5f4e6..6eaaf638e52e 100644 --- a/arch/arm/boot/dts/ste-ccu8540.dts +++ b/arch/arm/boot/dts/ste-ccu8540.dts @@ -17,6 +17,13 @@ model = "ST-Ericsson U8540 platform with Device Tree"; compatible = "st-ericsson,ccu8540", "st-ericsson,u8540"; + /* This stablilizes the serial port enumeration */ + aliases { + serial0 = &ux500_serial0; + serial1 = &ux500_serial1; + serial2 = &ux500_serial2; + }; + memory@0 { device_type = "memory"; reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>; diff --git a/arch/arm/boot/dts/ste-ccu9540.dts b/arch/arm/boot/dts/ste-ccu9540.dts index 651c56d400a4..c8b815819cfe 100644 --- a/arch/arm/boot/dts/ste-ccu9540.dts +++ b/arch/arm/boot/dts/ste-ccu9540.dts @@ -16,6 +16,13 @@ model = "ST-Ericsson CCU9540 platform with Device Tree"; compatible = "st-ericsson,ccu9540", "st-ericsson,u9540"; + /* This stablilizes the serial port enumeration */ + aliases { + serial0 = &ux500_serial0; + serial1 = &ux500_serial1; + serial2 = &ux500_serial2; + }; + memory { reg = <0x00000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index 853684ad7773..c5fbde3afcf6 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -15,6 +15,33 @@ #include "skeleton.dtsi" / { + cpus { + #address-cells = <1>; + #size-cells = <0>; + enable-method = "ste,dbx500-smp"; + + cpu-map { + cluster0 { + core0 { + cpu = <&CPU0>; + }; + core1 { + cpu = <&CPU1>; + }; + }; + }; + CPU0: cpu@300 { + device_type = "cpu"; + compatible = "arm,cortex-a9"; + reg = <0x300>; + }; + CPU1: cpu@301 { + device_type = "cpu"; + compatible = "arm,cortex-a9"; + reg = <0x301>; + }; + }; + soc { #address-cells = <1>; #size-cells = <1>; @@ -22,32 +49,6 @@ interrupt-parent = <&intc>; ranges; - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu-map { - cluster0 { - core0 { - cpu = <&CPU0>; - }; - core1 { - cpu = <&CPU1>; - }; - }; - }; - CPU0: cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a9"; - reg = <0>; - }; - CPU1: cpu@1 { - device_type = "cpu"; - compatible = "arm,cortex-a9"; - reg = <1>; - }; - }; - ptm@801ae000 { compatible = "arm,coresight-etm3x", "arm,primecell"; reg = <0x801ae000 0x1000>; @@ -219,6 +220,13 @@ clocks { compatible = "stericsson,u8500-clks"; + /* + * Registers for the CLKRST block on peripheral + * groups 1, 2, 3, 5, 6, + */ + reg = <0x8012f000 0x1000>, <0x8011f000 0x1000>, + <0x8000f000 0x1000>, <0xa03ff000 0x1000>, + <0xa03cf000 0x1000>; prcmu_clk: prcmu-clock { #clock-cells = <1>; @@ -971,7 +979,7 @@ power-domains = <&pm_domains DOMAIN_VAPE>; }; - uart@80120000 { + ux500_serial0: uart@80120000 { compatible = "arm,pl011", "arm,primecell"; reg = <0x80120000 0x1000>; interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>; @@ -986,7 +994,7 @@ status = "disabled"; }; - uart@80121000 { + ux500_serial1: uart@80121000 { compatible = "arm,pl011", "arm,primecell"; reg = <0x80121000 0x1000>; interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>; @@ -1001,7 +1009,7 @@ status = "disabled"; }; - uart@80007000 { + ux500_serial2: uart@80007000 { compatible = "arm,pl011", "arm,primecell"; reg = <0x80007000 0x1000>; interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index 744c1e3a744d..6d8ce154347e 100644 --- a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -32,11 +32,11 @@ status = "okay"; }; + /* This UART 
is unused and thus left disabled */ uart@80121000 { pinctrl-names = "default", "sleep"; pinctrl-0 = <&uart1_default_mode>; pinctrl-1 = <&uart1_sleep_mode>; - status = "okay"; }; uart@80007000 { diff --git a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts index 2b1cb5b584b6..18e9795a94f9 100644 --- a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts +++ b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts @@ -17,6 +17,13 @@ model = "ST-Ericsson HREF (pre-v60) and ST UIB"; compatible = "st-ericsson,mop500", "st-ericsson,u8500"; + /* This stablilizes the serial port enumeration */ + aliases { + serial0 = &ux500_serial0; + serial1 = &ux500_serial1; + serial2 = &ux500_serial2; + }; + soc { /* Reset line for the BU21013 touchscreen */ i2c@80110000 { diff --git a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts index 59523f866812..24739914e689 100644 --- a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts +++ b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts @@ -16,4 +16,11 @@ / { model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB"; compatible = "st-ericsson,mop500", "st-ericsson,u8500"; + + /* This stablilizes the serial port enumeration */ + aliases { + serial0 = &ux500_serial0; + serial1 = &ux500_serial1; + serial2 = &ux500_serial2; + }; }; diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi index 7f3975b58d16..b0278f4c486c 100644 --- a/arch/arm/boot/dts/ste-hrefprev60.dtsi +++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi @@ -23,6 +23,11 @@ }; soc { + /* Enable UART1 on this board */ + uart@80121000 { + status = "okay"; + }; + i2c@80004000 { tps61052@33 { compatible = "tps61052"; diff --git a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts index 8c6a2de56cf1..c2e1ba019a2f 100644 --- a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts +++ b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts @@ -19,6 +19,13 @@ model = "ST-Ericsson HREF (v60+) and ST UIB"; compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500"; + /* This stablilizes the serial port enumeration */ + aliases { + serial0 = &ux500_serial0; + serial1 = &ux500_serial1; + serial2 = &ux500_serial2; + }; + soc { /* Reset line for the BU21013 touchscreen */ i2c@80110000 { diff --git a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts index d53cccdce776..ebd8547e98f1 100644 --- a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts +++ b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts @@ -18,4 +18,11 @@ / { model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB"; compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500"; + + /* This stablilizes the serial port enumeration */ + aliases { + serial0 = &ux500_serial0; + serial1 = &ux500_serial1; + serial2 = &ux500_serial2; + }; }; diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi index a4bc9e77d640..810cda743b6d 100644 --- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi +++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi @@ -43,15 +43,26 @@ <&vaudio_hf_hrefv60_mode>, <&gbf_hrefv60_mode>, <&hdtv_hrefv60_mode>, - <&touch_hrefv60_mode>; + <&touch_hrefv60_mode>, + <&gpios_hrefv60_mode>; sdi0 { - /* SD card detect GPIO pin, extend default state */ sdi0_default_mode: sdi0_default { + /* SD card detect GPIO pin, extend default state */ default_hrefv60_cfg1 { pins = "GPIO95_E8"; ste,config = <&gpio_in_pu>; }; + /* VMMCI level-shifter enable */ + default_hrefv60_cfg2 { + pins = "GPIO169_D22"; + ste,config = <&gpio_out_lo>; + }; + /* VMMCI 
level-shifter voltage select */ + default_hrefv60_cfg3 { + pins = "GPIO5_AG6"; + ste,config = <&gpio_out_hi>; + }; }; }; ipgpio { @@ -213,6 +224,16 @@ }; }; }; + gpios { + /* Dangling GPIO pins */ + gpios_hrefv60_mode: gpios_hrefv60 { + default_cfg1 { + /* Normally UART1 RXD, now dangling */ + pins = "GPIO4_AH6"; + ste,config = <&in_pu>; + }; + }; + }; }; }; }; diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts index 3d0b8755caee..4a21c6492dbb 100644 --- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts +++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts @@ -17,11 +17,22 @@ }; aliases { + serial0 = &uart0; + serial1 = &uart1; stmpe-i2c0 = &stmpe0; stmpe-i2c1 = &stmpe1; }; pinctrl { + uart0 { + uart0_nhk_mode: uart0_mux { + u0_default_mux { + function = "u0"; + groups = "u0txrx_a_1", "u0ctsrts_a_1"; + }; + }; + }; + stmpe2401_1 { stmpe2401_1_nhk_mode: stmpe2401_1_nhk { nhk_cfg1 { @@ -72,6 +83,11 @@ }; i2c0 { + lis3lv02dl@1d { + /* Accelerometer */ + compatible = "st,lis3lv02dl-accel"; + reg = <0x1d>; + }; stmpe0: stmpe2401@43 { compatible = "st,stmpe2401"; reg = <0x43>; @@ -130,22 +146,30 @@ #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; + /* + * This will turn off SATA so that MMC/SD + * can thrive + */ + mmcsd-gpio { + gpio-hog; + gpios = <2 0x0>; + output-low; + line-name = "SATA EN"; + }; }; }; }; amba { + /* Activate RX/TX and CTS/RTS on UART 0 */ + uart0: uart@101fd000 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_nhk_mode>; + status = "okay"; + }; mmcsd: sdi@101f6000 { cd-gpios = <&stmpe_gpio44 7 GPIO_ACTIVE_LOW>; wp-gpios = <&stmpe_gpio44 18 GPIO_ACTIVE_HIGH>; }; }; - - /* Custom board node with GPIO pins to active etc */ - usb-s8815 { - /* This will turn off SATA so that MMC/SD can thrive */ - mmcsd-gpio { - gpios = <&stmpe_gpio44 2 0x1>; - }; - }; }; diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts index 85d3b95dfdba..35282c0105c6 100644 --- a/arch/arm/boot/dts/ste-nomadik-s8815.dts +++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts @@ -15,6 +15,21 @@ bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; }; + aliases { + serial0 = &uart0; + serial1 = &uart1; + }; + + gpio3: gpio@101e7000 { + /* This hog will bias the MMC/SD card detect line */ + mmcsd-gpio { + gpio-hog; + gpios = <16 0x0>; + output-low; + line-name = "card detect bias"; + }; + }; + src@101e0000 { /* These chrystal drivers are not used on this board */ disable-sxtalo; @@ -26,6 +41,15 @@ pinctrl-names = "default"; pinctrl-0 = <&cd_default_mode>; + uart0 { + /* Only use RX/TX pins */ + uart0_s8815_mode: uart0_mux { + u0_default_mux { + function = "u0"; + groups = "u0txrx_a_1"; + }; + }; + }; mmcsd-cd { cd_default_mode: cd_default { cd_default_cfg1 { @@ -81,6 +105,14 @@ }; }; + i2c1 { + lis3lv02dl@1d { + /* Accelerometer */ + compatible = "st,lis3lv02dl-accel"; + reg = <0x1d>; + }; + }; + /* GPIO I2C connected to the USB portions of the STw4811 only */ gpio-i2c { compatible = "i2c-gpio"; @@ -98,21 +130,19 @@ }; - /* Configure card detect for the uSD slot */ amba { + /* Activate RXTX on UART 0 */ + uart0: uart@101fd000 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_s8815_mode>; + status = "okay"; + }; + /* Configure card detect for the uSD slot */ mmcsd: sdi@101f6000 { cd-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>; }; }; - /* Custom board node with GPIO pins to active etc */ - usb-s8815 { - /* This will bias the MMC/SD card detect line */ - mmcsd-gpio { - gpios = <&gpio3 16 0x1>; - }; - }; - /* The user LED on the 
board is set up to be used for heartbeat */ leds { compatible = "gpio-leds"; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index 9a5f2ba139b7..176e332fc0bd 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -21,6 +21,13 @@ interrupts = <30>; cache-unified; cache-level = <2>; + cache-size = <131072>; + cache-sets = <512>; + cache-line-size = <32>; + /* At full speed latency must be >=2 */ + arm,tag-latency = <2>; + arm,data-latency = <2 2>; + arm,dirty-latency = <2>; }; mtu0: mtu@101e2000 { @@ -97,14 +104,6 @@ pinctrl { compatible = "stericsson,stn8815-pinctrl"; /* Pin configurations */ - uart0 { - uart0_default_mux: uart0_mux { - u0_default_mux { - function = "u0"; - groups = "u0_a_1"; - }; - }; - }; uart1 { uart1_default_mux: uart1_mux { u1_default_mux { @@ -721,11 +720,6 @@ compatible = "st,stw5095"; reg = <0x1a>; }; - lis3lv02dl@1d { - /* Accelerometer */ - compatible = "st,lis3lv02dl-accel"; - reg = <0x1d>; - }; }; amba { @@ -755,8 +749,7 @@ interrupts = <12>; clocks = <&uart0clk>, <&pclkuart0>; clock-names = "uartclk", "apb_pclk"; - pinctrl-names = "default"; - pinctrl-0 = <&uart0_default_mux>; + status = "disabled"; }; uart1: uart@101fb000 { diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index 9edadc37719f..32a5ccb14e7e 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -18,6 +18,13 @@ model = "Calao Systems Snowball platform with device tree"; compatible = "calaosystems,snowball-a9500", "st-ericsson,u9500"; + /* This stablilizes the serial port enumeration */ + aliases { + serial0 = &ux500_serial0; + serial1 = &ux500_serial1; + serial2 = &ux500_serial2; + }; + memory { reg = <0x00000000 0x20000000>; }; @@ -223,11 +230,11 @@ status = "okay"; }; + /* This UART is unused and thus left disabled */ uart@80121000 { pinctrl-names = "default", "sleep"; pinctrl-0 = <&uart1_default_mode>; pinctrl-1 = <&uart1_sleep_mode>; - status = "okay"; }; uart@80007000 { @@ -452,7 +459,21 @@ pins = "GPIO21_AB3"; /* DAT31DIR */ ste,config = <&out_hi>; }; - + /* SD card detect GPIO pin, extend default state */ + snowball_cfg2 { + pins = "GPIO218_AH11"; + ste,config = <&gpio_in_pu>; + }; + /* VMMCI level-shifter enable */ + snowball_cfg3 { + pins = "GPIO217_AH12"; + ste,config = <&gpio_out_lo>; + }; + /* VMMCI level-shifter voltage select */ + snowball_cfg4 { + pins = "GPIO228_AJ6"; + ste,config = <&gpio_out_hi>; + }; }; }; ssp0 { diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 61c03d1fe530..adaa57b7a943 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -241,6 +241,7 @@ compatible = "allwinner,sun4i-a10-axi-gates-clk"; reg = <0x01c2005c 0x4>; clocks = <&axi>; + clock-indices = <0>; clock-output-names = "axi_dram"; }; @@ -257,17 +258,36 @@ compatible = "allwinner,sun4i-a10-ahb-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb>; + clock-indices = <0>, <1>, + <2>, <3>, + <4>, <5>, <6>, + <7>, <8>, <9>, + <10>, <11>, <12>, + <13>, <14>, <16>, + <17>, <18>, <20>, + <21>, <22>, <23>, + <24>, <25>, <26>, + <32>, <33>, <34>, + <35>, <36>, <37>, + <40>, <41>, <43>, + <44>, <45>, + <46>, <47>, + <50>, <52>; clock-output-names = "ahb_usb0", "ahb_ehci0", - "ahb_ohci0", "ahb_ehci1", "ahb_ohci1", "ahb_ss", - "ahb_dma", "ahb_bist", "ahb_mmc0", "ahb_mmc1", - "ahb_mmc2", "ahb_mmc3", "ahb_ms", "ahb_nand", - "ahb_sdram", "ahb_ace", "ahb_emac", "ahb_ts", - 
"ahb_spi0", "ahb_spi1", "ahb_spi2", "ahb_spi3", - "ahb_pata", "ahb_sata", "ahb_gps", "ahb_ve", - "ahb_tvd", "ahb_tve0", "ahb_tve1", "ahb_lcd0", - "ahb_lcd1", "ahb_csi0", "ahb_csi1", "ahb_hdmi", - "ahb_de_be0", "ahb_de_be1", "ahb_de_fe0", - "ahb_de_fe1", "ahb_mp", "ahb_mali400"; + "ahb_ohci0", "ahb_ehci1", + "ahb_ohci1", "ahb_ss", "ahb_dma", + "ahb_bist", "ahb_mmc0", "ahb_mmc1", + "ahb_mmc2", "ahb_mmc3", "ahb_ms", + "ahb_nand", "ahb_sdram", "ahb_ace", + "ahb_emac", "ahb_ts", "ahb_spi0", + "ahb_spi1", "ahb_spi2", "ahb_spi3", + "ahb_pata", "ahb_sata", "ahb_gps", + "ahb_ve", "ahb_tvd", "ahb_tve0", + "ahb_tve1", "ahb_lcd0", "ahb_lcd1", + "ahb_csi0", "ahb_csi1", "ahb_hdmi", + "ahb_de_be0", "ahb_de_be1", + "ahb_de_fe0", "ahb_de_fe1", + "ahb_mp", "ahb_mali400"; }; apb0: apb0@01c20054 { @@ -283,9 +303,14 @@ compatible = "allwinner,sun4i-a10-apb0-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb0>; + clock-indices = <0>, <1>, + <2>, <3>, + <5>, <6>, + <7>, <10>; clock-output-names = "apb0_codec", "apb0_spdif", - "apb0_ac97", "apb0_iis", "apb0_pio", "apb0_ir0", - "apb0_ir1", "apb0_keypad"; + "apb0_ac97", "apb0_iis", + "apb0_pio", "apb0_ir0", + "apb0_ir1", "apb0_keypad"; }; apb1: clk@01c20058 { @@ -301,12 +326,22 @@ compatible = "allwinner,sun4i-a10-apb1-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb1>; + clock-indices = <0>, <1>, + <2>, <4>, + <5>, <6>, + <7>, <16>, + <17>, <18>, + <19>, <20>, + <21>, <22>, + <23>; clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", "apb1_can", "apb1_scr", - "apb1_ps20", "apb1_ps21", "apb1_uart0", - "apb1_uart1", "apb1_uart2", "apb1_uart3", - "apb1_uart4", "apb1_uart5", "apb1_uart6", - "apb1_uart7"; + "apb1_i2c2", "apb1_can", + "apb1_scr", "apb1_ps20", + "apb1_ps21", "apb1_uart0", + "apb1_uart1", "apb1_uart2", + "apb1_uart3", "apb1_uart4", + "apb1_uart5", "apb1_uart6", + "apb1_uart7"; }; nand_clk: clk@01c20080 { @@ -643,6 +678,14 @@ status = "disabled"; }; + crypto: crypto-engine@01c15000 { + compatible = "allwinner,sun4i-a10-crypto"; + reg = <0x01c15000 0x1000>; + interrupts = <86>; + clocks = <&ahb_gates 5>, <&ss_clk>; + clock-names = "ahb", "mod"; + }; + spi2: spi@01c17000 { compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c17000 0x1000>; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index f11efb722bbb..a513b416a807 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -85,6 +85,17 @@ compatible = "allwinner,sun5i-a10s-ahb-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb>; + clock-indices = <0>, <1>, + <2>, <5>, <6>, + <7>, <8>, <9>, + <10>, <13>, + <14>, <17>, <18>, + <20>, <21>, <22>, + <26>, <28>, <32>, + <34>, <36>, <40>, + <43>, <44>, + <46>, <51>, + <52>; clock-output-names = "ahb_usbotg", "ahb_ehci", "ahb_ohci", "ahb_ss", "ahb_dma", "ahb_bist", "ahb_mmc0", "ahb_mmc1", @@ -103,6 +114,9 @@ compatible = "allwinner,sun5i-a10s-apb0-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb0>; + clock-indices = <0>, <3>, + <5>, <6>, + <10>; clock-output-names = "apb0_codec", "apb0_iis", "apb0_pio", "apb0_ir", "apb0_keypad"; @@ -113,9 +127,14 @@ compatible = "allwinner,sun5i-a10s-apb1-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb1>; + clock-indices = <0>, <1>, + <2>, <16>, + <17>, <18>, + <19>; clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", "apb1_uart0", "apb1_uart1", - "apb1_uart2", "apb1_uart3"; + "apb1_i2c2", "apb1_uart0", + "apb1_uart1", "apb1_uart2", + "apb1_uart3"; }; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index 
976d4faa2179..f3631c9c6fa2 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -104,6 +104,16 @@ compatible = "allwinner,sun5i-a13-ahb-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb>; + clock-indices = <0>, <1>, + <2>, <5>, <6>, + <7>, <8>, <9>, + <10>, <13>, + <14>, <20>, + <21>, <22>, + <28>, <32>, <36>, + <40>, <44>, + <46>, <51>, + <52>; clock-output-names = "ahb_usbotg", "ahb_ehci", "ahb_ohci", "ahb_ss", "ahb_dma", "ahb_bist", "ahb_mmc0", "ahb_mmc1", @@ -121,6 +131,8 @@ compatible = "allwinner,sun5i-a13-apb0-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb0>; + clock-indices = <0>, <5>, + <6>; clock-output-names = "apb0_codec", "apb0_pio", "apb0_ir"; }; @@ -130,8 +142,12 @@ compatible = "allwinner,sun5i-a13-apb1-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb1>; + clock-indices = <0>, <1>, + <2>, <17>, + <19>; clock-output-names = "apb1_i2c0", "apb1_i2c1", - "apb1_i2c2", "apb1_uart1", "apb1_uart3"; + "apb1_i2c2", "apb1_uart1", + "apb1_uart3"; }; }; }; diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi index 54b097830434..427c0e7289fa 100644 --- a/arch/arm/boot/dts/sun5i.dtsi +++ b/arch/arm/boot/dts/sun5i.dtsi @@ -178,6 +178,7 @@ compatible = "allwinner,sun4i-a10-axi-gates-clk"; reg = <0x01c2005c 0x4>; clocks = <&axi>; + clock-indices = <0>; clock-output-names = "axi_dram"; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 008047a018cf..e4d3484d97bd 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -252,6 +252,20 @@ compatible = "allwinner,sun6i-a31-ahb1-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb1>; + clock-indices = <1>, <5>, + <6>, <8>, <9>, + <10>, <11>, <12>, + <13>, <14>, + <17>, <18>, <19>, + <20>, <21>, <22>, + <23>, <24>, <26>, + <27>, <29>, + <30>, <31>, <32>, + <36>, <37>, <40>, + <43>, <44>, <45>, + <46>, <47>, <50>, + <52>, <55>, <56>, + <57>, <58>; clock-output-names = "ahb1_mipidsi", "ahb1_ss", "ahb1_dma", "ahb1_mmc0", "ahb1_mmc1", "ahb1_mmc2", "ahb1_mmc3", "ahb1_nand1", @@ -281,6 +295,9 @@ compatible = "allwinner,sun6i-a31-apb1-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb1>; + clock-indices = <0>, <4>, + <5>, <12>, + <13>; clock-output-names = "apb1_codec", "apb1_digital_mic", "apb1_pio", "apb1_daudio0", "apb1_daudio1"; @@ -299,6 +316,10 @@ compatible = "allwinner,sun6i-a31-apb2-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb2>; + clock-indices = <0>, <1>, + <2>, <3>, <16>, + <17>, <18>, <19>, + <20>, <21>; clock-output-names = "apb2_i2c0", "apb2_i2c1", "apb2_i2c2", "apb2_i2c3", "apb2_uart0", "apb2_uart1", @@ -346,6 +367,14 @@ "mmc3_sample"; }; + ss_clk: clk@01c2009c { + #clock-cells = <0>; + compatible = "allwinner,sun4i-a10-mod0-clk"; + reg = <0x01c2009c 0x4>; + clocks = <&osc24M>, <&pll6 0>; + clock-output-names = "ss"; + }; + spi0_clk: clk@01c200a0 { #clock-cells = <0>; compatible = "allwinner,sun4i-a10-mod0-clk"; @@ -384,6 +413,9 @@ compatible = "allwinner,sun6i-a31-usb-clk"; reg = <0x01c200cc 0x4>; clocks = <&osc24M>; + clock-indices = <8>, <9>, <10>, + <16>, <17>, + <18>; clock-output-names = "usb_phy0", "usb_phy1", "usb_phy2", "usb_ohci0", "usb_ohci1", "usb_ohci2"; @@ -870,6 +902,16 @@ #size-cells = <0>; }; + crypto: crypto-engine@01c15000 { + compatible = "allwinner,sun4i-a10-crypto"; + reg = <0x01c15000 0x1000>; + interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&ahb1_gates 5>, <&ss_clk>; + clock-names = "ahb", "mod"; + resets = <&ahb1_rst 5>; + reset-names = "ahb"; + }; + timer@01c60000 { 
compatible = "allwinner,sun6i-a31-hstimer", "allwinner,sun7i-a20-hstimer"; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 6a63f30c9a69..d3b2f26417aa 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -267,6 +267,19 @@ compatible = "allwinner,sun7i-a20-ahb-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb>; + clock-indices = <0>, <1>, + <2>, <3>, <4>, + <5>, <6>, <7>, <8>, + <9>, <10>, <11>, <12>, + <13>, <14>, <16>, + <17>, <18>, <20>, <21>, + <22>, <23>, <25>, + <28>, <32>, <33>, <34>, + <35>, <36>, <37>, <40>, + <41>, <42>, <43>, + <44>, <45>, <46>, + <47>, <49>, <50>, + <52>; clock-output-names = "ahb_usb0", "ahb_ehci0", "ahb_ohci0", "ahb_ehci1", "ahb_ohci1", "ahb_ss", "ahb_dma", "ahb_bist", "ahb_mmc0", @@ -295,6 +308,10 @@ compatible = "allwinner,sun7i-a20-apb0-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb0>; + clock-indices = <0>, <1>, + <2>, <3>, <4>, + <5>, <6>, <7>, + <8>, <10>; clock-output-names = "apb0_codec", "apb0_spdif", "apb0_ac97", "apb0_iis0", "apb0_iis1", "apb0_pio", "apb0_ir0", "apb0_ir1", @@ -314,6 +331,12 @@ compatible = "allwinner,sun7i-a20-apb1-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb1>; + clock-indices = <0>, <1>, + <2>, <3>, <4>, + <5>, <6>, <7>, + <15>, <16>, <17>, + <18>, <19>, <20>, + <21>, <22>, <23>; clock-output-names = "apb1_i2c0", "apb1_i2c1", "apb1_i2c2", "apb1_i2c3", "apb1_can", "apb1_scr", "apb1_ps20", "apb1_ps21", @@ -731,6 +754,14 @@ status = "disabled"; }; + crypto: crypto-engine@01c15000 { + compatible = "allwinner,sun4i-a10-crypto"; + reg = <0x01c15000 0x1000>; + interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&ahb_gates 5>, <&ss_clk>; + clock-names = "ahb", "mod"; + }; + spi2: spi@01c17000 { compatible = "allwinner,sun4i-a10-spi"; reg = <0x01c17000 0x1000>; diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi index 7abd0ae3143d..c318c770b6c1 100644 --- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi @@ -180,6 +180,15 @@ compatible = "allwinner,sun8i-a23-ahb1-gates-clk"; reg = <0x01c20060 0x8>; clocks = <&ahb1>; + clock-indices = <1>, <6>, + <8>, <9>, <10>, + <13>, <14>, + <19>, <20>, + <21>, <24>, <26>, + <29>, <32>, <36>, + <40>, <44>, <46>, + <52>, <54>, + <57>; clock-output-names = "ahb1_mipidsi", "ahb1_dma", "ahb1_mmc0", "ahb1_mmc1", "ahb1_mmc2", "ahb1_nand", "ahb1_sdram", @@ -196,6 +205,8 @@ compatible = "allwinner,sun8i-a23-apb1-gates-clk"; reg = <0x01c20068 0x4>; clocks = <&apb1>; + clock-indices = <0>, <5>, + <12>, <13>; clock-output-names = "apb1_codec", "apb1_pio", "apb1_daudio0", "apb1_daudio1"; }; @@ -213,6 +224,10 @@ compatible = "allwinner,sun8i-a23-apb2-gates-clk"; reg = <0x01c2006c 0x4>; clocks = <&apb2>; + clock-indices = <0>, <1>, + <2>, <16>, + <17>, <18>, + <19>, <20>; clock-output-names = "apb2_i2c0", "apb2_i2c1", "apb2_i2c2", "apb2_uart0", "apb2_uart1", "apb2_uart2", diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi index a43ad779ee2f..5908e3dcf965 100644 --- a/arch/arm/boot/dts/sun9i-a80.dtsi +++ b/arch/arm/boot/dts/sun9i-a80.dtsi @@ -277,9 +277,12 @@ compatible = "allwinner,sun9i-a80-ahb0-gates-clk"; reg = <0x06000580 0x4>; clocks = <&ahb0>; - clock-indices = <0>, <1>, <3>, <5>, <8>, <12>, <13>, - <14>, <15>, <16>, <18>, <20>, <21>, - <22>, <23>; + clock-indices = <0>, <1>, <3>, + <5>, <8>, <12>, + <13>, <14>, + <15>, <16>, <18>, + <20>, <21>, <22>, + <23>; clock-output-names = "ahb0_fd", "ahb0_ve", "ahb0_gpu", "ahb0_ss", 
"ahb0_sd", "ahb0_nand1", "ahb0_nand0", "ahb0_sdram", @@ -293,7 +296,10 @@ compatible = "allwinner,sun9i-a80-ahb1-gates-clk"; reg = <0x06000584 0x4>; clocks = <&ahb1>; - clock-indices = <0>, <1>, <17>, <21>, <22>, <23>, <24>; + clock-indices = <0>, <1>, + <17>, <21>, + <22>, <23>, + <24>; clock-output-names = "ahb1_usbotg", "ahb1_usbhci", "ahb1_gmac", "ahb1_msgbox", "ahb1_spinlock", "ahb1_hstimer", @@ -305,8 +311,9 @@ compatible = "allwinner,sun9i-a80-ahb2-gates-clk"; reg = <0x06000588 0x4>; clocks = <&ahb2>; - clock-indices = <0>, <1>, <2>, <4>, <5>, <7>, <8>, - <11>; + clock-indices = <0>, <1>, + <2>, <4>, <5>, + <7>, <8>, <11>; clock-output-names = "ahb2_lcd0", "ahb2_lcd1", "ahb2_edp", "ahb2_csi", "ahb2_hdmi", "ahb2_de", "ahb2_mp", "ahb2_mipi_dsi"; @@ -317,8 +324,10 @@ compatible = "allwinner,sun9i-a80-apb0-gates-clk"; reg = <0x06000590 0x4>; clocks = <&apb0>; - clock-indices = <1>, <5>, <11>, <12>, <13>, <15>, - <17>, <18>, <19>; + clock-indices = <1>, <5>, + <11>, <12>, <13>, + <15>, <17>, <18>, + <19>; clock-output-names = "apb0_spdif", "apb0_pio", "apb0_ac97", "apb0_i2s0", "apb0_i2s1", "apb0_lradc", "apb0_gpadc", "apb0_twd", @@ -330,8 +339,11 @@ compatible = "allwinner,sun9i-a80-apb1-gates-clk"; reg = <0x06000594 0x4>; clocks = <&apb1>; - clock-indices = <0>, <1>, <2>, <3>, <4>, - <16>, <17>, <18>, <19>, <20>, <21>; + clock-indices = <0>, <1>, + <2>, <3>, <4>, + <16>, <17>, + <18>, <19>, + <20>, <21>; clock-output-names = "apb1_i2c0", "apb1_i2c1", "apb1_i2c2", "apb1_i2c3", "apb1_i2c4", "apb1_uart0", "apb1_uart1", diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 5114b68e99d5..96dabcb6c621 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c @@ -91,7 +91,7 @@ void it8152_init_irq(void) for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) { irq_set_chip_and_handler(irq, &it8152_irq_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } } diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index b55c3625d7ee..304adea4bc52 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c @@ -138,9 +138,9 @@ static struct locomo_dev_info locomo_devices[] = { }, }; -static void locomo_handler(unsigned int irq, struct irq_desc *desc) +static void locomo_handler(unsigned int __irq, struct irq_desc *desc) { - struct locomo *lchip = irq_get_chip_data(irq); + struct locomo *lchip = irq_desc_get_chip_data(desc); int req, i; /* Acknowledge the parent IRQ */ @@ -150,6 +150,8 @@ static void locomo_handler(unsigned int irq, struct irq_desc *desc) req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00; if (req) { + unsigned int irq; + /* generate the next interrupt(s) */ irq = lchip->irq_base; for (i = 0; i <= 3; i++, irq++) { @@ -205,7 +207,7 @@ static void locomo_setup_irq(struct locomo *lchip) for ( ; irq <= lchip->irq_base + 3; irq++) { irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); irq_set_chip_data(irq, lchip); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } } @@ -475,8 +477,7 @@ static void __locomo_remove(struct locomo *lchip) device_for_each_child(lchip->dev, NULL, locomo_remove_child); if (lchip->irq != NO_IRQ) { - irq_set_chained_handler(lchip->irq, NULL); - irq_set_handler_data(lchip->irq, NULL); + irq_set_chained_handler_and_data(lchip->irq, NULL, NULL); } iounmap(lchip->base); diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 93ee70dbbdd3..4f290250fa93 
100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -197,10 +197,11 @@ static struct sa1111_dev_info sa1111_devices[] = { * will call us again if there are more interrupts to process. */ static void -sa1111_irq_handler(unsigned int irq, struct irq_desc *desc) +sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc) { + unsigned int irq = irq_desc_get_irq(desc); unsigned int stat0, stat1, i; - struct sa1111 *sachip = irq_get_handler_data(irq); + struct sa1111 *sachip = irq_desc_get_handler_data(desc); void __iomem *mapbase = sachip->base + SA1111_INTC; stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0); @@ -486,7 +487,7 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base) irq_set_chip_and_handler(irq, &sa1111_low_chip, handle_edge_irq); irq_set_chip_data(irq, sachip); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) { @@ -494,7 +495,7 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base) irq_set_chip_and_handler(irq, &sa1111_high_chip, handle_edge_irq); irq_set_chip_data(irq, sachip); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } /* diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig deleted file mode 100644 index 5666e3700a82..000000000000 --- a/arch/arm/configs/armadillo800eva_defconfig +++ /dev/null @@ -1,162 +0,0 @@ -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=16 -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_PID_NS is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_PERF_EVENTS=y -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set -CONFIG_ARCH_SHMOBILE_LEGACY=y -CONFIG_ARCH_R8A7740=y -CONFIG_MACH_ARMADILLO800EVA=y -# CONFIG_SH_TIMER_TMU is not set -CONFIG_ARM_THUMB=y -CONFIG_CACHE_L2X0=y -CONFIG_ARM_ERRATA_430973=y -CONFIG_ARM_ERRATA_458693=y -CONFIG_ARM_ERRATA_460075=y -CONFIG_PL310_ERRATA_588369=y -CONFIG_ARM_ERRATA_720789=y -CONFIG_PL310_ERRATA_727915=y -CONFIG_ARM_ERRATA_743622=y -CONFIG_ARM_ERRATA_751472=y -CONFIG_PL310_ERRATA_753970=y -CONFIG_ARM_ERRATA_754322=y -CONFIG_PL310_ERRATA_769419=y -CONFIG_ARM_ERRATA_775420=y -CONFIG_AEABI=y -# CONFIG_OABI_COMPAT is not set -CONFIG_FORCE_MAX_ZONEORDER=13 -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_ARM_APPENDED_DTB=y -CONFIG_KEXEC=y -CONFIG_VFP=y -CONFIG_NEON=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set -# CONFIG_INET_DIAG is not set -# CONFIG_IPV6 is not set -# CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_NETDEVICES=y -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CHELSIO is not set -# CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_FARADAY is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# 
CONFIG_NET_VENDOR_NATSEMI is not set -CONFIG_SH_ETH=y -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_WLAN is not set -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_KEYBOARD_ATKBD is not set -CONFIG_KEYBOARD_GPIO=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_ST1232=y -# CONFIG_SERIO is not set -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_SH_SCI=y -CONFIG_SERIAL_SH_SCI_NR_UARTS=9 -CONFIG_SERIAL_SH_SCI_CONSOLE=y -# CONFIG_HW_RANDOM is not set -CONFIG_I2C=y -CONFIG_I2C_GPIO=y -CONFIG_I2C_SH_MOBILE=y -# CONFIG_HWMON is not set -CONFIG_REGULATOR=y -CONFIG_REGULATOR_GPIO=y -CONFIG_MEDIA_SUPPORT=y -CONFIG_VIDEO_DEV=y -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_SOC_CAMERA=y -CONFIG_SOC_CAMERA_MT9T112=y -CONFIG_VIDEO_SH_MOBILE_CEU=y -CONFIG_FB=y -CONFIG_FB_SH_MOBILE_LCDC=y -CONFIG_FB_SH_MOBILE_HDMI=y -CONFIG_LCD_CLASS_DEVICE=y -CONFIG_BACKLIGHT_PWM=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_SND_SUPPORT_OLD_API is not set -# CONFIG_SND_VERBOSE_PROCFS is not set -# CONFIG_SND_DRIVERS is not set -# CONFIG_SND_ARM is not set -CONFIG_SND_SOC_SH4_FSI=y -# CONFIG_HID_SUPPORT is not set -CONFIG_USB=y -CONFIG_USB_RENESAS_USBHS=y -CONFIG_USB_GADGET=y -CONFIG_USB_RENESAS_USBHS_UDC=y -CONFIG_USB_ETH=m -CONFIG_MMC=y -CONFIG_MMC_SDHI=y -CONFIG_MMC_SH_MMCIF=y -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_GPIO=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_S35390A=y -CONFIG_DMADEVICES=y -CONFIG_SH_DMAE=y -CONFIG_UIO=y -CONFIG_UIO_PDRV_GENIRQ=y -CONFIG_PWM=y -CONFIG_PWM_RENESAS_TPU=y -# CONFIG_DNOTIFY is not set -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -# CONFIG_MISC_FILESYSTEMS is not set -CONFIG_NFS_FS=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_V4_1=y -CONFIG_ROOT_NFS=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set -# CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_ARM_UNWIND is not set -CONFIG_CRYPTO=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_XZ_DEC=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index b47863d49ac6..7569b391704e 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -354,8 +354,7 @@ CONFIG_PROVE_LOCKING=y # CONFIG_FTRACE is not set # CONFIG_ARM_UNWIND is not set CONFIG_SECURITYFS=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set -# CONFIG_CRYPTO_HW is not set +CONFIG_CRYPTO_DEV_FSL_CAAM=y CONFIG_CRC_CCITT=m CONFIG_CRC_T10DIF=y CONFIG_CRC7=m diff --git a/arch/arm/configs/kzm9g_defconfig b/arch/arm/configs/kzm9g_defconfig deleted file mode 100644 index 23e8d146dc16..000000000000 --- a/arch/arm/configs/kzm9g_defconfig +++ /dev/null @@ -1,154 +0,0 @@ -# CONFIG_ARM_PATCH_PHYS_VIRT is not set -CONFIG_EXPERIMENTAL=y -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=16 -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_USER_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_EMBEDDED=y -CONFIG_PERF_EVENTS=y -CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set 
-# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set -CONFIG_ARCH_SHMOBILE_LEGACY=y -CONFIG_ARCH_SH73A0=y -CONFIG_MACH_KZM9G=y -CONFIG_MEMORY_START=0x41000000 -CONFIG_MEMORY_SIZE=0x1f000000 -CONFIG_ARM_ERRATA_743622=y -CONFIG_ARM_ERRATA_754322=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_SMP=y -CONFIG_SCHED_MC=y -CONFIG_AEABI=y -# CONFIG_OABI_COMPAT is not set -CONFIG_HIGHMEM=y -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_ARM_APPENDED_DTB=y -CONFIG_KEXEC=y -CONFIG_VFP=y -CONFIG_NEON=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set -# CONFIG_INET_DIAG is not set -# CONFIG_IPV6 is not set -CONFIG_IRDA=y -CONFIG_SH_IRDA=y -# CONFIG_WIRELESS is not set -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_NETDEVICES=y -CONFIG_SMSC911X=y -# CONFIG_WLAN is not set -CONFIG_INPUT_SPARSEKMAP=y -# CONFIG_INPUT_MOUSEDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_KEYBOARD_ATKBD is not set -CONFIG_KEYBOARD_GPIO=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_ST1232=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_ADXL34X=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_SH_SCI=y -CONFIG_SERIAL_SH_SCI_NR_UARTS=9 -CONFIG_SERIAL_SH_SCI_CONSOLE=y -# CONFIG_HW_RANDOM is not set -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_SH_MOBILE=y -CONFIG_GPIO_PCF857X=y -# CONFIG_HWMON is not set -CONFIG_MFD_AS3711=y -CONFIG_REGULATOR=y -CONFIG_REGULATOR_AS3711=y -CONFIG_FB=y -CONFIG_FB_SH_MOBILE_LCDC=y -CONFIG_BACKLIGHT_AS3711=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_LOGO=y -CONFIG_FB_SH_MOBILE_MERAM=y -CONFIG_SOUND=y -CONFIG_SND=y -# CONFIG_SND_SUPPORT_OLD_API is not set -# CONFIG_SND_VERBOSE_PROCFS is not set -# CONFIG_SND_DRIVERS is not set -# CONFIG_SND_ARM is not set -# CONFIG_SND_USB is not set -CONFIG_SND_SOC=y -CONFIG_SND_SOC_SH4_FSI=y -# CONFIG_HID_SUPPORT is not set -CONFIG_USB=y -CONFIG_USB_R8A66597_HCD=y -CONFIG_USB_RENESAS_USBHS=y -CONFIG_USB_STORAGE=y -CONFIG_USB_GADGET=y -CONFIG_USB_RENESAS_USBHS_UDC=y -CONFIG_USB_ETH=m -CONFIG_USB_MASS_STORAGE=m -CONFIG_MMC=y -# CONFIG_MMC_BLOCK_BOUNCE is not set -CONFIG_MMC_SDHI=y -CONFIG_MMC_SH_MMCIF=y -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_GPIO=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_RS5C372=y -CONFIG_DMADEVICES=y -CONFIG_SH_DMAE=y -CONFIG_ASYNC_TX_DMA=y -CONFIG_STAGING=y -CONFIG_IIO=y -CONFIG_AK8975=y -# CONFIG_DNOTIFY is not set -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -# CONFIG_MISC_FILESYSTEMS is not set -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_V4_1=y -CONFIG_ROOT_NFS=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set -# CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_SCHED_DEBUG is not set -# CONFIG_DEBUG_PREEMPT is not set -# CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_FTRACE is not set -# CONFIG_ARM_UNWIND is not set -CONFIG_CRYPTO=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y -CONFIG_CRC16=y diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore index 6231d36b3635..31e1f538df7d 100644 --- a/arch/arm/crypto/.gitignore +++ b/arch/arm/crypto/.gitignore @@ -1 +1,3 @@ aesbs-core.S +sha256-core.S +sha512-core.S diff --git 
a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 83c50193626c..30b3bc1666d2 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -13,6 +13,7 @@ generic-y += kdebug.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += msgbuf.h generic-y += param.h generic-y += parport.h diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 28b9bb35949e..8857d2869a5f 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -19,9 +19,7 @@ struct pci_bus; struct device; struct hw_pci { -#ifdef CONFIG_PCI_MSI struct msi_controller *msi_ctrl; -#endif struct pci_ops *ops; int nr_controllers; void **private_data; @@ -42,9 +40,6 @@ struct hw_pci { * Per-controller structure */ struct pci_sys_data { -#ifdef CONFIG_PCI_MSI - struct msi_controller *msi_ctrl; -#endif struct list_head node; int busnr; /* primary bus number */ u64 mem_offset; /* bus->cpu memory mapping offset */ diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 6f225acc07c5..b7f6fb462ea0 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -286,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); */ static inline phys_addr_t __virt_to_idmap(unsigned long x) { - if (arch_virt_to_idmap) + if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap) return arch_virt_to_idmap(x); else return __virt_to_phys(x); diff --git a/arch/arm/include/asm/mm-arch-hooks.h b/arch/arm/include/asm/mm-arch-hooks.h deleted file mode 100644 index 7056660c7cc4..000000000000 --- a/arch/arm/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_ARM_MM_ARCH_HOOKS_H -#define _ASM_ARM_MM_ARCH_HOOKS_H - -#endif /* _ASM_ARM_MM_ARCH_HOOKS_H */ diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index c99e259469f7..12ebfcc1d539 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -10,7 +10,9 @@ * CPU. 
*/ #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) -#define finish_arch_switch(prev) dsb(ish) +#define __complete_pending_tlbi() dsb(ish) +#else +#define __complete_pending_tlbi() #endif /* @@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ + __complete_pending_tlbi(); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S index c3c45e628e33..2556a8801c8c 100644 --- a/arch/arm/include/debug/at91.S +++ b/arch/arm/include/debug/at91.S @@ -13,9 +13,12 @@ #define AT91_DBGU 0xfffff200 /* AT91_BASE_DBGU0 */ #elif defined(CONFIG_AT91_DEBUG_LL_DBGU1) #define AT91_DBGU 0xffffee00 /* AT91_BASE_DBGU1 */ -#else +#elif defined(CONFIG_AT91_DEBUG_LL_DBGU2) /* On sama5d4, use USART3 as low level serial console */ #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ +#else +/* On sama5d2, use UART1 as low level serial console */ +#define AT91_DBGU 0xf8020000 #endif #ifdef CONFIG_MMU diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h index 66f736f74684..bce58e975ad1 100644 --- a/arch/arm/include/debug/imx-uart.h +++ b/arch/arm/include/debug/imx-uart.h @@ -90,6 +90,17 @@ #define IMX6SX_UART_BASE_ADDR(n) IMX6SX_UART##n##_BASE_ADDR #define IMX6SX_UART_BASE(n) IMX6SX_UART_BASE_ADDR(n) +#define IMX6UL_UART1_BASE_ADDR 0x02020000 +#define IMX6UL_UART2_BASE_ADDR 0x021e8000 +#define IMX6UL_UART3_BASE_ADDR 0x021ec000 +#define IMX6UL_UART4_BASE_ADDR 0x021f0000 +#define IMX6UL_UART5_BASE_ADDR 0x021f4000 +#define IMX6UL_UART6_BASE_ADDR 0x021fc000 +#define IMX6UL_UART7_BASE_ADDR 0x02018000 +#define IMX6UL_UART8_BASE_ADDR 0x02024000 +#define IMX6UL_UART_BASE_ADDR(n) IMX6UL_UART##n##_BASE_ADDR +#define IMX6UL_UART_BASE(n) IMX6UL_UART_BASE_ADDR(n) + #define IMX7D_UART1_BASE_ADDR 0x30860000 #define IMX7D_UART2_BASE_ADDR 0x30890000 #define IMX7D_UART3_BASE_ADDR 0x30880000 @@ -124,6 +135,8 @@ #define UART_PADDR IMX_DEBUG_UART_BASE(IMX6SL) #elif defined(CONFIG_DEBUG_IMX6SX_UART) #define UART_PADDR IMX_DEBUG_UART_BASE(IMX6SX) +#elif defined(CONFIG_DEBUG_IMX6UL_UART) +#define UART_PADDR IMX_DEBUG_UART_BASE(IMX6UL) #elif defined(CONFIG_DEBUG_IMX7D_UART) #define UART_PADDR IMX_DEBUG_UART_BASE(IMX7D) diff --git a/arch/arm/include/debug/zynq.S b/arch/arm/include/debug/zynq.S index bd13dedbdeff..de86b9247564 100644 --- a/arch/arm/include/debug/zynq.S +++ b/arch/arm/include/debug/zynq.S @@ -38,7 +38,7 @@ .endm .macro senduart,rd,rx - str \rd, [\rx, #UART_FIFO_OFFSET] @ TXDATA + strb \rd, [\rx, #UART_FIFO_OFFSET] @ TXDATA .endm .macro waituart,rd,rx diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index fcbbbb1b9e95..874e1823f803 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -18,15 +18,6 @@ static int debug_pci; -#ifdef CONFIG_PCI_MSI -struct msi_controller *pcibios_msi_controller(struct pci_dev *dev) -{ - struct pci_sys_data *sysdata = dev->bus->sysdata; - - return sysdata->msi_ctrl; -} -#endif - /* * We can't use pci_get_device() here since we are * called from interrupt context. 
@@ -459,12 +450,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, for (nr = busnr = 0; nr < hw->nr_controllers; nr++) { sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL); - if (!sys) - panic("PCI: unable to allocate sys data!"); + if (WARN(!sys, "PCI: unable to allocate sys data!")) + break; -#ifdef CONFIG_PCI_MSI - sys->msi_ctrl = hw->msi_ctrl; -#endif sys->busnr = busnr; sys->swizzle = hw->swizzle; sys->map_irq = hw->map_irq; @@ -486,11 +474,14 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, if (hw->scan) sys->bus = hw->scan(nr, sys); else - sys->bus = pci_scan_root_bus(parent, sys->busnr, - hw->ops, sys, &sys->resources); + sys->bus = pci_scan_root_bus_msi(parent, + sys->busnr, hw->ops, sys, + &sys->resources, hw->msi_ctrl); - if (!sys->bus) - panic("PCI: unable to scan bus!"); + if (WARN(!sys->bus, "PCI: unable to scan bus!")) { + kfree(sys); + break; + } busnr = sys->bus->busn_res.end + 1; @@ -521,6 +512,8 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw) struct pci_bus *bus = sys->bus; if (!pci_has_flag(PCI_PROBE_ONLY)) { + struct pci_bus *child; + /* * Size the bridge windows. */ @@ -530,25 +523,15 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw) * Assign resources. */ pci_bus_assign_resources(bus); - } + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } /* * Tell drivers about devices found. */ pci_bus_add_devices(bus); } - - list_for_each_entry(sys, &head, node) { - struct pci_bus *bus = sys->bus; - - /* Configure PCI Express settings */ - if (bus && !pci_has_flag(PCI_PROBE_ONLY)) { - struct pci_bus *child; - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - } - } } #ifndef CONFIG_PCI_HOST_ITE8152 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 92828a1dec80..b48dd4f37f80 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -61,6 +61,7 @@ work_pending: movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE) ldmia sp, {r0 - r6} @ have to reload r0 - r6 b local_restart @ ... and off we go +ENDPROC(ret_fast_syscall) /* * "slow" syscall return path. "why" tells us if this was a real syscall. diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index bd755d97e459..29e2991465cb 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -399,6 +399,9 @@ ENTRY(secondary_startup) sub lr, r4, r5 @ mmu has been enabled add r3, r7, lr ldrd r4, [r3, #0] @ get secondary_data.pgdir +ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE: +ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps +ARM_BE8(eor r4, r4, r5) @ without using a temp reg. 
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir badr lr, __enable_mmu @ return address mov r13, r12 @ __secondary_switched address diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 350f188c92d2..baf8edebe26f 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -140,7 +140,7 @@ int __init arch_probe_nr_irqs(void) static bool migrate_one_irq(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); - const struct cpumask *affinity = d->affinity; + const struct cpumask *affinity = irq_data_get_affinity_mask(d); struct irq_chip *c; bool ret = false; @@ -160,7 +160,7 @@ static bool migrate_one_irq(struct irq_desc *desc) if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) - cpumask_copy(d->affinity, affinity); + cpumask_copy(irq_data_get_affinity_mask(d), affinity); return ret; } diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 357f57ea83f4..54272e0be713 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -818,12 +818,13 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) break; - of_node_put(dn); if (cpu >= nr_cpu_ids) { pr_warn("Failed to find logical CPU for %s\n", dn->name); + of_node_put(dn); break; } + of_node_put(dn); irqs[i] = cpu; cpumask_set_cpu(cpu, &pmu->supported_cpus); diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c index f90fdf4ce7c7..2e6024334790 100644 --- a/arch/arm/kernel/psci.c +++ b/arch/arm/kernel/psci.c @@ -278,7 +278,7 @@ out_put_node: return err; } -static const struct of_device_id psci_of_match[] __initconst = { +static const struct of_device_id const psci_of_match[] __initconst = { { .compatible = "arm,psci", .data = psci_0_1_init}, { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, {}, diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c index 1a4d232796be..38269358fd25 100644 --- a/arch/arm/kernel/reboot.c +++ b/arch/arm/kernel/reboot.c @@ -50,7 +50,7 @@ static void __soft_restart(void *addr) flush_cache_all(); /* Switch to the identity mapping. */ - phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); + phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset); phys_reset((unsigned long)addr); /* Should never get here. 
*/ diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 172c6a05d27f..e9035cda1485 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -36,29 +36,30 @@ static DEFINE_PER_CPU(bool, percpu_setup_called); static struct clock_event_device __percpu *twd_evt; static int twd_ppi; -static void twd_set_mode(enum clock_event_mode mode, - struct clock_event_device *clk) +static int twd_shutdown(struct clock_event_device *clk) { - unsigned long ctrl; - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE - | TWD_TIMER_CONTROL_PERIODIC; - writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ), - twd_base + TWD_TIMER_LOAD); - break; - case CLOCK_EVT_MODE_ONESHOT: - /* period set, and timer enabled in 'next_event' hook */ - ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT; - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - ctrl = 0; - } + writel_relaxed(0, twd_base + TWD_TIMER_CONTROL); + return 0; +} +static int twd_set_oneshot(struct clock_event_device *clk) +{ + /* period set, and timer enabled in 'next_event' hook */ + writel_relaxed(TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT, + twd_base + TWD_TIMER_CONTROL); + return 0; +} + +static int twd_set_periodic(struct clock_event_device *clk) +{ + unsigned long ctrl = TWD_TIMER_CONTROL_ENABLE | + TWD_TIMER_CONTROL_IT_ENABLE | + TWD_TIMER_CONTROL_PERIODIC; + + writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ), + twd_base + TWD_TIMER_LOAD); writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL); + return 0; } static int twd_set_next_event(unsigned long evt, @@ -94,7 +95,7 @@ static void twd_timer_stop(void) { struct clock_event_device *clk = raw_cpu_ptr(twd_evt); - twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk); + twd_shutdown(clk); disable_percpu_irq(clk->irq); } @@ -296,7 +297,10 @@ static void twd_timer_setup(void) clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; clk->rating = 350; - clk->set_mode = twd_set_mode; + clk->set_state_shutdown = twd_shutdown; + clk->set_state_periodic = twd_set_periodic; + clk->set_state_oneshot = twd_set_oneshot; + clk->tick_resume = twd_shutdown; clk->set_next_event = twd_set_next_event; clk->irq = twd_ppi; clk->cpumask = cpumask_of(cpu); diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index efe17dd9b921..54a5aeab988d 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk) */ void update_vsyscall(struct timekeeper *tk) { - struct timespec xtime_coarse; struct timespec64 *wtm = &tk->wall_to_monotonic; if (!cntvct_ok) { @@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk) vdso_write_begin(vdso_data); - xtime_coarse = __current_kernel_time(); vdso_data->tk_is_cntvct = tk_is_cntvct(tk); - vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; - vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; + vdso_data->xtime_coarse_sec = tk->xtime_sec; + vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >> + tk->tkr_mono.shift); vdso_data->wtm_clock_sec = wtm->tv_sec; vdso_data->wtm_clock_nsec = wtm->tv_nsec; diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 3e58d710013c..4b39af2dfda9 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) } /* the mmap semaphore is taken 
only if not in an atomic context */ - atomic = in_atomic(); + atomic = faulthandler_disabled(); if (!atomic) down_read(&current->mm->mmap_sem); diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index fd95f34945f4..89a755b90db2 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -8,6 +8,18 @@ menuconfig ARCH_AT91 select SOC_BUS if ARCH_AT91 +config SOC_SAMA5D2 + bool "SAMA5D2 family" if ARCH_MULTI_V7 + select SOC_SAMA5 + select CACHE_L2X0 + select HAVE_FB_ATMEL + select HAVE_AT91_UTMI + select HAVE_AT91_USB_CLK + select HAVE_AT91_H32MX + select HAVE_AT91_GENERATED_CLK + help + Select this if you are using one of Atmel's SAMA5D2 family SoC. + config SOC_SAMA5D3 bool "SAMA5D3 family" if ARCH_MULTI_V7 select SOC_SAMA5 diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index eaf58f88ef5d..c1a7c6cc00e1 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c @@ -8,7 +8,6 @@ * Licensed under GPLv2 or later. */ -#include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_platform.h> @@ -38,7 +37,7 @@ static void __init at91rm9200_dt_device_init(void) at91rm9200_pm_init(); } -static const char *at91rm9200_dt_board_compat[] __initconst = { +static const char *const at91rm9200_dt_board_compat[] __initconst = { "atmel,at91rm9200", NULL }; diff --git a/arch/arm/mach-at91/at91sam9.c b/arch/arm/mach-at91/at91sam9.c index e47a2093a0e7..7eb64f763034 100644 --- a/arch/arm/mach-at91/at91sam9.c +++ b/arch/arm/mach-at91/at91sam9.c @@ -72,7 +72,7 @@ static void __init at91sam9_dt_device_init(void) at91sam9260_pm_init(); } -static const char *at91_dt_board_compat[] __initconst = { +static const char *const at91_dt_board_compat[] __initconst = { "atmel,at91sam9", NULL }; @@ -89,7 +89,7 @@ static void __init at91sam9g45_dt_device_init(void) at91sam9g45_pm_init(); } -static const char *at91sam9g45_board_compat[] __initconst = { +static const char *const at91sam9g45_board_compat[] __initconst = { "atmel,at91sam9g45", NULL }; @@ -106,7 +106,7 @@ static void __init at91sam9x5_dt_device_init(void) at91sam9x5_pm_init(); } -static const char *at91sam9x5_board_compat[] __initconst = { +static const char *const at91sam9x5_board_compat[] __initconst = { "atmel,at91sam9x5", "atmel,at91sam9n12", NULL diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index e24df77abd79..265ffeb2037e 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -311,7 +311,7 @@ static void at91sam9_sdram_standby(void) at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1); } -static const struct of_device_id ramc_ids[] __initconst = { +static const struct of_device_id const ramc_ids[] __initconst = { { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby }, { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c index 41d829d8e7d5..d9cf6799aec0 100644 --- a/arch/arm/mach-at91/sama5.c +++ b/arch/arm/mach-at91/sama5.c @@ -18,6 +18,8 @@ #include "soc.h" static const struct at91_soc sama5_socs[] = { + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27_EXID_MATCH, + "sama5d27", "sama5d2"), AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH, "sama5d31", "sama5d3"), AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH, @@ -52,7 +54,7 @@ static void __init sama5_dt_device_init(void) at91sam9x5_pm_init(); } -static const char *sama5_dt_board_compat[] __initconst = { +static const
char *const sama5_dt_board_compat[] __initconst = { "atmel,sama5", NULL }; @@ -63,7 +65,8 @@ DT_MACHINE_START(sama5_dt, "Atmel SAMA5") .dt_compat = sama5_dt_board_compat, MACHINE_END -static const char *sama5_alt_dt_board_compat[] __initconst = { +static const char *const sama5_alt_dt_board_compat[] __initconst = { + "atmel,sama5d2", "atmel,sama5d4", NULL }; diff --git a/arch/arm/mach-at91/soc.h b/arch/arm/mach-at91/soc.h index be23c400596b..8ede0ef86172 100644 --- a/arch/arm/mach-at91/soc.h +++ b/arch/arm/mach-at91/soc.h @@ -62,6 +62,9 @@ at91_soc_init(const struct at91_soc *socs); #define AT91SAM9XE256_CIDR_MATCH 0x329a93a0 #define AT91SAM9XE512_CIDR_MATCH 0x329aa3a0 +#define SAMA5D2_CIDR_MATCH 0x0a5c08c0 +#define SAMA5D27_EXID_MATCH 0x00000021 + #define SAMA5D3_CIDR_MATCH 0x0a5c07c0 #define SAMA5D31_EXID_MATCH 0x00444300 #define SAMA5D33_EXID_MATCH 0x00414300 diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index 0ac9e4b3b265..1319c3c14327 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -140,10 +140,12 @@ config ARCH_BCM_63XX config ARCH_BRCMSTB bool "Broadcom BCM7XXX based boards" if ARCH_MULTI_V7 select ARM_GIC + select ARM_ERRATA_798181 if SMP select HAVE_ARM_ARCH_TIMER select BRCMSTB_GISB_ARB select BRCMSTB_L2_IRQ select BCM7120_L2_IRQ + select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE select ARCH_WANT_OPTIONAL_GPIOLIB help Say Y if you intend to run the kernel on a Broadcom ARM-based STB diff --git a/arch/arm/mach-bcm/Makefile b/arch/arm/mach-bcm/Makefile index 4fb0da458e91..1780a3ff42f9 100644 --- a/arch/arm/mach-bcm/Makefile +++ b/arch/arm/mach-bcm/Makefile @@ -39,10 +39,8 @@ obj-$(CONFIG_ARCH_BCM_5301X) += bcm_5301x.o # BCM63XXx ifeq ($(CONFIG_ARCH_BCM_63XX),y) -CFLAGS_bcm63xx_headsmp.o += -march=armv7-a obj-y += bcm63xx.o -obj-$(CONFIG_SMP) += bcm63xx_smp.o bcm63xx_headsmp.o \ - bcm63xx_pmb.o +obj-$(CONFIG_SMP) += bcm63xx_smp.o bcm63xx_pmb.o endif ifeq ($(CONFIG_ARCH_BRCMSTB),y) diff --git a/arch/arm/mach-bcm/bcm63xx_headsmp.S b/arch/arm/mach-bcm/bcm63xx_headsmp.S deleted file mode 100644 index c7af397c7f14..000000000000 --- a/arch/arm/mach-bcm/bcm63xx_headsmp.S +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (C) 2015, Broadcom Corporation - * All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include <linux/linkage.h> -#include <linux/init.h> -#include <asm/assembler.h> - -ENTRY(bcm63138_secondary_startup) - ARM_BE8(setend be) - /* - * L1 cache does have unpredictable contents at power-up clean its - * contents without flushing - */ - bl v7_invalidate_l1 - nop - - b secondary_startup -ENDPROC(bcm63138_secondary_startup) diff --git a/arch/arm/mach-bcm/bcm63xx_smp.c b/arch/arm/mach-bcm/bcm63xx_smp.c index 3f014f18cea5..19be90421f4d 100644 --- a/arch/arm/mach-bcm/bcm63xx_smp.c +++ b/arch/arm/mach-bcm/bcm63xx_smp.c @@ -127,7 +127,7 @@ static int bcm63138_smp_boot_secondary(unsigned int cpu, } /* Locate the secondary CPU node */ - dn = of_get_cpu_node(cpu_logical_map(cpu), NULL); + dn = of_get_cpu_node(cpu, NULL); if (!dn) { pr_err("SMP: failed to locate secondary CPU%d node\n", cpu); ret = -ENODEV; @@ -135,7 +135,7 @@ static int bcm63138_smp_boot_secondary(unsigned int cpu, } /* Write the secondary init routine to the BootLUT reset vector */ - val = virt_to_phys(bcm63138_secondary_startup); + val = virt_to_phys(secondary_startup); writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT); /* Power up the core, will jump straight to its reset vector when we diff --git a/arch/arm/mach-bcm/bcm63xx_smp.h b/arch/arm/mach-bcm/bcm63xx_smp.h index 50b76044536e..9c6d50e2b111 100644 --- a/arch/arm/mach-bcm/bcm63xx_smp.h +++ b/arch/arm/mach-bcm/bcm63xx_smp.h @@ -3,7 +3,6 @@ struct device_node; -extern void bcm63138_secondary_startup(void); extern int bcm63xx_pmb_power_on_cpu(struct device_node *dn); #endif /* __BCM63XX_SMP_H */ diff --git a/arch/arm/mach-bcm/bcm_5301x.c b/arch/arm/mach-bcm/bcm_5301x.c index 7aef92720eb4..5478fe6bcce6 100644 --- a/arch/arm/mach-bcm/bcm_5301x.c +++ b/arch/arm/mach-bcm/bcm_5301x.c @@ -44,7 +44,7 @@ static void __init bcm5301x_init_early(void) "imprecise external abort"); } -static const char __initconst *bcm5301x_dt_compat[] = { +static const char *const bcm5301x_dt_compat[] __initconst = { "brcm,bcm4708", NULL, }; diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c index a55a7ecf146a..cf3f8658f0e5 100644 --- a/arch/arm/mach-bcm/bcm_kona_smc.c +++ b/arch/arm/mach-bcm/bcm_kona_smc.c @@ -33,7 +33,7 @@ struct bcm_kona_smc_data { unsigned result; }; -static const struct of_device_id bcm_kona_smc_ids[] __initconst = { +static const struct of_device_id const bcm_kona_smc_ids[] __initconst = { {.compatible = "brcm,kona-smc"}, {.compatible = "bcm,kona-smc"}, /* deprecated name */ {}, diff --git a/arch/arm/mach-clps711x/board-autcpu12.c b/arch/arm/mach-clps711x/board-autcpu12.c index 45abf6bd5f68..c3d964221767 100644 --- a/arch/arm/mach-clps711x/board-autcpu12.c +++ b/arch/arm/mach-clps711x/board-autcpu12.c @@ -160,7 +160,7 @@ static struct platform_device autcpu12_mmgpio_pdev __initdata = { }, }; -static const struct gpio autcpu12_gpios[] __initconst = { +static const struct gpio const autcpu12_gpios[] __initconst = { { AUTCPU12_DPOT_CS, GPIOF_OUT_INIT_HIGH, "DPOT CS" }, { AUTCPU12_DPOT_CLK, GPIOF_OUT_INIT_LOW, "DPOT CLK" }, { AUTCPU12_DPOT_UD, GPIOF_OUT_INIT_LOW, "DPOT UD" }, diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index 4e9837ded96d..9b1dc223d8d3 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -113,30 +113,33 @@ void cns3xxx_power_off(void) */ static void __iomem *cns3xxx_tmr1; -static void cns3xxx_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *clk) +static int cns3xxx_shutdown(struct clock_event_device *clk) +{ + writel(0, cns3xxx_tmr1 + 
TIMER1_2_CONTROL_OFFSET); + return 0; +} + +static int cns3xxx_set_oneshot(struct clock_event_device *clk) +{ + unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); + + /* period set, and timer enabled in 'next_event' hook */ + ctrl |= (1 << 2) | (1 << 9); + writel(ctrl, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); + return 0; +} + +static int cns3xxx_set_periodic(struct clock_event_device *clk) { unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); int pclk = cns3xxx_cpu_clock() / 8; int reload; - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - reload = pclk * 20 / (3 * HZ) * 0x25000; - writel(reload, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET); - ctrl |= (1 << 0) | (1 << 2) | (1 << 9); - break; - case CLOCK_EVT_MODE_ONESHOT: - /* period set, and timer enabled in 'next_event' hook */ - ctrl |= (1 << 2) | (1 << 9); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - ctrl = 0; - } - + reload = pclk * 20 / (3 * HZ) * 0x25000; + writel(reload, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET); + ctrl |= (1 << 0) | (1 << 2) | (1 << 9); writel(ctrl, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); + return 0; } static int cns3xxx_timer_set_next_event(unsigned long evt, @@ -151,12 +154,16 @@ static int cns3xxx_timer_set_next_event(unsigned long evt, } static struct clock_event_device cns3xxx_tmr1_clockevent = { - .name = "cns3xxx timer1", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = cns3xxx_timer_set_mode, - .set_next_event = cns3xxx_timer_set_next_event, - .rating = 350, - .cpumask = cpu_all_mask, + .name = "cns3xxx timer1", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = cns3xxx_shutdown, + .set_state_periodic = cns3xxx_set_periodic, + .set_state_oneshot = cns3xxx_set_oneshot, + .tick_resume = cns3xxx_shutdown, + .set_next_event = cns3xxx_timer_set_next_event, + .rating = 350, + .cpumask = cpu_all_mask, }; static void __init cns3xxx_clockevents_init(unsigned int timer_irq) @@ -339,7 +346,7 @@ static struct usb_ohci_pdata cns3xxx_usb_ohci_pdata = { .power_off = csn3xxx_usb_power_off, }; -static struct of_dev_auxdata cns3xxx_auxdata[] __initconst = { +static const struct of_dev_auxdata const cns3xxx_auxdata[] __initconst = { { "intel,usb-ehci", CNS3XXX_USB_BASE, "ehci-platform", &cns3xxx_usb_ehci_pdata }, { "intel,usb-ohci", CNS3XXX_USB_OHCI_BASE, "ohci-platform", &cns3xxx_usb_ohci_pdata }, { "cavium,cns3420-ahci", CNS3XXX_SATA2_BASE, "ahci", NULL }, @@ -392,7 +399,7 @@ static void __init cns3xxx_init(void) cns3xxx_auxdata, NULL); } -static const char *cns3xxx_dt_compat[] __initdata = { +static const char *const cns3xxx_dt_compat[] __initconst = { "cavium,cns3410", "cavium,cns3420", NULL, diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c index 006dae8dfe44..507aad4b8dd9 100644 --- a/arch/arm/mach-davinci/cp_intc.c +++ b/arch/arm/mach-davinci/cp_intc.c @@ -85,23 +85,13 @@ static int cp_intc_set_irq_type(struct irq_data *d, unsigned int flow_type) return 0; } -/* - * Faking this allows us to to work with suspend functions of - * generic drivers which call {enable|disable}_irq_wake for - * wake up interrupt sources (eg RTC on DA850). 
- */ -static int cp_intc_set_wake(struct irq_data *d, unsigned int on) -{ - return 0; -} - static struct irq_chip cp_intc_irq_chip = { .name = "cp_intc", .irq_ack = cp_intc_ack_irq, .irq_mask = cp_intc_mask_irq, .irq_unmask = cp_intc_unmask_irq, .irq_set_type = cp_intc_set_irq_type, - .irq_set_wake = cp_intc_set_wake, + .flags = IRQCHIP_SKIP_SET_WAKE, }; static struct irq_domain *cp_intc_domain; @@ -112,7 +102,7 @@ static int cp_intc_host_map(struct irq_domain *h, unsigned int virq, pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw); irq_set_chip(virq, &cp_intc_irq_chip); - set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); + irq_set_probe(virq); irq_set_handler(virq, handle_edge_irq); return 0; } diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c index 3b8740c083c4..676997895e13 100644 --- a/arch/arm/mach-davinci/da850.c +++ b/arch/arm/mach-davinci/da850.c @@ -715,7 +715,7 @@ const short da850_lcdcntl_pins[] __initconst = { -1 }; -const short da850_vpif_capture_pins[] __initdata = { +const short da850_vpif_capture_pins[] __initconst = { DA850_VPIF_DIN0, DA850_VPIF_DIN1, DA850_VPIF_DIN2, DA850_VPIF_DIN3, DA850_VPIF_DIN4, DA850_VPIF_DIN5, DA850_VPIF_DIN6, DA850_VPIF_DIN7, DA850_VPIF_DIN8, DA850_VPIF_DIN9, DA850_VPIF_DIN10, DA850_VPIF_DIN11, @@ -725,7 +725,7 @@ const short da850_vpif_capture_pins[] __initdata = { -1 }; -const short da850_vpif_display_pins[] __initdata = { +const short da850_vpif_display_pins[] __initconst = { DA850_VPIF_DOUT0, DA850_VPIF_DOUT1, DA850_VPIF_DOUT2, DA850_VPIF_DOUT3, DA850_VPIF_DOUT4, DA850_VPIF_DOUT5, DA850_VPIF_DOUT6, DA850_VPIF_DOUT7, DA850_VPIF_DOUT8, DA850_VPIF_DOUT9, DA850_VPIF_DOUT10, diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c index 438f68547f4c..06b6451225c1 100644 --- a/arch/arm/mach-davinci/da8xx-dt.c +++ b/arch/arm/mach-davinci/da8xx-dt.c @@ -20,7 +20,7 @@ #define DA8XX_NUM_UARTS 3 -static const struct of_device_id da8xx_irq_match[] __initconst = { +static const struct of_device_id const da8xx_irq_match[] __initconst = { { .compatible = "ti,cp-intc", .data = cp_intc_of_init, }, { } }; @@ -59,7 +59,7 @@ static void __init da850_init_machine(void) } -static const char *da850_boards_compat[] __initdata = { +static const char *const da850_boards_compat[] __initconst = { "enbw,cmc", "ti,da850-evm", "ti,da850", diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index ddfdd820e6f2..29e08aac8294 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -1010,11 +1010,13 @@ static struct davinci_spi_platform_data da8xx_spi_pdata[] = { .version = SPI_VERSION_2, .intr_line = 1, .dma_event_q = EVENTQ_0, + .prescaler_limit = 2, }, [1] = { .version = SPI_VERSION_2, .intr_line = 1, .dma_event_q = EVENTQ_0, + .prescaler_limit = 2, }, }; diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 9cbeda798584..567dc56fe8cd 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c @@ -411,6 +411,7 @@ static struct davinci_spi_platform_data dm355_spi0_pdata = { .num_chipselect = 2, .cshold_bug = true, .dma_event_q = EVENTQ_1, + .prescaler_limit = 1, }; static struct platform_device dm355_spi0_device = { .name = "spi_davinci", diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index e3a3c54b6832..6a890a8486d0 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -646,6 +646,7 @@ static struct davinci_spi_platform_data dm365_spi0_pdata = { 
.version = SPI_VERSION_1, .num_chipselect = 2, .dma_event_q = EVENTQ_3, + .prescaler_limit = 1, }; static struct resource dm365_spi0_resources[] = { diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index 160c9602f490..6c18445a4639 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c @@ -303,36 +303,42 @@ static int davinci_set_next_event(unsigned long cycles, return 0; } -static void davinci_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int davinci_shutdown(struct clock_event_device *evt) { struct timer_s *t = &timers[TID_CLOCKEVENT]; - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - t->period = davinci_clock_tick_rate / (HZ); - t->opts &= ~TIMER_OPTS_STATE_MASK; - t->opts |= TIMER_OPTS_PERIODIC; - timer32_config(t); - break; - case CLOCK_EVT_MODE_ONESHOT: - t->opts &= ~TIMER_OPTS_STATE_MASK; - t->opts |= TIMER_OPTS_ONESHOT; - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - t->opts &= ~TIMER_OPTS_STATE_MASK; - t->opts |= TIMER_OPTS_DISABLED; - break; - case CLOCK_EVT_MODE_RESUME: - break; - } + t->opts &= ~TIMER_OPTS_STATE_MASK; + t->opts |= TIMER_OPTS_DISABLED; + return 0; +} + +static int davinci_set_oneshot(struct clock_event_device *evt) +{ + struct timer_s *t = &timers[TID_CLOCKEVENT]; + + t->opts &= ~TIMER_OPTS_STATE_MASK; + t->opts |= TIMER_OPTS_ONESHOT; + return 0; +} + +static int davinci_set_periodic(struct clock_event_device *evt) +{ + struct timer_s *t = &timers[TID_CLOCKEVENT]; + + t->period = davinci_clock_tick_rate / (HZ); + t->opts &= ~TIMER_OPTS_STATE_MASK; + t->opts |= TIMER_OPTS_PERIODIC; + timer32_config(t); + return 0; } static struct clock_event_device clockevent_davinci = { - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = davinci_set_next_event, - .set_mode = davinci_set_mode, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = davinci_set_next_event, + .set_state_shutdown = davinci_shutdown, + .set_state_periodic = davinci_set_periodic, + .set_state_oneshot = davinci_set_oneshot, }; diff --git a/arch/arm/mach-digicolor/digicolor.c b/arch/arm/mach-digicolor/digicolor.c index cfc88d1caa47..4d62f1bde4ed 100644 --- a/arch/arm/mach-digicolor/digicolor.c +++ b/arch/arm/mach-digicolor/digicolor.c @@ -8,7 +8,7 @@ #include <asm/mach/arch.h> -static const char *digicolor_dt_compat[] __initconst = { +static const char *const digicolor_dt_compat[] __initconst = { "cnxt,cx92755", NULL, }; diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index df0223f76fa9..305d7c6242bb 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -69,8 +69,9 @@ static struct irq_chip pmu_irq_chip = { .irq_ack = pmu_irq_ack, }; -static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) +static void pmu_irq_handler(unsigned int __irq, struct irq_desc *desc) { + unsigned int irq = irq_desc_get_irq(desc); unsigned long cause = readl(PMU_INTERRUPT_CAUSE); cause &= readl(PMU_INTERRUPT_MASK); @@ -172,7 +173,7 @@ void __init dove_init_irq(void) for (i = IRQ_DOVE_PMU_START; i < NR_IRQS; i++) { irq_set_chip_and_handler(i, &pmu_irq_chip, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); - set_irq_flags(i, IRQF_VALID); + irq_clear_status_flags(i, IRQ_NOREQUEST); } irq_set_chained_handler(IRQ_DOVE_PMU, pmu_irq_handler); } diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index 8254e716b095..688e5fed49a7 100644 --- a/arch/arm/mach-ebsa110/core.c +++ 
b/arch/arm/mach-ebsa110/core.c @@ -65,7 +65,7 @@ static void __init ebsa110_init_irq(void) for (irq = 0; irq < NR_IRQS; irq++) { irq_set_chip_and_handler(irq, &ebsa110_irq_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } } diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig index bec570ae6494..61a75ca3684e 100644 --- a/arch/arm/mach-ep93xx/Kconfig +++ b/arch/arm/mach-ep93xx/Kconfig @@ -15,45 +15,8 @@ config CRUNCH comment "EP93xx Platforms" -choice - prompt "EP93xx first SDRAM bank selection" - default EP93XX_SDCE3_SYNC_PHYS_OFFSET - -config EP93XX_SDCE3_SYNC_PHYS_OFFSET - bool "0x00000000 - SDCE3/SyncBoot" - help - Select this option if you want support for EP93xx boards with the - first SDRAM bank at 0x00000000. - -config EP93XX_SDCE0_PHYS_OFFSET - bool "0xc0000000 - SDCEO" - help - Select this option if you want support for EP93xx boards with the - first SDRAM bank at 0xc0000000. - -config EP93XX_SDCE1_PHYS_OFFSET - bool "0xd0000000 - SDCE1" - help - Select this option if you want support for EP93xx boards with the - first SDRAM bank at 0xd0000000. - -config EP93XX_SDCE2_PHYS_OFFSET - bool "0xe0000000 - SDCE2" - help - Select this option if you want support for EP93xx boards with the - first SDRAM bank at 0xe0000000. - -config EP93XX_SDCE3_ASYNC_PHYS_OFFSET - bool "0xf0000000 - SDCE3/AsyncBoot" - help - Select this option if you want support for EP93xx boards with the - first SDRAM bank at 0xf0000000. - -endchoice - config MACH_ADSSPHERE bool "Support ADS Sphere" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET help Say 'Y' here if you want your kernel to support the ADS Sphere board. @@ -63,7 +26,6 @@ config MACH_EDB93XX config MACH_EDB9301 bool "Support Cirrus Logic EDB9301" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET select MACH_EDB93XX help Say 'Y' here if you want your kernel to support the Cirrus @@ -71,7 +33,6 @@ config MACH_EDB9301 config MACH_EDB9302 bool "Support Cirrus Logic EDB9302" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET select MACH_EDB93XX help Say 'Y' here if you want your kernel to support the Cirrus @@ -79,7 +40,6 @@ config MACH_EDB9302 config MACH_EDB9302A bool "Support Cirrus Logic EDB9302A" - depends on EP93XX_SDCE0_PHYS_OFFSET select MACH_EDB93XX help Say 'Y' here if you want your kernel to support the Cirrus @@ -87,7 +47,6 @@ config MACH_EDB9302A config MACH_EDB9307 bool "Support Cirrus Logic EDB9307" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET select MACH_EDB93XX help Say 'Y' here if you want your kernel to support the Cirrus @@ -95,7 +54,6 @@ config MACH_EDB9307 config MACH_EDB9307A bool "Support Cirrus Logic EDB9307A" - depends on EP93XX_SDCE0_PHYS_OFFSET select MACH_EDB93XX help Say 'Y' here if you want your kernel to support the Cirrus @@ -103,7 +61,6 @@ config MACH_EDB9307A config MACH_EDB9312 bool "Support Cirrus Logic EDB9312" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET select MACH_EDB93XX help Say 'Y' here if you want your kernel to support the Cirrus @@ -111,7 +68,6 @@ config MACH_EDB9312 config MACH_EDB9315 bool "Support Cirrus Logic EDB9315" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET select MACH_EDB93XX help Say 'Y' here if you want your kernel to support the Cirrus @@ -119,14 +75,12 @@ config MACH_EDB9315 config MACH_EDB9315A bool "Support Cirrus Logic EDB9315A" - depends on EP93XX_SDCE0_PHYS_OFFSET select MACH_EDB93XX help Say 'Y' here if you want your kernel to support the Cirrus Logic EDB9315A Evaluation Board. 
config MACH_GESBC9312 - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET bool "Support Glomation GESBC-9312-sx" help Say 'Y' here if you want your kernel to support the Glomation @@ -137,7 +91,6 @@ config MACH_MICRO9 config MACH_MICRO9H bool "Support Contec Micro9-High" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET select MACH_MICRO9 help Say 'Y' here if you want your kernel to support the @@ -145,7 +98,6 @@ config MACH_MICRO9H config MACH_MICRO9M bool "Support Contec Micro9-Mid" - depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET select MACH_MICRO9 help Say 'Y' here if you want your kernel to support the @@ -153,7 +105,6 @@ config MACH_MICRO9M config MACH_MICRO9L bool "Support Contec Micro9-Lite" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET select MACH_MICRO9 help Say 'Y' here if you want your kernel to support the @@ -161,7 +112,6 @@ config MACH_MICRO9L config MACH_MICRO9S bool "Support Contec Micro9-Slim" - depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET select MACH_MICRO9 help Say 'Y' here if you want your kernel to support the @@ -169,28 +119,24 @@ config MACH_MICRO9S config MACH_SIM_ONE bool "Support Simplemachines Sim.One board" - depends on EP93XX_SDCE0_PHYS_OFFSET help Say 'Y' here if you want your kernel to support the Simplemachines Sim.One board. config MACH_SNAPPER_CL15 bool "Support Bluewater Systems Snapper CL15 Module" - depends on EP93XX_SDCE0_PHYS_OFFSET help Say 'Y' here if you want your kernel to support the Bluewater Systems Snapper CL15 Module. config MACH_TS72XX bool "Support Technologic Systems TS-72xx SBC" - depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET help Say 'Y' here if you want your kernel to support the Technologic Systems TS-72xx board. config MACH_VISION_EP9307 bool "Support Vision Engraving Systems EP9307 SoM" - depends on EP93XX_SDCE0_PHYS_OFFSET help Say 'Y' here if you want your kernel to support the Vision Engraving Systems EP9307 SoM. diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile index 78d427b34b1f..b7ae4345ac08 100644 --- a/arch/arm/mach-ep93xx/Makefile +++ b/arch/arm/mach-ep93xx/Makefile @@ -1,7 +1,7 @@ # # Makefile for the linux kernel. # -obj-y := core.o clock.o +obj-y := core.o clock.o timer-ep93xx.o obj-$(CONFIG_EP93XX_DMA) += dma.o diff --git a/arch/arm/mach-ep93xx/Makefile.boot b/arch/arm/mach-ep93xx/Makefile.boot index d3113a71cb40..ed82ed7c949f 100644 --- a/arch/arm/mach-ep93xx/Makefile.boot +++ b/arch/arm/mach-ep93xx/Makefile.boot @@ -1,14 +1 @@ - zreladdr-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) += 0x00008000 -params_phys-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) := 0x00000100 - - zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) += 0xc0008000 -params_phys-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) := 0xc0000100 - - zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) += 0xd0008000 -params_phys-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) := 0xd0000100 - - zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) += 0xe0008000 -params_phys-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) := 0xe0000100 - - zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) += 0xf0008000 -params_phys-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) := 0xf0000100 +# Empty file waiting for deletion once Makefile.boot isn't needed any more. 
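Many of the timer hunks in this series (smp_twd, cns3xxx, davinci, dc21285, and the new timer-ep93xx.c that follows below) apply the same clockevents conversion: the single set_mode(enum clock_event_mode, ...) callback is split into per-state set_state_shutdown/set_state_periodic/set_state_oneshot/tick_resume callbacks, each returning 0 on success. The following is only an illustrative sketch of what a driver looks like after that conversion, not code from any of the patches here; my_timer_base and the MY_TIMER_* register offsets are made-up placeholders for a SoC-specific timer block.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clockchips.h>

/* Hypothetical MMIO timer block -- stands in for the SoC-specific registers. */
static void __iomem *my_timer_base;
#define MY_TIMER_CTRL		0x00	/* bit 0: enable (assumed layout) */
#define MY_TIMER_LOAD		0x04

static int my_timer_shutdown(struct clock_event_device *evt)
{
	/* Stop the timer; also used for oneshot and resume, as in the hunks above. */
	writel(0, my_timer_base + MY_TIMER_CTRL);
	return 0;
}

static int my_timer_set_periodic(struct clock_event_device *evt)
{
	/* Reload once per tick and enable; 508469 Hz mirrors the EP93xx example. */
	writel(DIV_ROUND_CLOSEST(508469, HZ), my_timer_base + MY_TIMER_LOAD);
	writel(1, my_timer_base + MY_TIMER_CTRL);
	return 0;
}

static int my_timer_set_next_event(unsigned long delta,
				   struct clock_event_device *evt)
{
	/* Oneshot: program the delta and enable for a single expiry. */
	writel(delta, my_timer_base + MY_TIMER_LOAD);
	writel(1, my_timer_base + MY_TIMER_CTRL);
	return 0;
}

static struct clock_event_device my_clockevent = {
	.name			= "my-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_state_shutdown	= my_timer_shutdown,
	.set_state_periodic	= my_timer_set_periodic,
	.set_state_oneshot	= my_timer_shutdown,	/* armed later by set_next_event */
	.tick_resume		= my_timer_shutdown,
	.set_next_event		= my_timer_set_next_event,
};

static void __init my_timer_clockevent_init(void)
{
	/* Registration is unchanged by the conversion. */
	clockevents_config_and_register(&my_clockevent, 508469, 1, 0xffffffff);
}

The interrupt handler side is unaffected; only the mode bookkeeping moves from a switch on enum clock_event_mode into the separate callbacks, which is why each converted driver above can drop its set_mode() function entirely.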
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 0e571f1749d6..c393b1b0310d 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -22,7 +22,6 @@ #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/sys_soc.h> -#include <linux/timex.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/gpio.h> @@ -38,6 +37,7 @@ #include <linux/irqchip/arm-vic.h> #include <linux/reboot.h> #include <linux/usb/ohci_pdriver.h> +#include <linux/random.h> #include <mach/hardware.h> #include <linux/platform_data/video-ep93xx.h> @@ -47,7 +47,6 @@ #include <asm/mach/arch.h> #include <asm/mach/map.h> -#include <asm/mach/time.h> #include "soc.h" @@ -73,113 +72,6 @@ void __init ep93xx_map_io(void) iotable_init(ep93xx_io_desc, ARRAY_SIZE(ep93xx_io_desc)); } - -/************************************************************************* - * Timer handling for EP93xx - ************************************************************************* - * The ep93xx has four internal timers. Timers 1, 2 (both 16 bit) and - * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate - * an interrupt on underflow. Timer 4 (40 bit) counts down at 983.04 kHz, - * is free-running, and can't generate interrupts. - * - * The 508 kHz timers are ideal for use for the timer interrupt, as the - * most common values of HZ divide 508 kHz nicely. We pick one of the 16 - * bit timers (timer 1) since we don't need more than 16 bits of reload - * value as long as HZ >= 8. - * - * The higher clock rate of timer 4 makes it a better choice than the - * other timers for use in gettimeoffset(), while the fact that it can't - * generate interrupts means we don't have to worry about not being able - * to use this timer for something else. We also use timer 4 for keeping - * track of lost jiffies. 
- */ -#define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x)) -#define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00) -#define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04) -#define EP93XX_TIMER1_CONTROL EP93XX_TIMER_REG(0x08) -#define EP93XX_TIMER123_CONTROL_ENABLE (1 << 7) -#define EP93XX_TIMER123_CONTROL_MODE (1 << 6) -#define EP93XX_TIMER123_CONTROL_CLKSEL (1 << 3) -#define EP93XX_TIMER1_CLEAR EP93XX_TIMER_REG(0x0c) -#define EP93XX_TIMER2_LOAD EP93XX_TIMER_REG(0x20) -#define EP93XX_TIMER2_VALUE EP93XX_TIMER_REG(0x24) -#define EP93XX_TIMER2_CONTROL EP93XX_TIMER_REG(0x28) -#define EP93XX_TIMER2_CLEAR EP93XX_TIMER_REG(0x2c) -#define EP93XX_TIMER4_VALUE_LOW EP93XX_TIMER_REG(0x60) -#define EP93XX_TIMER4_VALUE_HIGH EP93XX_TIMER_REG(0x64) -#define EP93XX_TIMER4_VALUE_HIGH_ENABLE (1 << 8) -#define EP93XX_TIMER3_LOAD EP93XX_TIMER_REG(0x80) -#define EP93XX_TIMER3_VALUE EP93XX_TIMER_REG(0x84) -#define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88) -#define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c) - -#define EP93XX_TIMER123_CLOCK 508469 -#define EP93XX_TIMER4_CLOCK 983040 - -#define TIMER1_RELOAD ((EP93XX_TIMER123_CLOCK / HZ) - 1) -#define TIMER4_TICKS_PER_JIFFY DIV_ROUND_CLOSEST(EP93XX_TIMER4_CLOCK, HZ) - -static unsigned int last_jiffy_time; - -static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id) -{ - /* Writing any value clears the timer interrupt */ - __raw_writel(1, EP93XX_TIMER1_CLEAR); - - /* Recover lost jiffies */ - while ((signed long) - (__raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time) - >= TIMER4_TICKS_PER_JIFFY) { - last_jiffy_time += TIMER4_TICKS_PER_JIFFY; - timer_tick(); - } - - return IRQ_HANDLED; -} - -static struct irqaction ep93xx_timer_irq = { - .name = "ep93xx timer", - .flags = IRQF_TIMER | IRQF_IRQPOLL, - .handler = ep93xx_timer_interrupt, -}; - -static u32 ep93xx_gettimeoffset(void) -{ - int offset; - - offset = __raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time; - - /* - * Timer 4 is based on a 983.04 kHz reference clock, - * so dividing by 983040 gives the fraction of a second, - * so dividing by 0.983040 converts to uS. - * Refactor the calculation to avoid overflow. - * Finally, multiply by 1000 to give nS. - */ - return (offset + (53 * offset / 3072)) * 1000; -} - -void __init ep93xx_timer_init(void) -{ - u32 tmode = EP93XX_TIMER123_CONTROL_MODE | - EP93XX_TIMER123_CONTROL_CLKSEL; - - arch_gettimeoffset = ep93xx_gettimeoffset; - - /* Enable periodic HZ timer. */ - __raw_writel(tmode, EP93XX_TIMER1_CONTROL); - __raw_writel(TIMER1_RELOAD, EP93XX_TIMER1_LOAD); - __raw_writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE, - EP93XX_TIMER1_CONTROL); - - /* Enable lost jiffy timer. 
*/ - __raw_writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE, - EP93XX_TIMER4_VALUE_HIGH); - - setup_irq(IRQ_EP93XX_TIMER1, &ep93xx_timer_irq); -} - - /************************************************************************* * EP93xx IRQ handling *************************************************************************/ @@ -971,6 +863,12 @@ static const char __init *ep93xx_get_soc_id(void) if (id != id2) return "invalid"; + /* Toss the unique ID into the entropy pool */ + add_device_randomness(&id2, 4); + add_device_randomness(&id3, 4); + add_device_randomness(&id4, 4); + add_device_randomness(&id5, 4); + snprintf(ep93xx_soc_id, sizeof(ep93xx_soc_id), "%08x%08x%08x%08x", id2, id3, id4, id5); diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c index 27b14ae92c7e..ad92d9f7e4df 100644 --- a/arch/arm/mach-ep93xx/edb93xx.c +++ b/arch/arm/mach-ep93xx/edb93xx.c @@ -205,8 +205,6 @@ static void __init edb93xx_register_pwm(void) * EDB93xx framebuffer *************************************************************************/ static struct ep93xxfb_mach_info __initdata edb93xxfb_info = { - .num_modes = EP93XXFB_USE_MODEDB, - .bpp = 16, .flags = 0, }; diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c index 3c950f5864f3..7bb540c421ee 100644 --- a/arch/arm/mach-ep93xx/simone.c +++ b/arch/arm/mach-ep93xx/simone.c @@ -40,8 +40,6 @@ static struct ep93xx_eth_data __initdata simone_eth_data = { }; static struct ep93xxfb_mach_info __initdata simone_fb_info = { - .num_modes = EP93XXFB_USE_MODEDB, - .bpp = 16, .flags = EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING, }; @@ -169,6 +167,7 @@ static struct spi_board_info simone_spi_devices[] __initdata = { static struct ep93xx_spi_info simone_spi_info __initdata = { .num_chipselect = ARRAY_SIZE(simone_spi_devices), + .use_dma = 1, }; static struct i2c_gpio_platform_data __initdata simone_i2c_gpio_data = { diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c index aa86f86638dd..c4904264256a 100644 --- a/arch/arm/mach-ep93xx/snappercl15.c +++ b/arch/arm/mach-ep93xx/snappercl15.c @@ -144,8 +144,6 @@ static struct i2c_board_info __initdata snappercl15_i2c_data[] = { }; static struct ep93xxfb_mach_info __initdata snappercl15_fb_info = { - .num_modes = EP93XXFB_USE_MODEDB, - .bpp = 16, }; static struct platform_device snappercl15_audio_device = { diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c new file mode 100644 index 000000000000..e5f791145bd0 --- /dev/null +++ b/arch/arm/mach-ep93xx/timer-ep93xx.c @@ -0,0 +1,143 @@ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/sched_clock.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <asm/mach/time.h> +#include "soc.h" + +/************************************************************************* + * Timer handling for EP93xx + ************************************************************************* + * The ep93xx has four internal timers. Timers 1, 2 (both 16 bit) and + * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate + * an interrupt on underflow. Timer 4 (40 bit) counts down at 983.04 kHz, + * is free-running, and can't generate interrupts. + * + * The 508 kHz timers are ideal for use for the timer interrupt, as the + * most common values of HZ divide 508 kHz nicely. 
We pick the 32 bit + * timer (timer 3) to get as long sleep intervals as possible when using + * CONFIG_NO_HZ. + * + * The higher clock rate of timer 4 makes it a better choice than the + * other timers for use as clock source and for sched_clock(), providing + * a stable 40 bit time base. + ************************************************************************* + */ +#define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x)) +#define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00) +#define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04) +#define EP93XX_TIMER1_CONTROL EP93XX_TIMER_REG(0x08) +#define EP93XX_TIMER123_CONTROL_ENABLE (1 << 7) +#define EP93XX_TIMER123_CONTROL_MODE (1 << 6) +#define EP93XX_TIMER123_CONTROL_CLKSEL (1 << 3) +#define EP93XX_TIMER1_CLEAR EP93XX_TIMER_REG(0x0c) +#define EP93XX_TIMER2_LOAD EP93XX_TIMER_REG(0x20) +#define EP93XX_TIMER2_VALUE EP93XX_TIMER_REG(0x24) +#define EP93XX_TIMER2_CONTROL EP93XX_TIMER_REG(0x28) +#define EP93XX_TIMER2_CLEAR EP93XX_TIMER_REG(0x2c) +#define EP93XX_TIMER4_VALUE_LOW EP93XX_TIMER_REG(0x60) +#define EP93XX_TIMER4_VALUE_HIGH EP93XX_TIMER_REG(0x64) +#define EP93XX_TIMER4_VALUE_HIGH_ENABLE (1 << 8) +#define EP93XX_TIMER3_LOAD EP93XX_TIMER_REG(0x80) +#define EP93XX_TIMER3_VALUE EP93XX_TIMER_REG(0x84) +#define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88) +#define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c) + +#define EP93XX_TIMER123_RATE 508469 +#define EP93XX_TIMER4_RATE 983040 + +static u64 notrace ep93xx_read_sched_clock(void) +{ + u64 ret; + + ret = readl(EP93XX_TIMER4_VALUE_LOW); + ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32); + return ret; +} + +cycle_t ep93xx_clocksource_read(struct clocksource *c) +{ + u64 ret; + + ret = readl(EP93XX_TIMER4_VALUE_LOW); + ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32); + return (cycle_t) ret; +} + +static int ep93xx_clkevt_set_next_event(unsigned long next, + struct clock_event_device *evt) +{ + /* Default mode: periodic, off, 508 kHz */ + u32 tmode = EP93XX_TIMER123_CONTROL_MODE | + EP93XX_TIMER123_CONTROL_CLKSEL; + + /* Clear timer */ + writel(tmode, EP93XX_TIMER3_CONTROL); + + /* Set next event */ + writel(next, EP93XX_TIMER3_LOAD); + writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE, + EP93XX_TIMER3_CONTROL); + return 0; +} + + +static int ep93xx_clkevt_shutdown(struct clock_event_device *evt) +{ + /* Disable timer */ + writel(0, EP93XX_TIMER3_CONTROL); + + return 0; +} + +static struct clock_event_device ep93xx_clockevent = { + .name = "timer1", + .features = CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = ep93xx_clkevt_shutdown, + .set_state_oneshot = ep93xx_clkevt_shutdown, + .tick_resume = ep93xx_clkevt_shutdown, + .set_next_event = ep93xx_clkevt_set_next_event, + .rating = 300, +}; + +static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + /* Writing any value clears the timer interrupt */ + writel(1, EP93XX_TIMER3_CLEAR); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct irqaction ep93xx_timer_irq = { + .name = "ep93xx timer", + .flags = IRQF_TIMER | IRQF_IRQPOLL, + .handler = ep93xx_timer_interrupt, + .dev_id = &ep93xx_clockevent, +}; + +void __init ep93xx_timer_init(void) +{ + /* Enable and register clocksource and sched_clock on timer 4 */ + writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE, + EP93XX_TIMER4_VALUE_HIGH); + clocksource_mmio_init(NULL, "timer4", + EP93XX_TIMER4_RATE, 200, 40, + ep93xx_clocksource_read); + sched_clock_register(ep93xx_read_sched_clock, 40, + EP93XX_TIMER4_RATE); + + /* Set up 
clockevent on timer 3 */ + setup_irq(IRQ_EP93XX_TIMER3, &ep93xx_timer_irq); + clockevents_config_and_register(&ep93xx_clockevent, + EP93XX_TIMER123_RATE, + 1, + 0xffffffffU); +} diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c index 6bc1c181581d..5cced5988498 100644 --- a/arch/arm/mach-ep93xx/vision_ep9307.c +++ b/arch/arm/mach-ep93xx/vision_ep9307.c @@ -29,6 +29,8 @@ #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> +#include <sound/cs4271.h> + #include <mach/hardware.h> #include <linux/platform_data/video-ep93xx.h> #include <linux/platform_data/spi-ep93xx.h> @@ -104,8 +106,6 @@ static void vision_lcd_blank(int blank_mode, struct fb_info *info) } static struct ep93xxfb_mach_info ep93xxfb_info __initdata = { - .num_modes = EP93XXFB_USE_MODEDB, - .bpp = 16, .flags = EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING, .setup = vision_lcd_setup, .teardown = vision_lcd_teardown, @@ -169,6 +169,35 @@ static struct i2c_board_info vision_i2c_info[] __initdata = { }; /************************************************************************* + * SPI CS4271 Audio Codec + *************************************************************************/ +static struct cs4271_platform_data vision_cs4271_data = { + .gpio_nreset = EP93XX_GPIO_LINE_H(2), +}; + +static int vision_cs4271_hw_setup(struct spi_device *spi) +{ + return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6, + GPIOF_OUT_INIT_HIGH, spi->modalias); +} + +static void vision_cs4271_hw_cleanup(struct spi_device *spi) +{ + gpio_free(EP93XX_GPIO_LINE_EGPIO6); +} + +static void vision_cs4271_hw_cs_control(struct spi_device *spi, int value) +{ + gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value); +} + +static struct ep93xx_spi_chip_ops vision_cs4271_hw = { + .setup = vision_cs4271_hw_setup, + .cleanup = vision_cs4271_hw_cleanup, + .cs_control = vision_cs4271_hw_cs_control, +}; + +/************************************************************************* * SPI Flash *************************************************************************/ #define VISION_SPI_FLASH_CS EP93XX_GPIO_LINE_EGPIO7 @@ -262,12 +291,20 @@ static struct ep93xx_spi_chip_ops vision_spi_mmc_hw = { *************************************************************************/ static struct spi_board_info vision_spi_board_info[] __initdata = { { + .modalias = "cs4271", + .platform_data = &vision_cs4271_data, + .controller_data = &vision_cs4271_hw, + .max_speed_hz = 6000000, + .bus_num = 0, + .chip_select = 0, + .mode = SPI_MODE_3, + }, { .modalias = "sst25l", .platform_data = &vision_spi_flash_data, .controller_data = &vision_spi_flash_hw, .max_speed_hz = 20000000, .bus_num = 0, - .chip_select = 0, + .chip_select = 1, .mode = SPI_MODE_3, }, { .modalias = "mmc_spi", @@ -275,16 +312,31 @@ static struct spi_board_info vision_spi_board_info[] __initdata = { .controller_data = &vision_spi_mmc_hw, .max_speed_hz = 20000000, .bus_num = 0, - .chip_select = 1, + .chip_select = 2, .mode = SPI_MODE_3, }, }; static struct ep93xx_spi_info vision_spi_master __initdata = { - .num_chipselect = ARRAY_SIZE(vision_spi_board_info), + .num_chipselect = ARRAY_SIZE(vision_spi_board_info), + .use_dma = 1, }; /************************************************************************* + * I2S Audio + *************************************************************************/ +static struct platform_device vision_audio_device = { + .name = "edb93xx-audio", + .id = -1, +}; + +static void __init vision_register_i2s(void) +{ + ep93xx_register_i2s(); + 
platform_device_register(&vision_audio_device); +} + +/************************************************************************* * Machine Initialization *************************************************************************/ static void __init vision_init_machine(void) @@ -309,6 +361,7 @@ static void __init vision_init_machine(void) ARRAY_SIZE(vision_i2c_info)); ep93xx_register_spi(&vision_spi_master, vision_spi_board_info, ARRAY_SIZE(vision_spi_board_info)); + vision_register_i2s(); } MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307") diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index 81064cd61a0a..4c4858c566d8 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -30,6 +30,11 @@ menuconfig ARCH_EXYNOS if ARCH_EXYNOS +config S5P_DEV_MFC + bool + help + Compile in setup memory (init) code for MFC + config ARCH_EXYNOS3 bool "SAMSUNG EXYNOS3" select ARM_CPU_SUSPEND if PM diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile index bcefb5473ee4..2f306767cdfe 100644 --- a/arch/arm/mach-exynos/Makefile +++ b/arch/arm/mach-exynos/Makefile @@ -23,3 +23,5 @@ AFLAGS_sleep.o :=-Wa,-march=armv7-a$(plus_sec) obj-$(CONFIG_EXYNOS5420_MCPM) += mcpm-exynos.o CFLAGS_mcpm-exynos.o += -march=armv7-a + +obj-$(CONFIG_S5P_DEV_MFC) += s5p-dev-mfc.o diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index e3a9256ed55f..153492513c40 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h @@ -128,6 +128,12 @@ void exynos_firmware_init(void); /* CPU BOOT mode flag for Exynos3250 SoC bootloader */ #define C2_STATE (1 << 3) +/* + * Magic values for bootloader indicating chosen low power mode. + * See also Documentation/arm/Samsung/Bootloader-interface.txt + */ +#define EXYNOS_SLEEP_MAGIC 0x00000bad +#define EXYNOS_AFTR_MAGIC 0xfcba0d10 void exynos_set_boot_flag(unsigned int cpu, unsigned int mode); void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode); diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c index 245f6dec1ded..111cfbf66fdb 100644 --- a/arch/arm/mach-exynos/firmware.c +++ b/arch/arm/mach-exynos/firmware.c @@ -25,8 +25,6 @@ #include "common.h" #include "smc.h" -#define EXYNOS_SLEEP_MAGIC 0x00000bad -#define EXYNOS_AFTR_MAGIC 0xfcba0d10 #define EXYNOS_BOOT_ADDR 0x8 #define EXYNOS_BOOT_FLAG 0xc diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 58e05a2eae57..98a2c0cbb833 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -182,7 +182,7 @@ static inline void __iomem *cpu_boot_reg(int cpu) boot_reg = cpu_boot_reg_base(); if (!boot_reg) - return ERR_PTR(-ENODEV); + return IOMEM_ERR_PTR(-ENODEV); if (soc_is_exynos4412()) boot_reg += 4*cpu; else if (soc_is_exynos5420() || soc_is_exynos5800()) diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 6001f1c9d136..4a87e86dec45 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void) pd->base = of_iomap(np, 0); if (!pd->base) { pr_warn("%s: failed to map memory\n", __func__); - kfree(pd->pd.name); + kfree_const(pd->pd.name); kfree(pd); - of_node_put(np); continue; } diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c index e812c1c85624..de68938ee6aa 100644 --- a/arch/arm/mach-exynos/pmu.c +++ b/arch/arm/mach-exynos/pmu.c @@ -698,7 +698,7 @@ static void exynos_power_off(void) ; } 
-void exynos5420_powerdown_conf(enum sys_powerdown mode) +static void exynos5420_powerdown_conf(enum sys_powerdown mode) { u32 this_cluster; @@ -991,7 +991,6 @@ static int exynos_pmu_probe(struct platform_device *pdev) static struct platform_driver exynos_pmu_driver = { .driver = { .name = "exynos-pmu", - .owner = THIS_MODULE, .of_match_table = exynos_pmu_of_device_ids, }, .probe = exynos_pmu_probe, diff --git a/arch/arm/plat-samsung/include/plat/regs-srom.h b/arch/arm/mach-exynos/regs-srom.h index 9b6729c81cda..5c4d4427db7b 100644 --- a/arch/arm/plat-samsung/include/plat/regs-srom.h +++ b/arch/arm/mach-exynos/regs-srom.h @@ -1,5 +1,4 @@ -/* linux/arch/arm/plat-samsung/include/plat/regs-srom.h - * +/* * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * diff --git a/arch/arm/plat-samsung/s5p-dev-mfc.c b/arch/arm/mach-exynos/s5p-dev-mfc.c index 0b04b6b0fa30..0b04b6b0fa30 100644 --- a/arch/arm/plat-samsung/s5p-dev-mfc.c +++ b/arch/arm/mach-exynos/s5p-dev-mfc.c diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index f572219c7a40..e00eb39453a4 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -32,13 +32,11 @@ #include <asm/suspend.h> #include <plat/pm-common.h> -#include <plat/regs-srom.h> #include "common.h" -#include "regs-pmu.h" #include "exynos-pmu.h" - -#define S5P_CHECK_SLEEP 0x00000BAD +#include "regs-pmu.h" +#include "regs-srom.h" #define REG_TABLE_END (-1U) @@ -331,7 +329,7 @@ static void exynos_pm_enter_sleep_mode(void) { /* Set value of power down register for sleep mode */ exynos_sys_powerdown_conf(SYS_SLEEP); - pmu_raw_writel(S5P_CHECK_SLEEP, S5P_INFORM1); + pmu_raw_writel(EXYNOS_SLEEP_MAGIC, S5P_INFORM1); } static void exynos_pm_prepare(void) diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index 9e8220e38398..0f0c9e040fcc 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c @@ -106,7 +106,7 @@ static void __init __fb_init_irq(void) for (irq = _DC21285_IRQ(0); irq < _DC21285_IRQ(20); irq++) { irq_set_chip_and_handler(irq, &fb_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } } diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c index bf7aa7d298e7..810edc78c817 100644 --- a/arch/arm/mach-footbridge/dc21285-timer.c +++ b/arch/arm/mach-footbridge/dc21285-timer.c @@ -57,34 +57,32 @@ static int ckevt_dc21285_set_next_event(unsigned long delta, return 0; } -static void ckevt_dc21285_set_mode(enum clock_event_mode mode, - struct clock_event_device *c) +static int ckevt_dc21285_shutdown(struct clock_event_device *c) { - switch (mode) { - case CLOCK_EVT_MODE_RESUME: - case CLOCK_EVT_MODE_PERIODIC: - *CSR_TIMER1_CLR = 0; - *CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ); - *CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | - TIMER_CNTL_DIV16; - break; - - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - *CSR_TIMER1_CNTL = 0; - break; - } + *CSR_TIMER1_CNTL = 0; + return 0; +} + +static int ckevt_dc21285_set_periodic(struct clock_event_device *c) +{ + *CSR_TIMER1_CLR = 0; + *CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ); + *CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | + TIMER_CNTL_DIV16; + return 0; } static struct clock_event_device ckevt_dc21285 = { - .name = "dc21285_timer1", - .features = CLOCK_EVT_FEAT_PERIODIC | - 
CLOCK_EVT_FEAT_ONESHOT, - .rating = 200, - .irq = IRQ_TIMER1, - .set_next_event = ckevt_dc21285_set_next_event, - .set_mode = ckevt_dc21285_set_mode, + .name = "dc21285_timer1", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .irq = IRQ_TIMER1, + .set_next_event = ckevt_dc21285_set_next_event, + .set_state_shutdown = ckevt_dc21285_shutdown, + .set_state_periodic = ckevt_dc21285_set_periodic, + .set_state_oneshot = ckevt_dc21285_shutdown, + .tick_resume = ckevt_dc21285_set_periodic, }; static irqreturn_t timer1_interrupt(int irq, void *dev_id) @@ -94,7 +92,7 @@ static irqreturn_t timer1_interrupt(int irq, void *dev_id) *CSR_TIMER1_CLR = 0; /* Stop the timer if in one-shot mode */ - if (ce->mode == CLOCK_EVT_MODE_ONESHOT) + if (clockevent_state_oneshot(ce)) *CSR_TIMER1_CNTL = 0; ce->event_handler(ce); diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c index c3a0abbc9049..fcd79bc3a3e1 100644 --- a/arch/arm/mach-footbridge/isa-irq.c +++ b/arch/arm/mach-footbridge/isa-irq.c @@ -153,13 +153,13 @@ void __init isa_init_irq(unsigned int host_irq) for (irq = _ISA_IRQ(0); irq < _ISA_IRQ(8); irq++) { irq_set_chip_and_handler(irq, &isa_lo_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } for (irq = _ISA_IRQ(8); irq < _ISA_IRQ(16); irq++) { irq_set_chip_and_handler(irq, &isa_hi_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } request_resource(&ioport_resource, &pic1_resource); @@ -175,8 +175,8 @@ void __init isa_init_irq(unsigned int host_irq) * resistor on this line. */ if (machine_is_netwinder()) - set_irq_flags(_ISA_IRQ(11), IRQF_VALID | - IRQF_PROBE | IRQF_NOAUTOEN); + irq_modify_status(_ISA_IRQ(11), + IRQ_NOREQUEST | IRQ_NOPROBE, IRQ_NOAUTOEN); } } diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c index 3292f2e6ed6f..220333ed741d 100644 --- a/arch/arm/mach-gemini/gpio.c +++ b/arch/arm/mach-gemini/gpio.c @@ -220,7 +220,7 @@ void __init gemini_gpio_init(void) j < GPIO_IRQ_BASE + (i + 1) * 32; j++) { irq_set_chip_and_handler(j, &gpio_irq_chip, handle_edge_irq); - set_irq_flags(j, IRQF_VALID); + irq_clear_status_flags(j, IRQ_NOREQUEST); } irq_set_chained_handler_and_data(IRQ_GPIO(i), gpio_irq_handler, diff --git a/arch/arm/mach-gemini/include/mach/hardware.h b/arch/arm/mach-gemini/include/mach/hardware.h index 98e7b0f286bf..f0390f184742 100644 --- a/arch/arm/mach-gemini/include/mach/hardware.h +++ b/arch/arm/mach-gemini/include/mach/hardware.h @@ -57,9 +57,6 @@ #define GEMINI_USB1_BASE 0x69000000 #define GEMINI_BIG_ENDIAN_BASE 0x80000000 -#define GEMINI_TIMER1_BASE GEMINI_TIMER_BASE -#define GEMINI_TIMER2_BASE (GEMINI_TIMER_BASE + 0x10) -#define GEMINI_TIMER3_BASE (GEMINI_TIMER_BASE + 0x20) /* * UART Clock when System clk is 150MHz diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c index 44f50dcb616d..d929b3ff18fd 100644 --- a/arch/arm/mach-gemini/irq.c +++ b/arch/arm/mach-gemini/irq.c @@ -92,7 +92,7 @@ void __init gemini_init_irq(void) } else { irq_set_handler(i, handle_level_irq); } - set_irq_flags(i, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE); } /* Disable all interrupts */ diff --git a/arch/arm/mach-gemini/time.c b/arch/arm/mach-gemini/time.c index 0a63c4d25b64..f5f18df5aacd 100644 --- a/arch/arm/mach-gemini/time.c +++ b/arch/arm/mach-gemini/time.c @@ -15,93 +15,147 @@ #include 
<asm/mach/time.h> #include <linux/clockchips.h> #include <linux/clocksource.h> +#include <linux/sched_clock.h> /* * Register definitions for the timers */ -#define TIMER_COUNT(BASE_ADDR) (BASE_ADDR + 0x00) -#define TIMER_LOAD(BASE_ADDR) (BASE_ADDR + 0x04) -#define TIMER_MATCH1(BASE_ADDR) (BASE_ADDR + 0x08) -#define TIMER_MATCH2(BASE_ADDR) (BASE_ADDR + 0x0C) -#define TIMER_CR(BASE_ADDR) (BASE_ADDR + 0x30) - -#define TIMER_1_CR_ENABLE (1 << 0) -#define TIMER_1_CR_CLOCK (1 << 1) -#define TIMER_1_CR_INT (1 << 2) -#define TIMER_2_CR_ENABLE (1 << 3) -#define TIMER_2_CR_CLOCK (1 << 4) -#define TIMER_2_CR_INT (1 << 5) -#define TIMER_3_CR_ENABLE (1 << 6) -#define TIMER_3_CR_CLOCK (1 << 7) -#define TIMER_3_CR_INT (1 << 8) + +#define TIMER1_BASE GEMINI_TIMER_BASE +#define TIMER2_BASE (GEMINI_TIMER_BASE + 0x10) +#define TIMER3_BASE (GEMINI_TIMER_BASE + 0x20) + +#define TIMER_COUNT(BASE) (IO_ADDRESS(BASE) + 0x00) +#define TIMER_LOAD(BASE) (IO_ADDRESS(BASE) + 0x04) +#define TIMER_MATCH1(BASE) (IO_ADDRESS(BASE) + 0x08) +#define TIMER_MATCH2(BASE) (IO_ADDRESS(BASE) + 0x0C) +#define TIMER_CR (IO_ADDRESS(GEMINI_TIMER_BASE) + 0x30) +#define TIMER_INTR_STATE (IO_ADDRESS(GEMINI_TIMER_BASE) + 0x34) +#define TIMER_INTR_MASK (IO_ADDRESS(GEMINI_TIMER_BASE) + 0x38) + +#define TIMER_1_CR_ENABLE (1 << 0) +#define TIMER_1_CR_CLOCK (1 << 1) +#define TIMER_1_CR_INT (1 << 2) +#define TIMER_2_CR_ENABLE (1 << 3) +#define TIMER_2_CR_CLOCK (1 << 4) +#define TIMER_2_CR_INT (1 << 5) +#define TIMER_3_CR_ENABLE (1 << 6) +#define TIMER_3_CR_CLOCK (1 << 7) +#define TIMER_3_CR_INT (1 << 8) +#define TIMER_1_CR_UPDOWN (1 << 9) +#define TIMER_2_CR_UPDOWN (1 << 10) +#define TIMER_3_CR_UPDOWN (1 << 11) +#define TIMER_DEFAULT_FLAGS (TIMER_1_CR_UPDOWN | \ + TIMER_3_CR_ENABLE | \ + TIMER_3_CR_UPDOWN) + +#define TIMER_1_INT_MATCH1 (1 << 0) +#define TIMER_1_INT_MATCH2 (1 << 1) +#define TIMER_1_INT_OVERFLOW (1 << 2) +#define TIMER_2_INT_MATCH1 (1 << 3) +#define TIMER_2_INT_MATCH2 (1 << 4) +#define TIMER_2_INT_OVERFLOW (1 << 5) +#define TIMER_3_INT_MATCH1 (1 << 6) +#define TIMER_3_INT_MATCH2 (1 << 7) +#define TIMER_3_INT_OVERFLOW (1 << 8) +#define TIMER_INT_ALL_MASK 0x1ff + static unsigned int tick_rate; +static u64 notrace gemini_read_sched_clock(void) +{ + return readl(TIMER_COUNT(TIMER3_BASE)); +} + static int gemini_timer_set_next_event(unsigned long cycles, struct clock_event_device *evt) { u32 cr; - cr = readl(TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE))); + /* Setup the match register */ + cr = readl(TIMER_COUNT(TIMER1_BASE)); + writel(cr + cycles, TIMER_MATCH1(TIMER1_BASE)); + if (readl(TIMER_COUNT(TIMER1_BASE)) - cr > cycles) + return -ETIME; - /* This may be overdoing it, feel free to test without this */ - cr &= ~TIMER_2_CR_ENABLE; - cr &= ~TIMER_2_CR_INT; - writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE))); + return 0; +} - /* Set next event */ - writel(cycles, TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER2_BASE))); - writel(cycles, TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER2_BASE))); - cr |= TIMER_2_CR_ENABLE; - cr |= TIMER_2_CR_INT; - writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE))); +static int gemini_timer_shutdown(struct clock_event_device *evt) +{ + u32 cr; + + /* + * Disable also for oneshot: the set_next() call will arm the timer + * instead. + */ + /* Stop timer and interrupt. 
*/ + cr = readl(TIMER_CR); + cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT); + writel(cr, TIMER_CR); + + /* Setup counter start from 0 */ + writel(0, TIMER_COUNT(TIMER1_BASE)); + writel(0, TIMER_LOAD(TIMER1_BASE)); + + /* enable interrupt */ + cr = readl(TIMER_INTR_MASK); + cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2); + cr |= TIMER_1_INT_MATCH1; + writel(cr, TIMER_INTR_MASK); + + /* start the timer */ + cr = readl(TIMER_CR); + cr |= TIMER_1_CR_ENABLE; + writel(cr, TIMER_CR); return 0; } -static void gemini_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int gemini_timer_set_periodic(struct clock_event_device *evt) { u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ); u32 cr; - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* Start the timer */ - writel(period, - TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER2_BASE))); - writel(period, - TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER2_BASE))); - cr = readl(TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE))); - cr |= TIMER_2_CR_ENABLE; - cr |= TIMER_2_CR_INT; - writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE))); - break; - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - /* - * Disable also for oneshot: the set_next() call will - * arm the timer instead. - */ - cr = readl(TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE))); - cr &= ~TIMER_2_CR_ENABLE; - cr &= ~TIMER_2_CR_INT; - writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE))); - break; - default: - break; - } + /* Stop timer and interrupt */ + cr = readl(TIMER_CR); + cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT); + writel(cr, TIMER_CR); + + /* Setup timer to fire at 1/HT intervals. */ + cr = 0xffffffff - (period - 1); + writel(cr, TIMER_COUNT(TIMER1_BASE)); + writel(cr, TIMER_LOAD(TIMER1_BASE)); + + /* enable interrupt on overflow */ + cr = readl(TIMER_INTR_MASK); + cr &= ~(TIMER_1_INT_MATCH1 | TIMER_1_INT_MATCH2); + cr |= TIMER_1_INT_OVERFLOW; + writel(cr, TIMER_INTR_MASK); + + /* Start the timer */ + cr = readl(TIMER_CR); + cr |= TIMER_1_CR_ENABLE; + cr |= TIMER_1_CR_INT; + writel(cr, TIMER_CR); + + return 0; } -/* Use TIMER2 as clock event */ +/* Use TIMER1 as clock event */ static struct clock_event_device gemini_clockevent = { - .name = "TIMER2", - .rating = 300, /* Reasonably fast and accurate clock event */ - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = gemini_timer_set_next_event, - .set_mode = gemini_timer_set_mode, + .name = "TIMER1", + /* Reasonably fast and accurate clock event */ + .rating = 300, + .shift = 32, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = gemini_timer_set_next_event, + .set_state_shutdown = gemini_timer_shutdown, + .set_state_periodic = gemini_timer_set_periodic, + .set_state_oneshot = gemini_timer_shutdown, + .tick_resume = gemini_timer_shutdown, }; /* @@ -151,20 +205,35 @@ void __init gemini_timer_init(void) } /* - * Make irqs happen for the system timer + * Reset the interrupt mask and status */ - setup_irq(IRQ_TIMER2, &gemini_timer_irq); - - /* Enable and use TIMER1 as clock source */ - writel(0xffffffff, TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER1_BASE))); - writel(0xffffffff, TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER1_BASE))); - writel(TIMER_1_CR_ENABLE, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE))); - if (clocksource_mmio_init(TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER1_BASE)), - "TIMER1", tick_rate, 300, 32, - clocksource_mmio_readl_up)) - pr_err("timer: failed to initialize gemini clock source\n"); - - /* Configure and register the 
clockevent */ + writel(TIMER_INT_ALL_MASK, TIMER_INTR_MASK); + writel(0, TIMER_INTR_STATE); + writel(TIMER_DEFAULT_FLAGS, TIMER_CR); + + /* + * Setup free-running clocksource timer (interrupts + * disabled.) + */ + writel(0, TIMER_COUNT(TIMER3_BASE)); + writel(0, TIMER_LOAD(TIMER3_BASE)); + writel(0, TIMER_MATCH1(TIMER3_BASE)); + writel(0, TIMER_MATCH2(TIMER3_BASE)); + clocksource_mmio_init(TIMER_COUNT(TIMER3_BASE), + "gemini_clocksource", tick_rate, + 300, 32, clocksource_mmio_readl_up); + sched_clock_register(gemini_read_sched_clock, 32, tick_rate); + + /* + * Setup clockevent timer (interrupt-driven.) + */ + writel(0, TIMER_COUNT(TIMER1_BASE)); + writel(0, TIMER_LOAD(TIMER1_BASE)); + writel(0, TIMER_MATCH1(TIMER1_BASE)); + writel(0, TIMER_MATCH2(TIMER1_BASE)); + setup_irq(IRQ_TIMER1, &gemini_timer_irq); + gemini_clockevent.cpumask = cpumask_of(0); clockevents_config_and_register(&gemini_clockevent, tick_rate, 1, 0xffffffff); + } diff --git a/arch/arm/mach-hisi/hisilicon.c b/arch/arm/mach-hisi/hisilicon.c index c6bd7c7bd4aa..8cc62150116a 100644 --- a/arch/arm/mach-hisi/hisilicon.c +++ b/arch/arm/mach-hisi/hisilicon.c @@ -11,7 +11,6 @@ * published by the Free Software Foundation. */ -#include <linux/clk-provider.h> #include <linux/clocksource.h> #include <linux/irqchip.h> diff --git a/arch/arm/mach-imx/3ds_debugboard.c b/arch/arm/mach-imx/3ds_debugboard.c index 134377352966..45903be6e7b3 100644 --- a/arch/arm/mach-imx/3ds_debugboard.c +++ b/arch/arm/mach-imx/3ds_debugboard.c @@ -195,7 +195,7 @@ int __init mxc_expio_init(u32 base, u32 intr_gpio) for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) { irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq); - set_irq_flags(i, IRQF_VALID); + irq_clear_status_flags(i, IRQ_NOREQUEST); } irq_set_irq_type(p_irq, IRQF_TRIGGER_LOW); irq_set_chained_handler(p_irq, mxc_expio_irq_handler); diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 573536f1bb73..8ceda2844c4f 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig @@ -548,6 +548,14 @@ config SOC_IMX6SX help This enables support for Freescale i.MX6 SoloX processor. +config SOC_IMX6UL + bool "i.MX6 UltraLite support" + select PINCTRL_IMX6UL + select SOC_IMX6 + + help + This enables support for Freescale i.MX6 UltraLite processor. 
+ config SOC_IMX7D bool "i.MX7 Dual support" select PINCTRL_IMX7D diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 37c502ac9595..fb689d813b09 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -83,6 +83,7 @@ endif obj-$(CONFIG_SOC_IMX6Q) += mach-imx6q.o obj-$(CONFIG_SOC_IMX6SL) += mach-imx6sl.o obj-$(CONFIG_SOC_IMX6SX) += mach-imx6sx.o +obj-$(CONFIG_SOC_IMX6UL) += mach-imx6ul.o obj-$(CONFIG_SOC_IMX7D) += mach-imx7d.o ifeq ($(CONFIG_SUSPEND),y) diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c index a7fa92a7b1d7..5b0f752d5507 100644 --- a/arch/arm/mach-imx/cpu.c +++ b/arch/arm/mach-imx/cpu.c @@ -130,6 +130,9 @@ struct device * __init imx_soc_device_init(void) case MXC_CPU_IMX6Q: soc_id = "i.MX6Q"; break; + case MXC_CPU_IMX6UL: + soc_id = "i.MX6UL"; + break; case MXC_CPU_IMX7D: soc_id = "i.MX7D"; break; diff --git a/arch/arm/mach-imx/epit.c b/arch/arm/mach-imx/epit.c index 074b1a81ba76..08ce20771bb3 100644 --- a/arch/arm/mach-imx/epit.c +++ b/arch/arm/mach-imx/epit.c @@ -57,7 +57,6 @@ #include "hardware.h" static struct clock_event_device clockevent_epit; -static enum clock_event_mode clockevent_mode = CLOCK_EVT_MODE_UNUSED; static void __iomem *timer_base; @@ -106,8 +105,8 @@ static int epit_set_next_event(unsigned long evt, return 0; } -static void epit_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +/* Left event sources disabled, no more interrupts appear */ +static int epit_shutdown(struct clock_event_device *evt) { unsigned long flags; @@ -120,39 +119,41 @@ static void epit_set_mode(enum clock_event_mode mode, /* Disable interrupt in GPT module */ epit_irq_disable(); - if (mode != clockevent_mode) { - /* Set event time into far-far future */ - - /* Clear pending interrupt */ - epit_irq_acknowledge(); - } + /* Clear pending interrupt */ + epit_irq_acknowledge(); - /* Remember timer mode */ - clockevent_mode = mode; local_irq_restore(flags); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - printk(KERN_ERR "epit_set_mode: Periodic mode is not " - "supported for i.MX EPIT\n"); - break; - case CLOCK_EVT_MODE_ONESHOT: + return 0; +} + +static int epit_set_oneshot(struct clock_event_device *evt) +{ + unsigned long flags; + + /* + * The timer interrupt generation is disabled at least + * for enough time to call epit_set_next_event() + */ + local_irq_save(flags); + + /* Disable interrupt in GPT module */ + epit_irq_disable(); + + /* Clear pending interrupt, only while switching mode */ + if (!clockevent_state_oneshot(evt)) + epit_irq_acknowledge(); + /* * Do not put overhead of interrupt enable/disable into * epit_set_next_event(), the core has about 4 minutes * to call epit_set_next_event() or shutdown clock after * mode switching */ - local_irq_save(flags); - epit_irq_enable(); - local_irq_restore(flags); - break; - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_RESUME: - /* Left event sources disabled, no more interrupts appear */ - break; - } + epit_irq_enable(); + local_irq_restore(flags); + + return 0; } /* @@ -176,11 +177,13 @@ static struct irqaction epit_timer_irq = { }; static struct clock_event_device clockevent_epit = { - .name = "epit", - .features = CLOCK_EVT_FEAT_ONESHOT, - .set_mode = epit_set_mode, - .set_next_event = epit_set_next_event, - .rating = 200, + .name = "epit", + .features = CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = epit_shutdown, + .tick_resume = epit_shutdown, + .set_state_oneshot = epit_set_oneshot, + .set_next_event = 
epit_set_next_event, + .rating = 200, }; static int __init epit_clockevent_init(struct clk *timer_clk) diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 80bad29d609a..8c4467fad837 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void) } } -#ifdef CONFIG_PM_GENERIC_DOMAINS - static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd) { int iso, iso2sw; @@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc_onecell_data = { static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg) { struct clk *clk; - bool is_off; int i; imx6q_pu_domain.reg = pu_reg; @@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg) } imx6q_pu_domain.num_clks = i; - is_off = IS_ENABLED(CONFIG_PM); - if (is_off) { - _imx6q_pm_pu_power_off(&imx6q_pu_domain.base); - } else { - /* - * Enable power if compiled without CONFIG_PM in case the - * bootloader disabled it. - */ - imx6q_pm_pu_power_on(&imx6q_pu_domain.base); - } + /* Enable power always in case bootloader disabled it. */ + imx6q_pm_pu_power_on(&imx6q_pu_domain.base); + + if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) + return 0; - pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off); + pm_genpd_init(&imx6q_pu_domain.base, NULL, false); return of_genpd_add_provider_onecell(dev->of_node, &imx_gpc_onecell_data); @@ -437,13 +429,6 @@ clk_err: return -EINVAL; } -#else -static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg) -{ - return 0; -} -#endif /* CONFIG_PM_GENERIC_DOMAINS */ - static int imx_gpc_probe(struct platform_device *pdev) { struct regulator *pu_reg; diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c new file mode 100644 index 000000000000..1b97fe133cef --- /dev/null +++ b/arch/arm/mach-imx/mach-imx6ul.c @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2015 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/irqchip.h> +#include <linux/mfd/syscon.h> +#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> +#include <linux/micrel_phy.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/regmap.h> +#include <asm/mach/arch.h> +#include <asm/mach/map.h> + +#include "common.h" + +static void __init imx6ul_enet_clk_init(void) +{ + struct regmap *gpr; + + gpr = syscon_regmap_lookup_by_compatible("fsl,imx6ul-iomuxc-gpr"); + if (!IS_ERR(gpr)) + regmap_update_bits(gpr, IOMUXC_GPR1, IMX6UL_GPR1_ENET_CLK_DIR, + IMX6UL_GPR1_ENET_CLK_OUTPUT); + else + pr_err("failed to find fsl,imx6ul-iomux-gpr regmap\n"); + +} + +static int ksz8081_phy_fixup(struct phy_device *dev) +{ + if (dev && dev->interface == PHY_INTERFACE_MODE_MII) { + phy_write(dev, 0x1f, 0x8110); + phy_write(dev, 0x16, 0x201); + } else if (dev && dev->interface == PHY_INTERFACE_MODE_RMII) { + phy_write(dev, 0x1f, 0x8190); + phy_write(dev, 0x16, 0x202); + } + + return 0; +} + +static void __init imx6ul_enet_phy_init(void) +{ + if (IS_BUILTIN(CONFIG_PHYLIB)) + phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff, + ksz8081_phy_fixup); +} + +static inline void imx6ul_enet_init(void) +{ + imx6ul_enet_clk_init(); + imx6ul_enet_phy_init(); +} + +static void __init imx6ul_init_machine(void) +{ + struct device *parent; + + parent = imx_soc_device_init(); + if (parent == NULL) + pr_warn("failed to initialize soc device\n"); + + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + imx6ul_enet_init(); + imx_anatop_init(); +} + +static void __init imx6ul_init_irq(void) +{ + imx_init_revision_from_anatop(); + imx_src_init(); + irqchip_init(); +} + +static const char *imx6ul_dt_compat[] __initconst = { + "fsl,imx6ul", + NULL, +}; + +DT_MACHINE_START(IMX6UL, "Freescale i.MX6 Ultralite (Device Tree)") + .init_irq = imx6ul_init_irq, + .init_machine = imx6ul_init_machine, + .dt_compat = imx6ul_dt_compat, +MACHINE_END diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c index 4d4a19099a43..62f3437257f1 100644 --- a/arch/arm/mach-imx/mach-imx7d.c +++ b/arch/arm/mach-imx/mach-imx7d.c @@ -31,7 +31,7 @@ static void __init imx7d_init_irq(void) irqchip_init(); } -static const char *imx7d_dt_compat[] __initconst = { +static const char *const imx7d_dt_compat[] __initconst = { "fsl,imx7d", NULL, }; diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c index d08c37c696f6..2c0853560bd2 100644 --- a/arch/arm/mach-imx/mach-mx31ads.c +++ b/arch/arm/mach-imx/mach-mx31ads.c @@ -238,7 +238,7 @@ static void __init mx31ads_init_expio(void) for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) { irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq); - set_irq_flags(i, IRQF_VALID); + irq_clear_status_flags(i, IRQ_NOREQUEST); } irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_4)); irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH); diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h index c4436d4fd6fd..a5b1af6d7441 100644 --- a/arch/arm/mach-imx/mxc.h +++ b/arch/arm/mach-imx/mxc.h @@ -38,6 +38,7 @@ #define MXC_CPU_IMX6DL 0x61 #define MXC_CPU_IMX6SX 0x62 #define MXC_CPU_IMX6Q 0x63 +#define MXC_CPU_IMX6UL 0x64 #define MXC_CPU_IMX7D 0x72 #define IMX_DDR_TYPE_LPDDR2 1 @@ -165,6 +166,11 @@ static inline bool cpu_is_imx6sx(void) return __mxc_cpu_type == MXC_CPU_IMX6SX; } +static inline bool cpu_is_imx6ul(void) +{ + return __mxc_cpu_type == MXC_CPU_IMX6UL; +} + static inline bool cpu_is_imx6q(void) { return __mxc_cpu_type == MXC_CPU_IMX6Q; diff --git 
a/arch/arm/mach-iop13xx/irq.c b/arch/arm/mach-iop13xx/irq.c index bc739701c301..623d85a4af2d 100644 --- a/arch/arm/mach-iop13xx/irq.c +++ b/arch/arm/mach-iop13xx/irq.c @@ -233,7 +233,7 @@ void __init iop13xx_init_irq(void) irq_set_chip(i, &iop13xx_irqchip4); irq_set_handler(i, handle_level_irq); - set_irq_flags(i, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE); } iop13xx_msi_init(); diff --git a/arch/arm/mach-iop32x/irq.c b/arch/arm/mach-iop32x/irq.c index d7ee2789d890..2d1f69a68cbc 100644 --- a/arch/arm/mach-iop32x/irq.c +++ b/arch/arm/mach-iop32x/irq.c @@ -69,6 +69,6 @@ void __init iop32x_init_irq(void) for (i = 0; i < NR_IRQS; i++) { irq_set_chip_and_handler(i, &ext_chip, handle_level_irq); - set_irq_flags(i, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE); } } diff --git a/arch/arm/mach-iop33x/irq.c b/arch/arm/mach-iop33x/irq.c index f7f5d3e451c7..c99ec8d0d285 100644 --- a/arch/arm/mach-iop33x/irq.c +++ b/arch/arm/mach-iop33x/irq.c @@ -113,6 +113,6 @@ void __init iop33x_init_irq(void) irq_set_chip_and_handler(i, (i < 32) ? &iop33x_irqchip1 : &iop33x_irqchip2, handle_level_irq); - set_irq_flags(i, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE); } } diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 8537d4c41e34..1cb6f2f02880 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -296,7 +296,7 @@ void __init ixp4xx_init_irq(void) for(i = 0; i < NR_IRQS; i++) { irq_set_chip_and_handler(i, &ixp4xx_irq_chip, handle_level_irq); - set_irq_flags(i, IRQF_VALID); + irq_clear_status_flags(i, IRQ_NOREQUEST); } } @@ -521,43 +521,55 @@ static int ixp4xx_set_next_event(unsigned long evt, return 0; } -static void ixp4xx_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int ixp4xx_shutdown(struct clock_event_device *evt) { unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK; unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK; - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - osrt = IXP4XX_LATCH & ~IXP4XX_OST_RELOAD_MASK; - opts = IXP4XX_OST_ENABLE; - break; - case CLOCK_EVT_MODE_ONESHOT: - /* period set by 'set next_event' */ - osrt = 0; - opts = IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT; - break; - case CLOCK_EVT_MODE_SHUTDOWN: - opts &= ~IXP4XX_OST_ENABLE; - break; - case CLOCK_EVT_MODE_RESUME: - opts |= IXP4XX_OST_ENABLE; - break; - case CLOCK_EVT_MODE_UNUSED: - default: - osrt = opts = 0; - break; - } + opts &= ~IXP4XX_OST_ENABLE; + *IXP4XX_OSRT1 = osrt | opts; + return 0; +} +static int ixp4xx_set_oneshot(struct clock_event_device *evt) +{ + unsigned long opts = IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT; + unsigned long osrt = 0; + + /* period set by 'set next_event' */ *IXP4XX_OSRT1 = osrt | opts; + return 0; +} + +static int ixp4xx_set_periodic(struct clock_event_device *evt) +{ + unsigned long opts = IXP4XX_OST_ENABLE; + unsigned long osrt = IXP4XX_LATCH & ~IXP4XX_OST_RELOAD_MASK; + + *IXP4XX_OSRT1 = osrt | opts; + return 0; +} + +static int ixp4xx_resume(struct clock_event_device *evt) +{ + unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK; + unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK; + + opts |= IXP4XX_OST_ENABLE; + *IXP4XX_OSRT1 = osrt | opts; + return 0; } static struct clock_event_device clockevent_ixp4xx = { - .name = "ixp4xx timer1", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .rating = 200, - .set_mode = ixp4xx_set_mode, - 
.set_next_event = ixp4xx_set_next_event, + .name = "ixp4xx timer1", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_state_shutdown = ixp4xx_shutdown, + .set_state_periodic = ixp4xx_set_periodic, + .set_state_oneshot = ixp4xx_set_oneshot, + .tick_resume = ixp4xx_resume, + .set_next_event = ixp4xx_set_next_event, }; static void __init ixp4xx_clockevent_init(void) diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c index edea697e8253..e283939a216f 100644 --- a/arch/arm/mach-keystone/pm_domain.c +++ b/arch/arm/mach-keystone/pm_domain.c @@ -16,7 +16,6 @@ #include <linux/pm_runtime.h> #include <linux/pm_clock.h> #include <linux/platform_device.h> -#include <linux/clk-provider.h> #include <linux/of.h> static struct dev_pm_domain keystone_pm_domain = { diff --git a/arch/arm/mach-ks8695/irq.c b/arch/arm/mach-ks8695/irq.c index 76802aac0f45..31439f2ee21e 100644 --- a/arch/arm/mach-ks8695/irq.c +++ b/arch/arm/mach-ks8695/irq.c @@ -172,6 +172,6 @@ void __init ks8695_init_irq(void) handle_edge_irq); } - set_irq_flags(irq, IRQF_VALID); + irq_clear_status_flags(irq, IRQ_NOREQUEST); } } diff --git a/arch/arm/mach-ks8695/time.c b/arch/arm/mach-ks8695/time.c index a197874bf382..18eb0fbd8d82 100644 --- a/arch/arm/mach-ks8695/time.c +++ b/arch/arm/mach-ks8695/time.c @@ -54,28 +54,25 @@ /* Timer0 Timeout Counter Register */ #define T0TC_WATCHDOG (0xff) /* Enable watchdog mode */ -static void ks8695_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int ks8695_set_periodic(struct clock_event_device *evt) { + u32 rate = DIV_ROUND_CLOSEST(KS8695_CLOCK_RATE, HZ); + u32 half = DIV_ROUND_CLOSEST(rate, 2); u32 tmcon; - if (mode == CLOCK_EVT_FEAT_PERIODIC) { - u32 rate = DIV_ROUND_CLOSEST(KS8695_CLOCK_RATE, HZ); - u32 half = DIV_ROUND_CLOSEST(rate, 2); - - /* Disable timer 1 */ - tmcon = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON); - tmcon &= ~TMCON_T1EN; - writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON); + /* Disable timer 1 */ + tmcon = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON); + tmcon &= ~TMCON_T1EN; + writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON); - /* Both registers need to count down */ - writel_relaxed(half, KS8695_TMR_VA + KS8695_T1TC); - writel_relaxed(half, KS8695_TMR_VA + KS8695_T1PD); + /* Both registers need to count down */ + writel_relaxed(half, KS8695_TMR_VA + KS8695_T1TC); + writel_relaxed(half, KS8695_TMR_VA + KS8695_T1PD); - /* Re-enable timer1 */ - tmcon |= TMCON_T1EN; - writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON); - } + /* Re-enable timer1 */ + tmcon |= TMCON_T1EN; + writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON); + return 0; } static int ks8695_set_next_event(unsigned long cycles, @@ -102,11 +99,13 @@ static int ks8695_set_next_event(unsigned long cycles, } static struct clock_event_device clockevent_ks8695 = { - .name = "ks8695_t1tc", - .rating = 300, /* Reasonably fast and accurate clock event */ - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .set_next_event = ks8695_set_next_event, - .set_mode = ks8695_set_mode, + .name = "ks8695_t1tc", + /* Reasonably fast and accurate clock event */ + .rating = 300, + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .set_next_event = ks8695_set_next_event, + .set_state_periodic = ks8695_set_periodic, }; /* diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c index d4f7dc87042b..cce4cef12b6e 100644 --- a/arch/arm/mach-lpc32xx/irq.c +++ b/arch/arm/mach-lpc32xx/irq.c @@ -283,25 
+283,25 @@ static int lpc32xx_set_irq_type(struct irq_data *d, unsigned int type) case IRQ_TYPE_EDGE_RISING: /* Rising edge sensitive */ __lpc32xx_set_irq_type(d->hwirq, 1, 1); - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: /* Falling edge sensitive */ __lpc32xx_set_irq_type(d->hwirq, 0, 1); - __irq_set_handler_locked(d->irq, handle_edge_irq); + irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_LEVEL_LOW: /* Low level sensitive */ __lpc32xx_set_irq_type(d->hwirq, 0, 0); - __irq_set_handler_locked(d->irq, handle_level_irq); + irq_set_handler_locked(d, handle_level_irq); break; case IRQ_TYPE_LEVEL_HIGH: /* High level sensitive */ __lpc32xx_set_irq_type(d->hwirq, 1, 0); - __irq_set_handler_locked(d->irq, handle_level_irq); + irq_set_handler_locked(d, handle_level_irq); break; /* Other modes are not supported */ @@ -434,7 +434,7 @@ void __init lpc32xx_init_irq(void) for (i = 0; i < NR_IRQS; i++) { irq_set_chip_and_handler(i, &lpc32xx_irq_chip, handle_level_irq); - set_irq_flags(i, IRQF_VALID); + irq_clear_status_flags(i, IRQ_NOREQUEST); } /* Set default mappings */ diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c index 7858d5b6f6ce..77d6b1bab278 100644 --- a/arch/arm/mach-lpc32xx/phy3250.c +++ b/arch/arm/mach-lpc32xx/phy3250.c @@ -212,7 +212,7 @@ static struct lpc32xx_mlc_platform_data lpc32xx_mlc_data = { .dma_filter = pl08x_filter_id, }; -static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = { +static const struct of_dev_auxdata const lpc32xx_auxdata_lookup[] __initconst = { OF_DEV_AUXDATA("arm,pl022", 0x20084000, "dev:ssp0", NULL), OF_DEV_AUXDATA("arm,pl022", 0x2008C000, "dev:ssp1", NULL), OF_DEV_AUXDATA("arm,pl110", 0x31040000, "dev:clcd", &lpc32xx_clcd_data), @@ -248,7 +248,7 @@ static void __init lpc3250_machine_init(void) lpc32xx_auxdata_lookup, NULL); } -static char const *lpc32xx_dt_compat[] __initdata = { +static const char *const lpc32xx_dt_compat[] __initconst = { "nxp,lpc3220", "nxp,lpc3230", "nxp,lpc3240", diff --git a/arch/arm/mach-lpc32xx/timer.c b/arch/arm/mach-lpc32xx/timer.c index 4e5837299c04..ff3499d1fb1a 100644 --- a/arch/arm/mach-lpc32xx/timer.c +++ b/arch/arm/mach-lpc32xx/timer.c @@ -43,36 +43,24 @@ static int lpc32xx_clkevt_next_event(unsigned long delta, return 0; } -static void lpc32xx_clkevt_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int lpc32xx_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - WARN_ON(1); - break; - - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_SHUTDOWN: - /* - * Disable the timer. When using oneshot, we must also - * disable the timer to wait for the first call to - * set_next_event(). - */ - __raw_writel(0, LPC32XX_TIMER_TCR(LPC32XX_TIMER0_BASE)); - break; - - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_RESUME: - break; - } + /* + * Disable the timer. When using oneshot, we must also + * disable the timer to wait for the first call to + * set_next_event(). 
+ */ + __raw_writel(0, LPC32XX_TIMER_TCR(LPC32XX_TIMER0_BASE)); + return 0; } static struct clock_event_device lpc32xx_clkevt = { - .name = "lpc32xx_clkevt", - .features = CLOCK_EVT_FEAT_ONESHOT, - .rating = 300, - .set_next_event = lpc32xx_clkevt_next_event, - .set_mode = lpc32xx_clkevt_mode, + .name = "lpc32xx_clkevt", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 300, + .set_next_event = lpc32xx_clkevt_next_event, + .set_state_shutdown = lpc32xx_shutdown, + .set_state_oneshot = lpc32xx_shutdown, }; static irqreturn_t lpc32xx_timer_interrupt(int irq, void *dev_id) diff --git a/arch/arm/mach-mediatek/Kconfig b/arch/arm/mach-mediatek/Kconfig index 9f59e58da3a4..aeece17e5cea 100644 --- a/arch/arm/mach-mediatek/Kconfig +++ b/arch/arm/mach-mediatek/Kconfig @@ -3,6 +3,7 @@ menuconfig ARCH_MEDIATEK select ARM_GIC select PINCTRL select MTK_TIMER + select MFD_SYSCON help Support for Mediatek MT65xx & MT81xx SoCs diff --git a/arch/arm/mach-mmp/mmp-dt.c b/arch/arm/mach-mmp/mmp-dt.c index b2296c9309b8..6e155f03b83c 100644 --- a/arch/arm/mach-mmp/mmp-dt.c +++ b/arch/arm/mach-mmp/mmp-dt.c @@ -20,12 +20,12 @@ extern void __init mmp_dt_init_timer(void); -static const char *pxa168_dt_board_compat[] __initdata = { +static const char *const pxa168_dt_board_compat[] __initconst = { "mrvl,pxa168-aspenite", NULL, }; -static const char *pxa910_dt_board_compat[] __initdata = { +static const char *const pxa910_dt_board_compat[] __initconst = { "mrvl,pxa910-dkb", NULL, }; diff --git a/arch/arm/mach-mmp/mmp2-dt.c b/arch/arm/mach-mmp/mmp2-dt.c index 998c0f533abc..0341359b24a4 100644 --- a/arch/arm/mach-mmp/mmp2-dt.c +++ b/arch/arm/mach-mmp/mmp2-dt.c @@ -30,7 +30,7 @@ static void __init mmp_init_time(void) of_clk_init(NULL); } -static const char *mmp2_dt_board_compat[] __initdata = { +static const char *const mmp2_dt_board_compat[] __initconst = { "mrvl,mmp2-brownstone", NULL, }; diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 10bfa03e58d4..dbc697b2fda1 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -124,32 +124,25 @@ static int timer_set_next_event(unsigned long delta, return 0; } -static void timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int timer_set_shutdown(struct clock_event_device *evt) { unsigned long flags; local_irq_save(flags); - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - /* disable the matching interrupt */ - __raw_writel(0x00, mmp_timer_base + TMR_IER(0)); - break; - case CLOCK_EVT_MODE_RESUME: - case CLOCK_EVT_MODE_PERIODIC: - break; - } + /* disable the matching interrupt */ + __raw_writel(0x00, mmp_timer_base + TMR_IER(0)); local_irq_restore(flags); + + return 0; } static struct clock_event_device ckevt = { - .name = "clockevent", - .features = CLOCK_EVT_FEAT_ONESHOT, - .rating = 200, - .set_next_event = timer_set_next_event, - .set_mode = timer_set_mode, + .name = "clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_next_event = timer_set_next_event, + .set_state_shutdown = timer_set_shutdown, + .set_state_oneshot = timer_set_shutdown, }; static cycle_t clksrc_read(struct clocksource *cs) diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c index afee9083ad92..9f739f3cad4c 100644 --- a/arch/arm/mach-mvebu/board-v7.c +++ b/arch/arm/mach-mvebu/board-v7.c @@ -14,7 +14,6 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <linux/clk-provider.h> #include <linux/of_address.h> #include 
<linux/of_fdt.h> #include <linux/of_platform.h> diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c index 4f4e22206ae5..e8fdb9ceedf0 100644 --- a/arch/arm/mach-mvebu/pmsu.c +++ b/arch/arm/mach-mvebu/pmsu.c @@ -415,7 +415,7 @@ static __init int armada_38x_cpuidle_init(void) void __iomem *mpsoc_base; u32 reg; - pr_warn("CPU idle is currently broken on Armada 38x: disabling"); + pr_warn("CPU idle is currently broken on Armada 38x: disabling\n"); return 0; np = of_find_compatible_node(NULL, NULL, @@ -486,7 +486,7 @@ static int __init mvebu_v7_cpu_pm_init(void) */ if (of_machine_is_compatible("marvell,armada380")) { cpu_hotplug_disable(); - pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling"); + pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling\n"); } if (of_machine_is_compatible("marvell,armadaxp")) diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 2e7cec86e50e..f1ea4700efcf 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -282,7 +282,7 @@ static void __init apx4devkit_init(void) #define TX28_FEC_PHY_RESET MXS_GPIO_NR(4, 13) #define TX28_FEC_nINT MXS_GPIO_NR(4, 5) -static const struct gpio tx28_gpios[] __initconst = { +static const struct gpio const tx28_gpios[] __initconst = { { ENET0_MDC__GPIO_4_0, GPIOF_OUT_INIT_LOW, "GPIO_4_0" }, { ENET0_MDIO__GPIO_4_1, GPIOF_OUT_INIT_LOW, "GPIO_4_1" }, { ENET0_RX_EN__GPIO_4_2, GPIOF_OUT_INIT_LOW, "GPIO_4_2" }, @@ -528,7 +528,7 @@ static void mxs_restart(enum reboot_mode mode, const char *cmd) soft_restart(0); } -static const char *mxs_dt_compat[] __initdata = { +static const char *const mxs_dt_compat[] __initconst = { "fsl,imx28", "fsl,imx23", NULL, diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c index db25b0cef3a7..6373e2bff203 100644 --- a/arch/arm/mach-netx/generic.c +++ b/arch/arm/mach-netx/generic.c @@ -174,7 +174,7 @@ void __init netx_init_irq(void) for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) { irq_set_chip_and_handler(irq, &netx_hif_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID); + irq_clear_status_flags(irq, IRQ_NOREQUEST); } writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN); diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c index 5fb2a590ec17..054a8a61e379 100644 --- a/arch/arm/mach-netx/time.c +++ b/arch/arm/mach-netx/time.c @@ -34,40 +34,40 @@ #define TIMER_CLOCKEVENT 0 #define TIMER_CLOCKSOURCE 1 -static void netx_set_mode(enum clock_event_mode mode, - struct clock_event_device *clk) +static inline void timer_shutdown(struct clock_event_device *evt) { - u32 tmode; - /* disable timer */ writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT)); +} + +static int netx_shutdown(struct clock_event_device *evt) +{ + timer_shutdown(evt); + + return 0; +} + +static int netx_set_oneshot(struct clock_event_device *evt) +{ + u32 tmode = NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN; + + timer_shutdown(evt); + writel(0, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT)); + writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT)); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT)); - tmode = NETX_GPIO_COUNTER_CTRL_RST_EN | - NETX_GPIO_COUNTER_CTRL_IRQ_EN | - NETX_GPIO_COUNTER_CTRL_RUN; - break; - - case CLOCK_EVT_MODE_ONESHOT: - writel(0, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT)); - tmode = NETX_GPIO_COUNTER_CTRL_IRQ_EN | - NETX_GPIO_COUNTER_CTRL_RUN; - break; - - default: - WARN(1, "%s: unhandled 
mode %d\n", __func__, mode); - /* fall through */ - - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_RESUME: - tmode = 0; - break; - } + return 0; +} +static int netx_set_periodic(struct clock_event_device *evt) +{ + u32 tmode = NETX_GPIO_COUNTER_CTRL_RST_EN | + NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN; + + timer_shutdown(evt); + writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT)); writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT)); + + return 0; } static int netx_set_next_event(unsigned long evt, @@ -81,7 +81,10 @@ static struct clock_event_device netx_clockevent = { .name = "netx-timer" __stringify(TIMER_CLOCKEVENT), .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_next_event = netx_set_next_event, - .set_mode = netx_set_mode, + .set_state_shutdown = netx_shutdown, + .set_state_periodic = netx_set_periodic, + .set_state_oneshot = netx_set_oneshot, + .tick_resume = netx_shutdown, }; /* diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c index 9bda46f1fab7..0c612d95bd5c 100644 --- a/arch/arm/mach-nomadik/cpu-8815.c +++ b/arch/arm/mach-nomadik/cpu-8815.c @@ -26,10 +26,8 @@ #include <linux/irq.h> #include <linux/dma-mapping.h> #include <linux/of_irq.h> -#include <linux/of_gpio.h> #include <linux/of_address.h> #include <linux/of_platform.h> -#include <linux/gpio.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -109,40 +107,6 @@ static void cpu8815_restart(enum reboot_mode mode, const char *cmd) writel(1, srcbase + 0x18); } -/* - * This GPIO pin turns on a line that is used to detect card insertion - * on this board. - */ -static int __init cpu8815_mmcsd_init(void) -{ - struct device_node *cdbias; - int gpio, err; - - cdbias = of_find_node_by_path("/usb-s8815/mmcsd-gpio"); - if (!cdbias) { - pr_info("could not find MMC/SD card detect bias node\n"); - return 0; - } - gpio = of_get_gpio(cdbias, 0); - if (gpio < 0) { - pr_info("could not obtain MMC/SD card detect bias GPIO\n"); - return 0; - } - err = gpio_request(gpio, "card detect bias"); - if (err) { - pr_info("failed to request card detect bias GPIO %d\n", gpio); - return -ENODEV; - } - err = gpio_direction_output(gpio, 0); - if (err){ - pr_info("failed to set GPIO %d as output, low\n", gpio); - return err; - } - pr_info("enabled USB-S8815 CD bias GPIO %d, low\n", gpio); - return 0; -} -device_initcall(cpu8815_mmcsd_init); - static const char * cpu8815_board_compat[] = { "st,nomadik-nhk-15", "calaosystems,usb-s8815", @@ -150,9 +114,8 @@ static const char * cpu8815_board_compat[] = { }; DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815") - /* At full speed latency must be >=2, so 0x249 in low bits */ - .l2c_aux_val = 0x00700249, - .l2c_aux_mask = 0xfe0fefff, + .l2c_aux_val = 0, + .l2c_aux_mask = ~0, .map_io = cpu8815_map_io, .restart = cpu8815_restart, .dt_compat = cpu8815_board_compat, diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c index 3c0e42219200..dfec671b1639 100644 --- a/arch/arm/mach-omap1/fpga.c +++ b/arch/arm/mach-omap1/fpga.c @@ -169,7 +169,7 @@ void omap1510_fpga_init_irq(void) } irq_set_handler(i, handle_edge_irq); - set_irq_flags(i, IRQF_VALID); + irq_clear_status_flags(i, IRQ_NOREQUEST); } /* diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c index f4d346fda9da..b11edc8a46f0 100644 --- a/arch/arm/mach-omap1/irq.c +++ b/arch/arm/mach-omap1/irq.c @@ -262,7 +262,7 @@ void __init omap1_init_irq(void) irq_trigger = irq_banks[i].trigger_map >> IRQ_BIT(j); omap_irq_set_cfg(j, 0, 0, 
irq_trigger); - set_irq_flags(j, IRQF_VALID); + irq_clear_status_flags(j, IRQ_NOREQUEST); } omap_alloc_gc(irq_banks[i].va, irq_base + i * 32, 32); } diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index a7588cfd0286..524977a31a49 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -124,29 +124,26 @@ static int omap_mpu_set_next_event(unsigned long cycles, return 0; } -static void omap_mpu_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int omap_mpu_set_oneshot(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - omap_mpu_set_autoreset(0); - break; - case CLOCK_EVT_MODE_ONESHOT: - omap_mpu_timer_stop(0); - omap_mpu_remove_autoreset(0); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - break; - } + omap_mpu_timer_stop(0); + omap_mpu_remove_autoreset(0); + return 0; +} + +static int omap_mpu_set_periodic(struct clock_event_device *evt) +{ + omap_mpu_set_autoreset(0); + return 0; } static struct clock_event_device clockevent_mpu_timer1 = { - .name = "mpu_timer1", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = omap_mpu_set_next_event, - .set_mode = omap_mpu_set_mode, + .name = "mpu_timer1", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = omap_mpu_set_next_event, + .set_state_periodic = omap_mpu_set_periodic, + .set_state_oneshot = omap_mpu_set_oneshot, }; static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id) diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c index 36bf174b3fac..0ae6c52a7d70 100644 --- a/arch/arm/mach-omap1/timer32k.c +++ b/arch/arm/mach-omap1/timer32k.c @@ -114,29 +114,28 @@ static int omap_32k_timer_set_next_event(unsigned long delta, return 0; } -static void omap_32k_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int omap_32k_timer_shutdown(struct clock_event_device *evt) { omap_32k_timer_stop(); + return 0; +} - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD); - break; - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - break; - case CLOCK_EVT_MODE_RESUME: - break; - } +static int omap_32k_timer_set_periodic(struct clock_event_device *evt) +{ + omap_32k_timer_stop(); + omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD); + return 0; } static struct clock_event_device clockevent_32k_timer = { - .name = "32k-timer", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = omap_32k_timer_set_next_event, - .set_mode = omap_32k_timer_set_mode, + .name = "32k-timer", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = omap_32k_timer_set_next_event, + .set_state_shutdown = omap_32k_timer_shutdown, + .set_state_periodic = omap_32k_timer_set_periodic, + .set_state_oneshot = omap_32k_timer_shutdown, + .tick_resume = omap_32k_timer_shutdown, }; static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id) diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index ecc04ff13e95..9e2a68456b81 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -60,6 +60,7 @@ config SOC_AM43XX select ARM_GIC select MACH_OMAP_GENERIC select MIGHT_HAVE_CACHE_L2X0 + select HAVE_ARM_SCU config SOC_DRA7XX bool "TI DRA7XX" @@ -177,26 +178,6 @@ config MACH_OMAP_LDP default y select OMAP_PACKAGE_CBB -config 
MACH_OMAP3530_LV_SOM - bool "OMAP3 Logic 3530 LV SOM board" - depends on ARCH_OMAP3 - default y - select OMAP_PACKAGE_CBB - help - Support for the LogicPD OMAP3530 SOM Development kit - for full description please see the products webpage at - http://www.logicpd.com/products/development-kits/texas-instruments-zoom%E2%84%A2-omap35x-development-kit - -config MACH_OMAP3_TORPEDO - bool "OMAP3 Logic 35x Torpedo board" - depends on ARCH_OMAP3 - default y - select OMAP_PACKAGE_CBB - help - Support for the LogicPD OMAP35x Torpedo Development kit - for full description please see the products webpage at - http://www.logicpd.com/products/development-kits/zoom-omap35x-torpedo-development-kit - config MACH_OMAP3517EVM bool "OMAP3517/ AM3517 EVM board" depends on ARCH_OMAP3 diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 903c85be2897..935869698cbc 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -12,8 +12,7 @@ obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o timer.o pm.o \ hwmod-common = omap_hwmod.o omap_hwmod_reset.o \ omap_hwmod_common_data.o -clock-common = clock.o clock_common_data.o \ - clkt_dpll.o clkt_clksel.o +clock-common = clock.o secure-common = omap-smc.o omap-secure.o obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) @@ -182,24 +181,17 @@ obj-$(CONFIG_SOC_DRA7XX) += $(clockdomain-common) obj-$(CONFIG_SOC_DRA7XX) += clockdomains7xx_data.o # Clock framework -obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o +obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpllcore.o obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_virt_prcm_set.o -obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpll.o clkt_iclk.o -obj-$(CONFIG_SOC_OMAP2430) += clock2430.o -obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o -obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o -obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o -obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o -obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o +obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpll.o +obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) +obj-$(CONFIG_ARCH_OMAP3) += clkt34xx_dpll3m2.o obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) -obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o -obj-$(CONFIG_SOC_AM33XX) += $(clock-common) dpll3xxx.o +obj-$(CONFIG_SOC_AM33XX) += $(clock-common) obj-$(CONFIG_SOC_OMAP5) += $(clock-common) -obj-$(CONFIG_SOC_OMAP5) += dpll3xxx.o dpll44xx.o obj-$(CONFIG_SOC_DRA7XX) += $(clock-common) -obj-$(CONFIG_SOC_DRA7XX) += dpll3xxx.o dpll44xx.o -obj-$(CONFIG_SOC_AM43XX) += $(clock-common) dpll3xxx.o +obj-$(CONFIG_SOC_AM43XX) += $(clock-common) # OMAP2 clock rate set data (old "OPP" data) obj-$(CONFIG_SOC_OMAP2420) += opp2420_data.o @@ -234,8 +226,7 @@ obj-$(CONFIG_SOC_DRA7XX) += omap_hwmod_7xx_data.o # EMU peripherals obj-$(CONFIG_HW_PERF_EVENTS) += pmu.o -iommu-$(CONFIG_OMAP_IOMMU) := omap-iommu.o -obj-y += $(iommu-m) $(iommu-y) +obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o # OMAP2420 MSDI controller integration support ("MMC") obj-$(CONFIG_SOC_OMAP2420) += msdi.o @@ -243,9 +234,6 @@ obj-$(CONFIG_SOC_OMAP2420) += msdi.o # Specific board support obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o pdata-quirks.o obj-$(CONFIG_MACH_OMAP_LDP) += board-ldp.o -obj-$(CONFIG_MACH_OMAP3530_LV_SOM) += board-omap3logic.o -obj-$(CONFIG_MACH_OMAP3_TORPEDO) += board-omap3logic.o -obj-$(CONFIG_MACH_OMAP3_PANDORA) += board-omap3pandora.o obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o sdram-nokia.o 
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-peripherals.o diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 34ff14b7beab..24c9afc9e8a7 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -169,7 +169,7 @@ static const char *const ti814x_boards_compat[] __initconst = { NULL, }; -DT_MACHINE_START(TI81XX_DT, "Generic ti814x (Flattened Device Tree)") +DT_MACHINE_START(TI814X_DT, "Generic ti814x (Flattened Device Tree)") .reserve = omap_reserve, .map_io = ti81xx_map_io, .init_early = ti814x_init_early, @@ -297,7 +297,7 @@ static const char *const dra74x_boards_compat[] __initconst = { DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)") .reserve = omap_reserve, .smp = smp_ops(omap4_smp_ops), - .map_io = omap5_map_io, + .map_io = dra7xx_map_io, .init_early = dra7xx_init_early, .init_late = dra7xx_init_late, .init_irq = omap_gic_of_init, @@ -316,7 +316,7 @@ static const char *const dra72x_boards_compat[] __initconst = { DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)") .reserve = omap_reserve, - .map_io = omap5_map_io, + .map_io = dra7xx_map_io, .init_early = dra7xx_init_early, .init_late = dra7xx_init_late, .init_irq = omap_gic_of_init, diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c deleted file mode 100644 index 6049f60a8813..000000000000 --- a/arch/arm/mach-omap2/board-omap3logic.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * linux/arch/arm/mach-omap2/board-omap3logic.c - * - * Copyright (C) 2010 Li-Pro.Net - * Stephan Linz <linz@li-pro.net> - * - * Copyright (C) 2010-2012 Logic Product Development, Inc. - * Peter Barada <peter.barada@logicpd.com> - * Ashwin BIhari <ashwin.bihari@logicpd.com> - * - * Modified from Beagle, EVM, and RX51 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/gpio.h> - -#include <linux/regulator/fixed.h> -#include <linux/regulator/machine.h> - -#include <linux/i2c/twl.h> -#include <linux/mmc/host.h> -#include <linux/usb/phy.h> - -#include <asm/mach-types.h> -#include <asm/mach/arch.h> -#include <asm/mach/map.h> - -#include "common.h" -#include "mux.h" -#include "hsmmc.h" -#include "control.h" -#include "common-board-devices.h" -#include "gpmc.h" -#include "gpmc-smsc911x.h" - -#define OMAP3LOGIC_SMSC911X_CS 1 - -#define OMAP3530_LV_SOM_MMC_GPIO_CD 110 -#define OMAP3530_LV_SOM_MMC_GPIO_WP 126 -#define OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ 152 - -#define OMAP3_TORPEDO_MMC_GPIO_CD 127 -#define OMAP3_TORPEDO_SMSC911X_GPIO_IRQ 129 - -static struct regulator_consumer_supply omap3logic_vmmc1_supply[] = { - REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"), -}; - -/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */ -static struct regulator_init_data omap3logic_vmmc1 = { - .constraints = { - .name = "VMMC1", - .min_uV = 1850000, - .max_uV = 3150000, - .valid_modes_mask = REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE - | REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(omap3logic_vmmc1_supply), - .consumer_supplies = omap3logic_vmmc1_supply, -}; - -static struct twl4030_gpio_platform_data omap3logic_gpio_data = { - .use_leds = true, - .pullups = BIT(1), - .pulldowns = BIT(2) | BIT(6) | BIT(7) | BIT(8) - | BIT(13) | BIT(15) | BIT(16) | BIT(17), -}; - -static struct twl4030_usb_data omap3logic_usb_data = { - .usb_mode = T2_USB_MODE_ULPI, -}; - - -static struct twl4030_platform_data omap3logic_twldata = { - /* platform_data for children goes here */ - .gpio = &omap3logic_gpio_data, - .vmmc1 = &omap3logic_vmmc1, - .usb = &omap3logic_usb_data, -}; - -static int __init omap3logic_i2c_init(void) -{ - omap3_pmic_init("twl4030", &omap3logic_twldata); - return 0; -} - -static struct omap2_hsmmc_info __initdata board_mmc_info[] = { - { - .name = "external", - .mmc = 1, - .caps = MMC_CAP_4_BIT_DATA, - .gpio_cd = -EINVAL, - .gpio_wp = -EINVAL, - }, - {} /* Terminator */ -}; - -static void __init board_mmc_init(void) -{ - if (machine_is_omap3530_lv_som()) { - /* OMAP3530 LV SOM board */ - board_mmc_info[0].gpio_cd = OMAP3530_LV_SOM_MMC_GPIO_CD; - board_mmc_info[0].gpio_wp = OMAP3530_LV_SOM_MMC_GPIO_WP; - omap_mux_init_signal("gpio_110", OMAP_PIN_OUTPUT); - omap_mux_init_signal("gpio_126", OMAP_PIN_OUTPUT); - } else if (machine_is_omap3_torpedo()) { - /* OMAP3 Torpedo board */ - board_mmc_info[0].gpio_cd = OMAP3_TORPEDO_MMC_GPIO_CD; - omap_mux_init_signal("gpio_127", OMAP_PIN_OUTPUT); - } else { - /* unsupported board */ - printk(KERN_ERR "%s(): unknown machine type\n", __func__); - return; - } - - omap_hsmmc_init(board_mmc_info); -} - -static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = { - .cs = OMAP3LOGIC_SMSC911X_CS, - .gpio_irq = -EINVAL, - .gpio_reset = -EINVAL, -}; - -/* TODO/FIXME (comment by Peter Barada, LogicPD): - * Fix the PBIAS voltage for Torpedo MMC1 pins that - * are used for other needs (IRQs, etc). 
*/ -static void omap3torpedo_fix_pbias_voltage(void) -{ - u16 control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE; - u32 reg; - - if (machine_is_omap3_torpedo()) - { - /* Set the bias for the pin */ - reg = omap_ctrl_readl(control_pbias_offset); - - reg &= ~OMAP343X_PBIASLITEPWRDNZ1; - omap_ctrl_writel(reg, control_pbias_offset); - - /* 100ms delay required for PBIAS configuration */ - msleep(100); - - reg |= OMAP343X_PBIASLITEVMODE1; - reg |= OMAP343X_PBIASLITEPWRDNZ1; - omap_ctrl_writel(reg | 0x300, control_pbias_offset); - } -} - -static inline void __init board_smsc911x_init(void) -{ - if (machine_is_omap3530_lv_som()) { - /* OMAP3530 LV SOM board */ - board_smsc911x_data.gpio_irq = - OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ; - omap_mux_init_signal("gpio_152", OMAP_PIN_INPUT); - } else if (machine_is_omap3_torpedo()) { - /* OMAP3 Torpedo board */ - board_smsc911x_data.gpio_irq = OMAP3_TORPEDO_SMSC911X_GPIO_IRQ; - omap_mux_init_signal("gpio_129", OMAP_PIN_INPUT); - } else { - /* unsupported board */ - printk(KERN_ERR "%s(): unknown machine type\n", __func__); - return; - } - - gpmc_smsc911x_init(&board_smsc911x_data); -} - -#ifdef CONFIG_OMAP_MUX -static struct omap_board_mux board_mux[] __initdata = { - /* mUSB */ - OMAP3_MUX(HSUSB0_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_STP, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), - OMAP3_MUX(HSUSB0_DIR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_NXT, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - OMAP3_MUX(HSUSB0_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - - { .reg_offset = OMAP_MUX_TERMINATOR }, -}; -#endif - -static struct regulator_consumer_supply dummy_supplies[] = { - REGULATOR_SUPPLY("vddvario", "smsc911x.0"), - REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), -}; - -static void __init omap3logic_init(void) -{ - regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); - omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); - omap3torpedo_fix_pbias_voltage(); - omap3logic_i2c_init(); - omap_serial_init(); - omap_sdrc_init(NULL, NULL); - board_mmc_init(); - board_smsc911x_init(); - - usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb"); - usb_musb_init(NULL); - - /* Ensure SDRC pins are mux'd for self-refresh */ - omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); - omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); -} - -MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board") - .atag_offset = 0x100, - .reserve = omap_reserve, - .map_io = omap3_map_io, - .init_early = omap35xx_init_early, - .init_irq = omap3_init_irq, - .init_machine = omap3logic_init, - .init_late = omap35xx_init_late, - .init_time = omap3_sync32k_timer_init, - .restart = omap3xxx_restart, -MACHINE_END - -MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board") - .atag_offset = 0x100, - .reserve = omap_reserve, - .map_io = omap3_map_io, - .init_early = omap35xx_init_early, - .init_irq = omap3_init_irq, - .init_machine = omap3logic_init, - .init_late = omap35xx_init_late, - .init_time = omap3_sync32k_timer_init, - .restart = omap3xxx_restart, -MACHINE_END diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c 
deleted file mode 100644 index 969e1003dd92..000000000000 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ /dev/null @@ -1,633 +0,0 @@ -/* - * board-omap3pandora.c (Pandora Handheld Console) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ - -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/platform_device.h> - -#include <linux/spi/spi.h> -#include <linux/regulator/machine.h> -#include <linux/i2c/twl.h> -#include <linux/omap-gpmc.h> -#include <linux/wl12xx.h> -#include <linux/mtd/partitions.h> -#include <linux/mtd/nand.h> -#include <linux/leds.h> -#include <linux/input.h> -#include <linux/input/matrix_keypad.h> -#include <linux/gpio.h> -#include <linux/gpio_keys.h> -#include <linux/mmc/host.h> -#include <linux/mmc/card.h> -#include <linux/regulator/fixed.h> -#include <linux/usb/phy.h> -#include <linux/platform_data/spi-omap2-mcspi.h> - -#include <asm/mach-types.h> -#include <asm/mach/arch.h> -#include <asm/mach/map.h> - -#include "common.h" -#include <video/omapdss.h> -#include <video/omap-panel-data.h> -#include <linux/platform_data/mtd-nand-omap2.h> - -#include "mux.h" -#include "sdram-micron-mt46h32m32lf-6.h" -#include "hsmmc.h" -#include "common-board-devices.h" - -#define PANDORA_WIFI_IRQ_GPIO 21 -#define PANDORA_WIFI_NRESET_GPIO 23 -#define OMAP3_PANDORA_TS_GPIO 94 - -static struct mtd_partition omap3pandora_nand_partitions[] = { - { - .name = "xloader", - .offset = 0, - .size = 4 * NAND_BLOCK_SIZE, - .mask_flags = MTD_WRITEABLE - }, { - .name = "uboot", - .offset = MTDPART_OFS_APPEND, - .size = 15 * NAND_BLOCK_SIZE, - }, { - .name = "uboot-env", - .offset = MTDPART_OFS_APPEND, - .size = 1 * NAND_BLOCK_SIZE, - }, { - .name = "boot", - .offset = MTDPART_OFS_APPEND, - .size = 80 * NAND_BLOCK_SIZE, - }, { - .name = "rootfs", - .offset = MTDPART_OFS_APPEND, - .size = MTDPART_SIZ_FULL, - }, -}; - -static struct omap_nand_platform_data pandora_nand_data = { - .cs = 0, - .devsize = NAND_BUSWIDTH_16, - .xfer_type = NAND_OMAP_PREFETCH_DMA, - .parts = omap3pandora_nand_partitions, - .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions), -}; - -static struct gpio_led pandora_gpio_leds[] = { - { - .name = "pandora::sd1", - .default_trigger = "mmc0", - .gpio = 128, - }, { - .name = "pandora::sd2", - .default_trigger = "mmc1", - .gpio = 129, - }, { - .name = "pandora::bluetooth", - .gpio = 158, - }, { - .name = "pandora::wifi", - .gpio = 159, - }, -}; - -static struct gpio_led_platform_data pandora_gpio_led_data = { - .leds = pandora_gpio_leds, - .num_leds = ARRAY_SIZE(pandora_gpio_leds), -}; - -static struct platform_device pandora_leds_gpio = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &pandora_gpio_led_data, - }, -}; - -static struct platform_device pandora_backlight = { - .name = "pandora-backlight", - .id = -1, -}; - -#define GPIO_BUTTON(gpio_num, ev_type, ev_code, act_low, descr) \ -{ \ - .gpio = gpio_num, \ - .type = ev_type, \ - 
.code = ev_code, \ - .active_low = act_low, \ - .debounce_interval = 4, \ - .desc = "btn " descr, \ -} - -#define GPIO_BUTTON_LOW(gpio_num, event_code, description) \ - GPIO_BUTTON(gpio_num, EV_KEY, event_code, 1, description) - -static struct gpio_keys_button pandora_gpio_keys[] = { - GPIO_BUTTON_LOW(110, KEY_UP, "up"), - GPIO_BUTTON_LOW(103, KEY_DOWN, "down"), - GPIO_BUTTON_LOW(96, KEY_LEFT, "left"), - GPIO_BUTTON_LOW(98, KEY_RIGHT, "right"), - GPIO_BUTTON_LOW(109, KEY_PAGEUP, "game 1"), - GPIO_BUTTON_LOW(111, KEY_END, "game 2"), - GPIO_BUTTON_LOW(106, KEY_PAGEDOWN, "game 3"), - GPIO_BUTTON_LOW(101, KEY_HOME, "game 4"), - GPIO_BUTTON_LOW(102, KEY_RIGHTSHIFT, "l"), - GPIO_BUTTON_LOW(97, KEY_KPPLUS, "l2"), - GPIO_BUTTON_LOW(105, KEY_RIGHTCTRL, "r"), - GPIO_BUTTON_LOW(107, KEY_KPMINUS, "r2"), - GPIO_BUTTON_LOW(104, KEY_LEFTCTRL, "ctrl"), - GPIO_BUTTON_LOW(99, KEY_MENU, "menu"), - GPIO_BUTTON_LOW(176, KEY_COFFEE, "hold"), - GPIO_BUTTON(100, EV_KEY, KEY_LEFTALT, 0, "alt"), - GPIO_BUTTON(108, EV_SW, SW_LID, 1, "lid"), -}; - -static struct gpio_keys_platform_data pandora_gpio_key_info = { - .buttons = pandora_gpio_keys, - .nbuttons = ARRAY_SIZE(pandora_gpio_keys), -}; - -static struct platform_device pandora_keys_gpio = { - .name = "gpio-keys", - .id = -1, - .dev = { - .platform_data = &pandora_gpio_key_info, - }, -}; - -static const uint32_t board_keymap[] = { - /* row, col, code */ - KEY(0, 0, KEY_9), - KEY(0, 1, KEY_8), - KEY(0, 2, KEY_I), - KEY(0, 3, KEY_J), - KEY(0, 4, KEY_N), - KEY(0, 5, KEY_M), - KEY(1, 0, KEY_0), - KEY(1, 1, KEY_7), - KEY(1, 2, KEY_U), - KEY(1, 3, KEY_H), - KEY(1, 4, KEY_B), - KEY(1, 5, KEY_SPACE), - KEY(2, 0, KEY_BACKSPACE), - KEY(2, 1, KEY_6), - KEY(2, 2, KEY_Y), - KEY(2, 3, KEY_G), - KEY(2, 4, KEY_V), - KEY(2, 5, KEY_FN), - KEY(3, 0, KEY_O), - KEY(3, 1, KEY_5), - KEY(3, 2, KEY_T), - KEY(3, 3, KEY_F), - KEY(3, 4, KEY_C), - KEY(4, 0, KEY_P), - KEY(4, 1, KEY_4), - KEY(4, 2, KEY_R), - KEY(4, 3, KEY_D), - KEY(4, 4, KEY_X), - KEY(5, 0, KEY_K), - KEY(5, 1, KEY_3), - KEY(5, 2, KEY_E), - KEY(5, 3, KEY_S), - KEY(5, 4, KEY_Z), - KEY(6, 0, KEY_L), - KEY(6, 1, KEY_2), - KEY(6, 2, KEY_W), - KEY(6, 3, KEY_A), - KEY(6, 4, KEY_DOT), - KEY(7, 0, KEY_ENTER), - KEY(7, 1, KEY_1), - KEY(7, 2, KEY_Q), - KEY(7, 3, KEY_LEFTSHIFT), - KEY(7, 4, KEY_COMMA), -}; - -static struct matrix_keymap_data board_map_data = { - .keymap = board_keymap, - .keymap_size = ARRAY_SIZE(board_keymap), -}; - -static struct twl4030_keypad_data pandora_kp_data = { - .keymap_data = &board_map_data, - .rows = 8, - .cols = 6, - .rep = 1, -}; - -static struct connector_atv_platform_data pandora_tv_pdata = { - .name = "tv", - .source = "venc.0", - .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO, - .invert_polarity = false, -}; - -static struct platform_device pandora_tv_connector_device = { - .name = "connector-analog-tv", - .id = 0, - .dev.platform_data = &pandora_tv_pdata, -}; - -static struct omap_dss_board_info pandora_dss_data = { - .default_display_name = "lcd", -}; - -static void pandora_wl1251_init_card(struct mmc_card *card) -{ - /* - * We have TI wl1251 attached to MMC3. Pass this information to - * SDIO core because it can't be probed by normal methods. 
- */ - if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) { - card->quirks |= MMC_QUIRK_NONSTD_SDIO; - card->cccr.wide_bus = 1; - card->cis.vendor = 0x104c; - card->cis.device = 0x9066; - card->cis.blksize = 512; - card->cis.max_dtr = 20000000; - } -} - -static struct omap2_hsmmc_info omap3pandora_mmc[] = { - { - .mmc = 1, - .caps = MMC_CAP_4_BIT_DATA, - .gpio_cd = -EINVAL, - .gpio_wp = 126, - .ext_clock = 0, - .deferred = true, - }, - { - .mmc = 2, - .caps = MMC_CAP_4_BIT_DATA, - .gpio_cd = -EINVAL, - .gpio_wp = 127, - .ext_clock = 1, - .transceiver = true, - .deferred = true, - }, - { - .mmc = 3, - .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD, - .gpio_cd = -EINVAL, - .gpio_wp = -EINVAL, - .init_card = pandora_wl1251_init_card, - }, - {} /* Terminator */ -}; - -static int omap3pandora_twl_gpio_setup(struct device *dev, - unsigned gpio, unsigned ngpio) -{ - int ret, gpio_32khz; - - /* gpio + {0,1} is "mmc{0,1}_cd" (input/IRQ) */ - omap3pandora_mmc[0].gpio_cd = gpio + 0; - omap3pandora_mmc[1].gpio_cd = gpio + 1; - omap_hsmmc_late_init(omap3pandora_mmc); - - /* gpio + 13 drives 32kHz buffer for wifi module */ - gpio_32khz = gpio + 13; - ret = gpio_request_one(gpio_32khz, GPIOF_OUT_INIT_HIGH, "wifi 32kHz"); - if (ret < 0) { - pr_err("Cannot get GPIO line %d, ret=%d\n", gpio_32khz, ret); - return -ENODEV; - } - - return 0; -} - -static struct twl4030_gpio_platform_data omap3pandora_gpio_data = { - .setup = omap3pandora_twl_gpio_setup, -}; - -static struct regulator_consumer_supply pandora_vmmc1_supply[] = { - REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"), -}; - -static struct regulator_consumer_supply pandora_vmmc2_supply[] = { - REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1") -}; - -static struct regulator_consumer_supply pandora_vmmc3_supply[] = { - REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"), -}; - -static struct regulator_consumer_supply pandora_vdds_supplies[] = { - REGULATOR_SUPPLY("vdds_sdi", "omapdss"), - REGULATOR_SUPPLY("vdds_dsi", "omapdss"), - REGULATOR_SUPPLY("vdds_dsi", "omapdss_dpi.0"), - REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.0"), -}; - -static struct regulator_consumer_supply pandora_vcc_lcd_supply[] = { - REGULATOR_SUPPLY("vcc", "spi1.1"), -}; - -static struct regulator_consumer_supply pandora_usb_phy_supply[] = { - REGULATOR_SUPPLY("vcc", "usb_phy_gen_xceiv.2"), /* hsusb port 2 */ -}; - -/* ads7846 on SPI and 2 nub controllers on I2C */ -static struct regulator_consumer_supply pandora_vaux4_supplies[] = { - REGULATOR_SUPPLY("vcc", "spi1.0"), - REGULATOR_SUPPLY("vcc", "3-0066"), - REGULATOR_SUPPLY("vcc", "3-0067"), -}; - -static struct regulator_consumer_supply pandora_adac_supply[] = { - REGULATOR_SUPPLY("vcc", "soc-audio"), -}; - -/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */ -static struct regulator_init_data pandora_vmmc1 = { - .constraints = { - .min_uV = 1850000, - .max_uV = 3150000, - .valid_modes_mask = REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE - | REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc1_supply), - .consumer_supplies = pandora_vmmc1_supply, -}; - -/* VMMC2 for MMC2 pins CMD, CLK, DAT0..DAT3 (max 100 mA) */ -static struct regulator_init_data pandora_vmmc2 = { - .constraints = { - .min_uV = 1850000, - .max_uV = 3150000, - .valid_modes_mask = REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE - | REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS, - }, - 
.num_consumer_supplies = ARRAY_SIZE(pandora_vmmc2_supply), - .consumer_supplies = pandora_vmmc2_supply, -}; - -/* VAUX1 for LCD */ -static struct regulator_init_data pandora_vaux1 = { - .constraints = { - .min_uV = 3000000, - .max_uV = 3000000, - .apply_uV = true, - .valid_modes_mask = REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .valid_ops_mask = REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(pandora_vcc_lcd_supply), - .consumer_supplies = pandora_vcc_lcd_supply, -}; - -/* VAUX2 for USB host PHY */ -static struct regulator_init_data pandora_vaux2 = { - .constraints = { - .min_uV = 1800000, - .max_uV = 1800000, - .apply_uV = true, - .valid_modes_mask = REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .valid_ops_mask = REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(pandora_usb_phy_supply), - .consumer_supplies = pandora_usb_phy_supply, -}; - -/* VAUX4 for ads7846 and nubs */ -static struct regulator_init_data pandora_vaux4 = { - .constraints = { - .min_uV = 2800000, - .max_uV = 2800000, - .apply_uV = true, - .valid_modes_mask = REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .valid_ops_mask = REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(pandora_vaux4_supplies), - .consumer_supplies = pandora_vaux4_supplies, -}; - -/* VSIM for audio DAC */ -static struct regulator_init_data pandora_vsim = { - .constraints = { - .min_uV = 2800000, - .max_uV = 2800000, - .apply_uV = true, - .valid_modes_mask = REGULATOR_MODE_NORMAL - | REGULATOR_MODE_STANDBY, - .valid_ops_mask = REGULATOR_CHANGE_MODE - | REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(pandora_adac_supply), - .consumer_supplies = pandora_adac_supply, -}; - -/* Fixed regulator internal to Wifi module */ -static struct regulator_init_data pandora_vmmc3 = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc3_supply), - .consumer_supplies = pandora_vmmc3_supply, -}; - -static struct fixed_voltage_config pandora_vwlan = { - .supply_name = "vwlan", - .microvolts = 1800000, /* 1.8V */ - .gpio = PANDORA_WIFI_NRESET_GPIO, - .startup_delay = 50000, /* 50ms */ - .enable_high = 1, - .enabled_at_boot = 0, - .init_data = &pandora_vmmc3, -}; - -static struct platform_device pandora_vwlan_device = { - .name = "reg-fixed-voltage", - .id = 1, - .dev = { - .platform_data = &pandora_vwlan, - }, -}; - -static struct twl4030_bci_platform_data pandora_bci_data; - -static struct twl4030_power_data pandora_power_data = { - .use_poweroff = true, -}; - -static struct twl4030_platform_data omap3pandora_twldata = { - .gpio = &omap3pandora_gpio_data, - .vmmc1 = &pandora_vmmc1, - .vmmc2 = &pandora_vmmc2, - .vaux1 = &pandora_vaux1, - .vaux2 = &pandora_vaux2, - .vaux4 = &pandora_vaux4, - .vsim = &pandora_vsim, - .keypad = &pandora_kp_data, - .bci = &pandora_bci_data, - .power = &pandora_power_data, -}; - -static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = { - { - I2C_BOARD_INFO("bq27500", 0x55), - .flags = I2C_CLIENT_WAKE, - }, -}; - -static int __init omap3pandora_i2c_init(void) -{ - omap3_pmic_get_config(&omap3pandora_twldata, - TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO, - TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2); - - omap3pandora_twldata.vdac->constraints.apply_uV = true; - - omap3pandora_twldata.vpll2->constraints.apply_uV = true; - omap3pandora_twldata.vpll2->num_consumer_supplies = - 
ARRAY_SIZE(pandora_vdds_supplies); - omap3pandora_twldata.vpll2->consumer_supplies = pandora_vdds_supplies; - - omap3_pmic_init("tps65950", &omap3pandora_twldata); - /* i2c2 pins are not connected */ - omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo, - ARRAY_SIZE(omap3pandora_i2c3_boardinfo)); - return 0; -} - -static struct panel_tpo_td043mtea1_platform_data pandora_lcd_pdata = { - .name = "lcd", - .source = "dpi.0", - - .data_lines = 24, - .nreset_gpio = 157, -}; - -static struct spi_board_info omap3pandora_spi_board_info[] __initdata = { - { - .modalias = "panel-tpo-td043mtea1", - .bus_num = 1, - .chip_select = 1, - .max_speed_hz = 375000, - .platform_data = &pandora_lcd_pdata, - } -}; - -static void __init pandora_wl1251_init(void) -{ - struct wl1251_platform_data pandora_wl1251_pdata; - int ret; - - memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata)); - - pandora_wl1251_pdata.power_gpio = -1; - - ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq"); - if (ret < 0) - goto fail; - - pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO); - if (pandora_wl1251_pdata.irq < 0) - goto fail_irq; - - pandora_wl1251_pdata.use_eeprom = true; - ret = wl1251_set_platform_data(&pandora_wl1251_pdata); - if (ret < 0) - goto fail_irq; - - return; - -fail_irq: - gpio_free(PANDORA_WIFI_IRQ_GPIO); -fail: - printk(KERN_ERR "wl1251 board initialisation failed\n"); -} - -static struct usbhs_phy_data phy_data[] __initdata = { - { - .port = 2, - .reset_gpio = 16, - .vcc_gpio = -EINVAL, - }, -}; - -static struct platform_device *omap3pandora_devices[] __initdata = { - &pandora_leds_gpio, - &pandora_keys_gpio, - &pandora_vwlan_device, - &pandora_backlight, - &pandora_tv_connector_device, -}; - -static struct usbhs_omap_platform_data usbhs_bdata __initdata = { - .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, -}; - -#ifdef CONFIG_OMAP_MUX -static struct omap_board_mux board_mux[] __initdata = { - { .reg_offset = OMAP_MUX_TERMINATOR }, -}; -#endif - -static void __init omap3pandora_init(void) -{ - omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); - omap_hsmmc_init(omap3pandora_mmc); - omap3pandora_i2c_init(); - pandora_wl1251_init(); - platform_add_devices(omap3pandora_devices, - ARRAY_SIZE(omap3pandora_devices)); - omap_display_init(&pandora_dss_data); - omap_serial_init(); - omap_sdrc_init(mt46h32m32lf6_sdrc_params, - mt46h32m32lf6_sdrc_params); - spi_register_board_info(omap3pandora_spi_board_info, - ARRAY_SIZE(omap3pandora_spi_board_info)); - omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 0, NULL); - - usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data)); - usbhs_init(&usbhs_bdata); - - usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb"); - usb_musb_init(NULL); - gpmc_nand_init(&pandora_nand_data, NULL); - - /* Ensure SDRC pins are mux'd for self-refresh */ - omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); - omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); -} - -MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console") - .atag_offset = 0x100, - .reserve = omap_reserve, - .map_io = omap3_map_io, - .init_early = omap35xx_init_early, - .init_irq = omap3_init_irq, - .init_machine = omap3pandora_init, - .init_late = omap35xx_init_late, - .init_time = omap3_sync32k_timer_init, - .restart = omap3xxx_restart, -MACHINE_END diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c index eb69acf21014..3f6521313c93 100644 --- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c +++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c @@ -23,12 +23,13 @@ #include "clock.h" #include 
"clock3xxx.h" -#include "clock34xx.h" #include "sdrc.h" #include "sram.h" #define CYCLES_PER_MHZ 1000000 +struct clk *sdrc_ick_p, *arm_fck_p; + /* * CORE DPLL (DPLL3) M2 divider rate programming functions * @@ -60,12 +61,14 @@ int omap3_core_dpll_m2_set_rate(struct clk_hw *hw, unsigned long rate, if (!clk || !rate) return -EINVAL; - validrate = omap2_clksel_round_rate_div(clk, rate, &new_div); + new_div = DIV_ROUND_UP(parent_rate, rate); + validrate = parent_rate / new_div; + if (validrate != rate) return -EINVAL; - sdrcrate = __clk_get_rate(sdrc_ick_p); - clkrate = __clk_get_rate(hw->clk); + sdrcrate = clk_get_rate(sdrc_ick_p); + clkrate = clk_hw_get_rate(hw); if (rate > clkrate) sdrcrate <<= ((rate / clkrate) >> 1); else @@ -83,7 +86,7 @@ int omap3_core_dpll_m2_set_rate(struct clk_hw *hw, unsigned long rate, /* * XXX This only needs to be done when the CPU frequency changes */ - _mpurate = __clk_get_rate(arm_fck_p) / CYCLES_PER_MHZ; + _mpurate = clk_get_rate(arm_fck_p) / CYCLES_PER_MHZ; c = (_mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT; c += 1; /* for safety */ c *= SDRC_MPURATE_LOOPS; diff --git a/arch/arm/mach-omap2/clkt_clksel.c b/arch/arm/mach-omap2/clkt_clksel.c deleted file mode 100644 index 7ee26108ac0d..000000000000 --- a/arch/arm/mach-omap2/clkt_clksel.c +++ /dev/null @@ -1,466 +0,0 @@ -/* - * clkt_clksel.c - OMAP2/3/4 clksel clock functions - * - * Copyright (C) 2005-2008 Texas Instruments, Inc. - * Copyright (C) 2004-2010 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * - * clksel clocks are clocks that do not have a fixed parent, or that - * can divide their parent's rate, or possibly both at the same time, based - * on the contents of a hardware register bitfield. - * - * All of the various mux and divider settings can be encoded into - * struct clksel* data structures, and then these can be autogenerated - * from some hardware database for each new chip generation. This - * should avoid the need to write, review, and validate a lot of new - * clock code for each new chip, since it can be exported from the SoC - * design flow. This is now done on OMAP4. - * - * The fusion of mux and divider clocks is a software creation. In - * hardware reality, the multiplexer (parent selection) and the - * divider exist separately. XXX At some point these clksel clocks - * should be split into "divider" clocks and "mux" clocks to better - * match the hardware. - * - * (The name "clksel" comes from the name of the corresponding - * register field in the OMAP2/3 family of SoCs.) - * - * XXX Currently these clocks are only used in the OMAP2/3/4 code, but - * many of the OMAP1 clocks should be convertible to use this - * mechanism. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk-provider.h> -#include <linux/io.h> -#include <linux/bug.h> - -#include "clock.h" - -/* Private functions */ - -/** - * _get_clksel_by_parent() - return clksel struct for a given clk & parent - * @clk: OMAP struct clk ptr to inspect - * @src_clk: OMAP struct clk ptr of the parent clk to search for - * - * Scan the struct clksel array associated with the clock to find - * the element associated with the supplied parent clock address. - * Returns a pointer to the struct clksel on success or NULL on error. 
- */ -static const struct clksel *_get_clksel_by_parent(struct clk_hw_omap *clk, - struct clk *src_clk) -{ - const struct clksel *clks; - - if (!src_clk) - return NULL; - - for (clks = clk->clksel; clks->parent; clks++) - if (clks->parent == src_clk) - break; /* Found the requested parent */ - - if (!clks->parent) { - /* This indicates a data problem */ - WARN(1, "clock: %s: could not find parent clock %s in clksel array\n", - __clk_get_name(clk->hw.clk), __clk_get_name(src_clk)); - return NULL; - } - - return clks; -} - -/** - * _write_clksel_reg() - program a clock's clksel register in hardware - * @clk: struct clk * to program - * @v: clksel bitfield value to program (with LSB at bit 0) - * - * Shift the clksel register bitfield value @v to its appropriate - * location in the clksel register and write it in. This function - * will ensure that the write to the clksel_reg reaches its - * destination before returning -- important since PRM and CM register - * accesses can be quite slow compared to ARM cycles -- but does not - * take into account any time the hardware might take to switch the - * clock source. - */ -static void _write_clksel_reg(struct clk_hw_omap *clk, u32 field_val) -{ - u32 v; - - v = omap2_clk_readl(clk, clk->clksel_reg); - v &= ~clk->clksel_mask; - v |= field_val << __ffs(clk->clksel_mask); - omap2_clk_writel(v, clk, clk->clksel_reg); - - v = omap2_clk_readl(clk, clk->clksel_reg); /* OCP barrier */ -} - -/** - * _clksel_to_divisor() - turn clksel field value into integer divider - * @clk: OMAP struct clk to use - * @field_val: register field value to find - * - * Given a struct clk of a rate-selectable clksel clock, and a register field - * value to search for, find the corresponding clock divisor. The register - * field value should be pre-masked and shifted down so the LSB is at bit 0 - * before calling. Returns 0 on error or returns the actual integer divisor - * upon success. - */ -static u32 _clksel_to_divisor(struct clk_hw_omap *clk, u32 field_val) -{ - const struct clksel *clks; - const struct clksel_rate *clkr; - struct clk *parent; - - parent = __clk_get_parent(clk->hw.clk); - - clks = _get_clksel_by_parent(clk, parent); - if (!clks) - return 0; - - for (clkr = clks->rates; clkr->div; clkr++) { - if (!(clkr->flags & cpu_mask)) - continue; - - if (clkr->val == field_val) - break; - } - - if (!clkr->div) { - /* This indicates a data error */ - WARN(1, "clock: %s: could not find fieldval %d for parent %s\n", - __clk_get_name(clk->hw.clk), field_val, - __clk_get_name(parent)); - return 0; - } - - return clkr->div; -} - -/** - * _divisor_to_clksel() - turn clksel integer divisor into a field value - * @clk: OMAP struct clk to use - * @div: integer divisor to search for - * - * Given a struct clk of a rate-selectable clksel clock, and a clock - * divisor, find the corresponding register field value. Returns the - * register field value _before_ left-shifting (i.e., LSB is at bit - * 0); or returns 0xFFFFFFFF (~0) upon error. 
- */ -static u32 _divisor_to_clksel(struct clk_hw_omap *clk, u32 div) -{ - const struct clksel *clks; - const struct clksel_rate *clkr; - struct clk *parent; - - /* should never happen */ - WARN_ON(div == 0); - - parent = __clk_get_parent(clk->hw.clk); - clks = _get_clksel_by_parent(clk, parent); - if (!clks) - return ~0; - - for (clkr = clks->rates; clkr->div; clkr++) { - if (!(clkr->flags & cpu_mask)) - continue; - - if (clkr->div == div) - break; - } - - if (!clkr->div) { - pr_err("clock: %s: could not find divisor %d for parent %s\n", - __clk_get_name(clk->hw.clk), div, - __clk_get_name(parent)); - return ~0; - } - - return clkr->val; -} - -/** - * _read_divisor() - get current divisor applied to parent clock (from hdwr) - * @clk: OMAP struct clk to use. - * - * Read the current divisor register value for @clk that is programmed - * into the hardware, convert it into the actual divisor value, and - * return it; or return 0 on error. - */ -static u32 _read_divisor(struct clk_hw_omap *clk) -{ - u32 v; - - if (!clk->clksel || !clk->clksel_mask) - return 0; - - v = omap2_clk_readl(clk, clk->clksel_reg); - v &= clk->clksel_mask; - v >>= __ffs(clk->clksel_mask); - - return _clksel_to_divisor(clk, v); -} - -/* Public functions */ - -/** - * omap2_clksel_round_rate_div() - find divisor for the given clock and rate - * @clk: OMAP struct clk to use - * @target_rate: desired clock rate - * @new_div: ptr to where we should store the divisor - * - * Finds 'best' divider value in an array based on the source and target - * rates. The divider array must be sorted with smallest divider first. - * This function is also used by the DPLL3 M2 divider code. - * - * Returns the rounded clock rate or returns 0xffffffff on error. - */ -u32 omap2_clksel_round_rate_div(struct clk_hw_omap *clk, - unsigned long target_rate, - u32 *new_div) -{ - unsigned long test_rate; - const struct clksel *clks; - const struct clksel_rate *clkr; - u32 last_div = 0; - struct clk *parent; - unsigned long parent_rate; - const char *clk_name; - - parent = __clk_get_parent(clk->hw.clk); - clk_name = __clk_get_name(clk->hw.clk); - parent_rate = __clk_get_rate(parent); - - if (!clk->clksel || !clk->clksel_mask) - return ~0; - - pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n", - clk_name, target_rate); - - *new_div = 1; - - clks = _get_clksel_by_parent(clk, parent); - if (!clks) - return ~0; - - for (clkr = clks->rates; clkr->div; clkr++) { - if (!(clkr->flags & cpu_mask)) - continue; - - /* Sanity check */ - if (clkr->div <= last_div) - pr_err("clock: %s: clksel_rate table not sorted\n", - clk_name); - - last_div = clkr->div; - - test_rate = parent_rate / clkr->div; - - if (test_rate <= target_rate) - break; /* found it */ - } - - if (!clkr->div) { - pr_err("clock: %s: could not find divisor for target rate %ld for parent %s\n", - clk_name, target_rate, __clk_get_name(parent)); - return ~0; - } - - *new_div = clkr->div; - - pr_debug("clock: new_div = %d, new_rate = %ld\n", *new_div, - (parent_rate / clkr->div)); - - return parent_rate / clkr->div; -} - -/* - * Clocktype interface functions to the OMAP clock code - * (i.e., those used in struct clk field function pointers, etc.) 
- */ - -/** - * omap2_clksel_find_parent_index() - return the array index of the current - * hardware parent of @hw - * @hw: struct clk_hw * to find the current hardware parent of - * - * Given a struct clk_hw pointer @hw to the 'hw' member of a struct - * clk_hw_omap record representing a source-selectable hardware clock, - * read the hardware register and determine what its parent is - * currently set to. Intended to be called only by the common clock - * framework struct clk_hw_ops.get_parent function pointer. Return - * the array index of this parent clock upon success -- there is no - * way to return an error, so if we encounter an error, just WARN() - * and pretend that we know that we're doing. - */ -u8 omap2_clksel_find_parent_index(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - const struct clksel *clks; - const struct clksel_rate *clkr; - u32 r, found = 0; - struct clk *parent; - const char *clk_name; - int ret = 0, f = 0; - - parent = __clk_get_parent(hw->clk); - clk_name = __clk_get_name(hw->clk); - - /* XXX should be able to return an error */ - WARN((!clk->clksel || !clk->clksel_mask), - "clock: %s: attempt to call on a non-clksel clock", clk_name); - - r = omap2_clk_readl(clk, clk->clksel_reg) & clk->clksel_mask; - r >>= __ffs(clk->clksel_mask); - - for (clks = clk->clksel; clks->parent && !found; clks++) { - for (clkr = clks->rates; clkr->div && !found; clkr++) { - if (!(clkr->flags & cpu_mask)) - continue; - - if (clkr->val == r) { - found = 1; - ret = f; - } - } - f++; - } - - /* This indicates a data error */ - WARN(!found, "clock: %s: init parent: could not find regval %0x\n", - clk_name, r); - - return ret; -} - - -/** - * omap2_clksel_recalc() - function ptr to pass via struct clk .recalc field - * @clk: struct clk * - * - * This function is intended to be called only by the clock framework. - * Each clksel clock should have its struct clk .recalc field set to this - * function. Returns the clock's current rate, based on its parent's rate - * and its current divisor setting in the hardware. - */ -unsigned long omap2_clksel_recalc(struct clk_hw *hw, unsigned long parent_rate) -{ - unsigned long rate; - u32 div = 0; - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - - if (!parent_rate) - return 0; - - div = _read_divisor(clk); - if (!div) - rate = parent_rate; - else - rate = parent_rate / div; - - pr_debug("%s: recalc'd %s's rate to %lu (div %d)\n", __func__, - __clk_get_name(hw->clk), rate, div); - - return rate; -} - -/** - * omap2_clksel_round_rate() - find rounded rate for the given clock and rate - * @clk: OMAP struct clk to use - * @target_rate: desired clock rate - * - * This function is intended to be called only by the clock framework. - * Finds best target rate based on the source clock and possible dividers. - * rates. The divider array must be sorted with smallest divider first. - * - * Returns the rounded clock rate or returns 0xffffffff on error. - */ -long omap2_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate, - unsigned long *parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 new_div; - - return omap2_clksel_round_rate_div(clk, target_rate, &new_div); -} - -/** - * omap2_clksel_set_rate() - program clock rate in hardware - * @clk: struct clk * to program rate - * @rate: target rate to program - * - * This function is intended to be called only by the clock framework. - * Program @clk's rate to @rate in the hardware. 
The clock can be - * either enabled or disabled when this happens, although if the clock - * is enabled, some downstream devices may glitch or behave - * unpredictably when the clock rate is changed - this depends on the - * hardware. This function does not currently check the usecount of - * the clock, so if multiple drivers are using the clock, and the rate - * is changed, they will all be affected without any notification. - * Returns -EINVAL upon error, or 0 upon success. - */ -int omap2_clksel_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 field_val, validrate, new_div = 0; - - if (!clk->clksel || !clk->clksel_mask) - return -EINVAL; - - validrate = omap2_clksel_round_rate_div(clk, rate, &new_div); - if (validrate != rate) - return -EINVAL; - - field_val = _divisor_to_clksel(clk, new_div); - if (field_val == ~0) - return -EINVAL; - - _write_clksel_reg(clk, field_val); - - pr_debug("clock: %s: set rate to %ld\n", __clk_get_name(hw->clk), - __clk_get_rate(hw->clk)); - - return 0; -} - -/* - * Clksel parent setting function - not passed in struct clk function - * pointer - instead, the OMAP clock code currently assumes that any - * parent-setting clock is a clksel clock, and calls - * omap2_clksel_set_parent() by default - */ - -/** - * omap2_clksel_set_parent() - change a clock's parent clock - * @clk: struct clk * of the child clock - * @new_parent: struct clk * of the new parent clock - * - * This function is intended to be called only by the clock framework. - * Change the parent clock of clock @clk to @new_parent. This is - * intended to be used while @clk is disabled. This function does not - * currently check the usecount of the clock, so if multiple drivers - * are using the clock, and the parent is changed, they will all be - * affected without any notification. Returns -EINVAL upon error, or - * 0 upon success. - */ -int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - - if (!clk->clksel || !clk->clksel_mask) - return -EINVAL; - - _write_clksel_reg(clk, field_val); - return 0; -} diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c deleted file mode 100644 index f251a14cbf16..000000000000 --- a/arch/arm/mach-omap2/clkt_dpll.c +++ /dev/null @@ -1,370 +0,0 @@ -/* - * OMAP2/3/4 DPLL clock functions - * - * Copyright (C) 2005-2008 Texas Instruments, Inc. - * Copyright (C) 2004-2010 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk-provider.h> -#include <linux/io.h> - -#include <asm/div64.h> - -#include "clock.h" - -/* DPLL rate rounding: minimum DPLL multiplier, divider values */ -#define DPLL_MIN_MULTIPLIER 2 -#define DPLL_MIN_DIVIDER 1 - -/* Possible error results from _dpll_test_mult */ -#define DPLL_MULT_UNDERFLOW -1 - -/* - * Scale factor to mitigate roundoff errors in DPLL rate rounding. - * The higher the scale factor, the greater the risk of arithmetic overflow, - * but the closer the rounded rate to the target rate. DPLL_SCALE_FACTOR - * must be a power of DPLL_SCALE_BASE. 
- */ -#define DPLL_SCALE_FACTOR 64 -#define DPLL_SCALE_BASE 2 -#define DPLL_ROUNDING_VAL ((DPLL_SCALE_BASE / 2) * \ - (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE)) - -/* - * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx. - * From device data manual section 4.3 "DPLL and DLL Specifications". - */ -#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN 500000 -#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX 2500000 - -/* _dpll_test_fint() return codes */ -#define DPLL_FINT_UNDERFLOW -1 -#define DPLL_FINT_INVALID -2 - -/* Private functions */ - -/* - * _dpll_test_fint - test whether an Fint value is valid for the DPLL - * @clk: DPLL struct clk to test - * @n: divider value (N) to test - * - * Tests whether a particular divider @n will result in a valid DPLL - * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter - * Correction". Returns 0 if OK, -1 if the enclosing loop can terminate - * (assuming that it is counting N upwards), or -2 if the enclosing loop - * should skip to the next iteration (again assuming N is increasing). - */ -static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n) -{ - struct dpll_data *dd; - long fint, fint_min, fint_max; - int ret = 0; - - dd = clk->dpll_data; - - /* DPLL divider must result in a valid jitter correction val */ - fint = __clk_get_rate(__clk_get_parent(clk->hw.clk)) / n; - - if (dd->flags & DPLL_J_TYPE) { - fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN; - fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX; - } else { - fint_min = ti_clk_features.fint_min; - fint_max = ti_clk_features.fint_max; - } - - if (!fint_min || !fint_max) { - WARN(1, "No fint limits available!\n"); - return DPLL_FINT_INVALID; - } - - if (fint < ti_clk_features.fint_min) { - pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n", - n); - dd->max_divider = n; - ret = DPLL_FINT_UNDERFLOW; - } else if (fint > ti_clk_features.fint_max) { - pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n", - n); - dd->min_divider = n; - ret = DPLL_FINT_INVALID; - } else if (fint > ti_clk_features.fint_band1_max && - fint < ti_clk_features.fint_band2_min) { - pr_debug("rejecting n=%d due to Fint failure\n", n); - ret = DPLL_FINT_INVALID; - } - - return ret; -} - -static unsigned long _dpll_compute_new_rate(unsigned long parent_rate, - unsigned int m, unsigned int n) -{ - unsigned long long num; - - num = (unsigned long long)parent_rate * m; - do_div(num, n); - return num; -} - -/* - * _dpll_test_mult - test a DPLL multiplier value - * @m: pointer to the DPLL m (multiplier) value under test - * @n: current DPLL n (divider) value under test - * @new_rate: pointer to storage for the resulting rounded rate - * @target_rate: the desired DPLL rate - * @parent_rate: the DPLL's parent clock rate - * - * This code tests a DPLL multiplier value, ensuring that the - * resulting rate will not be higher than the target_rate, and that - * the multiplier value itself is valid for the DPLL. Initially, the - * integer pointed to by the m argument should be prescaled by - * multiplying by DPLL_SCALE_FACTOR. The code will replace this with - * a non-scaled m upon return. This non-scaled m will result in a - * new_rate as close as possible to target_rate (but not greater than - * target_rate) given the current (parent_rate, n, prescaled m) - * triple. Returns DPLL_MULT_UNDERFLOW in the event that the - * non-scaled m attempted to underflow, which can allow the calling - * function to bail out early; or 0 upon success. 
- */ -static int _dpll_test_mult(int *m, int n, unsigned long *new_rate, - unsigned long target_rate, - unsigned long parent_rate) -{ - int r = 0, carry = 0; - - /* Unscale m and round if necessary */ - if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL) - carry = 1; - *m = (*m / DPLL_SCALE_FACTOR) + carry; - - /* - * The new rate must be <= the target rate to avoid programming - * a rate that is impossible for the hardware to handle - */ - *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); - if (*new_rate > target_rate) { - (*m)--; - *new_rate = 0; - } - - /* Guard against m underflow */ - if (*m < DPLL_MIN_MULTIPLIER) { - *m = DPLL_MIN_MULTIPLIER; - *new_rate = 0; - r = DPLL_MULT_UNDERFLOW; - } - - if (*new_rate == 0) - *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); - - return r; -} - -/** - * _omap2_dpll_is_in_bypass - check if DPLL is in bypass mode or not - * @v: bitfield value of the DPLL enable - * - * Checks given DPLL enable bitfield to see whether the DPLL is in bypass - * mode or not. Returns 1 if the DPLL is in bypass, 0 otherwise. - */ -static int _omap2_dpll_is_in_bypass(u32 v) -{ - u8 mask, val; - - mask = ti_clk_features.dpll_bypass_vals; - - /* - * Each set bit in the mask corresponds to a bypass value equal - * to the bitshift. Go through each set-bit in the mask and - * compare against the given register value. - */ - while (mask) { - val = __ffs(mask); - mask ^= (1 << val); - if (v == val) - return 1; - } - - return 0; -} - -/* Public functions */ -u8 omap2_init_dpll_parent(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 v; - struct dpll_data *dd; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - v = omap2_clk_readl(clk, dd->control_reg); - v &= dd->enable_mask; - v >>= __ffs(dd->enable_mask); - - /* Reparent the struct clk in case the dpll is in bypass */ - if (_omap2_dpll_is_in_bypass(v)) - return 1; - - return 0; -} - -/** - * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate - * @clk: struct clk * of a DPLL - * - * DPLLs can be locked or bypassed - basically, enabled or disabled. - * When locked, the DPLL output depends on the M and N values. When - * bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock - * or sys_clk. Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and - * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively - * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk. - * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is - * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0 - * if the clock @clk is not a DPLL. 
- */ -unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk) -{ - long long dpll_clk; - u32 dpll_mult, dpll_div, v; - struct dpll_data *dd; - - dd = clk->dpll_data; - if (!dd) - return 0; - - /* Return bypass rate if DPLL is bypassed */ - v = omap2_clk_readl(clk, dd->control_reg); - v &= dd->enable_mask; - v >>= __ffs(dd->enable_mask); - - if (_omap2_dpll_is_in_bypass(v)) - return __clk_get_rate(dd->clk_bypass); - - v = omap2_clk_readl(clk, dd->mult_div1_reg); - dpll_mult = v & dd->mult_mask; - dpll_mult >>= __ffs(dd->mult_mask); - dpll_div = v & dd->div1_mask; - dpll_div >>= __ffs(dd->div1_mask); - - dpll_clk = (long long) __clk_get_rate(dd->clk_ref) * dpll_mult; - do_div(dpll_clk, dpll_div + 1); - - return dpll_clk; -} - -/* DPLL rate rounding code */ - -/** - * omap2_dpll_round_rate - round a target rate for an OMAP DPLL - * @clk: struct clk * for a DPLL - * @target_rate: desired DPLL clock rate - * - * Given a DPLL and a desired target rate, round the target rate to a - * possible, programmable rate for this DPLL. Attempts to select the - * minimum possible n. Stores the computed (m, n) in the DPLL's - * dpll_data structure so set_rate() will not need to call this - * (expensive) function again. Returns ~0 if the target rate cannot - * be rounded, or the rounded rate upon success. - */ -long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, - unsigned long *parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - int m, n, r, scaled_max_m; - int min_delta_m = INT_MAX, min_delta_n = INT_MAX; - unsigned long scaled_rt_rp; - unsigned long new_rate = 0; - struct dpll_data *dd; - unsigned long ref_rate; - long delta; - long prev_min_delta = LONG_MAX; - const char *clk_name; - - if (!clk || !clk->dpll_data) - return ~0; - - dd = clk->dpll_data; - - ref_rate = __clk_get_rate(dd->clk_ref); - clk_name = __clk_get_name(hw->clk); - pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n", - clk_name, target_rate); - - scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR); - scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR; - - dd->last_rounded_rate = 0; - - for (n = dd->min_divider; n <= dd->max_divider; n++) { - - /* Is the (input clk, divider) pair valid for the DPLL? 
*/ - r = _dpll_test_fint(clk, n); - if (r == DPLL_FINT_UNDERFLOW) - break; - else if (r == DPLL_FINT_INVALID) - continue; - - /* Compute the scaled DPLL multiplier, based on the divider */ - m = scaled_rt_rp * n; - - /* - * Since we're counting n up, a m overflow means we - * can bail out completely (since as n increases in - * the next iteration, there's no way that m can - * increase beyond the current m) - */ - if (m > scaled_max_m) - break; - - r = _dpll_test_mult(&m, n, &new_rate, target_rate, - ref_rate); - - /* m can't be set low enough for this n - try with a larger n */ - if (r == DPLL_MULT_UNDERFLOW) - continue; - - /* skip rates above our target rate */ - delta = target_rate - new_rate; - if (delta < 0) - continue; - - if (delta < prev_min_delta) { - prev_min_delta = delta; - min_delta_m = m; - min_delta_n = n; - } - - pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n", - clk_name, m, n, new_rate); - - if (delta == 0) - break; - } - - if (prev_min_delta == LONG_MAX) { - pr_debug("clock: %s: cannot round to rate %lu\n", - clk_name, target_rate); - return ~0; - } - - dd->last_rounded_m = min_delta_m; - dd->last_rounded_n = min_delta_n; - dd->last_rounded_rate = target_rate - prev_min_delta; - - return dd->last_rounded_rate; -} - diff --git a/arch/arm/mach-omap2/clkt_iclk.c b/arch/arm/mach-omap2/clkt_iclk.c deleted file mode 100644 index 55eb579aeae1..000000000000 --- a/arch/arm/mach-omap2/clkt_iclk.c +++ /dev/null @@ -1,68 +0,0 @@ -/* - * OMAP2/3 interface clock control - * - * Copyright (C) 2011 Nokia Corporation - * Paul Walmsley - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk-provider.h> -#include <linux/io.h> - -#include "clock.h" - -/* Register offsets */ -#define CM_AUTOIDLE 0x30 -#define CM_ICLKEN 0x10 - -/* Private functions */ - -/* XXX */ -void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk) -{ - u32 v; - void __iomem *r; - - r = (__force void __iomem *) - ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); - - v = omap2_clk_readl(clk, r); - v |= (1 << clk->enable_bit); - omap2_clk_writel(v, clk, r); -} - -/* XXX */ -void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk) -{ - u32 v; - void __iomem *r; - - r = (__force void __iomem *) - ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); - - v = omap2_clk_readl(clk, r); - v &= ~(1 << clk->enable_bit); - omap2_clk_writel(v, clk, r); -} - -/* Public data */ - -const struct clk_hw_omap_ops clkhwops_iclk = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, -}; - -const struct clk_hw_omap_ops clkhwops_iclk_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = omap2_clk_dflt_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - - - diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index a699d7169307..acb60ed17273 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c @@ -20,12 +20,11 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/delay.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/bitops.h> -#include <linux/regmap.h> #include <linux/of_address.h> -#include <linux/bootmem.h> #include <asm/cpu.h> #include <trace/events/power.h> @@ -40,19 +39,8 @@ #include "cm-regbits-34xx.h" 
#include "common.h" -/* - * MAX_MODULE_ENABLE_WAIT: maximum of number of microseconds to wait - * for a module to indicate that it is no longer in idle - */ -#define MAX_MODULE_ENABLE_WAIT 100000 - u16 cpu_mask; -/* - * Clock features setup. Used instead of CPU type checks. - */ -struct ti_clk_features ti_clk_features; - /* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */ #define OMAP3430_DPLL_FINT_BAND1_MIN 750000 #define OMAP3430_DPLL_FINT_BAND1_MAX 2100000 @@ -66,119 +54,24 @@ struct ti_clk_features ti_clk_features; #define OMAP3PLUS_DPLL_FINT_MIN 32000 #define OMAP3PLUS_DPLL_FINT_MAX 52000000 -/* - * clkdm_control: if true, then when a clock is enabled in the - * hardware, its clockdomain will first be enabled; and when a clock - * is disabled in the hardware, its clockdomain will be disabled - * afterwards. - */ -static bool clkdm_control = true; - -static LIST_HEAD(clk_hw_omap_clocks); - -struct clk_iomap { - struct regmap *regmap; - void __iomem *mem; -}; - -static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS]; - -static void clk_memmap_writel(u32 val, void __iomem *reg) -{ - struct clk_omap_reg *r = (struct clk_omap_reg *)® - struct clk_iomap *io = clk_memmaps[r->index]; - - if (io->regmap) - regmap_write(io->regmap, r->offset, val); - else - writel_relaxed(val, io->mem + r->offset); -} - -static u32 clk_memmap_readl(void __iomem *reg) -{ - u32 val; - struct clk_omap_reg *r = (struct clk_omap_reg *)® - struct clk_iomap *io = clk_memmaps[r->index]; - - if (io->regmap) - regmap_read(io->regmap, r->offset, &val); - else - val = readl_relaxed(io->mem + r->offset); - - return val; -} - -void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg) -{ - if (WARN_ON_ONCE(!(clk->flags & MEMMAP_ADDRESSING))) - writel_relaxed(val, reg); - else - clk_memmap_writel(val, reg); -} - -u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg) -{ - if (WARN_ON_ONCE(!(clk->flags & MEMMAP_ADDRESSING))) - return readl_relaxed(reg); - else - return clk_memmap_readl(reg); -} - static struct ti_clk_ll_ops omap_clk_ll_ops = { - .clk_readl = clk_memmap_readl, - .clk_writel = clk_memmap_writel, + .clkdm_clk_enable = clkdm_clk_enable, + .clkdm_clk_disable = clkdm_clk_disable, + .cm_wait_module_ready = omap_cm_wait_module_ready, + .cm_split_idlest_reg = cm_split_idlest_reg, }; /** - * omap2_clk_provider_init - initialize a clock provider - * @match_table: DT device table to match for devices to init - * @np: device node pointer for the this clock provider - * @index: index for the clock provider - + @syscon: syscon regmap pointer - * @mem: iomem pointer for the clock provider memory area, only used if - * syscon is not provided + * omap2_clk_setup_ll_ops - setup clock driver low-level ops * - * Initializes a clock provider module (CM/PRM etc.), registering - * the memory mapping at specified index and initializing the - * low level driver infrastructure. Returns 0 in success. + * Sets up clock driver low-level platform ops. These are needed + * for register accesses and various other misc platform operations. + * Returns 0 on success, -EBUSY if low level ops have been registered + * already. 
*/ -int __init omap2_clk_provider_init(struct device_node *np, int index, - struct regmap *syscon, void __iomem *mem) +int __init omap2_clk_setup_ll_ops(void) { - struct clk_iomap *io; - - ti_clk_ll_ops = &omap_clk_ll_ops; - - io = kzalloc(sizeof(*io), GFP_KERNEL); - - io->regmap = syscon; - io->mem = mem; - - clk_memmaps[index] = io; - - ti_dt_clk_init_provider(np, index); - - return 0; -} - -/** - * omap2_clk_legacy_provider_init - initialize a legacy clock provider - * @index: index for the clock provider - * @mem: iomem pointer for the clock provider memory area - * - * Initializes a legacy clock provider memory mapping. - */ -void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem) -{ - struct clk_iomap *io; - - ti_clk_ll_ops = &omap_clk_ll_ops; - - io = memblock_virt_alloc(sizeof(*io), 0); - - io->mem = mem; - - clk_memmaps[index] = io; + return ti_clk_setup_ll_ops(&omap_clk_ll_ops); } /* @@ -187,77 +80,6 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem) /* Private functions */ - -/** - * _wait_idlest_generic - wait for a module to leave the idle state - * @clk: module clock to wait for (needed for register offsets) - * @reg: virtual address of module IDLEST register - * @mask: value to mask against to determine if the module is active - * @idlest: idle state indicator (0 or 1) for the clock - * @name: name of the clock (for printk) - * - * Wait for a module to leave idle, where its idle-status register is - * not inside the CM module. Returns 1 if the module left idle - * promptly, or 0 if the module did not leave idle before the timeout - * elapsed. XXX Deprecated - should be moved into drivers for the - * individual IP block that the IDLEST register exists in. - */ -static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg, - u32 mask, u8 idlest, const char *name) -{ - int i = 0, ena = 0; - - ena = (idlest) ? 0 : mask; - - omap_test_timeout(((omap2_clk_readl(clk, reg) & mask) == ena), - MAX_MODULE_ENABLE_WAIT, i); - - if (i < MAX_MODULE_ENABLE_WAIT) - pr_debug("omap clock: module associated with clock %s ready after %d loops\n", - name, i); - else - pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n", - name, MAX_MODULE_ENABLE_WAIT); - - return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0; -}; - -/** - * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE - * @clk: struct clk * belonging to the module - * - * If the necessary clocks for the OMAP hardware IP block that - * corresponds to clock @clk are enabled, then wait for the module to - * indicate readiness (i.e., to leave IDLE). This code does not - * belong in the clock code and will be moved in the medium term to - * module-dependent code. No return value. 
- */ -static void _omap2_module_wait_ready(struct clk_hw_omap *clk) -{ - void __iomem *companion_reg, *idlest_reg; - u8 other_bit, idlest_bit, idlest_val, idlest_reg_id; - s16 prcm_mod; - int r; - - /* Not all modules have multiple clocks that their IDLEST depends on */ - if (clk->ops->find_companion) { - clk->ops->find_companion(clk, &companion_reg, &other_bit); - if (!(omap2_clk_readl(clk, companion_reg) & (1 << other_bit))) - return; - } - - clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val); - r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id); - if (r) { - /* IDLEST register not in the CM module */ - _wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit), - idlest_val, __clk_get_name(clk->hw.clk)); - } else { - omap_cm_wait_module_ready(0, prcm_mod, idlest_reg_id, - idlest_bit); - }; -} - /* Public functions */ /** @@ -290,279 +112,6 @@ void omap2_init_clk_clkdm(struct clk_hw *hw) } } -/** - * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable - * - * Prevent the OMAP clock code from calling into the clockdomain code - * when a hardware clock in that clockdomain is enabled or disabled. - * Intended to be called at init time from omap*_clk_init(). No - * return value. - */ -void __init omap2_clk_disable_clkdm_control(void) -{ - clkdm_control = false; -} - -/** - * omap2_clk_dflt_find_companion - find companion clock to @clk - * @clk: struct clk * to find the companion clock of - * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in - * @other_bit: u8 ** to return the companion clock bit shift in - * - * Note: We don't need special code here for INVERT_ENABLE for the - * time being since INVERT_ENABLE only applies to clocks enabled by - * CM_CLKEN_PLL - * - * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes it's - * just a matter of XORing the bits. - * - * Some clocks don't have companion clocks. For example, modules with - * only an interface clock (such as MAILBOXES) don't have a companion - * clock. Right now, this code relies on the hardware exporting a bit - * in the correct companion register that indicates that the - * nonexistent 'companion clock' is active. Future patches will - * associate this type of code with per-module data structures to - * avoid this issue, and remove the casts. No return value. - */ -void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, u8 *other_bit) -{ - u32 r; - - /* - * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes - * it's just a matter of XORing the bits. - */ - r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN)); - - *other_reg = (__force void __iomem *)r; - *other_bit = clk->enable_bit; -} - -/** - * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk - * @clk: struct clk * to find IDLEST info for - * @idlest_reg: void __iomem ** to return the CM_IDLEST va in - * @idlest_bit: u8 * to return the CM_IDLEST bit shift in - * @idlest_val: u8 * to return the idle status indicator - * - * Return the CM_IDLEST register address and bit shift corresponding - * to the module that "owns" this clock. This default code assumes - * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that - * the IDLEST register address ID corresponds to the CM_*CLKEN - * register address ID (e.g., that CM_FCLKEN2 corresponds to - * CM_IDLEST2). This is not true for all modules. No return value. 
- */ -void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - *idlest_bit = clk->enable_bit; - - /* - * 24xx uses 0 to indicate not ready, and 1 to indicate ready. - * 34xx reverses this, just to keep us on our toes - * AM35xx uses both, depending on the module. - */ - *idlest_val = ti_clk_features.cm_idlest_val; -} - -/** - * omap2_dflt_clk_enable - enable a clock in the hardware - * @hw: struct clk_hw * of the clock to enable - * - * Enable the clock @hw in the hardware. We first call into the OMAP - * clockdomain code to "enable" the corresponding clockdomain if this - * is the first enabled user of the clockdomain. Then program the - * hardware to enable the clock. Then wait for the IP block that uses - * this clock to leave idle (if applicable). Returns the error value - * from clkdm_clk_enable() if it terminated with an error, or -EINVAL - * if @hw has a null clock enable_reg, or zero upon success. - */ -int omap2_dflt_clk_enable(struct clk_hw *hw) -{ - struct clk_hw_omap *clk; - u32 v; - int ret = 0; - - clk = to_clk_hw_omap(hw); - - if (clkdm_control && clk->clkdm) { - ret = clkdm_clk_enable(clk->clkdm, hw->clk); - if (ret) { - WARN(1, "%s: could not enable %s's clockdomain %s: %d\n", - __func__, __clk_get_name(hw->clk), - clk->clkdm->name, ret); - return ret; - } - } - - if (unlikely(clk->enable_reg == NULL)) { - pr_err("%s: %s missing enable_reg\n", __func__, - __clk_get_name(hw->clk)); - ret = -EINVAL; - goto err; - } - - /* FIXME should not have INVERT_ENABLE bit here */ - v = omap2_clk_readl(clk, clk->enable_reg); - if (clk->flags & INVERT_ENABLE) - v &= ~(1 << clk->enable_bit); - else - v |= (1 << clk->enable_bit); - omap2_clk_writel(v, clk, clk->enable_reg); - v = omap2_clk_readl(clk, clk->enable_reg); /* OCP barrier */ - - if (clk->ops && clk->ops->find_idlest) - _omap2_module_wait_ready(clk); - - return 0; - -err: - if (clkdm_control && clk->clkdm) - clkdm_clk_disable(clk->clkdm, hw->clk); - return ret; -} - -/** - * omap2_dflt_clk_disable - disable a clock in the hardware - * @hw: struct clk_hw * of the clock to disable - * - * Disable the clock @hw in the hardware, and call into the OMAP - * clockdomain code to "disable" the corresponding clockdomain if all - * clocks/hwmods in that clockdomain are now disabled. No return - * value. - */ -void omap2_dflt_clk_disable(struct clk_hw *hw) -{ - struct clk_hw_omap *clk; - u32 v; - - clk = to_clk_hw_omap(hw); - if (!clk->enable_reg) { - /* - * 'independent' here refers to a clock which is not - * controlled by its parent. - */ - pr_err("%s: independent clock %s has no enable_reg\n", - __func__, __clk_get_name(hw->clk)); - return; - } - - v = omap2_clk_readl(clk, clk->enable_reg); - if (clk->flags & INVERT_ENABLE) - v |= (1 << clk->enable_bit); - else - v &= ~(1 << clk->enable_bit); - omap2_clk_writel(v, clk, clk->enable_reg); - /* No OCP barrier needed here since it is a disable operation */ - - if (clkdm_control && clk->clkdm) - clkdm_clk_disable(clk->clkdm, hw->clk); -} - -/** - * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw - * @hw: struct clk_hw * of the clock being enabled - * - * Increment the usecount of the clockdomain of the clock pointed to - * by @hw; if the usecount is 1, the clockdomain will be "enabled." - * Only needed for clocks that don't use omap2_dflt_clk_enable() as - * their enable function pointer. 
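The default find_idlest helper above derives the CM_IDLESTx address from the CM_{F,I}CLKENx address by masking out bits [7:4] of the offset and setting 0x20. A quick stand-alone check of that mapping, with an invented CM base address:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the "(enable_reg & ~0xf0) | 0x20" step of the removed
 * omap2_clk_dflt_find_idlest(); the base and register offsets here
 * are assumptions for illustration only. */
static uint32_t idlest_from_clken(uint32_t clken_addr)
{
    return (clken_addr & ~0xf0u) | 0x20u;
}

int main(void)
{
    uint32_t base = 0x48005000u;    /* made-up CM module base */

    /* CM_FCLKEN1 (+0x00) and CM_ICLKEN1 (+0x10) -> CM_IDLEST1 (+0x20) */
    printf("%#x\n", idlest_from_clken(base + 0x00));
    printf("%#x\n", idlest_from_clken(base + 0x10));
    /* CM_FCLKEN2 (+0x04) -> CM_IDLEST2 (+0x24) */
    printf("%#x\n", idlest_from_clken(base + 0x04));
    return 0;
}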
Passes along the return value of - * clkdm_clk_enable(), -EINVAL if @hw is not associated with a - * clockdomain, or 0 if clock framework-based clockdomain control is - * not implemented. - */ -int omap2_clkops_enable_clkdm(struct clk_hw *hw) -{ - struct clk_hw_omap *clk; - int ret = 0; - - clk = to_clk_hw_omap(hw); - - if (unlikely(!clk->clkdm)) { - pr_err("%s: %s: no clkdm set ?!\n", __func__, - __clk_get_name(hw->clk)); - return -EINVAL; - } - - if (unlikely(clk->enable_reg)) - pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__, - __clk_get_name(hw->clk)); - - if (!clkdm_control) { - pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", - __func__, __clk_get_name(hw->clk)); - return 0; - } - - ret = clkdm_clk_enable(clk->clkdm, hw->clk); - WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n", - __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret); - - return ret; -} - -/** - * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw - * @hw: struct clk_hw * of the clock being disabled - * - * Decrement the usecount of the clockdomain of the clock pointed to - * by @hw; if the usecount is 0, the clockdomain will be "disabled." - * Only needed for clocks that don't use omap2_dflt_clk_disable() as their - * disable function pointer. No return value. - */ -void omap2_clkops_disable_clkdm(struct clk_hw *hw) -{ - struct clk_hw_omap *clk; - - clk = to_clk_hw_omap(hw); - - if (unlikely(!clk->clkdm)) { - pr_err("%s: %s: no clkdm set ?!\n", __func__, - __clk_get_name(hw->clk)); - return; - } - - if (unlikely(clk->enable_reg)) - pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__, - __clk_get_name(hw->clk)); - - if (!clkdm_control) { - pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", - __func__, __clk_get_name(hw->clk)); - return; - } - - clkdm_clk_disable(clk->clkdm, hw->clk); -} - -/** - * omap2_dflt_clk_is_enabled - is clock enabled in the hardware? - * @hw: struct clk_hw * to check - * - * Return 1 if the clock represented by @hw is enabled in the - * hardware, or 0 otherwise. Intended for use in the struct - * clk_ops.is_enabled function pointer. - */ -int omap2_dflt_clk_is_enabled(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 v; - - v = omap2_clk_readl(clk, clk->enable_reg); - - if (clk->flags & INVERT_ENABLE) - v ^= BIT(clk->enable_bit); - - v &= BIT(clk->enable_bit); - - return v ? 1 : 0; -} - static int __initdata mpurate; /* @@ -584,178 +133,6 @@ static int __init omap_clk_setup(char *str) __setup("mpurate=", omap_clk_setup); /** - * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock - * @clk: struct clk * to initialize - * - * Add an OMAP clock @clk to the internal list of OMAP clocks. Used - * temporarily for autoidle handling, until this support can be - * integrated into the common clock framework code in some way. No - * return value. - */ -void omap2_init_clk_hw_omap_clocks(struct clk *clk) -{ - struct clk_hw_omap *c; - - if (__clk_get_flags(clk) & CLK_IS_BASIC) - return; - - c = to_clk_hw_omap(__clk_get_hw(clk)); - list_add(&c->node, &clk_hw_omap_clocks); -} - -/** - * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that - * support it - * - * Enable clock autoidle on all OMAP clocks that have allow_idle - * function pointers associated with them. This function is intended - * to be temporary until support for this is added to the common clock - * code. Returns 0. 
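omap2_dflt_clk_is_enabled() above normalises INVERT_ENABLE clocks by XOR-ing the enable bit before testing it, so "0 means enabled" modules take the same path as everyone else. A tiny sketch of that test, with an illustrative flag value rather than the kernel's:

#include <stdio.h>

#define INVERT_ENABLE (1u << 0)   /* illustrative flag bit, not the kernel's value */
#define BIT(n) (1u << (n))

/* Same shape as the removed omap2_dflt_clk_is_enabled(): flip the
 * enable bit first for inverted clocks, then test it. */
static int is_enabled(unsigned int reg, unsigned int bit, unsigned int flags)
{
    unsigned int v = reg;

    if (flags & INVERT_ENABLE)
        v ^= BIT(bit);
    return (v & BIT(bit)) ? 1 : 0;
}

int main(void)
{
    printf("%d\n", is_enabled(0x04, 2, 0));              /* bit set   -> 1 */
    printf("%d\n", is_enabled(0x00, 2, INVERT_ENABLE));  /* bit clear -> 1 */
    printf("%d\n", is_enabled(0x04, 2, INVERT_ENABLE));  /* bit set   -> 0 */
    return 0;
}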
- */ -int omap2_clk_enable_autoidle_all(void) -{ - struct clk_hw_omap *c; - - list_for_each_entry(c, &clk_hw_omap_clocks, node) - if (c->ops && c->ops->allow_idle) - c->ops->allow_idle(c); - - of_ti_clk_allow_autoidle_all(); - - return 0; -} - -/** - * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that - * support it - * - * Disable clock autoidle on all OMAP clocks that have allow_idle - * function pointers associated with them. This function is intended - * to be temporary until support for this is added to the common clock - * code. Returns 0. - */ -int omap2_clk_disable_autoidle_all(void) -{ - struct clk_hw_omap *c; - - list_for_each_entry(c, &clk_hw_omap_clocks, node) - if (c->ops && c->ops->deny_idle) - c->ops->deny_idle(c); - - of_ti_clk_deny_autoidle_all(); - - return 0; -} - -/** - * omap2_clk_deny_idle - disable autoidle on an OMAP clock - * @clk: struct clk * to disable autoidle for - * - * Disable autoidle on an OMAP clock. - */ -int omap2_clk_deny_idle(struct clk *clk) -{ - struct clk_hw_omap *c; - - if (__clk_get_flags(clk) & CLK_IS_BASIC) - return -EINVAL; - - c = to_clk_hw_omap(__clk_get_hw(clk)); - if (c->ops && c->ops->deny_idle) - c->ops->deny_idle(c); - return 0; -} - -/** - * omap2_clk_allow_idle - enable autoidle on an OMAP clock - * @clk: struct clk * to enable autoidle for - * - * Enable autoidle on an OMAP clock. - */ -int omap2_clk_allow_idle(struct clk *clk) -{ - struct clk_hw_omap *c; - - if (__clk_get_flags(clk) & CLK_IS_BASIC) - return -EINVAL; - - c = to_clk_hw_omap(__clk_get_hw(clk)); - if (c->ops && c->ops->allow_idle) - c->ops->allow_idle(c); - return 0; -} - -/** - * omap2_clk_enable_init_clocks - prepare & enable a list of clocks - * @clk_names: ptr to an array of strings of clock names to enable - * @num_clocks: number of clock names in @clk_names - * - * Prepare and enable a list of clocks, named by @clk_names. No - * return value. XXX Deprecated; only needed until these clocks are - * properly claimed and enabled by the drivers or core code that uses - * them. XXX What code disables & calls clk_put on these clocks? - */ -void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks) -{ - struct clk *init_clk; - int i; - - for (i = 0; i < num_clocks; i++) { - init_clk = clk_get(NULL, clk_names[i]); - if (WARN(IS_ERR(init_clk), "could not find init clock %s\n", - clk_names[i])) - continue; - clk_prepare_enable(init_clk); - } -} - -const struct clk_hw_omap_ops clkhwops_wait = { - .find_idlest = omap2_clk_dflt_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -/** - * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument - * @mpurate_ck_name: clk name of the clock to change rate - * - * Change the ARM MPU clock rate to the rate specified on the command - * line, if one was specified. @mpurate_ck_name should be - * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx. - * XXX Does not handle voltage scaling - on OMAP2xxx this is currently - * handled by the virt_prcm_set clock, but this should be handled by - * the OPP layer. XXX This is intended to be handled by the OPP layer - * code in the near future and should be removed from the clock code. - * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects - * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name - * cannot be found, or 0 upon success. 
- */ -int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name) -{ - struct clk *mpurate_ck; - int r; - - if (!mpurate) - return -EINVAL; - - mpurate_ck = clk_get(NULL, mpurate_ck_name); - if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name)) - return -ENOENT; - - r = clk_set_rate(mpurate_ck, mpurate); - if (r < 0) { - WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n", - mpurate_ck_name, mpurate, r); - clk_put(mpurate_ck); - return -EINVAL; - } - - calibrate_delay(); - clk_put(mpurate_ck); - - return 0; -} - -/** * omap2_clk_print_new_rates - print summary of current clock tree rates * @hfclkin_ck_name: clk name for the off-chip HF oscillator * @core_ck_name: clk name for the on-chip CORE_CLK @@ -801,29 +178,30 @@ void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name, */ void __init ti_clk_init_features(void) { + struct ti_clk_features features = { 0 }; /* Fint setup for DPLLs */ if (cpu_is_omap3430()) { - ti_clk_features.fint_min = OMAP3430_DPLL_FINT_BAND1_MIN; - ti_clk_features.fint_max = OMAP3430_DPLL_FINT_BAND2_MAX; - ti_clk_features.fint_band1_max = OMAP3430_DPLL_FINT_BAND1_MAX; - ti_clk_features.fint_band2_min = OMAP3430_DPLL_FINT_BAND2_MIN; + features.fint_min = OMAP3430_DPLL_FINT_BAND1_MIN; + features.fint_max = OMAP3430_DPLL_FINT_BAND2_MAX; + features.fint_band1_max = OMAP3430_DPLL_FINT_BAND1_MAX; + features.fint_band2_min = OMAP3430_DPLL_FINT_BAND2_MIN; } else { - ti_clk_features.fint_min = OMAP3PLUS_DPLL_FINT_MIN; - ti_clk_features.fint_max = OMAP3PLUS_DPLL_FINT_MAX; + features.fint_min = OMAP3PLUS_DPLL_FINT_MIN; + features.fint_max = OMAP3PLUS_DPLL_FINT_MAX; } /* Bypass value setup for DPLLs */ if (cpu_is_omap24xx()) { - ti_clk_features.dpll_bypass_vals |= + features.dpll_bypass_vals |= (1 << OMAP2XXX_EN_DPLL_LPBYPASS) | (1 << OMAP2XXX_EN_DPLL_FRBYPASS); } else if (cpu_is_omap34xx()) { - ti_clk_features.dpll_bypass_vals |= + features.dpll_bypass_vals |= (1 << OMAP3XXX_EN_DPLL_LPBYPASS) | (1 << OMAP3XXX_EN_DPLL_FRBYPASS); } else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx() || soc_is_omap54xx() || soc_is_dra7xx()) { - ti_clk_features.dpll_bypass_vals |= + features.dpll_bypass_vals |= (1 << OMAP4XXX_EN_DPLL_LPBYPASS) | (1 << OMAP4XXX_EN_DPLL_FRBYPASS) | (1 << OMAP4XXX_EN_DPLL_MNBYPASS); @@ -831,7 +209,7 @@ void __init ti_clk_init_features(void) /* Jitter correction only available on OMAP343X */ if (cpu_is_omap343x()) - ti_clk_features.flags |= TI_CLK_DPLL_HAS_FREQSEL; + features.flags |= TI_CLK_DPLL_HAS_FREQSEL; /* Idlest value for interface clocks. * 24xx uses 0 to indicate not ready, and 1 to indicate ready. @@ -839,11 +217,13 @@ void __init ti_clk_init_features(void) * AM35xx uses both, depending on the module. 
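ti_clk_init_features() packs the SoC's valid EN_DPLL bypass codes into the dpll_bypass_vals bitmask, so later code can test a read field value with a single AND. A user-space sketch, assuming OMAP3-style field values 0x5/0x6/0x7 (check the cm-regbits headers for the real definitions):

#include <stdio.h>

/* Assumed EN_DPLL field values for the OMAP3 case. */
#define EN_DPLL_LPBYPASS 0x5
#define EN_DPLL_FRBYPASS 0x6
#define EN_DPLL_LOCKED   0x7

int main(void)
{
    /* One bit per bypass code, exactly like dpll_bypass_vals (a u8). */
    unsigned char bypass_vals = (1 << EN_DPLL_LPBYPASS) |
                                (1 << EN_DPLL_FRBYPASS);

    for (int v = 4; v <= 7; v++)
        printf("EN_DPLL=0x%x -> %s\n", v,
               (bypass_vals & (1 << v)) ? "bypass" : "not bypass");
    return 0;
}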
*/ if (cpu_is_omap24xx()) - ti_clk_features.cm_idlest_val = OMAP24XX_CM_IDLEST_VAL; + features.cm_idlest_val = OMAP24XX_CM_IDLEST_VAL; else if (cpu_is_omap34xx()) - ti_clk_features.cm_idlest_val = OMAP34XX_CM_IDLEST_VAL; + features.cm_idlest_val = OMAP34XX_CM_IDLEST_VAL; /* On OMAP3430 ES1.0, DPLL4 can't be re-programmed */ if (omap_rev() == OMAP3430_REV_ES1_0) - ti_clk_features.flags |= TI_CLK_DPLL4_DENY_REPROGRAM; + features.flags |= TI_CLK_DPLL4_DENY_REPROGRAM; + + ti_clk_setup_features(&features); } diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index 652ed0ab86ec..67da640ba1c7 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -23,90 +23,6 @@ #include <linux/clk-provider.h> #include <linux/clk/ti.h> -struct omap_clk { - u16 cpu; - struct clk_lookup lk; -}; - -#define CLK(dev, con, ck) \ - { \ - .lk = { \ - .dev_id = dev, \ - .con_id = con, \ - .clk = ck, \ - }, \ - } - -struct clockdomain; - -#define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name) \ - static struct clk_core _name##_core = { \ - .name = #_name, \ - .hw = &_name##_hw.hw, \ - .parent_names = _parent_array_name, \ - .num_parents = ARRAY_SIZE(_parent_array_name), \ - .ops = &_clkops_name, \ - }; \ - static struct clk _name = { \ - .core = &_name##_core, \ - }; - -#define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name, \ - _clkops_name, _flags) \ - static struct clk_core _name##_core = { \ - .name = #_name, \ - .hw = &_name##_hw.hw, \ - .parent_names = _parent_array_name, \ - .num_parents = ARRAY_SIZE(_parent_array_name), \ - .ops = &_clkops_name, \ - .flags = _flags, \ - }; \ - static struct clk _name = { \ - .core = &_name##_core, \ - }; - -#define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name) \ - static struct clk_hw_omap _name##_hw = { \ - .hw = { \ - .clk = &_name, \ - }, \ - .clkdm_name = _clkdm_name, \ - }; - -#define DEFINE_CLK_OMAP_MUX(_name, _clkdm_name, _clksel, \ - _clksel_reg, _clksel_mask, \ - _parent_names, _ops) \ - static struct clk _name; \ - static struct clk_hw_omap _name##_hw = { \ - .hw = { \ - .clk = &_name, \ - }, \ - .clksel = _clksel, \ - .clksel_reg = _clksel_reg, \ - .clksel_mask = _clksel_mask, \ - .clkdm_name = _clkdm_name, \ - }; \ - DEFINE_STRUCT_CLK(_name, _parent_names, _ops); - -#define DEFINE_CLK_OMAP_MUX_GATE(_name, _clkdm_name, _clksel, \ - _clksel_reg, _clksel_mask, \ - _enable_reg, _enable_bit, \ - _hwops, _parent_names, _ops) \ - static struct clk _name; \ - static struct clk_hw_omap _name##_hw = { \ - .hw = { \ - .clk = &_name, \ - }, \ - .ops = _hwops, \ - .enable_reg = _enable_reg, \ - .enable_bit = _enable_bit, \ - .clksel = _clksel, \ - .clksel_reg = _clksel_reg, \ - .clksel_mask = _clksel_mask, \ - .clkdm_name = _clkdm_name, \ - }; \ - DEFINE_STRUCT_CLK(_name, _parent_names, _ops); - /* struct clksel_rate.flags possibilities */ #define RATE_IN_242X (1 << 0) #define RATE_IN_243X (1 << 1) @@ -127,38 +43,6 @@ struct clockdomain; /* RATE_IN_3430ES2PLUS_36XX includes 34xx/35xx with ES >=2, and all 36xx/37xx */ #define RATE_IN_3430ES2PLUS_36XX (RATE_IN_3430ES2PLUS | RATE_IN_36XX) - -/** - * struct clksel_rate - register bitfield values corresponding to clk divisors - * @val: register bitfield value (shifted to bit 0) - * @div: clock divisor corresponding to @val - * @flags: (see "struct clksel_rate.flags possibilities" above) - * - * @val should match the value of a read from struct clk.clksel_reg - * AND'ed with struct clk.clksel_mask, shifted right to bit 0. 
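The struct clksel_rate kernel-doc above describes the lookup: mask and shift the clksel field out of the register, find the matching .val in the rate table, and divide the parent rate by .div. A self-contained sketch with an invented table, mask and register value:

#include <stdio.h>

/* Minimal stand-in for the removed struct clksel_rate; the table,
 * mask and register snapshot below are invented, not real OMAP data. */
struct clksel_rate {
    unsigned int val;    /* register field value, shifted to bit 0 */
    unsigned int div;    /* divisor applied to the parent rate; 0 ends the table */
};

static const struct clksel_rate example_rates[] = {
    { .val = 1, .div = 1 },
    { .val = 2, .div = 2 },
    { .val = 4, .div = 4 },
    { .div = 0 },
};

int main(void)
{
    unsigned long parent_rate = 96000000;    /* 96 MHz parent */
    unsigned int clksel_mask = 0x38;         /* assumed field in bits [5:3] */
    unsigned int reg = 0x10;                 /* pretend CM_CLKSEL read */

    /* AND with the mask, shift right so the value starts at bit 0. */
    unsigned int field = (reg & clksel_mask) >> 3;

    for (const struct clksel_rate *r = example_rates; r->div; r++)
        if (r->val == field)
            printf("div %u -> %lu Hz\n", r->div, parent_rate / r->div);
    return 0;
}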
- * - * @div is the divisor that should be applied to the parent clock's rate - * to produce the current clock's rate. - */ -struct clksel_rate { - u32 val; - u8 div; - u16 flags; -}; - -/** - * struct clksel - available parent clocks, and a pointer to their divisors - * @parent: struct clk * to a possible parent clock - * @rates: available divisors for this parent clock - * - * A struct clksel is always associated with one or more struct clks - * and one or more struct clksel_rates. - */ -struct clksel { - struct clk *parent; - const struct clksel_rate *rates; -}; - /* CM_CLKSEL2_PLL.CORE_CLK_SRC bits (2XXX) */ #define CORE_CLK_SRC_32K 0x0 #define CORE_CLK_SRC_DPLL 0x1 @@ -180,105 +64,18 @@ struct clksel { #define OMAP4XXX_EN_DPLL_FRBYPASS 0x6 #define OMAP4XXX_EN_DPLL_LOCKED 0x7 -u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk); -void omap3_dpll_allow_idle(struct clk_hw_omap *clk); -void omap3_dpll_deny_idle(struct clk_hw_omap *clk); -void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk); -void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk); - -void __init omap2_clk_disable_clkdm_control(void); - -/* clkt_clksel.c public functions */ -u32 omap2_clksel_round_rate_div(struct clk_hw_omap *clk, - unsigned long target_rate, - u32 *new_div); -u8 omap2_clksel_find_parent_index(struct clk_hw *hw); -unsigned long omap2_clksel_recalc(struct clk_hw *hw, unsigned long parent_rate); -long omap2_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate, - unsigned long *parent_rate); -int omap2_clksel_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate); -int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val); - -/* clkt_iclk.c public functions */ -extern void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk); -extern void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk); - -unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk); - -void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, - u8 *other_bit); -void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, u8 *idlest_val); -int omap2_clk_enable_autoidle_all(void); -int omap2_clk_allow_idle(struct clk *clk); -int omap2_clk_deny_idle(struct clk *clk); -int omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name); void omap2_clk_print_new_rates(const char *hfclkin_ck_name, const char *core_ck_name, const char *mpu_ck_name); -u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg); -void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg); - extern u16 cpu_mask; -/* - * Clock features setup. Used instead of CPU type checks. 
- */ -struct ti_clk_features { - u32 flags; - long fint_min; - long fint_max; - long fint_band1_max; - long fint_band2_min; - u8 dpll_bypass_vals; - u8 cm_idlest_val; -}; - -#define TI_CLK_DPLL_HAS_FREQSEL (1 << 0) -#define TI_CLK_DPLL4_DENY_REPROGRAM (1 << 1) - -extern struct ti_clk_features ti_clk_features; - extern const struct clkops clkops_omap2_dflt_wait; extern const struct clkops clkops_omap2_dflt; extern struct clk_functions omap2_clk_functions; -extern const struct clksel_rate gpt_32k_rates[]; -extern const struct clksel_rate gpt_sys_rates[]; -extern const struct clksel_rate gfx_l3_rates[]; -extern const struct clksel_rate dsp_ick_rates[]; - -extern const struct clk_hw_omap_ops clkhwops_iclk_wait; -extern const struct clk_hw_omap_ops clkhwops_wait; -extern const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait; -extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait; -extern const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait; -extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait; -extern const struct clk_hw_omap_ops clkhwops_apll54; -extern const struct clk_hw_omap_ops clkhwops_apll96; - -/* clksel_rate blocks shared between OMAP44xx and AM33xx */ -extern const struct clksel_rate div_1_0_rates[]; -extern const struct clksel_rate div3_1to4_rates[]; -extern const struct clksel_rate div_1_1_rates[]; -extern const struct clksel_rate div_1_2_rates[]; -extern const struct clksel_rate div_1_3_rates[]; -extern const struct clksel_rate div_1_4_rates[]; -extern const struct clksel_rate div31_1to31_rates[]; - -extern int omap2_clkops_enable_clkdm(struct clk_hw *hw); -extern void omap2_clkops_disable_clkdm(struct clk_hw *hw); - -struct regmap; - -int __init omap2_clk_provider_init(struct device_node *np, int index, - struct regmap *syscon, void __iomem *mem); -void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem); +int __init omap2_clk_setup_ll_ops(void); void __init ti_clk_init_features(void); #endif diff --git a/arch/arm/mach-omap2/clock2430.c b/arch/arm/mach-omap2/clock2430.c deleted file mode 100644 index cef0c8d1de52..000000000000 --- a/arch/arm/mach-omap2/clock2430.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * clock2430.c - OMAP2430-specific clock integration code - * - * Copyright (C) 2005-2008 Texas Instruments, Inc. - * Copyright (C) 2004-2010 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, - * Gordon McNutt and RidgeRun, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "soc.h" -#include "iomap.h" -#include "clock.h" -#include "clock2xxx.h" -#include "cm2xxx.h" -#include "cm-regbits-24xx.h" - -/** - * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the - * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function - * passes back the correct CM_IDLEST register address for I2CHS - * modules. No return value. 
- */ -static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - *idlest_reg = OMAP2430_CM_REGADDR(CORE_MOD, CM_IDLEST); - *idlest_bit = clk->enable_bit; - *idlest_val = OMAP24XX_CM_IDLEST_VAL; -} - -/* 2430 I2CHS has non-standard IDLEST register */ -const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait = { - .find_idlest = omap2430_clk_i2chs_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; diff --git a/arch/arm/mach-omap2/clock2xxx.c b/arch/arm/mach-omap2/clock2xxx.c deleted file mode 100644 index b870f6a9e283..000000000000 --- a/arch/arm/mach-omap2/clock2xxx.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * clock2xxx.c - OMAP2xxx-specific clock integration code - * - * Copyright (C) 2005-2008 Texas Instruments, Inc. - * Copyright (C) 2004-2010 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, - * Gordon McNutt and RidgeRun, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "soc.h" -#include "clock.h" -#include "clock2xxx.h" -#include "cm.h" -#include "cm-regbits-24xx.h" - -struct clk_hw *dclk_hw; -/* - * Omap24xx specific clock functions - */ - -/* - * Switch the MPU rate if specified on cmdline. We cannot do this - * early until cmdline is parsed. XXX This should be removed from the - * clock code and handled by the OPP layer code in the near future. - */ -static int __init omap2xxx_clk_arch_init(void) -{ - int ret; - - if (!cpu_is_omap24xx()) - return 0; - - ret = omap2_clk_switch_mpurate_at_boot("virt_prcm_set"); - if (!ret) - omap2_clk_print_new_rates("sys_ck", "dpll_ck", "mpu_ck"); - - return ret; -} - -omap_arch_initcall(omap2xxx_clk_arch_init); - - diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c deleted file mode 100644 index 4596468e50ab..000000000000 --- a/arch/arm/mach-omap2/clock34xx.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * OMAP3-specific clock framework functions - * - * Copyright (C) 2007-2008 Texas Instruments, Inc. - * Copyright (C) 2007-2011 Nokia Corporation - * - * Paul Walmsley - * Jouni Högander - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu, - * Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "clock.h" -#include "clock34xx.h" -#include "cm3xxx.h" -#include "cm-regbits-34xx.h" - -/** - * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift - * from the CM_{I,F}CLKEN bit. Pass back the correct info via - * @idlest_reg and @idlest_bit. No return value. 
- */ -static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT; - *idlest_val = OMAP34XX_CM_IDLEST_VAL; -} -const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait = { - .find_idlest = omap3430es2_clk_ssi_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = omap3430es2_clk_ssi_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -/** - * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * Some OMAP modules on OMAP3 ES2+ chips have both initiator and - * target IDLEST bits. For our purposes, we are concerned with the - * target IDLEST bits, which exist at a different bit position than - * the *CLKEN bit position for these modules (DSS and USBHOST) (The - * default find_idlest code assumes that they are at the same - * position.) No return value. - */ -static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - /* USBHOST_IDLE has same shift */ - *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT; - *idlest_val = OMAP34XX_CM_IDLEST_VAL; -} - -const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = { - .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -/** - * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different - * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via - * @idlest_reg and @idlest_bit. No return value. 
- */ -static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT; - *idlest_val = OMAP34XX_CM_IDLEST_VAL; -} - -const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = omap3430es2_clk_hsotgusb_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; - -const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait = { - .find_idlest = omap3430es2_clk_hsotgusb_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; diff --git a/arch/arm/mach-omap2/clock34xx.h b/arch/arm/mach-omap2/clock34xx.h deleted file mode 100644 index 084ba71b2b31..000000000000 --- a/arch/arm/mach-omap2/clock34xx.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * OMAP34xx clock function prototypes and macros - * - * Copyright (C) 2007-2010 Texas Instruments, Inc. - * Copyright (C) 2007-2011 Nokia Corporation - */ - -#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK34XX_H -#define __ARCH_ARM_MACH_OMAP2_CLOCK34XX_H - -extern const struct clkops clkops_omap3430es2_ssi_wait; -extern const struct clkops clkops_omap3430es2_iclk_ssi_wait; -extern const struct clkops clkops_omap3430es2_hsotgusb_wait; -extern const struct clkops clkops_omap3430es2_iclk_hsotgusb_wait; -extern const struct clkops clkops_omap3430es2_dss_usbhost_wait; -extern const struct clkops clkops_omap3430es2_iclk_dss_usbhost_wait; - -#endif diff --git a/arch/arm/mach-omap2/clock3517.c b/arch/arm/mach-omap2/clock3517.c deleted file mode 100644 index 4d79ae2c0241..000000000000 --- a/arch/arm/mach-omap2/clock3517.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * OMAP3517/3505-specific clock framework functions - * - * Copyright (C) 2010 Texas Instruments, Inc. - * Copyright (C) 2011 Nokia Corporation - * - * Ranjith Lohithakshan - * Paul Walmsley - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu, - * Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "clock.h" -#include "clock3517.h" -#include "cm3xxx.h" -#include "cm-regbits-34xx.h" - -/* - * In AM35xx IPSS, the {ICK,FCK} enable bits for modules are exported - * in the same register at a bit offset of 0x8. The EN_ACK for ICK is - * at an offset of 4 from ICK enable bit. - */ -#define AM35XX_IPSS_ICK_MASK 0xF -#define AM35XX_IPSS_ICK_EN_ACK_OFFSET 0x4 -#define AM35XX_IPSS_ICK_FCK_OFFSET 0x8 -#define AM35XX_IPSS_CLK_IDLEST_VAL 0 - -/** - * am35xx_clk_find_idlest - return clock ACK info for AM35XX IPSS - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * The interface clocks on AM35xx IPSS reflects the clock idle status - * in the enable register itsel at a bit offset of 4 from the enable - * bit. A value of 1 indicates that clock is enabled. 
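Per the comment above, an AM35xx IPSS interface clock acknowledges its enable in the same register, four bits above the enable bit, with 1 meaning the clock is running. A trivial stand-alone illustration using made-up register contents:

#include <stdio.h>

#define IPSS_ICK_EN_ACK_OFFSET 4   /* ack bit sits 4 above the enable bit */

int main(void)
{
    unsigned int reg = 0x00000022;   /* pretend enable register contents */
    unsigned int enable_bit = 1;     /* pretend module enable bit */
    unsigned int ack_bit = enable_bit + IPSS_ICK_EN_ACK_OFFSET;

    printf("enabled=%u acked=%u\n",
           (reg >> enable_bit) & 1u, (reg >> ack_bit) & 1u);
    return 0;
}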
- */ -static void am35xx_clk_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - *idlest_reg = (__force void __iomem *)(clk->enable_reg); - *idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET; - *idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL; -} - -/** - * am35xx_clk_find_companion - find companion clock to @clk - * @clk: struct clk * to find the companion clock of - * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in - * @other_bit: u8 ** to return the companion clock bit shift in - * - * Some clocks don't have companion clocks. For example, modules with - * only an interface clock (such as HECC) don't have a companion - * clock. Right now, this code relies on the hardware exporting a bit - * in the correct companion register that indicates that the - * nonexistent 'companion clock' is active. Future patches will - * associate this type of code with per-module data structures to - * avoid this issue, and remove the casts. No return value. - */ -static void am35xx_clk_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, - u8 *other_bit) -{ - *other_reg = (__force void __iomem *)(clk->enable_reg); - if (clk->enable_bit & AM35XX_IPSS_ICK_MASK) - *other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET; - else - *other_bit = clk->enable_bit - AM35XX_IPSS_ICK_FCK_OFFSET; -} -const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = { - .find_idlest = am35xx_clk_find_idlest, - .find_companion = am35xx_clk_find_companion, -}; - -/** - * am35xx_clk_ipss_find_idlest - return CM_IDLEST info for IPSS - * @clk: struct clk * being enabled - * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into - * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into - * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator - * - * The IPSS target CM_IDLEST bit is at a different shift from the - * CM_{I,F}CLKEN bit. Pass back the correct info via @idlest_reg - * and @idlest_bit. No return value. - */ -static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) -{ - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; - *idlest_bit = AM35XX_ST_IPSS_SHIFT; - *idlest_val = OMAP34XX_CM_IDLEST_VAL; -} - -const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait = { - .allow_idle = omap2_clkt_iclk_allow_idle, - .deny_idle = omap2_clkt_iclk_deny_idle, - .find_idlest = am35xx_clk_ipss_find_idlest, - .find_companion = omap2_clk_dflt_find_companion, -}; diff --git a/arch/arm/mach-omap2/clock3517.h b/arch/arm/mach-omap2/clock3517.h deleted file mode 100644 index ca5e5a64c2e2..000000000000 --- a/arch/arm/mach-omap2/clock3517.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * OMAP3517/3505 clock function prototypes and macros - * - * Copyright (C) 2010 Texas Instruments, Inc. - * Copyright (C) 2010 Nokia Corporation - */ - -#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK3517_H -#define __ARCH_ARM_MACH_OMAP2_CLOCK3517_H - -extern const struct clkops clkops_am35xx_ipss_module_wait; -extern const struct clkops clkops_am35xx_ipss_wait; - -#endif diff --git a/arch/arm/mach-omap2/clock36xx.c b/arch/arm/mach-omap2/clock36xx.c deleted file mode 100644 index 91ccb962e09e..000000000000 --- a/arch/arm/mach-omap2/clock36xx.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * OMAP36xx-specific clkops - * - * Copyright (C) 2010 Texas Instruments, Inc. 
- * Copyright (C) 2010 Nokia Corporation - * - * Mike Turquette - * Vijaykumar GN - * Paul Walmsley - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu, - * Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/clk-provider.h> -#include <linux/io.h> - -#include "clock.h" -#include "clock36xx.h" -#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) - -/** - * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering - * from HSDivider PWRDN problem Implements Errata ID: i556. - * @clk: DPLL output struct clk - * - * 3630 only: dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck, - * dpll4_m5_ck & dpll4_m6_ck dividers gets loaded with reset - * valueafter their respective PWRDN bits are set. Any dummy write - * (Any other value different from the Read value) to the - * corresponding CM_CLKSEL register will refresh the dividers. - */ -int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk) -{ - struct clk_divider *parent; - struct clk_hw *parent_hw; - u32 dummy_v, orig_v; - struct clk_hw_omap *omap_clk = to_clk_hw_omap(clk); - int ret; - - /* Clear PWRDN bit of HSDIVIDER */ - ret = omap2_dflt_clk_enable(clk); - - parent_hw = __clk_get_hw(__clk_get_parent(clk->clk)); - parent = to_clk_divider(parent_hw); - - /* Restore the dividers */ - if (!ret) { - orig_v = omap2_clk_readl(omap_clk, parent->reg); - dummy_v = orig_v; - - /* Write any other value different from the Read value */ - dummy_v ^= (1 << parent->shift); - omap2_clk_writel(dummy_v, omap_clk, parent->reg); - - /* Write the original divider */ - omap2_clk_writel(orig_v, omap_clk, parent->reg); - } - - return ret; -} diff --git a/arch/arm/mach-omap2/clock36xx.h b/arch/arm/mach-omap2/clock36xx.h deleted file mode 100644 index 945bb7f083e9..000000000000 --- a/arch/arm/mach-omap2/clock36xx.h +++ /dev/null @@ -1,13 +0,0 @@ -/* - * OMAP36xx clock function prototypes and macros - * - * Copyright (C) 2010 Texas Instruments, Inc. - * Copyright (C) 2010 Nokia Corporation - */ - -#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK36XX_H -#define __ARCH_ARM_MACH_OMAP2_CLOCK36XX_H - -extern int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *hw); - -#endif diff --git a/arch/arm/mach-omap2/clock3xxx.c b/arch/arm/mach-omap2/clock3xxx.c deleted file mode 100644 index a9e86db5daf9..000000000000 --- a/arch/arm/mach-omap2/clock3xxx.c +++ /dev/null @@ -1,135 +0,0 @@ -/* - * OMAP3-specific clock framework functions - * - * Copyright (C) 2007-2008 Texas Instruments, Inc. - * Copyright (C) 2007-2010 Nokia Corporation - * - * Paul Walmsley - * Jouni Högander - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include "soc.h" -#include "clock.h" -#include "clock3xxx.h" -#include "prm2xxx_3xxx.h" -#include "prm-regbits-34xx.h" -#include "cm2xxx_3xxx.h" -#include "cm-regbits-34xx.h" - -/* - * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks - * that are sourced by DPLL5, and both of these require this clock - * to be at 120 MHz for proper operation. - */ -#define DPLL5_FREQ_FOR_USBHOST 120000000 - -/* needed by omap3_core_dpll_m2_set_rate() */ -struct clk *sdrc_ick_p, *arm_fck_p; - -/** - * omap3_dpll4_set_rate - set rate for omap3 per-dpll - * @hw: clock to change - * @rate: target rate for clock - * @parent_rate: rate of the parent clock - * - * Check if the current SoC supports the per-dpll reprogram operation - * or not, and then do the rate change if supported. Returns -EINVAL - * if not supported, 0 for success, and potential error codes from the - * clock rate change. - */ -int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - /* - * According to the 12-5 CDP code from TI, "Limitation 2.5" - * on 3430ES1 prevents us from changing DPLL multipliers or dividers - * on DPLL4. - */ - if (ti_clk_features.flags & TI_CLK_DPLL4_DENY_REPROGRAM) { - pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n"); - return -EINVAL; - } - - return omap3_noncore_dpll_set_rate(hw, rate, parent_rate); -} - -/** - * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll - * @hw: clock to change - * @rate: target rate for clock - * @parent_rate: rate of the parent clock - * @index: parent index, 0 - reference clock, 1 - bypass clock - * - * Check if the current SoC support the per-dpll reprogram operation - * or not, and then do the rate + parent change if supported. Returns - * -EINVAL if not supported, 0 for success, and potential error codes - * from the clock rate change. - */ -int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate, u8 index) -{ - if (ti_clk_features.flags & TI_CLK_DPLL4_DENY_REPROGRAM) { - pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n"); - return -EINVAL; - } - - return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate, - index); -} - -void __init omap3_clk_lock_dpll5(void) -{ - struct clk *dpll5_clk; - struct clk *dpll5_m2_clk; - - dpll5_clk = clk_get(NULL, "dpll5_ck"); - clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST); - clk_prepare_enable(dpll5_clk); - - /* Program dpll5_m2_clk divider for no division */ - dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck"); - clk_prepare_enable(dpll5_m2_clk); - clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST); - - clk_disable_unprepare(dpll5_m2_clk); - clk_disable_unprepare(dpll5_clk); - return; -} - -/* Common clock code */ - -/* - * Switch the MPU rate if specified on cmdline. We cannot do this - * early until cmdline is parsed. XXX This should be removed from the - * clock code and handled by the OPP layer code in the near future. 
- */ -static int __init omap3xxx_clk_arch_init(void) -{ - int ret; - - if (!cpu_is_omap34xx()) - return 0; - - ret = omap2_clk_switch_mpurate_at_boot("dpll1_ck"); - if (!ret) - omap2_clk_print_new_rates("osc_sys_ck", "core_ck", "arm_fck"); - - return ret; -} - -omap_arch_initcall(omap3xxx_clk_arch_init); - - diff --git a/arch/arm/mach-omap2/clock44xx.h b/arch/arm/mach-omap2/clock44xx.h deleted file mode 100644 index 287a46f78d97..000000000000 --- a/arch/arm/mach-omap2/clock44xx.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * OMAP4 clock function prototypes and macros - * - * Copyright (C) 2009 Texas Instruments, Inc. - * Copyright (C) 2010 Nokia Corporation - */ - -#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H -#define __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H - -/* - * OMAP4430_REGM4XEN_MULT: If the CM_CLKMODE_DPLL_ABE.DPLL_REGM4XEN bit is - * set, then the DPLL's lock frequency is multiplied by 4 (OMAP4430 TRM - * vV Section 3.6.3.3.1 "DPLLs Output Clocks Parameters") - */ -#define OMAP4430_REGM4XEN_MULT 4 - -int omap4xxx_clk_init(void); - -#endif diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c deleted file mode 100644 index 61b60dfb14ce..000000000000 --- a/arch/arm/mach-omap2/clock_common_data.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * linux/arch/arm/mach-omap2/clock_common_data.c - * - * Copyright (C) 2005-2009 Texas Instruments, Inc. - * Copyright (C) 2004-2009 Nokia Corporation - * - * Contacts: - * Richard Woodruff <r-woodruff2@ti.com> - * Paul Walmsley - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This file contains clock data that is common to both the OMAP2xxx and - * OMAP3xxx clock definition files. 
- */ - -#include "clock.h" - -/* clksel_rate data common to 24xx/343x */ -const struct clksel_rate gpt_32k_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_24XX | RATE_IN_3XXX }, - { .div = 0 } -}; - -const struct clksel_rate gpt_sys_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX }, - { .div = 0 } -}; - -const struct clksel_rate gfx_l3_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX }, - { .div = 2, .val = 2, .flags = RATE_IN_24XX | RATE_IN_3XXX }, - { .div = 3, .val = 3, .flags = RATE_IN_243X | RATE_IN_3XXX }, - { .div = 4, .val = 4, .flags = RATE_IN_243X | RATE_IN_3XXX }, - { .div = 0 } -}; - -const struct clksel_rate dsp_ick_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_24XX }, - { .div = 2, .val = 2, .flags = RATE_IN_24XX }, - { .div = 3, .val = 3, .flags = RATE_IN_243X }, - { .div = 0 }, -}; - - -/* clksel_rate blocks shared between OMAP44xx and AM33xx */ - -const struct clksel_rate div_1_0_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div3_1to4_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_4430 }, - { .div = 2, .val = 1, .flags = RATE_IN_4430 }, - { .div = 4, .val = 2, .flags = RATE_IN_4430 }, - { .div = 0 }, -}; - -const struct clksel_rate div_1_1_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div_1_2_rates[] = { - { .div = 1, .val = 2, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div_1_3_rates[] = { - { .div = 1, .val = 3, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div_1_4_rates[] = { - { .div = 1, .val = 4, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; - -const struct clksel_rate div31_1to31_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 2, .val = 2, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 3, .val = 3, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 4, .val = 4, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 5, .val = 5, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 6, .val = 6, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 7, .val = 7, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 8, .val = 8, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 9, .val = 9, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 10, .val = 10, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 11, .val = 11, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 12, .val = 12, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 13, .val = 13, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 14, .val = 14, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 15, .val = 15, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 16, .val = 16, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 17, .val = 17, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 18, .val = 18, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 19, .val = 19, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 20, .val = 20, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 21, .val = 21, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 22, .val = 22, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 23, .val = 23, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 24, .val = 24, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 25, .val = 25, .flags = RATE_IN_4430 | RATE_IN_AM33XX 
}, - { .div = 26, .val = 26, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 27, .val = 27, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 28, .val = 28, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 29, .val = 29, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 30, .val = 30, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, - { .div = 0 }, -}; diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h index 77bab5fb6814..2c398ce1a0f2 100644 --- a/arch/arm/mach-omap2/clockdomain.h +++ b/arch/arm/mach-omap2/clockdomain.h @@ -216,7 +216,8 @@ extern void __init omap242x_clockdomains_init(void); extern void __init omap243x_clockdomains_init(void); extern void __init omap3xxx_clockdomains_init(void); extern void __init am33xx_clockdomains_init(void); -extern void __init ti81xx_clockdomains_init(void); +extern void __init ti814x_clockdomains_init(void); +extern void __init ti816x_clockdomains_init(void); extern void __init omap44xx_clockdomains_init(void); extern void __init omap54xx_clockdomains_init(void); extern void __init dra7xx_clockdomains_init(void); diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c index 57d5df0c1fbd..7581e036bda6 100644 --- a/arch/arm/mach-omap2/clockdomains7xx_data.c +++ b/arch/arm/mach-omap2/clockdomains7xx_data.c @@ -331,7 +331,7 @@ static struct clockdomain l4per2_7xx_clkdm = { .dep_bit = DRA7XX_L4PER2_STATDEP_SHIFT, .wkdep_srcs = l4per2_wkup_sleep_deps, .sleepdep_srcs = l4per2_wkup_sleep_deps, - .flags = CLKDM_CAN_HWSUP_SWSUP, + .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain mpu0_7xx_clkdm = { diff --git a/arch/arm/mach-omap2/clockdomains81xx_data.c b/arch/arm/mach-omap2/clockdomains81xx_data.c index ce2a82001d0d..53442c86a820 100644 --- a/arch/arm/mach-omap2/clockdomains81xx_data.c +++ b/arch/arm/mach-omap2/clockdomains81xx_data.c @@ -165,7 +165,24 @@ static struct clockdomain default_l3_slow_816x_clkdm = { .flags = CLKDM_CAN_SWSUP, }; -static struct clockdomain *clockdomains_ti81xx[] __initdata = { +static struct clockdomain *clockdomains_ti814x[] __initdata = { + &alwon_l3_slow_81xx_clkdm, + &alwon_l3_med_81xx_clkdm, + &alwon_l3_fast_81xx_clkdm, + &alwon_ethernet_81xx_clkdm, + &mmu_81xx_clkdm, + &mmu_cfg_81xx_clkdm, + NULL, +}; + +void __init ti814x_clockdomains_init(void) +{ + clkdm_register_platform_funcs(&am33xx_clkdm_operations); + clkdm_register_clkdms(clockdomains_ti814x); + clkdm_complete_init(); +} + +static struct clockdomain *clockdomains_ti816x[] __initdata = { &alwon_mpu_816x_clkdm, &alwon_l3_slow_81xx_clkdm, &alwon_l3_med_81xx_clkdm, @@ -185,10 +202,10 @@ static struct clockdomain *clockdomains_ti81xx[] __initdata = { NULL, }; -void __init ti81xx_clockdomains_init(void) +void __init ti816x_clockdomains_init(void) { clkdm_register_platform_funcs(&am33xx_clkdm_operations); - clkdm_register_clkdms(clockdomains_ti81xx); + clkdm_register_clkdms(clockdomains_ti816x); clkdm_complete_init(); } #endif diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index cf3cf22ecd42..749d50bb4ca5 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -198,6 +198,7 @@ void __init omap3_map_io(void); void __init am33xx_map_io(void); void __init omap4_map_io(void); void __init omap5_map_io(void); +void __init dra7xx_map_io(void); void __init ti81xx_map_io(void); /** diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c index 
f008930277ed..cf5855174c93 100644 --- a/arch/arm/mach-omap2/control.c +++ b/arch/arm/mach-omap2/control.c @@ -652,6 +652,7 @@ static const struct of_device_id omap_scrm_dt_match_table[] = { { .compatible = "ti,am4-scm", .data = &ctrl_data }, { .compatible = "ti,omap2-scm", .data = &omap2_ctrl_data }, { .compatible = "ti,omap3-scm", .data = &omap2_ctrl_data }, + { .compatible = "ti,dm814-scm", .data = &ctrl_data }, { .compatible = "ti,dm816-scrm", .data = &ctrl_data }, { .compatible = "ti,omap4-scm-core", .data = &ctrl_data }, { .compatible = "ti,omap5-scm-core", .data = &ctrl_data }, diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c deleted file mode 100644 index 44e57ec225d4..000000000000 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ /dev/null @@ -1,818 +0,0 @@ -/* - * OMAP3/4 - specific DPLL control functions - * - * Copyright (C) 2009-2010 Texas Instruments, Inc. - * Copyright (C) 2009-2010 Nokia Corporation - * - * Written by Paul Walmsley - * Testing and integration fixes by Jouni Högander - * - * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth - * Menon - * - * Parts of this code are based on code written by - * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/list.h> -#include <linux/errno.h> -#include <linux/delay.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/bitops.h> -#include <linux/clkdev.h> - -#include "clockdomain.h" -#include "clock.h" - -/* CM_AUTOIDLE_PLL*.AUTO_* bit values */ -#define DPLL_AUTOIDLE_DISABLE 0x0 -#define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1 - -#define MAX_DPLL_WAIT_TRIES 1000000 - -/* Private functions */ - -/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */ -static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits) -{ - const struct dpll_data *dd; - u32 v; - - dd = clk->dpll_data; - - v = omap2_clk_readl(clk, dd->control_reg); - v &= ~dd->enable_mask; - v |= clken_bits << __ffs(dd->enable_mask); - omap2_clk_writel(v, clk, dd->control_reg); -} - -/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */ -static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state) -{ - const struct dpll_data *dd; - int i = 0; - int ret = -EINVAL; - const char *clk_name; - - dd = clk->dpll_data; - clk_name = __clk_get_name(clk->hw.clk); - - state <<= __ffs(dd->idlest_mask); - - while (((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask) - != state) && i < MAX_DPLL_WAIT_TRIES) { - i++; - udelay(1); - } - - if (i == MAX_DPLL_WAIT_TRIES) { - printk(KERN_ERR "clock: %s failed transition to '%s'\n", - clk_name, (state) ? "locked" : "bypassed"); - } else { - pr_debug("clock: %s transition to '%s' in %d loops\n", - clk_name, (state) ? 
"locked" : "bypassed", i); - - ret = 0; - } - - return ret; -} - -/* From 3430 TRM ES2 4.7.6.2 */ -static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n) -{ - unsigned long fint; - u16 f = 0; - - fint = __clk_get_rate(clk->dpll_data->clk_ref) / n; - - pr_debug("clock: fint is %lu\n", fint); - - if (fint >= 750000 && fint <= 1000000) - f = 0x3; - else if (fint > 1000000 && fint <= 1250000) - f = 0x4; - else if (fint > 1250000 && fint <= 1500000) - f = 0x5; - else if (fint > 1500000 && fint <= 1750000) - f = 0x6; - else if (fint > 1750000 && fint <= 2100000) - f = 0x7; - else if (fint > 7500000 && fint <= 10000000) - f = 0xB; - else if (fint > 10000000 && fint <= 12500000) - f = 0xC; - else if (fint > 12500000 && fint <= 15000000) - f = 0xD; - else if (fint > 15000000 && fint <= 17500000) - f = 0xE; - else if (fint > 17500000 && fint <= 21000000) - f = 0xF; - else - pr_debug("clock: unknown freqsel setting for %d\n", n); - - return f; -} - -/* - * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report - * readiness before returning. Will save and restore the DPLL's - * autoidle state across the enable, per the CDP code. If the DPLL - * locked successfully, return 0; if the DPLL did not lock in the time - * allotted, or DPLL3 was passed in, return -EINVAL. - */ -static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk) -{ - const struct dpll_data *dd; - u8 ai; - u8 state = 1; - int r = 0; - - pr_debug("clock: locking DPLL %s\n", __clk_get_name(clk->hw.clk)); - - dd = clk->dpll_data; - state <<= __ffs(dd->idlest_mask); - - /* Check if already locked */ - if ((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask) == state) - goto done; - - ai = omap3_dpll_autoidle_read(clk); - - if (ai) - omap3_dpll_deny_idle(clk); - - _omap3_dpll_write_clken(clk, DPLL_LOCKED); - - r = _omap3_wait_dpll_status(clk, 1); - - if (ai) - omap3_dpll_allow_idle(clk); - -done: - return r; -} - -/* - * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to enter low-power bypass mode. In - * bypass mode, the DPLL's rate is set equal to its parent clock's - * rate. Waits for the DPLL to report readiness before returning. - * Will save and restore the DPLL's autoidle state across the enable, - * per the CDP code. If the DPLL entered bypass mode successfully, - * return 0; if the DPLL did not enter bypass in the time allotted, or - * DPLL3 was passed in, or the DPLL does not support low-power bypass, - * return -EINVAL. - */ -static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk) -{ - int r; - u8 ai; - - if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) - return -EINVAL; - - pr_debug("clock: configuring DPLL %s for low-power bypass\n", - __clk_get_name(clk->hw.clk)); - - ai = omap3_dpll_autoidle_read(clk); - - _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS); - - r = _omap3_wait_dpll_status(clk, 0); - - if (ai) - omap3_dpll_allow_idle(clk); - - return r; -} - -/* - * _omap3_noncore_dpll_stop - instruct a DPLL to stop - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to enter low-power stop. Will save and - * restore the DPLL's autoidle state across the stop, per the CDP - * code. If DPLL3 was passed in, or the DPLL does not support - * low-power stop, return -EINVAL; otherwise, return 0. 
- */ -static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk) -{ - u8 ai; - - if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP))) - return -EINVAL; - - pr_debug("clock: stopping DPLL %s\n", __clk_get_name(clk->hw.clk)); - - ai = omap3_dpll_autoidle_read(clk); - - _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP); - - if (ai) - omap3_dpll_allow_idle(clk); - - return 0; -} - -/** - * _lookup_dco - Lookup DCO used by j-type DPLL - * @clk: pointer to a DPLL struct clk - * @dco: digital control oscillator selector - * @m: DPLL multiplier to set - * @n: DPLL divider to set - * - * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)" - * - * XXX This code is not needed for 3430/AM35xx; can it be optimized - * out in non-multi-OMAP builds for those chips? - */ -static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n) -{ - unsigned long fint, clkinp; /* watch out for overflow */ - - clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk)); - fint = (clkinp / n) * m; - - if (fint < 1000000000) - *dco = 2; - else - *dco = 4; -} - -/** - * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL - * @clk: pointer to a DPLL struct clk - * @sd_div: target sigma-delta divider - * @m: DPLL multiplier to set - * @n: DPLL divider to set - * - * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)" - * - * XXX This code is not needed for 3430/AM35xx; can it be optimized - * out in non-multi-OMAP builds for those chips? - */ -static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n) -{ - unsigned long clkinp, sd; /* watch out for overflow */ - int mod1, mod2; - - clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk)); - - /* - * target sigma-delta to near 250MHz - * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)] - */ - clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */ - mod1 = (clkinp * m) % (250 * n); - sd = (clkinp * m) / (250 * n); - mod2 = sd % 10; - sd /= 10; - - if (mod1 || mod2) - sd++; - *sd_div = sd; -} - -/* - * _omap3_noncore_dpll_program - set non-core DPLL M,N values directly - * @clk: struct clk * of DPLL to set - * @freqsel: FREQSEL value to set - * - * Program the DPLL with the last M, N values calculated, and wait for - * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success. - */ -static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) -{ - struct dpll_data *dd = clk->dpll_data; - u8 dco, sd_div; - u32 v; - - /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */ - _omap3_noncore_dpll_bypass(clk); - - /* - * Set jitter correction. Jitter correction applicable for OMAP343X - * only since freqsel field is no longer present on other devices. 
- */ - if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) { - v = omap2_clk_readl(clk, dd->control_reg); - v &= ~dd->freqsel_mask; - v |= freqsel << __ffs(dd->freqsel_mask); - omap2_clk_writel(v, clk, dd->control_reg); - } - - /* Set DPLL multiplier, divider */ - v = omap2_clk_readl(clk, dd->mult_div1_reg); - - /* Handle Duty Cycle Correction */ - if (dd->dcc_mask) { - if (dd->last_rounded_rate >= dd->dcc_rate) - v |= dd->dcc_mask; /* Enable DCC */ - else - v &= ~dd->dcc_mask; /* Disable DCC */ - } - - v &= ~(dd->mult_mask | dd->div1_mask); - v |= dd->last_rounded_m << __ffs(dd->mult_mask); - v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask); - - /* Configure dco and sd_div for dplls that have these fields */ - if (dd->dco_mask) { - _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n); - v &= ~(dd->dco_mask); - v |= dco << __ffs(dd->dco_mask); - } - if (dd->sddiv_mask) { - _lookup_sddiv(clk, &sd_div, dd->last_rounded_m, - dd->last_rounded_n); - v &= ~(dd->sddiv_mask); - v |= sd_div << __ffs(dd->sddiv_mask); - } - - omap2_clk_writel(v, clk, dd->mult_div1_reg); - - /* Set 4X multiplier and low-power mode */ - if (dd->m4xen_mask || dd->lpmode_mask) { - v = omap2_clk_readl(clk, dd->control_reg); - - if (dd->m4xen_mask) { - if (dd->last_rounded_m4xen) - v |= dd->m4xen_mask; - else - v &= ~dd->m4xen_mask; - } - - if (dd->lpmode_mask) { - if (dd->last_rounded_lpmode) - v |= dd->lpmode_mask; - else - v &= ~dd->lpmode_mask; - } - - omap2_clk_writel(v, clk, dd->control_reg); - } - - /* We let the clock framework set the other output dividers later */ - - /* REVISIT: Set ramp-up delay? */ - - _omap3_noncore_dpll_lock(clk); - - return 0; -} - -/* Public functions */ - -/** - * omap3_dpll_recalc - recalculate DPLL rate - * @clk: DPLL struct clk - * - * Recalculate and propagate the DPLL rate. - */ -unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - - return omap2_get_dpll_rate(clk); -} - -/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */ - -/** - * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock. - * The choice of modes depends on the DPLL's programmed rate: if it is - * the same as the DPLL's parent clock, it will enter bypass; - * otherwise, it will enter lock. This code will wait for the DPLL to - * indicate readiness before returning, unless the DPLL takes too long - * to enter the target state. Intended to be used as the struct clk's - * enable function. If DPLL3 was passed in, or the DPLL does not - * support low-power stop, or if the DPLL took too long to enter - * bypass or lock, return -EINVAL; otherwise, return 0. 
- */ -int omap3_noncore_dpll_enable(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - int r; - struct dpll_data *dd; - struct clk_hw *parent; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - if (clk->clkdm) { - r = clkdm_clk_enable(clk->clkdm, hw->clk); - if (r) { - WARN(1, - "%s: could not enable %s's clockdomain %s: %d\n", - __func__, __clk_get_name(hw->clk), - clk->clkdm->name, r); - return r; - } - } - - parent = __clk_get_hw(__clk_get_parent(hw->clk)); - - if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) { - WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); - r = _omap3_noncore_dpll_bypass(clk); - } else { - WARN_ON(parent != __clk_get_hw(dd->clk_ref)); - r = _omap3_noncore_dpll_lock(clk); - } - - return r; -} - -/** - * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop - * @clk: pointer to a DPLL struct clk - * - * Instructs a non-CORE DPLL to enter low-power stop. This function is - * intended for use in struct clkops. No return value. - */ -void omap3_noncore_dpll_disable(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - - _omap3_noncore_dpll_stop(clk); - if (clk->clkdm) - clkdm_clk_disable(clk->clkdm, hw->clk); -} - - -/* Non-CORE DPLL rate set code */ - -/** - * omap3_noncore_dpll_determine_rate - determine rate for a DPLL - * @hw: pointer to the clock to determine rate for - * @rate: target rate for the DPLL - * @best_parent_rate: pointer for returning best parent rate - * @best_parent_clk: pointer for returning best parent clock - * - * Determines which DPLL mode to use for reaching a desired target rate. - * Checks whether the DPLL shall be in bypass or locked mode, and if - * locked, calculates the M,N values for the DPLL via round-rate. - * Returns a positive clock rate with success, negative error value - * in failure. - */ -long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - struct dpll_data *dd; - - if (!hw || !rate) - return -EINVAL; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - if (__clk_get_rate(dd->clk_bypass) == rate && - (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { - *best_parent_clk = __clk_get_hw(dd->clk_bypass); - } else { - rate = omap2_dpll_round_rate(hw, rate, best_parent_rate); - *best_parent_clk = __clk_get_hw(dd->clk_ref); - } - - *best_parent_rate = rate; - - return rate; -} - -/** - * omap3_noncore_dpll_set_parent - set parent for a DPLL clock - * @hw: pointer to the clock to set parent for - * @index: parent index to select - * - * Sets parent for a DPLL clock. This sets the DPLL into bypass or - * locked mode. Returns 0 with success, negative error value otherwise. - */ -int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - int ret; - - if (!hw) - return -EINVAL; - - if (index) - ret = _omap3_noncore_dpll_bypass(clk); - else - ret = _omap3_noncore_dpll_lock(clk); - - return ret; -} - -/** - * omap3_noncore_dpll_set_rate - set rate for a DPLL clock - * @hw: pointer to the clock to set parent for - * @rate: target rate for the clock - * @parent_rate: rate of the parent clock - * - * Sets rate for a DPLL clock. First checks if the clock parent is - * reference clock (in bypass mode, the rate of the clock can't be - * changed) and proceeds with the rate change operation. 
Returns 0 - * with success, negative error value otherwise. - */ -int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - struct dpll_data *dd; - u16 freqsel = 0; - int ret; - - if (!hw || !rate) - return -EINVAL; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - if (__clk_get_hw(__clk_get_parent(hw->clk)) != - __clk_get_hw(dd->clk_ref)) - return -EINVAL; - - if (dd->last_rounded_rate == 0) - return -EINVAL; - - /* Freqsel is available only on OMAP343X devices */ - if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) { - freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); - WARN_ON(!freqsel); - } - - pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__, - __clk_get_name(hw->clk), rate); - - ret = omap3_noncore_dpll_program(clk, freqsel); - - return ret; -} - -/** - * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock - * @hw: pointer to the clock to set rate and parent for - * @rate: target rate for the DPLL - * @parent_rate: clock rate of the DPLL parent - * @index: new parent index for the DPLL, 0 - reference, 1 - bypass - * - * Sets rate and parent for a DPLL clock. If new parent is the bypass - * clock, only selects the parent. Otherwise proceeds with a rate - * change, as this will effectively also change the parent as the - * DPLL is put into locked mode. Returns 0 with success, negative error - * value otherwise. - */ -int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, - unsigned long rate, - unsigned long parent_rate, - u8 index) -{ - int ret; - - if (!hw || !rate) - return -EINVAL; - - /* - * clk-ref at index[0], in which case we only need to set rate, - * the parent will be changed automatically with the lock sequence. - * With clk-bypass case we only need to change parent. - */ - if (index) - ret = omap3_noncore_dpll_set_parent(hw, index); - else - ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate); - - return ret; -} - -/* DPLL autoidle read/set code */ - -/** - * omap3_dpll_autoidle_read - read a DPLL's autoidle bits - * @clk: struct clk * of the DPLL to read - * - * Return the DPLL's autoidle bits, shifted down to bit 0. Returns - * -EINVAL if passed a null pointer or if the struct clk does not - * appear to refer to a DPLL. - */ -u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk) -{ - const struct dpll_data *dd; - u32 v; - - if (!clk || !clk->dpll_data) - return -EINVAL; - - dd = clk->dpll_data; - - if (!dd->autoidle_reg) - return -EINVAL; - - v = omap2_clk_readl(clk, dd->autoidle_reg); - v &= dd->autoidle_mask; - v >>= __ffs(dd->autoidle_mask); - - return v; -} - -/** - * omap3_dpll_allow_idle - enable DPLL autoidle bits - * @clk: struct clk * of the DPLL to operate on - * - * Enable DPLL automatic idle control. This automatic idle mode - * switching takes effect only when the DPLL is locked, at least on - * OMAP3430. The DPLL will enter low-power stop when its downstream - * clocks are gated. No return value. - */ -void omap3_dpll_allow_idle(struct clk_hw_omap *clk) -{ - const struct dpll_data *dd; - u32 v; - - if (!clk || !clk->dpll_data) - return; - - dd = clk->dpll_data; - - if (!dd->autoidle_reg) - return; - - /* - * REVISIT: CORE DPLL can optionally enter low-power bypass - * by writing 0x5 instead of 0x1. Add some mechanism to - * optionally enter this mode. 
- */ - v = omap2_clk_readl(clk, dd->autoidle_reg); - v &= ~dd->autoidle_mask; - v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask); - omap2_clk_writel(v, clk, dd->autoidle_reg); - -} - -/** - * omap3_dpll_deny_idle - prevent DPLL from automatically idling - * @clk: struct clk * of the DPLL to operate on - * - * Disable DPLL automatic idle control. No return value. - */ -void omap3_dpll_deny_idle(struct clk_hw_omap *clk) -{ - const struct dpll_data *dd; - u32 v; - - if (!clk || !clk->dpll_data) - return; - - dd = clk->dpll_data; - - if (!dd->autoidle_reg) - return; - - v = omap2_clk_readl(clk, dd->autoidle_reg); - v &= ~dd->autoidle_mask; - v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask); - omap2_clk_writel(v, clk, dd->autoidle_reg); - -} - -/* Clock control for DPLL outputs */ - -/* Find the parent DPLL for the given clkoutx2 clock */ -static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) -{ - struct clk_hw_omap *pclk = NULL; - struct clk *parent; - - /* Walk up the parents of clk, looking for a DPLL */ - do { - do { - parent = __clk_get_parent(hw->clk); - hw = __clk_get_hw(parent); - } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC)); - if (!hw) - break; - pclk = to_clk_hw_omap(hw); - } while (pclk && !pclk->dpll_data); - - /* clk does not have a DPLL as a parent? error in the clock data */ - if (!pclk) { - WARN_ON(1); - return NULL; - } - - return pclk; -} - -/** - * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate - * @clk: DPLL output struct clk - * - * Using parent clock DPLL data, look up DPLL state. If locked, set our - * rate to the dpll_clk * 2; otherwise, just use dpll_clk. - */ -unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, - unsigned long parent_rate) -{ - const struct dpll_data *dd; - unsigned long rate; - u32 v; - struct clk_hw_omap *pclk = NULL; - - if (!parent_rate) - return 0; - - pclk = omap3_find_clkoutx2_dpll(hw); - - if (!pclk) - return 0; - - dd = pclk->dpll_data; - - WARN_ON(!dd->enable_mask); - - v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask; - v >>= __ffs(dd->enable_mask); - if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE)) - rate = parent_rate; - else - rate = parent_rate * 2; - return rate; -} - -int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - return 0; -} - -long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) -{ - const struct dpll_data *dd; - u32 v; - struct clk_hw_omap *pclk = NULL; - - if (!*prate) - return 0; - - pclk = omap3_find_clkoutx2_dpll(hw); - - if (!pclk) - return 0; - - dd = pclk->dpll_data; - - /* TYPE J does not have a clkoutx2 */ - if (dd->flags & DPLL_J_TYPE) { - *prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate); - return *prate; - } - - WARN_ON(!dd->enable_mask); - - v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask; - v >>= __ffs(dd->enable_mask); - - /* If in bypass, the rate is fixed to the bypass rate*/ - if (v != OMAP3XXX_EN_DPLL_LOCKED) - return *prate; - - if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { - unsigned long best_parent; - - best_parent = (rate / 2); - *prate = __clk_round_rate(__clk_get_parent(hw->clk), - best_parent); - } - - return *prate * 2; -} - -/* OMAP3/4 non-CORE DPLL clkops */ -const struct clk_hw_omap_ops clkhwops_omap3_dpll = { - .allow_idle = omap3_dpll_allow_idle, - .deny_idle = omap3_dpll_deny_idle, -}; diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c deleted 
file mode 100644 index f231be05b9a6..000000000000 --- a/arch/arm/mach-omap2/dpll44xx.c +++ /dev/null @@ -1,232 +0,0 @@ -/* - * OMAP4-specific DPLL control functions - * - * Copyright (C) 2011 Texas Instruments, Inc. - * Rajendra Nayak - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/bitops.h> - -#include "clock.h" - -/* - * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that - * can supported when using the DPLL low-power mode. Frequencies are - * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control, - * Status, and Low-Power Operation Mode". - */ -#define OMAP4_DPLL_LP_FINT_MAX 1000000 -#define OMAP4_DPLL_LP_FOUT_MAX 100000000 - -/* - * Bitfield declarations - */ -#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK (1 << 8) -#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK (1 << 10) -#define OMAP4430_DPLL_REGM4XEN_MASK (1 << 11) - -/* Static rate multiplier for OMAP4 REGM4XEN clocks */ -#define OMAP4430_REGM4XEN_MULT 4 - -void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk) -{ - u32 v; - u32 mask; - - if (!clk || !clk->clksel_reg) - return; - - mask = clk->flags & CLOCK_CLKOUTX2 ? - OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : - OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; - - v = omap2_clk_readl(clk, clk->clksel_reg); - /* Clear the bit to allow gatectrl */ - v &= ~mask; - omap2_clk_writel(v, clk, clk->clksel_reg); -} - -void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk) -{ - u32 v; - u32 mask; - - if (!clk || !clk->clksel_reg) - return; - - mask = clk->flags & CLOCK_CLKOUTX2 ? - OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : - OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; - - v = omap2_clk_readl(clk, clk->clksel_reg); - /* Set the bit to deny gatectrl */ - v |= mask; - omap2_clk_writel(v, clk, clk->clksel_reg); -} - -const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = { - .allow_idle = omap4_dpllmx_allow_gatectrl, - .deny_idle = omap4_dpllmx_deny_gatectrl, -}; - -/** - * omap4_dpll_lpmode_recalc - compute DPLL low-power setting - * @dd: pointer to the dpll data structure - * - * Calculates if low-power mode can be enabled based upon the last - * multiplier and divider values calculated. If low-power mode can be - * enabled, then the bit to enable low-power mode is stored in the - * last_rounded_lpmode variable. This implementation is based upon the - * criteria for enabling low-power mode as described in the OMAP4430/60 - * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power - * Operation Mode". - */ -static void omap4_dpll_lpmode_recalc(struct dpll_data *dd) -{ - long fint, fout; - - fint = __clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1); - fout = fint * dd->last_rounded_m; - - if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX)) - dd->last_rounded_lpmode = 1; - else - dd->last_rounded_lpmode = 0; -} - -/** - * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit - * @clk: struct clk * of the DPLL to compute the rate for - * - * Compute the output rate for the OMAP4 DPLL represented by @clk. - * Takes the REGM4XEN bit into consideration, which is needed for the - * OMAP4 ABE DPLL. Returns the DPLL's output rate (before M-dividers) - * upon success, or 0 upon error. 
- */ -unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - u32 v; - unsigned long rate; - struct dpll_data *dd; - - if (!clk || !clk->dpll_data) - return 0; - - dd = clk->dpll_data; - - rate = omap2_get_dpll_rate(clk); - - /* regm4xen adds a multiplier of 4 to DPLL calculations */ - v = omap2_clk_readl(clk, dd->control_reg); - if (v & OMAP4430_DPLL_REGM4XEN_MASK) - rate *= OMAP4430_REGM4XEN_MULT; - - return rate; -} - -/** - * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit - * @clk: struct clk * of the DPLL to round a rate for - * @target_rate: the desired rate of the DPLL - * - * Compute the rate that would be programmed into the DPLL hardware - * for @clk if set_rate() were to be provided with the rate - * @target_rate. Takes the REGM4XEN bit into consideration, which is - * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before - * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or - * ~0 if an error occurred in omap2_dpll_round_rate(). - */ -long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, - unsigned long target_rate, - unsigned long *parent_rate) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - struct dpll_data *dd; - long r; - - if (!clk || !clk->dpll_data) - return -EINVAL; - - dd = clk->dpll_data; - - dd->last_rounded_m4xen = 0; - - /* - * First try to compute the DPLL configuration for - * target rate without using the 4X multiplier. - */ - r = omap2_dpll_round_rate(hw, target_rate, NULL); - if (r != ~0) - goto out; - - /* - * If we did not find a valid DPLL configuration, try again, but - * this time see if using the 4X multiplier can help. Enabling the - * 4X multiplier is equivalent to dividing the target rate by 4. - */ - r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT, - NULL); - if (r == ~0) - return r; - - dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT; - dd->last_rounded_m4xen = 1; - -out: - omap4_dpll_lpmode_recalc(dd); - - return dd->last_rounded_rate; -} - -/** - * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL - * @hw: pointer to the clock to determine rate for - * @rate: target rate for the DPLL - * @best_parent_rate: pointer for returning best parent rate - * @best_parent_clk: pointer for returning best parent clock - * - * Determines which DPLL mode to use for reaching a desired rate. - * Checks whether the DPLL shall be in bypass or locked mode, and if - * locked, calculates the M,N values for the DPLL via round-rate. - * Returns a positive clock rate with success, negative error value - * in failure. 
- */ -long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - struct dpll_data *dd; - - if (!hw || !rate) - return -EINVAL; - - dd = clk->dpll_data; - if (!dd) - return -EINVAL; - - if (__clk_get_rate(dd->clk_bypass) == rate && - (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { - *best_parent_clk = __clk_get_hw(dd->clk_bypass); - } else { - rate = omap4_dpll_regm4xen_round_rate(hw, rate, - best_parent_rate); - *best_parent_clk = __clk_get_hw(dd->clk_ref); - } - - *best_parent_rate = rate; - - return rate; -} diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 820dde8b5b04..6a4822dbb4ea 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -37,7 +37,6 @@ #include "clock.h" #include "clock2xxx.h" #include "clock3xxx.h" -#include "clock44xx.h" #include "omap-pm.h" #include "sdrc.h" #include "control.h" @@ -236,7 +235,7 @@ static struct map_desc omap44xx_io_desc[] __initdata = { }; #endif -#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) +#ifdef CONFIG_SOC_OMAP5 static struct map_desc omap54xx_io_desc[] __initdata = { { .virtual = L3_54XX_VIRT, @@ -265,6 +264,53 @@ static struct map_desc omap54xx_io_desc[] __initdata = { }; #endif +#ifdef CONFIG_SOC_DRA7XX +static struct map_desc dra7xx_io_desc[] __initdata = { + { + .virtual = L4_CFG_MPU_DRA7XX_VIRT, + .pfn = __phys_to_pfn(L4_CFG_MPU_DRA7XX_PHYS), + .length = L4_CFG_MPU_DRA7XX_SIZE, + .type = MT_DEVICE, + }, + { + .virtual = L3_MAIN_SN_DRA7XX_VIRT, + .pfn = __phys_to_pfn(L3_MAIN_SN_DRA7XX_PHYS), + .length = L3_MAIN_SN_DRA7XX_SIZE, + .type = MT_DEVICE, + }, + { + .virtual = L4_PER1_DRA7XX_VIRT, + .pfn = __phys_to_pfn(L4_PER1_DRA7XX_PHYS), + .length = L4_PER1_DRA7XX_SIZE, + .type = MT_DEVICE, + }, + { + .virtual = L4_PER2_DRA7XX_VIRT, + .pfn = __phys_to_pfn(L4_PER2_DRA7XX_PHYS), + .length = L4_PER2_DRA7XX_SIZE, + .type = MT_DEVICE, + }, + { + .virtual = L4_PER3_DRA7XX_VIRT, + .pfn = __phys_to_pfn(L4_PER3_DRA7XX_PHYS), + .length = L4_PER3_DRA7XX_SIZE, + .type = MT_DEVICE, + }, + { + .virtual = L4_CFG_DRA7XX_VIRT, + .pfn = __phys_to_pfn(L4_CFG_DRA7XX_PHYS), + .length = L4_CFG_DRA7XX_SIZE, + .type = MT_DEVICE, + }, + { + .virtual = L4_WKUP_DRA7XX_VIRT, + .pfn = __phys_to_pfn(L4_WKUP_DRA7XX_PHYS), + .length = L4_WKUP_DRA7XX_SIZE, + .type = MT_DEVICE, + }, +}; +#endif + #ifdef CONFIG_SOC_OMAP2420 void __init omap242x_map_io(void) { @@ -309,12 +355,19 @@ void __init omap4_map_io(void) } #endif -#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) +#ifdef CONFIG_SOC_OMAP5 void __init omap5_map_io(void) { iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc)); } #endif + +#ifdef CONFIG_SOC_DRA7XX +void __init dra7xx_map_io(void) +{ + iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc)); +} +#endif /* * omap2_init_reprogram_sdrc - reprogram SDRC timing parameters * @@ -554,11 +607,11 @@ void __init ti814x_init_early(void) omap2_prcm_base_init(); omap3xxx_voltagedomains_init(); omap3xxx_powerdomains_init(); - ti81xx_clockdomains_init(); - ti81xx_hwmod_init(); + ti814x_clockdomains_init(); + dm814x_hwmod_init(); omap_hwmod_init_postsetup(); if (of_have_populated_dt()) - omap_clk_soc_init = ti81xx_dt_clk_init; + omap_clk_soc_init = dm814x_dt_clk_init; } void __init ti816x_init_early(void) @@ -571,11 +624,11 @@ void __init ti816x_init_early(void) omap2_prcm_base_init(); omap3xxx_voltagedomains_init(); 
omap3xxx_powerdomains_init(); - ti81xx_clockdomains_init(); - ti81xx_hwmod_init(); + ti816x_clockdomains_init(); + dm816x_hwmod_init(); omap_hwmod_init_postsetup(); if (of_have_populated_dt()) - omap_clk_soc_init = ti81xx_dt_clk_init; + omap_clk_soc_init = dm816x_dt_clk_init; } #endif @@ -723,6 +776,8 @@ int __init omap_clk_init(void) ti_clk_init_features(); + omap2_clk_setup_ll_ops(); + if (of_have_populated_dt()) { ret = omap_control_init(); if (ret) diff --git a/arch/arm/mach-omap2/iomap.h b/arch/arm/mach-omap2/iomap.h index cce2b65039f1..6191d244438a 100644 --- a/arch/arm/mach-omap2/iomap.h +++ b/arch/arm/mach-omap2/iomap.h @@ -194,3 +194,66 @@ #define L4_PER_54XX_PHYS L4_PER_54XX_BASE /* 0x48000000 --> 0xfa000000 */ #define L4_PER_54XX_VIRT (L4_PER_54XX_PHYS + OMAP2_L4_IO_OFFSET) #define L4_PER_54XX_SIZE SZ_4M + +/* + * ---------------------------------------------------------------------------- + * DRA7xx specific IO mapping + * ---------------------------------------------------------------------------- + */ +/* + * L3_MAIN_SN_DRA7XX_PHYS 0x44000000 --> 0xf8000000 + * The overall space is 24MiB (0x4400_0000<->0x457F_FFFF), but mapping + * everything is just inefficient, since, there are too many address holes. + */ +#define L3_MAIN_SN_DRA7XX_PHYS L3_MAIN_SN_DRA7XX_BASE +#define L3_MAIN_SN_DRA7XX_VIRT (L3_MAIN_SN_DRA7XX_PHYS + OMAP4_L3_IO_OFFSET) +#define L3_MAIN_SN_DRA7XX_SIZE SZ_1M + +/* + * L4_PER1_DRA7XX_PHYS (0x4800_000<>0x480D_2FFF) -> 0.82MiB (alloc 1MiB) + * (0x48000000<->0x48100000) <=> (0xFA000000<->0xFA100000) + */ +#define L4_PER1_DRA7XX_PHYS L4_PER1_DRA7XX_BASE +#define L4_PER1_DRA7XX_VIRT (L4_PER1_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET) +#define L4_PER1_DRA7XX_SIZE SZ_1M + +/* + * L4_CFG_MPU_DRA7XX_PHYS (0x48210000<>0x482A_F2FF) -> 0.62MiB (alloc 1MiB) + * (0x48210000<->0x48310000) <=> (0xFA210000<->0xFA310000) + * NOTE: This is a bit of an orphan memory map sitting isolated in TRM + */ +#define L4_CFG_MPU_DRA7XX_PHYS L4_CFG_MPU_DRA7XX_BASE +#define L4_CFG_MPU_DRA7XX_VIRT (L4_CFG_MPU_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET) +#define L4_CFG_MPU_DRA7XX_SIZE SZ_1M + +/* + * L4_PER2_DRA7XX_PHYS (0x4840_0000<>0x4848_8FFF) -> .53MiB (alloc 1MiB) + * (0x48400000<->0x48500000) <=> (0xFA400000<->0xFA500000) + */ +#define L4_PER2_DRA7XX_PHYS L4_PER2_DRA7XX_BASE +#define L4_PER2_DRA7XX_VIRT (L4_PER2_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET) +#define L4_PER2_DRA7XX_SIZE SZ_1M + +/* + * L4_PER3_DRA7XX_PHYS (0x4880_0000<>0x489E_0FFF) -> 1.87MiB (alloc 2MiB) + * (0x48800000<->0x48A00000) <=> (0xFA800000<->0xFAA00000) + */ +#define L4_PER3_DRA7XX_PHYS L4_PER3_DRA7XX_BASE +#define L4_PER3_DRA7XX_VIRT (L4_PER3_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET) +#define L4_PER3_DRA7XX_SIZE SZ_2M + +/* + * L4_CFG_DRA7XX_PHYS (0x4A00_0000<>0x4A22_BFFF) ->2.17MiB (alloc 3MiB)? + * (0x4A000000<->0x4A300000) <=> (0xFC000000<->0xFC300000) + */ +#define L4_CFG_DRA7XX_PHYS L4_CFG_DRA7XX_BASE +#define L4_CFG_DRA7XX_VIRT (L4_CFG_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET) +#define L4_CFG_DRA7XX_SIZE (SZ_1M + SZ_2M) + +/* + * L4_WKUP_DRA7XX_PHYS (0x4AE0_0000<>0x4AE3_EFFF) -> .24 mb (alloc 1MiB)? 
+ * (0x4AE00000<->4AF00000) <=> (0xFCE00000<->0xFCF00000) + */ +#define L4_WKUP_DRA7XX_PHYS L4_WKUP_DRA7XX_BASE +#define L4_WKUP_DRA7XX_VIRT (L4_WKUP_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET) +#define L4_WKUP_DRA7XX_SIZE SZ_1M diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c index 4068350f9059..8867eb4025bf 100644 --- a/arch/arm/mach-omap2/omap-iommu.c +++ b/arch/arm/mach-omap2/omap-iommu.c @@ -11,7 +11,6 @@ */ #include <linux/of.h> -#include <linux/module.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/slab.h> @@ -63,15 +62,5 @@ static int __init omap_iommu_init(void) return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL); } -/* must be ready before omap3isp is probed */ omap_subsys_initcall(omap_iommu_init); - -static void __exit omap_iommu_exit(void) -{ - /* Do nothing */ -} -module_exit(omap_iommu_exit); - -MODULE_AUTHOR("Hiroshi DOYU"); -MODULE_DESCRIPTION("omap iommu: omap device registration"); -MODULE_LICENSE("GPL v2"); +/* must be ready before omap3isp is probed */ diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 79f49d904a06..65024af169d3 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -105,7 +105,7 @@ static void dummy_cpu_resume(void) static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state) {} -struct cpu_pm_ops omap_pm_ops = { +static struct cpu_pm_ops omap_pm_ops = { .finish_suspend = default_finish_suspend, .resume = dummy_cpu_resume, .scu_prepare = dummy_scu_prepare, diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index 8e52621b5a6b..e1d2e991d17a 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = { .irq_mask = wakeupgen_mask, .irq_unmask = wakeupgen_unmask, .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = irq_chip_set_type_parent, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, diff --git a/arch/arm/mach-omap2/omap3-restart.c b/arch/arm/mach-omap2/omap3-restart.c index 103a49f68bcb..4bdd22edb96b 100644 --- a/arch/arm/mach-omap2/omap3-restart.c +++ b/arch/arm/mach-omap2/omap3-restart.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/reboot.h> +#include "common.h" #include "control.h" #include "prm.h" diff --git a/arch/arm/mach-omap2/omap4-restart.c b/arch/arm/mach-omap2/omap4-restart.c index a99e7f7fb5be..e17136a50e27 100644 --- a/arch/arm/mach-omap2/omap4-restart.c +++ b/arch/arm/mach-omap2/omap4-restart.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/reboot.h> +#include "common.h" #include "prm.h" /** diff --git a/arch/arm/mach-omap2/omap54xx.h b/arch/arm/mach-omap2/omap54xx.h index 2d35c5709408..0ca8e938096b 100644 --- a/arch/arm/mach-omap2/omap54xx.h +++ b/arch/arm/mach-omap2/omap54xx.h @@ -30,6 +30,14 @@ #define OMAP54XX_CTRL_BASE 0x4a002800 #define OMAP54XX_SAR_RAM_BASE 0x4ae26000 +/* DRA7 specific base addresses */ +#define L3_MAIN_SN_DRA7XX_BASE 0x44000000 +#define L4_PER1_DRA7XX_BASE 0x48000000 +#define L4_CFG_MPU_DRA7XX_BASE 0x48210000 +#define L4_PER2_DRA7XX_BASE 0x48400000 +#define L4_PER3_DRA7XX_BASE 0x48800000 +#define L4_CFG_DRA7XX_BASE 0x4A000000 +#define L4_WKUP_DRA7XX_BASE 0x4ae00000 #define DRA7XX_CM_CORE_AON_BASE 0x4a005000 #define DRA7XX_CTRL_BASE 0x4a003400 #define DRA7XX_TAP_BASE 0x4ae0c000 diff --git 
a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index d78c12e7cb5e..cc8a987149e2 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -130,6 +130,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/io.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/err.h> @@ -299,7 +300,20 @@ static void _write_sysconfig(u32 v, struct omap_hwmod *oh) /* Module might have lost context, always update cache and register */ oh->_sysc_cache = v; + + /* + * Some IP blocks (such as RTC) require unlocking of IP before + * accessing its registers. If a function pointer is present + * to unlock, then call it before accessing sysconfig and + * call lock after writing sysconfig. + */ + if (oh->class->unlock) + oh->class->unlock(oh); + omap_hwmod_write(v, oh, oh->class->sysc->sysc_offs); + + if (oh->class->lock) + oh->class->lock(oh); } /** @@ -2373,6 +2387,9 @@ static int of_dev_hwmod_lookup(struct device_node *np, * registers. This address is needed early so the OCP registers that * are part of the device's address space can be ioremapped properly. * + * If SYSC access is not needed, the registers will not be remapped + * and non-availability of MPU access is not treated as an error. + * * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and * -ENXIO on absent or invalid register target address space. */ @@ -2387,6 +2404,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, _save_mpu_port_index(oh); + /* if we don't need sysc access we don't need to ioremap */ + if (!oh->class->sysc) + return 0; + + /* we can't continue without MPU PORT if we need sysc access */ if (oh->_int_flags & _HWMOD_NO_MPU_PORT) return -ENXIO; @@ -2396,8 +2418,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, oh->name); /* Extract the IO space from device tree blob */ - if (!np) + if (!np) { + pr_err("omap_hwmod: %s: no dt node\n", oh->name); return -ENXIO; + } va_start = of_iomap(np, index + oh->mpu_rt_idx); } else { @@ -2456,13 +2480,11 @@ static int __init _init(struct omap_hwmod *oh, void *data) oh->name, np->name); } - if (oh->class->sysc) { - r = _init_mpu_rt_base(oh, NULL, index, np); - if (r < 0) { - WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", - oh->name); - return 0; - } + r = _init_mpu_rt_base(oh, NULL, index, np); + if (r < 0) { + WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", + oh->name); + return 0; } r = _init_clocks(oh, NULL); @@ -3878,7 +3900,8 @@ void __init omap_hwmod_init(void) soc_ops.init_clkdm = _init_clkdm; soc_ops.update_context_lost = _omap4_update_context_lost; soc_ops.get_context_lost = _omap4_get_context_lost; - } else if (cpu_is_ti816x() || soc_is_am33xx() || soc_is_am43xx()) { + } else if (cpu_is_ti814x() || cpu_is_ti816x() || soc_is_am33xx() || + soc_is_am43xx()) { soc_ops.enable_module = _omap4_enable_module; soc_ops.disable_module = _omap4_disable_module; soc_ops.wait_target_ready = _omap4_wait_target_ready; diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index b5d27ec81610..ca6df1a73475 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -576,6 +576,8 @@ struct omap_hwmod_omap4_prcm { * @pre_shutdown: ptr to fn to be executed immediately prior to device shutdown * @reset: ptr to fn to be executed in place of the standard hwmod reset fn * @enable_preprogram: ptr to fn to be executed during device enable + * @lock: 
ptr to fn to be executed to lock IP registers + * @unlock: ptr to fn to be executed to unlock IP registers * * Represent the class of a OMAP hardware "modules" (e.g. timer, * smartreflex, gpio, uart...) @@ -600,6 +602,8 @@ struct omap_hwmod_class { int (*pre_shutdown)(struct omap_hwmod *oh); int (*reset)(struct omap_hwmod *oh); int (*enable_preprogram)(struct omap_hwmod *oh); + void (*lock)(struct omap_hwmod *oh); + void (*unlock)(struct omap_hwmod *oh); }; /** @@ -755,7 +759,8 @@ extern int omap3xxx_hwmod_init(void); extern int omap44xx_hwmod_init(void); extern int omap54xx_hwmod_init(void); extern int am33xx_hwmod_init(void); -extern int ti81xx_hwmod_init(void); +extern int dm814x_hwmod_init(void); +extern int dm816x_hwmod_init(void); extern int dra7xx_hwmod_init(void); int am43xx_hwmod_init(void); diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index 6dcfd03ced8f..36bcd2e75422 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c @@ -20,7 +20,7 @@ #include "prm-regbits-24xx.h" #include "wd_timer.h" -struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = { +static struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = { { .name = "dispc", .dma_req = 5 }, { .dma_req = -1, }, }; diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c index 215d5efa0dba..e97a894b5f88 100644 --- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c @@ -480,7 +480,7 @@ static struct omap_hwmod am43xx_dss_core_hwmod = { /* dispc */ -struct omap_dss_dispc_dev_attr am43xx_dss_dispc_dev_attr = { +static struct omap_dss_dispc_dev_attr am43xx_dss_dispc_dev_attr = { .manager_count = 1, .has_framedonetv_irq = 0 }; diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 2606c6608bd8..562247bced49 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = { .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; @@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = { .class = &dra7xx_gpmc_hwmod_class, .clkdm_name = "l3main1_clkdm", /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */ - .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS, + .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS, .main_clk = "l3_iclk_div", .prcm = { .omap4 = { diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c index c92413769144..b1288f56d509 100644 --- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c @@ -32,21 +32,59 @@ */ /* - * The alwon .clkctrl_offs field is offset from the CM_ALWON, that's - * TRM 18.7.17 CM_ALWON device register values minus 0x1400. + * Common alwon .clkctrl_offs from dm814x TRM "Table 2-278. CM_ALWON REGISTERS" + * also dm816x TRM 18.7.17 CM_ALWON device register values minus 0x1400. 
*/ +#define DM81XX_CM_ALWON_MCASP0_CLKCTRL 0x140 +#define DM81XX_CM_ALWON_MCASP1_CLKCTRL 0x144 +#define DM81XX_CM_ALWON_MCASP2_CLKCTRL 0x148 +#define DM81XX_CM_ALWON_MCBSP_CLKCTRL 0x14c +#define DM81XX_CM_ALWON_UART_0_CLKCTRL 0x150 +#define DM81XX_CM_ALWON_UART_1_CLKCTRL 0x154 +#define DM81XX_CM_ALWON_UART_2_CLKCTRL 0x158 +#define DM81XX_CM_ALWON_GPIO_0_CLKCTRL 0x15c +#define DM81XX_CM_ALWON_GPIO_1_CLKCTRL 0x160 +#define DM81XX_CM_ALWON_I2C_0_CLKCTRL 0x164 +#define DM81XX_CM_ALWON_I2C_1_CLKCTRL 0x168 +#define DM81XX_CM_ALWON_WDTIMER_CLKCTRL 0x18c +#define DM81XX_CM_ALWON_SPI_CLKCTRL 0x190 +#define DM81XX_CM_ALWON_MAILBOX_CLKCTRL 0x194 +#define DM81XX_CM_ALWON_SPINBOX_CLKCTRL 0x198 +#define DM81XX_CM_ALWON_MMUDATA_CLKCTRL 0x19c +#define DM81XX_CM_ALWON_MMUCFG_CLKCTRL 0x1a8 +#define DM81XX_CM_ALWON_CONTROL_CLKCTRL 0x1c4 +#define DM81XX_CM_ALWON_GPMC_CLKCTRL 0x1d0 +#define DM81XX_CM_ALWON_ETHERNET_0_CLKCTRL 0x1d4 +#define DM81XX_CM_ALWON_L3_CLKCTRL 0x1e4 +#define DM81XX_CM_ALWON_L4HS_CLKCTRL 0x1e8 +#define DM81XX_CM_ALWON_L4LS_CLKCTRL 0x1ec +#define DM81XX_CM_ALWON_RTC_CLKCTRL 0x1f0 +#define DM81XX_CM_ALWON_TPCC_CLKCTRL 0x1f4 +#define DM81XX_CM_ALWON_TPTC0_CLKCTRL 0x1f8 +#define DM81XX_CM_ALWON_TPTC1_CLKCTRL 0x1fc +#define DM81XX_CM_ALWON_TPTC2_CLKCTRL 0x200 +#define DM81XX_CM_ALWON_TPTC3_CLKCTRL 0x204 + +/* Registers specific to dm814x */ +#define DM814X_CM_ALWON_MCASP_3_4_5_CLKCTRL 0x16c +#define DM814X_CM_ALWON_ATL_CLKCTRL 0x170 +#define DM814X_CM_ALWON_MLB_CLKCTRL 0x174 +#define DM814X_CM_ALWON_PATA_CLKCTRL 0x178 +#define DM814X_CM_ALWON_UART_3_CLKCTRL 0x180 +#define DM814X_CM_ALWON_UART_4_CLKCTRL 0x184 +#define DM814X_CM_ALWON_UART_5_CLKCTRL 0x188 +#define DM814X_CM_ALWON_OCM_0_CLKCTRL 0x1b4 +#define DM814X_CM_ALWON_VCP_CLKCTRL 0x1b8 +#define DM814X_CM_ALWON_MPU_CLKCTRL 0x1dc +#define DM814X_CM_ALWON_DEBUGSS_CLKCTRL 0x1e0 +#define DM814X_CM_ALWON_DCAN_0_1_CLKCTRL 0x218 +#define DM814X_CM_ALWON_MMCHS_0_CLKCTRL 0x21c +#define DM814X_CM_ALWON_MMCHS_1_CLKCTRL 0x220 +#define DM814X_CM_ALWON_MMCHS_2_CLKCTRL 0x224 +#define DM814X_CM_ALWON_CUST_EFUSE_CLKCTRL 0x228 + +/* Registers specific to dm816x */ #define DM816X_DM_ALWON_BASE 0x1400 -#define DM816X_CM_ALWON_MCASP0_CLKCTRL (0x1540 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_MCASP1_CLKCTRL (0x1544 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_MCASP2_CLKCTRL (0x1548 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_MCBSP_CLKCTRL (0x154c - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_UART_0_CLKCTRL (0x1550 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_UART_1_CLKCTRL (0x1554 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_UART_2_CLKCTRL (0x1558 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_GPIO_0_CLKCTRL (0x155c - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_GPIO_1_CLKCTRL (0x1560 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_I2C_0_CLKCTRL (0x1564 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_I2C_1_CLKCTRL (0x1568 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_TIMER_1_CLKCTRL (0x1570 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_TIMER_2_CLKCTRL (0x1574 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_TIMER_3_CLKCTRL (0x1578 - DM816X_DM_ALWON_BASE) @@ -54,29 +92,11 @@ #define DM816X_CM_ALWON_TIMER_5_CLKCTRL (0x1580 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_TIMER_6_CLKCTRL (0x1584 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_TIMER_7_CLKCTRL (0x1588 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_WDTIMER_CLKCTRL (0x158c - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_SPI_CLKCTRL (0x1590 - DM816X_DM_ALWON_BASE) 
-#define DM816X_CM_ALWON_MAILBOX_CLKCTRL (0x1594 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_SPINBOX_CLKCTRL (0x1598 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_MMUDATA_CLKCTRL (0x159c - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_MMUCFG_CLKCTRL (0x15a8 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_SDIO_CLKCTRL (0x15b0 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_OCMC_0_CLKCTRL (0x15b4 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_OCMC_1_CLKCTRL (0x15b8 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_CONTRL_CLKCTRL (0x15c4 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_GPMC_CLKCTRL (0x15d0 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_ETHERNET_0_CLKCTRL (0x15d4 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_ETHERNET_1_CLKCTRL (0x15d8 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_MPU_CLKCTRL (0x15dc - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_L3_CLKCTRL (0x15e4 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_L4HS_CLKCTRL (0x15e8 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_L4LS_CLKCTRL (0x15ec - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_RTC_CLKCTRL (0x15f0 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_TPCC_CLKCTRL (0x15f4 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_TPTC0_CLKCTRL (0x15f8 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_TPTC1_CLKCTRL (0x15fc - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_TPTC2_CLKCTRL (0x1600 - DM816X_DM_ALWON_BASE) -#define DM816X_CM_ALWON_TPTC3_CLKCTRL (0x1604 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_SR_0_CLKCTRL (0x1608 - DM816X_DM_ALWON_BASE) #define DM816X_CM_ALWON_SR_1_CLKCTRL (0x160c - DM816X_DM_ALWON_BASE) @@ -88,28 +108,28 @@ #define DM816X_CM_DEFAULT_USB_CLKCTRL (0x558 - DM816X_CM_DEFAULT_OFFSET) /* L3 Interconnect entries clocked at 125, 250 and 500MHz */ -static struct omap_hwmod dm816x_alwon_l3_slow_hwmod = { +static struct omap_hwmod dm81xx_alwon_l3_slow_hwmod = { .name = "alwon_l3_slow", .clkdm_name = "alwon_l3s_clkdm", .class = &l3_hwmod_class, .flags = HWMOD_NO_IDLEST, }; -static struct omap_hwmod dm816x_default_l3_slow_hwmod = { +static struct omap_hwmod dm81xx_default_l3_slow_hwmod = { .name = "default_l3_slow", .clkdm_name = "default_l3_slow_clkdm", .class = &l3_hwmod_class, .flags = HWMOD_NO_IDLEST, }; -static struct omap_hwmod dm816x_alwon_l3_med_hwmod = { +static struct omap_hwmod dm81xx_alwon_l3_med_hwmod = { .name = "l3_med", .clkdm_name = "alwon_l3_med_clkdm", .class = &l3_hwmod_class, .flags = HWMOD_NO_IDLEST, }; -static struct omap_hwmod dm816x_alwon_l3_fast_hwmod = { +static struct omap_hwmod dm81xx_alwon_l3_fast_hwmod = { .name = "l3_fast", .clkdm_name = "alwon_l3_fast_clkdm", .class = &l3_hwmod_class, @@ -120,7 +140,7 @@ static struct omap_hwmod dm816x_alwon_l3_fast_hwmod = { * L4 standard peripherals, see TRM table 1-12 for devices using this. * See TRM table 1-73 for devices using the 125MHz SYSCLK6 clock. */ -static struct omap_hwmod dm816x_l4_ls_hwmod = { +static struct omap_hwmod dm81xx_l4_ls_hwmod = { .name = "l4_ls", .clkdm_name = "alwon_l3s_clkdm", .class = &l4_hwmod_class, @@ -131,27 +151,54 @@ static struct omap_hwmod dm816x_l4_ls_hwmod = { * table 1-13. On dm816x, only EMAC, MDIO and SATA use this. See also TRM * table 1-73 for devices using 250MHz SYSCLK5 clock. 
*/ -static struct omap_hwmod dm816x_l4_hs_hwmod = { +static struct omap_hwmod dm81xx_l4_hs_hwmod = { .name = "l4_hs", .clkdm_name = "alwon_l3_med_clkdm", .class = &l4_hwmod_class, }; /* L3 slow -> L4 ls peripheral interface running at 125MHz */ -static struct omap_hwmod_ocp_if dm816x_alwon_l3_slow__l4_ls = { - .master = &dm816x_alwon_l3_slow_hwmod, - .slave = &dm816x_l4_ls_hwmod, +static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__l4_ls = { + .master = &dm81xx_alwon_l3_slow_hwmod, + .slave = &dm81xx_l4_ls_hwmod, .user = OCP_USER_MPU, }; /* L3 med -> L4 fast peripheral interface running at 250MHz */ -static struct omap_hwmod_ocp_if dm816x_alwon_l3_slow__l4_hs = { - .master = &dm816x_alwon_l3_med_hwmod, - .slave = &dm816x_l4_hs_hwmod, +static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__l4_hs = { + .master = &dm81xx_alwon_l3_med_hwmod, + .slave = &dm81xx_l4_hs_hwmod, .user = OCP_USER_MPU, }; /* MPU */ +static struct omap_hwmod dm814x_mpu_hwmod = { + .name = "mpu", + .clkdm_name = "alwon_l3s_clkdm", + .class = &mpu_hwmod_class, + .flags = HWMOD_INIT_NO_IDLE, + .main_clk = "mpu_ck", + .prcm = { + .omap4 = { + .clkctrl_offs = DM814X_CM_ALWON_MPU_CLKCTRL, + .modulemode = MODULEMODE_SWCTRL, + }, + }, +}; + +static struct omap_hwmod_ocp_if dm814x_mpu__alwon_l3_slow = { + .master = &dm814x_mpu_hwmod, + .slave = &dm81xx_alwon_l3_slow_hwmod, + .user = OCP_USER_MPU, +}; + +/* L3 med peripheral interface running at 200MHz */ +static struct omap_hwmod_ocp_if dm814x_mpu__alwon_l3_med = { + .master = &dm814x_mpu_hwmod, + .slave = &dm81xx_alwon_l3_med_hwmod, + .user = OCP_USER_MPU, +}; + static struct omap_hwmod dm816x_mpu_hwmod = { .name = "mpu", .clkdm_name = "alwon_mpu_clkdm", @@ -168,14 +215,14 @@ static struct omap_hwmod dm816x_mpu_hwmod = { static struct omap_hwmod_ocp_if dm816x_mpu__alwon_l3_slow = { .master = &dm816x_mpu_hwmod, - .slave = &dm816x_alwon_l3_slow_hwmod, + .slave = &dm81xx_alwon_l3_slow_hwmod, .user = OCP_USER_MPU, }; /* L3 med peripheral interface running at 250MHz */ static struct omap_hwmod_ocp_if dm816x_mpu__alwon_l3_med = { .master = &dm816x_mpu_hwmod, - .slave = &dm816x_alwon_l3_med_hwmod, + .slave = &dm81xx_alwon_l3_med_hwmod, .user = OCP_USER_MPU, }; @@ -197,13 +244,13 @@ static struct omap_hwmod_class uart_class = { .sysc = &uart_sysc, }; -static struct omap_hwmod dm816x_uart1_hwmod = { +static struct omap_hwmod dm81xx_uart1_hwmod = { .name = "uart1", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk10_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_UART_0_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_UART_0_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, @@ -211,20 +258,20 @@ static struct omap_hwmod dm816x_uart1_hwmod = { .flags = DEBUG_TI81XXUART1_FLAGS, }; -static struct omap_hwmod_ocp_if dm816x_l4_ls__uart1 = { - .master = &dm816x_l4_ls_hwmod, - .slave = &dm816x_uart1_hwmod, +static struct omap_hwmod_ocp_if dm81xx_l4_ls__uart1 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_uart1_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; -static struct omap_hwmod dm816x_uart2_hwmod = { +static struct omap_hwmod dm81xx_uart2_hwmod = { .name = "uart2", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk10_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_UART_1_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_UART_1_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, @@ -232,20 +279,20 @@ static struct omap_hwmod dm816x_uart2_hwmod = { .flags = DEBUG_TI81XXUART2_FLAGS, }; -static struct omap_hwmod_ocp_if dm816x_l4_ls__uart2 = { - .master = 
&dm816x_l4_ls_hwmod, - .slave = &dm816x_uart2_hwmod, +static struct omap_hwmod_ocp_if dm81xx_l4_ls__uart2 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_uart2_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; -static struct omap_hwmod dm816x_uart3_hwmod = { +static struct omap_hwmod dm81xx_uart3_hwmod = { .name = "uart3", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk10_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_UART_2_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_UART_2_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, @@ -253,9 +300,9 @@ static struct omap_hwmod dm816x_uart3_hwmod = { .flags = DEBUG_TI81XXUART3_FLAGS, }; -static struct omap_hwmod_ocp_if dm816x_l4_ls__uart3 = { - .master = &dm816x_l4_ls_hwmod, - .slave = &dm816x_uart3_hwmod, +static struct omap_hwmod_ocp_if dm81xx_l4_ls__uart3 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_uart3_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; @@ -276,23 +323,23 @@ static struct omap_hwmod_class wd_timer_class = { .reset = &omap2_wd_timer_reset, }; -static struct omap_hwmod dm816x_wd_timer_hwmod = { +static struct omap_hwmod dm81xx_wd_timer_hwmod = { .name = "wd_timer", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk18_ck", .flags = HWMOD_NO_IDLEST, .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_WDTIMER_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_WDTIMER_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &wd_timer_class, }; -static struct omap_hwmod_ocp_if dm816x_l4_ls__wd_timer1 = { - .master = &dm816x_l4_ls_hwmod, - .slave = &dm816x_wd_timer_hwmod, +static struct omap_hwmod_ocp_if dm81xx_l4_ls__wd_timer1 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_wd_timer_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; @@ -320,27 +367,27 @@ static struct omap_hwmod dm81xx_i2c1_hwmod = { .main_clk = "sysclk10_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_I2C_0_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_I2C_0_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, .class = &i2c_class, }; -static struct omap_hwmod_ocp_if dm816x_l4_ls__i2c1 = { - .master = &dm816x_l4_ls_hwmod, +static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c1 = { + .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_i2c1_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; -static struct omap_hwmod dm816x_i2c2_hwmod = { +static struct omap_hwmod dm81xx_i2c2_hwmod = { .name = "i2c2", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk10_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_I2C_1_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_I2C_1_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, @@ -358,9 +405,9 @@ static struct omap_hwmod_class_sysconfig dm81xx_elm_sysc = { .sysc_fields = &omap_hwmod_sysc_type1, }; -static struct omap_hwmod_ocp_if dm816x_l4_ls__i2c2 = { - .master = &dm816x_l4_ls_hwmod, - .slave = &dm816x_i2c2_hwmod, +static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c2 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_i2c2_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; @@ -378,7 +425,7 @@ static struct omap_hwmod dm81xx_elm_hwmod = { }; static struct omap_hwmod_ocp_if dm81xx_l4_ls__elm = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_elm_hwmod, .user = OCP_USER_MPU, }; @@ -417,7 +464,7 @@ static struct omap_hwmod dm81xx_gpio1_hwmod = { .main_clk = "sysclk6_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_GPIO_0_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_GPIO_0_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, @@ -427,7 +474,7 @@ 
static struct omap_hwmod dm81xx_gpio1_hwmod = { }; static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio1 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_gpio1_hwmod, .user = OCP_USER_MPU, }; @@ -443,7 +490,7 @@ static struct omap_hwmod dm81xx_gpio2_hwmod = { .main_clk = "sysclk6_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_GPIO_1_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_GPIO_1_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, @@ -453,7 +500,7 @@ static struct omap_hwmod dm81xx_gpio2_hwmod = { }; static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio2 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_gpio2_hwmod, .user = OCP_USER_MPU, }; @@ -482,14 +529,14 @@ static struct omap_hwmod dm81xx_gpmc_hwmod = { .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS, .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_GPMC_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_GPMC_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; -struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = { - .master = &dm816x_alwon_l3_slow_hwmod, +static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = { + .master = &dm81xx_alwon_l3_slow_hwmod, .slave = &dm81xx_gpmc_hwmod, .user = OCP_USER_MPU, }; @@ -522,7 +569,7 @@ static struct omap_hwmod dm81xx_usbss_hwmod = { }; static struct omap_hwmod_ocp_if dm81xx_default_l3_slow__usbss = { - .master = &dm816x_default_l3_slow_hwmod, + .master = &dm81xx_default_l3_slow_hwmod, .slave = &dm81xx_usbss_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, @@ -547,6 +594,22 @@ static struct omap_timer_capability_dev_attr capability_alwon_dev_attr = { .timer_capability = OMAP_TIMER_ALWON, }; +static struct omap_hwmod dm814x_timer1_hwmod = { + .name = "timer1", + .clkdm_name = "alwon_l3s_clkdm", + .main_clk = "timer_sys_ck", + .dev_attr = &capability_alwon_dev_attr, + .class = &dm816x_timer_hwmod_class, + .flags = HWMOD_NO_IDLEST, +}; + +static struct omap_hwmod_ocp_if dm814x_l4_ls__timer1 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm814x_timer1_hwmod, + .clk = "timer_sys_ck", + .user = OCP_USER_MPU, +}; + static struct omap_hwmod dm816x_timer1_hwmod = { .name = "timer1", .clkdm_name = "alwon_l3s_clkdm", @@ -562,12 +625,28 @@ static struct omap_hwmod dm816x_timer1_hwmod = { }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer1 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer1_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; +static struct omap_hwmod dm814x_timer2_hwmod = { + .name = "timer2", + .clkdm_name = "alwon_l3s_clkdm", + .main_clk = "timer_sys_ck", + .dev_attr = &capability_alwon_dev_attr, + .class = &dm816x_timer_hwmod_class, + .flags = HWMOD_NO_IDLEST, +}; + +static struct omap_hwmod_ocp_if dm814x_l4_ls__timer2 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm814x_timer2_hwmod, + .clk = "timer_sys_ck", + .user = OCP_USER_MPU, +}; + static struct omap_hwmod dm816x_timer2_hwmod = { .name = "timer2", .clkdm_name = "alwon_l3s_clkdm", @@ -583,7 +662,7 @@ static struct omap_hwmod dm816x_timer2_hwmod = { }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer2 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer2_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, @@ -604,7 +683,7 @@ static struct omap_hwmod dm816x_timer3_hwmod = { }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer3 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer3_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, 
@@ -625,7 +704,7 @@ static struct omap_hwmod dm816x_timer4_hwmod = { }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer4 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer4_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, @@ -646,7 +725,7 @@ static struct omap_hwmod dm816x_timer5_hwmod = { }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer5 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer5_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, @@ -667,7 +746,7 @@ static struct omap_hwmod dm816x_timer6_hwmod = { }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer6 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer6_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, @@ -688,12 +767,68 @@ static struct omap_hwmod dm816x_timer7_hwmod = { }; static struct omap_hwmod_ocp_if dm816x_l4_ls__timer7 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_timer7_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; +/* CPSW on dm814x */ +static struct omap_hwmod_class_sysconfig dm814x_cpgmac_sysc = { + .rev_offs = 0x0, + .sysc_offs = 0x8, + .syss_offs = 0x4, + .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE | + SYSS_HAS_RESET_STATUS, + .idlemodes = SIDLE_FORCE | SIDLE_NO | MSTANDBY_FORCE | + MSTANDBY_NO, + .sysc_fields = &omap_hwmod_sysc_type3, +}; + +static struct omap_hwmod_class dm814x_cpgmac0_hwmod_class = { + .name = "cpgmac0", + .sysc = &dm814x_cpgmac_sysc, +}; + +static struct omap_hwmod dm814x_cpgmac0_hwmod = { + .name = "cpgmac0", + .class = &dm814x_cpgmac0_hwmod_class, + .clkdm_name = "alwon_ethernet_clkdm", + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, + .main_clk = "cpsw_125mhz_gclk", + .prcm = { + .omap4 = { + .clkctrl_offs = DM81XX_CM_ALWON_ETHERNET_0_CLKCTRL, + .modulemode = MODULEMODE_SWCTRL, + }, + }, +}; + +static struct omap_hwmod_class dm814x_mdio_hwmod_class = { + .name = "davinci_mdio", +}; + +static struct omap_hwmod dm814x_mdio_hwmod = { + .name = "davinci_mdio", + .class = &dm814x_mdio_hwmod_class, + .clkdm_name = "alwon_ethernet_clkdm", + .main_clk = "cpsw_125mhz_gclk", +}; + +static struct omap_hwmod_ocp_if dm814x_l4_hs__cpgmac0 = { + .master = &dm81xx_l4_hs_hwmod, + .slave = &dm814x_cpgmac0_hwmod, + .clk = "cpsw_125mhz_gclk", + .user = OCP_USER_MPU, +}; + +static struct omap_hwmod_ocp_if dm814x_cpgmac0__mdio = { + .master = &dm814x_cpgmac0_hwmod, + .slave = &dm814x_mdio_hwmod, + .user = OCP_USER_MPU, + .flags = HWMOD_NO_IDLEST, +}; + /* EMAC Ethernet */ static struct omap_hwmod_class_sysconfig dm816x_emac_sysc = { .rev_offs = 0x0, @@ -717,21 +852,21 @@ static struct omap_hwmod dm816x_emac0_hwmod = { .class = &dm816x_emac_hwmod_class, }; -static struct omap_hwmod_ocp_if dm816x_l4_hs__emac0 = { - .master = &dm816x_l4_hs_hwmod, +static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = { + .master = &dm81xx_l4_hs_hwmod, .slave = &dm816x_emac0_hwmod, .clk = "sysclk5_ck", .user = OCP_USER_MPU, }; -static struct omap_hwmod_class dm816x_mdio_hwmod_class = { +static struct omap_hwmod_class dm81xx_mdio_hwmod_class = { .name = "davinci_mdio", .sysc = &dm816x_emac_sysc, }; -struct omap_hwmod dm816x_emac0_mdio_hwmod = { +static struct omap_hwmod dm81xx_emac0_mdio_hwmod = { .name = "davinci_mdio", - .class = &dm816x_mdio_hwmod_class, + .class = &dm81xx_mdio_hwmod_class, .clkdm_name = "alwon_ethernet_clkdm", .main_clk = "sysclk24_ck", .flags = HWMOD_NO_IDLEST, @@ -741,15 +876,15 @@ struct omap_hwmod 
dm816x_emac0_mdio_hwmod = { */ .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_ETHERNET_0_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_ETHERNET_0_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; -struct omap_hwmod_ocp_if dm816x_emac0__mdio = { - .master = &dm816x_l4_hs_hwmod, - .slave = &dm816x_emac0_mdio_hwmod, +static struct omap_hwmod_ocp_if dm81xx_emac0__mdio = { + .master = &dm81xx_l4_hs_hwmod, + .slave = &dm81xx_emac0_mdio_hwmod, .user = OCP_USER_MPU, }; @@ -768,7 +903,7 @@ static struct omap_hwmod dm816x_emac1_hwmod = { }; static struct omap_hwmod_ocp_if dm816x_l4_hs__emac1 = { - .master = &dm816x_l4_hs_hwmod, + .master = &dm81xx_l4_hs_hwmod, .slave = &dm816x_emac1_hwmod, .clk = "sysclk5_ck", .user = OCP_USER_MPU, @@ -815,7 +950,7 @@ static struct omap_hwmod dm816x_mmc1_hwmod = { }; static struct omap_hwmod_ocp_if dm816x_l4_ls__mmc1 = { - .master = &dm816x_l4_ls_hwmod, + .master = &dm81xx_l4_ls_hwmod, .slave = &dm816x_mmc1_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, @@ -843,13 +978,13 @@ static struct omap2_mcspi_dev_attr dm816x_mcspi1_dev_attr = { .num_chipselect = 4, }; -static struct omap_hwmod dm816x_mcspi1_hwmod = { +static struct omap_hwmod dm81xx_mcspi1_hwmod = { .name = "mcspi1", .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk10_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_SPI_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, @@ -857,14 +992,14 @@ static struct omap_hwmod dm816x_mcspi1_hwmod = { .dev_attr = &dm816x_mcspi1_dev_attr, }; -static struct omap_hwmod_ocp_if dm816x_l4_ls__mcspi1 = { - .master = &dm816x_l4_ls_hwmod, - .slave = &dm816x_mcspi1_hwmod, +static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi1 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_mcspi1_hwmod, .clk = "sysclk6_ck", .user = OCP_USER_MPU, }; -static struct omap_hwmod_class_sysconfig dm816x_mailbox_sysc = { +static struct omap_hwmod_class_sysconfig dm81xx_mailbox_sysc = { .rev_offs = 0x000, .sysc_offs = 0x010, .syss_offs = 0x014, @@ -874,55 +1009,55 @@ static struct omap_hwmod_class_sysconfig dm816x_mailbox_sysc = { .sysc_fields = &omap_hwmod_sysc_type1, }; -static struct omap_hwmod_class dm816x_mailbox_hwmod_class = { +static struct omap_hwmod_class dm81xx_mailbox_hwmod_class = { .name = "mailbox", - .sysc = &dm816x_mailbox_sysc, + .sysc = &dm81xx_mailbox_sysc, }; -static struct omap_hwmod dm816x_mailbox_hwmod = { +static struct omap_hwmod dm81xx_mailbox_hwmod = { .name = "mailbox", .clkdm_name = "alwon_l3s_clkdm", - .class = &dm816x_mailbox_hwmod_class, + .class = &dm81xx_mailbox_hwmod_class, .main_clk = "sysclk6_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_MAILBOX_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_MAILBOX_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; -static struct omap_hwmod_ocp_if dm816x_l4_ls__mailbox = { - .master = &dm816x_l4_ls_hwmod, - .slave = &dm816x_mailbox_hwmod, +static struct omap_hwmod_ocp_if dm81xx_l4_ls__mailbox = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_mailbox_hwmod, .user = OCP_USER_MPU, }; -static struct omap_hwmod_class dm816x_tpcc_hwmod_class = { +static struct omap_hwmod_class dm81xx_tpcc_hwmod_class = { .name = "tpcc", }; -struct omap_hwmod dm816x_tpcc_hwmod = { +static struct omap_hwmod dm81xx_tpcc_hwmod = { .name = "tpcc", - .class = &dm816x_tpcc_hwmod_class, + .class = &dm81xx_tpcc_hwmod_class, .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk4_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_TPCC_CLKCTRL, + .clkctrl_offs = 
DM81XX_CM_ALWON_TPCC_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; -struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tpcc = { - .master = &dm816x_alwon_l3_fast_hwmod, - .slave = &dm816x_tpcc_hwmod, +static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tpcc = { + .master = &dm81xx_alwon_l3_fast_hwmod, + .slave = &dm81xx_tpcc_hwmod, .clk = "sysclk4_ck", .user = OCP_USER_MPU, }; -static struct omap_hwmod_addr_space dm816x_tptc0_addr_space[] = { +static struct omap_hwmod_addr_space dm81xx_tptc0_addr_space[] = { { .pa_start = 0x49800000, .pa_end = 0x49800000 + SZ_8K - 1, @@ -931,40 +1066,40 @@ static struct omap_hwmod_addr_space dm816x_tptc0_addr_space[] = { { }, }; -static struct omap_hwmod_class dm816x_tptc0_hwmod_class = { +static struct omap_hwmod_class dm81xx_tptc0_hwmod_class = { .name = "tptc0", }; -struct omap_hwmod dm816x_tptc0_hwmod = { +static struct omap_hwmod dm81xx_tptc0_hwmod = { .name = "tptc0", - .class = &dm816x_tptc0_hwmod_class, + .class = &dm81xx_tptc0_hwmod_class, .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk4_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_TPTC0_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_TPTC0_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; -struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc0 = { - .master = &dm816x_alwon_l3_fast_hwmod, - .slave = &dm816x_tptc0_hwmod, +static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc0 = { + .master = &dm81xx_alwon_l3_fast_hwmod, + .slave = &dm81xx_tptc0_hwmod, .clk = "sysclk4_ck", - .addr = dm816x_tptc0_addr_space, + .addr = dm81xx_tptc0_addr_space, .user = OCP_USER_MPU, }; -struct omap_hwmod_ocp_if dm816x_tptc0__alwon_l3_fast = { - .master = &dm816x_tptc0_hwmod, - .slave = &dm816x_alwon_l3_fast_hwmod, +static struct omap_hwmod_ocp_if dm81xx_tptc0__alwon_l3_fast = { + .master = &dm81xx_tptc0_hwmod, + .slave = &dm81xx_alwon_l3_fast_hwmod, .clk = "sysclk4_ck", - .addr = dm816x_tptc0_addr_space, + .addr = dm81xx_tptc0_addr_space, .user = OCP_USER_MPU, }; -static struct omap_hwmod_addr_space dm816x_tptc1_addr_space[] = { +static struct omap_hwmod_addr_space dm81xx_tptc1_addr_space[] = { { .pa_start = 0x49900000, .pa_end = 0x49900000 + SZ_8K - 1, @@ -973,40 +1108,40 @@ static struct omap_hwmod_addr_space dm816x_tptc1_addr_space[] = { { }, }; -static struct omap_hwmod_class dm816x_tptc1_hwmod_class = { +static struct omap_hwmod_class dm81xx_tptc1_hwmod_class = { .name = "tptc1", }; -struct omap_hwmod dm816x_tptc1_hwmod = { +static struct omap_hwmod dm81xx_tptc1_hwmod = { .name = "tptc1", - .class = &dm816x_tptc1_hwmod_class, + .class = &dm81xx_tptc1_hwmod_class, .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk4_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_TPTC1_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_TPTC1_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; -struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc1 = { - .master = &dm816x_alwon_l3_fast_hwmod, - .slave = &dm816x_tptc1_hwmod, +static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc1 = { + .master = &dm81xx_alwon_l3_fast_hwmod, + .slave = &dm81xx_tptc1_hwmod, .clk = "sysclk4_ck", - .addr = dm816x_tptc1_addr_space, + .addr = dm81xx_tptc1_addr_space, .user = OCP_USER_MPU, }; -struct omap_hwmod_ocp_if dm816x_tptc1__alwon_l3_fast = { - .master = &dm816x_tptc1_hwmod, - .slave = &dm816x_alwon_l3_fast_hwmod, +static struct omap_hwmod_ocp_if dm81xx_tptc1__alwon_l3_fast = { + .master = &dm81xx_tptc1_hwmod, + .slave = &dm81xx_alwon_l3_fast_hwmod, .clk = "sysclk4_ck", - .addr = dm816x_tptc1_addr_space, + .addr = 
dm81xx_tptc1_addr_space, .user = OCP_USER_MPU, }; -static struct omap_hwmod_addr_space dm816x_tptc2_addr_space[] = { +static struct omap_hwmod_addr_space dm81xx_tptc2_addr_space[] = { { .pa_start = 0x49a00000, .pa_end = 0x49a00000 + SZ_8K - 1, @@ -1015,40 +1150,40 @@ static struct omap_hwmod_addr_space dm816x_tptc2_addr_space[] = { { }, }; -static struct omap_hwmod_class dm816x_tptc2_hwmod_class = { +static struct omap_hwmod_class dm81xx_tptc2_hwmod_class = { .name = "tptc2", }; -struct omap_hwmod dm816x_tptc2_hwmod = { +static struct omap_hwmod dm81xx_tptc2_hwmod = { .name = "tptc2", - .class = &dm816x_tptc2_hwmod_class, + .class = &dm81xx_tptc2_hwmod_class, .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk4_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_TPTC2_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_TPTC2_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; -struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc2 = { - .master = &dm816x_alwon_l3_fast_hwmod, - .slave = &dm816x_tptc2_hwmod, +static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc2 = { + .master = &dm81xx_alwon_l3_fast_hwmod, + .slave = &dm81xx_tptc2_hwmod, .clk = "sysclk4_ck", - .addr = dm816x_tptc2_addr_space, + .addr = dm81xx_tptc2_addr_space, .user = OCP_USER_MPU, }; -struct omap_hwmod_ocp_if dm816x_tptc2__alwon_l3_fast = { - .master = &dm816x_tptc2_hwmod, - .slave = &dm816x_alwon_l3_fast_hwmod, +static struct omap_hwmod_ocp_if dm81xx_tptc2__alwon_l3_fast = { + .master = &dm81xx_tptc2_hwmod, + .slave = &dm81xx_alwon_l3_fast_hwmod, .clk = "sysclk4_ck", - .addr = dm816x_tptc2_addr_space, + .addr = dm81xx_tptc2_addr_space, .user = OCP_USER_MPU, }; -static struct omap_hwmod_addr_space dm816x_tptc3_addr_space[] = { +static struct omap_hwmod_addr_space dm81xx_tptc3_addr_space[] = { { .pa_start = 0x49b00000, .pa_end = 0x49b00000 + SZ_8K - 1, @@ -1057,50 +1192,96 @@ static struct omap_hwmod_addr_space dm816x_tptc3_addr_space[] = { { }, }; -static struct omap_hwmod_class dm816x_tptc3_hwmod_class = { +static struct omap_hwmod_class dm81xx_tptc3_hwmod_class = { .name = "tptc3", }; -struct omap_hwmod dm816x_tptc3_hwmod = { +static struct omap_hwmod dm81xx_tptc3_hwmod = { .name = "tptc3", - .class = &dm816x_tptc3_hwmod_class, + .class = &dm81xx_tptc3_hwmod_class, .clkdm_name = "alwon_l3s_clkdm", .main_clk = "sysclk4_ck", .prcm = { .omap4 = { - .clkctrl_offs = DM816X_CM_ALWON_TPTC3_CLKCTRL, + .clkctrl_offs = DM81XX_CM_ALWON_TPTC3_CLKCTRL, .modulemode = MODULEMODE_SWCTRL, }, }, }; -struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc3 = { - .master = &dm816x_alwon_l3_fast_hwmod, - .slave = &dm816x_tptc3_hwmod, +static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc3 = { + .master = &dm81xx_alwon_l3_fast_hwmod, + .slave = &dm81xx_tptc3_hwmod, .clk = "sysclk4_ck", - .addr = dm816x_tptc3_addr_space, + .addr = dm81xx_tptc3_addr_space, .user = OCP_USER_MPU, }; -struct omap_hwmod_ocp_if dm816x_tptc3__alwon_l3_fast = { - .master = &dm816x_tptc3_hwmod, - .slave = &dm816x_alwon_l3_fast_hwmod, +static struct omap_hwmod_ocp_if dm81xx_tptc3__alwon_l3_fast = { + .master = &dm81xx_tptc3_hwmod, + .slave = &dm81xx_alwon_l3_fast_hwmod, .clk = "sysclk4_ck", - .addr = dm816x_tptc3_addr_space, + .addr = dm81xx_tptc3_addr_space, .user = OCP_USER_MPU, }; +/* + * REVISIT: Test and enable the following once clocks work: + * dm81xx_l4_ls__gpio1 + * dm81xx_l4_ls__gpio2 + * dm81xx_l4_ls__mailbox + * dm81xx_alwon_l3_slow__gpmc + * dm81xx_default_l3_slow__usbss + * + * Also note that some devices share a single clkctrl_offs.. 
+ * For example, i2c1 and 3 share one, and i2c2 and 4 share one. + */ +static struct omap_hwmod_ocp_if *dm814x_hwmod_ocp_ifs[] __initdata = { + &dm814x_mpu__alwon_l3_slow, + &dm814x_mpu__alwon_l3_med, + &dm81xx_alwon_l3_slow__l4_ls, + &dm81xx_alwon_l3_slow__l4_hs, + &dm81xx_l4_ls__uart1, + &dm81xx_l4_ls__uart2, + &dm81xx_l4_ls__uart3, + &dm81xx_l4_ls__wd_timer1, + &dm81xx_l4_ls__i2c1, + &dm81xx_l4_ls__i2c2, + &dm81xx_l4_ls__elm, + &dm81xx_l4_ls__mcspi1, + &dm81xx_alwon_l3_fast__tpcc, + &dm81xx_alwon_l3_fast__tptc0, + &dm81xx_alwon_l3_fast__tptc1, + &dm81xx_alwon_l3_fast__tptc2, + &dm81xx_alwon_l3_fast__tptc3, + &dm81xx_tptc0__alwon_l3_fast, + &dm81xx_tptc1__alwon_l3_fast, + &dm81xx_tptc2__alwon_l3_fast, + &dm81xx_tptc3__alwon_l3_fast, + &dm814x_l4_ls__timer1, + &dm814x_l4_ls__timer2, + &dm814x_l4_hs__cpgmac0, + &dm814x_cpgmac0__mdio, + NULL, +}; + +int __init dm814x_hwmod_init(void) +{ + omap_hwmod_init(); + return omap_hwmod_register_links(dm814x_hwmod_ocp_ifs); +} + static struct omap_hwmod_ocp_if *dm816x_hwmod_ocp_ifs[] __initdata = { &dm816x_mpu__alwon_l3_slow, &dm816x_mpu__alwon_l3_med, - &dm816x_alwon_l3_slow__l4_ls, - &dm816x_alwon_l3_slow__l4_hs, - &dm816x_l4_ls__uart1, - &dm816x_l4_ls__uart2, - &dm816x_l4_ls__uart3, - &dm816x_l4_ls__wd_timer1, - &dm816x_l4_ls__i2c1, - &dm816x_l4_ls__i2c2, + &dm81xx_alwon_l3_slow__l4_ls, + &dm81xx_alwon_l3_slow__l4_hs, + &dm81xx_l4_ls__uart1, + &dm81xx_l4_ls__uart2, + &dm81xx_l4_ls__uart3, + &dm81xx_l4_ls__wd_timer1, + &dm81xx_l4_ls__i2c1, + &dm81xx_l4_ls__i2c2, &dm81xx_l4_ls__gpio1, &dm81xx_l4_ls__gpio2, &dm81xx_l4_ls__elm, @@ -1112,26 +1293,26 @@ static struct omap_hwmod_ocp_if *dm816x_hwmod_ocp_ifs[] __initdata = { &dm816x_l4_ls__timer5, &dm816x_l4_ls__timer6, &dm816x_l4_ls__timer7, - &dm816x_l4_ls__mcspi1, - &dm816x_l4_ls__mailbox, - &dm816x_l4_hs__emac0, - &dm816x_emac0__mdio, + &dm81xx_l4_ls__mcspi1, + &dm81xx_l4_ls__mailbox, + &dm81xx_l4_hs__emac0, + &dm81xx_emac0__mdio, &dm816x_l4_hs__emac1, - &dm816x_alwon_l3_fast__tpcc, - &dm816x_alwon_l3_fast__tptc0, - &dm816x_alwon_l3_fast__tptc1, - &dm816x_alwon_l3_fast__tptc2, - &dm816x_alwon_l3_fast__tptc3, - &dm816x_tptc0__alwon_l3_fast, - &dm816x_tptc1__alwon_l3_fast, - &dm816x_tptc2__alwon_l3_fast, - &dm816x_tptc3__alwon_l3_fast, + &dm81xx_alwon_l3_fast__tpcc, + &dm81xx_alwon_l3_fast__tptc0, + &dm81xx_alwon_l3_fast__tptc1, + &dm81xx_alwon_l3_fast__tptc2, + &dm81xx_alwon_l3_fast__tptc3, + &dm81xx_tptc0__alwon_l3_fast, + &dm81xx_tptc1__alwon_l3_fast, + &dm81xx_tptc2__alwon_l3_fast, + &dm81xx_tptc3__alwon_l3_fast, &dm81xx_alwon_l3_slow__gpmc, &dm81xx_default_l3_slow__usbss, NULL, }; -int __init ti81xx_hwmod_init(void) +int __init dm816x_hwmod_init(void) { omap_hwmod_init(); return omap_hwmod_register_links(dm816x_hwmod_ocp_ifs); diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 821171cf6b7d..1a352f561113 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -31,7 +31,7 @@ struct pdata_init { void (*fn)(void); }; -struct of_dev_auxdata omap_auxdata_lookup[]; +static struct of_dev_auxdata omap_auxdata_lookup[]; static struct twl4030_gpio_platform_data twl_gpio_auxdata; #ifdef CONFIG_MACH_NOKIA_N8X0 @@ -128,7 +128,7 @@ static void __init omap3_sbc_t3530_legacy_init(void) omap3_sbc_t3x_usb_hub_init(167, "sb-t35 usb hub"); } -struct ti_st_plat_data wilink_pdata = { +static struct ti_st_plat_data wilink_pdata = { .nshutdown_gpio = 137, .dev_name = "/dev/ttyO1", .flow_cntrl = 1, @@ -323,7 +323,7 @@ static struct pdata_init 
auxdata_quirks[] __initdata = { { /* sentinel */ }, }; -struct of_dev_auxdata omap_auxdata_lookup[] __initdata = { +static struct of_dev_auxdata omap_auxdata_lookup[] __initdata = { #ifdef CONFIG_MACH_NOKIA_N8X0 OF_DEV_AUXDATA("ti,omap2420-mmc", 0x4809c000, "mmci-omap.0", NULL), OF_DEV_AUXDATA("menelaus", 0x72, "1-0072", &n8x0_menelaus_platform_data), diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c index b1aad7e1426c..2a1a4180d5d0 100644 --- a/arch/arm/mach-omap2/pm24xx.c +++ b/arch/arm/mach-omap2/pm24xx.c @@ -25,6 +25,7 @@ #include <linux/sysfs.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/irq.h> #include <linux/time.h> diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c index 70bc7066a4c2..d31c495175c1 100644 --- a/arch/arm/mach-omap2/powerdomains3xxx_data.c +++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c @@ -349,6 +349,41 @@ static struct powerdomain device_81xx_pwrdm = { .voltdm = { .name = "core" }, }; +static struct powerdomain gem_814x_pwrdm = { + .name = "gem_pwrdm", + .prcm_offs = TI814X_PRM_DSP_MOD, + .pwrsts = PWRSTS_OFF_ON, + .voltdm = { .name = "dsp" }, +}; + +static struct powerdomain ivahd_814x_pwrdm = { + .name = "ivahd_pwrdm", + .prcm_offs = TI814X_PRM_HDVICP_MOD, + .pwrsts = PWRSTS_OFF_ON, + .voltdm = { .name = "iva" }, +}; + +static struct powerdomain hdvpss_814x_pwrdm = { + .name = "hdvpss_pwrdm", + .prcm_offs = TI814X_PRM_HDVPSS_MOD, + .pwrsts = PWRSTS_OFF_ON, + .voltdm = { .name = "dsp" }, +}; + +static struct powerdomain sgx_814x_pwrdm = { + .name = "sgx_pwrdm", + .prcm_offs = TI814X_PRM_GFX_MOD, + .pwrsts = PWRSTS_OFF_ON, + .voltdm = { .name = "core" }, +}; + +static struct powerdomain isp_814x_pwrdm = { + .name = "isp_pwrdm", + .prcm_offs = TI814X_PRM_ISP_MOD, + .pwrsts = PWRSTS_OFF_ON, + .voltdm = { .name = "core" }, +}; + static struct powerdomain active_816x_pwrdm = { .name = "active_pwrdm", .prcm_offs = TI816X_PRM_ACTIVE_MOD, @@ -448,7 +483,18 @@ static struct powerdomain *powerdomains_am35x[] __initdata = { NULL }; -static struct powerdomain *powerdomains_ti81xx[] __initdata = { +static struct powerdomain *powerdomains_ti814x[] __initdata = { + &alwon_81xx_pwrdm, + &device_81xx_pwrdm, + &gem_814x_pwrdm, + &ivahd_814x_pwrdm, + &hdvpss_814x_pwrdm, + &sgx_814x_pwrdm, + &isp_814x_pwrdm, + NULL +}; + +static struct powerdomain *powerdomains_ti816x[] __initdata = { &alwon_81xx_pwrdm, &device_81xx_pwrdm, &active_816x_pwrdm, @@ -460,6 +506,73 @@ static struct powerdomain *powerdomains_ti81xx[] __initdata = { NULL }; +/* TI81XX specific ops */ +#define TI81XX_PM_PWSTCTRL 0x0000 +#define TI81XX_RM_RSTCTRL 0x0010 +#define TI81XX_PM_PWSTST 0x0004 + +static int ti81xx_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) +{ + omap2_prm_rmw_mod_reg_bits(OMAP_POWERSTATE_MASK, + (pwrst << OMAP_POWERSTATE_SHIFT), + pwrdm->prcm_offs, TI81XX_PM_PWSTCTRL); + return 0; +} + +static int ti81xx_pwrdm_read_next_pwrst(struct powerdomain *pwrdm) +{ + return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, + TI81XX_PM_PWSTCTRL, + OMAP_POWERSTATE_MASK); +} + +static int ti81xx_pwrdm_read_pwrst(struct powerdomain *pwrdm) +{ + return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, + (pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ? 
TI81XX_RM_RSTCTRL : + TI81XX_PM_PWSTST, + OMAP_POWERSTATEST_MASK); +} + +static int ti81xx_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm) +{ + return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, + (pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ? TI81XX_RM_RSTCTRL : + TI81XX_PM_PWSTST, + OMAP3430_LOGICSTATEST_MASK); +} + +static int ti81xx_pwrdm_wait_transition(struct powerdomain *pwrdm) +{ + u32 c = 0; + + while ((omap2_prm_read_mod_reg(pwrdm->prcm_offs, + (pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ? TI81XX_RM_RSTCTRL : + TI81XX_PM_PWSTST) & + OMAP_INTRANSITION_MASK) && + (c++ < PWRDM_TRANSITION_BAILOUT)) + udelay(1); + + if (c > PWRDM_TRANSITION_BAILOUT) { + pr_err("powerdomain: %s timeout waiting for transition\n", + pwrdm->name); + return -EAGAIN; + } + + pr_debug("powerdomain: completed transition in %d loops\n", c); + + return 0; +} + +/* For dm814x we need to fix up the GFX pwstst and rstctrl reg offsets */ +static struct pwrdm_ops ti81xx_pwrdm_operations = { + .pwrdm_set_next_pwrst = ti81xx_pwrdm_set_next_pwrst, + .pwrdm_read_next_pwrst = ti81xx_pwrdm_read_next_pwrst, + .pwrdm_read_pwrst = ti81xx_pwrdm_read_pwrst, + .pwrdm_read_logic_pwrst = ti81xx_pwrdm_read_logic_pwrst, + .pwrdm_wait_transition = ti81xx_pwrdm_wait_transition, +}; + void __init omap3xxx_powerdomains_init(void) { unsigned int rev; @@ -467,15 +580,22 @@ void __init omap3xxx_powerdomains_init(void) if (!cpu_is_omap34xx() && !cpu_is_ti81xx()) return; - pwrdm_register_platform_funcs(&omap3_pwrdm_operations); + /* Only 81xx needs custom pwrdm_operations */ + if (!cpu_is_ti81xx()) + pwrdm_register_platform_funcs(&omap3_pwrdm_operations); rev = omap_rev(); if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) { pwrdm_register_pwrdms(powerdomains_am35x); + } else if (rev == TI8148_REV_ES1_0 || rev == TI8148_REV_ES2_0 || + rev == TI8148_REV_ES2_1) { + pwrdm_register_platform_funcs(&ti81xx_pwrdm_operations); + pwrdm_register_pwrdms(powerdomains_ti814x); } else if (rev == TI8168_REV_ES1_0 || rev == TI8168_REV_ES1_1 || rev == TI8168_REV_ES2_0 || rev == TI8168_REV_ES2_1) { - pwrdm_register_pwrdms(powerdomains_ti81xx); + pwrdm_register_platform_funcs(&ti81xx_pwrdm_operations); + pwrdm_register_pwrdms(powerdomains_ti816x); } else { pwrdm_register_pwrdms(powerdomains_omap3430_common); diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h index 6ae0b3a1781e..c8f590b7c32d 100644 --- a/arch/arm/mach-omap2/prcm-common.h +++ b/arch/arm/mach-omap2/prcm-common.h @@ -51,6 +51,12 @@ /* * TI81XX PRM module offsets */ +#define TI814X_PRM_DSP_MOD 0x0a00 +#define TI814X_PRM_HDVICP_MOD 0x0c00 +#define TI814X_PRM_ISP_MOD 0x0d00 +#define TI814X_PRM_HDVPSS_MOD 0x0e00 +#define TI814X_PRM_GFX_MOD 0x0f00 + #define TI81XX_PRM_DEVICE_MOD 0x0000 #define TI816X_PRM_ACTIVE_MOD 0x0a00 #define TI81XX_PRM_DEFAULT_MOD 0x0b00 @@ -472,6 +478,7 @@ struct omap_prcm_irq { * struct omap_prcm_irq_setup - PRCM interrupt controller details * @ack: PRM register offset for the first PRM_IRQSTATUS_MPU register * @mask: PRM register offset for the first PRM_IRQENABLE_MPU register + * @pm_ctrl: PRM register offset for the PRM_IO_PMCTRL register * @nr_regs: number of PRM_IRQ{STATUS,ENABLE}_MPU* registers * @nr_irqs: number of entries in the @irqs array * @irqs: ptr to an array of PRCM interrupt bits (see @nr_irqs) @@ -494,6 +501,7 @@ struct omap_prcm_irq { struct omap_prcm_irq_setup { u16 ack; u16 mask; + u16 pm_ctrl; u8 nr_regs; u8 nr_irqs; const struct omap_prcm_irq *irqs; diff --git a/arch/arm/mach-omap2/prcm43xx.h
b/arch/arm/mach-omap2/prcm43xx.h index 7eebc27fa892..7c34c44eb0ae 100644 --- a/arch/arm/mach-omap2/prcm43xx.h +++ b/arch/arm/mach-omap2/prcm43xx.h @@ -25,6 +25,13 @@ #define AM43XX_PRM_WKUP_INST 0x2000 #define AM43XX_PRM_DEVICE_INST 0x4000 +/* PRM_IRQ offsets */ +#define AM43XX_PRM_IRQSTATUS_MPU_OFFSET 0x0004 +#define AM43XX_PRM_IRQENABLE_MPU_OFFSET 0x0008 + +/* Other PRM offsets */ +#define AM43XX_PRM_IO_PMCTRL_OFFSET 0x0024 + /* RM RSTCTRL offsets */ #define AM43XX_RM_PER_RSTCTRL_OFFSET 0x0010 #define AM43XX_RM_GFX_RSTCTRL_OFFSET 0x0010 diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index 4541700f743a..30768003f854 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -18,13 +18,14 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/of_irq.h> - +#include <linux/of.h> #include "soc.h" #include "iomap.h" #include "common.h" #include "vp.h" #include "prm44xx.h" +#include "prcm43xx.h" #include "prm-regbits-44xx.h" #include "prcm44xx.h" #include "prminst44xx.h" @@ -45,6 +46,7 @@ static const struct omap_prcm_irq omap4_prcm_irqs[] = { static struct omap_prcm_irq_setup omap4_prcm_irq_setup = { .ack = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .mask = OMAP4_PRM_IRQENABLE_MPU_OFFSET, + .pm_ctrl = OMAP4_PRM_IO_PMCTRL_OFFSET, .nr_regs = 2, .irqs = omap4_prcm_irqs, .nr_irqs = ARRAY_SIZE(omap4_prcm_irqs), @@ -216,11 +218,11 @@ static inline u32 _read_pending_irq_reg(u16 irqen_offs, u16 irqst_offs) */ static void omap44xx_prm_read_pending_irqs(unsigned long *events) { - events[0] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_OFFSET, - OMAP4_PRM_IRQSTATUS_MPU_OFFSET); + int i; - events[1] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_2_OFFSET, - OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); + for (i = 0; i < omap4_prcm_irq_setup.nr_regs; i++) + events[i] = _read_pending_irq_reg(omap4_prcm_irq_setup.mask + + i * 4, omap4_prcm_irq_setup.ack + i * 4); } /** @@ -250,17 +252,17 @@ static void omap44xx_prm_ocp_barrier(void) */ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) { - saved_mask[0] = - omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQENABLE_MPU_OFFSET); - saved_mask[1] = - omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); + int i; + u16 reg; + + for (i = 0; i < omap4_prcm_irq_setup.nr_regs; i++) { + reg = omap4_prcm_irq_setup.mask + i * 4; - omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQENABLE_MPU_OFFSET); - omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); + saved_mask[i] = + omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, + reg); + omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, reg); + } /* OCP barrier */ omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, @@ -279,10 +281,12 @@ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) */ static void omap44xx_prm_restore_irqen(u32 *saved_mask) { - omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQENABLE_MPU_OFFSET); - omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); + int i; + + for (i = 0; i < omap4_prcm_irq_setup.nr_regs; i++) + omap4_prm_write_inst_reg(saved_mask[i], + OMAP4430_PRM_OCP_SOCKET_INST, + omap4_prcm_irq_setup.mask + i * 4); } /** @@ -306,10 +310,10 @@ static void omap44xx_prm_reconfigure_io_chain(void) omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, OMAP4430_WUCLK_CTRL_MASK, inst, - OMAP4_PRM_IO_PMCTRL_OFFSET); + 
omap4_prcm_irq_setup.pm_ctrl); omap_test_timeout( (((omap4_prm_read_inst_reg(inst, - OMAP4_PRM_IO_PMCTRL_OFFSET) & + omap4_prcm_irq_setup.pm_ctrl) & OMAP4430_WUCLK_STATUS_MASK) >> OMAP4430_WUCLK_STATUS_SHIFT) == 1), MAX_IOPAD_LATCH_TIME, i); @@ -319,10 +323,10 @@ static void omap44xx_prm_reconfigure_io_chain(void) /* Trigger WUCLKIN disable */ omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, 0x0, inst, - OMAP4_PRM_IO_PMCTRL_OFFSET); + omap4_prcm_irq_setup.pm_ctrl); omap_test_timeout( (((omap4_prm_read_inst_reg(inst, - OMAP4_PRM_IO_PMCTRL_OFFSET) & + omap4_prcm_irq_setup.pm_ctrl) & OMAP4430_WUCLK_STATUS_MASK) >> OMAP4430_WUCLK_STATUS_SHIFT) == 0), MAX_IOPAD_LATCH_TIME, i); @@ -350,7 +354,7 @@ static void __init omap44xx_prm_enable_io_wakeup(void) omap4_prm_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK, OMAP4430_GLOBAL_WUEN_MASK, inst, - OMAP4_PRM_IO_PMCTRL_OFFSET); + omap4_prcm_irq_setup.pm_ctrl); } /** @@ -719,6 +723,15 @@ int __init omap44xx_prm_init(const struct omap_prcm_init_data *data) omap4_prminst_set_prm_dev_inst(data->device_inst_offset); + /* Add AM437X specific differences */ + if (of_device_is_compatible(data->np, "ti,am4-prcm")) { + omap4_prcm_irq_setup.nr_irqs = 1; + omap4_prcm_irq_setup.nr_regs = 1; + omap4_prcm_irq_setup.pm_ctrl = AM43XX_PRM_IO_PMCTRL_OFFSET; + omap4_prcm_irq_setup.ack = AM43XX_PRM_IRQSTATUS_MPU_OFFSET; + omap4_prcm_irq_setup.mask = AM43XX_PRM_IRQENABLE_MPU_OFFSET; + } + return prm_register(&omap44xx_prm_ll_data); } diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 7add7994dbfc..257e98c26618 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c @@ -696,6 +696,7 @@ static struct omap_prcm_init_data am4_prm_data __initdata = { .index = TI_CLKM_PRM, .init = omap44xx_prm_init, .device_inst_offset = AM43XX_PRM_DEVICE_INST, + .flags = PRM_HAS_IO_WAKEUP, }; #endif @@ -705,7 +706,7 @@ static struct omap_prcm_init_data scrm_data __initdata = { }; #endif -static const struct of_device_id omap_prcm_dt_match_table[] __initconst = { +static const struct of_device_id const omap_prcm_dt_match_table[] __initconst = { #ifdef CONFIG_SOC_AM33XX { .compatible = "ti,am3-prcm", .data = &am3_prm_data }, #endif diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index cac46d852da1..e4d8701f99f9 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -102,38 +102,38 @@ static int omap2_gp_timer_set_next_event(unsigned long cycles, return 0; } -static void omap2_gp_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int omap2_gp_timer_shutdown(struct clock_event_device *evt) +{ + __omap_dm_timer_stop(&clkev, OMAP_TIMER_POSTED, clkev.rate); + return 0; +} + +static int omap2_gp_timer_set_periodic(struct clock_event_device *evt) { u32 period; __omap_dm_timer_stop(&clkev, OMAP_TIMER_POSTED, clkev.rate); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - period = clkev.rate / HZ; - period -= 1; - /* Looks like we need to first set the load value separately */ - __omap_dm_timer_write(&clkev, OMAP_TIMER_LOAD_REG, - 0xffffffff - period, OMAP_TIMER_POSTED); - __omap_dm_timer_load_start(&clkev, - OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST, - 0xffffffff - period, OMAP_TIMER_POSTED); - break; - case CLOCK_EVT_MODE_ONESHOT: - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - break; - } + period = clkev.rate / HZ; + period -= 1; + /* Looks like we need to first set the load value separately */ + 
__omap_dm_timer_write(&clkev, OMAP_TIMER_LOAD_REG, 0xffffffff - period, + OMAP_TIMER_POSTED); + __omap_dm_timer_load_start(&clkev, + OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST, + 0xffffffff - period, OMAP_TIMER_POSTED); + return 0; } static struct clock_event_device clockevent_gpt = { - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .rating = 300, - .set_next_event = omap2_gp_timer_set_next_event, - .set_mode = omap2_gp_timer_set_mode, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 300, + .set_next_event = omap2_gp_timer_set_next_event, + .set_state_shutdown = omap2_gp_timer_shutdown, + .set_state_periodic = omap2_gp_timer_set_periodic, + .set_state_oneshot = omap2_gp_timer_shutdown, + .tick_resume = omap2_gp_timer_shutdown, }; static struct property device_disabled = { @@ -208,8 +208,7 @@ static void __init omap_dmtimer_init(void) /* If we are a secure device, remove any secure timer nodes */ if ((omap_type() != OMAP2_DEVICE_TYPE_GP)) { np = omap_get_timer_dt(omap_timer_match, "ti,timer-secure"); - if (np) - of_node_put(np); + of_node_put(np); } } @@ -649,23 +648,10 @@ static OMAP_SYS_32K_TIMER_INIT(4, 1, "timer_32k_ck", "ti,timer-alwon", #ifdef CONFIG_ARCH_OMAP4 #ifdef CONFIG_HAVE_ARM_TWD -static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, OMAP44XX_LOCAL_TWD_BASE, 29); void __init omap4_local_timer_init(void) { omap4_sync32k_timer_init(); - /* Local timers are not supprted on OMAP4430 ES1.0 */ - if (omap_rev() != OMAP4430_REV_ES1_0) { - int err; - - if (of_have_populated_dt()) { - clocksource_of_init(); - return; - } - - err = twd_local_timer_register(&twd_local_timer); - if (err) - pr_err("twd_local_timer_register failed %d\n", err); - } + clocksource_of_init(); } #else void __init omap4_local_timer_init(void) diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c index 076fd20d7e5a..e5a35f6b83a7 100644 --- a/arch/arm/mach-omap2/vc.c +++ b/arch/arm/mach-omap2/vc.c @@ -563,7 +563,7 @@ struct i2c_init_data { u8 hsscll_12; }; -static const __initdata struct i2c_init_data omap4_i2c_timing_data[] = { +static const struct i2c_init_data const omap4_i2c_timing_data[] __initconst = { { .load = 50, .loadbits = 0x3, diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c index 261bb7cb4e60..307676d8c53c 100644 --- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c +++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c @@ -95,7 +95,7 @@ static struct voltagedomain *voltagedomains_am35xx[] __initdata = { }; -static const char *sys_clk_name __initdata = "sys_ck"; +static const char *const sys_clk_name __initconst = "sys_ck"; void __init omap3xxx_voltagedomains_init(void) { diff --git a/arch/arm/mach-omap2/voltagedomains44xx_data.c b/arch/arm/mach-omap2/voltagedomains44xx_data.c index 48b22a0a0c88..9b1f245b57d6 100644 --- a/arch/arm/mach-omap2/voltagedomains44xx_data.c +++ b/arch/arm/mach-omap2/voltagedomains44xx_data.c @@ -92,7 +92,7 @@ static struct voltagedomain *voltagedomains_omap4[] __initdata = { NULL, }; -static const char *sys_clk_name __initdata = "sys_clkin_ck"; +static const char *const sys_clk_name __initconst = "sys_clkin_ck"; void __init omap44xx_voltagedomains_init(void) { diff --git a/arch/arm/mach-omap2/voltagedomains54xx_data.c b/arch/arm/mach-omap2/voltagedomains54xx_data.c index 33d22b87252d..af5ff6496441 100644 --- a/arch/arm/mach-omap2/voltagedomains54xx_data.c +++ b/arch/arm/mach-omap2/voltagedomains54xx_data.c @@ -78,7 +78,7 @@ static struct voltagedomain *voltagedomains_omap5[] 
__initdata = { NULL, }; -static const char *sys_clk_name __initdata = "sys_clkin"; +static const char *const sys_clk_name __initconst = "sys_clkin"; void __init omap54xx_voltagedomains_init(void) { diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c index 79f033b1ddff..d0871786dd8a 100644 --- a/arch/arm/mach-orion5x/board-dt.c +++ b/arch/arm/mach-orion5x/board-dt.c @@ -16,7 +16,6 @@ #include <linux/of_platform.h> #include <linux/cpu.h> #include <linux/mbus.h> -#include <linux/clk-provider.h> #include <linux/clocksource.h> #include <asm/system_misc.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c index 09d2a26985da..f267e58a8283 100644 --- a/arch/arm/mach-orion5x/dns323-setup.c +++ b/arch/arm/mach-orion5x/dns323-setup.c @@ -236,9 +236,7 @@ static int __init dns323_read_mac_addr(void) } iounmap(mac_page); - printk("DNS-323: Found ethernet MAC address: "); - for (i = 0; i < 6; i++) - printk("%.2x%s", addr[i], (i < 5) ? ":" : ".\n"); + printk("DNS-323: Found ethernet MAC address: %pM\n", addr); memcpy(dns323_eth_data.mac_addr, addr, 6); diff --git a/arch/arm/mach-orion5x/include/mach/irqs.h b/arch/arm/mach-orion5x/include/mach/irqs.h index a6fa9d8f12d8..2431d9923427 100644 --- a/arch/arm/mach-orion5x/include/mach/irqs.h +++ b/arch/arm/mach-orion5x/include/mach/irqs.h @@ -16,42 +16,42 @@ /* * Orion Main Interrupt Controller */ -#define IRQ_ORION5X_BRIDGE 0 -#define IRQ_ORION5X_DOORBELL_H2C 1 -#define IRQ_ORION5X_DOORBELL_C2H 2 -#define IRQ_ORION5X_UART0 3 -#define IRQ_ORION5X_UART1 4 -#define IRQ_ORION5X_I2C 5 -#define IRQ_ORION5X_GPIO_0_7 6 -#define IRQ_ORION5X_GPIO_8_15 7 -#define IRQ_ORION5X_GPIO_16_23 8 -#define IRQ_ORION5X_GPIO_24_31 9 -#define IRQ_ORION5X_PCIE0_ERR 10 -#define IRQ_ORION5X_PCIE0_INT 11 -#define IRQ_ORION5X_USB1_CTRL 12 -#define IRQ_ORION5X_DEV_BUS_ERR 14 -#define IRQ_ORION5X_PCI_ERR 15 -#define IRQ_ORION5X_USB_BR_ERR 16 -#define IRQ_ORION5X_USB0_CTRL 17 -#define IRQ_ORION5X_ETH_RX 18 -#define IRQ_ORION5X_ETH_TX 19 -#define IRQ_ORION5X_ETH_MISC 20 -#define IRQ_ORION5X_ETH_SUM 21 -#define IRQ_ORION5X_ETH_ERR 22 -#define IRQ_ORION5X_IDMA_ERR 23 -#define IRQ_ORION5X_IDMA_0 24 -#define IRQ_ORION5X_IDMA_1 25 -#define IRQ_ORION5X_IDMA_2 26 -#define IRQ_ORION5X_IDMA_3 27 -#define IRQ_ORION5X_CESA 28 -#define IRQ_ORION5X_SATA 29 -#define IRQ_ORION5X_XOR0 30 -#define IRQ_ORION5X_XOR1 31 +#define IRQ_ORION5X_BRIDGE (1 + 0) +#define IRQ_ORION5X_DOORBELL_H2C (1 + 1) +#define IRQ_ORION5X_DOORBELL_C2H (1 + 2) +#define IRQ_ORION5X_UART0 (1 + 3) +#define IRQ_ORION5X_UART1 (1 + 4) +#define IRQ_ORION5X_I2C (1 + 5) +#define IRQ_ORION5X_GPIO_0_7 (1 + 6) +#define IRQ_ORION5X_GPIO_8_15 (1 + 7) +#define IRQ_ORION5X_GPIO_16_23 (1 + 8) +#define IRQ_ORION5X_GPIO_24_31 (1 + 9) +#define IRQ_ORION5X_PCIE0_ERR (1 + 10) +#define IRQ_ORION5X_PCIE0_INT (1 + 11) +#define IRQ_ORION5X_USB1_CTRL (1 + 12) +#define IRQ_ORION5X_DEV_BUS_ERR (1 + 14) +#define IRQ_ORION5X_PCI_ERR (1 + 15) +#define IRQ_ORION5X_USB_BR_ERR (1 + 16) +#define IRQ_ORION5X_USB0_CTRL (1 + 17) +#define IRQ_ORION5X_ETH_RX (1 + 18) +#define IRQ_ORION5X_ETH_TX (1 + 19) +#define IRQ_ORION5X_ETH_MISC (1 + 20) +#define IRQ_ORION5X_ETH_SUM (1 + 21) +#define IRQ_ORION5X_ETH_ERR (1 + 22) +#define IRQ_ORION5X_IDMA_ERR (1 + 23) +#define IRQ_ORION5X_IDMA_0 (1 + 24) +#define IRQ_ORION5X_IDMA_1 (1 + 25) +#define IRQ_ORION5X_IDMA_2 (1 + 26) +#define IRQ_ORION5X_IDMA_3 (1 + 27) +#define IRQ_ORION5X_CESA (1 + 28) +#define IRQ_ORION5X_SATA (1 + 29) +#define 
IRQ_ORION5X_XOR0 (1 + 30) +#define IRQ_ORION5X_XOR1 (1 + 31) /* * Orion General Purpose Pins */ -#define IRQ_ORION5X_GPIO_START 32 +#define IRQ_ORION5X_GPIO_START 33 #define NR_GPIO_IRQS 32 #define NR_IRQS (IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS) diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c index cd4bac4d7e43..086ecb87d885 100644 --- a/arch/arm/mach-orion5x/irq.c +++ b/arch/arm/mach-orion5x/irq.c @@ -42,7 +42,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs) stat = readl_relaxed(MAIN_IRQ_CAUSE); stat &= readl_relaxed(MAIN_IRQ_MASK); if (stat) { - unsigned int hwirq = __fls(stat); + unsigned int hwirq = 1 + __fls(stat); handle_IRQ(hwirq, regs); return; } @@ -51,7 +51,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs) void __init orion5x_init_irq(void) { - orion_irq_init(0, MAIN_IRQ_MASK); + orion_irq_init(1, MAIN_IRQ_MASK); #ifdef CONFIG_MULTI_IRQ_HANDLER set_handle_irq(orion5x_legacy_handle_irq); diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c index 7189827d641d..24b2959719fa 100644 --- a/arch/arm/mach-orion5x/tsx09-common.c +++ b/arch/arm/mach-orion5x/tsx09-common.c @@ -101,9 +101,7 @@ static int __init qnap_tsx09_check_mac_addr(const char *addr_str) addr[i] = byte; } - printk(KERN_INFO "tsx09: found ethernet mac address "); - for (i = 0; i < 6; i++) - printk("%.2x%s", addr[i], (i < 5) ? ":" : ".\n"); + printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr); memcpy(qnap_tsx09_eth_data.mac_addr, addr, 6); diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index d897292712eb..70366b35d299 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c @@ -496,18 +496,18 @@ static struct irq_chip balloon3_irq_chip = { .irq_unmask = balloon3_unmask_irq, }; -static void balloon3_irq_handler(unsigned int irq, struct irq_desc *desc) +static void balloon3_irq_handler(unsigned int __irq, struct irq_desc *desc) { unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) & balloon3_irq_enabled; do { - /* clear useless edge notification */ - if (desc->irq_data.chip->irq_ack) { - struct irq_data *d; + struct irq_data *d = irq_desc_get_irq_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int irq; - d = irq_get_irq_data(BALLOON3_AUX_NIRQ); - desc->irq_data.chip->irq_ack(d); - } + /* clear useless edge notification */ + if (chip->irq_ack) + chip->irq_ack(d); while (pending) { irq = BALLOON3_IRQ(0) + __ffs(pending); @@ -528,7 +528,7 @@ static void __init balloon3_init_irq(void) for (irq = BALLOON3_IRQ(0); irq <= BALLOON3_IRQ(7); irq++) { irq_set_chip_and_handler(irq, &balloon3_irq_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } irq_set_chained_handler(BALLOON3_AUX_NIRQ, balloon3_irq_handler); diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c index c092730749b9..bf366b39fa61 100644 --- a/arch/arm/mach-pxa/capc7117.c +++ b/arch/arm/mach-pxa/capc7117.c @@ -24,6 +24,7 @@ #include <linux/ata_platform.h> #include <linux/serial_8250.h> #include <linux/gpio.h> +#include <linux/regulator/machine.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -144,6 +145,8 @@ static void __init capc7117_init(void) capc7117_uarts_init(); capc7117_ide_init(); + + regulator_has_full_constraints(); } MACHINE_START(CAPC7117, diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c index
d8f816c24a2f..1fa79f1f832d 100644 --- a/arch/arm/mach-pxa/cm-x2xx-pci.c +++ b/arch/arm/mach-pxa/cm-x2xx-pci.c @@ -29,8 +29,9 @@ void __iomem *it8152_base_address; static int cmx2xx_it8152_irq_gpio; -static void cmx2xx_it8152_irq_demux(unsigned int irq, struct irq_desc *desc) +static void cmx2xx_it8152_irq_demux(unsigned int __irq, struct irq_desc *desc) { + unsigned int irq = irq_desc_get_irq(desc); /* clear our parent irq */ desc->irq_data.chip->irq_ack(&desc->irq_data); diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c index bb99f59a36d8..a17a91eb8e9a 100644 --- a/arch/arm/mach-pxa/cm-x2xx.c +++ b/arch/arm/mach-pxa/cm-x2xx.c @@ -13,6 +13,7 @@ #include <linux/syscore_ops.h> #include <linux/irq.h> #include <linux/gpio.h> +#include <linux/regulator/machine.h> #include <linux/dm9000.h> #include <linux/leds.h> @@ -466,6 +467,8 @@ static void __init cmx2xx_init(void) cmx2xx_init_ac97(); cmx2xx_init_touchscreen(); cmx2xx_init_leds(); + + regulator_has_full_constraints(); } static void __init cmx2xx_init_irq(void) diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index 4d3588d26c2a..5851f4c254c1 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c @@ -835,6 +835,8 @@ static void __init cm_x300_init(void) cm_x300_init_ac97(); cm_x300_init_wi2wi(); cm_x300_init_bl(); + + regulator_has_full_constraints(); } static void __init cm_x300_fixup(struct tag *tags, char **cmdline) diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c index 5f9d9303b346..3503826333c7 100644 --- a/arch/arm/mach-pxa/colibri-pxa270.c +++ b/arch/arm/mach-pxa/colibri-pxa270.c @@ -18,6 +18,7 @@ #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/platform_device.h> +#include <linux/regulator/machine.h> #include <linux/ucb1400.h> #include <asm/mach/arch.h> @@ -294,6 +295,8 @@ static void __init colibri_pxa270_init(void) printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n", colibri_pxa270_baseboard); } + + regulator_has_full_constraints(); } /* The "Income s.r.o. 
SH-Dmaster PXA270 SBC" board can be booted either diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index 35434662dc7c..e6ce669b54af 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -17,6 +17,7 @@ #include <linux/platform_data/camera-pxa.h> #include <mach/audio.h> #include <mach/hardware.h> +#include <linux/platform_data/mmp_dma.h> #include <linux/platform_data/mtd-nand-pxa3xx.h> #include "devices.h" @@ -1193,3 +1194,39 @@ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info) pd->dev.platform_data = info; platform_device_add(pd); } + +static struct mmp_dma_platdata pxa_dma_pdata = { + .dma_channels = 0, +}; + +static struct resource pxa_dma_resource[] = { + [0] = { + .start = 0x40000000, + .end = 0x4000ffff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = IRQ_DMA, + .end = IRQ_DMA, + .flags = IORESOURCE_IRQ, + }, +}; + +static u64 pxadma_dmamask = 0xffffffffUL; + +static struct platform_device pxa2xx_pxa_dma = { + .name = "pxa-dma", + .id = 0, + .dev = { + .dma_mask = &pxadma_dmamask, + .coherent_dma_mask = 0xffffffff, + }, + .num_resources = ARRAY_SIZE(pxa_dma_resource), + .resource = pxa_dma_resource, +}; + +void __init pxa2xx_set_dmac_info(int nb_channels) +{ + pxa_dma_pdata.dma_channels = nb_channels; + pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata); +} diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 51531ecffca8..9d7072b04045 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -1306,6 +1306,8 @@ static void __init em_x270_init(void) em_x270_init_i2c(); em_x270_init_camera(); em_x270_userspace_consumers_init(); + + regulator_has_full_constraints(); } MACHINE_START(EM_X270, "Compulab EM-X270") diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c index c98511c5abd1..9b0eb0252af6 100644 --- a/arch/arm/mach-pxa/icontrol.c +++ b/arch/arm/mach-pxa/icontrol.c @@ -26,6 +26,7 @@ #include <linux/spi/spi.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/can/platform/mcp251x.h> +#include <linux/regulator/machine.h> #include "generic.h" @@ -185,6 +186,8 @@ static void __init icontrol_init(void) mxm_8x10_mmc_init(); icontrol_can_init(); + + regulator_has_full_constraints(); } MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM") diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 98608c5575cb..9c10248fadcc 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c @@ -133,7 +133,6 @@ static int pxa_irq_map(struct irq_domain *h, unsigned int virq, irq_set_chip_and_handler(virq, &pxa_internal_irq_chip, handle_level_irq); irq_set_chip_data(virq, base); - set_irq_flags(virq, IRQF_VALID); return 0; } diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index eaee2c20b189..b070167deef2 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c @@ -120,8 +120,9 @@ static struct irq_chip lpd270_irq_chip = { .irq_unmask = lpd270_unmask_irq, }; -static void lpd270_irq_handler(unsigned int irq, struct irq_desc *desc) +static void lpd270_irq_handler(unsigned int __irq, struct irq_desc *desc) { + unsigned int irq; unsigned long pending; pending = __raw_readw(LPD270_INT_STATUS) & lpd270_irq_enabled; @@ -151,7 +152,7 @@ static void __init lpd270_init_irq(void) for (irq = LPD270_IRQ(2); irq <= LPD270_IRQ(4); irq++) { irq_set_chip_and_handler(irq, &lpd270_irq_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | 
IRQ_NOPROBE); } irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lpd270_irq_handler); irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING); diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index 2897da2a5df6..9a0c8affdadb 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c @@ -284,8 +284,9 @@ static struct irq_chip pcm990_irq_chip = { .irq_unmask = pcm990_unmask_irq, }; -static void pcm990_irq_handler(unsigned int irq, struct irq_desc *desc) +static void pcm990_irq_handler(unsigned int __irq, struct irq_desc *desc) { + unsigned int irq; unsigned long pending; pending = ~pcm990_cpld_readb(PCM990_CTRL_INTSETCLR); @@ -311,7 +312,7 @@ static void __init pcm990_init_irq(void) for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) { irq_set_chip_and_handler(irq, &pcm990_irq_chip, handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); } /* disable all Interrupts */ diff --git a/arch/arm/mach-pxa/pxa-dt.c b/arch/arm/mach-pxa/pxa-dt.c index 7e0e5bd0c9de..8e0e62ccdced 100644 --- a/arch/arm/mach-pxa/pxa-dt.c +++ b/arch/arm/mach-pxa/pxa-dt.c @@ -19,7 +19,7 @@ #include "generic.h" #ifdef CONFIG_PXA3xx -static const struct of_dev_auxdata pxa3xx_auxdata_lookup[] __initconst = { +static const struct of_dev_auxdata const pxa3xx_auxdata_lookup[] __initconst = { OF_DEV_AUXDATA("mrvl,pxa-uart", 0x40100000, "pxa2xx-uart.0", NULL), OF_DEV_AUXDATA("mrvl,pxa-uart", 0x40200000, "pxa2xx-uart.1", NULL), OF_DEV_AUXDATA("mrvl,pxa-uart", 0x40700000, "pxa2xx-uart.2", NULL), @@ -39,7 +39,7 @@ static void __init pxa3xx_dt_init(void) pxa3xx_auxdata_lookup, NULL); } -static const char *pxa3xx_dt_board_compat[] __initdata = { +static const char *const pxa3xx_dt_board_compat[] __initconst = { "marvell,pxa300", "marvell,pxa310", "marvell,pxa320", diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index 23a90c62ec11..1dc85ffc3e20 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c @@ -206,6 +206,7 @@ static int __init pxa25x_init(void) register_syscore_ops(&pxa_irq_syscore_ops); register_syscore_ops(&pxa2xx_mfp_syscore_ops); + pxa2xx_set_dmac_info(16); pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info); ret = platform_add_devices(pxa25x_devices, ARRAY_SIZE(pxa25x_devices)); diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index b5abdeb5bb2d..e6aae9e8adfb 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -310,6 +310,7 @@ static int __init pxa27x_init(void) if (!of_have_populated_dt()) { pxa_register_device(&pxa27x_device_gpio, &pxa27x_gpio_info); + pxa2xx_set_dmac_info(32); ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index bd4cbef15ccf..165638462a2f 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -325,7 +325,7 @@ static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *, for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) { irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip, handle_edge_irq); - set_irq_flags(irq, IRQF_VALID); + irq_clear_status_flags(irq, IRQ_NOREQUEST); } pxa_ext_wakeup_chip.irq_set_wake = fn; @@ -431,6 +431,7 @@ static int __init pxa3xx_init(void) if (of_have_populated_dt()) return 0; + pxa2xx_set_dmac_info(32); ret = platform_add_devices(devices, ARRAY_SIZE(devices)); if (ret) return ret; diff --git a/arch/arm/mach-pxa/sharpsl_pm.c 
b/arch/arm/mach-pxa/sharpsl_pm.c index 051a6555cbf9..bdc0c41bc4fd 100644 --- a/arch/arm/mach-pxa/sharpsl_pm.c +++ b/arch/arm/mach-pxa/sharpsl_pm.c @@ -841,11 +841,9 @@ static int sharpsl_pm_probe(struct platform_device *pdev) sharpsl_pm.charge_mode = CHRG_OFF; sharpsl_pm.flags = 0; - init_timer(&sharpsl_pm.ac_timer); - sharpsl_pm.ac_timer.function = sharpsl_ac_timer; + setup_timer(&sharpsl_pm.ac_timer, sharpsl_ac_timer, 0UL); - init_timer(&sharpsl_pm.chrg_full_timer); - sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer; + setup_timer(&sharpsl_pm.chrg_full_timer, sharpsl_chrg_full_timer, 0UL); led_trigger_register_simple("sharpsl-charge", &sharpsl_charge_led_trigger); diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index 685deff861d2..e0a53208880a 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c @@ -131,17 +131,4 @@ static struct platform_driver tosa_bt_driver = { .name = "tosa-bt", }, }; - - -static int __init tosa_bt_init(void) -{ - return platform_driver_register(&tosa_bt_driver); -} - -static void __exit tosa_bt_exit(void) -{ - platform_driver_unregister(&tosa_bt_driver); -} - -module_init(tosa_bt_init); -module_exit(tosa_bt_exit); +module_platform_driver(tosa_bt_driver); diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c index 872dcb20e757..066e3a250ee0 100644 --- a/arch/arm/mach-pxa/trizeps4.c +++ b/arch/arm/mach-pxa/trizeps4.c @@ -26,6 +26,7 @@ #include <linux/dm9000.h> #include <linux/mtd/physmap.h> #include <linux/mtd/partitions.h> +#include <linux/regulator/machine.h> #include <linux/i2c/pxa-i2c.h> #include <asm/types.h> @@ -534,6 +535,8 @@ static void __init trizeps4_init(void) BCR_writew(trizeps_conxs_bcr); board_backlight_power(1); + + regulator_has_full_constraints(); } static void __init trizeps4_map_io(void) diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index de3b08073fe7..4841d6cefe76 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c @@ -276,8 +276,9 @@ static inline unsigned long viper_irq_pending(void) viper_irq_enabled_mask; } -static void viper_irq_handler(unsigned int irq, struct irq_desc *desc) +static void viper_irq_handler(unsigned int __irq, struct irq_desc *desc) { + unsigned int irq; unsigned long pending; pending = viper_irq_pending(); @@ -313,7 +314,7 @@ static void __init viper_init_irq(void) isa_irq = viper_bit_to_irq(level); irq_set_chip_and_handler(isa_irq, &viper_irq_chip, handle_edge_irq); - set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(isa_irq, IRQ_NOREQUEST | IRQ_NOPROBE); } irq_set_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO), diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c index aa89488f961e..54122a983ae3 100644 --- a/arch/arm/mach-pxa/vpac270.c +++ b/arch/arm/mach-pxa/vpac270.c @@ -24,6 +24,7 @@ #include <linux/dm9000.h> #include <linux/ucb1400.h> #include <linux/ata_platform.h> +#include <linux/regulator/machine.h> #include <linux/regulator/max1586.h> #include <linux/i2c/pxa-i2c.h> @@ -711,6 +712,8 @@ static void __init vpac270_init(void) vpac270_ts_init(); vpac270_rtc_init(); vpac270_ide_init(); + + regulator_has_full_constraints(); } MACHINE_START(VPAC270, "Voipac PXA270") diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index ac2ae5c71ab4..6f94dd7b4dee 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -105,8 +105,9 @@ static inline unsigned long zeus_irq_pending(void) return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask; } 
-static void zeus_irq_handler(unsigned int irq, struct irq_desc *desc) +static void zeus_irq_handler(unsigned int __irq, struct irq_desc *desc) { + unsigned int irq; unsigned long pending; pending = zeus_irq_pending(); @@ -151,7 +152,7 @@ static void __init zeus_init_irq(void) isa_irq = zeus_bit_to_irq(level); irq_set_chip_and_handler(isa_irq, &zeus_irq_chip, handle_edge_irq); - set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(isa_irq, IRQ_NOREQUEST | IRQ_NOPROBE); } irq_set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING); @@ -868,6 +869,8 @@ static void __init zeus_init(void) i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices)); pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info); spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info)); + + regulator_has_full_constraints(); } static struct map_desc zeus_io_desc[] __initdata = { diff --git a/arch/arm/mach-realview/realview-dt.c b/arch/arm/mach-realview/realview-dt.c index cc28b89dd48f..382cc1b90519 100644 --- a/arch/arm/mach-realview/realview-dt.c +++ b/arch/arm/mach-realview/realview-dt.c @@ -13,7 +13,7 @@ #include <asm/hardware/cache-l2x0.h> #include "core.h" -static const char *realview_dt_platform_compat[] __initconst = { +static const char *const realview_dt_platform_compat[] __initconst = { "arm,realview-eb", "arm,realview-pb1176", "arm,realview-pb11mp", diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c index 8fcec1cc101e..3e7a4b761a95 100644 --- a/arch/arm/mach-rockchip/platsmp.c +++ b/arch/arm/mach-rockchip/platsmp.c @@ -72,29 +72,22 @@ static struct reset_control *rockchip_get_core_reset(int cpu) static int pmu_set_power_domain(int pd, bool on) { u32 val = (on) ? 0 : BIT(pd); + struct reset_control *rstc = rockchip_get_core_reset(pd); int ret; + if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) { + pr_err("%s: could not get reset control for core %d\n", + __func__, pd); + return PTR_ERR(rstc); + } + /* * We need to soft reset the cpu when we turn off the cpu power domain, * or else the active processors might be stalled when the individual * processor is powered down. 
*/ - if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) { - struct reset_control *rstc = rockchip_get_core_reset(pd); - - if (IS_ERR(rstc)) { - pr_err("%s: could not get reset control for core %d\n", - __func__, pd); - return PTR_ERR(rstc); - } - - if (on) - reset_control_deassert(rstc); - else - reset_control_assert(rstc); - - reset_control_put(rstc); - } + if (!IS_ERR(rstc) && !on) + reset_control_assert(rstc); ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val); if (ret < 0) { @@ -107,11 +100,17 @@ static int pmu_set_power_domain(int pd, bool on) ret = pmu_power_domain_is_on(pd); if (ret < 0) { pr_err("%s: could not read power domain state\n", - __func__); + __func__); return ret; } } + if (!IS_ERR(rstc)) { + if (on) + reset_control_deassert(rstc); + reset_control_put(rstc); + } + return 0; } @@ -130,7 +129,7 @@ static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle) if (cpu >= ncores) { pr_err("%s: cpu %d outside maximum number of cpus %d\n", - __func__, cpu, ncores); + __func__, cpu, ncores); return -ENXIO; } @@ -140,14 +139,19 @@ static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle) return ret; if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) { - /* We communicate with the bootrom to active the cpus other + /* + * We communicate with the bootrom to active the cpus other * than cpu0, after a blob of initialize code, they will * stay at wfe state, once they are actived, they will check * the mailbox: * sram_base_addr + 4: 0xdeadbeaf * sram_base_addr + 8: start address for pc - * */ - udelay(10); + * Cpu0 needs to wait for the other cpus to enter + * the wfe state. The wait time is affected by many aspects + * (e.g. cpu frequency, bootrom frequency, sram frequency, ...) + */ + mdelay(1); /* give the cpus other than cpu0 time to start up */ + writel(virt_to_phys(secondary_startup), sram_base_addr + 8); writel(0xDEADBEAF, sram_base_addr + 4); dsb_sev(); @@ -317,6 +321,13 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus) #ifdef CONFIG_HOTPLUG_CPU static int rockchip_cpu_kill(unsigned int cpu) { + /* + * We need a delay here to ensure that the dying CPU can finish + * executing v7_exit_coherency_flush() and reach the WFI/WFE state + * prior to having the power domain disabled.
+ */ + mdelay(1); + pmu_set_power_domain(0 + cpu, false); return 1; } @@ -324,7 +335,7 @@ static int rockchip_cpu_kill(unsigned int cpu) static void rockchip_cpu_die(unsigned int cpu) { v7_exit_coherency_flush(louis); - while(1) + while (1) cpu_do_idle(); } #endif @@ -337,4 +348,5 @@ static struct smp_operations rockchip_smp_ops __initdata = { .cpu_die = rockchip_cpu_die, #endif }; + CPU_METHOD_OF_DECLARE(rk3066_smp, "rockchip,rk3066-smp", &rockchip_smp_ops); diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c index b0dcbe28f78c..bee8c8051929 100644 --- a/arch/arm/mach-rockchip/pm.c +++ b/arch/arm/mach-rockchip/pm.c @@ -45,9 +45,11 @@ static phys_addr_t rk3288_bootram_phy; static struct regmap *pmu_regmap; static struct regmap *sgrf_regmap; +static struct regmap *grf_regmap; static u32 rk3288_pmu_pwr_mode_con; static u32 rk3288_sgrf_soc_con0; +static u32 rk3288_sgrf_cpu_con0; static inline u32 rk3288_l2_config(void) { @@ -66,10 +68,37 @@ static void rk3288_config_bootdata(void) rkpm_bootdata_l2ctlr = rk3288_l2_config(); } +#define GRF_UOC0_CON0 0x320 +#define GRF_UOC1_CON0 0x334 +#define GRF_UOC2_CON0 0x348 +#define GRF_SIDDQ BIT(13) + +static bool rk3288_slp_disable_osc(void) +{ + static const u32 reg_offset[] = { GRF_UOC0_CON0, GRF_UOC1_CON0, + GRF_UOC2_CON0 }; + u32 reg, i; + + /* + * if any usb phy is still on(GRF_SIDDQ==0), that means we need the + * function of usb wakeup, so do not switch to 32khz, since the usb phy + * clk does not connect to 32khz osc + */ + for (i = 0; i < ARRAY_SIZE(reg_offset); i++) { + regmap_read(grf_regmap, reg_offset[i], ®); + if (!(reg & GRF_SIDDQ)) + return false; + } + + return true; +} + static void rk3288_slp_mode_set(int level) { u32 mode_set, mode_set1; + bool osc_disable = rk3288_slp_disable_osc(); + regmap_read(sgrf_regmap, RK3288_SGRF_CPU_CON0, &rk3288_sgrf_cpu_con0); regmap_read(sgrf_regmap, RK3288_SGRF_SOC_CON0, &rk3288_sgrf_soc_con0); regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON, @@ -94,9 +123,6 @@ static void rk3288_slp_mode_set(int level) regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR, rk3288_bootram_phy); - regmap_write(pmu_regmap, RK3288_PMU_WAKEUP_CFG1, - PMU_ARMINT_WAKEUP_EN); - mode_set = BIT(PMU_GLOBAL_INT_DISABLE) | BIT(PMU_L2FLUSH_EN) | BIT(PMU_SREF0_ENTER_EN) | BIT(PMU_SREF1_ENTER_EN) | BIT(PMU_DDR0_GATING_EN) | BIT(PMU_DDR1_GATING_EN) | @@ -107,13 +133,31 @@ static void rk3288_slp_mode_set(int level) if (level == ROCKCHIP_ARM_OFF_LOGIC_DEEP) { /* arm off, logic deep sleep */ - mode_set |= BIT(PMU_BUS_PD_EN) | + mode_set |= BIT(PMU_BUS_PD_EN) | BIT(PMU_PMU_USE_LF) | BIT(PMU_DDR1IO_RET_EN) | BIT(PMU_DDR0IO_RET_EN) | - BIT(PMU_OSC_24M_DIS) | BIT(PMU_PMU_USE_LF) | BIT(PMU_ALIVE_USE_LF) | BIT(PMU_PLL_PD_EN); + if (osc_disable) + mode_set |= BIT(PMU_OSC_24M_DIS); + mode_set1 |= BIT(PMU_CLR_ALIVE) | BIT(PMU_CLR_BUS) | BIT(PMU_CLR_PERI) | BIT(PMU_CLR_DMA); + + regmap_write(pmu_regmap, RK3288_PMU_WAKEUP_CFG1, + PMU_ARMINT_WAKEUP_EN); + + /* + * In deep suspend we use PMU_PMU_USE_LF to let the rk3288 + * switch its main clock supply to the alternative 32kHz + * source. Therefore set 30ms on a 32kHz clock for pmic + * stabilization. Similar 30ms on 24MHz for the other + * mode below. + */ + regmap_write(pmu_regmap, RK3288_PMU_STABL_CNT, 32 * 30); + + /* only wait for stabilization, if we turned the osc off */ + regmap_write(pmu_regmap, RK3288_PMU_OSC_CNT, + osc_disable ? 
32 * 30 : 0); } else { /* * arm off, logic normal @@ -121,6 +165,15 @@ static void rk3288_slp_mode_set(int level) * wakeup will be error */ mode_set |= BIT(PMU_CLK_CORE_SRC_GATE_EN); + + regmap_write(pmu_regmap, RK3288_PMU_WAKEUP_CFG1, + PMU_ARMINT_WAKEUP_EN | PMU_GPIOINT_WAKEUP_EN); + + /* 30ms on a 24MHz clock for pmic stabilization */ + regmap_write(pmu_regmap, RK3288_PMU_STABL_CNT, 24000 * 30); + + /* oscillator is still running, so no need to wait */ + regmap_write(pmu_regmap, RK3288_PMU_OSC_CNT, 0); } regmap_write(pmu_regmap, RK3288_PMU_PWRMODE_CON, mode_set); @@ -129,6 +182,9 @@ static void rk3288_slp_mode_set(int level) static void rk3288_slp_mode_set_resume(void) { + regmap_write(sgrf_regmap, RK3288_SGRF_CPU_CON0, + rk3288_sgrf_cpu_con0 | SGRF_DAPDEVICEEN_WRITE); + regmap_write(pmu_regmap, RK3288_PMU_PWRMODE_CON, rk3288_pmu_pwr_mode_con); @@ -190,7 +246,14 @@ static int rk3288_suspend_init(struct device_node *np) "rockchip,rk3288-sgrf"); if (IS_ERR(sgrf_regmap)) { pr_err("%s: could not find sgrf regmap\n", __func__); - return PTR_ERR(pmu_regmap); + return PTR_ERR(sgrf_regmap); + } + + grf_regmap = syscon_regmap_lookup_by_compatible( + "rockchip,rk3288-grf"); + if (IS_ERR(grf_regmap)) { + pr_err("%s: could not find grf regmap\n", __func__); + return PTR_ERR(grf_regmap); } sram_np = of_find_compatible_node(NULL, NULL, @@ -221,9 +284,6 @@ static int rk3288_suspend_init(struct device_node *np) memcpy(rk3288_bootram_base, rockchip_slp_cpu_resume, rk3288_bootram_sz); - regmap_write(pmu_regmap, RK3288_PMU_OSC_CNT, OSC_STABL_CNT_THRESH); - regmap_write(pmu_regmap, RK3288_PMU_STABL_CNT, PMU_STABL_CNT_THRESH); - return 0; } diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h index 3e8d39c0c3d5..b5af26f8336e 100644 --- a/arch/arm/mach-rockchip/pm.h +++ b/arch/arm/mach-rockchip/pm.h @@ -59,19 +59,9 @@ static inline void rockchip_suspend_init(void) #define SGRF_DAPDEVICEEN BIT(0) #define SGRF_DAPDEVICEEN_WRITE BIT(16) -#define RK3288_CRU_MODE_CON 0x50 -#define RK3288_CRU_SEL0_CON 0x60 -#define RK3288_CRU_SEL1_CON 0x64 -#define RK3288_CRU_SEL10_CON 0x88 -#define RK3288_CRU_SEL33_CON 0xe4 -#define RK3288_CRU_SEL37_CON 0xf4 - /* PMU_WAKEUP_CFG1 bits */ #define PMU_ARMINT_WAKEUP_EN BIT(0) - -/* wait 30ms for OSC stable and 30ms for pmic stable */ -#define OSC_STABL_CNT_THRESH (32 * 30) -#define PMU_STABL_CNT_THRESH (32 * 30) +#define PMU_GPIOINT_WAKEUP_EN BIT(3) enum rk3288_pwr_mode_con { PMU_PWR_MODE_EN = 0, diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c index fcb1d59f7aec..f726d4c4e6dd 100644 --- a/arch/arm/mach-rpc/ecard.c +++ b/arch/arm/mach-rpc/ecard.c @@ -946,7 +946,7 @@ static int __init ecard_probe(int slot, unsigned irq, card_type_t type) irq_set_chip_and_handler(ec->irq, &ecard_chip, handle_level_irq); irq_set_chip_data(ec->irq, ec); - set_irq_flags(ec->irq, IRQF_VALID); + irq_clear_status_flags(ec->irq, IRQ_NOREQUEST); } #ifdef CONFIG_ARCH_RPC diff --git a/arch/arm/mach-rpc/irq.c b/arch/arm/mach-rpc/irq.c index 3e4fa849c64d..66502e6207fe 100644 --- a/arch/arm/mach-rpc/irq.c +++ b/arch/arm/mach-rpc/irq.c @@ -117,7 +117,7 @@ extern unsigned char rpc_default_fiq_start, rpc_default_fiq_end; void __init rpc_init_irq(void) { - unsigned int irq, flags; + unsigned int irq, clr, set = 0; iomd_writeb(0, IOMD_IRQMASKA); iomd_writeb(0, IOMD_IRQMASKB); @@ -128,37 +128,37 @@ void __init rpc_init_irq(void) &rpc_default_fiq_end - &rpc_default_fiq_start); for (irq = 0; irq < NR_IRQS; irq++) { - flags = IRQF_VALID; + clr = IRQ_NOREQUEST; if (irq <= 6 || (irq >= 9 && 
irq <= 15)) - flags |= IRQF_PROBE; + clr |= IRQ_NOPROBE; if (irq == 21 || (irq >= 16 && irq <= 19) || irq == IRQ_KEYBOARDTX) - flags |= IRQF_NOAUTOEN; + set |= IRQ_NOAUTOEN; switch (irq) { case 0 ... 7: irq_set_chip_and_handler(irq, &iomd_a_chip, handle_level_irq); - set_irq_flags(irq, flags); + irq_modify_status(irq, clr, set); break; case 8 ... 15: irq_set_chip_and_handler(irq, &iomd_b_chip, handle_level_irq); - set_irq_flags(irq, flags); + irq_modify_status(irq, clr, set); break; case 16 ... 21: irq_set_chip_and_handler(irq, &iomd_dma_chip, handle_level_irq); - set_irq_flags(irq, flags); + irq_modify_status(irq, clr, set); break; case 64 ... 71: irq_set_chip(irq, &iomd_fiq_chip); - set_irq_flags(irq, IRQF_VALID); + irq_modify_status(irq, clr, set); break; } } diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig index 23bec3a85b22..ef68ecb27396 100644 --- a/arch/arm/mach-s3c24xx/Kconfig +++ b/arch/arm/mach-s3c24xx/Kconfig @@ -124,6 +124,11 @@ config S3C24XX_PLL This also means that the PLL tables for the selected CPU(s) will be built which may increase the size of the kernel image. +config S3C_SETUP_CAMIF + bool + help + Compile in common setup code for S3C CAMIF devices + # cpu frequency items common between s3c2410 and s3c2440/s3c2442 config S3C2410_IOTIMING diff --git a/arch/arm/mach-s3c24xx/Makefile b/arch/arm/mach-s3c24xx/Makefile index 05920c8a5764..8ac2f58a3480 100644 --- a/arch/arm/mach-s3c24xx/Makefile +++ b/arch/arm/mach-s3c24xx/Makefile @@ -99,3 +99,4 @@ obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o obj-$(CONFIG_S3C2443_SETUP_SPI) += setup-spi.o obj-$(CONFIG_ARCH_S3C24XX) += setup-i2c.o obj-$(CONFIG_S3C24XX_SETUP_TS) += setup-ts.o +obj-$(CONFIG_S3C_SETUP_CAMIF) += setup-camif.o diff --git a/arch/arm/mach-s3c24xx/bast-irq.c b/arch/arm/mach-s3c24xx/bast-irq.c index cb1b791954de..ced1ab86ac83 100644 --- a/arch/arm/mach-s3c24xx/bast-irq.c +++ b/arch/arm/mach-s3c24xx/bast-irq.c @@ -147,7 +147,7 @@ static __init int bast_irq_init(void) irq_set_chip_and_handler(irqno, &bast_pc104_chip, handle_level_irq); - set_irq_flags(irqno, IRQF_VALID); + irq_clear_status_flags(irqno, IRQ_NOREQUEST); } } diff --git a/arch/arm/plat-samsung/include/plat/fb-core.h b/arch/arm/mach-s3c24xx/fb-core.h index bca383efcf6d..103bdbaddd55 100644 --- a/arch/arm/plat-samsung/include/plat/fb-core.h +++ b/arch/arm/mach-s3c24xx/fb-core.h @@ -1,6 +1,4 @@ /* - * arch/arm/plat-samsung/include/plat/fb-core.h - * * Copyright 2010 Samsung Electronics Co., Ltd. * Pawel Osciak <p.osciak@samsung.com> * diff --git a/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c b/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c index f886478b88c5..5f028ff84cfe 100644 --- a/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c +++ b/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c @@ -39,7 +39,7 @@ static void __init s3c2416_dt_machine_init(void) s3c_pm_init(); } -static char const *s3c2416_dt_compat[] __initdata = { +static const char *const s3c2416_dt_compat[] __initconst = { "samsung,s3c2416", "samsung,s3c2450", NULL diff --git a/arch/arm/plat-samsung/include/plat/nand-core.h b/arch/arm/mach-s3c24xx/nand-core.h index 6de20789a95e..7e811fe1cf41 100644 --- a/arch/arm/plat-samsung/include/plat/nand-core.h +++ b/arch/arm/mach-s3c24xx/nand-core.h @@ -1,5 +1,4 @@ -/* arch/arm/plat-samsung/include/plat/nand-core.h - * +/* * Copyright (c) 2010 Samsung Electronics Co., Ltd. 
* http://www.samsung.com/ * diff --git a/arch/arm/mach-s3c24xx/s3c2412.c b/arch/arm/mach-s3c24xx/s3c2412.c index 64a13605cfc3..fb5ee8d38913 100644 --- a/arch/arm/mach-s3c24xx/s3c2412.c +++ b/arch/arm/mach-s3c24xx/s3c2412.c @@ -40,11 +40,11 @@ #include <plat/cpu.h> #include <plat/cpu-freq.h> #include <plat/devs.h> -#include <plat/nand-core.h> #include <plat/pm.h> #include <plat/regs-spi.h> #include "common.h" +#include "nand-core.h" #include "regs-dsc.h" #include "s3c2412-power.h" diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c index 3f8ca2a3ef17..621b8648a7ef 100644 --- a/arch/arm/mach-s3c24xx/s3c2416.c +++ b/arch/arm/mach-s3c24xx/s3c2416.c @@ -59,12 +59,12 @@ #include <plat/pm.h> #include <plat/iic-core.h> -#include <plat/fb-core.h> -#include <plat/nand-core.h> #include <plat/adc-core.h> -#include <plat/spi-core.h> #include "common.h" +#include "fb-core.h" +#include "nand-core.h" +#include "spi-core.h" static struct map_desc s3c2416_iodesc[] __initdata = { IODESC_ENT(WATCHDOG), diff --git a/arch/arm/mach-s3c24xx/s3c2443.c b/arch/arm/mach-s3c24xx/s3c2443.c index 87b6b89d8ee7..b559d378cf43 100644 --- a/arch/arm/mach-s3c24xx/s3c2443.c +++ b/arch/arm/mach-s3c24xx/s3c2443.c @@ -41,10 +41,11 @@ #include <plat/gpio-cfg-helpers.h> #include <plat/devs.h> #include <plat/cpu.h> -#include <plat/fb-core.h> -#include <plat/nand-core.h> #include <plat/adc-core.h> -#include <plat/spi-core.h> + +#include "fb-core.h" +#include "nand-core.h" +#include "spi-core.h" static struct map_desc s3c2443_iodesc[] __initdata = { IODESC_ENT(WATCHDOG), diff --git a/arch/arm/mach-s3c24xx/s3c244x.c b/arch/arm/mach-s3c24xx/s3c244x.c index b14119585dc7..31fd273269c2 100644 --- a/arch/arm/mach-s3c24xx/s3c244x.c +++ b/arch/arm/mach-s3c24xx/s3c244x.c @@ -41,9 +41,9 @@ #include <plat/devs.h> #include <plat/cpu.h> #include <plat/pm.h> -#include <plat/nand-core.h> #include "common.h" +#include "nand-core.h" #include "regs-dsc.h" static struct map_desc s3c244x_iodesc[] __initdata = { diff --git a/arch/arm/plat-samsung/setup-camif.c b/arch/arm/mach-s3c24xx/setup-camif.c index 72d8edb8927a..72d8edb8927a 100644 --- a/arch/arm/plat-samsung/setup-camif.c +++ b/arch/arm/mach-s3c24xx/setup-camif.c diff --git a/arch/arm/plat-samsung/include/plat/spi-core.h b/arch/arm/mach-s3c24xx/spi-core.h index 0b9428ab3fc3..0b9428ab3fc3 100644 --- a/arch/arm/plat-samsung/include/plat/spi-core.h +++ b/arch/arm/mach-s3c24xx/spi-core.h diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig index eff95e950d81..28c7097e8506 100644 --- a/arch/arm/mach-s3c64xx/Kconfig +++ b/arch/arm/mach-s3c64xx/Kconfig @@ -34,6 +34,12 @@ config S3C64XX_DEV_ONENAND1 help Compile in platform device definition for OneNAND1 controller +config SAMSUNG_DEV_BACKLIGHT + bool + depends on SAMSUNG_DEV_PWM + help + Compile in platform device definition LCD backlight with PWM Timer + # platform specific device setup config S3C64XX_SETUP_I2C0 diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile index 17f4b07ec763..bb233f342f31 100644 --- a/arch/arm/mach-s3c64xx/Makefile +++ b/arch/arm/mach-s3c64xx/Makefile @@ -40,6 +40,8 @@ obj-$(CONFIG_S3C64XX_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o obj-$(CONFIG_S3C64XX_SETUP_SPI) += setup-spi.o obj-$(CONFIG_S3C64XX_SETUP_USB_PHY) += setup-usb-phy.o +obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o + # Machine support obj-$(CONFIG_MACH_ANW6410) += mach-anw6410.o diff --git a/arch/arm/plat-samsung/include/plat/ata-core.h b/arch/arm/mach-s3c64xx/ata-core.h index 
f5a4ec7141b1..5951f24a9ec8 100644 --- a/arch/arm/plat-samsung/include/plat/ata-core.h +++ b/arch/arm/mach-s3c64xx/ata-core.h @@ -1,5 +1,4 @@ -/* linux/arch/arm/plat-samsung/include/plat/ata-core.h - * +/* * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * diff --git a/arch/arm/plat-samsung/include/plat/backlight.h b/arch/arm/mach-s3c64xx/backlight.h index ad530c78fe8c..8dcacac523a2 100644 --- a/arch/arm/plat-samsung/include/plat/backlight.h +++ b/arch/arm/mach-s3c64xx/backlight.h @@ -1,5 +1,4 @@ -/* linux/arch/arm/plat-samsung/include/plat/backlight.h - * +/* * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index 16547f2641a3..fd63ecfb2f81 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c @@ -21,7 +21,6 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/clk-provider.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/serial_core.h> @@ -48,12 +47,12 @@ #include <plat/devs.h> #include <plat/pm.h> #include <plat/gpio-cfg.h> -#include <plat/irq-uart.h> #include <plat/pwm-core.h> #include <plat/regs-irqtype.h> -#include <plat/watchdog-reset.h> #include "common.h" +#include "irq-uart.h" +#include "watchdog-reset.h" /* External clock frequency */ static unsigned long xtal_f = 12000000, xusbxti_f = 48000000; @@ -420,7 +419,7 @@ static int __init s3c64xx_init_irq_eint(void) for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) { irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq); irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq)); - set_irq_flags(irq, IRQF_VALID); + irq_clear_status_flags(irq, IRQ_NOREQUEST); } irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3); diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/mach-s3c64xx/dev-backlight.c index 2157c5b539e6..38c323e68e3f 100644 --- a/arch/arm/plat-samsung/dev-backlight.c +++ b/arch/arm/mach-s3c64xx/dev-backlight.c @@ -1,5 +1,4 @@ -/* linux/arch/arm/plat-samsung/dev-backlight.c - * +/* * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * @@ -18,7 +17,8 @@ #include <plat/devs.h> #include <plat/gpio-cfg.h> -#include <plat/backlight.h> + +#include "backlight.h" struct samsung_bl_drvdata { struct platform_pwm_backlight_data plat_data; diff --git a/arch/arm/plat-samsung/include/plat/irq-uart.h b/arch/arm/mach-s3c64xx/irq-uart.h index a9331e49bea3..4b296132962f 100644 --- a/arch/arm/plat-samsung/include/plat/irq-uart.h +++ b/arch/arm/mach-s3c64xx/irq-uart.h @@ -1,5 +1,4 @@ -/* arch/arm/plat-samsung/include/plat/irq-uart.h - * +/* * Copyright (c) 2010 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c index 2fddf38192df..bbf74edd3dd9 100644 --- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c +++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c @@ -15,11 +15,10 @@ #include <asm/system_misc.h> #include <plat/cpu.h> -#include <plat/watchdog-reset.h> - #include <mach/map.h> #include "common.h" +#include "watchdog-reset.h" /* * IO mapping for shared system controller IP. 
@@ -61,7 +60,7 @@ static void s3c64xx_dt_restart(enum reboot_mode mode, const char *cmd) soft_restart(0); } -static char const *s3c64xx_dt_compat[] __initdata = { +static const char *const s3c64xx_dt_compat[] __initconst = { "samsung,s3c6400", "samsung,s3c6410", NULL diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index b7447a92276e..d590b88bd8a8 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c @@ -68,9 +68,9 @@ #include <plat/adc.h> #include <linux/platform_data/touchscreen-s3c2410.h> #include <plat/keypad.h> -#include <plat/backlight.h> #include <plat/samsung-time.h> +#include "backlight.h" #include "common.h" #include "regs-modem.h" #include "regs-srom.h" diff --git a/arch/arm/plat-samsung/include/plat/onenand-core.h b/arch/arm/mach-s3c64xx/onenand-core.h index 7701cb7020c8..925eb13bbb60 100644 --- a/arch/arm/plat-samsung/include/plat/onenand-core.h +++ b/arch/arm/mach-s3c64xx/onenand-core.h @@ -1,6 +1,4 @@ /* - * linux/arch/arm/plat-samsung/onenand-core.h - * * Copyright (c) 2010 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> * Marek Szyprowski <m.szyprowski@samsung.com> diff --git a/arch/arm/plat-samsung/include/plat/regs-usb-hsotg-phy.h b/arch/arm/mach-s3c64xx/regs-usb-hsotg-phy.h index fcf279662067..eae3c311e590 100644 --- a/arch/arm/plat-samsung/include/plat/regs-usb-hsotg-phy.h +++ b/arch/arm/mach-s3c64xx/regs-usb-hsotg-phy.h @@ -1,5 +1,4 @@ -/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h - * +/* * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * http://armlinux.simtec.co.uk/ diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c index 1ce48c54cd9c..33273abef669 100644 --- a/arch/arm/mach-s3c64xx/s3c6400.c +++ b/arch/arm/mach-s3c64xx/s3c6400.c @@ -41,9 +41,9 @@ #include <plat/devs.h> #include <plat/sdhci.h> #include <plat/iic-core.h> -#include <plat/onenand-core.h> #include "common.h" +#include "onenand-core.h" void __init s3c6400_map_io(void) { diff --git a/arch/arm/mach-s3c64xx/s3c6410.c b/arch/arm/mach-s3c64xx/s3c6410.c index b2a7930548d9..eadc48dee0e4 100644 --- a/arch/arm/mach-s3c64xx/s3c6410.c +++ b/arch/arm/mach-s3c64xx/s3c6410.c @@ -41,12 +41,12 @@ #include <plat/cpu.h> #include <plat/devs.h> #include <plat/sdhci.h> -#include <plat/ata-core.h> #include <plat/adc-core.h> #include <plat/iic-core.h> -#include <plat/onenand-core.h> +#include "ata-core.h" #include "common.h" +#include "onenand-core.h" void __init s3c6410_map_io(void) { diff --git a/arch/arm/mach-s3c64xx/setup-usb-phy.c b/arch/arm/mach-s3c64xx/setup-usb-phy.c index ca960bda02fd..2b17b7f5152f 100644 --- a/arch/arm/mach-s3c64xx/setup-usb-phy.c +++ b/arch/arm/mach-s3c64xx/setup-usb-phy.c @@ -16,10 +16,10 @@ #include <linux/platform_device.h> #include <mach/map.h> #include <plat/cpu.h> -#include <plat/regs-usb-hsotg-phy.h> #include <plat/usb-phy.h> #include "regs-sys.h" +#include "regs-usb-hsotg-phy.h" static int s3c_usb_otgphy_init(struct platform_device *pdev) { diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/mach-s3c64xx/watchdog-reset.h index 0386b8f76623..42707dfbda9c 100644 --- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h +++ b/arch/arm/mach-s3c64xx/watchdog-reset.h @@ -1,5 +1,4 @@ -/* arch/arm/plat-s3c/include/plat/watchdog-reset.h - * +/* * Copyright (c) 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 
99d9a3b1bf34..6d237b4f7a8e 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -320,10 +320,10 @@ static int neponset_probe(struct platform_device *dev) irq_set_chip_and_handler(d->irq_base + NEP_IRQ_SMC91X, &nochip, handle_simple_irq); - set_irq_flags(d->irq_base + NEP_IRQ_SMC91X, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(d->irq_base + NEP_IRQ_SMC91X, IRQ_NOREQUEST | IRQ_NOPROBE); irq_set_chip_and_handler(d->irq_base + NEP_IRQ_USAR, &nochip, handle_simple_irq); - set_irq_flags(d->irq_base + NEP_IRQ_USAR, IRQF_VALID | IRQF_PROBE); + irq_clear_status_flags(d->irq_base + NEP_IRQ_USAR, IRQ_NOREQUEST | IRQ_NOPROBE); irq_set_chip(d->irq_base + NEP_IRQ_SA1111, &nochip); irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index 34eac88a9889..926e336d6aeb 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -82,6 +82,11 @@ config ARCH_R8A7791 select ARCH_RCAR_GEN2 select I2C +config ARCH_R8A7793 + bool "R-Car M2-N (R8A7793)" + select ARCH_RCAR_GEN2 + select I2C + config ARCH_R8A7794 bool "R-Car E2 (R8A77940)" select ARCH_RCAR_GEN2 @@ -91,13 +96,6 @@ config ARCH_SH73A0 select ARCH_RMOBILE select RENESAS_INTC_IRQPIN -comment "Renesas ARM SoCs Board Type" - -config MACH_MARZEN - bool "MARZEN board" - depends on ARCH_R8A7779 - select REGULATOR_FIXED_VOLTAGE if REGULATOR - comment "Renesas ARM SoCs System Configuration" endif @@ -105,22 +103,6 @@ if ARCH_SHMOBILE_LEGACY comment "Renesas ARM SoCs System Type" -config ARCH_SH73A0 - bool "SH-Mobile AG5 (R8A73A00)" - select ARCH_RMOBILE - select ARCH_WANT_OPTIONAL_GPIOLIB - select ARM_GIC - select I2C - select SH_INTC - select RENESAS_INTC_IRQPIN - -config ARCH_R8A7740 - bool "R-Mobile A1 (R8A77400)" - select ARCH_RMOBILE - select ARCH_WANT_OPTIONAL_GPIOLIB - select ARM_GIC - select RENESAS_INTC_IRQPIN - config ARCH_R8A7778 bool "R-Car M1A (R8A77781)" select ARCH_RCAR_GEN1 @@ -135,15 +117,6 @@ config ARCH_R8A7779 comment "Renesas ARM SoCs Board Type" -config MACH_ARMADILLO800EVA - bool "Armadillo-800 EVA board" - depends on ARCH_R8A7740 - select ARCH_REQUIRE_GPIOLIB - select REGULATOR_FIXED_VOLTAGE if REGULATOR - select SMSC_PHY if SH_ETH - select SND_SOC_WM8978 if SND_SIMPLE_CARD && I2C - select USE_OF - config MACH_BOCKW bool "BOCK-W platform" depends on ARCH_R8A7778 @@ -166,21 +139,6 @@ config MACH_BOCKW_REFERENCE This is intended to aid developers -config MACH_MARZEN - bool "MARZEN board" - depends on ARCH_R8A7779 - select ARCH_REQUIRE_GPIOLIB - select REGULATOR_FIXED_VOLTAGE if REGULATOR - select USE_OF - -config MACH_KZM9G - bool "KZM-A9-GT board" - depends on ARCH_SH73A0 - select ARCH_REQUIRE_GPIOLIB - select REGULATOR_FIXED_VOLTAGE if REGULATOR - select SND_SOC_AK4642 if SND_SIMPLE_CARD - select USE_OF - comment "Renesas ARM SoCs System Configuration" config CPU_HAS_INTEVT diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile index 89e463de4479..476de30798d7 100644 --- a/arch/arm/mach-shmobile/Makefile +++ b/arch/arm/mach-shmobile/Makefile @@ -6,13 +6,14 @@ obj-y := timer.o console.o # CPU objects -obj-$(CONFIG_ARCH_SH73A0) += setup-sh73a0.o pm-sh73a0.o +obj-$(CONFIG_ARCH_SH73A0) += setup-sh73a0.o obj-$(CONFIG_ARCH_R8A73A4) += setup-r8a73a4.o -obj-$(CONFIG_ARCH_R8A7740) += setup-r8a7740.o pm-r8a7740.o +obj-$(CONFIG_ARCH_R8A7740) += setup-r8a7740.o obj-$(CONFIG_ARCH_R8A7778) += setup-r8a7778.o obj-$(CONFIG_ARCH_R8A7779) += setup-r8a7779.o pm-r8a7779.o obj-$(CONFIG_ARCH_R8A7790) += 
setup-r8a7790.o obj-$(CONFIG_ARCH_R8A7791) += setup-r8a7791.o +obj-$(CONFIG_ARCH_R8A7793) += setup-r8a7793.o obj-$(CONFIG_ARCH_R8A7794) += setup-r8a7794.o obj-$(CONFIG_ARCH_EMEV2) += setup-emev2.o obj-$(CONFIG_ARCH_R7S72100) += setup-r7s72100.o @@ -20,10 +21,7 @@ obj-$(CONFIG_ARCH_R7S72100) += setup-r7s72100.o # Clock objects ifndef CONFIG_COMMON_CLK obj-y += clock.o -obj-$(CONFIG_ARCH_SH73A0) += clock-sh73a0.o -obj-$(CONFIG_ARCH_R8A7740) += clock-r8a7740.o obj-$(CONFIG_ARCH_R8A7778) += clock-r8a7778.o -obj-$(CONFIG_ARCH_R8A7779) += clock-r8a7779.o endif # CPU reset vector handling objects @@ -34,6 +32,7 @@ obj-$(CONFIG_ARCH_RCAR_GEN2) += setup-rcar-gen2.o platsmp-apmu.o $(cpu-y) CFLAGS_setup-rcar-gen2.o += -march=armv7-a obj-$(CONFIG_ARCH_R8A7790) += regulator-quirk-rcar-gen2.o obj-$(CONFIG_ARCH_R8A7791) += regulator-quirk-rcar-gen2.o +obj-$(CONFIG_ARCH_R8A7793) += regulator-quirk-rcar-gen2.o # SMP objects smp-y := $(cpu-y) @@ -51,14 +50,9 @@ obj-$(CONFIG_PM_RMOBILE) += pm-rmobile.o obj-$(CONFIG_ARCH_RCAR_GEN2) += pm-rcar-gen2.o # Board objects -ifdef CONFIG_ARCH_SHMOBILE_MULTI -obj-$(CONFIG_MACH_MARZEN) += board-marzen-reference.o -else +ifndef CONFIG_ARCH_SHMOBILE_MULTI obj-$(CONFIG_MACH_BOCKW) += board-bockw.o obj-$(CONFIG_MACH_BOCKW_REFERENCE) += board-bockw-reference.o -obj-$(CONFIG_MACH_MARZEN) += board-marzen.o -obj-$(CONFIG_MACH_ARMADILLO800EVA) += board-armadillo800eva.o -obj-$(CONFIG_MACH_KZM9G) += board-kzm9g.o intc-sh73a0.o endif # Framework support diff --git a/arch/arm/mach-shmobile/Makefile.boot b/arch/arm/mach-shmobile/Makefile.boot index e1ef19cef89c..a489fe9a76cd 100644 --- a/arch/arm/mach-shmobile/Makefile.boot +++ b/arch/arm/mach-shmobile/Makefile.boot @@ -1,10 +1,7 @@ # per-board load address for uImage loadaddr-y := -loadaddr-$(CONFIG_MACH_ARMADILLO800EVA) += 0x40008000 loadaddr-$(CONFIG_MACH_BOCKW) += 0x60008000 loadaddr-$(CONFIG_MACH_BOCKW_REFERENCE) += 0x60008000 -loadaddr-$(CONFIG_MACH_KZM9G) += 0x41008000 -loadaddr-$(CONFIG_MACH_MARZEN) += 0x60008000 __ZRELADDR := $(sort $(loadaddr-y)) zreladdr-y += $(__ZRELADDR) diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c deleted file mode 100644 index bf37e3c532f6..000000000000 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ /dev/null @@ -1,1365 +0,0 @@ -/* - * armadillo 800 eva board support - * - * Copyright (C) 2012 Renesas Solutions Corp. - * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/clk.h> -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/gpio.h> -#include <linux/gpio_keys.h> -#include <linux/i2c-gpio.h> -#include <linux/input.h> -#include <linux/irq.h> -#include <linux/kernel.h> -#include <linux/mfd/tmio.h> -#include <linux/mmc/host.h> -#include <linux/mmc/sh_mmcif.h> -#include <linux/mmc/sh_mobile_sdhi.h> -#include <linux/pinctrl/machine.h> -#include <linux/platform_data/st1232_pdata.h> -#include <linux/platform_device.h> -#include <linux/pwm.h> -#include <linux/pwm_backlight.h> -#include <linux/reboot.h> -#include <linux/regulator/driver.h> -#include <linux/regulator/fixed.h> -#include <linux/regulator/gpio-regulator.h> -#include <linux/regulator/machine.h> -#include <linux/sh_eth.h> -#include <linux/usb/renesas_usbhs.h> -#include <linux/videodev2.h> - -#include <asm/hardware/cache-l2x0.h> -#include <asm/mach-types.h> -#include <asm/mach/arch.h> -#include <asm/mach/map.h> -#include <asm/mach/time.h> -#include <asm/page.h> -#include <media/mt9t112.h> -#include <media/sh_mobile_ceu.h> -#include <media/soc_camera.h> -#include <sound/sh_fsi.h> -#include <sound/simple_card.h> -#include <video/sh_mobile_hdmi.h> -#include <video/sh_mobile_lcdc.h> - -#include "common.h" -#include "irqs.h" -#include "pm-rmobile.h" -#include "r8a7740.h" -#include "sh-gpio.h" - -/* - * CON1 Camera Module - * CON2 Extension Bus - * CON3 HDMI Output - * CON4 Composite Video Output - * CON5 H-UDI JTAG - * CON6 ARM JTAG - * CON7 SD1 - * CON8 SD2 - * CON9 RTC BackUp - * CON10 Monaural Mic Input - * CON11 Stereo Headphone Output - * CON12 Audio Line Output(L) - * CON13 Audio Line Output(R) - * CON14 AWL13 Module - * CON15 Extension - * CON16 LCD1 - * CON17 LCD2 - * CON19 Power Input - * CON20 USB1 - * CON21 USB2 - * CON22 Serial - * CON23 LAN - * CON24 USB3 - * LED1 Camera LED(Yellow) - * LED2 Power LED (Green) - * ED3-LED6 User LED(Yellow) - * LED7 LAN link LED(Green) - * LED8 LAN activity LED(Yellow) - */ - -/* - * DipSwitch - * - * SW1 - * - * -12345678-+---------------+---------------------------- - * 1 | boot | hermit - * 0 | boot | OS auto boot - * -12345678-+---------------+---------------------------- - * 00 | boot device | eMMC - * 10 | boot device | SDHI0 (CON7) - * 01 | boot device | - - * 11 | boot device | Extension Buss (CS0) - * -12345678-+---------------+---------------------------- - * 0 | Extension Bus | D8-D15 disable, eMMC enable - * 1 | Extension Bus | D8-D15 enable, eMMC disable - * -12345678-+---------------+---------------------------- - * 0 | SDHI1 | COM8 disable, COM14 enable - * 1 | SDHI1 | COM8 enable, COM14 disable - * -12345678-+---------------+---------------------------- - * 0 | USB0 | COM20 enable, COM24 disable - * 1 | USB0 | COM20 disable, COM24 enable - * -12345678-+---------------+---------------------------- - * 00 | JTAG | SH-X2 - * 10 | JTAG | ARM - * 01 | JTAG | - - * 11 | JTAG | Boundary Scan - *-----------+---------------+---------------------------- - */ - -/* - * FSI-WM8978 - * - * this command is required when playback. - * - * # amixer set "Headphone" 50 - * - * this command is required when capture. - * - * # amixer set "Input PGA" 15 - * # amixer set "Left Input Mixer MicP" on - * # amixer set "Left Input Mixer MicN" on - * # amixer set "Right Input Mixer MicN" on - * # amixer set "Right Input Mixer MicP" on - */ - -/* - * USB function - * - * When you use USB Function, - * set SW1.6 ON, and connect cable to CN24. - * - * USBF needs workaround on R8A7740 chip. - * These are a little bit complex. 
- * see - * usbhsf_power_ctrl() - */ -#define IRQ7 irq_pin(7) -#define USBCR1 IOMEM(0xe605810a) -#define USBH 0xC6700000 -#define USBH_USBCTR 0x10834 - -struct usbhsf_private { - struct clk *phy; - struct clk *usb24; - struct clk *pci; - struct clk *func; - struct clk *host; - void __iomem *usbh_base; - struct renesas_usbhs_platform_info info; -}; - -#define usbhsf_get_priv(pdev) \ - container_of(renesas_usbhs_get_info(pdev), \ - struct usbhsf_private, info) - -static int usbhsf_get_id(struct platform_device *pdev) -{ - return USBHS_GADGET; -} - -static int usbhsf_power_ctrl(struct platform_device *pdev, - void __iomem *base, int enable) -{ - struct usbhsf_private *priv = usbhsf_get_priv(pdev); - - /* - * Work around for USB Function. - * It needs USB host clock, and settings - */ - if (enable) { - /* - * enable all the related usb clocks - * for usb workaround - */ - clk_enable(priv->usb24); - clk_enable(priv->pci); - clk_enable(priv->host); - clk_enable(priv->func); - clk_enable(priv->phy); - - /* - * set USBCR1 - * - * Port1 is driven by USB function, - * Port2 is driven by USB HOST - * One HOST (Port1 or Port2 is HOST) - * USB PLL input clock = 24MHz - */ - __raw_writew(0xd750, USBCR1); - mdelay(1); - - /* - * start USB Host - */ - __raw_writel(0x0000000c, priv->usbh_base + USBH_USBCTR); - __raw_writel(0x00000008, priv->usbh_base + USBH_USBCTR); - mdelay(10); - - /* - * USB PHY Power ON - */ - __raw_writew(0xd770, USBCR1); - __raw_writew(0x4000, base + 0x102); /* USBF :: SUSPMODE */ - - } else { - __raw_writel(0x0000010f, priv->usbh_base + USBH_USBCTR); - __raw_writew(0xd7c0, USBCR1); /* GPIO */ - - clk_disable(priv->phy); - clk_disable(priv->func); /* usb work around */ - clk_disable(priv->host); /* usb work around */ - clk_disable(priv->pci); /* usb work around */ - clk_disable(priv->usb24); /* usb work around */ - } - - return 0; -} - -static int usbhsf_get_vbus(struct platform_device *pdev) -{ - return gpio_get_value(209); -} - -static irqreturn_t usbhsf_interrupt(int irq, void *data) -{ - struct platform_device *pdev = data; - - renesas_usbhs_call_notify_hotplug(pdev); - - return IRQ_HANDLED; -} - -static int usbhsf_hardware_exit(struct platform_device *pdev) -{ - struct usbhsf_private *priv = usbhsf_get_priv(pdev); - - if (!IS_ERR(priv->phy)) - clk_put(priv->phy); - if (!IS_ERR(priv->usb24)) - clk_put(priv->usb24); - if (!IS_ERR(priv->pci)) - clk_put(priv->pci); - if (!IS_ERR(priv->host)) - clk_put(priv->host); - if (!IS_ERR(priv->func)) - clk_put(priv->func); - if (priv->usbh_base) - iounmap(priv->usbh_base); - - priv->phy = NULL; - priv->usb24 = NULL; - priv->pci = NULL; - priv->host = NULL; - priv->func = NULL; - priv->usbh_base = NULL; - - free_irq(IRQ7, pdev); - - return 0; -} - -static int usbhsf_hardware_init(struct platform_device *pdev) -{ - struct usbhsf_private *priv = usbhsf_get_priv(pdev); - int ret; - - priv->phy = clk_get(&pdev->dev, "phy"); - priv->usb24 = clk_get(&pdev->dev, "usb24"); - priv->pci = clk_get(&pdev->dev, "pci"); - priv->func = clk_get(&pdev->dev, "func"); - priv->host = clk_get(&pdev->dev, "host"); - priv->usbh_base = ioremap_nocache(USBH, 0x20000); - - if (IS_ERR(priv->phy) || - IS_ERR(priv->usb24) || - IS_ERR(priv->pci) || - IS_ERR(priv->host) || - IS_ERR(priv->func) || - !priv->usbh_base) { - dev_err(&pdev->dev, "USB clock setting failed\n"); - usbhsf_hardware_exit(pdev); - return -EIO; - } - - ret = request_irq(IRQ7, usbhsf_interrupt, IRQF_TRIGGER_NONE, - dev_name(&pdev->dev), pdev); - if (ret) { - dev_err(&pdev->dev, "request_irq err\n"); - 
return ret; - } - irq_set_irq_type(IRQ7, IRQ_TYPE_EDGE_BOTH); - - /* usb24 use 1/1 of parent clock (= usb24s = 24MHz) */ - clk_set_rate(priv->usb24, - clk_get_rate(clk_get_parent(priv->usb24))); - - return 0; -} - -static struct usbhsf_private usbhsf_private = { - .info = { - .platform_callback = { - .get_id = usbhsf_get_id, - .get_vbus = usbhsf_get_vbus, - .hardware_init = usbhsf_hardware_init, - .hardware_exit = usbhsf_hardware_exit, - .power_ctrl = usbhsf_power_ctrl, - }, - .driver_param = { - .buswait_bwait = 5, - .detection_delay = 5, - .d0_rx_id = SHDMA_SLAVE_USBHS_RX, - .d1_tx_id = SHDMA_SLAVE_USBHS_TX, - }, - } -}; - -static struct resource usbhsf_resources[] = { - { - .name = "USBHS", - .start = 0xe6890000, - .end = 0xe6890104 - 1, - .flags = IORESOURCE_MEM, - }, - { - .start = gic_spi(51), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device usbhsf_device = { - .name = "renesas_usbhs", - .dev = { - .platform_data = &usbhsf_private.info, - }, - .id = -1, - .num_resources = ARRAY_SIZE(usbhsf_resources), - .resource = usbhsf_resources, -}; - -/* Ether */ -static struct sh_eth_plat_data sh_eth_platdata = { - .phy = 0x00, /* LAN8710A */ - .edmac_endian = EDMAC_LITTLE_ENDIAN, - .phy_interface = PHY_INTERFACE_MODE_MII, -}; - -static struct resource sh_eth_resources[] = { - { - .start = 0xe9a00000, - .end = 0xe9a00800 - 1, - .flags = IORESOURCE_MEM, - }, { - .start = 0xe9a01800, - .end = 0xe9a02000 - 1, - .flags = IORESOURCE_MEM, - }, { - .start = gic_spi(110), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device sh_eth_device = { - .name = "r8a7740-gether", - .id = -1, - .dev = { - .platform_data = &sh_eth_platdata, - .dma_mask = &sh_eth_device.dev.coherent_dma_mask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, - .resource = sh_eth_resources, - .num_resources = ARRAY_SIZE(sh_eth_resources), -}; - -/* PWM */ -static struct resource pwm_resources[] = { - [0] = { - .start = 0xe6600000, - .end = 0xe66000ff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device pwm_device = { - .name = "renesas-tpu-pwm", - .id = -1, - .num_resources = ARRAY_SIZE(pwm_resources), - .resource = pwm_resources, -}; - -static struct pwm_lookup pwm_lookup[] = { - PWM_LOOKUP("renesas-tpu-pwm", 2, "pwm-backlight.0", NULL, - 33333, PWM_POLARITY_INVERSED), -}; - -/* LCDC and backlight */ -static struct platform_pwm_backlight_data pwm_backlight_data = { - .lth_brightness = 50, - .max_brightness = 255, - .dft_brightness = 255, - .pwm_period_ns = 33333, /* 30kHz */ - .enable_gpio = 61, -}; - -static struct platform_device pwm_backlight_device = { - .name = "pwm-backlight", - .dev = { - .platform_data = &pwm_backlight_data, - }, -}; - -static struct fb_videomode lcdc0_mode = { - .name = "AMPIER/AM-800480", - .xres = 800, - .yres = 480, - .left_margin = 88, - .right_margin = 40, - .hsync_len = 128, - .upper_margin = 20, - .lower_margin = 5, - .vsync_len = 5, - .sync = 0, -}; - -static struct sh_mobile_lcdc_info lcdc0_info = { - .clock_source = LCDC_CLK_BUS, - .ch[0] = { - .chan = LCDC_CHAN_MAINLCD, - .fourcc = V4L2_PIX_FMT_RGB565, - .interface_type = RGB24, - .clock_divider = 5, - .flags = 0, - .lcd_modes = &lcdc0_mode, - .num_modes = 1, - .panel_cfg = { - .width = 111, - .height = 68, - }, - }, -}; - -static struct resource lcdc0_resources[] = { - [0] = { - .name = "LCD0", - .start = 0xfe940000, - .end = 0xfe943fff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(177), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device lcdc0_device = { - .name = 
"sh_mobile_lcdc_fb", - .num_resources = ARRAY_SIZE(lcdc0_resources), - .resource = lcdc0_resources, - .id = 0, - .dev = { - .platform_data = &lcdc0_info, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, -}; - -/* - * LCDC1/HDMI - */ -static struct sh_mobile_hdmi_info hdmi_info = { - .flags = HDMI_OUTPUT_PUSH_PULL | - HDMI_OUTPUT_POLARITY_HI | - HDMI_32BIT_REG | - HDMI_HAS_HTOP1 | - HDMI_SND_SRC_SPDIF, -}; - -static struct resource hdmi_resources[] = { - [0] = { - .name = "HDMI", - .start = 0xe6be0000, - .end = 0xe6be03ff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(131), - .flags = IORESOURCE_IRQ, - }, - [2] = { - .name = "HDMI emma3pf", - .start = 0xe6be4000, - .end = 0xe6be43ff, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device hdmi_device = { - .name = "sh-mobile-hdmi", - .num_resources = ARRAY_SIZE(hdmi_resources), - .resource = hdmi_resources, - .id = -1, - .dev = { - .platform_data = &hdmi_info, - }, -}; - -static const struct fb_videomode lcdc1_mode = { - .name = "HDMI 720p", - .xres = 1280, - .yres = 720, - .pixclock = 13468, - .left_margin = 220, - .right_margin = 110, - .hsync_len = 40, - .upper_margin = 20, - .lower_margin = 5, - .vsync_len = 5, - .refresh = 60, - .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, -}; - -static struct sh_mobile_lcdc_info hdmi_lcdc_info = { - .clock_source = LCDC_CLK_PERIPHERAL, /* HDMI clock */ - .ch[0] = { - .chan = LCDC_CHAN_MAINLCD, - .fourcc = V4L2_PIX_FMT_RGB565, - .interface_type = RGB24, - .clock_divider = 1, - .flags = LCDC_FLAGS_DWPOL, - .lcd_modes = &lcdc1_mode, - .num_modes = 1, - .tx_dev = &hdmi_device, - .panel_cfg = { - .width = 1280, - .height = 720, - }, - }, -}; - -static struct resource hdmi_lcdc_resources[] = { - [0] = { - .name = "LCDC1", - .start = 0xfe944000, - .end = 0xfe948000 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(178), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device hdmi_lcdc_device = { - .name = "sh_mobile_lcdc_fb", - .num_resources = ARRAY_SIZE(hdmi_lcdc_resources), - .resource = hdmi_lcdc_resources, - .id = 1, - .dev = { - .platform_data = &hdmi_lcdc_info, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, -}; - -/* LEDS */ -static struct gpio_led gpio_leds[] = { - { - .name = "LED3", - .gpio = 102, - .default_state = LEDS_GPIO_DEFSTATE_ON, - }, { - .name = "LED4", - .gpio = 111, - .default_state = LEDS_GPIO_DEFSTATE_ON, - }, { - .name = "LED5", - .gpio = 110, - .default_state = LEDS_GPIO_DEFSTATE_ON, - }, { - .name = "LED6", - .gpio = 177, - .default_state = LEDS_GPIO_DEFSTATE_ON, - }, -}; - -static struct gpio_led_platform_data leds_gpio_info = { - .leds = gpio_leds, - .num_leds = ARRAY_SIZE(gpio_leds), -}; - -static struct platform_device leds_gpio_device = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &leds_gpio_info, - }, -}; - -/* GPIO KEY */ -#define GPIO_KEY(c, g, d, ...) 
\ - { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ } - -static struct gpio_keys_button gpio_buttons[] = { - GPIO_KEY(KEY_POWER, 99, "SW3", .wakeup = 1), - GPIO_KEY(KEY_BACK, 100, "SW4"), - GPIO_KEY(KEY_MENU, 97, "SW5"), - GPIO_KEY(KEY_HOME, 98, "SW6"), -}; - -static struct gpio_keys_platform_data gpio_key_info = { - .buttons = gpio_buttons, - .nbuttons = ARRAY_SIZE(gpio_buttons), -}; - -static struct platform_device gpio_keys_device = { - .name = "gpio-keys", - .id = -1, - .dev = { - .platform_data = &gpio_key_info, - }, -}; - -/* Fixed 3.3V regulator to be used by SDHI1, MMCIF */ -static struct regulator_consumer_supply fixed3v3_power_consumers[] = { - REGULATOR_SUPPLY("vmmc", "sh_mmcif"), - REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), -}; - -/* Fixed 3.3V regulator used by LCD backlight */ -static struct regulator_consumer_supply fixed5v0_power_consumers[] = { - REGULATOR_SUPPLY("power", "pwm-backlight.0"), -}; - -/* Fixed 3.3V regulator to be used by SDHI0 */ -static struct regulator_consumer_supply vcc_sdhi0_consumers[] = { - REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), -}; - -static struct regulator_init_data vcc_sdhi0_init_data = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(vcc_sdhi0_consumers), - .consumer_supplies = vcc_sdhi0_consumers, -}; - -static struct fixed_voltage_config vcc_sdhi0_info = { - .supply_name = "SDHI0 Vcc", - .microvolts = 3300000, - .gpio = 75, - .enable_high = 1, - .init_data = &vcc_sdhi0_init_data, -}; - -static struct platform_device vcc_sdhi0 = { - .name = "reg-fixed-voltage", - .id = 1, - .dev = { - .platform_data = &vcc_sdhi0_info, - }, -}; - -/* 1.8 / 3.3V SDHI0 VccQ regulator */ -static struct regulator_consumer_supply vccq_sdhi0_consumers[] = { - REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), -}; - -static struct regulator_init_data vccq_sdhi0_init_data = { - .constraints = { - .input_uV = 3300000, - .min_uV = 1800000, - .max_uV = 3300000, - .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | - REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(vccq_sdhi0_consumers), - .consumer_supplies = vccq_sdhi0_consumers, -}; - -static struct gpio vccq_sdhi0_gpios[] = { - {17, GPIOF_OUT_INIT_LOW, "vccq-sdhi0" }, -}; - -static struct gpio_regulator_state vccq_sdhi0_states[] = { - { .value = 3300000, .gpios = (0 << 0) }, - { .value = 1800000, .gpios = (1 << 0) }, -}; - -static struct gpio_regulator_config vccq_sdhi0_info = { - .supply_name = "vqmmc", - - .enable_gpio = 74, - .enable_high = 1, - .enabled_at_boot = 0, - - .gpios = vccq_sdhi0_gpios, - .nr_gpios = ARRAY_SIZE(vccq_sdhi0_gpios), - - .states = vccq_sdhi0_states, - .nr_states = ARRAY_SIZE(vccq_sdhi0_states), - - .type = REGULATOR_VOLTAGE, - .init_data = &vccq_sdhi0_init_data, -}; - -static struct platform_device vccq_sdhi0 = { - .name = "gpio-regulator", - .id = -1, - .dev = { - .platform_data = &vccq_sdhi0_info, - }, -}; - -/* Fixed 3.3V regulator to be used by SDHI1 */ -static struct regulator_consumer_supply vcc_sdhi1_consumers[] = { - REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), -}; - -static struct regulator_init_data vcc_sdhi1_init_data = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(vcc_sdhi1_consumers), - .consumer_supplies = vcc_sdhi1_consumers, -}; - -static struct fixed_voltage_config vcc_sdhi1_info = { - .supply_name = "SDHI1 Vcc", - .microvolts = 3300000, - .gpio = 16, - .enable_high = 1, - .init_data = &vcc_sdhi1_init_data, -}; - -static struct 
platform_device vcc_sdhi1 = { - .name = "reg-fixed-voltage", - .id = 2, - .dev = { - .platform_data = &vcc_sdhi1_info, - }, -}; - -/* SDHI0 */ -static struct tmio_mmc_data sdhi0_info = { - .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX, - .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX, - .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | - MMC_CAP_POWER_OFF_CARD, - .flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, - .cd_gpio = 167, -}; - -static struct resource sdhi0_resources[] = { - { - .name = "SDHI0", - .start = 0xe6850000, - .end = 0xe6850100 - 1, - .flags = IORESOURCE_MEM, - }, - /* - * no SH_MOBILE_SDHI_IRQ_CARD_DETECT here - */ - { - .name = SH_MOBILE_SDHI_IRQ_SDCARD, - .start = gic_spi(118), - .flags = IORESOURCE_IRQ, - }, - { - .name = SH_MOBILE_SDHI_IRQ_SDIO, - .start = gic_spi(119), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device sdhi0_device = { - .name = "sh_mobile_sdhi", - .id = 0, - .dev = { - .platform_data = &sdhi0_info, - }, - .num_resources = ARRAY_SIZE(sdhi0_resources), - .resource = sdhi0_resources, -}; - -/* SDHI1 */ -static struct tmio_mmc_data sdhi1_info = { - .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI1_TX, - .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI1_RX, - .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | - MMC_CAP_POWER_OFF_CARD, - .flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, - /* Port72 cannot generate IRQs, will be used in polling mode. */ - .cd_gpio = 72, -}; - -static struct resource sdhi1_resources[] = { - [0] = { - .name = "SDHI1", - .start = 0xe6860000, - .end = 0xe6860100 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(121), - .flags = IORESOURCE_IRQ, - }, - [2] = { - .start = gic_spi(122), - .flags = IORESOURCE_IRQ, - }, - [3] = { - .start = gic_spi(123), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device sdhi1_device = { - .name = "sh_mobile_sdhi", - .id = 1, - .dev = { - .platform_data = &sdhi1_info, - }, - .num_resources = ARRAY_SIZE(sdhi1_resources), - .resource = sdhi1_resources, -}; - -static const struct pinctrl_map eva_sdhi1_pinctrl_map[] = { - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740", - "sdhi1_data4", "sdhi1"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740", - "sdhi1_ctrl", "sdhi1"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740", - "sdhi1_cd", "sdhi1"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740", - "sdhi1_wp", "sdhi1"), -}; - -/* MMCIF */ -static struct sh_mmcif_plat_data sh_mmcif_plat = { - .sup_pclk = 0, - .caps = MMC_CAP_4_BIT_DATA | - MMC_CAP_8_BIT_DATA | - MMC_CAP_NONREMOVABLE, - .ccs_unsupported = true, - .slave_id_tx = SHDMA_SLAVE_MMCIF_TX, - .slave_id_rx = SHDMA_SLAVE_MMCIF_RX, -}; - -static struct resource sh_mmcif_resources[] = { - [0] = { - .name = "MMCIF", - .start = 0xe6bd0000, - .end = 0xe6bd0100 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - /* MMC ERR */ - .start = gic_spi(56), - .flags = IORESOURCE_IRQ, - }, - [2] = { - /* MMC NOR */ - .start = gic_spi(57), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device sh_mmcif_device = { - .name = "sh_mmcif", - .id = -1, - .dev = { - .platform_data = &sh_mmcif_plat, - }, - .num_resources = ARRAY_SIZE(sh_mmcif_resources), - .resource = sh_mmcif_resources, -}; - -/* Camera */ -static int mt9t111_power(struct device *dev, int mode) -{ - struct clk *mclk = clk_get(NULL, "video1"); - - if (IS_ERR(mclk)) { - dev_err(dev, "can't get video1 clock\n"); - return -EINVAL; - } - - if (mode) { - /* video1 (= CON1 camera) expect 24MHz 
*/ - clk_set_rate(mclk, clk_round_rate(mclk, 24000000)); - clk_enable(mclk); - gpio_set_value(158, 1); - } else { - gpio_set_value(158, 0); - clk_disable(mclk); - } - - clk_put(mclk); - - return 0; -} - -static struct i2c_board_info i2c_camera_mt9t111 = { - I2C_BOARD_INFO("mt9t112", 0x3d), -}; - -static struct mt9t112_camera_info mt9t111_info = { - .divider = { 16, 0, 0, 7, 0, 10, 14, 7, 7 }, -}; - -static struct soc_camera_link mt9t111_link = { - .i2c_adapter_id = 0, - .bus_id = 0, - .board_info = &i2c_camera_mt9t111, - .power = mt9t111_power, - .priv = &mt9t111_info, -}; - -static struct platform_device camera_device = { - .name = "soc-camera-pdrv", - .id = 0, - .dev = { - .platform_data = &mt9t111_link, - }, -}; - -/* CEU0 */ -static struct sh_mobile_ceu_info sh_mobile_ceu0_info = { - .flags = SH_CEU_FLAG_LOWER_8BIT, -}; - -static struct resource ceu0_resources[] = { - [0] = { - .name = "CEU", - .start = 0xfe910000, - .end = 0xfe91009f, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(160), - .flags = IORESOURCE_IRQ, - }, - [2] = { - /* place holder for contiguous memory */ - }, -}; - -static struct platform_device ceu0_device = { - .name = "sh_mobile_ceu", - .id = 0, - .num_resources = ARRAY_SIZE(ceu0_resources), - .resource = ceu0_resources, - .dev = { - .platform_data = &sh_mobile_ceu0_info, - .coherent_dma_mask = 0xffffffff, - }, -}; - -/* FSI */ -static struct sh_fsi_platform_info fsi_info = { - /* FSI-WM8978 */ - .port_a = { - .tx_id = SHDMA_SLAVE_FSIA_TX, - }, - /* FSI-HDMI */ - .port_b = { - .flags = SH_FSI_FMT_SPDIF | - SH_FSI_ENABLE_STREAM_MODE | - SH_FSI_CLK_CPG, - .tx_id = SHDMA_SLAVE_FSIB_TX, - } -}; - -static struct resource fsi_resources[] = { - [0] = { - .name = "FSI", - .start = 0xfe1f0000, - .end = 0xfe1f0400 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(9), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device fsi_device = { - .name = "sh_fsi2", - .id = -1, - .num_resources = ARRAY_SIZE(fsi_resources), - .resource = fsi_resources, - .dev = { - .platform_data = &fsi_info, - }, -}; - -/* FSI-WM8978 */ -static struct asoc_simple_card_info fsi_wm8978_info = { - .name = "wm8978", - .card = "FSI2A-WM8978", - .codec = "wm8978.0-001a", - .platform = "sh_fsi2", - .daifmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM, - .cpu_dai = { - .name = "fsia-dai", - }, - .codec_dai = { - .name = "wm8978-hifi", - .sysclk = 12288000, - }, -}; - -static struct platform_device fsi_wm8978_device = { - .name = "asoc-simple-card", - .id = 0, - .dev = { - .platform_data = &fsi_wm8978_info, - .coherent_dma_mask = DMA_BIT_MASK(32), - .dma_mask = &fsi_wm8978_device.dev.coherent_dma_mask, - }, -}; - -/* FSI-HDMI */ -static struct asoc_simple_card_info fsi2_hdmi_info = { - .name = "HDMI", - .card = "FSI2B-HDMI", - .codec = "sh-mobile-hdmi", - .platform = "sh_fsi2", - .daifmt = SND_SOC_DAIFMT_CBS_CFS, - .cpu_dai = { - .name = "fsib-dai", - }, - .codec_dai = { - .name = "sh_mobile_hdmi-hifi", - }, -}; - -static struct platform_device fsi_hdmi_device = { - .name = "asoc-simple-card", - .id = 1, - .dev = { - .platform_data = &fsi2_hdmi_info, - .coherent_dma_mask = DMA_BIT_MASK(32), - .dma_mask = &fsi_hdmi_device.dev.coherent_dma_mask, - }, -}; - -/* RTC: RTC connects i2c-gpio. 
*/ -static struct i2c_gpio_platform_data i2c_gpio_data = { - .sda_pin = 208, - .scl_pin = 91, - .udelay = 5, /* 100 kHz */ -}; - -static struct platform_device i2c_gpio_device = { - .name = "i2c-gpio", - .id = 2, - .dev = { - .platform_data = &i2c_gpio_data, - }, -}; - -/* I2C */ -static struct st1232_pdata st1232_i2c0_pdata = { - .reset_gpio = 166, -}; - -static struct i2c_board_info i2c0_devices[] = { - { - I2C_BOARD_INFO("st1232-ts", 0x55), - .irq = irq_pin(10), - .platform_data = &st1232_i2c0_pdata, - }, - { - I2C_BOARD_INFO("wm8978", 0x1a), - }, -}; - -static struct i2c_board_info i2c2_devices[] = { - { - I2C_BOARD_INFO("s35390a", 0x30), - .type = "s35390a", - }, -}; - -/* - * board devices - */ -static struct platform_device *eva_devices[] __initdata = { - &lcdc0_device, - &pwm_device, - &pwm_backlight_device, - &leds_gpio_device, - &gpio_keys_device, - &sh_eth_device, - &vcc_sdhi0, - &vccq_sdhi0, - &sdhi0_device, - &sh_mmcif_device, - &hdmi_device, - &hdmi_lcdc_device, - &camera_device, - &ceu0_device, - &fsi_device, - &fsi_wm8978_device, - &fsi_hdmi_device, - &i2c_gpio_device, -}; - -static const struct pinctrl_map eva_pinctrl_map[] = { - /* CEU0 */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740", - "ceu0_data_0_7", "ceu0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740", - "ceu0_clk_0", "ceu0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740", - "ceu0_sync", "ceu0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740", - "ceu0_field", "ceu0"), - /* FSIA */ - PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740", - "fsia_sclk_in", "fsia"), - PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740", - "fsia_mclk_out", "fsia"), - PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740", - "fsia_data_in_1", "fsia"), - PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740", - "fsia_data_out_0", "fsia"), - /* FSIB */ - PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740", - "fsib_mclk_in", "fsib"), - /* GETHER */ - PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740", - "gether_mii", "gether"), - PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740", - "gether_int", "gether"), - /* HDMI */ - PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740", - "hdmi", "hdmi"), - /* LCD0 */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-r8a7740", - "lcd0_data24_0", "lcd0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-r8a7740", - "lcd0_lclk_1", "lcd0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-r8a7740", - "lcd0_sync", "lcd0"), - /* MMCIF */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-r8a7740", - "mmc0_data8_1", "mmc0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-r8a7740", - "mmc0_ctrl_1", "mmc0"), - /* SCIFA1 */ - PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.1", "pfc-r8a7740", - "scifa1_data", "scifa1"), - /* SDHI0 */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7740", - "sdhi0_data4", "sdhi0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7740", - "sdhi0_ctrl", "sdhi0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7740", - "sdhi0_wp", "sdhi0"), - /* ST1232 */ - PIN_MAP_MUX_GROUP_DEFAULT("0-0055", "pfc-r8a7740", - "intc_irq10", "intc"), - /* TPU0 */ - PIN_MAP_MUX_GROUP_DEFAULT("renesas-tpu-pwm", "pfc-r8a7740", - "tpu0_to2_1", "tpu0"), - /* USBHS */ - PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs", "pfc-r8a7740", - "intc_irq7_1", "intc"), -}; - -static void __init eva_clock_init(void) -{ - struct clk *system = clk_get(NULL, "system_clk"); - struct 
clk *xtal1 = clk_get(NULL, "extal1"); - struct clk *usb24s = clk_get(NULL, "usb24s"); - struct clk *fsibck = clk_get(NULL, "fsibck"); - - if (IS_ERR(system) || - IS_ERR(xtal1) || - IS_ERR(usb24s) || - IS_ERR(fsibck)) { - pr_err("armadillo800eva board clock init failed\n"); - goto clock_error; - } - - /* armadillo 800 eva extal1 is 24MHz */ - clk_set_rate(xtal1, 24000000); - - /* usb24s use extal1 (= system) clock (= 24MHz) */ - clk_set_parent(usb24s, system); - - /* FSIBCK is 12.288MHz, and it is parent of FSI-B */ - clk_set_rate(fsibck, 12288000); - -clock_error: - if (!IS_ERR(system)) - clk_put(system); - if (!IS_ERR(xtal1)) - clk_put(xtal1); - if (!IS_ERR(usb24s)) - clk_put(usb24s); - if (!IS_ERR(fsibck)) - clk_put(fsibck); -} - -/* - * board init - */ -#define GPIO_PORT7CR IOMEM(0xe6050007) -#define GPIO_PORT8CR IOMEM(0xe6050008) -static void __init eva_init(void) -{ - static struct pm_domain_device domain_devices[] __initdata = { - { "A4LC", &lcdc0_device }, - { "A4LC", &hdmi_lcdc_device }, - { "A4MP", &hdmi_device }, - { "A4MP", &fsi_device }, - { "A4R", &ceu0_device }, - { "A4S", &sh_eth_device }, - { "A3SP", &pwm_device }, - { "A3SP", &sdhi0_device }, - { "A3SP", &sh_mmcif_device }, - }; - struct platform_device *usb = NULL, *sdhi1 = NULL; - - regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, - ARRAY_SIZE(fixed3v3_power_consumers), 3300000); - regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers, - ARRAY_SIZE(fixed5v0_power_consumers), 5000000); - - pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map)); - pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup)); - - r8a7740_pinmux_init(); - r8a7740_meram_workaround(); - - /* GETHER */ - gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */ - - /* USB */ - gpio_request_one(159, GPIOF_IN, NULL); /* USB_DEVICE_MODE */ - - if (gpio_get_value(159)) { - /* USB Host */ - } else { - /* USB Func */ - /* - * The USBHS interrupt handlers needs to read the IRQ pin value - * (HI/LOW) to diffentiate USB connection and disconnection - * events (usbhsf_get_vbus()). We thus need to select both the - * intc_irq7_1 pin group and GPIO 209 here. - */ - gpio_request_one(209, GPIOF_IN, NULL); - - platform_device_register(&usbhsf_device); - usb = &usbhsf_device; - } - - /* CON1/CON15 Camera */ - gpio_request_one(173, GPIOF_OUT_INIT_LOW, NULL); /* STANDBY */ - gpio_request_one(172, GPIOF_OUT_INIT_HIGH, NULL); /* RST */ - /* see mt9t111_power() */ - gpio_request_one(158, GPIOF_OUT_INIT_LOW, NULL); /* CAM_PON */ - - /* FSI-WM8978 */ - gpio_request(7, NULL); - gpio_request(8, NULL); - gpio_direction_none(GPIO_PORT7CR); /* FSIAOBT needs no direction */ - gpio_direction_none(GPIO_PORT8CR); /* FSIAOLR needs no direction */ - - /* - * CAUTION - * - * DBGMD/LCDC0/FSIA MUX - * DBGMD_SELECT_B should be set after setting PFC Function. 
- */ - gpio_request_one(176, GPIOF_OUT_INIT_HIGH, NULL); - - /* - * We can switch CON8/CON14 by SW1.5, - * but it needs after DBGMD_SELECT_B - */ - gpio_request_one(6, GPIOF_IN, NULL); - if (gpio_get_value(6)) { - /* CON14 enable */ - } else { - /* CON8 (SDHI1) enable */ - pinctrl_register_mappings(eva_sdhi1_pinctrl_map, - ARRAY_SIZE(eva_sdhi1_pinctrl_map)); - - platform_device_register(&vcc_sdhi1); - platform_device_register(&sdhi1_device); - sdhi1 = &sdhi1_device; - } - - -#ifdef CONFIG_CACHE_L2X0 - /* Shared attribute override enable, 32K*8way */ - l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff); -#endif - - i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); - i2c_register_board_info(2, i2c2_devices, ARRAY_SIZE(i2c2_devices)); - - r8a7740_add_standard_devices(); - - platform_add_devices(eva_devices, - ARRAY_SIZE(eva_devices)); - - rmobile_add_devices_to_domains(domain_devices, - ARRAY_SIZE(domain_devices)); - if (usb) - rmobile_add_device_to_domain("A3SP", usb); - if (sdhi1) - rmobile_add_device_to_domain("A3SP", sdhi1); - - r8a7740_pm_init(); -} - -static void __init eva_earlytimer_init(void) -{ - r8a7740_clock_init(MD_CK0 | MD_CK2); - shmobile_earlytimer_init(); - - /* the rate of extal1 clock must be set before late_time_init */ - eva_clock_init(); -} - -#define RESCNT2 IOMEM(0xe6188020) -static void eva_restart(enum reboot_mode mode, const char *cmd) -{ - /* Do soft power on reset */ - writel((1 << 31), RESCNT2); -} - -static const char *eva_boards_compat_dt[] __initdata = { - "renesas,armadillo800eva", - NULL, -}; - -DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva") - .map_io = r8a7740_map_io, - .init_early = r8a7740_add_early_devices, - .init_irq = r8a7740_init_irq_of, - .init_machine = eva_init, - .init_late = shmobile_init_late, - .init_time = eva_earlytimer_init, - .dt_compat = eva_boards_compat_dt, - .restart = eva_restart, -MACHINE_END diff --git a/arch/arm/mach-shmobile/board-bockw-reference.c b/arch/arm/mach-shmobile/board-bockw-reference.c index 9a74efda3d18..4f78296f7d04 100644 --- a/arch/arm/mach-shmobile/board-bockw-reference.c +++ b/arch/arm/mach-shmobile/board-bockw-reference.c @@ -72,7 +72,7 @@ static void __init bockw_init(void) of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } -static const char *bockw_boards_compat_dt[] __initdata = { +static const char *const bockw_boards_compat_dt[] __initconst = { "renesas,bockw-reference", NULL, }; diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c index 25558d1f417f..25a0e7233fe4 100644 --- a/arch/arm/mach-shmobile/board-bockw.c +++ b/arch/arm/mach-shmobile/board-bockw.c @@ -723,7 +723,7 @@ static void __init bockw_init_late(void) ADD_USB_FUNC_DEVICE_IF_POSSIBLE(); } -static const char *bockw_boards_compat_dt[] __initdata = { +static const char *const bockw_boards_compat_dt[] __initconst = { "renesas,bockw", NULL, }; diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c deleted file mode 100644 index 260d8319fd82..000000000000 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ /dev/null @@ -1,916 +0,0 @@ -/* - * KZM-A9-GT board support - * - * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <linux/delay.h> -#include <linux/gpio.h> -#include <linux/gpio_keys.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/i2c.h> -#include <linux/i2c/pcf857x.h> -#include <linux/input.h> -#include <linux/irqchip/arm-gic.h> -#include <linux/mmc/host.h> -#include <linux/mmc/sh_mmcif.h> -#include <linux/mmc/sh_mobile_sdhi.h> -#include <linux/mfd/as3711.h> -#include <linux/mfd/tmio.h> -#include <linux/pinctrl/machine.h> -#include <linux/pinctrl/pinconf-generic.h> -#include <linux/platform_device.h> -#include <linux/reboot.h> -#include <linux/regulator/fixed.h> -#include <linux/regulator/machine.h> -#include <linux/smsc911x.h> -#include <linux/usb/r8a66597.h> -#include <linux/usb/renesas_usbhs.h> -#include <linux/videodev2.h> - -#include <sound/sh_fsi.h> -#include <sound/simple_card.h> -#include <asm/hardware/cache-l2x0.h> -#include <asm/mach-types.h> -#include <asm/mach/arch.h> -#include <video/sh_mobile_lcdc.h> - -#include "common.h" -#include "intc.h" -#include "irqs.h" -#include "sh73a0.h" - -/* - * external GPIO - */ -#define GPIO_PCF8575_BASE (310) -#define GPIO_PCF8575_PORT10 (GPIO_PCF8575_BASE + 8) -#define GPIO_PCF8575_PORT11 (GPIO_PCF8575_BASE + 9) -#define GPIO_PCF8575_PORT12 (GPIO_PCF8575_BASE + 10) -#define GPIO_PCF8575_PORT13 (GPIO_PCF8575_BASE + 11) -#define GPIO_PCF8575_PORT14 (GPIO_PCF8575_BASE + 12) -#define GPIO_PCF8575_PORT15 (GPIO_PCF8575_BASE + 13) -#define GPIO_PCF8575_PORT16 (GPIO_PCF8575_BASE + 14) - -/* Dummy supplies, where voltage doesn't matter */ -static struct regulator_consumer_supply dummy_supplies[] = { - REGULATOR_SUPPLY("vddvario", "smsc911x.0"), - REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), -}; - -/* - * FSI-AK4648 - * - * this command is required when playback. 
- * - * # amixer set "LINEOUT Mixer DACL" on - */ - -/* SMSC 9221 */ -static struct resource smsc9221_resources[] = { - [0] = { - .start = 0x10000000, /* CS4 */ - .end = 0x100000ff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = irq_pin(3), /* IRQ3 */ - .flags = IORESOURCE_IRQ, - }, -}; - -static struct smsc911x_platform_config smsc9221_platdata = { - .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, - .phy_interface = PHY_INTERFACE_MODE_MII, - .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, - .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, -}; - -static struct platform_device smsc_device = { - .name = "smsc911x", - .dev = { - .platform_data = &smsc9221_platdata, - }, - .resource = smsc9221_resources, - .num_resources = ARRAY_SIZE(smsc9221_resources), -}; - -/* USB external chip */ -static struct r8a66597_platdata usb_host_data = { - .on_chip = 0, - .xtal = R8A66597_PLATDATA_XTAL_48MHZ, -}; - -static struct resource usb_resources[] = { - [0] = { - .start = 0x10010000, - .end = 0x1001ffff - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = irq_pin(1), /* IRQ1 */ - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device usb_host_device = { - .name = "r8a66597_hcd", - .dev = { - .platform_data = &usb_host_data, - .dma_mask = NULL, - .coherent_dma_mask = 0xffffffff, - }, - .num_resources = ARRAY_SIZE(usb_resources), - .resource = usb_resources, -}; - -/* USB Func CN17 */ -struct usbhs_private { - void __iomem *phy; - void __iomem *cr2; - struct renesas_usbhs_platform_info info; -}; - -#define IRQ15 irq_pin(15) -#define USB_PHY_MODE (1 << 4) -#define USB_PHY_INT_EN ((1 << 3) | (1 << 2)) -#define USB_PHY_ON (1 << 1) -#define USB_PHY_OFF (1 << 0) -#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF) - -#define usbhs_get_priv(pdev) \ - container_of(renesas_usbhs_get_info(pdev), struct usbhs_private, info) - -static int usbhs_get_vbus(struct platform_device *pdev) -{ - struct usbhs_private *priv = usbhs_get_priv(pdev); - - return !((1 << 7) & __raw_readw(priv->cr2)); -} - -static int usbhs_phy_reset(struct platform_device *pdev) -{ - struct usbhs_private *priv = usbhs_get_priv(pdev); - - /* init phy */ - __raw_writew(0x8a0a, priv->cr2); - - return 0; -} - -static int usbhs_get_id(struct platform_device *pdev) -{ - return USBHS_GADGET; -} - -static irqreturn_t usbhs_interrupt(int irq, void *data) -{ - struct platform_device *pdev = data; - struct usbhs_private *priv = usbhs_get_priv(pdev); - - renesas_usbhs_call_notify_hotplug(pdev); - - /* clear status */ - __raw_writew(__raw_readw(priv->phy) | USB_PHY_INT_CLR, priv->phy); - - return IRQ_HANDLED; -} - -static int usbhs_hardware_init(struct platform_device *pdev) -{ - struct usbhs_private *priv = usbhs_get_priv(pdev); - int ret; - - /* clear interrupt status */ - __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy); - - ret = request_irq(IRQ15, usbhs_interrupt, IRQF_TRIGGER_HIGH, - dev_name(&pdev->dev), pdev); - if (ret) { - dev_err(&pdev->dev, "request_irq err\n"); - return ret; - } - - /* enable USB phy interrupt */ - __raw_writew(USB_PHY_MODE | USB_PHY_INT_EN, priv->phy); - - return 0; -} - -static int usbhs_hardware_exit(struct platform_device *pdev) -{ - struct usbhs_private *priv = usbhs_get_priv(pdev); - - /* clear interrupt status */ - __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy); - - free_irq(IRQ15, pdev); - - return 0; -} - -static u32 usbhs_pipe_cfg[] = { - USB_ENDPOINT_XFER_CONTROL, - USB_ENDPOINT_XFER_ISOC, - USB_ENDPOINT_XFER_ISOC, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - 
USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_INT, - USB_ENDPOINT_XFER_INT, - USB_ENDPOINT_XFER_INT, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, - USB_ENDPOINT_XFER_BULK, -}; - -static struct usbhs_private usbhs_private = { - .phy = IOMEM(0xe60781e0), /* USBPHYINT */ - .cr2 = IOMEM(0xe605810c), /* USBCR2 */ - .info = { - .platform_callback = { - .hardware_init = usbhs_hardware_init, - .hardware_exit = usbhs_hardware_exit, - .get_id = usbhs_get_id, - .phy_reset = usbhs_phy_reset, - .get_vbus = usbhs_get_vbus, - }, - .driver_param = { - .buswait_bwait = 4, - .has_otg = 1, - .pipe_type = usbhs_pipe_cfg, - .pipe_size = ARRAY_SIZE(usbhs_pipe_cfg), - }, - }, -}; - -static struct resource usbhs_resources[] = { - [0] = { - .start = 0xE6890000, - .end = 0xE68900e6 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(62), - .end = gic_spi(62), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device usbhs_device = { - .name = "renesas_usbhs", - .id = -1, - .dev = { - .dma_mask = NULL, - .coherent_dma_mask = 0xffffffff, - .platform_data = &usbhs_private.info, - }, - .num_resources = ARRAY_SIZE(usbhs_resources), - .resource = usbhs_resources, -}; - -/* LCDC */ -static struct fb_videomode kzm_lcdc_mode = { - .name = "WVGA Panel", - .xres = 800, - .yres = 480, - .left_margin = 220, - .right_margin = 110, - .hsync_len = 70, - .upper_margin = 20, - .lower_margin = 5, - .vsync_len = 5, - .sync = 0, -}; - -static struct sh_mobile_lcdc_info lcdc_info = { - .clock_source = LCDC_CLK_BUS, - .ch[0] = { - .chan = LCDC_CHAN_MAINLCD, - .fourcc = V4L2_PIX_FMT_RGB565, - .interface_type = RGB24, - .lcd_modes = &kzm_lcdc_mode, - .num_modes = 1, - .clock_divider = 5, - .flags = 0, - .panel_cfg = { - .width = 152, - .height = 91, - }, - } -}; - -static struct resource lcdc_resources[] = { - [0] = { - .name = "LCDC", - .start = 0xfe940000, - .end = 0xfe943fff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = intcs_evt2irq(0x580), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device lcdc_device = { - .name = "sh_mobile_lcdc_fb", - .num_resources = ARRAY_SIZE(lcdc_resources), - .resource = lcdc_resources, - .dev = { - .platform_data = &lcdc_info, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, -}; - -/* Fixed 1.8V regulator to be used by MMCIF */ -static struct regulator_consumer_supply fixed1v8_power_consumers[] = -{ - REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), - REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), -}; - -/* MMCIF */ -static struct resource sh_mmcif_resources[] = { - [0] = { - .name = "MMCIF", - .start = 0xe6bd0000, - .end = 0xe6bd00ff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(140), - .flags = IORESOURCE_IRQ, - }, - [2] = { - .start = gic_spi(141), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct sh_mmcif_plat_data sh_mmcif_platdata = { - .ocr = MMC_VDD_165_195, - .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, - .ccs_unsupported = true, - .slave_id_tx = SHDMA_SLAVE_MMCIF_TX, - .slave_id_rx = SHDMA_SLAVE_MMCIF_RX, -}; - -static struct platform_device mmc_device = { - .name = "sh_mmcif", - .dev = { - .dma_mask = NULL, - .coherent_dma_mask = 0xffffffff, - .platform_data = &sh_mmcif_platdata, - }, - .num_resources = ARRAY_SIZE(sh_mmcif_resources), - .resource = sh_mmcif_resources, -}; - -/* Fixed 3.3V regulators to be used by SDHI0 */ -static struct regulator_consumer_supply vcc_sdhi0_consumers[] = -{ - REGULATOR_SUPPLY("vmmc", 
"sh_mobile_sdhi.0"), -}; - -static struct regulator_init_data vcc_sdhi0_init_data = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(vcc_sdhi0_consumers), - .consumer_supplies = vcc_sdhi0_consumers, -}; - -static struct fixed_voltage_config vcc_sdhi0_info = { - .supply_name = "SDHI0 Vcc", - .microvolts = 3300000, - .gpio = 15, - .enable_high = 1, - .init_data = &vcc_sdhi0_init_data, -}; - -static struct platform_device vcc_sdhi0 = { - .name = "reg-fixed-voltage", - .id = 0, - .dev = { - .platform_data = &vcc_sdhi0_info, - }, -}; - -/* Fixed 3.3V regulators to be used by SDHI2 */ -static struct regulator_consumer_supply vcc_sdhi2_consumers[] = -{ - REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.2"), -}; - -static struct regulator_init_data vcc_sdhi2_init_data = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = ARRAY_SIZE(vcc_sdhi2_consumers), - .consumer_supplies = vcc_sdhi2_consumers, -}; - -static struct fixed_voltage_config vcc_sdhi2_info = { - .supply_name = "SDHI2 Vcc", - .microvolts = 3300000, - .gpio = 14, - .enable_high = 1, - .init_data = &vcc_sdhi2_init_data, -}; - -static struct platform_device vcc_sdhi2 = { - .name = "reg-fixed-voltage", - .id = 1, - .dev = { - .platform_data = &vcc_sdhi2_info, - }, -}; - -/* SDHI */ -static struct tmio_mmc_data sdhi0_info = { - .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX, - .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX, - .flags = TMIO_MMC_HAS_IDLE_WAIT, - .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | - MMC_CAP_POWER_OFF_CARD, -}; - -static struct resource sdhi0_resources[] = { - [0] = { - .name = "SDHI0", - .start = 0xee100000, - .end = 0xee1000ff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT, - .start = gic_spi(83), - .flags = IORESOURCE_IRQ, - }, - [2] = { - .name = SH_MOBILE_SDHI_IRQ_SDCARD, - .start = gic_spi(84), - .flags = IORESOURCE_IRQ, - }, - [3] = { - .name = SH_MOBILE_SDHI_IRQ_SDIO, - .start = gic_spi(85), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device sdhi0_device = { - .name = "sh_mobile_sdhi", - .num_resources = ARRAY_SIZE(sdhi0_resources), - .resource = sdhi0_resources, - .dev = { - .platform_data = &sdhi0_info, - }, -}; - -/* Micro SD */ -static struct tmio_mmc_data sdhi2_info = { - .chan_priv_tx = (void *)SHDMA_SLAVE_SDHI2_TX, - .chan_priv_rx = (void *)SHDMA_SLAVE_SDHI2_RX, - .flags = TMIO_MMC_HAS_IDLE_WAIT | - TMIO_MMC_USE_GPIO_CD | - TMIO_MMC_WRPROTECT_DISABLE, - .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_POWER_OFF_CARD, - .cd_gpio = 13, -}; - -static struct resource sdhi2_resources[] = { - [0] = { - .name = "SDHI2", - .start = 0xee140000, - .end = 0xee1400ff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT, - .start = gic_spi(103), - .flags = IORESOURCE_IRQ, - }, - [2] = { - .name = SH_MOBILE_SDHI_IRQ_SDCARD, - .start = gic_spi(104), - .flags = IORESOURCE_IRQ, - }, - [3] = { - .name = SH_MOBILE_SDHI_IRQ_SDIO, - .start = gic_spi(105), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device sdhi2_device = { - .name = "sh_mobile_sdhi", - .id = 2, - .num_resources = ARRAY_SIZE(sdhi2_resources), - .resource = sdhi2_resources, - .dev = { - .platform_data = &sdhi2_info, - }, -}; - -/* KEY */ -#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } - -static struct gpio_keys_button gpio_buttons[] = { - GPIO_KEY(KEY_BACK, GPIO_PCF8575_PORT10, "SW3"), - GPIO_KEY(KEY_RIGHT, 
GPIO_PCF8575_PORT11, "SW2-R"), - GPIO_KEY(KEY_LEFT, GPIO_PCF8575_PORT12, "SW2-L"), - GPIO_KEY(KEY_ENTER, GPIO_PCF8575_PORT13, "SW2-P"), - GPIO_KEY(KEY_UP, GPIO_PCF8575_PORT14, "SW2-U"), - GPIO_KEY(KEY_DOWN, GPIO_PCF8575_PORT15, "SW2-D"), - GPIO_KEY(KEY_HOME, GPIO_PCF8575_PORT16, "SW1"), -}; - -static struct gpio_keys_platform_data gpio_key_info = { - .buttons = gpio_buttons, - .nbuttons = ARRAY_SIZE(gpio_buttons), -}; - -static struct platform_device gpio_keys_device = { - .name = "gpio-keys", - .dev = { - .platform_data = &gpio_key_info, - }, -}; - -/* FSI-AK4648 */ -static struct sh_fsi_platform_info fsi_info = { - .port_a = { - .tx_id = SHDMA_SLAVE_FSI2A_TX, - }, -}; - -static struct resource fsi_resources[] = { - [0] = { - .name = "FSI", - .start = 0xEC230000, - .end = 0xEC230400 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(146), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device fsi_device = { - .name = "sh_fsi2", - .id = -1, - .num_resources = ARRAY_SIZE(fsi_resources), - .resource = fsi_resources, - .dev = { - .platform_data = &fsi_info, - }, -}; - -static struct asoc_simple_card_info fsi2_ak4648_info = { - .name = "AK4648", - .card = "FSI2A-AK4648", - .codec = "ak4642-codec.0-0012", - .platform = "sh_fsi2", - .daifmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM, - .cpu_dai = { - .name = "fsia-dai", - }, - .codec_dai = { - .name = "ak4642-hifi", - .sysclk = 11289600, - }, -}; - -static struct platform_device fsi_ak4648_device = { - .name = "asoc-simple-card", - .dev = { - .platform_data = &fsi2_ak4648_info, - .coherent_dma_mask = DMA_BIT_MASK(32), - .dma_mask = &fsi_ak4648_device.dev.coherent_dma_mask, - }, -}; - -/* I2C */ - -/* StepDown1 is used to supply 1.315V to the CPU */ -static struct regulator_init_data as3711_sd1 = { - .constraints = { - .name = "1.315V CPU", - .boot_on = 1, - .always_on = 1, - .min_uV = 1315000, - .max_uV = 1335000, - }, -}; - -/* StepDown2 is used to supply 1.8V to the CPU and to the board */ -static struct regulator_init_data as3711_sd2 = { - .constraints = { - .name = "1.8V", - .boot_on = 1, - .always_on = 1, - .min_uV = 1800000, - .max_uV = 1800000, - }, -}; - -/* - * StepDown3 is switched in parallel with StepDown2, seems to be off, - * according to read-back pre-set register values - */ - -/* StepDown4 is used to supply 1.215V to the CPU and to the board */ -static struct regulator_init_data as3711_sd4 = { - .constraints = { - .name = "1.215V", - .boot_on = 1, - .always_on = 1, - .min_uV = 1215000, - .max_uV = 1235000, - }, -}; - -/* LDO1 is unused and unconnected */ - -/* LDO2 is used to supply 2.8V to the CPU */ -static struct regulator_init_data as3711_ldo2 = { - .constraints = { - .name = "2.8V CPU", - .boot_on = 1, - .always_on = 1, - .min_uV = 2800000, - .max_uV = 2800000, - }, -}; - -/* LDO3 is used to supply 3.0V to the CPU */ -static struct regulator_init_data as3711_ldo3 = { - .constraints = { - .name = "3.0V CPU", - .boot_on = 1, - .always_on = 1, - .min_uV = 3000000, - .max_uV = 3000000, - }, -}; - -/* LDO4 is used to supply 2.8V to the board */ -static struct regulator_init_data as3711_ldo4 = { - .constraints = { - .name = "2.8V", - .boot_on = 1, - .always_on = 1, - .min_uV = 2800000, - .max_uV = 2800000, - }, -}; - -/* LDO5 is switched parallel to LDO4, also set to 2.8V */ -static struct regulator_init_data as3711_ldo5 = { - .constraints = { - .name = "2.8V #2", - .boot_on = 1, - .always_on = 1, - .min_uV = 2800000, - .max_uV = 2800000, - }, -}; - -/* LDO6 is unused and unconnected */ - -/* 
LDO7 is used to supply 1.15V to the CPU */ -static struct regulator_init_data as3711_ldo7 = { - .constraints = { - .name = "1.15V CPU", - .boot_on = 1, - .always_on = 1, - .min_uV = 1150000, - .max_uV = 1150000, - }, -}; - -/* LDO8 is switched parallel to LDO7, also set to 1.15V */ -static struct regulator_init_data as3711_ldo8 = { - .constraints = { - .name = "1.15V CPU #2", - .boot_on = 1, - .always_on = 1, - .min_uV = 1150000, - .max_uV = 1150000, - }, -}; - -static struct as3711_platform_data as3711_pdata = { - .regulator = { - .init_data = { - [AS3711_REGULATOR_SD_1] = &as3711_sd1, - [AS3711_REGULATOR_SD_2] = &as3711_sd2, - [AS3711_REGULATOR_SD_4] = &as3711_sd4, - [AS3711_REGULATOR_LDO_2] = &as3711_ldo2, - [AS3711_REGULATOR_LDO_3] = &as3711_ldo3, - [AS3711_REGULATOR_LDO_4] = &as3711_ldo4, - [AS3711_REGULATOR_LDO_5] = &as3711_ldo5, - [AS3711_REGULATOR_LDO_7] = &as3711_ldo7, - [AS3711_REGULATOR_LDO_8] = &as3711_ldo8, - }, - }, - .backlight = { - .su2_fb = "sh_mobile_lcdc_fb.0", - .su2_max_uA = 36000, - .su2_feedback = AS3711_SU2_CURR_AUTO, - .su2_fbprot = AS3711_SU2_GPIO4, - .su2_auto_curr1 = true, - .su2_auto_curr2 = true, - .su2_auto_curr3 = true, - }, -}; - -static struct pcf857x_platform_data pcf8575_pdata = { - .gpio_base = GPIO_PCF8575_BASE, -}; - -static struct i2c_board_info i2c0_devices[] = { - { - I2C_BOARD_INFO("ak4648", 0x12), - }, - { - I2C_BOARD_INFO("r2025sd", 0x32), - }, - { - I2C_BOARD_INFO("ak8975", 0x0c), - .irq = irq_pin(28), /* IRQ28 */ - }, - { - I2C_BOARD_INFO("adxl34x", 0x1d), - .irq = irq_pin(26), /* IRQ26 */ - }, - { - I2C_BOARD_INFO("as3711", 0x40), - .irq = intcs_evt2irq(0x3300), /* IRQ24 */ - .platform_data = &as3711_pdata, - }, -}; - -static struct i2c_board_info i2c1_devices[] = { - { - I2C_BOARD_INFO("st1232-ts", 0x55), - .irq = irq_pin(8), /* IRQ8 */ - }, -}; - -static struct i2c_board_info i2c3_devices[] = { - { - I2C_BOARD_INFO("pcf8575", 0x20), - .irq = irq_pin(19), /* IRQ19 */ - .platform_data = &pcf8575_pdata, - }, -}; - -static struct platform_device *kzm_devices[] __initdata = { - &smsc_device, - &usb_host_device, - &usbhs_device, - &lcdc_device, - &mmc_device, - &vcc_sdhi0, - &vcc_sdhi2, - &sdhi0_device, - &sdhi2_device, - &gpio_keys_device, - &fsi_device, - &fsi_ak4648_device, -}; - -static unsigned long pin_pullup_conf[] = { - PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 0), -}; - -static const struct pinctrl_map kzm_pinctrl_map[] = { - /* FSIA (AK4648) */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0", - "fsia_mclk_in", "fsia"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0", - "fsia_sclk_in", "fsia"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0", - "fsia_data_in", "fsia"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0", - "fsia_data_out", "fsia"), - /* I2C3 */ - PIN_MAP_MUX_GROUP_DEFAULT("i2c-sh_mobile.3", "pfc-sh73a0", - "i2c3_1", "i2c3"), - /* LCD */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-sh73a0", - "lcd_data24", "lcd"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-sh73a0", - "lcd_sync", "lcd"), - /* MMCIF */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0", - "mmc0_data8_0", "mmc0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0", - "mmc0_ctrl_0", "mmc0"), - PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mmcif.0", "pfc-sh73a0", - "PORT279", pin_pullup_conf), - PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0", - "mmc0_data8_0", pin_pullup_conf), - /* SCIFA4 */ - PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0", - "scifa4_data", "scifa4"), - PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", 
"pfc-sh73a0", - "scifa4_ctrl", "scifa4"), - /* SDHI0 */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0", - "sdhi0_data4", "sdhi0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0", - "sdhi0_ctrl", "sdhi0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0", - "sdhi0_cd", "sdhi0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0", - "sdhi0_wp", "sdhi0"), - /* SDHI2 */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-sh73a0", - "sdhi2_data4", "sdhi2"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-sh73a0", - "sdhi2_ctrl", "sdhi2"), - /* SMSC */ - PIN_MAP_MUX_GROUP_DEFAULT("smsc911x.0", "pfc-sh73a0", - "bsc_cs4", "bsc"), - /* USB */ - PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs", "pfc-sh73a0", - "usb_vbus", "usb"), -}; - -static void __init kzm_init(void) -{ - regulator_register_always_on(2, "fixed-1.8V", fixed1v8_power_consumers, - ARRAY_SIZE(fixed1v8_power_consumers), 1800000); - regulator_register_fixed(3, dummy_supplies, ARRAY_SIZE(dummy_supplies)); - - pinctrl_register_mappings(kzm_pinctrl_map, ARRAY_SIZE(kzm_pinctrl_map)); - - sh73a0_pinmux_init(); - - /* SMSC */ - gpio_request_one(224, GPIOF_IN, NULL); /* IRQ3 */ - - /* LCDC */ - gpio_request_one(222, GPIOF_OUT_INIT_HIGH, NULL); /* LCDCDON */ - gpio_request_one(226, GPIOF_OUT_INIT_HIGH, NULL); /* SC */ - - /* Touchscreen */ - gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */ - -#ifdef CONFIG_CACHE_L2X0 - /* Shared attribute override enable, 64K*8way */ - l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff); -#endif - - i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); - i2c_register_board_info(1, i2c1_devices, ARRAY_SIZE(i2c1_devices)); - i2c_register_board_info(3, i2c3_devices, ARRAY_SIZE(i2c3_devices)); - - sh73a0_add_standard_devices(); - platform_add_devices(kzm_devices, ARRAY_SIZE(kzm_devices)); - - sh73a0_pm_init(); -} - -static void kzm9g_restart(enum reboot_mode mode, const char *cmd) -{ -#define RESCNT2 IOMEM(0xe6188020) - /* Do soft power on reset */ - writel((1 << 31), RESCNT2); -} - -static const char *kzm9g_boards_compat_dt[] __initdata = { - "renesas,kzm9g", - NULL, -}; - -DT_MACHINE_START(KZM9G_DT, "kzm9g") - .smp = smp_ops(sh73a0_smp_ops), - .map_io = sh73a0_map_io, - .init_early = sh73a0_add_early_devices, - .init_irq = sh73a0_init_irq, - .init_machine = kzm_init, - .init_late = shmobile_init_late, - .init_time = sh73a0_earlytimer_init, - .restart = kzm9g_restart, - .dt_compat = kzm9g_boards_compat_dt, -MACHINE_END diff --git a/arch/arm/mach-shmobile/board-marzen-reference.c b/arch/arm/mach-shmobile/board-marzen-reference.c deleted file mode 100644 index b15eb923263f..000000000000 --- a/arch/arm/mach-shmobile/board-marzen-reference.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * marzen board support - Reference DT implementation - * - * Copyright (C) 2011 Renesas Solutions Corp. - * Copyright (C) 2011 Magnus Damm - * Copyright (C) 2013 Simon Horman - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/clk/shmobile.h> -#include <linux/clocksource.h> -#include <linux/of_platform.h> - -#include <asm/irq.h> -#include <asm/mach/arch.h> - -#include "common.h" -#include "irqs.h" -#include "r8a7779.h" - -static void __init marzen_init_timer(void) -{ - r8a7779_clocks_init(r8a7779_read_mode_pins()); - clocksource_of_init(); -} - -static void __init marzen_init(void) -{ - of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); - r8a7779_init_irq_extpin_dt(1); /* IRQ1 as individual interrupt */ -} - -static const char *marzen_boards_compat_dt[] __initdata = { - "renesas,marzen", - "renesas,marzen-reference", - NULL, -}; - -DT_MACHINE_START(MARZEN, "marzen") - .smp = smp_ops(r8a7779_smp_ops), - .map_io = r8a7779_map_io, - .init_early = shmobile_init_delay, - .init_time = marzen_init_timer, - .init_irq = r8a7779_init_irq_dt, - .init_machine = marzen_init, - .init_late = shmobile_init_late, - .dt_compat = marzen_boards_compat_dt, -MACHINE_END diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c deleted file mode 100644 index 51db288f192a..000000000000 --- a/arch/arm/mach-shmobile/board-marzen.c +++ /dev/null @@ -1,347 +0,0 @@ -/* - * marzen board support - * - * Copyright (C) 2011, 2013 Renesas Solutions Corp. - * Copyright (C) 2011 Magnus Damm - * Copyright (C) 2013 Cogent Embedded, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/platform_device.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/leds.h> -#include <linux/dma-mapping.h> -#include <linux/pinctrl/machine.h> -#include <linux/platform_data/camera-rcar.h> -#include <linux/platform_data/gpio-rcar.h> -#include <linux/platform_data/usb-rcar-phy.h> -#include <linux/regulator/fixed.h> -#include <linux/regulator/machine.h> -#include <linux/smsc911x.h> -#include <linux/spi/spi.h> -#include <linux/spi/sh_hspi.h> -#include <linux/mmc/host.h> -#include <linux/mmc/sh_mobile_sdhi.h> -#include <linux/mfd/tmio.h> - -#include <media/soc_camera.h> -#include <asm/mach-types.h> -#include <asm/mach/arch.h> -#include <asm/traps.h> - -#include "common.h" -#include "irqs.h" -#include "r8a7779.h" - -/* Fixed 3.3V regulator to be used by SDHI0 */ -static struct regulator_consumer_supply fixed3v3_power_consumers[] = { - REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), - REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), -}; - -/* Dummy supplies, where voltage doesn't matter */ -static struct regulator_consumer_supply dummy_supplies[] = { - REGULATOR_SUPPLY("vddvario", "smsc911x"), - REGULATOR_SUPPLY("vdd33a", "smsc911x"), -}; - -/* USB PHY */ -static struct resource usb_phy_resources[] = { - [0] = { - .start = 0xffe70800, - .end = 0xffe70900 - 1, - .flags = IORESOURCE_MEM, - }, -}; - -static struct rcar_phy_platform_data usb_phy_platform_data; - -static struct platform_device usb_phy = { - .name = "rcar_usb_phy", - .id = -1, - .dev = { - .platform_data = &usb_phy_platform_data, - }, - .resource = usb_phy_resources, - .num_resources = ARRAY_SIZE(usb_phy_resources), -}; - -/* SMSC LAN89218 */ -static struct resource smsc911x_resources[] = { - [0] = { - .start = 0x18000000, /* ExCS0 */ - .end = 0x180000ff, /* A1->A7 */ - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = irq_pin(1), /* IRQ 1 */ - .flags = IORESOURCE_IRQ, - }, -}; - -static struct smsc911x_platform_config smsc911x_platdata = { - .flags = SMSC911X_USE_32BIT, /* 32-bit SW on 16-bit HW bus */ - .phy_interface = PHY_INTERFACE_MODE_MII, - .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, - .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, -}; - -static struct platform_device eth_device = { - .name = "smsc911x", - .id = -1, - .dev = { - .platform_data = &smsc911x_platdata, - }, - .resource = smsc911x_resources, - .num_resources = ARRAY_SIZE(smsc911x_resources), -}; - -static struct resource sdhi0_resources[] = { - [0] = { - .name = "sdhi0", - .start = 0xffe4c000, - .end = 0xffe4c0ff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_iid(0x88), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct tmio_mmc_data sdhi0_platform_data = { - .chan_priv_tx = (void *)HPBDMA_SLAVE_SDHI0_TX, - .chan_priv_rx = (void *)HPBDMA_SLAVE_SDHI0_RX, - .flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT, - .capabilities = MMC_CAP_SD_HIGHSPEED, -}; - -static struct platform_device sdhi0_device = { - .name = "sh_mobile_sdhi", - .num_resources = ARRAY_SIZE(sdhi0_resources), - .resource = sdhi0_resources, - .id = 0, - .dev = { - .platform_data = &sdhi0_platform_data, - } -}; - -/* Thermal */ -static struct resource thermal_resources[] = { - [0] = { - .start = 0xFFC48000, - .end = 0xFFC48038 - 1, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device thermal_device = { - .name = "rcar_thermal", - .resource = thermal_resources, - .num_resources = ARRAY_SIZE(thermal_resources), -}; - -/* HSPI 
*/ -static struct resource hspi_resources[] = { - [0] = { - .start = 0xFFFC7000, - .end = 0xFFFC7018 - 1, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device hspi_device = { - .name = "sh-hspi", - .id = 0, - .resource = hspi_resources, - .num_resources = ARRAY_SIZE(hspi_resources), -}; - -/* LEDS */ -static struct gpio_led marzen_leds[] = { - { - .name = "led2", - .gpio = RCAR_GP_PIN(4, 29), - .default_state = LEDS_GPIO_DEFSTATE_ON, - }, { - .name = "led3", - .gpio = RCAR_GP_PIN(4, 30), - .default_state = LEDS_GPIO_DEFSTATE_ON, - }, { - .name = "led4", - .gpio = RCAR_GP_PIN(4, 31), - .default_state = LEDS_GPIO_DEFSTATE_ON, - }, -}; - -static struct gpio_led_platform_data marzen_leds_pdata = { - .leds = marzen_leds, - .num_leds = ARRAY_SIZE(marzen_leds), -}; - -static struct platform_device leds_device = { - .name = "leds-gpio", - .id = 0, - .dev = { - .platform_data = &marzen_leds_pdata, - }, -}; - -/* VIN */ -static struct rcar_vin_platform_data vin_platform_data __initdata = { - .flags = RCAR_VIN_BT656, -}; - -#define MARZEN_VIN(idx) \ -static struct resource vin##idx##_resources[] __initdata = { \ - DEFINE_RES_MEM(0xffc50000 + 0x1000 * (idx), 0x1000), \ - DEFINE_RES_IRQ(gic_iid(0x5f + (idx))), \ -}; \ - \ -static struct platform_device_info vin##idx##_info __initdata = { \ - .name = "r8a7779-vin", \ - .id = idx, \ - .res = vin##idx##_resources, \ - .num_res = ARRAY_SIZE(vin##idx##_resources), \ - .dma_mask = DMA_BIT_MASK(32), \ - .data = &vin_platform_data, \ - .size_data = sizeof(vin_platform_data), \ -} -MARZEN_VIN(1); -MARZEN_VIN(3); - -#define MARZEN_CAMERA(idx) \ -static struct i2c_board_info camera##idx##_info = { \ - I2C_BOARD_INFO("adv7180", 0x20 + (idx)), \ -}; \ - \ -static struct soc_camera_link iclink##idx##_adv7180 = { \ - .bus_id = 1 + 2 * (idx), \ - .i2c_adapter_id = 0, \ - .board_info = &camera##idx##_info, \ -}; \ - \ -static struct platform_device camera##idx##_device = { \ - .name = "soc-camera-pdrv", \ - .id = idx, \ - .dev = { \ - .platform_data = &iclink##idx##_adv7180, \ - }, \ -}; - -MARZEN_CAMERA(0); -MARZEN_CAMERA(1); - -static struct platform_device *marzen_devices[] __initdata = { - ð_device, - &sdhi0_device, - &thermal_device, - &hspi_device, - &leds_device, - &usb_phy, - &camera0_device, - &camera1_device, -}; - -static const struct pinctrl_map marzen_pinctrl_map[] = { - /* DU (CN10: ARGB0, CN13: LVDS) */ - PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779", - "du0_rgb888", "du0"), - PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779", - "du0_sync_1", "du0"), - PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779", - "du0_clk_out_0", "du0"), - PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779", - "du1_rgb666", "du1"), - PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779", - "du1_sync_1", "du1"), - PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779", - "du1_clk_out", "du1"), - /* HSPI0 */ - PIN_MAP_MUX_GROUP_DEFAULT("sh-hspi.0", "pfc-r8a7779", - "hspi0", "hspi0"), - /* SCIF2 (CN18: DEBUG0) */ - PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.2", "pfc-r8a7779", - "scif2_data_c", "scif2"), - /* SCIF4 (CN19: DEBUG1) */ - PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-r8a7779", - "scif4_data", "scif4"), - /* SDHI0 */ - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7779", - "sdhi0_data4", "sdhi0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7779", - "sdhi0_ctrl", "sdhi0"), - PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7779", - "sdhi0_cd", "sdhi0"), - /* SMSC */ - 
PIN_MAP_MUX_GROUP_DEFAULT("smsc911x", "pfc-r8a7779", - "intc_irq1_b", "intc"), - PIN_MAP_MUX_GROUP_DEFAULT("smsc911x", "pfc-r8a7779", - "lbsc_ex_cs0", "lbsc"), - /* USB0 */ - PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform.0", "pfc-r8a7779", - "usb0", "usb0"), - /* USB1 */ - PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform.0", "pfc-r8a7779", - "usb1", "usb1"), - /* USB2 */ - PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform.1", "pfc-r8a7779", - "usb2", "usb2"), - /* VIN1 */ - PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.1", "pfc-r8a7779", - "vin1_clk", "vin1"), - PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.1", "pfc-r8a7779", - "vin1_data8", "vin1"), - /* VIN3 */ - PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.3", "pfc-r8a7779", - "vin3_clk", "vin3"), - PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.3", "pfc-r8a7779", - "vin3_data8", "vin3"), -}; - -static void __init marzen_init(void) -{ - regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, - ARRAY_SIZE(fixed3v3_power_consumers), 3300000); - regulator_register_fixed(1, dummy_supplies, - ARRAY_SIZE(dummy_supplies)); - - pinctrl_register_mappings(marzen_pinctrl_map, - ARRAY_SIZE(marzen_pinctrl_map)); - r8a7779_pinmux_init(); - r8a7779_init_irq_extpin(1); /* IRQ1 as individual interrupt */ - - r8a7779_add_standard_devices(); - platform_device_register_full(&vin1_info); - platform_device_register_full(&vin3_info); - platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices)); -} - -static const char *marzen_boards_compat_dt[] __initdata = { - "renesas,marzen", - NULL, -}; - -DT_MACHINE_START(MARZEN, "marzen") - .smp = smp_ops(r8a7779_smp_ops), - .map_io = r8a7779_map_io, - .init_early = r8a7779_add_early_devices, - .init_irq = r8a7779_init_irq_dt, - .init_machine = marzen_init, - .init_late = r8a7779_init_late, - .dt_compat = marzen_boards_compat_dt, - .init_time = r8a7779_earlytimer_init, -MACHINE_END diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c deleted file mode 100644 index 9cac8247c72b..000000000000 --- a/arch/arm/mach-shmobile/clock-r8a7740.c +++ /dev/null @@ -1,675 +0,0 @@ -/* - * R8A7740 processor support - * - * Copyright (C) 2011 Renesas Solutions Corp. - * Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/io.h> -#include <linux/sh_clk.h> -#include <linux/clkdev.h> - -#include "clock.h" -#include "common.h" -#include "r8a7740.h" - -/* - * | MDx | XTAL1/EXTAL1 | System | EXTALR | - * Clock |-------+-----------------+ clock | 32.768 | RCLK - * Mode | 2/1/0 | src MHz | source | KHz | source - * -------+-------+-----------------+-----------+--------+---------- - * 0 | 0 0 0 | External 20~50 | XTAL1 | O | EXTALR - * 1 | 0 0 1 | Crystal 20~30 | XTAL1 | O | EXTALR - * 2 | 0 1 0 | External 40~50 | XTAL1 / 2 | O | EXTALR - * 3 | 0 1 1 | Crystal 40~50 | XTAL1 / 2 | O | EXTALR - * 4 | 1 0 0 | External 20~50 | XTAL1 | x | XTAL1 / 1024 - * 5 | 1 0 1 | Crystal 20~30 | XTAL1 | x | XTAL1 / 1024 - * 6 | 1 1 0 | External 40~50 | XTAL1 / 2 | x | XTAL1 / 2048 - * 7 | 1 1 1 | Crystal 40~50 | XTAL1 / 2 | x | XTAL1 / 2048 - */ - -/* CPG registers */ -#define FRQCRA IOMEM(0xe6150000) -#define FRQCRB IOMEM(0xe6150004) -#define VCLKCR1 IOMEM(0xE6150008) -#define VCLKCR2 IOMEM(0xE615000c) -#define FRQCRC IOMEM(0xe61500e0) -#define FSIACKCR IOMEM(0xe6150018) -#define PLLC01CR IOMEM(0xe6150028) - -#define SUBCKCR IOMEM(0xe6150080) -#define USBCKCR IOMEM(0xe615008c) - -#define MSTPSR0 IOMEM(0xe6150030) -#define MSTPSR1 IOMEM(0xe6150038) -#define MSTPSR2 IOMEM(0xe6150040) -#define MSTPSR3 IOMEM(0xe6150048) -#define MSTPSR4 IOMEM(0xe615004c) -#define FSIBCKCR IOMEM(0xe6150090) -#define HDMICKCR IOMEM(0xe6150094) -#define SMSTPCR0 IOMEM(0xe6150130) -#define SMSTPCR1 IOMEM(0xe6150134) -#define SMSTPCR2 IOMEM(0xe6150138) -#define SMSTPCR3 IOMEM(0xe615013c) -#define SMSTPCR4 IOMEM(0xe6150140) - -#define FSIDIVA IOMEM(0xFE1F8000) -#define FSIDIVB IOMEM(0xFE1F8008) - -/* Fixed 32 KHz root clock from EXTALR pin */ -static struct clk extalr_clk = { - .rate = 32768, -}; - -/* - * 25MHz default rate for the EXTAL1 root input clock. - * If needed, reset this with clk_set_rate() from the platform code. - */ -static struct clk extal1_clk = { - .rate = 25000000, -}; - -/* - * 48MHz default rate for the EXTAL2 root input clock. - * If needed, reset this with clk_set_rate() from the platform code. - */ -static struct clk extal2_clk = { - .rate = 48000000, -}; - -/* - * 27MHz default rate for the DV_CLKI root input clock. - * If needed, reset this with clk_set_rate() from the platform code. 
- */ -static struct clk dv_clk = { - .rate = 27000000, -}; - -SH_CLK_RATIO(div2, 1, 2); -SH_CLK_RATIO(div1k, 1, 1024); - -SH_FIXED_RATIO_CLK(extal1_div2_clk, extal1_clk, div2); -SH_FIXED_RATIO_CLK(extal1_div1024_clk, extal1_clk, div1k); -SH_FIXED_RATIO_CLK(extal1_div2048_clk, extal1_div2_clk, div1k); -SH_FIXED_RATIO_CLK(extal2_div2_clk, extal2_clk, div2); - -static struct sh_clk_ops followparent_clk_ops = { - .recalc = followparent_recalc, -}; - -/* Main clock */ -static struct clk system_clk = { - .ops = &followparent_clk_ops, -}; - -SH_FIXED_RATIO_CLK(system_div2_clk, system_clk, div2); - -/* r_clk */ -static struct clk r_clk = { - .ops = &followparent_clk_ops, -}; - -/* PLLC0/PLLC1 */ -static unsigned long pllc01_recalc(struct clk *clk) -{ - unsigned long mult = 1; - - if (__raw_readl(PLLC01CR) & (1 << 14)) - mult = ((__raw_readl(clk->enable_reg) >> 24) & 0x7f) + 1; - - return clk->parent->rate * mult; -} - -static struct sh_clk_ops pllc01_clk_ops = { - .recalc = pllc01_recalc, -}; - -static struct clk pllc0_clk = { - .ops = &pllc01_clk_ops, - .flags = CLK_ENABLE_ON_INIT, - .parent = &system_clk, - .enable_reg = (void __iomem *)FRQCRC, -}; - -static struct clk pllc1_clk = { - .ops = &pllc01_clk_ops, - .flags = CLK_ENABLE_ON_INIT, - .parent = &system_div2_clk, - .enable_reg = (void __iomem *)FRQCRA, -}; - -/* PLLC1 / 2 */ -SH_FIXED_RATIO_CLK(pllc1_div2_clk, pllc1_clk, div2); - -/* USB clock */ -/* - * USBCKCR is controlling usb24 clock - * bit[7] : parent clock - * bit[6] : clock divide rate - * And this bit[7] is used as a "usb24s" from other devices. - * (Video clock / Sub clock / SPU clock) - * You can controll this clock as a below. - * - * struct clk *usb24 = clk_get(dev, "usb24"); - * struct clk *usb24s = clk_get(NULL, "usb24s"); - * struct clk *system = clk_get(NULL, "system_clk"); - * int rate = clk_get_rate(system); - * - * clk_set_parent(usb24s, system); // for bit[7] - * clk_set_rate(usb24, rate / 2); // for bit[6] - */ -static struct clk *usb24s_parents[] = { - [0] = &system_clk, - [1] = &extal2_clk -}; - -static int usb24s_enable(struct clk *clk) -{ - __raw_writel(__raw_readl(USBCKCR) & ~(1 << 8), USBCKCR); - - return 0; -} - -static void usb24s_disable(struct clk *clk) -{ - __raw_writel(__raw_readl(USBCKCR) | (1 << 8), USBCKCR); -} - -static int usb24s_set_parent(struct clk *clk, struct clk *parent) -{ - int i, ret; - u32 val; - - if (!clk->parent_table || !clk->parent_num) - return -EINVAL; - - /* Search the parent */ - for (i = 0; i < clk->parent_num; i++) - if (clk->parent_table[i] == parent) - break; - - if (i == clk->parent_num) - return -ENODEV; - - ret = clk_reparent(clk, parent); - if (ret < 0) - return ret; - - val = __raw_readl(USBCKCR); - val &= ~(1 << 7); - val |= i << 7; - __raw_writel(val, USBCKCR); - - return 0; -} - -static struct sh_clk_ops usb24s_clk_ops = { - .recalc = followparent_recalc, - .enable = usb24s_enable, - .disable = usb24s_disable, - .set_parent = usb24s_set_parent, -}; - -static struct clk usb24s_clk = { - .ops = &usb24s_clk_ops, - .parent_table = usb24s_parents, - .parent_num = ARRAY_SIZE(usb24s_parents), - .parent = &system_clk, -}; - -static unsigned long usb24_recalc(struct clk *clk) -{ - return clk->parent->rate / - ((__raw_readl(USBCKCR) & (1 << 6)) ? 1 : 2); -}; - -static int usb24_set_rate(struct clk *clk, unsigned long rate) -{ - u32 val; - - /* closer to which ? 
parent->rate or parent->rate/2 */ - val = __raw_readl(USBCKCR); - val &= ~(1 << 6); - val |= (rate > (clk->parent->rate / 4) * 3) << 6; - __raw_writel(val, USBCKCR); - - return 0; -} - -static struct sh_clk_ops usb24_clk_ops = { - .recalc = usb24_recalc, - .set_rate = usb24_set_rate, -}; - -static struct clk usb24_clk = { - .ops = &usb24_clk_ops, - .parent = &usb24s_clk, -}; - -/* External FSIACK/FSIBCK clock */ -static struct clk fsiack_clk = { -}; - -static struct clk fsibck_clk = { -}; - -static struct clk *main_clks[] = { - &extalr_clk, - &extal1_clk, - &extal2_clk, - &extal1_div2_clk, - &extal1_div1024_clk, - &extal1_div2048_clk, - &extal2_div2_clk, - &dv_clk, - &system_clk, - &system_div2_clk, - &r_clk, - &pllc0_clk, - &pllc1_clk, - &pllc1_div2_clk, - &usb24s_clk, - &usb24_clk, - &fsiack_clk, - &fsibck_clk, -}; - -/* DIV4 clocks */ -static void div4_kick(struct clk *clk) -{ - unsigned long value; - - /* set KICK bit in FRQCRB to update hardware setting */ - value = __raw_readl(FRQCRB); - value |= (1 << 31); - __raw_writel(value, FRQCRB); -} - -static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18, - 24, 32, 36, 48, 0, 72, 96, 0 }; - -static struct clk_div_mult_table div4_div_mult_table = { - .divisors = divisors, - .nr_divisors = ARRAY_SIZE(divisors), -}; - -static struct clk_div4_table div4_table = { - .div_mult_table = &div4_div_mult_table, - .kick = div4_kick, -}; - -enum { - DIV4_I, DIV4_ZG, DIV4_B, DIV4_M1, DIV4_HP, - DIV4_HPP, DIV4_USBP, DIV4_S, DIV4_ZB, DIV4_M3, DIV4_CP, - DIV4_NR -}; - -static struct clk div4_clks[DIV4_NR] = { - [DIV4_I] = SH_CLK_DIV4(&pllc1_clk, FRQCRA, 20, 0x6fff, CLK_ENABLE_ON_INIT), - [DIV4_ZG] = SH_CLK_DIV4(&pllc1_clk, FRQCRA, 16, 0x6fff, CLK_ENABLE_ON_INIT), - [DIV4_B] = SH_CLK_DIV4(&pllc1_clk, FRQCRA, 8, 0x6fff, CLK_ENABLE_ON_INIT), - [DIV4_M1] = SH_CLK_DIV4(&pllc1_clk, FRQCRA, 4, 0x6fff, CLK_ENABLE_ON_INIT), - [DIV4_HP] = SH_CLK_DIV4(&pllc1_clk, FRQCRB, 4, 0x6fff, 0), - [DIV4_HPP] = SH_CLK_DIV4(&pllc1_clk, FRQCRC, 20, 0x6fff, 0), - [DIV4_USBP] = SH_CLK_DIV4(&pllc1_clk, FRQCRC, 16, 0x6fff, 0), - [DIV4_S] = SH_CLK_DIV4(&pllc1_clk, FRQCRC, 12, 0x6fff, 0), - [DIV4_ZB] = SH_CLK_DIV4(&pllc1_clk, FRQCRC, 8, 0x6fff, 0), - [DIV4_M3] = SH_CLK_DIV4(&pllc1_clk, FRQCRC, 4, 0x6fff, 0), - [DIV4_CP] = SH_CLK_DIV4(&pllc1_clk, FRQCRC, 0, 0x6fff, 0), -}; - -/* DIV6 reparent */ -enum { - DIV6_HDMI, - DIV6_VCLK1, DIV6_VCLK2, - DIV6_FSIA, DIV6_FSIB, - DIV6_REPARENT_NR, -}; - -static struct clk *hdmi_parent[] = { - [0] = &pllc1_div2_clk, - [1] = &system_clk, - [2] = &dv_clk -}; - -static struct clk *vclk_parents[8] = { - [0] = &pllc1_div2_clk, - [2] = &dv_clk, - [3] = &usb24s_clk, - [4] = &extal1_div2_clk, - [5] = &extalr_clk, -}; - -static struct clk *fsia_parents[] = { - [0] = &pllc1_div2_clk, - [1] = &fsiack_clk, /* external clock */ -}; - -static struct clk *fsib_parents[] = { - [0] = &pllc1_div2_clk, - [1] = &fsibck_clk, /* external clock */ -}; - -static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = { - [DIV6_HDMI] = SH_CLK_DIV6_EXT(HDMICKCR, 0, - hdmi_parent, ARRAY_SIZE(hdmi_parent), 6, 2), - [DIV6_VCLK1] = SH_CLK_DIV6_EXT(VCLKCR1, 0, - vclk_parents, ARRAY_SIZE(vclk_parents), 12, 3), - [DIV6_VCLK2] = SH_CLK_DIV6_EXT(VCLKCR2, 0, - vclk_parents, ARRAY_SIZE(vclk_parents), 12, 3), - [DIV6_FSIA] = SH_CLK_DIV6_EXT(FSIACKCR, 0, - fsia_parents, ARRAY_SIZE(fsia_parents), 6, 2), - [DIV6_FSIB] = SH_CLK_DIV6_EXT(FSIBCKCR, 0, - fsib_parents, ARRAY_SIZE(fsib_parents), 6, 2), -}; - -/* DIV6 clocks */ -enum { - DIV6_SUB, - DIV6_NR -}; - -static struct clk div6_clks[DIV6_NR] = { - 
[DIV6_SUB] = SH_CLK_DIV6(&pllc1_div2_clk, SUBCKCR, 0), -}; - -/* HDMI1/2 clock */ -static unsigned long hdmi12_recalc(struct clk *clk) -{ - u32 val = __raw_readl(HDMICKCR); - int shift = (int)clk->priv; - - val >>= shift; - val &= 0x3; - - return clk->parent->rate / (1 << val); -}; - -static int hdmi12_set_rate(struct clk *clk, unsigned long rate) -{ - u32 val, mask; - int i, shift; - - for (i = 0; i < 3; i++) - if (rate == clk->parent->rate / (1 << i)) - goto find; - return -ENODEV; - -find: - shift = (int)clk->priv; - - val = __raw_readl(HDMICKCR); - mask = ~(0x3 << shift); - val = (val & mask) | i << shift; - __raw_writel(val, HDMICKCR); - - return 0; -}; - -static struct sh_clk_ops hdmi12_clk_ops = { - .recalc = hdmi12_recalc, - .set_rate = hdmi12_set_rate, -}; - -static struct clk hdmi1_clk = { - .ops = &hdmi12_clk_ops, - .priv = (void *)9, - .parent = &div6_reparent_clks[DIV6_HDMI], /* late install */ -}; - -static struct clk hdmi2_clk = { - .ops = &hdmi12_clk_ops, - .priv = (void *)11, - .parent = &div6_reparent_clks[DIV6_HDMI], /* late install */ -}; - -static struct clk *late_main_clks[] = { - &hdmi1_clk, - &hdmi2_clk, -}; - -/* FSI DIV */ -enum { FSIDIV_A, FSIDIV_B, FSIDIV_REPARENT_NR }; - -static struct clk fsidivs[] = { - [FSIDIV_A] = SH_CLK_FSIDIV(FSIDIVA, &div6_reparent_clks[DIV6_FSIA]), - [FSIDIV_B] = SH_CLK_FSIDIV(FSIDIVB, &div6_reparent_clks[DIV6_FSIB]), -}; - -/* MSTP */ -enum { - MSTP128, MSTP127, MSTP125, - MSTP116, MSTP111, MSTP100, MSTP117, - - MSTP230, MSTP229, - MSTP222, - MSTP218, MSTP217, MSTP216, MSTP214, - MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, - - MSTP329, MSTP328, MSTP323, MSTP320, - MSTP314, MSTP313, MSTP312, - MSTP309, MSTP304, - - MSTP416, MSTP415, MSTP407, MSTP406, - - MSTP_NR -}; - -static struct clk mstp_clks[MSTP_NR] = { - [MSTP128] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 28, 0), /* CEU21 */ - [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 27, 0), /* CEU20 */ - [MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ - [MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ - [MSTP116] = SH_CLK_MSTP32(&div4_clks[DIV4_HPP], SMSTPCR1, 16, 0), /* IIC0 */ - [MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 11, 0), /* TMU1 */ - [MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ - - [MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */ - [MSTP229] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 29, 0), /* INTCA */ - [MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */ - [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ - [MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ - [MSTP216] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */ - [MSTP214] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */ - [MSTP207] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ - [MSTP206] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ - [MSTP204] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ - [MSTP203] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */ - [MSTP202] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */ - [MSTP201] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ - [MSTP200] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ - - [MSTP329] = SH_CLK_MSTP32(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ - 
[MSTP328] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 28, 0), /* FSI */ - [MSTP323] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ - [MSTP320] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 20, 0), /* USBF */ - [MSTP314] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ - [MSTP313] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */ - [MSTP312] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */ - [MSTP309] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR3, 9, 0), /* GEther */ - [MSTP304] = SH_CLK_MSTP32(&div4_clks[DIV4_CP], SMSTPCR3, 4, 0), /* TPU0 */ - - [MSTP416] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR4, 16, 0), /* USBHOST */ - [MSTP415] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR4, 15, 0), /* SDHI2 */ - [MSTP407] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR4, 7, 0), /* USB-Func */ - [MSTP406] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR4, 6, 0), /* USB Phy */ -}; - -static struct clk_lookup lookups[] = { - /* main clocks */ - CLKDEV_CON_ID("extalr", &extalr_clk), - CLKDEV_CON_ID("extal1", &extal1_clk), - CLKDEV_CON_ID("extal2", &extal2_clk), - CLKDEV_CON_ID("extal1_div2", &extal1_div2_clk), - CLKDEV_CON_ID("extal1_div1024", &extal1_div1024_clk), - CLKDEV_CON_ID("extal1_div2048", &extal1_div2048_clk), - CLKDEV_CON_ID("extal2_div2", &extal2_div2_clk), - CLKDEV_CON_ID("dv_clk", &dv_clk), - CLKDEV_CON_ID("system_clk", &system_clk), - CLKDEV_CON_ID("system_div2_clk", &system_div2_clk), - CLKDEV_CON_ID("r_clk", &r_clk), - CLKDEV_CON_ID("pllc0_clk", &pllc0_clk), - CLKDEV_CON_ID("pllc1_clk", &pllc1_clk), - CLKDEV_CON_ID("pllc1_div2_clk", &pllc1_div2_clk), - CLKDEV_CON_ID("usb24s", &usb24s_clk), - CLKDEV_CON_ID("hdmi1", &hdmi1_clk), - CLKDEV_CON_ID("hdmi2", &hdmi2_clk), - CLKDEV_CON_ID("video1", &div6_reparent_clks[DIV6_VCLK1]), - CLKDEV_CON_ID("video2", &div6_reparent_clks[DIV6_VCLK2]), - CLKDEV_CON_ID("fsiack", &fsiack_clk), - CLKDEV_CON_ID("fsibck", &fsibck_clk), - - /* DIV4 clocks */ - CLKDEV_CON_ID("i_clk", &div4_clks[DIV4_I]), - CLKDEV_CON_ID("zg_clk", &div4_clks[DIV4_ZG]), - CLKDEV_CON_ID("b_clk", &div4_clks[DIV4_B]), - CLKDEV_CON_ID("m1_clk", &div4_clks[DIV4_M1]), - CLKDEV_CON_ID("hp_clk", &div4_clks[DIV4_HP]), - CLKDEV_CON_ID("hpp_clk", &div4_clks[DIV4_HPP]), - CLKDEV_CON_ID("s_clk", &div4_clks[DIV4_S]), - CLKDEV_CON_ID("zb_clk", &div4_clks[DIV4_ZB]), - CLKDEV_CON_ID("m3_clk", &div4_clks[DIV4_M3]), - CLKDEV_CON_ID("cp_clk", &div4_clks[DIV4_CP]), - - /* DIV6 clocks */ - CLKDEV_CON_ID("sub_clk", &div6_clks[DIV6_SUB]), - - /* MSTP32 clocks */ - CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), - CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), - CLKDEV_DEV_ID("fff20000.i2c", &mstp_clks[MSTP116]), - CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), - CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), - CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP128]), - - CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), - CLKDEV_DEV_ID("e6c80000.serial", &mstp_clks[MSTP200]), - CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), - CLKDEV_DEV_ID("e6c70000.serial", &mstp_clks[MSTP201]), - CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), - CLKDEV_DEV_ID("e6c60000.serial", &mstp_clks[MSTP202]), - CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), - CLKDEV_DEV_ID("e6c50000.serial", &mstp_clks[MSTP203]), - CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), - CLKDEV_DEV_ID("e6c40000.serial", &mstp_clks[MSTP204]), - CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), - CLKDEV_DEV_ID("e6c30000.serial", &mstp_clks[MSTP206]), - 
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), - CLKDEV_DEV_ID("e6cb0000.serial", &mstp_clks[MSTP207]), - CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]), - CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), - CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), - CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), - CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]), - CLKDEV_DEV_ID("e6cd0000.serial", &mstp_clks[MSTP222]), - CLKDEV_DEV_ID("renesas_intc_irqpin.0", &mstp_clks[MSTP229]), - CLKDEV_DEV_ID("renesas_intc_irqpin.1", &mstp_clks[MSTP229]), - CLKDEV_DEV_ID("renesas_intc_irqpin.2", &mstp_clks[MSTP229]), - CLKDEV_DEV_ID("renesas_intc_irqpin.3", &mstp_clks[MSTP229]), - CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]), - CLKDEV_DEV_ID("e6cc0000.serial", &mstp_clks[MSTP230]), - - CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), - CLKDEV_DEV_ID("fe1f0000.sound", &mstp_clks[MSTP328]), - CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), - CLKDEV_DEV_ID("e6c20000.i2c", &mstp_clks[MSTP323]), - CLKDEV_DEV_ID("renesas_usbhs", &mstp_clks[MSTP320]), - CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), - CLKDEV_DEV_ID("e6850000.sd", &mstp_clks[MSTP314]), - CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), - CLKDEV_DEV_ID("e6860000.sd", &mstp_clks[MSTP313]), - CLKDEV_DEV_ID("sh_mmcif", &mstp_clks[MSTP312]), - CLKDEV_DEV_ID("e6bd0000.mmc", &mstp_clks[MSTP312]), - CLKDEV_DEV_ID("r8a7740-gether", &mstp_clks[MSTP309]), - CLKDEV_DEV_ID("e9a00000.ethernet", &mstp_clks[MSTP309]), - CLKDEV_DEV_ID("renesas-tpu-pwm", &mstp_clks[MSTP304]), - CLKDEV_DEV_ID("e6600000.pwm", &mstp_clks[MSTP304]), - - CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP415]), - CLKDEV_DEV_ID("e6870000.sd", &mstp_clks[MSTP415]), - - /* ICK */ - CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP111]), - CLKDEV_ICK_ID("fck", "fff90000.timer", &mstp_clks[MSTP111]), - CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP125]), - CLKDEV_ICK_ID("fck", "fff80000.timer", &mstp_clks[MSTP125]), - CLKDEV_ICK_ID("fck", "sh-cmt-48.1", &mstp_clks[MSTP329]), - CLKDEV_ICK_ID("fck", "e6138000.timer", &mstp_clks[MSTP329]), - CLKDEV_ICK_ID("host", "renesas_usbhs", &mstp_clks[MSTP416]), - CLKDEV_ICK_ID("func", "renesas_usbhs", &mstp_clks[MSTP407]), - CLKDEV_ICK_ID("phy", "renesas_usbhs", &mstp_clks[MSTP406]), - CLKDEV_ICK_ID("pci", "renesas_usbhs", &div4_clks[DIV4_USBP]), - CLKDEV_ICK_ID("usb24", "renesas_usbhs", &usb24_clk), - CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), - - CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]), - CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]), - CLKDEV_ICK_ID("diva", "sh_fsi2", &fsidivs[FSIDIV_A]), - CLKDEV_ICK_ID("divb", "sh_fsi2", &fsidivs[FSIDIV_B]), - CLKDEV_ICK_ID("xcka", "sh_fsi2", &fsiack_clk), - CLKDEV_ICK_ID("xckb", "sh_fsi2", &fsibck_clk), -}; - -void __init r8a7740_clock_init(u8 md_ck) -{ - int k, ret = 0; - - /* detect system clock parent */ - if (md_ck & MD_CK1) - system_clk.parent = &extal1_div2_clk; - else - system_clk.parent = &extal1_clk; - - /* detect RCLK parent */ - switch (md_ck & (MD_CK2 | MD_CK1)) { - case MD_CK2 | MD_CK1: - r_clk.parent = &extal1_div2048_clk; - break; - case MD_CK2: - r_clk.parent = &extal1_div1024_clk; - break; - case MD_CK1: - default: - r_clk.parent = &extalr_clk; - break; - } - - for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) - ret = clk_register(main_clks[k]); - - if (!ret) - ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); - - if (!ret) - ret = sh_clk_div6_register(div6_clks, 
DIV6_NR); - - if (!ret) - ret = sh_clk_div6_reparent_register(div6_reparent_clks, - DIV6_REPARENT_NR); - - if (!ret) - ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); - - for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++) - ret = clk_register(late_main_clks[k]); - - if (!ret) - ret = sh_clk_fsidiv_register(fsidivs, FSIDIV_REPARENT_NR); - - clkdev_add_table(lookups, ARRAY_SIZE(lookups)); - - if (!ret) - shmobile_clk_init(); - else - panic("failed to setup r8a7740 clocks\n"); -} diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c deleted file mode 100644 index fa8ab2cc9187..000000000000 --- a/arch/arm/mach-shmobile/clock-r8a7779.c +++ /dev/null @@ -1,271 +0,0 @@ -/* - * r8a7779 clock framework support - * - * Copyright (C) 2011 Renesas Solutions Corp. - * Copyright (C) 2011 Magnus Damm - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#include <linux/bitops.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/io.h> -#include <linux/sh_clk.h> -#include <linux/clkdev.h> -#include <linux/sh_timer.h> - -#include "clock.h" -#include "common.h" -#include "r8a7779.h" - -/* - * MD1 = 1 MD1 = 0 - * (PLLA = 1500) (PLLA = 1600) - * (MHz) (MHz) - *------------------------------------------------+-------------------- - * clkz 1000 (2/3) 800 (1/2) - * clkzs 250 (1/6) 200 (1/8) - * clki 750 (1/2) 800 (1/2) - * clks 250 (1/6) 200 (1/8) - * clks1 125 (1/12) 100 (1/16) - * clks3 187.5 (1/8) 200 (1/8) - * clks4 93.7 (1/16) 100 (1/16) - * clkp 62.5 (1/24) 50 (1/32) - * clkg 62.5 (1/24) 66.6 (1/24) - * clkb, CLKOUT - * (MD2 = 0) 62.5 (1/24) 66.6 (1/24) - * (MD2 = 1) 41.6 (1/36) 50 (1/32) -*/ - -#define MD(nr) BIT(nr) - -#define MSTPCR0 IOMEM(0xffc80030) -#define MSTPCR1 IOMEM(0xffc80034) -#define MSTPCR3 IOMEM(0xffc8003c) -#define MSTPSR1 IOMEM(0xffc80044) - -/* ioremap() through clock mapping mandatory to avoid - * collision with ARM coherent DMA virtual memory range. - */ - -static struct clk_mapping cpg_mapping = { - .phys = 0xffc80000, - .len = 0x80, -}; - -/* - * Default rate for the root input clock, reset this with clk_set_rate() - * from the platform code. 
- */ -static struct clk plla_clk = { - /* .rate will be updated on r8a7779_clock_init() */ - .mapping = &cpg_mapping, -}; - -/* - * clock ratio of these clock will be updated - * on r8a7779_clock_init() - */ -SH_FIXED_RATIO_CLK_SET(clkz_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clkzs_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clki_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clks_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clks1_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clks3_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clks4_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clkb_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clkout_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clkp_clk, plla_clk, 1, 1); -SH_FIXED_RATIO_CLK_SET(clkg_clk, plla_clk, 1, 1); - -static struct clk *main_clks[] = { - &plla_clk, - &clkz_clk, - &clkzs_clk, - &clki_clk, - &clks_clk, - &clks1_clk, - &clks3_clk, - &clks4_clk, - &clkb_clk, - &clkout_clk, - &clkp_clk, - &clkg_clk, -}; - -enum { MSTP323, MSTP322, MSTP321, MSTP320, - MSTP120, - MSTP116, MSTP115, MSTP114, - MSTP110, MSTP109, MSTP108, - MSTP103, MSTP101, MSTP100, - MSTP030, - MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021, - MSTP016, MSTP015, MSTP014, - MSTP007, - MSTP_NR }; - -static struct clk mstp_clks[MSTP_NR] = { - [MSTP323] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 23, 0), /* SDHI0 */ - [MSTP322] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 22, 0), /* SDHI1 */ - [MSTP321] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 21, 0), /* SDHI2 */ - [MSTP320] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 20, 0), /* SDHI3 */ - [MSTP120] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1, 20, MSTPSR1, 0), /* VIN3 */ - [MSTP116] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1, 16, MSTPSR1, 0), /* PCIe */ - [MSTP115] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1, 15, MSTPSR1, 0), /* SATA */ - [MSTP114] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1, 14, MSTPSR1, 0), /* Ether */ - [MSTP110] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1, 10, MSTPSR1, 0), /* VIN0 */ - [MSTP109] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1, 9, MSTPSR1, 0), /* VIN1 */ - [MSTP108] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1, 8, MSTPSR1, 0), /* VIN2 */ - [MSTP103] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1, 3, MSTPSR1, 0), /* DU */ - [MSTP101] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1, 1, MSTPSR1, 0), /* USB2 */ - [MSTP100] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1, 0, MSTPSR1, 0), /* USB0/1 */ - [MSTP030] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 30, 0), /* I2C0 */ - [MSTP029] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 29, 0), /* I2C1 */ - [MSTP028] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 28, 0), /* I2C2 */ - [MSTP027] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 27, 0), /* I2C3 */ - [MSTP026] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 26, 0), /* SCIF0 */ - [MSTP025] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 25, 0), /* SCIF1 */ - [MSTP024] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 24, 0), /* SCIF2 */ - [MSTP023] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 23, 0), /* SCIF3 */ - [MSTP022] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 22, 0), /* SCIF4 */ - [MSTP021] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 21, 0), /* SCIF5 */ - [MSTP016] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 16, 0), /* TMU0 */ - [MSTP015] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 15, 0), /* TMU1 */ - [MSTP014] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 14, 0), /* TMU2 */ - [MSTP007] = SH_CLK_MSTP32(&clks_clk, MSTPCR0, 7, 0), /* HSPI */ -}; - -static struct clk_lookup lookups[] = { - /* main clocks */ - CLKDEV_CON_ID("plla_clk", &plla_clk), - CLKDEV_CON_ID("clkz_clk", &clkz_clk), - CLKDEV_CON_ID("clkzs_clk", &clkzs_clk), - - /* DIV4 clocks */ - 
CLKDEV_CON_ID("shyway_clk", &clks_clk), - CLKDEV_CON_ID("bus_clk", &clkout_clk), - CLKDEV_CON_ID("shyway4_clk", &clks4_clk), - CLKDEV_CON_ID("shyway3_clk", &clks3_clk), - CLKDEV_CON_ID("shyway1_clk", &clks1_clk), - CLKDEV_CON_ID("peripheral_clk", &clkp_clk), - - /* MSTP32 clocks */ - CLKDEV_DEV_ID("r8a7779-vin.3", &mstp_clks[MSTP120]), /* VIN3 */ - CLKDEV_DEV_ID("rcar-pcie", &mstp_clks[MSTP116]), /* PCIe */ - CLKDEV_DEV_ID("sata_rcar", &mstp_clks[MSTP115]), /* SATA */ - CLKDEV_DEV_ID("fc600000.sata", &mstp_clks[MSTP115]), /* SATA w/DT */ - CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */ - CLKDEV_DEV_ID("r8a7779-vin.0", &mstp_clks[MSTP110]), /* VIN0 */ - CLKDEV_DEV_ID("r8a7779-vin.1", &mstp_clks[MSTP109]), /* VIN1 */ - CLKDEV_DEV_ID("r8a7779-vin.2", &mstp_clks[MSTP108]), /* VIN2 */ - CLKDEV_DEV_ID("ehci-platform.1", &mstp_clks[MSTP101]), /* USB EHCI port2 */ - CLKDEV_DEV_ID("ohci-platform.1", &mstp_clks[MSTP101]), /* USB OHCI port2 */ - CLKDEV_DEV_ID("ehci-platform.0", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */ - CLKDEV_DEV_ID("ohci-platform.0", &mstp_clks[MSTP100]), /* USB OHCI port0/1 */ - CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]), /* TMU0 */ - CLKDEV_DEV_ID("i2c-rcar.0", &mstp_clks[MSTP030]), /* I2C0 */ - CLKDEV_DEV_ID("ffc70000.i2c", &mstp_clks[MSTP030]), /* I2C0 */ - CLKDEV_DEV_ID("i2c-rcar.1", &mstp_clks[MSTP029]), /* I2C1 */ - CLKDEV_DEV_ID("ffc71000.i2c", &mstp_clks[MSTP029]), /* I2C1 */ - CLKDEV_DEV_ID("i2c-rcar.2", &mstp_clks[MSTP028]), /* I2C2 */ - CLKDEV_DEV_ID("ffc72000.i2c", &mstp_clks[MSTP028]), /* I2C2 */ - CLKDEV_DEV_ID("i2c-rcar.3", &mstp_clks[MSTP027]), /* I2C3 */ - CLKDEV_DEV_ID("ffc73000.i2c", &mstp_clks[MSTP027]), /* I2C3 */ - CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP026]), /* SCIF0 */ - CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP025]), /* SCIF1 */ - CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP024]), /* SCIF2 */ - CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP023]), /* SCIF3 */ - CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP022]), /* SCIF4 */ - CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP021]), /* SCIF6 */ - CLKDEV_DEV_ID("sh-hspi.0", &mstp_clks[MSTP007]), /* HSPI0 */ - CLKDEV_DEV_ID("fffc7000.spi", &mstp_clks[MSTP007]), /* HSPI0 */ - CLKDEV_DEV_ID("sh-hspi.1", &mstp_clks[MSTP007]), /* HSPI1 */ - CLKDEV_DEV_ID("fffc8000.spi", &mstp_clks[MSTP007]), /* HSPI1 */ - CLKDEV_DEV_ID("sh-hspi.2", &mstp_clks[MSTP007]), /* HSPI2 */ - CLKDEV_DEV_ID("fffc6000.spi", &mstp_clks[MSTP007]), /* HSPI2 */ - CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP323]), /* SDHI0 */ - CLKDEV_DEV_ID("ffe4c000.sd", &mstp_clks[MSTP323]), /* SDHI0 */ - CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP322]), /* SDHI1 */ - CLKDEV_DEV_ID("ffe4d000.sd", &mstp_clks[MSTP322]), /* SDHI1 */ - CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP321]), /* SDHI2 */ - CLKDEV_DEV_ID("ffe4e000.sd", &mstp_clks[MSTP321]), /* SDHI2 */ - CLKDEV_DEV_ID("sh_mobile_sdhi.3", &mstp_clks[MSTP320]), /* SDHI3 */ - CLKDEV_DEV_ID("ffe4f000.sd", &mstp_clks[MSTP320]), /* SDHI3 */ - CLKDEV_DEV_ID("rcar-du-r8a7779", &mstp_clks[MSTP103]), /* DU */ -}; - -void __init r8a7779_clock_init(void) -{ - u32 mode = r8a7779_read_mode_pins(); - int k, ret = 0; - - if (mode & MD(1)) { - plla_clk.rate = 1500000000; - - SH_CLK_SET_RATIO(&clkz_clk_ratio, 2, 3); - SH_CLK_SET_RATIO(&clkzs_clk_ratio, 1, 6); - SH_CLK_SET_RATIO(&clki_clk_ratio, 1, 2); - SH_CLK_SET_RATIO(&clks_clk_ratio, 1, 6); - SH_CLK_SET_RATIO(&clks1_clk_ratio, 1, 12); - SH_CLK_SET_RATIO(&clks3_clk_ratio, 1, 8); - SH_CLK_SET_RATIO(&clks4_clk_ratio, 1, 16); - 
SH_CLK_SET_RATIO(&clkp_clk_ratio, 1, 24); - SH_CLK_SET_RATIO(&clkg_clk_ratio, 1, 24); - if (mode & MD(2)) { - SH_CLK_SET_RATIO(&clkb_clk_ratio, 1, 36); - SH_CLK_SET_RATIO(&clkout_clk_ratio, 1, 36); - } else { - SH_CLK_SET_RATIO(&clkb_clk_ratio, 1, 24); - SH_CLK_SET_RATIO(&clkout_clk_ratio, 1, 24); - } - } else { - plla_clk.rate = 1600000000; - - SH_CLK_SET_RATIO(&clkz_clk_ratio, 1, 2); - SH_CLK_SET_RATIO(&clkzs_clk_ratio, 1, 8); - SH_CLK_SET_RATIO(&clki_clk_ratio, 1, 2); - SH_CLK_SET_RATIO(&clks_clk_ratio, 1, 8); - SH_CLK_SET_RATIO(&clks1_clk_ratio, 1, 16); - SH_CLK_SET_RATIO(&clks3_clk_ratio, 1, 8); - SH_CLK_SET_RATIO(&clks4_clk_ratio, 1, 16); - SH_CLK_SET_RATIO(&clkp_clk_ratio, 1, 32); - SH_CLK_SET_RATIO(&clkg_clk_ratio, 1, 24); - if (mode & MD(2)) { - SH_CLK_SET_RATIO(&clkb_clk_ratio, 1, 32); - SH_CLK_SET_RATIO(&clkout_clk_ratio, 1, 32); - } else { - SH_CLK_SET_RATIO(&clkb_clk_ratio, 1, 24); - SH_CLK_SET_RATIO(&clkout_clk_ratio, 1, 24); - } - } - - for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) - ret = clk_register(main_clks[k]); - - if (!ret) - ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); - - clkdev_add_table(lookups, ARRAY_SIZE(lookups)); - - if (!ret) - shmobile_clk_init(); - else - panic("failed to setup r8a7779 clocks\n"); -} - -/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ -void __init __weak r8a7779_register_twd(void) { } - -void __init r8a7779_earlytimer_init(void) -{ - r8a7779_clock_init(); - r8a7779_register_twd(); - shmobile_earlytimer_init(); -} diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c deleted file mode 100644 index 3855fb024fdb..000000000000 --- a/arch/arm/mach-shmobile/clock-sh73a0.c +++ /dev/null @@ -1,752 +0,0 @@ -/* - * sh73a0 clock framework support - * - * Copyright (C) 2010 Magnus Damm - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/io.h> -#include <linux/sh_clk.h> -#include <linux/clkdev.h> -#include <asm/processor.h> -#include "clock.h" -#include "common.h" - -#define FRQCRA IOMEM(0xe6150000) -#define FRQCRB IOMEM(0xe6150004) -#define FRQCRD IOMEM(0xe61500e4) -#define VCLKCR1 IOMEM(0xe6150008) -#define VCLKCR2 IOMEM(0xe615000C) -#define VCLKCR3 IOMEM(0xe615001C) -#define ZBCKCR IOMEM(0xe6150010) -#define FLCKCR IOMEM(0xe6150014) -#define SD0CKCR IOMEM(0xe6150074) -#define SD1CKCR IOMEM(0xe6150078) -#define SD2CKCR IOMEM(0xe615007C) -#define FSIACKCR IOMEM(0xe6150018) -#define FSIBCKCR IOMEM(0xe6150090) -#define SUBCKCR IOMEM(0xe6150080) -#define SPUACKCR IOMEM(0xe6150084) -#define SPUVCKCR IOMEM(0xe6150094) -#define MSUCKCR IOMEM(0xe6150088) -#define HSICKCR IOMEM(0xe615008C) -#define MFCK1CR IOMEM(0xe6150098) -#define MFCK2CR IOMEM(0xe615009C) -#define DSITCKCR IOMEM(0xe6150060) -#define DSI0PCKCR IOMEM(0xe6150064) -#define DSI1PCKCR IOMEM(0xe6150068) -#define DSI0PHYCR 0xe615006C -#define DSI1PHYCR 0xe6150070 -#define PLLECR IOMEM(0xe61500d0) -#define PLL0CR IOMEM(0xe61500d8) -#define PLL1CR IOMEM(0xe6150028) -#define PLL2CR IOMEM(0xe615002c) -#define PLL3CR IOMEM(0xe61500dc) -#define SMSTPCR0 IOMEM(0xe6150130) -#define SMSTPCR1 IOMEM(0xe6150134) -#define SMSTPCR2 IOMEM(0xe6150138) -#define SMSTPCR3 IOMEM(0xe615013c) -#define SMSTPCR4 IOMEM(0xe6150140) -#define SMSTPCR5 IOMEM(0xe6150144) -#define CKSCR IOMEM(0xe61500c0) - -/* Fixed 32 KHz root clock from EXTALR pin */ -static struct clk r_clk = { - .rate = 32768, -}; - -/* - * 26MHz default rate for the EXTAL1 root input clock. - * If needed, reset this with clk_set_rate() from the platform code. - */ -struct clk sh73a0_extal1_clk = { - .rate = 26000000, -}; - -/* - * 48MHz default rate for the EXTAL2 root input clock. - * If needed, reset this with clk_set_rate() from the platform code. 
- */ -struct clk sh73a0_extal2_clk = { - .rate = 48000000, -}; - -static struct sh_clk_ops main_clk_ops = { - .recalc = followparent_recalc, -}; - -/* Main clock */ -static struct clk main_clk = { - /* .parent wll be set on sh73a0_clock_init() */ - .ops = &main_clk_ops, -}; - -/* PLL0, PLL1, PLL2, PLL3 */ -static unsigned long pll_recalc(struct clk *clk) -{ - unsigned long mult = 1; - - if (__raw_readl(PLLECR) & (1 << clk->enable_bit)) { - mult = (((__raw_readl(clk->enable_reg) >> 24) & 0x3f) + 1); - /* handle CFG bit for PLL1 and PLL2 */ - switch (clk->enable_bit) { - case 1: - case 2: - if (__raw_readl(clk->enable_reg) & (1 << 20)) - mult *= 2; - } - } - - return clk->parent->rate * mult; -} - -static struct sh_clk_ops pll_clk_ops = { - .recalc = pll_recalc, -}; - -static struct clk pll0_clk = { - .ops = &pll_clk_ops, - .flags = CLK_ENABLE_ON_INIT, - .parent = &main_clk, - .enable_reg = (void __iomem *)PLL0CR, - .enable_bit = 0, -}; - -static struct clk pll1_clk = { - .ops = &pll_clk_ops, - .flags = CLK_ENABLE_ON_INIT, - .parent = &main_clk, - .enable_reg = (void __iomem *)PLL1CR, - .enable_bit = 1, -}; - -static struct clk pll2_clk = { - .ops = &pll_clk_ops, - .flags = CLK_ENABLE_ON_INIT, - .parent = &main_clk, - .enable_reg = (void __iomem *)PLL2CR, - .enable_bit = 2, -}; - -static struct clk pll3_clk = { - .ops = &pll_clk_ops, - .flags = CLK_ENABLE_ON_INIT, - .parent = &main_clk, - .enable_reg = (void __iomem *)PLL3CR, - .enable_bit = 3, -}; - -/* A fixed divide block */ -SH_CLK_RATIO(div2, 1, 2); -SH_CLK_RATIO(div7, 1, 7); -SH_CLK_RATIO(div13, 1, 13); - -SH_FIXED_RATIO_CLK(extal1_div2_clk, sh73a0_extal1_clk, div2); -SH_FIXED_RATIO_CLK(extal2_div2_clk, sh73a0_extal2_clk, div2); -SH_FIXED_RATIO_CLK(main_div2_clk, main_clk, div2); -SH_FIXED_RATIO_CLK(pll1_div2_clk, pll1_clk, div2); -SH_FIXED_RATIO_CLK(pll1_div7_clk, pll1_clk, div7); -SH_FIXED_RATIO_CLK(pll1_div13_clk, pll1_clk, div13); - -/* External input clock */ -struct clk sh73a0_extcki_clk = { -}; - -struct clk sh73a0_extalr_clk = { -}; - -static struct clk *main_clks[] = { - &r_clk, - &sh73a0_extal1_clk, - &sh73a0_extal2_clk, - &extal1_div2_clk, - &extal2_div2_clk, - &main_clk, - &main_div2_clk, - &pll0_clk, - &pll1_clk, - &pll2_clk, - &pll3_clk, - &pll1_div2_clk, - &pll1_div7_clk, - &pll1_div13_clk, - &sh73a0_extcki_clk, - &sh73a0_extalr_clk, -}; - -static int frqcr_kick(void) -{ - int i; - - /* set KICK bit in FRQCRB to update hardware setting, check success */ - __raw_writel(__raw_readl(FRQCRB) | (1 << 31), FRQCRB); - for (i = 1000; i; i--) - if (__raw_readl(FRQCRB) & (1 << 31)) - cpu_relax(); - else - return i; - - return -ETIMEDOUT; -} - -static void div4_kick(struct clk *clk) -{ - frqcr_kick(); -} - -static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18, - 24, 0, 36, 48, 7 }; - -static struct clk_div_mult_table div4_div_mult_table = { - .divisors = divisors, - .nr_divisors = ARRAY_SIZE(divisors), -}; - -static struct clk_div4_table div4_table = { - .div_mult_table = &div4_div_mult_table, - .kick = div4_kick, -}; - -enum { DIV4_I, DIV4_ZG, DIV4_M3, DIV4_B, DIV4_M1, DIV4_M2, - DIV4_Z, DIV4_ZX, DIV4_HP, DIV4_NR }; - -#define DIV4(_reg, _bit, _mask, _flags) \ - SH_CLK_DIV4(&pll1_clk, _reg, _bit, _mask, _flags) - -static struct clk div4_clks[DIV4_NR] = { - [DIV4_I] = DIV4(FRQCRA, 20, 0xdff, CLK_ENABLE_ON_INIT), - /* - * ZG clock is dividing PLL0 frequency to supply SGX. Make sure not to - * exceed maximum frequencies of 201.5MHz for VDD_DVFS=1.175 and - * 239.2MHz for VDD_DVFS=1.315V. 
- */ - [DIV4_ZG] = SH_CLK_DIV4(&pll0_clk, FRQCRA, 16, 0xd7f, CLK_ENABLE_ON_INIT), - [DIV4_M3] = DIV4(FRQCRA, 12, 0x1dff, CLK_ENABLE_ON_INIT), - [DIV4_B] = DIV4(FRQCRA, 8, 0xdff, CLK_ENABLE_ON_INIT), - [DIV4_M1] = DIV4(FRQCRA, 4, 0x1dff, 0), - [DIV4_M2] = DIV4(FRQCRA, 0, 0x1dff, 0), - [DIV4_Z] = SH_CLK_DIV4(&pll0_clk, FRQCRB, 24, 0x97f, 0), - [DIV4_ZX] = DIV4(FRQCRB, 12, 0xdff, 0), - [DIV4_HP] = DIV4(FRQCRB, 4, 0xdff, 0), -}; - -static unsigned long twd_recalc(struct clk *clk) -{ - return clk_get_rate(clk->parent) / 4; -} - -static struct sh_clk_ops twd_clk_ops = { - .recalc = twd_recalc, -}; - -static struct clk twd_clk = { - .parent = &div4_clks[DIV4_Z], - .ops = &twd_clk_ops, -}; - -static struct sh_clk_ops zclk_ops, kicker_ops; -static const struct sh_clk_ops *div4_clk_ops; - -static int zclk_set_rate(struct clk *clk, unsigned long rate) -{ - int ret; - - if (!clk->parent || !__clk_get(clk->parent)) - return -ENODEV; - - if (readl(FRQCRB) & (1 << 31)) - return -EBUSY; - - if (rate == clk_get_rate(clk->parent)) { - /* 1:1 - switch off divider */ - __raw_writel(__raw_readl(FRQCRB) & ~(1 << 28), FRQCRB); - /* nullify the divider to prepare for the next time */ - ret = div4_clk_ops->set_rate(clk, rate / 2); - if (!ret) - ret = frqcr_kick(); - if (ret > 0) - ret = 0; - } else { - /* Enable the divider */ - __raw_writel(__raw_readl(FRQCRB) | (1 << 28), FRQCRB); - - ret = frqcr_kick(); - if (ret >= 0) - /* - * set the divider - call the DIV4 method, it will kick - * FRQCRB too - */ - ret = div4_clk_ops->set_rate(clk, rate); - if (ret < 0) - goto esetrate; - } - -esetrate: - __clk_put(clk->parent); - return ret; -} - -static long zclk_round_rate(struct clk *clk, unsigned long rate) -{ - unsigned long div_freq = div4_clk_ops->round_rate(clk, rate), - parent_freq = clk_get_rate(clk->parent); - - if (rate > div_freq && abs(parent_freq - rate) < rate - div_freq) - return parent_freq; - - return div_freq; -} - -static unsigned long zclk_recalc(struct clk *clk) -{ - /* - * Must recalculate frequencies in case PLL0 has been changed, even if - * the divisor is unused ATM! - */ - unsigned long div_freq = div4_clk_ops->recalc(clk); - - if (__raw_readl(FRQCRB) & (1 << 28)) - return div_freq; - - return clk_get_rate(clk->parent); -} - -static int kicker_set_rate(struct clk *clk, unsigned long rate) -{ - if (__raw_readl(FRQCRB) & (1 << 31)) - return -EBUSY; - - return div4_clk_ops->set_rate(clk, rate); -} - -static void div4_clk_extend(void) -{ - int i; - - div4_clk_ops = div4_clks[0].ops; - - /* Add a kicker-busy check before changing the rate */ - kicker_ops = *div4_clk_ops; - /* We extend the DIV4 clock with a 1:1 pass-through case */ - zclk_ops = *div4_clk_ops; - - kicker_ops.set_rate = kicker_set_rate; - zclk_ops.set_rate = zclk_set_rate; - zclk_ops.round_rate = zclk_round_rate; - zclk_ops.recalc = zclk_recalc; - - for (i = 0; i < DIV4_NR; i++) - div4_clks[i].ops = i == DIV4_Z ? 
&zclk_ops : &kicker_ops; -} - -enum { DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_ZB1, - DIV6_FLCTL, DIV6_SDHI0, DIV6_SDHI1, DIV6_SDHI2, - DIV6_FSIA, DIV6_FSIB, DIV6_SUB, - DIV6_SPUA, DIV6_SPUV, DIV6_MSU, - DIV6_HSI, DIV6_MFG1, DIV6_MFG2, - DIV6_DSIT, DIV6_DSI0P, DIV6_DSI1P, - DIV6_NR }; - -static struct clk *vck_parent[8] = { - [0] = &pll1_div2_clk, - [1] = &pll2_clk, - [2] = &sh73a0_extcki_clk, - [3] = &sh73a0_extal2_clk, - [4] = &main_div2_clk, - [5] = &sh73a0_extalr_clk, - [6] = &main_clk, -}; - -static struct clk *pll_parent[4] = { - [0] = &pll1_div2_clk, - [1] = &pll2_clk, - [2] = &pll1_div13_clk, -}; - -static struct clk *hsi_parent[4] = { - [0] = &pll1_div2_clk, - [1] = &pll2_clk, - [2] = &pll1_div7_clk, -}; - -static struct clk *pll_extal2_parent[] = { - [0] = &pll1_div2_clk, - [1] = &pll2_clk, - [2] = &sh73a0_extal2_clk, - [3] = &sh73a0_extal2_clk, -}; - -static struct clk *dsi_parent[8] = { - [0] = &pll1_div2_clk, - [1] = &pll2_clk, - [2] = &main_clk, - [3] = &sh73a0_extal2_clk, - [4] = &sh73a0_extcki_clk, -}; - -static struct clk div6_clks[DIV6_NR] = { - [DIV6_VCK1] = SH_CLK_DIV6_EXT(VCLKCR1, 0, - vck_parent, ARRAY_SIZE(vck_parent), 12, 3), - [DIV6_VCK2] = SH_CLK_DIV6_EXT(VCLKCR2, 0, - vck_parent, ARRAY_SIZE(vck_parent), 12, 3), - [DIV6_VCK3] = SH_CLK_DIV6_EXT(VCLKCR3, 0, - vck_parent, ARRAY_SIZE(vck_parent), 12, 3), - [DIV6_ZB1] = SH_CLK_DIV6_EXT(ZBCKCR, CLK_ENABLE_ON_INIT, - pll_parent, ARRAY_SIZE(pll_parent), 7, 1), - [DIV6_FLCTL] = SH_CLK_DIV6_EXT(FLCKCR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 7, 1), - [DIV6_SDHI0] = SH_CLK_DIV6_EXT(SD0CKCR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 6, 2), - [DIV6_SDHI1] = SH_CLK_DIV6_EXT(SD1CKCR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 6, 2), - [DIV6_SDHI2] = SH_CLK_DIV6_EXT(SD2CKCR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 6, 2), - [DIV6_FSIA] = SH_CLK_DIV6_EXT(FSIACKCR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 6, 1), - [DIV6_FSIB] = SH_CLK_DIV6_EXT(FSIBCKCR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 6, 1), - [DIV6_SUB] = SH_CLK_DIV6_EXT(SUBCKCR, 0, - pll_extal2_parent, ARRAY_SIZE(pll_extal2_parent), 6, 2), - [DIV6_SPUA] = SH_CLK_DIV6_EXT(SPUACKCR, 0, - pll_extal2_parent, ARRAY_SIZE(pll_extal2_parent), 6, 2), - [DIV6_SPUV] = SH_CLK_DIV6_EXT(SPUVCKCR, 0, - pll_extal2_parent, ARRAY_SIZE(pll_extal2_parent), 6, 2), - [DIV6_MSU] = SH_CLK_DIV6_EXT(MSUCKCR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 7, 1), - [DIV6_HSI] = SH_CLK_DIV6_EXT(HSICKCR, 0, - hsi_parent, ARRAY_SIZE(hsi_parent), 6, 2), - [DIV6_MFG1] = SH_CLK_DIV6_EXT(MFCK1CR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 7, 1), - [DIV6_MFG2] = SH_CLK_DIV6_EXT(MFCK2CR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 7, 1), - [DIV6_DSIT] = SH_CLK_DIV6_EXT(DSITCKCR, 0, - pll_parent, ARRAY_SIZE(pll_parent), 7, 1), - [DIV6_DSI0P] = SH_CLK_DIV6_EXT(DSI0PCKCR, 0, - dsi_parent, ARRAY_SIZE(dsi_parent), 12, 3), - [DIV6_DSI1P] = SH_CLK_DIV6_EXT(DSI1PCKCR, 0, - dsi_parent, ARRAY_SIZE(dsi_parent), 12, 3), -}; - -/* DSI DIV */ -static unsigned long dsiphy_recalc(struct clk *clk) -{ - u32 value; - - value = __raw_readl(clk->mapping->base); - - /* FIXME */ - if (!(value & 0x000B8000)) - return clk->parent->rate; - - value &= 0x3f; - value += 1; - - if ((value < 12) || - (value > 33)) { - pr_err("DSIPHY has wrong value (%d)", value); - return 0; - } - - return clk->parent->rate / value; -} - -static long dsiphy_round_rate(struct clk *clk, unsigned long rate) -{ - return clk_rate_mult_range_round(clk, 12, 33, rate); -} - -static void dsiphy_disable(struct clk *clk) -{ - u32 value; - - value = __raw_readl(clk->mapping->base); 
- value &= ~0x000B8000; - - __raw_writel(value , clk->mapping->base); -} - -static int dsiphy_enable(struct clk *clk) -{ - u32 value; - int multi; - - value = __raw_readl(clk->mapping->base); - multi = (value & 0x3f) + 1; - - if ((multi < 12) || (multi > 33)) - return -EIO; - - __raw_writel(value | 0x000B8000, clk->mapping->base); - - return 0; -} - -static int dsiphy_set_rate(struct clk *clk, unsigned long rate) -{ - u32 value; - int idx; - - idx = rate / clk->parent->rate; - if ((idx < 12) || (idx > 33)) - return -EINVAL; - - idx += -1; - - value = __raw_readl(clk->mapping->base); - value = (value & ~0x3f) + idx; - - __raw_writel(value, clk->mapping->base); - - return 0; -} - -static struct sh_clk_ops dsiphy_clk_ops = { - .recalc = dsiphy_recalc, - .round_rate = dsiphy_round_rate, - .set_rate = dsiphy_set_rate, - .enable = dsiphy_enable, - .disable = dsiphy_disable, -}; - -static struct clk_mapping dsi0phy_clk_mapping = { - .phys = DSI0PHYCR, - .len = 4, -}; - -static struct clk_mapping dsi1phy_clk_mapping = { - .phys = DSI1PHYCR, - .len = 4, -}; - -static struct clk dsi0phy_clk = { - .ops = &dsiphy_clk_ops, - .parent = &div6_clks[DIV6_DSI0P], /* late install */ - .mapping = &dsi0phy_clk_mapping, -}; - -static struct clk dsi1phy_clk = { - .ops = &dsiphy_clk_ops, - .parent = &div6_clks[DIV6_DSI1P], /* late install */ - .mapping = &dsi1phy_clk_mapping, -}; - -static struct clk *late_main_clks[] = { - &dsi0phy_clk, - &dsi1phy_clk, - &twd_clk, -}; - -enum { MSTP001, - MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP112, MSTP100, - MSTP219, MSTP218, MSTP217, - MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, - MSTP331, MSTP329, MSTP328, MSTP325, MSTP323, MSTP322, - MSTP314, MSTP313, MSTP312, MSTP311, - MSTP304, MSTP303, MSTP302, MSTP301, MSTP300, - MSTP411, MSTP410, MSTP403, - MSTP508, - MSTP_NR }; - -#define MSTP(_parent, _reg, _bit, _flags) \ - SH_CLK_MSTP32(_parent, _reg, _bit, _flags) - -static struct clk mstp_clks[MSTP_NR] = { - [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ - [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */ - [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */ - [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */ - [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */ - [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ - [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ - [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ - [MSTP112] = MSTP(&div4_clks[DIV4_ZG], SMSTPCR1, 12, 0), /* SGX */ - [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ - [MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */ - [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */ - [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* MP-DMAC */ - [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ - [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ - [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ - [MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */ - [MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */ - [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ - [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ - [MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */ - [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ - [MSTP328] = 
MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 28, 0), /*FSI*/ - [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */ - [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */ - [MSTP322] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 22, 0), /* USB */ - [MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */ - [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */ - [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */ - [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */ - [MSTP304] = MSTP(&main_div2_clk, SMSTPCR3, 4, 0), /* TPU0 */ - [MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */ - [MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */ - [MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */ - [MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */ - [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */ - [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */ - [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ - [MSTP508] = MSTP(&div4_clks[DIV4_HP], SMSTPCR5, 8, 0), /* INTCA0 */ -}; - -/* The lookups structure below includes duplicate entries for some clocks - * with alternate names. - * - The traditional name used when a device is initialised with platform data - * - The name used when a device is initialised using device tree - * The longer-term aim is to remove these duplicates, and indeed the - * lookups table entirely, by describing clocks using device tree. - */ -static struct clk_lookup lookups[] = { - /* main clocks */ - CLKDEV_CON_ID("r_clk", &r_clk), - CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */ - - /* DIV4 clocks */ - CLKDEV_DEV_ID("cpu0", &div4_clks[DIV4_Z]), - - /* DIV6 clocks */ - CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]), - CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]), - CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]), - CLKDEV_CON_ID("sdhi0_clk", &div6_clks[DIV6_SDHI0]), - CLKDEV_CON_ID("sdhi1_clk", &div6_clks[DIV6_SDHI1]), - CLKDEV_CON_ID("sdhi2_clk", &div6_clks[DIV6_SDHI2]), - - /* MSTP32 clocks */ - CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ - CLKDEV_DEV_ID("e6824000.i2c", &mstp_clks[MSTP001]), /* I2C2 */ - CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */ - CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */ - CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */ - CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */ - CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ - CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ - CLKDEV_DEV_ID("e6820000.i2c", &mstp_clks[MSTP116]), /* I2C0 */ - CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ - CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ - CLKDEV_DEV_ID("e6cd0000.serial", &mstp_clks[MSTP219]), /* SCIFA7 */ - CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */ - CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* MP-DMAC */ - CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ - CLKDEV_DEV_ID("e6cb0000.serial", &mstp_clks[MSTP207]), /* SCIFA5 */ - CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ - CLKDEV_DEV_ID("e6c3000.serial", &mstp_clks[MSTP206]), /* SCIFB */ - CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ - CLKDEV_DEV_ID("e6c40000.serial", &mstp_clks[MSTP204]), /* SCIFA0 */ - CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */ - 
CLKDEV_DEV_ID("e6c50000.serial", &mstp_clks[MSTP203]), /* SCIFA1 */ - CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */ - CLKDEV_DEV_ID("e6c60000.serial", &mstp_clks[MSTP202]), /* SCIFA2 */ - CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */ - CLKDEV_DEV_ID("e6c70000.serial", &mstp_clks[MSTP201]), /* SCIFA3 */ - CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */ - CLKDEV_DEV_ID("e6c80000.serial", &mstp_clks[MSTP200]), /* SCIFA4 */ - CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */ - CLKDEV_DEV_ID("e6cc0000.serial", &mstp_clks[MSTP331]), /* SCIFA6 */ - CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI */ - CLKDEV_DEV_ID("ec230000.sound", &mstp_clks[MSTP328]), /* FSI */ - CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */ - CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */ - CLKDEV_DEV_ID("e6822000.i2c", &mstp_clks[MSTP323]), /* I2C1 */ - CLKDEV_DEV_ID("renesas_usbhs", &mstp_clks[MSTP322]), /* USB */ - CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ - CLKDEV_DEV_ID("ee100000.sd", &mstp_clks[MSTP314]), /* SDHI0 */ - CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ - CLKDEV_DEV_ID("ee120000.sd", &mstp_clks[MSTP313]), /* SDHI1 */ - CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */ - CLKDEV_DEV_ID("e6bd0000.mmc", &mstp_clks[MSTP312]), /* MMCIF0 */ - CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */ - CLKDEV_DEV_ID("ee140000.sd", &mstp_clks[MSTP311]), /* SDHI2 */ - CLKDEV_DEV_ID("renesas-tpu-pwm.0", &mstp_clks[MSTP304]), /* TPU0 */ - CLKDEV_DEV_ID("renesas-tpu-pwm.1", &mstp_clks[MSTP303]), /* TPU1 */ - CLKDEV_DEV_ID("renesas-tpu-pwm.2", &mstp_clks[MSTP302]), /* TPU2 */ - CLKDEV_DEV_ID("renesas-tpu-pwm.3", &mstp_clks[MSTP301]), /* TPU3 */ - CLKDEV_DEV_ID("renesas-tpu-pwm.4", &mstp_clks[MSTP300]), /* TPU4 */ - CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */ - CLKDEV_DEV_ID("e6826000.i2c", &mstp_clks[MSTP411]), /* I2C3 */ - CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */ - CLKDEV_DEV_ID("e6828000.i2c", &mstp_clks[MSTP410]), /* I2C4 */ - CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ - CLKDEV_DEV_ID("renesas_intc_irqpin.0", &mstp_clks[MSTP508]), /* INTCA0 */ - CLKDEV_DEV_ID("e6900000.irqpin", &mstp_clks[MSTP508]), /* INTCA0 */ - CLKDEV_DEV_ID("renesas_intc_irqpin.1", &mstp_clks[MSTP508]), /* INTCA0 */ - CLKDEV_DEV_ID("e6900004.irqpin", &mstp_clks[MSTP508]), /* INTCA0 */ - CLKDEV_DEV_ID("renesas_intc_irqpin.2", &mstp_clks[MSTP508]), /* INTCA0 */ - CLKDEV_DEV_ID("e6900008.irqpin", &mstp_clks[MSTP508]), /* INTCA0 */ - CLKDEV_DEV_ID("renesas_intc_irqpin.3", &mstp_clks[MSTP508]), /* INTCA0 */ - CLKDEV_DEV_ID("e690000c.irqpin", &mstp_clks[MSTP508]), /* INTCA0 */ - - /* ICK */ - CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), - CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), - CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), - CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]), - CLKDEV_ICK_ID("dsiphy_clk", "sh-mipi-dsi.0", &dsi0phy_clk), - CLKDEV_ICK_ID("dsiphy_clk", "sh-mipi-dsi.1", &dsi1phy_clk), - CLKDEV_ICK_ID("fck", "sh-cmt-48.1", &mstp_clks[MSTP329]), /* CMT1 */ - CLKDEV_ICK_ID("fck", "e6138000.timer", &mstp_clks[MSTP329]), /* CMT1 */ - CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP125]), /* TMU0 */ -}; - -void __init sh73a0_clock_init(void) -{ - int k, ret = 0; - - /* Set SDHI clocks to a known state */ - __raw_writel(0x108, SD0CKCR); 
- __raw_writel(0x108, SD1CKCR); - __raw_writel(0x108, SD2CKCR); - - /* detect main clock parent */ - switch ((__raw_readl(CKSCR) >> 28) & 0x03) { - case 0: - main_clk.parent = &sh73a0_extal1_clk; - break; - case 1: - main_clk.parent = &extal1_div2_clk; - break; - case 2: - main_clk.parent = &sh73a0_extal2_clk; - break; - case 3: - main_clk.parent = &extal2_div2_clk; - break; - } - - for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) - ret = clk_register(main_clks[k]); - - if (!ret) { - ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); - if (!ret) - div4_clk_extend(); - } - - if (!ret) - ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR); - - if (!ret) - ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); - - for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++) - ret = clk_register(late_main_clks[k]); - - clkdev_add_table(lookups, ARRAY_SIZE(lookups)); - - if (!ret) - shmobile_clk_init(); - else - panic("failed to setup sh73a0 clocks\n"); -} diff --git a/arch/arm/mach-shmobile/dma-register.h b/arch/arm/mach-shmobile/dma-register.h deleted file mode 100644 index 52a2f66e600f..000000000000 --- a/arch/arm/mach-shmobile/dma-register.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - * SH-ARM CPU-specific DMA definitions, used by both DMA drivers - * - * Copyright (C) 2012 Renesas Solutions Corp - * - * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * Based on arch/sh/include/cpu-sh4/cpu/dma-register.h - * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef DMA_REGISTER_H -#define DMA_REGISTER_H - -/* - * Direct Memory Access Controller - */ - -/* Transmit sizes and respective CHCR register values */ -enum { - XMIT_SZ_8BIT = 0, - XMIT_SZ_16BIT = 1, - XMIT_SZ_32BIT = 2, - XMIT_SZ_64BIT = 7, - XMIT_SZ_128BIT = 3, - XMIT_SZ_256BIT = 4, - XMIT_SZ_512BIT = 5, -}; - -/* log2(size / 8) - used to calculate number of transfers */ -static const unsigned int dma_ts_shift[] = { - [XMIT_SZ_8BIT] = 0, - [XMIT_SZ_16BIT] = 1, - [XMIT_SZ_32BIT] = 2, - [XMIT_SZ_64BIT] = 3, - [XMIT_SZ_128BIT] = 4, - [XMIT_SZ_256BIT] = 5, - [XMIT_SZ_512BIT] = 6, -}; - -#define TS_LOW_BIT 0x3 /* --xx */ -#define TS_HI_BIT 0xc /* xx-- */ - -#define TS_LOW_SHIFT (3) -#define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */ - -#define TS_INDEX2VAL(i) \ - ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\ - (((i) & TS_HI_BIT) << TS_HI_SHIFT)) - -#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz))) -#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz))) - - -/* - * USB High-Speed DMAC - */ -/* Transmit sizes and respective CHCR register values */ -enum { - USBTS_XMIT_SZ_8BYTE = 0, - USBTS_XMIT_SZ_16BYTE = 1, - USBTS_XMIT_SZ_32BYTE = 2, -}; - -/* log2(size / 8) - used to calculate number of transfers */ -static const unsigned int dma_usbts_shift[] = { - [USBTS_XMIT_SZ_8BYTE] = 3, - [USBTS_XMIT_SZ_16BYTE] = 4, - [USBTS_XMIT_SZ_32BYTE] = 5, -}; - -#define USBTS_LOW_BIT 0x3 /* --xx */ -#define USBTS_HI_BIT 0x0 /* ---- */ - -#define USBTS_LOW_SHIFT 6 -#define USBTS_HI_SHIFT 0 - -#define USBTS_INDEX2VAL(i) (((i) & 3) << 6) - -#endif /* DMA_REGISTER_H */ diff --git a/arch/arm/mach-shmobile/include/mach/head-kzm9g.txt b/arch/arm/mach-shmobile/include/mach/head-kzm9g.txt deleted file mode 100644 index 9531f46a822a..000000000000 --- 
a/arch/arm/mach-shmobile/include/mach/head-kzm9g.txt +++ /dev/null @@ -1,410 +0,0 @@ -LIST "KZM9G low-level initialization routine." -LIST "Adapted from u-boot KZM9G support code." - -LIST "Copyright (C) 2013 Ulrich Hecht" - -LIST "This program is free software; you can redistribute it and/or modify" -LIST "it under the terms of the GNU General Public License version 2 as" -LIST "published by the Free Software Foundation." - -LIST "This program is distributed in the hope that it will be useful," -LIST "but WITHOUT ANY WARRANTY; without even the implied warranty of" -LIST "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the" -LIST "GNU General Public License for more details." - - -LIST "Register definitions:" - -LIST "Secure control register" -#define LIFEC_SEC_SRC (0xE6110008) - -LIST "RWDT" -#define RWDT_BASE (0xE6020000) -#define RWTCSRA0 (RWDT_BASE + 0x04) - -LIST "HPB Semaphore Control Registers" -#define HPBSCR_BASE (0xE6000000) -#define HPBCTRL6 (HPBSCR_BASE + 0x1030) - -#define SBSC1_BASE (0xFE400000) -#define SDCR0A (SBSC1_BASE + 0x0008) -#define SDCR1A (SBSC1_BASE + 0x000C) -#define SDPCRA (SBSC1_BASE + 0x0010) -#define SDCR0SA (SBSC1_BASE + 0x0018) -#define SDCR1SA (SBSC1_BASE + 0x001C) -#define RTCSRA (SBSC1_BASE + 0x0020) -#define RTCORA (SBSC1_BASE + 0x0028) -#define RTCORHA (SBSC1_BASE + 0x002C) -#define SDWCRC0A (SBSC1_BASE + 0x0040) -#define SDWCRC1A (SBSC1_BASE + 0x0044) -#define SDWCR00A (SBSC1_BASE + 0x0048) -#define SDWCR01A (SBSC1_BASE + 0x004C) -#define SDWCR10A (SBSC1_BASE + 0x0050) -#define SDWCR11A (SBSC1_BASE + 0x0054) -#define SDWCR2A (SBSC1_BASE + 0x0060) -#define SDWCRC2A (SBSC1_BASE + 0x0064) -#define ZQCCRA (SBSC1_BASE + 0x0068) -#define SDMRACR0A (SBSC1_BASE + 0x0084) -#define SDMRTMPCRA (SBSC1_BASE + 0x008C) -#define SDMRTMPMSKA (SBSC1_BASE + 0x0094) -#define SDGENCNTA (SBSC1_BASE + 0x009C) -#define SDDRVCR0A (SBSC1_BASE + 0x00B4) -#define DLLCNT0A (SBSC1_BASE + 0x0354) - -#define SDMRA1 (0xFE500000) -#define SDMRA2 (0xFE5C0000) -#define SDMRA3 (0xFE504000) - -#define SBSC2_BASE (0xFB400000) -#define SDCR0B (SBSC2_BASE + 0x0008) -#define SDCR1B (SBSC2_BASE + 0x000C) -#define SDPCRB (SBSC2_BASE + 0x0010) -#define SDCR0SB (SBSC2_BASE + 0x0018) -#define SDCR1SB (SBSC2_BASE + 0x001C) -#define RTCSRB (SBSC2_BASE + 0x0020) -#define RTCORB (SBSC2_BASE + 0x0028) -#define RTCORHB (SBSC2_BASE + 0x002C) -#define SDWCRC0B (SBSC2_BASE + 0x0040) -#define SDWCRC1B (SBSC2_BASE + 0x0044) -#define SDWCR00B (SBSC2_BASE + 0x0048) -#define SDWCR01B (SBSC2_BASE + 0x004C) -#define SDWCR10B (SBSC2_BASE + 0x0050) -#define SDWCR11B (SBSC2_BASE + 0x0054) -#define SDPDCR0B (SBSC2_BASE + 0x0058) -#define SDWCR2B (SBSC2_BASE + 0x0060) -#define SDWCRC2B (SBSC2_BASE + 0x0064) -#define ZQCCRB (SBSC2_BASE + 0x0068) -#define SDMRACR0B (SBSC2_BASE + 0x0084) -#define SDMRTMPCRB (SBSC2_BASE + 0x008C) -#define SDMRTMPMSKB (SBSC2_BASE + 0x0094) -#define SDGENCNTB (SBSC2_BASE + 0x009C) -#define DPHYCNT0B (SBSC2_BASE + 0x00A0) -#define DPHYCNT1B (SBSC2_BASE + 0x00A4) -#define DPHYCNT2B (SBSC2_BASE + 0x00A8) -#define SDDRVCR0B (SBSC2_BASE + 0x00B4) -#define DLLCNT0B (SBSC2_BASE + 0x0354) - -#define SDMRB1 (0xFB500000) -#define SDMRB2 (0xFB5C0000) -#define SDMRB3 (0xFB504000) - -#define CPG_BASE (0xE6150000) -#define FRQCRA (CPG_BASE + 0x0000) -#define FRQCRB (CPG_BASE + 0x0004) -#define FRQCRD (CPG_BASE + 0x00E4) -#define VCLKCR1 (CPG_BASE + 0x0008) -#define VCLKCR2 (CPG_BASE + 0x000C) -#define VCLKCR3 (CPG_BASE + 0x001C) -#define ZBCKCR (CPG_BASE + 0x0010) -#define FLCKCR (CPG_BASE + 
0x0014) -#define SD0CKCR (CPG_BASE + 0x0074) -#define SD1CKCR (CPG_BASE + 0x0078) -#define SD2CKCR (CPG_BASE + 0x007C) -#define FSIACKCR (CPG_BASE + 0x0018) -#define SUBCKCR (CPG_BASE + 0x0080) -#define SPUACKCR (CPG_BASE + 0x0084) -#define SPUVCKCR (CPG_BASE + 0x0094) -#define MSUCKCR (CPG_BASE + 0x0088) -#define HSICKCR (CPG_BASE + 0x008C) -#define FSIBCKCR (CPG_BASE + 0x0090) -#define MFCK1CR (CPG_BASE + 0x0098) -#define MFCK2CR (CPG_BASE + 0x009C) -#define DSITCKCR (CPG_BASE + 0x0060) -#define DSI0PCKCR (CPG_BASE + 0x0064) -#define DSI1PCKCR (CPG_BASE + 0x0068) -#define DSI0PHYCR (CPG_BASE + 0x006C) -#define DVFSCR3 (CPG_BASE + 0x0174) -#define DVFSCR4 (CPG_BASE + 0x0178) -#define DVFSCR5 (CPG_BASE + 0x017C) -#define MPMODE (CPG_BASE + 0x00CC) - -#define PLLECR (CPG_BASE + 0x00D0) -#define PLL0CR (CPG_BASE + 0x00D8) -#define PLL1CR (CPG_BASE + 0x0028) -#define PLL2CR (CPG_BASE + 0x002C) -#define PLL3CR (CPG_BASE + 0x00DC) -#define PLL0STPCR (CPG_BASE + 0x00F0) -#define PLL1STPCR (CPG_BASE + 0x00C8) -#define PLL2STPCR (CPG_BASE + 0x00F8) -#define PLL3STPCR (CPG_BASE + 0x00FC) -#define RMSTPCR0 (CPG_BASE + 0x0110) -#define RMSTPCR1 (CPG_BASE + 0x0114) -#define RMSTPCR2 (CPG_BASE + 0x0118) -#define RMSTPCR3 (CPG_BASE + 0x011C) -#define RMSTPCR4 (CPG_BASE + 0x0120) -#define RMSTPCR5 (CPG_BASE + 0x0124) -#define SMSTPCR0 (CPG_BASE + 0x0130) -#define SMSTPCR2 (CPG_BASE + 0x0138) -#define SMSTPCR3 (CPG_BASE + 0x013C) -#define CPGXXCR4 (CPG_BASE + 0x0150) -#define SRCR0 (CPG_BASE + 0x80A0) -#define SRCR2 (CPG_BASE + 0x80B0) -#define SRCR3 (CPG_BASE + 0x80A8) -#define VREFCR (CPG_BASE + 0x00EC) -#define PCLKCR (CPG_BASE + 0x1020) - -#define PORT32CR (0xE6051020) -#define PORT33CR (0xE6051021) -#define PORT34CR (0xE6051022) -#define PORT35CR (0xE6051023) - -LIST "DRAM initialization code:" - -EW RWTCSRA0, 0xA507 - -ED_AND LIFEC_SEC_SRC, 0xFFFF7FFF - -ED_AND SMSTPCR3,0xFFFF7FFF -ED_AND SRCR3, 0xFFFF7FFF -ED_AND SMSTPCR2,0xFFFBFFFF -ED_AND SRCR2, 0xFFFBFFFF -ED PLLECR, 0x00000000 - -WAIT_MASK PLLECR, 0x00000F00, 0x00000000 -WAIT_MASK FRQCRB, 0x80000000, 0x00000000 - -ED PLL0CR, 0x2D000000 -ED PLL1CR, 0x17100000 -ED FRQCRB, 0x96235880 -WAIT_MASK FRQCRB, 0x80000000, 0x00000000 - -ED FLCKCR, 0x0000000B -ED_AND SMSTPCR0, 0xFFFFFFFD - -ED_AND SRCR0, 0xFFFFFFFD -ED 0xE6001628, 0x514 -ED 0xE6001648, 0x514 -ED 0xE6001658, 0x514 -ED 0xE6001678, 0x514 - -ED DVFSCR4, 0x00092000 -ED DVFSCR5, 0x000000DC -ED PLLECR, 0x00000000 -WAIT_MASK PLLECR, 0x00000F00, 0x00000000 - -ED FRQCRA, 0x0012453C -ED FRQCRB, 0x80431350 -WAIT_MASK FRQCRB, 0x80000000, 0x00000000 -ED FRQCRD, 0x00000B0B -WAIT_MASK FRQCRD, 0x80000000, 0x00000000 - -ED PCLKCR, 0x00000003 -ED VCLKCR1, 0x0000012F -ED VCLKCR2, 0x00000119 -ED VCLKCR3, 0x00000119 -ED ZBCKCR, 0x00000002 -ED FLCKCR, 0x00000005 -ED SD0CKCR, 0x00000080 -ED SD1CKCR, 0x00000080 -ED SD2CKCR, 0x00000080 -ED FSIACKCR, 0x0000003F -ED FSIBCKCR, 0x0000003F -ED SUBCKCR, 0x00000080 -ED SPUACKCR, 0x0000000B -ED SPUVCKCR, 0x0000000B -ED MSUCKCR, 0x0000013F -ED HSICKCR, 0x00000080 -ED MFCK1CR, 0x0000003F -ED MFCK2CR, 0x0000003F -ED DSITCKCR, 0x00000107 -ED DSI0PCKCR, 0x00000313 -ED DSI1PCKCR, 0x0000130D -ED DSI0PHYCR, 0x2A800E0E -ED PLL0CR, 0x1E000000 -ED PLL0CR, 0x2D000000 -ED PLL1CR, 0x17100000 -ED PLL2CR, 0x27000080 -ED PLL3CR, 0x1D000000 -ED PLL0STPCR, 0x00080000 -ED PLL1STPCR, 0x000120C0 -ED PLL2STPCR, 0x00012000 -ED PLL3STPCR, 0x00000030 -ED PLLECR, 0x0000000B -WAIT_MASK PLLECR, 0x00000B00, 0x00000B00 - -ED DVFSCR3, 0x000120F0 -ED MPMODE, 0x00000020 -ED VREFCR, 0x0000028A -ED RMSTPCR0, 
0xE4628087 -ED RMSTPCR1, 0xFFFFFFFF -ED RMSTPCR2, 0x53FFFFFF -ED RMSTPCR3, 0xFFFFFFFF -ED RMSTPCR4, 0x00800D3D -ED RMSTPCR5, 0xFFFFF3FF -ED SMSTPCR2, 0x00000000 -ED SRCR2, 0x00040000 -ED_AND PLLECR, 0xFFFFFFF7 -WAIT_MASK PLLECR, 0x00000800, 0x00000000 - -LIST "set SBSC operational" -ED HPBCTRL6, 0x00000001 -WAIT_MASK HPBCTRL6, 0x00000001, 0x00000001 - -LIST "set SBSC operating frequency" -ED FRQCRD, 0x00001414 -WAIT_MASK FRQCRD, 0x80000000, 0x00000000 -ED PLL3CR, 0x1D000000 -ED_OR PLLECR, 0x00000008 -WAIT_MASK PLLECR, 0x00000800, 0x00000800 - -LIST "enable DLL oscillation in DDRPHY" -ED_OR DLLCNT0A, 0x00000002 - -LIST "wait >= 100 ns" -ED SDGENCNTA, 0x00000005 -WAIT_MASK SDGENCNTA, 0xFFFFFFFF, 0x00000000 - -LIST "target LPDDR2 device settings" -ED SDCR0A, 0xACC90159 -ED SDCR1A, 0x00010059 -ED SDWCRC0A, 0x50874114 -ED SDWCRC1A, 0x33199B37 -ED SDWCRC2A, 0x008F2313 -ED SDWCR00A, 0x31020707 -ED SDWCR01A, 0x0017040A -ED SDWCR10A, 0x31020707 -ED SDWCR11A, 0x0017040A - -ED SDDRVCR0A, 0x055557ff - -ED SDWCR2A, 0x30000000 - -LIST "drive CKE high" -ED_OR SDPCRA, 0x00000080 -WAIT_MASK SDPCRA, 0x00000080, 0x00000080 - -LIST "wait >= 200 us" -ED SDGENCNTA, 0x00002710 -WAIT_MASK SDGENCNTA, 0xFFFFFFFF, 0x00000000 - -LIST "issue reset command to LPDDR2 device" -ED SDMRACR0A, 0x0000003F -ED SDMRA1, 0x00000000 - -LIST "wait >= 10 (or 1) us (docs inconsistent)" -ED SDGENCNTA, 0x000001F4 -WAIT_MASK SDGENCNTA, 0xFFFFFFFF, 0x00000000 - -LIST "MRW ZS initialization calibration command" -ED SDMRACR0A, 0x0000FF0A -ED SDMRA3, 0x00000000 - -LIST "wait >= 1 us" -ED SDGENCNTA, 0x00000032 -WAIT_MASK SDGENCNTA, 0xFFFFFFFF, 0x00000000 - -LIST "specify operating mode in LPDDR2" -ED SDMRACR0A, 0x00002201 -ED SDMRA1, 0x00000000 -ED SDMRACR0A, 0x00000402 -ED SDMRA1, 0x00000000 -ED SDMRACR0A, 0x00000203 -ED SDMRA1, 0x00000000 - -LIST "initialize DDR interface" -ED SDMRA2, 0x00000000 - -LIST "temperature sensor control" -ED SDMRTMPCRA, 0x88800004 -ED SDMRTMPMSKA,0x00000004 - -LIST "auto-refreshing control" -ED RTCORA, 0xA55A0032 -ED RTCORHA, 0xA55A000C -ED RTCSRA, 0xA55A2048 - -ED_OR SDCR0A, 0x00000800 -ED_OR SDCR1A, 0x00000400 - -LIST "auto ZQ calibration control" -ED ZQCCRA, 0xFFF20000 - -ED_OR DLLCNT0B, 0x00000002 -ED SDGENCNTB, 0x00000005 -WAIT_MASK SDGENCNTB, 0xFFFFFFFF, 0x00000000 - -ED SDCR0B, 0xACC90159 -ED SDCR1B, 0x00010059 -ED SDWCRC0B, 0x50874114 -ED SDWCRC1B, 0x33199B37 -ED SDWCRC2B, 0x008F2313 -ED SDWCR00B, 0x31020707 -ED SDWCR01B, 0x0017040A -ED SDWCR10B, 0x31020707 -ED SDWCR11B, 0x0017040A -ED SDDRVCR0B, 0x055557ff -ED SDWCR2B, 0x30000000 -ED_OR SDPCRB, 0x00000080 -WAIT_MASK SDPCRB, 0x00000080, 0x00000080 - -ED SDGENCNTB, 0x00002710 -WAIT_MASK SDGENCNTB, 0xFFFFFFFF, 0x00000000 -ED SDMRACR0B, 0x0000003F - -LIST "upstream u-boot writes to SDMRA1A for both SBSC 1 and 2, which does" -LIST "not seem to make a lot of sense..." 
-ED SDMRB1, 0x00000000 - -ED SDGENCNTB, 0x000001F4 -WAIT_MASK SDGENCNTB, 0xFFFFFFFF, 0x00000000 - -ED SDMRACR0B, 0x0000FF0A -ED SDMRB3, 0x00000000 -ED SDGENCNTB, 0x00000032 -WAIT_MASK SDGENCNTB, 0xFFFFFFFF, 0x00000000 - -ED SDMRACR0B, 0x00002201 -ED SDMRB1, 0x00000000 -ED SDMRACR0B, 0x00000402 -ED SDMRB1, 0x00000000 -ED SDMRACR0B, 0x00000203 -ED SDMRB1, 0x00000000 -ED SDMRB2, 0x00000000 -ED SDMRTMPCRB, 0x88800004 -ED SDMRTMPMSKB, 0x00000004 -ED RTCORB, 0xA55A0032 -ED RTCORHB, 0xA55A000C -ED RTCSRB, 0xA55A2048 -ED_OR SDCR0B, 0x00000800 -ED_OR SDCR1B, 0x00000400 -ED ZQCCRB, 0xFFF20000 -ED_OR SDPDCR0B, 0x00030000 -ED DPHYCNT1B, 0xA5390000 -ED DPHYCNT0B, 0x00001200 -ED DPHYCNT1B, 0x07CE0000 -ED DPHYCNT0B, 0x00001247 -WAIT_MASK DPHYCNT2B, 0xFFFFFFFF, 0x07CE0000 - -ED_AND SDPDCR0B, 0xFFFCFFFF - -ED FRQCRD, 0x00000B0B -WAIT_MASK FRQCRD, 0x80000000, 0x00000000 - -ED CPGXXCR4, 0xfffffffc - -LIST "Setup SCIF4 / workaround" -EB PORT32CR, 0x12 -EB PORT33CR, 0x22 -EB PORT34CR, 0x12 -EB PORT35CR, 0x22 - -EW 0xE6C80000, 0 -EB 0xE6C80004, 0x19 -EW 0xE6C80008, 0x0030 -EW 0xE6C80018, 0 -EW 0xE6C80030, 0x0014 - -LIST "Magic to avoid hangs and corruption on DRAM writes." - -LIST "It has been observed that the system would most often hang while" -LIST "decompressing the kernel, and if it didn't it would always write" -LIST "a corrupt image to DRAM." -LIST "This problem does not occur in u-boot, and the reason is that" -LIST "u-boot performs an additional cache invalidation after setting up" -LIST "the DRAM controller. Such an invalidation should not be necessary at" -LIST "this point, and attempts at removing parts of the routine to arrive" -LIST "at the minimal snippet of code necessary to avoid the DRAM stability" -LIST "problem yielded the following:" - -MRC p15, 0, r0, c1, c0, 0 -MCR p15, 0, r0, c1, c0, 0 diff --git a/arch/arm/mach-shmobile/include/mach/zboot.h b/arch/arm/mach-shmobile/include/mach/zboot.h deleted file mode 100644 index 175ee05465da..000000000000 --- a/arch/arm/mach-shmobile/include/mach/zboot.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef ZBOOT_H -#define ZBOOT_H - -#include <mach/zboot_macros.h> - -/************************************************** - * - * board specific settings - * - **************************************************/ - -#ifdef CONFIG_MACH_KZM9G -#define MEMORY_START 0x43000000 -#include "mach/head-kzm9g.txt" -#else -#error "unsupported board." 
-#endif - -#endif /* ZBOOT_H */ diff --git a/arch/arm/mach-shmobile/include/mach/zboot_macros.h b/arch/arm/mach-shmobile/include/mach/zboot_macros.h deleted file mode 100644 index 14fd3d538e9a..000000000000 --- a/arch/arm/mach-shmobile/include/mach/zboot_macros.h +++ /dev/null @@ -1,108 +0,0 @@ -#ifndef __ZBOOT_MACRO_H -#define __ZBOOT_MACRO_H - -/* The LIST command is used to include comments in the script */ -.macro LIST comment -.endm - -/* The ED command is used to write a 32-bit word */ -.macro ED, addr, data - LDR r0, 1f - LDR r1, 2f - STR r1, [r0] - B 3f -1 : .long \addr -2 : .long \data -3 : -.endm - -/* The EW command is used to write a 16-bit word */ -.macro EW, addr, data - LDR r0, 1f - LDR r1, 2f - STRH r1, [r0] - B 3f -1 : .long \addr -2 : .long \data -3 : -.endm - -/* The EB command is used to write an 8-bit word */ -.macro EB, addr, data - LDR r0, 1f - LDR r1, 2f - STRB r1, [r0] - B 3f -1 : .long \addr -2 : .long \data -3 : -.endm - -/* The WAIT command is used to delay the execution */ -.macro WAIT, time, reg - LDR r1, 1f - LDR r0, 2f - STR r0, [r1] -10 : - LDR r0, [r1] - CMP r0, #0x00000000 - BNE 10b - NOP - B 3f -1 : .long \reg -2 : .long \time * 100 -3 : -.endm - -/* The DD command is used to read a 32-bit word */ -.macro DD, start, end - LDR r1, 1f - B 2f -1 : .long \start -2 : -.endm - -/* loop until a given value has been read (with mask) */ -.macro WAIT_MASK, addr, data, cmp - LDR r0, 2f - LDR r1, 3f - LDR r2, 4f -1: - LDR r3, [r0, #0] - AND r3, r1, r3 - CMP r2, r3 - BNE 1b - B 5f -2: .long \addr -3: .long \data -4: .long \cmp -5: -.endm - -/* read 32-bit value from addr, "or" an immediate and write back */ -.macro ED_OR, addr, data - LDR r4, 1f - LDR r5, 2f - LDR r6, [r4] - ORR r5, r6, r5 - STR r5, [r4] - B 3f -1: .long \addr -2: .long \data -3: -.endm - -/* read 32-bit value from addr, "and" an immediate and write back */ -.macro ED_AND, addr, data - LDR r4, 1f - LDR r5, 2f - LDR r6, [r4] - AND r5, r6, r5 - STR r5, [r4] - B 3f -1: .long \addr -2: .long \data -3: -.endm - -#endif /* __ZBOOT_MACRO_H */ diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c deleted file mode 100644 index fd63ae6532fc..000000000000 --- a/arch/arm/mach-shmobile/intc-sh73a0.c +++ /dev/null @@ -1,337 +0,0 @@ -/* - * sh73a0 processor support - INTC hardware block - * - * Copyright (C) 2010 Magnus Damm - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/irq.h> -#include <linux/io.h> -#include <linux/irqchip.h> -#include <linux/irqchip/arm-gic.h> - -#include <asm/mach-types.h> -#include <asm/mach/arch.h> - -#include "intc.h" -#include "irqs.h" -#include "sh73a0.h" - -enum { - UNUSED = 0, - - /* interrupt sources INTCS */ - PINTCS_PINT1, PINTCS_PINT2, - RTDMAC_0_DEI0, RTDMAC_0_DEI1, RTDMAC_0_DEI2, RTDMAC_0_DEI3, - CEU, MFI, BBIF2, VPU, TSIF1, _3DG_SGX543, _2DDMAC_2DDM0, - RTDMAC_1_DEI4, RTDMAC_1_DEI5, RTDMAC_1_DADERR, - KEYSC_KEY, VINT, MSIOF, - TMU0_TUNI00, TMU0_TUNI01, TMU0_TUNI02, - CMT0, TSIF0, CMT2, LMB, MSUG, MSU_MSU, MSU_MSU2, - CTI, RWDT0, ICB, PEP, ASA, JPU_JPEG, LCDC, LCRC, - RTDMAC_2_DEI6, RTDMAC_2_DEI7, RTDMAC_2_DEI8, RTDMAC_2_DEI9, - RTDMAC_3_DEI10, RTDMAC_3_DEI11, - FRC, GCU, LCDC1, CSIRX, - DSITX0_DSITX00, DSITX0_DSITX01, - SPU2_SPU0, SPU2_SPU1, FSI, - TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12, - TSIF2, CMT4, MFIS2, CPORTS2R, TSG, DMASCH1, SCUW, - VIO60, VIO61, CEU21, CSI21, DSITX1_DSITX10, DSITX1_DSITX11, - DISP, DSRV, EMUX2_EMUX20I, EMUX2_EMUX21I, - MSTIF0_MST00I, MSTIF0_MST01I, MSTIF1_MST10I, MSTIF1_MST11I, - SPUV, - - /* interrupt groups INTCS */ - RTDMAC_0, RTDMAC_1, RTDMAC_2, RTDMAC_3, - DSITX0, SPU2, TMU1, MSU, -}; - -static struct intc_vect intcs_vectors[] = { - INTCS_VECT(PINTCS_PINT1, 0x0600), INTCS_VECT(PINTCS_PINT2, 0x0620), - INTCS_VECT(RTDMAC_0_DEI0, 0x0800), INTCS_VECT(RTDMAC_0_DEI1, 0x0820), - INTCS_VECT(RTDMAC_0_DEI2, 0x0840), INTCS_VECT(RTDMAC_0_DEI3, 0x0860), - INTCS_VECT(CEU, 0x0880), INTCS_VECT(MFI, 0x0900), - INTCS_VECT(BBIF2, 0x0960), INTCS_VECT(VPU, 0x0980), - INTCS_VECT(TSIF1, 0x09a0), INTCS_VECT(_3DG_SGX543, 0x09e0), - INTCS_VECT(_2DDMAC_2DDM0, 0x0a00), - INTCS_VECT(RTDMAC_1_DEI4, 0x0b80), INTCS_VECT(RTDMAC_1_DEI5, 0x0ba0), - INTCS_VECT(RTDMAC_1_DADERR, 0x0bc0), - INTCS_VECT(KEYSC_KEY, 0x0be0), INTCS_VECT(VINT, 0x0c80), - INTCS_VECT(MSIOF, 0x0d20), - INTCS_VECT(TMU0_TUNI00, 0x0e80), INTCS_VECT(TMU0_TUNI01, 0x0ea0), - INTCS_VECT(TMU0_TUNI02, 0x0ec0), - INTCS_VECT(CMT0, 0x0f00), INTCS_VECT(TSIF0, 0x0f20), - INTCS_VECT(CMT2, 0x0f40), INTCS_VECT(LMB, 0x0f60), - INTCS_VECT(MSUG, 0x0f80), - INTCS_VECT(MSU_MSU, 0x0fa0), INTCS_VECT(MSU_MSU2, 0x0fc0), - INTCS_VECT(CTI, 0x0400), INTCS_VECT(RWDT0, 0x0440), - INTCS_VECT(ICB, 0x0480), INTCS_VECT(PEP, 0x04a0), - INTCS_VECT(ASA, 0x04c0), INTCS_VECT(JPU_JPEG, 0x0560), - INTCS_VECT(LCDC, 0x0580), INTCS_VECT(LCRC, 0x05a0), - INTCS_VECT(RTDMAC_2_DEI6, 0x1300), INTCS_VECT(RTDMAC_2_DEI7, 0x1320), - INTCS_VECT(RTDMAC_2_DEI8, 0x1340), INTCS_VECT(RTDMAC_2_DEI9, 0x1360), - INTCS_VECT(RTDMAC_3_DEI10, 0x1380), INTCS_VECT(RTDMAC_3_DEI11, 0x13a0), - INTCS_VECT(FRC, 0x1700), INTCS_VECT(GCU, 0x1760), - INTCS_VECT(LCDC1, 0x1780), INTCS_VECT(CSIRX, 0x17a0), - INTCS_VECT(DSITX0_DSITX00, 0x17c0), INTCS_VECT(DSITX0_DSITX01, 0x17e0), - INTCS_VECT(SPU2_SPU0, 0x1800), INTCS_VECT(SPU2_SPU1, 0x1820), - INTCS_VECT(FSI, 0x1840), - INTCS_VECT(TMU1_TUNI10, 0x1900), INTCS_VECT(TMU1_TUNI11, 0x1920), - INTCS_VECT(TMU1_TUNI12, 0x1940), - INTCS_VECT(TSIF2, 0x1960), INTCS_VECT(CMT4, 0x1980), - INTCS_VECT(MFIS2, 0x1a00), INTCS_VECT(CPORTS2R, 0x1a20), - INTCS_VECT(TSG, 0x1ae0), INTCS_VECT(DMASCH1, 0x1b00), - INTCS_VECT(SCUW, 0x1b40), - INTCS_VECT(VIO60, 0x1b60), INTCS_VECT(VIO61, 0x1b80), - INTCS_VECT(CEU21, 0x1ba0), INTCS_VECT(CSI21, 0x1be0), - INTCS_VECT(DSITX1_DSITX10, 0x1c00), INTCS_VECT(DSITX1_DSITX11, 0x1c20), - INTCS_VECT(DISP, 0x1c40), INTCS_VECT(DSRV, 0x1c60), - 
INTCS_VECT(EMUX2_EMUX20I, 0x1c80), INTCS_VECT(EMUX2_EMUX21I, 0x1ca0), - INTCS_VECT(MSTIF0_MST00I, 0x1cc0), INTCS_VECT(MSTIF0_MST01I, 0x1ce0), - INTCS_VECT(MSTIF1_MST10I, 0x1d00), INTCS_VECT(MSTIF1_MST11I, 0x1d20), - INTCS_VECT(SPUV, 0x2300), -}; - -static struct intc_group intcs_groups[] __initdata = { - INTC_GROUP(RTDMAC_0, RTDMAC_0_DEI0, RTDMAC_0_DEI1, - RTDMAC_0_DEI2, RTDMAC_0_DEI3), - INTC_GROUP(RTDMAC_1, RTDMAC_1_DEI4, RTDMAC_1_DEI5, RTDMAC_1_DADERR), - INTC_GROUP(RTDMAC_2, RTDMAC_2_DEI6, RTDMAC_2_DEI7, - RTDMAC_2_DEI8, RTDMAC_2_DEI9), - INTC_GROUP(RTDMAC_3, RTDMAC_3_DEI10, RTDMAC_3_DEI11), - INTC_GROUP(TMU1, TMU1_TUNI12, TMU1_TUNI11, TMU1_TUNI10), - INTC_GROUP(DSITX0, DSITX0_DSITX00, DSITX0_DSITX01), - INTC_GROUP(SPU2, SPU2_SPU0, SPU2_SPU1), - INTC_GROUP(MSU, MSU_MSU, MSU_MSU2), -}; - -static struct intc_mask_reg intcs_mask_registers[] = { - { 0xffd20184, 0xffd201c4, 8, /* IMR1SA / IMCR1SA */ - { 0, 0, 0, CEU, - 0, 0, 0, 0 } }, - { 0xffd20188, 0xffd201c8, 8, /* IMR2SA / IMCR2SA */ - { 0, 0, 0, VPU, - BBIF2, 0, 0, MFI } }, - { 0xffd2018c, 0xffd201cc, 8, /* IMR3SA / IMCR3SA */ - { 0, 0, 0, _2DDMAC_2DDM0, - 0, ASA, PEP, ICB } }, - { 0xffd20190, 0xffd201d0, 8, /* IMR4SA / IMCR4SA */ - { 0, 0, 0, CTI, - JPU_JPEG, 0, LCRC, LCDC } }, - { 0xffd20194, 0xffd201d4, 8, /* IMR5SA / IMCR5SA */ - { KEYSC_KEY, RTDMAC_1_DADERR, RTDMAC_1_DEI5, RTDMAC_1_DEI4, - RTDMAC_0_DEI3, RTDMAC_0_DEI2, RTDMAC_0_DEI1, RTDMAC_0_DEI0 } }, - { 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */ - { 0, 0, MSIOF, 0, - _3DG_SGX543, 0, 0, 0 } }, - { 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */ - { 0, TMU0_TUNI02, TMU0_TUNI01, TMU0_TUNI00, - 0, 0, 0, 0 } }, - { 0xffd201a0, 0xffd201e0, 8, /* IMR8SA / IMCR8SA */ - { 0, 0, 0, 0, - 0, MSU_MSU, MSU_MSU2, MSUG } }, - { 0xffd201a4, 0xffd201e4, 8, /* IMR9SA / IMCR9SA */ - { 0, RWDT0, CMT2, CMT0, - 0, 0, 0, 0 } }, - { 0xffd201ac, 0xffd201ec, 8, /* IMR11SA / IMCR11SA */ - { 0, 0, 0, 0, - 0, TSIF1, LMB, TSIF0 } }, - { 0xffd201b0, 0xffd201f0, 8, /* IMR12SA / IMCR12SA */ - { 0, 0, 0, 0, - 0, 0, PINTCS_PINT2, PINTCS_PINT1 } }, - { 0xffd50180, 0xffd501c0, 8, /* IMR0SA3 / IMCR0SA3 */ - { RTDMAC_2_DEI6, RTDMAC_2_DEI7, RTDMAC_2_DEI8, RTDMAC_2_DEI9, - RTDMAC_3_DEI10, RTDMAC_3_DEI11, 0, 0 } }, - { 0xffd50190, 0xffd501d0, 8, /* IMR4SA3 / IMCR4SA3 */ - { FRC, 0, 0, GCU, - LCDC1, CSIRX, DSITX0_DSITX00, DSITX0_DSITX01 } }, - { 0xffd50194, 0xffd501d4, 8, /* IMR5SA3 / IMCR5SA3 */ - { SPU2_SPU0, SPU2_SPU1, FSI, 0, - 0, 0, 0, 0 } }, - { 0xffd50198, 0xffd501d8, 8, /* IMR6SA3 / IMCR6SA3 */ - { TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12, 0, - TSIF2, CMT4, 0, 0 } }, - { 0xffd5019c, 0xffd501dc, 8, /* IMR7SA3 / IMCR7SA3 */ - { MFIS2, CPORTS2R, 0, 0, - 0, 0, 0, TSG } }, - { 0xffd501a0, 0xffd501e0, 8, /* IMR8SA3 / IMCR8SA3 */ - { DMASCH1, 0, SCUW, VIO60, - VIO61, CEU21, 0, CSI21 } }, - { 0xffd501a4, 0xffd501e4, 8, /* IMR9SA3 / IMCR9SA3 */ - { DSITX1_DSITX10, DSITX1_DSITX11, DISP, DSRV, - EMUX2_EMUX20I, EMUX2_EMUX21I, MSTIF0_MST00I, MSTIF0_MST01I } }, - { 0xffd501a8, 0xffd501e8, 8, /* IMR10SA3 / IMCR10SA3 */ - { MSTIF0_MST00I, MSTIF0_MST01I, 0, 0, - 0, 0, 0, 0 } }, - { 0xffd60180, 0xffd601c0, 8, /* IMR0SA4 / IMCR0SA4 */ - { SPUV, 0, 0, 0, - 0, 0, 0, 0 } }, -}; - -/* Priority is needed for INTCA to receive the INTCS interrupt */ -static struct intc_prio_reg intcs_prio_registers[] = { - { 0xffd20000, 0, 16, 4, /* IPRAS */ { CTI, 0, _2DDMAC_2DDM0, ICB } }, - { 0xffd20004, 0, 16, 4, /* IPRBS */ { JPU_JPEG, LCDC, 0, LCRC } }, - { 0xffd20008, 0, 16, 4, /* IPRCS */ { BBIF2, 0, 0, 0 } }, - { 0xffd2000c, 0, 16, 4, /* 
IPRDS */ { PINTCS_PINT1, PINTCS_PINT2, - 0, 0 } }, - { 0xffd20010, 0, 16, 4, /* IPRES */ { RTDMAC_0, CEU, MFI, VPU } }, - { 0xffd20014, 0, 16, 4, /* IPRFS */ { KEYSC_KEY, RTDMAC_1, - CMT2, CMT0 } }, - { 0xffd20018, 0, 16, 4, /* IPRGS */ { TMU0_TUNI00, TMU0_TUNI01, - TMU0_TUNI02, TSIF1 } }, - { 0xffd2001c, 0, 16, 4, /* IPRHS */ { VINT, 0, 0, 0 } }, - { 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, 0 } }, - { 0xffd20024, 0, 16, 4, /* IPRJS */ { 0, _3DG_SGX543, MSUG, MSU } }, - { 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, ASA, LMB, PEP } }, - { 0xffd20030, 0, 16, 4, /* IPRMS */ { 0, 0, 0, RWDT0 } }, - { 0xffd50000, 0, 16, 4, /* IPRAS3 */ { RTDMAC_2, 0, 0, 0 } }, - { 0xffd50004, 0, 16, 4, /* IPRBS3 */ { RTDMAC_3, 0, 0, 0 } }, - { 0xffd50020, 0, 16, 4, /* IPRIS3 */ { FRC, 0, 0, 0 } }, - { 0xffd50024, 0, 16, 4, /* IPRJS3 */ { LCDC1, CSIRX, DSITX0, 0 } }, - { 0xffd50028, 0, 16, 4, /* IPRKS3 */ { SPU2, 0, FSI, 0 } }, - { 0xffd50030, 0, 16, 4, /* IPRMS3 */ { TMU1, 0, 0, TSIF2 } }, - { 0xffd50034, 0, 16, 4, /* IPRNS3 */ { CMT4, 0, 0, 0 } }, - { 0xffd50038, 0, 16, 4, /* IPROS3 */ { MFIS2, CPORTS2R, 0, 0 } }, - { 0xffd50040, 0, 16, 4, /* IPRQS3 */ { DMASCH1, 0, SCUW, VIO60 } }, - { 0xffd50044, 0, 16, 4, /* IPRRS3 */ { VIO61, CEU21, 0, CSI21 } }, - { 0xffd50048, 0, 16, 4, /* IPRSS3 */ { DSITX1_DSITX10, DSITX1_DSITX11, - DISP, DSRV } }, - { 0xffd5004c, 0, 16, 4, /* IPRTS3 */ { EMUX2_EMUX20I, EMUX2_EMUX21I, - MSTIF0_MST00I, MSTIF0_MST01I } }, - { 0xffd50050, 0, 16, 4, /* IPRUS3 */ { MSTIF1_MST10I, MSTIF1_MST11I, - 0, 0 } }, - { 0xffd60000, 0, 16, 4, /* IPRAS4 */ { SPUV, 0, 0, 0 } }, -}; - -static struct resource intcs_resources[] __initdata = { - [0] = { - .start = 0xffd20000, - .end = 0xffd201ff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = 0xffd50000, - .end = 0xffd501ff, - .flags = IORESOURCE_MEM, - }, - [2] = { - .start = 0xffd60000, - .end = 0xffd601ff, - .flags = IORESOURCE_MEM, - } -}; - -static struct intc_desc intcs_desc __initdata = { - .name = "sh73a0-intcs", - .resource = intcs_resources, - .num_resources = ARRAY_SIZE(intcs_resources), - .hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers, - intcs_prio_registers, NULL, NULL), -}; - -static struct irqaction sh73a0_intcs_cascade; - -static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id) -{ - unsigned int evtcodeas = ioread32((void __iomem *)dev_id); - - generic_handle_irq(intcs_evt2irq(evtcodeas)); - - return IRQ_HANDLED; -} - -#define PINTER0_PHYS 0xe69000a0 -#define PINTER1_PHYS 0xe69000a4 -#define PINTER0_VIRT IOMEM(0xe69000a0) -#define PINTER1_VIRT IOMEM(0xe69000a4) -#define PINTRR0 IOMEM(0xe69000d0) -#define PINTRR1 IOMEM(0xe69000d4) - -#define PINT0A_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT0_IRQ(irq)) -#define PINT0B_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT0_IRQ(irq + 8)) -#define PINT0C_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT0_IRQ(irq + 16)) -#define PINT0D_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT0_IRQ(irq + 24)) -#define PINT1E_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT1_IRQ(irq)) - -INTC_PINT(intc_pint0, PINTER0_PHYS, 0xe69000b0, "sh73a0-pint0", \ - INTC_PINT_E(A), INTC_PINT_E(B), INTC_PINT_E(C), INTC_PINT_E(D), \ - INTC_PINT_V(A, PINT0A_IRQ), INTC_PINT_V(B, PINT0B_IRQ), \ - INTC_PINT_V(C, PINT0C_IRQ), INTC_PINT_V(D, PINT0D_IRQ), \ - INTC_PINT_E(A), INTC_PINT_E(B), INTC_PINT_E(C), INTC_PINT_E(D), \ - INTC_PINT_E(A), INTC_PINT_E(B), INTC_PINT_E(C), INTC_PINT_E(D)); - -INTC_PINT(intc_pint1, PINTER1_PHYS, 0xe69000c0, "sh73a0-pint1", \ - INTC_PINT_E(E), INTC_PINT_E_EMPTY, INTC_PINT_E_EMPTY, INTC_PINT_E_EMPTY, \ - 
INTC_PINT_V(E, PINT1E_IRQ), INTC_PINT_V_NONE, \ - INTC_PINT_V_NONE, INTC_PINT_V_NONE, \ - INTC_PINT_E_NONE, INTC_PINT_E_NONE, INTC_PINT_E_NONE, INTC_PINT_E(E), \ - INTC_PINT_E(E), INTC_PINT_E_NONE, INTC_PINT_E_NONE, INTC_PINT_E_NONE); - -static struct irqaction sh73a0_pint0_cascade; -static struct irqaction sh73a0_pint1_cascade; - -static void pint_demux(void __iomem *rr, void __iomem *er, int base_irq) -{ - unsigned long value = ioread32(rr) & ioread32(er); - int k; - - for (k = 0; k < 32; k++) { - if (value & (1 << (31 - k))) { - generic_handle_irq(base_irq + k); - iowrite32(~(1 << (31 - k)), rr); - } - } -} - -static irqreturn_t sh73a0_pint0_demux(int irq, void *dev_id) -{ - pint_demux(PINTRR0, PINTER0_VIRT, SH73A0_PINT0_IRQ(0)); - return IRQ_HANDLED; -} - -static irqreturn_t sh73a0_pint1_demux(int irq, void *dev_id) -{ - pint_demux(PINTRR1, PINTER1_VIRT, SH73A0_PINT1_IRQ(0)); - return IRQ_HANDLED; -} - -void __init sh73a0_init_irq(void) -{ - void __iomem *gic_dist_base = IOMEM(0xf0001000); - void __iomem *gic_cpu_base = IOMEM(0xf0000100); - void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE); - - gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE); - gic_init(0, 29, gic_dist_base, gic_cpu_base); - - register_intc_controller(&intcs_desc); - register_intc_controller(&intc_pint0_desc); - register_intc_controller(&intc_pint1_desc); - - /* demux using INTEVTSA */ - sh73a0_intcs_cascade.name = "INTCS cascade"; - sh73a0_intcs_cascade.handler = sh73a0_intcs_demux; - sh73a0_intcs_cascade.dev_id = intevtsa; - setup_irq(gic_spi(50), &sh73a0_intcs_cascade); - - /* PINT pins are sanely tied to the GIC as SPI */ - sh73a0_pint0_cascade.name = "PINT0 cascade"; - sh73a0_pint0_cascade.handler = sh73a0_pint0_demux; - setup_irq(gic_spi(33), &sh73a0_pint0_cascade); - - sh73a0_pint1_cascade.name = "PINT1 cascade"; - sh73a0_pint1_cascade.handler = sh73a0_pint1_demux; - setup_irq(gic_spi(34), &sh73a0_pint1_cascade); -} diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c index b0790fc32282..4e54512bee30 100644 --- a/arch/arm/mach-shmobile/platsmp-apmu.c +++ b/arch/arm/mach-shmobile/platsmp-apmu.c @@ -46,7 +46,7 @@ static int __maybe_unused apmu_power_on(void __iomem *p, int bit) return 0; } -static int apmu_power_off(void __iomem *p, int bit) +static int __maybe_unused apmu_power_off(void __iomem *p, int bit) { /* request Core Standby for next WFI */ writel_relaxed(3, p + CPUNCR_OFFS(bit)); @@ -67,7 +67,7 @@ static int __maybe_unused apmu_power_off_poll(void __iomem *p, int bit) return 0; } -static int apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu)) +static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu)) { void __iomem *p = apmu_cpus[cpu].iomem; diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c deleted file mode 100644 index 34608fcf0648..000000000000 --- a/arch/arm/mach-shmobile/pm-r8a7740.c +++ /dev/null @@ -1,129 +0,0 @@ -/* - * r8a7740 power management support - * - * Copyright (C) 2012 Renesas Solutions Corp. - * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
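The PINT handling removed above follows a common cascaded-demux pattern: the parent handler ANDs the interrupt request register with the enable register, walks the result MSB-first, hands each pending source to generic_handle_irq(), and acknowledges it the same way the removed pint_demux() does, by writing back the inverted bit. A minimal sketch of that pattern — register layout (enable at offset 0x0, request at offset 0x10) and the IRQ base are hypothetical, not taken from this patch — could look like:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>

#define EXAMPLE_IRQ_BASE	100	/* hypothetical Linux IRQ base */

static irqreturn_t example_pint_demux(int irq, void *dev_id)
{
	void __iomem *regs = dev_id;	/* hypothetical mapping of the PINT block */
	u32 pending = ioread32(regs + 0x10) & ioread32(regs + 0x00);
	int k;

	for (k = 0; k < 32; k++) {
		u32 bit = BIT(31 - k);

		if (pending & bit) {
			generic_handle_irq(EXAMPLE_IRQ_BASE + k);
			/* clear only the handled source, as pint_demux() above does */
			iowrite32(~bit, regs + 0x10);
		}
	}

	return IRQ_HANDLED;
}
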
- */ -#include <linux/console.h> -#include <linux/io.h> -#include <linux/suspend.h> - -#include "common.h" -#include "pm-rmobile.h" - -#define SYSC_BASE IOMEM(0xe6180000) - -#if defined(CONFIG_PM) && !defined(CONFIG_ARCH_MULTIPLATFORM) -static int r8a7740_pd_a3sm_suspend(void) -{ - /* - * The A3SM domain contains the CPU core and therefore it should - * only be turned off if the CPU is not in use. - */ - return -EBUSY; -} - -static int r8a7740_pd_a3sp_suspend(void) -{ - /* - * Serial consoles make use of SCIF hardware located in A3SP, - * keep such power domain on if "no_console_suspend" is set. - */ - return console_suspend_enabled ? 0 : -EBUSY; -} - -static int r8a7740_pd_d4_suspend(void) -{ - /* - * The D4 domain contains the Coresight-ETM hardware block and - * therefore it should only be turned off if the debug module is - * not in use. - */ - return -EBUSY; -} - -static struct rmobile_pm_domain r8a7740_pm_domains[] = { - { - .genpd.name = "A4LC", - .base = SYSC_BASE, - .bit_shift = 1, - }, { - .genpd.name = "A4MP", - .base = SYSC_BASE, - .bit_shift = 2, - }, { - .genpd.name = "D4", - .base = SYSC_BASE, - .bit_shift = 3, - .gov = &pm_domain_always_on_gov, - .suspend = r8a7740_pd_d4_suspend, - }, { - .genpd.name = "A4R", - .base = SYSC_BASE, - .bit_shift = 5, - }, { - .genpd.name = "A3RV", - .base = SYSC_BASE, - .bit_shift = 6, - }, { - .genpd.name = "A4S", - .base = SYSC_BASE, - .bit_shift = 10, - .no_debug = true, - }, { - .genpd.name = "A3SP", - .base = SYSC_BASE, - .bit_shift = 11, - .gov = &pm_domain_always_on_gov, - .no_debug = true, - .suspend = r8a7740_pd_a3sp_suspend, - }, { - .genpd.name = "A3SM", - .base = SYSC_BASE, - .bit_shift = 12, - .gov = &pm_domain_always_on_gov, - .suspend = r8a7740_pd_a3sm_suspend, - }, { - .genpd.name = "A3SG", - .base = SYSC_BASE, - .bit_shift = 13, - }, { - .genpd.name = "A4SU", - .base = SYSC_BASE, - .bit_shift = 20, - }, -}; - -void __init r8a7740_init_pm_domains(void) -{ - rmobile_init_domains(r8a7740_pm_domains, ARRAY_SIZE(r8a7740_pm_domains)); - pm_genpd_add_subdomain_names("A4R", "A3RV"); - pm_genpd_add_subdomain_names("A4S", "A3SP"); - pm_genpd_add_subdomain_names("A4S", "A3SM"); - pm_genpd_add_subdomain_names("A4S", "A3SG"); -} -#endif /* CONFIG_PM && !CONFIG_ARCH_MULTIPLATFORM */ - -#ifdef CONFIG_SUSPEND -static int r8a7740_enter_suspend(suspend_state_t suspend_state) -{ - cpu_do_idle(); - return 0; -} - -static void r8a7740_suspend_init(void) -{ - shmobile_suspend_ops.enter = r8a7740_enter_suspend; -} -#else -static void r8a7740_suspend_init(void) {} -#endif - -void __init r8a7740_pm_init(void) -{ - r8a7740_suspend_init(); -} diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c index 44a74c4c5a01..47a862e7f8ba 100644 --- a/arch/arm/mach-shmobile/pm-r8a7779.c +++ b/arch/arm/mach-shmobile/pm-r8a7779.c @@ -35,7 +35,8 @@ struct r8a7779_pm_domain { struct rcar_sysc_ch ch; }; -static inline struct rcar_sysc_ch *to_r8a7779_ch(struct generic_pm_domain *d) +static inline +const struct rcar_sysc_ch *to_r8a7779_ch(struct generic_pm_domain *d) { return &container_of(d, struct r8a7779_pm_domain, genpd)->ch; } @@ -83,7 +84,6 @@ static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd) { struct generic_pm_domain *genpd = &r8a7779_pd->genpd; - genpd->flags = GENPD_FLAG_PM_CLK; pm_genpd_init(genpd, NULL, false); genpd->dev_ops.active_wakeup = pd_active_wakeup; genpd->power_off = pd_power_down; diff --git a/arch/arm/mach-shmobile/pm-rcar.c b/arch/arm/mach-shmobile/pm-rcar.c index 00022ee56f80..4092ad16e0a4 
100644 --- a/arch/arm/mach-shmobile/pm-rcar.c +++ b/arch/arm/mach-shmobile/pm-rcar.c @@ -15,32 +15,58 @@ #include <asm/io.h> #include "pm-rcar.h" -/* SYSC */ -#define SYSCSR 0x00 -#define SYSCISR 0x04 -#define SYSCISCR 0x08 +/* SYSC Common */ +#define SYSCSR 0x00 /* SYSC Status Register */ +#define SYSCISR 0x04 /* Interrupt Status Register */ +#define SYSCISCR 0x08 /* Interrupt Status Clear Register */ +#define SYSCIER 0x0c /* Interrupt Enable Register */ +#define SYSCIMR 0x10 /* Interrupt Mask Register */ -#define PWRSR_OFFS 0x00 -#define PWROFFCR_OFFS 0x04 -#define PWRONCR_OFFS 0x0c -#define PWRER_OFFS 0x14 +/* SYSC Status Register */ +#define SYSCSR_PONENB 1 /* Ready for power resume requests */ +#define SYSCSR_POFFENB 0 /* Ready for power shutoff requests */ -#define SYSCSR_RETRIES 100 -#define SYSCSR_DELAY_US 1 +/* + * Power Control Register Offsets inside the register block for each domain + * Note: The "CR" registers for ARM cores exist on H1 only + * Use WFI to power off, CPG/APMU to resume ARM cores on R-Car Gen2 + */ +#define PWRSR_OFFS 0x00 /* Power Status Register */ +#define PWROFFCR_OFFS 0x04 /* Power Shutoff Control Register */ +#define PWROFFSR_OFFS 0x08 /* Power Shutoff Status Register */ +#define PWRONCR_OFFS 0x0c /* Power Resume Control Register */ +#define PWRONSR_OFFS 0x10 /* Power Resume Status Register */ +#define PWRER_OFFS 0x14 /* Power Shutoff/Resume Error */ + + +#define SYSCSR_RETRIES 100 +#define SYSCSR_DELAY_US 1 + +#define PWRER_RETRIES 100 +#define PWRER_DELAY_US 1 -#define SYSCISR_RETRIES 1000 -#define SYSCISR_DELAY_US 1 +#define SYSCISR_RETRIES 1000 +#define SYSCISR_DELAY_US 1 static void __iomem *rcar_sysc_base; static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */ -static int rcar_sysc_pwr_on_off(struct rcar_sysc_ch *sysc_ch, - int sr_bit, int reg_offs) +static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on) { + unsigned int sr_bit, reg_offs; int k; + if (on) { + sr_bit = SYSCSR_PONENB; + reg_offs = PWRONCR_OFFS; + } else { + sr_bit = SYSCSR_POFFENB; + reg_offs = PWROFFCR_OFFS; + } + + /* Wait until SYSC is ready to accept a power request */ for (k = 0; k < SYSCSR_RETRIES; k++) { - if (ioread32(rcar_sysc_base + SYSCSR) & (1 << sr_bit)) + if (ioread32(rcar_sysc_base + SYSCSR) & BIT(sr_bit)) break; udelay(SYSCSR_DELAY_US); } @@ -48,27 +74,17 @@ static int rcar_sysc_pwr_on_off(struct rcar_sysc_ch *sysc_ch, if (k == SYSCSR_RETRIES) return -EAGAIN; - iowrite32(1 << sysc_ch->chan_bit, + /* Submit power shutoff or power resume request */ + iowrite32(BIT(sysc_ch->chan_bit), rcar_sysc_base + sysc_ch->chan_offs + reg_offs); return 0; } -static int rcar_sysc_pwr_off(struct rcar_sysc_ch *sysc_ch) -{ - return rcar_sysc_pwr_on_off(sysc_ch, 0, PWROFFCR_OFFS); -} - -static int rcar_sysc_pwr_on(struct rcar_sysc_ch *sysc_ch) +static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on) { - return rcar_sysc_pwr_on_off(sysc_ch, 1, PWRONCR_OFFS); -} - -static int rcar_sysc_update(struct rcar_sysc_ch *sysc_ch, - int (*on_off_fn)(struct rcar_sysc_ch *)) -{ - unsigned int isr_mask = 1 << sysc_ch->isr_bit; - unsigned int chan_mask = 1 << sysc_ch->chan_bit; + unsigned int isr_mask = BIT(sysc_ch->isr_bit); + unsigned int chan_mask = BIT(sysc_ch->chan_bit); unsigned int status; unsigned long flags; int ret = 0; @@ -78,15 +94,26 @@ static int rcar_sysc_update(struct rcar_sysc_ch *sysc_ch, iowrite32(isr_mask, rcar_sysc_base + SYSCISCR); - do { - ret = on_off_fn(sysc_ch); + /* Submit power shutoff or resume request until it was accepted 
*/ + for (k = 0; k < PWRER_RETRIES; k++) { + ret = rcar_sysc_pwr_on_off(sysc_ch, on); if (ret) goto out; status = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRER_OFFS); - } while (status & chan_mask); + if (!(status & chan_mask)) + break; + + udelay(PWRER_DELAY_US); + } + + if (k == PWRER_RETRIES) { + ret = -EIO; + goto out; + } + /* Wait until the power shutoff or resume request has completed * */ for (k = 0; k < SYSCISR_RETRIES; k++) { if (ioread32(rcar_sysc_base + SYSCISR) & isr_mask) break; @@ -106,22 +133,22 @@ static int rcar_sysc_update(struct rcar_sysc_ch *sysc_ch, return ret; } -int rcar_sysc_power_down(struct rcar_sysc_ch *sysc_ch) +int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch) { - return rcar_sysc_update(sysc_ch, rcar_sysc_pwr_off); + return rcar_sysc_power(sysc_ch, false); } -int rcar_sysc_power_up(struct rcar_sysc_ch *sysc_ch) +int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch) { - return rcar_sysc_update(sysc_ch, rcar_sysc_pwr_on); + return rcar_sysc_power(sysc_ch, true); } -bool rcar_sysc_power_is_off(struct rcar_sysc_ch *sysc_ch) +bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch) { unsigned int st; st = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS); - if (st & (1 << sysc_ch->chan_bit)) + if (st & BIT(sysc_ch->chan_bit)) return true; return false; diff --git a/arch/arm/mach-shmobile/pm-rcar.h b/arch/arm/mach-shmobile/pm-rcar.h index ef3a1ef628f1..1b901db4a24c 100644 --- a/arch/arm/mach-shmobile/pm-rcar.h +++ b/arch/arm/mach-shmobile/pm-rcar.h @@ -2,14 +2,14 @@ #define PM_RCAR_H struct rcar_sysc_ch { - unsigned long chan_offs; - unsigned int chan_bit; - unsigned int isr_bit; + u16 chan_offs; + u8 chan_bit; + u8 isr_bit; }; -int rcar_sysc_power_down(struct rcar_sysc_ch *sysc_ch); -int rcar_sysc_power_up(struct rcar_sysc_ch *sysc_ch); -bool rcar_sysc_power_is_off(struct rcar_sysc_ch *sysc_ch); +int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch); +int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch); +bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch); void __iomem *rcar_sysc_init(phys_addr_t base); #endif /* PM_RCAR_H */ diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index 95018209ff0b..a5b96b990aea 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -34,6 +34,12 @@ #define PSTR_RETRIES 100 #define PSTR_DELAY_US 10 +static inline +struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) +{ + return container_of(d, struct rmobile_pm_domain, genpd); +} + static int rmobile_pd_power_down(struct generic_pm_domain *genpd) { struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd); @@ -42,7 +48,7 @@ static int rmobile_pd_power_down(struct generic_pm_domain *genpd) if (rmobile_pd->bit_shift == ~0) return -EBUSY; - mask = 1 << rmobile_pd->bit_shift; + mask = BIT(rmobile_pd->bit_shift); if (rmobile_pd->suspend) { int ret = rmobile_pd->suspend(); @@ -79,7 +85,7 @@ static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd, if (rmobile_pd->bit_shift == ~0) return 0; - mask = 1 << rmobile_pd->bit_shift; + mask = BIT(rmobile_pd->bit_shift); if (__raw_readl(rmobile_pd->base + PSTR) & mask) goto out; @@ -163,43 +169,6 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) __rmobile_pd_power_up(rmobile_pd, false); } -#ifdef CONFIG_ARCH_SHMOBILE_LEGACY - -void rmobile_init_domains(struct rmobile_pm_domain domains[], int num) -{ - int j; - - for (j = 0; j < num; j++) - 
rmobile_init_pm_domain(&domains[j]); -} - -void rmobile_add_device_to_domain_td(const char *domain_name, - struct platform_device *pdev, - struct gpd_timing_data *td) -{ - struct device *dev = &pdev->dev; - - __pm_genpd_name_add_device(domain_name, dev, td); -} - -void rmobile_add_devices_to_domains(struct pm_domain_device data[], - int size) -{ - struct gpd_timing_data latencies = { - .stop_latency_ns = DEFAULT_DEV_LATENCY_NS, - .start_latency_ns = DEFAULT_DEV_LATENCY_NS, - .save_state_latency_ns = DEFAULT_DEV_LATENCY_NS, - .restore_state_latency_ns = DEFAULT_DEV_LATENCY_NS, - }; - int j; - - for (j = 0; j < size; j++) - rmobile_add_device_to_domain_td(data[j].domain_name, - data[j].pdev, &latencies); -} - -#else /* !CONFIG_ARCH_SHMOBILE_LEGACY */ - static int rmobile_pd_suspend_busy(void) { /* @@ -430,5 +399,3 @@ static int __init rmobile_init_pm_domains(void) } core_initcall(rmobile_init_pm_domains); - -#endif /* !CONFIG_ARCH_SHMOBILE_LEGACY */ diff --git a/arch/arm/mach-shmobile/pm-rmobile.h b/arch/arm/mach-shmobile/pm-rmobile.h index 53219786f539..30a4a421ee31 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.h +++ b/arch/arm/mach-shmobile/pm-rmobile.h @@ -26,39 +26,9 @@ struct rmobile_pm_domain { bool no_debug; }; -static inline -struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) -{ - return container_of(d, struct rmobile_pm_domain, genpd); -} - struct pm_domain_device { const char *domain_name; struct platform_device *pdev; }; -#if defined(CONFIG_PM_RMOBILE) && defined(CONFIG_ARCH_SHMOBILE_LEGACY) -extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num); -extern void rmobile_add_device_to_domain_td(const char *domain_name, - struct platform_device *pdev, - struct gpd_timing_data *td); - -static inline void rmobile_add_device_to_domain(const char *domain_name, - struct platform_device *pdev) -{ - rmobile_add_device_to_domain_td(domain_name, pdev, NULL); -} - -extern void rmobile_add_devices_to_domains(struct pm_domain_device data[], - int size); -#else - -#define rmobile_init_domains(domains, num) do { } while (0) -#define rmobile_add_device_to_domain_td(name, pdev, td) do { } while (0) -#define rmobile_add_device_to_domain(name, pdev) do { } while (0) - -static inline void rmobile_add_devices_to_domains(struct pm_domain_device d[], - int size) {} -#endif /* CONFIG_PM_RMOBILE */ - #endif /* PM_RMOBILE_H */ diff --git a/arch/arm/mach-shmobile/pm-sh73a0.c b/arch/arm/mach-shmobile/pm-sh73a0.c deleted file mode 100644 index a7e466817965..000000000000 --- a/arch/arm/mach-shmobile/pm-sh73a0.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * sh73a0 Power management support - * - * Copyright (C) 2012 Bastian Hecht <hechtb+renesas@gmail.com> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
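The pm-rcar.c rework earlier in this patch replaces the open-ended request loop with bounded retry loops (SYSCSR_RETRIES, PWRER_RETRIES and SYSCISR_RETRIES, each paired with a short udelay). The recurring building block is "poll a status bit with a fixed retry budget"; a small helper capturing that shape — the name and parameters here are hypothetical, not part of the patch — might be:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Poll @reg until @bit reads as 1, giving up after @retries attempts. */
static int example_poll_bit_set(void __iomem *reg, unsigned int bit,
				unsigned int retries, unsigned int delay_us)
{
	unsigned int k;

	for (k = 0; k < retries; k++) {
		if (ioread32(reg) & BIT(bit))
			return 0;
		udelay(delay_us);
	}

	return -EAGAIN;	/* retry budget exhausted */
}

With such a helper, the "wait until SYSC is ready to accept a power request" step above would reduce to a single call along the lines of example_poll_bit_set(rcar_sysc_base + SYSCSR, on ? SYSCSR_PONENB : SYSCSR_POFFENB, SYSCSR_RETRIES, SYSCSR_DELAY_US).
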
- */ - -#include <linux/suspend.h> -#include "common.h" - -#ifdef CONFIG_SUSPEND -static int sh73a0_enter_suspend(suspend_state_t suspend_state) -{ - cpu_do_idle(); - return 0; -} - -static void sh73a0_suspend_init(void) -{ - shmobile_suspend_ops.enter = sh73a0_enter_suspend; -} -#else -static void sh73a0_suspend_init(void) {} -#endif - -void __init sh73a0_pm_init(void) -{ - sh73a0_suspend_init(); -} diff --git a/arch/arm/mach-shmobile/r8a7740.h b/arch/arm/mach-shmobile/r8a7740.h deleted file mode 100644 index ca7805ad7ea3..000000000000 --- a/arch/arm/mach-shmobile/r8a7740.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (C) 2011 Renesas Solutions Corp. - * Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __ASM_R8A7740_H__ -#define __ASM_R8A7740_H__ - -/* - * MD_CKx pin - */ -#define MD_CK2 (1 << 2) -#define MD_CK1 (1 << 1) -#define MD_CK0 (1 << 0) - -/* DMA slave IDs */ -enum { - SHDMA_SLAVE_INVALID, - SHDMA_SLAVE_SDHI0_RX, - SHDMA_SLAVE_SDHI0_TX, - SHDMA_SLAVE_SDHI1_RX, - SHDMA_SLAVE_SDHI1_TX, - SHDMA_SLAVE_SDHI2_RX, - SHDMA_SLAVE_SDHI2_TX, - SHDMA_SLAVE_FSIA_RX, - SHDMA_SLAVE_FSIA_TX, - SHDMA_SLAVE_FSIB_TX, - SHDMA_SLAVE_USBHS_TX, - SHDMA_SLAVE_USBHS_RX, - SHDMA_SLAVE_MMCIF_TX, - SHDMA_SLAVE_MMCIF_RX, -}; - -extern void r8a7740_meram_workaround(void); -extern void r8a7740_init_irq_of(void); -extern void r8a7740_map_io(void); -extern void r8a7740_add_early_devices(void); -extern void r8a7740_add_standard_devices(void); -extern void r8a7740_clock_init(u8 md_ck); -extern void r8a7740_pinmux_init(void); -extern void r8a7740_pm_init(void); - -#if defined(CONFIG_PM) && !defined(CONFIG_ARCH_MULTIPLATFORM) -extern void __init r8a7740_init_pm_domains(void); -#else -static inline void r8a7740_init_pm_domains(void) {} -#endif /* CONFIG_PM && !CONFIG_ARCH_MULTIPLATFORM */ - -#endif /* __ASM_R8A7740_H__ */ diff --git a/arch/arm/mach-shmobile/r8a7779.h b/arch/arm/mach-shmobile/r8a7779.h index 19f97046dd70..db303f76704e 100644 --- a/arch/arm/mach-shmobile/r8a7779.h +++ b/arch/arm/mach-shmobile/r8a7779.h @@ -3,26 +3,7 @@ #include <linux/sh_clk.h> -/* HPB-DMA slave IDs */ -enum { - HPBDMA_SLAVE_DUMMY, - HPBDMA_SLAVE_SDHI0_TX, - HPBDMA_SLAVE_SDHI0_RX, -}; - -extern void r8a7779_init_irq_extpin(int irlm); -extern void r8a7779_init_irq_extpin_dt(int irlm); -extern void r8a7779_init_irq_dt(void); -extern void r8a7779_map_io(void); -extern void r8a7779_earlytimer_init(void); -extern void r8a7779_add_early_devices(void); -extern void r8a7779_add_standard_devices(void); -extern void r8a7779_init_late(void); -extern u32 r8a7779_read_mode_pins(void); -extern void r8a7779_clock_init(void); -extern void r8a7779_pinmux_init(void); extern void r8a7779_pm_init(void); -extern void r8a7779_register_twd(void); #ifdef CONFIG_PM extern void __init r8a7779_init_pm_domains(void); diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c index 384e6e934b87..62437b57813e 100644 --- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c +++ 
b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c @@ -123,7 +123,8 @@ static int __init rcar_gen2_regulator_quirk(void) u32 mon; if (!of_machine_is_compatible("renesas,koelsch") && - !of_machine_is_compatible("renesas,lager")) + !of_machine_is_compatible("renesas,lager") && + !of_machine_is_compatible("renesas,gose")) return -ENODEV; irqc = ioremap(IRQC_BASE, PAGE_SIZE); diff --git a/arch/arm/mach-shmobile/setup-r7s72100.c b/arch/arm/mach-shmobile/setup-r7s72100.c index 171174777b6f..d46639fc6849 100644 --- a/arch/arm/mach-shmobile/setup-r7s72100.c +++ b/arch/arm/mach-shmobile/setup-r7s72100.c @@ -20,7 +20,7 @@ #include "common.h" -static const char *r7s72100_boards_compat_dt[] __initdata = { +static const char *const r7s72100_boards_compat_dt[] __initconst = { "renesas,r7s72100", NULL, }; diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c index 446cee611902..20173c4f415d 100644 --- a/arch/arm/mach-shmobile/setup-r8a73a4.c +++ b/arch/arm/mach-shmobile/setup-r8a73a4.c @@ -20,7 +20,7 @@ #include "common.h" -static const char *r8a73a4_boards_compat_dt[] __initdata = { +static const char *const r8a73a4_boards_compat_dt[] __initconst = { "renesas,r8a73a4", NULL, }; diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c index 00291cc1772d..0c8f80c5b04d 100644 --- a/arch/arm/mach-shmobile/setup-r8a7740.c +++ b/arch/arm/mach-shmobile/setup-r8a7740.c @@ -13,31 +13,19 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ -#include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> -#include <linux/platform_data/irq-renesas-intc-irqpin.h> -#include <linux/platform_device.h> #include <linux/of_platform.h> -#include <linux/serial_sci.h> -#include <linux/sh_dma.h> -#include <linux/sh_timer.h> -#include <linux/platform_data/sh_ipmmu.h> -#include <asm/mach-types.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/hardware/cache-l2x0.h> #include "common.h" -#include "dma-register.h" -#include "irqs.h" -#include "pm-rmobile.h" -#include "r8a7740.h" static struct map_desc r8a7740_io_desc[] __initdata = { /* @@ -64,613 +52,12 @@ static struct map_desc r8a7740_io_desc[] __initdata = { #endif }; -void __init r8a7740_map_io(void) +static void __init r8a7740_map_io(void) { debug_ll_io_init(); iotable_init(r8a7740_io_desc, ARRAY_SIZE(r8a7740_io_desc)); } -/* PFC */ -static const struct resource pfc_resources[] = { - DEFINE_RES_MEM(0xe6050000, 0x8000), - DEFINE_RES_MEM(0xe605800c, 0x0020), -}; - -void __init r8a7740_pinmux_init(void) -{ - platform_device_register_simple("pfc-r8a7740", -1, pfc_resources, - ARRAY_SIZE(pfc_resources)); -} - -static struct renesas_intc_irqpin_config irqpin0_platform_data = { - .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */ -}; - -static struct resource irqpin0_resources[] = { - DEFINE_RES_MEM(0xe6900000, 4), /* ICR1A */ - DEFINE_RES_MEM(0xe6900010, 4), /* INTPRI00A */ - DEFINE_RES_MEM(0xe6900020, 1), /* INTREQ00A */ - DEFINE_RES_MEM(0xe6900040, 1), /* INTMSK00A */ - DEFINE_RES_MEM(0xe6900060, 1), /* INTMSKCLR00A */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ0 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ1 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ2 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ3 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ4 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ5 */ - DEFINE_RES_IRQ(gic_spi(149)), 
/* IRQ6 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ7 */ -}; - -static struct platform_device irqpin0_device = { - .name = "renesas_intc_irqpin", - .id = 0, - .resource = irqpin0_resources, - .num_resources = ARRAY_SIZE(irqpin0_resources), - .dev = { - .platform_data = &irqpin0_platform_data, - }, -}; - -static struct renesas_intc_irqpin_config irqpin1_platform_data = { - .irq_base = irq_pin(8), /* IRQ8 -> IRQ15 */ -}; - -static struct resource irqpin1_resources[] = { - DEFINE_RES_MEM(0xe6900004, 4), /* ICR2A */ - DEFINE_RES_MEM(0xe6900014, 4), /* INTPRI10A */ - DEFINE_RES_MEM(0xe6900024, 1), /* INTREQ10A */ - DEFINE_RES_MEM(0xe6900044, 1), /* INTMSK10A */ - DEFINE_RES_MEM(0xe6900064, 1), /* INTMSKCLR10A */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ8 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ9 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ10 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ11 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ12 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ13 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ14 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ15 */ -}; - -static struct platform_device irqpin1_device = { - .name = "renesas_intc_irqpin", - .id = 1, - .resource = irqpin1_resources, - .num_resources = ARRAY_SIZE(irqpin1_resources), - .dev = { - .platform_data = &irqpin1_platform_data, - }, -}; - -static struct renesas_intc_irqpin_config irqpin2_platform_data = { - .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */ -}; - -static struct resource irqpin2_resources[] = { - DEFINE_RES_MEM(0xe6900008, 4), /* ICR3A */ - DEFINE_RES_MEM(0xe6900018, 4), /* INTPRI30A */ - DEFINE_RES_MEM(0xe6900028, 1), /* INTREQ30A */ - DEFINE_RES_MEM(0xe6900048, 1), /* INTMSK30A */ - DEFINE_RES_MEM(0xe6900068, 1), /* INTMSKCLR30A */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ16 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ17 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ18 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ19 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ20 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ21 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ22 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ23 */ -}; - -static struct platform_device irqpin2_device = { - .name = "renesas_intc_irqpin", - .id = 2, - .resource = irqpin2_resources, - .num_resources = ARRAY_SIZE(irqpin2_resources), - .dev = { - .platform_data = &irqpin2_platform_data, - }, -}; - -static struct renesas_intc_irqpin_config irqpin3_platform_data = { - .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */ -}; - -static struct resource irqpin3_resources[] = { - DEFINE_RES_MEM(0xe690000c, 4), /* ICR3A */ - DEFINE_RES_MEM(0xe690001c, 4), /* INTPRI30A */ - DEFINE_RES_MEM(0xe690002c, 1), /* INTREQ30A */ - DEFINE_RES_MEM(0xe690004c, 1), /* INTMSK30A */ - DEFINE_RES_MEM(0xe690006c, 1), /* INTMSKCLR30A */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ24 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ25 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ26 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ27 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ28 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ29 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ30 */ - DEFINE_RES_IRQ(gic_spi(149)), /* IRQ31 */ -}; - -static struct platform_device irqpin3_device = { - .name = "renesas_intc_irqpin", - .id = 3, - .resource = irqpin3_resources, - .num_resources = ARRAY_SIZE(irqpin3_resources), - .dev = { - .platform_data = &irqpin3_platform_data, - }, -}; - -/* SCIF */ -#define R8A7740_SCIF(scif_type, index, baseaddr, irq) \ -static struct plat_sci_port scif##index##_platform_data = { \ - .type = scif_type, \ - .flags = UPF_BOOT_AUTOCONF, \ - .scscr = SCSCR_RE | 
SCSCR_TE, \ -}; \ - \ -static struct resource scif##index##_resources[] = { \ - DEFINE_RES_MEM(baseaddr, 0x100), \ - DEFINE_RES_IRQ(irq), \ -}; \ - \ -static struct platform_device scif##index##_device = { \ - .name = "sh-sci", \ - .id = index, \ - .resource = scif##index##_resources, \ - .num_resources = ARRAY_SIZE(scif##index##_resources), \ - .dev = { \ - .platform_data = &scif##index##_platform_data, \ - }, \ -} - -R8A7740_SCIF(PORT_SCIFA, 0, 0xe6c40000, gic_spi(100)); -R8A7740_SCIF(PORT_SCIFA, 1, 0xe6c50000, gic_spi(101)); -R8A7740_SCIF(PORT_SCIFA, 2, 0xe6c60000, gic_spi(102)); -R8A7740_SCIF(PORT_SCIFA, 3, 0xe6c70000, gic_spi(103)); -R8A7740_SCIF(PORT_SCIFA, 4, 0xe6c80000, gic_spi(104)); -R8A7740_SCIF(PORT_SCIFA, 5, 0xe6cb0000, gic_spi(105)); -R8A7740_SCIF(PORT_SCIFA, 6, 0xe6cc0000, gic_spi(106)); -R8A7740_SCIF(PORT_SCIFA, 7, 0xe6cd0000, gic_spi(107)); -R8A7740_SCIF(PORT_SCIFB, 8, 0xe6c30000, gic_spi(108)); - -/* CMT */ -static struct sh_timer_config cmt1_platform_data = { - .channels_mask = 0x3f, -}; - -static struct resource cmt1_resources[] = { - DEFINE_RES_MEM(0xe6138000, 0x170), - DEFINE_RES_IRQ(gic_spi(58)), -}; - -static struct platform_device cmt1_device = { - .name = "sh-cmt-48", - .id = 1, - .dev = { - .platform_data = &cmt1_platform_data, - }, - .resource = cmt1_resources, - .num_resources = ARRAY_SIZE(cmt1_resources), -}; - -/* TMU */ -static struct sh_timer_config tmu0_platform_data = { - .channels_mask = 7, -}; - -static struct resource tmu0_resources[] = { - DEFINE_RES_MEM(0xfff80000, 0x2c), - DEFINE_RES_IRQ(gic_spi(198)), - DEFINE_RES_IRQ(gic_spi(199)), - DEFINE_RES_IRQ(gic_spi(200)), -}; - -static struct platform_device tmu0_device = { - .name = "sh-tmu", - .id = 0, - .dev = { - .platform_data = &tmu0_platform_data, - }, - .resource = tmu0_resources, - .num_resources = ARRAY_SIZE(tmu0_resources), -}; - -/* IPMMUI (an IPMMU module for ICB/LMB) */ -static struct resource ipmmu_resources[] = { - [0] = { - .name = "IPMMUI", - .start = 0xfe951000, - .end = 0xfe9510ff, - .flags = IORESOURCE_MEM, - }, -}; - -static const char * const ipmmu_dev_names[] = { - "sh_mobile_lcdc_fb.0", - "sh_mobile_lcdc_fb.1", - "sh_mobile_ceu.0", -}; - -static struct shmobile_ipmmu_platform_data ipmmu_platform_data = { - .dev_names = ipmmu_dev_names, - .num_dev_names = ARRAY_SIZE(ipmmu_dev_names), -}; - -static struct platform_device ipmmu_device = { - .name = "ipmmu", - .id = -1, - .dev = { - .platform_data = &ipmmu_platform_data, - }, - .resource = ipmmu_resources, - .num_resources = ARRAY_SIZE(ipmmu_resources), -}; - -static struct platform_device *r8a7740_early_devices[] __initdata = { - &scif0_device, - &scif1_device, - &scif2_device, - &scif3_device, - &scif4_device, - &scif5_device, - &scif6_device, - &scif7_device, - &scif8_device, - &irqpin0_device, - &irqpin1_device, - &irqpin2_device, - &irqpin3_device, - &tmu0_device, - &ipmmu_device, - &cmt1_device, -}; - -/* DMA */ -static const struct sh_dmae_slave_config r8a7740_dmae_slaves[] = { - { - .slave_id = SHDMA_SLAVE_SDHI0_TX, - .addr = 0xe6850030, - .chcr = CHCR_TX(XMIT_SZ_16BIT), - .mid_rid = 0xc1, - }, { - .slave_id = SHDMA_SLAVE_SDHI0_RX, - .addr = 0xe6850030, - .chcr = CHCR_RX(XMIT_SZ_16BIT), - .mid_rid = 0xc2, - }, { - .slave_id = SHDMA_SLAVE_SDHI1_TX, - .addr = 0xe6860030, - .chcr = CHCR_TX(XMIT_SZ_16BIT), - .mid_rid = 0xc9, - }, { - .slave_id = SHDMA_SLAVE_SDHI1_RX, - .addr = 0xe6860030, - .chcr = CHCR_RX(XMIT_SZ_16BIT), - .mid_rid = 0xca, - }, { - .slave_id = SHDMA_SLAVE_SDHI2_TX, - .addr = 0xe6870030, - .chcr = 
CHCR_TX(XMIT_SZ_16BIT), - .mid_rid = 0xcd, - }, { - .slave_id = SHDMA_SLAVE_SDHI2_RX, - .addr = 0xe6870030, - .chcr = CHCR_RX(XMIT_SZ_16BIT), - .mid_rid = 0xce, - }, { - .slave_id = SHDMA_SLAVE_FSIA_TX, - .addr = 0xfe1f0024, - .chcr = CHCR_TX(XMIT_SZ_32BIT), - .mid_rid = 0xb1, - }, { - .slave_id = SHDMA_SLAVE_FSIA_RX, - .addr = 0xfe1f0020, - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0xb2, - }, { - .slave_id = SHDMA_SLAVE_FSIB_TX, - .addr = 0xfe1f0064, - .chcr = CHCR_TX(XMIT_SZ_32BIT), - .mid_rid = 0xb5, - }, { - .slave_id = SHDMA_SLAVE_MMCIF_TX, - .addr = 0xe6bd0034, - .chcr = CHCR_TX(XMIT_SZ_32BIT), - .mid_rid = 0xd1, - }, { - .slave_id = SHDMA_SLAVE_MMCIF_RX, - .addr = 0xe6bd0034, - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0xd2, - }, -}; - -#define DMA_CHANNEL(a, b, c) \ -{ \ - .offset = a, \ - .dmars = b, \ - .dmars_bit = c, \ - .chclr_offset = (0x220 - 0x20) + a \ -} - -static const struct sh_dmae_channel r8a7740_dmae_channels[] = { - DMA_CHANNEL(0x00, 0, 0), - DMA_CHANNEL(0x10, 0, 8), - DMA_CHANNEL(0x20, 4, 0), - DMA_CHANNEL(0x30, 4, 8), - DMA_CHANNEL(0x50, 8, 0), - DMA_CHANNEL(0x60, 8, 8), -}; - -static struct sh_dmae_pdata dma_platform_data = { - .slave = r8a7740_dmae_slaves, - .slave_num = ARRAY_SIZE(r8a7740_dmae_slaves), - .channel = r8a7740_dmae_channels, - .channel_num = ARRAY_SIZE(r8a7740_dmae_channels), - .ts_low_shift = TS_LOW_SHIFT, - .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, - .ts_high_shift = TS_HI_SHIFT, - .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, - .ts_shift = dma_ts_shift, - .ts_shift_num = ARRAY_SIZE(dma_ts_shift), - .dmaor_init = DMAOR_DME, - .chclr_present = 1, -}; - -/* Resource order important! */ -static struct resource r8a7740_dmae0_resources[] = { - { - /* Channel registers and DMAOR */ - .start = 0xfe008020, - .end = 0xfe00828f, - .flags = IORESOURCE_MEM, - }, - { - /* DMARSx */ - .start = 0xfe009000, - .end = 0xfe00900b, - .flags = IORESOURCE_MEM, - }, - { - .name = "error_irq", - .start = gic_spi(34), - .end = gic_spi(34), - .flags = IORESOURCE_IRQ, - }, - { - /* IRQ for channels 0-5 */ - .start = gic_spi(28), - .end = gic_spi(33), - .flags = IORESOURCE_IRQ, - }, -}; - -/* Resource order important! */ -static struct resource r8a7740_dmae1_resources[] = { - { - /* Channel registers and DMAOR */ - .start = 0xfe018020, - .end = 0xfe01828f, - .flags = IORESOURCE_MEM, - }, - { - /* DMARSx */ - .start = 0xfe019000, - .end = 0xfe01900b, - .flags = IORESOURCE_MEM, - }, - { - .name = "error_irq", - .start = gic_spi(41), - .end = gic_spi(41), - .flags = IORESOURCE_IRQ, - }, - { - /* IRQ for channels 0-5 */ - .start = gic_spi(35), - .end = gic_spi(40), - .flags = IORESOURCE_IRQ, - }, -}; - -/* Resource order important! 
*/ -static struct resource r8a7740_dmae2_resources[] = { - { - /* Channel registers and DMAOR */ - .start = 0xfe028020, - .end = 0xfe02828f, - .flags = IORESOURCE_MEM, - }, - { - /* DMARSx */ - .start = 0xfe029000, - .end = 0xfe02900b, - .flags = IORESOURCE_MEM, - }, - { - .name = "error_irq", - .start = gic_spi(48), - .end = gic_spi(48), - .flags = IORESOURCE_IRQ, - }, - { - /* IRQ for channels 0-5 */ - .start = gic_spi(42), - .end = gic_spi(47), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device dma0_device = { - .name = "sh-dma-engine", - .id = 0, - .resource = r8a7740_dmae0_resources, - .num_resources = ARRAY_SIZE(r8a7740_dmae0_resources), - .dev = { - .platform_data = &dma_platform_data, - }, -}; - -static struct platform_device dma1_device = { - .name = "sh-dma-engine", - .id = 1, - .resource = r8a7740_dmae1_resources, - .num_resources = ARRAY_SIZE(r8a7740_dmae1_resources), - .dev = { - .platform_data = &dma_platform_data, - }, -}; - -static struct platform_device dma2_device = { - .name = "sh-dma-engine", - .id = 2, - .resource = r8a7740_dmae2_resources, - .num_resources = ARRAY_SIZE(r8a7740_dmae2_resources), - .dev = { - .platform_data = &dma_platform_data, - }, -}; - -/* USB-DMAC */ -static const struct sh_dmae_channel r8a7740_usb_dma_channels[] = { - { - .offset = 0, - }, { - .offset = 0x20, - }, -}; - -static const struct sh_dmae_slave_config r8a7740_usb_dma_slaves[] = { - { - .slave_id = SHDMA_SLAVE_USBHS_TX, - .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE), - }, { - .slave_id = SHDMA_SLAVE_USBHS_RX, - .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE), - }, -}; - -static struct sh_dmae_pdata usb_dma_platform_data = { - .slave = r8a7740_usb_dma_slaves, - .slave_num = ARRAY_SIZE(r8a7740_usb_dma_slaves), - .channel = r8a7740_usb_dma_channels, - .channel_num = ARRAY_SIZE(r8a7740_usb_dma_channels), - .ts_low_shift = USBTS_LOW_SHIFT, - .ts_low_mask = USBTS_LOW_BIT << USBTS_LOW_SHIFT, - .ts_high_shift = USBTS_HI_SHIFT, - .ts_high_mask = USBTS_HI_BIT << USBTS_HI_SHIFT, - .ts_shift = dma_usbts_shift, - .ts_shift_num = ARRAY_SIZE(dma_usbts_shift), - .dmaor_init = DMAOR_DME, - .chcr_offset = 0x14, - .chcr_ie_bit = 1 << 5, - .dmaor_is_32bit = 1, - .needs_tend_set = 1, - .no_dmars = 1, - .slave_only = 1, -}; - -static struct resource r8a7740_usb_dma_resources[] = { - { - /* Channel registers and DMAOR */ - .start = 0xe68a0020, - .end = 0xe68a0064 - 1, - .flags = IORESOURCE_MEM, - }, - { - /* VCR/SWR/DMICR */ - .start = 0xe68a0000, - .end = 0xe68a0014 - 1, - .flags = IORESOURCE_MEM, - }, - { - /* IRQ for channels */ - .start = gic_spi(49), - .end = gic_spi(49), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device usb_dma_device = { - .name = "sh-dma-engine", - .id = 3, - .resource = r8a7740_usb_dma_resources, - .num_resources = ARRAY_SIZE(r8a7740_usb_dma_resources), - .dev = { - .platform_data = &usb_dma_platform_data, - }, -}; - -/* I2C */ -static struct resource i2c0_resources[] = { - [0] = { - .name = "IIC0", - .start = 0xfff20000, - .end = 0xfff20425 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(201), - .end = gic_spi(204), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct resource i2c1_resources[] = { - [0] = { - .name = "IIC1", - .start = 0xe6c20000, - .end = 0xe6c20425 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_spi(70), /* IIC1_ALI1 */ - .end = gic_spi(73), /* IIC1_DTEI1 */ - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device i2c0_device = { - .name = "i2c-sh_mobile", - .id = 0, - .resource = 
i2c0_resources, - .num_resources = ARRAY_SIZE(i2c0_resources), -}; - -static struct platform_device i2c1_device = { - .name = "i2c-sh_mobile", - .id = 1, - .resource = i2c1_resources, - .num_resources = ARRAY_SIZE(i2c1_resources), -}; - -static struct resource pmu_resources[] = { - [0] = { - .start = gic_spi(83), - .end = gic_spi(83), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device pmu_device = { - .name = "armv7-pmu", - .id = -1, - .num_resources = ARRAY_SIZE(pmu_resources), - .resource = pmu_resources, -}; - -static struct platform_device *r8a7740_late_devices[] __initdata = { - &i2c0_device, - &i2c1_device, - &dma0_device, - &dma1_device, - &dma2_device, - &usb_dma_device, - &pmu_device, -}; - /* * r8a7740 chip has lasting errata on MERAM buffer. * this is work-around for it. @@ -678,7 +65,7 @@ static struct platform_device *r8a7740_late_devices[] __initdata = { * "Media RAM (MERAM)" on r8a7740 documentation */ #define MEBUFCNTR 0xFE950098 -void __init r8a7740_meram_workaround(void) +static void __init r8a7740_meram_workaround(void) { void __iomem *reg; @@ -689,70 +76,13 @@ void __init r8a7740_meram_workaround(void) } } -void __init r8a7740_add_standard_devices(void) -{ - static struct pm_domain_device domain_devices[] __initdata = { - { "A4R", &tmu0_device }, - { "A4R", &i2c0_device }, - { "A4S", &irqpin0_device }, - { "A4S", &irqpin1_device }, - { "A4S", &irqpin2_device }, - { "A4S", &irqpin3_device }, - { "A3SP", &scif0_device }, - { "A3SP", &scif1_device }, - { "A3SP", &scif2_device }, - { "A3SP", &scif3_device }, - { "A3SP", &scif4_device }, - { "A3SP", &scif5_device }, - { "A3SP", &scif6_device }, - { "A3SP", &scif7_device }, - { "A3SP", &scif8_device }, - { "A3SP", &i2c1_device }, - { "A3SP", &ipmmu_device }, - { "A3SP", &dma0_device }, - { "A3SP", &dma1_device }, - { "A3SP", &dma2_device }, - { "A3SP", &usb_dma_device }, - }; - - r8a7740_init_pm_domains(); - - /* add devices */ - platform_add_devices(r8a7740_early_devices, - ARRAY_SIZE(r8a7740_early_devices)); - platform_add_devices(r8a7740_late_devices, - ARRAY_SIZE(r8a7740_late_devices)); - - /* add devices to PM domain */ - rmobile_add_devices_to_domains(domain_devices, - ARRAY_SIZE(domain_devices)); -} - -void __init r8a7740_add_early_devices(void) -{ - early_platform_add_devices(r8a7740_early_devices, - ARRAY_SIZE(r8a7740_early_devices)); - - /* setup early console here as well */ - shmobile_setup_console(); -} - -#ifdef CONFIG_USE_OF - -void __init r8a7740_init_irq_of(void) +static void __init r8a7740_init_irq_of(void) { void __iomem *intc_prio_base = ioremap_nocache(0xe6900010, 0x10); void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10); void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4); -#ifdef CONFIG_ARCH_SHMOBILE_LEGACY - void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000); - void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000); - - gic_init(0, 29, gic_dist_base, gic_cpu_base); -#else irqchip_init(); -#endif /* route signals to GIC */ iowrite32(0x0, pfc_inta_ctrl); @@ -787,7 +117,7 @@ static void __init r8a7740_generic_init(void) of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } -static const char *r8a7740_boards_compat_dt[] __initdata = { +static const char *const r8a7740_boards_compat_dt[] __initconst = { "renesas,r8a7740", NULL, }; @@ -800,5 +130,3 @@ DT_MACHINE_START(R8A7740_DT, "Generic R8A7740 (Flattened Device Tree)") .init_late = shmobile_init_late, .dt_compat = r8a7740_boards_compat_dt, MACHINE_END - -#endif /* 
CONFIG_USE_OF */ diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c index c49aa094fe17..b9116c81e54b 100644 --- a/arch/arm/mach-shmobile/setup-r8a7778.c +++ b/arch/arm/mach-shmobile/setup-r8a7778.c @@ -615,7 +615,7 @@ void __init r8a7778_init_irq_dt(void) iounmap(base); } -static const char *r8a7778_compat_dt[] __initdata = { +static const char *const r8a7778_compat_dt[] __initconst = { "renesas,r8a7778", NULL, }; diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c index c03e562be12b..7ca891999547 100644 --- a/arch/arm/mach-shmobile/setup-r8a7779.c +++ b/arch/arm/mach-shmobile/setup-r8a7779.c @@ -14,37 +14,17 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ -#include <linux/kernel.h> +#include <linux/clk/shmobile.h> +#include <linux/clocksource.h> #include <linux/init.h> -#include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqchip/arm-gic.h> -#include <linux/of_platform.h> -#include <linux/platform_data/dma-rcar-hpbdma.h> -#include <linux/platform_data/gpio-rcar.h> -#include <linux/platform_data/irq-renesas-intc-irqpin.h> -#include <linux/platform_device.h> -#include <linux/delay.h> -#include <linux/input.h> -#include <linux/io.h> -#include <linux/serial_sci.h> -#include <linux/sh_timer.h> -#include <linux/dma-mapping.h> -#include <linux/usb/otg.h> -#include <linux/usb/hcd.h> -#include <linux/usb/ehci_pdriver.h> -#include <linux/usb/ohci_pdriver.h> -#include <linux/pm_runtime.h> -#include <asm/mach-types.h> #include <asm/mach/arch.h> -#include <asm/mach/time.h> #include <asm/mach/map.h> -#include <asm/hardware/cache-l2x0.h> #include "common.h" -#include "irqs.h" #include "r8a7779.h" static struct map_desc r8a7779_io_desc[] __initdata = { @@ -64,7 +44,7 @@ static struct map_desc r8a7779_io_desc[] __initdata = { }, }; -void __init r8a7779_map_io(void) +static void __init r8a7779_map_io(void) { debug_ll_io_init(); iotable_init(r8a7779_io_desc, ARRAY_SIZE(r8a7779_io_desc)); @@ -80,652 +60,12 @@ void __init r8a7779_map_io(void) #define INT2NTSR0 IOMEM(0xfe700060) #define INT2NTSR1 IOMEM(0xfe700064) -static struct renesas_intc_irqpin_config irqpin0_platform_data __initdata = { - .irq_base = irq_pin(0), /* IRQ0 -> IRQ3 */ - .sense_bitfield_width = 2, -}; - -static struct resource irqpin0_resources[] __initdata = { - DEFINE_RES_MEM(0xfe78001c, 4), /* ICR1 */ - DEFINE_RES_MEM(0xfe780010, 4), /* INTPRI */ - DEFINE_RES_MEM(0xfe780024, 4), /* INTREQ */ - DEFINE_RES_MEM(0xfe780044, 4), /* INTMSK0 */ - DEFINE_RES_MEM(0xfe780064, 4), /* INTMSKCLR0 */ - DEFINE_RES_IRQ(gic_spi(27)), /* IRQ0 */ - DEFINE_RES_IRQ(gic_spi(28)), /* IRQ1 */ - DEFINE_RES_IRQ(gic_spi(29)), /* IRQ2 */ - DEFINE_RES_IRQ(gic_spi(30)), /* IRQ3 */ -}; - -void __init r8a7779_init_irq_extpin_dt(int irlm) -{ - void __iomem *icr0 = ioremap_nocache(0xfe780000, PAGE_SIZE); - u32 tmp; - - if (!icr0) { - pr_warn("r8a7779: unable to setup external irq pin mode\n"); - return; - } - - tmp = ioread32(icr0); - if (irlm) - tmp |= 1 << 23; /* IRQ0 -> IRQ3 as individual pins */ - else - tmp &= ~(1 << 23); /* IRL mode - not supported */ - tmp |= (1 << 21); /* LVLMODE = 1 */ - iowrite32(tmp, icr0); - iounmap(icr0); -} - -void __init r8a7779_init_irq_extpin(int irlm) +static void __init r8a7779_init_irq_dt(void) { - r8a7779_init_irq_extpin_dt(irlm); - if (irlm) - platform_device_register_resndata( - NULL, "renesas_intc_irqpin", -1, - irqpin0_resources, 
ARRAY_SIZE(irqpin0_resources), - &irqpin0_platform_data, sizeof(irqpin0_platform_data)); -} - -/* PFC/GPIO */ -static struct resource r8a7779_pfc_resources[] = { - DEFINE_RES_MEM(0xfffc0000, 0x023c), -}; - -static struct platform_device r8a7779_pfc_device = { - .name = "pfc-r8a7779", - .id = -1, - .resource = r8a7779_pfc_resources, - .num_resources = ARRAY_SIZE(r8a7779_pfc_resources), -}; - -#define R8A7779_GPIO(idx, npins) \ -static struct resource r8a7779_gpio##idx##_resources[] = { \ - DEFINE_RES_MEM(0xffc40000 + (0x1000 * (idx)), 0x002c), \ - DEFINE_RES_IRQ(gic_iid(0xad + (idx))), \ -}; \ - \ -static struct gpio_rcar_config r8a7779_gpio##idx##_platform_data = { \ - .gpio_base = 32 * (idx), \ - .irq_base = 0, \ - .number_of_pins = npins, \ - .pctl_name = "pfc-r8a7779", \ -}; \ - \ -static struct platform_device r8a7779_gpio##idx##_device = { \ - .name = "gpio_rcar", \ - .id = idx, \ - .resource = r8a7779_gpio##idx##_resources, \ - .num_resources = ARRAY_SIZE(r8a7779_gpio##idx##_resources), \ - .dev = { \ - .platform_data = &r8a7779_gpio##idx##_platform_data, \ - }, \ -} - -R8A7779_GPIO(0, 32); -R8A7779_GPIO(1, 32); -R8A7779_GPIO(2, 32); -R8A7779_GPIO(3, 32); -R8A7779_GPIO(4, 32); -R8A7779_GPIO(5, 32); -R8A7779_GPIO(6, 9); - -static struct platform_device *r8a7779_pinctrl_devices[] __initdata = { - &r8a7779_pfc_device, - &r8a7779_gpio0_device, - &r8a7779_gpio1_device, - &r8a7779_gpio2_device, - &r8a7779_gpio3_device, - &r8a7779_gpio4_device, - &r8a7779_gpio5_device, - &r8a7779_gpio6_device, -}; - -void __init r8a7779_pinmux_init(void) -{ - platform_add_devices(r8a7779_pinctrl_devices, - ARRAY_SIZE(r8a7779_pinctrl_devices)); -} - -/* SCIF */ -#define R8A7779_SCIF(index, baseaddr, irq) \ -static struct plat_sci_port scif##index##_platform_data = { \ - .type = PORT_SCIF, \ - .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, \ - .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, \ -}; \ - \ -static struct resource scif##index##_resources[] = { \ - DEFINE_RES_MEM(baseaddr, 0x100), \ - DEFINE_RES_IRQ(irq), \ -}; \ - \ -static struct platform_device scif##index##_device = { \ - .name = "sh-sci", \ - .id = index, \ - .resource = scif##index##_resources, \ - .num_resources = ARRAY_SIZE(scif##index##_resources), \ - .dev = { \ - .platform_data = &scif##index##_platform_data, \ - }, \ -} - -R8A7779_SCIF(0, 0xffe40000, gic_iid(0x78)); -R8A7779_SCIF(1, 0xffe41000, gic_iid(0x79)); -R8A7779_SCIF(2, 0xffe42000, gic_iid(0x7a)); -R8A7779_SCIF(3, 0xffe43000, gic_iid(0x7b)); -R8A7779_SCIF(4, 0xffe44000, gic_iid(0x7c)); -R8A7779_SCIF(5, 0xffe45000, gic_iid(0x7d)); - -/* TMU */ -static struct sh_timer_config tmu0_platform_data = { - .channels_mask = 7, -}; - -static struct resource tmu0_resources[] = { - DEFINE_RES_MEM(0xffd80000, 0x30), - DEFINE_RES_IRQ(gic_iid(0x40)), - DEFINE_RES_IRQ(gic_iid(0x41)), - DEFINE_RES_IRQ(gic_iid(0x42)), -}; - -static struct platform_device tmu0_device = { - .name = "sh-tmu", - .id = 0, - .dev = { - .platform_data = &tmu0_platform_data, - }, - .resource = tmu0_resources, - .num_resources = ARRAY_SIZE(tmu0_resources), -}; - -/* I2C */ -static struct resource rcar_i2c0_res[] = { - { - .start = 0xffc70000, - .end = 0xffc70fff, - .flags = IORESOURCE_MEM, - }, { - .start = gic_iid(0x6f), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device i2c0_device = { - .name = "i2c-rcar", - .id = 0, - .resource = rcar_i2c0_res, - .num_resources = ARRAY_SIZE(rcar_i2c0_res), -}; - -static struct resource rcar_i2c1_res[] = { - { - .start = 0xffc71000, - .end = 0xffc71fff, - .flags = IORESOURCE_MEM, 
- }, { - .start = gic_iid(0x72), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device i2c1_device = { - .name = "i2c-rcar", - .id = 1, - .resource = rcar_i2c1_res, - .num_resources = ARRAY_SIZE(rcar_i2c1_res), -}; - -static struct resource rcar_i2c2_res[] = { - { - .start = 0xffc72000, - .end = 0xffc72fff, - .flags = IORESOURCE_MEM, - }, { - .start = gic_iid(0x70), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device i2c2_device = { - .name = "i2c-rcar", - .id = 2, - .resource = rcar_i2c2_res, - .num_resources = ARRAY_SIZE(rcar_i2c2_res), -}; - -static struct resource rcar_i2c3_res[] = { - { - .start = 0xffc73000, - .end = 0xffc73fff, - .flags = IORESOURCE_MEM, - }, { - .start = gic_iid(0x71), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device i2c3_device = { - .name = "i2c-rcar", - .id = 3, - .resource = rcar_i2c3_res, - .num_resources = ARRAY_SIZE(rcar_i2c3_res), -}; - -static struct resource sata_resources[] = { - [0] = { - .name = "rcar-sata", - .start = 0xfc600000, - .end = 0xfc601fff, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_iid(0x84), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device sata_device = { - .name = "sata_rcar", - .id = -1, - .resource = sata_resources, - .num_resources = ARRAY_SIZE(sata_resources), - .dev = { - .dma_mask = &sata_device.dev.coherent_dma_mask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, -}; - -/* USB */ -static struct usb_phy *phy; - -static int usb_power_on(struct platform_device *pdev) -{ - if (IS_ERR(phy)) - return PTR_ERR(phy); - - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); - - usb_phy_init(phy); - - return 0; -} - -static void usb_power_off(struct platform_device *pdev) -{ - if (IS_ERR(phy)) - return; - - usb_phy_shutdown(phy); - - pm_runtime_put_sync(&pdev->dev); - pm_runtime_disable(&pdev->dev); -} - -static int ehci_init_internal_buffer(struct usb_hcd *hcd) -{ - /* - * Below are recommended values from the datasheet; - * see [USB :: Setting of EHCI Internal Buffer]. 
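The removed USB glue above hooks PHY and runtime-PM handling into the generic ehci-platform/ohci-platform drivers purely through platform-data callbacks. Stripped of the PHY specifics and the .pre_setup internal-buffer hook, the plumbing amounts to the sketch below (identifiers prefixed example_ are hypothetical; error handling is kept to the minimum):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/usb/ehci_pdriver.h>

static int example_usb_power_on(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);

	return ret < 0 ? ret : 0;
}

static void example_usb_power_off(struct platform_device *pdev)
{
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static struct usb_ehci_pdata example_ehci_pdata = {
	.power_on	= example_usb_power_on,
	.power_off	= example_usb_power_off,
	.power_suspend	= example_usb_power_off,
};
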
- */ - /* EHCI IP internal buffer setting */ - iowrite32(0x00ff0040, hcd->regs + 0x0094); - /* EHCI IP internal buffer enable */ - iowrite32(0x00000001, hcd->regs + 0x009C); - - return 0; -} - -static struct usb_ehci_pdata ehcix_pdata = { - .power_on = usb_power_on, - .power_off = usb_power_off, - .power_suspend = usb_power_off, - .pre_setup = ehci_init_internal_buffer, -}; - -static struct resource ehci0_resources[] = { - [0] = { - .start = 0xffe70000, - .end = 0xffe70400 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_iid(0x4c), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ehci0_device = { - .name = "ehci-platform", - .id = 0, - .dev = { - .dma_mask = &ehci0_device.dev.coherent_dma_mask, - .coherent_dma_mask = 0xffffffff, - .platform_data = &ehcix_pdata, - }, - .num_resources = ARRAY_SIZE(ehci0_resources), - .resource = ehci0_resources, -}; - -static struct resource ehci1_resources[] = { - [0] = { - .start = 0xfff70000, - .end = 0xfff70400 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_iid(0x4d), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ehci1_device = { - .name = "ehci-platform", - .id = 1, - .dev = { - .dma_mask = &ehci1_device.dev.coherent_dma_mask, - .coherent_dma_mask = 0xffffffff, - .platform_data = &ehcix_pdata, - }, - .num_resources = ARRAY_SIZE(ehci1_resources), - .resource = ehci1_resources, -}; - -static struct usb_ohci_pdata ohcix_pdata = { - .power_on = usb_power_on, - .power_off = usb_power_off, - .power_suspend = usb_power_off, -}; - -static struct resource ohci0_resources[] = { - [0] = { - .start = 0xffe70400, - .end = 0xffe70800 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_iid(0x4c), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ohci0_device = { - .name = "ohci-platform", - .id = 0, - .dev = { - .dma_mask = &ohci0_device.dev.coherent_dma_mask, - .coherent_dma_mask = 0xffffffff, - .platform_data = &ohcix_pdata, - }, - .num_resources = ARRAY_SIZE(ohci0_resources), - .resource = ohci0_resources, -}; - -static struct resource ohci1_resources[] = { - [0] = { - .start = 0xfff70400, - .end = 0xfff70800 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = gic_iid(0x4d), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ohci1_device = { - .name = "ohci-platform", - .id = 1, - .dev = { - .dma_mask = &ohci1_device.dev.coherent_dma_mask, - .coherent_dma_mask = 0xffffffff, - .platform_data = &ohcix_pdata, - }, - .num_resources = ARRAY_SIZE(ohci1_resources), - .resource = ohci1_resources, -}; - -/* HPB-DMA */ - -/* Asynchronous mode register bits */ -#define HPB_DMAE_ASYNCMDR_ASMD43_MASK BIT(23) /* MMC1 */ -#define HPB_DMAE_ASYNCMDR_ASMD43_SINGLE BIT(23) /* MMC1 */ -#define HPB_DMAE_ASYNCMDR_ASMD43_MULTI 0 /* MMC1 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD43_MASK BIT(22) /* MMC1 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD43_BURST BIT(22) /* MMC1 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD43_NBURST 0 /* MMC1 */ -#define HPB_DMAE_ASYNCMDR_ASMD24_MASK BIT(21) /* MMC0 */ -#define HPB_DMAE_ASYNCMDR_ASMD24_SINGLE BIT(21) /* MMC0 */ -#define HPB_DMAE_ASYNCMDR_ASMD24_MULTI 0 /* MMC0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD24_MASK BIT(20) /* MMC0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD24_BURST BIT(20) /* MMC0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD24_NBURST 0 /* MMC0 */ -#define HPB_DMAE_ASYNCMDR_ASMD41_MASK BIT(19) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASMD41_SINGLE BIT(19) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASMD41_MULTI 0 /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD41_MASK 
BIT(18) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD41_BURST BIT(18) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD41_NBURST 0 /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASMD40_MASK BIT(17) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASMD40_SINGLE BIT(17) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASMD40_MULTI 0 /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD40_MASK BIT(16) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD40_BURST BIT(16) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD40_NBURST 0 /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASMD39_MASK BIT(15) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASMD39_SINGLE BIT(15) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASMD39_MULTI 0 /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD39_MASK BIT(14) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD39_BURST BIT(14) /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD39_NBURST 0 /* SDHI3 */ -#define HPB_DMAE_ASYNCMDR_ASMD27_MASK BIT(13) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASMD27_SINGLE BIT(13) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASMD27_MULTI 0 /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD27_MASK BIT(12) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD27_BURST BIT(12) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD27_NBURST 0 /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASMD26_MASK BIT(11) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASMD26_SINGLE BIT(11) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASMD26_MULTI 0 /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD26_MASK BIT(10) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD26_BURST BIT(10) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD26_NBURST 0 /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASMD25_MASK BIT(9) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASMD25_SINGLE BIT(9) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASMD25_MULTI 0 /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD25_MASK BIT(8) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD25_BURST BIT(8) /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD25_NBURST 0 /* SDHI2 */ -#define HPB_DMAE_ASYNCMDR_ASMD23_MASK BIT(7) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASMD23_SINGLE BIT(7) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASMD23_MULTI 0 /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD23_MASK BIT(6) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD23_BURST BIT(6) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD23_NBURST 0 /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASMD22_MASK BIT(5) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASMD22_SINGLE BIT(5) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASMD22_MULTI 0 /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD22_MASK BIT(4) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD22_BURST BIT(4) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD22_NBURST 0 /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASMD21_MASK BIT(3) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASMD21_SINGLE BIT(3) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASMD21_MULTI 0 /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD21_MASK BIT(2) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD21_BURST BIT(2) /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD21_NBURST 0 /* SDHI0 */ -#define HPB_DMAE_ASYNCMDR_ASMD20_MASK BIT(1) /* SDHI1 */ -#define HPB_DMAE_ASYNCMDR_ASMD20_SINGLE BIT(1) /* SDHI1 */ -#define HPB_DMAE_ASYNCMDR_ASMD20_MULTI 0 /* SDHI1 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD20_MASK BIT(0) /* SDHI1 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD20_BURST BIT(0) /* SDHI1 */ -#define HPB_DMAE_ASYNCMDR_ASBTMD20_NBURST 0 /* SDHI1 */ - -static const struct hpb_dmae_slave_config hpb_dmae_slaves[] = { - { - .id = HPBDMA_SLAVE_SDHI0_TX, - .addr = 0xffe4c000 + 0x30, - .dcr = HPB_DMAE_DCR_SPDS_16BIT | - HPB_DMAE_DCR_DMDL | - HPB_DMAE_DCR_DPDS_16BIT, - .rstr = 
HPB_DMAE_ASYNCRSTR_ASRST21 | - HPB_DMAE_ASYNCRSTR_ASRST22 | - HPB_DMAE_ASYNCRSTR_ASRST23, - .mdr = HPB_DMAE_ASYNCMDR_ASMD21_SINGLE | - HPB_DMAE_ASYNCMDR_ASBTMD21_NBURST, - .mdm = HPB_DMAE_ASYNCMDR_ASMD21_MASK | - HPB_DMAE_ASYNCMDR_ASBTMD21_MASK, - .port = 0x0D0C, - .flags = HPB_DMAE_SET_ASYNC_RESET | HPB_DMAE_SET_ASYNC_MODE, - .dma_ch = 21, - }, { - .id = HPBDMA_SLAVE_SDHI0_RX, - .addr = 0xffe4c000 + 0x30, - .dcr = HPB_DMAE_DCR_SMDL | - HPB_DMAE_DCR_SPDS_16BIT | - HPB_DMAE_DCR_DPDS_16BIT, - .rstr = HPB_DMAE_ASYNCRSTR_ASRST21 | - HPB_DMAE_ASYNCRSTR_ASRST22 | - HPB_DMAE_ASYNCRSTR_ASRST23, - .mdr = HPB_DMAE_ASYNCMDR_ASMD22_SINGLE | - HPB_DMAE_ASYNCMDR_ASBTMD22_NBURST, - .mdm = HPB_DMAE_ASYNCMDR_ASMD22_MASK | - HPB_DMAE_ASYNCMDR_ASBTMD22_MASK, - .port = 0x0D0C, - .flags = HPB_DMAE_SET_ASYNC_RESET | HPB_DMAE_SET_ASYNC_MODE, - .dma_ch = 22, - }, -}; - -static const struct hpb_dmae_channel hpb_dmae_channels[] = { - HPB_DMAE_CHANNEL(0x93, HPBDMA_SLAVE_SDHI0_TX), /* ch. 21 */ - HPB_DMAE_CHANNEL(0x93, HPBDMA_SLAVE_SDHI0_RX), /* ch. 22 */ -}; - -static struct hpb_dmae_pdata dma_platform_data __initdata = { - .slaves = hpb_dmae_slaves, - .num_slaves = ARRAY_SIZE(hpb_dmae_slaves), - .channels = hpb_dmae_channels, - .num_channels = ARRAY_SIZE(hpb_dmae_channels), - .ts_shift = { - [XMIT_SZ_8BIT] = 0, - [XMIT_SZ_16BIT] = 1, - [XMIT_SZ_32BIT] = 2, - }, - .num_hw_channels = 44, -}; - -static struct resource hpb_dmae_resources[] __initdata = { - /* Channel registers */ - DEFINE_RES_MEM(0xffc08000, 0x1000), - /* Common registers */ - DEFINE_RES_MEM(0xffc09000, 0x170), - /* Asynchronous reset registers */ - DEFINE_RES_MEM(0xffc00300, 4), - /* Asynchronous mode registers */ - DEFINE_RES_MEM(0xffc00400, 4), - /* IRQ for DMA channels */ - DEFINE_RES_NAMED(gic_iid(0x8e), 12, NULL, IORESOURCE_IRQ), -}; - -static void __init r8a7779_register_hpb_dmae(void) -{ - platform_device_register_resndata(NULL, "hpb-dma-engine", - -1, hpb_dmae_resources, - ARRAY_SIZE(hpb_dmae_resources), - &dma_platform_data, - sizeof(dma_platform_data)); -} - -static struct platform_device *r8a7779_early_devices[] __initdata = { - &tmu0_device, -}; - -static struct platform_device *r8a7779_standard_devices[] __initdata = { - &scif0_device, - &scif1_device, - &scif2_device, - &scif3_device, - &scif4_device, - &scif5_device, - &i2c0_device, - &i2c1_device, - &i2c2_device, - &i2c3_device, - &sata_device, -}; - -void __init r8a7779_add_standard_devices(void) -{ -#ifdef CONFIG_CACHE_L2X0 - /* Shared attribute override enable, 64K*16way */ - l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff); -#endif - r8a7779_pm_init(); - - r8a7779_init_pm_domains(); - - platform_add_devices(r8a7779_early_devices, - ARRAY_SIZE(r8a7779_early_devices)); - platform_add_devices(r8a7779_standard_devices, - ARRAY_SIZE(r8a7779_standard_devices)); - r8a7779_register_hpb_dmae(); -} - -void __init r8a7779_add_early_devices(void) -{ - early_platform_add_devices(r8a7779_early_devices, - ARRAY_SIZE(r8a7779_early_devices)); - - /* Early serial console setup is not included here due to - * memory map collisions. The SCIF serial ports in r8a7779 - * are difficult to identity map 1:1 due to collision with the - * virtual memory range used by the coherent DMA code on ARM. - * - * Anyone wanting to debug early can remove UPF_IOREMAP from - * the sh-sci serial console platform data, adjust mapbase - * to a static M:N virt:phys mapping that needs to be added to - * the mappings passed with iotable_init() above. - * - * Then add a call to shmobile_setup_console() from this function. 
- * - * As a final step pass earlyprint=sh-sci.2,115200 on the kernel - * command line in case of the marzen board. - */ -} - -static struct platform_device *r8a7779_late_devices[] __initdata = { - &ehci0_device, - &ehci1_device, - &ohci0_device, - &ohci1_device, -}; - -void __init r8a7779_init_late(void) -{ - /* get USB PHY */ - phy = usb_get_phy(USB_PHY_TYPE_USB2); - - shmobile_init_late(); - platform_add_devices(r8a7779_late_devices, - ARRAY_SIZE(r8a7779_late_devices)); -} - -#ifdef CONFIG_USE_OF -void __init r8a7779_init_irq_dt(void) -{ -#ifdef CONFIG_ARCH_SHMOBILE_LEGACY - void __iomem *gic_dist_base = ioremap_nocache(0xf0001000, 0x1000); - void __iomem *gic_cpu_base = ioremap_nocache(0xf0000100, 0x1000); -#endif gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE); -#ifdef CONFIG_ARCH_SHMOBILE_LEGACY - gic_init(0, 29, gic_dist_base, gic_cpu_base); -#else irqchip_init(); -#endif + /* route all interrupts to ARM */ __raw_writel(0xffffffff, INT2NTSR0); __raw_writel(0x3fffffff, INT2NTSR1); @@ -740,7 +80,7 @@ void __init r8a7779_init_irq_dt(void) #define MODEMR 0xffcc0020 -u32 __init r8a7779_read_mode_pins(void) +static u32 __init r8a7779_read_mode_pins(void) { static u32 mode; static bool mode_valid; @@ -756,16 +96,23 @@ u32 __init r8a7779_read_mode_pins(void) return mode; } -static const char *r8a7779_compat_dt[] __initdata = { +static void __init r8a7779_init_time(void) +{ + r8a7779_clocks_init(r8a7779_read_mode_pins()); + clocksource_of_init(); +} + +static const char *const r8a7779_compat_dt[] __initconst = { "renesas,r8a7779", NULL, }; DT_MACHINE_START(R8A7779_DT, "Generic R8A7779 (Flattened Device Tree)") + .smp = smp_ops(r8a7779_smp_ops), .map_io = r8a7779_map_io, .init_early = shmobile_init_delay, + .init_time = r8a7779_init_time, .init_irq = r8a7779_init_irq_dt, .init_late = shmobile_init_late, .dt_compat = r8a7779_compat_dt, MACHINE_END -#endif /* CONFIG_USE_OF */ diff --git a/arch/arm/mach-shmobile/setup-r8a7791.c b/arch/arm/mach-shmobile/setup-r8a7791.c index ef8eb3af586d..3b8dbaf07777 100644 --- a/arch/arm/mach-shmobile/setup-r8a7791.c +++ b/arch/arm/mach-shmobile/setup-r8a7791.c @@ -23,7 +23,7 @@ #include "r8a7791.h" #include "rcar-gen2.h" -static const char *r8a7791_boards_compat_dt[] __initdata = { +static const char *const r8a7791_boards_compat_dt[] __initconst = { "renesas,r8a7791", NULL, }; diff --git a/arch/arm/mach-shmobile/setup-r8a7793.c b/arch/arm/mach-shmobile/setup-r8a7793.c new file mode 100644 index 000000000000..1d2825cb7a65 --- /dev/null +++ b/arch/arm/mach-shmobile/setup-r8a7793.c @@ -0,0 +1,33 @@ +/* + * r8a7793 processor support + * + * Copyright (C) 2015 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/init.h> +#include <asm/mach/arch.h> + +#include "common.h" +#include "rcar-gen2.h" + +static const char *r8a7793_boards_compat_dt[] __initconst = { + "renesas,r8a7793", + NULL, +}; + +DT_MACHINE_START(R8A7793_DT, "Generic R8A7793 (Flattened Device Tree)") + .init_early = shmobile_init_delay, + .init_time = rcar_gen2_timer_init, + .init_late = shmobile_init_late, + .reserve = rcar_gen2_reserve, + .dt_compat = r8a7793_boards_compat_dt, +MACHINE_END diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c index 5d13595aa027..aa3339258d9c 100644 --- a/arch/arm/mach-shmobile/setup-rcar-gen2.c +++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c @@ -128,9 +128,7 @@ void __init rcar_gen2_timer_init(void) #endif /* CONFIG_ARM_ARCH_TIMER */ rcar_gen2_clocks_init(mode); -#ifdef CONFIG_ARCH_SHMOBILE_MULTI clocksource_of_init(); -#endif } struct memory_reserve_config { diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index fb2ab7590af8..99a2004cac76 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c @@ -18,28 +18,17 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> -#include <linux/platform_device.h> #include <linux/of_platform.h> #include <linux/delay.h> #include <linux/input.h> -#include <linux/i2c/i2c-sh_mobile.h> #include <linux/io.h> -#include <linux/serial_sci.h> -#include <linux/sh_dma.h> -#include <linux/sh_timer.h> -#include <linux/platform_data/sh_ipmmu.h> -#include <linux/platform_data/irq-renesas-intc-irqpin.h> #include <asm/hardware/cache-l2x0.h> -#include <asm/mach-types.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include "common.h" -#include "dma-register.h" -#include "intc.h" -#include "irqs.h" #include "sh73a0.h" static struct map_desc sh73a0_io_desc[] __initdata = { @@ -54,737 +43,12 @@ static struct map_desc sh73a0_io_desc[] __initdata = { }, }; -void __init sh73a0_map_io(void) +static void __init sh73a0_map_io(void) { debug_ll_io_init(); iotable_init(sh73a0_io_desc, ARRAY_SIZE(sh73a0_io_desc)); } -/* PFC */ -static struct resource pfc_resources[] __initdata = { - DEFINE_RES_MEM(0xe6050000, 0x8000), - DEFINE_RES_MEM(0xe605801c, 0x000c), -}; - -void __init sh73a0_pinmux_init(void) -{ - platform_device_register_simple("pfc-sh73a0", -1, pfc_resources, - ARRAY_SIZE(pfc_resources)); -} - -/* SCIF */ -#define SH73A0_SCIF(scif_type, index, baseaddr, irq) \ -static struct plat_sci_port scif##index##_platform_data = { \ - .type = scif_type, \ - .flags = UPF_BOOT_AUTOCONF, \ - .scscr = SCSCR_RE | SCSCR_TE, \ -}; \ - \ -static struct resource scif##index##_resources[] = { \ - DEFINE_RES_MEM(baseaddr, 0x100), \ - DEFINE_RES_IRQ(irq), \ -}; \ - \ -static struct platform_device scif##index##_device = { \ - .name = "sh-sci", \ - .id = index, \ - .resource = scif##index##_resources, \ - .num_resources = ARRAY_SIZE(scif##index##_resources), \ - .dev = { \ - .platform_data = &scif##index##_platform_data, \ - }, \ -} - -SH73A0_SCIF(PORT_SCIFA, 0, 0xe6c40000, gic_spi(72)); -SH73A0_SCIF(PORT_SCIFA, 1, 0xe6c50000, gic_spi(73)); -SH73A0_SCIF(PORT_SCIFA, 2, 0xe6c60000, gic_spi(74)); -SH73A0_SCIF(PORT_SCIFA, 3, 0xe6c70000, gic_spi(75)); -SH73A0_SCIF(PORT_SCIFA, 4, 0xe6c80000, gic_spi(78)); -SH73A0_SCIF(PORT_SCIFA, 5, 0xe6cb0000, gic_spi(79)); -SH73A0_SCIF(PORT_SCIFA, 6, 0xe6cc0000, gic_spi(156)); -SH73A0_SCIF(PORT_SCIFA, 7, 0xe6cd0000, gic_spi(143)); -SH73A0_SCIF(PORT_SCIFB, 8, 0xe6c30000, 
gic_spi(80)); - -static struct sh_timer_config cmt1_platform_data = { - .channels_mask = 0x3f, -}; - -static struct resource cmt1_resources[] = { - DEFINE_RES_MEM(0xe6138000, 0x200), - DEFINE_RES_IRQ(gic_spi(65)), -}; - -static struct platform_device cmt1_device = { - .name = "sh-cmt-48", - .id = 1, - .dev = { - .platform_data = &cmt1_platform_data, - }, - .resource = cmt1_resources, - .num_resources = ARRAY_SIZE(cmt1_resources), -}; - -/* TMU */ -static struct sh_timer_config tmu0_platform_data = { - .channels_mask = 7, -}; - -static struct resource tmu0_resources[] = { - DEFINE_RES_MEM(0xfff60000, 0x2c), - DEFINE_RES_IRQ(intcs_evt2irq(0xe80)), - DEFINE_RES_IRQ(intcs_evt2irq(0xea0)), - DEFINE_RES_IRQ(intcs_evt2irq(0xec0)), -}; - -static struct platform_device tmu0_device = { - .name = "sh-tmu", - .id = 0, - .dev = { - .platform_data = &tmu0_platform_data, - }, - .resource = tmu0_resources, - .num_resources = ARRAY_SIZE(tmu0_resources), -}; - -static struct resource i2c0_resources[] = { - [0] = DEFINE_RES_MEM(0xe6820000, 0x426), - [1] = { - .start = gic_spi(167), - .end = gic_spi(170), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct resource i2c1_resources[] = { - [0] = DEFINE_RES_MEM(0xe6822000, 0x426), - [1] = { - .start = gic_spi(51), - .end = gic_spi(54), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct resource i2c2_resources[] = { - [0] = DEFINE_RES_MEM(0xe6824000, 0x426), - [1] = { - .start = gic_spi(171), - .end = gic_spi(174), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct resource i2c3_resources[] = { - [0] = DEFINE_RES_MEM(0xe6826000, 0x426), - [1] = { - .start = gic_spi(183), - .end = gic_spi(186), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct resource i2c4_resources[] = { - [0] = DEFINE_RES_MEM(0xe6828000, 0x426), - [1] = { - .start = gic_spi(187), - .end = gic_spi(190), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct i2c_sh_mobile_platform_data i2c_platform_data = { - .clks_per_count = 2, -}; - -static struct platform_device i2c0_device = { - .name = "i2c-sh_mobile", - .id = 0, - .resource = i2c0_resources, - .num_resources = ARRAY_SIZE(i2c0_resources), - .dev = { - .platform_data = &i2c_platform_data, - }, -}; - -static struct platform_device i2c1_device = { - .name = "i2c-sh_mobile", - .id = 1, - .resource = i2c1_resources, - .num_resources = ARRAY_SIZE(i2c1_resources), - .dev = { - .platform_data = &i2c_platform_data, - }, -}; - -static struct platform_device i2c2_device = { - .name = "i2c-sh_mobile", - .id = 2, - .resource = i2c2_resources, - .num_resources = ARRAY_SIZE(i2c2_resources), - .dev = { - .platform_data = &i2c_platform_data, - }, -}; - -static struct platform_device i2c3_device = { - .name = "i2c-sh_mobile", - .id = 3, - .resource = i2c3_resources, - .num_resources = ARRAY_SIZE(i2c3_resources), - .dev = { - .platform_data = &i2c_platform_data, - }, -}; - -static struct platform_device i2c4_device = { - .name = "i2c-sh_mobile", - .id = 4, - .resource = i2c4_resources, - .num_resources = ARRAY_SIZE(i2c4_resources), - .dev = { - .platform_data = &i2c_platform_data, - }, -}; - -static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = { - { - .slave_id = SHDMA_SLAVE_SCIF0_TX, - .addr = 0xe6c40020, - .chcr = CHCR_TX(XMIT_SZ_8BIT), - .mid_rid = 0x21, - }, { - .slave_id = SHDMA_SLAVE_SCIF0_RX, - .addr = 0xe6c40024, - .chcr = CHCR_RX(XMIT_SZ_8BIT), - .mid_rid = 0x22, - }, { - .slave_id = SHDMA_SLAVE_SCIF1_TX, - .addr = 0xe6c50020, - .chcr = CHCR_TX(XMIT_SZ_8BIT), - .mid_rid = 0x25, - }, { - .slave_id = SHDMA_SLAVE_SCIF1_RX, - .addr = 
0xe6c50024, - .chcr = CHCR_RX(XMIT_SZ_8BIT), - .mid_rid = 0x26, - }, { - .slave_id = SHDMA_SLAVE_SCIF2_TX, - .addr = 0xe6c60020, - .chcr = CHCR_TX(XMIT_SZ_8BIT), - .mid_rid = 0x29, - }, { - .slave_id = SHDMA_SLAVE_SCIF2_RX, - .addr = 0xe6c60024, - .chcr = CHCR_RX(XMIT_SZ_8BIT), - .mid_rid = 0x2a, - }, { - .slave_id = SHDMA_SLAVE_SCIF3_TX, - .addr = 0xe6c70020, - .chcr = CHCR_TX(XMIT_SZ_8BIT), - .mid_rid = 0x2d, - }, { - .slave_id = SHDMA_SLAVE_SCIF3_RX, - .addr = 0xe6c70024, - .chcr = CHCR_RX(XMIT_SZ_8BIT), - .mid_rid = 0x2e, - }, { - .slave_id = SHDMA_SLAVE_SCIF4_TX, - .addr = 0xe6c80020, - .chcr = CHCR_TX(XMIT_SZ_8BIT), - .mid_rid = 0x39, - }, { - .slave_id = SHDMA_SLAVE_SCIF4_RX, - .addr = 0xe6c80024, - .chcr = CHCR_RX(XMIT_SZ_8BIT), - .mid_rid = 0x3a, - }, { - .slave_id = SHDMA_SLAVE_SCIF5_TX, - .addr = 0xe6cb0020, - .chcr = CHCR_TX(XMIT_SZ_8BIT), - .mid_rid = 0x35, - }, { - .slave_id = SHDMA_SLAVE_SCIF5_RX, - .addr = 0xe6cb0024, - .chcr = CHCR_RX(XMIT_SZ_8BIT), - .mid_rid = 0x36, - }, { - .slave_id = SHDMA_SLAVE_SCIF6_TX, - .addr = 0xe6cc0020, - .chcr = CHCR_TX(XMIT_SZ_8BIT), - .mid_rid = 0x1d, - }, { - .slave_id = SHDMA_SLAVE_SCIF6_RX, - .addr = 0xe6cc0024, - .chcr = CHCR_RX(XMIT_SZ_8BIT), - .mid_rid = 0x1e, - }, { - .slave_id = SHDMA_SLAVE_SCIF7_TX, - .addr = 0xe6cd0020, - .chcr = CHCR_TX(XMIT_SZ_8BIT), - .mid_rid = 0x19, - }, { - .slave_id = SHDMA_SLAVE_SCIF7_RX, - .addr = 0xe6cd0024, - .chcr = CHCR_RX(XMIT_SZ_8BIT), - .mid_rid = 0x1a, - }, { - .slave_id = SHDMA_SLAVE_SCIF8_TX, - .addr = 0xe6c30040, - .chcr = CHCR_TX(XMIT_SZ_8BIT), - .mid_rid = 0x3d, - }, { - .slave_id = SHDMA_SLAVE_SCIF8_RX, - .addr = 0xe6c30060, - .chcr = CHCR_RX(XMIT_SZ_8BIT), - .mid_rid = 0x3e, - }, { - .slave_id = SHDMA_SLAVE_SDHI0_TX, - .addr = 0xee100030, - .chcr = CHCR_TX(XMIT_SZ_16BIT), - .mid_rid = 0xc1, - }, { - .slave_id = SHDMA_SLAVE_SDHI0_RX, - .addr = 0xee100030, - .chcr = CHCR_RX(XMIT_SZ_16BIT), - .mid_rid = 0xc2, - }, { - .slave_id = SHDMA_SLAVE_SDHI1_TX, - .addr = 0xee120030, - .chcr = CHCR_TX(XMIT_SZ_16BIT), - .mid_rid = 0xc9, - }, { - .slave_id = SHDMA_SLAVE_SDHI1_RX, - .addr = 0xee120030, - .chcr = CHCR_RX(XMIT_SZ_16BIT), - .mid_rid = 0xca, - }, { - .slave_id = SHDMA_SLAVE_SDHI2_TX, - .addr = 0xee140030, - .chcr = CHCR_TX(XMIT_SZ_16BIT), - .mid_rid = 0xcd, - }, { - .slave_id = SHDMA_SLAVE_SDHI2_RX, - .addr = 0xee140030, - .chcr = CHCR_RX(XMIT_SZ_16BIT), - .mid_rid = 0xce, - }, { - .slave_id = SHDMA_SLAVE_MMCIF_TX, - .addr = 0xe6bd0034, - .chcr = CHCR_TX(XMIT_SZ_32BIT), - .mid_rid = 0xd1, - }, { - .slave_id = SHDMA_SLAVE_MMCIF_RX, - .addr = 0xe6bd0034, - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0xd2, - }, -}; - -#define DMAE_CHANNEL(_offset) \ - { \ - .offset = _offset - 0x20, \ - .dmars = _offset - 0x20 + 0x40, \ - } - -static const struct sh_dmae_channel sh73a0_dmae_channels[] = { - DMAE_CHANNEL(0x8000), - DMAE_CHANNEL(0x8080), - DMAE_CHANNEL(0x8100), - DMAE_CHANNEL(0x8180), - DMAE_CHANNEL(0x8200), - DMAE_CHANNEL(0x8280), - DMAE_CHANNEL(0x8300), - DMAE_CHANNEL(0x8380), - DMAE_CHANNEL(0x8400), - DMAE_CHANNEL(0x8480), - DMAE_CHANNEL(0x8500), - DMAE_CHANNEL(0x8580), - DMAE_CHANNEL(0x8600), - DMAE_CHANNEL(0x8680), - DMAE_CHANNEL(0x8700), - DMAE_CHANNEL(0x8780), - DMAE_CHANNEL(0x8800), - DMAE_CHANNEL(0x8880), - DMAE_CHANNEL(0x8900), - DMAE_CHANNEL(0x8980), -}; - -static struct sh_dmae_pdata sh73a0_dmae_platform_data = { - .slave = sh73a0_dmae_slaves, - .slave_num = ARRAY_SIZE(sh73a0_dmae_slaves), - .channel = sh73a0_dmae_channels, - .channel_num = ARRAY_SIZE(sh73a0_dmae_channels), - 
.ts_low_shift = TS_LOW_SHIFT, - .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, - .ts_high_shift = TS_HI_SHIFT, - .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, - .ts_shift = dma_ts_shift, - .ts_shift_num = ARRAY_SIZE(dma_ts_shift), - .dmaor_init = DMAOR_DME, -}; - -static struct resource sh73a0_dmae_resources[] = { - DEFINE_RES_MEM(0xfe000020, 0x89e0), - { - .name = "error_irq", - .start = gic_spi(129), - .end = gic_spi(129), - .flags = IORESOURCE_IRQ, - }, - { - /* IRQ for channels 0-19 */ - .start = gic_spi(109), - .end = gic_spi(128), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device dma0_device = { - .name = "sh-dma-engine", - .id = 0, - .resource = sh73a0_dmae_resources, - .num_resources = ARRAY_SIZE(sh73a0_dmae_resources), - .dev = { - .platform_data = &sh73a0_dmae_platform_data, - }, -}; - -/* MPDMAC */ -static const struct sh_dmae_slave_config sh73a0_mpdma_slaves[] = { - { - .slave_id = SHDMA_SLAVE_FSI2A_RX, - .addr = 0xec230020, - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0xd6, /* CHECK ME */ - }, { - .slave_id = SHDMA_SLAVE_FSI2A_TX, - .addr = 0xec230024, - .chcr = CHCR_TX(XMIT_SZ_32BIT), - .mid_rid = 0xd5, /* CHECK ME */ - }, { - .slave_id = SHDMA_SLAVE_FSI2C_RX, - .addr = 0xec230060, - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0xda, /* CHECK ME */ - }, { - .slave_id = SHDMA_SLAVE_FSI2C_TX, - .addr = 0xec230064, - .chcr = CHCR_TX(XMIT_SZ_32BIT), - .mid_rid = 0xd9, /* CHECK ME */ - }, { - .slave_id = SHDMA_SLAVE_FSI2B_RX, - .addr = 0xec240020, - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0x8e, /* CHECK ME */ - }, { - .slave_id = SHDMA_SLAVE_FSI2B_TX, - .addr = 0xec240024, - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0x8d, /* CHECK ME */ - }, { - .slave_id = SHDMA_SLAVE_FSI2D_RX, - .addr = 0xec240060, - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0x9a, /* CHECK ME */ - }, -}; - -#define MPDMA_CHANNEL(a, b, c) \ -{ \ - .offset = a, \ - .dmars = b, \ - .dmars_bit = c, \ - .chclr_offset = (0x220 - 0x20) + a \ -} - -static const struct sh_dmae_channel sh73a0_mpdma_channels[] = { - MPDMA_CHANNEL(0x00, 0, 0), - MPDMA_CHANNEL(0x10, 0, 8), - MPDMA_CHANNEL(0x20, 4, 0), - MPDMA_CHANNEL(0x30, 4, 8), - MPDMA_CHANNEL(0x50, 8, 0), - MPDMA_CHANNEL(0x70, 8, 8), -}; - -static struct sh_dmae_pdata sh73a0_mpdma_platform_data = { - .slave = sh73a0_mpdma_slaves, - .slave_num = ARRAY_SIZE(sh73a0_mpdma_slaves), - .channel = sh73a0_mpdma_channels, - .channel_num = ARRAY_SIZE(sh73a0_mpdma_channels), - .ts_low_shift = TS_LOW_SHIFT, - .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, - .ts_high_shift = TS_HI_SHIFT, - .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, - .ts_shift = dma_ts_shift, - .ts_shift_num = ARRAY_SIZE(dma_ts_shift), - .dmaor_init = DMAOR_DME, - .chclr_present = 1, -}; - -/* Resource order important! 
*/ -static struct resource sh73a0_mpdma_resources[] = { - /* Channel registers and DMAOR */ - DEFINE_RES_MEM(0xec618020, 0x270), - /* DMARSx */ - DEFINE_RES_MEM(0xec619000, 0xc), - { - .name = "error_irq", - .start = gic_spi(181), - .end = gic_spi(181), - .flags = IORESOURCE_IRQ, - }, - { - /* IRQ for channels 0-5 */ - .start = gic_spi(175), - .end = gic_spi(180), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device mpdma0_device = { - .name = "sh-dma-engine", - .id = 1, - .resource = sh73a0_mpdma_resources, - .num_resources = ARRAY_SIZE(sh73a0_mpdma_resources), - .dev = { - .platform_data = &sh73a0_mpdma_platform_data, - }, -}; - -static struct resource pmu_resources[] = { - [0] = { - .start = gic_spi(55), - .end = gic_spi(55), - .flags = IORESOURCE_IRQ, - }, - [1] = { - .start = gic_spi(56), - .end = gic_spi(56), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device pmu_device = { - .name = "armv7-pmu", - .id = -1, - .num_resources = ARRAY_SIZE(pmu_resources), - .resource = pmu_resources, -}; - -/* an IPMMU module for ICB */ -static struct resource ipmmu_resources[] = { - DEFINE_RES_MEM(0xfe951000, 0x100), -}; - -static const char * const ipmmu_dev_names[] = { - "sh_mobile_lcdc_fb.0", -}; - -static struct shmobile_ipmmu_platform_data ipmmu_platform_data = { - .dev_names = ipmmu_dev_names, - .num_dev_names = ARRAY_SIZE(ipmmu_dev_names), -}; - -static struct platform_device ipmmu_device = { - .name = "ipmmu", - .id = -1, - .dev = { - .platform_data = &ipmmu_platform_data, - }, - .resource = ipmmu_resources, - .num_resources = ARRAY_SIZE(ipmmu_resources), -}; - -static struct renesas_intc_irqpin_config irqpin0_platform_data = { - .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */ - .control_parent = true, -}; - -static struct resource irqpin0_resources[] = { - DEFINE_RES_MEM(0xe6900000, 4), /* ICR1A */ - DEFINE_RES_MEM(0xe6900010, 4), /* INTPRI00A */ - DEFINE_RES_MEM(0xe6900020, 1), /* INTREQ00A */ - DEFINE_RES_MEM(0xe6900040, 1), /* INTMSK00A */ - DEFINE_RES_MEM(0xe6900060, 1), /* INTMSKCLR00A */ - DEFINE_RES_IRQ(gic_spi(1)), /* IRQ0 */ - DEFINE_RES_IRQ(gic_spi(2)), /* IRQ1 */ - DEFINE_RES_IRQ(gic_spi(3)), /* IRQ2 */ - DEFINE_RES_IRQ(gic_spi(4)), /* IRQ3 */ - DEFINE_RES_IRQ(gic_spi(5)), /* IRQ4 */ - DEFINE_RES_IRQ(gic_spi(6)), /* IRQ5 */ - DEFINE_RES_IRQ(gic_spi(7)), /* IRQ6 */ - DEFINE_RES_IRQ(gic_spi(8)), /* IRQ7 */ -}; - -static struct platform_device irqpin0_device = { - .name = "renesas_intc_irqpin", - .id = 0, - .resource = irqpin0_resources, - .num_resources = ARRAY_SIZE(irqpin0_resources), - .dev = { - .platform_data = &irqpin0_platform_data, - }, -}; - -static struct renesas_intc_irqpin_config irqpin1_platform_data = { - .irq_base = irq_pin(8), /* IRQ8 -> IRQ15 */ - .control_parent = true, /* Disable spurious IRQ10 */ -}; - -static struct resource irqpin1_resources[] = { - DEFINE_RES_MEM(0xe6900004, 4), /* ICR2A */ - DEFINE_RES_MEM(0xe6900014, 4), /* INTPRI10A */ - DEFINE_RES_MEM(0xe6900024, 1), /* INTREQ10A */ - DEFINE_RES_MEM(0xe6900044, 1), /* INTMSK10A */ - DEFINE_RES_MEM(0xe6900064, 1), /* INTMSKCLR10A */ - DEFINE_RES_IRQ(gic_spi(9)), /* IRQ8 */ - DEFINE_RES_IRQ(gic_spi(10)), /* IRQ9 */ - DEFINE_RES_IRQ(gic_spi(11)), /* IRQ10 */ - DEFINE_RES_IRQ(gic_spi(12)), /* IRQ11 */ - DEFINE_RES_IRQ(gic_spi(13)), /* IRQ12 */ - DEFINE_RES_IRQ(gic_spi(14)), /* IRQ13 */ - DEFINE_RES_IRQ(gic_spi(15)), /* IRQ14 */ - DEFINE_RES_IRQ(gic_spi(16)), /* IRQ15 */ -}; - -static struct platform_device irqpin1_device = { - .name = "renesas_intc_irqpin", - .id = 1, - .resource = 
irqpin1_resources, - .num_resources = ARRAY_SIZE(irqpin1_resources), - .dev = { - .platform_data = &irqpin1_platform_data, - }, -}; - -static struct renesas_intc_irqpin_config irqpin2_platform_data = { - .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */ - .control_parent = true, -}; - -static struct resource irqpin2_resources[] = { - DEFINE_RES_MEM(0xe6900008, 4), /* ICR3A */ - DEFINE_RES_MEM(0xe6900018, 4), /* INTPRI20A */ - DEFINE_RES_MEM(0xe6900028, 1), /* INTREQ20A */ - DEFINE_RES_MEM(0xe6900048, 1), /* INTMSK20A */ - DEFINE_RES_MEM(0xe6900068, 1), /* INTMSKCLR20A */ - DEFINE_RES_IRQ(gic_spi(17)), /* IRQ16 */ - DEFINE_RES_IRQ(gic_spi(18)), /* IRQ17 */ - DEFINE_RES_IRQ(gic_spi(19)), /* IRQ18 */ - DEFINE_RES_IRQ(gic_spi(20)), /* IRQ19 */ - DEFINE_RES_IRQ(gic_spi(21)), /* IRQ20 */ - DEFINE_RES_IRQ(gic_spi(22)), /* IRQ21 */ - DEFINE_RES_IRQ(gic_spi(23)), /* IRQ22 */ - DEFINE_RES_IRQ(gic_spi(24)), /* IRQ23 */ -}; - -static struct platform_device irqpin2_device = { - .name = "renesas_intc_irqpin", - .id = 2, - .resource = irqpin2_resources, - .num_resources = ARRAY_SIZE(irqpin2_resources), - .dev = { - .platform_data = &irqpin2_platform_data, - }, -}; - -static struct renesas_intc_irqpin_config irqpin3_platform_data = { - .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */ - .control_parent = true, -}; - -static struct resource irqpin3_resources[] = { - DEFINE_RES_MEM(0xe690000c, 4), /* ICR4A */ - DEFINE_RES_MEM(0xe690001c, 4), /* INTPRI30A */ - DEFINE_RES_MEM(0xe690002c, 1), /* INTREQ30A */ - DEFINE_RES_MEM(0xe690004c, 1), /* INTMSK30A */ - DEFINE_RES_MEM(0xe690006c, 1), /* INTMSKCLR30A */ - DEFINE_RES_IRQ(gic_spi(25)), /* IRQ24 */ - DEFINE_RES_IRQ(gic_spi(26)), /* IRQ25 */ - DEFINE_RES_IRQ(gic_spi(27)), /* IRQ26 */ - DEFINE_RES_IRQ(gic_spi(28)), /* IRQ27 */ - DEFINE_RES_IRQ(gic_spi(29)), /* IRQ28 */ - DEFINE_RES_IRQ(gic_spi(30)), /* IRQ29 */ - DEFINE_RES_IRQ(gic_spi(31)), /* IRQ30 */ - DEFINE_RES_IRQ(gic_spi(32)), /* IRQ31 */ -}; - -static struct platform_device irqpin3_device = { - .name = "renesas_intc_irqpin", - .id = 3, - .resource = irqpin3_resources, - .num_resources = ARRAY_SIZE(irqpin3_resources), - .dev = { - .platform_data = &irqpin3_platform_data, - }, -}; - -static struct platform_device *sh73a0_early_devices[] __initdata = { - &scif0_device, - &scif1_device, - &scif2_device, - &scif3_device, - &scif4_device, - &scif5_device, - &scif6_device, - &scif7_device, - &scif8_device, - &tmu0_device, - &ipmmu_device, - &cmt1_device, -}; - -static struct platform_device *sh73a0_late_devices[] __initdata = { - &i2c0_device, - &i2c1_device, - &i2c2_device, - &i2c3_device, - &i2c4_device, - &dma0_device, - &mpdma0_device, - &pmu_device, - &irqpin0_device, - &irqpin1_device, - &irqpin2_device, - &irqpin3_device, -}; - -#define SRCR2 IOMEM(0xe61580b0) - -void __init sh73a0_add_standard_devices(void) -{ - /* Clear software reset bit on SY-DMAC module */ - __raw_writel(__raw_readl(SRCR2) & ~(1 << 18), SRCR2); - - platform_add_devices(sh73a0_early_devices, - ARRAY_SIZE(sh73a0_early_devices)); - platform_add_devices(sh73a0_late_devices, - ARRAY_SIZE(sh73a0_late_devices)); -} - -/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ -void __init __weak sh73a0_register_twd(void) { } - -void __init sh73a0_earlytimer_init(void) -{ - shmobile_init_delay(); -#ifndef CONFIG_COMMON_CLK - sh73a0_clock_init(); -#endif - shmobile_earlytimer_init(); - sh73a0_register_twd(); -} - -void __init sh73a0_add_early_devices(void) -{ - early_platform_add_devices(sh73a0_early_devices, - ARRAY_SIZE(sh73a0_early_devices)); - - /* 
setup early console here as well */ - shmobile_setup_console(); -} - -#ifdef CONFIG_USE_OF - static void __init sh73a0_generic_init(void) { #ifdef CONFIG_CACHE_L2X0 @@ -794,7 +58,7 @@ static void __init sh73a0_generic_init(void) of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } -static const char *sh73a0_boards_compat_dt[] __initdata = { +static const char *const sh73a0_boards_compat_dt[] __initconst = { "renesas,sh73a0", NULL, }; @@ -807,4 +71,3 @@ DT_MACHINE_START(SH73A0_DT, "Generic SH73A0 (Flattened Device Tree)") .init_late = shmobile_init_late, .dt_compat = sh73a0_boards_compat_dt, MACHINE_END -#endif /* CONFIG_USE_OF */ diff --git a/arch/arm/mach-shmobile/sh73a0.h b/arch/arm/mach-shmobile/sh73a0.h index 5a80f18b4fa0..39646806cf64 100644 --- a/arch/arm/mach-shmobile/sh73a0.h +++ b/arch/arm/mach-shmobile/sh73a0.h @@ -1,89 +1,6 @@ #ifndef __ASM_SH73A0_H__ #define __ASM_SH73A0_H__ -/* DMA slave IDs */ -enum { - SHDMA_SLAVE_INVALID, - SHDMA_SLAVE_SCIF0_TX, - SHDMA_SLAVE_SCIF0_RX, - SHDMA_SLAVE_SCIF1_TX, - SHDMA_SLAVE_SCIF1_RX, - SHDMA_SLAVE_SCIF2_TX, - SHDMA_SLAVE_SCIF2_RX, - SHDMA_SLAVE_SCIF3_TX, - SHDMA_SLAVE_SCIF3_RX, - SHDMA_SLAVE_SCIF4_TX, - SHDMA_SLAVE_SCIF4_RX, - SHDMA_SLAVE_SCIF5_TX, - SHDMA_SLAVE_SCIF5_RX, - SHDMA_SLAVE_SCIF6_TX, - SHDMA_SLAVE_SCIF6_RX, - SHDMA_SLAVE_SCIF7_TX, - SHDMA_SLAVE_SCIF7_RX, - SHDMA_SLAVE_SCIF8_TX, - SHDMA_SLAVE_SCIF8_RX, - SHDMA_SLAVE_SDHI0_TX, - SHDMA_SLAVE_SDHI0_RX, - SHDMA_SLAVE_SDHI1_TX, - SHDMA_SLAVE_SDHI1_RX, - SHDMA_SLAVE_SDHI2_TX, - SHDMA_SLAVE_SDHI2_RX, - SHDMA_SLAVE_MMCIF_TX, - SHDMA_SLAVE_MMCIF_RX, - SHDMA_SLAVE_FSI2A_TX, - SHDMA_SLAVE_FSI2A_RX, - SHDMA_SLAVE_FSI2B_TX, - SHDMA_SLAVE_FSI2B_RX, - SHDMA_SLAVE_FSI2C_TX, - SHDMA_SLAVE_FSI2C_RX, - SHDMA_SLAVE_FSI2D_RX, -}; - -/* - * SH73A0 IRQ LOCATION TABLE - * - * 416 ----------------------------------------- - * IRQ0-IRQ15 - * 431 ----------------------------------------- - * ... - * 448 ----------------------------------------- - * sh73a0-intcs - * sh73a0-intca-irq-pins - * 680 ----------------------------------------- - * ... - * 700 ----------------------------------------- - * sh73a0-pint0 - * 731 ----------------------------------------- - * 732 ----------------------------------------- - * sh73a0-pint1 - * 739 ----------------------------------------- - * ... - * 800 ----------------------------------------- - * IRQ16-IRQ31 - * 815 ----------------------------------------- - * ... 
- * 928 ----------------------------------------- - * sh73a0-intca-irq-pins - * 943 ----------------------------------------- - */ - -/* PINT interrupts are located at Linux IRQ 700 and up */ -#define SH73A0_PINT0_IRQ(irq) ((irq) + 700) -#define SH73A0_PINT1_IRQ(irq) ((irq) + 732) - -extern void sh73a0_init_irq(void); -extern void sh73a0_init_irq_dt(void); -extern void sh73a0_map_io(void); -extern void sh73a0_earlytimer_init(void); -extern void sh73a0_add_early_devices(void); -extern void sh73a0_add_standard_devices(void); -extern void sh73a0_clock_init(void); -extern void sh73a0_pinmux_init(void); -extern void sh73a0_pm_init(void); -extern struct clk sh73a0_extal1_clk; -extern struct clk sh73a0_extal2_clk; -extern struct clk sh73a0_extcki_clk; -extern struct clk sh73a0_extalr_clk; extern struct smp_operations sh73a0_smp_ops; #endif /* __ASM_SH73A0_H__ */ diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c index 01f792fcb220..353562b8a5ee 100644 --- a/arch/arm/mach-shmobile/smp-r8a7779.c +++ b/arch/arm/mach-shmobile/smp-r8a7779.c @@ -23,7 +23,6 @@ #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> -#include <asm/smp_twd.h> #include "common.h" #include "pm-rcar.h" @@ -32,41 +31,33 @@ #define AVECR IOMEM(0xfe700040) #define R8A7779_SCU_BASE 0xf0000000 -static struct rcar_sysc_ch r8a7779_ch_cpu1 = { +static const struct rcar_sysc_ch r8a7779_ch_cpu1 = { .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */ .chan_bit = 1, /* ARM1 */ .isr_bit = 1, /* ARM1 */ }; -static struct rcar_sysc_ch r8a7779_ch_cpu2 = { +static const struct rcar_sysc_ch r8a7779_ch_cpu2 = { .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */ .chan_bit = 2, /* ARM2 */ .isr_bit = 2, /* ARM2 */ }; -static struct rcar_sysc_ch r8a7779_ch_cpu3 = { +static const struct rcar_sysc_ch r8a7779_ch_cpu3 = { .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */ .chan_bit = 3, /* ARM3 */ .isr_bit = 3, /* ARM3 */ }; -static struct rcar_sysc_ch *r8a7779_ch_cpu[4] = { +static const struct rcar_sysc_ch * const r8a7779_ch_cpu[4] = { [1] = &r8a7779_ch_cpu1, [2] = &r8a7779_ch_cpu2, [3] = &r8a7779_ch_cpu3, }; -#if defined(CONFIG_HAVE_ARM_TWD) && !defined(CONFIG_ARCH_MULTIPLATFORM) -static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, R8A7779_SCU_BASE + 0x600, 29); -void __init r8a7779_register_twd(void) -{ - twd_local_timer_register(&twd_local_timer); -} -#endif - static int r8a7779_platform_cpu_kill(unsigned int cpu) { - struct rcar_sysc_ch *ch = NULL; + const struct rcar_sysc_ch *ch = NULL; int ret = -EIO; cpu = cpu_logical_map(cpu); @@ -82,7 +73,7 @@ static int r8a7779_platform_cpu_kill(unsigned int cpu) static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle) { - struct rcar_sysc_ch *ch = NULL; + const struct rcar_sysc_ch *ch = NULL; unsigned int lcpu = cpu_logical_map(cpu); int ret; diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c index 930f45cbc08a..2ef0054ce934 100644 --- a/arch/arm/mach-shmobile/smp-r8a7790.c +++ b/arch/arm/mach-shmobile/smp-r8a7790.c @@ -26,12 +26,12 @@ #include "rcar-gen2.h" #include "r8a7790.h" -static struct rcar_sysc_ch r8a7790_ca15_scu = { +static const struct rcar_sysc_ch r8a7790_ca15_scu = { .chan_offs = 0x180, /* PWRSR5 .. PWRER5 */ .isr_bit = 12, /* CA15-SCU */ }; -static struct rcar_sysc_ch r8a7790_ca7_scu = { +static const struct rcar_sysc_ch r8a7790_ca7_scu = { .chan_offs = 0x100, /* PWRSR3 .. 
PWRER3 */ .isr_bit = 21, /* CA7-SCU */ }; diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c index 2106d6b76a06..d03aa11fb46d 100644 --- a/arch/arm/mach-shmobile/smp-sh73a0.c +++ b/arch/arm/mach-shmobile/smp-sh73a0.c @@ -33,14 +33,6 @@ #define SH73A0_SCU_BASE 0xf0000000 -#if defined(CONFIG_HAVE_ARM_TWD) && !defined(CONFIG_ARCH_MULTIPLATFORM) -static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, SH73A0_SCU_BASE + 0x600, 29); -void __init sh73a0_register_twd(void) -{ - twd_local_timer_register(&twd_local_timer); -} -#endif - static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle) { unsigned int lcpu = cpu_logical_map(cpu); diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c index 0edf2a6d2bbe..f1d027aa7a81 100644 --- a/arch/arm/mach-shmobile/timer.c +++ b/arch/arm/mach-shmobile/timer.c @@ -70,18 +70,6 @@ void __init shmobile_init_delay(void) if (!max_freq) return; -#ifdef CONFIG_ARCH_SHMOBILE_LEGACY - /* Non-multiplatform r8a73a4 SoC cannot use arch timer due - * to GIC being initialized from C and arch timer via DT */ - if (of_machine_is_compatible("renesas,r8a73a4")) - has_arch_timer = false; - - /* Non-multiplatform r8a7790 SoC cannot use arch timer due - * to GIC being initialized from C and arch timer via DT */ - if (of_machine_is_compatible("renesas,r8a7790")) - has_arch_timer = false; -#endif - if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { if (is_a7_a8_a9) shmobile_setup_delay_hz(max_freq, 1, 3); diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index 7259c3732702..5bc6ea87cdf7 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h @@ -25,6 +25,7 @@ #define SOCFPGA_RSTMGR_MODPERRST 0x14 #define SOCFPGA_RSTMGR_BRGMODRST 0x1c +#define SOCFPGA_A10_RSTMGR_CTRL 0xC #define SOCFPGA_A10_RSTMGR_MODMPURST 0x20 /* System Manager bits */ diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c index c6f1df89f9af..15c8ce8965f4 100644 --- a/arch/arm/mach-socfpga/platsmp.c +++ b/arch/arm/mach-socfpga/platsmp.c @@ -106,11 +106,23 @@ static void socfpga_cpu_die(unsigned int cpu) cpu_do_idle(); } +/* + * We need a dummy function so that platform_can_cpu_hotplug() knows + * we support CPU hotplug. However, the function does not need to do + * anything, because CPUs going offline just do WFI. We could reset + * the CPUs but it would increase power consumption. 
+ */ +static int socfpga_cpu_kill(unsigned int cpu) +{ + return 1; +} + static struct smp_operations socfpga_smp_ops __initdata = { .smp_prepare_cpus = socfpga_smp_prepare_cpus, .smp_boot_secondary = socfpga_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = socfpga_cpu_die, + .cpu_kill = socfpga_cpu_kill, #endif }; @@ -119,6 +131,7 @@ static struct smp_operations socfpga_a10_smp_ops __initdata = { .smp_boot_secondary = socfpga_a10_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = socfpga_cpu_die, + .cpu_kill = socfpga_cpu_kill, #endif }; diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 19643a756c48..a1c0efaa8794 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c @@ -74,6 +74,19 @@ static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) writel(temp, rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); } +static void socfpga_arria10_restart(enum reboot_mode mode, const char *cmd) +{ + u32 temp; + + temp = readl(rst_manager_base_addr + SOCFPGA_A10_RSTMGR_CTRL); + + if (mode == REBOOT_HARD) + temp |= RSTMGR_CTRL_SWCOLDRSTREQ; + else + temp |= RSTMGR_CTRL_SWWARMRSTREQ; + writel(temp, rst_manager_base_addr + SOCFPGA_A10_RSTMGR_CTRL); +} + static const char *altera_dt_match[] = { "altr,socfpga", NULL @@ -86,3 +99,16 @@ DT_MACHINE_START(SOCFPGA, "Altera SOCFPGA") .restart = socfpga_cyclone5_restart, .dt_compat = altera_dt_match, MACHINE_END + +static const char *altera_a10_dt_match[] = { + "altr,socfpga-arria10", + NULL +}; + +DT_MACHINE_START(SOCFPGA_A10, "Altera SOCFPGA Arria10") + .l2c_aux_val = 0, + .l2c_aux_mask = ~0, + .init_irq = socfpga_init_irq, + .restart = socfpga_arria10_restart, + .dt_compat = altera_a10_dt_match, +MACHINE_END diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h index a99d90a4d09c..06640914d9a0 100644 --- a/arch/arm/mach-spear/generic.h +++ b/arch/arm/mach-spear/generic.h @@ -3,7 +3,7 @@ * * Copyright (C) 2009-2012 ST Microelectronics * Rajeev Kumar <rajeev-dlh.kumar@st.com> - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/include/mach/irqs.h b/arch/arm/mach-spear/include/mach/irqs.h index 92da0a8c6bce..7058720c5278 100644 --- a/arch/arm/mach-spear/include/mach/irqs.h +++ b/arch/arm/mach-spear/include/mach/irqs.h @@ -3,7 +3,7 @@ * * Copyright (C) 2009-2012 ST Microelectronics * Rajeev Kumar <rajeev-dlh.kumar@st.com> - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/include/mach/misc_regs.h b/arch/arm/mach-spear/include/mach/misc_regs.h index 935639ce59ba..cfaf7c665b58 100644 --- a/arch/arm/mach-spear/include/mach/misc_regs.h +++ b/arch/arm/mach-spear/include/mach/misc_regs.h @@ -4,7 +4,7 @@ * Miscellaneous registers definitions for SPEAr3xx machine family * * Copyright (C) 2009 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/include/mach/spear.h b/arch/arm/mach-spear/include/mach/spear.h index f2d6a0176575..5ed841ccf8a3 100644 --- a/arch/arm/mach-spear/include/mach/spear.h +++ b/arch/arm/mach-spear/include/mach/spear.h @@ -3,7 +3,7 @@ * * Copyright (C) 2009,2012 ST Microelectronics * Rajeev Kumar<rajeev-dlh.kumar@st.com> - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/include/mach/uncompress.h b/arch/arm/mach-spear/include/mach/uncompress.h index 51b2dc93e4da..8439b9c12edb 100644 --- a/arch/arm/mach-spear/include/mach/uncompress.h +++ b/arch/arm/mach-spear/include/mach/uncompress.h @@ -4,7 +4,7 @@ * Serial port stubs for kernel decompress status messages * * Copyright (C) 2009 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/pl080.c b/arch/arm/mach-spear/pl080.c index cfa1199d0f4a..b4529f3e0ee9 100644 --- a/arch/arm/mach-spear/pl080.c +++ b/arch/arm/mach-spear/pl080.c @@ -4,7 +4,7 @@ * DMAC pl080 definitions for SPEAr platform * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/pl080.h b/arch/arm/mach-spear/pl080.h index eb6590ded40d..608dec6725ae 100644 --- a/arch/arm/mach-spear/pl080.h +++ b/arch/arm/mach-spear/pl080.h @@ -4,7 +4,7 @@ * DMAC pl080 definitions for SPEAr platform * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/restart.c b/arch/arm/mach-spear/restart.c index ce5e098c4888..b4342155a783 100644 --- a/arch/arm/mach-spear/restart.c +++ b/arch/arm/mach-spear/restart.c @@ -4,7 +4,7 @@ * SPEAr platform specific restart functions * * Copyright (C) 2009 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/spear1310.c b/arch/arm/mach-spear/spear1310.c index d9ce4d8000f0..cd5d375d91f0 100644 --- a/arch/arm/mach-spear/spear1310.c +++ b/arch/arm/mach-spear/spear1310.c @@ -4,7 +4,7 @@ * SPEAr1310 machine source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/spear1340.c b/arch/arm/mach-spear/spear1340.c index 3f3c0f124bd3..94594d5a446c 100644 --- a/arch/arm/mach-spear/spear1340.c +++ b/arch/arm/mach-spear/spear1340.c @@ -4,7 +4,7 @@ * SPEAr1340 machine source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c index 2e463a93468d..b7afce6795f4 100644 --- a/arch/arm/mach-spear/spear13xx.c +++ b/arch/arm/mach-spear/spear13xx.c @@ -4,7 +4,7 @@ * SPEAr13XX machines common source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/spear300.c b/arch/arm/mach-spear/spear300.c index b52e48f342f4..5b32edda2276 100644 --- a/arch/arm/mach-spear/spear300.c +++ b/arch/arm/mach-spear/spear300.c @@ -4,7 +4,7 @@ * SPEAr300 machine source file * * Copyright (C) 2009-2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/spear310.c b/arch/arm/mach-spear/spear310.c index ed2029db391f..86a44ac7ff67 100644 --- a/arch/arm/mach-spear/spear310.c +++ b/arch/arm/mach-spear/spear310.c @@ -4,7 +4,7 @@ * SPEAr310 machine source file * * Copyright (C) 2009-2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/spear320.c b/arch/arm/mach-spear/spear320.c index bf634b32a930..d45d751926c5 100644 --- a/arch/arm/mach-spear/spear320.c +++ b/arch/arm/mach-spear/spear320.c @@ -4,7 +4,7 @@ * SPEAr320 machine source file * * Copyright (C) 2009-2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c index bf3b1fd8cb23..23394ac76cf2 100644 --- a/arch/arm/mach-spear/spear3xx.c +++ b/arch/arm/mach-spear/spear3xx.c @@ -4,7 +4,7 @@ * SPEAr3XX machines common source file * * Copyright (C) 2009-2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c index 26fda4ed4d51..9ccffc1d0f28 100644 --- a/arch/arm/mach-spear/time.c +++ b/arch/arm/mach-spear/time.c @@ -66,8 +66,6 @@ static __iomem void *gpt_base; static struct clk *gpt_clk; -static void clockevent_set_mode(enum clock_event_mode mode, - struct clock_event_device *clk_event_dev); static int clockevent_next_event(unsigned long evt, struct clock_event_device *clk_event_dev); @@ -95,54 +93,67 @@ static void __init spear_clocksource_init(void) 200, 16, clocksource_mmio_readw_up); } -static struct clock_event_device clkevt = { - .name = "tmr0", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = clockevent_set_mode, - .set_next_event = clockevent_next_event, - .shift = 0, /* to be computed */ -}; +static inline void timer_shutdown(struct clock_event_device *evt) +{ + u16 val = readw(gpt_base + CR(CLKEVT)); + + /* stop the timer */ + val &= ~CTRL_ENABLE; + writew(val, gpt_base + CR(CLKEVT)); +} + +static int spear_shutdown(struct clock_event_device *evt) +{ + timer_shutdown(evt); + + return 0; +} + +static int spear_set_oneshot(struct clock_event_device *evt) +{ + u16 val; -static void clockevent_set_mode(enum clock_event_mode mode, - struct clock_event_device *clk_event_dev) + /* stop the timer */ + timer_shutdown(evt); + + val = readw(gpt_base + CR(CLKEVT)); + val |= CTRL_ONE_SHOT; + writew(val, gpt_base + CR(CLKEVT)); + + return 0; +} + +static int spear_set_periodic(struct clock_event_device *evt) { u32 period; u16 val; /* stop the timer */ + timer_shutdown(evt); + + period = clk_get_rate(gpt_clk) / HZ; + period >>= CTRL_PRESCALER16; + writew(period, gpt_base + LOAD(CLKEVT)); + val = readw(gpt_base + CR(CLKEVT)); - val &= ~CTRL_ENABLE; + val &= ~CTRL_ONE_SHOT; + val |= CTRL_ENABLE | CTRL_INT_ENABLE; writew(val, gpt_base + CR(CLKEVT)); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - period = clk_get_rate(gpt_clk) / HZ; - period >>= CTRL_PRESCALER16; - writew(period, gpt_base + LOAD(CLKEVT)); - - val = readw(gpt_base + CR(CLKEVT)); - val &= ~CTRL_ONE_SHOT; - val |= CTRL_ENABLE | CTRL_INT_ENABLE; - writew(val, gpt_base + CR(CLKEVT)); - - break; - case CLOCK_EVT_MODE_ONESHOT: - val = readw(gpt_base + CR(CLKEVT)); - val |= CTRL_ONE_SHOT; - writew(val, gpt_base + CR(CLKEVT)); - - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - - break; - default: - pr_err("Invalid mode requested\n"); - break; - } + return 0; } +static struct clock_event_device clkevt = { + .name = "tmr0", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = spear_shutdown, + .set_state_periodic = spear_set_periodic, + .set_state_oneshot = spear_set_oneshot, + .tick_resume = spear_shutdown, + .set_next_event = clockevent_next_event, + .shift = 0, /* to be computed */ +}; + static int clockevent_next_event(unsigned long cycles, struct clock_event_device *clk_event_dev) { @@ -193,7 +204,7 @@ static void __init spear_clockevent_init(int irq) setup_irq(irq, &spear_timer_irq); } -const static struct of_device_id timer_of_match[] __initconst = { +static const struct of_device_id const timer_of_match[] __initconst = { { .compatible = "st,spear-timer", }, { }, }; diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c index b373acade338..ae10fb280a78 100644 --- a/arch/arm/mach-sti/board-dt.c +++ b/arch/arm/mach-sti/board-dt.c @@ -14,7 +14,7 @@ #include "smp.h" -static const char 
*stih41x_dt_match[] __initdata = { +static const char *const stih41x_dt_match[] __initconst = { "st,stih415", "st,stih416", "st,stih407", diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S index 4c09bae86edf..e0ad451700d5 100644 --- a/arch/arm/mach-sti/headsmp.S +++ b/arch/arm/mach-sti/headsmp.S @@ -37,6 +37,7 @@ pen: ldr r7, [r6] * should now contain the SVC stack for this core */ b secondary_startup +ENDPROC(sti_secondary_startup) 1: .long . .long pen_release diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c index d4b624f8dfcb..c4ad6eae67fa 100644 --- a/arch/arm/mach-sti/platsmp.c +++ b/arch/arm/mach-sti/platsmp.c @@ -20,6 +20,7 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/memblock.h> #include <asm/cacheflush.h> #include <asm/smp_plat.h> @@ -38,8 +39,6 @@ static DEFINE_SPINLOCK(boot_lock); static void sti_secondary_init(unsigned int cpu) { - trace_hardirqs_off(); - /* * let the primary processor know we're out of the * pen, then head off into the C entry point @@ -99,14 +98,62 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) static void __init sti_smp_prepare_cpus(unsigned int max_cpus) { - void __iomem *scu_base = NULL; - struct device_node *np = of_find_compatible_node( - NULL, NULL, "arm,cortex-a9-scu"); + struct device_node *np; + void __iomem *scu_base; + u32 __iomem *cpu_strt_ptr; + u32 release_phys; + int cpu; + unsigned long entry_pa = virt_to_phys(sti_secondary_startup); + + np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu"); + if (np) { scu_base = of_iomap(np, 0); scu_enable(scu_base); of_node_put(np); } + + if (max_cpus <= 1) + return; + + for_each_possible_cpu(cpu) { + + np = of_get_cpu_node(cpu, NULL); + + if (!np) + continue; + + if (of_property_read_u32(np, "cpu-release-addr", + &release_phys)) { + pr_err("CPU %d: missing or invalid cpu-release-addr " + "property\n", cpu); + continue; + } + + /* + * holding pen is usually configured in SBC DMEM but can also be + * in RAM. 
+ */ + + if (!memblock_is_memory(release_phys)) + cpu_strt_ptr = + ioremap(release_phys, sizeof(release_phys)); + else + cpu_strt_ptr = + (u32 __iomem *)phys_to_virt(release_phys); + + __raw_writel(entry_pa, cpu_strt_ptr); + + /* + * wmb so that data is actually written + * before cache flush is done + */ + smp_wmb(); + sync_cache_w(cpu_strt_ptr); + + if (!memblock_is_memory(release_phys)) + iounmap(cpu_strt_ptr); + } } struct smp_operations __initdata sti_smp_ops = { diff --git a/arch/arm/mach-sti/smp.h b/arch/arm/mach-sti/smp.h index 1871b72b1a7e..ae22707d301f 100644 --- a/arch/arm/mach-sti/smp.h +++ b/arch/arm/mach-sti/smp.h @@ -14,4 +14,6 @@ extern struct smp_operations sti_smp_ops; +void sti_secondary_startup(void); + #endif diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 5d1a318f1302..0fa4c5f8b1be 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -8,6 +8,7 @@ menuconfig ARCH_TEGRA select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP select PINCTRL + select PM_OPP select ARCH_HAS_RESET_CONTROLLER select RESET_CONTROLLER select SOC_BUS diff --git a/arch/arm/mach-uniphier/platsmp.c b/arch/arm/mach-uniphier/platsmp.c index 5943e1cb7fe1..4b784f721135 100644 --- a/arch/arm/mach-uniphier/platsmp.c +++ b/arch/arm/mach-uniphier/platsmp.c @@ -60,12 +60,6 @@ err: sbcm_regmap = NULL; } -static void __naked uniphier_secondary_startup(void) -{ - asm("bl v7_invalidate_l1\n" - "b secondary_startup\n"); -}; - static int uniphier_boot_secondary(unsigned int cpu, struct task_struct *idle) { @@ -75,7 +69,7 @@ static int uniphier_boot_secondary(unsigned int cpu, return -ENODEV; ret = regmap_write(sbcm_regmap, 0x1208, - virt_to_phys(uniphier_secondary_startup)); + virt_to_phys(secondary_startup)); if (!ret) asm("sev"); /* wake up secondary CPU */ diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile index 4418a5078833..c8643ac5db71 100644 --- a/arch/arm/mach-ux500/Makefile +++ b/arch/arm/mach-ux500/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o obj-$(CONFIG_MACH_MOP500) += board-mop500-regulators.o \ board-mop500-audio.o -obj-$(CONFIG_SMP) += platsmp.o headsmp.o +obj-$(CONFIG_SMP) += platsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 16913800bbf9..ba708ce08616 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -154,7 +154,6 @@ static const char * stericsson_dt_platform_compat[] = { }; DT_MACHINE_START(U8500_DT, "ST-Ericsson Ux5x0 platform (Device Tree Support)") - .smp = smp_ops(ux500_smp_ops), .map_io = u8500_map_io, .init_irq = ux500_init_irq, /* we re-use nomadik timer here */ diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c index e31d3d61c998..b316e18a76aa 100644 --- a/arch/arm/mach-ux500/cpu.c +++ b/arch/arm/mach-ux500/cpu.c @@ -72,21 +72,12 @@ void __init ux500_init_irq(void) * Init clocks here so that they are available for system timer * initialization. 
*/ - if (cpu_is_u8500_family()) { - u8500_of_clk_init(U8500_CLKRST1_BASE, - U8500_CLKRST2_BASE, - U8500_CLKRST3_BASE, - U8500_CLKRST5_BASE, - U8500_CLKRST6_BASE); - } else if (cpu_is_u9540()) { - u9540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, - U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, - U8500_CLKRST6_BASE); - } else if (cpu_is_u8540()) { - u8540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, - U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, - U8500_CLKRST6_BASE); - } + if (cpu_is_u8500_family()) + u8500_clk_init(); + else if (cpu_is_u9540()) + u9540_clk_init(); + else if (cpu_is_u8540()) + u8540_clk_init(); } static const char * __init ux500_get_machine(void) diff --git a/arch/arm/mach-ux500/headsmp.S b/arch/arm/mach-ux500/headsmp.S deleted file mode 100644 index 9cdea049485d..000000000000 --- a/arch/arm/mach-ux500/headsmp.S +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2009 ST-Ericsson - * This file is based ARM Realview platform - * Copyright (c) 2003 ARM Limited - * All Rights Reserved - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/linkage.h> -#include <linux/init.h> - -/* - * U8500 specific entry point for secondary CPUs. - */ -ENTRY(u8500_secondary_startup) - mrc p15, 0, r0, c0, c0, 5 - and r0, r0, #15 - adr r4, 1f - ldmia r4, {r5, r6} - sub r4, r4, r5 - add r6, r6, r4 -pen: ldr r7, [r6] - cmp r7, r0 - bne pen - - /* - * we've been released from the holding pen: secondary_stack - * should now contain the SVC stack for this core - */ - b secondary_startup -ENDPROC(u8500_secondary_startup) - - .align 2 -1: .long . - .long pen_release diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index 62b1de922bd8..70766b963758 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c @@ -28,135 +28,81 @@ #include "db8500-regs.h" #include "id.h" -static void __iomem *scu_base; -static void __iomem *backupram; - -/* This is called from headsmp.S to wakeup the secondary core */ -extern void u8500_secondary_startup(void); - -/* - * Write pen_release in a way that is guaranteed to be visible to all - * observers, irrespective of whether they're taking part in coherency - * or not. This is necessary for the hotplug code to work reliably. - */ -static void write_pen_release(int val) -{ - pen_release = val; - smp_wmb(); - sync_cache_w(&pen_release); -} - -static DEFINE_SPINLOCK(boot_lock); - -static void ux500_secondary_init(unsigned int cpu) -{ - /* - * let the primary processor know we're out of the - * pen, then head off into the C entry point - */ - write_pen_release(-1); - - /* - * Synchronise with the boot thread. - */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); -} +/* Magic triggers in backup RAM */ +#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4 +#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0 -static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) +static void wakeup_secondary(void) { - unsigned long timeout; - - /* - * set synchronisation state between this boot processor - * and the secondary one - */ - spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from - * the holding pen - release it, then wait for it to flag - * that it has been released by resetting pen_release. 
- */ - write_pen_release(cpu_logical_map(cpu)); - - arch_send_wakeup_ipi_mask(cpumask_of(cpu)); + struct device_node *np; + static void __iomem *backupram; - timeout = jiffies + (1 * HZ); - while (time_before(jiffies, timeout)) { - if (pen_release == -1) - break; + np = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram"); + if (!np) { + pr_err("No backupram base address\n"); + return; + } + backupram = of_iomap(np, 0); + of_node_put(np); + if (!backupram) { + pr_err("No backupram remap\n"); + return; } /* - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ - spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; -} - -static void __init wakeup_secondary(void) -{ - /* * write the address of secondary startup into the backup ram register * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the * backup ram register at offset 0x1FF0, which is what boot rom code - * is waiting for. This would wake up the secondary core from WFE + * is waiting for. This will wake up the secondary core from WFE. */ -#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4 - __raw_writel(virt_to_phys(u8500_secondary_startup), - backupram + UX500_CPU1_JUMPADDR_OFFSET); - -#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0 - __raw_writel(0xA1FEED01, - backupram + UX500_CPU1_WAKEMAGIC_OFFSET); + writel(virt_to_phys(secondary_startup), + backupram + UX500_CPU1_JUMPADDR_OFFSET); + writel(0xA1FEED01, + backupram + UX500_CPU1_WAKEMAGIC_OFFSET); /* make sure write buffer is drained */ mb(); + iounmap(backupram); } -/* - * Initialise the CPU possible map early - this describes the CPUs - * which may be present or become present in the system. - */ -static void __init ux500_smp_init_cpus(void) +static void __init ux500_smp_prepare_cpus(unsigned int max_cpus) { - unsigned int i, ncores; struct device_node *np; + static void __iomem *scu_base; + unsigned int ncores; + int i; np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu"); + if (!np) { + pr_err("No SCU base address\n"); + return; + } scu_base = of_iomap(np, 0); of_node_put(np); - if (!scu_base) + if (!scu_base) { + pr_err("No SCU remap\n"); return; - backupram = ioremap(U8500_BACKUPRAM0_BASE, SZ_8K); - ncores = scu_get_core_count(scu_base); - - /* sanity check */ - if (ncores > nr_cpu_ids) { - pr_warn("SMP: %u cores greater than maximum (%u), clipping\n", - ncores, nr_cpu_ids); - ncores = nr_cpu_ids; } + scu_enable(scu_base); + ncores = scu_get_core_count(scu_base); for (i = 0; i < ncores; i++) set_cpu_possible(i, true); + iounmap(scu_base); } -static void __init ux500_smp_prepare_cpus(unsigned int max_cpus) +static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) { - scu_enable(scu_base); wakeup_secondary(); + arch_send_wakeup_ipi_mask(cpumask_of(cpu)); + return 0; } struct smp_operations ux500_smp_ops __initdata = { - .smp_init_cpus = ux500_smp_init_cpus, .smp_prepare_cpus = ux500_smp_prepare_cpus, - .smp_secondary_init = ux500_secondary_init, .smp_boot_secondary = ux500_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = ux500_cpu_die, #endif }; +CPU_METHOD_OF_DECLARE(ux500_smp, "ste,dbx500-smp", &ux500_smp_ops); diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h index 1fb6ad2789f1..65876eac0761 100644 --- a/arch/arm/mach-ux500/setup.h +++ b/arch/arm/mach-ux500/setup.h @@ -26,7 +26,6 @@ extern struct device *ux500_soc_device_init(const char *soc_id); extern void ux500_timer_init(void); -extern struct smp_operations ux500_smp_ops; extern void 
ux500_cpu_die(unsigned int cpu); #endif /* __ASM_ARCH_SETUP_H */ diff --git a/arch/arm/mach-w90x900/irq.c b/arch/arm/mach-w90x900/irq.c index d66d43ae8df5..491b317daffa 100644 --- a/arch/arm/mach-w90x900/irq.c +++ b/arch/arm/mach-w90x900/irq.c @@ -211,6 +211,6 @@ void __init nuc900_init_irq(void) for (irqno = IRQ_WDT; irqno <= IRQ_ADC; irqno++) { irq_set_chip_and_handler(irqno, &nuc900_irq_chip, handle_level_irq); - set_irq_flags(irqno, IRQF_VALID); + irq_clear_status_flags(irqno, IRQ_NOREQUEST); } } diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c index 9230d3725599..cd1966ec9143 100644 --- a/arch/arm/mach-w90x900/time.c +++ b/arch/arm/mach-w90x900/time.c @@ -48,31 +48,32 @@ static unsigned int timer0_load; -static void nuc900_clockevent_setmode(enum clock_event_mode mode, - struct clock_event_device *clk) +static int nuc900_clockevent_shutdown(struct clock_event_device *evt) { - unsigned int val; + unsigned int val = __raw_readl(REG_TCSR0) & ~(0x03 << 27); - val = __raw_readl(REG_TCSR0); - val &= ~(0x03 << 27); + __raw_writel(val, REG_TCSR0); + return 0; +} + +static int nuc900_clockevent_set_oneshot(struct clock_event_device *evt) +{ + unsigned int val = __raw_readl(REG_TCSR0) & ~(0x03 << 27); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - __raw_writel(timer0_load, REG_TICR0); - val |= (PERIOD | COUNTEN | INTEN | PRESCALE); - break; + val |= (ONESHOT | COUNTEN | INTEN | PRESCALE); - case CLOCK_EVT_MODE_ONESHOT: - val |= (ONESHOT | COUNTEN | INTEN | PRESCALE); - break; + __raw_writel(val, REG_TCSR0); + return 0; +} - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - break; - } +static int nuc900_clockevent_set_periodic(struct clock_event_device *evt) +{ + unsigned int val = __raw_readl(REG_TCSR0) & ~(0x03 << 27); + __raw_writel(timer0_load, REG_TICR0); + val |= (PERIOD | COUNTEN | INTEN | PRESCALE); __raw_writel(val, REG_TCSR0); + return 0; } static int nuc900_clockevent_setnextevent(unsigned long evt, @@ -90,11 +91,15 @@ static int nuc900_clockevent_setnextevent(unsigned long evt, } static struct clock_event_device nuc900_clockevent_device = { - .name = "nuc900-timer0", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = nuc900_clockevent_setmode, - .set_next_event = nuc900_clockevent_setnextevent, - .rating = 300, + .name = "nuc900-timer0", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = nuc900_clockevent_shutdown, + .set_state_periodic = nuc900_clockevent_set_periodic, + .set_state_oneshot = nuc900_clockevent_set_oneshot, + .tick_resume = nuc900_clockevent_shutdown, + .set_next_event = nuc900_clockevent_setnextevent, + .rating = 300, }; /*IRQ handler for the timer*/ diff --git a/arch/arm/mach-zx/Kconfig b/arch/arm/mach-zx/Kconfig index 2a910dc0d15e..7fdc5bf24f9b 100644 --- a/arch/arm/mach-zx/Kconfig +++ b/arch/arm/mach-zx/Kconfig @@ -13,6 +13,7 @@ config SOC_ZX296702 select ARM_GLOBAL_TIMER select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP + select PM_GENERIC_DOMAINS help Support for ZTE ZX296702 SoC which is a dual core CortexA9MP endif diff --git a/arch/arm/mach-zx/Makefile b/arch/arm/mach-zx/Makefile index 7c2edf6e5f8b..a4b486433209 100644 --- a/arch/arm/mach-zx/Makefile +++ b/arch/arm/mach-zx/Makefile @@ -1,2 +1,2 @@ -obj-$(CONFIG_SOC_ZX296702) += zx296702.o +obj-$(CONFIG_SOC_ZX296702) += zx296702.o zx296702-pm-domain.o obj-$(CONFIG_SMP) += headsmp.o platsmp.o diff --git a/arch/arm/mach-zx/zx296702-pm-domain.c 
b/arch/arm/mach-zx/zx296702-pm-domain.c new file mode 100644 index 000000000000..e08574d4e2ca --- /dev/null +++ b/arch/arm/mach-zx/zx296702-pm-domain.c @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2015 Linaro Ltd. + * + * Author: Jun Nie <jun.nie@linaro.org> + * License terms: GNU General Public License (GPL) version 2 + */ +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/slab.h> + +#define PCU_DM_CLKEN 0x18 +#define PCU_DM_RSTEN 0x1C +#define PCU_DM_ISOEN 0x20 +#define PCU_DM_PWRDN 0x24 +#define PCU_DM_ACK_SYNC 0x28 + +enum { + PCU_DM_NEON0 = 0, + PCU_DM_NEON1, + PCU_DM_GPU, + PCU_DM_DECPPU, + PCU_DM_VOU, + PCU_DM_R2D, + PCU_DM_TOP, +}; + +static void __iomem *pcubase; + +struct zx_pm_domain { + struct generic_pm_domain dm; + unsigned int bit; +}; + +static int normal_power_off(struct generic_pm_domain *domain) +{ + struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain; + unsigned long loop = 1000; + u32 tmp; + + tmp = readl_relaxed(pcubase + PCU_DM_CLKEN); + tmp &= ~BIT(zpd->bit); + writel_relaxed(tmp, pcubase + PCU_DM_CLKEN); + udelay(5); + + tmp = readl_relaxed(pcubase + PCU_DM_ISOEN); + tmp &= ~BIT(zpd->bit); + writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_ISOEN); + udelay(5); + + tmp = readl_relaxed(pcubase + PCU_DM_RSTEN); + tmp &= ~BIT(zpd->bit); + writel_relaxed(tmp, pcubase + PCU_DM_RSTEN); + udelay(5); + + tmp = readl_relaxed(pcubase + PCU_DM_PWRDN); + tmp &= ~BIT(zpd->bit); + writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_PWRDN); + do { + tmp = readl_relaxed(pcubase + PCU_DM_ACK_SYNC) & BIT(zpd->bit); + } while (--loop && !tmp); + + if (!loop) { + pr_err("Error: %s %s fail\n", __func__, domain->name); + return -EIO; + } + + return 0; +} + +static int normal_power_on(struct generic_pm_domain *domain) +{ + struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain; + unsigned long loop = 10000; + u32 tmp; + + tmp = readl_relaxed(pcubase + PCU_DM_PWRDN); + tmp &= ~BIT(zpd->bit); + writel_relaxed(tmp, pcubase + PCU_DM_PWRDN); + do { + tmp = readl_relaxed(pcubase + PCU_DM_ACK_SYNC) & BIT(zpd->bit); + } while (--loop && tmp); + + if (!loop) { + pr_err("Error: %s %s fail\n", __func__, domain->name); + return -EIO; + } + + tmp = readl_relaxed(pcubase + PCU_DM_RSTEN); + tmp &= ~BIT(zpd->bit); + writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_RSTEN); + udelay(5); + + tmp = readl_relaxed(pcubase + PCU_DM_ISOEN); + tmp &= ~BIT(zpd->bit); + writel_relaxed(tmp, pcubase + PCU_DM_ISOEN); + udelay(5); + + tmp = readl_relaxed(pcubase + PCU_DM_CLKEN); + tmp &= ~BIT(zpd->bit); + writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_CLKEN); + udelay(5); + return 0; +} + +static struct zx_pm_domain gpu_domain = { + .dm = { + .name = "gpu_domain", + .power_off = normal_power_off, + .power_on = normal_power_on, + }, + .bit = PCU_DM_GPU, +}; + +static struct zx_pm_domain decppu_domain = { + .dm = { + .name = "decppu_domain", + .power_off = normal_power_off, + .power_on = normal_power_on, + }, + .bit = PCU_DM_DECPPU, +}; + +static struct zx_pm_domain vou_domain = { + .dm = { + .name = "vou_domain", + .power_off = normal_power_off, + .power_on = normal_power_on, + }, + .bit = PCU_DM_VOU, +}; + +static struct zx_pm_domain r2d_domain = { + .dm = { + .name = "r2d_domain", + .power_off = normal_power_off, + .power_on = normal_power_on, + }, + .bit = PCU_DM_R2D, +}; + +static struct generic_pm_domain *zx296702_pm_domains[] = { + &vou_domain.dm, + &gpu_domain.dm, + 
&decppu_domain.dm, + &r2d_domain.dm, +}; + +static int zx296702_pd_probe(struct platform_device *pdev) +{ + struct genpd_onecell_data *genpd_data; + struct resource *res; + int i; + + genpd_data = devm_kzalloc(&pdev->dev, sizeof(*genpd_data), GFP_KERNEL); + if (!genpd_data) + return -ENOMEM; + + genpd_data->domains = zx296702_pm_domains; + genpd_data->num_domains = ARRAY_SIZE(zx296702_pm_domains); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + + pcubase = devm_ioremap_resource(&pdev->dev, res); + if (!pcubase) { + dev_err(&pdev->dev, "ioremap fail.\n"); + return -EIO; + } + + for (i = 0; i < ARRAY_SIZE(zx296702_pm_domains); ++i) + pm_genpd_init(zx296702_pm_domains[i], NULL, false); + + of_genpd_add_provider_onecell(pdev->dev.of_node, genpd_data); + return 0; +} + +static const struct of_device_id zx296702_pm_domain_matches[] __initconst = { + { .compatible = "zte,zx296702-pcu", }, + { }, +}; + +static struct platform_driver zx296702_pd_driver __initdata = { + .driver = { + .name = "zx-powerdomain", + .owner = THIS_MODULE, + .of_match_table = zx296702_pm_domain_matches, + }, + .probe = zx296702_pd_probe, +}; + +static int __init zx296702_pd_init(void) +{ + return platform_driver_register(&zx296702_pd_driver); +} +subsys_initcall(zx296702_pd_init); diff --git a/arch/arm/mach-zx/zx296702.c b/arch/arm/mach-zx/zx296702.c index 60bb1a8e1bf1..a041e13ab0ac 100644 --- a/arch/arm/mach-zx/zx296702.c +++ b/arch/arm/mach-zx/zx296702.c @@ -13,7 +13,7 @@ #include <linux/of_address.h> #include <linux/of_platform.h> -static const char *zx296702_dt_compat[] __initconst = { +static const char *const zx296702_dt_compat[] __initconst = { "zte,zx296702", NULL, }; diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 616d5840fc2e..6bd4a43e1a78 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c @@ -197,8 +197,8 @@ static const char * const zynq_dt_match[] = { DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") /* 64KB way size, 8-way associativity, parity disabled */ - .l2c_aux_val = 0x00000000, - .l2c_aux_mask = 0xffffffff, + .l2c_aux_val = 0x00400000, + .l2c_aux_mask = 0xffbfffff, .smp = smp_ops(zynq_smp_ops), .map_io = zynq_map_io, .init_irq = zynq_irq_init, diff --git a/arch/arm/mach-zynq/headsmp.S b/arch/arm/mach-zynq/headsmp.S index 045c72720a4d..f6d5de073e34 100644 --- a/arch/arm/mach-zynq/headsmp.S +++ b/arch/arm/mach-zynq/headsmp.S @@ -18,7 +18,7 @@ ARM_BE8(rev r0, r0) .globl zynq_secondary_trampoline_jump zynq_secondary_trampoline_jump: /* Space for jumping address */ - .word /* cpu 1 */ + .word 0 /* cpu 1 */ .globl zynq_secondary_trampoline_end zynq_secondary_trampoline_end: ENDPROC(zynq_secondary_trampoline) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1ced8a0f7a52..cba12f34ff77 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1971,7 +1971,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping) { int next_bitmap; - if (mapping->nr_bitmaps > mapping->extensions) + if (mapping->nr_bitmaps >= mapping->extensions) return -EINVAL; next_bitmap = mapping->nr_bitmaps; diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0716bbe19872..de2b246fed38 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -274,7 +274,10 @@ __v7_ca15mp_setup: __v7_b15mp_setup: __v7_ca17mp_setup: mov r10, #0 -1: +1: adr r12, __v7_setup_stack @ the local stack + stmia r12, {r0-r5, lr} @ 
v7_invalidate_l1 touches r0-r6 + bl v7_invalidate_l1 + ldmia r12, {r0-r5, lr} #ifdef CONFIG_SMP ALT_SMP(mrc p15, 0, r0, c1, c0, 1) ALT_UP(mov r0, #(1 << 6)) @ fake it for UP @@ -283,7 +286,7 @@ __v7_ca17mp_setup: orreq r0, r0, r10 @ Enable CPU-specific SMP bits mcreq p15, 0, r0, c1, c0, 1 #endif - b __v7_setup + b __v7_setup_cont /* * Errata: @@ -413,10 +416,11 @@ __v7_pj4b_setup: __v7_setup: adr r12, __v7_setup_stack @ the local stack - stmia r12, {r0-r5, r7, r9, r11, lr} + stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6 bl v7_invalidate_l1 - ldmia r12, {r0-r5, r7, r9, r11, lr} + ldmia r12, {r0-r5, lr} +__v7_setup_cont: and r0, r9, #0xff000000 @ ARM? teq r0, #0x41000000 bne __errata_finish @@ -480,7 +484,7 @@ ENDPROC(__v7_setup) .align 2 __v7_setup_stack: - .space 4 * 11 @ 11 registers + .space 4 * 7 @ 7 registers __INITDATA diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 4550d247e308..c011e2296cb1 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -74,32 +74,52 @@ struct jit_ctx { int bpf_jit_enable __read_mostly; -static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) +static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret, + unsigned int size) +{ + void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size); + + if (!ptr) + return -EFAULT; + memcpy(ret, ptr, size); + return 0; +} + +static u64 jit_get_skb_b(struct sk_buff *skb, int offset) { u8 ret; int err; - err = skb_copy_bits(skb, offset, &ret, 1); + if (offset < 0) + err = call_neg_helper(skb, offset, &ret, 1); + else + err = skb_copy_bits(skb, offset, &ret, 1); return (u64)err << 32 | ret; } -static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) +static u64 jit_get_skb_h(struct sk_buff *skb, int offset) { u16 ret; int err; - err = skb_copy_bits(skb, offset, &ret, 2); + if (offset < 0) + err = call_neg_helper(skb, offset, &ret, 2); + else + err = skb_copy_bits(skb, offset, &ret, 2); return (u64)err << 32 | ntohs(ret); } -static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) +static u64 jit_get_skb_w(struct sk_buff *skb, int offset) { u32 ret; int err; - err = skb_copy_bits(skb, offset, &ret, 4); + if (offset < 0) + err = call_neg_helper(skb, offset, &ret, 4); + else + err = skb_copy_bits(skb, offset, &ret, 4); return (u64)err << 32 | ntohl(ret); } @@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx) case BPF_LD | BPF_B | BPF_ABS: load_order = 0; load: - /* the interpreter will deal with the negative K */ - if ((int)k < 0) - return -ENOTSUPP; emit_mov_i(r_off, k, ctx); load_common: ctx->seen |= SEEN_DATA | SEEN_CALL; @@ -547,12 +564,24 @@ load_common: emit(ARM_SUB_I(r_scratch, r_skb_hl, 1 << load_order), ctx); emit(ARM_CMP_R(r_scratch, r_off), ctx); - condt = ARM_COND_HS; + condt = ARM_COND_GE; } else { emit(ARM_CMP_R(r_skb_hl, r_off), ctx); condt = ARM_COND_HI; } + /* + * test for negative offset, only if we are + * currently scheduled to take the fast + * path. this will update the flags so that + * the slowpath instructions are ignored if the + * offset is negative. + * + * for load_order == 0 the HI condition will + * make loads at offset 0 take the slow path too.
+ */ + _emit(condt, ARM_CMP_I(r_off, 0), ctx); + _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), ctx); @@ -860,9 +889,11 @@ b_epilogue: off = offsetof(struct sk_buff, vlan_tci); emit(ARM_LDRH_I(r_A, r_skb, off), ctx); if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) - OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); - else - OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); + OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx); + else { + OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx); + OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx); + } break; case BPF_ANC | SKF_AD_QUEUE: ctx->seen |= SEEN_SKB; diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c index 6ad65d8ae237..101e8f2c7abe 100644 --- a/arch/arm/plat-iop/time.c +++ b/arch/arm/plat-iop/time.c @@ -77,41 +77,57 @@ static int iop_set_next_event(unsigned long delta, static unsigned long ticks_per_jiffy; -static void iop_set_mode(enum clock_event_mode mode, - struct clock_event_device *unused) +static int iop_set_periodic(struct clock_event_device *evt) { u32 tmr = read_tmr0(); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - write_tmr0(tmr & ~IOP_TMR_EN); - write_tcr0(ticks_per_jiffy - 1); - write_trr0(ticks_per_jiffy - 1); - tmr |= (IOP_TMR_RELOAD | IOP_TMR_EN); - break; - case CLOCK_EVT_MODE_ONESHOT: - /* ->set_next_event sets period and enables timer */ - tmr &= ~(IOP_TMR_RELOAD | IOP_TMR_EN); - break; - case CLOCK_EVT_MODE_RESUME: - tmr |= IOP_TMR_EN; - break; - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - default: - tmr &= ~IOP_TMR_EN; - break; - } + write_tmr0(tmr & ~IOP_TMR_EN); + write_tcr0(ticks_per_jiffy - 1); + write_trr0(ticks_per_jiffy - 1); + tmr |= (IOP_TMR_RELOAD | IOP_TMR_EN); write_tmr0(tmr); + return 0; +} + +static int iop_set_oneshot(struct clock_event_device *evt) +{ + u32 tmr = read_tmr0(); + + /* ->set_next_event sets period and enables timer */ + tmr &= ~(IOP_TMR_RELOAD | IOP_TMR_EN); + write_tmr0(tmr); + return 0; +} + +static int iop_shutdown(struct clock_event_device *evt) +{ + u32 tmr = read_tmr0(); + + tmr &= ~IOP_TMR_EN; + write_tmr0(tmr); + return 0; +} + +static int iop_resume(struct clock_event_device *evt) +{ + u32 tmr = read_tmr0(); + + tmr |= IOP_TMR_EN; + write_tmr0(tmr); + return 0; } static struct clock_event_device iop_clockevent = { - .name = "iop_timer0", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .rating = 300, - .set_next_event = iop_set_next_event, - .set_mode = iop_set_mode, + .name = "iop_timer0", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 300, + .set_next_event = iop_set_next_event, + .set_state_shutdown = iop_shutdown, + .set_state_periodic = iop_set_periodic, + .tick_resume = iop_resume, + .set_state_oneshot = iop_set_oneshot, }; static irqreturn_t diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 5168a52a17f9..79c33eca09a3 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -407,9 +407,9 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type) return 0; } -static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) +static void gpio_irq_handler(unsigned __irq, struct irq_desc *desc) { - struct orion_gpio_chip *ochip = irq_get_handler_data(irq); + struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc); u32 cause, type; int i; @@ -582,8 +582,9 @@ void __init orion_gpio_init(struct device_node *np, for (i = 0; i < 4; i++) { if (irqs[i]) { - irq_set_handler_data(irqs[i], ochip); - irq_set_chained_handler(irqs[i], gpio_irq_handler); + 
irq_set_chained_handler_and_data(irqs[i], + gpio_irq_handler, + ochip); } } diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c index 261258f717fc..8085a8aac812 100644 --- a/arch/arm/plat-orion/time.c +++ b/arch/arm/plat-orion/time.c @@ -106,60 +106,63 @@ orion_clkevt_next_event(unsigned long delta, struct clock_event_device *dev) return 0; } -static void -orion_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) +static int orion_clkevt_shutdown(struct clock_event_device *evt) { unsigned long flags; u32 u; local_irq_save(flags); - if (mode == CLOCK_EVT_MODE_PERIODIC) { - /* - * Setup timer to fire at 1/HZ intervals. - */ - writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF); - writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF); - - /* - * Enable timer interrupt. - */ - u = readl(bridge_base + BRIDGE_MASK_OFF); - writel(u | BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF); - - /* - * Enable timer. - */ - u = readl(timer_base + TIMER_CTRL_OFF); - writel(u | TIMER1_EN | TIMER1_RELOAD_EN, - timer_base + TIMER_CTRL_OFF); - } else { - /* - * Disable timer. - */ - u = readl(timer_base + TIMER_CTRL_OFF); - writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF); - - /* - * Disable timer interrupt. - */ - u = readl(bridge_base + BRIDGE_MASK_OFF); - writel(u & ~BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF); - - /* - * ACK pending timer interrupt. - */ - writel(bridge_timer1_clr_mask, bridge_base + BRIDGE_CAUSE_OFF); - - } + + /* Disable timer */ + u = readl(timer_base + TIMER_CTRL_OFF); + writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF); + + /* Disable timer interrupt */ + u = readl(bridge_base + BRIDGE_MASK_OFF); + writel(u & ~BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF); + + /* ACK pending timer interrupt */ + writel(bridge_timer1_clr_mask, bridge_base + BRIDGE_CAUSE_OFF); + + local_irq_restore(flags); + + return 0; +} + +static int orion_clkevt_set_periodic(struct clock_event_device *evt) +{ + unsigned long flags; + u32 u; + + local_irq_save(flags); + + /* Setup timer to fire at 1/HZ intervals */ + writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF); + writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF); + + /* Enable timer interrupt */ + u = readl(bridge_base + BRIDGE_MASK_OFF); + writel(u | BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF); + + /* Enable timer */ + u = readl(timer_base + TIMER_CTRL_OFF); + writel(u | TIMER1_EN | TIMER1_RELOAD_EN, timer_base + TIMER_CTRL_OFF); + local_irq_restore(flags); + + return 0; } static struct clock_event_device orion_clkevt = { - .name = "orion_tick", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .rating = 300, - .set_next_event = orion_clkevt_next_event, - .set_mode = orion_clkevt_mode, + .name = "orion_tick", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .set_next_event = orion_clkevt_next_event, + .set_state_shutdown = orion_clkevt_shutdown, + .set_state_periodic = orion_clkevt_set_periodic, + .set_state_oneshot = orion_clkevt_shutdown, + .tick_resume = orion_clkevt_shutdown, }; static irqreturn_t orion_timer_interrupt(int irq, void *dev_id) diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c index d92f07f6ecfb..de2b061889ec 100644 --- a/arch/arm/plat-pxa/dma.c +++ b/arch/arm/plat-pxa/dma.c @@ -289,7 +289,8 @@ int pxa_request_dma (char *name, pxa_dma_prio prio, /* try grabbing a DMA channel with the requested priority */ for (i = 0; i < num_dma_channels; i++) { if ((dma_channels[i].prio == prio) && - 
!dma_channels[i].name) { + !dma_channels[i].name && + !pxad_toggle_reserved_channel(i)) { found = 1; break; } @@ -326,13 +327,14 @@ void pxa_free_dma (int dma_ch) local_irq_save(flags); DCSR(dma_ch) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; dma_channels[dma_ch].name = NULL; + pxad_toggle_reserved_channel(dma_ch); local_irq_restore(flags); } EXPORT_SYMBOL(pxa_free_dma); static irqreturn_t dma_irq_handler(int irq, void *dev_id) { - int i, dint = DINT; + int i, dint = DINT, done = 0; struct dma_channel *channel; while (dint) { @@ -341,16 +343,13 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) channel = &dma_channels[i]; if (channel->name && channel->irq_handler) { channel->irq_handler(i, channel->data); - } else { - /* - * IRQ for an unregistered DMA channel: - * let's clear the interrupts and disable it. - */ - printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i); - DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; + done++; } } - return IRQ_HANDLED; + if (done) + return IRQ_HANDLED; + else + return IRQ_NONE; } int __init pxa_init_dma(int irq, int num_ch) @@ -372,7 +371,8 @@ int __init pxa_init_dma(int irq, int num_ch) spin_lock_init(&dma_channels[i].lock); } - ret = request_irq(irq, dma_irq_handler, 0, "DMA", NULL); + ret = request_irq(irq, dma_irq_handler, IRQF_SHARED, "DMA", + dma_channels); if (ret) { printk (KERN_CRIT "Wow! Can't register IRQ for DMA\n"); kfree(dma_channels); diff --git a/arch/arm/plat-pxa/include/plat/dma.h b/arch/arm/plat-pxa/include/plat/dma.h index a7b91dc06852..28848b344e2d 100644 --- a/arch/arm/plat-pxa/include/plat/dma.h +++ b/arch/arm/plat-pxa/include/plat/dma.h @@ -82,4 +82,19 @@ int pxa_request_dma (char *name, void pxa_free_dma (int dma_ch); +/* + * Cooperation with pxa_dma + dmaengine while there remains at least one pxa + * driver not converted to dmaengine. + */ +#if defined(CONFIG_PXA_DMA) +extern int pxad_toggle_reserved_channel(int legacy_channel); +#else +static inline int pxad_toggle_reserved_channel(int legacy_channel) +{ + return 0; +} +#endif + +extern void __init pxa2xx_set_dmac_info(int nb_channels); + #endif /* __PLAT_DMA_H */ diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig index cb8e3d655d1a..57729b915003 100644 --- a/arch/arm/plat-samsung/Kconfig +++ b/arch/arm/plat-samsung/Kconfig @@ -217,12 +217,6 @@ config SAMSUNG_DEV_PWM help Compile in platform device definition for PWM Timer -config SAMSUNG_DEV_BACKLIGHT - bool - depends on SAMSUNG_DEV_PWM - help - Compile in platform device definition LCD backlight with PWM Timer - config S3C24XX_PWM bool "PWM device support" select PWM @@ -231,25 +225,14 @@ config S3C24XX_PWM Support for exporting the PWM timer blocks via the pwm device system -config S3C_SETUP_CAMIF - bool - help - Compile in common setup code for S3C CAMIF devices - config SAMSUNG_PM_GPIO bool default y if GPIO_SAMSUNG && PM help Include legacy GPIO power management code for platforms not using pinctrl-samsung driver. 
- endif -config S5P_DEV_MFC - bool - help - Compile in setup memory (init) code for MFC - comment "Power management" config SAMSUNG_PM_DEBUG diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile index 1a29ab1f446d..8c911760f55f 100644 --- a/arch/arm/plat-samsung/Makefile +++ b/arch/arm/plat-samsung/Makefile @@ -20,11 +20,6 @@ obj-$(CONFIG_SAMSUNG_ATAGS) += platformdata.o obj-$(CONFIG_SAMSUNG_ATAGS) += devs.o obj-$(CONFIG_SAMSUNG_ATAGS) += dev-uart.o -obj-$(CONFIG_S5P_DEV_MFC) += s5p-dev-mfc.o - -obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o - -obj-$(CONFIG_S3C_SETUP_CAMIF) += setup-camif.o # PM support diff --git a/arch/arm/plat-samsung/include/plat/keypad-core.h b/arch/arm/plat-samsung/include/plat/keypad-core.h deleted file mode 100644 index d513e1b3a31e..000000000000 --- a/arch/arm/plat-samsung/include/plat/keypad-core.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * linux/arch/arm/plat-samsung/include/plat/keypad-core.h - * - * Copyright (C) 2010 Samsung Electronics Co.Ltd - * Author: Joonyoung Shim <jy0922.shim@samsung.com> - * - * Samsung keypad controller core function - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - */ - -#ifndef __ASM_ARCH_KEYPAD_CORE_H -#define __ASM_ARCH_KEYPAD_CORE_H - -/* These function are only for use with the core support code, such as - * the cpu specific initialisation code - */ - -/* re-define device name depending on support. */ -static inline void samsung_keypad_setname(char *name) -{ -#ifdef CONFIG_SAMSUNG_DEV_KEYPAD - samsung_device_keypad.name = name; -#endif -} - -#endif /* __ASM_ARCH_KEYPAD_CORE_H */ diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile index 9d259d94e429..1160434eece0 100644 --- a/arch/arm/vdso/Makefile +++ b/arch/arm/vdso/Makefile @@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 VDSO_LDFLAGS += -nostdlib -shared VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id) -VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd) +VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd) obj-$(CONFIG_VDSO) += vdso.o extra-$(CONFIG_VDSO) += vdso.lds diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 0689c3fb56e3..d831bc2ac204 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -490,7 +490,8 @@ 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */ reg-names = "csr", "cfg"; ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */ - 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */ + 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0xf0 0x00000000 0xf0 0x00000000 0x10 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; @@ -513,8 +514,9 @@ reg = < 0x00 0x1f2c0000 0x0 0x00010000 /* Controller registers */ 0xd0 0xd0000000 0x0 0x00040000>; /* PCI config space */ reg-names = "csr", "cfg"; - ranges = <0x01000000 0x0 0x00000000 0xd0 0x10000000 0x00 0x00010000 /* io */ - 0x02000000 0x0 0x80000000 0xd1 0x80000000 0x00 0x80000000>; /* mem */ + ranges = <0x01000000 0x00 0x00000000 0xd0 0x10000000 0x00 
0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0xd1 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0xd8 0x00000000 0xd8 0x00000000 0x08 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; @@ -537,8 +539,9 @@ reg = < 0x00 0x1f2d0000 0x0 0x00010000 /* Controller registers */ 0x90 0xd0000000 0x0 0x00040000>; /* PCI config space */ reg-names = "csr", "cfg"; - ranges = <0x01000000 0x0 0x00000000 0x90 0x10000000 0x0 0x00010000 /* io */ - 0x02000000 0x0 0x80000000 0x91 0x80000000 0x0 0x80000000>; /* mem */ + ranges = <0x01000000 0x00 0x00000000 0x90 0x10000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0x91 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0x94 0x00000000 0x94 0x00000000 0x04 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; @@ -561,8 +564,9 @@ reg = < 0x00 0x1f500000 0x0 0x00010000 /* Controller registers */ 0xa0 0xd0000000 0x0 0x00040000>; /* PCI config space */ reg-names = "csr", "cfg"; - ranges = <0x01000000 0x0 0x00000000 0xa0 0x10000000 0x0 0x00010000 /* io */ - 0x02000000 0x0 0x80000000 0xa1 0x80000000 0x0 0x80000000>; /* mem */ + ranges = <0x01000000 0x00 0x00000000 0xa0 0x10000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0xa1 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0xb0 0x00000000 0xb0 0x00000000 0x10 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; @@ -585,8 +589,9 @@ reg = < 0x00 0x1f510000 0x0 0x00010000 /* Controller registers */ 0xc0 0xd0000000 0x0 0x00200000>; /* PCI config space */ reg-names = "csr", "cfg"; - ranges = <0x01000000 0x0 0x00000000 0xc0 0x10000000 0x0 0x00010000 /* io */ - 0x02000000 0x0 0x80000000 0xc1 0x80000000 0x0 0x80000000>; /* mem */ + ranges = <0x01000000 0x00 0x00000000 0xc0 0x10000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0xc1 0x80000000 0x00 0x80000000 /* mem */ + 0x43000000 0xc8 0x00000000 0xc8 0x00000000 0x08 0x00000000>; /* mem */ dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; interrupt-map-mask = <0x0 0x0 0x0 0x7>; @@ -823,7 +828,7 @@ device_type = "dma"; reg = <0x0 0x1f270000 0x0 0x10000>, <0x0 0x1f200000 0x0 0x10000>, - <0x0 0x1b008000 0x0 0x2000>, + <0x0 0x1b000000 0x0 0x400000>, <0x0 0x1054a000 0x0 0x100>; interrupts = <0x0 0x82 0x4>, <0x0 0xb8 0x4>, diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 3303e8a7b837..f4bf2f2a014c 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -124,7 +124,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) ce_aes_ccm_auth_data(mac, (u8 *)<ag, ltag.len, &macp, ctx->key_enc, num_rounds(ctx)); - scatterwalk_start(&walk, req->assoc); + scatterwalk_start(&walk, req->src); do { u32 n = scatterwalk_clamp(&walk, len); @@ -151,6 +151,10 @@ static int ccm_encrypt(struct aead_request *req) struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); struct blkcipher_desc desc = { .info = req->iv }; struct blkcipher_walk walk; + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct scatterlist *src; + struct scatterlist *dst; 
u8 __aligned(8) mac[AES_BLOCK_SIZE]; u8 buf[AES_BLOCK_SIZE]; u32 len = req->cryptlen; @@ -168,7 +172,12 @@ static int ccm_encrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - blkcipher_walk_init(&walk, req->dst, req->src, len); + src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen); + dst = src; + if (req->src != req->dst) + dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen); + + blkcipher_walk_init(&walk, dst, src, len); err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, AES_BLOCK_SIZE); @@ -194,7 +203,7 @@ static int ccm_encrypt(struct aead_request *req) return err; /* copy authtag to end of dst */ - scatterwalk_map_and_copy(mac, req->dst, req->cryptlen, + scatterwalk_map_and_copy(mac, dst, req->cryptlen, crypto_aead_authsize(aead), 1); return 0; @@ -207,6 +216,10 @@ static int ccm_decrypt(struct aead_request *req) unsigned int authsize = crypto_aead_authsize(aead); struct blkcipher_desc desc = { .info = req->iv }; struct blkcipher_walk walk; + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct scatterlist *src; + struct scatterlist *dst; u8 __aligned(8) mac[AES_BLOCK_SIZE]; u8 buf[AES_BLOCK_SIZE]; u32 len = req->cryptlen - authsize; @@ -224,7 +237,12 @@ static int ccm_decrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - blkcipher_walk_init(&walk, req->dst, req->src, len); + src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen); + dst = src; + if (req->src != req->dst) + dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen); + + blkcipher_walk_init(&walk, dst, src, len); err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, AES_BLOCK_SIZE); @@ -250,44 +268,42 @@ static int ccm_decrypt(struct aead_request *req) return err; /* compare calculated auth tag with the stored one */ - scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize, + scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize, authsize, 0); - if (memcmp(mac, buf, authsize)) + if (crypto_memneq(mac, buf, authsize)) return -EBADMSG; return 0; } -static struct crypto_alg ccm_aes_alg = { - .cra_name = "ccm(aes)", - .cra_driver_name = "ccm-aes-ce", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct crypto_aes_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setkey = ccm_setkey, - .setauthsize = ccm_setauthsize, - .encrypt = ccm_encrypt, - .decrypt = ccm_decrypt, - } +static struct aead_alg ccm_aes_alg = { + .base = { + .cra_name = "ccm(aes)", + .cra_driver_name = "ccm-aes-ce", + .cra_priority = 300, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct crypto_aes_ctx), + .cra_alignmask = 7, + .cra_module = THIS_MODULE, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = ccm_setkey, + .setauthsize = ccm_setauthsize, + .encrypt = ccm_encrypt, + .decrypt = ccm_decrypt, }; static int __init aes_mod_init(void) { if (!(elf_hwcap & HWCAP_AES)) return -ENODEV; - return crypto_register_alg(&ccm_aes_alg); + return crypto_register_aead(&ccm_aes_alg); } static void __exit aes_mod_exit(void) { - crypto_unregister_alg(&ccm_aes_alg); + crypto_unregister_aead(&ccm_aes_alg); } module_init(aes_mod_init); diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index b112a39834d0..70fd9ffb58cf 100644 --- 
a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -25,6 +25,7 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += msgbuf.h generic-y += msi.h diff --git a/arch/arm64/include/asm/mm-arch-hooks.h b/arch/arm64/include/asm/mm-arch-hooks.h deleted file mode 100644 index 562b655f5ba9..000000000000 --- a/arch/arm64/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_ARM64_MM_ARCH_HOOKS_H -#define _ASM_ARM64_MM_ARCH_HOOKS_H - -#endif /* _ASM_ARM64_MM_ARCH_HOOKS_H */ diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 9d4aa18f2a82..e8ca6eaedd02 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -122,12 +122,12 @@ static int __init uefi_init(void) /* Show what we know for posterity */ c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), - sizeof(vendor)); + sizeof(vendor) * sizeof(efi_char16_t)); if (c16) { for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) vendor[i] = c16[i]; vendor[i] = '\0'; - early_memunmap(c16, sizeof(vendor)); + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); } pr_info("EFI v%u.%.02u by %s\n", diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index f860bfda454a..e16351819fed 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -585,7 +585,8 @@ ENDPROC(el0_irq) * */ ENTRY(cpu_switch_to) - add x8, x0, #THREAD_CPU_CONTEXT + mov x10, #THREAD_CPU_CONTEXT + add x8, x0, x10 mov x9, sp stp x19, x20, [x8], #16 // store callee-saved registers stp x21, x22, [x8], #16 @@ -594,7 +595,7 @@ ENTRY(cpu_switch_to) stp x27, x28, [x8], #16 stp x29, x9, [x8], #16 str lr, [x8] - add x8, x1, #THREAD_CPU_CONTEXT + add x8, x1, x10 ldp x19, x20, [x8], #16 // restore callee-saved registers ldp x21, x22, [x8], #16 ldp x23, x24, [x8], #16 diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 240b75c0e94f..463fa2e7e34c 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -61,7 +61,7 @@ void __init init_IRQ(void) static bool migrate_one_irq(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); - const struct cpumask *affinity = d->affinity; + const struct cpumask *affinity = irq_data_get_affinity_mask(d); struct irq_chip *c; bool ret = false; @@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc) if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) - cpumask_copy(d->affinity, affinity); + cpumask_copy(irq_data_get_affinity_mask(d), affinity); return ret; } diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f3067d4d4e35..926ae8d9abc5 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -34,7 +34,6 @@ #include <linux/kexec.h> #include <linux/crash_dump.h> #include <linux/root_dev.h> -#include <linux/clk-provider.h> #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/smp.h> diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 1670f15ef69e..948f0ad2de23 100644 --- 
a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) * Other callers might not initialize the si_lsb field, * so check explicitely for the right codes here. */ - if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) + if (from->si_signo == SIGBUS && + (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif break; @@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE)) diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index ec37ab3f524f..97bc68f4c689 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -199,16 +199,15 @@ up_fail: */ void update_vsyscall(struct timekeeper *tk) { - struct timespec xtime_coarse; u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); ++vdso_data->tb_seq_count; smp_wmb(); - xtime_coarse = __current_kernel_time(); vdso_data->use_syscall = use_syscall; - vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; - vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; + vdso_data->xtime_coarse_sec = tk->xtime_sec; + vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >> + tk->tkr_mono.shift; vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index f02530e726f6..85c57158dcd9 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) { if (!(vcpu->arch.hcr_el2 & HCR_RW)) inject_abt32(vcpu, false, addr); - - inject_abt64(vcpu, false, addr); + else + inject_abt64(vcpu, false, addr); } /** @@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) { if (!(vcpu->arch.hcr_el2 & HCR_RW)) inject_abt32(vcpu, true, addr); - - inject_abt64(vcpu, true, addr); + else + inject_abt64(vcpu, true, addr); } /** @@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.hcr_el2 & HCR_RW)) inject_undef32(vcpu); - - inject_undef64(vcpu); + else + inject_undef64(vcpu); } diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index 1d66afdfac07..f61f2dd67464 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -12,6 +12,7 @@ generic-y += irq_work.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += param.h generic-y += percpu.h generic-y += preempt.h diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index e998ff5d8e1a..f855646e0db7 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h @@ -297,6 +297,7 @@ extern void __iounmap(void __iomem *addr); #define ioremap_wc ioremap_nocache #define ioremap_wt ioremap_nocache +#define ioremap_uc ioremap_nocache #define cached(addr) P1SEGADDR(addr) #define uncached(addr) P2SEGADDR(addr) diff --git a/arch/avr32/include/asm/mm-arch-hooks.h b/arch/avr32/include/asm/mm-arch-hooks.h deleted file mode 100644 index 145452ffbdad..000000000000 --- a/arch/avr32/include/asm/mm-arch-hooks.h +++ 
/dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_AVR32_MM_ARCH_HOOKS_H -#define _ASM_AVR32_MM_ARCH_HOOKS_H - -#endif /* _ASM_AVR32_MM_ARCH_HOOKS_H */ diff --git a/arch/avr32/include/asm/switch_to.h b/arch/avr32/include/asm/switch_to.h index 9a8e9d5208d4..6f00581c3d4f 100644 --- a/arch/avr32/include/asm/switch_to.h +++ b/arch/avr32/include/asm/switch_to.h @@ -15,11 +15,13 @@ */ #ifdef CONFIG_OWNERSHIP_TRACE #include <asm/ocd.h> -#define finish_arch_switch(prev) \ +#define ocd_switch(prev, next) \ do { \ ocd_write(PID, prev->pid); \ - ocd_write(PID, current->pid); \ + ocd_write(PID, next->pid); \ } while(0) +#else +#define ocd_switch(prev, next) #endif /* @@ -38,6 +40,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct cpu_context *); #define switch_to(prev, next, last) \ do { \ + ocd_switch(prev, next); \ last = __switch_to(prev, &prev->thread.cpu_context + 1, \ &next->thread.cpu_context); \ } while (0) diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index d0f771be9e96..a124c55733db 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -18,6 +18,7 @@ #include <mach/pm.h> +static bool disable_cpu_idle_poll; static cycle_t read_cycle_count(struct clocksource *cs) { @@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta, return 0; } -static void comparator_mode(enum clock_event_mode mode, - struct clock_event_device *evdev) +static int comparator_shutdown(struct clock_event_device *evdev) { - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - pr_debug("%s: start\n", evdev->name); - /* FALLTHROUGH */ - case CLOCK_EVT_MODE_RESUME: + pr_debug("%s: %s\n", __func__, evdev->name); + sysreg_write(COMPARE, 0); + + if (disable_cpu_idle_poll) { + disable_cpu_idle_poll = false; /* - * If we're using the COUNT and COMPARE registers we - * need to force idle poll. + * Only disable idle poll if we have forced that + * in a previous call. */ - cpu_idle_poll_ctrl(true); - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - sysreg_write(COMPARE, 0); - pr_debug("%s: stop\n", evdev->name); - if (evdev->mode == CLOCK_EVT_MODE_ONESHOT || - evdev->mode == CLOCK_EVT_MODE_RESUME) { - /* - * Only disable idle poll if we have forced that - * in a previous call. - */ - cpu_idle_poll_ctrl(false); - } - break; - default: - BUG(); + cpu_idle_poll_ctrl(false); } + return 0; +} + +static int comparator_set_oneshot(struct clock_event_device *evdev) +{ + pr_debug("%s: %s\n", __func__, evdev->name); + + disable_cpu_idle_poll = true; + /* + * If we're using the COUNT and COMPARE registers we + * need to force idle poll. 
+ */ + cpu_idle_poll_ctrl(true); + + return 0; } static struct clock_event_device comparator = { - .name = "avr32_comparator", - .features = CLOCK_EVT_FEAT_ONESHOT, - .shift = 16, - .rating = 50, - .set_next_event = comparator_next_event, - .set_mode = comparator_mode, + .name = "avr32_comparator", + .features = CLOCK_EVT_FEAT_ONESHOT, + .shift = 16, + .rating = 50, + .set_next_event = comparator_next_event, + .set_state_shutdown = comparator_shutdown, + .set_state_oneshot = comparator_set_oneshot, + .tick_resume = comparator_set_oneshot, }; void read_persistent_clock(struct timespec *ts) diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c index 23b1a97fae7a..52c179bec0cc 100644 --- a/arch/avr32/mach-at32ap/clock.c +++ b/arch/avr32/mach-at32ap/clock.c @@ -80,6 +80,9 @@ int clk_enable(struct clk *clk) { unsigned long flags; + if (!clk) + return 0; + spin_lock_irqsave(&clk_lock, flags); __clk_enable(clk); spin_unlock_irqrestore(&clk_lock, flags); @@ -106,6 +109,9 @@ void clk_disable(struct clk *clk) { unsigned long flags; + if (IS_ERR_OR_NULL(clk)) + return; + spin_lock_irqsave(&clk_lock, flags); __clk_disable(clk); spin_unlock_irqrestore(&clk_lock, flags); @@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk) unsigned long flags; unsigned long rate; + if (!clk) + return 0; + spin_lock_irqsave(&clk_lock, flags); rate = clk->get_rate(clk); spin_unlock_irqrestore(&clk_lock, flags); @@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate) { unsigned long flags, actual_rate; + if (!clk) + return 0; + if (!clk->set_rate) return -ENOSYS; @@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate) unsigned long flags; long ret; + if (!clk) + return 0; + if (!clk->set_rate) return -ENOSYS; @@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent) unsigned long flags; int ret; + if (!clk) + return 0; + if (!clk->set_parent) return -ENOSYS; @@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent); struct clk *clk_get_parent(struct clk *clk) { - return clk->parent; + return !clk ? NULL : clk->parent; } EXPORT_SYMBOL(clk_get_parent); diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 07051a63415d..61cd1e786a14 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -21,6 +21,7 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += msgbuf.h generic-y += mutex.h diff --git a/arch/blackfin/include/asm/mm-arch-hooks.h b/arch/blackfin/include/asm/mm-arch-hooks.h deleted file mode 100644 index 1c5211ec338f..000000000000 --- a/arch/blackfin/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _ASM_BLACKFIN_MM_ARCH_HOOKS_H -#define _ASM_BLACKFIN_MM_ARCH_HOOKS_H - -#endif /* _ASM_BLACKFIN_MM_ARCH_HOOKS_H */ diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 7aeb32272975..f17c4dc6050c 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -26,6 +26,7 @@ generic-y += kdebug.h generic-y += kmap_types.h generic-y += local.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += mmu.h generic-y += mmu_context.h diff --git a/arch/c6x/include/asm/mm-arch-hooks.h b/arch/c6x/include/asm/mm-arch-hooks.h deleted file mode 100644 index bb3c4a6ce8e9..000000000000 --- a/arch/c6x/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_C6X_MM_ARCH_HOOKS_H -#define _ASM_C6X_MM_ARCH_HOOKS_H - -#endif /* _ASM_C6X_MM_ARCH_HOOKS_H */ diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index d294f6aaff1d..ad2244f35bca 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -18,6 +18,7 @@ generic-y += linkage.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += module.h generic-y += percpu.h generic-y += preempt.h diff --git a/arch/cris/include/asm/mm-arch-hooks.h b/arch/cris/include/asm/mm-arch-hooks.h deleted file mode 100644 index 314f774db2b0..000000000000 --- a/arch/cris/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _ASM_CRIS_MM_ARCH_HOOKS_H -#define _ASM_CRIS_MM_ARCH_HOOKS_H - -#endif /* _ASM_CRIS_MM_ARCH_HOOKS_H */ diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 30edce31e5c2..8e47b832cc76 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -4,5 +4,6 @@ generic-y += cputime.h generic-y += exec.h generic-y += irq_work.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h diff --git a/arch/frv/include/asm/io.h b/arch/frv/include/asm/io.h index a31b63ec4930..70dfbea8c8d7 100644 --- a/arch/frv/include/asm/io.h +++ b/arch/frv/include/asm/io.h @@ -278,6 +278,7 @@ static inline void __iomem *ioremap_fullcache(unsigned long physaddr, unsigned l } #define ioremap_wc ioremap_nocache +#define ioremap_uc ioremap_nocache extern void iounmap(void volatile __iomem *addr); diff --git a/arch/frv/include/asm/mm-arch-hooks.h b/arch/frv/include/asm/mm-arch-hooks.h deleted file mode 100644 index 51d13a870404..000000000000 --- a/arch/frv/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_FRV_MM_ARCH_HOOKS_H -#define _ASM_FRV_MM_ARCH_HOOKS_H - -#endif /* _ASM_FRV_MM_ARCH_HOOKS_H */ diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c index 0635bd6c2af3..34bb4b13e079 100644 --- a/arch/frv/mb93090-mb00/pci-frv.c +++ b/arch/frv/mb93090-mb00/pci-frv.c @@ -175,14 +175,6 @@ static void __init pcibios_assign_resources(void) if (!r->start && r->end) pci_assign_resource(dev, idx); } - - if (pci_probe & PCI_ASSIGN_ROMS) { - r = &dev->resource[PCI_ROM_RESOURCE]; - r->end -= r->start; - r->start = 0; - if (r->end) - pci_assign_resource(dev, PCI_ROM_RESOURCE); - } } } diff --git a/arch/frv/mb93090-mb00/pci-frv.h b/arch/frv/mb93090-mb00/pci-frv.h index a7e487fe76ed..d51992ff5a61 100644 --- a/arch/frv/mb93090-mb00/pci-frv.h +++ b/arch/frv/mb93090-mb00/pci-frv.h @@ -14,14 +14,6 @@ #define DBG(x...) 
#endif -#define PCI_PROBE_BIOS 0x0001 -#define PCI_PROBE_CONF1 0x0002 -#define PCI_PROBE_CONF2 0x0004 -#define PCI_NO_CHECKS 0x0400 -#define PCI_ASSIGN_ROMS 0x1000 -#define PCI_BIOS_IRQ_SCAN 0x2000 -#define PCI_ASSIGN_ALL_BUSSES 0x4000 - extern unsigned int __nongpreldata pci_probe; /* pci-frv.c */ diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c index f211839e2cae..f9c86c475bbd 100644 --- a/arch/frv/mb93090-mb00/pci-vdk.c +++ b/arch/frv/mb93090-mb00/pci-vdk.c @@ -294,8 +294,6 @@ void pcibios_fixup_bus(struct pci_bus *bus) printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); #endif - pci_read_bridge_bases(bus); - if (bus->number == 0) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 00379d64f707..70e6ae1e7006 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -33,6 +33,7 @@ generic-y += linkage.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += mmu.h generic-y += mmu_context.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 5ade4a163558..daee37bd0999 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -28,6 +28,7 @@ generic-y += kmap_types.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += msgbuf.h generic-y += pci.h diff --git a/arch/hexagon/include/asm/mm-arch-hooks.h b/arch/hexagon/include/asm/mm-arch-hooks.h deleted file mode 100644 index 05e8b939e416..000000000000 --- a/arch/hexagon/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_HEXAGON_MM_ARCH_HOOKS_H -#define _ASM_HEXAGON_MM_ARCH_HOOKS_H - -#endif /* _ASM_HEXAGON_MM_ARCH_HOOKS_H */ diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index ccff13d33fa2..9de3ba12f6b9 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -4,6 +4,7 @@ generic-y += exec.h generic-y += irq_work.h generic-y += kvm_para.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h generic-y += vtime.h diff --git a/arch/ia64/include/asm/mm-arch-hooks.h b/arch/ia64/include/asm/mm-arch-hooks.h deleted file mode 100644 index ab4b5c698322..000000000000 --- a/arch/ia64/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _ASM_IA64_MM_ARCH_HOOKS_H -#define _ASM_IA64_MM_ARCH_HOOKS_H - -#endif /* _ASM_IA64_MM_ARCH_HOOKS_H */ diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 7cc3be9fa7c6..d89b6013c941 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -533,10 +533,9 @@ void pcibios_fixup_bus(struct pci_bus *b) { struct pci_dev *dev; - if (b->self) { - pci_read_bridge_bases(b); + if (b->self) pcibios_fixup_bridge_resources(b->self); - } + list_for_each_entry(dev, &b->devices, bus_list) pcibios_fixup_device_resources(dev); platform_pci_fixup_bus(b); diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index ba1cdc018731..e0eb704ca1fa 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -4,6 +4,7 @@ generic-y += cputime.h generic-y += exec.h generic-y += irq_work.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += module.h generic-y += preempt.h generic-y += sections.h diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h index 0c3f25ee3381..61b8931bc192 100644 --- a/arch/m32r/include/asm/io.h +++ b/arch/m32r/include/asm/io.h @@ -69,6 +69,7 @@ extern void iounmap(volatile void __iomem *addr); #define ioremap_nocache(off,size) ioremap(off,size) #define ioremap_wc ioremap_nocache #define ioremap_wt ioremap_nocache +#define ioremap_uc ioremap_nocache /* * IO bus memory addresses are also 1:1 with the physical address @@ -174,6 +175,11 @@ static inline void _writel(unsigned long l, unsigned long addr) #define iowrite16 writew #define iowrite32 writel +#define ioread16be(addr) be16_to_cpu(readw(addr)) +#define ioread32be(addr) be32_to_cpu(readl(addr)) +#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr)) +#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr)) + #define mmiowb() #define flush_write_buffers() do { } while (0) /* M32R_FIXME */ diff --git a/arch/m32r/include/asm/mm-arch-hooks.h b/arch/m32r/include/asm/mm-arch-hooks.h deleted file mode 100644 index 6d60b4750f41..000000000000 --- a/arch/m32r/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_M32R_MM_ARCH_HOOKS_H -#define _ASM_M32R_MM_ARCH_HOOKS_H - -#endif /* _ASM_M32R_MM_ARCH_HOOKS_H */ diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 33013dfcd3e1..c496d48a8c8d 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -125,6 +125,13 @@ endif # M68KCLASSIC if COLDFIRE +choice + prompt "ColdFire SoC type" + default M520x + help + Select the type of ColdFire System-on-Chip (SoC) that you want + to build for. + config M5206 bool "MCF5206" depends on !MMU @@ -174,9 +181,6 @@ config M525x help Freescale (Motorola) Coldfire 5251/5253 processor support. -config M527x - bool - config M5271 bool "MCF5271" depends on !MMU @@ -223,9 +227,6 @@ config M5307 help Motorola ColdFire 5307 processor support. -config M53xx - bool - config M532x bool "MCF532x" depends on !MMU @@ -251,9 +252,6 @@ config M5407 help Motorola ColdFire 5407 processor support. -config M54xx - bool - config M547x bool "MCF547x" select M54xx @@ -280,6 +278,17 @@ config M5441x help Freescale Coldfire 54410/54415/54416/54417/54418 processor support. 
+endchoice + +config M527x + bool + +config M53xx + bool + +config M54xx + bool + endif # COLDFIRE @@ -416,22 +425,18 @@ config HAVE_MBAR config HAVE_IPSBAR bool -config CLOCK_SET - bool "Enable setting the CPU clock frequency" - depends on COLDFIRE - default n - help - On some CPU's you do not need to know what the core CPU clock - frequency is. On these you can disable clock setting. On some - traditional 68K parts, and on all ColdFire parts you need to set - the appropriate CPU clock frequency. On these devices many of the - onboard peripherals derive their timing from the master CPU clock - frequency. - config CLOCK_FREQ int "Set the core clock frequency" + default "25000000" if M5206 + default "54000000" if M5206e + default "166666666" if M520x + default "140000000" if M5249 + default "150000000" if M527x || M523x + default "90000000" if M5307 + default "50000000" if M5407 + default "266000000" if M54xx default "66666666" - depends on CLOCK_SET + depends on COLDFIRE help Define the CPU clock frequency in use. This is the core clock frequency, it may or may not be the same as the external clock diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 753a6237f99a..0b6b40d37b95 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -57,7 +57,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -91,6 +91,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -287,7 +288,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -320,7 +320,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -345,6 +344,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -355,6 +355,7 @@ CONFIG_ARIADNE=y # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -363,6 +364,7 @@ CONFIG_HYDRA=y CONFIG_APNE=y CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -448,6 +450,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -536,6 +539,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -543,6 +547,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -571,14 +576,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m 
-# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 1f93dcaf02e5..eeb3a8991fc4 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -55,7 +55,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -89,6 +89,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -279,7 +280,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -302,7 +302,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -327,17 +326,20 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -406,6 +408,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -494,6 +497,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -501,6 +505,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -529,14 +534,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 831b8b8b92ad..3a7006654ce9 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -55,7 +55,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -89,6 +89,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m 
+CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -283,7 +284,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -311,7 +311,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -336,6 +335,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -343,11 +343,13 @@ CONFIG_ATARILANCE=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set CONFIG_NE2000=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -428,6 +430,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -516,6 +519,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -523,6 +527,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -551,14 +556,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 91fd187c16d5..0586b323a673 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -53,7 +53,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -87,6 +87,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -277,7 +278,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -301,7 +301,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -326,17 +325,20 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_MARVELL is not 
set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -399,6 +401,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -487,6 +490,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -494,6 +498,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -522,14 +527,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 9d4934f1d2c3..ad1dbce07aa4 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -55,7 +55,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -89,6 +89,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -279,7 +280,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -302,7 +302,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -327,6 +326,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -334,11 +334,13 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -408,6 +410,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -496,6 +499,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -503,6 +507,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m 
CONFIG_CRYPTO_PCBC=m @@ -531,14 +536,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/m5208evb_defconfig b/arch/m68k/configs/m5208evb_defconfig index e7292f460af4..4c7b7938d53a 100644 --- a/arch/m68k/configs/m5208evb_defconfig +++ b/arch/m68k/configs/m5208evb_defconfig @@ -1,10 +1,6 @@ -# CONFIG_MMU is not set -CONFIG_EXPERIMENTAL=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set @@ -16,17 +12,12 @@ CONFIG_EXPERT=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set -CONFIG_M520x=y -CONFIG_CLOCK_SET=y -CONFIG_CLOCK_FREQ=166666666 -CONFIG_CLOCK_DIV=2 -CONFIG_M5208EVB=y +# CONFIG_MMU is not set # CONFIG_4KSTACKS is not set CONFIG_RAMBASE=0x40000000 CONFIG_RAMSIZE=0x2000000 CONFIG_VECTORBASE=0x40000000 CONFIG_KERNELBASE=0x40020000 -CONFIG_RAM16BIT=y CONFIG_BINFMT_FLAT=y CONFIG_NET=y CONFIG_PACKET=y @@ -40,24 +31,19 @@ CONFIG_INET=y # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_RAM=y CONFIG_MTD_UCLINUX=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y CONFIG_FEC=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set +# CONFIG_UNIX98_PTYS is not set CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF_BAUDRATE=115200 CONFIG_SERIAL_MCF_CONSOLE=y -# CONFIG_UNIX98_PTYS is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set @@ -68,8 +54,6 @@ CONFIG_EXT2_FS=y CONFIG_ROMFS_FS=y CONFIG_ROMFS_BACKED_BY_MTD=y # CONFIG_NETWORK_FILESYSTEMS is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_FULLDEBUG=y CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" +CONFIG_FULLDEBUG=y diff --git a/arch/m68k/configs/m5249evb_defconfig b/arch/m68k/configs/m5249evb_defconfig index 0cd4b39f325b..a782f368650f 100644 --- a/arch/m68k/configs/m5249evb_defconfig +++ b/arch/m68k/configs/m5249evb_defconfig @@ -1,10 +1,6 @@ -# CONFIG_MMU is not set -CONFIG_EXPERIMENTAL=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set @@ -16,10 +12,8 @@ CONFIG_EXPERT=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set +# CONFIG_MMU is not set CONFIG_M5249=y -CONFIG_CLOCK_SET=y -CONFIG_CLOCK_FREQ=140000000 -CONFIG_CLOCK_DIV=2 CONFIG_M5249C3=y CONFIG_RAMBASE=0x00000000 CONFIG_RAMSIZE=0x00800000 @@ -38,23 +32,18 @@ CONFIG_INET=y # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_RAM=y CONFIG_MTD_UCLINUX=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -# 
CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_PPP=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set +# CONFIG_UNIX98_PTYS is not set CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF_CONSOLE=y -# CONFIG_UNIX98_PTYS is not set # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y @@ -62,7 +51,5 @@ CONFIG_EXT2_FS=y CONFIG_ROMFS_FS=y CONFIG_ROMFS_BACKED_BY_MTD=y # CONFIG_NETWORK_FILESYSTEMS is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" -# CONFIG_CRC32 is not set diff --git a/arch/m68k/configs/m5272c3_defconfig b/arch/m68k/configs/m5272c3_defconfig index a60cb3509135..6f5fb92f5cbf 100644 --- a/arch/m68k/configs/m5272c3_defconfig +++ b/arch/m68k/configs/m5272c3_defconfig @@ -1,10 +1,6 @@ -# CONFIG_MMU is not set -CONFIG_EXPERIMENTAL=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set @@ -16,8 +12,8 @@ CONFIG_EXPERT=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set +# CONFIG_MMU is not set CONFIG_M5272=y -CONFIG_CLOCK_SET=y CONFIG_M5272C3=y CONFIG_RAMBASE=0x00000000 CONFIG_RAMSIZE=0x00800000 @@ -36,23 +32,18 @@ CONFIG_INET=y # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_RAM=y CONFIG_MTD_UCLINUX=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y CONFIG_FEC=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set +# CONFIG_UNIX98_PTYS is not set CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF_CONSOLE=y -# CONFIG_UNIX98_PTYS is not set # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y @@ -61,6 +52,5 @@ CONFIG_EXT2_FS=y CONFIG_ROMFS_FS=y CONFIG_ROMFS_BACKED_BY_MTD=y # CONFIG_NETWORK_FILESYSTEMS is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" diff --git a/arch/m68k/configs/m5275evb_defconfig b/arch/m68k/configs/m5275evb_defconfig index e6502ab7cb2f..b5d7cd1ce856 100644 --- a/arch/m68k/configs/m5275evb_defconfig +++ b/arch/m68k/configs/m5275evb_defconfig @@ -1,10 +1,6 @@ -# CONFIG_MMU is not set -CONFIG_EXPERIMENTAL=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set @@ -16,11 +12,8 @@ CONFIG_EXPERT=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set +# CONFIG_MMU is not set CONFIG_M5275=y -CONFIG_CLOCK_SET=y -CONFIG_CLOCK_FREQ=150000000 -CONFIG_CLOCK_DIV=2 -CONFIG_M5275EVB=y # CONFIG_4KSTACKS is not set CONFIG_RAMBASE=0x00000000 CONFIG_RAMSIZE=0x00000000 @@ -39,24 +32,19 @@ CONFIG_INET=y # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_RAM=y CONFIG_MTD_UCLINUX=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y CONFIG_FEC=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_PPP=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set +# CONFIG_UNIX98_PTYS is not set CONFIG_SERIAL_MCF=y 
CONFIG_SERIAL_MCF_CONSOLE=y -# CONFIG_UNIX98_PTYS is not set # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y @@ -65,8 +53,5 @@ CONFIG_EXT2_FS=y CONFIG_ROMFS_FS=y CONFIG_ROMFS_BACKED_BY_MTD=y # CONFIG_NETWORK_FILESYSTEMS is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" -# CONFIG_CRC32 is not set diff --git a/arch/m68k/configs/m5307c3_defconfig b/arch/m68k/configs/m5307c3_defconfig index 023812abd2e6..1b4c09461c40 100644 --- a/arch/m68k/configs/m5307c3_defconfig +++ b/arch/m68k/configs/m5307c3_defconfig @@ -1,10 +1,6 @@ -# CONFIG_MMU is not set -CONFIG_EXPERIMENTAL=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set @@ -16,10 +12,8 @@ CONFIG_EXPERT=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set +# CONFIG_MMU is not set CONFIG_M5307=y -CONFIG_CLOCK_SET=y -CONFIG_CLOCK_FREQ=90000000 -CONFIG_CLOCK_DIV=2 CONFIG_M5307C3=y CONFIG_RAMBASE=0x00000000 CONFIG_RAMSIZE=0x00800000 @@ -38,16 +32,11 @@ CONFIG_INET=y # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_RAM=y CONFIG_MTD_UCLINUX=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_PPP=y CONFIG_SLIP=y CONFIG_SLIP_COMPRESSED=y @@ -56,21 +45,17 @@ CONFIG_SLIP_COMPRESSED=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF_CONSOLE=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y # CONFIG_DNOTIFY is not set CONFIG_ROMFS_FS=y CONFIG_ROMFS_BACKED_BY_MTD=y # CONFIG_NETWORK_FILESYSTEMS is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_FULLDEBUG=y CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" -# CONFIG_CRC32 is not set +CONFIG_FULLDEBUG=y diff --git a/arch/m68k/configs/m5407c3_defconfig b/arch/m68k/configs/m5407c3_defconfig index 557b39f3be90..275ad543d4bc 100644 --- a/arch/m68k/configs/m5407c3_defconfig +++ b/arch/m68k/configs/m5407c3_defconfig @@ -1,10 +1,6 @@ -# CONFIG_MMU is not set -CONFIG_EXPERIMENTAL=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set @@ -17,9 +13,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set +# CONFIG_MMU is not set CONFIG_M5407=y -CONFIG_CLOCK_SET=y -CONFIG_CLOCK_FREQ=50000000 CONFIG_M5407C3=y CONFIG_RAMBASE=0x00000000 CONFIG_RAMSIZE=0x00000000 @@ -38,22 +33,17 @@ CONFIG_INET=y # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_RAM=y CONFIG_MTD_UCLINUX=y CONFIG_BLK_DEV_RAM=y -# CONFIG_MISC_DEVICES is not set CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set CONFIG_PPP=y # CONFIG_INPUT is not set # CONFIG_VT is not set +# CONFIG_UNIX98_PTYS 
is not set CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF_CONSOLE=y -# CONFIG_UNIX98_PTYS is not set # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set @@ -63,8 +53,5 @@ CONFIG_EXT2_FS=y CONFIG_ROMFS_FS=y CONFIG_ROMFS_BACKED_BY_MTD=y # CONFIG_NETWORK_FILESYSTEMS is not set -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_BOOTPARAM=y CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" -# CONFIG_CRC32 is not set diff --git a/arch/m68k/configs/m5475evb_defconfig b/arch/m68k/configs/m5475evb_defconfig index c5018a68819b..4f4ccd13c11b 100644 --- a/arch/m68k/configs/m5475evb_defconfig +++ b/arch/m68k/configs/m5475evb_defconfig @@ -1,11 +1,7 @@ -CONFIG_EXPERIMENTAL=y # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED=y -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_SYSCTL_SYSCALL=y # CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SIGNALFD is not set @@ -20,19 +16,16 @@ CONFIG_MODULES=y # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set CONFIG_COLDFIRE=y -CONFIG_M547x=y -CONFIG_CLOCK_SET=y -CONFIG_CLOCK_FREQ=266000000 # CONFIG_4KSTACKS is not set CONFIG_RAMBASE=0x0 CONFIG_RAMSIZE=0x2000000 CONFIG_VECTORBASE=0x0 CONFIG_MBAR=0xff000000 CONFIG_KERNELBASE=0x20000 +CONFIG_PCI=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_JEDECPROBE=y diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 72bc187ca995..b44acacaecf4 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -54,7 +54,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -88,6 +88,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -282,7 +283,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -311,7 +311,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -343,6 +342,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -351,12 +351,14 @@ CONFIG_MACMACE=y # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set CONFIG_MACSONIC=y CONFIG_MAC8390=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -430,6 +432,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -518,6 +521,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -525,6 +529,7 @@ 
CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -553,14 +558,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8fb65535597f..8afca3753db1 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -64,7 +64,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -98,6 +98,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -301,7 +302,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -344,7 +344,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -376,6 +375,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -391,6 +391,7 @@ CONFIG_MACMACE=y # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HP is not set CONFIG_BVME6000_NET=y CONFIG_MVME16x_NET=y @@ -403,6 +404,7 @@ CONFIG_NE2000=y CONFIG_APNE=y CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -510,6 +512,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -598,6 +601,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -605,6 +609,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -633,14 +638,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index f34491ec0126..ef00875994d9 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ 
b/arch/m68k/configs/mvme147_defconfig @@ -52,7 +52,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -86,6 +86,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -276,7 +277,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -300,7 +300,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -325,6 +324,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -332,11 +332,13 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -399,6 +401,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -487,6 +490,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -494,6 +498,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -522,14 +527,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 3d3614d1b041..387c2bd90ff1 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -53,7 +53,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -87,6 +87,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -277,7 +278,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -301,7 +301,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m 
CONFIG_DM_RAID=m @@ -326,17 +325,20 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -399,6 +401,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -487,6 +490,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -494,6 +498,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -522,14 +527,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 643e9c93bea7..35355c1bc714 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -53,7 +53,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -87,6 +87,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -280,7 +281,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -307,7 +307,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -332,6 +331,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -341,12 +341,14 @@ CONFIG_VETH=m # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set CONFIG_NE2000=y # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -421,6 +423,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m 
CONFIG_ECRYPT_FS=m @@ -509,6 +512,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -516,6 +520,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -544,14 +549,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 8fecc5aa166c..8442d267b877 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -50,7 +50,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -84,6 +84,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -274,7 +275,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -298,7 +298,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -323,17 +322,20 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_EZCHIP is not set CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -400,6 +402,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -487,6 +490,7 @@ CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -494,6 +498,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -522,14 +527,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set 
CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 9902c5bfbdc8..0e1b542e1555 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -50,7 +50,7 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE=m +CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -84,6 +84,7 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m CONFIG_NFT_EXTHDR=m CONFIG_NFT_META=m CONFIG_NFT_CT=m @@ -274,7 +275,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_PMEM=m CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m @@ -298,7 +298,6 @@ CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m @@ -323,6 +322,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m CONFIG_VXLAN=m +CONFIG_GENEVE=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m @@ -330,11 +330,13 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set @@ -400,6 +402,7 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y +CONFIG_PROC_CHILDREN=y CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_ECRYPT_FS=m @@ -488,6 +491,7 @@ CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m +CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m @@ -495,6 +499,7 @@ CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -523,14 +528,15 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 1555bc189c7d..eb85bd9c6180 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -18,6 +18,7 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += mutex.h generic-y += percpu.h diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h index c94557b91448..50aa4dac9ca2 100644 --- a/arch/m68k/include/asm/coldfire.h +++ b/arch/m68k/include/asm/coldfire.h @@ -19,7 +19,7 @@ * in any case new boards come along from time to time that have yet * another different clocking frequency. 
*/ -#ifdef CONFIG_CLOCK_SET +#ifdef CONFIG_CLOCK_FREQ #define MCF_CLK CONFIG_CLOCK_FREQ #else #error "Don't know what your ColdFire CPU clock frequency is??" diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h index 618c85d3c786..c98ac81582ac 100644 --- a/arch/m68k/include/asm/io_mm.h +++ b/arch/m68k/include/asm/io_mm.h @@ -413,7 +413,8 @@ static inline void isa_delay(void) #define writew(val, addr) out_le16((addr), (val)) #endif /* CONFIG_ATARI_ROM_ISA */ -#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) +#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) && \ + !(defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE)) /* * We need to define dummy functions for GENERIC_IOMAP support. */ @@ -467,6 +468,7 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned lon { return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); } +#define ioremap_uc ioremap_nocache static inline void __iomem *ioremap_wt(unsigned long physaddr, unsigned long size) { diff --git a/arch/m68k/include/asm/mm-arch-hooks.h b/arch/m68k/include/asm/mm-arch-hooks.h deleted file mode 100644 index 7e8709bc90ae..000000000000 --- a/arch/m68k/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_M68K_MM_ARCH_HOOKS_H -#define _ASM_M68K_MM_ARCH_HOOKS_H - -#endif /* _ASM_M68K_MM_ARCH_HOOKS_H */ diff --git a/arch/m68k/kernel/bootinfo_proc.c b/arch/m68k/kernel/bootinfo_proc.c index 7ee853e1432b..2a33a9645ad8 100644 --- a/arch/m68k/kernel/bootinfo_proc.c +++ b/arch/m68k/kernel/bootinfo_proc.c @@ -62,12 +62,10 @@ static int __init init_bootinfo_procfs(void) if (!bootinfo_size) return -EINVAL; - bootinfo_copy = kmalloc(bootinfo_size, GFP_KERNEL); + bootinfo_copy = kmemdup(bootinfo_tmp, bootinfo_size, GFP_KERNEL); if (!bootinfo_copy) return -ENOMEM; - memcpy(bootinfo_copy, bootinfo_tmp, bootinfo_size); - pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL); if (!pde) { kfree(bootinfo_copy); diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index 199320f3c345..df31353fd200 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -25,6 +25,7 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += msgbuf.h generic-y += mutex.h generic-y += param.h diff --git a/arch/metag/include/asm/mm-arch-hooks.h b/arch/metag/include/asm/mm-arch-hooks.h deleted file mode 100644 index b0072b2eb0de..000000000000 --- a/arch/metag/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _ASM_METAG_MM_ARCH_HOOKS_H -#define _ASM_METAG_MM_ARCH_HOOKS_H - -#endif /* _ASM_METAG_MM_ARCH_HOOKS_H */ diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 9989ddb169ca..2f222f355c4b 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -6,6 +6,7 @@ generic-y += device.h generic-y += exec.h generic-y += irq_work.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += syscalls.h generic-y += trace_clock.h diff --git a/arch/microblaze/include/asm/mm-arch-hooks.h b/arch/microblaze/include/asm/mm-arch-hooks.h deleted file mode 100644 index 5c4065911bda..000000000000 --- a/arch/microblaze/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_MICROBLAZE_MM_ARCH_HOOKS_H -#define _ASM_MICROBLAZE_MM_ARCH_HOOKS_H - -#endif /* _ASM_MICROBLAZE_MM_ARCH_HOOKS_H */ diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index ae838ed5fcf2..6b8b75266801 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -863,14 +863,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus) void pcibios_fixup_bus(struct pci_bus *bus) { - /* When called from the generic PCI probe, read PCI<->PCI bridge - * bases. This is -not- called when generating the PCI tree from - * the OF device-tree. - */ - if (bus->self != NULL) - pci_read_bridge_bases(bus); - - /* Now fixup the bus bus */ + /* Fixup the bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 3a3548f267b4..c6d28bce0b40 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -152,7 +152,6 @@ config BMIPS_GENERIC select BCM7120_L2_IRQ select BRCMSTB_L2_IRQ select IRQ_MIPS_CPU - select RAW_IRQ_ACCESSORS select DMA_NONCOHERENT select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN @@ -1428,6 +1427,7 @@ config CPU_MIPS64_R6 select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA select GENERIC_CSUM + select MIPS_O32_FP64_SUPPORT if MIPS32_O32 help Choose this option to build a kernel for release 6 or later of the MIPS64 architecture. New MIPS processors, starting with the Warrior @@ -2263,11 +2263,6 @@ config MIPS_CM config MIPS_CPC bool -config SB1_PASS_1_WORKAROUNDS - bool - depends on CPU_SB1_PASS_1 - default y - config SB1_PASS_2_WORKAROUNDS bool depends on CPU_SB1 && (CPU_SB1_PASS_2_2 || CPU_SB1_PASS_2) diff --git a/arch/mips/Makefile b/arch/mips/Makefile index ae2dd59050f7..252e347958f3 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -181,13 +181,6 @@ cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,) cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,) cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,) -ifdef CONFIG_CPU_SB1 -ifdef CONFIG_SB1_PASS_1_WORKAROUNDS -KBUILD_AFLAGS_MODULE += -msb1-pass1-workarounds -KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds -endif -endif - # For smartmips configurations, there are hundreds of warnings due to ISA overrides # in assembly and header files. 
smartmips is only supported for MIPS32r1 onwards # and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index 6e46abe0dac6..bd34f4093cd9 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -35,6 +35,7 @@ #include <linux/init.h> #include <linux/io.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/slab.h> @@ -389,12 +390,11 @@ static long alchemy_calc_div(unsigned long rate, unsigned long prate, return div1; } -static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk, - int scale, int maxdiv) +static int alchemy_clk_fgcs_detr(struct clk_hw *hw, + struct clk_rate_request *req, + int scale, int maxdiv) { - struct clk *pc, *bpc, *free; + struct clk_hw *pc, *bpc, *free; long tdv, tpr, pr, nr, br, bpr, diff, lastdiff; int j; @@ -408,7 +408,7 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, * the one that gets closest to but not over the requested rate. */ for (j = 0; j < 7; j++) { - pc = clk_get_parent_by_index(hw->clk, j); + pc = clk_hw_get_parent_by_index(hw, j); if (!pc) break; @@ -416,20 +416,20 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, * XXX: we would actually want clk_has_active_children() * but this is a good-enough approximation for now. */ - if (!__clk_is_prepared(pc)) { + if (!clk_hw_is_prepared(pc)) { if (!free) free = pc; } - pr = clk_get_rate(pc); - if (pr < rate) + pr = clk_hw_get_rate(pc); + if (pr < req->rate) continue; /* what can hardware actually provide */ - tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); + tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL); nr = pr / tdv; - diff = rate - nr; - if (nr > rate) + diff = req->rate - nr; + if (nr > req->rate) continue; if (diff < lastdiff) { @@ -448,15 +448,16 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, */ if (lastdiff && free) { for (j = (maxdiv == 4) ? 
1 : scale; j <= maxdiv; j += scale) { - tpr = rate * j; + tpr = req->rate * j; if (tpr < 0) break; - pr = clk_round_rate(free, tpr); + pr = clk_hw_round_rate(free, tpr); - tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); + tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, + NULL); nr = pr / tdv; - diff = rate - nr; - if (nr > rate) + diff = req->rate - nr; + if (nr > req->rate) continue; if (diff < lastdiff) { lastdiff = diff; @@ -469,9 +470,14 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, } } - *best_parent_rate = bpr; - *best_parent_clk = __clk_get_hw(bpc); - return br; + if (br < 0) + return br; + + req->best_parent_rate = bpr; + req->best_parent_hw = bpc; + req->rate = br; + + return 0; } static int alchemy_clk_fgv1_en(struct clk_hw *hw) @@ -562,14 +568,10 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw, return parent_rate / v; } -static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int alchemy_clk_fgv1_detr(struct clk_hw *hw, + struct clk_rate_request *req) { - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, - best_parent_clk, 2, 512); + return alchemy_clk_fgcs_detr(hw, req, 2, 512); } /* Au1000, Au1100, Au15x0, Au12x0 */ @@ -696,11 +698,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw, return t; } -static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int alchemy_clk_fgv2_detr(struct clk_hw *hw, + struct clk_rate_request *req) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int scale, maxdiv; @@ -713,8 +712,7 @@ static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, maxdiv = 512; } - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, - best_parent_clk, scale, maxdiv); + return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv); } /* Au1300 larger input mux, no separate disable bit, flexible divider */ @@ -917,17 +915,13 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate, return 0; } -static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int alchemy_clk_csrc_detr(struct clk_hw *hw, + struct clk_rate_request *req) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int scale = c->dt[2] == 3 ? 
1 : 2; /* au1300 check */ - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, - best_parent_clk, scale, 4); + return alchemy_clk_fgcs_detr(hw, req, scale, 4); } static struct clk_ops alchemy_clkops_csrc = { diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 01a644f174dd..1ba21204ebe0 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -190,6 +190,7 @@ int get_c0_perfcount_int(void) { return ATH79_MISC_IRQ(5); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 56f5d080ef9d..b7fa9ae28c36 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id) cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 7fe5c61a3cb8..1f8546081d20 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -7,6 +7,7 @@ generic-y += emergency-restart.h generic-y += irq_work.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mutex.h generic-y += parport.h generic-y += percpu.h diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 084780b355aa..1b0625189835 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode) goto fr_common; case FPU_64BIT: -#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \ +#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \ || defined(CONFIG_64BIT)) /* we only have a 32-bit FPU */ return SIGFPE; diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h deleted file mode 100644 index 11d3b572b1b3..000000000000 --- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H -#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H - -#include <asm/bmips.h> - -#define plat_post_dma_flush bmips_post_dma_flush - -#include <asm/mach-generic/dma-coherence.h> - -#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h index 0a227d426b9c..520f8fc2c806 100644 --- a/arch/mips/include/asm/mach-sibyte/war.h +++ b/arch/mips/include/asm/mach-sibyte/war.h @@ -13,8 +13,7 @@ #define R4600_V2_HIT_CACHEOP_WAR 0 #define R5432_CP0_INTERRUPT_WAR 0 -#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \ - defined(CONFIG_SB1_PASS_2_WORKAROUNDS) +#if defined(CONFIG_SB1_PASS_2_WORKAROUNDS) #ifndef __ASSEMBLY__ extern int sb1250_m3_workaround_needed(void); diff --git a/arch/mips/include/asm/mm-arch-hooks.h b/arch/mips/include/asm/mm-arch-hooks.h deleted file mode 100644 index b5609fe8e475..000000000000 --- a/arch/mips/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _ASM_MIPS_MM_ARCH_HOOKS_H -#define _ASM_MIPS_MM_ARCH_HOOKS_H - -#endif /* _ASM_MIPS_MM_ARCH_HOOKS_H */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 9d8106758142..ae8569475264 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) * Make sure the buddy is global too (if it's !none, * it better already be global) */ +#ifdef CONFIG_SMP + /* + * For SMP, multiple CPUs can race, so we need to do + * this atomically. + */ +#ifdef CONFIG_64BIT +#define LL_INSN "lld" +#define SC_INSN "scd" +#else /* CONFIG_32BIT */ +#define LL_INSN "ll" +#define SC_INSN "sc" +#endif + unsigned long page_global = _PAGE_GLOBAL; + unsigned long tmp; + + __asm__ __volatile__ ( + " .set push\n" + " .set noreorder\n" + "1: " LL_INSN " %[tmp], %[buddy]\n" + " bnez %[tmp], 2f\n" + " or %[tmp], %[tmp], %[global]\n" + " " SC_INSN " %[tmp], %[buddy]\n" + " beqz %[tmp], 1b\n" + " nop\n" + "2:\n" + " .set pop" + : [buddy] "+m" (buddy->pte), + [tmp] "=&r" (tmp) + : [global] "r" (page_global)); +#else /* !CONFIG_SMP */ if (pte_none(*buddy)) pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; +#endif /* CONFIG_SMP */ } #endif } diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 16f1ea9ab191..03722d4326a1 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu) extern void play_dead(void); #endif -extern asmlinkage void smp_call_function_interrupt(void); - static inline void arch_send_call_function_single_ipi(int cpu) { extern struct plat_smp_ops *mp_ops; /* private */ diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 28d6d9364bd1..a71da576883c 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -152,6 +152,31 @@ .set noreorder bltz k0, 8f move k1, sp +#ifdef CONFIG_EVA + /* + * Flush interAptiv's Return Prediction Stack (RPS) by writing + * EntryHi. Toggling Config7.RPS is slower and less portable. + * + * The RPS isn't automatically flushed when exceptions are + * taken, which can result in kernel mode speculative accesses + * to user addresses if the RPS mispredicts. That's harmless + * when user and kernel share the same address space, but with + * EVA the same user segments may be unmapped to kernel mode, + * even containing sensitive MMIO regions or invalid memory. + * + * This can happen when the kernel sets the return address to + * ret_from_* and jr's to the exception handler, which looks + * more like a tail call than a function call. If nested calls + * don't evict the last user address in the RPS, it will + * mispredict the return and fetch from a user controlled + * address into the icache. + * + * More recent EVA-capable cores with MAAR to restrict + * speculative accesses aren't affected. + */ + MFC0 k0, CP0_ENTRYHI + MTC0 k0, CP0_ENTRYHI +#endif .set reorder /* Called from user mode, new stack. */ get_saved_sp diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index 7163cd7fdd69..9733cd0266e4 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -83,45 +83,43 @@ do { if (cpu_has_rw_llb) { \ } \ } while (0) +/* + * For newly created kernel threads switch_to() will return to + * ret_from_kernel_thread, newly created user threads to ret_from_fork. 
+ * That is, everything following resume() will be skipped for new threads. + * So everything that matters to new threads should be placed before resume(). + */ #define switch_to(prev, next, last) \ do { \ - u32 __c0_stat; \ s32 __fpsave = FP_SAVE_NONE; \ __mips_mt_fpaff_switch_to(prev); \ - if (cpu_has_dsp) \ + if (cpu_has_dsp) { \ __save_dsp(prev); \ - if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \ - if (cop2_lazy_restore) \ - KSTK_STATUS(prev) &= ~ST0_CU2; \ - __c0_stat = read_c0_status(); \ - write_c0_status(__c0_stat | ST0_CU2); \ - cop2_save(prev); \ - write_c0_status(__c0_stat & ~ST0_CU2); \ + __restore_dsp(next); \ + } \ + if (cop2_present) { \ + set_c0_status(ST0_CU2); \ + if ((KSTK_STATUS(prev) & ST0_CU2)) { \ + if (cop2_lazy_restore) \ + KSTK_STATUS(prev) &= ~ST0_CU2; \ + cop2_save(prev); \ + } \ + if (KSTK_STATUS(next) & ST0_CU2 && \ + !cop2_lazy_restore) { \ + cop2_restore(next); \ + } \ + clear_c0_status(ST0_CU2); \ } \ __clear_software_ll_bit(); \ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \ __fpsave = FP_SAVE_SCALAR; \ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \ __fpsave = FP_SAVE_VECTOR; \ - (last) = resume(prev, next, task_thread_info(next), __fpsave); \ -} while (0) - -#define finish_arch_switch(prev) \ -do { \ - u32 __c0_stat; \ - if (cop2_present && !cop2_lazy_restore && \ - (KSTK_STATUS(current) & ST0_CU2)) { \ - __c0_stat = read_c0_status(); \ - write_c0_status(__c0_stat | ST0_CU2); \ - cop2_restore(current); \ - write_c0_status(__c0_stat & ~ST0_CU2); \ - } \ - if (cpu_has_dsp) \ - __restore_dsp(current); \ if (cpu_has_userlocal) \ - write_c0_userlocal(current_thread_info()->tp_value); \ + write_c0_userlocal(task_thread_info(next)->tp_value); \ __restore_watch(); \ disable_msa(); \ + (last) = resume(prev, next, task_thread_info(next), __fpsave); \ } while (0) #endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h index 6c9906f59c6e..9081d88ae44f 100644 --- a/arch/mips/include/uapi/asm/sigcontext.h +++ b/arch/mips/include/uapi/asm/sigcontext.h @@ -16,7 +16,7 @@ /* * Keep this struct definition in sync with the sigcontext fragment - * in arch/mips/tools/offset.c + * in arch/mips/kernel/asm-offsets.c */ struct sigcontext { unsigned int sc_regmask; /* Unused */ @@ -46,7 +46,7 @@ struct sigcontext { #include <linux/posix_types.h> /* * Keep this struct definition in sync with the sigcontext fragment - * in arch/mips/tools/offset.c + * in arch/mips/kernel/asm-offsets.c * * Warning: this structure illdefined with sc_badvaddr being just an unsigned * int so it was changed to unsigned long in 2.6.0-test1. This may break diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index beabe19ff8e5..072fab13645d 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -1,5 +1,5 @@ /* - * offset.c: Calculate pt_regs and task_struct offsets. + * asm-offsets.c: Calculate pt_regs and task_struct offsets. * * Copyright (C) 1996 David S. 
Miller * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index af42e7003f12..baa7b6fc0a60 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set noat SAVE_ALL FEXPORT(handle_\exception\ext) - __BUILD_clear_\clear + __build_clear_\clear .set at __BUILD_\verbose \exception move a0, sp diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 3e4491aa6d6b..789d7bf4fef3 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { unsigned int real_len; - cpumask_t mask; + cpumask_t allowed, mask; int retval; struct task_struct *p; @@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, if (retval) goto out_unlock; - cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); + cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); + cpumask_and(&mask, &allowed, cpu_active_mask); out_unlock: read_unlock(&tasklist_lock); diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index b130033838ba..5fcec3032f38 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -38,7 +38,7 @@ char *mips_get_machine_name(void) return mips_machine_name; } -#ifdef CONFIG_OF +#ifdef CONFIG_USE_OF void __init early_init_dt_add_memory_arch(u64 base, u64 size) { return add_memory_region(base, size, BOOT_MEM_RAM); diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index 74bab9ddd0e1..c6bbf2165051 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -24,7 +24,7 @@ LEAF(relocate_new_kernel) process_entry: PTR_L s2, (s0) - PTR_ADD s0, s0, SZREG + PTR_ADDIU s0, s0, SZREG /* * In case of a kdump/crash kernel, the indirection page is not @@ -61,9 +61,9 @@ copy_word: /* copy page word by word */ REG_L s5, (s2) REG_S s5, (s4) - PTR_ADD s4, s4, SZREG - PTR_ADD s2, s2, SZREG - LONG_SUB s6, s6, 1 + PTR_ADDIU s4, s4, SZREG + PTR_ADDIU s2, s2, SZREG + LONG_ADDIU s6, s6, -1 beq s6, zero, process_entry b copy_word b process_entry diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index ad4d44635c76..a6f6b762c47a 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -80,7 +80,7 @@ syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - daddiu a1, v0, __NR_64_Linux + move a1, v0 jal syscall_trace_enter bltz v0, 2f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 446cc654da56..4b2010654c46 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -72,7 +72,7 @@ n32_syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - daddiu a1, v0, __NR_N32_Linux + move a1, v0 jal syscall_trace_enter bltz v0, 2f # seccomp failed? 
Skip syscall diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 19a7705f2a01..5d7f2634996f 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 336708ae5c5b..78cf8c2f1de0 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id) if (action == 0) scheduler_ipi(); else - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } @@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index d0744cc77ea7..a31896c33716 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -192,16 +192,6 @@ asmlinkage void start_secondary(void) cpu_startup_entry(CPUHP_ONLINE); } -/* - * Call into both interrupt handlers, as we share the IPI for them - */ -void __irq_entry smp_call_function_interrupt(void) -{ - irq_enter(); - generic_smp_call_function_interrupt(); - irq_exit(); -} - static void stop_this_cpu(void *dummy) { /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e207a43b5f8f..8ea28e6ab37d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task, void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; + mm_segment_t old_fs = get_fs(); if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; @@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp) prepare_frametrace(®s); } } + /* + * show_stack() deals exclusively with kernel mode, so be sure to access + * the stack in the kernel (not user) address space. + */ + set_fs(KERNEL_DS); show_stacktrace(task, ®s); + set_fs(old_fs); } static void show_code(unsigned int __user *pc) @@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) const int field = 2 * sizeof(unsigned long); int multi_match = regs->cp0_status & ST0_TS; enum ctx_state prev_state; + mm_segment_t old_fs = get_fs(); prev_state = exception_enter(); show_regs(regs); @@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs) dump_tlb_all(); } + if (!user_mode(regs)) + set_fs(KERNEL_DS); + show_code((unsigned int __user *) regs->cp0_epc); + set_fs(old_fs); + /* * Some chips may have other causes of machine check (e.g. 
SB1 * graduation timer) diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index af84bef0c90d..eb3efd137fd1 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -438,7 +438,7 @@ do { \ : "memory"); \ } while(0) -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 6ab10573490d..2c218c3bbca5 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } @@ -466,6 +466,7 @@ int get_c0_perfcount_int(void) { return ltq_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index 509877c6e9d9..1a4738a8f2d3 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } if (action & SMP_ASK_C0COUNT) { BUG_ON(cpu != 0); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 77d96db8253c..aab218c36e0d 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -160,18 +160,18 @@ static inline void setup_protection_map(void) protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); - protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); + protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); } else { diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 36c0f26fac6b..852a41c6da45 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ 
-133,7 +133,8 @@ good_area: #endif goto bad_area; } - if (!(vma->vm_flags & VM_READ)) { + if (!(vma->vm_flags & VM_READ) && + exception_epc(regs) != address) { #if 0 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", raw_smp_processor_id(), diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index d1392f8f5811..fa8f591f3713 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 5625b190edc0..b7bf721eabf5 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -154,6 +154,7 @@ int get_c0_perfcount_int(void) return mips_cpu_perf_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { @@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void) static void __init init_rtc(void) { - /* stop the clock whilst setting it up */ - CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL); + unsigned char freq, ctrl; - /* 32KHz time base */ - CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); + /* Set 32KHz time base if not already set */ + freq = CMOS_READ(RTC_FREQ_SELECT); + if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ) + CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); - /* start the clock */ - CMOS_WRITE(RTC_24H, RTC_CONTROL); + /* Ensure SET bit is clear so RTC can run */ + ctrl = CMOS_READ(RTC_CONTROL); + if (ctrl & RTC_SET) + CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL); } void __init plat_time_init(void) diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index e1d69895fb1d..a120b7a5a8fe 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c @@ -77,6 +77,7 @@ int get_c0_perfcount_int(void) return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; return -1; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index dc3e327fbbac..f5fff228b347 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) { clear_c0_eimr(irq); ack_c0_eirr(irq); - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); set_c0_eimr(irq); } diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index 42181c7105df..f8d3e081b2eb 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id) static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index b8a0bf5766f2..c6996cf67a5c 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c @@ -311,12 +311,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) void pcibios_fixup_bus(struct pci_bus *bus) { - struct pci_dev *dev = bus->self; - - if (pci_has_flag(PCI_PROBE_ONLY) && dev && - (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { - pci_read_bridge_bases(bus); - } } EXPORT_SYMBOL(PCIBIOS_MIN_IO); diff --git a/arch/mips/pistachio/time.c 
b/arch/mips/pistachio/time.c index 7c73fcb92a10..8a377346f0ca 100644 --- a/arch/mips/pistachio/time.c +++ b/arch/mips/pistachio/time.c @@ -26,6 +26,7 @@ int get_c0_perfcount_int(void) { return gic_get_c0_perfcount_int(); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); int get_c0_fdc_int(void) { diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c index 10170580a2de..ffa0f7101a97 100644 --- a/arch/mips/pmcs-msp71xx/msp_smp.c +++ b/arch/mips/pmcs-msp71xx/msp_smp.c @@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 53707aacc0f8..8c624a8b9ea2 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -89,6 +89,7 @@ int get_c0_perfcount_int(void) { return rt_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 3fbaef97a1b8..16ec4e12daa3 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void) scheduler_ipi(); } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else #endif { diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig index a8bb972fd9fd..cb9a095f5c5e 100644 --- a/arch/mips/sibyte/Kconfig +++ b/arch/mips/sibyte/Kconfig @@ -81,11 +81,6 @@ choice prompt "SiByte SOC Stepping" depends on SIBYTE_SB1xxx_SOC -config CPU_SB1_PASS_1 - bool "1250 Pass1" - depends on SIBYTE_SB1250 - select CPU_HAS_PREFETCH - config CPU_SB1_PASS_2_1250 bool "1250 An" depends on SIBYTE_SB1250 diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index af7d44edd9a8..4c71aea25663 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -29,8 +29,6 @@ #include <asm/sibyte/bcm1480_regs.h> #include <asm/sibyte/bcm1480_int.h> -extern void smp_call_function_interrupt(void); - /* * These are routines for dealing with the bcm1480 smp capabilities * independent of board/firmware @@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } } diff --git a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c index 5581844c9194..41a1d2242211 100644 --- a/arch/mips/sibyte/common/bus_watcher.c +++ b/arch/mips/sibyte/common/bus_watcher.c @@ -81,10 +81,7 @@ void check_bus_watcher(void) { u32 status, l2_err, memio_err; -#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS - /* Destructive read, clears register and interrupt */ - status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS)); -#elif defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250) +#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250) /* Use non-destructive register */ status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG)); #elif defined(CONFIG_SIBYTE_BCM1x55) || 
defined(CONFIG_SIBYTE_BCM1x80) diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c index 3c02b2a77ae9..9d3c24efdf4a 100644 --- a/arch/mips/sibyte/sb1250/setup.c +++ b/arch/mips/sibyte/sb1250/setup.c @@ -202,12 +202,10 @@ void __init sb1250_setup(void) switch (war_pass) { case K_SYS_REVISION_BCM1250_PASS1: -#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, " "and the kernel doesn't have the proper " "workarounds compiled in. @@@@\n"); bad_config = 1; -#endif break; case K_SYS_REVISION_BCM1250_PASS2: /* Pass 2 - easiest as default for now - so many numbers */ diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index c0c4b3f88a08..1cf66f5ff23d 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } } diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index de30b0c88796..6edb9ee6128e 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -5,6 +5,7 @@ generic-y += cputime.h generic-y += exec.h generic-y += irq_work.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h index 07c5b4a3903b..62189353d2f6 100644 --- a/arch/mn10300/include/asm/io.h +++ b/arch/mn10300/include/asm/io.h @@ -283,6 +283,7 @@ static inline void __iomem *ioremap_nocache(unsigned long offset, unsigned long #define ioremap_wc ioremap_nocache #define ioremap_wt ioremap_nocache +#define ioremap_uc ioremap_nocache static inline void iounmap(void __iomem *addr) { diff --git a/arch/mn10300/include/asm/mm-arch-hooks.h b/arch/mn10300/include/asm/mm-arch-hooks.h deleted file mode 100644 index e2029a652f4c..000000000000 --- a/arch/mn10300/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_MN10300_MM_ARCH_HOOKS_H -#define _ASM_MN10300_MM_ARCH_HOOKS_H - -#endif /* _ASM_MN10300_MM_ARCH_HOOKS_H */ diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c index b5b036f64275..b7ab8378964c 100644 --- a/arch/mn10300/unit-asb2305/pci-asb2305.c +++ b/arch/mn10300/unit-asb2305/pci-asb2305.c @@ -183,18 +183,16 @@ static int __init pcibios_assign_resources(void) struct pci_dev *dev = NULL; struct resource *r; - if (!(pci_probe & PCI_ASSIGN_ROMS)) { - /* Try to use BIOS settings for ROMs, otherwise let - pci_assign_unassigned_resources() allocate the new - addresses. */ - for_each_pci_dev(dev) { - r = &dev->resource[PCI_ROM_RESOURCE]; - if (!r->flags || !r->start) - continue; - if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) { - r->end -= r->start; - r->start = 0; - } + /* Try to use BIOS settings for ROMs, otherwise let + pci_assign_unassigned_resources() allocate the new + addresses. 
*/ + for_each_pci_dev(dev) { + r = &dev->resource[PCI_ROM_RESOURCE]; + if (!r->flags || !r->start) + continue; + if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) { + r->end -= r->start; + r->start = 0; } } diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h index 9e17aca5a2a1..96c484b12226 100644 --- a/arch/mn10300/unit-asb2305/pci-asb2305.h +++ b/arch/mn10300/unit-asb2305/pci-asb2305.h @@ -20,13 +20,6 @@ #define DBG(x...) #endif -#define PCI_PROBE_BIOS 1 -#define PCI_PROBE_CONF1 2 -#define PCI_PROBE_CONF2 4 -#define PCI_NO_CHECKS 0x400 -#define PCI_ASSIGN_ROMS 0x1000 -#define PCI_BIOS_IRQ_SCAN 0x2000 - extern unsigned int pci_probe; /* pci-asb2305.c */ diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c index 3dfe2d31c67b..deaa893efba5 100644 --- a/arch/mn10300/unit-asb2305/pci.c +++ b/arch/mn10300/unit-asb2305/pci.c @@ -324,7 +324,6 @@ void pcibios_fixup_bus(struct pci_bus *bus) struct pci_dev *dev; if (bus->self) { - pci_read_bridge_bases(bus); pcibios_fixup_bridge_resources(bus->self); } diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 434639d510b3..914864eb5a25 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -30,6 +30,7 @@ generic-y += kmap_types.h generic-y += kvm_para.h generic-y += local.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += module.h generic-y += msgbuf.h diff --git a/arch/nios2/include/asm/mm-arch-hooks.h b/arch/nios2/include/asm/mm-arch-hooks.h deleted file mode 100644 index d7290dc68558..000000000000 --- a/arch/nios2/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _ASM_NIOS2_MM_ARCH_HOOKS_H -#define _ASM_NIOS2_MM_ARCH_HOOKS_H - -#endif /* _ASM_NIOS2_MM_ARCH_HOOKS_H */ diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index e5a693b16da2..443f44de1020 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -17,6 +17,7 @@ config OPENRISC select GENERIC_IRQ_SHOW select GENERIC_IOMAP select GENERIC_CPU_DEVICES + select HAVE_UID16 select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_STRNCPY_FROM_USER @@ -31,9 +32,6 @@ config MMU config HAVE_DMA_ATTRS def_bool y -config UID16 - def_bool y - config RWSEM_GENERIC_SPINLOCK def_bool y diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 2a2e39b8109a..2832f031fb11 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -36,6 +36,7 @@ generic-y += kmap_types.h generic-y += kvm_para.h generic-y += local.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += module.h generic-y += msgbuf.h diff --git a/arch/openrisc/include/asm/mm-arch-hooks.h b/arch/openrisc/include/asm/mm-arch-hooks.h deleted file mode 100644 index 6d33cb555fe1..000000000000 --- a/arch/openrisc/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_OPENRISC_MM_ARCH_HOOKS_H -#define _ASM_OPENRISC_MM_ARCH_HOOKS_H - -#endif /* _ASM_OPENRISC_MM_ARCH_HOOKS_H */ diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 12b341d04f88..f9b3a81aefcd 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -15,6 +15,7 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mutex.h generic-y += param.h generic-y += percpu.h diff --git a/arch/parisc/include/asm/mm-arch-hooks.h b/arch/parisc/include/asm/mm-arch-hooks.h deleted file mode 100644 index 654ec63b0ee9..000000000000 --- a/arch/parisc/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_PARISC_MM_ARCH_HOOKS_H -#define _ASM_PARISC_MM_ARCH_HOOKS_H - -#endif /* _ASM_PARISC_MM_ARCH_HOOKS_H */ diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index 3a08eae3318f..3edbb9fc91b4 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { - if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) + if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) { /* * This is the permanent pmd attached to the pgd; * cannot free it. 
@@ -81,6 +81,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) */ mm_inc_nr_pmds(mm); return; + } free_pages((unsigned long)pmd, PMD_ORDER); } diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index a8d2ef30d473..5879fde56f3c 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -721,6 +721,7 @@ extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, unsigned long flags); extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size); #define ioremap_nocache(addr, size) ioremap((addr), (size)) +#define ioremap_uc(addr, size) ioremap((addr), (size)) extern void iounmap(volatile void __iomem *addr); diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h index d0ece257d310..04c7e8fc24c2 100644 --- a/arch/powerpc/include/asm/mpc52xx_psc.h +++ b/arch/powerpc/include/asm/mpc52xx_psc.h @@ -150,7 +150,10 @@ /* Structure of the hardware registers */ struct mpc52xx_psc { - u8 mode; /* PSC + 0x00 */ + union { + u8 mode; /* PSC + 0x00 */ + u8 mr2; + }; u8 reserved0[3]; union { /* PSC + 0x04 */ u16 status; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 4dbe072eecbe..523673d7583c 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,8 +28,6 @@ #include <asm/synch.h> #include <asm/ppc-opcode.h> -#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ - #ifdef CONFIG_PPC64 /* use 0x800000yy when locked, where yy == CPU number */ #ifdef __BIG_ENDIAN__ diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 58abeda64cb7..15cca17cba4b 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {} extern void enable_kernel_fp(void); extern void enable_kernel_altivec(void); +extern void enable_kernel_vsx(void); extern int emulate_altivec(struct pt_regs *); extern void __giveup_vsx(struct task_struct *); extern void giveup_vsx(struct task_struct *); diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index b9de34d44fcb..02c1d5dcee4d 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1044,13 +1044,7 @@ void pcibios_set_master(struct pci_dev *dev) void pcibios_fixup_bus(struct pci_bus *bus) { - /* When called from the generic PCI probe, read PCI<->PCI bridge - * bases. This is -not- called when generating the PCI tree from - * the OF device-tree. 
- */ - pci_read_bridge_bases(bus); - - /* Now fixup the bus bus */ + /* Fixup the bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 42e02a2d570b..c8c62c7fc31c 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -126,7 +126,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, { struct pci_dev *dev; const char *type; - struct pci_slot *slot; dev = pci_alloc_dev(bus); if (!dev) @@ -145,10 +144,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, dev->needs_freset = 0; /* pcie fundamental reset required */ set_pcie_port_type(dev); - list_for_each_entry(slot, &dev->bus->slots, list) - if (PCI_SLOT(dev->devfn) == slot->number) - dev->slot = slot; - + pci_dev_assign_slot(dev); dev->vendor = get_int_prop(node, "vendor-id", 0xffff); dev->device = get_int_prop(node, "device-id", 0xffff); dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0); @@ -191,6 +187,9 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, pci_device_add(dev, bus); + /* Setup MSI caps & disable MSI/MSI-X interrupts */ + pci_msi_setup_pci_dev(dev); + return dev; } EXPORT_SYMBOL(of_create_pci_dev); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8005e18d1b40..64e6e9d9e656 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX -#if 0 -/* not currently used, but some crazy RAID module might want to later */ void enable_kernel_vsx(void) { WARN_ON(preemptible()); @@ -220,7 +218,6 @@ void enable_kernel_vsx(void) #endif /* CONFIG_SMP */ } EXPORT_SYMBOL(enable_kernel_vsx); -#endif void giveup_vsx(struct task_struct *tsk) { diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d3a831ac0f92..da50e0c9c57e 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s) int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 68d067ad4222..a9f753fb73a8 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) vc->runner = vcpu; if (n_ceded == vc->n_runnable) { kvmppc_vcore_blocked(vc); - } else if (should_resched()) { + } else if (need_resched()) { vc->vcore_state = VCORE_PREEMPT; /* Let something else run */ cond_resched_lock(&vc->lock); diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index f691bcabd710..c50ea76ba66c 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c @@ -12,6 +12,7 @@ */ #include <linux/bitops.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/device.h> diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 5cf5e6ea213b..7cf0df859d05 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ 
b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) } /* Unmask the event */ - if (eeh_enabled()) + if (ret == EEH_NEXT_ERR_NONE && eeh_enabled()) enable_irq(eeh_event_irq); return ret; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 5738d315248b..85cbc96eff6c 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb) static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, unsigned levels, unsigned long limit, - unsigned long *current_offset) + unsigned long *current_offset, unsigned long *total_allocated) { struct page *tce_mem = NULL; __be64 *addr, *tmp; @@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, } addr = page_address(tce_mem); memset(addr, 0, allocated); + *total_allocated += allocated; --levels; if (!levels) { @@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, for (i = 0; i < entries; ++i) { tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, - levels, limit, current_offset); + levels, limit, current_offset, total_allocated); if (!tmp) break; @@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, struct iommu_table *tbl) { void *addr; - unsigned long offset = 0, level_shift; + unsigned long offset = 0, level_shift, total_allocated = 0; const unsigned window_shift = ilog2(window_size); unsigned entries_shift = window_shift - page_shift; unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); @@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, /* Allocate TCE table */ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, - levels, tce_table_size, &offset); + levels, tce_table_size, &offset, &total_allocated); /* addr==NULL means that the first level allocation failed */ if (!addr) @@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, page_shift); tbl->it_level_size = 1ULL << (level_shift - 3); tbl->it_indirect_levels = levels - 1; - tbl->it_allocated_size = offset; + tbl->it_allocated_size = total_allocated; pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", window_size, tce_table_size, bus_offset); diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild index 2938934c6518..e256592eb66e 100644 --- a/arch/s390/Kbuild +++ b/arch/s390/Kbuild @@ -6,3 +6,4 @@ obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ obj-$(CONFIG_APPLDATA_BASE) += appldata/ obj-y += net/ obj-$(CONFIG_PCI) += pci/ +obj-$(CONFIG_NUMA) += numa/ diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b06dc3839268..4827870f7a6d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -99,18 +99,22 @@ config S390 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE select ARCH_SAVE_PAGE_KEYS if HIBERNATION select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_WANTS_PROT_NUMA_PROT_NONE select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS2 select DYNAMIC_FTRACE if FUNCTION_TRACER select GENERIC_CLOCKEVENTS + select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_DEVICES if !SMP select GENERIC_FIND_FIRST_BIT select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_AUDITSYSCALL + select 
HAVE_ARCH_EARLY_PFN_TO_NID select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK @@ -153,6 +157,7 @@ config S390 select VIRT_CPU_ACCOUNTING select VIRT_TO_BUS + config SCHED_OMIT_FRAME_POINTER def_bool y @@ -385,6 +390,76 @@ config HOTPLUG_CPU config SCHED_SMT def_bool n +# Some NUMA nodes have memory ranges that span +# other nodes. Even though a pfn is valid and +# between a node's start and end pfns, it may not +# reside on that node. See memmap_init_zone() +# for details. <- They meant memory holes! +config NODES_SPAN_OTHER_NODES + def_bool NUMA + +config NUMA + bool "NUMA support" + depends on SMP && 64BIT && SCHED_TOPOLOGY + default n + help + Enable NUMA support + + This option adds NUMA support to the kernel. + + An operation mode can be selected by appending + numa=<method> to the kernel command line. + + The default behaviour is identical to appending numa=plain to + the command line. This will create just one node with all + available memory and all CPUs in it. + +config NODES_SHIFT + int "Maximum NUMA nodes (as a power of 2)" + range 1 10 + depends on NUMA + default "4" + help + Specify the maximum number of NUMA nodes available on the target + system. Increases memory reserved to accommodate various tables. + +menu "Select NUMA modes" + depends on NUMA + +config NUMA_EMU + bool "NUMA emulation" + default y + help + Numa emulation mode will split the available system memory into + equal chunks which then are distributed over the configured number + of nodes in a round-robin manner. + + The number of fake nodes is limited by the number of available memory + chunks (i.e. memory size / fake size) and the number of supported + nodes in the kernel. + + The CPUs are assigned to the nodes in a way that partially respects + the original machine topology (if supported by the machine). + Fair distribution of the CPUs is not guaranteed. + +config EMU_SIZE + hex "NUMA emulation memory chunk size" + default 0x10000000 + range 0x400000 0x100000000 + depends on NUMA_EMU + help + Select the default size by which the memory is chopped and then + assigned to emulated NUMA nodes. + + This can be overridden by specifying + + emu_size=<n> + + on the kernel command line where also suffixes K, M, G, and T are + supported. 
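The NUMA_EMU and EMU_SIZE help texts above describe the emulation scheme only in prose: the available memory is split into emu_size chunks and the chunks are dealt out to the configured fake nodes in round-robin order. The short user-space sketch below illustrates just that arithmetic under assumed example values (64 GiB of memory, 4 fake nodes); it is not the s390 implementation, which lives in the new arch/s390/numa/ code added elsewhere in this diff.

	#include <stdio.h>

	/*
	 * Round-robin chunk distribution as described in the NUMA_EMU help
	 * text.  All values are example assumptions, not taken from the
	 * kernel, except emu_size, which uses the EMU_SIZE default of
	 * 0x10000000 (256 MiB).
	 */
	int main(void)
	{
		unsigned long long mem_size = 64ULL << 30;   /* assumed: 64 GiB */
		unsigned long long emu_size = 0x10000000ULL; /* 256 MiB chunks */
		unsigned int nodes = 4;                      /* assumed fake node count */
		unsigned long long chunks = mem_size / emu_size;
		unsigned long long chunk;

		for (chunk = 0; chunk < chunks; chunk++) {
			unsigned long long start = chunk * emu_size;
			unsigned int node = chunk % nodes;   /* round-robin assignment */

			if (chunk < 8)                       /* keep the output short */
				printf("chunk %llu [0x%09llx-0x%09llx] -> node %u\n",
				       chunk, start, start + emu_size - 1, node);
		}
		printf("%llu chunks spread over %u fake nodes\n", chunks, nodes);
		return 0;
	}

Compiled with any C compiler, this prints the first few chunk-to-node mappings plus the total chunk count, which is the quantity that, per the help text, bounds how many fake nodes can actually be populated.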
+ +endmenu + config SCHED_MC def_bool n diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 667b1bca5681..e8d4423e4f85 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -33,6 +33,8 @@ mflags-$(CONFIG_MARCH_Z196) := -march=z196 mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12 mflags-$(CONFIG_MARCH_Z13) := -march=z13 +export CC_FLAGS_MARCH := $(mflags-y) + aflags-y += $(mflags-y) cflags-y += $(mflags-y) diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 940cbddd9237..0c98f1508542 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -13,6 +13,7 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_RCU_FAST_NO_HZ=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_NUMA_BALANCING=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_DEVICE=y CONFIG_CPUSETS=y @@ -50,6 +51,7 @@ CONFIG_LIVEPATCH=y CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=256 +CONFIG_NUMA=y CONFIG_PREEMPT=y CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index d793fec91797..82083e1fbdc4 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -13,6 +13,7 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_RCU_FAST_NO_HZ=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_NUMA_BALANCING=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_DEVICE=y CONFIG_CPUSETS=y @@ -49,6 +50,7 @@ CONFIG_DEFAULT_DEADLINE=y CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=256 +CONFIG_NUMA=y CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 38a77e9c8aa6..c05c9e0821e3 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -13,6 +13,8 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_RCU_FAST_NO_HZ=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_NUMA_BALANCING=y +# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_DEVICE=y CONFIG_CPUSETS=y @@ -48,6 +50,7 @@ CONFIG_LIVEPATCH=y CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=512 +CONFIG_NUMA=y CONFIG_HZ_100=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 5566ce80abdb..0b9b95f3c703 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -24,6 +24,7 @@ #include <crypto/algapi.h> #include <linux/err.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include <linux/init.h> #include <linux/spinlock.h> #include "crypt_s390.h" @@ -976,7 +977,7 @@ static void __exit aes_s390_fini(void) crypto_unregister_alg(&aes_alg); } -module_init(aes_s390_init); +module_cpu_feature_match(MSA, aes_s390_init); module_exit(aes_s390_fini); MODULE_ALIAS_CRYPTO("aes-all"); diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 9e05cc453a40..fba1c10a2dd0 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -16,6 +16,7 @@ #include <linux/init.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/des.h> @@ -616,7 +617,7 @@ static void __exit des_s390_exit(void) crypto_unregister_alg(&des_alg); } -module_init(des_s390_init); +module_cpu_feature_match(MSA, des_s390_init); module_exit(des_s390_exit); MODULE_ALIAS_CRYPTO("des"); diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c index b258110da952..26e14efd30a7 100644 --- 
a/arch/s390/crypto/ghash_s390.c +++ b/arch/s390/crypto/ghash_s390.c @@ -9,6 +9,7 @@ #include <crypto/internal/hash.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include "crypt_s390.h" @@ -158,7 +159,7 @@ static void __exit ghash_mod_exit(void) crypto_unregister_shash(&ghash_alg); } -module_init(ghash_mod_init); +module_cpu_feature_match(MSA, ghash_mod_init); module_exit(ghash_mod_exit); MODULE_ALIAS_CRYPTO("ghash"); diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index 9d5192c94963..b8045b97f4fb 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/mutex.h> +#include <linux/cpufeature.h> #include <linux/random.h> #include <linux/slab.h> #include <asm/debug.h> @@ -914,6 +915,5 @@ static void __exit prng_exit(void) } } - -module_init(prng_init); +module_cpu_feature_match(MSA, prng_init); module_exit(prng_exit); diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 5b2bee323694..9208eadae9f0 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -26,6 +26,7 @@ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include <crypto/sha.h> #include "crypt_s390.h" @@ -100,7 +101,7 @@ static void __exit sha1_s390_fini(void) crypto_unregister_shash(&alg); } -module_init(sha1_s390_init); +module_cpu_feature_match(MSA, sha1_s390_init); module_exit(sha1_s390_fini); MODULE_ALIAS_CRYPTO("sha1"); diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index b74ff158108c..667888f5c964 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -16,6 +16,7 @@ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include <crypto/sha.h> #include "crypt_s390.h" @@ -140,7 +141,7 @@ static void __exit sha256_s390_fini(void) crypto_unregister_shash(&sha256_alg); } -module_init(sha256_s390_init); +module_cpu_feature_match(MSA, sha256_s390_init); module_exit(sha256_s390_fini); MODULE_ALIAS_CRYPTO("sha256"); diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 0c36989ba182..2ba66b1518f0 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/cpufeature.h> #include "sha.h" #include "crypt_s390.h" @@ -148,7 +149,7 @@ static void __exit fini(void) crypto_unregister_shash(&sha384_alg); } -module_init(init); +module_cpu_feature_match(MSA, init); module_exit(fini); MODULE_LICENSE("GPL"); diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index dc5385ebb071..5ad26dd94d77 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -3,5 +3,6 @@ generic-y += clkdev.h generic-y += irq_work.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h new file mode 100644 index 000000000000..fa7e69b7c299 --- /dev/null +++ b/arch/s390/include/asm/cpufeature.h @@ -0,0 +1,29 @@ +/* + * Module interface for CPU features + * + * Copyright IBM Corp. 
2015 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ + +#ifndef __ASM_S390_CPUFEATURE_H +#define __ASM_S390_CPUFEATURE_H + +#include <asm/elf.h> + +/* Hardware features on Linux on z Systems are indicated by facility bits that + * are mapped to the so-called machine flags. Particular machine flags are + * then used to define ELF hardware capabilities; most notably hardware flags + * that are essential for user space / glibc. + * + * Restrict the set of exposed CPU features to ELF hardware capabilities for + * now. Additional machine flags can be indicated by values larger than + * MAX_ELF_HWCAP_FEATURES. + */ +#define MAX_ELF_HWCAP_FEATURES (8 * sizeof(elf_hwcap)) +#define MAX_CPU_FEATURES MAX_ELF_HWCAP_FEATURES + +#define cpu_feature(feat) ilog2(HWCAP_S390_ ## feat) + +int cpu_have_feature(unsigned int nr); + +#endif /* __ASM_S390_CPUFEATURE_H */ diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index cfad7fca01d6..17a373576868 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -46,6 +46,8 @@ static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) __ctl_load(reg, cr, cr); } +void __ctl_set_vx(void); + void smp_ctl_set_bit(int cr, int bit); void smp_ctl_clear_bit(int cr, int bit); @@ -57,7 +59,10 @@ union ctlreg0 { unsigned long lap : 1; /* Low-address-protection control */ unsigned long : 4; unsigned long edat : 1; /* Enhanced-DAT-enablement control */ - unsigned long : 23; + unsigned long : 4; + unsigned long afp : 1; /* AFP-register control */ + unsigned long vx : 1; /* Vector enablement control */ + unsigned long : 17; }; }; diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h index 629b79a93165..f7e5c36688c3 100644 --- a/arch/s390/include/asm/etr.h +++ b/arch/s390/include/asm/etr.h @@ -214,6 +214,9 @@ static inline int etr_ptff(void *ptff_block, unsigned int func) void etr_switch_to_local(void); void etr_sync_check(void); +/* notifier for syncs */ +extern struct atomic_notifier_head s390_epoch_delta_notifier; + /* STP interruption parameter */ struct stp_irq_parm { unsigned int _pad0 : 14; diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu-internal.h new file mode 100644 index 000000000000..55dc2c0fb40a --- /dev/null +++ b/arch/s390/include/asm/fpu-internal.h @@ -0,0 +1,110 @@ +/* + * General floating-point and vector register helpers + * + * Copyright IBM Corp.
2015 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ + +#ifndef _ASM_S390_FPU_INTERNAL_H +#define _ASM_S390_FPU_INTERNAL_H + +#define FPU_USE_VX 1 /* Vector extension is active */ + +#ifndef __ASSEMBLY__ + +#include <linux/errno.h> +#include <linux/string.h> +#include <asm/linkage.h> +#include <asm/ctl_reg.h> +#include <asm/sigcontext.h> + +struct fpu { + __u32 fpc; /* Floating-point control */ + __u32 flags; + union { + void *regs; + freg_t *fprs; /* Floating-point register save area */ + __vector128 *vxrs; /* Vector register save area */ + }; +}; + +void save_fpu_regs(void); + +#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX)) +#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX)) + +/* VX array structure for address operand constraints in inline assemblies */ +struct vx_array { __vector128 _[__NUM_VXRS]; }; + +static inline int test_fp_ctl(u32 fpc) +{ + u32 orig_fpc; + int rc; + + asm volatile( + " efpc %1\n" + " sfpc %2\n" + "0: sfpc %1\n" + " la %0,0\n" + "1:\n" + EX_TABLE(0b,1b) + : "=d" (rc), "=d" (orig_fpc) + : "d" (fpc), "0" (-EINVAL)); + return rc; +} + +static inline void save_vx_regs_safe(__vector128 *vxrs) +{ + unsigned long cr0, flags; + + flags = arch_local_irq_save(); + __ctl_store(cr0, 0, 0); + __ctl_set_bit(0, 17); + __ctl_set_bit(0, 18); + asm volatile( + " la 1,%0\n" + " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */ + " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */ + : "=Q" (*(struct vx_array *) vxrs) : : "1"); + __ctl_load(cr0, 0, 0); + arch_local_irq_restore(flags); +} + +static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs) +{ + int i; + + for (i = 0; i < __NUM_FPRS; i++) + fprs[i] = *(freg_t *)(vxrs + i); +} + +static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs) +{ + int i; + + for (i = 0; i < __NUM_FPRS; i++) + *(freg_t *)(vxrs + i) = fprs[i]; +} + +static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) +{ + fpregs->pad = 0; + if (is_vx_fpu(fpu)) + convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); + else + memcpy((freg_t *)&fpregs->fprs, fpu->fprs, + sizeof(fpregs->fprs)); +} + +static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) +{ + if (is_vx_fpu(fpu)) + convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); + else + memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, + sizeof(fpregs->fprs)); +} + +#endif + +#endif /* _ASM_S390_FPU_INTERNAL_H */ diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index 0130d0379edd..d9be7c0c1291 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -14,6 +14,7 @@ #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range free_pgd_range +#define hugepages_supported() (MACHINE_HAS_HPAGE) void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index cb5fdf3a78fc..437e9af96688 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -57,6 +57,8 @@ static inline void ioport_unmap(void __iomem *p) */ #define pci_iomap pci_iomap #define pci_iounmap pci_iounmap +#define pci_iomap_wc pci_iomap +#define pci_iomap_wc_range pci_iomap_range #define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count) #define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3024acbe1f9d..3d012e071647 100644 --- 
a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -22,6 +22,7 @@ #include <linux/kvm.h> #include <asm/debug.h> #include <asm/cpu.h> +#include <asm/fpu-internal.h> #include <asm/isc.h> #define KVM_MAX_VCPUS 64 @@ -258,6 +259,9 @@ struct kvm_vcpu_stat { u32 diagnose_10; u32 diagnose_44; u32 diagnose_9c; + u32 diagnose_258; + u32 diagnose_308; + u32 diagnose_500; }; #define PGM_OPERATION 0x01 @@ -498,10 +502,9 @@ struct kvm_guestdbg_info_arch { struct kvm_vcpu_arch { struct kvm_s390_sie_block *sie_block; - s390_fp_regs host_fpregs; unsigned int host_acrs[NUM_ACRS]; - s390_fp_regs guest_fpregs; - struct kvm_s390_vregs *host_vregs; + struct fpu host_fpregs; + struct fpu guest_fpregs; struct kvm_s390_local_interrupt local_int; struct hrtimer ckc_timer; struct kvm_s390_pgm_info pgm; @@ -630,7 +633,6 @@ extern char sie_exit; static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_check_processor_compat(void *rtn) {} -static inline void kvm_arch_exit(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h index fc8a8284778e..27da78cf416d 100644 --- a/arch/s390/include/asm/linkage.h +++ b/arch/s390/include/asm/linkage.h @@ -6,4 +6,26 @@ #define __ALIGN .align 4, 0x07 #define __ALIGN_STR __stringify(__ALIGN) +#ifndef __ASSEMBLY__ + +/* + * Helper macro for exception table entries + */ +#define EX_TABLE(_fault, _target) \ + ".section __ex_table,\"a\"\n" \ + ".align 4\n" \ + ".long (" #_fault ") - .\n" \ + ".long (" #_target ") - .\n" \ + ".previous\n" + +#else /* __ASSEMBLY__ */ + +#define EX_TABLE(_fault, _target) \ + .section __ex_table,"a" ; \ + .align 4 ; \ + .long (_fault) - . ; \ + .long (_target) - . ; \ + .previous + +#endif /* __ASSEMBLY__ */ #endif diff --git a/arch/s390/include/asm/mm-arch-hooks.h b/arch/s390/include/asm/mm-arch-hooks.h deleted file mode 100644 index 07680b2f3c59..000000000000 --- a/arch/s390/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_S390_MM_ARCH_HOOKS_H -#define _ASM_S390_MM_ARCH_HOOKS_H - -#endif /* _ASM_S390_MM_ARCH_HOOKS_H */ diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h new file mode 100644 index 000000000000..a9e834e60b84 --- /dev/null +++ b/arch/s390/include/asm/mmzone.h @@ -0,0 +1,16 @@ +/* + * NUMA support for s390 + * + * Copyright IBM Corp. 2015 + */ + +#ifndef _ASM_S390_MMZONE_H +#define _ASM_S390_MMZONE_H + +#ifdef CONFIG_NUMA + +extern struct pglist_data *node_data[]; +#define NODE_DATA(nid) (node_data[nid]) + +#endif /* CONFIG_NUMA */ +#endif /* _ASM_S390_MMZONE_H */ diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h new file mode 100644 index 000000000000..2a0efc63b9e5 --- /dev/null +++ b/arch/s390/include/asm/numa.h @@ -0,0 +1,35 @@ +/* + * NUMA support for s390 + * + * Declare the NUMA core code structures and functions. + * + * Copyright IBM Corp. 
2015 + */ + +#ifndef _ASM_S390_NUMA_H +#define _ASM_S390_NUMA_H + +#ifdef CONFIG_NUMA + +#include <linux/numa.h> +#include <linux/cpumask.h> + +void numa_setup(void); +int numa_pfn_to_nid(unsigned long pfn); +int __node_distance(int a, int b); +void numa_update_cpu_topology(void); + +extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +extern int numa_debug_enabled; + +#else + +static inline void numa_setup(void) { } +static inline void numa_update_cpu_topology(void) { } +static inline int numa_pfn_to_nid(unsigned long pfn) +{ + return 0; +} + +#endif /* CONFIG_NUMA */ +#endif /* _ASM_S390_NUMA_H */ diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index dd345238d9a7..53eacbd4f09b 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -17,10 +17,7 @@ #define PAGE_DEFAULT_ACC 0 #define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4) -#include <asm/setup.h> -#ifndef __ASSEMBLY__ - -extern int HPAGE_SHIFT; +#define HPAGE_SHIFT 20 #define HPAGE_SIZE (1UL << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) @@ -30,6 +27,9 @@ extern int HPAGE_SHIFT; #define ARCH_HAS_PREPARE_HUGEPAGE #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH +#include <asm/setup.h> +#ifndef __ASSEMBLY__ + static inline void storage_key_init_range(unsigned long start, unsigned long end) { #if PAGE_DEFAULT_KEY diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index a648338c434a..34d960353a08 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -170,7 +170,11 @@ static inline void zpci_exit_slot(struct zpci_dev *zdev) {} #endif /* CONFIG_HOTPLUG_PCI_S390 */ /* Helpers */ -struct zpci_dev *get_zdev(struct pci_dev *); +static inline struct zpci_dev *to_zpci(struct pci_dev *pdev) +{ + return pdev->sysdata; +} + struct zpci_dev *get_zdev_by_fid(u32); /* DMA */ @@ -188,4 +192,20 @@ void zpci_debug_init_device(struct zpci_dev *); void zpci_debug_exit_device(struct zpci_dev *); void zpci_debug_info(struct zpci_dev *, struct seq_file *); +#ifdef CONFIG_NUMA + +/* Returns the node based on PCI bus */ +static inline int __pcibus_to_node(const struct pci_bus *bus) +{ + return NUMA_NO_NODE; +} + +static inline const struct cpumask * +cpumask_of_pcibus(const struct pci_bus *bus) +{ + return cpu_online_mask; +} + +#endif /* CONFIG_NUMA */ + #endif diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 4cb19fe76dd9..f897ec73dc8c 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -87,7 +87,15 @@ struct sf_raw_sample { } __packed; /* Perf hardware reserve and release functions */ +#ifdef CONFIG_PERF_EVENTS int perf_reserve_sampling(void); void perf_release_sampling(void); +#else /* CONFIG_PERF_EVENTS */ +static inline int perf_reserve_sampling(void) +{ + return 0; +} +static inline void perf_release_sampling(void) {} +#endif /* CONFIG_PERF_EVENTS */ #endif /* _ASM_S390_PERF_EVENT_H */ diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index f66d82798a6a..bdb2f51124ed 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -576,6 +576,19 @@ static inline int pte_same(pte_t a, pte_t b) return pte_val(a) == pte_val(b); } +#ifdef CONFIG_NUMA_BALANCING +static inline int pte_protnone(pte_t pte) +{ + return pte_present(pte) && !(pte_val(pte) & _PAGE_READ); +} + +static inline int pmd_protnone(pmd_t pmd) +{ + /* pmd_large(pmd) implies pmd_present(pmd) */ + return pmd_large(pmd) && 
!(pmd_val(pmd) & _SEGMENT_ENTRY_READ); +} +#endif + static inline pgste_t pgste_get_lock(pte_t *ptep) { unsigned long new = 0; diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index dedb6218544b..085fb0d3c54e 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -14,10 +14,12 @@ #define CIF_MCCK_PENDING 0 /* machine check handling is pending */ #define CIF_ASCE 1 /* user asce needs fixup / uaccess */ #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ +#define CIF_FPU 3 /* restore vector registers */ #define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING) #define _CIF_ASCE (1<<CIF_ASCE) #define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) +#define _CIF_FPU (1<<CIF_FPU) #ifndef __ASSEMBLY__ @@ -28,6 +30,7 @@ #include <asm/ptrace.h> #include <asm/setup.h> #include <asm/runtime_instr.h> +#include <asm/fpu-internal.h> static inline void set_cpu_flag(int flag) { @@ -85,7 +88,7 @@ typedef struct { * Thread structure */ struct thread_struct { - s390_fp_regs fp_regs; + struct fpu fpu; /* FP and VX register save area */ unsigned int acrs[NUM_ACRS]; unsigned long ksp; /* kernel stack pointer */ mm_segment_t mm_segment; @@ -101,7 +104,6 @@ struct thread_struct { struct runtime_instr_cb *ri_cb; int ri_signum; unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ - __vector128 *vxrs; /* Vector register save area */ }; /* Flag to disable transactions. */ @@ -231,6 +233,17 @@ static inline void __load_psw_mask (unsigned long mask) } /* + * Extract current PSW mask + */ +static inline unsigned long __extract_psw(void) +{ + unsigned int reg1, reg2; + + asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2)); + return (((unsigned long) reg1) << 32) | ((unsigned long) reg2); +} + +/* * Rewind PSW instruction address by specified number of bytes. */ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) @@ -336,25 +349,6 @@ extern void memcpy_absolute(void *, void *, size_t); memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ } -/* - * Helper macro for exception table entries - */ -#define EX_TABLE(_fault, _target) \ - ".section __ex_table,\"a\"\n" \ - ".align 4\n" \ - ".long (" #_fault ") - .\n" \ - ".long (" #_target ") - .\n" \ - ".previous\n" - -#else /* __ASSEMBLY__ */ - -#define EX_TABLE(_fault, _target) \ - .section __ex_table,"a" ; \ - .align 4 ; \ - .long (_fault) - . ; \ - .long (_target) - . 
; \ - .previous - #endif /* __ASSEMBLY__ */ #endif /* __ASM_S390_PROCESSOR_H */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index f6ff06077631..821dde5f425d 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -79,6 +79,6 @@ int sclp_pci_configure(u32 fid); int sclp_pci_deconfigure(u32 fid); int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); void sclp_early_detect(void); -long _sclp_print_early(const char *); +int _sclp_print_early(const char *); #endif /* _ASM_S390_SCLP_H */ diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index d62e7a69605f..dcadfde32265 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -8,139 +8,12 @@ #define __ASM_SWITCH_TO_H #include <linux/thread_info.h> +#include <asm/fpu-internal.h> #include <asm/ptrace.h> extern struct task_struct *__switch_to(void *, void *); extern void update_cr_regs(struct task_struct *task); -static inline int test_fp_ctl(u32 fpc) -{ - u32 orig_fpc; - int rc; - - asm volatile( - " efpc %1\n" - " sfpc %2\n" - "0: sfpc %1\n" - " la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (rc), "=d" (orig_fpc) - : "d" (fpc), "0" (-EINVAL)); - return rc; -} - -static inline void save_fp_ctl(u32 *fpc) -{ - asm volatile( - " stfpc %0\n" - : "+Q" (*fpc)); -} - -static inline int restore_fp_ctl(u32 *fpc) -{ - int rc; - - asm volatile( - " lfpc %1\n" - "0: la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL)); - return rc; -} - -static inline void save_fp_regs(freg_t *fprs) -{ - asm volatile("std 0,%0" : "=Q" (fprs[0])); - asm volatile("std 2,%0" : "=Q" (fprs[2])); - asm volatile("std 4,%0" : "=Q" (fprs[4])); - asm volatile("std 6,%0" : "=Q" (fprs[6])); - asm volatile("std 1,%0" : "=Q" (fprs[1])); - asm volatile("std 3,%0" : "=Q" (fprs[3])); - asm volatile("std 5,%0" : "=Q" (fprs[5])); - asm volatile("std 7,%0" : "=Q" (fprs[7])); - asm volatile("std 8,%0" : "=Q" (fprs[8])); - asm volatile("std 9,%0" : "=Q" (fprs[9])); - asm volatile("std 10,%0" : "=Q" (fprs[10])); - asm volatile("std 11,%0" : "=Q" (fprs[11])); - asm volatile("std 12,%0" : "=Q" (fprs[12])); - asm volatile("std 13,%0" : "=Q" (fprs[13])); - asm volatile("std 14,%0" : "=Q" (fprs[14])); - asm volatile("std 15,%0" : "=Q" (fprs[15])); -} - -static inline void restore_fp_regs(freg_t *fprs) -{ - asm volatile("ld 0,%0" : : "Q" (fprs[0])); - asm volatile("ld 2,%0" : : "Q" (fprs[2])); - asm volatile("ld 4,%0" : : "Q" (fprs[4])); - asm volatile("ld 6,%0" : : "Q" (fprs[6])); - asm volatile("ld 1,%0" : : "Q" (fprs[1])); - asm volatile("ld 3,%0" : : "Q" (fprs[3])); - asm volatile("ld 5,%0" : : "Q" (fprs[5])); - asm volatile("ld 7,%0" : : "Q" (fprs[7])); - asm volatile("ld 8,%0" : : "Q" (fprs[8])); - asm volatile("ld 9,%0" : : "Q" (fprs[9])); - asm volatile("ld 10,%0" : : "Q" (fprs[10])); - asm volatile("ld 11,%0" : : "Q" (fprs[11])); - asm volatile("ld 12,%0" : : "Q" (fprs[12])); - asm volatile("ld 13,%0" : : "Q" (fprs[13])); - asm volatile("ld 14,%0" : : "Q" (fprs[14])); - asm volatile("ld 15,%0" : : "Q" (fprs[15])); -} - -static inline void save_vx_regs(__vector128 *vxrs) -{ - typedef struct { __vector128 _[__NUM_VXRS]; } addrtype; - - asm volatile( - " la 1,%0\n" - " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */ - " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */ - : "=Q" (*(addrtype *) vxrs) : : "1"); -} - -static inline void save_vx_regs_safe(__vector128 *vxrs) -{ - unsigned long cr0, flags; - - flags = 
arch_local_irq_save(); - __ctl_store(cr0, 0, 0); - __ctl_set_bit(0, 17); - __ctl_set_bit(0, 18); - save_vx_regs(vxrs); - __ctl_load(cr0, 0, 0); - arch_local_irq_restore(flags); -} - -static inline void restore_vx_regs(__vector128 *vxrs) -{ - typedef struct { __vector128 _[__NUM_VXRS]; } addrtype; - - asm volatile( - " la 1,%0\n" - " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */ - " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */ - : : "Q" (*(addrtype *) vxrs) : "1"); -} - -static inline void save_fp_vx_regs(struct task_struct *task) -{ - if (task->thread.vxrs) - save_vx_regs(task->thread.vxrs); - else - save_fp_regs(task->thread.fp_regs.fprs); -} - -static inline void restore_fp_vx_regs(struct task_struct *task) -{ - if (task->thread.vxrs) - restore_vx_regs(task->thread.vxrs); - else - restore_fp_regs(task->thread.fp_regs.fprs); -} - static inline void save_access_regs(unsigned int *acrs) { typedef struct { int _[NUM_ACRS]; } acrstype; @@ -157,15 +30,13 @@ static inline void restore_access_regs(unsigned int *acrs) #define switch_to(prev,next,last) do { \ if (prev->mm) { \ - save_fp_ctl(&prev->thread.fp_regs.fpc); \ - save_fp_vx_regs(prev); \ + save_fpu_regs(); \ save_access_regs(&prev->thread.acrs[0]); \ save_ri_cb(prev->thread.ri_cb); \ } \ if (next->mm) { \ update_cr_regs(next); \ - restore_fp_ctl(&next->thread.fp_regs.fpc); \ - restore_fp_vx_regs(next); \ + set_cpu_flag(CIF_FPU); \ restore_access_regs(&next->thread.acrs[0]); \ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ } \ diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 4990f6c66288..27ebde643933 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -2,6 +2,7 @@ #define _ASM_S390_TOPOLOGY_H #include <linux/cpumask.h> +#include <asm/numa.h> struct sysinfo_15_1_x; struct cpu; @@ -13,6 +14,7 @@ struct cpu_topology_s390 { unsigned short core_id; unsigned short socket_id; unsigned short book_id; + unsigned short node_id; cpumask_t thread_mask; cpumask_t core_mask; cpumask_t book_mask; @@ -52,6 +54,43 @@ static inline void topology_expect_change(void) { } #define POLARIZATION_VM (2) #define POLARIZATION_VH (3) +#define SD_BOOK_INIT SD_CPU_INIT + +#ifdef CONFIG_NUMA + +#define cpu_to_node cpu_to_node +static inline int cpu_to_node(int cpu) +{ + return per_cpu(cpu_topology, cpu).node_id; +} + +/* Returns a pointer to the cpumask of CPUs on node 'node'. */ +#define cpumask_of_node cpumask_of_node +static inline const struct cpumask *cpumask_of_node(int node) +{ + return node_to_cpumask_map[node]; +} + +/* + * Returns the number of the node containing node 'node'. This + * architecture is flat, so it is a pretty simple function! + */ +#define parent_node(node) (node) + +#define pcibus_to_node(bus) __pcibus_to_node(bus) + +#define node_distance(a, b) __node_distance(a, b) + +#else /* !CONFIG_NUMA */ + +#define numa_node_id numa_node_id +static inline int numa_node_id(void) +{ + return 0; +} + +#endif /* CONFIG_NUMA */ + #include <asm-generic/topology.h> #endif /* _ASM_S390_TOPOLOGY_H */ diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 91f56b1d8156..525cef73b085 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -11,16 +11,24 @@ #define __IGNORE_time -/* Ignore NUMA system calls. Not wired up on s390. 
*/ -#define __IGNORE_mbind -#define __IGNORE_get_mempolicy -#define __IGNORE_set_mempolicy -#define __IGNORE_migrate_pages -#define __IGNORE_move_pages - -/* Ignore system calls that are also reachable via sys_socket */ +/* Ignore system calls that are also reachable via sys_socketcall */ #define __IGNORE_recvmmsg #define __IGNORE_sendmmsg +#define __IGNORE_socket +#define __IGNORE_socketpair +#define __IGNORE_bind +#define __IGNORE_connect +#define __IGNORE_listen +#define __IGNORE_accept4 +#define __IGNORE_getsockopt +#define __IGNORE_setsockopt +#define __IGNORE_getsockname +#define __IGNORE_getpeername +#define __IGNORE_sendto +#define __IGNORE_sendmsg +#define __IGNORE_recvfrom +#define __IGNORE_recvmsg +#define __IGNORE_shutdown #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h new file mode 100644 index 000000000000..4a3135620f5e --- /dev/null +++ b/arch/s390/include/asm/vx-insn.h @@ -0,0 +1,480 @@ +/* + * Support for Vector Instructions + * + * Assembler macros to generate .byte/.word code for particular + * vector instructions that are supported by recent binutils (>= 2.26) only. + * + * Copyright IBM Corp. 2015 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ + +#ifndef __ASM_S390_VX_INSN_H +#define __ASM_S390_VX_INSN_H + +#ifdef __ASSEMBLY__ + + +/* Macros to generate vector instruction byte code */ + +#define REG_NUM_INVALID 255 + +/* GR_NUM - Retrieve general-purpose register number + * + * @opd: Operand to store register number + * @gr: Register designation string in the format "%rN" + */ +.macro GR_NUM opd gr + \opd = REG_NUM_INVALID + .ifc \gr,%r0 + \opd = 0 + .endif + .ifc \gr,%r1 + \opd = 1 + .endif + .ifc \gr,%r2 + \opd = 2 + .endif + .ifc \gr,%r3 + \opd = 3 + .endif + .ifc \gr,%r4 + \opd = 4 + .endif + .ifc \gr,%r5 + \opd = 5 + .endif + .ifc \gr,%r6 + \opd = 6 + .endif + .ifc \gr,%r7 + \opd = 7 + .endif + .ifc \gr,%r8 + \opd = 8 + .endif + .ifc \gr,%r9 + \opd = 9 + .endif + .ifc \gr,%r10 + \opd = 10 + .endif + .ifc \gr,%r11 + \opd = 11 + .endif + .ifc \gr,%r12 + \opd = 12 + .endif + .ifc \gr,%r13 + \opd = 13 + .endif + .ifc \gr,%r14 + \opd = 14 + .endif + .ifc \gr,%r15 + \opd = 15 + .endif + .if \opd == REG_NUM_INVALID + .error "Invalid general-purpose register designation: \gr" + .endif +.endm + +/* VX_R() - Macro to encode the VX_NUM into the instruction */ +#define VX_R(v) (v & 0x0F) + +/* VX_NUM - Retrieve vector register number + * + * @opd: Operand to store register number + * @vxr: Register designation string in the format "%vN" + * + * The vector register number is used as an input to the + * instruction as well as to compute the RXB field of the + * instruction. To encode a particular vector register number + * into the instruction opcode, use the VX_R(v) macro to extract + * its low four bits.
+ */ +.macro VX_NUM opd vxr + \opd = REG_NUM_INVALID + .ifc \vxr,%v0 + \opd = 0 + .endif + .ifc \vxr,%v1 + \opd = 1 + .endif + .ifc \vxr,%v2 + \opd = 2 + .endif + .ifc \vxr,%v3 + \opd = 3 + .endif + .ifc \vxr,%v4 + \opd = 4 + .endif + .ifc \vxr,%v5 + \opd = 5 + .endif + .ifc \vxr,%v6 + \opd = 6 + .endif + .ifc \vxr,%v7 + \opd = 7 + .endif + .ifc \vxr,%v8 + \opd = 8 + .endif + .ifc \vxr,%v9 + \opd = 9 + .endif + .ifc \vxr,%v10 + \opd = 10 + .endif + .ifc \vxr,%v11 + \opd = 11 + .endif + .ifc \vxr,%v12 + \opd = 12 + .endif + .ifc \vxr,%v13 + \opd = 13 + .endif + .ifc \vxr,%v14 + \opd = 14 + .endif + .ifc \vxr,%v15 + \opd = 15 + .endif + .ifc \vxr,%v16 + \opd = 16 + .endif + .ifc \vxr,%v17 + \opd = 17 + .endif + .ifc \vxr,%v18 + \opd = 18 + .endif + .ifc \vxr,%v19 + \opd = 19 + .endif + .ifc \vxr,%v20 + \opd = 20 + .endif + .ifc \vxr,%v21 + \opd = 21 + .endif + .ifc \vxr,%v22 + \opd = 22 + .endif + .ifc \vxr,%v23 + \opd = 23 + .endif + .ifc \vxr,%v24 + \opd = 24 + .endif + .ifc \vxr,%v25 + \opd = 25 + .endif + .ifc \vxr,%v26 + \opd = 26 + .endif + .ifc \vxr,%v27 + \opd = 27 + .endif + .ifc \vxr,%v28 + \opd = 28 + .endif + .ifc \vxr,%v29 + \opd = 29 + .endif + .ifc \vxr,%v30 + \opd = 30 + .endif + .ifc \vxr,%v31 + \opd = 31 + .endif + .if \opd == REG_NUM_INVALID + .error "Invalid vector register designation: \vxr" + .endif +.endm + +/* RXB - Compute most significant bit used vector registers + * + * @rxb: Operand to store computed RXB value + * @v1: First vector register designated operand + * @v2: Second vector register designated operand + * @v3: Third vector register designated operand + * @v4: Fourth vector register designated operand + */ +.macro RXB rxb v1 v2=0 v3=0 v4=0 + \rxb = 0 + .if \v1 & 0x10 + \rxb = \rxb | 0x08 + .endif + .if \v2 & 0x10 + \rxb = \rxb | 0x04 + .endif + .if \v3 & 0x10 + \rxb = \rxb | 0x02 + .endif + .if \v4 & 0x10 + \rxb = \rxb | 0x01 + .endif +.endm + +/* MRXB - Generate Element Size Control and RXB value + * + * @m: Element size control + * @v1: First vector register designated operand (for RXB) + * @v2: Second vector register designated operand (for RXB) + * @v3: Third vector register designated operand (for RXB) + * @v4: Fourth vector register designated operand (for RXB) + */ +.macro MRXB m v1 v2=0 v3=0 v4=0 + rxb = 0 + RXB rxb, \v1, \v2, \v3, \v4 + .byte (\m << 4) | rxb +.endm + +/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields + * + * @m: Element size control + * @opc: Opcode + * @v1: First vector register designated operand (for RXB) + * @v2: Second vector register designated operand (for RXB) + * @v3: Third vector register designated operand (for RXB) + * @v4: Fourth vector register designated operand (for RXB) + */ +.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0 + MRXB \m, \v1, \v2, \v3, \v4 + .byte \opc +.endm + +/* Vector support instructions */ + +/* VECTOR GENERATE BYTE MASK */ +.macro VGBM vr imm2 + VX_NUM v1, \vr + .word (0xE700 | (VX_R(v1) << 4)) + .word \imm2 + MRXBOPC 0, 0x44, v1 +.endm +.macro VZERO vxr + VGBM \vxr, 0 +.endm +.macro VONE vxr + VGBM \vxr, 0xFFFF +.endm + +/* VECTOR LOAD VR ELEMENT FROM GR */ +.macro VLVG v, gr, disp, m + VX_NUM v1, \v + GR_NUM b2, "%r0" + GR_NUM r3, \gr + .word 0xE700 | (VX_R(v1) << 4) | r3 + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x22, v1 +.endm +.macro VLVGB v, gr, index, base + VLVG \v, \gr, \index, \base, 0 +.endm +.macro VLVGH v, gr, index + VLVG \v, \gr, \index, 1 +.endm +.macro VLVGF v, gr, index + VLVG \v, \gr, \index, 2 +.endm +.macro VLVGG v, gr, index + VLVG \v, \gr, \index, 3 +.endm 
+ +/* VECTOR LOAD */ +.macro VL v, disp, index="%r0", base + VX_NUM v1, \v + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | (VX_R(v1) << 4) | x2 + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x06, v1 +.endm + +/* VECTOR LOAD ELEMENT */ +.macro VLEx vr1, disp, index="%r0", base, m3, opc + VX_NUM v1, \vr1 + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE700 | (VX_R(v1) << 4) | x2 + .word (b2 << 12) | (\disp) + MRXBOPC \m3, \opc, v1 +.endm +.macro VLEB vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x00 +.endm +.macro VLEH vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x01 +.endm +.macro VLEF vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x03 +.endm +.macro VLEG vr1, disp, index="%r0", base, m3 + VLEx \vr1, \disp, \index, \base, \m3, 0x02 +.endm + +/* VECTOR LOAD ELEMENT IMMEDIATE */ +.macro VLEIx vr1, imm2, m3, opc + VX_NUM v1, \vr1 + .word 0xE700 | (VX_R(v1) << 4) + .word \imm2 + MRXBOPC \m3, \opc, v1 +.endm +.macro VLEIB vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x40 +.endm +.macro VLEIH vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x41 +.endm +.macro VLEIF vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x43 +.endm +.macro VLEIG vr1, imm2, index + VLEIx \vr1, \imm2, \index, 0x42 +.endm + +/* VECTOR LOAD GR FROM VR ELEMENT */ +.macro VLGV gr, vr, disp, base="%r0", m + GR_NUM r1, \gr + GR_NUM b2, \base + VX_NUM v3, \vr + .word 0xE700 | (r1 << 4) | VX_R(v3) + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x21, v3 +.endm +.macro VLGVB gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 0 +.endm +.macro VLGVH gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 1 +.endm +.macro VLGVF gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 2 +.endm +.macro VLGVG gr, vr, disp, base="%r0" + VLGV \gr, \vr, \disp, \base, 3 +.endm + +/* VECTOR LOAD MULTIPLE */ +.macro VLM vfrom, vto, disp, base + VX_NUM v1, \vfrom + VX_NUM v3, \vto + GR_NUM b2, \base /* Base register */ + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v3) + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x36, v1, v3 +.endm + +/* VECTOR STORE MULTIPLE */ +.macro VSTM vfrom, vto, disp, base + VX_NUM v1, \vfrom + VX_NUM v3, \vto + GR_NUM b2, \base /* Base register */ + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v3) + .word (b2 << 12) | (\disp) + MRXBOPC 0, 0x3E, v1, v3 +.endm + +/* VECTOR PERMUTE */ +.macro VPERM vr1, vr2, vr3, vr4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + VX_NUM v4, \vr4 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) + MRXBOPC VX_R(v4), 0x8C, v1, v2, v3, v4 +.endm + +/* VECTOR UNPACK LOGICAL LOW */ +.macro VUPLL vr1, vr2, m3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word 0x0000 + MRXBOPC \m3, 0xD4, v1, v2 +.endm +.macro VUPLLB vr1, vr2 + VUPLL \vr1, \vr2, 0 +.endm +.macro VUPLLH vr1, vr2 + VUPLL \vr1, \vr2, 1 +.endm +.macro VUPLLF vr1, vr2 + VUPLL \vr1, \vr2, 2 +.endm + + +/* Vector integer instructions */ + +/* VECTOR EXCLUSIVE OR */ +.macro VX vr1, vr2, vr3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) + MRXBOPC 0, 0x6D, v1, v2, v3 +.endm + +/* VECTOR GALOIS FIELD MULTIPLY SUM */ +.macro VGFM vr1, vr2, vr3, m4 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) + MRXBOPC \m4, 0xB4, v1, v2, v3 +.endm +.macro VGFMB vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 0 +.endm +.macro VGFMH vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 1 +.endm +.macro 
VGFMF vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 2 +.endm +.macro VGFMG vr1, vr2, vr3 + VGFM \vr1, \vr2, \vr3, 3 +.endm + +/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ +.macro VGFMA vr1, vr2, vr3, vr4, m5 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + VX_NUM v4, \vr4 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) | (\m5 << 8) + MRXBOPC VX_R(v4), 0xBC, v1, v2, v3, v4 +.endm +.macro VGFMAB vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 0 +.endm +.macro VGFMAH vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 1 +.endm +.macro VGFMAF vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 2 +.endm +.macro VGFMAG vr1, vr2, vr3, vr4 + VGFMA \vr1, \vr2, \vr3, \vr4, 3 +.endm + +/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */ +.macro VSRLB vr1, vr2, vr3 + VX_NUM v1, \vr1 + VX_NUM v2, \vr2 + VX_NUM v3, \vr3 + .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2) + .word (VX_R(v3) << 12) + MRXBOPC 0, 0x7D, v1, v2, v3 +.endm + + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_S390_VX_INSN_H */ diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 67878af257a0..59d2bb4e2d0c 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -204,9 +204,9 @@ #define __NR_statfs64 265 #define __NR_fstatfs64 266 #define __NR_remap_file_pages 267 -/* Number 268 is reserved for new sys_mbind */ -/* Number 269 is reserved for new sys_get_mempolicy */ -/* Number 270 is reserved for new sys_set_mempolicy */ +#define __NR_mbind 268 +#define __NR_get_mempolicy 269 +#define __NR_set_mempolicy 270 #define __NR_mq_open 271 #define __NR_mq_unlink 272 #define __NR_mq_timedsend 273 @@ -223,7 +223,7 @@ #define __NR_inotify_init 284 #define __NR_inotify_add_watch 285 #define __NR_inotify_rm_watch 286 -/* Number 287 is reserved for new sys_migrate_pages */ +#define __NR_migrate_pages 287 #define __NR_openat 288 #define __NR_mkdirat 289 #define __NR_mknodat 290 @@ -245,7 +245,7 @@ #define __NR_sync_file_range 307 #define __NR_tee 308 #define __NR_vmsplice 309 -/* Number 310 is reserved for new sys_move_pages */ +#define __NR_move_pages 310 #define __NR_getcpu 311 #define __NR_epoll_pwait 312 #define __NR_utimes 313 diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index ffb87617a36c..b756c6348ac6 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -28,6 +28,17 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' CFLAGS_sysinfo.o += -w +# +# Use -march=z900 for sclp.c to be able to print an error message if +# the kernel is started on a machine which is too old +# +CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE) +ifneq ($(CC_FLAGS_MARCH),-march=z900) +CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH) +CFLAGS_sclp.o += -march=z900 +endif +GCOV_PROFILE_sclp.o := n + obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index c7d1b9d09011..48c9af7a7683 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -23,15 +23,18 @@ int main(void) { - DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); - DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); - DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment)); - BLANK(); + DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack)); + DEFINE(__TASK_thread, offsetof(struct task_struct, 
thread)); DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); BLANK(); - DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause)); - DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address)); - DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid)); + DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp)); + DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc)); + DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags)); + DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs)); + DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause)); + DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address)); + DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid)); + DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb)); BLANK(); DEFINE(__TI_task, offsetof(struct thread_info, task)); DEFINE(__TI_flags, offsetof(struct thread_info, flags)); @@ -176,7 +179,6 @@ int main(void) DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); - DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb)); DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index bff5e3b6d822..8ba32436effe 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c @@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu) union cache_topology ct; enum cache_type ctype; + if (!test_facility(34)) + return -EOPNOTSUPP; if (!this_cpu_ci) return -EINVAL; ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index fe8d6924efaa..eb4664238613 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -153,33 +153,14 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) /* Store registers needed to create the signal frame */ static void store_sigregs(void) { - int i; - save_access_regs(current->thread.acrs); - save_fp_ctl(¤t->thread.fp_regs.fpc); - if (current->thread.vxrs) { - save_vx_regs(current->thread.vxrs); - for (i = 0; i < __NUM_FPRS; i++) - current->thread.fp_regs.fprs[i] = - *(freg_t *)(current->thread.vxrs + i); - } else - save_fp_regs(current->thread.fp_regs.fprs); + save_fpu_regs(); } /* Load registers after signal return */ static void load_sigregs(void) { - int i; - restore_access_regs(current->thread.acrs); - /* restore_fp_ctl is done in restore_sigregs */ - if (current->thread.vxrs) { - for (i = 0; i < __NUM_FPRS; i++) - *(freg_t *)(current->thread.vxrs + i) = - current->thread.fp_regs.fprs[i]; - restore_vx_regs(current->thread.vxrs); - } else - restore_fp_regs(current->thread.fp_regs.fprs); } static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) @@ -196,8 +177,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) user_sregs.regs.gprs[i] = (__u32) regs->gprs[i]; memcpy(&user_sregs.regs.acrs, current->thread.acrs, sizeof(user_sregs.regs.acrs)); - memcpy(&user_sregs.fpregs, ¤t->thread.fp_regs, - sizeof(user_sregs.fpregs)); + fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu); if (__copy_to_user(sregs, &user_sregs, 
sizeof(_sigregs32))) return -EFAULT; return 0; @@ -217,8 +197,8 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI)) return -EINVAL; - /* Loading the floating-point-control word can fail. Do that first. */ - if (restore_fp_ctl(&user_sregs.fpregs.fpc)) + /* Test the floating-point-control word. */ + if (test_fp_ctl(user_sregs.fpregs.fpc)) return -EINVAL; /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ @@ -235,9 +215,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) regs->gprs[i] = (__u64) user_sregs.regs.gprs[i]; memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, sizeof(current->thread.acrs)); - - memcpy(¤t->thread.fp_regs, &user_sregs.fpregs, - sizeof(current->thread.fp_regs)); + fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu); clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ return 0; @@ -258,13 +236,13 @@ static int save_sigregs_ext32(struct pt_regs *regs, return -EFAULT; /* Save vector registers to signal stack */ - if (current->thread.vxrs) { + if (is_vx_task(current)) { for (i = 0; i < __NUM_VXRS_LOW; i++) - vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1); + vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, sizeof(sregs_ext->vxrs_low)) || __copy_to_user(&sregs_ext->vxrs_high, - current->thread.vxrs + __NUM_VXRS_LOW, + current->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(sregs_ext->vxrs_high))) return -EFAULT; } @@ -286,15 +264,15 @@ static int restore_sigregs_ext32(struct pt_regs *regs, *(__u32 *)®s->gprs[i] = gprs_high[i]; /* Restore vector registers from signal stack */ - if (current->thread.vxrs) { + if (is_vx_task(current)) { if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, sizeof(sregs_ext->vxrs_low)) || - __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW, + __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, &sregs_ext->vxrs_high, sizeof(sregs_ext->vxrs_high))) return -EFAULT; for (i = 0; i < __NUM_VXRS_LOW; i++) - *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; + *((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i]; } return 0; } @@ -308,6 +286,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn) if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) goto badframe; set_current_blocked(&set); + save_fpu_regs(); if (restore_sigregs32(regs, &frame->sregs)) goto badframe; if (restore_sigregs_ext32(regs, &frame->sregs_ext)) @@ -330,6 +309,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn) set_current_blocked(&set); if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; + save_fpu_regs(); if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) @@ -472,7 +452,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, */ uc_flags = UC_GPRS_HIGH; if (MACHINE_HAS_VX) { - if (current->thread.vxrs) + if (is_vx_task(current)) uc_flags |= UC_VXRS; } else frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) + diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 3238893c9d4f..247b7aae4c6d 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -20,6 +20,8 @@ #include <asm/page.h> #include <asm/sigp.h> #include <asm/irq.h> +#include <asm/fpu-internal.h> +#include <asm/vx-insn.h> __PT_R0 = __PT_GPRS __PT_R1 = __PT_GPRS + 8 @@ -46,10 +48,10 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | 
_TIF_NEED_RESCHED | \ _TIF_UPROBE) _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_SYSCALL_TRACEPOINT) -_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) +_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU) _PIF_WORK = (_PIF_PER_TRAP) -#define BASED(name) name-system_call(%r13) +#define BASED(name) name-cleanup_critical(%r13) .macro TRACE_IRQS_ON #ifdef CONFIG_TRACE_IRQFLAGS @@ -73,38 +75,6 @@ _PIF_WORK = (_PIF_PER_TRAP) #endif .endm - .macro LPP newpp -#if IS_ENABLED(CONFIG_KVM) - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP - jz .+8 - .insn s,0xb2800000,\newpp -#endif - .endm - - .macro HANDLE_SIE_INTERCEPT scratch,reason -#if IS_ENABLED(CONFIG_KVM) - tmhh %r8,0x0001 # interrupting from user ? - jnz .+62 - lgr \scratch,%r9 - slg \scratch,BASED(.Lsie_critical) - clg \scratch,BASED(.Lsie_critical_length) - .if \reason==1 - # Some program interrupts are suppressing (e.g. protection). - # We must also check the instruction after SIE in that case. - # do_protection_exception will rewind to .Lrewind_pad - jh .+42 - .else - jhe .+42 - .endif - lg %r14,__SF_EMPTY(%r15) # get control block pointer - LPP __SF_EMPTY+16(%r15) # set host id - ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE - lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - larl %r9,sie_exit # skip forward to sie_exit - mvi __SF_EMPTY+31(%r15),\reason # set exit reason -#endif - .endm - .macro CHECK_STACK stacksize,savearea #ifdef CONFIG_CHECK_STACK tml %r15,\stacksize - CONFIG_STACK_GUARD @@ -113,7 +83,7 @@ _PIF_WORK = (_PIF_PER_TRAP) #endif .endm - .macro SWITCH_ASYNC savearea,stack,shift + .macro SWITCH_ASYNC savearea,timer tmhh %r8,0x0001 # interrupting from user ? jnz 1f lgr %r14,%r9 @@ -124,26 +94,28 @@ _PIF_WORK = (_PIF_PER_TRAP) brasl %r14,cleanup_critical tmhh %r8,0x0001 # retest problem state after cleanup jnz 1f -0: lg %r14,\stack # are we already on the target stack? +0: lg %r14,__LC_ASYNC_STACK # are we already on the async stack? 
slgr %r14,%r15 - srag %r14,%r14,\shift - jnz 1f - CHECK_STACK 1<<\shift,\savearea + srag %r14,%r14,STACK_SHIFT + jnz 2f + CHECK_STACK 1<<STACK_SHIFT,\savearea aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 2f -1: lg %r15,\stack # load target stack -2: la %r11,STACK_FRAME_OVERHEAD(%r15) + j 3f +1: LAST_BREAK %r14 + UPDATE_VTIME %r14,%r15,\timer +2: lg %r15,__LC_ASYNC_STACK # load async stack +3: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm - .macro UPDATE_VTIME scratch,enter_timer - lg \scratch,__LC_EXIT_TIMER - slg \scratch,\enter_timer - alg \scratch,__LC_USER_TIMER - stg \scratch,__LC_USER_TIMER - lg \scratch,__LC_LAST_UPDATE_TIMER - slg \scratch,__LC_EXIT_TIMER - alg \scratch,__LC_SYSTEM_TIMER - stg \scratch,__LC_SYSTEM_TIMER + .macro UPDATE_VTIME w1,w2,enter_timer + lg \w1,__LC_EXIT_TIMER + lg \w2,__LC_LAST_UPDATE_TIMER + slg \w1,\enter_timer + slg \w2,__LC_EXIT_TIMER + alg \w1,__LC_USER_TIMER + alg \w2,__LC_SYSTEM_TIMER + stg \w1,__LC_USER_TIMER + stg \w2,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer .endm @@ -178,21 +150,88 @@ _PIF_WORK = (_PIF_PER_TRAP) */ ENTRY(__switch_to) stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task - stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev - lg %r4,__THREAD_info(%r2) # get thread_info of prev - lg %r5,__THREAD_info(%r3) # get thread_info of next + lgr %r1,%r2 + aghi %r1,__TASK_thread # thread_struct of prev task + lg %r4,__TASK_thread_info(%r2) # get thread_info of prev + lg %r5,__TASK_thread_info(%r3) # get thread_info of next + stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev + lgr %r1,%r3 + aghi %r1,__TASK_thread # thread_struct of next task lgr %r15,%r5 aghi %r15,STACK_INIT # end of kernel stack of next stg %r3,__LC_CURRENT # store task struct of next stg %r5,__LC_THREAD_INFO # store thread info of next stg %r15,__LC_KERNEL_STACK # store end of kernel stack + lg %r15,__THREAD_ksp(%r1) # load kernel stack of next lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next - lg %r15,__THREAD_ksp(%r3) # load kernel stack of next lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task br %r14 .L__critical_start: + +#if IS_ENABLED(CONFIG_KVM) +/* + * sie64a calling convention: + * %r2 pointer to sie control block + * %r3 guest register save area + */ +ENTRY(sie64a) + stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers + stg %r2,__SF_EMPTY(%r15) # save control block pointer + stg %r3,__SF_EMPTY+8(%r15) # save guest register save area + xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason + tm __LC_CPU_FLAGS+7,_CIF_FPU # load guest fp/vx registers ? + jno .Lsie_load_guest_gprs + brasl %r14,load_fpu_regs # load guest fp/vx regs +.Lsie_load_guest_gprs: + lmg %r0,%r13,0(%r3) # load guest gprs 0-13 + lg %r14,__LC_GMAP # get gmap pointer + ltgr %r14,%r14 + jz .Lsie_gmap + lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce +.Lsie_gmap: + lg %r14,__SF_EMPTY(%r15) # get control block pointer + oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now + tm __SIE_PROG20+3(%r14),3 # last exit... 
+ jnz .Lsie_skip + tm __LC_CPU_FLAGS+7,_CIF_FPU + jo .Lsie_skip # exit if fp/vx regs changed + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP + jz .Lsie_enter + .insn s,0xb2800000,__LC_CURRENT_PID # set guest id to pid +.Lsie_enter: + sie 0(%r14) + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP + jz .Lsie_skip + .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id +.Lsie_skip: + ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce +.Lsie_done: +# some program checks are suppressing. C code (e.g. do_protection_exception) +# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other +# instructions between sie64a and .Lsie_done should not cause program +# interrupts. So lets use a nop (47 00 00 00) as a landing pad. +# See also .Lcleanup_sie +.Lrewind_pad: + nop 0 + .globl sie_exit +sie_exit: + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area + stmg %r0,%r13,0(%r14) # save guest gprs 0-13 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers + lg %r2,__SF_EMPTY+24(%r15) # return exit reason code + br %r14 +.Lsie_fault: + lghi %r14,-EFAULT + stg %r14,__SF_EMPTY+24(%r15) # set exit reason code + j sie_exit + + EX_TABLE(.Lrewind_pad,.Lsie_fault) + EX_TABLE(sie_exit,.Lsie_fault) +#endif + /* * SVC interrupt handler routine. System calls are synchronous events and * are executed with interrupts enabled. @@ -208,9 +247,9 @@ ENTRY(system_call) .Lsysc_per: lg %r15,__LC_KERNEL_STACK la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs -.Lsysc_vtime: - UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER LAST_BREAK %r13 +.Lsysc_vtime: + UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW @@ -240,8 +279,6 @@ ENTRY(system_call) .Lsysc_return: LOCKDEP_SYS_EXIT .Lsysc_tif: - tm __PT_PSW+1(%r11),0x01 # returning to user ? - jno .Lsysc_restore tm __PT_FLAGS+7(%r11),_PIF_WORK jnz .Lsysc_work tm __TI_flags+7(%r12),_TIF_WORK @@ -276,6 +313,8 @@ ENTRY(system_call) jo .Lsysc_sigpending tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME jo .Lsysc_notify_resume + tm __LC_CPU_FLAGS+7,_CIF_FPU + jo .Lsysc_vxrs tm __LC_CPU_FLAGS+7,_CIF_ASCE jo .Lsysc_uaccess j .Lsysc_return # beware of critical section cleanup @@ -303,6 +342,13 @@ ENTRY(system_call) j .Lsysc_return # +# CIF_FPU is set, restore floating-point controls and floating-point registers. +# +.Lsysc_vxrs: + larl %r14,.Lsysc_return + jg load_fpu_regs + +# # _TIF_SIGPENDING is set, call do_signal # .Lsysc_sigpending: @@ -401,27 +447,35 @@ ENTRY(pgm_check_handler) stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call + larl %r13,cleanup_critical lmg %r8,%r9,__LC_PGM_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,1 tmhh %r8,0x0001 # test problem state bit - jnz 1f # -> fault in user space - tmhh %r8,0x4000 # PER bit set in old PSW ? - jnz 0f # -> enabled, can't be a double fault + jnz 2f # -> fault in user space +#if IS_ENABLED(CONFIG_KVM) + # cleanup critical section for sie64a + lgr %r14,%r9 + slg %r14,BASED(.Lsie_critical_start) + clg %r14,BASED(.Lsie_critical_length) + jhe 0f + brasl %r14,.Lcleanup_sie +#endif +0: tmhh %r8,0x4000 # PER bit set in old PSW ? 
+ jnz 1f # -> enabled, can't be a double fault tm __LC_PGM_ILC+3,0x80 # check for per exception jnz .Lpgm_svcper # -> single stepped svc -0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC +1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 2f -1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER - LAST_BREAK %r14 + j 3f +2: LAST_BREAK %r14 + UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER lg %r15,__LC_KERNEL_STACK lg %r14,__TI_task(%r12) + aghi %r14,__TASK_thread # pointer to thread_struct lghi %r13,__LC_PGM_TDB tm __LC_PGM_ILC+2,0x02 # check for transaction abort - jz 2f + jz 3f mvc __THREAD_trap_tdb(256,%r14),0(%r13) -2: la %r11,STACK_FRAME_OVERHEAD(%r15) +3: la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -430,24 +484,28 @@ ENTRY(pgm_check_handler) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) stg %r10,__PT_ARGS(%r11) tm __LC_PGM_ILC+3,0x80 # check for per exception - jz 0f + jz 4f tmhh %r8,0x0001 # kernel per event ? jz .Lpgm_kprobe oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID -0: REENABLE_IRQS +4: REENABLE_IRQS xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) larl %r1,pgm_check_table llgh %r10,__PT_INT_CODE+2(%r11) nill %r10,0x007f sll %r10,2 - je .Lsysc_return + je .Lpgm_return lgf %r1,0(%r10,%r1) # load address of handler routine lgr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # branch to interrupt-handler - j .Lsysc_return +.Lpgm_return: + LOCKDEP_SYS_EXIT + tm __PT_PSW+1(%r11),0x01 # returning to user ? + jno .Lsysc_restore + j .Lsysc_tif # # PER event in supervisor state, must be kprobes @@ -457,7 +515,7 @@ ENTRY(pgm_check_handler) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_per_trap - j .Lsysc_return + j .Lpgm_return # # single stepped system call @@ -478,15 +536,9 @@ ENTRY(io_int_handler) stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call + larl %r13,cleanup_critical lmg %r8,%r9,__LC_IO_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,2 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT - tmhh %r8,0x0001 # interrupting from user? - jz .Lio_skip - UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER - LAST_BREAK %r14 -.Lio_skip: + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -582,6 +634,8 @@ ENTRY(io_int_handler) jo .Lio_sigpending tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME jo .Lio_notify_resume + tm __LC_CPU_FLAGS+7,_CIF_FPU + jo .Lio_vxrs tm __LC_CPU_FLAGS+7,_CIF_ASCE jo .Lio_uaccess j .Lio_return # beware of critical section cleanup @@ -604,6 +658,13 @@ ENTRY(io_int_handler) j .Lio_return # +# CIF_FPU is set, restore floating-point controls and floating-point registers. +# +.Lio_vxrs: + larl %r14,.Lio_return + jg load_fpu_regs + +# # _TIF_NEED_RESCHED is set, call schedule # .Lio_reschedule: @@ -647,15 +708,9 @@ ENTRY(ext_int_handler) stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call + larl %r13,cleanup_critical lmg %r8,%r9,__LC_EXT_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,3 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT - tmhh %r8,0x0001 # interrupting from user ? 
- jz .Lext_skip - UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER - LAST_BREAK %r14 -.Lext_skip: + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -685,6 +740,122 @@ ENTRY(psw_idle) br %r14 .Lpsw_idle_end: +/* Store floating-point controls and floating-point or vector extension + * registers instead. A critical section cleanup assures that the registers + * are stored even if interrupted for some other work. The register %r2 + * designates a struct fpu to store register contents. If the specified + * structure does not contain a register save area, the register store is + * omitted (see also comments in arch_dup_task_struct()). + * + * The CIF_FPU flag is set in any case. The CIF_FPU triggers a lazy restore + * of the register contents at system call or io return. + */ +ENTRY(save_fpu_regs) + lg %r2,__LC_CURRENT + aghi %r2,__TASK_thread + tm __LC_CPU_FLAGS+7,_CIF_FPU + bor %r14 + stfpc __THREAD_FPU_fpc(%r2) +.Lsave_fpu_regs_fpc_end: + lg %r3,__THREAD_FPU_regs(%r2) + ltgr %r3,%r3 + jz .Lsave_fpu_regs_done # no save area -> set CIF_FPU + tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX + jz .Lsave_fpu_regs_fp # no -> store FP regs +.Lsave_fpu_regs_vx_low: + VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) +.Lsave_fpu_regs_vx_high: + VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3) + j .Lsave_fpu_regs_done # -> set CIF_FPU flag +.Lsave_fpu_regs_fp: + std 0,0(%r3) + std 1,8(%r3) + std 2,16(%r3) + std 3,24(%r3) + std 4,32(%r3) + std 5,40(%r3) + std 6,48(%r3) + std 7,56(%r3) + std 8,64(%r3) + std 9,72(%r3) + std 10,80(%r3) + std 11,88(%r3) + std 12,96(%r3) + std 13,104(%r3) + std 14,112(%r3) + std 15,120(%r3) +.Lsave_fpu_regs_done: + oi __LC_CPU_FLAGS+7,_CIF_FPU + br %r14 +.Lsave_fpu_regs_end: + +/* Load floating-point controls and floating-point or vector extension + * registers. A critical section cleanup assures that the register contents + * are loaded even if interrupted for some other work. Depending on the saved + * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared. + * + * There are special calling conventions to fit into sysc and io return work: + * %r15: <kernel stack> + * The function requires: + * %r4 and __SF_EMPTY+32(%r15) + */ +load_fpu_regs: + lg %r4,__LC_CURRENT + aghi %r4,__TASK_thread + tm __LC_CPU_FLAGS+7,_CIF_FPU + bnor %r14 + lfpc __THREAD_FPU_fpc(%r4) + stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 + tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? 
+ lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area + jz .Lload_fpu_regs_fp_ctl # -> no VX, load FP regs +.Lload_fpu_regs_vx_ctl: + tm __SF_EMPTY+32+5(%r15),2 # test VX control + jo .Lload_fpu_regs_vx + oi __SF_EMPTY+32+5(%r15),2 # set VX control + lctlg %c0,%c0,__SF_EMPTY+32(%r15) +.Lload_fpu_regs_vx: + VLM %v0,%v15,0,%r4 +.Lload_fpu_regs_vx_high: + VLM %v16,%v31,256,%r4 + j .Lload_fpu_regs_done +.Lload_fpu_regs_fp_ctl: + tm __SF_EMPTY+32+5(%r15),2 # test VX control + jz .Lload_fpu_regs_fp + ni __SF_EMPTY+32+5(%r15),253 # clear VX control + lctlg %c0,%c0,__SF_EMPTY+32(%r15) +.Lload_fpu_regs_fp: + ld 0,0(%r4) + ld 1,8(%r4) + ld 2,16(%r4) + ld 3,24(%r4) + ld 4,32(%r4) + ld 5,40(%r4) + ld 6,48(%r4) + ld 7,56(%r4) + ld 8,64(%r4) + ld 9,72(%r4) + ld 10,80(%r4) + ld 11,88(%r4) + ld 12,96(%r4) + ld 13,104(%r4) + ld 14,112(%r4) + ld 15,120(%r4) +.Lload_fpu_regs_done: + ni __LC_CPU_FLAGS+7,255-_CIF_FPU + br %r14 +.Lload_fpu_regs_end: + +/* Test and set the vector enablement control in CR0.46 */ +ENTRY(__ctl_set_vx) + stctg %c0,%c0,__SF_EMPTY(%r15) + tm __SF_EMPTY+5(%r15),2 + bor %r14 + oi __SF_EMPTY+5(%r15),2 + lctlg %c0,%c0,__SF_EMPTY(%r15) + br %r14 +.L__ctl_set_vx_end: + .L__critical_end: /* @@ -697,9 +868,8 @@ ENTRY(mcck_int_handler) lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs lg %r10,__LC_LAST_BREAK lg %r12,__LC_THREAD_INFO - larl %r13,system_call + larl %r13,cleanup_critical lmg %r8,%r9,__LC_MCK_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,4 tm __LC_MCCK_CODE,0x80 # system damage? jo .Lmcck_panic # yes -> rest of mcck code invalid lghi %r14,__LC_CPU_TIMER_SAVE_AREA @@ -720,11 +890,7 @@ ENTRY(mcck_int_handler) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? jno .Lmcck_panic # no -> skip cleanup critical - SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT - tm %r8,0x0001 # interrupting from user ? 
- jz .Lmcck_skip - UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER - LAST_BREAK %r14 + SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER .Lmcck_skip: lghi %r14,__LC_GPREGS_SAVE_AREA+64 stmg %r0,%r7,__PT_R0(%r11) @@ -759,12 +925,8 @@ ENTRY(mcck_int_handler) lpswe __LC_RETURN_MCCK_PSW .Lmcck_panic: - lg %r14,__LC_PANIC_STACK - slgr %r14,%r15 - srag %r14,%r14,PAGE_SHIFT - jz 0f lg %r15,__LC_PANIC_STACK -0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j .Lmcck_skip # @@ -814,20 +976,13 @@ stack_overflow: jg kernel_stack_overflow #endif - .align 8 -.Lcleanup_table: - .quad system_call - .quad .Lsysc_do_svc - .quad .Lsysc_tif - .quad .Lsysc_restore - .quad .Lsysc_done - .quad .Lio_tif - .quad .Lio_restore - .quad .Lio_done - .quad psw_idle - .quad .Lpsw_idle_end - cleanup_critical: +#if IS_ENABLED(CONFIG_KVM) + clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap + jl 0f + clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done + jl .Lcleanup_sie +#endif clg %r9,BASED(.Lcleanup_table) # system_call jl 0f clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc @@ -848,8 +1003,54 @@ cleanup_critical: jl 0f clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end jl .Lcleanup_idle + clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs + jl 0f + clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end + jl .Lcleanup_save_fpu_regs + clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs + jl 0f + clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end + jl .Lcleanup_load_fpu_regs + clg %r9,BASED(.Lcleanup_table+112) # __ctl_set_vx + jl 0f + clg %r9,BASED(.Lcleanup_table+120) # .L__ctl_set_vx_end + jl .Lcleanup___ctl_set_vx 0: br %r14 + .align 8 +.Lcleanup_table: + .quad system_call + .quad .Lsysc_do_svc + .quad .Lsysc_tif + .quad .Lsysc_restore + .quad .Lsysc_done + .quad .Lio_tif + .quad .Lio_restore + .quad .Lio_done + .quad psw_idle + .quad .Lpsw_idle_end + .quad save_fpu_regs + .quad .Lsave_fpu_regs_end + .quad load_fpu_regs + .quad .Lload_fpu_regs_end + .quad __ctl_set_vx + .quad .L__ctl_set_vx_end + +#if IS_ENABLED(CONFIG_KVM) +.Lcleanup_table_sie: + .quad .Lsie_gmap + .quad .Lsie_done + +.Lcleanup_sie: + lg %r9,__SF_EMPTY(%r15) # get control block pointer + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP + jz 0f + .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id +0: ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce + larl %r9,sie_exit # skip forward to sie_exit + br %r14 +#endif .Lcleanup_system_call: # check if stpt has been executed @@ -910,7 +1111,7 @@ cleanup_critical: .quad system_call .quad .Lsysc_stmg .quad .Lsysc_per - .quad .Lsysc_vtime+18 + .quad .Lsysc_vtime+36 .quad .Lsysc_vtime+42 .Lcleanup_sysc_tif: @@ -976,6 +1177,145 @@ cleanup_critical: .Lcleanup_idle_insn: .quad .Lpsw_idle_lpsw +.Lcleanup_save_fpu_regs: + tm __LC_CPU_FLAGS+7,_CIF_FPU + bor %r14 + clg %r9,BASED(.Lcleanup_save_fpu_regs_done) + jhe 5f + clg %r9,BASED(.Lcleanup_save_fpu_regs_fp) + jhe 4f + clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_high) + jhe 3f + clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_low) + jhe 2f + clg %r9,BASED(.Lcleanup_save_fpu_fpc_end) + jhe 1f + lg %r2,__LC_CURRENT +0: # Store floating-point controls + stfpc __THREAD_FPU_fpc(%r2) +1: # Load register save area and check if VX is active + lg %r3,__THREAD_FPU_regs(%r2) + ltgr %r3,%r3 + jz 5f # no save area -> set CIF_FPU + tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX + jz 4f # no VX -> store FP regs +2: # Store vector registers (V0-V15) + VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) +3: # Store vector 
registers (V16-V31) + VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3) + j 5f # -> done, set CIF_FPU flag +4: # Store floating-point registers + std 0,0(%r3) + std 1,8(%r3) + std 2,16(%r3) + std 3,24(%r3) + std 4,32(%r3) + std 5,40(%r3) + std 6,48(%r3) + std 7,56(%r3) + std 8,64(%r3) + std 9,72(%r3) + std 10,80(%r3) + std 11,88(%r3) + std 12,96(%r3) + std 13,104(%r3) + std 14,112(%r3) + std 15,120(%r3) +5: # Set CIF_FPU flag + oi __LC_CPU_FLAGS+7,_CIF_FPU + lg %r9,48(%r11) # return from save_fpu_regs + br %r14 +.Lcleanup_save_fpu_fpc_end: + .quad .Lsave_fpu_regs_fpc_end +.Lcleanup_save_fpu_regs_vx_low: + .quad .Lsave_fpu_regs_vx_low +.Lcleanup_save_fpu_regs_vx_high: + .quad .Lsave_fpu_regs_vx_high +.Lcleanup_save_fpu_regs_fp: + .quad .Lsave_fpu_regs_fp +.Lcleanup_save_fpu_regs_done: + .quad .Lsave_fpu_regs_done + +.Lcleanup_load_fpu_regs: + tm __LC_CPU_FLAGS+7,_CIF_FPU + bnor %r14 + clg %r9,BASED(.Lcleanup_load_fpu_regs_done) + jhe 1f + clg %r9,BASED(.Lcleanup_load_fpu_regs_fp) + jhe 2f + clg %r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl) + jhe 3f + clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high) + jhe 4f + clg %r9,BASED(.Lcleanup_load_fpu_regs_vx) + jhe 5f + clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl) + jhe 6f + lg %r4,__LC_CURRENT + lfpc __THREAD_FPU_fpc(%r4) + tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? + lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area + jz 3f # -> no VX, load FP regs +6: # Set VX-enablement control + stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 + tm __SF_EMPTY+32+5(%r15),2 # test VX control + jo 5f + oi __SF_EMPTY+32+5(%r15),2 # set VX control + lctlg %c0,%c0,__SF_EMPTY+32(%r15) +5: # Load V0 ..V15 registers + VLM %v0,%v15,0,%r4 +4: # Load V16..V31 registers + VLM %v16,%v31,256,%r4 + j 1f +3: # Clear VX-enablement control for FP + stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 + tm __SF_EMPTY+32+5(%r15),2 # test VX control + jz 2f + ni __SF_EMPTY+32+5(%r15),253 # clear VX control + lctlg %c0,%c0,__SF_EMPTY+32(%r15) +2: # Load floating-point registers + ld 0,0(%r4) + ld 1,8(%r4) + ld 2,16(%r4) + ld 3,24(%r4) + ld 4,32(%r4) + ld 5,40(%r4) + ld 6,48(%r4) + ld 7,56(%r4) + ld 8,64(%r4) + ld 9,72(%r4) + ld 10,80(%r4) + ld 11,88(%r4) + ld 12,96(%r4) + ld 13,104(%r4) + ld 14,112(%r4) + ld 15,120(%r4) +1: # Clear CIF_FPU bit + ni __LC_CPU_FLAGS+7,255-_CIF_FPU + lg %r9,48(%r11) # return from load_fpu_regs + br %r14 +.Lcleanup_load_fpu_regs_vx_ctl: + .quad .Lload_fpu_regs_vx_ctl +.Lcleanup_load_fpu_regs_vx: + .quad .Lload_fpu_regs_vx +.Lcleanup_load_fpu_regs_vx_high: + .quad .Lload_fpu_regs_vx_high +.Lcleanup_load_fpu_regs_fp_ctl: + .quad .Lload_fpu_regs_fp_ctl +.Lcleanup_load_fpu_regs_fp: + .quad .Lload_fpu_regs_fp +.Lcleanup_load_fpu_regs_done: + .quad .Lload_fpu_regs_done + +.Lcleanup___ctl_set_vx: + stctg %c0,%c0,__SF_EMPTY(%r15) + tm __SF_EMPTY+5(%r15),2 + bor %r14 + oi __SF_EMPTY+5(%r15),2 + lctlg %c0,%c0,__SF_EMPTY(%r15) + lg %r9,48(%r11) # return from __ctl_set_vx + br %r14 + /* * Integer constants */ @@ -984,62 +1324,11 @@ cleanup_critical: .quad .L__critical_start .Lcritical_length: .quad .L__critical_end - .L__critical_start - - #if IS_ENABLED(CONFIG_KVM) -/* - * sie64a calling convention: - * %r2 pointer to sie control block - * %r3 guest register save area - */ -ENTRY(sie64a) - stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers - stg %r2,__SF_EMPTY(%r15) # save control block pointer - stg %r3,__SF_EMPTY+8(%r15) # save guest register save area - xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason - lmg %r0,%r13,0(%r3) # load guest gprs 0-13 
- lg %r14,__LC_GMAP # get gmap pointer - ltgr %r14,%r14 - jz .Lsie_gmap - lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce -.Lsie_gmap: - lg %r14,__SF_EMPTY(%r15) # get control block pointer - oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now - tm __SIE_PROG20+3(%r14),3 # last exit... - jnz .Lsie_done - LPP __SF_EMPTY(%r15) # set guest id - sie 0(%r14) -.Lsie_done: - LPP __SF_EMPTY+16(%r15) # set host id - ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE - lctlg %c1,%c1,__LC_USER_ASCE # load primary asce -# some program checks are suppressing. C code (e.g. do_protection_exception) -# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other -# instructions between sie64a and .Lsie_done should not cause program -# interrupts. So lets use a nop (47 00 00 00) as a landing pad. -# See also HANDLE_SIE_INTERCEPT -.Lrewind_pad: - nop 0 - .globl sie_exit -sie_exit: - lg %r14,__SF_EMPTY+8(%r15) # load guest register save area - stmg %r0,%r13,0(%r14) # save guest gprs 0-13 - lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers - lg %r2,__SF_EMPTY+24(%r15) # return exit reason code - br %r14 -.Lsie_fault: - lghi %r14,-EFAULT - stg %r14,__SF_EMPTY+24(%r15) # set exit reason code - j sie_exit - - .align 8 -.Lsie_critical: +.Lsie_critical_start: .quad .Lsie_gmap .Lsie_critical_length: .quad .Lsie_done - .Lsie_gmap - - EX_TABLE(.Lrewind_pad,.Lsie_fault) - EX_TABLE(sie_exit,.Lsie_fault) #endif .section .rodata, "a" diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 59b7c6470567..1255c6c5353e 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -370,6 +370,7 @@ ENTRY(startup_kdump) xc 0x200(256),0x200 # partially clear lowcore xc 0x300(256),0x300 xc 0xe00(256),0xe00 + lctlg %c0,%c15,0x200(%r0) # initialize control registers stck __LC_LAST_UPDATE_CLOCK spt 6f-.LPG0(%r13) mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) @@ -413,9 +414,9 @@ ENTRY(startup_kdump) # followed by the facility words. 
#if defined(CONFIG_MARCH_Z13) - .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 + .long 2, 0xc100eff2, 0xf46cc800 #elif defined(CONFIG_MARCH_ZEC12) - .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 + .long 2, 0xc100eff2, 0xf46cc800 #elif defined(CONFIG_MARCH_Z196) .long 2, 0xc100eff2, 0xf46c0000 #elif defined(CONFIG_MARCH_Z10) diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index a90299600483..c9dac2139f59 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -44,12 +44,9 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected, unsigned char *ipn = (unsigned char *)new; pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); - pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", - ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); - pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", - ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); - pr_emerg("New: %02x %02x %02x %02x %02x %02x\n", - ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]); + pr_emerg("Found: %6ph\n", ipc); + pr_emerg("Expected: %6ph\n", ipe); + pr_emerg("New: %6ph\n", ipn); panic("Corrupted kernel text"); } diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 505c17c0ae1a..0ae6f8e74840 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -21,6 +21,8 @@ #include <asm/nmi.h> #include <asm/crw.h> #include <asm/switch_to.h> +#include <asm/fpu-internal.h> +#include <asm/ctl_reg.h> struct mcck_struct { int kill_task; @@ -129,26 +131,30 @@ static int notrace s390_revalidate_registers(struct mci *mci) } else asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); - asm volatile( - " ld 0,0(%0)\n" - " ld 1,8(%0)\n" - " ld 2,16(%0)\n" - " ld 3,24(%0)\n" - " ld 4,32(%0)\n" - " ld 5,40(%0)\n" - " ld 6,48(%0)\n" - " ld 7,56(%0)\n" - " ld 8,64(%0)\n" - " ld 9,72(%0)\n" - " ld 10,80(%0)\n" - " ld 11,88(%0)\n" - " ld 12,96(%0)\n" - " ld 13,104(%0)\n" - " ld 14,112(%0)\n" - " ld 15,120(%0)\n" - : : "a" (fpt_save_area)); - /* Revalidate vector registers */ - if (MACHINE_HAS_VX && current->thread.vxrs) { + if (!MACHINE_HAS_VX) { + /* Revalidate floating point registers */ + asm volatile( + " ld 0,0(%0)\n" + " ld 1,8(%0)\n" + " ld 2,16(%0)\n" + " ld 3,24(%0)\n" + " ld 4,32(%0)\n" + " ld 5,40(%0)\n" + " ld 6,48(%0)\n" + " ld 7,56(%0)\n" + " ld 8,64(%0)\n" + " ld 9,72(%0)\n" + " ld 10,80(%0)\n" + " ld 11,88(%0)\n" + " ld 12,96(%0)\n" + " ld 13,104(%0)\n" + " ld 14,112(%0)\n" + " ld 15,120(%0)\n" + : : "a" (fpt_save_area)); + } else { + /* Revalidate vector registers */ + union ctlreg0 cr0; + if (!mci->vr) { /* * Vector registers can't be restored and therefore @@ -156,8 +162,16 @@ static int notrace s390_revalidate_registers(struct mci *mci) */ kill_task = 1; } - restore_vx_regs((__vector128 *) - S390_lowcore.vector_save_area_addr); + cr0.val = S390_lowcore.cregs_save_area[0]; + cr0.afp = cr0.vx = 1; + __ctl_load(cr0.val, 0, 0); + asm volatile( + " la 1,%0\n" + " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */ + " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */ + : : "Q" (*(struct vx_array *) + &S390_lowcore.vector_save_area) : "1"); + __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0); } /* Revalidate access registers */ asm volatile( @@ -349,4 +363,4 @@ static int __init machine_check_init(void) ctl_set_bit(14, 24); /* enable warning MCH */ return 0; } -arch_initcall(machine_check_init); +early_initcall(machine_check_init); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index afe05bfb7e00..b973972f6ba5 100644 
--- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1019,12 +1019,9 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) break; } - /* The host-program-parameter (hpp) contains the sie control - * block that is set by sie64a() in entry64.S. Check if hpp - * refers to a valid control block and set sde_regs flags - * accordingly. This would allow to use hpp values for other - * purposes too. - * For now, simply use a non-zero value as guest indicator. + /* The host-program-parameter (hpp) contains the pid of + * the CPU thread as set by sie64a() in entry.S. + * If non-zero assume a guest sample. */ if (sfr->basic.hpp) sde_regs->in_guest = 1; diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index dc5edc29b73a..f2dac9f0799d 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -81,8 +81,38 @@ void release_thread(struct task_struct *dead_task) void arch_release_task_struct(struct task_struct *tsk) { - if (tsk->thread.vxrs) - kfree(tsk->thread.vxrs); + /* Free either the floating-point or the vector register save area */ + kfree(tsk->thread.fpu.regs); +} + +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + *dst = *src; + + /* Set up a new floating-point register save area */ + dst->thread.fpu.fpc = 0; + dst->thread.fpu.flags = 0; /* Always start with VX disabled */ + dst->thread.fpu.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, + GFP_KERNEL|__GFP_REPEAT); + if (!dst->thread.fpu.fprs) + return -ENOMEM; + + /* + * Save the floating-point or vector register state of the current + * task. The state is not saved for early kernel threads, for example, + * the init_task, which do not have an allocated save area. + * The CIF_FPU flag is set in any case to lazy clear or restore a saved + * state when switching to a different task or returning to user space. + */ + save_fpu_regs(); + dst->thread.fpu.fpc = current->thread.fpu.fpc; + if (is_vx_task(current)) + convert_vx_to_fp(dst->thread.fpu.fprs, + current->thread.fpu.vxrs); + else + memcpy(dst->thread.fpu.fprs, current->thread.fpu.fprs, + sizeof(freg_t) * __NUM_FPRS); + return 0; } int copy_thread(unsigned long clone_flags, unsigned long new_stackp, @@ -142,11 +172,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, p->thread.ri_signum = 0; frame->childregs.psw.mask &= ~PSW_MASK_RI; - /* Save the fpu registers to new thread structure. */ - save_fp_ctl(&p->thread.fp_regs.fpc); - save_fp_regs(p->thread.fp_regs.fprs); - p->thread.fp_regs.pad = 0; - p->thread.vxrs = NULL; /* Set a new TLS ? 
*/ if (clone_flags & CLONE_SETTLS) { unsigned long tls = frame->childregs.gprs[6]; @@ -162,8 +187,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, asmlinkage void execve_tail(void) { - current->thread.fp_regs.fpc = 0; - asm volatile("sfpc %0,%0" : : "d" (0)); + current->thread.fpu.fpc = 0; + asm volatile("sfpc %0" : : "d" (0)); } /* @@ -171,8 +196,15 @@ asmlinkage void execve_tail(void) */ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) { - save_fp_ctl(&fpregs->fpc); - save_fp_regs(fpregs->fprs); + save_fpu_regs(); + fpregs->fpc = current->thread.fpu.fpc; + fpregs->pad = 0; + if (is_vx_task(current)) + convert_vx_to_fp((freg_t *)&fpregs->fprs, + current->thread.fpu.vxrs); + else + memcpy(&fpregs->fprs, current->thread.fpu.fprs, + sizeof(fpregs->fprs)); return 1; } EXPORT_SYMBOL(dump_fpu); diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index dc488e13b7e3..e6e077ae3990 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -41,6 +41,15 @@ void cpu_init(void) } /* + * cpu_have_feature - Test CPU features on module initialization + */ +int cpu_have_feature(unsigned int num) +{ + return elf_hwcap & (1UL << num); +} +EXPORT_SYMBOL(cpu_have_feature); + +/* * show_cpuinfo - Get information on one CPU for use by procfs. */ static int show_cpuinfo(struct seq_file *m, void *v) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index d363c9c322a1..8b1c8e33f184 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -45,39 +45,27 @@ void update_cr_regs(struct task_struct *task) struct per_regs old, new; /* Take care of the enable/disable of transactional execution. */ - if (MACHINE_HAS_TE || MACHINE_HAS_VX) { + if (MACHINE_HAS_TE) { unsigned long cr, cr_new; __ctl_store(cr, 0, 0); - cr_new = cr; - if (MACHINE_HAS_TE) { - /* Set or clear transaction execution TXC bit 8. */ - cr_new |= (1UL << 55); - if (task->thread.per_flags & PER_FLAG_NO_TE) - cr_new &= ~(1UL << 55); - } - if (MACHINE_HAS_VX) { - /* Enable/disable of vector extension */ - cr_new &= ~(1UL << 17); - if (task->thread.vxrs) - cr_new |= (1UL << 17); - } + /* Set or clear transaction execution TXC bit 8. */ + cr_new = cr | (1UL << 55); + if (task->thread.per_flags & PER_FLAG_NO_TE) + cr_new &= ~(1UL << 55); if (cr_new != cr) __ctl_load(cr_new, 0, 0); - if (MACHINE_HAS_TE) { - /* Set/clear transaction execution TDC bits 62/63. */ - __ctl_store(cr, 2, 2); - cr_new = cr & ~3UL; - if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { - if (task->thread.per_flags & - PER_FLAG_TE_ABORT_RAND_TEND) - cr_new |= 1UL; - else - cr_new |= 2UL; - } - if (cr_new != cr) - __ctl_load(cr_new, 2, 2); + /* Set or clear transaction execution TDC bits 62 and 63. */ + __ctl_store(cr, 2, 2); + cr_new = cr & ~3UL; + if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { + if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) + cr_new |= 1UL; + else + cr_new |= 2UL; } + if (cr_new != cr) + __ctl_load(cr_new, 2, 2); } /* Copy user specified PER registers */ new.control = thread->per_user.control; @@ -242,21 +230,21 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) /* * floating point control reg. is in the thread structure */ - tmp = child->thread.fp_regs.fpc; + tmp = child->thread.fpu.fpc; tmp <<= BITS_PER_LONG - 32; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* - * floating point regs. are either in child->thread.fp_regs - * or the child->thread.vxrs array + * floating point regs. 
are either in child->thread.fpu + * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; - if (child->thread.vxrs) + if (is_vx_task(child)) tmp = *(addr_t *) - ((addr_t) child->thread.vxrs + 2*offset); + ((addr_t) child->thread.fpu.vxrs + 2*offset); else tmp = *(addr_t *) - ((addr_t) &child->thread.fp_regs.fprs + offset); + ((addr_t) &child->thread.fpu.fprs + offset); } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* @@ -387,20 +375,20 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) if ((unsigned int) data != 0 || test_fp_ctl(data >> (BITS_PER_LONG - 32))) return -EINVAL; - child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32); + child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* - * floating point regs. are either in child->thread.fp_regs - * or the child->thread.vxrs array + * floating point regs. are either in child->thread.fpu + * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; - if (child->thread.vxrs) + if (is_vx_task(child)) *(addr_t *)((addr_t) - child->thread.vxrs + 2*offset) = data; + child->thread.fpu.vxrs + 2*offset) = data; else *(addr_t *)((addr_t) - &child->thread.fp_regs.fprs + offset) = data; + &child->thread.fpu.fprs + offset) = data; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* @@ -621,20 +609,20 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) /* * floating point control reg. is in the thread structure */ - tmp = child->thread.fp_regs.fpc; + tmp = child->thread.fpu.fpc; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* - * floating point regs. are either in child->thread.fp_regs - * or the child->thread.vxrs array + * floating point regs. are either in child->thread.fpu + * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; - if (child->thread.vxrs) + if (is_vx_task(child)) tmp = *(__u32 *) - ((addr_t) child->thread.vxrs + 2*offset); + ((addr_t) child->thread.fpu.vxrs + 2*offset); else tmp = *(__u32 *) - ((addr_t) &child->thread.fp_regs.fprs + offset); + ((addr_t) &child->thread.fpu.fprs + offset); } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* @@ -746,20 +734,20 @@ static int __poke_user_compat(struct task_struct *child, */ if (test_fp_ctl(tmp)) return -EINVAL; - child->thread.fp_regs.fpc = data; + child->thread.fpu.fpc = data; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* - * floating point regs. are either in child->thread.fp_regs - * or the child->thread.vxrs array + * floating point regs. 
are either in child->thread.fpu + * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; - if (child->thread.vxrs) + if (is_vx_task(child)) *(__u32 *)((addr_t) - child->thread.vxrs + 2*offset) = tmp; + child->thread.fpu.vxrs + 2*offset) = tmp; else *(__u32 *)((addr_t) - &child->thread.fp_regs.fprs + offset) = tmp; + &child->thread.fpu.fprs + offset) = tmp; } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* @@ -952,18 +940,16 @@ static int s390_fpregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - if (target == current) { - save_fp_ctl(&target->thread.fp_regs.fpc); - save_fp_regs(target->thread.fp_regs.fprs); - } else if (target->thread.vxrs) { - int i; + _s390_fp_regs fp_regs; + + if (target == current) + save_fpu_regs(); + + fp_regs.fpc = target->thread.fpu.fpc; + fpregs_store(&fp_regs, &target->thread.fpu); - for (i = 0; i < __NUM_VXRS_LOW; i++) - target->thread.fp_regs.fprs[i] = - *(freg_t *)(target->thread.vxrs + i); - } return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.fp_regs, 0, -1); + &fp_regs, 0, -1); } static int s390_fpregs_set(struct task_struct *target, @@ -972,41 +958,33 @@ static int s390_fpregs_set(struct task_struct *target, const void __user *ubuf) { int rc = 0; + freg_t fprs[__NUM_FPRS]; - if (target == current) { - save_fp_ctl(&target->thread.fp_regs.fpc); - save_fp_regs(target->thread.fp_regs.fprs); - } + if (target == current) + save_fpu_regs(); /* If setting FPC, must validate it first. */ if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { - u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 }; + u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc, 0, offsetof(s390_fp_regs, fprs)); if (rc) return rc; if (ufpc[1] != 0 || test_fp_ctl(ufpc[0])) return -EINVAL; - target->thread.fp_regs.fpc = ufpc[0]; + target->thread.fpu.fpc = ufpc[0]; } if (rc == 0 && count > 0) rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - target->thread.fp_regs.fprs, - offsetof(s390_fp_regs, fprs), -1); - - if (rc == 0) { - if (target == current) { - restore_fp_ctl(&target->thread.fp_regs.fpc); - restore_fp_regs(target->thread.fp_regs.fprs); - } else if (target->thread.vxrs) { - int i; - - for (i = 0; i < __NUM_VXRS_LOW; i++) - *(freg_t *)(target->thread.vxrs + i) = - target->thread.fp_regs.fprs[i]; - } - } + fprs, offsetof(s390_fp_regs, fprs), -1); + if (rc) + return rc; + + if (is_vx_task(target)) + convert_fp_to_vx(target->thread.fpu.vxrs, fprs); + else + memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); return rc; } @@ -1069,11 +1047,11 @@ static int s390_vxrs_low_get(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (target->thread.vxrs) { + if (is_vx_task(target)) { if (target == current) - save_vx_regs(target->thread.vxrs); + save_fpu_regs(); for (i = 0; i < __NUM_VXRS_LOW; i++) - vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1); + vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); } else memset(vxrs, 0, sizeof(vxrs)); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); @@ -1089,20 +1067,17 @@ static int s390_vxrs_low_set(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (!target->thread.vxrs) { + if (!is_vx_task(target)) { rc = alloc_vector_registers(target); if (rc) return rc; } else if (target == current) - save_vx_regs(target->thread.vxrs); + save_fpu_regs(); rc = user_regset_copyin(&pos, &count, 
&kbuf, &ubuf, vxrs, 0, -1); - if (rc == 0) { + if (rc == 0) for (i = 0; i < __NUM_VXRS_LOW; i++) - *((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i]; - if (target == current) - restore_vx_regs(target->thread.vxrs); - } + *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i]; return rc; } @@ -1116,10 +1091,10 @@ static int s390_vxrs_high_get(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (target->thread.vxrs) { + if (is_vx_task(target)) { if (target == current) - save_vx_regs(target->thread.vxrs); - memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW, + save_fpu_regs(); + memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs)); } else memset(vxrs, 0, sizeof(vxrs)); @@ -1135,18 +1110,15 @@ static int s390_vxrs_high_set(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (!target->thread.vxrs) { + if (!is_vx_task(target)) { rc = alloc_vector_registers(target); if (rc) return rc; } else if (target == current) - save_vx_regs(target->thread.vxrs); + save_fpu_regs(); rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - target->thread.vxrs + __NUM_VXRS_LOW, 0, -1); - if (rc == 0 && target == current) - restore_vx_regs(target->thread.vxrs); - + target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1); return rc; } diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 9f60467938d1..5090d3dad10b 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -1,5 +1,6 @@ #include <linux/module.h> #include <linux/kvm_host.h> +#include <asm/fpu-internal.h> #include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER @@ -8,6 +9,8 @@ EXPORT_SYMBOL(_mcount); #if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL(sie64a); EXPORT_SYMBOL(sie_exit); +EXPORT_SYMBOL(save_fpu_regs); +EXPORT_SYMBOL(__ctl_set_vx); #endif EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S deleted file mode 100644 index 43c3169ea49c..000000000000 --- a/arch/s390/kernel/sclp.S +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Mini SCLP driver. - * - * Copyright IBM Corp. 2004, 2009 - * - * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>, - * Heiko Carstens <heiko.carstens@de.ibm.com>, - * - */ - -#include <linux/linkage.h> -#include <asm/irq.h> - -LC_EXT_NEW_PSW = 0x58 # addr of ext int handler -LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit -LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter -LC_EXT_INT_CODE = 0x86 # addr of ext int code -LC_AR_MODE_ID = 0xa3 - -# -# Subroutine which waits synchronously until either an external interruption -# or a timeout occurs. 
-# -# Parameters: -# R2 = 0 for no timeout, non-zero for timeout in (approximated) seconds -# -# Returns: -# R2 = 0 on interrupt, 2 on timeout -# R3 = external interruption parameter if R2=0 -# - -_sclp_wait_int: - stm %r6,%r15,24(%r15) # save registers - basr %r13,0 # get base register -.LbaseS1: - ahi %r15,-96 # create stack frame - la %r8,LC_EXT_NEW_PSW # register int handler - la %r9,.LextpswS1-.LbaseS1(%r13) - tm LC_AR_MODE_ID,1 - jno .Lesa1 - la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit - la %r9,.LextpswS1_64-.LbaseS1(%r13) -.Lesa1: - mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) - mvc 0(16,%r8),0(%r9) - epsw %r6,%r7 # set current addressing mode - nill %r6,0x1 # in new psw (31 or 64 bit mode) - nilh %r7,0x8000 - stm %r6,%r7,0(%r8) - lhi %r6,0x0200 # cr mask for ext int (cr0.54) - ltr %r2,%r2 - jz .LsetctS1 - ahi %r6,0x0800 # cr mask for clock int (cr0.52) - stck .LtimeS1-.LbaseS1(%r13) # initiate timeout - al %r2,.LtimeS1-.LbaseS1(%r13) - st %r2,.LtimeS1-.LbaseS1(%r13) - sckc .LtimeS1-.LbaseS1(%r13) - -.LsetctS1: - stctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # enable required interrupts - l %r0,.LctlS1-.LbaseS1(%r13) - lhi %r1,~(0x200 | 0x800) # clear old values - nr %r1,%r0 - or %r1,%r6 # set new value - st %r1,.LctlS1-.LbaseS1(%r13) - lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) - st %r0,.LctlS1-.LbaseS1(%r13) - lhi %r2,2 # return code for timeout -.LloopS1: - lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt -.LwaitS1: - lh %r7,LC_EXT_INT_CODE - chi %r7,EXT_IRQ_CLK_COMP # timeout? - je .LtimeoutS1 - chi %r7,EXT_IRQ_SERVICE_SIG # service int? - jne .LloopS1 - sr %r2,%r2 - l %r3,LC_EXT_INT_PARAM -.LtimeoutS1: - lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting - # restore old handler - mvc 0(16,%r8),.LoldpswS1-.LbaseS1(%r13) - lm %r6,%r15,120(%r15) # restore registers - br %r14 # return to caller - - .align 8 -.LoldpswS1: - .long 0, 0, 0, 0 # old ext int PSW -.LextpswS1: - .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int -.LextpswS1_64: - .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit -.LwaitpswS1: - .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int -.LtimeS1: - .quad 0 # current time -.LctlS1: - .long 0 # CT0 contents - -# -# Subroutine to synchronously issue a service call. -# -# Parameters: -# R2 = command word -# R3 = sccb address -# -# Returns: -# R2 = 0 on success, 1 on failure -# R3 = sccb response code if R2 = 0 -# - -_sclp_servc: - stm %r6,%r15,24(%r15) # save registers - ahi %r15,-96 # create stack frame - lr %r6,%r2 # save command word - lr %r7,%r3 # save sccb address -.LretryS2: - lhi %r2,1 # error return code - .insn rre,0xb2200000,%r6,%r7 # servc - brc 1,.LendS2 # exit if not operational - brc 8,.LnotbusyS2 # go on if not busy - sr %r2,%r2 # wait until no longer busy - bras %r14,_sclp_wait_int - j .LretryS2 # retry -.LnotbusyS2: - sr %r2,%r2 # wait until result - bras %r14,_sclp_wait_int - sr %r2,%r2 - lh %r3,6(%r7) -.LendS2: - lm %r6,%r15,120(%r15) # restore registers - br %r14 - -# -# Subroutine to set up the SCLP interface. -# -# Parameters: -# R2 = 0 to activate, non-zero to deactivate -# -# Returns: -# R2 = 0 on success, non-zero on failure -# - -_sclp_setup: - stm %r6,%r15,24(%r15) # save registers - ahi %r15,-96 # create stack frame - basr %r13,0 # get base register -.LbaseS3: - l %r6,.LsccbS0-.LbaseS3(%r13) # prepare init mask sccb - mvc 0(.LinitendS3-.LinitsccbS3,%r6),.LinitsccbS3-.LbaseS3(%r13) - ltr %r2,%r2 # initialization? 
- jz .LdoinitS3 # go ahead - # clear masks - xc .LinitmaskS3-.LinitsccbS3(8,%r6),.LinitmaskS3-.LinitsccbS3(%r6) -.LdoinitS3: - l %r2,.LwritemaskS3-.LbaseS3(%r13)# get command word - lr %r3,%r6 # get sccb address - bras %r14,_sclp_servc # issue service call - ltr %r2,%r2 # servc successful? - jnz .LerrorS3 - chi %r3,0x20 # write mask successful? - jne .LerrorS3 - # check masks - la %r2,.LinitmaskS3-.LinitsccbS3(%r6) - l %r1,0(%r2) # receive mask ok? - n %r1,12(%r2) - cl %r1,0(%r2) - jne .LerrorS3 - l %r1,4(%r2) # send mask ok? - n %r1,8(%r2) - cl %r1,4(%r2) - sr %r2,%r2 - je .LendS3 -.LerrorS3: - lhi %r2,1 # error return code -.LendS3: - lm %r6,%r15,120(%r15) # restore registers - br %r14 -.LwritemaskS3: - .long 0x00780005 # SCLP command for write mask -.LinitsccbS3: - .word .LinitendS3-.LinitsccbS3 - .byte 0,0,0,0 - .word 0 - .word 0 - .word 4 -.LinitmaskS3: - .long 0x80000000 - .long 0x40000000 - .long 0 - .long 0 -.LinitendS3: - -# -# Subroutine which prints a given text to the SCLP console. -# -# Parameters: -# R2 = address of nil-terminated ASCII text -# -# Returns: -# R2 = 0 on success, 1 on failure -# - -_sclp_print: - stm %r6,%r15,24(%r15) # save registers - ahi %r15,-96 # create stack frame - basr %r13,0 # get base register -.LbaseS4: - l %r8,.LsccbS0-.LbaseS4(%r13) # prepare write data sccb - mvc 0(.LmtoS4-.LwritesccbS4,%r8),.LwritesccbS4-.LbaseS4(%r13) - la %r7,.LmtoS4-.LwritesccbS4(%r8) # current mto addr - sr %r0,%r0 - l %r10,.Lascebc-.LbaseS4(%r13) # address of translation table -.LinitmtoS4: - # initialize mto - mvc 0(.LmtoendS4-.LmtoS4,%r7),.LmtoS4-.LbaseS4(%r13) - lhi %r6,.LmtoendS4-.LmtoS4 # current mto length -.LloopS4: - ic %r0,0(%r2) # get character - ahi %r2,1 - ltr %r0,%r0 # end of string? - jz .LfinalizemtoS4 - chi %r0,0x0a # end of line (NL)? - jz .LfinalizemtoS4 - stc %r0,0(%r6,%r7) # copy to mto - la %r11,0(%r6,%r7) - tr 0(1,%r11),0(%r10) # translate to EBCDIC - ahi %r6,1 - j .LloopS4 -.LfinalizemtoS4: - sth %r6,0(%r7) # update mto length - lh %r9,.LmdbS4-.LwritesccbS4(%r8) # update mdb length - ar %r9,%r6 - sth %r9,.LmdbS4-.LwritesccbS4(%r8) - lh %r9,.LevbufS4-.LwritesccbS4(%r8)# update evbuf length - ar %r9,%r6 - sth %r9,.LevbufS4-.LwritesccbS4(%r8) - lh %r9,0(%r8) # update sccb length - ar %r9,%r6 - sth %r9,0(%r8) - ar %r7,%r6 # update current mto address - ltr %r0,%r0 # more characters? - jnz .LinitmtoS4 - l %r2,.LwritedataS4-.LbaseS4(%r13)# write data - lr %r3,%r8 - bras %r14,_sclp_servc - ltr %r2,%r2 # servc successful? - jnz .LendS4 - chi %r3,0x20 # write data successful? - je .LendS4 - lhi %r2,1 # error return code -.LendS4: - lm %r6,%r15,120(%r15) # restore registers - br %r14 - -# -# Function which prints a given text to the SCLP console. 
-# -# Parameters: -# R2 = address of nil-terminated ASCII text -# -# Returns: -# R2 = 0 on success, 1 on failure -# - -ENTRY(_sclp_print_early) - stm %r6,%r15,24(%r15) # save registers - ahi %r15,-96 # create stack frame - tm LC_AR_MODE_ID,1 - jno .Lesa2 - ahi %r15,-80 - stmh %r6,%r15,96(%r15) # store upper register halves -.Lesa2: - lr %r10,%r2 # save string pointer - lhi %r2,0 - bras %r14,_sclp_setup # enable console - ltr %r2,%r2 - jnz .LendS5 - lr %r2,%r10 - bras %r14,_sclp_print # print string - ltr %r2,%r2 - jnz .LendS5 - lhi %r2,1 - bras %r14,_sclp_setup # disable console -.LendS5: - tm LC_AR_MODE_ID,1 - jno .Lesa3 - lgfr %r2,%r2 # sign extend return value - lmh %r6,%r15,96(%r15) # restore upper register halves - ahi %r15,80 -.Lesa3: - lm %r6,%r15,120(%r15) # restore registers - br %r14 - -.LwritedataS4: - .long 0x00760005 # SCLP command for write data -.LwritesccbS4: - # sccb - .word .LmtoS4-.LwritesccbS4 - .byte 0 - .byte 0,0,0 - .word 0 - - # evbuf -.LevbufS4: - .word .LmtoS4-.LevbufS4 - .byte 0x02 - .byte 0 - .word 0 - -.LmdbS4: - # mdb - .word .LmtoS4-.LmdbS4 - .word 1 - .long 0xd4c4c240 - .long 1 - - # go -.LgoS4: - .word .LmtoS4-.LgoS4 - .word 1 - .long 0 - .byte 0,0,0,0,0,0,0,0 - .byte 0,0,0 - .byte 0 - .byte 0,0,0,0,0,0,0 - .byte 0 - .word 0 - .byte 0,0,0,0,0,0,0,0,0,0 - .byte 0,0,0,0,0,0,0,0 - .byte 0,0,0,0,0,0,0,0 - -.LmtoS4: - .word .LmtoendS4-.LmtoS4 - .word 4 - .word 0x1000 - .byte 0 - .byte 0,0,0 -.LmtoendS4: - - # Global constants -.LsccbS0: - .long _sclp_work_area -.Lascebc: - .long _ascebc - -.section .data,"aw",@progbits - .balign 4096 -_sclp_work_area: - .fill 4096 -.previous diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c new file mode 100644 index 000000000000..fa0bdff1d413 --- /dev/null +++ b/arch/s390/kernel/sclp.c @@ -0,0 +1,160 @@ +/* + * Copyright IBM Corp. 2015 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ +#include <linux/kernel.h> +#include <asm/ebcdic.h> +#include <asm/irq.h> +#include <asm/lowcore.h> +#include <asm/processor.h> +#include <asm/sclp.h> + +static char _sclp_work_area[4096] __aligned(PAGE_SIZE); + +static void _sclp_wait_int(void) +{ + unsigned long cr0, cr0_new, psw_mask, addr; + psw_t psw_ext_save, psw_wait; + + __ctl_store(cr0, 0, 0); + cr0_new = cr0 | 0x200; + __ctl_load(cr0_new, 0, 0); + + psw_ext_save = S390_lowcore.external_new_psw; + psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA); + S390_lowcore.external_new_psw.mask = psw_mask; + psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT; + S390_lowcore.ext_int_code = 0; + + do { + asm volatile( + " larl %[addr],0f\n" + " stg %[addr],%[psw_wait_addr]\n" + " stg %[addr],%[psw_ext_addr]\n" + " lpswe %[psw_wait]\n" + "0:\n" + : [addr] "=&d" (addr), + [psw_wait_addr] "=Q" (psw_wait.addr), + [psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr) + : [psw_wait] "Q" (psw_wait) + : "cc", "memory"); + } while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG); + + __ctl_load(cr0, 0, 0); + S390_lowcore.external_new_psw = psw_ext_save; +} + +static int _sclp_servc(unsigned int cmd, char *sccb) +{ + unsigned int cc; + + do { + asm volatile( + " .insn rre,0xb2200000,%1,%2\n" + " ipm %0\n" + : "=d" (cc) : "d" (cmd), "a" (sccb) + : "cc", "memory"); + cc >>= 28; + if (cc == 3) + return -EINVAL; + _sclp_wait_int(); + } while (cc != 0); + return (*(unsigned short *)(sccb + 6) == 0x20) ? 
0 : -EIO; +} + +static int _sclp_setup(int disable) +{ + static unsigned char init_sccb[] = { + 0x00, 0x1c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, + 0x80, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }; + unsigned int *masks; + int rc; + + memcpy(_sclp_work_area, init_sccb, 28); + masks = (unsigned int *)(_sclp_work_area + 12); + if (disable) + memset(masks, 0, 16); + /* SCLP write mask */ + rc = _sclp_servc(0x00780005, _sclp_work_area); + if (rc) + return rc; + if ((masks[0] & masks[3]) != masks[0] || + (masks[1] & masks[2]) != masks[1]) + return -EIO; + return 0; +} + +static int _sclp_print(const char *str) +{ + static unsigned char write_head[] = { + /* sccb header */ + 0x00, 0x52, /* 0 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 2 */ + /* evbuf */ + 0x00, 0x4a, /* 8 */ + 0x02, 0x00, 0x00, 0x00, /* 10 */ + /* mdb */ + 0x00, 0x44, /* 14 */ + 0x00, 0x01, /* 16 */ + 0xd4, 0xc4, 0xc2, 0x40, /* 18 */ + 0x00, 0x00, 0x00, 0x01, /* 22 */ + /* go */ + 0x00, 0x38, /* 26 */ + 0x00, 0x01, /* 28 */ + 0x00, 0x00, 0x00, 0x00, /* 30 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 34 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 */ + 0x00, 0x00, 0x00, 0x00, /* 50 */ + 0x00, 0x00, /* 54 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 56 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 64 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 72 */ + 0x00, 0x00, /* 80 */ + }; + static unsigned char write_mto[] = { + /* mto */ + 0x00, 0x0a, /* 0 */ + 0x00, 0x04, /* 2 */ + 0x10, 0x00, /* 4 */ + 0x00, 0x00, 0x00, 0x00 /* 6 */ + }; + unsigned char *ptr, ch; + unsigned int count; + + memcpy(_sclp_work_area, write_head, sizeof(write_head)); + ptr = _sclp_work_area + sizeof(write_head); + do { + memcpy(ptr, write_mto, sizeof(write_mto)); + for (count = sizeof(write_mto); (ch = *str++) != 0; count++) { + if (ch == 0x0a) + break; + ptr[count] = _ascebc[ch]; + } + /* Update length fields in mto, mdb, evbuf and sccb */ + *(unsigned short *) ptr = count; + *(unsigned short *)(_sclp_work_area + 14) += count; + *(unsigned short *)(_sclp_work_area + 8) += count; + *(unsigned short *)(_sclp_work_area + 0) += count; + ptr += count; + } while (ch != 0); + + /* SCLP write data */ + return _sclp_servc(0x00760005, _sclp_work_area); +} + +int _sclp_print_early(const char *str) +{ + int rc; + + rc = _sclp_setup(0); + if (rc) + return rc; + rc = _sclp_print(str); + if (rc) + return rc; + return _sclp_setup(1); +} diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index f7f027caaaaa..ce0cbd6ba7ca 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -62,6 +62,7 @@ #include <asm/os_info.h> #include <asm/sclp.h> #include <asm/sysinfo.h> +#include <asm/numa.h> #include "entry.h" /* @@ -76,7 +77,7 @@ EXPORT_SYMBOL(console_devno); unsigned int console_irq = -1; EXPORT_SYMBOL(console_irq); -unsigned long elf_hwcap = 0; +unsigned long elf_hwcap __read_mostly = 0; char elf_platform[ELF_PLATFORM_SIZE]; int __initdata memory_end_set; @@ -688,7 +689,7 @@ static void __init setup_memory(void) /* * Setup hardware capabilities. */ -static void __init setup_hwcaps(void) +static int __init setup_hwcaps(void) { static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; struct cpuid cpu_id; @@ -754,9 +755,11 @@ static void __init setup_hwcaps(void) elf_hwcap |= HWCAP_S390_TE; /* - * Vector extension HWCAP_S390_VXRS is bit 11. + * Vector extension HWCAP_S390_VXRS is bit 11. 
The Vector extension + * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX + * instead of facility bit 129. */ - if (test_facility(129)) + if (MACHINE_HAS_VX) elf_hwcap |= HWCAP_S390_VXRS; get_cpu_id(&cpu_id); add_device_randomness(&cpu_id, sizeof(cpu_id)); @@ -793,7 +796,9 @@ static void __init setup_hwcaps(void) strcpy(elf_platform, "z13"); break; } + return 0; } +arch_initcall(setup_hwcaps); /* * Add system information as device randomness @@ -879,13 +884,7 @@ void __init setup_arch(char **cmdline_p) setup_lowcore(); smp_fill_possible_mask(); cpu_init(); - - /* - * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). - */ - setup_hwcaps(); - - HPAGE_SHIFT = MACHINE_HAS_HPAGE ? 20 : 0; + numa_setup(); /* * Create kernel page tables and switch to virtual addressing. diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index c551f22ce066..9549af102d75 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -105,32 +105,13 @@ struct rt_sigframe static void store_sigregs(void) { save_access_regs(current->thread.acrs); - save_fp_ctl(¤t->thread.fp_regs.fpc); - if (current->thread.vxrs) { - int i; - - save_vx_regs(current->thread.vxrs); - for (i = 0; i < __NUM_FPRS; i++) - current->thread.fp_regs.fprs[i] = - *(freg_t *)(current->thread.vxrs + i); - } else - save_fp_regs(current->thread.fp_regs.fprs); + save_fpu_regs(); } /* Load registers after signal return */ static void load_sigregs(void) { restore_access_regs(current->thread.acrs); - /* restore_fp_ctl is done in restore_sigregs */ - if (current->thread.vxrs) { - int i; - - for (i = 0; i < __NUM_FPRS; i++) - *(freg_t *)(current->thread.vxrs + i) = - current->thread.fp_regs.fprs[i]; - restore_vx_regs(current->thread.vxrs); - } else - restore_fp_regs(current->thread.fp_regs.fprs); } /* Returns non-zero on fault. */ @@ -146,8 +127,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); memcpy(&user_sregs.regs.acrs, current->thread.acrs, sizeof(user_sregs.regs.acrs)); - memcpy(&user_sregs.fpregs, ¤t->thread.fp_regs, - sizeof(user_sregs.fpregs)); + fpregs_store(&user_sregs.fpregs, ¤t->thread.fpu); if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs))) return -EFAULT; return 0; @@ -166,8 +146,8 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI)) return -EINVAL; - /* Loading the floating-point-control word can fail. Do that first. */ - if (restore_fp_ctl(&user_sregs.fpregs.fpc)) + /* Test the floating-point-control word. */ + if (test_fp_ctl(user_sregs.fpregs.fpc)) return -EINVAL; /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. 
*/ @@ -185,8 +165,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, sizeof(current->thread.acrs)); - memcpy(¤t->thread.fp_regs, &user_sregs.fpregs, - sizeof(current->thread.fp_regs)); + fpregs_load(&user_sregs.fpregs, ¤t->thread.fpu); clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ return 0; @@ -200,13 +179,13 @@ static int save_sigregs_ext(struct pt_regs *regs, int i; /* Save vector registers to signal stack */ - if (current->thread.vxrs) { + if (is_vx_task(current)) { for (i = 0; i < __NUM_VXRS_LOW; i++) - vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1); + vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, sizeof(sregs_ext->vxrs_low)) || __copy_to_user(&sregs_ext->vxrs_high, - current->thread.vxrs + __NUM_VXRS_LOW, + current->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(sregs_ext->vxrs_high))) return -EFAULT; } @@ -220,15 +199,15 @@ static int restore_sigregs_ext(struct pt_regs *regs, int i; /* Restore vector registers from signal stack */ - if (current->thread.vxrs) { + if (is_vx_task(current)) { if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, sizeof(sregs_ext->vxrs_low)) || - __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW, + __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, &sregs_ext->vxrs_high, sizeof(sregs_ext->vxrs_high))) return -EFAULT; for (i = 0; i < __NUM_VXRS_LOW; i++) - *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; + *((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i]; } return 0; } @@ -243,6 +222,7 @@ SYSCALL_DEFINE0(sigreturn) if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) goto badframe; set_current_blocked(&set); + save_fpu_regs(); if (restore_sigregs(regs, &frame->sregs)) goto badframe; if (restore_sigregs_ext(regs, &frame->sregs_ext)) @@ -266,6 +246,7 @@ SYSCALL_DEFINE0(rt_sigreturn) set_current_blocked(&set); if (restore_altstack(&frame->uc.uc_stack)) goto badframe; + save_fpu_regs(); if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext)) @@ -400,7 +381,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, uc_flags = 0; if (MACHINE_HAS_VX) { frame_size += sizeof(_sigregs_ext); - if (current->thread.vxrs) + if (is_vx_task(current)) uc_flags |= UC_VXRS; } frame = get_sigframe(&ksig->ka, regs, frame_size); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 6f54c175f5c9..c6355e6f3fcc 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -532,8 +532,8 @@ EXPORT_SYMBOL(smp_ctl_clear_bit); #ifdef CONFIG_CRASH_DUMP -static void __smp_store_cpu_state(struct save_area_ext *sa_ext, u16 address, - int is_boot_cpu) +static void __init __smp_store_cpu_state(struct save_area_ext *sa_ext, + u16 address, int is_boot_cpu) { void *lc = (void *)(unsigned long) store_prefix(); unsigned long vx_sa; diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 1acad02681c4..f3f4a137aef6 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -276,9 +276,9 @@ SYSCALL(sys_ni_syscall,compat_sys_s390_fadvise64_64) SYSCALL(sys_statfs64,compat_sys_statfs64) SYSCALL(sys_fstatfs64,compat_sys_fstatfs64) SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages) -NI_SYSCALL /* 268 sys_mbind */ -NI_SYSCALL /* 269 sys_get_mempolicy */ -NI_SYSCALL /* 270 sys_set_mempolicy */ +SYSCALL(sys_mbind,compat_sys_mbind) 
+SYSCALL(sys_get_mempolicy,compat_sys_get_mempolicy) +SYSCALL(sys_set_mempolicy,compat_sys_set_mempolicy) SYSCALL(sys_mq_open,compat_sys_mq_open) SYSCALL(sys_mq_unlink,compat_sys_mq_unlink) SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend) @@ -295,7 +295,7 @@ SYSCALL(sys_ioprio_get,compat_sys_ioprio_get) SYSCALL(sys_inotify_init,sys_inotify_init) SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch) -NI_SYSCALL /* 287 sys_migrate_pages */ +SYSCALL(sys_migrate_pages,compat_sys_migrate_pages) SYSCALL(sys_openat,compat_sys_openat) SYSCALL(sys_mkdirat,compat_sys_mkdirat) SYSCALL(sys_mknodat,compat_sys_mknodat) /* 290 */ @@ -318,7 +318,7 @@ SYSCALL(sys_splice,compat_sys_splice) SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range) SYSCALL(sys_tee,compat_sys_tee) SYSCALL(sys_vmsplice,compat_sys_vmsplice) -NI_SYSCALL /* 310 sys_move_pages */ +SYSCALL(sys_move_pages,compat_sys_move_pages) SYSCALL(sys_getcpu,compat_sys_getcpu) SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait) SYSCALL(sys_utimes,compat_sys_utimes) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 9e733d965e08..627887b075a7 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -58,6 +58,9 @@ EXPORT_SYMBOL_GPL(sched_clock_base_cc); static DEFINE_PER_CPU(struct clock_event_device, comparators); +ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier); +EXPORT_SYMBOL(s390_epoch_delta_notifier); + /* * Scheduler clock - returns current time in nanosec units. */ @@ -752,7 +755,7 @@ static void clock_sync_cpu(struct clock_sync_data *sync) static int etr_sync_clock(void *data) { static int first; - unsigned long long clock, old_clock, delay, delta; + unsigned long long clock, old_clock, clock_delta, delay, delta; struct clock_sync_data *etr_sync; struct etr_aib *sync_port, *aib; int port; @@ -789,6 +792,9 @@ static int etr_sync_clock(void *data) delay = (unsigned long long) (aib->edf2.etv - sync_port->edf2.etv) << 32; delta = adjust_time(old_clock, clock, delay); + clock_delta = clock - old_clock; + atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, + &clock_delta); etr_sync->fixup_cc = delta; fixup_clock_comparator(delta); /* Verify that the clock is properly set. 
*/ @@ -1526,7 +1532,7 @@ void stp_island_check(void) static int stp_sync_clock(void *data) { static int first; - unsigned long long old_clock, delta; + unsigned long long old_clock, delta, new_clock, clock_delta; struct clock_sync_data *stp_sync; int rc; @@ -1551,7 +1557,11 @@ static int stp_sync_clock(void *data) old_clock = get_tod_clock(); rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); if (rc == 0) { - delta = adjust_time(old_clock, get_tod_clock(), 0); + new_clock = get_tod_clock(); + delta = adjust_time(old_clock, new_clock, 0); + clock_delta = new_clock - old_clock; + atomic_notifier_call_chain(&s390_epoch_delta_notifier, + 0, &clock_delta); fixup_clock_comparator(delta); rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 5728c5bd44a8..bf05e7fc3e70 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -18,7 +18,10 @@ #include <linux/cpu.h> #include <linux/smp.h> #include <linux/mm.h> +#include <linux/nodemask.h> +#include <linux/node.h> #include <asm/sysinfo.h> +#include <asm/numa.h> #define PTF_HORIZONTAL (0UL) #define PTF_VERTICAL (1UL) @@ -37,8 +40,10 @@ static struct sysinfo_15_1_x *tl_info; static int topology_enabled = 1; static DECLARE_WORK(topology_work, topology_work_fn); -/* topology_lock protects the socket and book linked lists */ -static DEFINE_SPINLOCK(topology_lock); +/* + * Socket/Book linked lists and per_cpu(cpu_topology) updates are + * protected by "sched_domains_mutex". + */ static struct mask_info socket_info; static struct mask_info book_info; @@ -188,7 +193,6 @@ static void tl_to_masks(struct sysinfo_15_1_x *info) { struct cpuid cpu_id; - spin_lock_irq(&topology_lock); get_cpu_id(&cpu_id); clear_masks(); switch (cpu_id.machine) { @@ -199,7 +203,6 @@ static void tl_to_masks(struct sysinfo_15_1_x *info) default: __tl_to_masks_generic(info); } - spin_unlock_irq(&topology_lock); } static void topology_update_polarization_simple(void) @@ -244,10 +247,8 @@ int topology_set_cpu_management(int fc) static void update_cpu_masks(void) { - unsigned long flags; int cpu; - spin_lock_irqsave(&topology_lock, flags); for_each_possible_cpu(cpu) { per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); @@ -259,7 +260,7 @@ static void update_cpu_masks(void) per_cpu(cpu_topology, cpu).book_id = cpu; } } - spin_unlock_irqrestore(&topology_lock, flags); + numa_update_cpu_topology(); } void store_topology(struct sysinfo_15_1_x *info) @@ -274,21 +275,21 @@ int arch_update_cpu_topology(void) { struct sysinfo_15_1_x *info = tl_info; struct device *dev; - int cpu; + int cpu, rc = 0; - if (!MACHINE_HAS_TOPOLOGY) { - update_cpu_masks(); - topology_update_polarization_simple(); - return 0; + if (MACHINE_HAS_TOPOLOGY) { + rc = 1; + store_topology(info); + tl_to_masks(info); } - store_topology(info); - tl_to_masks(info); update_cpu_masks(); + if (!MACHINE_HAS_TOPOLOGY) + topology_update_polarization_simple(); for_each_online_cpu(cpu) { dev = get_cpu_device(cpu); kobject_uevent(&dev->kobj, KOBJ_CHANGE); } - return 1; + return rc; } static void topology_work_fn(struct work_struct *work) diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 4d96c9f53455..9861613fb35a 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -19,7 +19,7 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/slab.h> -#include <asm/switch_to.h> +#include <asm/fpu-internal.h> #include 
"entry.h" int show_unhandled_signals = 1; @@ -151,7 +151,7 @@ DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, "transaction constraint exception") -static inline void do_fp_trap(struct pt_regs *regs, int fpc) +static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc) { int si_code = 0; /* FPC[2] is Data Exception Code */ @@ -227,7 +227,7 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, int alloc_vector_registers(struct task_struct *tsk) { __vector128 *vxrs; - int i; + freg_t *fprs; /* Allocate vector register save area. */ vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS, @@ -236,15 +236,13 @@ int alloc_vector_registers(struct task_struct *tsk) return -ENOMEM; preempt_disable(); if (tsk == current) - save_fp_regs(tsk->thread.fp_regs.fprs); + save_fpu_regs(); /* Copy the 16 floating point registers */ - for (i = 0; i < 16; i++) - *(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i]; - tsk->thread.vxrs = vxrs; - if (tsk == current) { - __ctl_set_bit(0, 17); - restore_vx_regs(vxrs); - } + convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs); + fprs = tsk->thread.fpu.fprs; + tsk->thread.fpu.vxrs = vxrs; + tsk->thread.fpu.flags |= FPU_USE_VX; + kfree(fprs); preempt_enable(); return 0; } @@ -259,8 +257,8 @@ void vector_exception(struct pt_regs *regs) } /* get vector interrupt code from fpc */ - asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); - vic = (current->thread.fp_regs.fpc & 0xf00) >> 8; + save_fpu_regs(); + vic = (current->thread.fpu.fpc & 0xf00) >> 8; switch (vic) { case 1: /* invalid vector operation */ si_code = FPE_FLTINV; @@ -297,22 +295,22 @@ void data_exception(struct pt_regs *regs) location = get_trap_ip(regs); - asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); + save_fpu_regs(); /* Check for vector register enablement */ - if (MACHINE_HAS_VX && !current->thread.vxrs && - (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { + if (MACHINE_HAS_VX && !is_vx_task(current) && + (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) { alloc_vector_registers(current); /* Vector data exception is suppressing, rewind psw. 
*/ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); clear_pt_regs_flag(regs, PIF_PER_TRAP); return; } - if (current->thread.fp_regs.fpc & FPC_DXC_MASK) + if (current->thread.fpu.fpc & FPC_DXC_MASK) signal = SIGFPE; else signal = SIGILL; if (signal == SIGFPE) - do_fp_trap(regs, current->thread.fp_regs.fpc); + do_fp_trap(regs, current->thread.fpu.fpc); else if (signal) do_trap(regs, signal, ILL_ILLOPN, "data exception"); } diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index 8ad2b34ad151..ee8a18e50a25 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile @@ -13,7 +13,7 @@ KBUILD_AFLAGS_31 += -m31 -s KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + $(call cc-ldoption, -Wl$(comma)--hash-style=both) $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31) $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31) diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index 2a8ddfd12a5b..c4b03f9ed228 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -13,7 +13,7 @@ KBUILD_AFLAGS_64 += -m64 -s KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + $(call cc-ldoption, -Wl$(comma)--hash-style=both) $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index e53d3595a7c8..b9ce650e9e99 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -28,6 +28,7 @@ static atomic64_t virt_timer_elapsed; static DEFINE_PER_CPU(u64, mt_cycles[32]); static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; +static DEFINE_PER_CPU(u64, mt_scaling_jiffies); static inline u64 get_vtimer(void) { @@ -85,7 +86,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; /* Do MT utilization calculation */ - if (smp_cpu_mtid) { + if (smp_cpu_mtid && + time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) { u64 cycles_new[32], *cycles_old; u64 delta, mult, div; @@ -105,6 +107,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) sizeof(u64) * (smp_cpu_mtid + 1)); } } + __this_cpu_write(mt_scaling_jiffies, jiffies_64); } user = S390_lowcore.user_timer - ti->user_timer; @@ -376,4 +379,11 @@ void vtime_init(void) { /* set initial cpu timer */ set_vtimer(VTIMER_MAX_SLICE); + /* Setup initial MT scaling values */ + if (smp_cpu_mtid) { + __this_cpu_write(mt_scaling_jiffies, jiffies); + __this_cpu_write(mt_scaling_mult, 1); + __this_cpu_write(mt_scaling_div, 1); + stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles)); + } } diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index fc7ec95848c3..5fbfb88f8477 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -27,13 +27,13 @@ static int diag_release_pages(struct kvm_vcpu *vcpu) start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; + vcpu->stat.diagnose_10++; 
if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end || start < 2 * PAGE_SIZE) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); - vcpu->stat.diagnose_10++; /* * We checked for start >= end above, so lets check for the @@ -75,6 +75,9 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); + VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx", + vcpu->run->s.regs.gprs[rx]); + vcpu->stat.diagnose_258++; if (vcpu->run->s.regs.gprs[rx] & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); @@ -85,6 +88,9 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) switch (parm.subcode) { case 0: /* TOKEN */ + VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx " + "select mask 0x%llx compare mask 0x%llx", + parm.token_addr, parm.select_mask, parm.compare_mask); if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) { /* * If the pagefault handshake is already activated, @@ -114,6 +120,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) * the cancel, therefore to reduce code complexity, we assume * all outstanding tokens are already pending. */ + VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr); if (parm.token_addr || parm.select_mask || parm.compare_mask || parm.zarch) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -174,7 +181,8 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) unsigned int reg = vcpu->arch.sie_block->ipa & 0xf; unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff; - VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); + VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode); + vcpu->stat.diagnose_308++; switch (subcode) { case 3: vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; @@ -202,6 +210,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) { int ret; + vcpu->stat.diagnose_500++; /* No virtio-ccw notification? Get out quickly. */ if (!vcpu->kvm->arch.css_support || (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY)) diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index e97b3455d7e6..47518a324d75 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c @@ -473,10 +473,45 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->iprcc &= ~PGM_PER; } +#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH) +#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH) +#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1) +#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff) + void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu) { + int new_as; + if (debug_exit_required(vcpu)) vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING; filter_guest_per_event(vcpu); + + /* + * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger + * a space-switch event. PER events enforce space-switch events + * for these instructions. So if no PER event for the guest is left, + * we might have to filter the space-switch element out, too. + */ + if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) { + vcpu->arch.sie_block->iprcc = 0; + new_as = psw_bits(vcpu->arch.sie_block->gpsw).as; + + /* + * If the AS changed from / to home, we had RP, SAC or SACF + * instruction. 
Check primary and home space-switch-event + * controls. (theoretically home -> home produced no event) + */ + if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) && + (pssec(vcpu) || hssec(vcpu))) + vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; + + /* + * PT, PTI, PR, PC instruction operate on primary AS only. Check + * if the primary-space-switch-event control was or got set. + */ + if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) && + (pssec(vcpu) || old_ssec(vcpu))) + vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; + } } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c98d89708e99..b277d50dcf76 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -30,7 +30,6 @@ #define IOINT_SCHID_MASK 0x0000ffff #define IOINT_SSID_MASK 0x00030000 #define IOINT_CSSID_MASK 0x03fc0000 -#define IOINT_AI_MASK 0x04000000 #define PFAULT_INIT 0x0600 #define PFAULT_DONE 0x0680 #define VIRTIO_PARAM 0x0d00 @@ -72,9 +71,13 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) static int ckc_irq_pending(struct kvm_vcpu *vcpu) { + preempt_disable(); if (!(vcpu->arch.sie_block->ckc < - get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) + get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) { + preempt_enable(); return 0; + } + preempt_enable(); return ckc_interrupts_enabled(vcpu); } @@ -311,8 +314,8 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) li->irq.ext.ext_params2 = 0; spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx", - 0, ext.ext_params2); + VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx", + ext.ext_params2); trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, 0, ext.ext_params2); @@ -368,7 +371,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) spin_unlock(&fi->lock); if (deliver) { - VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", + VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx", mchk.mcic); trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, @@ -403,7 +406,7 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc; - VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); + VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart"); vcpu->stat.deliver_restart_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); @@ -427,7 +430,6 @@ static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu) clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address); vcpu->stat.deliver_prefix_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, @@ -450,7 +452,7 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu) clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); + VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg"); vcpu->stat.deliver_emergency_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, cpu_addr, 0); @@ -477,7 +479,7 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu) clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); + VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call"); vcpu->stat.deliver_external_call++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 
KVM_S390_INT_EXTERNAL_CALL, @@ -506,7 +508,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) memset(&li->irq.pgm, 0, sizeof(pgm_info)); spin_unlock(&li->lock); - VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", + VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d", pgm_info.code, ilc); vcpu->stat.deliver_program_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, @@ -622,7 +624,7 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu) clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); spin_unlock(&fi->lock); - VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", + VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x", ext.ext_params); vcpu->stat.deliver_service_signal++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, @@ -651,9 +653,6 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) struct kvm_s390_interrupt_info, list); if (inti) { - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, - KVM_S390_INT_PFAULT_DONE, 0, - inti->ext.ext_params2); list_del(&inti->list); fi->counters[FIRQ_CNTR_PFAULT] -= 1; } @@ -662,6 +661,12 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) spin_unlock(&fi->lock); if (inti) { + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, + KVM_S390_INT_PFAULT_DONE, 0, + inti->ext.ext_params2); + VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx", + inti->ext.ext_params2); + rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); rc |= put_guest_lc(vcpu, PFAULT_DONE, @@ -691,7 +696,7 @@ static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) list); if (inti) { VCPU_EVENT(vcpu, 4, - "interrupt: virtio parm:%x,parm64:%llx", + "deliver: virtio parm: 0x%x,parm64: 0x%llx", inti->ext.ext_params, inti->ext.ext_params2); vcpu->stat.deliver_virtio_interrupt++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, @@ -741,7 +746,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_interrupt_info, list); if (inti) { - VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); + VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type); vcpu->stat.deliver_io_int++; trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, @@ -855,7 +860,9 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) goto no_timer; } + preempt_disable(); now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; + preempt_enable(); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); /* underflow */ @@ -864,7 +871,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) __set_cpu_idle(vcpu); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); - VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); + VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime); no_timer: srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); @@ -894,7 +901,9 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) u64 now, sltime; vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); + preempt_disable(); now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; + preempt_enable(); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); /* @@ -968,6 +977,10 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, + irq->u.pgm.code, 0); + li->irq.pgm = 
irq->u.pgm; set_bit(IRQ_PEND_PROG, &li->pending_irqs); return 0; @@ -978,9 +991,6 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_irq irq; - VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code, - 0, 1); spin_lock(&li->lock); irq.u.pgm.code = code; __inject_prog(vcpu, &irq); @@ -996,10 +1006,6 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, struct kvm_s390_irq irq; int rc; - VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", - pgm_info->code); - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, - pgm_info->code, 0, 1); spin_lock(&li->lock); irq.u.pgm = *pgm_info; rc = __inject_prog(vcpu, &irq); @@ -1012,11 +1018,11 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx", - irq->u.ext.ext_params, irq->u.ext.ext_params2); + VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx", + irq->u.ext.ext_params2); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, irq->u.ext.ext_params, - irq->u.ext.ext_params2, 2); + irq->u.ext.ext_params2); li->irq.ext = irq->u.ext; set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); @@ -1045,10 +1051,10 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_extcall_info *extcall = &li->irq.extcall; uint16_t src_id = irq->u.extcall.code; - VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", + VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u", src_id); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, - src_id, 0, 2); + src_id, 0); /* sending vcpu invalid */ if (src_id >= KVM_MAX_VCPUS || @@ -1070,10 +1076,10 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_prefix_info *prefix = &li->irq.prefix; - VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", + VCPU_EVENT(vcpu, 3, "inject: set prefix to %x", irq->u.prefix.address); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, - irq->u.prefix.address, 0, 2); + irq->u.prefix.address, 0); if (!is_vcpu_stopped(vcpu)) return -EBUSY; @@ -1090,7 +1096,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_stop_info *stop = &li->irq.stop; int rc = 0; - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0); if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS) return -EINVAL; @@ -1114,8 +1120,8 @@ static int __inject_sigp_restart(struct kvm_vcpu *vcpu, { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type); - trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2); + VCPU_EVENT(vcpu, 3, "%s", "inject: restart int"); + trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); set_bit(IRQ_PEND_RESTART, &li->pending_irqs); return 0; @@ -1126,10 +1132,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", + VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u", irq->u.emerg.code); 
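The injection helpers in these hunks share one pattern: record the payload in the VCPU's local-interrupt structure, set the matching pending bit, and emit the trace/debug event once at the __inject_* choke point (which is why the duplicated "from kernel"/"from user" logging in the callers is dropped). A simplified userspace model of that pattern, with illustrative names:

/*
 * Userspace sketch of pending-bit interrupt injection: the payload is
 * stored in a per-VCPU structure under a lock and a bit marks the
 * interrupt type for later delivery. Struct names and the bit layout
 * are illustrative, not the kernel's.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

enum irq_bit { PEND_PROG, PEND_RESTART, PEND_EXT_EMERGENCY };

struct vcpu_local_int {
	pthread_mutex_t lock;
	uint64_t pending_irqs;   /* one bit per interrupt type */
	uint16_t pgm_code;       /* payload for a program interrupt */
};

static void inject_program_int(struct vcpu_local_int *li, uint16_t code)
{
	pthread_mutex_lock(&li->lock);
	li->pgm_code = code;                      /* record payload */
	li->pending_irqs |= 1ULL << PEND_PROG;    /* mark as pending */
	pthread_mutex_unlock(&li->lock);
}

static void deliver_pending(struct vcpu_local_int *li)
{
	pthread_mutex_lock(&li->lock);
	if (li->pending_irqs & (1ULL << PEND_PROG)) {
		printf("deliver: program irq code 0x%x\n", li->pgm_code);
		li->pending_irqs &= ~(1ULL << PEND_PROG);
	}
	pthread_mutex_unlock(&li->lock);
}

int main(void)
{
	struct vcpu_local_int li = { .lock = PTHREAD_MUTEX_INITIALIZER };

	inject_program_int(&li, 0x06);   /* e.g. a specification exception code */
	deliver_pending(&li);
	return 0;
}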
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, - irq->u.emerg.code, 0, 2); + irq->u.emerg.code, 0); set_bit(irq->u.emerg.code, li->sigp_emerg_pending); set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); @@ -1142,10 +1148,10 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_mchk_info *mchk = &li->irq.mchk; - VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", + VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx", irq->u.mchk.mcic); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, - irq->u.mchk.mcic, 2); + irq->u.mchk.mcic); /* * Because repressible machine checks can be indicated along with @@ -1172,9 +1178,9 @@ static int __inject_ckc(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP); + VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external"); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, - 0, 0, 2); + 0, 0); set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); @@ -1185,9 +1191,9 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER); + VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external"); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, - 0, 0, 2); + 0, 0); set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); @@ -1435,20 +1441,20 @@ int kvm_s390_inject_vm(struct kvm *kvm, inti->ext.ext_params2 = s390int->parm64; break; case KVM_S390_INT_SERVICE: - VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm); + VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm); inti->ext.ext_params = s390int->parm; break; case KVM_S390_INT_PFAULT_DONE: inti->ext.ext_params2 = s390int->parm64; break; case KVM_S390_MCHK: - VM_EVENT(kvm, 5, "inject: machine check parm64:%llx", + VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx", s390int->parm64); inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ inti->mchk.mcic = s390int->parm64; break; case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: - if (inti->type & IOINT_AI_MASK) + if (inti->type & KVM_S390_INT_IO_AI_MASK) VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); else VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x", @@ -1535,8 +1541,6 @@ static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) switch (irq->type) { case KVM_S390_PROGRAM_INT: - VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", - irq->u.pgm.code); rc = __inject_prog(vcpu, irq); break; case KVM_S390_SIGP_SET_PREFIX: diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2078f92d15ac..98df53c01343 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -28,6 +28,7 @@ #include <linux/vmalloc.h> #include <asm/asm-offsets.h> #include <asm/lowcore.h> +#include <asm/etr.h> #include <asm/pgtable.h> #include <asm/nmi.h> #include <asm/switch_to.h> @@ -108,6 +109,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "diagnose_10", VCPU_STAT(diagnose_10) }, { "diagnose_44", VCPU_STAT(diagnose_44) }, { "diagnose_9c", VCPU_STAT(diagnose_9c) }, + { "diagnose_258", VCPU_STAT(diagnose_258) }, + { "diagnose_308", VCPU_STAT(diagnose_308) }, + { "diagnose_500", VCPU_STAT(diagnose_500) }, { NULL } }; @@ -124,6 +128,7 @@ unsigned 
long kvm_s390_fac_list_mask_size(void) } static struct gmap_notifier gmap_notifier; +debug_info_t *kvm_s390_dbf; /* Section: not file related */ int kvm_arch_hardware_enable(void) @@ -134,24 +139,69 @@ int kvm_arch_hardware_enable(void) static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address); +/* + * This callback is executed during stop_machine(). All CPUs are therefore + * temporarily stopped. In order not to change guest behavior, we have to + * disable preemption whenever we touch the epoch of kvm and the VCPUs, + * so a CPU won't be stopped while calculating with the epoch. + */ +static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val, + void *v) +{ + struct kvm *kvm; + struct kvm_vcpu *vcpu; + int i; + unsigned long long *delta = v; + + list_for_each_entry(kvm, &vm_list, vm_list) { + kvm->arch.epoch -= *delta; + kvm_for_each_vcpu(i, vcpu, kvm) { + vcpu->arch.sie_block->epoch -= *delta; + } + } + return NOTIFY_OK; +} + +static struct notifier_block kvm_clock_notifier = { + .notifier_call = kvm_clock_sync, +}; + int kvm_arch_hardware_setup(void) { gmap_notifier.notifier_call = kvm_gmap_notifier; gmap_register_ipte_notifier(&gmap_notifier); + atomic_notifier_chain_register(&s390_epoch_delta_notifier, + &kvm_clock_notifier); return 0; } void kvm_arch_hardware_unsetup(void) { gmap_unregister_ipte_notifier(&gmap_notifier); + atomic_notifier_chain_unregister(&s390_epoch_delta_notifier, + &kvm_clock_notifier); } int kvm_arch_init(void *opaque) { + kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); + if (!kvm_s390_dbf) + return -ENOMEM; + + if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) { + debug_unregister(kvm_s390_dbf); + return -ENOMEM; + } + /* Register floating interrupt controller interface. */ return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); } +void kvm_arch_exit(void) +{ + debug_unregister(kvm_s390_dbf); +} + /* Section: device related */ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) @@ -281,10 +331,12 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) switch (cap->cap) { case KVM_CAP_S390_IRQCHIP: + VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP"); kvm->arch.use_irqchip = 1; r = 0; break; case KVM_CAP_S390_USER_SIGP: + VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP"); kvm->arch.user_sigp = 1; r = 0; break; @@ -295,8 +347,11 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) r = 0; } else r = -EINVAL; + VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", + r ? 
"(not available)" : "(success)"); break; case KVM_CAP_S390_USER_STSI: + VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); kvm->arch.user_stsi = 1; r = 0; break; @@ -314,6 +369,8 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *att switch (attr->attr) { case KVM_S390_VM_MEM_LIMIT_SIZE: ret = 0; + VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", + kvm->arch.gmap->asce_end); if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) ret = -EFAULT; break; @@ -330,7 +387,13 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att unsigned int idx; switch (attr->attr) { case KVM_S390_VM_MEM_ENABLE_CMMA: + /* enable CMMA only for z10 and later (EDAT_1) */ + ret = -EINVAL; + if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1) + break; + ret = -EBUSY; + VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); mutex_lock(&kvm->lock); if (atomic_read(&kvm->online_vcpus) == 0) { kvm->arch.use_cmma = 1; @@ -339,6 +402,11 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att mutex_unlock(&kvm->lock); break; case KVM_S390_VM_MEM_CLR_CMMA: + ret = -EINVAL; + if (!kvm->arch.use_cmma) + break; + + VM_EVENT(kvm, 3, "%s", "RESET: CMMA states"); mutex_lock(&kvm->lock); idx = srcu_read_lock(&kvm->srcu); s390_reset_cmma(kvm->arch.gmap->mm); @@ -374,6 +442,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att } } mutex_unlock(&kvm->lock); + VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit); break; } default: @@ -400,22 +469,26 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) kvm->arch.crypto.crycb->aes_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); kvm->arch.crypto.aes_kw = 1; + VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: get_random_bytes( kvm->arch.crypto.crycb->dea_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); kvm->arch.crypto.dea_kw = 1; + VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: kvm->arch.crypto.aes_kw = 0; memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); + VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: kvm->arch.crypto.dea_kw = 0; memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); + VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); break; default: mutex_unlock(&kvm->lock); @@ -440,6 +513,7 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) if (gtod_high != 0) return -EINVAL; + VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high); return 0; } @@ -459,12 +533,15 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) return r; mutex_lock(&kvm->lock); + preempt_disable(); kvm->arch.epoch = gtod - host_tod; kvm_s390_vcpu_block_all(kvm); kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; kvm_s390_vcpu_unblock_all(kvm); + preempt_enable(); mutex_unlock(&kvm->lock); + VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod); return 0; } @@ -496,6 +573,7 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) if (copy_to_user((void __user *)attr->addr, >od_high, sizeof(gtod_high))) return -EFAULT; + VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", 
gtod_high); return 0; } @@ -509,9 +587,12 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) if (r) return r; + preempt_disable(); gtod = host_tod + kvm->arch.epoch; + preempt_enable(); if (copy_to_user((void __user *)attr->addr, >od, sizeof(gtod))) return -EFAULT; + VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod); return 0; } @@ -821,7 +902,9 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) } /* Enable storage key handling for the guest */ - s390_enable_skey(); + r = s390_enable_skey(); + if (r) + goto out; for (i = 0; i < args->count; i++) { hva = gfn_to_hva(kvm, args->start_gfn + i); @@ -879,8 +962,7 @@ long kvm_arch_vm_ioctl(struct file *filp, if (kvm->arch.use_irqchip) { /* Set up dummy routing. */ memset(&routing, 0, sizeof(routing)); - kvm_set_irq_routing(kvm, &routing, 0, 0); - r = 0; + r = kvm_set_irq_routing(kvm, &routing, 0, 0); } break; } @@ -1043,7 +1125,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) sprintf(debug_name, "kvm-%u", current->pid); - kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); + kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); if (!kvm->arch.dbf) goto out_err; @@ -1086,7 +1168,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) mutex_init(&kvm->arch.ipte_mutex); debug_register_view(kvm->arch.dbf, &debug_sprintf_view); - VM_EVENT(kvm, 3, "%s", "vm created"); + VM_EVENT(kvm, 3, "vm created with type %lu", type); if (type & KVM_VM_S390_UCONTROL) { kvm->arch.gmap = NULL; @@ -1103,6 +1185,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.epoch = 0; spin_lock_init(&kvm->arch.start_stop_lock); + KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid); return 0; out_err: @@ -1110,6 +1193,7 @@ out_err: free_page((unsigned long)kvm->arch.model.fac); debug_unregister(kvm->arch.dbf); free_page((unsigned long)(kvm->arch.sca)); + KVM_EVENT(3, "creation of vm failed: %d", rc); return rc; } @@ -1131,7 +1215,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) if (kvm_is_ucontrol(vcpu->kvm)) gmap_free(vcpu->arch.gmap); - if (kvm_s390_cmma_enabled(vcpu->kvm)) + if (vcpu->kvm->arch.use_cmma) kvm_s390_vcpu_unsetup_cmma(vcpu); free_page((unsigned long)(vcpu->arch.sie_block)); @@ -1166,6 +1250,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) gmap_free(kvm->arch.gmap); kvm_s390_destroy_adapters(kvm); kvm_s390_clear_float_irqs(kvm); + KVM_EVENT(3, "vm 0x%p destroyed", kvm); } /* Section: vcpu related */ @@ -1198,21 +1283,54 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) return 0; } +/* + * Backs up the current FP/VX register save area on a particular + * destination. Used to switch between different register save + * areas. + */ +static inline void save_fpu_to(struct fpu *dst) +{ + dst->fpc = current->thread.fpu.fpc; + dst->flags = current->thread.fpu.flags; + dst->regs = current->thread.fpu.regs; +} + +/* + * Switches the FP/VX register save area from which to lazy + * restore register contents. 
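save_fpu_to() above and load_fpu_from() (defined next) redirect the lazy FP/VX save area simply by copying the fpc, the flags and the regs pointer. A compact sketch of how such helpers are used around a guest-run section: back up the host area, switch to the guest area, and restore on the way out. This is a userspace model; the field names mirror the hunk but the struct and calling code are illustrative:

/*
 * Model of switching the lazy FP/VX register save area, as the
 * save_fpu_to()/load_fpu_from() helpers do.
 */
#include <stdint.h>
#include <stdio.h>

struct fpu_area {
	uint32_t fpc;    /* floating-point control word */
	uint32_t flags;  /* e.g. "vector registers in use" */
	void *regs;      /* pointer to the actual register save buffer */
};

static struct fpu_area current_fpu;   /* area the lazy-restore code uses */

static void save_fpu_to(struct fpu_area *dst)
{
	*dst = current_fpu;               /* back up fpc, flags and regs pointer */
}

static void load_fpu_from(const struct fpu_area *src)
{
	current_fpu = *src;               /* switch to another save area */
}

int main(void)
{
	static uint64_t host_buf[16], guest_buf[16];
	struct fpu_area host_backup;
	struct fpu_area guest = { .fpc = 0, .flags = 0, .regs = guest_buf };

	current_fpu.regs = host_buf;

	save_fpu_to(&host_backup);        /* vcpu load: remember the host state ... */
	load_fpu_from(&guest);            /* ... and run with the guest's area */

	printf("running guest with save area %p\n", current_fpu.regs);

	save_fpu_to(&guest);              /* vcpu put: capture the guest state ... */
	load_fpu_from(&host_backup);      /* ... and restore the host's area */
	return 0;
}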
+ */ +static inline void load_fpu_from(struct fpu *from) +{ + current->thread.fpu.fpc = from->fpc; + current->thread.fpu.flags = from->flags; + current->thread.fpu.regs = from->regs; +} + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { - save_fp_ctl(&vcpu->arch.host_fpregs.fpc); - if (test_kvm_facility(vcpu->kvm, 129)) - save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); - else - save_fp_regs(vcpu->arch.host_fpregs.fprs); - save_access_regs(vcpu->arch.host_acrs); + /* Save host register state */ + save_fpu_regs(); + save_fpu_to(&vcpu->arch.host_fpregs); + if (test_kvm_facility(vcpu->kvm, 129)) { - restore_fp_ctl(&vcpu->run->s.regs.fpc); - restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); - } else { - restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - restore_fp_regs(vcpu->arch.guest_fpregs.fprs); - } + current->thread.fpu.fpc = vcpu->run->s.regs.fpc; + current->thread.fpu.flags = FPU_USE_VX; + /* + * Use the register save area in the SIE-control block + * for register restore and save in kvm_arch_vcpu_put() + */ + current->thread.fpu.vxrs = + (__vector128 *)&vcpu->run->s.regs.vrs; + /* Always enable the vector extension for KVM */ + __ctl_set_vx(); + } else + load_fpu_from(&vcpu->arch.guest_fpregs); + + if (test_fp_ctl(current->thread.fpu.fpc)) + /* User space provided an invalid FPC, let's clear it */ + current->thread.fpu.fpc = 0; + + save_access_regs(vcpu->arch.host_acrs); restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); @@ -1222,19 +1340,22 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); - if (test_kvm_facility(vcpu->kvm, 129)) { - save_fp_ctl(&vcpu->run->s.regs.fpc); - save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); - } else { - save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - save_fp_regs(vcpu->arch.guest_fpregs.fprs); - } - save_access_regs(vcpu->run->s.regs.acrs); - restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); + + save_fpu_regs(); + if (test_kvm_facility(vcpu->kvm, 129)) - restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); + /* + * kvm_arch_vcpu_load() set up the register save area to + * the &vcpu->run->s.regs.vrs and, thus, the vector registers + * are already saved. Only the floating-point control must be + * copied. 
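The comment here relies on the architectural overlap between the register files: floating-point registers 0-15 occupy bits 0-63 of vector registers 0-15, so once the vector registers are saved (into vcpu->run->s.regs.vrs via the SIE control block), the FP contents are already covered and only the FPC needs copying back; a later hunk's convert_vx_to_fp() performs the extraction for the store-status path. A sketch of that extraction, with a simplified stand-in for the kernel's __vector128 layout:

/*
 * Illustration of the FP/VX overlap: FPR n is the high (leftmost)
 * doubleword of VR n for n = 0..15, so FP values can be recovered from
 * a saved vector-register image.
 */
#include <stdint.h>
#include <stdio.h>

struct vreg {
	uint64_t high;   /* bits 0-63: overlaps the FP register */
	uint64_t low;    /* bits 64-127 */
};

/* Roughly what a convert_vx_to_fp()-style helper has to do */
static void vx_to_fp(uint64_t fprs[16], const struct vreg vxrs[16])
{
	for (int i = 0; i < 16; i++)
		fprs[i] = vxrs[i].high;
}

int main(void)
{
	struct vreg vxrs[16] = { [2] = { .high = 0x4000000000000000ULL } };
	uint64_t fprs[16];

	vx_to_fp(fprs, vxrs);
	printf("fpr2 = %#llx\n", (unsigned long long)fprs[2]); /* bit pattern of 2.0 */
	return 0;
}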
+ */ + vcpu->run->s.regs.fpc = current->thread.fpu.fpc; else - restore_fp_regs(vcpu->arch.host_fpregs.fprs); + save_fpu_to(&vcpu->arch.guest_fpregs); + load_fpu_from(&vcpu->arch.host_fpregs); + + save_access_regs(vcpu->run->s.regs.acrs); restore_access_regs(vcpu->arch.host_acrs); } @@ -1264,7 +1385,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { mutex_lock(&vcpu->kvm->lock); + preempt_disable(); vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; + preempt_enable(); mutex_unlock(&vcpu->kvm->lock); if (!kvm_is_ucontrol(vcpu->kvm)) vcpu->arch.gmap = vcpu->kvm->arch.gmap; @@ -1342,7 +1465,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) } vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; - if (kvm_s390_cmma_enabled(vcpu->kvm)) { + if (vcpu->kvm->arch.use_cmma) { rc = kvm_s390_vcpu_setup_cmma(vcpu); if (rc) return rc; @@ -1377,7 +1500,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block = &sie_page->sie_block; vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; - vcpu->arch.host_vregs = &sie_page->vregs; vcpu->arch.sie_block->icpua = id; if (!kvm_is_ucontrol(kvm)) { @@ -1399,6 +1521,19 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.local_int.wq = &vcpu->wq; vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; + /* + * Allocate a save area for floating-point registers. If the vector + * extension is available, register contents are saved in the SIE + * control block. The allocated save area is still required in + * particular places, for example, in kvm_s390_vcpu_store_status(). + */ + vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, + GFP_KERNEL); + if (!vcpu->arch.guest_fpregs.fprs) { + rc = -ENOMEM; + goto out_free_sie_block; + } + rc = kvm_vcpu_init(vcpu, kvm, id); if (rc) goto out_free_sie_block; @@ -1621,16 +1756,16 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { if (test_fp_ctl(fpu->fpc)) return -EINVAL; - memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); + memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); vcpu->arch.guest_fpregs.fpc = fpu->fpc; - restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - restore_fp_regs(vcpu->arch.guest_fpregs.fprs); + save_fpu_regs(); + load_fpu_from(&vcpu->arch.guest_fpregs); return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); + memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); fpu->fpc = vcpu->arch.guest_fpregs.fpc; return 0; } @@ -1723,18 +1858,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, return rc; } -bool kvm_s390_cmma_enabled(struct kvm *kvm) -{ - if (!MACHINE_IS_LPAR) - return false; - /* only enable for z10 and later */ - if (!MACHINE_HAS_EDAT1) - return false; - if (!kvm->arch.use_cmma) - return false; - return true; -} - static bool ibs_enabled(struct kvm_vcpu *vcpu) { return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; @@ -1742,10 +1865,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu) static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) { - if (!vcpu->requests) - return 0; retry: kvm_s390_vcpu_request_handled(vcpu); + if (!vcpu->requests) + return 0; /* * We use MMU_RELOAD just to re-arm the ipte notifier for the * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 
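Because guest_fpregs.fprs becomes a separately allocated pointer in this series (see the kzalloc in the vcpu-create hunk above) instead of an embedded array, the get/set-FPU ioctls must now pass the pointer value itself to memcpy() rather than its address. The distinction is easy to get wrong, so a small illustration with invented struct names:

/*
 * Why the memcpy() arguments change when an embedded array becomes a
 * separately allocated pointer: with an array, &s.fprs and s.fprs name
 * the same bytes; with a pointer, &s.fprs is the pointer variable itself
 * and copying there would corrupt it instead of the register buffer.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct regs_embedded { uint64_t fprs[16]; };
struct regs_pointer  { uint64_t *fprs;    };

int main(void)
{
	uint64_t src[16] = { 0x1111, 0x2222 };
	struct regs_embedded e;
	struct regs_pointer p = { .fprs = calloc(16, sizeof(uint64_t)) };

	if (!p.fprs)
		return 1;

	/* Embedded array: both forms copy into the same storage. */
	memcpy(&e.fprs, src, sizeof(e.fprs));
	memcpy(e.fprs, src, sizeof(e.fprs));

	/* Allocated pointer: only the pointer *value* is a valid destination. */
	memcpy(p.fprs, src, 16 * sizeof(uint64_t));   /* correct */
	/* memcpy(&p.fprs, src, ...) would overwrite the pointer itself. */

	printf("%#llx %#llx\n", (unsigned long long)e.fprs[1],
	       (unsigned long long)p.fprs[1]);
	free(p.fprs);
	return 0;
}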
@@ -2193,8 +2316,21 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) * copying in vcpu load/put. Lets update our copies before we save * it into the save area */ - save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - save_fp_regs(vcpu->arch.guest_fpregs.fprs); + save_fpu_regs(); + if (test_kvm_facility(vcpu->kvm, 129)) { + /* + * If the vector extension is available, the vector registers + * which overlaps with floating-point registers are saved in + * the SIE-control block. Hence, extract the floating-point + * registers and the FPC value and store them in the + * guest_fpregs structure. + */ + WARN_ON(!is_vx_task(current)); /* XXX remove later */ + vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc; + convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs, + current->thread.fpu.vxrs); + } else + save_fpu_to(&vcpu->arch.guest_fpregs); save_access_regs(vcpu->run->s.regs.acrs); return kvm_s390_store_status_unloaded(vcpu, addr); @@ -2221,10 +2357,13 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) /* * The guest VXRS are in the host VXRs due to the lazy - * copying in vcpu load/put. Let's update our copies before we save - * it into the save area. + * copying in vcpu load/put. We can simply call save_fpu_regs() + * to save the current register state because we are in the + * middle of a load/put cycle. + * + * Let's update our copies before we save it into the save area. */ - save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); + save_fpu_regs(); return kvm_s390_store_adtl_status_unloaded(vcpu, addr); } @@ -2340,6 +2479,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, case KVM_CAP_S390_CSS_SUPPORT: if (!vcpu->kvm->arch.css_support) { vcpu->kvm->arch.css_support = 1; + VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); trace_kvm_s390_enable_css(vcpu->kvm); } r = 0; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c5704786e473..c446aabf60d3 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -27,6 +27,13 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); #define TDB_FORMAT1 1 #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) +extern debug_info_t *kvm_s390_dbf; +#define KVM_EVENT(d_loglevel, d_string, d_args...)\ +do { \ + debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \ + d_args); \ +} while (0) + #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\ do { \ debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \ @@ -65,6 +72,8 @@ static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu) static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) { + VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id, + prefix); vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); @@ -217,8 +226,6 @@ void exit_sie(struct kvm_vcpu *vcpu); void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu); int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); -/* is cmma enabled */ -bool kvm_s390_cmma_enabled(struct kvm *kvm); unsigned long kvm_s390_fac_list_mask_size(void); extern unsigned long kvm_s390_fac_list_mask[]; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index ad4242245771..4d21dc4d1a84 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -53,11 +53,14 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) kvm_s390_set_psw_cc(vcpu, 
3); return 0; } + VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); val = (val - hostclk) & ~0x3fUL; mutex_lock(&vcpu->kvm->lock); + preempt_disable(); kvm_for_each_vcpu(i, cpup, vcpu->kvm) cpup->arch.sie_block->epoch = val; + preempt_enable(); mutex_unlock(&vcpu->kvm->lock); kvm_s390_set_psw_cc(vcpu, 0); @@ -98,8 +101,6 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_set_prefix(vcpu, address); - - VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 1, address); return 0; } @@ -129,7 +130,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); + VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2); trace_kvm_s390_handle_prefix(vcpu, 0, address); return 0; } @@ -155,7 +156,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga); + VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga); trace_kvm_s390_handle_stap(vcpu, ga); return 0; } @@ -167,6 +168,7 @@ static int __skey_check_enable(struct kvm_vcpu *vcpu) return rc; rc = s390_enable_skey(); + VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest"); trace_kvm_s390_skey_related_inst(vcpu); vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); return rc; @@ -370,7 +372,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) &fac, sizeof(fac)); if (rc) return rc; - VCPU_EVENT(vcpu, 5, "store facility list value %x", fac); + VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac); trace_kvm_s390_handle_stfl(vcpu, fac); return 0; } @@ -468,7 +470,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu) if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); + VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data); return 0; } @@ -521,7 +523,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) ar_t ar; vcpu->stat.instruction_stsi++; - VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); + VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -758,10 +760,10 @@ static int handle_essa(struct kvm_vcpu *vcpu) struct gmap *gmap; int i; - VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); + VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries); gmap = vcpu->arch.gmap; vcpu->stat.instruction_essa++; - if (!kvm_s390_cmma_enabled(vcpu->kvm)) + if (!vcpu->kvm->arch.use_cmma) return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) @@ -829,7 +831,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); + VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); nr_regs = ((reg3 - reg1) & 0xf) + 1; @@ -868,7 +870,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); + VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); 
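Several hunks in this region (the SCK handler above, the TOD attribute get/set and the s390_epoch_delta notifier in earlier hunks) revolve around one identity: the guest's TOD equals the host TOD plus a per-VM epoch offset. Setting a guest clock therefore stores the difference, and a host clock correction shifts every stored epoch by the same delta so guest time does not jump; the added preempt_disable/preempt_enable pairs keep the arithmetic from interleaving with such a stop_machine-based adjustment. A small model of that bookkeeping, with illustrative names:

/*
 * Userspace model of the epoch bookkeeping: guest_tod = host_tod + epoch.
 * Setting the guest clock stores the difference; a host clock correction
 * shifts the stored offset by the delta so the guest view stays stable.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t host_tod = 1000000;   /* stand-in for get_tod_clock_fast() */
static int64_t epoch;                 /* per-VM offset (per-VCPU in the kernel) */

static uint64_t guest_tod(void)
{
	return host_tod + epoch;
}

static void set_guest_tod(uint64_t gtod)
{
	epoch = (int64_t)(gtod - host_tod);
}

static void host_clock_adjusted(int64_t delta)
{
	host_tod += delta;   /* the machine's TOD moved ... */
	epoch -= delta;      /* ... so compensate in the stored offset */
}

int main(void)
{
	set_guest_tod(500);
	printf("guest TOD: %llu\n", (unsigned long long)guest_tod());  /* 500 */

	host_clock_adjusted(+64);
	printf("guest TOD: %llu\n", (unsigned long long)guest_tod());  /* still 500 */
	return 0;
}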
trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); reg = reg1; @@ -902,7 +904,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); + VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); nr_regs = ((reg3 - reg1) & 0xf) + 1; @@ -940,7 +942,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu) if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); + VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); reg = reg1; diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 72e58bd2bee7..da690b69f9fe 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -205,9 +205,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; return SIGP_CC_STATUS_STORED; - } else if (rc == 0) { - VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", - dst_vcpu->vcpu_id, irq.u.prefix.address); } return rc; @@ -371,7 +368,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, return rc; } -static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) +static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code, + u16 cpu_addr) { if (!vcpu->kvm->arch.user_sigp) return 0; @@ -414,9 +412,8 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) default: vcpu->stat.instruction_sigp_unknown++; } - - VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space", - order_code); + VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace", + order_code, cpu_addr); return 1; } @@ -435,7 +432,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); - if (handle_sigp_order_in_user_space(vcpu, order_code)) + if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr)) return -EOPNOTSUPP; if (r1 % 2) diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index 3208d33a48cb..cc1d6c68356f 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h @@ -105,11 +105,22 @@ TRACE_EVENT(kvm_s390_vcpu_start_stop, {KVM_S390_PROGRAM_INT, "program interrupt"}, \ {KVM_S390_SIGP_SET_PREFIX, "sigp set prefix"}, \ {KVM_S390_RESTART, "sigp restart"}, \ + {KVM_S390_INT_PFAULT_INIT, "pfault init"}, \ + {KVM_S390_INT_PFAULT_DONE, "pfault done"}, \ + {KVM_S390_MCHK, "machine check"}, \ + {KVM_S390_INT_CLOCK_COMP, "clock comparator"}, \ + {KVM_S390_INT_CPU_TIMER, "cpu timer"}, \ {KVM_S390_INT_VIRTIO, "virtio interrupt"}, \ {KVM_S390_INT_SERVICE, "sclp interrupt"}, \ {KVM_S390_INT_EMERGENCY, "sigp emergency"}, \ {KVM_S390_INT_EXTERNAL_CALL, "sigp ext call"} +#define get_irq_name(__type) \ + (__type > KVM_S390_INT_IO_MAX ? \ + __print_symbolic(__type, kvm_s390_int_type) : \ + (__type & KVM_S390_INT_IO_AI_MASK ? \ + "adapter I/O interrupt" : "subchannel I/O interrupt")) + TRACE_EVENT(kvm_s390_inject_vm, TP_PROTO(__u64 type, __u32 parm, __u64 parm64, int who), TP_ARGS(type, parm, parm64, who), @@ -131,22 +142,19 @@ TRACE_EVENT(kvm_s390_inject_vm, TP_printk("inject%s: type:%x (%s) parm:%x parm64:%llx", (__entry->who == 1) ? 
" (from kernel)" : (__entry->who == 2) ? " (from user)" : "", - __entry->inttype, - __print_symbolic(__entry->inttype, kvm_s390_int_type), + __entry->inttype, get_irq_name(__entry->inttype), __entry->parm, __entry->parm64) ); TRACE_EVENT(kvm_s390_inject_vcpu, - TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64, \ - int who), - TP_ARGS(id, type, parm, parm64, who), + TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64), + TP_ARGS(id, type, parm, parm64), TP_STRUCT__entry( __field(int, id) __field(__u32, inttype) __field(__u32, parm) __field(__u64, parm64) - __field(int, who) ), TP_fast_assign( @@ -154,15 +162,12 @@ TRACE_EVENT(kvm_s390_inject_vcpu, __entry->inttype = type & 0x00000000ffffffff; __entry->parm = parm; __entry->parm64 = parm64; - __entry->who = who; ), - TP_printk("inject%s (vcpu %d): type:%x (%s) parm:%x parm64:%llx", - (__entry->who == 1) ? " (from kernel)" : - (__entry->who == 2) ? " (from user)" : "", + TP_printk("inject (vcpu %d): type:%x (%s) parm:%x parm64:%llx", __entry->id, __entry->inttype, - __print_symbolic(__entry->inttype, kvm_s390_int_type), - __entry->parm, __entry->parm64) + get_irq_name(__entry->inttype), __entry->parm, + __entry->parm64) ); /* @@ -189,8 +194,8 @@ TRACE_EVENT(kvm_s390_deliver_interrupt, TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \ "data:%08llx %016llx", __entry->id, __entry->inttype, - __print_symbolic(__entry->inttype, kvm_s390_int_type), - __entry->data0, __entry->data1) + get_irq_name(__entry->inttype), __entry->data0, + __entry->data1) ); /* diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 16dc42d83f93..246a7eb4b680 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c @@ -26,6 +26,7 @@ void __delay(unsigned long loops) */ asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); } +EXPORT_SYMBOL(__delay); static void __udelay_disabled(unsigned long long usecs) { diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index 4614d415bb58..0d002a746bec 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -370,22 +370,9 @@ long __strncpy_from_user(char *dst, const char __user *src, long size) } EXPORT_SYMBOL(__strncpy_from_user); -/* - * The "old" uaccess variant without mvcos can be enforced with the - * uaccess_primary kernel parameter. This is mainly for debugging purposes. - */ -static int uaccess_primary __initdata; - -static int __init parse_uaccess_pt(char *__unused) -{ - uaccess_primary = 1; - return 0; -} -early_param("uaccess_primary", parse_uaccess_pt); - static int __init uaccess_init(void) { - if (!uaccess_primary && test_facility(27)) + if (test_facility(27)) static_key_slow_inc(&have_mvcos); return 0; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 4c8f5d7f9c23..f985856a538b 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -646,7 +646,7 @@ static void pfault_interrupt(struct ext_code ext_code, return; inc_irq_stat(IRQEXT_PFL); /* Get the token (= pid of the affected task). */ - pid = sizeof(void *) == 4 ? 
param32 : param64; + pid = param64; rcu_read_lock(); tsk = find_task_by_pid_ns(pid, &init_pid_ns); if (tsk) diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 1eb41bb3010c..12bbf0e8478f 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -30,6 +30,9 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, do { pte = *ptep; barrier(); + /* Similar to the PMD case, NUMA hinting must take slow path */ + if (pte_protnone(pte)) + return 0; if ((pte_val(pte) & mask) != 0) return 0; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); @@ -125,6 +128,13 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; if (unlikely(pmd_large(pmd))) { + /* + * NUMA hinting faults need to be handled in the GUP + * slowpath for accounting purposes and so that they + * can be serialised against THP migration. + */ + if (pmd_protnone(pmd)) + return 0; if (!gup_huge_pmd(pmdp, pmd, addr, next, write, pages, nr)) return 0; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 76e873748b56..2963b563621c 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -27,6 +27,7 @@ #include <linux/initrd.h> #include <linux/export.h> #include <linux/gfp.h> +#include <linux/memblock.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -138,7 +139,7 @@ void __init mem_init(void) cpumask_set_cpu(0, mm_cpumask(&init_mm)); atomic_set(&init_mm.context.attach_count, 1); - max_mapnr = max_low_pfn; + set_max_mapnr(max_low_pfn); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); /* Setup guest page hinting */ @@ -170,37 +171,36 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size) { - unsigned long zone_start_pfn, zone_end_pfn, nr_pages; + unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM()); + unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS); unsigned long start_pfn = PFN_DOWN(start); unsigned long size_pages = PFN_DOWN(size); - struct zone *zone; - int rc; + unsigned long nr_pages; + int rc, zone_enum; rc = vmem_add_mapping(start, size); if (rc) return rc; - for_each_zone(zone) { - if (zone_idx(zone) != ZONE_MOVABLE) { - /* Add range within existing zone limits */ - zone_start_pfn = zone->zone_start_pfn; - zone_end_pfn = zone->zone_start_pfn + - zone->spanned_pages; + + while (size_pages > 0) { + if (start_pfn < dma_end_pfn) { + nr_pages = (start_pfn + size_pages > dma_end_pfn) ? + dma_end_pfn - start_pfn : size_pages; + zone_enum = ZONE_DMA; + } else if (start_pfn < normal_end_pfn) { + nr_pages = (start_pfn + size_pages > normal_end_pfn) ? + normal_end_pfn - start_pfn : size_pages; + zone_enum = ZONE_NORMAL; } else { - /* Add remaining range to ZONE_MOVABLE */ - zone_start_pfn = start_pfn; - zone_end_pfn = start_pfn + size_pages; + nr_pages = size_pages; + zone_enum = ZONE_MOVABLE; } - if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn) - continue; - nr_pages = (start_pfn + size_pages > zone_end_pfn) ? 
- zone_end_pfn - start_pfn : size_pages; - rc = __add_pages(nid, zone, start_pfn, nr_pages); + rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum, + start_pfn, size_pages); if (rc) break; start_pfn += nr_pages; size_pages -= nr_pages; - if (!size_pages) - break; } if (rc) vmem_remove_mapping(start, size); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 33082d0d101b..54ef3bc01b43 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -10,11 +10,7 @@ #include <linux/mm.h> #include <linux/swap.h> #include <linux/smp.h> -#include <linux/highmem.h> -#include <linux/pagemap.h> #include <linux/spinlock.h> -#include <linux/module.h> -#include <linux/quicklist.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/swapops.h> @@ -28,14 +24,9 @@ #include <asm/tlbflush.h> #include <asm/mmu_context.h> -#define ALLOC_ORDER 2 -#define FRAG_MASK 0x03 - -int HPAGE_SHIFT; - unsigned long *crst_table_alloc(struct mm_struct *mm) { - struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + struct page *page = alloc_pages(GFP_KERNEL, 2); if (!page) return NULL; @@ -44,7 +35,7 @@ unsigned long *crst_table_alloc(struct mm_struct *mm) void crst_table_free(struct mm_struct *mm, unsigned long *table) { - free_pages((unsigned long) table, ALLOC_ORDER); + free_pages((unsigned long) table, 2); } static void __crst_table_upgrade(void *arg) @@ -178,7 +169,7 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit) INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC); spin_lock_init(&gmap->guest_table_lock); gmap->mm = mm; - page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + page = alloc_pages(GFP_KERNEL, 2); if (!page) goto out_free; page->index = 0; @@ -249,7 +240,7 @@ void gmap_free(struct gmap *gmap) /* Free all segment & region tables. 
*/ list_for_each_entry_safe(page, next, &gmap->crst_list, lru) - __free_pages(page, ALLOC_ORDER); + __free_pages(page, 2); gmap_radix_tree_free(&gmap->guest_to_host); gmap_radix_tree_free(&gmap->host_to_guest); down_write(&gmap->mm->mmap_sem); @@ -289,7 +280,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, unsigned long *new; /* since we dont free the gmap table until gmap_free we can unlock */ - page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + page = alloc_pages(GFP_KERNEL, 2); if (!page) return -ENOMEM; new = (unsigned long *) page_to_phys(page); @@ -304,7 +295,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, } spin_unlock(&gmap->mm->page_table_lock); if (page) - __free_pages(page, ALLOC_ORDER); + __free_pages(page, 2); return 0; } @@ -797,40 +788,6 @@ void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte) } EXPORT_SYMBOL_GPL(gmap_do_ipte_notify); -static inline int page_table_with_pgste(struct page *page) -{ - return atomic_read(&page->_mapcount) == 0; -} - -static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) -{ - struct page *page; - unsigned long *table; - - page = alloc_page(GFP_KERNEL|__GFP_REPEAT); - if (!page) - return NULL; - if (!pgtable_page_ctor(page)) { - __free_page(page); - return NULL; - } - atomic_set(&page->_mapcount, 0); - table = (unsigned long *) page_to_phys(page); - clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); - clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); - return table; -} - -static inline void page_table_free_pgste(unsigned long *table) -{ - struct page *page; - - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - pgtable_page_dtor(page); - atomic_set(&page->_mapcount, -1); - __free_page(page); -} - int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned long key, bool nq) { @@ -959,20 +916,6 @@ __initcall(page_table_register_sysctl); #else /* CONFIG_PGSTE */ -static inline int page_table_with_pgste(struct page *page) -{ - return 0; -} - -static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) -{ - return NULL; -} - -static inline void page_table_free_pgste(unsigned long *table) -{ -} - static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table, unsigned long vmaddr) { @@ -996,44 +939,55 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) */ unsigned long *page_table_alloc(struct mm_struct *mm) { - unsigned long *uninitialized_var(table); - struct page *uninitialized_var(page); + unsigned long *table; + struct page *page; unsigned int mask, bit; - if (mm_alloc_pgste(mm)) - return page_table_alloc_pgste(mm); - /* Allocate fragments of a 4K page as 1K/2K page table */ - spin_lock_bh(&mm->context.list_lock); - mask = FRAG_MASK; - if (!list_empty(&mm->context.pgtable_list)) { - page = list_first_entry(&mm->context.pgtable_list, - struct page, lru); - table = (unsigned long *) page_to_phys(page); - mask = atomic_read(&page->_mapcount); - mask = mask | (mask >> 4); - } - if ((mask & FRAG_MASK) == FRAG_MASK) { - spin_unlock_bh(&mm->context.list_lock); - page = alloc_page(GFP_KERNEL|__GFP_REPEAT); - if (!page) - return NULL; - if (!pgtable_page_ctor(page)) { - __free_page(page); - return NULL; + /* Try to get a fragment of a 4K page as a 2K page table */ + if (!mm_alloc_pgste(mm)) { + table = NULL; + spin_lock_bh(&mm->context.list_lock); + if (!list_empty(&mm->context.pgtable_list)) { + page = list_first_entry(&mm->context.pgtable_list, + struct page, lru); + mask = 
atomic_read(&page->_mapcount); + mask = (mask | (mask >> 4)) & 3; + if (mask != 3) { + table = (unsigned long *) page_to_phys(page); + bit = mask & 1; /* =1 -> second 2K */ + if (bit) + table += PTRS_PER_PTE; + atomic_xor_bits(&page->_mapcount, 1U << bit); + list_del(&page->lru); + } } + spin_unlock_bh(&mm->context.list_lock); + if (table) + return table; + } + /* Allocate a fresh page */ + page = alloc_page(GFP_KERNEL|__GFP_REPEAT); + if (!page) + return NULL; + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } + /* Initialize page table */ + table = (unsigned long *) page_to_phys(page); + if (mm_alloc_pgste(mm)) { + /* Return 4K page table with PGSTEs */ + atomic_set(&page->_mapcount, 3); + clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); + clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); + } else { + /* Return the first 2K fragment of the page */ atomic_set(&page->_mapcount, 1); - table = (unsigned long *) page_to_phys(page); clear_table(table, _PAGE_INVALID, PAGE_SIZE); spin_lock_bh(&mm->context.list_lock); list_add(&page->lru, &mm->context.pgtable_list); - } else { - for (bit = 1; mask & bit; bit <<= 1) - table += PTRS_PER_PTE; - mask = atomic_xor_bits(&page->_mapcount, bit); - if ((mask & FRAG_MASK) == FRAG_MASK) - list_del(&page->lru); + spin_unlock_bh(&mm->context.list_lock); } - spin_unlock_bh(&mm->context.list_lock); return table; } @@ -1043,37 +997,23 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) unsigned int bit, mask; page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - if (page_table_with_pgste(page)) - return page_table_free_pgste(table); - /* Free 1K/2K page table fragment of a 4K page */ - bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); - spin_lock_bh(&mm->context.list_lock); - if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) - list_del(&page->lru); - mask = atomic_xor_bits(&page->_mapcount, bit); - if (mask & FRAG_MASK) - list_add(&page->lru, &mm->context.pgtable_list); - spin_unlock_bh(&mm->context.list_lock); - if (mask == 0) { - pgtable_page_dtor(page); - atomic_set(&page->_mapcount, -1); - __free_page(page); + if (!mm_alloc_pgste(mm)) { + /* Free 2K page table fragment of a 4K page */ + bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); + spin_lock_bh(&mm->context.list_lock); + mask = atomic_xor_bits(&page->_mapcount, 1U << bit); + if (mask & 3) + list_add(&page->lru, &mm->context.pgtable_list); + else + list_del(&page->lru); + spin_unlock_bh(&mm->context.list_lock); + if (mask != 0) + return; } -} - -static void __page_table_free_rcu(void *table, unsigned bit) -{ - struct page *page; - if (bit == FRAG_MASK) - return page_table_free_pgste(table); - /* Free 1K/2K page table fragment of a 4K page */ - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - if (atomic_xor_bits(&page->_mapcount, bit) == 0) { - pgtable_page_dtor(page); - atomic_set(&page->_mapcount, -1); - __free_page(page); - } + pgtable_page_dtor(page); + atomic_set(&page->_mapcount, -1); + __free_page(page); } void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, @@ -1085,34 +1025,45 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, mm = tlb->mm; page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - if (page_table_with_pgste(page)) { + if (mm_alloc_pgste(mm)) { gmap_unlink(mm, table, vmaddr); - table = (unsigned long *) (__pa(table) | FRAG_MASK); + table = (unsigned long *) (__pa(table) | 3); tlb_remove_table(tlb, table); return; } - bit = 1 << ((__pa(table) & ~PAGE_MASK) / 
(PTRS_PER_PTE*sizeof(pte_t))); + bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.list_lock); - if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) - list_del(&page->lru); - mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4)); - if (mask & FRAG_MASK) + mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit); + if (mask & 3) list_add_tail(&page->lru, &mm->context.pgtable_list); + else + list_del(&page->lru); spin_unlock_bh(&mm->context.list_lock); - table = (unsigned long *) (__pa(table) | (bit << 4)); + table = (unsigned long *) (__pa(table) | (1U << bit)); tlb_remove_table(tlb, table); } static void __tlb_remove_table(void *_table) { - const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK; - void *table = (void *)((unsigned long) _table & ~mask); - unsigned type = (unsigned long) _table & mask; - - if (type) - __page_table_free_rcu(table, type); - else - free_pages((unsigned long) table, ALLOC_ORDER); + unsigned int mask = (unsigned long) _table & 3; + void *table = (void *)((unsigned long) _table ^ mask); + struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + + switch (mask) { + case 0: /* pmd or pud */ + free_pages((unsigned long) table, 2); + break; + case 1: /* lower 2K of a 4K page table */ + case 2: /* higher 2K of a 4K page table */ + if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0) + break; + /* fallthrough */ + case 3: /* 4K page table with pgstes */ + pgtable_page_dtor(page); + atomic_set(&page->_mapcount, -1); + __free_page(page); + break; + } } static void tlb_remove_table_smp_sync(void *arg) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index fee782acc2ee..8d2e5165865f 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit) EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, BPF_REG_1, offsetof(struct sk_buff, data)); } - /* BPF compatibility: clear A (%b7) and X (%b8) registers */ - if (REG_SEEN(BPF_REG_7)) - /* lghi %b7,0 */ - EMIT4_IMM(0xa7090000, BPF_REG_7, 0); - if (REG_SEEN(BPF_REG_8)) - /* lghi %b8,0 */ - EMIT4_IMM(0xa7090000, BPF_REG_8, 0); + /* BPF compatibility: clear A (%b0) and X (%b7) registers */ + if (REG_SEEN(BPF_REG_A)) + /* lghi %ba,0 */ + EMIT4_IMM(0xa7090000, BPF_REG_A, 0); + if (REG_SEEN(BPF_REG_X)) + /* lghi %bx,0 */ + EMIT4_IMM(0xa7090000, BPF_REG_X, 0); } /* diff --git a/arch/s390/numa/Makefile b/arch/s390/numa/Makefile new file mode 100644 index 000000000000..f94ecaffa71b --- /dev/null +++ b/arch/s390/numa/Makefile @@ -0,0 +1,3 @@ +obj-y += numa.o +obj-y += toptree.o +obj-$(CONFIG_NUMA_EMU) += mode_emu.o diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c new file mode 100644 index 000000000000..7de4e2f780d7 --- /dev/null +++ b/arch/s390/numa/mode_emu.c @@ -0,0 +1,530 @@ +/* + * NUMA support for s390 + * + * NUMA emulation (aka fake NUMA) distributes the available memory to nodes + * without using real topology information about the physical memory of the + * machine. + * + * It distributes the available CPUs to nodes while respecting the original + * machine topology information. This is done by trying to avoid to separate + * CPUs which reside on the same book or even on the same MC. + * + * Because the current Linux scheduler code requires a stable cpu to node + * mapping, cores are pinned to nodes when the first CPU thread is set online. + * + * Copyright IBM Corp. 
2015 + */ + +#define KMSG_COMPONENT "numa_emu" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/cpumask.h> +#include <linux/memblock.h> +#include <linux/node.h> +#include <linux/memory.h> +#include <linux/slab.h> +#include <asm/smp.h> +#include <asm/topology.h> +#include "numa_mode.h" +#include "toptree.h" + +/* Distances between the different system components */ +#define DIST_EMPTY 0 +#define DIST_CORE 1 +#define DIST_MC 2 +#define DIST_BOOK 3 +#define DIST_MAX 4 + +/* Node distance reported to common code */ +#define EMU_NODE_DIST 10 + +/* Node ID for free (not yet pinned) cores */ +#define NODE_ID_FREE -1 + +/* Different levels of toptree */ +enum toptree_level {CORE, MC, BOOK, NODE, TOPOLOGY}; + +/* The two toptree IDs */ +enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA}; + +/* Number of NUMA nodes */ +static int emu_nodes = 1; +/* NUMA stripe size */ +static unsigned long emu_size; + +/* + * Node to core pinning information updates are protected by + * "sched_domains_mutex". + */ +static struct { + s32 to_node_id[CONFIG_NR_CPUS]; /* Pinned core to node mapping */ + int total; /* Total number of pinned cores */ + int per_node_target; /* Cores per node without extra cores */ + int per_node[MAX_NUMNODES]; /* Number of cores pinned to node */ +} *emu_cores; + +/* + * Pin a core to a node + */ +static void pin_core_to_node(int core_id, int node_id) +{ + if (emu_cores->to_node_id[core_id] == NODE_ID_FREE) { + emu_cores->per_node[node_id]++; + emu_cores->to_node_id[core_id] = node_id; + emu_cores->total++; + } else { + WARN_ON(emu_cores->to_node_id[core_id] != node_id); + } +} + +/* + * Number of pinned cores of a node + */ +static int cores_pinned(struct toptree *node) +{ + return emu_cores->per_node[node->id]; +} + +/* + * ID of the node where the core is pinned (or NODE_ID_FREE) + */ +static int core_pinned_to_node_id(struct toptree *core) +{ + return emu_cores->to_node_id[core->id]; +} + +/* + * Number of cores in the tree that are not yet pinned + */ +static int cores_free(struct toptree *tree) +{ + struct toptree *core; + int count = 0; + + toptree_for_each(core, tree, CORE) { + if (core_pinned_to_node_id(core) == NODE_ID_FREE) + count++; + } + return count; +} + +/* + * Return node of core + */ +static struct toptree *core_node(struct toptree *core) +{ + return core->parent->parent->parent; +} + +/* + * Return book of core + */ +static struct toptree *core_book(struct toptree *core) +{ + return core->parent->parent; +} + +/* + * Return mc of core + */ +static struct toptree *core_mc(struct toptree *core) +{ + return core->parent; +} + +/* + * Distance between two cores + */ +static int dist_core_to_core(struct toptree *core1, struct toptree *core2) +{ + if (core_book(core1)->id != core_book(core2)->id) + return DIST_BOOK; + if (core_mc(core1)->id != core_mc(core2)->id) + return DIST_MC; + /* Same core or sibling on same MC */ + return DIST_CORE; +} + +/* + * Distance of a node to a core + */ +static int dist_node_to_core(struct toptree *node, struct toptree *core) +{ + struct toptree *core_node; + int dist_min = DIST_MAX; + + toptree_for_each(core_node, node, CORE) + dist_min = min(dist_min, dist_core_to_core(core_node, core)); + return dist_min == DIST_MAX ? DIST_EMPTY : dist_min; +} + +/* + * Unify will delete empty nodes, therefore recreate nodes. 
+ */ +static void toptree_unify_tree(struct toptree *tree) +{ + int nid; + + toptree_unify(tree); + for (nid = 0; nid < emu_nodes; nid++) + toptree_get_child(tree, nid); +} + +/* + * Find the best/nearest node for a given core and ensure that no node + * gets more than "emu_cores->per_node_target + extra" cores. + */ +static struct toptree *node_for_core(struct toptree *numa, struct toptree *core, + int extra) +{ + struct toptree *node, *node_best = NULL; + int dist_cur, dist_best, cores_target; + + cores_target = emu_cores->per_node_target + extra; + dist_best = DIST_MAX; + node_best = NULL; + toptree_for_each(node, numa, NODE) { + /* Already pinned cores must use their nodes */ + if (core_pinned_to_node_id(core) == node->id) { + node_best = node; + break; + } + /* Skip nodes that already have enough cores */ + if (cores_pinned(node) >= cores_target) + continue; + dist_cur = dist_node_to_core(node, core); + if (dist_cur < dist_best) { + dist_best = dist_cur; + node_best = node; + } + } + return node_best; +} + +/* + * Find the best node for each core with respect to "extra" core count + */ +static void toptree_to_numa_single(struct toptree *numa, struct toptree *phys, + int extra) +{ + struct toptree *node, *core, *tmp; + + toptree_for_each_safe(core, tmp, phys, CORE) { + node = node_for_core(numa, core, extra); + if (!node) + return; + toptree_move(core, node); + pin_core_to_node(core->id, node->id); + } +} + +/* + * Move structures of given level to specified NUMA node + */ +static void move_level_to_numa_node(struct toptree *node, struct toptree *phys, + enum toptree_level level, bool perfect) +{ + int cores_free, cores_target = emu_cores->per_node_target; + struct toptree *cur, *tmp; + + toptree_for_each_safe(cur, tmp, phys, level) { + cores_free = cores_target - toptree_count(node, CORE); + if (perfect) { + if (cores_free == toptree_count(cur, CORE)) + toptree_move(cur, node); + } else { + if (cores_free >= toptree_count(cur, CORE)) + toptree_move(cur, node); + } + } +} + +/* + * Move structures of a given level to NUMA nodes. If "perfect" is specified + * move only perfectly fitting structures. Otherwise move also smaller + * than needed structures. 
+ */ +static void move_level_to_numa(struct toptree *numa, struct toptree *phys, + enum toptree_level level, bool perfect) +{ + struct toptree *node; + + toptree_for_each(node, numa, NODE) + move_level_to_numa_node(node, phys, level, perfect); +} + +/* + * For the first run try to move the big structures + */ +static void toptree_to_numa_first(struct toptree *numa, struct toptree *phys) +{ + struct toptree *core; + + /* Always try to move perfectly fitting structures first */ + move_level_to_numa(numa, phys, BOOK, true); + move_level_to_numa(numa, phys, BOOK, false); + move_level_to_numa(numa, phys, MC, true); + move_level_to_numa(numa, phys, MC, false); + /* Now pin all the moved cores */ + toptree_for_each(core, numa, CORE) + pin_core_to_node(core->id, core_node(core)->id); +} + +/* + * Allocate new topology and create required nodes + */ +static struct toptree *toptree_new(int id, int nodes) +{ + struct toptree *tree; + int nid; + + tree = toptree_alloc(TOPOLOGY, id); + if (!tree) + goto fail; + for (nid = 0; nid < nodes; nid++) { + if (!toptree_get_child(tree, nid)) + goto fail; + } + return tree; +fail: + panic("NUMA emulation could not allocate topology"); +} + +/* + * Allocate and initialize core to node mapping + */ +static void create_core_to_node_map(void) +{ + int i; + + emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL); + if (emu_cores == NULL) + panic("Could not allocate cores to node memory"); + for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++) + emu_cores->to_node_id[i] = NODE_ID_FREE; +} + +/* + * Move cores from physical topology into NUMA target topology + * and try to keep as much of the physical topology as possible. + */ +static struct toptree *toptree_to_numa(struct toptree *phys) +{ + static int first = 1; + struct toptree *numa; + int cores_total; + + cores_total = emu_cores->total + cores_free(phys); + emu_cores->per_node_target = cores_total / emu_nodes; + numa = toptree_new(TOPTREE_ID_NUMA, emu_nodes); + if (first) { + toptree_to_numa_first(numa, phys); + first = 0; + } + toptree_to_numa_single(numa, phys, 0); + toptree_to_numa_single(numa, phys, 1); + toptree_unify_tree(numa); + + WARN_ON(cpumask_weight(&phys->mask)); + return numa; +} + +/* + * Create a toptree out of the physical topology that we got from the hypervisor + */ +static struct toptree *toptree_from_topology(void) +{ + struct toptree *phys, *node, *book, *mc, *core; + struct cpu_topology_s390 *top; + int cpu; + + phys = toptree_new(TOPTREE_ID_PHYS, 1); + + for_each_online_cpu(cpu) { + top = &per_cpu(cpu_topology, cpu); + node = toptree_get_child(phys, 0); + book = toptree_get_child(node, top->book_id); + mc = toptree_get_child(book, top->socket_id); + core = toptree_get_child(mc, top->core_id); + if (!book || !mc || !core) + panic("NUMA emulation could not allocate memory"); + cpumask_set_cpu(cpu, &core->mask); + toptree_update_mask(mc); + } + return phys; +} + +/* + * Add toptree core to topology and create correct CPU masks + */ +static void topology_add_core(struct toptree *core) +{ + struct cpu_topology_s390 *top; + int cpu; + + for_each_cpu(cpu, &core->mask) { + top = &per_cpu(cpu_topology, cpu); + cpumask_copy(&top->thread_mask, &core->mask); + cpumask_copy(&top->core_mask, &core_mc(core)->mask); + cpumask_copy(&top->book_mask, &core_book(core)->mask); + cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]); + top->node_id = core_node(core)->id; + } +} + +/* + * Apply toptree to topology and create CPU masks + */ +static void toptree_to_topology(struct toptree *numa) +{ + 
struct toptree *core; + int i; + + /* Clear all node masks */ + for (i = 0; i < MAX_NUMNODES; i++) + cpumask_clear(node_to_cpumask_map[i]); + + /* Rebuild all masks */ + toptree_for_each(core, numa, CORE) + topology_add_core(core); +} + +/* + * Show the node to core mapping + */ +static void print_node_to_core_map(void) +{ + int nid, cid; + + if (!numa_debug_enabled) + return; + printk(KERN_DEBUG "NUMA node to core mapping\n"); + for (nid = 0; nid < emu_nodes; nid++) { + printk(KERN_DEBUG " node %3d: ", nid); + for (cid = 0; cid < ARRAY_SIZE(emu_cores->to_node_id); cid++) { + if (emu_cores->to_node_id[cid] == nid) + printk(KERN_CONT "%d ", cid); + } + printk(KERN_CONT "\n"); + } +} + +/* + * Transfer physical topology into a NUMA topology and modify CPU masks + * according to the NUMA topology. + * + * Must be called with "sched_domains_mutex" lock held. + */ +static void emu_update_cpu_topology(void) +{ + struct toptree *phys, *numa; + + if (emu_cores == NULL) + create_core_to_node_map(); + phys = toptree_from_topology(); + numa = toptree_to_numa(phys); + toptree_free(phys); + toptree_to_topology(numa); + toptree_free(numa); + print_node_to_core_map(); +} + +/* + * If emu_size is not set, use CONFIG_EMU_SIZE. Then round to minimum + * alignment (needed for memory hotplug). + */ +static unsigned long emu_setup_size_adjust(unsigned long size) +{ + size = size ? : CONFIG_EMU_SIZE; + size = roundup(size, memory_block_size_bytes()); + return size; +} + +/* + * If we have not enough memory for the specified nodes, reduce the node count. + */ +static int emu_setup_nodes_adjust(int nodes) +{ + int nodes_max; + + nodes_max = memblock.memory.total_size / emu_size; + nodes_max = max(nodes_max, 1); + if (nodes_max >= nodes) + return nodes; + pr_warn("Not enough memory for %d nodes, reducing node count\n", nodes); + return nodes_max; +} + +/* + * Early emu setup + */ +static void emu_setup(void) +{ + emu_size = emu_setup_size_adjust(emu_size); + emu_nodes = emu_setup_nodes_adjust(emu_nodes); + pr_info("Creating %d nodes with memory stripe size %ld MB\n", + emu_nodes, emu_size >> 20); +} + +/* + * Return node id for given page number + */ +static int emu_pfn_to_nid(unsigned long pfn) +{ + return (pfn / (emu_size >> PAGE_SHIFT)) % emu_nodes; +} + +/* + * Return stripe size + */ +static unsigned long emu_align(void) +{ + return emu_size; +} + +/* + * Return distance between two nodes + */ +static int emu_distance(int node1, int node2) +{ + return (node1 != node2) * EMU_NODE_DIST; +} + +/* + * Define callbacks for generic s390 NUMA infrastructure + */ +const struct numa_mode numa_mode_emu = { + .name = "emu", + .setup = emu_setup, + .update_cpu_topology = emu_update_cpu_topology, + .__pfn_to_nid = emu_pfn_to_nid, + .align = emu_align, + .distance = emu_distance, +}; + +/* + * Kernel parameter: emu_nodes=<n> + */ +static int __init early_parse_emu_nodes(char *p) +{ + int count; + + if (kstrtoint(p, 0, &count) != 0 || count <= 0) + return 0; + if (count <= 0) + return 0; + emu_nodes = min(count, MAX_NUMNODES); + return 0; +} +early_param("emu_nodes", early_parse_emu_nodes); + +/* + * Kernel parameter: emu_size=[<n>[k|M|G|T]] + */ +static int __init early_parse_emu_size(char *p) +{ + emu_size = memparse(p, NULL); + return 0; +} +early_param("emu_size", early_parse_emu_size); diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c new file mode 100644 index 000000000000..09b1d2355bd9 --- /dev/null +++ b/arch/s390/numa/numa.c @@ -0,0 +1,184 @@ +/* + * NUMA support for s390 + * + * Implement NUMA core code. 
+ * + * Copyright IBM Corp. 2015 + */ + +#define KMSG_COMPONENT "numa" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/mmzone.h> +#include <linux/cpumask.h> +#include <linux/bootmem.h> +#include <linux/memblock.h> +#include <linux/slab.h> +#include <linux/node.h> + +#include <asm/numa.h> +#include "numa_mode.h" + +pg_data_t *node_data[MAX_NUMNODES]; +EXPORT_SYMBOL(node_data); + +cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +EXPORT_SYMBOL(node_to_cpumask_map); + +const struct numa_mode numa_mode_plain = { + .name = "plain", +}; + +static const struct numa_mode *mode = &numa_mode_plain; + +int numa_pfn_to_nid(unsigned long pfn) +{ + return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0; +} + +void numa_update_cpu_topology(void) +{ + if (mode->update_cpu_topology) + mode->update_cpu_topology(); +} + +int __node_distance(int a, int b) +{ + return mode->distance ? mode->distance(a, b) : 0; +} + +int numa_debug_enabled; + +/* + * alloc_node_data() - Allocate node data + */ +static __init pg_data_t *alloc_node_data(void) +{ + pg_data_t *res; + + res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 1); + if (!res) + panic("Could not allocate memory for node data!\n"); + memset(res, 0, sizeof(pg_data_t)); + return res; +} + +/* + * numa_setup_memory() - Assign bootmem to nodes + * + * The memory is first added to memblock without any respect to nodes. + * This is fixed before remaining memblock memory is handed over to the + * buddy allocator. + * An important side effect is that large bootmem allocations might easily + * cross node boundaries, which can be needed for large allocations with + * smaller memory stripes in each node (i.e. when using NUMA emulation). + * + * Memory defines nodes: + * Therefore this routine also sets the nodes online with memory. + */ +static void __init numa_setup_memory(void) +{ + unsigned long cur_base, align, end_of_dram; + int nid = 0; + + end_of_dram = memblock_end_of_DRAM(); + align = mode->align ? mode->align() : ULONG_MAX; + + /* + * Step through all available memory and assign it to the nodes + * indicated by the mode implementation. + * All nodes which are seen here will be set online. + */ + cur_base = 0; + do { + nid = numa_pfn_to_nid(PFN_DOWN(cur_base)); + node_set_online(nid); + memblock_set_node(cur_base, align, &memblock.memory, nid); + cur_base += align; + } while (cur_base < end_of_dram); + + /* Allocate and fill out node_data */ + for (nid = 0; nid < MAX_NUMNODES; nid++) + NODE_DATA(nid) = alloc_node_data(); + + for_each_online_node(nid) { + unsigned long start_pfn, end_pfn; + unsigned long t_start, t_end; + int i; + + start_pfn = ULONG_MAX; + end_pfn = 0; + for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) { + if (t_start < start_pfn) + start_pfn = t_start; + if (t_end > end_pfn) + end_pfn = t_end; + } + NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; + NODE_DATA(nid)->node_id = nid; + } +} + +/* + * numa_setup() - Earliest initialization + * + * Assign the mode and call the mode's setup routine. + */ +void __init numa_setup(void) +{ + pr_info("NUMA mode: %s\n", mode->name); + if (mode->setup) + mode->setup(); + numa_setup_memory(); + memblock_dump_all(); +} + + +/* + * numa_init_early() - Initialization initcall + * + * This runs when only one CPU is online and before the first + * topology update is called for by the scheduler. + */ +static int __init numa_init_early(void) +{ + /* Attach all possible CPUs to node 0 for now. 
*/ + cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask); + return 0; +} +early_initcall(numa_init_early); + +/* + * numa_init_late() - Initialization initcall + * + * Register NUMA nodes. + */ +static int __init numa_init_late(void) +{ + int nid; + + for_each_online_node(nid) + register_one_node(nid); + return 0; +} +device_initcall(numa_init_late); + +static int __init parse_debug(char *parm) +{ + numa_debug_enabled = 1; + return 0; +} +early_param("numa_debug", parse_debug); + +static int __init parse_numa(char *parm) +{ + if (strcmp(parm, numa_mode_plain.name) == 0) + mode = &numa_mode_plain; +#ifdef CONFIG_NUMA_EMU + if (strcmp(parm, numa_mode_emu.name) == 0) + mode = &numa_mode_emu; +#endif + return 0; +} +early_param("numa", parse_numa); diff --git a/arch/s390/numa/numa_mode.h b/arch/s390/numa/numa_mode.h new file mode 100644 index 000000000000..08953b0b1c7f --- /dev/null +++ b/arch/s390/numa/numa_mode.h @@ -0,0 +1,24 @@ +/* + * NUMA support for s390 + * + * Define declarations used for communication between NUMA mode + * implementations and NUMA core functionality. + * + * Copyright IBM Corp. 2015 + */ +#ifndef __S390_NUMA_MODE_H +#define __S390_NUMA_MODE_H + +struct numa_mode { + char *name; /* Name of mode */ + void (*setup)(void); /* Initizalize mode */ + void (*update_cpu_topology)(void); /* Called by topology code */ + int (*__pfn_to_nid)(unsigned long pfn); /* PFN to node ID */ + unsigned long (*align)(void); /* Minimum node alignment */ + int (*distance)(int a, int b); /* Distance between two nodes */ +}; + +extern const struct numa_mode numa_mode_plain; +extern const struct numa_mode numa_mode_emu; + +#endif /* __S390_NUMA_MODE_H */ diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c new file mode 100644 index 000000000000..902d350d859a --- /dev/null +++ b/arch/s390/numa/toptree.c @@ -0,0 +1,342 @@ +/* + * NUMA support for s390 + * + * A tree structure used for machine topology mangling + * + * Copyright IBM Corp. 2015 + */ + +#include <linux/kernel.h> +#include <linux/cpumask.h> +#include <linux/list.h> +#include <linux/list_sort.h> +#include <linux/slab.h> +#include <asm/numa.h> + +#include "toptree.h" + +/** + * toptree_alloc - Allocate and initialize a new tree node. + * @level: The node's vertical level; level 0 contains the leaves. + * @id: ID number, explicitly not unique beyond scope of node's siblings + * + * Allocate a new tree node and initialize it. + * + * RETURNS: + * Pointer to the new tree node or NULL on error + */ +struct toptree *toptree_alloc(int level, int id) +{ + struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL); + + if (!res) + return res; + + INIT_LIST_HEAD(&res->children); + INIT_LIST_HEAD(&res->sibling); + cpumask_clear(&res->mask); + res->level = level; + res->id = id; + return res; +} + +/** + * toptree_remove - Remove a tree node from a tree + * @cand: Pointer to the node to remove + * + * The node is detached from its parent node. The parent node's + * masks will be updated to reflect the loss of the child. + */ +static void toptree_remove(struct toptree *cand) +{ + struct toptree *oldparent; + + list_del_init(&cand->sibling); + oldparent = cand->parent; + cand->parent = NULL; + toptree_update_mask(oldparent); +} + +/** + * toptree_free - discard a tree node + * @cand: Pointer to the tree node to discard + * + * Checks if @cand is attached to a parent node. Detaches it + * cleanly using toptree_remove. Possible children are freed + * recursively. In the end @cand itself is freed. 
+ */ +void toptree_free(struct toptree *cand) +{ + struct toptree *child, *tmp; + + if (cand->parent) + toptree_remove(cand); + toptree_for_each_child_safe(child, tmp, cand) + toptree_free(child); + kfree(cand); +} + +/** + * toptree_update_mask - Update node bitmasks + * @cand: Pointer to a tree node + * + * The node's cpumask will be updated by combining all children's + * masks. Then toptree_update_mask is called recursively for the + * parent if applicable. + * + * NOTE: + * This must not be called on leaves. If called on a leaf, its + * CPU mask is cleared and lost. + */ +void toptree_update_mask(struct toptree *cand) +{ + struct toptree *child; + + cpumask_clear(&cand->mask); + list_for_each_entry(child, &cand->children, sibling) + cpumask_or(&cand->mask, &cand->mask, &child->mask); + if (cand->parent) + toptree_update_mask(cand->parent); +} + +/** + * toptree_insert - Insert a tree node into tree + * @cand: Pointer to the node to insert + * @target: Pointer to the node to which @cand will added as a child + * + * Insert a tree node into a tree. Masks will be updated automatically. + * + * RETURNS: + * 0 on success, -1 if NULL is passed as argument or the node levels + * don't fit. + */ +static int toptree_insert(struct toptree *cand, struct toptree *target) +{ + if (!cand || !target) + return -1; + if (target->level != (cand->level + 1)) + return -1; + list_add_tail(&cand->sibling, &target->children); + cand->parent = target; + toptree_update_mask(target); + return 0; +} + +/** + * toptree_move_children - Move all child nodes of a node to a new place + * @cand: Pointer to the node whose children are to be moved + * @target: Pointer to the node to which @cand's children will be attached + * + * Take all child nodes of @cand and move them using toptree_move. + */ +static void toptree_move_children(struct toptree *cand, struct toptree *target) +{ + struct toptree *child, *tmp; + + toptree_for_each_child_safe(child, tmp, cand) + toptree_move(child, target); +} + +/** + * toptree_unify - Merge children with same ID + * @cand: Pointer to node whose direct children should be made unique + * + * When mangling the tree it is possible that a node has two or more children + * which have the same ID. This routine merges these children into one and + * moves all children of the merged nodes into the unified node. + */ +void toptree_unify(struct toptree *cand) +{ + struct toptree *child, *tmp, *cand_copy; + + /* Threads cannot be split, cores are not split */ + if (cand->level < 2) + return; + + cand_copy = toptree_alloc(cand->level, 0); + toptree_for_each_child_safe(child, tmp, cand) { + struct toptree *tmpchild; + + if (!cpumask_empty(&child->mask)) { + tmpchild = toptree_get_child(cand_copy, child->id); + toptree_move_children(child, tmpchild); + } + toptree_free(child); + } + toptree_move_children(cand_copy, cand); + toptree_free(cand_copy); + + toptree_for_each_child(child, cand) + toptree_unify(child); +} + +/** + * toptree_move - Move a node to another context + * @cand: Pointer to the node to move + * @target: Pointer to the node where @cand should go + * + * In the easiest case @cand is exactly on the level below @target + * and will be immediately moved to the target. + * + * If @target's level is not the direct parent level of @cand, + * nodes for the missing levels are created and put between + * @cand and @target. The "stacking" nodes' IDs are taken from + * @cand's parents. 
+ * + * After this it is likely to have redundant nodes in the tree + * which are addressed by means of toptree_unify. + */ +void toptree_move(struct toptree *cand, struct toptree *target) +{ + struct toptree *stack_target, *real_insert_point, *ptr, *tmp; + + if (cand->level + 1 == target->level) { + toptree_remove(cand); + toptree_insert(cand, target); + return; + } + + real_insert_point = NULL; + ptr = cand; + stack_target = NULL; + + do { + tmp = stack_target; + stack_target = toptree_alloc(ptr->level + 1, + ptr->parent->id); + toptree_insert(tmp, stack_target); + if (!real_insert_point) + real_insert_point = stack_target; + ptr = ptr->parent; + } while (stack_target->level < (target->level - 1)); + + toptree_remove(cand); + toptree_insert(cand, real_insert_point); + toptree_insert(stack_target, target); +} + +/** + * toptree_get_child - Access a tree node's child by its ID + * @cand: Pointer to tree node whose child is to access + * @id: The desired child's ID + * + * @cand's children are searched for a child with matching ID. + * If no match can be found, a new child with the desired ID + * is created and returned. + */ +struct toptree *toptree_get_child(struct toptree *cand, int id) +{ + struct toptree *child; + + toptree_for_each_child(child, cand) + if (child->id == id) + return child; + child = toptree_alloc(cand->level-1, id); + toptree_insert(child, cand); + return child; +} + +/** + * toptree_first - Find the first descendant on specified level + * @context: Pointer to tree node whose descendants are to be used + * @level: The level of interest + * + * RETURNS: + * @context's first descendant on the specified level, or NULL + * if there is no matching descendant + */ +struct toptree *toptree_first(struct toptree *context, int level) +{ + struct toptree *child, *tmp; + + if (context->level == level) + return context; + + if (!list_empty(&context->children)) { + list_for_each_entry(child, &context->children, sibling) { + tmp = toptree_first(child, level); + if (tmp) + return tmp; + } + } + return NULL; +} + +/** + * toptree_next_sibling - Return next sibling + * @cur: Pointer to a tree node + * + * RETURNS: + * If @cur has a parent and is not the last in the parent's children list, + * the next sibling is returned. Or NULL when there are no siblings left. + */ +static struct toptree *toptree_next_sibling(struct toptree *cur) +{ + if (cur->parent == NULL) + return NULL; + + if (cur == list_last_entry(&cur->parent->children, + struct toptree, sibling)) + return NULL; + return (struct toptree *) list_next_entry(cur, sibling); +} + +/** + * toptree_next - Tree traversal function + * @cur: Pointer to current element + * @context: Pointer to the root node of the tree or subtree to + * be traversed. + * @level: The level of interest. + * + * RETURNS: + * Pointer to the next node on level @level + * or NULL when there is no next node. 
+ */ +struct toptree *toptree_next(struct toptree *cur, struct toptree *context, + int level) +{ + struct toptree *cur_context, *tmp; + + if (!cur) + return NULL; + + if (context->level == level) + return NULL; + + tmp = toptree_next_sibling(cur); + if (tmp != NULL) + return tmp; + + cur_context = cur; + while (cur_context->level < context->level - 1) { + /* Step up */ + cur_context = cur_context->parent; + /* Step aside */ + tmp = toptree_next_sibling(cur_context); + if (tmp != NULL) { + /* Step down */ + tmp = toptree_first(tmp, level); + if (tmp != NULL) + return tmp; + } + } + return NULL; +} + +/** + * toptree_count - Count descendants on specified level + * @context: Pointer to node whose descendants are to be considered + * @level: Only descendants on the specified level will be counted + * + * RETURNS: + * Number of descendants on the specified level + */ +int toptree_count(struct toptree *context, int level) +{ + struct toptree *cur; + int cnt = 0; + + toptree_for_each(cur, context, level) + cnt++; + return cnt; +} diff --git a/arch/s390/numa/toptree.h b/arch/s390/numa/toptree.h new file mode 100644 index 000000000000..bdf502027af4 --- /dev/null +++ b/arch/s390/numa/toptree.h @@ -0,0 +1,60 @@ +/* + * NUMA support for s390 + * + * A tree structure used for machine topology mangling + * + * Copyright IBM Corp. 2015 + */ +#ifndef S390_TOPTREE_H +#define S390_TOPTREE_H + +#include <linux/cpumask.h> +#include <linux/list.h> + +struct toptree { + int level; + int id; + cpumask_t mask; + struct toptree *parent; + struct list_head sibling; + struct list_head children; +}; + +struct toptree *toptree_alloc(int level, int id); +void toptree_free(struct toptree *cand); +void toptree_update_mask(struct toptree *cand); +void toptree_unify(struct toptree *cand); +struct toptree *toptree_get_child(struct toptree *cand, int id); +void toptree_move(struct toptree *cand, struct toptree *target); +int toptree_count(struct toptree *context, int level); + +struct toptree *toptree_first(struct toptree *context, int level); +struct toptree *toptree_next(struct toptree *cur, struct toptree *context, + int level); + +#define toptree_for_each_child(child, ptree) \ + list_for_each_entry(child, &ptree->children, sibling) + +#define toptree_for_each_child_safe(child, ptmp, ptree) \ + list_for_each_entry_safe(child, ptmp, &ptree->children, sibling) + +#define toptree_is_last(ptree) \ + ((ptree->parent == NULL) || \ + (ptree->parent->children.prev == &ptree->sibling)) + +#define toptree_for_each(ptree, cont, ttype) \ + for (ptree = toptree_first(cont, ttype); \ + ptree != NULL; \ + ptree = toptree_next(ptree, cont, ttype)) + +#define toptree_for_each_safe(ptree, tmp, cont, ttype) \ + for (ptree = toptree_first(cont, ttype), \ + tmp = toptree_next(ptree, cont, ttype); \ + ptree != NULL; \ + ptree = tmp, \ + tmp = toptree_next(ptree, cont, ttype)) + +#define toptree_for_each_sibling(ptree, start) \ + toptree_for_each(ptree, start->parent, start->level) + +#endif /* S390_TOPTREE_H */ diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index bc927a09a172..9cfa2ffaa9d6 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -16,6 +16,7 @@ #include <linux/fs.h> #include <linux/module.h> #include <asm/processor.h> +#include <asm/perf_event.h> #include "../../../drivers/oprofile/oprof.h" diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 598f023cf8a6..17c04c7269e7 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -76,11 +76,6 @@ EXPORT_SYMBOL_GPL(zpci_iomap_start); 
static struct kmem_cache *zdev_fmb_cache; -struct zpci_dev *get_zdev(struct pci_dev *pdev) -{ - return (struct zpci_dev *) pdev->sysdata; -} - struct zpci_dev *get_zdev_by_fid(u32 fid) { struct zpci_dev *tmp, *zdev = NULL; @@ -269,7 +264,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, unsigned long offset, unsigned long max) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); u64 addr; int idx; @@ -385,7 +380,7 @@ static void zpci_irq_handler(struct airq_struct *airq) int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); unsigned int hwirq, msi_vecs; unsigned long aisb; struct msi_desc *msi; @@ -460,7 +455,7 @@ out: void arch_teardown_msi_irqs(struct pci_dev *pdev) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); struct msi_desc *msi; int rc; @@ -637,7 +632,7 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) int i; for (i = 0; i < PCI_BAR_COUNT; i++) { - if (!zdev->bars[i].size) + if (!zdev->bars[i].size || !zdev->bars[i].res) continue; zpci_free_iomap(zdev, zdev->bars[i].map_idx); @@ -648,7 +643,7 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) int pcibios_add_device(struct pci_dev *pdev) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); struct resource *res; int i; @@ -673,7 +668,7 @@ void pcibios_release_device(struct pci_dev *pdev) int pcibios_enable_device(struct pci_dev *pdev, int mask) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); zdev->pdev = pdev; zpci_debug_init_device(zdev); @@ -684,7 +679,7 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) void pcibios_disable_device(struct pci_dev *pdev) { - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); zpci_fmb_disable_device(zdev); zpci_debug_exit_device(zdev); @@ -695,7 +690,7 @@ void pcibios_disable_device(struct pci_dev *pdev) static int zpci_restore(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); int ret = 0; if (zdev->state != ZPCI_FN_STATE_ONLINE) @@ -717,7 +712,7 @@ out: static int zpci_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); if (zdev->state != ZPCI_FN_STATE_ONLINE) return 0; @@ -777,17 +772,22 @@ static int zpci_scan_bus(struct zpci_dev *zdev) ret = zpci_setup_bus_resources(zdev, &resources); if (ret) - return ret; + goto error; zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, zdev, &resources); if (!zdev->bus) { - zpci_cleanup_bus_resources(zdev); - return -EIO; + ret = -EIO; + goto error; } zdev->bus->max_bus_speed = zdev->max_bus_speed; pci_bus_add_devices(zdev->bus); return 0; + +error: + zpci_cleanup_bus_resources(zdev); + pci_free_resource_list(&resources); + return ret; } int zpci_enable_device(struct zpci_dev *zdev) diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 6fd8d5836138..42b76580c8b8 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -277,7 +277,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, enum dma_data_direction direction, struct dma_attrs *attrs) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); unsigned long nr_pages, iommu_page_index; unsigned 
long pa = page_to_phys(page) + offset; int flags = ZPCI_PTE_VALID; @@ -316,7 +316,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); unsigned long iommu_page_index; int npages; @@ -337,7 +337,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); struct page *page; unsigned long pa; dma_addr_t map; @@ -367,7 +367,7 @@ static void s390_dma_free(struct device *dev, size_t size, void *pa, dma_addr_t dma_handle, struct dma_attrs *attrs) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); size = PAGE_ALIGN(size); atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index ed2394dd14e9..369a3e05d468 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -46,15 +46,13 @@ struct zpci_ccdf_avail { static void __zpci_event_error(struct zpci_ccdf_err *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); + struct pci_dev *pdev = zdev ? zdev->pdev : NULL; zpci_err("error CCDF:\n"); zpci_err_hex(ccdf, sizeof(*ccdf)); - if (!zdev) - return; - pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n", - pci_name(zdev->pdev), ccdf->pec, ccdf->fid); + pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); } void zpci_event_error(void *data) @@ -89,7 +87,9 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) ret = zpci_enable_device(zdev); if (ret) break; + pci_lock_rescan_remove(); pci_rescan_bus(zdev->bus); + pci_unlock_rescan_remove(); break; case 0x0302: /* Reserved -> Standby */ if (!zdev) @@ -97,7 +97,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) break; case 0x0303: /* Deconfiguration requested */ if (pdev) - pci_stop_and_remove_bus_device(pdev); + pci_stop_and_remove_bus_device_locked(pdev); ret = zpci_disable_device(zdev); if (ret) @@ -114,7 +114,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) /* Give the driver a hint that the function is * already unusable. */ pdev->error_state = pci_channel_io_perm_failure; - pci_stop_and_remove_bus_device(pdev); + pci_stop_and_remove_bus_device_locked(pdev); } zdev->fh = ccdf->fh; diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index 85267c058af8..dcc2634ccbe2 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -8,10 +8,23 @@ #include <linux/errno.h> #include <linux/delay.h> #include <asm/pci_insn.h> +#include <asm/pci_debug.h> #include <asm/processor.h> #define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ +static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset) +{ + struct { + u8 cc; + u8 status; + u64 req; + u64 offset; + } data = {cc, status, req, offset}; + + zpci_err_hex(&data, sizeof(data)); +} + /* Modify PCI Function Controls */ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) { @@ -38,8 +51,8 @@ int zpci_mod_fc(u64 req, struct zpci_fib *fib) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d\n", - __func__, cc, status); + zpci_err_insn(cc, status, req, 0); + return (cc) ? 
-EIO : 0; } @@ -72,8 +85,8 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n", - __func__, cc, status, addr, range); + zpci_err_insn(cc, status, addr, range); + return (cc) ? -EIO : 0; } @@ -121,8 +134,8 @@ int zpci_load(u64 *data, u64 req, u64 offset) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); + zpci_err_insn(cc, status, req, offset); + return (cc > 0) ? -EIO : cc; } EXPORT_SYMBOL_GPL(zpci_load); @@ -159,8 +172,8 @@ int zpci_store(u64 data, u64 req, u64 offset) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); + zpci_err_insn(cc, status, req, offset); + return (cc > 0) ? -EIO : cc; } EXPORT_SYMBOL_GPL(zpci_store); @@ -195,8 +208,8 @@ int zpci_store_block(const u64 *data, u64 req, u64 offset) } while (cc == 2); if (cc) - printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", - __func__, cc, status, req, offset); + zpci_err_insn(cc, status, req, offset); + return (cc > 0) ? -EIO : cc; } EXPORT_SYMBOL_GPL(zpci_store_block); diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c index fa3ce891e597..f37a5808883d 100644 --- a/arch/s390/pci/pci_sysfs.c +++ b/arch/s390/pci/pci_sysfs.c @@ -16,7 +16,7 @@ static ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); \ + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); \ \ return sprintf(buf, fmt, zdev->member); \ } \ @@ -38,23 +38,30 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); int ret; if (!device_remove_file_self(dev, attr)) return count; + pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(pdev); ret = zpci_disable_device(zdev); if (ret) - return ret; + goto error; ret = zpci_enable_device(zdev); if (ret) - return ret; + goto error; pci_rescan_bus(zdev->bus); + pci_unlock_rescan_remove(); + return count; + +error: + pci_unlock_rescan_remove(); + return ret; } static DEVICE_ATTR_WO(recover); @@ -64,7 +71,7 @@ static ssize_t util_string_read(struct file *filp, struct kobject *kobj, { struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); - struct zpci_dev *zdev = get_zdev(pdev); + struct zpci_dev *zdev = to_zpci(pdev); return memory_read_from_buffer(buf, count, &off, zdev->util_str, sizeof(zdev->util_str)); diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 138fb3db45ba..92ffe397b893 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -7,6 +7,7 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += irq_work.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h diff --git a/arch/score/include/asm/mm-arch-hooks.h b/arch/score/include/asm/mm-arch-hooks.h deleted file mode 100644 index 5e38689f189a..000000000000 --- a/arch/score/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can 
redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_SCORE_MM_ARCH_HOOKS_H -#define _ASM_SCORE_MM_ARCH_HOOKS_H - -#endif /* _ASM_SCORE_MM_ARCH_HOOKS_H */ diff --git a/arch/score/include/asm/switch_to.h b/arch/score/include/asm/switch_to.h index 031756b59ece..fda3f83308d2 100644 --- a/arch/score/include/asm/switch_to.h +++ b/arch/score/include/asm/switch_to.h @@ -8,6 +8,4 @@ do { \ (last) = resume(prev, next, task_thread_info(next)); \ } while (0) -#define finish_arch_switch(prev) do {} while (0) - #endif /* _ASM_SCORE_SWITCH_TO_H */ diff --git a/arch/sh/drivers/pci/pci-sh4.h b/arch/sh/drivers/pci/pci-sh4.h index cbf763b3015e..0288efc17ff3 100644 --- a/arch/sh/drivers/pci/pci-sh4.h +++ b/arch/sh/drivers/pci/pci-sh4.h @@ -11,14 +11,6 @@ #include <asm/io.h> -/* startup values */ -#define PCI_PROBE_BIOS 1 -#define PCI_PROBE_CONF1 2 -#define PCI_PROBE_CONF2 4 -#define PCI_NO_CHECKS 0x400 -#define PCI_ASSIGN_ROMS 0x1000 -#define PCI_BIOS_IRQ_SCAN 0x2000 - #define SH4_PCICR 0x100 /* PCI Control Register */ #define SH4_PCICR_PREFIX 0xA5000000 /* CR prefix for write */ #define SH4_PCICR_FTO 0x00000400 /* TRDY/IRDY Enable */ diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 9ac4626e7284..aac452b26aa8 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -16,6 +16,7 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += msgbuf.h generic-y += param.h diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 728c4c571f40..93ec9066dbef 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -368,6 +368,7 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; } #endif #define ioremap_nocache ioremap +#define ioremap_uc ioremap #define iounmap __iounmap /* diff --git a/arch/sh/include/asm/mm-arch-hooks.h b/arch/sh/include/asm/mm-arch-hooks.h deleted file mode 100644 index 18087298b728..000000000000 --- a/arch/sh/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _ASM_SH_MM_ARCH_HOOKS_H -#define _ASM_SH_MM_ARCH_HOOKS_H - -#endif /* _ASM_SH_MM_ARCH_HOOKS_H */ diff --git a/arch/sh/include/asm/switch_to_32.h b/arch/sh/include/asm/switch_to_32.h index 0c065513e7ac..7661b4ba8259 100644 --- a/arch/sh/include/asm/switch_to_32.h +++ b/arch/sh/include/asm/switch_to_32.h @@ -78,6 +78,8 @@ do { \ \ if (is_dsp_enabled(prev)) \ __save_dsp(prev); \ + if (is_dsp_enabled(next)) \ + __restore_dsp(next); \ \ __ts1 = (u32 *)&prev->thread.sp; \ __ts2 = (u32 *)&prev->thread.pc; \ @@ -125,10 +127,4 @@ do { \ last = __last; \ } while (0) -#define finish_arch_switch(prev) \ -do { \ - if (is_dsp_enabled(prev)) \ - __restore_dsp(prev); \ -} while (0) - #endif /* __ASM_SH_SWITCH_TO_32_H */ diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index 0a47bd3e7bee..4ca78ed71ad2 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c @@ -355,13 +355,12 @@ static int sq_dev_add(struct device *dev, struct subsys_interface *sif) return error; } -static int sq_dev_remove(struct device *dev, struct subsys_interface *sif) +static void sq_dev_remove(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; struct kobject *kobj = sq_kobject[cpu]; kobject_put(kobj); - return 0; } static struct subsys_interface sq_interface = { diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 2b2a69dcc467..e928618838bc 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -12,6 +12,7 @@ generic-y += linkage.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += module.h generic-y += mutex.h generic-y += preempt.h diff --git a/arch/sparc/include/asm/mm-arch-hooks.h b/arch/sparc/include/asm/mm-arch-hooks.h deleted file mode 100644 index b89ba44c16f1..000000000000 --- a/arch/sparc/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _ASM_SPARC_MM_ARCH_HOOKS_H -#define _ASM_SPARC_MM_ARCH_HOOKS_H - -#endif /* _ASM_SPARC_MM_ARCH_HOOKS_H */ diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h index 1f0aa2024e94..6424249d5f78 100644 --- a/arch/sparc/include/asm/visasm.h +++ b/arch/sparc/include/asm/visasm.h @@ -28,16 +28,10 @@ * Must preserve %o5 between VISEntryHalf and VISExitHalf */ #define VISEntryHalf \ - rd %fprs, %o5; \ - andcc %o5, FPRS_FEF, %g0; \ - be,pt %icc, 297f; \ - sethi %hi(298f), %g7; \ - sethi %hi(VISenterhalf), %g1; \ - jmpl %g1 + %lo(VISenterhalf), %g0; \ - or %g7, %lo(298f), %g7; \ - clr %o5; \ -297: wr %o5, FPRS_FEF, %fprs; \ -298: + VISEntry + +#define VISExitHalf \ + VISExit #define VISEntryHalfFast(fail_label) \ rd %fprs, %o5; \ @@ -47,7 +41,7 @@ ba,a,pt %xcc, fail_label; \ 297: wr %o5, FPRS_FEF, %fprs; -#define VISExitHalf \ +#define VISExitHalfFast \ wr %o5, 0, %fprs; #ifndef __ASSEMBLY__ diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index c928bc64b4ba..3a0e1a986bfe 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -249,7 +249,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, struct pci_bus *bus, int devfn) { struct dev_archdata *sd; - struct pci_slot *slot; struct platform_device *op; struct pci_dev *dev; const char *type; @@ -290,10 +289,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->multifunction = 0; /* maybe a lie? */ set_pcie_port_type(dev); - list_for_each_entry(slot, &dev->bus->slots, list) - if (PCI_SLOT(dev->devfn) == slot->number) - dev->slot = slot; - + pci_dev_assign_slot(dev); dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); dev->device = of_getintprop_default(node, "device-id", 0xffff); dev->subsystem_vendor = diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 50e7b626afe8..c5113c7ce2fd 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -333,11 +333,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ); /* - * A new process must start with interrupts closed in 2.5, - * because this is how Mingo's scheduler works (see schedule_tail - * and finish_arch_switch). If we do not do it, a timer interrupt hits - * before we unlock, attempts to re-take the rq->lock, and then we die. - * Thus, kpsr|=PSR_PIL. + * A new process must start with interrupts disabled, see schedule_tail() + * and finish_task_switch(). (If we do not do it and if a timer interrupt + * hits before we unlock and attempts to take the rq->lock, we deadlock.) + * + * Thus, kpsr |= PSR_PIL. 
*/ ti->ksp = (unsigned long) new_stack; p->thread.kregs = childregs; diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 140527a20e7d..83aeeb1dffdb 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, %g1 + 0x200, #n_reads_strong) +#ifdef NON_USER_COPY + VISExitHalfFast +#else VISExitHalf - +#endif brz,pn %o2, .Lexit cmp %o2, 19 ble,pn %icc, .Lsmall_unaligned diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S index b320ae9e2e2e..a063d84336d6 100644 --- a/arch/sparc/lib/VISsave.S +++ b/arch/sparc/lib/VISsave.S @@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 stx %g3, [%g6 + TI_GSR] 2: add %g6, %g1, %g3 - cmp %o5, FPRS_DU - be,pn %icc, 6f - sll %g1, 3, %g1 + mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5 + sll %g1, 3, %g1 stb %o5, [%g3 + TI_FPSAVED] rd %gsr, %g2 add %g6, %g1, %g3 @@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 .align 32 80: jmpl %g7 + %g0, %g0 nop - -6: ldub [%g3 + TI_FPSAVED], %o5 - or %o5, FPRS_DU, %o5 - add %g6, TI_FPREGS+0x80, %g2 - stb %o5, [%g3 + TI_FPSAVED] - - sll %g1, 5, %g1 - add %g6, TI_FPREGS+0xc0, %g3 - wr %g0, FPRS_FEF, %fprs - membar #Sync - stda %f32, [%g2 + %g1] ASI_BLK_P - stda %f48, [%g3 + %g1] ASI_BLK_P - membar #Sync - ba,pt %xcc, 80f - nop - - .align 32 -80: jmpl %g7 + %g0, %g0 - nop - - .align 32 -VISenterhalf: - ldub [%g6 + TI_FPDEPTH], %g1 - brnz,a,pn %g1, 1f - cmp %g1, 1 - stb %g0, [%g6 + TI_FPSAVED] - stx %fsr, [%g6 + TI_XFSR] - clr %o5 - jmpl %g7 + %g0, %g0 - wr %g0, FPRS_FEF, %fprs - -1: bne,pn %icc, 2f - srl %g1, 1, %g1 - ba,pt %xcc, vis1 - sub %g7, 8, %g7 -2: addcc %g6, %g1, %g3 - sll %g1, 3, %g1 - andn %o5, FPRS_DU, %g2 - stb %g2, [%g3 + TI_FPSAVED] - - rd %gsr, %g2 - add %g6, %g1, %g3 - stx %g2, [%g3 + TI_GSR] - add %g6, %g1, %g2 - stx %fsr, [%g2 + TI_XFSR] - sll %g1, 5, %g1 -3: andcc %o5, FPRS_DL, %g0 - be,pn %icc, 4f - add %g6, TI_FPREGS, %g2 - - add %g6, TI_FPREGS+0x40, %g3 - membar #Sync - stda %f0, [%g2 + %g1] ASI_BLK_P - stda %f16, [%g3 + %g1] ASI_BLK_P - membar #Sync - ba,pt %xcc, 4f - nop - - .align 32 -4: and %o5, FPRS_DU, %o5 - jmpl %g7 + %g0, %g0 - wr %o5, FPRS_FEF, %fprs diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 1d649a95660c..8069ce12f20b 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c @@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page); void VISenter(void); EXPORT_SYMBOL(VISenter); -/* CRYPTO code needs this */ -void VISenterhalf(void); -EXPORT_SYMBOL(VISenterhalf); - extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, unsigned long *); diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index d53654488c2c..d8a843163471 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -19,6 +19,7 @@ generic-y += irq_regs.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += msgbuf.h generic-y += mutex.h generic-y += param.h diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index dc61de15c1f9..322b5fe94781 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h @@ -55,6 +55,7 @@ extern void iounmap(volatile void __iomem *addr); #define ioremap_nocache(physaddr, size) ioremap(physaddr, size) #define ioremap_wc(physaddr, size) ioremap(physaddr, size) #define ioremap_wt(physaddr, size) ioremap(physaddr, 
size) +#define ioremap_uc(physaddr, size) ioremap(physaddr, size) #define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) #define mmiowb() diff --git a/arch/tile/include/asm/mm-arch-hooks.h b/arch/tile/include/asm/mm-arch-hooks.h deleted file mode 100644 index d1709ea774f7..000000000000 --- a/arch/tile/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_TILE_MM_ARCH_HOOKS_H -#define _ASM_TILE_MM_ARCH_HOOKS_H - -#endif /* _ASM_TILE_MM_ARCH_HOOKS_H */ diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h index b8f888cbe6b0..34ee72705521 100644 --- a/arch/tile/include/asm/switch_to.h +++ b/arch/tile/include/asm/switch_to.h @@ -53,15 +53,13 @@ extern unsigned long get_switch_to_pc(void); * Kernel threads can check to see if they need to migrate their * stack whenever they return from a context switch; for user * threads, we defer until they are returning to user-space. + * We defer homecache migration until the runqueue lock is released. */ -#define finish_arch_switch(prev) do { \ - if (unlikely((prev)->state == TASK_DEAD)) \ - __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \ - ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \ +#define finish_arch_post_lock_switch() do { \ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \ (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \ if (current->mm == NULL && !kstack_hash && \ - current_thread_info()->homecache_cpu != smp_processor_id()) \ + current_thread_info()->homecache_cpu != raw_smp_processor_id()) \ homecache_migrate_kthread(); \ } while (0) diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index e8c2c04143cd..c667e104a0c2 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) return -EFAULT; - memset(to, 0, sizeof(*to)); - err = __get_user(to->si_signo, &from->si_signo); err |= __get_user(to->si_errno, &from->si_errno); err |= __get_user(to->si_code, &from->si_code); diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index a45213781ad0..7d5769310bef 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -446,6 +446,11 @@ struct task_struct *__sched _switch_to(struct task_struct *prev, hardwall_switch_tasks(prev, next); #endif + /* Notify the simulator of task exit. */ + if (unlikely(prev->state == TASK_DEAD)) + __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | + (prev->pid << _SIM_CONTROL_OPERATOR_BITS)); + /* * Switch kernel SP, PC, and callee-saved registers. 
* In the context of the new task, return the old task pointer diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 99c9ff87e018..6b755d125783 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void) void __init free_initrd_mem(unsigned long begin, unsigned long end) { - free_bootmem(__pa(begin), end - begin); + free_bootmem_late(__pa(begin), end - begin); } static int __init setup_initrd(char *str) diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c index a3ed12f8f83b..825867c53853 100644 --- a/arch/tile/kernel/sysfs.c +++ b/arch/tile/kernel/sysfs.c @@ -198,16 +198,13 @@ static int hv_stats_device_add(struct device *dev, struct subsys_interface *sif) return err; } -static int hv_stats_device_remove(struct device *dev, - struct subsys_interface *sif) +static void hv_stats_device_remove(struct device *dev, + struct subsys_interface *sif) { int cpu = dev->id; - if (!cpu_online(cpu)) - return 0; - - sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr); - return 0; + if (cpu_online(cpu)) + sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr); } diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 3d63ff6f583f..149ec55f9c46 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -16,6 +16,7 @@ generic-y += irq_regs.h generic-y += irq_work.h generic-y += kdebug.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mutex.h generic-y += param.h generic-y += pci.h diff --git a/arch/um/include/asm/mm-arch-hooks.h b/arch/um/include/asm/mm-arch-hooks.h deleted file mode 100644 index a7c8b0dfdd4e..000000000000 --- a/arch/um/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _ASM_UM_MM_ARCH_HOOKS_H -#define _ASM_UM_MM_ARCH_HOOKS_H - -#endif /* _ASM_UM_MM_ARCH_HOOKS_H */ diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h index 83a91f976330..35ab97e4bb9b 100644 --- a/arch/um/include/shared/kern_util.h +++ b/arch/um/include/shared/kern_util.h @@ -22,7 +22,8 @@ extern int kmalloc_ok; extern unsigned long alloc_stack(int order, int atomic); extern void free_stack(unsigned long stack, int order); -extern int do_signal(void); +struct pt_regs; +extern void do_signal(struct pt_regs *regs); extern void interrupt_end(void); extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 68b9119841cd..a6d922672b9f 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -90,12 +90,14 @@ void *__switch_to(struct task_struct *from, struct task_struct *to) void interrupt_end(void) { + struct pt_regs *regs = ¤t->thread.regs; + if (need_resched()) schedule(); if (test_thread_flag(TIF_SIGPENDING)) - do_signal(); + do_signal(regs); if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) - tracehook_notify_resume(¤t->thread.regs); + tracehook_notify_resume(regs); } void exit_thread(void) diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index 4f60e4aad790..57acbd67d85d 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c @@ -64,7 +64,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) signal_setup_done(err, ksig, singlestep); } -static int kern_do_signal(struct pt_regs *regs) +void do_signal(struct pt_regs *regs) { struct ksignal ksig; int handled_sig = 0; @@ -110,10 +110,4 @@ static int kern_do_signal(struct pt_regs *regs) */ if (!handled_sig) restore_saved_sigmask(); - return handled_sig; -} - -int do_signal(void) -{ - return kern_do_signal(¤t->thread.regs); } diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index f1b3eb14b855..2077248e8a72 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -291,7 +291,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, /* We are under mmap_sem, release it such that current can terminate */ up_write(¤t->mm->mmap_sem); force_sig(SIGKILL, current); - do_signal(); + do_signal(¤t->thread.regs); } } diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 557232f758b6..d8a9fce6ee2e 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -173,7 +173,7 @@ static void bad_segv(struct faultinfo fi, unsigned long ip) void fatal_sigsegv(void) { force_sigsegv(SIGSEGV, current); - do_signal(); + do_signal(¤t->thread.regs); /* * This is to tell gcc that we're not returning - do_signal * can, in general, return, but in this case, it's not, since diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index d12b377b5a8b..1fc7a286dc6f 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -26,6 +26,7 @@ generic-y += kdebug.h generic-y += kmap_types.h generic-y += local.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += module.h generic-y += msgbuf.h diff --git a/arch/unicore32/include/asm/mm-arch-hooks.h b/arch/unicore32/include/asm/mm-arch-hooks.h deleted file mode 100644 index 4d79a850c509..000000000000 --- a/arch/unicore32/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour 
<ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_UNICORE32_MM_ARCH_HOOKS_H -#define _ASM_UNICORE32_MM_ARCH_HOOKS_H - -#endif /* _ASM_UNICORE32_MM_ARCH_HOOKS_H */ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 3dbb7e7909ca..48f7433dac6f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -41,6 +41,7 @@ config X86 select ARCH_USE_CMPXCHG_LOCKREF if X86_64 select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_WANTS_DYNAMIC_TASK_STRUCT select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_IPC_PARSE_VERSION if X86_32 select ARCH_WANT_OPTIONAL_GPIOLIB @@ -132,7 +133,7 @@ config X86 select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS - select HAVE_UID16 if X86_32 + select HAVE_UID16 if X86_32 || IA32_EMULATION select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_USER_RETURN_NOTIFIER select IRQ_FORCED_THREADING @@ -954,6 +955,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS config X86_MCE bool "Machine Check / overheating reporting" + select GENERIC_ALLOCATOR default y ---help--- Machine Check support allows the processor to notify the @@ -1001,19 +1003,41 @@ config X86_THERMAL_VECTOR def_bool y depends on X86_MCE_INTEL -config VM86 - bool "Enable VM86 support" if EXPERT - default y +config X86_LEGACY_VM86 + bool "Legacy VM86 support (obsolete)" + default n depends on X86_32 ---help--- - This option is required by programs like DOSEMU to run - 16-bit real mode legacy code on x86 processors. It also may - be needed by software like XFree86 to initialize some video - cards via BIOS. Disabling this option saves about 6K. + This option allows user programs to put the CPU into V8086 + mode, which is an 80286-era approximation of 16-bit real mode. + + Some very old versions of X and/or vbetool require this option + for user mode setting. Similarly, DOSEMU will use it if + available to accelerate real mode DOS programs. However, any + recent version of DOSEMU, X, or vbetool should be fully + functional even without kernel VM86 support, as they will all + fall back to (pretty well performing) software emulation. + + Anything that works on a 64-bit kernel is unlikely to need + this option, as 64-bit kernels don't, and can't, support V8086 + mode. This option is also unrelated to 16-bit protected mode + and is not needed to run most 16-bit programs under Wine. + + Enabling this option adds considerable attack surface to the + kernel and slows down system calls and exception handling. + + Unless you use very old userspace or need the last drop of + performance in your real mode DOS games and can't use KVM, + say N here. + +config VM86 + bool + default X86_LEGACY_VM86 config X86_16BIT bool "Enable support for 16-bit segments" if EXPERT default y + depends on MODIFY_LDT_SYSCALL ---help--- This option is required by programs like Wine to run 16-bit protected mode legacy code on x86 processors. Disabling @@ -1508,6 +1532,7 @@ config X86_RESERVE_LOW config MATH_EMULATION bool + depends on MODIFY_LDT_SYSCALL prompt "Math emulation" if X86_32 ---help--- Linux can emulate a math coprocessor (used for floating point @@ -2052,6 +2077,22 @@ config CMDLINE_OVERRIDE This is used to work around broken boot loaders. This should be set to 'N' under normal conditions. 
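The X86_LEGACY_VM86 help text above tells users of old DOSEMU, X and vbetool binaries that they can rely on software emulation when kernel VM86 support is compiled out. Userspace can detect that case at run time; a rough probe sketch in C, assuming the i386 vm86 syscall number (166) and the VM86_PLUS_INSTALL_CHECK (0) command -- both are assumptions of this sketch (check asm/unistd_32.h and asm/vm86.h), not something taken from the patch:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_vm86
#define __NR_vm86 166			/* i386 syscall number (assumption) */
#endif
#define VM86_PLUS_INSTALL_CHECK	0	/* "is vm86 support there?" probe */

int main(void)
{
	/* A kernel with VM86 support answers the probe with 0; a kernel
	 * built without it should fail the syscall with ENOSYS. */
	if (syscall(__NR_vm86, VM86_PLUS_INSTALL_CHECK, 0L) == 0)
		puts("kernel VM86 support present");
	else if (errno == ENOSYS)
		puts("no kernel VM86: fall back to software emulation");
	else
		perror("vm86 probe");
	return 0;
}

The ENOSYS branch is the fallback path the new help text describes for recent DOSEMU, X and vbetool versions.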
+config MODIFY_LDT_SYSCALL + bool "Enable the LDT (local descriptor table)" if EXPERT + default y + ---help--- + Linux can allow user programs to install a per-process x86 + Local Descriptor Table (LDT) using the modify_ldt(2) system + call. This is required to run 16-bit or segmented code such as + DOSEMU or some Wine programs. It is also used by some very old + threading libraries. + + Enabling this feature adds a small amount of overhead to + context switches and increases the low-level kernel attack + surface. Disabling it removes the modify_ldt(2) system call. + + Saying 'N' here may make sense for embedded or server kernels. + source "kernel/livepatch/Kconfig" endmenu @@ -2521,7 +2562,7 @@ config IA32_EMULATION depends on X86_64 select BINFMT_ELF select COMPAT_BINFMT_ELF - select HAVE_UID16 + select ARCH_WANT_OLD_COMPAT_IPC ---help--- Include code to run legacy 32-bit programs under a 64-bit kernel. You should likely turn this on, unless you're @@ -2535,7 +2576,7 @@ config IA32_AOUT config X86_X32 bool "x32 ABI for 64-bit mode" - depends on X86_64 && IA32_EMULATION + depends on X86_64 ---help--- Include code to run binaries for the x32 native 32-bit ABI for 64-bit processors. An x32 process gets access to the @@ -2549,7 +2590,6 @@ config X86_X32 config COMPAT def_bool y depends on IA32_EMULATION || X86_X32 - select ARCH_WANT_OLD_COMPAT_IPC if COMPAT config COMPAT_FOR_U64_ALIGNMENT diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index a15893d17c55..d8c0d3266173 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -297,6 +297,18 @@ config OPTIMIZE_INLINING If unsure, say N. +config DEBUG_ENTRY + bool "Debug low-level entry code" + depends on DEBUG_KERNEL + ---help--- + This option enables sanity checks in x86's low-level entry code. + Some of these sanity checks may slow down kernel entries and + exits or otherwise impact performance. + + This is currently used to help test NMI code. + + If unsure, say N. + config DEBUG_NMI_SELFTEST bool "NMI Selftest" depends on DEBUG_KERNEL && X86_LOCAL_APIC diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 118e6debc483..747860c696e1 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -39,6 +39,16 @@ ifdef CONFIG_X86_NEED_RELOCS LDFLAGS_vmlinux := --emit-relocs endif +# +# Prevent GCC from generating any FP code by mistake. +# +# This must happen before we try the -mpreferred-stack-boundary, see: +# +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383 +# +KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow +KBUILD_CFLAGS += $(call cc-option,-mno-avx,) + ifeq ($(CONFIG_X86_32),y) BITS := 32 UTS_MACHINE := i386 @@ -167,9 +177,6 @@ KBUILD_CFLAGS += -pipe KBUILD_CFLAGS += -Wno-sign-compare # KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -# prevent gcc from generating any FP code by mistake -KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -KBUILD_CFLAGS += $(call cc-option,-mno-avx,) KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) @@ -212,6 +219,8 @@ drivers-$(CONFIG_PM) += arch/x86/power/ drivers-$(CONFIG_FB) += arch/x86/video/ +drivers-$(CONFIG_RAS) += arch/x86/ras/ + #### # boot loader support. 
Several targets are kept for legacy purposes diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 57bbf2fb21f6..0d553e54171b 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -23,7 +23,7 @@ targets += fdimage fdimage144 fdimage288 image.iso mtools.conf subdir- := compressed setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o -setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o +setup-y += early_serial_console.o edd.o header.o main.o memory.o setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o setup-y += video-mode.o version.o setup-$(CONFIG_X86_APM_BOOT) += apm.o diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index bd49ec61255c..0033e96c3f09 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -307,9 +307,6 @@ void query_edd(void); /* header.S */ void __attribute__((noreturn)) die(void); -/* mca.c */ -int query_mca(void); - /* memory.c */ int detect_memory(void); diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index d7b1f655b3ef..6a9b96b4624d 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c @@ -82,7 +82,7 @@ static unsigned long get_random_long(void) if (has_cpuflag(X86_FEATURE_TSC)) { debug_putstr(" RDTSC"); - rdtscll(raw); + raw = rdtsc(); random ^= raw; use_i8254 = false; diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 2c82bd150d43..ee1b6d346b98 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1041,7 +1041,6 @@ void setup_graphics(struct boot_params *boot_params) struct boot_params *make_boot_params(struct efi_config *c) { struct boot_params *boot_params; - struct sys_desc_table *sdt; struct apm_bios_info *bi; struct setup_header *hdr; struct efi_info *efi; @@ -1089,7 +1088,6 @@ struct boot_params *make_boot_params(struct efi_config *c) hdr = &boot_params->hdr; efi = &boot_params->efi_info; bi = &boot_params->apm_bios_info; - sdt = &boot_params->sys_desc_table; /* Copy the second sector to boot_params */ memcpy(&hdr->jump, image->image_base + 512, 512); @@ -1118,8 +1116,6 @@ struct boot_params *make_boot_params(struct efi_config *c) /* Clear APM BIOS info */ memset(bi, 0, sizeof(*bi)); - memset(sdt, 0, sizeof(*sdt)); - status = efi_parse_options(cmdline_ptr); if (status != EFI_SUCCESS) goto fail2; @@ -1193,6 +1189,10 @@ static efi_status_t setup_e820(struct boot_params *params, unsigned int e820_type = 0; unsigned long m = efi->efi_memmap; +#ifdef CONFIG_X86_64 + m |= (u64)efi->efi_memmap_hi << 32; +#endif + d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size)); switch (d->type) { case EFI_RESERVED_TYPE: diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index a107b935e22f..f63797942bb5 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -220,6 +220,23 @@ void __putstr(const char *s) outb(0xff & (pos >> 1), vidport+1); } +void __puthex(unsigned long value) +{ + char alpha[2] = "0"; + int bits; + + for (bits = sizeof(value) * 8 - 4; bits >= 0; bits -= 4) { + unsigned long digit = (value >> bits) & 0xf; + + if (digit < 0xA) + alpha[0] = '0' + digit; + else + alpha[0] = 'a' + (digit - 0xA); + + __putstr(alpha); + } +} + static void error(char *x) { error_putstr("\n\n"); @@ -399,6 +416,13 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; + /* Report initial kernel position details. 
*/ + debug_putaddr(input_data); + debug_putaddr(input_len); + debug_putaddr(output); + debug_putaddr(output_len); + debug_putaddr(run_size); + /* * The memory hole needed for the kernel is the larger of either * the entire decompressed kernel plus relocation table, or the diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 805d25ca5f1d..3783dc3e10b3 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -34,16 +34,27 @@ extern memptr free_mem_ptr; extern memptr free_mem_end_ptr; extern struct boot_params *real_mode; /* Pointer to real-mode data */ void __putstr(const char *s); +void __puthex(unsigned long value); #define error_putstr(__x) __putstr(__x) +#define error_puthex(__x) __puthex(__x) #ifdef CONFIG_X86_VERBOSE_BOOTUP #define debug_putstr(__x) __putstr(__x) +#define debug_puthex(__x) __puthex(__x) +#define debug_putaddr(__x) { \ + debug_putstr(#__x ": 0x"); \ + debug_puthex((unsigned long)(__x)); \ + debug_putstr("\n"); \ + } #else static inline void debug_putstr(const char *s) { } +static inline void debug_puthex(const char *s) +{ } +#define debug_putaddr(x) /* */ #endif diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c index fd6c9f236996..9bcea386db65 100644 --- a/arch/x86/boot/main.c +++ b/arch/x86/boot/main.c @@ -161,9 +161,6 @@ void main(void) /* Set keyboard repeat rate (why?) and query the lock flags */ keyboard_init(); - /* Query MCA information */ - query_mca(); - /* Query Intel SpeedStep (IST) information */ query_ist(); diff --git a/arch/x86/boot/mca.c b/arch/x86/boot/mca.c deleted file mode 100644 index a95a531148ef..000000000000 --- a/arch/x86/boot/mca.c +++ /dev/null @@ -1,38 +0,0 @@ -/* -*- linux-c -*- ------------------------------------------------------- * - * - * Copyright (C) 1991, 1992 Linus Torvalds - * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin - * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2. 
- * - * ----------------------------------------------------------------------- */ - -/* - * Get the MCA system description table - */ - -#include "boot.h" - -int query_mca(void) -{ - struct biosregs ireg, oreg; - u16 len; - - initregs(&ireg); - ireg.ah = 0xc0; - intcall(0x15, &ireg, &oreg); - - if (oreg.eflags & X86_EFLAGS_CF) - return -1; /* No MCA present */ - - set_fs(oreg.es); - len = rdfs16(oreg.bx); - - if (len > sizeof(boot_params.sys_desc_table)) - len = sizeof(boot_params.sys_desc_table); - - copy_from_fs(&boot_params.sys_desc_table, oreg.bx, len); - return 0; -} diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index aaa1118bf01e..028be48c8839 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -23,6 +23,7 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 315b86106572..962297d244b3 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -22,6 +22,7 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 5a4a089e8b1f..9a2838cf0591 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o +obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o @@ -30,6 +31,7 @@ obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o +obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o # These modules require assembler to support AVX. 
ifeq ($(avx_supported),yes) @@ -60,6 +62,7 @@ blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o +chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o ifeq ($(avx_supported),yes) @@ -75,6 +78,7 @@ endif ifeq ($(avx2_supported),yes) camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o + chacha20-x86_64-y += chacha20-avx2-x86_64.o serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o endif @@ -82,8 +86,10 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o +poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o ifeq ($(avx2_supported),yes) sha1-ssse3-y += sha1_avx2_x86_64_asm.o +poly1305-x86_64-y += poly1305-avx2-x86_64.o endif crc32c-intel-y := crc32c-intel_glue.o crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index dccad38b59a8..3633ad6145c5 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -803,10 +803,7 @@ static int rfc4106_init(struct crypto_aead *aead) return PTR_ERR(cryptd_tfm); *ctx = cryptd_tfm; - crypto_aead_set_reqsize( - aead, - sizeof(struct aead_request) + - crypto_aead_reqsize(&cryptd_tfm->base)); + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); return 0; } @@ -955,8 +952,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req) /* Assuming we are supporting rfc4106 64-bit extended */ /* sequence numbers We need to have the AAD length equal */ - /* to 8 or 12 bytes */ - if (unlikely(req->assoclen != 8 && req->assoclen != 12)) + /* to 16 or 20 bytes */ + if (unlikely(req->assoclen != 16 && req->assoclen != 20)) return -EINVAL; /* IV below built */ @@ -992,9 +989,9 @@ static int helper_rfc4106_encrypt(struct aead_request *req) } kernel_fpu_begin(); - aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv, - ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst - + ((unsigned long)req->cryptlen), auth_tag_len); + aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv, + ctx->hash_subkey, assoc, req->assoclen - 8, + dst + req->cryptlen, auth_tag_len); kernel_fpu_end(); /* The authTag (aka the Integrity Check Value) needs to be written @@ -1033,12 +1030,12 @@ static int helper_rfc4106_decrypt(struct aead_request *req) struct scatter_walk dst_sg_walk; unsigned int i; - if (unlikely(req->assoclen != 8 && req->assoclen != 12)) + if (unlikely(req->assoclen != 16 && req->assoclen != 20)) return -EINVAL; /* Assuming we are supporting rfc4106 64-bit extended */ /* sequence numbers We need to have the AAD length */ - /* equal to 8 or 12 bytes */ + /* equal to 16 or 20 bytes */ tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); /* IV below built */ @@ -1075,8 +1072,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req) kernel_fpu_begin(); aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, - ctx->hash_subkey, assoc, (unsigned long)req->assoclen, - authTag, auth_tag_len); + ctx->hash_subkey, assoc, req->assoclen - 8, + authTag, auth_tag_len); 
kernel_fpu_end(); /* Compare generated tag with passed in tag. */ @@ -1105,19 +1102,12 @@ static int rfc4106_encrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); struct cryptd_aead *cryptd_tfm = *ctx; - struct aead_request *subreq = aead_request_ctx(req); - aead_request_set_tfm(subreq, irq_fpu_usable() ? - cryptd_aead_child(cryptd_tfm) : - &cryptd_tfm->base); + aead_request_set_tfm(req, irq_fpu_usable() ? + cryptd_aead_child(cryptd_tfm) : + &cryptd_tfm->base); - aead_request_set_callback(subreq, req->base.flags, - req->base.complete, req->base.data); - aead_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - aead_request_set_ad(subreq, req->assoclen); - - return crypto_aead_encrypt(subreq); + return crypto_aead_encrypt(req); } static int rfc4106_decrypt(struct aead_request *req) @@ -1125,19 +1115,12 @@ static int rfc4106_decrypt(struct aead_request *req) struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); struct cryptd_aead *cryptd_tfm = *ctx; - struct aead_request *subreq = aead_request_ctx(req); - - aead_request_set_tfm(subreq, irq_fpu_usable() ? - cryptd_aead_child(cryptd_tfm) : - &cryptd_tfm->base); - aead_request_set_callback(subreq, req->base.flags, - req->base.complete, req->base.data); - aead_request_set_crypt(subreq, req->src, req->dst, - req->cryptlen, req->iv); - aead_request_set_ad(subreq, req->assoclen); + aead_request_set_tfm(req, irq_fpu_usable() ? + cryptd_aead_child(cryptd_tfm) : + &cryptd_tfm->base); - return crypto_aead_decrypt(subreq); + return crypto_aead_decrypt(req); } #endif diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S new file mode 100644 index 000000000000..16694e625f77 --- /dev/null +++ b/arch/x86/crypto/chacha20-avx2-x86_64.S @@ -0,0 +1,443 @@ +/* + * ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/linkage.h> + +.data +.align 32 + +ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 + .octa 0x0e0d0c0f0a09080b0605040702010003 +ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 + .octa 0x0d0c0f0e09080b0a0504070601000302 +CTRINC: .octa 0x00000003000000020000000100000000 + .octa 0x00000007000000060000000500000004 + +.text + +ENTRY(chacha20_8block_xor_avx2) + # %rdi: Input state matrix, s + # %rsi: 8 data blocks output, o + # %rdx: 8 data blocks input, i + + # This function encrypts eight consecutive ChaCha20 blocks by loading + # the state matrix in AVX registers eight times. As we need some + # scratch registers, we save the first four registers on the stack. The + # algorithm performs each operation on the corresponding word of each + # state matrix, hence requires no word shuffling. For final XORing step + # we transpose the matrix by interleaving 32-, 64- and then 128-bit + # words, which allows us to do XOR in AVX registers. 8/16-bit word + # rotation is done with the slightly better performing byte shuffling, + # 7/12-bit word rotation uses traditional shift+OR. 
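For readers mapping the vector code back to the cipher definition: each ymm register above holds the same state word for eight blocks, so every vpaddd/vpxor/rotate group is one step of the plain RFC 7539 quarter-round applied lane-wise. A minimal scalar sketch of the double round that .Ldoubleround8 runs ten times (helper names are made up for the sketch):

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha20 quarter-round; the AVX2 code performs it on the
 * corresponding word of eight blocks at once, one block per lane. */
#define QR(a, b, c, d) do {				\
	a += b; d ^= a; d = rotl32(d, 16);		\
	c += d; b ^= c; b = rotl32(b, 12);		\
	a += b; d ^= a; d = rotl32(d, 8);		\
	c += d; b ^= c; b = rotl32(b, 7);		\
} while (0)

/* Column round followed by diagonal round: the body of .Ldoubleround8. */
static void chacha20_doubleround(uint32_t x[16])
{
	QR(x[0], x[4], x[8],  x[12]);
	QR(x[1], x[5], x[9],  x[13]);
	QR(x[2], x[6], x[10], x[14]);
	QR(x[3], x[7], x[11], x[15]);
	QR(x[0], x[5], x[10], x[15]);
	QR(x[1], x[6], x[11], x[12]);
	QR(x[2], x[7], x[8],  x[13]);
	QR(x[3], x[4], x[9],  x[14]);
}

The 16- and 8-bit rotations correspond to the vpshufb shuffles against ROT16/ROT8; the 12- and 7-bit rotations correspond to the vpslld/vpsrld/vpor triples.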
+ + vzeroupper + # 4 * 32 byte stack, 32-byte aligned + mov %rsp, %r8 + and $~31, %rsp + sub $0x80, %rsp + + # x0..15[0-7] = s[0..15] + vpbroadcastd 0x00(%rdi),%ymm0 + vpbroadcastd 0x04(%rdi),%ymm1 + vpbroadcastd 0x08(%rdi),%ymm2 + vpbroadcastd 0x0c(%rdi),%ymm3 + vpbroadcastd 0x10(%rdi),%ymm4 + vpbroadcastd 0x14(%rdi),%ymm5 + vpbroadcastd 0x18(%rdi),%ymm6 + vpbroadcastd 0x1c(%rdi),%ymm7 + vpbroadcastd 0x20(%rdi),%ymm8 + vpbroadcastd 0x24(%rdi),%ymm9 + vpbroadcastd 0x28(%rdi),%ymm10 + vpbroadcastd 0x2c(%rdi),%ymm11 + vpbroadcastd 0x30(%rdi),%ymm12 + vpbroadcastd 0x34(%rdi),%ymm13 + vpbroadcastd 0x38(%rdi),%ymm14 + vpbroadcastd 0x3c(%rdi),%ymm15 + # x0..3 on stack + vmovdqa %ymm0,0x00(%rsp) + vmovdqa %ymm1,0x20(%rsp) + vmovdqa %ymm2,0x40(%rsp) + vmovdqa %ymm3,0x60(%rsp) + + vmovdqa CTRINC(%rip),%ymm1 + vmovdqa ROT8(%rip),%ymm2 + vmovdqa ROT16(%rip),%ymm3 + + # x12 += counter values 0-3 + vpaddd %ymm1,%ymm12,%ymm12 + + mov $10,%ecx + +.Ldoubleround8: + # x0 += x4, x12 = rotl32(x12 ^ x0, 16) + vpaddd 0x00(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm3,%ymm12,%ymm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 16) + vpaddd 0x20(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm3,%ymm13,%ymm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 16) + vpaddd 0x40(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm3,%ymm14,%ymm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 16) + vpaddd 0x60(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm3,%ymm15,%ymm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 12) + vpaddd %ymm12,%ymm8,%ymm8 + vpxor %ymm8,%ymm4,%ymm4 + vpslld $12,%ymm4,%ymm0 + vpsrld $20,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 12) + vpaddd %ymm13,%ymm9,%ymm9 + vpxor %ymm9,%ymm5,%ymm5 + vpslld $12,%ymm5,%ymm0 + vpsrld $20,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 12) + vpaddd %ymm14,%ymm10,%ymm10 + vpxor %ymm10,%ymm6,%ymm6 + vpslld $12,%ymm6,%ymm0 + vpsrld $20,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 12) + vpaddd %ymm15,%ymm11,%ymm11 + vpxor %ymm11,%ymm7,%ymm7 + vpslld $12,%ymm7,%ymm0 + vpsrld $20,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + + # x0 += x4, x12 = rotl32(x12 ^ x0, 8) + vpaddd 0x00(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm2,%ymm12,%ymm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 8) + vpaddd 0x20(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm2,%ymm13,%ymm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 8) + vpaddd 0x40(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm2,%ymm14,%ymm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 8) + vpaddd 0x60(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm2,%ymm15,%ymm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 7) + vpaddd %ymm12,%ymm8,%ymm8 + vpxor %ymm8,%ymm4,%ymm4 + vpslld $7,%ymm4,%ymm0 + vpsrld $25,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 7) + vpaddd %ymm13,%ymm9,%ymm9 + vpxor %ymm9,%ymm5,%ymm5 + vpslld $7,%ymm5,%ymm0 + vpsrld $25,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 7) + vpaddd %ymm14,%ymm10,%ymm10 + vpxor %ymm10,%ymm6,%ymm6 + vpslld $7,%ymm6,%ymm0 + vpsrld $25,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 7) + vpaddd %ymm15,%ymm11,%ymm11 + vpxor %ymm11,%ymm7,%ymm7 + vpslld $7,%ymm7,%ymm0 + vpsrld 
$25,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 16) + vpaddd 0x00(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm3,%ymm15,%ymm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 16)%ymm0 + vpaddd 0x20(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm3,%ymm12,%ymm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 16) + vpaddd 0x40(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm3,%ymm13,%ymm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 16) + vpaddd 0x60(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm3,%ymm14,%ymm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 12) + vpaddd %ymm15,%ymm10,%ymm10 + vpxor %ymm10,%ymm5,%ymm5 + vpslld $12,%ymm5,%ymm0 + vpsrld $20,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 12) + vpaddd %ymm12,%ymm11,%ymm11 + vpxor %ymm11,%ymm6,%ymm6 + vpslld $12,%ymm6,%ymm0 + vpsrld $20,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 12) + vpaddd %ymm13,%ymm8,%ymm8 + vpxor %ymm8,%ymm7,%ymm7 + vpslld $12,%ymm7,%ymm0 + vpsrld $20,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 12) + vpaddd %ymm14,%ymm9,%ymm9 + vpxor %ymm9,%ymm4,%ymm4 + vpslld $12,%ymm4,%ymm0 + vpsrld $20,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 8) + vpaddd 0x00(%rsp),%ymm5,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpxor %ymm0,%ymm15,%ymm15 + vpshufb %ymm2,%ymm15,%ymm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 8) + vpaddd 0x20(%rsp),%ymm6,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpxor %ymm0,%ymm12,%ymm12 + vpshufb %ymm2,%ymm12,%ymm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 8) + vpaddd 0x40(%rsp),%ymm7,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpxor %ymm0,%ymm13,%ymm13 + vpshufb %ymm2,%ymm13,%ymm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 8) + vpaddd 0x60(%rsp),%ymm4,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpxor %ymm0,%ymm14,%ymm14 + vpshufb %ymm2,%ymm14,%ymm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 7) + vpaddd %ymm15,%ymm10,%ymm10 + vpxor %ymm10,%ymm5,%ymm5 + vpslld $7,%ymm5,%ymm0 + vpsrld $25,%ymm5,%ymm5 + vpor %ymm0,%ymm5,%ymm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 7) + vpaddd %ymm12,%ymm11,%ymm11 + vpxor %ymm11,%ymm6,%ymm6 + vpslld $7,%ymm6,%ymm0 + vpsrld $25,%ymm6,%ymm6 + vpor %ymm0,%ymm6,%ymm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 7) + vpaddd %ymm13,%ymm8,%ymm8 + vpxor %ymm8,%ymm7,%ymm7 + vpslld $7,%ymm7,%ymm0 + vpsrld $25,%ymm7,%ymm7 + vpor %ymm0,%ymm7,%ymm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 7) + vpaddd %ymm14,%ymm9,%ymm9 + vpxor %ymm9,%ymm4,%ymm4 + vpslld $7,%ymm4,%ymm0 + vpsrld $25,%ymm4,%ymm4 + vpor %ymm0,%ymm4,%ymm4 + + dec %ecx + jnz .Ldoubleround8 + + # x0..15[0-3] += s[0..15] + vpbroadcastd 0x00(%rdi),%ymm0 + vpaddd 0x00(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x00(%rsp) + vpbroadcastd 0x04(%rdi),%ymm0 + vpaddd 0x20(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x20(%rsp) + vpbroadcastd 0x08(%rdi),%ymm0 + vpaddd 0x40(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x40(%rsp) + vpbroadcastd 0x0c(%rdi),%ymm0 + vpaddd 0x60(%rsp),%ymm0,%ymm0 + vmovdqa %ymm0,0x60(%rsp) + vpbroadcastd 0x10(%rdi),%ymm0 + vpaddd %ymm0,%ymm4,%ymm4 + vpbroadcastd 0x14(%rdi),%ymm0 + vpaddd %ymm0,%ymm5,%ymm5 + vpbroadcastd 0x18(%rdi),%ymm0 + vpaddd %ymm0,%ymm6,%ymm6 + vpbroadcastd 0x1c(%rdi),%ymm0 + vpaddd %ymm0,%ymm7,%ymm7 + vpbroadcastd 0x20(%rdi),%ymm0 + vpaddd %ymm0,%ymm8,%ymm8 + vpbroadcastd 0x24(%rdi),%ymm0 + vpaddd %ymm0,%ymm9,%ymm9 + vpbroadcastd 0x28(%rdi),%ymm0 + vpaddd %ymm0,%ymm10,%ymm10 + vpbroadcastd 0x2c(%rdi),%ymm0 + 
vpaddd %ymm0,%ymm11,%ymm11 + vpbroadcastd 0x30(%rdi),%ymm0 + vpaddd %ymm0,%ymm12,%ymm12 + vpbroadcastd 0x34(%rdi),%ymm0 + vpaddd %ymm0,%ymm13,%ymm13 + vpbroadcastd 0x38(%rdi),%ymm0 + vpaddd %ymm0,%ymm14,%ymm14 + vpbroadcastd 0x3c(%rdi),%ymm0 + vpaddd %ymm0,%ymm15,%ymm15 + + # x12 += counter values 0-3 + vpaddd %ymm1,%ymm12,%ymm12 + + # interleave 32-bit words in state n, n+1 + vmovdqa 0x00(%rsp),%ymm0 + vmovdqa 0x20(%rsp),%ymm1 + vpunpckldq %ymm1,%ymm0,%ymm2 + vpunpckhdq %ymm1,%ymm0,%ymm1 + vmovdqa %ymm2,0x00(%rsp) + vmovdqa %ymm1,0x20(%rsp) + vmovdqa 0x40(%rsp),%ymm0 + vmovdqa 0x60(%rsp),%ymm1 + vpunpckldq %ymm1,%ymm0,%ymm2 + vpunpckhdq %ymm1,%ymm0,%ymm1 + vmovdqa %ymm2,0x40(%rsp) + vmovdqa %ymm1,0x60(%rsp) + vmovdqa %ymm4,%ymm0 + vpunpckldq %ymm5,%ymm0,%ymm4 + vpunpckhdq %ymm5,%ymm0,%ymm5 + vmovdqa %ymm6,%ymm0 + vpunpckldq %ymm7,%ymm0,%ymm6 + vpunpckhdq %ymm7,%ymm0,%ymm7 + vmovdqa %ymm8,%ymm0 + vpunpckldq %ymm9,%ymm0,%ymm8 + vpunpckhdq %ymm9,%ymm0,%ymm9 + vmovdqa %ymm10,%ymm0 + vpunpckldq %ymm11,%ymm0,%ymm10 + vpunpckhdq %ymm11,%ymm0,%ymm11 + vmovdqa %ymm12,%ymm0 + vpunpckldq %ymm13,%ymm0,%ymm12 + vpunpckhdq %ymm13,%ymm0,%ymm13 + vmovdqa %ymm14,%ymm0 + vpunpckldq %ymm15,%ymm0,%ymm14 + vpunpckhdq %ymm15,%ymm0,%ymm15 + + # interleave 64-bit words in state n, n+2 + vmovdqa 0x00(%rsp),%ymm0 + vmovdqa 0x40(%rsp),%ymm2 + vpunpcklqdq %ymm2,%ymm0,%ymm1 + vpunpckhqdq %ymm2,%ymm0,%ymm2 + vmovdqa %ymm1,0x00(%rsp) + vmovdqa %ymm2,0x40(%rsp) + vmovdqa 0x20(%rsp),%ymm0 + vmovdqa 0x60(%rsp),%ymm2 + vpunpcklqdq %ymm2,%ymm0,%ymm1 + vpunpckhqdq %ymm2,%ymm0,%ymm2 + vmovdqa %ymm1,0x20(%rsp) + vmovdqa %ymm2,0x60(%rsp) + vmovdqa %ymm4,%ymm0 + vpunpcklqdq %ymm6,%ymm0,%ymm4 + vpunpckhqdq %ymm6,%ymm0,%ymm6 + vmovdqa %ymm5,%ymm0 + vpunpcklqdq %ymm7,%ymm0,%ymm5 + vpunpckhqdq %ymm7,%ymm0,%ymm7 + vmovdqa %ymm8,%ymm0 + vpunpcklqdq %ymm10,%ymm0,%ymm8 + vpunpckhqdq %ymm10,%ymm0,%ymm10 + vmovdqa %ymm9,%ymm0 + vpunpcklqdq %ymm11,%ymm0,%ymm9 + vpunpckhqdq %ymm11,%ymm0,%ymm11 + vmovdqa %ymm12,%ymm0 + vpunpcklqdq %ymm14,%ymm0,%ymm12 + vpunpckhqdq %ymm14,%ymm0,%ymm14 + vmovdqa %ymm13,%ymm0 + vpunpcklqdq %ymm15,%ymm0,%ymm13 + vpunpckhqdq %ymm15,%ymm0,%ymm15 + + # interleave 128-bit words in state n, n+4 + vmovdqa 0x00(%rsp),%ymm0 + vperm2i128 $0x20,%ymm4,%ymm0,%ymm1 + vperm2i128 $0x31,%ymm4,%ymm0,%ymm4 + vmovdqa %ymm1,0x00(%rsp) + vmovdqa 0x20(%rsp),%ymm0 + vperm2i128 $0x20,%ymm5,%ymm0,%ymm1 + vperm2i128 $0x31,%ymm5,%ymm0,%ymm5 + vmovdqa %ymm1,0x20(%rsp) + vmovdqa 0x40(%rsp),%ymm0 + vperm2i128 $0x20,%ymm6,%ymm0,%ymm1 + vperm2i128 $0x31,%ymm6,%ymm0,%ymm6 + vmovdqa %ymm1,0x40(%rsp) + vmovdqa 0x60(%rsp),%ymm0 + vperm2i128 $0x20,%ymm7,%ymm0,%ymm1 + vperm2i128 $0x31,%ymm7,%ymm0,%ymm7 + vmovdqa %ymm1,0x60(%rsp) + vperm2i128 $0x20,%ymm12,%ymm8,%ymm0 + vperm2i128 $0x31,%ymm12,%ymm8,%ymm12 + vmovdqa %ymm0,%ymm8 + vperm2i128 $0x20,%ymm13,%ymm9,%ymm0 + vperm2i128 $0x31,%ymm13,%ymm9,%ymm13 + vmovdqa %ymm0,%ymm9 + vperm2i128 $0x20,%ymm14,%ymm10,%ymm0 + vperm2i128 $0x31,%ymm14,%ymm10,%ymm14 + vmovdqa %ymm0,%ymm10 + vperm2i128 $0x20,%ymm15,%ymm11,%ymm0 + vperm2i128 $0x31,%ymm15,%ymm11,%ymm15 + vmovdqa %ymm0,%ymm11 + + # xor with corresponding input, write to output + vmovdqa 0x00(%rsp),%ymm0 + vpxor 0x0000(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0000(%rsi) + vmovdqa 0x20(%rsp),%ymm0 + vpxor 0x0080(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0080(%rsi) + vmovdqa 0x40(%rsp),%ymm0 + vpxor 0x0040(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x0040(%rsi) + vmovdqa 0x60(%rsp),%ymm0 + vpxor 0x00c0(%rdx),%ymm0,%ymm0 + vmovdqu %ymm0,0x00c0(%rsi) + vpxor 
0x0100(%rdx),%ymm4,%ymm4 + vmovdqu %ymm4,0x0100(%rsi) + vpxor 0x0180(%rdx),%ymm5,%ymm5 + vmovdqu %ymm5,0x00180(%rsi) + vpxor 0x0140(%rdx),%ymm6,%ymm6 + vmovdqu %ymm6,0x0140(%rsi) + vpxor 0x01c0(%rdx),%ymm7,%ymm7 + vmovdqu %ymm7,0x01c0(%rsi) + vpxor 0x0020(%rdx),%ymm8,%ymm8 + vmovdqu %ymm8,0x0020(%rsi) + vpxor 0x00a0(%rdx),%ymm9,%ymm9 + vmovdqu %ymm9,0x00a0(%rsi) + vpxor 0x0060(%rdx),%ymm10,%ymm10 + vmovdqu %ymm10,0x0060(%rsi) + vpxor 0x00e0(%rdx),%ymm11,%ymm11 + vmovdqu %ymm11,0x00e0(%rsi) + vpxor 0x0120(%rdx),%ymm12,%ymm12 + vmovdqu %ymm12,0x0120(%rsi) + vpxor 0x01a0(%rdx),%ymm13,%ymm13 + vmovdqu %ymm13,0x01a0(%rsi) + vpxor 0x0160(%rdx),%ymm14,%ymm14 + vmovdqu %ymm14,0x0160(%rsi) + vpxor 0x01e0(%rdx),%ymm15,%ymm15 + vmovdqu %ymm15,0x01e0(%rsi) + + vzeroupper + mov %r8,%rsp + ret +ENDPROC(chacha20_8block_xor_avx2) diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S new file mode 100644 index 000000000000..712b13047b41 --- /dev/null +++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S @@ -0,0 +1,625 @@ +/* + * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/linkage.h> + +.data +.align 16 + +ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 +ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 +CTRINC: .octa 0x00000003000000020000000100000000 + +.text + +ENTRY(chacha20_block_xor_ssse3) + # %rdi: Input state matrix, s + # %rsi: 1 data block output, o + # %rdx: 1 data block input, i + + # This function encrypts one ChaCha20 block by loading the state matrix + # in four SSE registers. It performs matrix operation on four words in + # parallel, but requireds shuffling to rearrange the words after each + # round. 8/16-bit word rotation is done with the slightly better + # performing SSSE3 byte shuffling, 7/12-bit word rotation uses + # traditional shift+OR. 
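As the comment says, keeping the state as four 4-word rows forces a rearrangement between the two halves of each double round: the pshufd $0x39/$0x4e/$0x93 pairs below rotate rows 1-3 so the diagonals line up in the columns, then rotate them back. The function's closing feed-forward and XOR ("o0 = i0 ^ (x0 + s0)" and friends) looks like this in scalar C -- a sketch only, with made-up helper names:

#include <stdint.h>

/* Add the saved input state back into the permuted state (feed-forward),
 * serialize little-endian and XOR into the 64-byte data block. */
static void chacha20_feedforward_xor(uint8_t out[64], const uint8_t in[64],
				     const uint32_t x[16], const uint32_t s[16])
{
	for (int i = 0; i < 16; i++) {
		uint32_t ks = x[i] + s[i];	/* keystream word */

		for (int b = 0; b < 4; b++)
			out[4 * i + b] = in[4 * i + b] ^ (uint8_t)(ks >> (8 * b));
	}
}

On x86 the byte order already matches, which is why the assembly can do the same thing with plain paddd/pxor/movdqu groups on whole 128-bit rows.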
+ + # x0..3 = s0..3 + movdqa 0x00(%rdi),%xmm0 + movdqa 0x10(%rdi),%xmm1 + movdqa 0x20(%rdi),%xmm2 + movdqa 0x30(%rdi),%xmm3 + movdqa %xmm0,%xmm8 + movdqa %xmm1,%xmm9 + movdqa %xmm2,%xmm10 + movdqa %xmm3,%xmm11 + + movdqa ROT8(%rip),%xmm4 + movdqa ROT16(%rip),%xmm5 + + mov $10,%ecx + +.Ldoubleround: + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm5,%xmm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm6 + pslld $12,%xmm6 + psrld $20,%xmm1 + por %xmm6,%xmm1 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm4,%xmm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm7 + pslld $7,%xmm7 + psrld $25,%xmm1 + por %xmm7,%xmm1 + + # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) + pshufd $0x39,%xmm1,%xmm1 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + pshufd $0x4e,%xmm2,%xmm2 + # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) + pshufd $0x93,%xmm3,%xmm3 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 16) + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm5,%xmm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 12) + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm6 + pslld $12,%xmm6 + psrld $20,%xmm1 + por %xmm6,%xmm1 + + # x0 += x1, x3 = rotl32(x3 ^ x0, 8) + paddd %xmm1,%xmm0 + pxor %xmm0,%xmm3 + pshufb %xmm4,%xmm3 + + # x2 += x3, x1 = rotl32(x1 ^ x2, 7) + paddd %xmm3,%xmm2 + pxor %xmm2,%xmm1 + movdqa %xmm1,%xmm7 + pslld $7,%xmm7 + psrld $25,%xmm1 + por %xmm7,%xmm1 + + # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) + pshufd $0x93,%xmm1,%xmm1 + # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) + pshufd $0x4e,%xmm2,%xmm2 + # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) + pshufd $0x39,%xmm3,%xmm3 + + dec %ecx + jnz .Ldoubleround + + # o0 = i0 ^ (x0 + s0) + movdqu 0x00(%rdx),%xmm4 + paddd %xmm8,%xmm0 + pxor %xmm4,%xmm0 + movdqu %xmm0,0x00(%rsi) + # o1 = i1 ^ (x1 + s1) + movdqu 0x10(%rdx),%xmm5 + paddd %xmm9,%xmm1 + pxor %xmm5,%xmm1 + movdqu %xmm1,0x10(%rsi) + # o2 = i2 ^ (x2 + s2) + movdqu 0x20(%rdx),%xmm6 + paddd %xmm10,%xmm2 + pxor %xmm6,%xmm2 + movdqu %xmm2,0x20(%rsi) + # o3 = i3 ^ (x3 + s3) + movdqu 0x30(%rdx),%xmm7 + paddd %xmm11,%xmm3 + pxor %xmm7,%xmm3 + movdqu %xmm3,0x30(%rsi) + + ret +ENDPROC(chacha20_block_xor_ssse3) + +ENTRY(chacha20_4block_xor_ssse3) + # %rdi: Input state matrix, s + # %rsi: 4 data blocks output, o + # %rdx: 4 data blocks input, i + + # This function encrypts four consecutive ChaCha20 blocks by loading the + # the state matrix in SSE registers four times. As we need some scratch + # registers, we save the first four registers on the stack. The + # algorithm performs each operation on the corresponding word of each + # state matrix, hence requires no word shuffling. For final XORing step + # we transpose the matrix by interleaving 32- and then 64-bit words, + # which allows us to do XOR in SSE registers. 8/16-bit word rotation is + # done with the slightly better performing SSSE3 byte shuffling, + # 7/12-bit word rotation uses traditional shift+OR. 
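The "interleave 32- and then 64-bit words" step the comment describes is a standard 4x4 transpose: going in, each register holds one state word for blocks 0-3; coming out, each register holds four consecutive words of a single block and can be XORed directly against the input. The same idea with SSE2 intrinsics -- a sketch of the concept, not a line-for-line match for the punpckldq/punpcklqdq sequence near the end of the function:

#include <emmintrin.h>

/* r[k] holds word k of blocks 0..3; on return r[j] holds words 0..3 of
 * block j (a 4x4 transpose of 32-bit lanes). */
static void transpose_4x4(__m128i r[4])
{
	__m128i t0 = _mm_unpacklo_epi32(r[0], r[1]);	/* B0.w0 B0.w1 B1.w0 B1.w1 */
	__m128i t1 = _mm_unpackhi_epi32(r[0], r[1]);	/* B2.w0 B2.w1 B3.w0 B3.w1 */
	__m128i t2 = _mm_unpacklo_epi32(r[2], r[3]);	/* B0.w2 B0.w3 B1.w2 B1.w3 */
	__m128i t3 = _mm_unpackhi_epi32(r[2], r[3]);	/* B2.w2 B2.w3 B3.w2 B3.w3 */

	r[0] = _mm_unpacklo_epi64(t0, t2);		/* block 0, words 0..3 */
	r[1] = _mm_unpackhi_epi64(t0, t2);		/* block 1, words 0..3 */
	r[2] = _mm_unpacklo_epi64(t1, t3);		/* block 2, words 0..3 */
	r[3] = _mm_unpackhi_epi64(t1, t3);		/* block 3, words 0..3 */
}

The AVX2 8-block variant does the same thing in three stages (32-, 64- and then 128-bit interleaves) because each ymm register carries eight lanes.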
+ + sub $0x40,%rsp + + # x0..15[0-3] = s0..3[0..3] + movq 0x00(%rdi),%xmm1 + pshufd $0x00,%xmm1,%xmm0 + pshufd $0x55,%xmm1,%xmm1 + movq 0x08(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + movq 0x10(%rdi),%xmm5 + pshufd $0x00,%xmm5,%xmm4 + pshufd $0x55,%xmm5,%xmm5 + movq 0x18(%rdi),%xmm7 + pshufd $0x00,%xmm7,%xmm6 + pshufd $0x55,%xmm7,%xmm7 + movq 0x20(%rdi),%xmm9 + pshufd $0x00,%xmm9,%xmm8 + pshufd $0x55,%xmm9,%xmm9 + movq 0x28(%rdi),%xmm11 + pshufd $0x00,%xmm11,%xmm10 + pshufd $0x55,%xmm11,%xmm11 + movq 0x30(%rdi),%xmm13 + pshufd $0x00,%xmm13,%xmm12 + pshufd $0x55,%xmm13,%xmm13 + movq 0x38(%rdi),%xmm15 + pshufd $0x00,%xmm15,%xmm14 + pshufd $0x55,%xmm15,%xmm15 + # x0..3 on stack + movdqa %xmm0,0x00(%rsp) + movdqa %xmm1,0x10(%rsp) + movdqa %xmm2,0x20(%rsp) + movdqa %xmm3,0x30(%rsp) + + movdqa CTRINC(%rip),%xmm1 + movdqa ROT8(%rip),%xmm2 + movdqa ROT16(%rip),%xmm3 + + # x12 += counter values 0-3 + paddd %xmm1,%xmm12 + + mov $10,%ecx + +.Ldoubleround4: + # x0 += x4, x12 = rotl32(x12 ^ x0, 16) + movdqa 0x00(%rsp),%xmm0 + paddd %xmm4,%xmm0 + movdqa %xmm0,0x00(%rsp) + pxor %xmm0,%xmm12 + pshufb %xmm3,%xmm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 16) + movdqa 0x10(%rsp),%xmm0 + paddd %xmm5,%xmm0 + movdqa %xmm0,0x10(%rsp) + pxor %xmm0,%xmm13 + pshufb %xmm3,%xmm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 16) + movdqa 0x20(%rsp),%xmm0 + paddd %xmm6,%xmm0 + movdqa %xmm0,0x20(%rsp) + pxor %xmm0,%xmm14 + pshufb %xmm3,%xmm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 16) + movdqa 0x30(%rsp),%xmm0 + paddd %xmm7,%xmm0 + movdqa %xmm0,0x30(%rsp) + pxor %xmm0,%xmm15 + pshufb %xmm3,%xmm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 12) + paddd %xmm12,%xmm8 + pxor %xmm8,%xmm4 + movdqa %xmm4,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm4 + por %xmm0,%xmm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 12) + paddd %xmm13,%xmm9 + pxor %xmm9,%xmm5 + movdqa %xmm5,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm5 + por %xmm0,%xmm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 12) + paddd %xmm14,%xmm10 + pxor %xmm10,%xmm6 + movdqa %xmm6,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm6 + por %xmm0,%xmm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 12) + paddd %xmm15,%xmm11 + pxor %xmm11,%xmm7 + movdqa %xmm7,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm7 + por %xmm0,%xmm7 + + # x0 += x4, x12 = rotl32(x12 ^ x0, 8) + movdqa 0x00(%rsp),%xmm0 + paddd %xmm4,%xmm0 + movdqa %xmm0,0x00(%rsp) + pxor %xmm0,%xmm12 + pshufb %xmm2,%xmm12 + # x1 += x5, x13 = rotl32(x13 ^ x1, 8) + movdqa 0x10(%rsp),%xmm0 + paddd %xmm5,%xmm0 + movdqa %xmm0,0x10(%rsp) + pxor %xmm0,%xmm13 + pshufb %xmm2,%xmm13 + # x2 += x6, x14 = rotl32(x14 ^ x2, 8) + movdqa 0x20(%rsp),%xmm0 + paddd %xmm6,%xmm0 + movdqa %xmm0,0x20(%rsp) + pxor %xmm0,%xmm14 + pshufb %xmm2,%xmm14 + # x3 += x7, x15 = rotl32(x15 ^ x3, 8) + movdqa 0x30(%rsp),%xmm0 + paddd %xmm7,%xmm0 + movdqa %xmm0,0x30(%rsp) + pxor %xmm0,%xmm15 + pshufb %xmm2,%xmm15 + + # x8 += x12, x4 = rotl32(x4 ^ x8, 7) + paddd %xmm12,%xmm8 + pxor %xmm8,%xmm4 + movdqa %xmm4,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm4 + por %xmm0,%xmm4 + # x9 += x13, x5 = rotl32(x5 ^ x9, 7) + paddd %xmm13,%xmm9 + pxor %xmm9,%xmm5 + movdqa %xmm5,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm5 + por %xmm0,%xmm5 + # x10 += x14, x6 = rotl32(x6 ^ x10, 7) + paddd %xmm14,%xmm10 + pxor %xmm10,%xmm6 + movdqa %xmm6,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm6 + por %xmm0,%xmm6 + # x11 += x15, x7 = rotl32(x7 ^ x11, 7) + paddd %xmm15,%xmm11 + pxor %xmm11,%xmm7 + movdqa %xmm7,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm7 + por %xmm0,%xmm7 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 16) + movdqa 0x00(%rsp),%xmm0 + paddd 
%xmm5,%xmm0 + movdqa %xmm0,0x00(%rsp) + pxor %xmm0,%xmm15 + pshufb %xmm3,%xmm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 16) + movdqa 0x10(%rsp),%xmm0 + paddd %xmm6,%xmm0 + movdqa %xmm0,0x10(%rsp) + pxor %xmm0,%xmm12 + pshufb %xmm3,%xmm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 16) + movdqa 0x20(%rsp),%xmm0 + paddd %xmm7,%xmm0 + movdqa %xmm0,0x20(%rsp) + pxor %xmm0,%xmm13 + pshufb %xmm3,%xmm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 16) + movdqa 0x30(%rsp),%xmm0 + paddd %xmm4,%xmm0 + movdqa %xmm0,0x30(%rsp) + pxor %xmm0,%xmm14 + pshufb %xmm3,%xmm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 12) + paddd %xmm15,%xmm10 + pxor %xmm10,%xmm5 + movdqa %xmm5,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm5 + por %xmm0,%xmm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 12) + paddd %xmm12,%xmm11 + pxor %xmm11,%xmm6 + movdqa %xmm6,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm6 + por %xmm0,%xmm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 12) + paddd %xmm13,%xmm8 + pxor %xmm8,%xmm7 + movdqa %xmm7,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm7 + por %xmm0,%xmm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 12) + paddd %xmm14,%xmm9 + pxor %xmm9,%xmm4 + movdqa %xmm4,%xmm0 + pslld $12,%xmm0 + psrld $20,%xmm4 + por %xmm0,%xmm4 + + # x0 += x5, x15 = rotl32(x15 ^ x0, 8) + movdqa 0x00(%rsp),%xmm0 + paddd %xmm5,%xmm0 + movdqa %xmm0,0x00(%rsp) + pxor %xmm0,%xmm15 + pshufb %xmm2,%xmm15 + # x1 += x6, x12 = rotl32(x12 ^ x1, 8) + movdqa 0x10(%rsp),%xmm0 + paddd %xmm6,%xmm0 + movdqa %xmm0,0x10(%rsp) + pxor %xmm0,%xmm12 + pshufb %xmm2,%xmm12 + # x2 += x7, x13 = rotl32(x13 ^ x2, 8) + movdqa 0x20(%rsp),%xmm0 + paddd %xmm7,%xmm0 + movdqa %xmm0,0x20(%rsp) + pxor %xmm0,%xmm13 + pshufb %xmm2,%xmm13 + # x3 += x4, x14 = rotl32(x14 ^ x3, 8) + movdqa 0x30(%rsp),%xmm0 + paddd %xmm4,%xmm0 + movdqa %xmm0,0x30(%rsp) + pxor %xmm0,%xmm14 + pshufb %xmm2,%xmm14 + + # x10 += x15, x5 = rotl32(x5 ^ x10, 7) + paddd %xmm15,%xmm10 + pxor %xmm10,%xmm5 + movdqa %xmm5,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm5 + por %xmm0,%xmm5 + # x11 += x12, x6 = rotl32(x6 ^ x11, 7) + paddd %xmm12,%xmm11 + pxor %xmm11,%xmm6 + movdqa %xmm6,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm6 + por %xmm0,%xmm6 + # x8 += x13, x7 = rotl32(x7 ^ x8, 7) + paddd %xmm13,%xmm8 + pxor %xmm8,%xmm7 + movdqa %xmm7,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm7 + por %xmm0,%xmm7 + # x9 += x14, x4 = rotl32(x4 ^ x9, 7) + paddd %xmm14,%xmm9 + pxor %xmm9,%xmm4 + movdqa %xmm4,%xmm0 + pslld $7,%xmm0 + psrld $25,%xmm4 + por %xmm0,%xmm4 + + dec %ecx + jnz .Ldoubleround4 + + # x0[0-3] += s0[0] + # x1[0-3] += s0[1] + movq 0x00(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd 0x00(%rsp),%xmm2 + movdqa %xmm2,0x00(%rsp) + paddd 0x10(%rsp),%xmm3 + movdqa %xmm3,0x10(%rsp) + # x2[0-3] += s0[2] + # x3[0-3] += s0[3] + movq 0x08(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd 0x20(%rsp),%xmm2 + movdqa %xmm2,0x20(%rsp) + paddd 0x30(%rsp),%xmm3 + movdqa %xmm3,0x30(%rsp) + + # x4[0-3] += s1[0] + # x5[0-3] += s1[1] + movq 0x10(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm4 + paddd %xmm3,%xmm5 + # x6[0-3] += s1[2] + # x7[0-3] += s1[3] + movq 0x18(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm6 + paddd %xmm3,%xmm7 + + # x8[0-3] += s2[0] + # x9[0-3] += s2[1] + movq 0x20(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm8 + paddd %xmm3,%xmm9 + # x10[0-3] += s2[2] + # x11[0-3] += s2[3] + movq 0x28(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm10 + paddd %xmm3,%xmm11 + + # 
x12[0-3] += s3[0] + # x13[0-3] += s3[1] + movq 0x30(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm12 + paddd %xmm3,%xmm13 + # x14[0-3] += s3[2] + # x15[0-3] += s3[3] + movq 0x38(%rdi),%xmm3 + pshufd $0x00,%xmm3,%xmm2 + pshufd $0x55,%xmm3,%xmm3 + paddd %xmm2,%xmm14 + paddd %xmm3,%xmm15 + + # x12 += counter values 0-3 + paddd %xmm1,%xmm12 + + # interleave 32-bit words in state n, n+1 + movdqa 0x00(%rsp),%xmm0 + movdqa 0x10(%rsp),%xmm1 + movdqa %xmm0,%xmm2 + punpckldq %xmm1,%xmm2 + punpckhdq %xmm1,%xmm0 + movdqa %xmm2,0x00(%rsp) + movdqa %xmm0,0x10(%rsp) + movdqa 0x20(%rsp),%xmm0 + movdqa 0x30(%rsp),%xmm1 + movdqa %xmm0,%xmm2 + punpckldq %xmm1,%xmm2 + punpckhdq %xmm1,%xmm0 + movdqa %xmm2,0x20(%rsp) + movdqa %xmm0,0x30(%rsp) + movdqa %xmm4,%xmm0 + punpckldq %xmm5,%xmm4 + punpckhdq %xmm5,%xmm0 + movdqa %xmm0,%xmm5 + movdqa %xmm6,%xmm0 + punpckldq %xmm7,%xmm6 + punpckhdq %xmm7,%xmm0 + movdqa %xmm0,%xmm7 + movdqa %xmm8,%xmm0 + punpckldq %xmm9,%xmm8 + punpckhdq %xmm9,%xmm0 + movdqa %xmm0,%xmm9 + movdqa %xmm10,%xmm0 + punpckldq %xmm11,%xmm10 + punpckhdq %xmm11,%xmm0 + movdqa %xmm0,%xmm11 + movdqa %xmm12,%xmm0 + punpckldq %xmm13,%xmm12 + punpckhdq %xmm13,%xmm0 + movdqa %xmm0,%xmm13 + movdqa %xmm14,%xmm0 + punpckldq %xmm15,%xmm14 + punpckhdq %xmm15,%xmm0 + movdqa %xmm0,%xmm15 + + # interleave 64-bit words in state n, n+2 + movdqa 0x00(%rsp),%xmm0 + movdqa 0x20(%rsp),%xmm1 + movdqa %xmm0,%xmm2 + punpcklqdq %xmm1,%xmm2 + punpckhqdq %xmm1,%xmm0 + movdqa %xmm2,0x00(%rsp) + movdqa %xmm0,0x20(%rsp) + movdqa 0x10(%rsp),%xmm0 + movdqa 0x30(%rsp),%xmm1 + movdqa %xmm0,%xmm2 + punpcklqdq %xmm1,%xmm2 + punpckhqdq %xmm1,%xmm0 + movdqa %xmm2,0x10(%rsp) + movdqa %xmm0,0x30(%rsp) + movdqa %xmm4,%xmm0 + punpcklqdq %xmm6,%xmm4 + punpckhqdq %xmm6,%xmm0 + movdqa %xmm0,%xmm6 + movdqa %xmm5,%xmm0 + punpcklqdq %xmm7,%xmm5 + punpckhqdq %xmm7,%xmm0 + movdqa %xmm0,%xmm7 + movdqa %xmm8,%xmm0 + punpcklqdq %xmm10,%xmm8 + punpckhqdq %xmm10,%xmm0 + movdqa %xmm0,%xmm10 + movdqa %xmm9,%xmm0 + punpcklqdq %xmm11,%xmm9 + punpckhqdq %xmm11,%xmm0 + movdqa %xmm0,%xmm11 + movdqa %xmm12,%xmm0 + punpcklqdq %xmm14,%xmm12 + punpckhqdq %xmm14,%xmm0 + movdqa %xmm0,%xmm14 + movdqa %xmm13,%xmm0 + punpcklqdq %xmm15,%xmm13 + punpckhqdq %xmm15,%xmm0 + movdqa %xmm0,%xmm15 + + # xor with corresponding input, write to output + movdqa 0x00(%rsp),%xmm0 + movdqu 0x00(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x00(%rsi) + movdqa 0x10(%rsp),%xmm0 + movdqu 0x80(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x80(%rsi) + movdqa 0x20(%rsp),%xmm0 + movdqu 0x40(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0x40(%rsi) + movdqa 0x30(%rsp),%xmm0 + movdqu 0xc0(%rdx),%xmm1 + pxor %xmm1,%xmm0 + movdqu %xmm0,0xc0(%rsi) + movdqu 0x10(%rdx),%xmm1 + pxor %xmm1,%xmm4 + movdqu %xmm4,0x10(%rsi) + movdqu 0x90(%rdx),%xmm1 + pxor %xmm1,%xmm5 + movdqu %xmm5,0x90(%rsi) + movdqu 0x50(%rdx),%xmm1 + pxor %xmm1,%xmm6 + movdqu %xmm6,0x50(%rsi) + movdqu 0xd0(%rdx),%xmm1 + pxor %xmm1,%xmm7 + movdqu %xmm7,0xd0(%rsi) + movdqu 0x20(%rdx),%xmm1 + pxor %xmm1,%xmm8 + movdqu %xmm8,0x20(%rsi) + movdqu 0xa0(%rdx),%xmm1 + pxor %xmm1,%xmm9 + movdqu %xmm9,0xa0(%rsi) + movdqu 0x60(%rdx),%xmm1 + pxor %xmm1,%xmm10 + movdqu %xmm10,0x60(%rsi) + movdqu 0xe0(%rdx),%xmm1 + pxor %xmm1,%xmm11 + movdqu %xmm11,0xe0(%rsi) + movdqu 0x30(%rdx),%xmm1 + pxor %xmm1,%xmm12 + movdqu %xmm12,0x30(%rsi) + movdqu 0xb0(%rdx),%xmm1 + pxor %xmm1,%xmm13 + movdqu %xmm13,0xb0(%rsi) + movdqu 0x70(%rdx),%xmm1 + pxor %xmm1,%xmm14 + movdqu %xmm14,0x70(%rsi) + movdqu 0xf0(%rdx),%xmm1 + pxor 
%xmm1,%xmm15 + movdqu %xmm15,0xf0(%rsi) + + add $0x40,%rsp + ret +ENDPROC(chacha20_4block_xor_ssse3) diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c new file mode 100644 index 000000000000..effe2160b7c5 --- /dev/null +++ b/arch/x86/crypto/chacha20_glue.c @@ -0,0 +1,150 @@ +/* + * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <crypto/algapi.h> +#include <crypto/chacha20.h> +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/simd.h> + +#define CHACHA20_STATE_ALIGN 16 + +asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src); +asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src); +#ifdef CONFIG_AS_AVX2 +asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src); +static bool chacha20_use_avx2; +#endif + +static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes) +{ + u8 buf[CHACHA20_BLOCK_SIZE]; + +#ifdef CONFIG_AS_AVX2 + if (chacha20_use_avx2) { + while (bytes >= CHACHA20_BLOCK_SIZE * 8) { + chacha20_8block_xor_avx2(state, dst, src); + bytes -= CHACHA20_BLOCK_SIZE * 8; + src += CHACHA20_BLOCK_SIZE * 8; + dst += CHACHA20_BLOCK_SIZE * 8; + state[12] += 8; + } + } +#endif + while (bytes >= CHACHA20_BLOCK_SIZE * 4) { + chacha20_4block_xor_ssse3(state, dst, src); + bytes -= CHACHA20_BLOCK_SIZE * 4; + src += CHACHA20_BLOCK_SIZE * 4; + dst += CHACHA20_BLOCK_SIZE * 4; + state[12] += 4; + } + while (bytes >= CHACHA20_BLOCK_SIZE) { + chacha20_block_xor_ssse3(state, dst, src); + bytes -= CHACHA20_BLOCK_SIZE; + src += CHACHA20_BLOCK_SIZE; + dst += CHACHA20_BLOCK_SIZE; + state[12]++; + } + if (bytes) { + memcpy(buf, src, bytes); + chacha20_block_xor_ssse3(state, buf, buf); + memcpy(dst, buf, bytes); + } +} + +static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1]; + struct blkcipher_walk walk; + int err; + + if (!may_use_simd()) + return crypto_chacha20_crypt(desc, dst, src, nbytes); + + state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE); + + crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv); + + kernel_fpu_begin(); + + while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { + chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, + rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); + err = blkcipher_walk_done(desc, &walk, + walk.nbytes % CHACHA20_BLOCK_SIZE); + } + + if (walk.nbytes) { + chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes); + err = blkcipher_walk_done(desc, &walk, 0); + } + + kernel_fpu_end(); + + return err; +} + +static struct crypto_alg alg = { + .cra_name = "chacha20", + .cra_driver_name = "chacha20-simd", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = 1, + .cra_type = &crypto_blkcipher_type, + .cra_ctxsize = sizeof(struct chacha20_ctx), + .cra_alignmask = sizeof(u32) - 1, + .cra_module = THIS_MODULE, + .cra_u = { + 
.blkcipher = { + .min_keysize = CHACHA20_KEY_SIZE, + .max_keysize = CHACHA20_KEY_SIZE, + .ivsize = CHACHA20_IV_SIZE, + .geniv = "seqiv", + .setkey = crypto_chacha20_setkey, + .encrypt = chacha20_simd, + .decrypt = chacha20_simd, + }, + }, +}; + +static int __init chacha20_simd_mod_init(void) +{ + if (!cpu_has_ssse3) + return -ENODEV; + +#ifdef CONFIG_AS_AVX2 + chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 && + cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); +#endif + return crypto_register_alg(&alg); +} + +static void __exit chacha20_simd_mod_fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(chacha20_simd_mod_init); +module_exit(chacha20_simd_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("chacha20 cipher algorithm, SIMD accelerated"); +MODULE_ALIAS_CRYPTO("chacha20"); +MODULE_ALIAS_CRYPTO("chacha20-simd"); diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S new file mode 100644 index 000000000000..eff2f414e22b --- /dev/null +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S @@ -0,0 +1,386 @@ +/* + * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/linkage.h> + +.data +.align 32 + +ANMASK: .octa 0x0000000003ffffff0000000003ffffff + .octa 0x0000000003ffffff0000000003ffffff +ORMASK: .octa 0x00000000010000000000000001000000 + .octa 0x00000000010000000000000001000000 + +.text + +#define h0 0x00(%rdi) +#define h1 0x04(%rdi) +#define h2 0x08(%rdi) +#define h3 0x0c(%rdi) +#define h4 0x10(%rdi) +#define r0 0x00(%rdx) +#define r1 0x04(%rdx) +#define r2 0x08(%rdx) +#define r3 0x0c(%rdx) +#define r4 0x10(%rdx) +#define u0 0x00(%r8) +#define u1 0x04(%r8) +#define u2 0x08(%r8) +#define u3 0x0c(%r8) +#define u4 0x10(%r8) +#define w0 0x14(%r8) +#define w1 0x18(%r8) +#define w2 0x1c(%r8) +#define w3 0x20(%r8) +#define w4 0x24(%r8) +#define y0 0x28(%r8) +#define y1 0x2c(%r8) +#define y2 0x30(%r8) +#define y3 0x34(%r8) +#define y4 0x38(%r8) +#define m %rsi +#define hc0 %ymm0 +#define hc1 %ymm1 +#define hc2 %ymm2 +#define hc3 %ymm3 +#define hc4 %ymm4 +#define hc0x %xmm0 +#define hc1x %xmm1 +#define hc2x %xmm2 +#define hc3x %xmm3 +#define hc4x %xmm4 +#define t1 %ymm5 +#define t2 %ymm6 +#define t1x %xmm5 +#define t2x %xmm6 +#define ruwy0 %ymm7 +#define ruwy1 %ymm8 +#define ruwy2 %ymm9 +#define ruwy3 %ymm10 +#define ruwy4 %ymm11 +#define ruwy0x %xmm7 +#define ruwy1x %xmm8 +#define ruwy2x %xmm9 +#define ruwy3x %xmm10 +#define ruwy4x %xmm11 +#define svxz1 %ymm12 +#define svxz2 %ymm13 +#define svxz3 %ymm14 +#define svxz4 %ymm15 +#define d0 %r9 +#define d1 %r10 +#define d2 %r11 +#define d3 %r12 +#define d4 %r13 + +ENTRY(poly1305_4block_avx2) + # %rdi: Accumulator h[5] + # %rsi: 64 byte input block m + # %rdx: Poly1305 key r[5] + # %rcx: Quadblock count + # %r8: Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5], + + # This four-block variant uses loop unrolled block processing. 
It + # requires 4 Poly1305 keys: r, r^2, r^3 and r^4: + # h = (h + m) * r => h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r + + vzeroupper + push %rbx + push %r12 + push %r13 + + # combine r0,u0,w0,y0 + vmovd y0,ruwy0x + vmovd w0,t1x + vpunpcklqdq t1,ruwy0,ruwy0 + vmovd u0,t1x + vmovd r0,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy0,ruwy0 + + # combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5 + vmovd y1,ruwy1x + vmovd w1,t1x + vpunpcklqdq t1,ruwy1,ruwy1 + vmovd u1,t1x + vmovd r1,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy1,ruwy1 + vpslld $2,ruwy1,svxz1 + vpaddd ruwy1,svxz1,svxz1 + + # combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5 + vmovd y2,ruwy2x + vmovd w2,t1x + vpunpcklqdq t1,ruwy2,ruwy2 + vmovd u2,t1x + vmovd r2,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy2,ruwy2 + vpslld $2,ruwy2,svxz2 + vpaddd ruwy2,svxz2,svxz2 + + # combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5 + vmovd y3,ruwy3x + vmovd w3,t1x + vpunpcklqdq t1,ruwy3,ruwy3 + vmovd u3,t1x + vmovd r3,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy3,ruwy3 + vpslld $2,ruwy3,svxz3 + vpaddd ruwy3,svxz3,svxz3 + + # combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5 + vmovd y4,ruwy4x + vmovd w4,t1x + vpunpcklqdq t1,ruwy4,ruwy4 + vmovd u4,t1x + vmovd r4,t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,ruwy4,ruwy4 + vpslld $2,ruwy4,svxz4 + vpaddd ruwy4,svxz4,svxz4 + +.Ldoblock4: + # hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff, + # m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0] + vmovd 0x00(m),hc0x + vmovd 0x10(m),t1x + vpunpcklqdq t1,hc0,hc0 + vmovd 0x20(m),t1x + vmovd 0x30(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc0,hc0 + vpand ANMASK(%rip),hc0,hc0 + vmovd h0,t1x + vpaddd t1,hc0,hc0 + # hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff, + # (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1] + vmovd 0x03(m),hc1x + vmovd 0x13(m),t1x + vpunpcklqdq t1,hc1,hc1 + vmovd 0x23(m),t1x + vmovd 0x33(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc1,hc1 + vpsrld $2,hc1,hc1 + vpand ANMASK(%rip),hc1,hc1 + vmovd h1,t1x + vpaddd t1,hc1,hc1 + # hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff, + # (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2] + vmovd 0x06(m),hc2x + vmovd 0x16(m),t1x + vpunpcklqdq t1,hc2,hc2 + vmovd 0x26(m),t1x + vmovd 0x36(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc2,hc2 + vpsrld $4,hc2,hc2 + vpand ANMASK(%rip),hc2,hc2 + vmovd h2,t1x + vpaddd t1,hc2,hc2 + # hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff, + # (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3] + vmovd 0x09(m),hc3x + vmovd 0x19(m),t1x + vpunpcklqdq t1,hc3,hc3 + vmovd 0x29(m),t1x + vmovd 0x39(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc3,hc3 + vpsrld $6,hc3,hc3 + vpand ANMASK(%rip),hc3,hc3 + vmovd h3,t1x + vpaddd t1,hc3,hc3 + # hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24), + # (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4] + vmovd 0x0c(m),hc4x + vmovd 0x1c(m),t1x + vpunpcklqdq t1,hc4,hc4 + vmovd 0x2c(m),t1x + vmovd 0x3c(m),t2x + vpunpcklqdq t2,t1,t1 + vperm2i128 $0x20,t1,hc4,hc4 + vpsrld $8,hc4,hc4 + vpor ORMASK(%rip),hc4,hc4 + vmovd h4,t1x + vpaddd t1,hc4,hc4 + + # t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ] + vpmuludq hc0,ruwy0,t1 + # t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ] + vpmuludq hc1,svxz4,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ] + vpmuludq hc2,svxz3,t2 + vpaddq 
t2,t1,t1 + # t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ] + vpmuludq hc3,svxz2,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ] + vpmuludq hc4,svxz1,t2 + vpaddq t2,t1,t1 + # d0 = t1[0] + t1[1] + t[2] + t[3] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d0 + + # t1 = [ hc0[3] * r1, hc0[2] * u1,hc0[1] * w1, hc0[0] * y1 ] + vpmuludq hc0,ruwy1,t1 + # t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ] + vpmuludq hc1,ruwy0,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ] + vpmuludq hc2,svxz4,t2 + vpaddq t2,t1,t1 + # t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ] + vpmuludq hc3,svxz3,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ] + vpmuludq hc4,svxz2,t2 + vpaddq t2,t1,t1 + # d1 = t1[0] + t1[1] + t1[3] + t1[4] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d1 + + # t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ] + vpmuludq hc0,ruwy2,t1 + # t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ] + vpmuludq hc1,ruwy1,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ] + vpmuludq hc2,ruwy0,t2 + vpaddq t2,t1,t1 + # t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ] + vpmuludq hc3,svxz4,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ] + vpmuludq hc4,svxz3,t2 + vpaddq t2,t1,t1 + # d2 = t1[0] + t1[1] + t1[2] + t1[3] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d2 + + # t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ] + vpmuludq hc0,ruwy3,t1 + # t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ] + vpmuludq hc1,ruwy2,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ] + vpmuludq hc2,ruwy1,t2 + vpaddq t2,t1,t1 + # t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ] + vpmuludq hc3,ruwy0,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ] + vpmuludq hc4,svxz4,t2 + vpaddq t2,t1,t1 + # d3 = t1[0] + t1[1] + t1[2] + t1[3] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d3 + + # t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ] + vpmuludq hc0,ruwy4,t1 + # t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ] + vpmuludq hc1,ruwy3,t2 + vpaddq t2,t1,t1 + # t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ] + vpmuludq hc2,ruwy2,t2 + vpaddq t2,t1,t1 + # t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ] + vpmuludq hc3,ruwy1,t2 + vpaddq t2,t1,t1 + # t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ] + vpmuludq hc4,ruwy0,t2 + vpaddq t2,t1,t1 + # d4 = t1[0] + t1[1] + t1[2] + t1[3] + vpermq $0xee,t1,t2 + vpaddq t2,t1,t1 + vpsrldq $8,t1,t2 + vpaddq t2,t1,t1 + vmovq t1x,d4 + + # d1 += d0 >> 26 + mov d0,%rax + shr $26,%rax + add %rax,d1 + # h0 = d0 & 0x3ffffff + mov d0,%rbx + and $0x3ffffff,%ebx + + # d2 += d1 >> 26 + mov d1,%rax + shr $26,%rax + add %rax,d2 + # h1 = d1 & 0x3ffffff + mov d1,%rax + and $0x3ffffff,%eax + mov %eax,h1 + + # d3 += d2 >> 26 + mov d2,%rax + shr $26,%rax + add %rax,d3 + # h2 = d2 & 0x3ffffff + mov d2,%rax + and $0x3ffffff,%eax + mov %eax,h2 + + # d4 += d3 >> 26 + mov d3,%rax + shr $26,%rax + add %rax,d4 + # h3 = d3 & 0x3ffffff + mov d3,%rax + and $0x3ffffff,%eax + mov %eax,h3 + + # h0 += (d4 >> 26) * 5 + mov d4,%rax + shr $26,%rax + lea 
(%eax,%eax,4),%eax + add %eax,%ebx + # h4 = d4 & 0x3ffffff + mov d4,%rax + and $0x3ffffff,%eax + mov %eax,h4 + + # h1 += h0 >> 26 + mov %ebx,%eax + shr $26,%eax + add %eax,h1 + # h0 = h0 & 0x3ffffff + andl $0x3ffffff,%ebx + mov %ebx,h0 + + add $0x40,m + dec %rcx + jnz .Ldoblock4 + + vzeroupper + pop %r13 + pop %r12 + pop %rbx + ret +ENDPROC(poly1305_4block_avx2) diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S new file mode 100644 index 000000000000..338c748054ed --- /dev/null +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S @@ -0,0 +1,582 @@ +/* + * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/linkage.h> + +.data +.align 16 + +ANMASK: .octa 0x0000000003ffffff0000000003ffffff +ORMASK: .octa 0x00000000010000000000000001000000 + +.text + +#define h0 0x00(%rdi) +#define h1 0x04(%rdi) +#define h2 0x08(%rdi) +#define h3 0x0c(%rdi) +#define h4 0x10(%rdi) +#define r0 0x00(%rdx) +#define r1 0x04(%rdx) +#define r2 0x08(%rdx) +#define r3 0x0c(%rdx) +#define r4 0x10(%rdx) +#define s1 0x00(%rsp) +#define s2 0x04(%rsp) +#define s3 0x08(%rsp) +#define s4 0x0c(%rsp) +#define m %rsi +#define h01 %xmm0 +#define h23 %xmm1 +#define h44 %xmm2 +#define t1 %xmm3 +#define t2 %xmm4 +#define t3 %xmm5 +#define t4 %xmm6 +#define mask %xmm7 +#define d0 %r8 +#define d1 %r9 +#define d2 %r10 +#define d3 %r11 +#define d4 %r12 + +ENTRY(poly1305_block_sse2) + # %rdi: Accumulator h[5] + # %rsi: 16 byte input block m + # %rdx: Poly1305 key r[5] + # %rcx: Block count + + # This single block variant tries to improve performance by doing two + # multiplications in parallel using SSE instructions. There is quite + # some quardword packing involved, hence the speedup is marginal. 
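
Both the AVX2 reduction just above and the SSE2 code that follows keep the Poly1305 accumulator h as five 26-bit limbs and work modulo p = 2^130 - 5; a carry out of the top limb wraps back into limb 0 multiplied by 5 (since 2^130 == 5 mod p), which is what the recurring "lea (%eax,%eax,4),%eax" (times 5) and the 0x3ffffff masks implement. For reference, here is a standalone scalar sketch of one h = (h + m) * r step in this representation; it is illustrative C, not the kernel's own code, and it assumes a little-endian host and an already clamped key r.

    #include <stdint.h>
    #include <string.h>

    static uint32_t le32(const uint8_t *p)
    {
            uint32_t v;

            memcpy(&v, p, sizeof(v));       /* little-endian host assumed */
            return v;
    }

    /* One h = (h + m) * r step mod 2^130 - 5, radix 2^26 (sketch). */
    static void poly1305_block_ref(uint32_t h[5], const uint32_t r[5],
                                   const uint8_t m[16])
    {
            uint32_t s1 = r[1] * 5, s2 = r[2] * 5, s3 = r[3] * 5, s4 = r[4] * 5;
            uint64_t d[5], c;
            int i;

            /* h += m: split the block into 26-bit limbs, set the 2^128 bit */
            h[0] += (le32(m +  0) >> 0) & 0x3ffffff;
            h[1] += (le32(m +  3) >> 2) & 0x3ffffff;
            h[2] += (le32(m +  6) >> 4) & 0x3ffffff;
            h[3] += (le32(m +  9) >> 6) & 0x3ffffff;
            h[4] += (le32(m + 12) >> 8) | (1 << 24);

            /* h *= r: schoolbook multiply; terms crossing 2^130 pick up *5 */
            d[0] = (uint64_t)h[0] * r[0] + (uint64_t)h[1] * s4 +
                   (uint64_t)h[2] * s3   + (uint64_t)h[3] * s2 +
                   (uint64_t)h[4] * s1;
            d[1] = (uint64_t)h[0] * r[1] + (uint64_t)h[1] * r[0] +
                   (uint64_t)h[2] * s4   + (uint64_t)h[3] * s3 +
                   (uint64_t)h[4] * s2;
            d[2] = (uint64_t)h[0] * r[2] + (uint64_t)h[1] * r[1] +
                   (uint64_t)h[2] * r[0] + (uint64_t)h[3] * s4 +
                   (uint64_t)h[4] * s3;
            d[3] = (uint64_t)h[0] * r[3] + (uint64_t)h[1] * r[2] +
                   (uint64_t)h[2] * r[1] + (uint64_t)h[3] * r[0] +
                   (uint64_t)h[4] * s4;
            d[4] = (uint64_t)h[0] * r[4] + (uint64_t)h[1] * r[3] +
                   (uint64_t)h[2] * r[2] + (uint64_t)h[3] * r[1] +
                   (uint64_t)h[4] * r[0];

            /* carry/reduce back to 26-bit limbs, same chain as the asm */
            for (c = 0, i = 0; i < 5; i++) {
                    d[i] += c;
                    c = d[i] >> 26;
                    h[i] = (uint32_t)d[i] & 0x3ffffff;
            }
            h[0] += (uint32_t)c * 5;        /* 2^130 == 5 (mod 2^130 - 5) */
            h[1] += h[0] >> 26;
            h[0] &= 0x3ffffff;
    }

The single-block SSE2 routine below evaluates these per-limb products two at a time in XMM registers; the two- and four-block variants instead run one such step per message block in parallel lanes, each lane with its own key power, and sum the lanes at the end.
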
+ + push %rbx + push %r12 + sub $0x10,%rsp + + # s1..s4 = r1..r4 * 5 + mov r1,%eax + lea (%eax,%eax,4),%eax + mov %eax,s1 + mov r2,%eax + lea (%eax,%eax,4),%eax + mov %eax,s2 + mov r3,%eax + lea (%eax,%eax,4),%eax + mov %eax,s3 + mov r4,%eax + lea (%eax,%eax,4),%eax + mov %eax,s4 + + movdqa ANMASK(%rip),mask + +.Ldoblock: + # h01 = [0, h1, 0, h0] + # h23 = [0, h3, 0, h2] + # h44 = [0, h4, 0, h4] + movd h0,h01 + movd h1,t1 + movd h2,h23 + movd h3,t2 + movd h4,h44 + punpcklqdq t1,h01 + punpcklqdq t2,h23 + punpcklqdq h44,h44 + + # h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ] + movd 0x00(m),t1 + movd 0x03(m),t2 + psrld $2,t2 + punpcklqdq t2,t1 + pand mask,t1 + paddd t1,h01 + # h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ] + movd 0x06(m),t1 + movd 0x09(m),t2 + psrld $4,t1 + psrld $6,t2 + punpcklqdq t2,t1 + pand mask,t1 + paddd t1,h23 + # h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ] + mov 0x0c(m),%eax + shr $8,%eax + or $0x01000000,%eax + movd %eax,t1 + pshufd $0xc4,t1,t1 + paddd t1,h44 + + # t1[0] = h0 * r0 + h2 * s3 + # t1[1] = h1 * s4 + h3 * s2 + movd r0,t1 + movd s4,t2 + punpcklqdq t2,t1 + pmuludq h01,t1 + movd s3,t2 + movd s2,t3 + punpcklqdq t3,t2 + pmuludq h23,t2 + paddq t2,t1 + # t2[0] = h0 * r1 + h2 * s4 + # t2[1] = h1 * r0 + h3 * s3 + movd r1,t2 + movd r0,t3 + punpcklqdq t3,t2 + pmuludq h01,t2 + movd s4,t3 + movd s3,t4 + punpcklqdq t4,t3 + pmuludq h23,t3 + paddq t3,t2 + # t3[0] = h4 * s1 + # t3[1] = h4 * s2 + movd s1,t3 + movd s2,t4 + punpcklqdq t4,t3 + pmuludq h44,t3 + # d0 = t1[0] + t1[1] + t3[0] + # d1 = t2[0] + t2[1] + t3[1] + movdqa t1,t4 + punpcklqdq t2,t4 + punpckhqdq t2,t1 + paddq t4,t1 + paddq t3,t1 + movq t1,d0 + psrldq $8,t1 + movq t1,d1 + + # t1[0] = h0 * r2 + h2 * r0 + # t1[1] = h1 * r1 + h3 * s4 + movd r2,t1 + movd r1,t2 + punpcklqdq t2,t1 + pmuludq h01,t1 + movd r0,t2 + movd s4,t3 + punpcklqdq t3,t2 + pmuludq h23,t2 + paddq t2,t1 + # t2[0] = h0 * r3 + h2 * r1 + # t2[1] = h1 * r2 + h3 * r0 + movd r3,t2 + movd r2,t3 + punpcklqdq t3,t2 + pmuludq h01,t2 + movd r1,t3 + movd r0,t4 + punpcklqdq t4,t3 + pmuludq h23,t3 + paddq t3,t2 + # t3[0] = h4 * s3 + # t3[1] = h4 * s4 + movd s3,t3 + movd s4,t4 + punpcklqdq t4,t3 + pmuludq h44,t3 + # d2 = t1[0] + t1[1] + t3[0] + # d3 = t2[0] + t2[1] + t3[1] + movdqa t1,t4 + punpcklqdq t2,t4 + punpckhqdq t2,t1 + paddq t4,t1 + paddq t3,t1 + movq t1,d2 + psrldq $8,t1 + movq t1,d3 + + # t1[0] = h0 * r4 + h2 * r2 + # t1[1] = h1 * r3 + h3 * r1 + movd r4,t1 + movd r3,t2 + punpcklqdq t2,t1 + pmuludq h01,t1 + movd r2,t2 + movd r1,t3 + punpcklqdq t3,t2 + pmuludq h23,t2 + paddq t2,t1 + # t3[0] = h4 * r0 + movd r0,t3 + pmuludq h44,t3 + # d4 = t1[0] + t1[1] + t3[0] + movdqa t1,t4 + psrldq $8,t4 + paddq t4,t1 + paddq t3,t1 + movq t1,d4 + + # d1 += d0 >> 26 + mov d0,%rax + shr $26,%rax + add %rax,d1 + # h0 = d0 & 0x3ffffff + mov d0,%rbx + and $0x3ffffff,%ebx + + # d2 += d1 >> 26 + mov d1,%rax + shr $26,%rax + add %rax,d2 + # h1 = d1 & 0x3ffffff + mov d1,%rax + and $0x3ffffff,%eax + mov %eax,h1 + + # d3 += d2 >> 26 + mov d2,%rax + shr $26,%rax + add %rax,d3 + # h2 = d2 & 0x3ffffff + mov d2,%rax + and $0x3ffffff,%eax + mov %eax,h2 + + # d4 += d3 >> 26 + mov d3,%rax + shr $26,%rax + add %rax,d4 + # h3 = d3 & 0x3ffffff + mov d3,%rax + and $0x3ffffff,%eax + mov %eax,h3 + + # h0 += (d4 >> 26) * 5 + mov d4,%rax + shr $26,%rax + lea (%eax,%eax,4),%eax + add %eax,%ebx + # h4 = d4 & 0x3ffffff + mov d4,%rax + and $0x3ffffff,%eax + mov %eax,h4 + + # h1 += h0 >> 26 + mov %ebx,%eax + shr $26,%eax + add %eax,h1 + # h0 = 
h0 & 0x3ffffff + andl $0x3ffffff,%ebx + mov %ebx,h0 + + add $0x10,m + dec %rcx + jnz .Ldoblock + + add $0x10,%rsp + pop %r12 + pop %rbx + ret +ENDPROC(poly1305_block_sse2) + + +#define u0 0x00(%r8) +#define u1 0x04(%r8) +#define u2 0x08(%r8) +#define u3 0x0c(%r8) +#define u4 0x10(%r8) +#define hc0 %xmm0 +#define hc1 %xmm1 +#define hc2 %xmm2 +#define hc3 %xmm5 +#define hc4 %xmm6 +#define ru0 %xmm7 +#define ru1 %xmm8 +#define ru2 %xmm9 +#define ru3 %xmm10 +#define ru4 %xmm11 +#define sv1 %xmm12 +#define sv2 %xmm13 +#define sv3 %xmm14 +#define sv4 %xmm15 +#undef d0 +#define d0 %r13 + +ENTRY(poly1305_2block_sse2) + # %rdi: Accumulator h[5] + # %rsi: 16 byte input block m + # %rdx: Poly1305 key r[5] + # %rcx: Doubleblock count + # %r8: Poly1305 derived key r^2 u[5] + + # This two-block variant further improves performance by using loop + # unrolled block processing. This is more straight forward and does + # less byte shuffling, but requires a second Poly1305 key r^2: + # h = (h + m) * r => h = (h + m1) * r^2 + m2 * r + + push %rbx + push %r12 + push %r13 + + # combine r0,u0 + movd u0,ru0 + movd r0,t1 + punpcklqdq t1,ru0 + + # combine r1,u1 and s1=r1*5,v1=u1*5 + movd u1,ru1 + movd r1,t1 + punpcklqdq t1,ru1 + movdqa ru1,sv1 + pslld $2,sv1 + paddd ru1,sv1 + + # combine r2,u2 and s2=r2*5,v2=u2*5 + movd u2,ru2 + movd r2,t1 + punpcklqdq t1,ru2 + movdqa ru2,sv2 + pslld $2,sv2 + paddd ru2,sv2 + + # combine r3,u3 and s3=r3*5,v3=u3*5 + movd u3,ru3 + movd r3,t1 + punpcklqdq t1,ru3 + movdqa ru3,sv3 + pslld $2,sv3 + paddd ru3,sv3 + + # combine r4,u4 and s4=r4*5,v4=u4*5 + movd u4,ru4 + movd r4,t1 + punpcklqdq t1,ru4 + movdqa ru4,sv4 + pslld $2,sv4 + paddd ru4,sv4 + +.Ldoblock2: + # hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ] + movd 0x00(m),hc0 + movd 0x10(m),t1 + punpcklqdq t1,hc0 + pand ANMASK(%rip),hc0 + movd h0,t1 + paddd t1,hc0 + # hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ] + movd 0x03(m),hc1 + movd 0x13(m),t1 + punpcklqdq t1,hc1 + psrld $2,hc1 + pand ANMASK(%rip),hc1 + movd h1,t1 + paddd t1,hc1 + # hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ] + movd 0x06(m),hc2 + movd 0x16(m),t1 + punpcklqdq t1,hc2 + psrld $4,hc2 + pand ANMASK(%rip),hc2 + movd h2,t1 + paddd t1,hc2 + # hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ] + movd 0x09(m),hc3 + movd 0x19(m),t1 + punpcklqdq t1,hc3 + psrld $6,hc3 + pand ANMASK(%rip),hc3 + movd h3,t1 + paddd t1,hc3 + # hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ] + movd 0x0c(m),hc4 + movd 0x1c(m),t1 + punpcklqdq t1,hc4 + psrld $8,hc4 + por ORMASK(%rip),hc4 + movd h4,t1 + paddd t1,hc4 + + # t1 = [ hc0[1] * r0, hc0[0] * u0 ] + movdqa ru0,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * s4, hc1[0] * v4 ] + movdqa sv4,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * s3, hc2[0] * v3 ] + movdqa sv3,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * s2, hc3[0] * v2 ] + movdqa sv2,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * s1, hc4[0] * v1 ] + movdqa sv1,t2 + pmuludq hc4,t2 + paddq t2,t1 + # d0 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d0 + + # t1 = [ hc0[1] * r1, hc0[0] * u1 ] + movdqa ru1,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * r0, hc1[0] * u0 ] + movdqa ru0,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * s4, hc2[0] * v4 ] + movdqa sv4,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * s3, hc3[0] * v3 ] + movdqa sv3,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * s2, hc4[0] * v2 ] + movdqa sv2,t2 + pmuludq hc4,t2 + 
paddq t2,t1 + # d1 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d1 + + # t1 = [ hc0[1] * r2, hc0[0] * u2 ] + movdqa ru2,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * r1, hc1[0] * u1 ] + movdqa ru1,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * r0, hc2[0] * u0 ] + movdqa ru0,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * s4, hc3[0] * v4 ] + movdqa sv4,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * s3, hc4[0] * v3 ] + movdqa sv3,t2 + pmuludq hc4,t2 + paddq t2,t1 + # d2 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d2 + + # t1 = [ hc0[1] * r3, hc0[0] * u3 ] + movdqa ru3,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * r2, hc1[0] * u2 ] + movdqa ru2,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * r1, hc2[0] * u1 ] + movdqa ru1,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * r0, hc3[0] * u0 ] + movdqa ru0,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * s4, hc4[0] * v4 ] + movdqa sv4,t2 + pmuludq hc4,t2 + paddq t2,t1 + # d3 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d3 + + # t1 = [ hc0[1] * r4, hc0[0] * u4 ] + movdqa ru4,t1 + pmuludq hc0,t1 + # t1 += [ hc1[1] * r3, hc1[0] * u3 ] + movdqa ru3,t2 + pmuludq hc1,t2 + paddq t2,t1 + # t1 += [ hc2[1] * r2, hc2[0] * u2 ] + movdqa ru2,t2 + pmuludq hc2,t2 + paddq t2,t1 + # t1 += [ hc3[1] * r1, hc3[0] * u1 ] + movdqa ru1,t2 + pmuludq hc3,t2 + paddq t2,t1 + # t1 += [ hc4[1] * r0, hc4[0] * u0 ] + movdqa ru0,t2 + pmuludq hc4,t2 + paddq t2,t1 + # d4 = t1[0] + t1[1] + movdqa t1,t2 + psrldq $8,t2 + paddq t2,t1 + movq t1,d4 + + # d1 += d0 >> 26 + mov d0,%rax + shr $26,%rax + add %rax,d1 + # h0 = d0 & 0x3ffffff + mov d0,%rbx + and $0x3ffffff,%ebx + + # d2 += d1 >> 26 + mov d1,%rax + shr $26,%rax + add %rax,d2 + # h1 = d1 & 0x3ffffff + mov d1,%rax + and $0x3ffffff,%eax + mov %eax,h1 + + # d3 += d2 >> 26 + mov d2,%rax + shr $26,%rax + add %rax,d3 + # h2 = d2 & 0x3ffffff + mov d2,%rax + and $0x3ffffff,%eax + mov %eax,h2 + + # d4 += d3 >> 26 + mov d3,%rax + shr $26,%rax + add %rax,d4 + # h3 = d3 & 0x3ffffff + mov d3,%rax + and $0x3ffffff,%eax + mov %eax,h3 + + # h0 += (d4 >> 26) * 5 + mov d4,%rax + shr $26,%rax + lea (%eax,%eax,4),%eax + add %eax,%ebx + # h4 = d4 & 0x3ffffff + mov d4,%rax + and $0x3ffffff,%eax + mov %eax,h4 + + # h1 += h0 >> 26 + mov %ebx,%eax + shr $26,%eax + add %eax,h1 + # h0 = h0 & 0x3ffffff + andl $0x3ffffff,%ebx + mov %ebx,h0 + + add $0x20,m + dec %rcx + jnz .Ldoblock2 + + pop %r13 + pop %r12 + pop %rbx + ret +ENDPROC(poly1305_2block_sse2) diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c new file mode 100644 index 000000000000..f7170d764f32 --- /dev/null +++ b/arch/x86/crypto/poly1305_glue.c @@ -0,0 +1,207 @@ +/* + * Poly1305 authenticator algorithm, RFC7539, SIMD glue code + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <crypto/algapi.h> +#include <crypto/internal/hash.h> +#include <crypto/poly1305.h> +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <asm/fpu/api.h> +#include <asm/simd.h> + +struct poly1305_simd_desc_ctx { + struct poly1305_desc_ctx base; + /* derived key u set? */ + bool uset; +#ifdef CONFIG_AS_AVX2 + /* derived keys r^3, r^4 set? 
*/ + bool wset; +#endif + /* derived Poly1305 key r^2 */ + u32 u[5]; + /* ... silently appended r^3 and r^4 when using AVX2 */ +}; + +asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src, + const u32 *r, unsigned int blocks); +asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r, + unsigned int blocks, const u32 *u); +#ifdef CONFIG_AS_AVX2 +asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r, + unsigned int blocks, const u32 *u); +static bool poly1305_use_avx2; +#endif + +static int poly1305_simd_init(struct shash_desc *desc) +{ + struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc); + + sctx->uset = false; +#ifdef CONFIG_AS_AVX2 + sctx->wset = false; +#endif + + return crypto_poly1305_init(desc); +} + +static void poly1305_simd_mult(u32 *a, const u32 *b) +{ + u8 m[POLY1305_BLOCK_SIZE]; + + memset(m, 0, sizeof(m)); + /* The poly1305 block function adds a hi-bit to the accumulator which + * we don't need for key multiplication; compensate for it. */ + a[4] -= 1 << 24; + poly1305_block_sse2(a, m, b, 1); +} + +static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx, + const u8 *src, unsigned int srclen) +{ + struct poly1305_simd_desc_ctx *sctx; + unsigned int blocks, datalen; + + BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base)); + sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base); + + if (unlikely(!dctx->sset)) { + datalen = crypto_poly1305_setdesckey(dctx, src, srclen); + src += srclen - datalen; + srclen = datalen; + } + +#ifdef CONFIG_AS_AVX2 + if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) { + if (unlikely(!sctx->wset)) { + if (!sctx->uset) { + memcpy(sctx->u, dctx->r, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u, dctx->r); + sctx->uset = true; + } + memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u + 5, dctx->r); + memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u + 10, dctx->r); + sctx->wset = true; + } + blocks = srclen / (POLY1305_BLOCK_SIZE * 4); + poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u); + src += POLY1305_BLOCK_SIZE * 4 * blocks; + srclen -= POLY1305_BLOCK_SIZE * 4 * blocks; + } +#endif + if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) { + if (unlikely(!sctx->uset)) { + memcpy(sctx->u, dctx->r, sizeof(sctx->u)); + poly1305_simd_mult(sctx->u, dctx->r); + sctx->uset = true; + } + blocks = srclen / (POLY1305_BLOCK_SIZE * 2); + poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u); + src += POLY1305_BLOCK_SIZE * 2 * blocks; + srclen -= POLY1305_BLOCK_SIZE * 2 * blocks; + } + if (srclen >= POLY1305_BLOCK_SIZE) { + poly1305_block_sse2(dctx->h, src, dctx->r, 1); + srclen -= POLY1305_BLOCK_SIZE; + } + return srclen; +} + +static int poly1305_simd_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); + unsigned int bytes; + + /* kernel_fpu_begin/end is costly, use fallback for small updates */ + if (srclen <= 288 || !may_use_simd()) + return crypto_poly1305_update(desc, src, srclen); + + kernel_fpu_begin(); + + if (unlikely(dctx->buflen)) { + bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); + memcpy(dctx->buf + dctx->buflen, src, bytes); + src += bytes; + srclen -= bytes; + dctx->buflen += bytes; + + if (dctx->buflen == POLY1305_BLOCK_SIZE) { + poly1305_simd_blocks(dctx, dctx->buf, + POLY1305_BLOCK_SIZE); + dctx->buflen = 0; + } + } + + if (likely(srclen >= POLY1305_BLOCK_SIZE)) { + bytes = poly1305_simd_blocks(dctx, src, 
srclen); + src += srclen - bytes; + srclen = bytes; + } + + kernel_fpu_end(); + + if (unlikely(srclen)) { + dctx->buflen = srclen; + memcpy(dctx->buf, src, srclen); + } + + return 0; +} + +static struct shash_alg alg = { + .digestsize = POLY1305_DIGEST_SIZE, + .init = poly1305_simd_init, + .update = poly1305_simd_update, + .final = crypto_poly1305_final, + .setkey = crypto_poly1305_setkey, + .descsize = sizeof(struct poly1305_simd_desc_ctx), + .base = { + .cra_name = "poly1305", + .cra_driver_name = "poly1305-simd", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_alignmask = sizeof(u32) - 1, + .cra_blocksize = POLY1305_BLOCK_SIZE, + .cra_module = THIS_MODULE, + }, +}; + +static int __init poly1305_simd_mod_init(void) +{ + if (!cpu_has_xmm2) + return -ENODEV; + +#ifdef CONFIG_AS_AVX2 + poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 && + cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); + alg.descsize = sizeof(struct poly1305_simd_desc_ctx); + if (poly1305_use_avx2) + alg.descsize += 10 * sizeof(u32); +#endif + return crypto_register_shash(&alg); +} + +static void __exit poly1305_simd_mod_exit(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(poly1305_simd_mod_init); +module_exit(poly1305_simd_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("Poly1305 authenticator"); +MODULE_ALIAS_CRYPTO("poly1305"); +MODULE_ALIAS_CRYPTO("poly1305-simd"); diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile index 7a144971db79..bd55dedd7614 100644 --- a/arch/x86/entry/Makefile +++ b/arch/x86/entry/Makefile @@ -2,6 +2,7 @@ # Makefile for the x86 low level entry code # obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o +obj-y += common.o obj-y += vdso/ obj-y += vsyscall/ diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index f4e6308c4200..3c71dd947c7b 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -135,9 +135,6 @@ For 32-bit we have the following conventions - kernel is built with movq %rbp, 4*8+\offset(%rsp) movq %rbx, 5*8+\offset(%rsp) .endm - .macro SAVE_EXTRA_REGS_RBP offset=0 - movq %rbp, 4*8+\offset(%rsp) - .endm .macro RESTORE_EXTRA_REGS offset=0 movq 0*8+\offset(%rsp), %r15 @@ -193,12 +190,6 @@ For 32-bit we have the following conventions - kernel is built with .macro RESTORE_C_REGS_EXCEPT_RCX_R11 RESTORE_C_REGS_HELPER 1,0,0,1,1 .endm - .macro RESTORE_RSI_RDI - RESTORE_C_REGS_HELPER 0,0,0,0,0 - .endm - .macro RESTORE_RSI_RDI_RDX - RESTORE_C_REGS_HELPER 0,0,0,0,1 - .endm .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 subq $-(15*8+\addskip), %rsp diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c new file mode 100644 index 000000000000..80dcc9261ca3 --- /dev/null +++ b/arch/x86/entry/common.c @@ -0,0 +1,318 @@ +/* + * common.c - C code for kernel entry and exit + * Copyright (c) 2015 Andrew Lutomirski + * GPL v2 + * + * Based on asm and ptrace code by many authors. The code here originated + * in ptrace.c and signal.c. 
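
The multi-block paths need the key powers in the same 26-bit-limb format as r itself. The glue code above derives them lazily (guarded by uset and, for AVX2, wset) by multiplying with poly1305_simd_mult(), and stores them back to back after the declared u[5]; the 10 extra u32s of descsize reserved in poly1305_simd_mod_init() are exactly that overflow space. If the derivation were pulled out into a helper, it would look roughly like the sketch below; the helper name is illustrative and not part of the patch.

    /* Layout when AVX2 is usable (sketch):
     *   u[ 0.. 4] = r^2   used by poly1305_2block_sse2()
     *   u[ 5.. 9] = r^3   used, together with r^4, by poly1305_4block_avx2()
     *   u[10..14] = r^4
     */
    static void poly1305_derive_powers(u32 u[15], const u32 r[5])
    {
            memcpy(u, r, 5 * sizeof(u32));
            poly1305_simd_mult(u, r);       /* u[0..4]   = r * r   = r^2 */
            memcpy(u + 5, u, 5 * sizeof(u32));
            poly1305_simd_mult(u + 5, r);   /* u[5..9]   = r^2 * r = r^3 */
            memcpy(u + 10, u + 5, 5 * sizeof(u32));
            poly1305_simd_mult(u + 10, r);  /* u[10..14] = r^3 * r = r^4 */
    }

Several blocks can then be folded in one pass because ((((h + m1)r + m2)r + m3)r + m4)r equals (h + m1)r^4 + m2 r^3 + m3 r^2 + m4 r, the identity quoted in the poly1305_4block_avx2() header above.
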
+ */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <linux/tracehook.h> +#include <linux/audit.h> +#include <linux/seccomp.h> +#include <linux/signal.h> +#include <linux/export.h> +#include <linux/context_tracking.h> +#include <linux/user-return-notifier.h> +#include <linux/uprobes.h> + +#include <asm/desc.h> +#include <asm/traps.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/syscalls.h> + +#ifdef CONFIG_CONTEXT_TRACKING +/* Called on entry from user mode with IRQs off. */ +__visible void enter_from_user_mode(void) +{ + CT_WARN_ON(ct_state() != CONTEXT_USER); + user_exit(); +} +#endif + +static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) +{ +#ifdef CONFIG_X86_64 + if (arch == AUDIT_ARCH_X86_64) { + audit_syscall_entry(regs->orig_ax, regs->di, + regs->si, regs->dx, regs->r10); + } else +#endif + { + audit_syscall_entry(regs->orig_ax, regs->bx, + regs->cx, regs->dx, regs->si); + } +} + +/* + * We can return 0 to resume the syscall or anything else to go to phase + * 2. If we resume the syscall, we need to put something appropriate in + * regs->orig_ax. + * + * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax + * are fully functional. + * + * For phase 2's benefit, our return value is: + * 0: resume the syscall + * 1: go to phase 2; no seccomp phase 2 needed + * anything else: go to phase 2; pass return value to seccomp + */ +unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) +{ + unsigned long ret = 0; + u32 work; + + BUG_ON(regs != task_pt_regs(current)); + + work = ACCESS_ONCE(current_thread_info()->flags) & + _TIF_WORK_SYSCALL_ENTRY; + +#ifdef CONFIG_CONTEXT_TRACKING + /* + * If TIF_NOHZ is set, we are required to call user_exit() before + * doing anything that could touch RCU. + */ + if (work & _TIF_NOHZ) { + enter_from_user_mode(); + work &= ~_TIF_NOHZ; + } +#endif + +#ifdef CONFIG_SECCOMP + /* + * Do seccomp first -- it should minimize exposure of other + * code, and keeping seccomp fast is probably more valuable + * than the rest of this. + */ + if (work & _TIF_SECCOMP) { + struct seccomp_data sd; + + sd.arch = arch; + sd.nr = regs->orig_ax; + sd.instruction_pointer = regs->ip; +#ifdef CONFIG_X86_64 + if (arch == AUDIT_ARCH_X86_64) { + sd.args[0] = regs->di; + sd.args[1] = regs->si; + sd.args[2] = regs->dx; + sd.args[3] = regs->r10; + sd.args[4] = regs->r8; + sd.args[5] = regs->r9; + } else +#endif + { + sd.args[0] = regs->bx; + sd.args[1] = regs->cx; + sd.args[2] = regs->dx; + sd.args[3] = regs->si; + sd.args[4] = regs->di; + sd.args[5] = regs->bp; + } + + BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0); + BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1); + + ret = seccomp_phase1(&sd); + if (ret == SECCOMP_PHASE1_SKIP) { + regs->orig_ax = -1; + ret = 0; + } else if (ret != SECCOMP_PHASE1_OK) { + return ret; /* Go directly to phase 2 */ + } + + work &= ~_TIF_SECCOMP; + } +#endif + + /* Do our best to finish without phase 2. */ + if (work == 0) + return ret; /* seccomp and/or nohz only (ret == 0 here) */ + +#ifdef CONFIG_AUDITSYSCALL + if (work == _TIF_SYSCALL_AUDIT) { + /* + * If there is no more work to be done except auditing, + * then audit in phase 1. Phase 2 always audits, so, if + * we audit here, then we can't go on to phase 2. 
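
The return-value protocol of syscall_trace_enter_phase1() is easy to misread: 0 means no phase 2 is needed at all, 1 means phase 2 is needed but there is nothing to hand to seccomp, and any other value is the seccomp phase-1 cookie that phase 2 must forward to seccomp_phase2(). The syscall_trace_enter() wrapper defined a little further down composes the two phases exactly like this sketch (the asm tracesys path in entry_64.S relies on the same split):

    /* How the two phases are meant to compose (sketch). */
    static long trace_enter_sketch(struct pt_regs *regs, u32 arch)
    {
            unsigned long phase1 = syscall_trace_enter_phase1(regs, arch);

            if (phase1 == 0)
                    return regs->orig_ax;   /* nothing left to do, run it */

            /* may return -1 to skip the syscall (seccomp, PTRACE_SYSEMU, ...) */
            return syscall_trace_enter_phase2(regs, arch, phase1);
    }
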
+ */ + do_audit_syscall_entry(regs, arch); + return 0; + } +#endif + + return 1; /* Something is enabled that we can't handle in phase 1 */ +} + +/* Returns the syscall nr to run (which should match regs->orig_ax). */ +long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch, + unsigned long phase1_result) +{ + long ret = 0; + u32 work = ACCESS_ONCE(current_thread_info()->flags) & + _TIF_WORK_SYSCALL_ENTRY; + + BUG_ON(regs != task_pt_regs(current)); + + /* + * If we stepped into a sysenter/syscall insn, it trapped in + * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. + * If user-mode had set TF itself, then it's still clear from + * do_debug() and we need to set it again to restore the user + * state. If we entered on the slow path, TF was already set. + */ + if (work & _TIF_SINGLESTEP) + regs->flags |= X86_EFLAGS_TF; + +#ifdef CONFIG_SECCOMP + /* + * Call seccomp_phase2 before running the other hooks so that + * they can see any changes made by a seccomp tracer. + */ + if (phase1_result > 1 && seccomp_phase2(phase1_result)) { + /* seccomp failures shouldn't expose any additional code. */ + return -1; + } +#endif + + if (unlikely(work & _TIF_SYSCALL_EMU)) + ret = -1L; + + if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) && + tracehook_report_syscall_entry(regs)) + ret = -1L; + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->orig_ax); + + do_audit_syscall_entry(regs, arch); + + return ret ?: regs->orig_ax; +} + +long syscall_trace_enter(struct pt_regs *regs) +{ + u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64; + unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch); + + if (phase1_result == 0) + return regs->orig_ax; + else + return syscall_trace_enter_phase2(regs, arch, phase1_result); +} + +static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs) +{ + unsigned long top_of_stack = + (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING; + return (struct thread_info *)(top_of_stack - THREAD_SIZE); +} + +/* Called with IRQs disabled. */ +__visible void prepare_exit_to_usermode(struct pt_regs *regs) +{ + if (WARN_ON(!irqs_disabled())) + local_irq_disable(); + + /* + * In order to return to user mode, we need to have IRQs off with + * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY, + * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags + * can be set at any time on preemptable kernels if we have IRQs on, + * so we need to loop. Disabling preemption wouldn't help: doing the + * work to clear some of the flags can sleep. + */ + while (true) { + u32 cached_flags = + READ_ONCE(pt_regs_to_thread_info(regs)->flags); + + if (!(cached_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | + _TIF_UPROBE | _TIF_NEED_RESCHED | + _TIF_USER_RETURN_NOTIFY))) + break; + + /* We have work to do. */ + local_irq_enable(); + + if (cached_flags & _TIF_NEED_RESCHED) + schedule(); + + if (cached_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + + /* deal with pending signal delivery */ + if (cached_flags & _TIF_SIGPENDING) + do_signal(regs); + + if (cached_flags & _TIF_NOTIFY_RESUME) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } + + if (cached_flags & _TIF_USER_RETURN_NOTIFY) + fire_user_return_notifiers(); + + /* Disable IRQs and retry */ + local_irq_disable(); + } + + user_enter(); +} + +/* + * Called with IRQs on and fully valid regs. Returns with IRQs off in a + * state such that we can immediately switch to user mode. 
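
Note why prepare_exit_to_usermode() above loops instead of handling each flag once: the work items themselves can raise new flags (a signal can be posted while schedule() runs, for example), so the flags are re-read with IRQs disabled and the function only returns when nothing is pending; a flag set after an IRQs-on check could otherwise go unnoticed until the next kernel entry. Reduced to a sketch with made-up names (EXIT_WORK_MASK and handle_exit_work() are not real kernel symbols):

    /* Minimal sketch of the exit-work retry loop used above. */
    static void exit_work_loop(struct thread_info *ti)
    {
            for (;;) {
                    u32 flags = READ_ONCE(ti->flags);

                    if (!(flags & EXIT_WORK_MASK))
                            return;          /* nothing pending, IRQs still off */

                    local_irq_enable();      /* the work below may sleep */
                    handle_exit_work(flags); /* schedule(), signals, notifiers */
                    local_irq_disable();     /* recheck with IRQs off */
            }
    }
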
+ */ +__visible void syscall_return_slowpath(struct pt_regs *regs) +{ + struct thread_info *ti = pt_regs_to_thread_info(regs); + u32 cached_flags = READ_ONCE(ti->flags); + bool step; + + CT_WARN_ON(ct_state() != CONTEXT_KERNEL); + + if (WARN(irqs_disabled(), "syscall %ld left IRQs disabled", + regs->orig_ax)) + local_irq_enable(); + + /* + * First do one-time work. If these work items are enabled, we + * want to run them exactly once per syscall exit with IRQs on. + */ + if (cached_flags & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | + _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)) { + audit_syscall_exit(regs); + + if (cached_flags & _TIF_SYSCALL_TRACEPOINT) + trace_sys_exit(regs, regs->ax); + + /* + * If TIF_SYSCALL_EMU is set, we only get here because of + * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP). + * We already reported this syscall instruction in + * syscall_trace_enter(). + */ + step = unlikely( + (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)) + == _TIF_SINGLESTEP); + if (step || cached_flags & _TIF_SYSCALL_TRACE) + tracehook_report_syscall_exit(regs, step); + } + +#ifdef CONFIG_COMPAT + /* + * Compat syscalls set TS_COMPAT. Make sure we clear it before + * returning to user mode. + */ + ti->status &= ~TS_COMPAT; +#endif + + local_irq_disable(); + prepare_exit_to_usermode(regs); +} diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 21dc60a60b5f..b2909bf8cf70 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -45,16 +45,6 @@ #include <asm/asm.h> #include <asm/smap.h> -/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ -#include <linux/elf-em.h> -#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) -#define __AUDIT_ARCH_LE 0x40000000 - -#ifndef CONFIG_AUDITSYSCALL -# define sysenter_audit syscall_trace_entry -# define sysexit_audit syscall_exit_work -#endif - .section .entry.text, "ax" /* @@ -266,14 +256,10 @@ ret_from_intr: ENTRY(resume_userspace) LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret + DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF - movl TI_flags(%ebp), %ecx - andl $_TIF_WORK_MASK, %ecx # is there any work to be done on - # int/exception return? 
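
One detail of syscall_return_slowpath() above is worth spelling out: the step value deliberately excludes the PTRACE_SYSEMU_SINGLESTEP case, because that syscall was already reported at entry. As plain C (a readability sketch, not part of the patch):

    /* Report a single-step trap at syscall exit? Only for a bare
     * _TIF_SINGLESTEP; _TIF_SINGLESTEP together with _TIF_SYSCALL_EMU is
     * PTRACE_SYSEMU_SINGLESTEP, which was already reported at entry. */
    static bool report_single_step(u32 cached_flags)
    {
            return (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)) ==
                   _TIF_SINGLESTEP;
    }
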
- jne work_pending + movl %esp, %eax + call prepare_exit_to_usermode jmp restore_all END(ret_from_exception) @@ -339,7 +325,7 @@ sysenter_past_esp: GET_THREAD_INFO(%ebp) testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp) - jnz sysenter_audit + jnz syscall_trace_entry sysenter_do_call: cmpl $(NR_syscalls), %eax jae sysenter_badsys @@ -351,7 +337,7 @@ sysenter_after_call: TRACE_IRQS_OFF movl TI_flags(%ebp), %ecx testl $_TIF_ALLWORK_MASK, %ecx - jnz sysexit_audit + jnz syscall_exit_work_irqs_off sysenter_exit: /* if something modifies registers it must also disable sysexit */ movl PT_EIP(%esp), %edx @@ -362,40 +348,6 @@ sysenter_exit: PTGS_TO_GS ENABLE_INTERRUPTS_SYSEXIT -#ifdef CONFIG_AUDITSYSCALL -sysenter_audit: - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp) - jnz syscall_trace_entry - /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */ - movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */ - /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */ - pushl PT_ESI(%esp) /* a3: 5th arg */ - pushl PT_EDX+4(%esp) /* a2: 4th arg */ - call __audit_syscall_entry - popl %ecx /* get that remapped edx off the stack */ - popl %ecx /* get that remapped esi off the stack */ - movl PT_EAX(%esp), %eax /* reload syscall number */ - jmp sysenter_do_call - -sysexit_audit: - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx - jnz syscall_exit_work - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_ANY) - movl %eax, %edx /* second arg, syscall return value */ - cmpl $-MAX_ERRNO, %eax /* is it an error ? */ - setbe %al /* 1 if so, 0 if not */ - movzbl %al, %eax /* zero-extend that */ - call __audit_syscall_exit - DISABLE_INTERRUPTS(CLBR_ANY) - TRACE_IRQS_OFF - movl TI_flags(%ebp), %ecx - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx - jnz syscall_exit_work - movl PT_EAX(%esp), %eax /* reload syscall return value */ - jmp sysenter_exit -#endif - .pushsection .fixup, "ax" 2: movl $0, PT_FS(%esp) jmp 1b @@ -421,13 +373,7 @@ syscall_after_call: movl %eax, PT_EAX(%esp) # store the return value syscall_exit: LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret - TRACE_IRQS_OFF - movl TI_flags(%ebp), %ecx - testl $_TIF_ALLWORK_MASK, %ecx # current->work - jnz syscall_exit_work + jmp syscall_exit_work restore_all: TRACE_IRQS_IRET @@ -504,57 +450,6 @@ ldt_ss: #endif ENDPROC(entry_INT80_32) - # perform work that needs to be done immediately before resumption - ALIGN -work_pending: - testb $_TIF_NEED_RESCHED, %cl - jz work_notifysig -work_resched: - call schedule - LOCKDEP_SYS_EXIT - DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt - # setting need_resched or sigpending - # between sampling and the iret - TRACE_IRQS_OFF - movl TI_flags(%ebp), %ecx - andl $_TIF_WORK_MASK, %ecx # is there any work to be done other - # than syscall tracing? 
- jz restore_all - testb $_TIF_NEED_RESCHED, %cl - jnz work_resched - -work_notifysig: # deal with pending signals and - # notify-resume requests -#ifdef CONFIG_VM86 - testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) - movl %esp, %eax - jnz work_notifysig_v86 # returning to kernel-space or - # vm86-space -1: -#else - movl %esp, %eax -#endif - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - movb PT_CS(%esp), %bl - andb $SEGMENT_RPL_MASK, %bl - cmpb $USER_RPL, %bl - jb resume_kernel - xorl %edx, %edx - call do_notify_resume - jmp resume_userspace - -#ifdef CONFIG_VM86 - ALIGN -work_notifysig_v86: - pushl %ecx # save ti_flags for do_notify_resume - call save_v86_state # %eax contains pt_regs pointer - popl %ecx - movl %eax, %esp - jmp 1b -#endif -END(work_pending) - # perform syscall exit tracing ALIGN syscall_trace_entry: @@ -569,15 +464,14 @@ END(syscall_trace_entry) # perform syscall exit tracing ALIGN -syscall_exit_work: - testl $_TIF_WORK_SYSCALL_EXIT, %ecx - jz work_pending +syscall_exit_work_irqs_off: TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call - # schedule() instead + ENABLE_INTERRUPTS(CLBR_ANY) + +syscall_exit_work: movl %esp, %eax - call syscall_trace_leave - jmp resume_userspace + call syscall_return_slowpath + jmp restore_all END(syscall_exit_work) syscall_fault: diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 3bb2c4302df1..d3033183ed70 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -33,7 +33,6 @@ #include <asm/paravirt.h> #include <asm/percpu.h> #include <asm/asm.h> -#include <asm/context_tracking.h> #include <asm/smap.h> #include <asm/pgtable_types.h> #include <linux/err.h> @@ -229,6 +228,11 @@ entry_SYSCALL_64_fastpath: */ USERGS_SYSRET64 +GLOBAL(int_ret_from_sys_call_irqs_off) + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + jmp int_ret_from_sys_call + /* Do syscall entry tracing */ tracesys: movq %rsp, %rdi @@ -272,69 +276,11 @@ tracesys_phase2: * Has correct iret frame. */ GLOBAL(int_ret_from_sys_call) - DISABLE_INTERRUPTS(CLBR_NONE) -int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */ - TRACE_IRQS_OFF - movl $_TIF_ALLWORK_MASK, %edi - /* edi: mask to check */ -GLOBAL(int_with_check) - LOCKDEP_SYS_EXIT_IRQ - GET_THREAD_INFO(%rcx) - movl TI_flags(%rcx), %edx - andl %edi, %edx - jnz int_careful - andl $~TS_COMPAT, TI_status(%rcx) - jmp syscall_return - - /* - * Either reschedule or signal or syscall exit tracking needed. - * First do a reschedule test. 
- * edx: work, edi: workmask - */ -int_careful: - bt $TIF_NEED_RESCHED, %edx - jnc int_very_careful - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - pushq %rdi - SCHEDULE_USER - popq %rdi - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - jmp int_with_check - - /* handle signals and tracing -- both require a full pt_regs */ -int_very_careful: - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) SAVE_EXTRA_REGS - /* Check for syscall exit trace */ - testl $_TIF_WORK_SYSCALL_EXIT, %edx - jz int_signal - pushq %rdi - leaq 8(%rsp), %rdi /* &ptregs -> arg1 */ - call syscall_trace_leave - popq %rdi - andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU), %edi - jmp int_restore_rest - -int_signal: - testl $_TIF_DO_NOTIFY_MASK, %edx - jz 1f - movq %rsp, %rdi /* &ptregs -> arg1 */ - xorl %esi, %esi /* oldset -> arg2 */ - call do_notify_resume -1: movl $_TIF_WORK_MASK, %edi -int_restore_rest: + movq %rsp, %rdi + call syscall_return_slowpath /* returns with IRQs disabled */ RESTORE_EXTRA_REGS - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - jmp int_with_check - -syscall_return: - /* The IRETQ could re-enable interrupts: */ - DISABLE_INTERRUPTS(CLBR_ANY) - TRACE_IRQS_IRETQ + TRACE_IRQS_IRETQ /* we're about to change IF */ /* * Try to use SYSRET instead of IRET if we're returning to @@ -555,23 +501,22 @@ END(irq_entries_start) /* 0(%rsp): ~(interrupt number) */ .macro interrupt func cld - /* - * Since nothing in interrupt handling code touches r12...r15 members - * of "struct pt_regs", and since interrupts can nest, we can save - * four stack slots and simultaneously provide - * an unwind-friendly stack layout by saving "truncated" pt_regs - * exactly up to rbp slot, without these members. - */ - ALLOC_PT_GPREGS_ON_STACK -RBP - SAVE_C_REGS -RBP - /* this goes to 0(%rsp) for unwinder, not for saving the value: */ - SAVE_EXTRA_REGS_RBP -RBP - - leaq -RBP(%rsp), %rdi /* arg1 for \func (pointer to pt_regs) */ + ALLOC_PT_GPREGS_ON_STACK + SAVE_C_REGS + SAVE_EXTRA_REGS - testb $3, CS-RBP(%rsp) + testb $3, CS(%rsp) jz 1f + + /* + * IRQ from user mode. Switch to kernel gsbase and inform context + * tracking that we're in kernel mode. + */ SWAPGS +#ifdef CONFIG_CONTEXT_TRACKING + call enter_from_user_mode +#endif + 1: /* * Save previous stack pointer, optionally switch to interrupt stack. @@ -580,14 +525,14 @@ END(irq_entries_start) * a little cheaper to use a separate counter in the PDA (short of * moving irq_enter into assembly, which would be too much work) */ - movq %rsp, %rsi + movq %rsp, %rdi incl PER_CPU_VAR(irq_count) cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp - pushq %rsi + pushq %rdi /* We entered an interrupt context - irqs are off: */ TRACE_IRQS_OFF - call \func + call \func /* rdi points to pt_regs */ .endm /* @@ -606,34 +551,19 @@ ret_from_intr: decl PER_CPU_VAR(irq_count) /* Restore saved previous stack */ - popq %rsi - /* return code expects complete pt_regs - adjust rsp accordingly: */ - leaq -RBP(%rsi), %rsp + popq %rsp testb $3, CS(%rsp) jz retint_kernel - /* Interrupt came from user space */ -retint_user: - GET_THREAD_INFO(%rcx) - /* %rcx: thread info. Interrupts are off. 
*/ -retint_with_reschedule: - movl $_TIF_WORK_MASK, %edi -retint_check: + /* Interrupt came from user space */ LOCKDEP_SYS_EXIT_IRQ - movl TI_flags(%rcx), %edx - andl %edi, %edx - jnz retint_careful - -retint_swapgs: /* return to user-space */ - /* - * The iretq could re-enable interrupts: - */ - DISABLE_INTERRUPTS(CLBR_ANY) +GLOBAL(retint_user) + mov %rsp,%rdi + call prepare_exit_to_usermode TRACE_IRQS_IRETQ - SWAPGS - jmp restore_c_regs_and_iret + jmp restore_regs_and_iret /* Returning to kernel space */ retint_kernel: @@ -657,6 +587,8 @@ retint_kernel: * At this label, code paths which return to kernel and to user, * which come from interrupts/exception and from syscalls, merge. */ +restore_regs_and_iret: + RESTORE_EXTRA_REGS restore_c_regs_and_iret: RESTORE_C_REGS REMOVE_PT_GPREGS_FROM_STACK 8 @@ -707,37 +639,6 @@ native_irq_return_ldt: popq %rax jmp native_irq_return_iret #endif - - /* edi: workmask, edx: work */ -retint_careful: - bt $TIF_NEED_RESCHED, %edx - jnc retint_signal - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - pushq %rdi - SCHEDULE_USER - popq %rdi - GET_THREAD_INFO(%rcx) - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - jmp retint_check - -retint_signal: - testl $_TIF_DO_NOTIFY_MASK, %edx - jz retint_swapgs - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - SAVE_EXTRA_REGS - movq $-1, ORIG_RAX(%rsp) - xorl %esi, %esi /* oldset */ - movq %rsp, %rdi /* &pt_regs */ - call do_notify_resume - RESTORE_EXTRA_REGS - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF - GET_THREAD_INFO(%rcx) - jmp retint_with_reschedule - END(common_interrupt) /* @@ -1143,12 +1044,22 @@ ENTRY(error_entry) SAVE_EXTRA_REGS 8 xorl %ebx, %ebx testb $3, CS+8(%rsp) - jz error_kernelspace + jz .Lerror_kernelspace - /* We entered from user mode */ +.Lerror_entry_from_usermode_swapgs: + /* + * We entered from user mode or we're pretending to have entered + * from user mode due to an IRET fault. + */ SWAPGS -error_entry_done: +.Lerror_entry_from_usermode_after_swapgs: +#ifdef CONFIG_CONTEXT_TRACKING + call enter_from_user_mode +#endif + +.Lerror_entry_done: + TRACE_IRQS_OFF ret @@ -1158,31 +1069,30 @@ error_entry_done: * truncated RIP for IRET exceptions returning to compat mode. Check * for these here too. */ -error_kernelspace: +.Lerror_kernelspace: incl %ebx leaq native_irq_return_iret(%rip), %rcx cmpq %rcx, RIP+8(%rsp) - je error_bad_iret + je .Lerror_bad_iret movl %ecx, %eax /* zero extend */ cmpq %rax, RIP+8(%rsp) - je bstep_iret + je .Lbstep_iret cmpq $gs_change, RIP+8(%rsp) - jne error_entry_done + jne .Lerror_entry_done /* * hack: gs_change can fail with user gsbase. If this happens, fix up * gsbase and proceed. We'll fix up the exception and land in * gs_change's error handler with kernel gsbase. */ - SWAPGS - jmp error_entry_done + jmp .Lerror_entry_from_usermode_swapgs -bstep_iret: +.Lbstep_iret: /* Fix truncated RIP */ movq %rcx, RIP+8(%rsp) /* fall through */ -error_bad_iret: +.Lerror_bad_iret: /* * We came from an IRET to user mode, so we have user gsbase. 
* Switch to kernel gsbase: @@ -1198,7 +1108,7 @@ error_bad_iret: call fixup_bad_iret mov %rax, %rsp decl %ebx - jmp error_entry_done + jmp .Lerror_entry_from_usermode_after_swapgs END(error_entry) @@ -1209,7 +1119,6 @@ END(error_entry) */ ENTRY(error_exit) movl %ebx, %eax - RESTORE_EXTRA_REGS DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl %eax, %eax @@ -1237,11 +1146,12 @@ ENTRY(nmi) * If the variable is not set and the stack is not the NMI * stack then: * o Set the special variable on the stack - * o Copy the interrupt frame into a "saved" location on the stack - * o Copy the interrupt frame into a "copy" location on the stack + * o Copy the interrupt frame into an "outermost" location on the + * stack + * o Copy the interrupt frame into an "iret" location on the stack * o Continue processing the NMI * If the variable is set or the previous stack is the NMI stack: - * o Modify the "copy" location to jump to the repeate_nmi + * o Modify the "iret" location to jump to the repeat_nmi * o return back to the first NMI * * Now on exit of the first NMI, we first clear the stack variable @@ -1250,31 +1160,151 @@ ENTRY(nmi) * a nested NMI that updated the copy interrupt stack frame, a * jump will be made to the repeat_nmi code that will handle the second * NMI. + * + * However, espfix prevents us from directly returning to userspace + * with a single IRET instruction. Similarly, IRET to user mode + * can fault. We therefore handle NMIs from user space like + * other IST entries. */ /* Use %rdx as our temp variable throughout */ pushq %rdx + testb $3, CS-RIP+8(%rsp) + jz .Lnmi_from_kernel + + /* + * NMI from user mode. We need to run on the thread stack, but we + * can't go through the normal entry paths: NMIs are masked, and + * we don't want to enable interrupts, because then we'll end + * up in an awkward situation in which IRQs are on but NMIs + * are off. + */ + + SWAPGS + cld + movq %rsp, %rdx + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + pushq 5*8(%rdx) /* pt_regs->ss */ + pushq 4*8(%rdx) /* pt_regs->rsp */ + pushq 3*8(%rdx) /* pt_regs->flags */ + pushq 2*8(%rdx) /* pt_regs->cs */ + pushq 1*8(%rdx) /* pt_regs->rip */ + pushq $-1 /* pt_regs->orig_ax */ + pushq %rdi /* pt_regs->di */ + pushq %rsi /* pt_regs->si */ + pushq (%rdx) /* pt_regs->dx */ + pushq %rcx /* pt_regs->cx */ + pushq %rax /* pt_regs->ax */ + pushq %r8 /* pt_regs->r8 */ + pushq %r9 /* pt_regs->r9 */ + pushq %r10 /* pt_regs->r10 */ + pushq %r11 /* pt_regs->r11 */ + pushq %rbx /* pt_regs->rbx */ + pushq %rbp /* pt_regs->rbp */ + pushq %r12 /* pt_regs->r12 */ + pushq %r13 /* pt_regs->r13 */ + pushq %r14 /* pt_regs->r14 */ + pushq %r15 /* pt_regs->r15 */ + + /* + * At this point we no longer need to worry about stack damage + * due to nesting -- we're on the normal thread stack and we're + * done with the NMI stack. + */ + + movq %rsp, %rdi + movq $-1, %rsi + call do_nmi + + /* + * Return back to user mode. We must *not* do the normal exit + * work, because we don't want to enable interrupts. Fortunately, + * do_nmi doesn't modify pt_regs. 
+ */ + SWAPGS + jmp restore_c_regs_and_iret + +.Lnmi_from_kernel: + /* + * Here's what our stack frame will look like: + * +---------------------------------------------------------+ + * | original SS | + * | original Return RSP | + * | original RFLAGS | + * | original CS | + * | original RIP | + * +---------------------------------------------------------+ + * | temp storage for rdx | + * +---------------------------------------------------------+ + * | "NMI executing" variable | + * +---------------------------------------------------------+ + * | iret SS } Copied from "outermost" frame | + * | iret Return RSP } on each loop iteration; overwritten | + * | iret RFLAGS } by a nested NMI to force another | + * | iret CS } iteration if needed. | + * | iret RIP } | + * +---------------------------------------------------------+ + * | outermost SS } initialized in first_nmi; | + * | outermost Return RSP } will not be changed before | + * | outermost RFLAGS } NMI processing is done. | + * | outermost CS } Copied to "iret" frame on each | + * | outermost RIP } iteration. | + * +---------------------------------------------------------+ + * | pt_regs | + * +---------------------------------------------------------+ + * + * The "original" frame is used by hardware. Before re-enabling + * NMIs, we need to be done with it, and we need to leave enough + * space for the asm code here. + * + * We return by executing IRET while RSP points to the "iret" frame. + * That will either return for real or it will loop back into NMI + * processing. + * + * The "outermost" frame is copied to the "iret" frame on each + * iteration of the loop, so each iteration starts with the "iret" + * frame pointing to the final return target. + */ + /* - * If %cs was not the kernel segment, then the NMI triggered in user - * space, which means it is definitely not nested. + * Determine whether we're a nested NMI. + * + * If we interrupted kernel code between repeat_nmi and + * end_repeat_nmi, then we are a nested NMI. We must not + * modify the "iret" frame because it's being written by + * the outer NMI. That's okay; the outer NMI handler is + * about to about to call do_nmi anyway, so we can just + * resume the outer NMI. */ - cmpl $__KERNEL_CS, 16(%rsp) - jne first_nmi + + movq $repeat_nmi, %rdx + cmpq 8(%rsp), %rdx + ja 1f + movq $end_repeat_nmi, %rdx + cmpq 8(%rsp), %rdx + ja nested_nmi_out +1: /* - * Check the special variable on the stack to see if NMIs are - * executing. + * Now check "NMI executing". If it's set, then we're nested. + * This will not detect if we interrupted an outer NMI just + * before IRET. */ cmpl $1, -8(%rsp) je nested_nmi /* - * Now test if the previous stack was an NMI stack. - * We need the double check. We check the NMI stack to satisfy the - * race when the first NMI clears the variable before returning. - * We check the variable because the first NMI could be in a - * breakpoint routine using a breakpoint stack. + * Now test if the previous stack was an NMI stack. This covers + * the case where we interrupt an outer NMI after it clears + * "NMI executing" but before IRET. We need to be careful, though: + * there is one case in which RSP could point to the NMI stack + * despite there being no NMI active: naughty userspace controls + * RSP at the very beginning of the SYSCALL targets. We can + * pull a fast one on naughty userspace, though: we program + * SYSCALL to mask DF, so userspace cannot cause DF to be set + * if it controls the kernel's RSP. 
We set DF before we clear + * "NMI executing". */ lea 6*8(%rsp), %rdx /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ @@ -1286,25 +1316,20 @@ ENTRY(nmi) cmpq %rdx, 4*8(%rsp) /* If it is below the NMI stack, it is a normal NMI */ jb first_nmi - /* Ah, it is within the NMI stack, treat it as nested */ + + /* Ah, it is within the NMI stack. */ + + testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) + jz first_nmi /* RSP was user controlled. */ + + /* This is a nested NMI. */ nested_nmi: /* - * Do nothing if we interrupted the fixup in repeat_nmi. - * It's about to repeat the NMI handler, so we are fine - * with ignoring this one. + * Modify the "iret" frame to point to repeat_nmi, forcing another + * iteration of NMI handling. */ - movq $repeat_nmi, %rdx - cmpq 8(%rsp), %rdx - ja 1f - movq $end_repeat_nmi, %rdx - cmpq 8(%rsp), %rdx - ja nested_nmi_out - -1: - /* Set up the interrupted NMIs stack to jump to repeat_nmi */ - leaq -1*8(%rsp), %rdx - movq %rdx, %rsp + subq $8, %rsp leaq -10*8(%rsp), %rdx pushq $__KERNEL_DS pushq %rdx @@ -1318,61 +1343,42 @@ nested_nmi: nested_nmi_out: popq %rdx - /* No need to check faults here */ + /* We are returning to kernel mode, so this cannot result in a fault. */ INTERRUPT_RETURN first_nmi: - /* - * Because nested NMIs will use the pushed location that we - * stored in rdx, we must keep that space available. - * Here's what our stack frame will look like: - * +-------------------------+ - * | original SS | - * | original Return RSP | - * | original RFLAGS | - * | original CS | - * | original RIP | - * +-------------------------+ - * | temp storage for rdx | - * +-------------------------+ - * | NMI executing variable | - * +-------------------------+ - * | copied SS | - * | copied Return RSP | - * | copied RFLAGS | - * | copied CS | - * | copied RIP | - * +-------------------------+ - * | Saved SS | - * | Saved Return RSP | - * | Saved RFLAGS | - * | Saved CS | - * | Saved RIP | - * +-------------------------+ - * | pt_regs | - * +-------------------------+ - * - * The saved stack frame is used to fix up the copied stack frame - * that a nested NMI may change to make the interrupted NMI iret jump - * to the repeat_nmi. The original stack frame and the temp storage - * is also used by nested NMIs and can not be trusted on exit. - */ - /* Do not pop rdx, nested NMIs will corrupt that part of the stack */ + /* Restore rdx. */ movq (%rsp), %rdx - /* Set the NMI executing variable on the stack. */ - pushq $1 + /* Make room for "NMI executing". */ + pushq $0 - /* Leave room for the "copied" frame */ + /* Leave room for the "iret" frame */ subq $(5*8), %rsp - /* Copy the stack frame to the Saved frame */ + /* Copy the "original" frame to the "outermost" frame */ .rept 5 pushq 11*8(%rsp) .endr /* Everything up to here is safe from nested NMIs */ +#ifdef CONFIG_DEBUG_ENTRY + /* + * For ease of testing, unmask NMIs right away. Disabled by + * default because IRET is very expensive. + */ + pushq $0 /* SS */ + pushq %rsp /* RSP (minus 8 because of the previous push) */ + addq $8, (%rsp) /* Fix up RSP */ + pushfq /* RFLAGS */ + pushq $__KERNEL_CS /* CS */ + pushq $1f /* RIP */ + INTERRUPT_RETURN /* continues at repeat_nmi below */ +1: +#endif + +repeat_nmi: /* * If there was a nested NMI, the first NMI's iret will return * here. But NMIs are still enabled and we can take another @@ -1381,16 +1387,20 @@ first_nmi: * it will just return, as we are about to repeat an NMI anyway. 
* This makes it safe to copy to the stack frame that a nested * NMI will update. + * + * RSP is pointing to "outermost RIP". gsbase is unknown, but, if + * we're repeating an NMI, gsbase has the same value that it had on + * the first iteration. paranoid_entry will load the kernel + * gsbase if needed before we call do_nmi. "NMI executing" + * is zero. */ -repeat_nmi: + movq $1, 10*8(%rsp) /* Set "NMI executing". */ + /* - * Update the stack variable to say we are still in NMI (the update - * is benign for the non-repeat case, where 1 was pushed just above - * to this very stack slot). + * Copy the "outermost" frame to the "iret" frame. NMIs that nest + * here must not modify the "iret" frame while we're writing to + * it or it will end up containing garbage. */ - movq $1, 10*8(%rsp) - - /* Make another copy, this one may be modified by nested NMIs */ addq $(10*8), %rsp .rept 5 pushq -6*8(%rsp) @@ -1399,9 +1409,9 @@ repeat_nmi: end_repeat_nmi: /* - * Everything below this point can be preempted by a nested - * NMI if the first NMI took an exception and reset our iret stack - * so that we repeat another NMI. + * Everything below this point can be preempted by a nested NMI. + * If this happens, then the inner NMI will change the "iret" + * frame to point back to repeat_nmi. */ pushq $-1 /* ORIG_RAX: no syscall to restart */ ALLOC_PT_GPREGS_ON_STACK @@ -1415,28 +1425,11 @@ end_repeat_nmi: */ call paranoid_entry - /* - * Save off the CR2 register. If we take a page fault in the NMI then - * it could corrupt the CR2 value. If the NMI preempts a page fault - * handler before it was able to read the CR2 register, and then the - * NMI itself takes a page fault, the page fault that was preempted - * will read the information from the NMI page fault and not the - * origin fault. Save it off and restore it if it changes. - * Use the r12 callee-saved register. - */ - movq %cr2, %r12 - /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ movq %rsp, %rdi movq $-1, %rsi call do_nmi - /* Did the NMI take a page fault? Restore cr2 if it did */ - movq %cr2, %rcx - cmpq %rcx, %r12 - je 1f - movq %r12, %cr2 -1: testl %ebx, %ebx /* swapgs needed? */ jnz nmi_restore nmi_swapgs: @@ -1444,11 +1437,26 @@ nmi_swapgs: nmi_restore: RESTORE_EXTRA_REGS RESTORE_C_REGS - /* Pop the extra iret frame at once */ + + /* Point RSP at the "iret" frame. */ REMOVE_PT_GPREGS_FROM_STACK 6*8 - /* Clear the NMI executing stack variable */ - movq $0, 5*8(%rsp) + /* + * Clear "NMI executing". Set DF first so that we can easily + * distinguish the remaining code between here and IRET from + * the SYSCALL entry and exit paths. On a native kernel, we + * could just inspect RIP, but, on paravirt kernels, + * INTERRUPT_RETURN can translate into a jump into a + * hypercall page. + */ + std + movq $0, 5*8(%rsp) /* clear "NMI executing" */ + + /* + * INTERRUPT_RETURN reads the "iret" frame and exits the NMI + * stack in a single instruction. We are returning to kernel + * mode, so this cannot result in a fault. 
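Putting the pieces together, the "outermost"/"iret" frame pair and the "NMI executing" word behave like a tiny state machine: every pass re-copies the pristine outermost frame into the iret slot, and a nested NMI that arrives while the word is set merely points that slot back at repeat_nmi. A toy user-space simulation of that loop (plain arrays instead of stacks, a counter instead of real NMIs):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Toy simulation of the repeat loop: 5-word arrays stand in for the
     * "outermost" and "iret" frames, a bool for the "NMI executing" word,
     * and pending_nested for NMIs arriving while the handler runs.
     */
    enum { RET_TO_INTERRUPTED, RET_TO_REPEAT_NMI };

    static void run_nmi(int pending_nested)
    {
        long outermost[5] = { RET_TO_INTERRUPTED };   /* never modified */
        long iret[5];
        bool nmi_executing = false;
        int passes = 0;

        do {
            nmi_executing = true;                     /* movq $1, 10*8(%rsp)  */
            memcpy(iret, outermost, sizeof(iret));    /* outermost -> "iret"  */

            /* do_nmi() runs here; a nested NMI may arrive at any point. */
            if (nmi_executing && pending_nested-- > 0)
                iret[0] = RET_TO_REPEAT_NMI;          /* nested_nmi: redirect */

            nmi_executing = false;                    /* std; clear the word  */
            passes++;
        } while (iret[0] == RET_TO_REPEAT_NMI);       /* INTERRUPT_RETURN     */

        printf("handled in %d pass(es)\n", passes);
    }

    int main(void)
    {
        run_nmi(0);   /* no nesting: one pass          */
        run_nmi(2);   /* two nested NMIs: three passes */
        return 0;
    }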
+ */ INTERRUPT_RETURN END(nmi) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index bb187a6a877c..a9360d40fb7f 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -22,8 +22,8 @@ #define __AUDIT_ARCH_LE 0x40000000 #ifndef CONFIG_AUDITSYSCALL -# define sysexit_audit ia32_ret_from_sys_call -# define sysretl_audit ia32_ret_from_sys_call +# define sysexit_audit ia32_ret_from_sys_call_irqs_off +# define sysretl_audit ia32_ret_from_sys_call_irqs_off #endif .section .entry.text, "ax" @@ -140,7 +140,9 @@ sysexit_from_sys_call: */ andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) movl RIP(%rsp), %ecx /* User %eip */ - RESTORE_RSI_RDI + movq RAX(%rsp), %rax + movl RSI(%rsp), %esi + movl RDI(%rsp), %edi xorl %edx, %edx /* Do not leak kernel information */ xorq %r8, %r8 xorq %r9, %r9 @@ -205,14 +207,13 @@ sysexit_from_sys_call: movl RDX(%rsp), %edx /* arg3 */ movl RSI(%rsp), %ecx /* arg4 */ movl RDI(%rsp), %r8d /* arg5 */ - movl %ebp, %r9d /* arg6 */ .endm .macro auditsys_exit exit - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) - jnz ia32_ret_from_sys_call TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) + jnz ia32_ret_from_sys_call movl %eax, %esi /* second arg, syscall return value */ cmpl $-MAX_ERRNO, %eax /* is it an error ? */ jbe 1f @@ -220,7 +221,6 @@ sysexit_from_sys_call: 1: setbe %al /* 1 if error, 0 if not */ movzbl %al, %edi /* zero-extend that into %edi */ call __audit_syscall_exit - movq RAX(%rsp), %rax /* reload syscall return value */ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF @@ -231,11 +231,12 @@ sysexit_from_sys_call: movq %rax, R10(%rsp) movq %rax, R9(%rsp) movq %rax, R8(%rsp) - jmp int_with_check + jmp int_ret_from_sys_call_irqs_off .endm sysenter_auditsys: auditsys_entry_common + movl %ebp, %r9d /* reload 6th syscall arg */ jmp sysenter_dispatch sysexit_audit: @@ -336,7 +337,7 @@ ENTRY(entry_SYSCALL_compat) * 32-bit zero extended: */ ASM_STAC -1: movl (%r8), %ebp +1: movl (%r8), %r9d _ASM_EXTABLE(1b, ia32_badarg) ASM_CLAC orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) @@ -346,7 +347,7 @@ ENTRY(entry_SYSCALL_compat) cstar_do_call: /* 32-bit syscall -> 64-bit C ABI argument conversion */ movl %edi, %r8d /* arg5 */ - movl %ebp, %r9d /* arg6 */ + /* r9 already loaded */ /* arg6 */ xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */ movl %ebx, %edi /* arg1 */ movl %edx, %edx /* arg3 (zero extension) */ @@ -358,7 +359,6 @@ cstar_dispatch: call *ia32_sys_call_table(, %rax, 8) movq %rax, RAX(%rsp) 1: - movl RCX(%rsp), %ebp DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) @@ -366,9 +366,12 @@ cstar_dispatch: sysretl_from_sys_call: andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) - RESTORE_RSI_RDI_RDX + movl RDX(%rsp), %edx + movl RSI(%rsp), %esi + movl RDI(%rsp), %edi movl RIP(%rsp), %ecx movl EFLAGS(%rsp), %r11d + movq RAX(%rsp), %rax xorq %r10, %r10 xorq %r9, %r9 xorq %r8, %r8 @@ -392,7 +395,9 @@ sysretl_from_sys_call: #ifdef CONFIG_AUDITSYSCALL cstar_auditsys: + movl %r9d, R9(%rsp) /* register to be clobbered by call */ auditsys_entry_common + movl R9(%rsp), %r9d /* reload 6th syscall arg */ jmp cstar_dispatch sysretl_audit: @@ -404,14 +409,16 @@ cstar_tracesys: testl $(_TIF_WORK_SYSCALL_ENTRY & 
~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) jz cstar_auditsys #endif + xchgl %r9d, %ebp SAVE_EXTRA_REGS xorl %eax, %eax /* Do not leak kernel information */ movq %rax, R11(%rsp) movq %rax, R10(%rsp) - movq %rax, R9(%rsp) + movq %r9, R9(%rsp) movq %rax, R8(%rsp) movq %rsp, %rdi /* &pt_regs -> arg1 */ call syscall_trace_enter + movl R9(%rsp), %r9d /* Reload arg registers from stack. (see sysenter_tracesys) */ movl RCX(%rsp), %ecx @@ -421,12 +428,53 @@ cstar_tracesys: movl %eax, %eax /* zero extension */ RESTORE_EXTRA_REGS + xchgl %ebp, %r9d jmp cstar_do_call END(entry_SYSCALL_compat) ia32_badarg: - ASM_CLAC - movq $-EFAULT, RAX(%rsp) + /* + * So far, we've entered kernel mode, set AC, turned on IRQs, and + * saved C regs except r8-r11. We haven't done any of the other + * standard entry work, though. We want to bail, but we shouldn't + * treat this as a syscall entry since we don't even know what the + * args are. Instead, treat this as a non-syscall entry, finish + * the entry work, and immediately exit after setting AX = -EFAULT. + * + * We're really just being polite here. Killing the task outright + * would be a reasonable action, too. Given that the only valid + * way to have gotten here is through the vDSO, and we already know + * that the stack pointer is bad, the task isn't going to survive + * for long no matter what we do. + */ + + ASM_CLAC /* undo STAC */ + movq $-EFAULT, RAX(%rsp) /* return -EFAULT if possible */ + + /* Fill in the rest of pt_regs */ + xorl %eax, %eax + movq %rax, R11(%rsp) + movq %rax, R10(%rsp) + movq %rax, R9(%rsp) + movq %rax, R8(%rsp) + SAVE_EXTRA_REGS + + /* Turn IRQs back off. */ + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF + + /* Now finish entering normal kernel mode. */ +#ifdef CONFIG_CONTEXT_TRACKING + call enter_from_user_mode +#endif + + /* And exit again. 
*/ + jmp retint_user + +ia32_ret_from_sys_call_irqs_off: + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + ia32_ret_from_sys_call: xorl %eax, %eax /* Do not leak kernel information */ movq %rax, R11(%rsp) diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index ef8187f9d28d..25e3cf1cd8fd 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -365,3 +365,18 @@ 356 i386 memfd_create sys_memfd_create 357 i386 bpf sys_bpf 358 i386 execveat sys_execveat stub32_execveat +359 i386 socket sys_socket +360 i386 socketpair sys_socketpair +361 i386 bind sys_bind +362 i386 connect sys_connect +363 i386 listen sys_listen +364 i386 accept4 sys_accept4 +365 i386 getsockopt sys_getsockopt compat_sys_getsockopt +366 i386 setsockopt sys_setsockopt compat_sys_setsockopt +367 i386 getsockname sys_getsockname +368 i386 getpeername sys_getpeername +369 i386 sendto sys_sendto +370 i386 sendmsg sys_sendmsg compat_sys_sendmsg +371 i386 recvfrom sys_recvfrom compat_sys_recvfrom +372 i386 recvmsg sys_recvmsg compat_sys_recvmsg +373 i386 shutdown sys_shutdown diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index e97032069f88..a3d0767a6b29 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -8,7 +8,7 @@ KASAN_SANITIZE := n VDSO64-$(CONFIG_X86_64) := y VDSOX32-$(CONFIG_X86_X32_ABI) := y VDSO32-$(CONFIG_X86_32) := y -VDSO32-$(CONFIG_COMPAT) := y +VDSO32-$(CONFIG_IA32_EMULATION) := y # files to link into the vdso vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o @@ -20,7 +20,7 @@ obj-y += vma.o vdso_img-$(VDSO64-y) += 64 vdso_img-$(VDSOX32-y) += x32 vdso_img-$(VDSO32-y) += 32-int80 -vdso_img-$(CONFIG_COMPAT) += 32-syscall +vdso_img-$(CONFIG_IA32_EMULATION) += 32-syscall vdso_img-$(VDSO32-y) += 32-sysenter obj-$(VDSO32-y) += vdso32-setup.o @@ -126,7 +126,7 @@ $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE # Build multiple 32-bit vDSO images to choose from at boot time. # vdso32.so-$(VDSO32-y) += int80 -vdso32.so-$(CONFIG_COMPAT) += syscall +vdso32.so-$(CONFIG_IA32_EMULATION) += syscall vdso32.so-$(VDSO32-y) += sysenter vdso32-images = $(vdso32.so-y:%=vdso32-%.so) @@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ +VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \ $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) GCOV_PROFILE := n diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c index 9793322751e0..ca94fa649251 100644 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ b/arch/x86/entry/vdso/vclock_gettime.c @@ -175,20 +175,8 @@ static notrace cycle_t vread_pvclock(int *mode) notrace static cycle_t vread_tsc(void) { - cycle_t ret; - u64 last; - - /* - * Empirically, a fence (of type that depends on the CPU) - * before rdtsc is enough to ensure that rdtsc is ordered - * with respect to loads. The various CPU manuals are unclear - * as to whether rdtsc can be reordered with later loads, - * but no one has ever seen it happen. 
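The rewritten vread_tsc, continued just below, keeps the existing monotonicity guard: the freshly read TSC value is clamped to gtod->cycle_last so a slightly stale read can never make the clock step backwards across CPUs. A hedged user-space sketch of that clamp, using the __rdtsc() compiler intrinsic in place of the kernel's rdtsc_ordered():

    #include <stdint.h>
    #include <x86intrin.h>

    /*
     * Sketch of the monotonicity clamp, with the last-published counter
     * value passed in by the caller.  The kernel uses rdtsc_ordered(),
     * which adds a fence in front of the read.
     */
    static inline uint64_t read_tsc_clamped(uint64_t cycle_last)
    {
        uint64_t ret = __rdtsc();

        if (ret >= cycle_last)
            return ret;

        /*
         * A read that lands slightly behind another CPU's last update is
         * reported as cycle_last, so time never appears to go backwards.
         */
        return cycle_last;
    }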
- */ - rdtsc_barrier(); - ret = (cycle_t)__native_read_tsc(); - - last = gtod->cycle_last; + cycle_t ret = (cycle_t)rdtsc_ordered(); + u64 last = gtod->cycle_last; if (likely(ret >= last)) return ret; diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 1c9f750c3859..434543145d78 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -177,7 +177,7 @@ up_fail: return ret; } -#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT) +#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static int load_vdso32(void) { int ret; @@ -219,8 +219,11 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm, return map_vdso(&vdso_image_x32, true); } #endif - +#ifdef CONFIG_IA32_EMULATION return load_vdso32(); +#else + return 0; +#endif } #endif #else diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 2dcc6ff6fdcc..26a46f44e298 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -290,7 +290,7 @@ static struct vm_area_struct gate_vma = { struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { -#ifdef CONFIG_IA32_EMULATION +#ifdef CONFIG_COMPAT if (!mm || mm->context.ia32_compat) return NULL; #endif diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index ae3a29ae875b..a0a19b7ba22d 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -34,99 +34,6 @@ #include <asm/sys_ia32.h> #include <asm/smap.h> -int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) -{ - int err = 0; - bool ia32 = test_thread_flag(TIF_IA32); - - if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) - return -EFAULT; - - put_user_try { - /* If you change siginfo_t structure, please make sure that - this code is fixed accordingly. - It should never copy any pad contained in the structure - to avoid security leaks, but must copy the generic - 3 ints plus the relevant union member. */ - put_user_ex(from->si_signo, &to->si_signo); - put_user_ex(from->si_errno, &to->si_errno); - put_user_ex((short)from->si_code, &to->si_code); - - if (from->si_code < 0) { - put_user_ex(from->si_pid, &to->si_pid); - put_user_ex(from->si_uid, &to->si_uid); - put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr); - } else { - /* - * First 32bits of unions are always present: - * si_pid === si_band === si_tid === si_addr(LS half) - */ - put_user_ex(from->_sifields._pad[0], - &to->_sifields._pad[0]); - switch (from->si_code >> 16) { - case __SI_FAULT >> 16: - break; - case __SI_SYS >> 16: - put_user_ex(from->si_syscall, &to->si_syscall); - put_user_ex(from->si_arch, &to->si_arch); - break; - case __SI_CHLD >> 16: - if (ia32) { - put_user_ex(from->si_utime, &to->si_utime); - put_user_ex(from->si_stime, &to->si_stime); - } else { - put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime); - put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime); - } - put_user_ex(from->si_status, &to->si_status); - /* FALL THROUGH */ - default: - case __SI_KILL >> 16: - put_user_ex(from->si_uid, &to->si_uid); - break; - case __SI_POLL >> 16: - put_user_ex(from->si_fd, &to->si_fd); - break; - case __SI_TIMER >> 16: - put_user_ex(from->si_overrun, &to->si_overrun); - put_user_ex(ptr_to_compat(from->si_ptr), - &to->si_ptr); - break; - /* This is not generated by the kernel as of now. 
*/ - case __SI_RT >> 16: - case __SI_MESGQ >> 16: - put_user_ex(from->si_uid, &to->si_uid); - put_user_ex(from->si_int, &to->si_int); - break; - } - } - } put_user_catch(err); - - return err; -} - -int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) -{ - int err = 0; - u32 ptr32; - - if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) - return -EFAULT; - - get_user_try { - get_user_ex(to->si_signo, &from->si_signo); - get_user_ex(to->si_errno, &from->si_errno); - get_user_ex(to->si_code, &from->si_code); - - get_user_ex(to->si_pid, &from->si_pid); - get_user_ex(to->si_uid, &from->si_uid); - get_user_ex(ptr32, &from->si_ptr); - to->si_ptr = compat_ptr(ptr32); - } get_user_catch(err); - - return err; -} - /* * Do a signal return; undo the signal stack. */ diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 4dd1f2d770af..aeac434c9feb 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += cputime.h generic-y += dma-contiguous.h generic-y += early_ioremap.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index c8393634ca0c..ebf6d5e5668c 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -313,7 +313,6 @@ struct apic { /* wakeup_secondary_cpu */ int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip); - bool wait_for_init_deassert; void (*inquire_remote_apic)(int apicid); /* apic ops */ @@ -378,7 +377,6 @@ extern struct apic *__apicdrivers[], *__apicdrivers_end[]; * APIC functionality to boot other CPUs - only used on SMP: */ #ifdef CONFIG_SMP -extern atomic_t init_deasserted; extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); #endif diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h index 9686c3d9ff73..259a7c1ef709 100644 --- a/arch/x86/include/asm/arch_hweight.h +++ b/arch/x86/include/asm/arch_hweight.h @@ -21,7 +21,7 @@ * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective * compiler switches. */ -static inline unsigned int __arch_hweight32(unsigned int w) +static __always_inline unsigned int __arch_hweight32(unsigned int w) { unsigned int res = 0; @@ -42,20 +42,23 @@ static inline unsigned int __arch_hweight8(unsigned int w) return __arch_hweight32(w & 0xff); } +#ifdef CONFIG_X86_32 static inline unsigned long __arch_hweight64(__u64 w) { - unsigned long res = 0; - -#ifdef CONFIG_X86_32 return __arch_hweight32((u32)w) + __arch_hweight32((u32)(w >> 32)); +} #else +static __always_inline unsigned long __arch_hweight64(__u64 w) +{ + unsigned long res = 0; + asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) : "="REG_OUT (res) : REG_IN (w)); -#endif /* CONFIG_X86_32 */ return res; } +#endif /* CONFIG_X86_32 */ #endif diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index e51a8f803f55..818cb8788225 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -91,15 +91,4 @@ do { \ #define smp_mb__before_atomic() barrier() #define smp_mb__after_atomic() barrier() -/* - * Stop RDTSC speculation. This is needed when you need to use RDTSC - * (or get_cycles or vread that possibly accesses the TSC) in a defined - * code region. 
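With the open-coded rdtsc_barrier() below going away, the fence-then-read pairing is folded into a single helper (rdtsc_ordered(), added to msr.h later in this same diff). A user-space sketch of the same pattern, using a plain LFENCE where the kernel patches in MFENCE or LFENCE via alternatives:

    #include <stdint.h>
    #include <x86intrin.h>

    /*
     * User-space equivalent of the fence-then-read pattern: keep RDTSC
     * from being speculated ahead of earlier loads.
     */
    static inline uint64_t ordered_rdtsc_sketch(void)
    {
        _mm_lfence();        /* order the read after preceding loads */
        return __rdtsc();
    }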
- */ -static __always_inline void rdtsc_barrier(void) -{ - alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, - "lfence", X86_FEATURE_LFENCE_RDTSC); -} - #endif /* _ASM_X86_BARRIER_H */ diff --git a/arch/x86/include/asm/context_tracking.h b/arch/x86/include/asm/context_tracking.h deleted file mode 100644 index 1fe49704b146..000000000000 --- a/arch/x86/include/asm/context_tracking.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _ASM_X86_CONTEXT_TRACKING_H -#define _ASM_X86_CONTEXT_TRACKING_H - -#ifdef CONFIG_CONTEXT_TRACKING -# define SCHEDULE_USER call schedule_user -#else -# define SCHEDULE_USER call schedule -#endif - -#endif diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 3d6606fb97d0..477fc28050e4 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -119,6 +119,7 @@ #define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ #define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ #define X86_FEATURE_CID ( 4*32+10) /* Context ID */ +#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ #define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ #define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ #define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ @@ -176,6 +177,7 @@ #define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ #define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ #define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ +#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ /* * Auxiliary flags: Linux defined - For features scattered in various diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h index 9b3b4f2754c7..36a760bda462 100644 --- a/arch/x86/include/asm/delay.h +++ b/arch/x86/include/asm/delay.h @@ -4,5 +4,6 @@ #include <asm-generic/delay.h> void use_tsc_delay(void); +void use_mwaitx_delay(void); #endif /* _ASM_X86_DELAY_H */ diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index a0bf89fd2647..4e10d73cf018 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -280,21 +280,6 @@ static inline void clear_LDT(void) set_ldt(NULL, 0); } -/* - * load one particular LDT into the current CPU - */ -static inline void load_LDT_nolock(mm_context_t *pc) -{ - set_ldt(pc->ldt, pc->size); -} - -static inline void load_LDT(mm_context_t *pc) -{ - preempt_disable(); - load_LDT_nolock(pc); - preempt_enable(); -} - static inline unsigned long get_desc_base(const struct desc_struct *desc) { return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index f161c189c27b..141c561f4664 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -78,7 +78,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t; #ifdef CONFIG_X86_64 extern unsigned int vdso64_enabled; #endif -#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT) +#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) extern unsigned int vdso32_enabled; #endif @@ -187,8 +187,8 @@ static inline void elf_common_init(struct thread_struct *t, #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ elf_common_init(¤t->thread, regs, __USER_DS) -void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp); -#define compat_start_thread start_thread_ia32 +void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp); +#define compat_start_thread compat_start_thread void 
set_personality_ia32(bool); #define COMPAT_SET_PERSONALITY(ex) \ @@ -344,14 +344,9 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm, */ static inline int mmap_is_ia32(void) { -#ifdef CONFIG_X86_32 - return 1; -#endif -#ifdef CONFIG_IA32_EMULATION - if (test_thread_flag(TIF_ADDR32)) - return 1; -#endif - return 0; + return config_enabled(CONFIG_X86_32) || + (config_enabled(CONFIG_COMPAT) && + test_thread_flag(TIF_ADDR32)); } /* Do not change the values. See get_align_mask() */ diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 0637826292de..c49c5173158e 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -189,6 +189,7 @@ union fpregs_state { struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; + u8 __padding[PAGE_SIZE]; }; /* @@ -198,40 +199,6 @@ union fpregs_state { */ struct fpu { /* - * @state: - * - * In-memory copy of all FPU registers that we save/restore - * over context switches. If the task is using the FPU then - * the registers in the FPU are more recent than this state - * copy. If the task context-switches away then they get - * saved here and represent the FPU state. - * - * After context switches there may be a (short) time period - * during which the in-FPU hardware registers are unchanged - * and still perfectly match this state, if the tasks - * scheduled afterwards are not using the FPU. - * - * This is the 'lazy restore' window of optimization, which - * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'. - * - * We detect whether a subsequent task uses the FPU via setting - * CR0::TS to 1, which causes any FPU use to raise a #NM fault. - * - * During this window, if the task gets scheduled again, we - * might be able to skip having to do a restore from this - * memory buffer to the hardware registers - at the cost of - * incurring the overhead of #NM fault traps. - * - * Note that on modern CPUs that support the XSAVEOPT (or other - * optimized XSAVE instructions), we don't use #NM traps anymore, - * as the hardware can track whether FPU registers need saving - * or not. On such CPUs we activate the non-lazy ('eagerfpu') - * logic, which unconditionally saves/restores all FPU state - * across context switches. (if FPU state exists.) - */ - union fpregs_state state; - - /* * @last_cpu: * * Records the last CPU on which this context was loaded into @@ -288,6 +255,43 @@ struct fpu { * deal with bursty apps that only use the FPU for a short time: */ unsigned char counter; + /* + * @state: + * + * In-memory copy of all FPU registers that we save/restore + * over context switches. If the task is using the FPU then + * the registers in the FPU are more recent than this state + * copy. If the task context-switches away then they get + * saved here and represent the FPU state. + * + * After context switches there may be a (short) time period + * during which the in-FPU hardware registers are unchanged + * and still perfectly match this state, if the tasks + * scheduled afterwards are not using the FPU. + * + * This is the 'lazy restore' window of optimization, which + * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'. + * + * We detect whether a subsequent task uses the FPU via setting + * CR0::TS to 1, which causes any FPU use to raise a #NM fault. 
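The lazy-restore window described in this comment boils down to an ownership test: the registers still in the CPU can be reused only if this context is the one the CPU last loaded and the task is back on that same CPU. A minimal C model of that test (names are illustrative stand-ins for the per-CPU fpu_fpregs_owner_ctx and fpu->last_cpu):

    #include <stdbool.h>

    /*
     * Model of the lazy-restore test: no restore is needed only when this
     * context is the CPU's current owner and the task runs on that CPU.
     */
    struct fpu_ctx {
        int last_cpu;     /* CPU that last had this context loaded */
    };

    static bool fpregs_still_valid(const struct fpu_ctx *fpu,
                                   const struct fpu_ctx *cpu_owner_ctx,
                                   int this_cpu)
    {
        return fpu == cpu_owner_ctx && fpu->last_cpu == this_cpu;
    }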
+ * + * During this window, if the task gets scheduled again, we + * might be able to skip having to do a restore from this + * memory buffer to the hardware registers - at the cost of + * incurring the overhead of #NM fault traps. + * + * Note that on modern CPUs that support the XSAVEOPT (or other + * optimized XSAVE instructions), we don't use #NM traps anymore, + * as the hardware can track whether FPU registers need saving + * or not. On such CPUs we activate the non-lazy ('eagerfpu') + * logic, which unconditionally saves/restores all FPU state + * across context switches. (if FPU state exists.) + */ + union fpregs_state state; + /* + * WARNING: 'state' is dynamically-sized. Do not put + * anything after it here. + */ }; #endif /* _ASM_X86_FPU_H */ diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h index d0e8e0141041..28019765442e 100644 --- a/arch/x86/include/asm/ia32.h +++ b/arch/x86/include/asm/ia32.h @@ -22,15 +22,6 @@ struct ucontext_ia32 { compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; -struct ucontext_x32 { - unsigned int uc_flags; - unsigned int uc_link; - compat_stack_t uc_stack; - unsigned int uc__pad0; /* needed for alignment */ - struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */ - compat_sigset_t uc_sigmask; /* mask last for extensibility */ -}; - /* This matches struct stat64 in glibc2.2, hence the absolutely * insane amounts of padding around dev_t's. */ diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h index 200ec2e7821d..cd0310e186f4 100644 --- a/arch/x86/include/asm/intel_pmc_ipc.h +++ b/arch/x86/include/asm/intel_pmc_ipc.h @@ -25,36 +25,9 @@ #if IS_ENABLED(CONFIG_INTEL_PMC_IPC) -/* - * intel_pmc_ipc_simple_command - * @cmd: command - * @sub: sub type - */ int intel_pmc_ipc_simple_command(int cmd, int sub); - -/* - * intel_pmc_ipc_raw_cmd - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords - * @sptr: data writing to SPTR register - * @dptr: data writing to DPTR register - */ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen, u32 dptr, u32 sptr); - -/* - * intel_pmc_ipc_command - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords - */ int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen); diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index cc9c61bc1abe..7cfc085b6879 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -180,6 +180,8 @@ static inline unsigned int isa_virt_to_bus(volatile void *address) */ extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); +#define ioremap_uc ioremap_uc + extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val); diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h index 57995f0596a6..b72ad0faa6c5 100644 --- a/arch/x86/include/asm/iosf_mbi.h +++ b/arch/x86/include/asm/iosf_mbi.h @@ -52,20 +52,20 @@ /* Quark available units */ #define QRK_MBI_UNIT_HBA 0x00 -#define QRK_MBI_UNIT_HB 0x03 +#define QRK_MBI_UNIT_HB 0x03 #define QRK_MBI_UNIT_RMU 0x04 -#define QRK_MBI_UNIT_MM 0x05 +#define QRK_MBI_UNIT_MM 
0x05 #define QRK_MBI_UNIT_MMESRAM 0x05 #define QRK_MBI_UNIT_SOC 0x31 /* Quark read/write opcodes */ #define QRK_MBI_HBA_READ 0x10 #define QRK_MBI_HBA_WRITE 0x11 -#define QRK_MBI_HB_READ 0x10 +#define QRK_MBI_HB_READ 0x10 #define QRK_MBI_HB_WRITE 0x11 #define QRK_MBI_RMU_READ 0x10 #define QRK_MBI_RMU_WRITE 0x11 -#define QRK_MBI_MM_READ 0x10 +#define QRK_MBI_MM_READ 0x10 #define QRK_MBI_MM_WRITE 0x11 #define QRK_MBI_MMESRAM_READ 0x12 #define QRK_MBI_MMESRAM_WRITE 0x13 diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 4c2d2eb2060a..6ca9fd6234e1 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -117,16 +117,6 @@ #define FPU_IRQ 13 -#define FIRST_VM86_IRQ 3 -#define LAST_VM86_IRQ 15 - -#ifndef __ASSEMBLY__ -static inline int invalid_vm86_irq(int irq) -{ - return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ; -} -#endif - /* * Size the maximum number of interrupts. * diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h index 74a2a8dc9908..1410b567ecde 100644 --- a/arch/x86/include/asm/kasan.h +++ b/arch/x86/include/asm/kasan.h @@ -1,6 +1,9 @@ #ifndef _ASM_X86_KASAN_H #define _ASM_X86_KASAN_H +#include <linux/const.h> +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) + /* * Compiler uses shadow offset assuming that addresses start * from 0. Kernel addresses don't start from 0, so shadow diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 2a7f5d782c33..c12e845f59e6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -252,6 +252,11 @@ struct kvm_pio_request { int size; }; +struct rsvd_bits_validate { + u64 rsvd_bits_mask[2][4]; + u64 bad_mt_xwr; +}; + /* * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level * 32-bit). The kvm_mmu structure abstracts the details of the current mmu @@ -289,8 +294,15 @@ struct kvm_mmu { u64 *pae_root; u64 *lm_root; - u64 rsvd_bits_mask[2][4]; - u64 bad_mt_xwr; + + /* + * check zero bits on shadow page table entries, these + * bits include not only hardware reserved bits but also + * the bits spte never used. 
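A mask table shaped like rsvd_bits_validate above is typically consulted by indexing on the PTE's large-page bit and on the paging level; the sketch below is one plausible way such a check could look and is not KVM's actual code:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Hypothetical consultation of a reserved-bits table: rsvd_bits_mask
     * indexed by PTE bit 7 (page size) and by level-1; bad_mt_xwr flags
     * disallowed low-bit (memory type / XWR) combinations.  Illustrative
     * only.
     */
    struct rsvd_bits_model {
        uint64_t rsvd_bits_mask[2][4];
        uint64_t bad_mt_xwr;
    };

    static bool pte_has_rsvd_bits(const struct rsvd_bits_model *rsvd,
                                  uint64_t pte, int level)
    {
        int bit7 = (pte >> 7) & 1;

        if (pte & rsvd->rsvd_bits_mask[bit7][level - 1])
            return true;
        return (rsvd->bad_mt_xwr >> (pte & 0x3f)) & 1;
    }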
+ */ + struct rsvd_bits_validate shadow_zero_check; + + struct rsvd_bits_validate guest_rsvd_check; /* * Bitmap: bit set = last pte in walk @@ -358,6 +370,11 @@ struct kvm_mtrr { struct list_head head; }; +/* Hyper-V per vcpu emulation context */ +struct kvm_vcpu_hv { + u64 hv_vapic; +}; + struct kvm_vcpu_arch { /* * rip and regs accesses must go through @@ -514,8 +531,7 @@ struct kvm_vcpu_arch { /* used for guest single stepping over the given code position */ unsigned long singlestep_rip; - /* fields used by HYPER-V emulation */ - u64 hv_vapic; + struct kvm_vcpu_hv hyperv; cpumask_var_t wbinvd_dirty_mask; @@ -586,6 +602,17 @@ struct kvm_apic_map { struct kvm_lapic *logical_map[16][16]; }; +/* Hyper-V emulation context */ +struct kvm_hv { + u64 hv_guest_os_id; + u64 hv_hypercall; + u64 hv_tsc_page; + + /* Hyper-v based guest crash (NT kernel bugcheck) parameters */ + u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS]; + u64 hv_crash_ctl; +}; + struct kvm_arch { unsigned int n_used_mmu_pages; unsigned int n_requested_mmu_pages; @@ -604,6 +631,8 @@ struct kvm_arch { bool iommu_noncoherent; #define __KVM_HAVE_ARCH_NONCOHERENT_DMA atomic_t noncoherent_dma_count; +#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE + atomic_t assigned_device_count; struct kvm_pic *vpic; struct kvm_ioapic *vioapic; struct kvm_pit *vpit; @@ -643,16 +672,14 @@ struct kvm_arch { /* reads protected by irq_srcu, writes by irq_lock */ struct hlist_head mask_notifier_list; - /* fields used by HYPER-V emulation */ - u64 hv_guest_os_id; - u64 hv_hypercall; - u64 hv_tsc_page; + struct kvm_hv hyperv; #ifdef CONFIG_KVM_MMU_AUDIT int audit_point; #endif bool boot_vcpu_runs_old_kvmclock; + u32 bsp_vcpu_id; u64 disabled_quirks; }; @@ -1201,5 +1228,7 @@ int __x86_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); int x86_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); +bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); +bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); #endif /* _ASM_X86_KVM_HOST_H */ diff --git a/arch/x86/include/asm/math_emu.h b/arch/x86/include/asm/math_emu.h index 031f6266f425..0d9b14f60d2c 100644 --- a/arch/x86/include/asm/math_emu.h +++ b/arch/x86/include/asm/math_emu.h @@ -2,7 +2,6 @@ #define _ASM_X86_MATH_EMU_H #include <asm/ptrace.h> -#include <asm/vm86.h> /* This structure matches the layout of the data saved to the stack following a device-not-present interrupt, part of it saved @@ -10,9 +9,6 @@ */ struct math_emu_info { long ___orig_eip; - union { - struct pt_regs *regs; - struct kernel_vm86_regs *vm86; - }; + struct pt_regs *regs; }; #endif /* _ASM_X86_MATH_EMU_H */ diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 982dfc3679ad..2dbc0bf2b9f3 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -151,10 +151,12 @@ extern int mce_p5_enabled; #ifdef CONFIG_X86_MCE int mcheck_init(void); void mcheck_cpu_init(struct cpuinfo_x86 *c); +void mcheck_cpu_clear(struct cpuinfo_x86 *c); void mcheck_vendor_init_severity(void); #else static inline int mcheck_init(void) { return 0; } static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {} +static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {} static inline void mcheck_vendor_init_severity(void) {} #endif @@ -181,20 +183,18 @@ DECLARE_PER_CPU(struct device *, mce_device); #ifdef CONFIG_X86_MCE_INTEL void mce_intel_feature_init(struct cpuinfo_x86 *c); +void mce_intel_feature_clear(struct cpuinfo_x86 *c); void cmci_clear(void); void cmci_reenable(void); void 
cmci_rediscover(void); void cmci_recheck(void); -void lmce_clear(void); -void lmce_enable(void); #else static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } +static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { } static inline void cmci_clear(void) {} static inline void cmci_reenable(void) {} static inline void cmci_rediscover(void) {} static inline void cmci_recheck(void) {} -static inline void lmce_clear(void) {} -static inline void lmce_enable(void) {} #endif #ifdef CONFIG_X86_MCE_AMD diff --git a/arch/x86/include/asm/mm-arch-hooks.h b/arch/x86/include/asm/mm-arch-hooks.h deleted file mode 100644 index 4e881a342236..000000000000 --- a/arch/x86/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_X86_MM_ARCH_HOOKS_H -#define _ASM_X86_MM_ARCH_HOOKS_H - -#endif /* _ASM_X86_MM_ARCH_HOOKS_H */ diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 09b9620a73b4..55234d5e7160 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -9,8 +9,9 @@ * we put the segment information here. */ typedef struct { - void *ldt; - int size; +#ifdef CONFIG_MODIFY_LDT_SYSCALL + struct ldt_struct *ldt; +#endif #ifdef CONFIG_X86_64 /* True if mm supports a task running in 32 bit compatibility mode. */ diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 5e8daee7c5c9..379cd3658799 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -23,7 +23,7 @@ extern struct static_key rdpmc_always_available; static inline void load_mm_cr4(struct mm_struct *mm) { - if (static_key_true(&rdpmc_always_available) || + if (static_key_false(&rdpmc_always_available) || atomic_read(&mm->context.perf_rdpmc_allowed)) cr4_set_bits(X86_CR4_PCE); else @@ -33,12 +33,68 @@ static inline void load_mm_cr4(struct mm_struct *mm) static inline void load_mm_cr4(struct mm_struct *mm) {} #endif +#ifdef CONFIG_MODIFY_LDT_SYSCALL +/* + * ldt_structs can be allocated, used, and freed, but they are never + * modified while live. + */ +struct ldt_struct { + /* + * Xen requires page-aligned LDTs with special permissions. This is + * needed to prevent us from installing evil descriptors such as + * call gates. On native, we could merge the ldt_struct and LDT + * allocations, but it's not worth trying to optimize. + */ + struct desc_struct *entries; + int size; +}; + /* * Used for LDT copy/destruction. */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); +#else /* CONFIG_MODIFY_LDT_SYSCALL */ +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + return 0; +} +static inline void destroy_context(struct mm_struct *mm) {} +#endif +static inline void load_mm_ldt(struct mm_struct *mm) +{ +#ifdef CONFIG_MODIFY_LDT_SYSCALL + struct ldt_struct *ldt; + + /* lockless_dereference synchronizes with smp_store_release */ + ldt = lockless_dereference(mm->context.ldt); + + /* + * Any change to mm->context.ldt is followed by an IPI to all + * CPUs with the mm active. The LDT will not be freed until + * after the IPI is handled by all such CPUs. 
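The "lockless_dereference synchronizes with smp_store_release" pairing relied on above is the standard publish/consume pattern. A user-space C11 sketch of it (the kernel additionally defers freeing the old table until after the IPI, which this model omits):

    #include <stdatomic.h>
    #include <stdlib.h>

    /*
     * Publish/consume model: the writer fully initializes a new table and
     * publishes the pointer with a release store; the reader loads it with
     * an acquire (consume-like) load and then sees initialized entries.
     * Types are illustrative, not the kernel's ldt_struct.
     */
    struct ldt_model {
        int *entries;
        int size;
    };

    static _Atomic(struct ldt_model *) current_ldt;

    static void publish_ldt(int size)
    {
        struct ldt_model *new_ldt = malloc(sizeof(*new_ldt));

        new_ldt->entries = calloc(size, sizeof(int));
        new_ldt->size = size;
        /* Initialization above is ordered before the pointer is visible. */
        atomic_store_explicit(&current_ldt, new_ldt, memory_order_release);
    }

    static int read_ldt_size(void)
    {
        struct ldt_model *ldt =
            atomic_load_explicit(&current_ldt, memory_order_acquire);

        return ldt ? ldt->size : 0;   /* NULL means "no LDT": clear_LDT() */
    }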
This means that, + * if the ldt_struct changes before we return, the values we see + * will be safe, and the new values will be loaded before we run + * any user code. + * + * NB: don't try to convert this to use RCU without extreme care. + * We would still need IRQs off, because we don't want to change + * the local LDT after an IPI loaded a newer value than the one + * that we can see. + */ + + if (unlikely(ldt)) + set_ldt(ldt->entries, ldt->size); + else + clear_LDT(); +#else + clear_LDT(); +#endif + + DEBUG_LOCKS_WARN_ON(preemptible()); +} static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { @@ -70,6 +126,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, /* Load per-mm CR4 state */ load_mm_cr4(next); +#ifdef CONFIG_MODIFY_LDT_SYSCALL /* * Load the LDT, if the LDT is different. * @@ -78,12 +135,13 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * was called and then modify_ldt changed * prev->context.ldt but suppressed an IPI to this CPU. * In this case, prev->context.ldt != NULL, because we - * never free an LDT while the mm still exists. That - * means that next->context.ldt != prev->context.ldt, - * because mms never share an LDT. + * never set context.ldt to NULL while the mm still + * exists. That means that next->context.ldt != + * prev->context.ldt, because mms never share an LDT. */ if (unlikely(prev->context.ldt != next->context.ldt)) - load_LDT_nolock(&next->context); + load_mm_ldt(next); +#endif } #ifdef CONFIG_SMP else { @@ -106,7 +164,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, load_cr3(next->pgd); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); load_mm_cr4(next); - load_LDT_nolock(&next->context); + load_mm_ldt(next); } } #endif diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index c163215abb9a..aaf59b7da98a 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -7,6 +7,7 @@ struct ms_hyperv_info { u32 features; + u32 misc_features; u32 hints; }; @@ -20,4 +21,8 @@ void hyperv_vector_handler(struct pt_regs *regs); void hv_setup_vmbus_irq(void (*handler)(void)); void hv_remove_vmbus_irq(void); +void hv_setup_kexec_handler(void (*handler)(void)); +void hv_remove_kexec_handler(void); +void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); +void hv_remove_crash_handler(void); #endif diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 9ebc3d009373..fcd17c1fc0c6 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -73,6 +73,12 @@ #define MSR_LBR_CORE_FROM 0x00000040 #define MSR_LBR_CORE_TO 0x00000060 +#define MSR_LBR_INFO_0 0x00000dc0 /* ... 
0xddf for _31 */ +#define LBR_INFO_MISPRED BIT_ULL(63) +#define LBR_INFO_IN_TX BIT_ULL(62) +#define LBR_INFO_ABORT BIT_ULL(61) +#define LBR_INFO_CYCLES 0xffff + #define MSR_IA32_PEBS_ENABLE 0x000003f1 #define MSR_IA32_DS_AREA 0x00000600 #define MSR_IA32_PERF_CAPABILITIES 0x00000345 @@ -80,13 +86,21 @@ #define MSR_IA32_RTIT_CTL 0x00000570 #define RTIT_CTL_TRACEEN BIT(0) +#define RTIT_CTL_CYCLEACC BIT(1) #define RTIT_CTL_OS BIT(2) #define RTIT_CTL_USR BIT(3) #define RTIT_CTL_CR3EN BIT(7) #define RTIT_CTL_TOPA BIT(8) +#define RTIT_CTL_MTC_EN BIT(9) #define RTIT_CTL_TSC_EN BIT(10) #define RTIT_CTL_DISRETC BIT(11) #define RTIT_CTL_BRANCH_EN BIT(13) +#define RTIT_CTL_MTC_RANGE_OFFSET 14 +#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET) +#define RTIT_CTL_CYC_THRESH_OFFSET 19 +#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET) +#define RTIT_CTL_PSB_FREQ_OFFSET 24 +#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET) #define MSR_IA32_RTIT_STATUS 0x00000571 #define RTIT_STATUS_CONTEXTEN BIT(1) #define RTIT_STATUS_TRIGGEREN BIT(2) diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index e6a707eb5081..77d8b284e4a7 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -47,14 +47,13 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) * it means rax *or* rdx. */ #ifdef CONFIG_X86_64 -#define DECLARE_ARGS(val, low, high) unsigned low, high -#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32)) -#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) +/* Using 64-bit values saves one instruction clearing the high half of low */ +#define DECLARE_ARGS(val, low, high) unsigned long low, high +#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32) #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) #else #define DECLARE_ARGS(val, low, high) unsigned long long val #define EAX_EDX_VAL(val, low, high) (val) -#define EAX_EDX_ARGS(val, low, high) "A" (val) #define EAX_EDX_RET(val, low, high) "=A" (val) #endif @@ -106,12 +105,19 @@ notrace static inline int native_write_msr_safe(unsigned int msr, return err; } -extern unsigned long long native_read_tsc(void); - extern int rdmsr_safe_regs(u32 regs[8]); extern int wrmsr_safe_regs(u32 regs[8]); -static __always_inline unsigned long long __native_read_tsc(void) +/** + * rdtsc() - returns the current TSC without ordering constraints + * + * rdtsc() returns the result of RDTSC as a 64-bit integer. The + * only ordering constraint it supplies is the ordering implied by + * "asm volatile": it will put the RDTSC in the place you expect. The + * CPU can and will speculatively execute that RDTSC, though, so the + * results can be non-monotonic if compared on different CPUs. + */ +static __always_inline unsigned long long rdtsc(void) { DECLARE_ARGS(val, low, high); @@ -120,6 +126,35 @@ static __always_inline unsigned long long __native_read_tsc(void) return EAX_EDX_VAL(val, low, high); } +/** + * rdtsc_ordered() - read the current TSC in program order + * + * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer. + * It is ordered like a load to a global in-memory counter. It should + * be impossible to observe non-monotonic rdtsc_unordered() behavior + * across multiple CPUs as long as the TSC is synced. + */ +static __always_inline unsigned long long rdtsc_ordered(void) +{ + /* + * The RDTSC instruction is not ordered relative to memory + * access. 
The Intel SDM and the AMD APM are both vague on this + * point, but empirically an RDTSC instruction can be + * speculatively executed before prior loads. An RDTSC + * immediately after an appropriate barrier appears to be + * ordered as a normal load, that is, it provides the same + * ordering guarantees as reading from a global memory location + * that some other imaginary CPU is updating continuously with a + * time stamp. + */ + alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, + "lfence", X86_FEATURE_LFENCE_RDTSC); + return rdtsc(); +} + +/* Deprecated, keep it for a cycle for easier merging: */ +#define rdtscll(now) do { (now) = rdtsc_ordered(); } while (0) + static inline unsigned long long native_read_pmc(int counter) { DECLARE_ARGS(val, low, high); @@ -153,8 +188,10 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high) #define rdmsrl(msr, val) \ ((val) = native_read_msr((msr))) -#define wrmsrl(msr, val) \ - native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32)) +static inline void wrmsrl(unsigned msr, u64 val) +{ + native_write_msr(msr, (u32)val, (u32)(val >> 32)); +} /* wrmsr with exception handling */ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) @@ -180,12 +217,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) return err; } -#define rdtscl(low) \ - ((low) = (u32)__native_read_tsc()) - -#define rdtscll(val) \ - ((val) = __native_read_tsc()) - #define rdpmc(counter, low, high) \ do { \ u64 _l = native_read_pmc((counter)); \ @@ -195,15 +226,6 @@ do { \ #define rdpmcl(counter, val) ((val) = native_read_pmc(counter)) -#define rdtscp(low, high, aux) \ -do { \ - unsigned long long _val = native_read_tscp(&(aux)); \ - (low) = (u32)_val; \ - (high) = (u32)(_val >> 32); \ -} while (0) - -#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) - #endif /* !CONFIG_PARAVIRT */ /* diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 653dfa7662e1..c70689b5e5aa 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -14,6 +14,9 @@ #define CPUID5_ECX_INTERRUPT_BREAK 0x2 #define MWAIT_ECX_INTERRUPT_BREAK 0x1 +#define MWAITX_ECX_TIMER_ENABLE BIT(1) +#define MWAITX_MAX_LOOPS ((u32)-1) +#define MWAITX_DISABLE_CSTATES 0xf static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) @@ -23,6 +26,14 @@ static inline void __monitor(const void *eax, unsigned long ecx, :: "a" (eax), "c" (ecx), "d"(edx)); } +static inline void __monitorx(const void *eax, unsigned long ecx, + unsigned long edx) +{ + /* "monitorx %eax, %ecx, %edx;" */ + asm volatile(".byte 0x0f, 0x01, 0xfa;" + :: "a" (eax), "c" (ecx), "d"(edx)); +} + static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax, %ecx;" */ @@ -30,6 +41,40 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) :: "a" (eax), "c" (ecx)); } +/* + * MWAITX allows for a timer expiration to get the core out a wait state in + * addition to the default MWAIT exit condition of a store appearing at a + * monitored virtual address. + * + * Registers: + * + * MWAITX ECX[1]: enable timer if set + * MWAITX EBX[31:0]: max wait time expressed in SW P0 clocks. The software P0 + * frequency is the same as the TSC frequency. 
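Because the MWAITX timeout is expressed in TSC-rate clocks, a bounded delay can be built directly on these helpers. A hedged kernel-style sketch using only what this hunk and msr.h provide (__monitorx, __mwaitx, MWAITX_MAX_LOOPS, MWAITX_ECX_TIMER_ENABLE, MWAITX_DISABLE_CSTATES, rdtsc_ordered()); the monitored variable and the loop shape are illustrative, not necessarily what use_mwaitx_delay() installs:

    /* Assumes the usual kernel headers: <linux/kernel.h>, <asm/mwait.h>, <asm/msr.h>. */
    static void mwaitx_delay_sketch(u64 tsc_cycles)
    {
        static char monitor_line __aligned(64);   /* something to MONITORX */
        u64 start = rdtsc_ordered();
        u64 elapsed = 0;

        while (elapsed < tsc_cycles) {
            u64 left  = tsc_cycles - elapsed;
            u64 chunk = left < MWAITX_MAX_LOOPS ? left : MWAITX_MAX_LOOPS;

            __monitorx(&monitor_line, 0, 0);
            /* EBX = timeout in TSC-rate clocks, ECX[1] = arm the timer. */
            __mwaitx(MWAITX_DISABLE_CSTATES, chunk, MWAITX_ECX_TIMER_ENABLE);

            elapsed = rdtsc_ordered() - start;
        }
    }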
+ * + * Below is a comparison between MWAIT and MWAITX on AMD processors: + * + * MWAIT MWAITX + * opcode 0f 01 c9 | 0f 01 fb + * ECX[0] value of RFLAGS.IF seen by instruction + * ECX[1] unused/#GP if set | enable timer if set + * ECX[31:2] unused/#GP if set + * EAX unused (reserve for hint) + * EBX[31:0] unused | max wait time (P0 clocks) + * + * MONITOR MONITORX + * opcode 0f 01 c8 | 0f 01 fa + * EAX (logical) address to monitor + * ECX #GP if not zero + */ +static inline void __mwaitx(unsigned long eax, unsigned long ebx, + unsigned long ecx) +{ + /* "mwaitx %eax, %ebx, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xfb;" + :: "a" (eax), "b" (ebx), "c" (ecx)); +} + static inline void __sti_mwait(unsigned long eax, unsigned long ecx) { trace_hardirqs_on(); diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index d143bfad45d7..10d0596433f8 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -153,7 +153,11 @@ do { \ val = paravirt_read_msr(msr, &_err); \ } while (0) -#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) +static inline void wrmsrl(unsigned msr, u64 val) +{ + wrmsr(msr, (u32)val, (u32)(val>>32)); +} + #define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b) /* rdmsr with exception handling */ @@ -174,19 +178,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) return err; } -static inline u64 paravirt_read_tsc(void) -{ - return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); -} - -#define rdtscl(low) \ -do { \ - u64 _l = paravirt_read_tsc(); \ - low = (int)_l; \ -} while (0) - -#define rdtscll(val) (val = paravirt_read_tsc()) - static inline unsigned long long paravirt_sched_clock(void) { return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); @@ -215,27 +206,6 @@ do { \ #define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter)) -static inline unsigned long long paravirt_rdtscp(unsigned int *aux) -{ - return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux); -} - -#define rdtscp(low, high, aux) \ -do { \ - int __aux; \ - unsigned long __val = paravirt_rdtscp(&__aux); \ - (low) = (u32)__val; \ - (high) = (u32)(__val >> 32); \ - (aux) = __aux; \ -} while (0) - -#define rdtscpll(val, aux) \ -do { \ - unsigned long __aux; \ - val = paravirt_rdtscp(&__aux); \ - (aux) = __aux; \ -} while (0) - static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) { PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index a6b8f9fadb06..ce029e4fa7c6 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -156,9 +156,7 @@ struct pv_cpu_ops { u64 (*read_msr)(unsigned int msr, int *err); int (*write_msr)(unsigned int msr, unsigned low, unsigned high); - u64 (*read_tsc)(void); u64 (*read_pmc)(int counter); - unsigned long long (*read_tscp)(unsigned int *aux); #ifdef CONFIG_X86_32 /* diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 164e3f8d3c3d..fa1195dae425 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -93,8 +93,6 @@ extern raw_spinlock_t pci_config_lock; extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev); -extern bool mp_should_keep_irq(struct device *dev); - struct pci_raw_ops { int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val); diff --git 
a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index dc0f6ed35b08..7bcb861a04e5 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -159,6 +159,13 @@ struct x86_pmu_capability { */ #define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16) +#define GLOBAL_STATUS_COND_CHG BIT_ULL(63) +#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(62) +#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61) +#define GLOBAL_STATUS_ASIF BIT_ULL(60) +#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59) +#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58) + /* * IBS cpuid feature detection */ diff --git a/arch/x86/include/asm/pmc_atom.h b/arch/x86/include/asm/pmc_atom.h index bc0fc0866553..aa8744c77c6d 100644 --- a/arch/x86/include/asm/pmc_atom.h +++ b/arch/x86/include/asm/pmc_atom.h @@ -18,6 +18,8 @@ /* ValleyView Power Control Unit PCI Device ID */ #define PCI_DEVICE_ID_VLV_PMC 0x0F1C +/* CherryTrail Power Control Unit PCI Device ID */ +#define PCI_DEVICE_ID_CHT_PMC 0x229C /* PMC Memory mapped IO registers */ #define PMC_BASE_ADDR_OFFSET 0x44 @@ -29,6 +31,10 @@ #define PMC_FUNC_DIS 0x34 #define PMC_FUNC_DIS_2 0x38 +/* CHT specific bits in FUNC_DIS2 register */ +#define BIT_FD_GMM BIT(3) +#define BIT_FD_ISH BIT(4) + /* S0ix wake event control */ #define PMC_S0IX_WAKE_EN 0x3C @@ -75,6 +81,21 @@ #define PMC_PSS_BIT_USB BIT(16) #define PMC_PSS_BIT_USB_SUS BIT(17) +/* CHT specific bits in PSS register */ +#define PMC_PSS_BIT_CHT_UFS BIT(7) +#define PMC_PSS_BIT_CHT_UXD BIT(11) +#define PMC_PSS_BIT_CHT_UXD_FD BIT(12) +#define PMC_PSS_BIT_CHT_UX_ENG BIT(15) +#define PMC_PSS_BIT_CHT_USB_SUS BIT(16) +#define PMC_PSS_BIT_CHT_GMM BIT(17) +#define PMC_PSS_BIT_CHT_ISH BIT(18) +#define PMC_PSS_BIT_CHT_DFX_MASTER BIT(26) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER1 BIT(27) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER2 BIT(28) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER3 BIT(29) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER4 BIT(30) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER5 BIT(31) + /* These registers reflect D3 status of functions */ #define PMC_D3_STS_0 0xA0 @@ -117,6 +138,10 @@ #define BIT_USH_SS_PHY BIT(2) #define BIT_DFX BIT(3) +/* CHT specific bits in PMC_D3_STS_1 register */ +#define BIT_STS_GMM BIT(1) +#define BIT_STS_ISH BIT(2) + /* PMC I/O Registers */ #define ACPI_BASE_ADDR_OFFSET 0x40 #define ACPI_BASE_ADDR_MASK 0xFFFFFE00 @@ -126,4 +151,8 @@ #define SLEEP_TYPE_MASK 0xFFFFECFF #define SLEEP_TYPE_S5 0x1C00 #define SLEEP_ENABLE 0x2000 + +extern int pmc_atom_read(int offset, u32 *value); +extern int pmc_atom_write(int offset, u32 value); + #endif /* PMC_ATOM_H */ diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index dca71714f860..b12f81022a6b 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void) /* * Returns true when we need to resched and can (barring IRQ state). 
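The replacement just below compares the per-CPU preempt count against a caller-supplied offset instead of zero. That works because x86 folds PREEMPT_NEED_RESCHED into the count as an inverted high bit: it stays set while no reschedule is pending and is cleared when one is, so a plain equality test against the expected nesting offset checks both conditions at once. A small user-space model (constants are illustrative, not the kernel's headers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Inverted need-resched bit: set means "no reschedule pending". */
    #define PREEMPT_NEED_RESCHED 0x80000000u

    static bool should_resched_model(uint32_t preempt_count, uint32_t preempt_offset)
    {
        return preempt_count == preempt_offset;
    }

    int main(void)
    {
        /* Resched pending, no extra nesting: same as the old !count test. */
        printf("%d\n", should_resched_model(0, 0));                        /* 1 */
        /* Resched pending with one level held; caller passes its offset.  */
        printf("%d\n", should_resched_model(1, 1));                        /* 1 */
        /* No resched pending: the set bit never equals a small offset.    */
        printf("%d\n", should_resched_model(PREEMPT_NEED_RESCHED, 0));     /* 0 */
        return 0;
    }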
*/ -static __always_inline bool should_resched(void) +static __always_inline bool should_resched(int preempt_offset) { - return unlikely(!raw_cpu_read_4(__preempt_count)); + return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); } #ifdef CONFIG_PREEMPT diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 43e6519df0d5..19577dd325fa 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -6,8 +6,8 @@ /* Forward declaration, a strange C thing */ struct task_struct; struct mm_struct; +struct vm86; -#include <asm/vm86.h> #include <asm/math_emu.h> #include <asm/segment.h> #include <asm/types.h> @@ -390,9 +390,6 @@ struct thread_struct { #endif unsigned long gs; - /* Floating point and extended processor state */ - struct fpu fpu; - /* Save middle states of ptrace breakpoints */ struct perf_event *ptrace_bps[HBP_NUM]; /* Debug status used for traps, single steps, etc... */ @@ -403,21 +400,22 @@ struct thread_struct { unsigned long cr2; unsigned long trap_nr; unsigned long error_code; -#ifdef CONFIG_X86_32 +#ifdef CONFIG_VM86 /* Virtual 86 mode info */ - struct vm86_struct __user *vm86_info; - unsigned long screen_bitmap; - unsigned long v86flags; - unsigned long v86mask; - unsigned long saved_sp0; - unsigned int saved_fs; - unsigned int saved_gs; + struct vm86 *vm86; #endif /* IO permissions: */ unsigned long *io_bitmap_ptr; unsigned long iopl; /* Max allowed port in the bitmap, in bytes: */ unsigned io_bitmap_max; + + /* Floating point and extended processor state */ + struct fpu fpu; + /* + * WARNING: 'fpu' is dynamically-sized. It *MUST* be at + * the end. + */ }; /* @@ -647,14 +645,6 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr) extern void set_task_blockstep(struct task_struct *task, bool on); -/* - * from system description table in BIOS. 
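The new placement comment above ('fpu' must stay at the end of thread_struct because it is dynamically sized) relies on the usual C idiom of a run-time-sized trailing member. A minimal, self-contained analogue, with hypothetical names, is:

#include <linux/slab.h>

struct demo_state {
	unsigned long	fixed;		/* fields with a compile-time size */
	unsigned char	xstate[];	/* run-time-sized area -- must be the last member */
};

static struct demo_state *demo_alloc(size_t xstate_size)
{
	/* size the trailing area once its real size is known */
	return kzalloc(sizeof(struct demo_state) + xstate_size, GFP_KERNEL);
}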
Mostly for MCA use, but - * others may find it useful: - */ -extern unsigned int machine_id; -extern unsigned int machine_submodel_id; -extern unsigned int BIOS_revision; - /* Boot loader type from the setup header: */ extern int bootloader_type; extern int bootloader_version; @@ -716,7 +706,6 @@ static inline void spin_lock_prefetch(const void *x) #define INIT_THREAD { \ .sp0 = TOP_OF_INIT_STACK, \ - .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ } diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 5fabf1362942..6271281f947d 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -88,7 +88,6 @@ extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch, unsigned long phase1_result); extern long syscall_trace_enter(struct pt_regs *); -extern void syscall_trace_leave(struct pt_regs *); static inline unsigned long regs_return_value(struct pt_regs *regs) { diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index 628954ceede1..7a6bed5c08bc 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) static __always_inline u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src) { - u64 delta = __native_read_tsc() - src->tsc_timestamp; + u64 delta = rdtsc_ordered() - src->tsc_timestamp; return pvclock_scale_delta(delta, src->tsc_to_system_mul, src->tsc_shift); } @@ -76,13 +76,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u8 ret_flags; version = src->version; - /* Note: emulated platforms which do not advertise SSE2 support - * result in kvmclock not using the necessary RDTSC barriers. - * Without barriers, it is possible that RDTSC instruction reads from - * the time stamp counter outside rdtsc_barrier protected section - * below, resulting in violation of monotonicity. - */ - rdtsc_barrier(); + offset = pvclock_get_nsec_offset(src); ret = src->system_time + offset; ret_flags = src->flags; diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index 6fe6b182c998..9dfce4e0417d 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h @@ -57,9 +57,9 @@ struct sigcontext { unsigned long ip; unsigned long flags; unsigned short cs; - unsigned short __pad2; /* Was called gs, but was always zero. */ - unsigned short __pad1; /* Was called fs, but was always zero. 
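For reference on the pvclock arithmetic touched above: pvclock_scale_delta() converts a raw TSC delta into time units by shifting and then applying a 32.32 fixed-point multiplier. A simplified model, which ignores the wide-multiply overflow handling the real helper needs, is:

static inline u64 pvclock_scale_delta_model(u64 delta, u32 mul_frac, int shift)
{
	if (shift >= 0)
		delta <<= shift;
	else
		delta >>= -shift;

	/* mul_frac is a 32.32 fixed-point factor, so drop the low 32 bits */
	return (delta * mul_frac) >> 32;
}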
*/ - unsigned short ss; + unsigned short gs; + unsigned short fs; + unsigned short __pad0; unsigned long err; unsigned long trapno; unsigned long oldmask; diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h index 7c7c27c97daa..1f3175bb994e 100644 --- a/arch/x86/include/asm/sigframe.h +++ b/arch/x86/include/asm/sigframe.h @@ -4,6 +4,7 @@ #include <asm/sigcontext.h> #include <asm/siginfo.h> #include <asm/ucontext.h> +#include <linux/compat.h> #ifdef CONFIG_X86_32 #define sigframe_ia32 sigframe @@ -69,6 +70,15 @@ struct rt_sigframe { #ifdef CONFIG_X86_X32_ABI +struct ucontext_x32 { + unsigned int uc_flags; + unsigned int uc_link; + compat_stack_t uc_stack; + unsigned int uc__pad0; /* needed for alignment */ + struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */ + compat_sigset_t uc_sigmask; /* mask last for extensibility */ +}; + struct rt_sigframe_x32 { u64 pretcode; struct ucontext_x32 uc; diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 31eab867e6d3..c481be78fcf1 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -30,7 +30,7 @@ typedef sigset_t compat_sigset_t; #endif /* __ASSEMBLY__ */ #include <uapi/asm/signal.h> #ifndef __ASSEMBLY__ -extern void do_notify_resume(struct pt_regs *, void *, __u32); +extern void do_signal(struct pt_regs *regs); #define __ARCH_HAS_SA_RESTORER diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index c2e00bb2a136..58505f01962f 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void) * on during the bootup the random pool has true entropy too. */ get_random_bytes(&canary, sizeof(canary)); - tsc = __native_read_tsc(); + tsc = rdtsc(); canary += tsc + (tsc << 32UL); current->stack_canary = canary; diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 751bf4b7bf11..d7f3b3b78ac3 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -79,12 +79,12 @@ do { \ #else /* CONFIG_X86_32 */ /* frame pointer must be last for get_wchan */ -#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t" -#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t" +#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" +#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" #define __EXTRA_CLOBBER \ , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ - "r12", "r13", "r14", "r15", "flags" + "r12", "r13", "r14", "r15" #ifdef CONFIG_CC_STACKPROTECTOR #define __switch_canary \ @@ -100,11 +100,7 @@ do { \ #define __switch_canary_iparam #endif /* CC_STACKPROTECTOR */ -/* - * There is no need to save or restore flags, because flags are always - * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL - * has no effect. 
- */ +/* Save restore flags to clear handle leaking NT */ #define switch_to(prev, next, last) \ asm volatile(SAVE_CONTEXT \ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 592a6a672e07..91dfcafe27a6 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -37,6 +37,7 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *); asmlinkage unsigned long sys_sigreturn(void); /* kernel/vm86_32.c */ +struct vm86_struct; asmlinkage long sys_vm86old(struct vm86_struct __user *); asmlinkage long sys_vm86(unsigned long, unsigned long); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 225ee545e1a0..8afdc3e44247 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -27,14 +27,17 @@ * Without this offset, that can result in a page fault. (We are * careful that, in this case, the value we read doesn't matter.) * - * In vm86 mode, the hardware frame is much longer still, but we neither - * access the extra members from NMI context, nor do we write such a - * frame at sp0 at all. + * In vm86 mode, the hardware frame is much longer still, so add 16 + * bytes to make room for the real-mode segments. * * x86_64 has a fixed-length stack frame. */ #ifdef CONFIG_X86_32 -# define TOP_OF_KERNEL_STACK_PADDING 8 +# ifdef CONFIG_VM86 +# define TOP_OF_KERNEL_STACK_PADDING 16 +# else +# define TOP_OF_KERNEL_STACK_PADDING 8 +# endif #else # define TOP_OF_KERNEL_STACK_PADDING 0 #endif @@ -140,27 +143,11 @@ struct thread_info { _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \ _TIF_NOHZ) -/* work to do in syscall_trace_leave() */ -#define _TIF_WORK_SYSCALL_EXIT \ - (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ - _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ) - -/* work to do on interrupt/exception return */ -#define _TIF_WORK_MASK \ - (0x0000FFFF & \ - ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT| \ - _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) - /* work to do on any return to user space */ #define _TIF_ALLWORK_MASK \ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ _TIF_NOHZ) -/* Only used for 64 bit */ -#define _TIF_DO_NOTIFY_MASK \ - (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ - _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE) - /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP) diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index c5380bea2a36..c3496619740a 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -112,8 +112,8 @@ asmlinkage void smp_threshold_interrupt(void); asmlinkage void smp_deferred_error_interrupt(void); #endif -extern enum ctx_state ist_enter(struct pt_regs *regs); -extern void ist_exit(struct pt_regs *regs, enum ctx_state prev_state); +extern void ist_enter(struct pt_regs *regs); +extern void ist_exit(struct pt_regs *regs); extern void ist_begin_non_atomic(struct pt_regs *regs); extern void ist_end_non_atomic(void); diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 94605c0e9cee..6d7c5479bcea 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -21,28 +21,12 @@ extern void disable_TSC(void); static inline cycles_t get_cycles(void) { - unsigned long long ret = 0; - #ifndef CONFIG_X86_TSC if (!cpu_has_tsc) return 0; #endif - rdtscll(ret); - - return ret; -} -static __always_inline cycles_t vget_cycles(void) -{ - /* - * We 
only do VDSOs on TSC capable CPUs, so this shouldn't - * access boot_cpu_data (which is not VDSO-safe): - */ -#ifndef CONFIG_X86_TSC - if (!cpu_has_tsc) - return 0; -#endif - return (cycles_t)__native_read_tsc(); + return rdtsc(); } extern void tsc_init(void); @@ -51,6 +35,7 @@ extern int unsynchronized_tsc(void); extern int check_tsc_unstable(void); extern int check_tsc_disabled(void); extern unsigned long native_calibrate_tsc(void); +extern unsigned long long native_sched_clock_from_tsc(u64 tsc); extern int tsc_clocksource_reliable; diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h index 1d8de3f3feca..1e491f3af317 100644 --- a/arch/x86/include/asm/vm86.h +++ b/arch/x86/include/asm/vm86.h @@ -1,7 +1,6 @@ #ifndef _ASM_X86_VM86_H #define _ASM_X86_VM86_H - #include <asm/ptrace.h> #include <uapi/asm/vm86.h> @@ -28,43 +27,49 @@ struct kernel_vm86_regs { unsigned short gs, __gsh; }; -struct kernel_vm86_struct { - struct kernel_vm86_regs regs; -/* - * the below part remains on the kernel stack while we are in VM86 mode. - * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we - * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above - * 'struct kernel_vm86_regs' with the then actual values. - * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct' - * in kernelspace, hence we need not reget the data from userspace. - */ -#define VM86_TSS_ESP0 flags +struct vm86 { + struct vm86plus_struct __user *user_vm86; + struct pt_regs regs32; + unsigned long veflags; + unsigned long veflags_mask; + unsigned long saved_sp0; + unsigned long flags; unsigned long screen_bitmap; unsigned long cpu_type; struct revectored_struct int_revectored; struct revectored_struct int21_revectored; struct vm86plus_info_struct vm86plus; - struct pt_regs *regs32; /* here we save the pointer to the old regs */ -/* - * The below is not part of the structure, but the stack layout continues - * this way. In front of 'return-eip' may be some data, depending on - * compilation, so we don't rely on this and save the pointer to 'oldregs' - * in 'regs32' above. 
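With the tsc.h change above, get_cycles() is essentially a direct rdtsc() read. A typical, purely illustrative use is coarse cycle accounting around a code section; the result is in TSC cycles, not nanoseconds, and do_work() is a placeholder:

static void time_section(void)
{
	cycles_t t0, t1;

	t0 = get_cycles();
	do_work();		/* placeholder for the code being measured */
	t1 = get_cycles();
	pr_info("section took %llu cycles\n", (unsigned long long)(t1 - t0));
}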
- * However, with GCC-2.7.2 and the current CFLAGS you see exactly this: - - long return-eip; from call to vm86() - struct pt_regs oldregs; user space registers as saved by syscall - */ }; #ifdef CONFIG_VM86 void handle_vm86_fault(struct kernel_vm86_regs *, long); int handle_vm86_trap(struct kernel_vm86_regs *, long, int); -struct pt_regs *save_v86_state(struct kernel_vm86_regs *); +void save_v86_state(struct kernel_vm86_regs *, int); struct task_struct; + +#define free_vm86(t) do { \ + struct thread_struct *__t = (t); \ + if (__t->vm86 != NULL) { \ + kfree(__t->vm86); \ + __t->vm86 = NULL; \ + } \ +} while (0) + +/* + * Support for VM86 programs to request interrupts for + * real mode hardware drivers: + */ +#define FIRST_VM86_IRQ 3 +#define LAST_VM86_IRQ 15 + +static inline int invalid_vm86_irq(int irq) +{ + return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ; +} + void release_vm86_irqs(struct task_struct *); #else @@ -77,6 +82,10 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) return 0; } +static inline void save_v86_state(struct kernel_vm86_regs *a, int b) { } + +#define free_vm86(t) do { } while(0) + #endif /* CONFIG_VM86 */ #endif /* _ASM_X86_VM86_H */ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index da772edd19ab..448b7ca61aee 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -47,6 +47,7 @@ #define CPU_BASED_MOV_DR_EXITING 0x00800000 #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 #define CPU_BASED_USE_IO_BITMAPS 0x02000000 +#define CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 #define CPU_BASED_USE_MSR_BITMAPS 0x10000000 #define CPU_BASED_MONITOR_EXITING 0x20000000 #define CPU_BASED_PAUSE_EXITING 0x40000000 @@ -367,29 +368,29 @@ enum vmcs_field { #define TYPE_PHYSICAL_APIC_EVENT (10 << 12) #define TYPE_PHYSICAL_APIC_INST (15 << 12) -/* segment AR */ -#define SEGMENT_AR_L_MASK (1 << 13) - -#define AR_TYPE_ACCESSES_MASK 1 -#define AR_TYPE_READABLE_MASK (1 << 1) -#define AR_TYPE_WRITEABLE_MASK (1 << 2) -#define AR_TYPE_CODE_MASK (1 << 3) -#define AR_TYPE_MASK 0x0f -#define AR_TYPE_BUSY_64_TSS 11 -#define AR_TYPE_BUSY_32_TSS 11 -#define AR_TYPE_BUSY_16_TSS 3 -#define AR_TYPE_LDT 2 - -#define AR_UNUSABLE_MASK (1 << 16) -#define AR_S_MASK (1 << 4) -#define AR_P_MASK (1 << 7) -#define AR_L_MASK (1 << 13) -#define AR_DB_MASK (1 << 14) -#define AR_G_MASK (1 << 15) -#define AR_DPL_SHIFT 5 -#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3) - -#define AR_RESERVD_MASK 0xfffe0f00 +/* segment AR in VMCS -- these are different from what LAR reports */ +#define VMX_SEGMENT_AR_L_MASK (1 << 13) + +#define VMX_AR_TYPE_ACCESSES_MASK 1 +#define VMX_AR_TYPE_READABLE_MASK (1 << 1) +#define VMX_AR_TYPE_WRITEABLE_MASK (1 << 2) +#define VMX_AR_TYPE_CODE_MASK (1 << 3) +#define VMX_AR_TYPE_MASK 0x0f +#define VMX_AR_TYPE_BUSY_64_TSS 11 +#define VMX_AR_TYPE_BUSY_32_TSS 11 +#define VMX_AR_TYPE_BUSY_16_TSS 3 +#define VMX_AR_TYPE_LDT 2 + +#define VMX_AR_UNUSABLE_MASK (1 << 16) +#define VMX_AR_S_MASK (1 << 4) +#define VMX_AR_P_MASK (1 << 7) +#define VMX_AR_L_MASK (1 << 13) +#define VMX_AR_DB_MASK (1 << 14) +#define VMX_AR_G_MASK (1 << 15) +#define VMX_AR_DPL_SHIFT 5 +#define VMX_AR_DPL(ar) (((ar) >> VMX_AR_DPL_SHIFT) & 3) + +#define VMX_AR_RESERVD_MASK 0xfffe0f00 #define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0) #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1) diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index ab456dc233b5..329254373479 100644 --- 
a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -120,7 +120,7 @@ struct boot_params { __u8 _pad3[16]; /* 0x070 */ __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */ __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */ - struct sys_desc_table sys_desc_table; /* 0x0a0 */ + struct sys_desc_table sys_desc_table; /* obsolete! */ /* 0x0a0 */ struct olpc_ofw_header olpc_ofw_header; /* 0x0b0 */ __u32 ext_ramdisk_image; /* 0x0c0 */ __u32 ext_ramdisk_size; /* 0x0c4 */ diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index 8fba544e9cc4..f0412c50c47b 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -27,6 +27,8 @@ #define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ #define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) +/* Partition reference TSC MSR is available */ +#define HV_X64_MSR_REFERENCE_TSC_AVAILABLE (1 << 9) /* A partition's reference time stamp counter (TSC) page */ #define HV_X64_MSR_REFERENCE_TSC 0x40000021 @@ -108,6 +110,8 @@ #define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4) /* Support for a virtual guest idle state is available */ #define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5) +/* Guest crash data handler available */ +#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10) /* * Implementation recommendations. Indicates which behaviors the hypervisor diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index a4ae82eb82aa..cd54147cb365 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -354,7 +354,7 @@ struct kvm_xcrs { struct kvm_sync_regs { }; -#define KVM_QUIRK_LINT0_REENABLED (1 << 0) -#define KVM_QUIRK_CD_NW_CLEARED (1 << 1) +#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) +#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) #endif /* _ASM_X86_KVM_H */ diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index a0eab85ce7b8..76880ede9a35 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -15,7 +15,8 @@ struct mce { __u64 time; /* wall time_t when error was detected */ __u8 cpuvendor; /* cpu vendor as encoded in system.h */ __u8 inject_flags; /* software inject flags */ - __u16 pad; + __u8 severity; + __u8 usable_addr; __u32 cpuid; /* CPUID 1 EAX */ __u8 cs; /* code segment */ __u8 bank; /* machine check bank */ diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h index 180a0c3c224d..79887abcb5e1 100644 --- a/arch/x86/include/uapi/asm/processor-flags.h +++ b/arch/x86/include/uapi/asm/processor-flags.h @@ -37,8 +37,6 @@ #define X86_EFLAGS_VM _BITUL(X86_EFLAGS_VM_BIT) #define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */ #define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT) -#define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */ -#define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT) #define X86_EFLAGS_VIF_BIT 19 /* Virtual Interrupt Flag */ #define X86_EFLAGS_VIF _BITUL(X86_EFLAGS_VIF_BIT) #define X86_EFLAGS_VIP_BIT 20 /* Virtual Interrupt Pending */ diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h index 0e8a973de9ee..40836a9a7250 100644 --- a/arch/x86/include/uapi/asm/sigcontext.h +++ b/arch/x86/include/uapi/asm/sigcontext.h @@ -177,24 +177,9 @@ struct sigcontext { __u64 rip; __u64 eflags; /* RFLAGS */ __u16 cs; - - /* - * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"), - * Linux saved 
and restored fs and gs in these slots. This - * was counterproductive, as fsbase and gsbase were never - * saved, so arch_prctl was presumably unreliable. - * - * If these slots are ever needed for any other purpose, there - * is some risk that very old 64-bit binaries could get - * confused. I doubt that many such binaries still work, - * though, since the same patch in 2.5.64 also removed the - * 64-bit set_thread_area syscall, so it appears that there is - * no TLS API that works in both pre- and post-2.5.64 kernels. - */ - __u16 __pad2; /* Was gs. */ - __u16 __pad1; /* Was fs. */ - - __u16 ss; + __u16 gs; + __u16 fs; + __u16 __pad0; __u64 err; __u64 trapno; __u64 oldmask; diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index 1fe92181ee9e..37fee272618f 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -58,6 +58,7 @@ #define EXIT_REASON_INVALID_STATE 33 #define EXIT_REASON_MSR_LOAD_FAIL 34 #define EXIT_REASON_MWAIT_INSTRUCTION 36 +#define EXIT_REASON_MONITOR_TRAP_FLAG 37 #define EXIT_REASON_MONITOR_INSTRUCTION 39 #define EXIT_REASON_PAUSE_INSTRUCTION 40 #define EXIT_REASON_MCE_DURING_VMENTRY 41 @@ -106,6 +107,7 @@ { EXIT_REASON_MSR_READ, "MSR_READ" }, \ { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ + { EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \ { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \ { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0f15af41bd80..3c3622176340 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -23,8 +23,10 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n CFLAGS_irq.o := -I$(src)/../include/asm/trace obj-y := process_$(BITS).o signal.o +obj-$(CONFIG_COMPAT) += signal_compat.o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o -obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o +obj-y += time.o ioport.o dumpstack.o nmi.o +obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-y += probe_roms.o @@ -107,8 +109,6 @@ obj-$(CONFIG_EFI) += sysfb_efi.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o obj-$(CONFIG_TRACING) += tracepoint.o -obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o -obj-$(CONFIG_PMC_ATOM) += pmc_atom.o ### # 64 bit specific files diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index e49ee24da85e..75e8bad53798 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -710,7 +710,7 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) #endif } -static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) +int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu) { int cpu; @@ -726,12 +726,6 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) *pcpu = cpu; return 0; } - -/* wrapper to silence section mismatch warning */ -int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu) -{ - return _acpi_map_lsapic(handle, physid, pcpu); -} EXPORT_SYMBOL(acpi_map_cpu); int acpi_unmap_cpu(int cpu) diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index ede92c3364d3..222a57076039 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -263,7 +263,7 @@ static int apbt_clocksource_register(void) /* Verify whether apbt counter works */ t1 = 
dw_apb_clocksource_read(clocksource_apbt); - rdtscll(start); + start = rdtsc(); /* * We don't know the TSC frequency yet, but waiting for @@ -273,7 +273,7 @@ static int apbt_clocksource_register(void) */ do { rep_nop(); - rdtscll(now); + now = rdtsc(); } while ((now - start) < 200000UL); /* APBT is the only always on clocksource, it has to work! */ @@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void) old = dw_apb_clocksource_read(clocksource_apbt); old += loop; - t1 = __native_read_tsc(); + t1 = rdtsc(); do { new = dw_apb_clocksource_read(clocksource_apbt); } while (new < old); - t2 = __native_read_tsc(); + t2 = rdtsc(); shift = 5; if (unlikely(loop >> shift == 0)) { diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index dcb52850a28f..5aba9220a5ac 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta, { u64 tsc; - rdtscll(tsc); + tsc = rdtsc(); wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); return 0; } @@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev) unsigned long pm = acpi_pm_read_early(); if (cpu_has_tsc) - rdtscll(tsc); + tsc = rdtsc(); switch (lapic_cal_loops++) { case 0: @@ -1209,7 +1209,7 @@ void setup_local_APIC(void) long long max_loops = cpu_khz ? cpu_khz : 1000000; if (cpu_has_tsc) - rdtscll(tsc); + tsc = rdtsc(); if (disable_apic) { disable_ioapic_support(); @@ -1293,7 +1293,7 @@ void setup_local_APIC(void) } if (queued) { if (cpu_has_tsc && cpu_khz) { - rdtscll(ntsc); + ntsc = rdtsc(); max_loops = (cpu_khz << 10) - (ntsc - tsc); } else max_loops--; @@ -1424,7 +1424,7 @@ static inline void __x2apic_disable(void) { u64 msr; - if (cpu_has_apic) + if (!cpu_has_apic) return; rdmsrl(MSR_IA32_APICBASE, msr); @@ -1483,10 +1483,13 @@ void x2apic_setup(void) static __init void x2apic_disable(void) { - u32 x2apic_id; + u32 x2apic_id, state = x2apic_state; - if (x2apic_state != X2APIC_ON) - goto out; + x2apic_mode = 0; + x2apic_state = X2APIC_DISABLED; + + if (state != X2APIC_ON) + return; x2apic_id = read_apic_id(); if (x2apic_id >= 255) @@ -1494,9 +1497,6 @@ static __init void x2apic_disable(void) __x2apic_disable(); register_lapic_address(mp_lapic_addr); -out: - x2apic_state = X2APIC_DISABLED; - x2apic_mode = 0; } static __init void x2apic_enable(void) diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index de918c410eae..f92ab36979a2 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -191,7 +191,6 @@ static struct apic apic_flat = { .send_IPI_all = flat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, - .wait_for_init_deassert = false, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, @@ -299,7 +298,6 @@ static struct apic apic_physflat = { .send_IPI_all = physflat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, - .wait_for_init_deassert = false, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index b205cdbdbe6a..0d96749cfcac 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -152,7 +152,6 @@ struct apic apic_noop = { .wakeup_secondary_cpu = noop_wakeup_secondary_cpu, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, .read = noop_apic_read, diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c 
index 017149cded07..b548fd3b764b 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -92,7 +92,6 @@ static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v); - atomic_set(&init_deasserted, 1); return 0; } @@ -235,7 +234,6 @@ static const struct apic apic_numachip __refconst = { .send_IPI_self = numachip_send_IPI_self, .wakeup_secondary_cpu = numachip_wakeup_secondary, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, /* REMRD not supported */ .read = native_apic_mem_read, diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index c4a8d63f8220..971cf8875939 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -186,7 +186,6 @@ static struct apic apic_bigsmp = { .send_IPI_all = bigsmp_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wait_for_init_deassert = true, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 845dc0df2002..206052e55517 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info) */ if (irq < nr_legacy_irqs() && data->count == 1) { if (info->ioapic_trigger != data->trigger) - mp_register_handler(irq, data->trigger); + mp_register_handler(irq, info->ioapic_trigger); data->entry.trigger = data->trigger = info->ioapic_trigger; data->entry.polarity = data->polarity = info->ioapic_polarity; } diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index bda488680dbc..7694ae6c1199 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -111,7 +111,6 @@ static struct apic apic_default = { .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, - .wait_for_init_deassert = true, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index f813261d9740..2683f36e4e0a 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irq_data->chip = &lapic_controller; irq_data->chip_data = data; irq_data->hwirq = virq + i; - err = assign_irq_vector_policy(virq, irq_data->node, data, + err = assign_irq_vector_policy(virq + i, irq_data->node, data, info); if (err) goto error; diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index ab3219b3fbda..cc8311c4d298 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) return notifier_from_errno(err); } -static struct notifier_block __refdata x2apic_cpu_notifier = { +static struct notifier_block x2apic_cpu_notifier = { .notifier_call = update_clusterinfo, }; @@ -272,7 +272,6 @@ static struct apic apic_x2apic_cluster = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, .read = native_apic_msr_read, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 3ffd925655e0..662e9150ea6f 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ 
b/arch/x86/kernel/apic/x2apic_phys.c @@ -128,7 +128,6 @@ static struct apic apic_x2apic_phys = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, .read = native_apic_msr_read, diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index c8d92950bc04..4a139465f1d4 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -248,7 +248,6 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) APIC_DM_STARTUP; uv_write_global_mmr64(pnode, UVH_IPI_INT, val); - atomic_set(&init_deasserted, 1); return 0; } @@ -414,7 +413,6 @@ static struct apic __refdata apic_x2apic_uv_x = { .send_IPI_self = uv_send_IPI_self, .wakeup_secondary_cpu = uv_wakeup_secondary, - .wait_for_init_deassert = false, .inquire_remote_apic = NULL, .read = native_apic_msr_read, diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 927ec9235947..052c9c3026cc 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -919,7 +919,7 @@ recalc: } else if (jiffies_since_last_check > idle_period) { unsigned int idle_percentage; - idle_percentage = stime - last_stime; + idle_percentage = cputime_to_jiffies(stime - last_stime); idle_percentage *= 100; idle_percentage /= jiffies_since_last_check; use_apm_idle = (idle_percentage > idle_threshold); diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 58118e207a69..145863d4d343 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c @@ -1,4 +1,4 @@ -#include <linux/module.h> +#include <linux/init.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/workqueue.h> @@ -163,6 +163,5 @@ static int start_periodic_check_for_corruption(void) schedule_delayed_work(&bios_check_work, 0); return 0; } - -module_init(start_periodic_check_for_corruption); +device_initcall(start_periodic_check_for_corruption); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 9bff68798836..4eb065c6bed2 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -46,6 +46,8 @@ obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \ perf_event_intel_uncore_snb.o \ perf_event_intel_uncore_snbep.o \ perf_event_intel_uncore_nhmex.o +obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_msr.o +obj-$(CONFIG_CPU_SUP_AMD) += perf_event_msr.o endif diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index dd3a4baffe50..4a70fc6d400a 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -11,6 +11,7 @@ #include <asm/cpu.h> #include <asm/smp.h> #include <asm/pci-direct.h> +#include <asm/delay.h> #ifdef CONFIG_X86_64 # include <asm/mmconfig.h> @@ -114,7 +115,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) const int K6_BUG_LOOP = 1000000; int n; void (*f_vide)(void); - unsigned long d, d2; + u64 d, d2; printk(KERN_INFO "AMD K6 stepping B detected - "); @@ -125,10 +126,10 @@ static void init_amd_k6(struct cpuinfo_x86 *c) n = K6_BUG_LOOP; f_vide = vide; - rdtscl(d); + d = rdtsc(); while (n--) f_vide(); - rdtscl(d2); + d2 = rdtsc(); d = d2-d; if (d > 20*K6_BUG_LOOP) @@ -506,6 +507,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) /* A random value per boot for bit slice [12:upper_bit) */ va_align.bits = get_random_int() & va_align.mask; } + + if (cpu_has(c, X86_FEATURE_MWAITX)) + use_mwaitx_delay(); } static void early_init_amd(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/common.c 
b/arch/x86/kernel/cpu/common.c index 922c5e0cea4c..07ce52c22ec8 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -13,6 +13,7 @@ #include <linux/kgdb.h> #include <linux/smp.h> #include <linux/io.h> +#include <linux/syscore_ops.h> #include <asm/stackprotector.h> #include <asm/perf_event.h> @@ -1185,10 +1186,10 @@ void syscall_init(void) * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip. */ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); - wrmsrl(MSR_LSTAR, entry_SYSCALL_64); + wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); #ifdef CONFIG_IA32_EMULATION - wrmsrl(MSR_CSTAR, entry_SYSCALL_compat); + wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); /* * This only works on Intel CPUs. * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. @@ -1199,7 +1200,7 @@ void syscall_init(void) wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else - wrmsrl(MSR_CSTAR, ignore_sysret); + wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); @@ -1410,7 +1411,7 @@ void cpu_init(void) load_sp0(t, &current->thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); clear_all_debug_regs(); dbg_restore_debug_regs(); @@ -1459,7 +1460,7 @@ void cpu_init(void) load_sp0(t, thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); @@ -1488,3 +1489,20 @@ inline bool __static_cpu_has_safe(u16 bit) return boot_cpu_has(bit); } EXPORT_SYMBOL_GPL(__static_cpu_has_safe); + +static void bsp_resume(void) +{ + if (this_cpu->c_bsp_resume) + this_cpu->c_bsp_resume(&boot_cpu_data); +} + +static struct syscore_ops cpu_syscore_ops = { + .resume = bsp_resume, +}; + +static int __init init_cpu_syscore(void) +{ + register_syscore_ops(&cpu_syscore_ops); + return 0; +} +core_initcall(init_cpu_syscore); diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index c37dc37e8317..2584265d4745 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -13,6 +13,7 @@ struct cpu_dev { void (*c_init)(struct cpuinfo_x86 *); void (*c_identify)(struct cpuinfo_x86 *); void (*c_detect_tlb)(struct cpuinfo_x86 *); + void (*c_bsp_resume)(struct cpuinfo_x86 *); int c_x86_vendor; #ifdef CONFIG_X86_32 /* Optional vendor specific routine to obtain the cache size. */ diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 50163fa9034f..98a13db5f4be 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -371,6 +371,36 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c) } } +static void init_intel_energy_perf(struct cpuinfo_x86 *c) +{ + u64 epb; + + /* + * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized. + * (x86_energy_perf_policy(8) is available to change it at run-time.) 
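The bsp_resume()/cpu_syscore_ops hunk above uses the generic syscore mechanism to run a hook on resume from suspend. The same registration pattern in isolation looks like this; all 'mydrv' names are hypothetical:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static void mydrv_resume(void)
{
	/* re-program state that suspend/resume may have clobbered */
}

static struct syscore_ops mydrv_syscore_ops = {
	.resume	= mydrv_resume,
};

static int __init mydrv_syscore_init(void)
{
	register_syscore_ops(&mydrv_syscore_ops);
	return 0;
}
core_initcall(mydrv_syscore_init);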
+ */ + if (!cpu_has(c, X86_FEATURE_EPB)) + return; + + rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); + if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE) + return; + + pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); + pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n"); + epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; + wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); +} + +static void intel_bsp_resume(struct cpuinfo_x86 *c) +{ + /* + * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume, + * so reinitialize it properly like during bootup: + */ + init_intel_energy_perf(c); +} + static void init_intel(struct cpuinfo_x86 *c) { unsigned int l2 = 0; @@ -478,21 +508,7 @@ static void init_intel(struct cpuinfo_x86 *c) if (cpu_has(c, X86_FEATURE_VMX)) detect_vmx_virtcap(c); - /* - * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not. - * x86_energy_perf_policy(8) is available to change it at run-time - */ - if (cpu_has(c, X86_FEATURE_EPB)) { - u64 epb; - - rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); - if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) { - pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); - pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n"); - epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; - wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); - } - } + init_intel_energy_perf(c); } #ifdef CONFIG_X86_32 @@ -747,6 +763,7 @@ static const struct cpu_dev intel_cpu_dev = { .c_detect_tlb = intel_detect_tlb, .c_early_init = early_init_intel, .c_init = init_intel, + .c_bsp_resume = intel_bsp_resume, .c_x86_vendor = X86_VENDOR_INTEL, }; diff --git a/arch/x86/kernel/cpu/intel_pt.h b/arch/x86/kernel/cpu/intel_pt.h index 1c338b0eba05..336878a5d205 100644 --- a/arch/x86/kernel/cpu/intel_pt.h +++ b/arch/x86/kernel/cpu/intel_pt.h @@ -25,32 +25,11 @@ */ #define TOPA_PMI_MARGIN 512 -/* - * Table of Physical Addresses bits - */ -enum topa_sz { - TOPA_4K = 0, - TOPA_8K, - TOPA_16K, - TOPA_32K, - TOPA_64K, - TOPA_128K, - TOPA_256K, - TOPA_512K, - TOPA_1MB, - TOPA_2MB, - TOPA_4MB, - TOPA_8MB, - TOPA_16MB, - TOPA_32MB, - TOPA_64MB, - TOPA_128MB, - TOPA_SZ_END, -}; +#define TOPA_SHIFT 12 -static inline unsigned int sizes(enum topa_sz tsz) +static inline unsigned int sizes(unsigned int tsz) { - return 1 << (tsz + 12); + return 1 << (tsz + TOPA_SHIFT); }; struct topa_entry { @@ -66,20 +45,26 @@ struct topa_entry { u64 rsvd4 : 16; }; -#define TOPA_SHIFT 12 -#define PT_CPUID_LEAVES 2 +#define PT_CPUID_LEAVES 2 +#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */ enum pt_capabilities { PT_CAP_max_subleaf = 0, PT_CAP_cr3_filtering, + PT_CAP_psb_cyc, + PT_CAP_mtc, PT_CAP_topa_output, PT_CAP_topa_multiple_entries, + PT_CAP_single_range_output, PT_CAP_payloads_lip, + PT_CAP_mtc_periods, + PT_CAP_cycle_thresholds, + PT_CAP_psb_periods, }; struct pt_pmu { struct pmu pmu; - u32 caps[4 * PT_CPUID_LEAVES]; + u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; }; /** diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile index bb34b03af252..a3311c886194 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mcheck/Makefile @@ -1,4 +1,4 @@ -obj-y = mce.o mce-severity.o +obj-y = mce.o mce-severity.o mce-genpool.o obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c index a1aef9533154..34c89a3e8260 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-apei.c +++ 
b/arch/x86/kernel/cpu/mcheck/mce-apei.c @@ -57,7 +57,6 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) m.addr = mem_err->physical_addr; mce_log(&m); - mce_notify_irq(); } EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c new file mode 100644 index 000000000000..0a850100c594 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c @@ -0,0 +1,99 @@ +/* + * MCE event pool management in MCE context + * + * Copyright (C) 2015 Intel Corp. + * Author: Chen, Gong <gong.chen@linux.intel.com> + * + * This file is licensed under GPLv2. + */ +#include <linux/smp.h> +#include <linux/mm.h> +#include <linux/genalloc.h> +#include <linux/llist.h> +#include "mce-internal.h" + +/* + * printk() is not safe in MCE context. This is a lock-less memory allocator + * used to save error information organized in a lock-less list. + * + * This memory pool is only to be used to save MCE records in MCE context. + * MCE events are rare, so a fixed size memory pool should be enough. Use + * 2 pages to save MCE events for now (~80 MCE records at most). + */ +#define MCE_POOLSZ (2 * PAGE_SIZE) + +static struct gen_pool *mce_evt_pool; +static LLIST_HEAD(mce_event_llist); +static char gen_pool_buf[MCE_POOLSZ]; + +void mce_gen_pool_process(void) +{ + struct llist_node *head; + struct mce_evt_llist *node; + struct mce *mce; + + head = llist_del_all(&mce_event_llist); + if (!head) + return; + + head = llist_reverse_order(head); + llist_for_each_entry(node, head, llnode) { + mce = &node->mce; + atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce); + gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node)); + } +} + +bool mce_gen_pool_empty(void) +{ + return llist_empty(&mce_event_llist); +} + +int mce_gen_pool_add(struct mce *mce) +{ + struct mce_evt_llist *node; + + if (!mce_evt_pool) + return -EINVAL; + + node = (void *)gen_pool_alloc(mce_evt_pool, sizeof(*node)); + if (!node) { + pr_warn_ratelimited("MCE records pool full!\n"); + return -ENOMEM; + } + + memcpy(&node->mce, mce, sizeof(*mce)); + llist_add(&node->llnode, &mce_event_llist); + + return 0; +} + +static int mce_gen_pool_create(void) +{ + struct gen_pool *tmpp; + int ret = -ENOMEM; + + tmpp = gen_pool_create(ilog2(sizeof(struct mce_evt_llist)), -1); + if (!tmpp) + goto out; + + ret = gen_pool_add(tmpp, (unsigned long)gen_pool_buf, MCE_POOLSZ, -1); + if (ret) { + gen_pool_destroy(tmpp); + goto out; + } + + mce_evt_pool = tmpp; + +out: + return ret; +} + +int mce_gen_pool_init(void) +{ + /* Just init mce_gen_pool once. 
*/ + if (mce_evt_pool) + return 0; + + return mce_gen_pool_create(); +} diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index fe32074b865b..547720efd923 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -13,6 +13,8 @@ enum severity_level { MCE_PANIC_SEVERITY, }; +extern struct atomic_notifier_head x86_mce_decoder_chain; + #define ATTR_LEN 16 #define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */ @@ -24,6 +26,16 @@ struct mce_bank { char attrname[ATTR_LEN]; /* attribute name */ }; +struct mce_evt_llist { + struct llist_node llnode; + struct mce mce; +}; + +void mce_gen_pool_process(void); +bool mce_gen_pool_empty(void); +int mce_gen_pool_add(struct mce *mce); +int mce_gen_pool_init(void); + extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp); struct dentry *mce_get_debugfs_dir(void); @@ -67,3 +79,5 @@ static inline int apei_clear_mce(u64 record_id) return -EINVAL; } #endif + +void mce_inject_log(struct mce *m); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index df919ff103c3..9d014b82a124 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -52,11 +52,11 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); -#define rcu_dereference_check_mce(p) \ +#define mce_log_get_idx_check(p) \ ({ \ - rcu_lockdep_assert(rcu_read_lock_sched_held() || \ - lockdep_is_held(&mce_chrdev_read_mutex), \ - "suspicious rcu_dereference_check_mce() usage"); \ + RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ + !lockdep_is_held(&mce_chrdev_read_mutex), \ + "suspicious mce_log_get_idx_check() usage"); \ smp_load_acquire(&(p)); \ }) @@ -110,22 +110,24 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { */ mce_banks_t mce_banks_ce_disabled; -static DEFINE_PER_CPU(struct work_struct, mce_work); +static struct work_struct mce_work; +static struct irq_work mce_irq_work; static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); +static int mce_usable_address(struct mce *m); /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
*/ -static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); +ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); /* Do initial initialization of a struct mce */ void mce_setup(struct mce *m) { memset(m, 0, sizeof(struct mce)); m->cpu = m->extcpu = smp_processor_id(); - rdtscll(m->tsc); + m->tsc = rdtsc(); /* We hope get_seconds stays lockless */ m->time = get_seconds(); m->cpuvendor = boot_cpu_data.x86_vendor; @@ -157,12 +159,13 @@ void mce_log(struct mce *mce) /* Emit the trace record: */ trace_mce_record(mce); - atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce); + if (!mce_gen_pool_add(mce)) + irq_work_queue(&mce_irq_work); mce->finished = 0; wmb(); for (;;) { - entry = rcu_dereference_check_mce(mcelog.next); + entry = mce_log_get_idx_check(mcelog.next); for (;;) { /* @@ -196,48 +199,23 @@ void mce_log(struct mce *mce) set_bit(0, &mce_need_notify); } -static void drain_mcelog_buffer(void) +void mce_inject_log(struct mce *m) { - unsigned int next, i, prev = 0; - - next = ACCESS_ONCE(mcelog.next); - - do { - struct mce *m; - - /* drain what was logged during boot */ - for (i = prev; i < next; i++) { - unsigned long start = jiffies; - unsigned retries = 1; - - m = &mcelog.entry[i]; - - while (!m->finished) { - if (time_after_eq(jiffies, start + 2*retries)) - retries++; - - cpu_relax(); - - if (!m->finished && retries >= 4) { - pr_err("skipping error being logged currently!\n"); - break; - } - } - smp_rmb(); - atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); - } - - memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m)); - prev = next; - next = cmpxchg(&mcelog.next, prev, 0); - } while (next != prev); + mutex_lock(&mce_chrdev_read_mutex); + mce_log(m); + mutex_unlock(&mce_chrdev_read_mutex); } +EXPORT_SYMBOL_GPL(mce_inject_log); +static struct notifier_block mce_srao_nb; void mce_register_decode_chain(struct notifier_block *nb) { + /* Ensure SRAO notifier has the highest priority in the decode chain. */ + if (nb != &mce_srao_nb && nb->priority == INT_MAX) + nb->priority -= 1; + atomic_notifier_chain_register(&x86_mce_decoder_chain, nb); - drain_mcelog_buffer(); } EXPORT_SYMBOL_GPL(mce_register_decode_chain); @@ -461,61 +439,6 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs) } } -/* - * Simple lockless ring to communicate PFNs from the exception handler with the - * process context work function. This is vastly simplified because there's - * only a single reader and a single writer. 
- */ -#define MCE_RING_SIZE 16 /* we use one entry less */ - -struct mce_ring { - unsigned short start; - unsigned short end; - unsigned long ring[MCE_RING_SIZE]; -}; -static DEFINE_PER_CPU(struct mce_ring, mce_ring); - -/* Runs with CPU affinity in workqueue */ -static int mce_ring_empty(void) -{ - struct mce_ring *r = this_cpu_ptr(&mce_ring); - - return r->start == r->end; -} - -static int mce_ring_get(unsigned long *pfn) -{ - struct mce_ring *r; - int ret = 0; - - *pfn = 0; - get_cpu(); - r = this_cpu_ptr(&mce_ring); - if (r->start == r->end) - goto out; - *pfn = r->ring[r->start]; - r->start = (r->start + 1) % MCE_RING_SIZE; - ret = 1; -out: - put_cpu(); - return ret; -} - -/* Always runs in MCE context with preempt off */ -static int mce_ring_add(unsigned long pfn) -{ - struct mce_ring *r = this_cpu_ptr(&mce_ring); - unsigned next; - - next = (r->end + 1) % MCE_RING_SIZE; - if (next == r->start) - return -1; - r->ring[r->end] = pfn; - wmb(); - r->end = next; - return 0; -} - int mce_available(struct cpuinfo_x86 *c) { if (mca_cfg.disabled) @@ -525,12 +448,10 @@ int mce_available(struct cpuinfo_x86 *c) static void mce_schedule_work(void) { - if (!mce_ring_empty()) - schedule_work(this_cpu_ptr(&mce_work)); + if (!mce_gen_pool_empty() && keventd_up()) + schedule_work(&mce_work); } -static DEFINE_PER_CPU(struct irq_work, mce_irq_work); - static void mce_irq_work_cb(struct irq_work *entry) { mce_notify_irq(); @@ -551,8 +472,29 @@ static void mce_report_event(struct pt_regs *regs) return; } - irq_work_queue(this_cpu_ptr(&mce_irq_work)); + irq_work_queue(&mce_irq_work); +} + +static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct mce *mce = (struct mce *)data; + unsigned long pfn; + + if (!mce) + return NOTIFY_DONE; + + if (mce->usable_addr && (mce->severity == MCE_AO_SEVERITY)) { + pfn = mce->addr >> PAGE_SHIFT; + memory_failure(pfn, MCE_VECTOR, 0); + } + + return NOTIFY_OK; } +static struct notifier_block mce_srao_nb = { + .notifier_call = srao_decode_notifier, + .priority = INT_MAX, +}; /* * Read ADDR and MISC registers. @@ -672,8 +614,11 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) */ if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) { if (m.status & MCI_STATUS_ADDRV) { - mce_ring_add(m.addr >> PAGE_SHIFT); - mce_schedule_work(); + m.severity = severity; + m.usable_addr = mce_usable_address(&m); + + if (!mce_gen_pool_add(&m)) + mce_schedule_work(); } } @@ -1029,7 +974,6 @@ void do_machine_check(struct pt_regs *regs, long error_code) { struct mca_config *cfg = &mca_cfg; struct mce m, *final; - enum ctx_state prev_state; int i; int worst = 0; int severity; @@ -1055,7 +999,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) int flags = MF_ACTION_REQUIRED; int lmce = 0; - prev_state = ist_enter(regs); + ist_enter(regs); this_cpu_inc(mce_exception_count); @@ -1143,15 +1087,9 @@ void do_machine_check(struct pt_regs *regs, long error_code) mce_read_aux(&m, i); - /* - * Action optional error. Queue address for later processing. - * When the ring overflows we just ignore the AO error. - * RED-PEN add some logging mechanism when - * usable_address or mce_add_ring fails. 
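The per-CPU ring buffer removed above is replaced by the gen_pool-backed lock-less list added in mce-genpool.c. Stripped of the MCE specifics, the llist producer/consumer pattern it relies on looks like this; struct my_evt and process_payload() are hypothetical:

#include <linux/llist.h>

struct my_evt {
	struct llist_node	llnode;
	unsigned long		payload;
};

static LLIST_HEAD(my_evt_list);

/* producer: safe from NMI/#MC context, takes no locks */
static void my_evt_report(struct my_evt *e)
{
	llist_add(&e->llnode, &my_evt_list);
}

/* consumer: runs later in process context, e.g. from a workqueue */
static void my_evt_drain(void)
{
	struct llist_node *head = llist_del_all(&my_evt_list);
	struct my_evt *e;

	head = llist_reverse_order(head);	/* restore arrival (FIFO) order */
	llist_for_each_entry(e, head, llnode)
		process_payload(e->payload);	/* hypothetical per-event handler */
}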
- * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0 - */ - if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) - mce_ring_add(m.addr >> PAGE_SHIFT); + /* assuming valid severity level != 0 */ + m.severity = severity; + m.usable_addr = mce_usable_address(&m); mce_log(&m); @@ -1227,7 +1165,7 @@ out: local_irq_disable(); ist_end_non_atomic(); done: - ist_exit(regs, prev_state); + ist_exit(regs); } EXPORT_SYMBOL_GPL(do_machine_check); @@ -1247,14 +1185,11 @@ int memory_failure(unsigned long pfn, int vector, int flags) /* * Action optional processing happens here (picking up * from the list of faulting pages that do_machine_check() - * placed into the "ring"). + * placed into the genpool). */ static void mce_process_work(struct work_struct *dummy) { - unsigned long pfn; - - while (mce_ring_get(&pfn)) - memory_failure(pfn, MCE_VECTOR, 0); + mce_gen_pool_process(); } #ifdef CONFIG_X86_MCE_INTEL @@ -1678,6 +1613,17 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) } } +static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) +{ + switch (c->x86_vendor) { + case X86_VENDOR_INTEL: + mce_intel_feature_clear(c); + break; + default: + break; + } +} + static void mce_start_timer(unsigned int cpu, struct timer_list *t) { unsigned long iv = check_interval * HZ; @@ -1731,13 +1677,36 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) return; } + if (mce_gen_pool_init()) { + mca_cfg.disabled = true; + pr_emerg("Couldn't allocate MCE records pool!\n"); + return; + } + machine_check_vector = do_machine_check; __mcheck_cpu_init_generic(); __mcheck_cpu_init_vendor(c); __mcheck_cpu_init_timer(); - INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work); - init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb); +} + +/* + * Called for each booted CPU to clear some machine checks opt-ins + */ +void mcheck_cpu_clear(struct cpuinfo_x86 *c) +{ + if (mca_cfg.disabled) + return; + + if (!mce_available(c)) + return; + + /* + * Possibly to clear general settings generic to x86 + * __mcheck_cpu_clear_generic(c); + */ + __mcheck_cpu_clear_vendor(c); + } /* @@ -1784,7 +1753,7 @@ static void collect_tscs(void *data) { unsigned long *cpu_tsc = (unsigned long *)data; - rdtscll(cpu_tsc[smp_processor_id()]); + cpu_tsc[smp_processor_id()] = rdtsc(); } static int mce_apei_read_done; @@ -1850,7 +1819,7 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf, goto out; } - next = rcu_dereference_check_mce(mcelog.next); + next = mce_log_get_idx_check(mcelog.next); /* Only supports full reads right now */ err = -EINVAL; @@ -2056,8 +2025,12 @@ __setup("mce", mcheck_enable); int __init mcheck_init(void) { mcheck_intel_therm_init(); + mce_register_decode_chain(&mce_srao_nb); mcheck_vendor_init_severity(); + INIT_WORK(&mce_work, mce_process_work); + init_irq_work(&mce_irq_work, mce_irq_work_cb); + return 0; } @@ -2591,5 +2564,20 @@ static int __init mcheck_debugfs_init(void) return 0; } -late_initcall(mcheck_debugfs_init); +#else +static int __init mcheck_debugfs_init(void) { return -EINVAL; } #endif + +static int __init mcheck_late_init(void) +{ + mcheck_debugfs_init(); + + /* + * Flush out everything that has been logged during early boot, now that + * everything has been initialized (workqueues, decoders, ...). 
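For context on the initcall levels used in these hunks (init_cpu_syscore() registered at core_initcall(), check.c moving to device_initcall(), and mcheck_late_init() above being registered as a late_initcall()), the relevant ordering runs earliest first:

/*
 * core_initcall()      e.g. init_cpu_syscore()
 * postcore_initcall()
 * arch_initcall()
 * subsys_initcall()
 * fs_initcall()
 * device_initcall()    what module_init() maps to for built-in code
 * late_initcall()      e.g. mcheck_late_init(), once workqueues and decoders exist
 */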
+ */ + mce_schedule_work(); + + return 0; +} +late_initcall(mcheck_late_init); diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 844f56c5616d..1e8bb6c94f14 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -146,6 +146,27 @@ void mce_intel_hcpu_update(unsigned long cpu) per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; } +static void cmci_toggle_interrupt_mode(bool on) +{ + unsigned long flags, *owned; + int bank; + u64 val; + + raw_spin_lock_irqsave(&cmci_discover_lock, flags); + owned = this_cpu_ptr(mce_banks_owned); + for_each_set_bit(bank, owned, MAX_NR_BANKS) { + rdmsrl(MSR_IA32_MCx_CTL2(bank), val); + + if (on) + val |= MCI_CTL2_CMCI_EN; + else + val &= ~MCI_CTL2_CMCI_EN; + + wrmsrl(MSR_IA32_MCx_CTL2(bank), val); + } + raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); +} + unsigned long cmci_intel_adjust_timer(unsigned long interval) { if ((this_cpu_read(cmci_backoff_cnt) > 0) && @@ -175,7 +196,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval) */ if (!atomic_read(&cmci_storm_on_cpus)) { __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE); - cmci_reenable(); + cmci_toggle_interrupt_mode(true); cmci_recheck(); } return CMCI_POLL_INTERVAL; @@ -186,22 +207,6 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval) } } -static void cmci_storm_disable_banks(void) -{ - unsigned long flags, *owned; - int bank; - u64 val; - - raw_spin_lock_irqsave(&cmci_discover_lock, flags); - owned = this_cpu_ptr(mce_banks_owned); - for_each_set_bit(bank, owned, MAX_NR_BANKS) { - rdmsrl(MSR_IA32_MCx_CTL2(bank), val); - val &= ~MCI_CTL2_CMCI_EN; - wrmsrl(MSR_IA32_MCx_CTL2(bank), val); - } - raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); -} - static bool cmci_storm_detect(void) { unsigned int cnt = __this_cpu_read(cmci_storm_cnt); @@ -223,7 +228,7 @@ static bool cmci_storm_detect(void) if (cnt <= CMCI_STORM_THRESHOLD) return false; - cmci_storm_disable_banks(); + cmci_toggle_interrupt_mode(false); __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE); r = atomic_add_return(1, &cmci_storm_on_cpus); mce_timer_kick(CMCI_STORM_INTERVAL); @@ -246,7 +251,6 @@ static void intel_threshold_interrupt(void) return; machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); - mce_notify_irq(); } /* @@ -435,7 +439,7 @@ static void intel_init_cmci(void) cmci_recheck(); } -void intel_init_lmce(void) +static void intel_init_lmce(void) { u64 val; @@ -448,9 +452,26 @@ void intel_init_lmce(void) wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN); } +static void intel_clear_lmce(void) +{ + u64 val; + + if (!lmce_supported()) + return; + + rdmsrl(MSR_IA32_MCG_EXT_CTL, val); + val &= ~MCG_EXT_CTL_LMCE_EN; + wrmsrl(MSR_IA32_MCG_EXT_CTL, val); +} + void mce_intel_feature_init(struct cpuinfo_x86 *c) { intel_init_thermal(c); intel_init_cmci(); intel_init_lmce(); } + +void mce_intel_feature_clear(struct cpuinfo_x86 *c) +{ + intel_clear_lmce(); +} diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index 737b0ad4e61a..12402e10aeff 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c @@ -19,10 +19,9 @@ int mce_p5_enabled __read_mostly; /* Machine check handler for Pentium class Intel CPUs: */ static void pentium_machine_check(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; u32 loaddr, hi, lotype; - prev_state = ist_enter(regs); + ist_enter(regs); rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); rdmsr(MSR_IA32_P5_MC_TYPE, 
lotype, hi); @@ -39,7 +38,7 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code) add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - ist_exit(regs, prev_state); + ist_exit(regs); } /* Set up machine check reporting for processors with Intel style MCE: */ diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index 44f138296fbe..01dd8702880b 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c @@ -15,12 +15,12 @@ /* Machine check handler for WinChip C6: */ static void winchip_machine_check(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state = ist_enter(regs); + ist_enter(regs); printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - ist_exit(regs, prev_state); + ist_exit(regs); } /* Set up machine check reporting on the Winchip C6 series */ diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 6236a54a63f4..9e3f3c7dd5d7 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -377,17 +377,16 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif) return err; } -static int mc_device_remove(struct device *dev, struct subsys_interface *sif) +static void mc_device_remove(struct device *dev, struct subsys_interface *sif) { int cpu = dev->id; if (!cpu_online(cpu)) - return 0; + return; pr_debug("CPU%d removed\n", cpu); microcode_fini_cpu(cpu); sysfs_remove_group(&dev->kobj, &mc_attr_group); - return 0; } static struct subsys_interface mc_cpu_interface = { @@ -460,7 +459,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) return NOTIFY_OK; } -static struct notifier_block __refdata mc_cpu_notifier = { +static struct notifier_block mc_cpu_notifier = { .notifier_call = mc_cpu_callback, }; diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c index 8187b7247d1c..37ea89c11520 100644 --- a/arch/x86/kernel/cpu/microcode/intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c @@ -390,7 +390,7 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci) } #ifdef DEBUG -static void __ref show_saved_mc(void) +static void show_saved_mc(void) { int i, j; unsigned int sig, pf, rev, total_size, data_size, date; diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index aad4bd84b475..381c8b9b3a33 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -18,6 +18,7 @@ #include <linux/efi.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/kexec.h> #include <asm/processor.h> #include <asm/hypervisor.h> #include <asm/hyperv.h> @@ -28,10 +29,14 @@ #include <asm/i8259.h> #include <asm/apic.h> #include <asm/timer.h> +#include <asm/reboot.h> struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); +static void (*hv_kexec_handler)(void); +static void (*hv_crash_handler)(struct pt_regs *regs); + #if IS_ENABLED(CONFIG_HYPERV) static void (*vmbus_handler)(void); @@ -67,8 +72,47 @@ void hv_remove_vmbus_irq(void) } EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq); EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq); + +void hv_setup_kexec_handler(void (*handler)(void)) +{ + hv_kexec_handler = handler; +} +EXPORT_SYMBOL_GPL(hv_setup_kexec_handler); + +void hv_remove_kexec_handler(void) +{ + hv_kexec_handler = NULL; +} +EXPORT_SYMBOL_GPL(hv_remove_kexec_handler); + +void hv_setup_crash_handler(void 
(*handler)(struct pt_regs *regs)) +{ + hv_crash_handler = handler; +} +EXPORT_SYMBOL_GPL(hv_setup_crash_handler); + +void hv_remove_crash_handler(void) +{ + hv_crash_handler = NULL; +} +EXPORT_SYMBOL_GPL(hv_remove_crash_handler); #endif +static void hv_machine_shutdown(void) +{ + if (kexec_in_progress && hv_kexec_handler) + hv_kexec_handler(); + native_machine_shutdown(); +} + +static void hv_machine_crash_shutdown(struct pt_regs *regs) +{ + if (hv_crash_handler) + hv_crash_handler(regs); + native_machine_crash_shutdown(regs); +} + + static uint32_t __init ms_hyperv_platform(void) { u32 eax; @@ -114,6 +158,7 @@ static void __init ms_hyperv_init_platform(void) * Extract the features and hints */ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); + ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", @@ -141,6 +186,9 @@ static void __init ms_hyperv_init_platform(void) no_timer_check = 1; #endif + machine_ops.shutdown = hv_machine_shutdown; + machine_ops.crash_shutdown = hv_machine_crash_shutdown; + mark_tsc_unstable("running on Hyper-V"); } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index e7ed0d8ebacb..f891b4750f04 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -448,7 +448,6 @@ int mtrr_add(unsigned long base, unsigned long size, unsigned int type, return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, increment); } -EXPORT_SYMBOL(mtrr_add); /** * mtrr_del_page - delete a memory type region @@ -537,7 +536,6 @@ int mtrr_del(int reg, unsigned long base, unsigned long size) return -EINVAL; return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); } -EXPORT_SYMBOL(mtrr_del); /** * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3658de47900f..66dd3fe99b82 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1551,7 +1551,7 @@ static void __init filter_events(struct attribute **attrs) } /* Merge two pointer arrays */ -static __init struct attribute **merge_attr(struct attribute **a, struct attribute **b) +__init struct attribute **merge_attr(struct attribute **a, struct attribute **b) { struct attribute **new; int j, i; @@ -2179,24 +2179,32 @@ static unsigned long get_segment_base(unsigned int segment) int idx = segment >> 3; if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { +#ifdef CONFIG_MODIFY_LDT_SYSCALL + struct ldt_struct *ldt; + if (idx > LDT_ENTRIES) return 0; - if (idx > current->active_mm->context.size) + /* IRQs are off, so this synchronizes with smp_store_release */ + ldt = lockless_dereference(current->active_mm->context.ldt); + if (!ldt || idx > ldt->size) return 0; - desc = current->active_mm->context.ldt; + desc = &ldt->entries[idx]; +#else + return 0; +#endif } else { if (idx > GDT_ENTRIES) return 0; - desc = raw_cpu_ptr(gdt_page.gdt); + desc = raw_cpu_ptr(gdt_page.gdt) + idx; } - return get_desc_base(desc + idx); + return get_desc_base(desc); } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_IA32_EMULATION #include <asm/compat.h> diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 3e7fd27dfe20..5edf6d868fc1 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -165,7 +165,7 @@ struct 
intel_excl_cntrs { unsigned core_id; /* per-core: core id */ }; -#define MAX_LBR_ENTRIES 16 +#define MAX_LBR_ENTRIES 32 enum { X86_PERF_KFREE_SHARED = 0, @@ -594,6 +594,7 @@ struct x86_pmu { struct event_constraint *pebs_constraints; void (*pebs_aliases)(struct perf_event *event); int max_pebs_events; + unsigned long free_running_flags; /* * Intel LBR @@ -624,6 +625,7 @@ struct x86_pmu { struct x86_perf_task_context { u64 lbr_from[MAX_LBR_ENTRIES]; u64 lbr_to[MAX_LBR_ENTRIES]; + u64 lbr_info[MAX_LBR_ENTRIES]; int lbr_callstack_users; int lbr_stack_state; }; @@ -793,6 +795,8 @@ static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event); ssize_t intel_event_sysfs_show(char *page, u64 config); +struct attribute **merge_attr(struct attribute **a, struct attribute **b); + #ifdef CONFIG_CPU_SUP_AMD int amd_pmu_init(void); @@ -808,20 +812,6 @@ static inline int amd_pmu_init(void) #ifdef CONFIG_CPU_SUP_INTEL -static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) -{ - /* user explicitly requested branch sampling */ - if (has_branch_stack(event)) - return true; - - /* implicit branch sampling to correct PEBS skid */ - if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 && - x86_pmu.intel_cap.pebs_format < 2) - return true; - - return false; -} - static inline bool intel_pmu_has_bts(struct perf_event *event) { if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && @@ -873,6 +863,8 @@ extern struct event_constraint intel_ivb_pebs_event_constraints[]; extern struct event_constraint intel_hsw_pebs_event_constraints[]; +extern struct event_constraint intel_skl_pebs_event_constraints[]; + struct event_constraint *intel_pebs_constraints(struct perf_event *event); void intel_pmu_pebs_enable(struct perf_event *event); @@ -911,6 +903,8 @@ void intel_pmu_lbr_init_snb(void); void intel_pmu_lbr_init_hsw(void); +void intel_pmu_lbr_init_skl(void); + int intel_pmu_setup_lbr_filter(struct perf_event *event); void intel_pt_interrupt(void); @@ -934,6 +928,7 @@ static inline int is_ht_workaround_enabled(void) { return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED); } + #else /* CONFIG_CPU_SUP_INTEL */ static inline void reserve_ds_buffers(void) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index b9826a981fb2..3f124d553c5a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -177,6 +177,14 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; +struct event_constraint intel_skl_event_constraints[] = { + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ + EVENT_CONSTRAINT_END +}; + static struct extra_reg intel_snb_extra_regs[] __read_mostly = { /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), @@ -193,6 +201,13 @@ static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; +static struct extra_reg intel_skl_extra_regs[] __read_mostly = { + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + EVENT_EXTRA_END +}; + 
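The OFFCORE_RSP extra registers added just above only accept the request/response bits named in their valid mask; a config that sets anything outside that mask has to be rejected before the MSR is programmed (the same test shows up later in this series in intel_alt_er()). A minimal, self-contained sketch of that gating check in plain C — the struct and helper names here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for one extra (offcore response) register. */
struct extra_reg_desc {
	uint64_t valid_mask;	/* bits a user-supplied config may legally set */
};

/* Reject any config that sets bits outside the register's valid mask. */
static bool extra_reg_config_ok(const struct extra_reg_desc *er, uint64_t config)
{
	return (config & ~er->valid_mask) == 0;
}

int main(void)
{
	struct extra_reg_desc rsp0 = { .valid_mask = 0x3fffff8fffULL };

	printf("%d\n", extra_reg_config_ok(&rsp0, 0x10001ULL)); /* 1: bits 0 and 16 are in the mask */
	printf("%d\n", extra_reg_config_ok(&rsp0, 1ULL << 62)); /* 0: bit 62 is outside the mask */
	return 0;
}

The point of keeping the test as a single mask comparison is that adding a new supported bit only means widening valid_mask, not touching the validation path.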
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2"); @@ -244,6 +259,200 @@ static u64 intel_pmu_event_map(int hw_event) return intel_perfmon_event_map[hw_event]; } +/* + * Notes on the events: + * - data reads do not include code reads (comparable to earlier tables) + * - data counts include speculative execution (except L1 write, dtlb, bpu) + * - remote node access includes remote memory, remote cache, remote mmio. + * - prefetches are not included in the counts. + * - icache miss does not include decoded icache + */ + +#define SKL_DEMAND_DATA_RD BIT_ULL(0) +#define SKL_DEMAND_RFO BIT_ULL(1) +#define SKL_ANY_RESPONSE BIT_ULL(16) +#define SKL_SUPPLIER_NONE BIT_ULL(17) +#define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26) +#define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27) +#define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28) +#define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29) +#define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \ + SKL_L3_MISS_REMOTE_HOP0_DRAM| \ + SKL_L3_MISS_REMOTE_HOP1_DRAM| \ + SKL_L3_MISS_REMOTE_HOP2P_DRAM) +#define SKL_SPL_HIT BIT_ULL(30) +#define SKL_SNOOP_NONE BIT_ULL(31) +#define SKL_SNOOP_NOT_NEEDED BIT_ULL(32) +#define SKL_SNOOP_MISS BIT_ULL(33) +#define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34) +#define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35) +#define SKL_SNOOP_HITM BIT_ULL(36) +#define SKL_SNOOP_NON_DRAM BIT_ULL(37) +#define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \ + SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ + SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ + SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM) +#define SKL_DEMAND_READ SKL_DEMAND_DATA_RD +#define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \ + SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ + SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ + SKL_SNOOP_HITM|SKL_SPL_HIT) +#define SKL_DEMAND_WRITE SKL_DEMAND_RFO +#define SKL_LLC_ACCESS SKL_ANY_RESPONSE +#define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \ + SKL_L3_MISS_REMOTE_HOP1_DRAM| \ + SKL_L3_MISS_REMOTE_HOP2P_DRAM) + +static __initconst const u64 skl_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ + [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ + [ C(RESULT_MISS) ] = 0x0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(L1I ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ + [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x82d0, /* 
MEM_INST_RETIRED.ALL_STORES */ + [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */ + [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ + [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, +}; + +static __initconst const u64 skl_hw_cache_extra_regs + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| + SKL_LLC_ACCESS|SKL_ANY_SNOOP, + [ C(RESULT_MISS) ] = SKL_DEMAND_READ| + SKL_L3_MISS|SKL_ANY_SNOOP| + SKL_SUPPLIER_NONE, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| + SKL_LLC_ACCESS|SKL_ANY_SNOOP, + [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE| + SKL_L3_MISS|SKL_ANY_SNOOP| + SKL_SUPPLIER_NONE, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| + SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, + [ C(RESULT_MISS) ] = SKL_DEMAND_READ| + SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| + SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, + [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE| + SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x0, + [ C(RESULT_MISS) ] = 0x0, + }, + }, +}; + #define SNB_DMND_DATA_RD (1ULL << 0) #define SNB_DMND_RFO (1ULL << 1) #define SNB_DMND_IFETCH (1ULL << 2) @@ -1114,7 +1323,7 @@ static struct extra_reg intel_slm_extra_regs[] __read_mostly = { /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0), - INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1), + INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1), EVENT_EXTRA_END }; @@ -1594,6 +1803,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) loops = 0; again: + intel_pmu_lbr_read(); intel_pmu_ack_status(status); if (++loops > 100) { static bool warned = false; @@ -1608,16 +1818,16 @@ again: inc_irq_stat(apic_perf_irqs); - intel_pmu_lbr_read(); /* - * CondChgd bit 63 doesn't mean any overflow status. Ignore - * and clear the bit. + * Ignore a range of extra bits in status that do not indicate + * overflow by themselves. 
*/ - if (__test_and_clear_bit(63, (unsigned long *)&status)) { - if (!status) - goto done; - } + status &= ~(GLOBAL_STATUS_COND_CHG | + GLOBAL_STATUS_ASIF | + GLOBAL_STATUS_LBRS_FROZEN); + if (!status) + goto done; /* * PEBS overflow sets bit 62 in the global status register @@ -1699,18 +1909,22 @@ intel_bts_constraints(struct perf_event *event) return NULL; } -static int intel_alt_er(int idx) +static int intel_alt_er(int idx, u64 config) { + int alt_idx; if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) return idx; if (idx == EXTRA_REG_RSP_0) - return EXTRA_REG_RSP_1; + alt_idx = EXTRA_REG_RSP_1; if (idx == EXTRA_REG_RSP_1) - return EXTRA_REG_RSP_0; + alt_idx = EXTRA_REG_RSP_0; + + if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask) + return idx; - return idx; + return alt_idx; } static void intel_fixup_er(struct perf_event *event, int idx) @@ -1799,7 +2013,7 @@ again: */ c = NULL; } else { - idx = intel_alt_er(idx); + idx = intel_alt_er(idx, reg->config); if (idx != reg->idx) { raw_spin_unlock_irqrestore(&era->lock, flags); goto again; @@ -2253,6 +2467,15 @@ static void intel_pebs_aliases_snb(struct perf_event *event) } } +static unsigned long intel_pmu_free_running_flags(struct perf_event *event) +{ + unsigned long flags = x86_pmu.free_running_flags; + + if (event->attr.use_clockid) + flags &= ~PERF_SAMPLE_TIME; + return flags; +} + static int intel_pmu_hw_config(struct perf_event *event) { int ret = x86_pmu_hw_config(event); @@ -2263,7 +2486,8 @@ static int intel_pmu_hw_config(struct perf_event *event) if (event->attr.precise_ip) { if (!event->attr.freq) { event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; - if (!(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS)) + if (!(event->attr.sample_type & + ~intel_pmu_free_running_flags(event))) event->hw.flags |= PERF_X86_EVENT_FREERUNNING; } if (x86_pmu.pebs_aliases) @@ -2534,7 +2758,7 @@ static int intel_pmu_cpu_prepare(int cpu) if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { cpuc->shared_regs = allocate_shared_regs(cpu); if (!cpuc->shared_regs) - return NOTIFY_BAD; + goto err; } if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { @@ -2542,18 +2766,27 @@ static int intel_pmu_cpu_prepare(int cpu) cpuc->constraint_list = kzalloc(sz, GFP_KERNEL); if (!cpuc->constraint_list) - return NOTIFY_BAD; + goto err_shared_regs; cpuc->excl_cntrs = allocate_excl_cntrs(cpu); - if (!cpuc->excl_cntrs) { - kfree(cpuc->constraint_list); - kfree(cpuc->shared_regs); - return NOTIFY_BAD; - } + if (!cpuc->excl_cntrs) + goto err_constraint_list; + cpuc->excl_thread_id = 0; } return NOTIFY_OK; + +err_constraint_list: + kfree(cpuc->constraint_list); + cpuc->constraint_list = NULL; + +err_shared_regs: + kfree(cpuc->shared_regs); + cpuc->shared_regs = NULL; + +err: + return NOTIFY_BAD; } static void intel_pmu_cpu_starting(int cpu) @@ -2685,6 +2918,8 @@ static __initconst const struct x86_pmu core_pmu = { .event_map = intel_pmu_event_map, .max_events = ARRAY_SIZE(intel_perfmon_event_map), .apic = 1, + .free_running_flags = PEBS_FREERUNNING_FLAGS, + /* * Intel PMCs cannot be accessed sanely above 32-bit width, * so we install an artificial 1<<31 period regardless of @@ -2723,6 +2958,7 @@ static __initconst const struct x86_pmu intel_pmu = { .event_map = intel_pmu_event_map, .max_events = ARRAY_SIZE(intel_perfmon_event_map), .apic = 1, + .free_running_flags = PEBS_FREERUNNING_FLAGS, /* * Intel PMCs cannot be accessed sanely above 32 bit width, * so we install an artificial 1<<31 period regardless of @@ -3260,6 +3496,29 @@ __init int intel_pmu_init(void) pr_cont("Broadwell events, "); break; 
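The intel_pmu_free_running_flags() helper and the hw_config check added earlier in this file's changes boil down to one rule: an event may use free-running PEBS (no PMI per sample) only if every requested sample_type bit is in the PMU's free_running_flags set, with PERF_SAMPLE_TIME dropped from that set when the user asked for a non-default clock. A small self-contained sketch of that eligibility test; the flag values and struct below are made up for illustration and do not match the real perf ABI:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sample-type bits; the real PERF_SAMPLE_* values differ. */
#define SAMPLE_IP		(1u << 0)
#define SAMPLE_TIME		(1u << 1)
#define SAMPLE_ADDR		(1u << 2)
#define SAMPLE_CALLCHAIN	(1u << 3)

struct fake_event {
	uint32_t sample_type;
	bool use_clockid;	/* user asked for a non-default clock */
};

/* Free-running is allowed only if all requested bits fit in 'allowed'. */
static bool can_use_free_running(const struct fake_event *ev, uint32_t allowed)
{
	if (ev->use_clockid)
		allowed &= ~SAMPLE_TIME;	/* timestamp must come from the interrupt path */
	return (ev->sample_type & ~allowed) == 0;
}

int main(void)
{
	uint32_t allowed = SAMPLE_IP | SAMPLE_TIME | SAMPLE_ADDR;
	struct fake_event a = { SAMPLE_IP | SAMPLE_TIME, false };
	struct fake_event b = { SAMPLE_IP | SAMPLE_TIME, true };
	struct fake_event c = { SAMPLE_CALLCHAIN, false };

	printf("%d %d %d\n",
	       can_use_free_running(&a, allowed),	/* 1 */
	       can_use_free_running(&b, allowed),	/* 0: TIME not free-running with a clockid */
	       can_use_free_running(&c, allowed));	/* 0: callchain needs the PMI */
	return 0;
}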
+ case 78: /* 14nm Skylake Mobile */ + case 94: /* 14nm Skylake Desktop */ + x86_pmu.late_ack = true; + memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + intel_pmu_lbr_init_skl(); + + x86_pmu.event_constraints = intel_skl_event_constraints; + x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints; + x86_pmu.extra_regs = intel_skl_extra_regs; + x86_pmu.pebs_aliases = intel_pebs_aliases_snb; + /* all extra regs are per-cpu when HT is on */ + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_NO_HT_SHARING; + + x86_pmu.hw_config = hsw_hw_config; + x86_pmu.get_event_constraints = hsw_get_event_constraints; + x86_pmu.cpu_events = hsw_events_attrs; + WARN_ON(!x86_pmu.format_attrs); + x86_pmu.cpu_events = hsw_events_attrs; + pr_cont("Skylake events, "); + break; + default: switch (x86_pmu.version) { case 1: @@ -3329,7 +3588,7 @@ __init int intel_pmu_init(void) */ if (x86_pmu.extra_regs) { for (er = x86_pmu.extra_regs; er->msr; er++) { - er->extra_msr_access = check_msr(er->msr, 0x1ffUL); + er->extra_msr_access = check_msr(er->msr, 0x11UL); /* Disable LBR select mapping */ if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) x86_pmu.lbr_sel_map = NULL; diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c index 43dd672d788b..54690e885759 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_bts.c +++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c @@ -62,9 +62,6 @@ struct bts_buffer { struct pmu bts_pmu; -void intel_pmu_enable_bts(u64 config); -void intel_pmu_disable_bts(void); - static size_t buf_size(struct page *page) { return 1 << (PAGE_SHIFT + page_private(page)); diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c index 188076161c1b..377e8f8ed391 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c +++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c @@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event) return 0; /* + * Getting up-to-date values requires an SMP IPI which is not + * possible if we're being called in interrupt context. Return + * the cached values instead. + */ + if (unlikely(in_interrupt())) + goto out; + + /* * Notice that we don't perform the reading of an RMID * atomically, because we can't hold a spin lock across the * IPIs. 
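The CQM comment above describes a common fallback pattern: when the caller's context forbids the expensive refresh (here, an SMP IPI cannot be sent from interrupt context), the count is served from the last cached value instead. A self-contained sketch of that pattern, with a plain boolean standing in for the kernel's in_interrupt() test and a dummy function standing in for the cross-CPU read:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cached_counter {
	uint64_t cached;	/* last value obtained by the slow path */
};

/* Stand-in for the expensive cross-CPU read (an IPI in the kernel). */
static uint64_t slow_remote_read(void)
{
	return 42;	/* pretend another CPU reported its counter */
}

/*
 * Return a fresh value when the context allows the expensive read,
 * otherwise fall back to the cached value.
 */
static uint64_t read_count(struct cached_counter *c, bool in_irq_context)
{
	if (in_irq_context)
		return c->cached;	/* cannot block or IPI from here */

	c->cached = slow_remote_read();
	return c->cached;
}

int main(void)
{
	struct cached_counter c = { .cached = 0 };

	printf("%llu\n", (unsigned long long)read_count(&c, false)); /* 42: refreshed */
	printf("%llu\n", (unsigned long long)read_count(&c, true));  /* 42: served from cache */
	return 0;
}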
@@ -1247,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu) cpumask_set_cpu(cpu, &cqm_cpumask); } -static void intel_cqm_cpu_prepare(unsigned int cpu) +static void intel_cqm_cpu_starting(unsigned int cpu) { struct intel_pqr_state *state = &per_cpu(pqr_state, cpu); struct cpuinfo_x86 *c = &cpu_data(cpu); @@ -1288,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb, unsigned int cpu = (unsigned long)hcpu; switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - intel_cqm_cpu_prepare(cpu); - break; case CPU_DOWN_PREPARE: intel_cqm_cpu_exit(cpu); break; case CPU_STARTING: + intel_cqm_cpu_starting(cpu); cqm_pick_event_reader(cpu); break; } @@ -1365,7 +1371,7 @@ static int __init intel_cqm_init(void) goto out; for_each_online_cpu(i) { - intel_cqm_cpu_prepare(i); + intel_cqm_cpu_starting(i); cqm_pick_event_reader(i); } diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 71fc40238843..84f236ab96b0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -224,6 +224,19 @@ union hsw_tsx_tuning { #define PEBS_HSW_TSX_FLAGS 0xff00000000ULL +/* Same as HSW, plus TSC */ + +struct pebs_record_skl { + u64 flags, ip; + u64 ax, bx, cx, dx; + u64 si, di, bp, sp; + u64 r8, r9, r10, r11; + u64 r12, r13, r14, r15; + u64 status, dla, dse, lat; + u64 real_ip, tsx_tuning; + u64 tsc; +}; + void init_debug_store_on_cpu(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; @@ -675,6 +688,28 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; +struct event_constraint intel_skl_pebs_event_constraints[] = { + INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ + /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). 
*/ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_L3_MISS_RETIRED.* */ + /* Allow all events as PEBS with no flags */ + INTEL_ALL_EVENT_CONSTRAINT(0, 0xf), + EVENT_CONSTRAINT_END +}; + struct event_constraint *intel_pebs_constraints(struct perf_event *event) { struct event_constraint *c; @@ -754,6 +789,11 @@ void intel_pmu_pebs_disable(struct perf_event *event) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; struct debug_store *ds = cpuc->ds; + bool large_pebs = ds->pebs_interrupt_threshold > + ds->pebs_buffer_base + x86_pmu.pebs_record_size; + + if (large_pebs) + intel_pmu_drain_pebs_buffer(); cpuc->pebs_enabled &= ~(1ULL << hwc->idx); @@ -762,12 +802,8 @@ void intel_pmu_pebs_disable(struct perf_event *event) else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) cpuc->pebs_enabled &= ~(1ULL << 63); - if (ds->pebs_interrupt_threshold > - ds->pebs_buffer_base + x86_pmu.pebs_record_size) { - intel_pmu_drain_pebs_buffer(); - if (!pebs_is_enabled(cpuc)) - perf_sched_cb_dec(event->ctx->pmu); - } + if (large_pebs && !pebs_is_enabled(cpuc)) + perf_sched_cb_dec(event->ctx->pmu); if (cpuc->enabled) wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); @@ -885,7 +921,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) return 0; } -static inline u64 intel_hsw_weight(struct pebs_record_hsw *pebs) +static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs) { if (pebs->tsx_tuning) { union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning }; @@ -894,7 +930,7 @@ static inline u64 intel_hsw_weight(struct pebs_record_hsw *pebs) return 0; } -static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs) +static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs) { u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32; @@ -918,7 +954,7 @@ static void setup_pebs_sample_data(struct perf_event *event, * unconditionally access the 'extra' entries. */ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - struct pebs_record_hsw *pebs = __pebs; + struct pebs_record_skl *pebs = __pebs; u64 sample_type; int fll, fst, dsrc; int fl = event->hw.flags; @@ -1016,6 +1052,16 @@ static void setup_pebs_sample_data(struct perf_event *event, data->txn = intel_hsw_transaction(pebs); } + /* + * v3 supplies an accurate time stamp, so we use that + * for the time stamp. + * + * We can only do this for the default trace clock. 
+ */ + if (x86_pmu.intel_cap.pebs_format >= 3 && + event->attr.use_clockid == 0) + data->time = native_sched_clock_from_tsc(pebs->tsc); + if (has_branch_stack(event)) data->br_stack = &cpuc->lbr_stack; } @@ -1142,6 +1188,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) for (at = base; at < top; at += x86_pmu.pebs_record_size) { struct pebs_record_nhm *p = at; + u64 pebs_status; /* PEBS v3 has accurate status bits */ if (x86_pmu.intel_cap.pebs_format >= 3) { @@ -1152,12 +1199,17 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) continue; } - bit = find_first_bit((unsigned long *)&p->status, + pebs_status = p->status & cpuc->pebs_enabled; + pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1; + + bit = find_first_bit((unsigned long *)&pebs_status, x86_pmu.max_pebs_events); - if (bit >= x86_pmu.max_pebs_events) - continue; - if (!test_bit(bit, cpuc->active_mask)) + if (WARN(bit >= x86_pmu.max_pebs_events, + "PEBS record without PEBS event! status=%Lx pebs_enabled=%Lx active_mask=%Lx", + (unsigned long long)p->status, (unsigned long long)cpuc->pebs_enabled, + *(unsigned long long *)cpuc->active_mask)) continue; + /* * The PEBS hardware does not deal well with the situation * when events happen near to each other and multiple bits @@ -1172,27 +1224,21 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) * one, and it's not possible to reconstruct all events * that caused the PEBS record. It's called collision. * If collision happened, the record will be dropped. - * */ - if (p->status != (1 << bit)) { - u64 pebs_status; - - /* slow path */ - pebs_status = p->status & cpuc->pebs_enabled; - pebs_status &= (1ULL << MAX_PEBS_EVENTS) - 1; - if (pebs_status != (1 << bit)) { - for_each_set_bit(i, (unsigned long *)&pebs_status, - MAX_PEBS_EVENTS) - error[i]++; - continue; - } + if (p->status != (1ULL << bit)) { + for_each_set_bit(i, (unsigned long *)&pebs_status, + x86_pmu.max_pebs_events) + error[i]++; + continue; } + counts[bit]++; } for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) { if ((counts[bit] == 0) && (error[bit] == 0)) continue; + event = cpuc->events[bit]; WARN_ON_ONCE(!event); WARN_ON_ONCE(!event->attr.precise_ip); @@ -1245,6 +1291,14 @@ void __init intel_ds_init(void) x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; break; + case 3: + pr_cont("PEBS fmt3%c, ", pebs_type); + x86_pmu.pebs_record_size = + sizeof(struct pebs_record_skl); + x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; + x86_pmu.free_running_flags |= PERF_SAMPLE_TIME; + break; + default: printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); x86_pmu.pebs = 0; diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 452a7bd2dedb..b2c9475b7ff2 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -13,7 +13,8 @@ enum { LBR_FORMAT_EIP = 0x02, LBR_FORMAT_EIP_FLAGS = 0x03, LBR_FORMAT_EIP_FLAGS2 = 0x04, - LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_EIP_FLAGS2, + LBR_FORMAT_INFO = 0x05, + LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_INFO, }; static enum { @@ -140,6 +141,13 @@ static void __intel_pmu_lbr_enable(bool pmi) u64 debugctl, lbr_select = 0, orig_debugctl; /* + * No need to unfreeze manually, as v4 can do that as part + * of the GLOBAL_STATUS ack. + */ + if (pmi && x86_pmu.version >= 4) + return; + + /* * No need to reprogram LBR_SELECT in a PMI, as it * did not change. 
*/ @@ -186,6 +194,8 @@ static void intel_pmu_lbr_reset_64(void) for (i = 0; i < x86_pmu.lbr_nr; i++) { wrmsrl(x86_pmu.lbr_from + i, 0); wrmsrl(x86_pmu.lbr_to + i, 0); + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) + wrmsrl(MSR_LBR_INFO_0 + i, 0); } } @@ -230,10 +240,12 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) mask = x86_pmu.lbr_nr - 1; tos = intel_pmu_lbr_tos(); - for (i = 0; i < x86_pmu.lbr_nr; i++) { + for (i = 0; i < tos; i++) { lbr_idx = (tos - i) & mask; wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]); wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]); + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) + wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } task_ctx->lbr_stack_state = LBR_NONE; } @@ -251,10 +263,12 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) mask = x86_pmu.lbr_nr - 1; tos = intel_pmu_lbr_tos(); - for (i = 0; i < x86_pmu.lbr_nr; i++) { + for (i = 0; i < tos; i++) { lbr_idx = (tos - i) & mask; rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]); rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]); + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) + rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); } task_ctx->lbr_stack_state = LBR_VALID; } @@ -411,16 +425,31 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) u64 tos = intel_pmu_lbr_tos(); int i; int out = 0; + int num = x86_pmu.lbr_nr; - for (i = 0; i < x86_pmu.lbr_nr; i++) { + if (cpuc->lbr_sel->config & LBR_CALL_STACK) + num = tos; + + for (i = 0; i < num; i++) { unsigned long lbr_idx = (tos - i) & mask; u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0; int skip = 0; + u16 cycles = 0; int lbr_flags = lbr_desc[lbr_format]; rdmsrl(x86_pmu.lbr_from + lbr_idx, from); rdmsrl(x86_pmu.lbr_to + lbr_idx, to); + if (lbr_format == LBR_FORMAT_INFO) { + u64 info; + + rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info); + mis = !!(info & LBR_INFO_MISPRED); + pred = !mis; + in_tx = !!(info & LBR_INFO_IN_TX); + abort = !!(info & LBR_INFO_ABORT); + cycles = (info & LBR_INFO_CYCLES); + } if (lbr_flags & LBR_EIP_FLAGS) { mis = !!(from & LBR_FROM_FLAG_MISPRED); pred = !mis; @@ -450,6 +479,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) cpuc->lbr_entries[out].predicted = pred; cpuc->lbr_entries[out].in_tx = in_tx; cpuc->lbr_entries[out].abort = abort; + cpuc->lbr_entries[out].cycles = cycles; cpuc->lbr_entries[out].reserved = 0; out++; } @@ -947,6 +977,26 @@ void intel_pmu_lbr_init_hsw(void) pr_cont("16-deep LBR, "); } +/* skylake */ +__init void intel_pmu_lbr_init_skl(void) +{ + x86_pmu.lbr_nr = 32; + x86_pmu.lbr_tos = MSR_LBR_TOS; + x86_pmu.lbr_from = MSR_LBR_NHM_FROM; + x86_pmu.lbr_to = MSR_LBR_NHM_TO; + + x86_pmu.lbr_sel_mask = LBR_SEL_MASK; + x86_pmu.lbr_sel_map = hsw_lbr_sel_map; + + /* + * SW branch filter usage: + * - support syscall, sysret capture. 
+ * That requires LBR_FAR but that means far + * jmp need to be filtered out + */ + pr_cont("32-deep LBR, "); +} + /* atom */ void __init intel_pmu_lbr_init_atom(void) { diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c index 183de719628d..42169283448b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_pt.c +++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c @@ -65,15 +65,21 @@ static struct pt_cap_desc { } pt_caps[] = { PT_CAP(max_subleaf, 0, CR_EAX, 0xffffffff), PT_CAP(cr3_filtering, 0, CR_EBX, BIT(0)), + PT_CAP(psb_cyc, 0, CR_EBX, BIT(1)), + PT_CAP(mtc, 0, CR_EBX, BIT(3)), PT_CAP(topa_output, 0, CR_ECX, BIT(0)), PT_CAP(topa_multiple_entries, 0, CR_ECX, BIT(1)), + PT_CAP(single_range_output, 0, CR_ECX, BIT(2)), PT_CAP(payloads_lip, 0, CR_ECX, BIT(31)), + PT_CAP(mtc_periods, 1, CR_EAX, 0xffff0000), + PT_CAP(cycle_thresholds, 1, CR_EBX, 0xffff), + PT_CAP(psb_periods, 1, CR_EBX, 0xffff0000), }; static u32 pt_cap_get(enum pt_capabilities cap) { struct pt_cap_desc *cd = &pt_caps[cap]; - u32 c = pt_pmu.caps[cd->leaf * 4 + cd->reg]; + u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg]; unsigned int shift = __ffs(cd->mask); return (c & cd->mask) >> shift; @@ -94,12 +100,22 @@ static struct attribute_group pt_cap_group = { .name = "caps", }; +PMU_FORMAT_ATTR(cyc, "config:1" ); +PMU_FORMAT_ATTR(mtc, "config:9" ); PMU_FORMAT_ATTR(tsc, "config:10" ); PMU_FORMAT_ATTR(noretcomp, "config:11" ); +PMU_FORMAT_ATTR(mtc_period, "config:14-17" ); +PMU_FORMAT_ATTR(cyc_thresh, "config:19-22" ); +PMU_FORMAT_ATTR(psb_period, "config:24-27" ); static struct attribute *pt_formats_attr[] = { + &format_attr_cyc.attr, + &format_attr_mtc.attr, &format_attr_tsc.attr, &format_attr_noretcomp.attr, + &format_attr_mtc_period.attr, + &format_attr_cyc_thresh.attr, + &format_attr_psb_period.attr, NULL, }; @@ -129,10 +145,10 @@ static int __init pt_pmu_hw_init(void) for (i = 0; i < PT_CPUID_LEAVES; i++) { cpuid_count(20, i, - &pt_pmu.caps[CR_EAX + i*4], - &pt_pmu.caps[CR_EBX + i*4], - &pt_pmu.caps[CR_ECX + i*4], - &pt_pmu.caps[CR_EDX + i*4]); + &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM], + &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM], + &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM], + &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]); } ret = -ENOMEM; @@ -170,15 +186,65 @@ fail: return ret; } -#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC) +#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC | \ + RTIT_CTL_CYC_THRESH | \ + RTIT_CTL_PSB_FREQ) + +#define RTIT_CTL_MTC (RTIT_CTL_MTC_EN | \ + RTIT_CTL_MTC_RANGE) + +#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | \ + RTIT_CTL_DISRETC | \ + RTIT_CTL_CYC_PSB | \ + RTIT_CTL_MTC) static bool pt_event_valid(struct perf_event *event) { u64 config = event->attr.config; + u64 allowed, requested; if ((config & PT_CONFIG_MASK) != config) return false; + if (config & RTIT_CTL_CYC_PSB) { + if (!pt_cap_get(PT_CAP_psb_cyc)) + return false; + + allowed = pt_cap_get(PT_CAP_psb_periods); + requested = (config & RTIT_CTL_PSB_FREQ) >> + RTIT_CTL_PSB_FREQ_OFFSET; + if (requested && (!(allowed & BIT(requested)))) + return false; + + allowed = pt_cap_get(PT_CAP_cycle_thresholds); + requested = (config & RTIT_CTL_CYC_THRESH) >> + RTIT_CTL_CYC_THRESH_OFFSET; + if (requested && (!(allowed & BIT(requested)))) + return false; + } + + if (config & RTIT_CTL_MTC) { + /* + * In the unlikely case that CPUID lists valid mtc periods, + * but not the mtc capability, drop out here. 
+ * + * Spec says that setting mtc period bits while mtc bit in + * CPUID is 0 will #GP, so better safe than sorry. + */ + if (!pt_cap_get(PT_CAP_mtc)) + return false; + + allowed = pt_cap_get(PT_CAP_mtc_periods); + if (!allowed) + return false; + + requested = (config & RTIT_CTL_MTC_RANGE) >> + RTIT_CTL_MTC_RANGE_OFFSET; + + if (!(allowed & BIT(requested))) + return false; + } + return true; } @@ -191,6 +257,11 @@ static void pt_config(struct perf_event *event) { u64 reg; + if (!event->hw.itrace_started) { + event->hw.itrace_started = 1; + wrmsrl(MSR_IA32_RTIT_STATUS, 0); + } + reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN; if (!event->attr.exclude_kernel) @@ -910,7 +981,6 @@ void intel_pt_interrupt(void) pt_config_buffer(buf->cur->table, buf->cur_idx, buf->output_off); - wrmsrl(MSR_IA32_RTIT_STATUS, 0); pt_config(event); } } @@ -934,7 +1004,6 @@ static void pt_event_start(struct perf_event *event, int mode) pt_config_buffer(buf->cur->table, buf->cur_idx, buf->output_off); - wrmsrl(MSR_IA32_RTIT_STATUS, 0); pt_config(event); } diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 5cbd4e64feb5..81431c0f0614 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -86,6 +86,10 @@ static const char *rapl_domain_names[NR_RAPL_DOMAINS] __initconst = { 1<<RAPL_IDX_RAM_NRG_STAT|\ 1<<RAPL_IDX_PP1_NRG_STAT) +/* Knights Landing has PKG, RAM */ +#define RAPL_IDX_KNL (1<<RAPL_IDX_PKG_NRG_STAT|\ + 1<<RAPL_IDX_RAM_NRG_STAT) + /* * event code: LSB 8 bits, passed in attr->config * any other bit is reserved @@ -486,6 +490,18 @@ static struct attribute *rapl_events_hsw_attr[] = { NULL, }; +static struct attribute *rapl_events_knl_attr[] = { + EVENT_PTR(rapl_pkg), + EVENT_PTR(rapl_ram), + + EVENT_PTR(rapl_pkg_unit), + EVENT_PTR(rapl_ram_unit), + + EVENT_PTR(rapl_pkg_scale), + EVENT_PTR(rapl_ram_scale), + NULL, +}; + static struct attribute_group rapl_pmu_events_group = { .name = "events", .attrs = NULL, /* patched at runtime */ @@ -730,6 +746,10 @@ static int __init rapl_pmu_init(void) rapl_cntr_mask = RAPL_IDX_SRV; rapl_pmu_events_group.attrs = rapl_events_srv_attr; break; + case 87: /* Knights Landing */ + rapl_add_quirk(rapl_hsw_server_quirk); + rapl_cntr_mask = RAPL_IDX_KNL; + rapl_pmu_events_group.attrs = rapl_events_knl_attr; default: /* unsupported */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 21b5e38c921b..560e5255b15e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -911,6 +911,9 @@ static int __init uncore_pci_init(void) case 63: /* Haswell-EP */ ret = hswep_uncore_pci_init(); break; + case 86: /* BDX-DE */ + ret = bdx_uncore_pci_init(); + break; case 42: /* Sandy Bridge */ ret = snb_uncore_pci_init(); break; @@ -1209,6 +1212,11 @@ static int __init uncore_cpu_init(void) break; case 42: /* Sandy Bridge */ case 58: /* Ivy Bridge */ + case 60: /* Haswell */ + case 69: /* Haswell */ + case 70: /* Haswell */ + case 61: /* Broadwell */ + case 71: /* Broadwell */ snb_uncore_cpu_init(); break; case 45: /* Sandy Bridge-EP */ @@ -1224,6 +1232,9 @@ static int __init uncore_cpu_init(void) case 63: /* Haswell-EP */ hswep_uncore_cpu_init(); break; + case 86: /* BDX-DE */ + bdx_uncore_cpu_init(); + break; default: return 0; } diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 
0f77f0a196e4..72c54c2e5b1a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -336,6 +336,8 @@ int ivbep_uncore_pci_init(void); void ivbep_uncore_cpu_init(void); int hswep_uncore_pci_init(void); void hswep_uncore_cpu_init(void); +int bdx_uncore_pci_init(void); +void bdx_uncore_cpu_init(void); /* perf_event_intel_uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c index b005a78c7012..f78574b3cb55 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c @@ -45,6 +45,11 @@ #define SNB_UNC_CBO_0_PER_CTR0 0x706 #define SNB_UNC_CBO_MSR_OFFSET 0x10 +/* SNB ARB register */ +#define SNB_UNC_ARB_PER_CTR0 0x3b0 +#define SNB_UNC_ARB_PERFEVTSEL0 0x3b2 +#define SNB_UNC_ARB_MSR_OFFSET 0x10 + /* NHM global control register */ #define NHM_UNC_PERF_GLOBAL_CTL 0x391 #define NHM_UNC_FIXED_CTR 0x394 @@ -115,7 +120,7 @@ static struct intel_uncore_ops snb_uncore_msr_ops = { .read_counter = uncore_msr_read_counter, }; -static struct event_constraint snb_uncore_cbox_constraints[] = { +static struct event_constraint snb_uncore_arb_constraints[] = { UNCORE_EVENT_CONSTRAINT(0x80, 0x1), UNCORE_EVENT_CONSTRAINT(0x83, 0x1), EVENT_CONSTRAINT_END @@ -134,14 +139,28 @@ static struct intel_uncore_type snb_uncore_cbox = { .single_fixed = 1, .event_mask = SNB_UNC_RAW_EVENT_MASK, .msr_offset = SNB_UNC_CBO_MSR_OFFSET, - .constraints = snb_uncore_cbox_constraints, .ops = &snb_uncore_msr_ops, .format_group = &snb_uncore_format_group, .event_descs = snb_uncore_events, }; +static struct intel_uncore_type snb_uncore_arb = { + .name = "arb", + .num_counters = 2, + .num_boxes = 1, + .perf_ctr_bits = 44, + .perf_ctr = SNB_UNC_ARB_PER_CTR0, + .event_ctl = SNB_UNC_ARB_PERFEVTSEL0, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .msr_offset = SNB_UNC_ARB_MSR_OFFSET, + .constraints = snb_uncore_arb_constraints, + .ops = &snb_uncore_msr_ops, + .format_group = &snb_uncore_format_group, +}; + static struct intel_uncore_type *snb_msr_uncores[] = { &snb_uncore_cbox, + &snb_uncore_arb, NULL, }; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index 6d6e85dd5849..694510a887dc 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -2215,7 +2215,7 @@ static struct intel_uncore_type *hswep_pci_uncores[] = { NULL, }; -static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = { +static const struct pci_device_id hswep_uncore_pci_ids[] = { { /* Home Agent 0 */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30), .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0), @@ -2321,3 +2321,167 @@ int hswep_uncore_pci_init(void) return 0; } /* end of Haswell-EP uncore support */ + +/* BDX-DE uncore support */ + +static struct intel_uncore_type bdx_uncore_ubox = { + .name = "ubox", + .num_counters = 2, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = HSWEP_U_MSR_PMON_CTR0, + .event_ctl = HSWEP_U_MSR_PMON_CTL0, + .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, + .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, + .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, + .num_shared_regs = 1, + .ops = &ivbep_uncore_msr_ops, + .format_group = &ivbep_uncore_ubox_format_group, +}; + +static struct event_constraint bdx_uncore_cbox_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x09, 0x3), + 
UNCORE_EVENT_CONSTRAINT(0x11, 0x1), + UNCORE_EVENT_CONSTRAINT(0x36, 0x1), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type bdx_uncore_cbox = { + .name = "cbox", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = HSWEP_C0_MSR_PMON_CTL0, + .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, + .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, + .msr_offset = HSWEP_CBO_MSR_OFFSET, + .num_shared_regs = 1, + .constraints = bdx_uncore_cbox_constraints, + .ops = &hswep_uncore_cbox_ops, + .format_group = &hswep_uncore_cbox_format_group, +}; + +static struct intel_uncore_type *bdx_msr_uncores[] = { + &bdx_uncore_ubox, + &bdx_uncore_cbox, + &hswep_uncore_pcu, + NULL, +}; + +void bdx_uncore_cpu_init(void) +{ + if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) + bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; + uncore_msr_uncores = bdx_msr_uncores; +} + +static struct intel_uncore_type bdx_uncore_ha = { + .name = "ha", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type bdx_uncore_imc = { + .name = "imc", + .num_counters = 5, + .num_boxes = 2, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, + .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, + .event_descs = hswep_uncore_imc_events, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +static struct intel_uncore_type bdx_uncore_irp = { + .name = "irp", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, + .ops = &hswep_uncore_irp_ops, + .format_group = &snbep_uncore_format_group, +}; + + +static struct event_constraint bdx_uncore_r2pcie_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x10, 0x3), + UNCORE_EVENT_CONSTRAINT(0x11, 0x3), + UNCORE_EVENT_CONSTRAINT(0x13, 0x1), + UNCORE_EVENT_CONSTRAINT(0x23, 0x1), + UNCORE_EVENT_CONSTRAINT(0x25, 0x1), + UNCORE_EVENT_CONSTRAINT(0x26, 0x3), + UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type bdx_uncore_r2pcie = { + .name = "r2pcie", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .constraints = bdx_uncore_r2pcie_constraints, + SNBEP_UNCORE_PCI_COMMON_INIT(), +}; + +enum { + BDX_PCI_UNCORE_HA, + BDX_PCI_UNCORE_IMC, + BDX_PCI_UNCORE_IRP, + BDX_PCI_UNCORE_R2PCIE, +}; + +static struct intel_uncore_type *bdx_pci_uncores[] = { + [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha, + [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc, + [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp, + [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie, + NULL, +}; + +static DEFINE_PCI_DEVICE_TABLE(bdx_uncore_pci_ids) = { + { /* Home Agent 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0), + }, + { /* MC0 Channel 0 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0), + }, + { /* MC0 Channel 1 */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1), + }, + { /* IRP */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0), + }, + { /* R2PCIe */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34), + .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0), + }, + { /* end: all zeroes */ } +}; + +static struct pci_driver bdx_uncore_pci_driver = { + .name = "bdx_uncore", + .id_table = bdx_uncore_pci_ids, +}; + +int bdx_uncore_pci_init(void) 
+{ + int ret = snbep_pci2phy_map_init(0x6f1e); + + if (ret) + return ret; + uncore_pci_uncores = bdx_pci_uncores; + uncore_pci_driver = &bdx_uncore_pci_driver; + return 0; +} + +/* end of BDX-DE uncore support */ diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c new file mode 100644 index 000000000000..086b12eae794 --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_msr.c @@ -0,0 +1,242 @@ +#include <linux/perf_event.h> + +enum perf_msr_id { + PERF_MSR_TSC = 0, + PERF_MSR_APERF = 1, + PERF_MSR_MPERF = 2, + PERF_MSR_PPERF = 3, + PERF_MSR_SMI = 4, + + PERF_MSR_EVENT_MAX, +}; + +bool test_aperfmperf(int idx) +{ + return boot_cpu_has(X86_FEATURE_APERFMPERF); +} + +bool test_intel(int idx) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || + boot_cpu_data.x86 != 6) + return false; + + switch (boot_cpu_data.x86_model) { + case 30: /* 45nm Nehalem */ + case 26: /* 45nm Nehalem-EP */ + case 46: /* 45nm Nehalem-EX */ + + case 37: /* 32nm Westmere */ + case 44: /* 32nm Westmere-EP */ + case 47: /* 32nm Westmere-EX */ + + case 42: /* 32nm SandyBridge */ + case 45: /* 32nm SandyBridge-E/EN/EP */ + + case 58: /* 22nm IvyBridge */ + case 62: /* 22nm IvyBridge-EP/EX */ + + case 60: /* 22nm Haswell Core */ + case 63: /* 22nm Haswell Server */ + case 69: /* 22nm Haswell ULT */ + case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */ + + case 61: /* 14nm Broadwell Core-M */ + case 86: /* 14nm Broadwell Xeon D */ + case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */ + case 79: /* 14nm Broadwell Server */ + + case 55: /* 22nm Atom "Silvermont" */ + case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ + case 76: /* 14nm Atom "Airmont" */ + if (idx == PERF_MSR_SMI) + return true; + break; + + case 78: /* 14nm Skylake Mobile */ + case 94: /* 14nm Skylake Desktop */ + if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) + return true; + break; + } + + return false; +} + +struct perf_msr { + u64 msr; + struct perf_pmu_events_attr *attr; + bool (*test)(int idx); +}; + +PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00"); +PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01"); +PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02"); +PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03"); +PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04"); + +static struct perf_msr msr[] = { + [PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, }, + [PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, }, + [PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, }, + [PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, }, + [PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, }, +}; + +static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = { + NULL, +}; + +static struct attribute_group events_attr_group = { + .name = "events", + .attrs = events_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); +static struct attribute *format_attrs[] = { + &format_attr_event.attr, + NULL, +}; +static struct attribute_group format_attr_group = { + .name = "format", + .attrs = format_attrs, +}; + +static const struct attribute_group *attr_groups[] = { + &events_attr_group, + &format_attr_group, + NULL, +}; + +static int msr_event_init(struct perf_event *event) +{ + u64 cfg = event->attr.config; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (cfg >= PERF_MSR_EVENT_MAX) + return -EINVAL; + + /* unsupported modes and filters */ + if (event->attr.exclude_user || + event->attr.exclude_kernel || + event->attr.exclude_hv || + 
event->attr.exclude_idle || + event->attr.exclude_host || + event->attr.exclude_guest || + event->attr.sample_period) /* no sampling */ + return -EINVAL; + + if (!msr[cfg].attr) + return -EINVAL; + + event->hw.idx = -1; + event->hw.event_base = msr[cfg].msr; + event->hw.config = cfg; + + return 0; +} + +static inline u64 msr_read_counter(struct perf_event *event) +{ + u64 now; + + if (event->hw.event_base) + rdmsrl(event->hw.event_base, now); + else + rdtscll(now); + + return now; +} +static void msr_event_update(struct perf_event *event) +{ + u64 prev, now; + s64 delta; + + /* Careful, an NMI might modify the previous event value. */ +again: + prev = local64_read(&event->hw.prev_count); + now = msr_read_counter(event); + + if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev) + goto again; + + delta = now - prev; + if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) { + delta <<= 32; + delta >>= 32; /* sign extend */ + } + local64_add(now - prev, &event->count); +} + +static void msr_event_start(struct perf_event *event, int flags) +{ + u64 now; + + now = msr_read_counter(event); + local64_set(&event->hw.prev_count, now); +} + +static void msr_event_stop(struct perf_event *event, int flags) +{ + msr_event_update(event); +} + +static void msr_event_del(struct perf_event *event, int flags) +{ + msr_event_stop(event, PERF_EF_UPDATE); +} + +static int msr_event_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + msr_event_start(event, flags); + + return 0; +} + +static struct pmu pmu_msr = { + .task_ctx_nr = perf_sw_context, + .attr_groups = attr_groups, + .event_init = msr_event_init, + .add = msr_event_add, + .del = msr_event_del, + .start = msr_event_start, + .stop = msr_event_stop, + .read = msr_event_update, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT, +}; + +static int __init msr_init(void) +{ + int i, j = 0; + + if (!boot_cpu_has(X86_FEATURE_TSC)) { + pr_cont("no MSR PMU driver.\n"); + return 0; + } + + /* Probe the MSRs. */ + for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) { + u64 val; + + /* + * Virt sucks arse; you cannot tell if a R/O MSR is present :/ + */ + if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val)) + msr[i].attr = NULL; + } + + /* List remaining MSRs in the sysfs attrs. 
*/ + for (i = 0; i < PERF_MSR_EVENT_MAX; i++) { + if (msr[i].attr) + events_attrs[j++] = &msr[i].attr->attr.attr; + } + events_attrs[j] = NULL; + + perf_pmu_register(&pmu_msr, "msr", -1); + + return 0; +} +device_initcall(msr_init); diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 83741a71558f..bd3507da39f0 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, return notifier_from_errno(err); } -static struct notifier_block __refdata cpuid_class_cpu_notifier = +static struct notifier_block cpuid_class_cpu_notifier = { .notifier_call = cpuid_class_cpu_callback, }; diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index ce95676abd60..4d38416e2a7f 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -110,7 +110,7 @@ static void init_espfix_random(void) */ if (!arch_get_random_long(&rand)) { /* The constant is an arbitrary large prime */ - rdtscll(rand); + rand = rdtsc(); rand *= 0xc345c6b72fd16123UL; } diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 79de954626fd..d25097c3fc1d 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) dst_fpu->fpregs_active = 0; dst_fpu->last_cpu = -1; - if (src_fpu->fpstate_active) + if (src_fpu->fpstate_active && cpu_has_fpu) fpu_copy(dst_fpu, src_fpu); return 0; diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 32826791e675..d14e9ac3235a 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -4,6 +4,8 @@ #include <asm/fpu/internal.h> #include <asm/tlbflush.h> +#include <linux/sched.h> + /* * Initialize the TS bit in CR0 according to the style of context-switches * we are using: @@ -38,7 +40,12 @@ static void fpu__init_cpu_generic(void) write_cr0(cr0); /* Flush out any pending x87 state: */ - asm volatile ("fninit"); +#ifdef CONFIG_MATH_EMULATION + if (!cpu_has_fpu) + fpstate_init_soft(¤t->thread.fpu.state.soft); + else +#endif + asm volatile ("fninit"); } /* @@ -136,6 +143,43 @@ static void __init fpu__init_system_generic(void) unsigned int xstate_size; EXPORT_SYMBOL_GPL(xstate_size); +/* Enforce that 'MEMBER' is the last field of 'TYPE': */ +#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \ + BUILD_BUG_ON(sizeof(TYPE) != offsetofend(TYPE, MEMBER)) + +/* + * We append the 'struct fpu' to the task_struct: + */ +static void __init fpu__init_task_struct_size(void) +{ + int task_size = sizeof(struct task_struct); + + /* + * Subtract off the static size of the register state. + * It potentially has a bunch of padding. + */ + task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state); + + /* + * Add back the dynamically-calculated register state + * size. + */ + task_size += xstate_size; + + /* + * We dynamically size 'struct fpu', so we require that + * it be at the end of 'thread_struct' and that + * 'thread_struct' be at the end of 'task_struct'. If + * you hit a compile error here, check the structure to + * see if something got added to the end. + */ + CHECK_MEMBER_AT_END_OF(struct fpu, state); + CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu); + CHECK_MEMBER_AT_END_OF(struct task_struct, thread); + + arch_task_struct_size = task_size; +} + /* * Set up the xstate_size based on the legacy FPU context size. 
* @@ -287,6 +331,7 @@ void __init fpu__init_system(struct cpuinfo_x86 *c) fpu__init_system_generic(); fpu__init_system_xstate_size_legacy(); fpu__init_system_xstate(); + fpu__init_task_struct_size(); fpu__init_system_ctx_switch(); } @@ -311,9 +356,15 @@ static int __init x86_noxsave_setup(char *s) setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); + setup_clear_cpu_cap(X86_FEATURE_XSAVEC); setup_clear_cpu_cap(X86_FEATURE_XSAVES); setup_clear_cpu_cap(X86_FEATURE_AVX); setup_clear_cpu_cap(X86_FEATURE_AVX2); + setup_clear_cpu_cap(X86_FEATURE_AVX512F); + setup_clear_cpu_cap(X86_FEATURE_AVX512PF); + setup_clear_cpu_cap(X86_FEATURE_AVX512ER); + setup_clear_cpu_cap(X86_FEATURE_AVX512CD); + setup_clear_cpu_cap(X86_FEATURE_MPX); return 1; } diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 10757d0a3fcf..f75c5908c7a6 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -735,7 +735,7 @@ static int hpet_clocksource_register(void) /* Verify whether hpet counter works */ t1 = hpet_readl(HPET_COUNTER); - rdtscll(start); + start = rdtsc(); /* * We don't know the TSC frequency yet, but waiting for @@ -745,7 +745,7 @@ static int hpet_clocksource_register(void) */ do { rep_nop(); - rdtscll(now); + now = rdtsc(); } while ((now - start) < 200000UL); if (t1 == hpet_readl(HPET_COUNTER)) { diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 7114ba220fd4..50a3fad5b89f 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -32,6 +32,7 @@ #include <linux/irqflags.h> #include <linux/notifier.h> #include <linux/kallsyms.h> +#include <linux/kprobes.h> #include <linux/percpu.h> #include <linux/kdebug.h> #include <linux/kernel.h> @@ -179,7 +180,11 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp) va = info->address; len = bp->attr.bp_len; - return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); + /* + * We don't need to worry about va + len - 1 overflowing: + * we already require that va is aligned to a multiple of len. + */ + return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX); } int arch_bp_generic_fields(int x86_len, int x86_type, @@ -243,6 +248,20 @@ static int arch_build_bp_info(struct perf_event *bp) info->type = X86_BREAKPOINT_RW; break; case HW_BREAKPOINT_X: + /* + * We don't allow kernel breakpoints in places that are not + * acceptable for kprobes. On non-kprobes kernels, we don't + * allow kernel breakpoints at all. + */ + if (bp->attr.bp_addr >= TASK_SIZE_MAX) { +#ifdef CONFIG_KPROBES + if (within_kprobe_blacklist(bp->attr.bp_addr)) + return -EINVAL; +#else + return -EINVAL; +#endif + } + info->type = X86_BREAKPOINT_EXECUTE; /* * x86 inst breakpoints need to have a specific undefined len. @@ -276,8 +295,18 @@ static int arch_build_bp_info(struct perf_event *bp) break; #endif default: + /* AMD range breakpoint */ if (!is_power_of_2(bp->attr.bp_len)) return -EINVAL; + if (bp->attr.bp_addr & (bp->attr.bp_len - 1)) + return -EINVAL; + /* + * It's impossible to use a range breakpoint to fake out + * user vs kernel detection because bp_len - 1 can't + * have the high bit set. If we ever allow range instruction + * breakpoints, then we'll have to check for kprobe-blacklisted + * addresses anywhere in the range. 
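Illustrative aside, not kernel code: the AMD range-breakpoint path above requires bp_len to be a power of two and bp_addr to be aligned to it. A small standalone version of those two checks:

#include <stdio.h>
#include <stdbool.h>

static bool range_bp_ok(unsigned long addr, unsigned long len)
{
	if (len == 0 || (len & (len - 1)))	/* must be a power of two */
		return false;
	if (addr & (len - 1))			/* must be aligned to len */
		return false;
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       range_bp_ok(0x1000, 16),		/* ok */
	       range_bp_ok(0x1008, 16),		/* misaligned */
	       range_bp_ok(0x1000, 24));	/* not a power of two */
	return 0;
}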
+ */ if (!cpu_has_bpext) return -EOPNOTSUPP; info->mask = bp->attr.bp_len - 1; diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index c7dfe1be784e..ae00b355114d 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -139,10 +139,13 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_puts(p, " Machine check polls\n"); #endif #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) - seq_printf(p, "%*s: ", prec, "HYP"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count); - seq_puts(p, " Hypervisor callback interrupts\n"); + if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) { + seq_printf(p, "%*s: ", prec, "HYP"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", + irq_stats(j)->irq_hv_callback_count); + seq_puts(p, " Hypervisor callback interrupts\n"); + } #endif seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) @@ -216,8 +219,23 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) unsigned vector = ~regs->orig_ax; unsigned irq; + /* + * NB: Unlike exception entries, IRQ entries do not reliably + * handle context tracking in the low-level entry code. This is + * because syscall entries execute briefly with IRQs on before + * updating context tracking state, so we can take an IRQ from + * kernel mode with CONTEXT_USER. The low-level entry code only + * updates the context if we came from user mode, so we won't + * switch to CONTEXT_KERNEL. We'll fix that once the syscall + * code is cleaned up enough that we can cleanly defer enabling + * IRQs. + */ + entering_irq(); + /* entering_irq() tells RCU that we're not quiescent. Check it. */ + RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU"); + irq = __this_cpu_read(vector_irq[vector]); if (!handle_irq(irq, regs)) { diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index ca83f7ac388b..961e51e9c6f6 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -223,9 +223,6 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, memset(¶ms->hd0_info, 0, sizeof(params->hd0_info)); memset(¶ms->hd1_info, 0, sizeof(params->hd1_info)); - /* Default sysdesc table */ - params->sys_desc_table.length = 0; - if (image->type == KEXEC_TYPE_CRASH) { ret = crash_setup_memmap_entries(image, params); if (ret) diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index c37886d759cc..2bcc0525f1c1 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -12,6 +12,7 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> @@ -20,82 +21,82 @@ #include <asm/mmu_context.h> #include <asm/syscalls.h> -#ifdef CONFIG_SMP +/* context.lock is held for us, so we don't need any locking. */ static void flush_ldt(void *current_mm) { - if (current->active_mm == current_mm) - load_LDT(¤t->active_mm->context); + mm_context_t *pc; + + if (current->active_mm != current_mm) + return; + + pc = ¤t->active_mm->context; + set_ldt(pc->ldt->entries, pc->ldt->size); } -#endif -static int alloc_ldt(mm_context_t *pc, int mincount, int reload) +/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. 
*/ +static struct ldt_struct *alloc_ldt_struct(int size) { - void *oldldt, *newldt; - int oldsize; - - if (mincount <= pc->size) - return 0; - oldsize = pc->size; - mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & - (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); - if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) - newldt = vmalloc(mincount * LDT_ENTRY_SIZE); + struct ldt_struct *new_ldt; + int alloc_size; + + if (size > LDT_ENTRIES) + return NULL; + + new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); + if (!new_ldt) + return NULL; + + BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); + alloc_size = size * LDT_ENTRY_SIZE; + + /* + * Xen is very picky: it requires a page-aligned LDT that has no + * trailing nonzero bytes in any page that contains LDT descriptors. + * Keep it simple: zero the whole allocation and never allocate less + * than PAGE_SIZE. + */ + if (alloc_size > PAGE_SIZE) + new_ldt->entries = vzalloc(alloc_size); else - newldt = (void *)__get_free_page(GFP_KERNEL); - - if (!newldt) - return -ENOMEM; + new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (oldsize) - memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); - oldldt = pc->ldt; - memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, - (mincount - oldsize) * LDT_ENTRY_SIZE); + if (!new_ldt->entries) { + kfree(new_ldt); + return NULL; + } - paravirt_alloc_ldt(newldt, mincount); + new_ldt->size = size; + return new_ldt; +} -#ifdef CONFIG_X86_64 - /* CHECKME: Do we really need this ? */ - wmb(); -#endif - pc->ldt = newldt; - wmb(); - pc->size = mincount; - wmb(); - - if (reload) { -#ifdef CONFIG_SMP - preempt_disable(); - load_LDT(pc); - if (!cpumask_equal(mm_cpumask(current->mm), - cpumask_of(smp_processor_id()))) - smp_call_function(flush_ldt, current->mm, 1); - preempt_enable(); -#else - load_LDT(pc); -#endif - } - if (oldsize) { - paravirt_free_ldt(oldldt, oldsize); - if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(oldldt); - else - put_page(virt_to_page(oldldt)); - } - return 0; +/* After calling this, the LDT is immutable. */ +static void finalize_ldt_struct(struct ldt_struct *ldt) +{ + paravirt_alloc_ldt(ldt->entries, ldt->size); } -static inline int copy_ldt(mm_context_t *new, mm_context_t *old) +/* context.lock is held */ +static void install_ldt(struct mm_struct *current_mm, + struct ldt_struct *ldt) { - int err = alloc_ldt(new, old->size, 0); - int i; + /* Synchronizes with lockless_dereference in load_mm_ldt. */ + smp_store_release(¤t_mm->context.ldt, ldt); + + /* Activate the LDT for all CPUs using current_mm. 
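Editor's sketch, separate from the patch: install_ldt() above publishes the new table with smp_store_release() so that readers using lockless_dereference() in load_mm_ldt() never see a half-initialised ldt_struct. A userspace analogue of that publication pattern, using C11 release/acquire as the portable stand-in:

/* Build with: cc -pthread publish.c */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct ldt_like {
	int size;
	int entries[8];
};

static _Atomic(struct ldt_like *) current_ldt;	/* starts out NULL */

static void *reader(void *arg)
{
	struct ldt_like *ldt;

	/* Spin until the writer publishes a pointer. */
	while (!(ldt = atomic_load_explicit(&current_ldt, memory_order_acquire)))
		;
	/* The acquire load pairs with the release store below, so the
	 * contents written before publication are guaranteed visible. */
	printf("reader sees size=%d entry0=%d\n", ldt->size, ldt->entries[0]);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct ldt_like *ldt = calloc(1, sizeof(*ldt));

	pthread_create(&tid, NULL, reader, NULL);

	ldt->size = 1;
	ldt->entries[0] = 42;
	/* Publish only after the object is fully initialised. */
	atomic_store_explicit(&current_ldt, ldt, memory_order_release);

	pthread_join(tid, NULL);
	free(ldt);
	return 0;
}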
*/ + on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); +} - if (err < 0) - return err; +static void free_ldt_struct(struct ldt_struct *ldt) +{ + if (likely(!ldt)) + return; - for (i = 0; i < old->size; i++) - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); - return 0; + paravirt_free_ldt(ldt->entries, ldt->size); + if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) + vfree(ldt->entries); + else + kfree(ldt->entries); + kfree(ldt); } /* @@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + struct ldt_struct *new_ldt; struct mm_struct *old_mm; int retval = 0; mutex_init(&mm->context.lock); - mm->context.size = 0; old_mm = current->mm; - if (old_mm && old_mm->context.size > 0) { - mutex_lock(&old_mm->context.lock); - retval = copy_ldt(&mm->context, &old_mm->context); - mutex_unlock(&old_mm->context.lock); + if (!old_mm) { + mm->context.ldt = NULL; + return 0; } + + mutex_lock(&old_mm->context.lock); + if (!old_mm->context.ldt) { + mm->context.ldt = NULL; + goto out_unlock; + } + + new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); + if (!new_ldt) { + retval = -ENOMEM; + goto out_unlock; + } + + memcpy(new_ldt->entries, old_mm->context.ldt->entries, + new_ldt->size * LDT_ENTRY_SIZE); + finalize_ldt_struct(new_ldt); + + mm->context.ldt = new_ldt; + +out_unlock: + mutex_unlock(&old_mm->context.lock); return retval; } @@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) */ void destroy_context(struct mm_struct *mm) { - if (mm->context.size) { -#ifdef CONFIG_X86_32 - /* CHECKME: Can this ever happen ? */ - if (mm == current->active_mm) - clear_LDT(); -#endif - paravirt_free_ldt(mm->context.ldt, mm->context.size); - if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(mm->context.ldt); - else - put_page(virt_to_page(mm->context.ldt)); - mm->context.size = 0; - } + free_ldt_struct(mm->context.ldt); + mm->context.ldt = NULL; } static int read_ldt(void __user *ptr, unsigned long bytecount) { - int err; + int retval; unsigned long size; struct mm_struct *mm = current->mm; - if (!mm->context.size) - return 0; + mutex_lock(&mm->context.lock); + + if (!mm->context.ldt) { + retval = 0; + goto out_unlock; + } + if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; - mutex_lock(&mm->context.lock); - size = mm->context.size * LDT_ENTRY_SIZE; + size = mm->context.ldt->size * LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; - err = 0; - if (copy_to_user(ptr, mm->context.ldt, size)) - err = -EFAULT; - mutex_unlock(&mm->context.lock); - if (err < 0) - goto error_return; + if (copy_to_user(ptr, mm->context.ldt->entries, size)) { + retval = -EFAULT; + goto out_unlock; + } + if (size != bytecount) { - /* zero-fill the rest */ - if (clear_user(ptr + size, bytecount - size) != 0) { - err = -EFAULT; - goto error_return; + /* Zero-fill the rest and pretend we read bytecount bytes. 
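Illustrative aside, not kernel code: read_ldt() above copies whatever the LDT holds, zero-fills the rest of the user buffer, and still reports bytecount as read. The same semantics in a tiny standalone form:

#include <stdio.h>
#include <string.h>

static long read_table(unsigned char *dst, unsigned long bytecount,
			const unsigned char *table, unsigned long table_bytes)
{
	unsigned long size = table_bytes < bytecount ? table_bytes : bytecount;

	memcpy(dst, table, size);
	if (size != bytecount)
		memset(dst + size, 0, bytecount - size);	/* zero-fill the rest */
	return bytecount;	/* pretend we read bytecount bytes */
}

int main(void)
{
	unsigned char table[4] = { 1, 2, 3, 4 };
	unsigned char buf[8];
	long n = read_table(buf, sizeof(buf), table, sizeof(table));

	printf("returned %ld, last byte %d\n", n, buf[7]);
	return 0;
}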
*/ + if (clear_user(ptr + size, bytecount - size)) { + retval = -EFAULT; + goto out_unlock; } } - return bytecount; -error_return: - return err; + retval = bytecount; + +out_unlock: + mutex_unlock(&mm->context.lock); + return retval; } static int read_default_ldt(void __user *ptr, unsigned long bytecount) @@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) struct desc_struct ldt; int error; struct user_desc ldt_info; + int oldsize, newsize; + struct ldt_struct *new_ldt, *old_ldt; error = -EINVAL; if (bytecount != sizeof(ldt_info)) @@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) goto out; } - mutex_lock(&mm->context.lock); - if (ldt_info.entry_number >= mm->context.size) { - error = alloc_ldt(¤t->mm->context, - ldt_info.entry_number + 1, 1); - if (error < 0) - goto out_unlock; - } - - /* Allow LDTs to be cleared by the user. */ - if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { - if (oldmode || LDT_empty(&ldt_info)) { - memset(&ldt, 0, sizeof(ldt)); - goto install; + if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) || + LDT_empty(&ldt_info)) { + /* The user wants to clear the entry. */ + memset(&ldt, 0, sizeof(ldt)); + } else { + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { + error = -EINVAL; + goto out; } + + fill_ldt(&ldt, &ldt_info); + if (oldmode) + ldt.avl = 0; } - if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { - error = -EINVAL; + mutex_lock(&mm->context.lock); + + old_ldt = mm->context.ldt; + oldsize = old_ldt ? old_ldt->size : 0; + newsize = max((int)(ldt_info.entry_number + 1), oldsize); + + error = -ENOMEM; + new_ldt = alloc_ldt_struct(newsize); + if (!new_ldt) goto out_unlock; - } - fill_ldt(&ldt, &ldt_info); - if (oldmode) - ldt.avl = 0; + if (old_ldt) + memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); + new_ldt->entries[ldt_info.entry_number] = ldt; + finalize_ldt_struct(new_ldt); - /* Install the new entry ... 
*/ -install: - write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt); + install_ldt(mm, new_ldt); + free_ldt_struct(old_ldt); error = 0; out_unlock: diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index c3e985d1751c..697f90db0e37 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -110,7 +110,7 @@ static void nmi_max_handler(struct irq_work *w) a->handler, whole_msecs, decimal_msecs); } -static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) +static int nmi_handle(unsigned int type, struct pt_regs *regs) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *a; @@ -213,7 +213,7 @@ static void pci_serr_error(unsigned char reason, struct pt_regs *regs) { /* check to see if anyone registered against these types of errors */ - if (nmi_handle(NMI_SERR, regs, false)) + if (nmi_handle(NMI_SERR, regs)) return; pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", @@ -247,7 +247,7 @@ io_check_error(unsigned char reason, struct pt_regs *regs) unsigned long i; /* check to see if anyone registered against these types of errors */ - if (nmi_handle(NMI_IO_CHECK, regs, false)) + if (nmi_handle(NMI_IO_CHECK, regs)) return; pr_emerg( @@ -284,7 +284,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs) * as only the first one is ever run (unless it can actually determine * if it caused the NMI) */ - handled = nmi_handle(NMI_UNKNOWN, regs, false); + handled = nmi_handle(NMI_UNKNOWN, regs); if (handled) { __this_cpu_add(nmi_stats.unknown, handled); return; @@ -332,7 +332,7 @@ static void default_do_nmi(struct pt_regs *regs) __this_cpu_write(last_nmi_rip, regs->ip); - handled = nmi_handle(NMI_LOCAL, regs, b2b); + handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); if (handled) { /* @@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs) NOKPROBE_SYMBOL(default_do_nmi); /* - * NMIs can hit breakpoints which will cause it to lose its - * NMI context with the CPU when the breakpoint does an iret. - */ -#ifdef CONFIG_X86_32 -/* - * For i386, NMIs use the same stack as the kernel, and we can - * add a workaround to the iret problem in C (preventing nested - * NMIs if an NMI takes a trap). Simply have 3 states the NMI - * can be in: + * NMIs can page fault or hit breakpoints which will cause it to lose + * its NMI context with the CPU when the breakpoint or page fault does an IRET. + * + * As a result, NMIs can nest if NMIs get unmasked due an IRET during + * NMI processing. On x86_64, the asm glue protects us from nested NMIs + * if the outer NMI came from kernel mode, but we can still nest if the + * outer NMI came from user mode. + * + * To handle these nested NMIs, we have three states: * * 1) not running * 2) executing @@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi); * (Note, the latch is binary, thus multiple NMIs triggering, * when one is running, are ignored. Only one NMI is restarted.) * - * If an NMI hits a breakpoint that executes an iret, another - * NMI can preempt it. We do not want to allow this new NMI - * to run, but we want to execute it when the first one finishes. - * We set the state to "latched", and the exit of the first NMI will - * perform a dec_return, if the result is zero (NOT_RUNNING), then - * it will simply exit the NMI handler. If not, the dec_return - * would have set the state to NMI_EXECUTING (what we want it to - * be when we are running). In this case, we simply jump back - * to rerun the NMI handler again, and restart the 'latched' NMI. 
+ * If an NMI executes an iret, another NMI can preempt it. We do not + * want to allow this new NMI to run, but we want to execute it when the + * first one finishes. We set the state to "latched", and the exit of + * the first NMI will perform a dec_return, if the result is zero + * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the + * dec_return would have set the state to NMI_EXECUTING (what we want it + * to be when we are running). In this case, we simply jump back to + * rerun the NMI handler again, and restart the 'latched' NMI. * * No trap (breakpoint or page fault) should be hit before nmi_restart, * thus there is no race between the first check of state for NOT_RUNNING @@ -461,49 +460,36 @@ enum nmi_states { static DEFINE_PER_CPU(enum nmi_states, nmi_state); static DEFINE_PER_CPU(unsigned long, nmi_cr2); -#define nmi_nesting_preprocess(regs) \ - do { \ - if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \ - this_cpu_write(nmi_state, NMI_LATCHED); \ - return; \ - } \ - this_cpu_write(nmi_state, NMI_EXECUTING); \ - this_cpu_write(nmi_cr2, read_cr2()); \ - } while (0); \ - nmi_restart: - -#define nmi_nesting_postprocess() \ - do { \ - if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \ - write_cr2(this_cpu_read(nmi_cr2)); \ - if (this_cpu_dec_return(nmi_state)) \ - goto nmi_restart; \ - } while (0) -#else /* x86_64 */ +#ifdef CONFIG_X86_64 /* - * In x86_64 things are a bit more difficult. This has the same problem - * where an NMI hitting a breakpoint that calls iret will remove the - * NMI context, allowing a nested NMI to enter. What makes this more - * difficult is that both NMIs and breakpoints have their own stack. - * When a new NMI or breakpoint is executed, the stack is set to a fixed - * point. If an NMI is nested, it will have its stack set at that same - * fixed address that the first NMI had, and will start corrupting the - * stack. This is handled in entry_64.S, but the same problem exists with - * the breakpoint stack. + * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without + * some care, the inner breakpoint will clobber the outer breakpoint's + * stack. * - * If a breakpoint is being processed, and the debug stack is being used, - * if an NMI comes in and also hits a breakpoint, the stack pointer - * will be set to the same fixed address as the breakpoint that was - * interrupted, causing that stack to be corrupted. To handle this case, - * check if the stack that was interrupted is the debug stack, and if - * so, change the IDT so that new breakpoints will use the current stack - * and not switch to the fixed address. On return of the NMI, switch back - * to the original IDT. + * If a breakpoint is being processed, and the debug stack is being + * used, if an NMI comes in and also hits a breakpoint, the stack + * pointer will be set to the same fixed address as the breakpoint that + * was interrupted, causing that stack to be corrupted. To handle this + * case, check if the stack that was interrupted is the debug stack, and + * if so, change the IDT so that new breakpoints will use the current + * stack and not switch to the fixed address. On return of the NMI, + * switch back to the original IDT. 
*/ static DEFINE_PER_CPU(int, update_debug_stack); +#endif -static inline void nmi_nesting_preprocess(struct pt_regs *regs) +dotraplinkage notrace void +do_nmi(struct pt_regs *regs, long error_code) { + if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { + this_cpu_write(nmi_state, NMI_LATCHED); + return; + } + this_cpu_write(nmi_state, NMI_EXECUTING); + this_cpu_write(nmi_cr2, read_cr2()); +nmi_restart: + +#ifdef CONFIG_X86_64 /* * If we interrupted a breakpoint, it is possible that * the nmi handler will have breakpoints too. We need to @@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs) debug_stack_set_zero(); this_cpu_write(update_debug_stack, 1); } -} - -static inline void nmi_nesting_postprocess(void) -{ - if (unlikely(this_cpu_read(update_debug_stack))) { - debug_stack_reset(); - this_cpu_write(update_debug_stack, 0); - } -} #endif -dotraplinkage notrace void -do_nmi(struct pt_regs *regs, long error_code) -{ - nmi_nesting_preprocess(regs); - nmi_enter(); inc_irq_stat(__nmi_count); @@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code) nmi_exit(); - /* On i386, may loop back to preprocess */ - nmi_nesting_postprocess(); +#ifdef CONFIG_X86_64 + if (unlikely(this_cpu_read(update_debug_stack))) { + debug_stack_reset(); + this_cpu_write(update_debug_stack, 0); + } +#endif + + if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) + write_cr2(this_cpu_read(nmi_cr2)); + if (this_cpu_dec_return(nmi_state)) + goto nmi_restart; } NOKPROBE_SYMBOL(do_nmi); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 58bcfb67c01f..f68e48f5f6c2 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -351,9 +351,7 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .wbinvd = native_wbinvd, .read_msr = native_read_msr_safe, .write_msr = native_write_msr_safe, - .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, - .read_tscp = native_read_tscp, .load_tr_desc = native_load_tr_desc, .set_ldt = native_set_ldt, .load_gdt = native_load_gdt, diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index e1b013696dde..c89f50a76e97 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -10,7 +10,6 @@ DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax"); DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3"); DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax"); DEF_NATIVE(pv_cpu_ops, clts, "clts"); -DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc"); #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS) DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)"); @@ -52,7 +51,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, PATCH_SITE(pv_mmu_ops, read_cr3); PATCH_SITE(pv_mmu_ops, write_cr3); PATCH_SITE(pv_cpu_ops, clts); - PATCH_SITE(pv_cpu_ops, read_tsc); #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS) case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): if (pv_is_native_spin_unlock()) { diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 9cad694ed7c4..6d0e62ae8516 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -29,6 +29,8 @@ #include <asm/debugreg.h> #include <asm/nmi.h> #include <asm/tlbflush.h> +#include <asm/mce.h> +#include <asm/vm86.h> /* * per-CPU TSS segments. 
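Editor's sketch, separate from the patch: a single-threaded simulation of the three-state NMI latch described above. A nested NMI arriving while one is executing is only recorded as LATCHED, and the decrement-and-test on exit reruns the handler once instead of nesting:

#include <stdio.h>

enum nmi_states { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED };

static enum nmi_states nmi_state;
static int nested_pending;	/* pretend one nested NMI arrives mid-handler */

static void nmi(void)
{
	if (nmi_state != NMI_NOT_RUNNING) {
		nmi_state = NMI_LATCHED;	/* remember it, run it later */
		return;
	}
	nmi_state = NMI_EXECUTING;
restart:
	printf("handling NMI\n");
	if (nested_pending) {			/* nested NMI hits the handler */
		nested_pending = 0;
		nmi();
	}
	if (--nmi_state)			/* LATCHED -> EXECUTING: rerun once */
		goto restart;
}

int main(void)
{
	nested_pending = 1;
	nmi();					/* prints "handling NMI" twice */
	return 0;
}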
Threads are completely 'soft' on Linux, @@ -81,7 +83,7 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister); */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { - *dst = *src; + memcpy(dst, src, arch_task_struct_size); return fpu__copy(&dst->thread.fpu, &src->thread.fpu); } @@ -110,6 +112,8 @@ void exit_thread(void) kfree(bp); } + free_vm86(t); + fpu__drop(fpu); } @@ -319,6 +323,7 @@ void stop_this_cpu(void *dummy) */ set_cpu_online(smp_processor_id(), false); disable_local_APIC(); + mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); for (;;) halt(); @@ -408,6 +413,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) static void mwait_idle(void) { if (!current_set_polling_and_test()) { + trace_cpu_idle_rcuidle(1, smp_processor_id()); if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) { smp_mb(); /* quirk */ clflush((void *)¤t_thread_info()->flags); @@ -419,6 +425,7 @@ static void mwait_idle(void) __sti_mwait(0, 0); else local_irq_enable(); + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } else { local_irq_enable(); } diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index f73c962fe636..c13df2c735f8 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -53,6 +53,7 @@ #include <asm/syscalls.h> #include <asm/debugreg.h> #include <asm/switch_to.h> +#include <asm/vm86.h> asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread"); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 71d7849a07f7..3c1bbcf12924 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -121,13 +121,15 @@ void __show_regs(struct pt_regs *regs, int all) void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { - if (dead_task->mm->context.size) { +#ifdef CONFIG_MODIFY_LDT_SYSCALL + if (dead_task->mm->context.ldt) { pr_warn("WARNING: dead process %s still has LDT? 
<%p/%d>\n", dead_task->comm, dead_task->mm->context.ldt, - dead_task->mm->context.size); + dead_task->mm->context.ldt->size); BUG(); } +#endif } } @@ -248,8 +250,8 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) __USER_CS, __USER_DS, 0); } -#ifdef CONFIG_IA32_EMULATION -void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp) +#ifdef CONFIG_COMPAT +void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp) { start_thread_common(regs, new_ip, new_sp, test_thread_flag(TIF_X32) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 9be72bc3613f..558f50edebca 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -37,12 +37,10 @@ #include <asm/proto.h> #include <asm/hw_breakpoint.h> #include <asm/traps.h> +#include <asm/syscall.h> #include "tls.h" -#define CREATE_TRACE_POINTS -#include <trace/events/syscalls.h> - enum x86_regset { REGSET_GENERAL, REGSET_FP, @@ -1123,6 +1121,73 @@ static int genregs32_set(struct task_struct *target, return ret; } +static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t caddr, compat_ulong_t cdata) +{ + unsigned long addr = caddr; + unsigned long data = cdata; + void __user *datap = compat_ptr(data); + int ret; + __u32 val; + + switch (request) { + case PTRACE_PEEKUSR: + ret = getreg32(child, addr, &val); + if (ret == 0) + ret = put_user(val, (__u32 __user *)datap); + break; + + case PTRACE_POKEUSR: + ret = putreg32(child, addr, data); + break; + + case PTRACE_GETREGS: /* Get all gp regs from the child. */ + return copy_regset_to_user(child, &user_x86_32_view, + REGSET_GENERAL, + 0, sizeof(struct user_regs_struct32), + datap); + + case PTRACE_SETREGS: /* Set all gp regs in the child. */ + return copy_regset_from_user(child, &user_x86_32_view, + REGSET_GENERAL, 0, + sizeof(struct user_regs_struct32), + datap); + + case PTRACE_GETFPREGS: /* Get the child FPU state. */ + return copy_regset_to_user(child, &user_x86_32_view, + REGSET_FP, 0, + sizeof(struct user_i387_ia32_struct), + datap); + + case PTRACE_SETFPREGS: /* Set the child FPU state. */ + return copy_regset_from_user( + child, &user_x86_32_view, REGSET_FP, + 0, sizeof(struct user_i387_ia32_struct), datap); + + case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ + return copy_regset_to_user(child, &user_x86_32_view, + REGSET_XFP, 0, + sizeof(struct user32_fxsr_struct), + datap); + + case PTRACE_SETFPXREGS: /* Set the child extended FPU state. 
*/ + return copy_regset_from_user(child, &user_x86_32_view, + REGSET_XFP, 0, + sizeof(struct user32_fxsr_struct), + datap); + + case PTRACE_GET_THREAD_AREA: + case PTRACE_SET_THREAD_AREA: + return arch_ptrace(child, request, addr, data); + + default: + return compat_ptrace_request(child, request, addr, data); + } + + return ret; +} +#endif /* CONFIG_IA32_EMULATION */ + #ifdef CONFIG_X86_X32_ABI static long x32_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, @@ -1211,78 +1276,21 @@ static long x32_arch_ptrace(struct task_struct *child, } #endif +#ifdef CONFIG_COMPAT long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { - unsigned long addr = caddr; - unsigned long data = cdata; - void __user *datap = compat_ptr(data); - int ret; - __u32 val; - #ifdef CONFIG_X86_X32_ABI if (!is_ia32_task()) return x32_arch_ptrace(child, request, caddr, cdata); #endif - - switch (request) { - case PTRACE_PEEKUSR: - ret = getreg32(child, addr, &val); - if (ret == 0) - ret = put_user(val, (__u32 __user *)datap); - break; - - case PTRACE_POKEUSR: - ret = putreg32(child, addr, data); - break; - - case PTRACE_GETREGS: /* Get all gp regs from the child. */ - return copy_regset_to_user(child, &user_x86_32_view, - REGSET_GENERAL, - 0, sizeof(struct user_regs_struct32), - datap); - - case PTRACE_SETREGS: /* Set all gp regs in the child. */ - return copy_regset_from_user(child, &user_x86_32_view, - REGSET_GENERAL, 0, - sizeof(struct user_regs_struct32), - datap); - - case PTRACE_GETFPREGS: /* Get the child FPU state. */ - return copy_regset_to_user(child, &user_x86_32_view, - REGSET_FP, 0, - sizeof(struct user_i387_ia32_struct), - datap); - - case PTRACE_SETFPREGS: /* Set the child FPU state. */ - return copy_regset_from_user( - child, &user_x86_32_view, REGSET_FP, - 0, sizeof(struct user_i387_ia32_struct), datap); - - case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ - return copy_regset_to_user(child, &user_x86_32_view, - REGSET_XFP, 0, - sizeof(struct user32_fxsr_struct), - datap); - - case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ - return copy_regset_from_user(child, &user_x86_32_view, - REGSET_XFP, 0, - sizeof(struct user32_fxsr_struct), - datap); - - case PTRACE_GET_THREAD_AREA: - case PTRACE_SET_THREAD_AREA: - return arch_ptrace(child, request, addr, data); - - default: - return compat_ptrace_request(child, request, addr, data); - } - - return ret; +#ifdef CONFIG_IA32_EMULATION + return ia32_arch_ptrace(child, request, caddr, cdata); +#else + return 0; +#endif } - -#endif /* CONFIG_IA32_EMULATION */ +#endif /* CONFIG_COMPAT */ #ifdef CONFIG_X86_64 @@ -1434,201 +1442,3 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, /* Send us the fake SIGTRAP */ force_sig_info(SIGTRAP, &info, tsk); } - -static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) -{ -#ifdef CONFIG_X86_64 - if (arch == AUDIT_ARCH_X86_64) { - audit_syscall_entry(regs->orig_ax, regs->di, - regs->si, regs->dx, regs->r10); - } else -#endif - { - audit_syscall_entry(regs->orig_ax, regs->bx, - regs->cx, regs->dx, regs->si); - } -} - -/* - * We can return 0 to resume the syscall or anything else to go to phase - * 2. If we resume the syscall, we need to put something appropriate in - * regs->orig_ax. - * - * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax - * are fully functional. 
- * - * For phase 2's benefit, our return value is: - * 0: resume the syscall - * 1: go to phase 2; no seccomp phase 2 needed - * anything else: go to phase 2; pass return value to seccomp - */ -unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) -{ - unsigned long ret = 0; - u32 work; - - BUG_ON(regs != task_pt_regs(current)); - - work = ACCESS_ONCE(current_thread_info()->flags) & - _TIF_WORK_SYSCALL_ENTRY; - - /* - * If TIF_NOHZ is set, we are required to call user_exit() before - * doing anything that could touch RCU. - */ - if (work & _TIF_NOHZ) { - user_exit(); - work &= ~_TIF_NOHZ; - } - -#ifdef CONFIG_SECCOMP - /* - * Do seccomp first -- it should minimize exposure of other - * code, and keeping seccomp fast is probably more valuable - * than the rest of this. - */ - if (work & _TIF_SECCOMP) { - struct seccomp_data sd; - - sd.arch = arch; - sd.nr = regs->orig_ax; - sd.instruction_pointer = regs->ip; -#ifdef CONFIG_X86_64 - if (arch == AUDIT_ARCH_X86_64) { - sd.args[0] = regs->di; - sd.args[1] = regs->si; - sd.args[2] = regs->dx; - sd.args[3] = regs->r10; - sd.args[4] = regs->r8; - sd.args[5] = regs->r9; - } else -#endif - { - sd.args[0] = regs->bx; - sd.args[1] = regs->cx; - sd.args[2] = regs->dx; - sd.args[3] = regs->si; - sd.args[4] = regs->di; - sd.args[5] = regs->bp; - } - - BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0); - BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1); - - ret = seccomp_phase1(&sd); - if (ret == SECCOMP_PHASE1_SKIP) { - regs->orig_ax = -1; - ret = 0; - } else if (ret != SECCOMP_PHASE1_OK) { - return ret; /* Go directly to phase 2 */ - } - - work &= ~_TIF_SECCOMP; - } -#endif - - /* Do our best to finish without phase 2. */ - if (work == 0) - return ret; /* seccomp and/or nohz only (ret == 0 here) */ - -#ifdef CONFIG_AUDITSYSCALL - if (work == _TIF_SYSCALL_AUDIT) { - /* - * If there is no more work to be done except auditing, - * then audit in phase 1. Phase 2 always audits, so, if - * we audit here, then we can't go on to phase 2. - */ - do_audit_syscall_entry(regs, arch); - return 0; - } -#endif - - return 1; /* Something is enabled that we can't handle in phase 1 */ -} - -/* Returns the syscall nr to run (which should match regs->orig_ax). */ -long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch, - unsigned long phase1_result) -{ - long ret = 0; - u32 work = ACCESS_ONCE(current_thread_info()->flags) & - _TIF_WORK_SYSCALL_ENTRY; - - BUG_ON(regs != task_pt_regs(current)); - - /* - * If we stepped into a sysenter/syscall insn, it trapped in - * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. - * If user-mode had set TF itself, then it's still clear from - * do_debug() and we need to set it again to restore the user - * state. If we entered on the slow path, TF was already set. - */ - if (work & _TIF_SINGLESTEP) - regs->flags |= X86_EFLAGS_TF; - -#ifdef CONFIG_SECCOMP - /* - * Call seccomp_phase2 before running the other hooks so that - * they can see any changes made by a seccomp tracer. - */ - if (phase1_result > 1 && seccomp_phase2(phase1_result)) { - /* seccomp failures shouldn't expose any additional code. 
*/ - return -1; - } -#endif - - if (unlikely(work & _TIF_SYSCALL_EMU)) - ret = -1L; - - if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) && - tracehook_report_syscall_entry(regs)) - ret = -1L; - - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_enter(regs, regs->orig_ax); - - do_audit_syscall_entry(regs, arch); - - return ret ?: regs->orig_ax; -} - -long syscall_trace_enter(struct pt_regs *regs) -{ - u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64; - unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch); - - if (phase1_result == 0) - return regs->orig_ax; - else - return syscall_trace_enter_phase2(regs, arch, phase1_result); -} - -void syscall_trace_leave(struct pt_regs *regs) -{ - bool step; - - /* - * We may come here right after calling schedule_user() - * or do_notify_resume(), in which case we can be in RCU - * user mode. - */ - user_exit(); - - audit_syscall_exit(regs); - - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_exit(regs, regs->ax); - - /* - * If TIF_SYSCALL_EMU is set, we only get here because of - * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP). - * We already reported this syscall instruction in - * syscall_trace_enter(). - */ - step = unlikely(test_thread_flag(TIF_SINGLESTEP)) && - !test_thread_flag(TIF_SYSCALL_EMU); - if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); - - user_enter(); -} diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 80f874bf999e..b143c2d04420 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -916,11 +916,6 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_32 apm_info.bios = boot_params.apm_bios_info; ist_info = boot_params.ist_info; - if (boot_params.sys_desc_table.length != 0) { - machine_id = boot_params.sys_desc_table.table[0]; - machine_submodel_id = boot_params.sys_desc_table.table[1]; - BIOS_revision = boot_params.sys_desc_table.table[2]; - } #endif saved_video_mode = boot_params.hdr.vid_mode; bootloader_type = boot_params.hdr.type_of_loader; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 206996c1669d..da52e6bb5c7f 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -31,11 +31,11 @@ #include <asm/vdso.h> #include <asm/mce.h> #include <asm/sighandling.h> +#include <asm/vm86.h> #ifdef CONFIG_X86_64 #include <asm/proto.h> #include <asm/ia32_unistd.h> -#include <asm/sys_ia32.h> #endif /* CONFIG_X86_64 */ #include <asm/syscall.h> @@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) COPY(r15); #endif /* CONFIG_X86_64 */ +#ifdef CONFIG_X86_32 COPY_SEG_CPL3(cs); COPY_SEG_CPL3(ss); +#else /* !CONFIG_X86_32 */ + /* Kernel saves and restores only the CS segment register on signals, + * which is the bare minimum needed to allow mixed 32/64-bit code. + * App's signal handler can save/restore other segments if needed. 
*/ + COPY_SEG_CPL3(cs); +#endif /* CONFIG_X86_32 */ get_user_ex(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); @@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, #else /* !CONFIG_X86_32 */ put_user_ex(regs->flags, &sc->flags); put_user_ex(regs->cs, &sc->cs); - put_user_ex(0, &sc->__pad2); - put_user_ex(0, &sc->__pad1); - put_user_ex(regs->ss, &sc->ss); + put_user_ex(0, &sc->gs); + put_user_ex(0, &sc->fs); #endif /* CONFIG_X86_32 */ put_user_ex(fpstate, &sc->fpstate); @@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, regs->sp = (unsigned long)frame; - /* - * Set up the CS and SS registers to run signal handlers in - * 64-bit mode, even if the handler happens to be interrupting - * 32-bit or 16-bit code. - * - * SS is subtle. In 64-bit mode, we don't need any particular - * SS descriptor, but we do need SS to be valid. It's possible - * that the old SS is entirely bogus -- this can happen if the - * signal we're trying to deliver is #GP or #SS caused by a bad - * SS value. - */ + /* Set up the CS register to run signal handlers in 64-bit mode, + even if the handler happens to be interrupting 32-bit code. */ regs->cs = __USER_CS; - regs->ss = __USER_DS; return 0; } @@ -636,6 +632,9 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) bool stepping, failed; struct fpu *fpu = ¤t->thread.fpu; + if (v8086_mode(regs)) + save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL); + /* Are we from a system call? */ if (syscall_get_nr(current, regs) >= 0) { /* If so, check system call restarting.. */ @@ -701,7 +700,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -static void do_signal(struct pt_regs *regs) +void do_signal(struct pt_regs *regs) { struct ksignal ksig; @@ -736,32 +735,6 @@ static void do_signal(struct pt_regs *regs) restore_saved_sigmask(); } -/* - * notification of userspace execution resumption - * - triggered by the TIF_WORK_MASK flags - */ -__visible void -do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) -{ - user_exit(); - - if (thread_info_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - - /* deal with pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) - do_signal(regs); - - if (thread_info_flags & _TIF_NOTIFY_RESUME) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } - if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) - fire_user_return_notifiers(); - - user_enter(); -} - void signal_fault(struct pt_regs *regs, void __user *frame, char *where) { struct task_struct *me = current; diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c new file mode 100644 index 000000000000..dc3c0b1c816f --- /dev/null +++ b/arch/x86/kernel/signal_compat.c @@ -0,0 +1,95 @@ +#include <linux/compat.h> +#include <linux/uaccess.h> + +int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) +{ + int err = 0; + bool ia32 = test_thread_flag(TIF_IA32); + + if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) + return -EFAULT; + + put_user_try { + /* If you change siginfo_t structure, please make sure that + this code is fixed accordingly. + It should never copy any pad contained in the structure + to avoid security leaks, but must copy the generic + 3 ints plus the relevant union member. 
*/ + put_user_ex(from->si_signo, &to->si_signo); + put_user_ex(from->si_errno, &to->si_errno); + put_user_ex((short)from->si_code, &to->si_code); + + if (from->si_code < 0) { + put_user_ex(from->si_pid, &to->si_pid); + put_user_ex(from->si_uid, &to->si_uid); + put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr); + } else { + /* + * First 32bits of unions are always present: + * si_pid === si_band === si_tid === si_addr(LS half) + */ + put_user_ex(from->_sifields._pad[0], + &to->_sifields._pad[0]); + switch (from->si_code >> 16) { + case __SI_FAULT >> 16: + break; + case __SI_SYS >> 16: + put_user_ex(from->si_syscall, &to->si_syscall); + put_user_ex(from->si_arch, &to->si_arch); + break; + case __SI_CHLD >> 16: + if (ia32) { + put_user_ex(from->si_utime, &to->si_utime); + put_user_ex(from->si_stime, &to->si_stime); + } else { + put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime); + put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime); + } + put_user_ex(from->si_status, &to->si_status); + /* FALL THROUGH */ + default: + case __SI_KILL >> 16: + put_user_ex(from->si_uid, &to->si_uid); + break; + case __SI_POLL >> 16: + put_user_ex(from->si_fd, &to->si_fd); + break; + case __SI_TIMER >> 16: + put_user_ex(from->si_overrun, &to->si_overrun); + put_user_ex(ptr_to_compat(from->si_ptr), + &to->si_ptr); + break; + /* This is not generated by the kernel as of now. */ + case __SI_RT >> 16: + case __SI_MESGQ >> 16: + put_user_ex(from->si_uid, &to->si_uid); + put_user_ex(from->si_int, &to->si_int); + break; + } + } + } put_user_catch(err); + + return err; +} + +int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) +{ + int err = 0; + u32 ptr32; + + if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) + return -EFAULT; + + get_user_try { + get_user_ex(to->si_signo, &from->si_signo); + get_user_ex(to->si_errno, &from->si_errno); + get_user_ex(to->si_code, &from->si_code); + + get_user_ex(to->si_pid, &from->si_pid); + get_user_ex(to->si_uid, &from->si_uid); + get_user_ex(ptr32, &from->si_ptr); + to->si_ptr = compat_ptr(ptr32); + } get_user_catch(err); + + return err; +} diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 15aaa69bbb5e..12c8286206ce 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -30,6 +30,7 @@ #include <asm/proto.h> #include <asm/apic.h> #include <asm/nmi.h> +#include <asm/mce.h> #include <asm/trace/irq_vectors.h> /* * Some notes on x86 processor bugs affecting SMP operation: @@ -243,6 +244,7 @@ static void native_stop_other_cpus(int wait) finish: local_irq_save(flags); disable_local_APIC(); + mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); local_irq_restore(flags); } diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index d3010aa79daf..e0c198e5f920 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -97,8 +97,6 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); -atomic_t init_deasserted; - static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) { unsigned long flags; @@ -146,16 +144,11 @@ static void smp_callin(void) /* * If waken up by an INIT in an 82489DX configuration - * we may get here before an INIT-deassert IPI reaches - * our local APIC. We have to wait for the IPI or we'll - * lock up on an APIC access. - * - * Since CPU0 is not wakened up by INIT, it doesn't wait for the IPI. 
+ * cpu_callout_mask guarantees we don't get here before + * an INIT_deassert IPI reaches our local APIC, so it is + * now safe to touch our local APIC. */ cpuid = smp_processor_id(); - if (apic->wait_for_init_deassert && cpuid) - while (!atomic_read(&init_deasserted)) - cpu_relax(); /* * (This works even if the APIC is not enabled.) @@ -620,7 +613,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) send_status = safe_apic_wait_icr_idle(); mb(); - atomic_set(&init_deasserted, 1); /* * Should we send STARTUP IPIs ? @@ -665,7 +657,8 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* * Give the other CPU some time to accept the IPI. */ - udelay(300); + if (init_udelay) + udelay(300); pr_debug("Startup point 1\n"); @@ -675,7 +668,8 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* * Give the other CPU some time to accept the IPI. */ - udelay(200); + if (init_udelay) + udelay(200); if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ apic_write(APIC_ESR, 0); @@ -859,8 +853,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) * the targeted processor. */ - atomic_set(&init_deasserted, 0); - if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { pr_debug("Setting warm reset code and vector.\n"); @@ -898,7 +890,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) if (!boot_error) { /* - * Wait 10s total for a response from AP + * Wait 10s total for first sign of life from AP */ boot_error = -1; timeout = jiffies + 10*HZ; @@ -911,7 +903,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) boot_error = 0; break; } - udelay(100); schedule(); } } @@ -927,7 +918,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) * for the MTRR work(triggered by the AP coming online) * to be completed in the stop machine context. */ - udelay(100); schedule(); } } @@ -992,8 +982,17 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle) common_cpu_up(cpu, tidle); + /* + * We have to walk the irq descriptors to setup the vector + * space for the cpu which comes online. Prevent irq + * alloc/free across the bringup. + */ + irq_lock_sparse(); + err = do_boot_cpu(apicid, cpu, tidle); + if (err) { + irq_unlock_sparse(); pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu); return -EIO; } @@ -1011,6 +1010,8 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle) touch_nmi_watchdog(); } + irq_unlock_sparse(); + return 0; } @@ -1347,7 +1348,7 @@ static void remove_siblinginfo(int cpu) cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); } -static void __ref remove_cpu_from_maps(int cpu) +static void remove_cpu_from_maps(int cpu) { set_cpu_online(cpu, false); cpumask_clear_cpu(cpu, cpu_callout_mask); diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 9b4d51d0c0d0..c9a073866ca7 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -5,6 +5,7 @@ #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> +#include <asm/mmu_context.h> unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { @@ -17,6 +18,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re return addr; } +#ifdef CONFIG_MODIFY_LDT_SYSCALL /* * We'll assume that the code segments in the GDT * are all zero-based. 
That is largely true: the @@ -27,13 +29,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re struct desc_struct *desc; unsigned long base; - seg &= ~7UL; + seg >>= 3; mutex_lock(&child->mm->context.lock); - if (unlikely((seg >> 3) >= child->mm->context.size)) + if (unlikely(!child->mm->context.ldt || + seg >= child->mm->context.ldt->size)) addr = -1L; /* bogus selector, access would fault */ else { - desc = child->mm->context.ldt + seg; + desc = &child->mm->context.ldt->entries[seg]; base = get_desc_base(desc); /* 16-bit code segment? */ @@ -43,6 +46,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re } mutex_unlock(&child->mm->context.lock); } +#endif return addr; } diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index 649b010da00b..12cbe2b88c0f 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c @@ -57,7 +57,7 @@ __setup("cpu0_hotplug", enable_cpu0_hotplug); * * This is only called for debugging CPU offline/online feature. */ -int __ref _debug_hotplug_cpu(int cpu, int action) +int _debug_hotplug_cpu(int cpu, int action) { struct device *dev = get_cpu_device(cpu); int ret; @@ -104,7 +104,7 @@ static int __init debug_hotplug_cpu(void) late_initcall_sync(debug_hotplug_cpu); #endif /* CONFIG_DEBUG_HOTPLUG_CPU0 */ -int __ref arch_register_cpu(int num) +int arch_register_cpu(int num) { struct cpuinfo_x86 *c = &cpu_data(num); diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c index 25b993729f9b..80bb24d9b880 100644 --- a/arch/x86/kernel/trace_clock.c +++ b/arch/x86/kernel/trace_clock.c @@ -12,10 +12,5 @@ */ u64 notrace trace_clock_x86_tsc(void) { - u64 ret; - - rdtsc_barrier(); - rdtscll(ret); - - return ret; + return rdtsc_ordered(); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index f5791927aa64..346eec73f7db 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -62,6 +62,7 @@ #include <asm/fpu/xstate.h> #include <asm/trace/mpx.h> #include <asm/mpx.h> +#include <asm/vm86.h> #ifdef CONFIG_X86_64 #include <asm/x86_init.h> @@ -108,13 +109,10 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) preempt_count_dec(); } -enum ctx_state ist_enter(struct pt_regs *regs) +void ist_enter(struct pt_regs *regs) { - enum ctx_state prev_state; - if (user_mode(regs)) { - /* Other than that, we're just an exception. */ - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); } else { /* * We might have interrupted pretty much anything. In @@ -123,32 +121,25 @@ enum ctx_state ist_enter(struct pt_regs *regs) * but we need to notify RCU. */ rcu_nmi_enter(); - prev_state = CONTEXT_KERNEL; /* the value is irrelevant. */ } /* - * We are atomic because we're on the IST stack (or we're on x86_32, - * in which case we still shouldn't schedule). - * - * This must be after exception_enter(), because exception_enter() - * won't do anything if in_interrupt() returns true. + * We are atomic because we're on the IST stack; or we're on + * x86_32, in which case we still shouldn't schedule; or we're + * on x86_64 and entered from user mode, in which case we're + * still atomic unless ist_begin_non_atomic is called. */ preempt_count_add(HARDIRQ_OFFSET); /* This code is a bit fragile. Test it. 
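Illustrative aside, not kernel code: the step.c change above indexes the LDT with seg >> 3 because the low three selector bits hold the requested privilege level and the table indicator, not part of the index. A tiny decode of an example selector:

#include <stdio.h>

int main(void)
{
	unsigned short sel = 0x004f;		/* example selector */
	unsigned int index = sel >> 3;		/* descriptor index */
	unsigned int ti = (sel >> 2) & 1;	/* 1 = LDT, 0 = GDT */
	unsigned int rpl = sel & 3;		/* requested privilege level */

	printf("index=%u ti=%u rpl=%u\n", index, ti, rpl);
	return 0;
}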
*/ - rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work"); - - return prev_state; + RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); } -void ist_exit(struct pt_regs *regs, enum ctx_state prev_state) +void ist_exit(struct pt_regs *regs) { - /* Must be before exception_exit. */ preempt_count_sub(HARDIRQ_OFFSET); - if (user_mode(regs)) - return exception_exit(prev_state); - else + if (!user_mode(regs)) rcu_nmi_exit(); } @@ -162,7 +153,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state) * a double fault, it can be safe to schedule. ist_begin_non_atomic() * begins a non-atomic section within an ist_enter()/ist_exit() region. * Callers are responsible for enabling interrupts themselves inside - * the non-atomic section, and callers must call is_end_non_atomic() + * the non-atomic section, and callers must call ist_end_non_atomic() * before ist_exit(). */ void ist_begin_non_atomic(struct pt_regs *regs) @@ -289,17 +280,16 @@ NOKPROBE_SYMBOL(do_trap); static void do_error_trap(struct pt_regs *regs, long error_code, char *str, unsigned long trapnr, int signr) { - enum ctx_state prev_state = exception_enter(); siginfo_t info; + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != NOTIFY_STOP) { conditional_sti(regs); do_trap(trapnr, signr, str, regs, error_code, fill_trap_info(regs, signr, trapnr, &info)); } - - exception_exit(prev_state); } #define DO_ERROR(trapnr, signr, str, name) \ @@ -351,7 +341,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) } #endif - ist_enter(regs); /* Discard prev_state because we won't return. */ + ist_enter(regs); notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); tsk->thread.error_code = error_code; @@ -371,14 +361,13 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; const struct bndcsr *bndcsr; siginfo_t *info; - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); if (notify_die(DIE_TRAP, "bounds", regs, error_code, X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP) - goto exit; + return; conditional_sti(regs); if (!user_mode(regs)) @@ -435,9 +424,8 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) die("bounds", regs, error_code); } -exit: - exception_exit(prev_state); return; + exit_trap: /* * This path out is for all the cases where we could not @@ -447,35 +435,33 @@ exit_trap: * time.. 
*/ do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL); - exception_exit(prev_state); } dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code) { struct task_struct *tsk; - enum ctx_state prev_state; - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); conditional_sti(regs); if (v8086_mode(regs)) { local_irq_enable(); handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); - goto exit; + return; } tsk = current; if (!user_mode(regs)) { if (fixup_exception(regs)) - goto exit; + return; tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; if (notify_die(DIE_GPF, "general protection fault", regs, error_code, X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) die("general protection fault", regs, error_code); - goto exit; + return; } tsk->thread.error_code = error_code; @@ -491,16 +477,12 @@ do_general_protection(struct pt_regs *regs, long error_code) } force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); -exit: - exception_exit(prev_state); } NOKPROBE_SYMBOL(do_general_protection); /* May run on IST stack. */ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; - #ifdef CONFIG_DYNAMIC_FTRACE /* * ftrace must be first, everything else may cause a recursive crash. @@ -513,7 +495,8 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) if (poke_int3_handler(regs)) return; - prev_state = ist_enter(regs); + ist_enter(regs); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) @@ -539,7 +522,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) preempt_conditional_cli(regs); debug_stack_usage_dec(); exit: - ist_exit(regs, prev_state); + ist_exit(regs); } NOKPROBE_SYMBOL(do_int3); @@ -615,12 +598,11 @@ NOKPROBE_SYMBOL(fixup_bad_iret); dotraplinkage void do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; - enum ctx_state prev_state; int user_icebp = 0; unsigned long dr6; int si_code; - prev_state = ist_enter(regs); + ist_enter(regs); get_debugreg(dr6, 6); @@ -695,7 +677,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_dec(); exit: - ist_exit(regs, prev_state); + ist_exit(regs); } NOKPROBE_SYMBOL(do_debug); @@ -747,21 +729,15 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; - - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); math_error(regs, error_code, X86_TRAP_MF); - exception_exit(prev_state); } dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; - - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); math_error(regs, error_code, X86_TRAP_XF); - exception_exit(prev_state); } dotraplinkage void @@ -773,9 +749,7 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code) { - enum ctx_state prev_state; - - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); BUG_ON(use_eager_fpu()); #ifdef CONFIG_MATH_EMULATION @@ -786,7 +760,6 @@ do_device_not_available(struct 
pt_regs *regs, long error_code) info.regs = regs; math_emulate(&info); - exception_exit(prev_state); return; } #endif @@ -794,7 +767,6 @@ do_device_not_available(struct pt_regs *regs, long error_code) #ifdef CONFIG_X86_32 conditional_sti(regs); #endif - exception_exit(prev_state); } NOKPROBE_SYMBOL(do_device_not_available); @@ -802,9 +774,8 @@ NOKPROBE_SYMBOL(do_device_not_available); dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) { siginfo_t info; - enum ctx_state prev_state; - prev_state = exception_enter(); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); local_irq_enable(); info.si_signo = SIGILL; @@ -816,7 +787,6 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, &info); } - exception_exit(prev_state); } #endif diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 7437b41f6a47..79055cf2c497 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) data = cyc2ns_write_begin(cpu); - rdtscll(tsc_now); + tsc_now = rdtsc(); ns_now = cycles_2_ns(tsc_now); /* @@ -290,12 +290,20 @@ u64 native_sched_clock(void) } /* read the Time Stamp Counter: */ - rdtscll(tsc_now); + tsc_now = rdtsc(); /* return the value in ns */ return cycles_2_ns(tsc_now); } +/* + * Generate a sched_clock if you already have a TSC value. + */ +u64 native_sched_clock_from_tsc(u64 tsc) +{ + return cycles_2_ns(tsc); +} + /* We need to define a real function for sched_clock, to override the weak default version */ #ifdef CONFIG_PARAVIRT @@ -308,12 +316,6 @@ unsigned long long sched_clock(void) __attribute__((alias("native_sched_clock"))); #endif -unsigned long long native_read_tsc(void) -{ - return __native_read_tsc(); -} -EXPORT_SYMBOL(native_read_tsc); - int check_tsc_unstable(void) { return tsc_unstable; @@ -976,7 +978,7 @@ static struct clocksource clocksource_tsc; */ static cycle_t read_tsc(struct clocksource *cs) { - return (cycle_t)get_cycles(); + return (cycle_t)rdtsc_ordered(); } /* diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index dd8d0791dfb5..78083bf23ed1 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -39,16 +39,15 @@ static cycles_t max_warp; static int nr_warps; /* - * TSC-warp measurement loop running on both CPUs: + * TSC-warp measurement loop running on both CPUs. This is not called + * if there is no TSC. */ static void check_tsc_warp(unsigned int timeout) { cycles_t start, now, prev, end; int i; - rdtsc_barrier(); - start = get_cycles(); - rdtsc_barrier(); + start = rdtsc_ordered(); /* * The measurement runs for 'timeout' msecs: */ @@ -63,9 +62,7 @@ static void check_tsc_warp(unsigned int timeout) */ arch_spin_lock(&sync_lock); prev = last_tsc; - rdtsc_barrier(); - now = get_cycles(); - rdtsc_barrier(); + now = rdtsc_ordered(); last_tsc = now; arch_spin_unlock(&sync_lock); @@ -126,7 +123,7 @@ void check_tsc_sync_source(int cpu) /* * No need to check if we already know that the TSC is not - * synchronized: + * synchronized or if we have no TSC. */ if (unsynchronized_tsc()) return; @@ -190,6 +187,7 @@ void check_tsc_sync_target(void) { int cpus = 2; + /* Also aborts if there is no TSC. 
*/ if (unsynchronized_tsc() || tsc_clocksource_reliable) return; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 66476244731e..bf4db6eaec8f 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -985,3 +985,12 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs return -1; } + +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */ + return regs->sp < ret->stack; + else + return regs->sp <= ret->stack; +} diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index fc9db6ef2a95..abd8b856bd2b 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -44,11 +44,14 @@ #include <linux/ptrace.h> #include <linux/audit.h> #include <linux/stddef.h> +#include <linux/slab.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/tlbflush.h> #include <asm/irq.h> +#include <asm/traps.h> +#include <asm/vm86.h> /* * Known problems: @@ -66,10 +69,6 @@ */ -#define KVM86 ((struct kernel_vm86_struct *)regs) -#define VMPI KVM86->vm86plus - - /* * 8- and 16-bit register defines.. */ @@ -81,8 +80,8 @@ /* * virtual flags (16 and 32-bit versions) */ -#define VFLAGS (*(unsigned short *)&(current->thread.v86flags)) -#define VEFLAGS (current->thread.v86flags) +#define VFLAGS (*(unsigned short *)&(current->thread.vm86->veflags)) +#define VEFLAGS (current->thread.vm86->veflags) #define set_flags(X, new, mask) \ ((X) = ((X) & ~(mask)) | ((new) & (mask))) @@ -90,46 +89,13 @@ #define SAFE_MASK (0xDD5) #define RETURN_MASK (0xDFF) -/* convert kernel_vm86_regs to vm86_regs */ -static int copy_vm86_regs_to_user(struct vm86_regs __user *user, - const struct kernel_vm86_regs *regs) -{ - int ret = 0; - - /* - * kernel_vm86_regs is missing gs, so copy everything up to - * (but not including) orig_eax, and then rest including orig_eax. 
- */ - ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax)); - ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax, - sizeof(struct kernel_vm86_regs) - - offsetof(struct kernel_vm86_regs, pt.orig_ax)); - - return ret; -} - -/* convert vm86_regs to kernel_vm86_regs */ -static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs, - const struct vm86_regs __user *user, - unsigned extra) -{ - int ret = 0; - - /* copy ax-fs inclusive */ - ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax)); - /* copy orig_ax-__gsh+extra */ - ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax, - sizeof(struct kernel_vm86_regs) - - offsetof(struct kernel_vm86_regs, pt.orig_ax) + - extra); - return ret; -} - -struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) +void save_v86_state(struct kernel_vm86_regs *regs, int retval) { struct tss_struct *tss; - struct pt_regs *ret; - unsigned long tmp; + struct task_struct *tsk = current; + struct vm86plus_struct __user *user; + struct vm86 *vm86 = current->thread.vm86; + long err = 0; /* * This gets called from entry.S with interrupts disabled, but @@ -138,31 +104,57 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) */ local_irq_enable(); - if (!current->thread.vm86_info) { - pr_alert("no vm86_info: BAD\n"); + if (!vm86 || !vm86->user_vm86) { + pr_alert("no user_vm86: BAD\n"); do_exit(SIGSEGV); } - set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask); - tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs); - tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap); - if (tmp) { - pr_alert("could not access userspace vm86_info\n"); + set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask); + user = vm86->user_vm86; + + if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ? 
+ sizeof(struct vm86plus_struct) : + sizeof(struct vm86_struct))) { + pr_alert("could not access userspace vm86 info\n"); + do_exit(SIGSEGV); + } + + put_user_try { + put_user_ex(regs->pt.bx, &user->regs.ebx); + put_user_ex(regs->pt.cx, &user->regs.ecx); + put_user_ex(regs->pt.dx, &user->regs.edx); + put_user_ex(regs->pt.si, &user->regs.esi); + put_user_ex(regs->pt.di, &user->regs.edi); + put_user_ex(regs->pt.bp, &user->regs.ebp); + put_user_ex(regs->pt.ax, &user->regs.eax); + put_user_ex(regs->pt.ip, &user->regs.eip); + put_user_ex(regs->pt.cs, &user->regs.cs); + put_user_ex(regs->pt.flags, &user->regs.eflags); + put_user_ex(regs->pt.sp, &user->regs.esp); + put_user_ex(regs->pt.ss, &user->regs.ss); + put_user_ex(regs->es, &user->regs.es); + put_user_ex(regs->ds, &user->regs.ds); + put_user_ex(regs->fs, &user->regs.fs); + put_user_ex(regs->gs, &user->regs.gs); + + put_user_ex(vm86->screen_bitmap, &user->screen_bitmap); + } put_user_catch(err); + if (err) { + pr_alert("could not access userspace vm86 info\n"); do_exit(SIGSEGV); } tss = &per_cpu(cpu_tss, get_cpu()); - current->thread.sp0 = current->thread.saved_sp0; - current->thread.sysenter_cs = __KERNEL_CS; - load_sp0(tss, &current->thread); - current->thread.saved_sp0 = 0; + tsk->thread.sp0 = vm86->saved_sp0; + tsk->thread.sysenter_cs = __KERNEL_CS; + load_sp0(tss, &tsk->thread); + vm86->saved_sp0 = 0; put_cpu(); - ret = KVM86->regs32; + memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs)); - ret->fs = current->thread.saved_fs; - set_user_gs(ret, current->thread.saved_gs); + lazy_load_gs(vm86->regs32.gs); - return ret; + regs->pt.ax = retval; } static void mark_screen_rdonly(struct mm_struct *mm) @@ -200,45 +192,16 @@ out: static int do_vm86_irq_handling(int subfunction, int irqnumber); -static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); +static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus); -SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86) +SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86) { - struct kernel_vm86_struct info; /* declare this _on top_, - * this avoids wasting of stack space. - * This remains on the stack until we - * return to 32 bit user space. - */ - struct task_struct *tsk = current; - int tmp; - - if (tsk->thread.saved_sp0) - return -EPERM; - tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, - offsetof(struct kernel_vm86_struct, vm86plus) - - sizeof(info.regs)); - if (tmp) - return -EFAULT; - memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); - info.regs32 = current_pt_regs(); - tsk->thread.vm86_info = v86; - do_sys_vm86(&info, tsk); - return 0; /* we never return here */ + return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false); } SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) { - struct kernel_vm86_struct info; /* declare this _on top_, - * this avoids wasting of stack space. - * This remains on the stack until we - * return to 32 bit user space. 
- */ - struct task_struct *tsk; - int tmp; - struct vm86plus_struct __user *v86; - - tsk = current; switch (cmd) { case VM86_REQUEST_IRQ: case VM86_FREE_IRQ: @@ -256,114 +219,133 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) } /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ - if (tsk->thread.saved_sp0) - return -EPERM; - v86 = (struct vm86plus_struct __user *)arg; - tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, - offsetof(struct kernel_vm86_struct, regs32) - - sizeof(info.regs)); - if (tmp) - return -EFAULT; - info.regs32 = current_pt_regs(); - info.vm86plus.is_vm86pus = 1; - tsk->thread.vm86_info = (struct vm86_struct __user *)v86; - do_sys_vm86(&info, tsk); - return 0; /* we never return here */ + return do_sys_vm86((struct vm86plus_struct __user *) arg, true); } -static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk) +static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) { struct tss_struct *tss; -/* - * make sure the vm86() system call doesn't try to do anything silly - */ - info->regs.pt.ds = 0; - info->regs.pt.es = 0; - info->regs.pt.fs = 0; -#ifndef CONFIG_X86_32_LAZY_GS - info->regs.pt.gs = 0; -#endif + struct task_struct *tsk = current; + struct vm86 *vm86 = tsk->thread.vm86; + struct kernel_vm86_regs vm86regs; + struct pt_regs *regs = current_pt_regs(); + unsigned long err = 0; + + if (!vm86) { + if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) + return -ENOMEM; + tsk->thread.vm86 = vm86; + } + if (vm86->saved_sp0) + return -EPERM; + + if (!access_ok(VERIFY_READ, user_vm86, plus ? + sizeof(struct vm86_struct) : + sizeof(struct vm86plus_struct))) + return -EFAULT; + + memset(&vm86regs, 0, sizeof(vm86regs)); + get_user_try { + unsigned short seg; + get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx); + get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx); + get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx); + get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi); + get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi); + get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp); + get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax); + get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip); + get_user_ex(seg, &user_vm86->regs.cs); + vm86regs.pt.cs = seg; + get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags); + get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp); + get_user_ex(seg, &user_vm86->regs.ss); + vm86regs.pt.ss = seg; + get_user_ex(vm86regs.es, &user_vm86->regs.es); + get_user_ex(vm86regs.ds, &user_vm86->regs.ds); + get_user_ex(vm86regs.fs, &user_vm86->regs.fs); + get_user_ex(vm86regs.gs, &user_vm86->regs.gs); + + get_user_ex(vm86->flags, &user_vm86->flags); + get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap); + get_user_ex(vm86->cpu_type, &user_vm86->cpu_type); + } get_user_catch(err); + if (err) + return err; + + if (copy_from_user(&vm86->int_revectored, + &user_vm86->int_revectored, + sizeof(struct revectored_struct))) + return -EFAULT; + if (copy_from_user(&vm86->int21_revectored, + &user_vm86->int21_revectored, + sizeof(struct revectored_struct))) + return -EFAULT; + if (plus) { + if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus, + sizeof(struct vm86plus_info_struct))) + return -EFAULT; + vm86->vm86plus.is_vm86pus = 1; + } else + memset(&vm86->vm86plus, 0, + sizeof(struct vm86plus_info_struct)); + + memcpy(&vm86->regs32, regs, sizeof(struct pt_regs)); + vm86->user_vm86 = user_vm86; /* * The flags register is also special: we cannot trust that the user * has set it up safely, so 
this makes sure interrupt etc flags are * inherited from protected mode. */ - VEFLAGS = info->regs.pt.flags; - info->regs.pt.flags &= SAFE_MASK; - info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK; - info->regs.pt.flags |= X86_VM_MASK; + VEFLAGS = vm86regs.pt.flags; + vm86regs.pt.flags &= SAFE_MASK; + vm86regs.pt.flags |= regs->flags & ~SAFE_MASK; + vm86regs.pt.flags |= X86_VM_MASK; + + vm86regs.pt.orig_ax = regs->orig_ax; - switch (info->cpu_type) { + switch (vm86->cpu_type) { case CPU_286: - tsk->thread.v86mask = 0; + vm86->veflags_mask = 0; break; case CPU_386: - tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; + vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; case CPU_486: - tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; + vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; default: - tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; + vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; } /* - * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL) + * Save old state */ - info->regs32->ax = VM86_SIGNAL; - tsk->thread.saved_sp0 = tsk->thread.sp0; - tsk->thread.saved_fs = info->regs32->fs; - tsk->thread.saved_gs = get_user_gs(info->regs32); + vm86->saved_sp0 = tsk->thread.sp0; + lazy_save_gs(vm86->regs32.gs); tss = &per_cpu(cpu_tss, get_cpu()); - tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; + /* make room for real-mode segments */ + tsk->thread.sp0 += 16; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; load_sp0(tss, &tsk->thread); put_cpu(); - tsk->thread.screen_bitmap = info->screen_bitmap; - if (info->flags & VM86_SCREEN_BITMAP) + if (vm86->flags & VM86_SCREEN_BITMAP) mark_screen_rdonly(tsk->mm); - /*call __audit_syscall_exit since we do not exit via the normal paths */ -#ifdef CONFIG_AUDITSYSCALL - if (unlikely(current->audit_context)) - __audit_syscall_exit(1, 0); -#endif - - __asm__ __volatile__( - "movl %0,%%esp\n\t" - "movl %1,%%ebp\n\t" -#ifdef CONFIG_X86_32_LAZY_GS - "mov %2, %%gs\n\t" -#endif - "jmp resume_userspace" - : /* no outputs */ - :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0)); - /* we never return here */ -} - -static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval) -{ - struct pt_regs *regs32; - - regs32 = save_v86_state(regs16); - regs32->ax = retval; - __asm__ __volatile__("movl %0,%%esp\n\t" - "movl %1,%%ebp\n\t" - "jmp resume_userspace" - : : "r" (regs32), "r" (current_thread_info())); + memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs)); + force_iret(); + return regs->ax; } static inline void set_IF(struct kernel_vm86_regs *regs) { VEFLAGS |= X86_EFLAGS_VIF; - if (VEFLAGS & X86_EFLAGS_VIP) - return_to_32bit(regs, VM86_STI); } static inline void clear_IF(struct kernel_vm86_regs *regs) @@ -395,7 +377,7 @@ static inline void clear_AC(struct kernel_vm86_regs *regs) static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs) { - set_flags(VEFLAGS, flags, current->thread.v86mask); + set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); @@ -405,7 +387,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs) { - set_flags(VFLAGS, flags, current->thread.v86mask); + set_flags(VFLAGS, flags, 
current->thread.vm86->veflags_mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); @@ -420,7 +402,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs) if (VEFLAGS & X86_EFLAGS_VIF) flags |= X86_EFLAGS_IF; flags |= X86_EFLAGS_IOPL; - return flags | (VEFLAGS & current->thread.v86mask); + return flags | (VEFLAGS & current->thread.vm86->veflags_mask); } static inline int is_revectored(int nr, struct revectored_struct *bitmap) @@ -518,12 +500,13 @@ static void do_int(struct kernel_vm86_regs *regs, int i, { unsigned long __user *intr_ptr; unsigned long segoffs; + struct vm86 *vm86 = current->thread.vm86; if (regs->pt.cs == BIOSSEG) goto cannot_handle; - if (is_revectored(i, &KVM86->int_revectored)) + if (is_revectored(i, &vm86->int_revectored)) goto cannot_handle; - if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) + if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored)) goto cannot_handle; intr_ptr = (unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) @@ -542,18 +525,16 @@ static void do_int(struct kernel_vm86_regs *regs, int i, return; cannot_handle: - return_to_32bit(regs, VM86_INTx + (i << 8)); + save_v86_state(regs, VM86_INTx + (i << 8)); } int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) { - if (VMPI.is_vm86pus) { + struct vm86 *vm86 = current->thread.vm86; + + if (vm86->vm86plus.is_vm86pus) { if ((trapno == 3) || (trapno == 1)) { - KVM86->regs32->ax = VM86_TRAP + (trapno << 8); - /* setting this flag forces the code in entry_32.S to - the path where we call save_v86_state() and change - the stack pointer to KVM86->regs32 */ - set_thread_flag(TIF_NOTIFY_RESUME); + save_v86_state(regs, VM86_TRAP + (trapno << 8)); return 0; } do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); @@ -574,16 +555,11 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) unsigned char __user *ssp; unsigned short ip, sp, orig_flags; int data32, pref_done; + struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus; #define CHECK_IF_IN_TRAP \ - if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \ + if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \ newflags |= X86_EFLAGS_TF -#define VM86_FAULT_RETURN do { \ - if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \ - return_to_32bit(regs, VM86_PICRETURN); \ - if (orig_flags & X86_EFLAGS_TF) \ - handle_vm86_trap(regs, 0, 1); \ - return; } while (0) orig_flags = *(unsigned short *)&regs->pt.flags; @@ -622,7 +598,7 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) SP(regs) -= 2; } IP(regs) = ip; - VM86_FAULT_RETURN; + goto vm86_fault_return; /* popf */ case 0x9d: @@ -642,16 +618,18 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) else set_vflags_short(newflags, regs); - VM86_FAULT_RETURN; + goto check_vip; } /* int xx */ case 0xcd: { int intno = popb(csp, ip, simulate_sigsegv); IP(regs) = ip; - if (VMPI.vm86dbg_active) { - if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3]) - return_to_32bit(regs, VM86_INTx + (intno << 8)); + if (vmpi->vm86dbg_active) { + if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) { + save_v86_state(regs, VM86_INTx + (intno << 8)); + return; + } } do_int(regs, intno, ssp, sp); return; @@ -682,14 +660,14 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) } else { set_vflags_short(newflags, regs); } - VM86_FAULT_RETURN; + goto check_vip; } /* cli */ 
case 0xfa: IP(regs) = ip; clear_IF(regs); - VM86_FAULT_RETURN; + goto vm86_fault_return; /* sti */ /* @@ -701,14 +679,29 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) case 0xfb: IP(regs) = ip; set_IF(regs); - VM86_FAULT_RETURN; + goto check_vip; default: - return_to_32bit(regs, VM86_UNKNOWN); + save_v86_state(regs, VM86_UNKNOWN); } return; +check_vip: + if (VEFLAGS & X86_EFLAGS_VIP) { + save_v86_state(regs, VM86_STI); + return; + } + +vm86_fault_return: + if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) { + save_v86_state(regs, VM86_PICRETURN); + return; + } + if (orig_flags & X86_EFLAGS_TF) + handle_vm86_trap(regs, 0, X86_TRAP_DB); + return; + simulate_sigsegv: /* FIXME: After a long discussion with Stas we finally * agreed, that this is wrong. Here we should @@ -720,7 +713,7 @@ simulate_sigsegv: * should be a mixture of the two, but how do we * get the information? [KD] */ - return_to_32bit(regs, VM86_UNKNOWN); + save_v86_state(regs, VM86_UNKNOWN); } /* ---------------- vm86 special IRQ passing stuff ----------------- */ diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 67d215cb8953..a1ff508bb423 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -12,7 +12,9 @@ kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ - i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o + i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ + hyperv.o + kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o kvm-intel-y += vmx.o pmu_intel.o kvm-amd-y += svm.o pmu_amd.o diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 64dd46793099..2fbea2544f24 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -98,6 +98,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu) best->ebx = xstate_required_size(vcpu->arch.xcr0, true); vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu); + if (vcpu->arch.eager_fpu) + kvm_x86_ops->fpu_activate(vcpu); /* * The existing code assumes virtual address is 48-bit in the canonical diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c new file mode 100644 index 000000000000..a8160d2ae362 --- /dev/null +++ b/arch/x86/kvm/hyperv.c @@ -0,0 +1,377 @@ +/* + * KVM Microsoft Hyper-V emulation + * + * derived from arch/x86/kvm/x86.c + * + * Copyright (C) 2006 Qumranet, Inc. + * Copyright (C) 2008 Qumranet, Inc. + * Copyright IBM Corporation, 2008 + * Copyright 2010 Red Hat, Inc. and/or its affiliates. + * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com> + * + * Authors: + * Avi Kivity <avi@qumranet.com> + * Yaniv Kamay <yaniv@qumranet.com> + * Amit Shah <amit.shah@qumranet.com> + * Ben-Ami Yassour <benami@il.ibm.com> + * Andrey Smetanin <asmetanin@virtuozzo.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include "x86.h" +#include "lapic.h" +#include "hyperv.h" + +#include <linux/kvm_host.h> +#include <trace/events/kvm.h> + +#include "trace.h" + +static bool kvm_hv_msr_partition_wide(u32 msr) +{ + bool r = false; + + switch (msr) { + case HV_X64_MSR_GUEST_OS_ID: + case HV_X64_MSR_HYPERCALL: + case HV_X64_MSR_REFERENCE_TSC: + case HV_X64_MSR_TIME_REF_COUNT: + case HV_X64_MSR_CRASH_CTL: + case HV_X64_MSR_CRASH_P0 ... 
HV_X64_MSR_CRASH_P4: + r = true; + break; + } + + return r; +} + +static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, + u32 index, u64 *pdata) +{ + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + + if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param))) + return -EINVAL; + + *pdata = hv->hv_crash_param[index]; + return 0; +} + +static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata) +{ + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + + *pdata = hv->hv_crash_ctl; + return 0; +} + +static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host) +{ + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + + if (host) + hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY; + + if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) { + + vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", + hv->hv_crash_param[0], + hv->hv_crash_param[1], + hv->hv_crash_param[2], + hv->hv_crash_param[3], + hv->hv_crash_param[4]); + + /* Send notification about crash to user space */ + kvm_make_request(KVM_REQ_HV_CRASH, vcpu); + } + + return 0; +} + +static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu, + u32 index, u64 data) +{ + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + + if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param))) + return -EINVAL; + + hv->hv_crash_param[index] = data; + return 0; +} + +static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, + bool host) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_hv *hv = &kvm->arch.hyperv; + + switch (msr) { + case HV_X64_MSR_GUEST_OS_ID: + hv->hv_guest_os_id = data; + /* setting guest os id to zero disables hypercall page */ + if (!hv->hv_guest_os_id) + hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; + break; + case HV_X64_MSR_HYPERCALL: { + u64 gfn; + unsigned long addr; + u8 instructions[4]; + + /* if guest os id is not set hypercall should remain disabled */ + if (!hv->hv_guest_os_id) + break; + if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { + hv->hv_hypercall = data; + break; + } + gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; + addr = gfn_to_hva(kvm, gfn); + if (kvm_is_error_hva(addr)) + return 1; + kvm_x86_ops->patch_hypercall(vcpu, instructions); + ((unsigned char *)instructions)[3] = 0xc3; /* ret */ + if (__copy_to_user((void __user *)addr, instructions, 4)) + return 1; + hv->hv_hypercall = data; + mark_page_dirty(kvm, gfn); + break; + } + case HV_X64_MSR_REFERENCE_TSC: { + u64 gfn; + HV_REFERENCE_TSC_PAGE tsc_ref; + + memset(&tsc_ref, 0, sizeof(tsc_ref)); + hv->hv_tsc_page = data; + if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) + break; + gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; + if (kvm_write_guest( + kvm, + gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, + &tsc_ref, sizeof(tsc_ref))) + return 1; + mark_page_dirty(kvm, gfn); + break; + } + case HV_X64_MSR_CRASH_P0 ... 
HV_X64_MSR_CRASH_P4: + return kvm_hv_msr_set_crash_data(vcpu, + msr - HV_X64_MSR_CRASH_P0, + data); + case HV_X64_MSR_CRASH_CTL: + return kvm_hv_msr_set_crash_ctl(vcpu, data, host); + default: + vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n", + msr, data); + return 1; + } + return 0; +} + +static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) +{ + struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + + switch (msr) { + case HV_X64_MSR_APIC_ASSIST_PAGE: { + u64 gfn; + unsigned long addr; + + if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { + hv->hv_vapic = data; + if (kvm_lapic_enable_pv_eoi(vcpu, 0)) + return 1; + break; + } + gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; + addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); + if (kvm_is_error_hva(addr)) + return 1; + if (__clear_user((void __user *)addr, PAGE_SIZE)) + return 1; + hv->hv_vapic = data; + kvm_vcpu_mark_page_dirty(vcpu, gfn); + if (kvm_lapic_enable_pv_eoi(vcpu, + gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) + return 1; + break; + } + case HV_X64_MSR_EOI: + return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); + case HV_X64_MSR_ICR: + return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); + case HV_X64_MSR_TPR: + return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); + default: + vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n", + msr, data); + return 1; + } + + return 0; +} + +static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) +{ + u64 data = 0; + struct kvm *kvm = vcpu->kvm; + struct kvm_hv *hv = &kvm->arch.hyperv; + + switch (msr) { + case HV_X64_MSR_GUEST_OS_ID: + data = hv->hv_guest_os_id; + break; + case HV_X64_MSR_HYPERCALL: + data = hv->hv_hypercall; + break; + case HV_X64_MSR_TIME_REF_COUNT: { + data = + div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); + break; + } + case HV_X64_MSR_REFERENCE_TSC: + data = hv->hv_tsc_page; + break; + case HV_X64_MSR_CRASH_P0 ... 
HV_X64_MSR_CRASH_P4: + return kvm_hv_msr_get_crash_data(vcpu, + msr - HV_X64_MSR_CRASH_P0, + pdata); + case HV_X64_MSR_CRASH_CTL: + return kvm_hv_msr_get_crash_ctl(vcpu, pdata); + default: + vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); + return 1; + } + + *pdata = data; + return 0; +} + +static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) +{ + u64 data = 0; + struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + + switch (msr) { + case HV_X64_MSR_VP_INDEX: { + int r; + struct kvm_vcpu *v; + + kvm_for_each_vcpu(r, v, vcpu->kvm) { + if (v == vcpu) { + data = r; + break; + } + } + break; + } + case HV_X64_MSR_EOI: + return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); + case HV_X64_MSR_ICR: + return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); + case HV_X64_MSR_TPR: + return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); + case HV_X64_MSR_APIC_ASSIST_PAGE: + data = hv->hv_vapic; + break; + default: + vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); + return 1; + } + *pdata = data; + return 0; +} + +int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) +{ + if (kvm_hv_msr_partition_wide(msr)) { + int r; + + mutex_lock(&vcpu->kvm->lock); + r = kvm_hv_set_msr_pw(vcpu, msr, data, host); + mutex_unlock(&vcpu->kvm->lock); + return r; + } else + return kvm_hv_set_msr(vcpu, msr, data); +} + +int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) +{ + if (kvm_hv_msr_partition_wide(msr)) { + int r; + + mutex_lock(&vcpu->kvm->lock); + r = kvm_hv_get_msr_pw(vcpu, msr, pdata); + mutex_unlock(&vcpu->kvm->lock); + return r; + } else + return kvm_hv_get_msr(vcpu, msr, pdata); +} + +bool kvm_hv_hypercall_enabled(struct kvm *kvm) +{ + return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; +} + +int kvm_hv_hypercall(struct kvm_vcpu *vcpu) +{ + u64 param, ingpa, outgpa, ret; + uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; + bool fast, longmode; + + /* + * hypercall generates UD from non zero cpl and real mode + * per HYPER-V spec + */ + if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 0; + } + + longmode = is_64_bit_mode(vcpu); + + if (!longmode) { + param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); + ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); + outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | + (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); + } +#ifdef CONFIG_X86_64 + else { + param = kvm_register_read(vcpu, VCPU_REGS_RCX); + ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); + outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); + } +#endif + + code = param & 0xffff; + fast = (param >> 16) & 0x1; + rep_cnt = (param >> 32) & 0xfff; + rep_idx = (param >> 48) & 0xfff; + + trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); + + switch (code) { + case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: + kvm_vcpu_on_spin(vcpu); + break; + default: + res = HV_STATUS_INVALID_HYPERCALL_CODE; + break; + } + + ret = res | (((u64)rep_done & 0xfff) << 32); + if (longmode) { + kvm_register_write(vcpu, VCPU_REGS_RAX, ret); + } else { + kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); + kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); + } + + return 1; +} diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h new file mode 100644 index 000000000000..c7bce559f67b --- /dev/null 
+++ b/arch/x86/kvm/hyperv.h @@ -0,0 +1,32 @@ +/* + * KVM Microsoft Hyper-V emulation + * + * derived from arch/x86/kvm/x86.c + * + * Copyright (C) 2006 Qumranet, Inc. + * Copyright (C) 2008 Qumranet, Inc. + * Copyright IBM Corporation, 2008 + * Copyright 2010 Red Hat, Inc. and/or its affiliates. + * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com> + * + * Authors: + * Avi Kivity <avi@qumranet.com> + * Yaniv Kamay <yaniv@qumranet.com> + * Amit Shah <amit.shah@qumranet.com> + * Ben-Ami Yassour <benami@il.ibm.com> + * Andrey Smetanin <asmetanin@virtuozzo.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef __ARCH_X86_KVM_HYPERV_H__ +#define __ARCH_X86_KVM_HYPERV_H__ + +int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); +int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); +bool kvm_hv_hypercall_enabled(struct kvm *kvm); +int kvm_hv_hypercall(struct kvm_vcpu *vcpu); + +#endif diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index fef922ff2635..7cc2360f1848 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -651,15 +651,10 @@ fail_unlock: return NULL; } -void kvm_destroy_pic(struct kvm *kvm) +void kvm_destroy_pic(struct kvm_pic *vpic) { - struct kvm_pic *vpic = kvm->arch.vpic; - - if (vpic) { - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr); - kvm->arch.vpic = NULL; - kfree(vpic); - } + kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); + kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); + kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr); + kfree(vpic); } diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c index 7dbced309ddb..5c520ebf6343 100644 --- a/arch/x86/kvm/iommu.c +++ b/arch/x86/kvm/iommu.c @@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev) goto out_unmap; } + kvm_arch_start_assignment(kvm); pci_set_dev_assigned(pdev); dev_info(&pdev->dev, "kvm assign device\n"); @@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev) iommu_detach_device(domain, &pdev->dev); pci_clear_dev_assigned(pdev); + kvm_arch_end_assignment(kvm); dev_info(&pdev->dev, "kvm deassign device\n"); diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index ad68c73008c5..3d782a2c336a 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -74,7 +74,7 @@ struct kvm_pic { }; struct kvm_pic *kvm_create_pic(struct kvm *kvm); -void kvm_destroy_pic(struct kvm *kvm); +void kvm_destroy_pic(struct kvm_pic *vpic); int kvm_pic_read_irq(struct kvm *kvm); void kvm_pic_update_irq(struct kvm_pic *s); @@ -85,11 +85,11 @@ static inline struct kvm_pic *pic_irqchip(struct kvm *kvm) static inline int irqchip_in_kernel(struct kvm *kvm) { - int ret; + struct kvm_pic *vpic = pic_irqchip(kvm); - ret = (pic_irqchip(kvm) != NULL); + /* Read vpic before kvm->irq_routing. 
*/ smp_rmb(); - return ret; + return vpic != NULL; } void kvm_pic_reset(struct kvm_kpic_state *s); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 954e98a8c2e3..8d9013c5e1ee 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1172,7 +1172,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu) tsc_deadline = apic->lapic_timer.expired_tscdeadline; apic->lapic_timer.expired_tscdeadline = 0; - guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); + guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc()); trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline); /* __delay is delay_tsc whenever the hardware has TSC, thus always. */ @@ -1240,7 +1240,7 @@ static void start_apic_timer(struct kvm_lapic *apic) local_irq_save(flags); now = apic->lapic_timer.timer.base->get_time(); - guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); + guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc()); if (likely(tscdeadline > guest_tsc)) { ns = (tscdeadline - guest_tsc) * 1000000ULL; do_div(ns, this_tsc_khz); @@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) for (i = 0; i < APIC_LVT_NUM; i++) apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); apic_update_lvtt(apic); - if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) apic_set_reg(apic, APIC_LVT0, SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); @@ -1900,8 +1900,9 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; - kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, - sizeof(u32)); + if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, + sizeof(u32))) + return; apic_set_tpr(vcpu->arch.apic, data & 0xff); } diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 71952748222a..764037991d26 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -91,7 +91,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data); static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) { - return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE; + return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE; } int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f807496b62c2..fb16a8ea3dee 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -357,12 +357,6 @@ static u64 __get_spte_lockless(u64 *sptep) { return ACCESS_ONCE(*sptep); } - -static bool __check_direct_spte_mmio_pf(u64 spte) -{ - /* It is valid if the spte is zapped. */ - return spte == 0ull; -} #else union split_spte { struct { @@ -478,23 +472,6 @@ retry: return spte.spte; } - -static bool __check_direct_spte_mmio_pf(u64 spte) -{ - union split_spte sspte = (union split_spte)spte; - u32 high_mmio_mask = shadow_mmio_mask >> 32; - - /* It is valid if the spte is zapped. */ - if (spte == 0ull) - return true; - - /* It is valid if the spte is being zapped. 
*/ - if (sspte.spte_low == 0ull && - (sspte.spte_high & high_mmio_mask) == high_mmio_mask) - return true; - - return false; -} #endif static bool spte_is_locklessly_modifiable(u64 spte) @@ -2479,6 +2456,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, return 0; } +static bool kvm_is_mmio_pfn(pfn_t pfn) +{ + if (pfn_valid(pfn)) + return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)); + + return true; +} + static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, int level, gfn_t gfn, pfn_t pfn, bool speculative, @@ -2506,7 +2491,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, spte |= PT_PAGE_SIZE_MASK; if (tdp_enabled) spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, - kvm_is_reserved_pfn(pfn)); + kvm_is_mmio_pfn(pfn)); if (host_writable) spte |= SPTE_HOST_WRITEABLE; @@ -3283,54 +3268,89 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); } -static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) +static bool +__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) { - if (direct) - return vcpu_match_mmio_gpa(vcpu, addr); + int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f; - return vcpu_match_mmio_gva(vcpu, addr); + return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) | + ((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0); } +static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) +{ + return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); +} -/* - * On direct hosts, the last spte is only allows two states - * for mmio page fault: - * - It is the mmio spte - * - It is zapped or it is being zapped. - * - * This function completely checks the spte when the last spte - * is not the mmio spte. - */ -static bool check_direct_spte_mmio_pf(u64 spte) +static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) { - return __check_direct_spte_mmio_pf(spte); + return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); } -static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr) +static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) +{ + if (direct) + return vcpu_match_mmio_gpa(vcpu, addr); + + return vcpu_match_mmio_gva(vcpu, addr); +} + +/* return true if reserved bit is detected on spte. 
*/ +static bool +walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) { struct kvm_shadow_walk_iterator iterator; - u64 spte = 0ull; + u64 sptes[PT64_ROOT_LEVEL], spte = 0ull; + int root, leaf; + bool reserved = false; if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) - return spte; + goto exit; walk_shadow_page_lockless_begin(vcpu); - for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) + + for (shadow_walk_init(&iterator, vcpu, addr), root = iterator.level; + shadow_walk_okay(&iterator); + __shadow_walk_next(&iterator, spte)) { + leaf = iterator.level; + spte = mmu_spte_get_lockless(iterator.sptep); + + sptes[leaf - 1] = spte; + if (!is_shadow_present_pte(spte)) break; + + reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, + leaf); + } + walk_shadow_page_lockless_end(vcpu); - return spte; + if (reserved) { + pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n", + __func__, addr); + while (root >= leaf) { + pr_err("------ spte 0x%llx level %d.\n", + sptes[root - 1], root); + root--; + } + } +exit: + *sptep = spte; + return reserved; } int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) { u64 spte; + bool reserved; if (quickly_check_mmio_pf(vcpu, addr, direct)) return RET_MMIO_PF_EMULATE; - spte = walk_shadow_page_get_mmio_spte(vcpu, addr); + reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); + if (unlikely(reserved)) + return RET_MMIO_PF_BUG; if (is_mmio_spte(spte)) { gfn_t gfn = get_mmio_spte_gfn(spte); @@ -3348,13 +3368,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) } /* - * It's ok if the gva is remapped by other cpus on shadow guest, - * it's a BUG if the gfn is not a mmio page. - */ - if (direct && !check_direct_spte_mmio_pf(spte)) - return RET_MMIO_PF_BUG; - - /* * If the page table is zapped by other cpus, let CPU fault again on * the address. 
*/ @@ -3596,19 +3609,21 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gp #include "paging_tmpl.h" #undef PTTYPE -static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) +static void +__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, + struct rsvd_bits_validate *rsvd_check, + int maxphyaddr, int level, bool nx, bool gbpages, + bool pse) { - int maxphyaddr = cpuid_maxphyaddr(vcpu); u64 exb_bit_rsvd = 0; u64 gbpages_bit_rsvd = 0; u64 nonleaf_bit8_rsvd = 0; - context->bad_mt_xwr = 0; + rsvd_check->bad_mt_xwr = 0; - if (!context->nx) + if (!nx) exb_bit_rsvd = rsvd_bits(63, 63); - if (!guest_cpuid_has_gbpages(vcpu)) + if (!gbpages) gbpages_bit_rsvd = rsvd_bits(7, 7); /* @@ -3618,80 +3633,95 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, if (guest_cpuid_is_amd(vcpu)) nonleaf_bit8_rsvd = rsvd_bits(8, 8); - switch (context->root_level) { + switch (level) { case PT32_ROOT_LEVEL: /* no rsvd bits for 2 level 4K page table entries */ - context->rsvd_bits_mask[0][1] = 0; - context->rsvd_bits_mask[0][0] = 0; - context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; + rsvd_check->rsvd_bits_mask[0][1] = 0; + rsvd_check->rsvd_bits_mask[0][0] = 0; + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; - if (!is_pse(vcpu)) { - context->rsvd_bits_mask[1][1] = 0; + if (!pse) { + rsvd_check->rsvd_bits_mask[1][1] = 0; break; } if (is_cpuid_PSE36()) /* 36bits PSE 4MB page */ - context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); else /* 32 bits PSE 4MB page */ - context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); break; case PT32E_ROOT_LEVEL: - context->rsvd_bits_mask[0][2] = + rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(maxphyaddr, 63) | rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ - context->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 62); /* PDE */ - context->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 62); /* PTE */ - context->rsvd_bits_mask[1][1] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 62) | rsvd_bits(13, 20); /* large page */ - context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; break; case PT64_ROOT_LEVEL: - context->rsvd_bits_mask[0][3] = exb_bit_rsvd | - nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51); - context->rsvd_bits_mask[0][2] = exb_bit_rsvd | - nonleaf_bit8_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51); - context->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | + nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51); - context->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | + nonleaf_bit8_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51); - context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; - context->rsvd_bits_mask[1][2] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[1][3] = + rsvd_check->rsvd_bits_mask[0][3]; + rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | rsvd_bits(13, 29); - 
context->rsvd_bits_mask[1][1] = exb_bit_rsvd | + rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51) | rsvd_bits(13, 20); /* large page */ - context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; + rsvd_check->rsvd_bits_mask[1][0] = + rsvd_check->rsvd_bits_mask[0][0]; break; } } -static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, - struct kvm_mmu *context, bool execonly) +static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, + cpuid_maxphyaddr(vcpu), context->root_level, + context->nx, guest_cpuid_has_gbpages(vcpu), + is_pse(vcpu)); +} + +static void +__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, + int maxphyaddr, bool execonly) { - int maxphyaddr = cpuid_maxphyaddr(vcpu); int pte; - context->rsvd_bits_mask[0][3] = + rsvd_check->rsvd_bits_mask[0][3] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); - context->rsvd_bits_mask[0][2] = + rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - context->rsvd_bits_mask[0][1] = + rsvd_check->rsvd_bits_mask[0][1] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); - context->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); /* large page */ - context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; - context->rsvd_bits_mask[1][2] = + rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; + rsvd_check->rsvd_bits_mask[1][2] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); - context->rsvd_bits_mask[1][1] = + rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); - context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; + rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; for (pte = 0; pte < 64; pte++) { int rwx_bits = pte & 7; @@ -3699,10 +3729,64 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, if (mt == 0x2 || mt == 0x3 || mt == 0x7 || rwx_bits == 0x2 || rwx_bits == 0x6 || (rwx_bits == 0x4 && !execonly)) - context->bad_mt_xwr |= (1ull << pte); + rsvd_check->bad_mt_xwr |= (1ull << pte); } } +static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) +{ + __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, + cpuid_maxphyaddr(vcpu), execonly); +} + +/* + * the page table on host is the shadow page table for the page + * table in guest or amd nested guest, its mmu features completely + * follow the features in guest. + */ +void +reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) +{ + __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, + context->shadow_root_level, context->nx, + guest_cpuid_has_gbpages(vcpu), is_pse(vcpu)); +} +EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); + +/* + * the direct page table on host, use as much mmu features as + * possible, however, kvm currently does not do execution-protection. 
+ */ +static void +reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) +{ + if (guest_cpuid_is_amd(vcpu)) + __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, + context->shadow_root_level, false, + cpu_has_gbpages, true); + else + __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, + false); + +} + +/* + * as the comments in reset_shadow_zero_bits_mask() except it + * is the shadow page table for intel nested guest. + */ +static void +reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, bool execonly) +{ + __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, + boot_cpu_data.x86_phys_bits, execonly); +} + static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, bool ept) { @@ -3881,6 +3965,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) update_permission_bitmask(vcpu, context, false); update_last_pte_bitmap(vcpu, context); + reset_tdp_shadow_zero_bits_mask(vcpu, context); } void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) @@ -3908,6 +3993,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) context->base_role.smap_andnot_wp = smap && !is_write_protection(vcpu); context->base_role.smm = is_smm(vcpu); + reset_shadow_zero_bits_mask(vcpu, context); } EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); @@ -3931,6 +4017,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly) update_permission_bitmask(vcpu, context, true); reset_rsvds_bits_mask_ept(vcpu, context, execonly); + reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); } EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); @@ -4852,28 +4939,6 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) return nr_mmu_pages; } -int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) -{ - struct kvm_shadow_walk_iterator iterator; - u64 spte; - int nr_sptes = 0; - - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) - return nr_sptes; - - walk_shadow_page_lockless_begin(vcpu); - for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { - sptes[iterator.level-1] = spte; - nr_sptes++; - if (!is_shadow_present_pte(spte)) - break; - } - walk_shadow_page_lockless_end(vcpu); - - return nr_sptes; -} -EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy); - void kvm_mmu_destroy(struct kvm_vcpu *vcpu) { kvm_mmu_unload(vcpu); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 398d21c0f6dd..e4202e41d535 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -50,9 +50,11 @@ static inline u64 rsvd_bits(int s, int e) return ((1ULL << (e - s + 1)) - 1) << s; } -int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask); +void +reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context); + /* * Return values of handle_mmio_page_fault_common: * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index de1d2d8062e2..9e8bf13572e6 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state) return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; } +static u8 mtrr_disabled_type(void) +{ + /* + * Intel SDM 11.11.2.2: all MTRRs are disabled when + * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC + * memory type is applied to all of physical memory. 
+ */ + return MTRR_TYPE_UNCACHABLE; +} + /* * Three terms are used in the following code: * - segment, it indicates the address segments covered by fixed MTRRs. @@ -434,6 +444,8 @@ struct mtrr_iter { /* output fields. */ int mem_type; + /* mtrr is completely disabled? */ + bool mtrr_disabled; /* [start, end) is not fully covered in MTRRs? */ bool partial_map; @@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter) static void mtrr_lookup_start(struct mtrr_iter *iter) { if (!mtrr_is_enabled(iter->mtrr_state)) { - iter->partial_map = true; + iter->mtrr_disabled = true; return; } @@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter, iter->mtrr_state = mtrr_state; iter->start = start; iter->end = end; + iter->mtrr_disabled = false; iter->partial_map = false; iter->fixed = false; iter->range = NULL; @@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) return MTRR_TYPE_WRBACK; } - /* It is not covered by MTRRs. */ - if (iter.partial_map) { - /* - * We just check one page, partially covered by MTRRs is - * impossible. - */ - WARN_ON(type != -1); - type = mtrr_default_type(mtrr_state); - } + if (iter.mtrr_disabled) + return mtrr_disabled_type(); + + /* not contained in any MTRRs. */ + if (type == -1) + return mtrr_default_type(mtrr_state); + + /* + * We just check one page, partially covered by MTRRs is + * impossible. + */ + WARN_ON(iter.partial_map); + return type; } EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); @@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, return false; } + if (iter.mtrr_disabled) + return true; + if (!iter.partial_map) return true; diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 0f67d7e24800..736e6ab8784d 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -128,14 +128,6 @@ static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte) *access &= mask; } -static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) -{ - int bit7 = (gpte >> 7) & 1, low6 = gpte & 0x3f; - - return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) | - ((mmu->bad_mt_xwr & (1ull << low6)) != 0); -} - static inline int FNAME(is_present_gpte)(unsigned long pte) { #if PTTYPE != PTTYPE_EPT @@ -172,7 +164,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte) { - if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) + if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) goto no_present; if (!FNAME(is_present_gpte)(gpte)) @@ -353,8 +345,7 @@ retry_walk: if (unlikely(!FNAME(is_present_gpte)(pte))) goto error; - if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, - walker->level))) { + if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) { errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK; goto error; } diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c index 886aa25a7131..39b91127ef07 100644 --- a/arch/x86/kvm/pmu_amd.c +++ b/arch/x86/kvm/pmu_amd.c @@ -133,8 +133,6 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* MSR_K7_PERFCTRn */ pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0); if (pmc) { - if (!msr_info->host_initiated) - data = (s64)data; pmc->counter += data - pmc_read_counter(pmc); return 0; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 602b974a60a6..fdb8cb63a6c0 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -865,6 +865,64 @@ static void 
svm_disable_lbrv(struct vcpu_svm *svm) set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); } +#define MTRR_TYPE_UC_MINUS 7 +#define MTRR2PROTVAL_INVALID 0xff + +static u8 mtrr2protval[8]; + +static u8 fallback_mtrr_type(int mtrr) +{ + /* + * WT and WP aren't always available in the host PAT. Treat + * them as UC and UC- respectively. Everything else should be + * there. + */ + switch (mtrr) + { + case MTRR_TYPE_WRTHROUGH: + return MTRR_TYPE_UNCACHABLE; + case MTRR_TYPE_WRPROT: + return MTRR_TYPE_UC_MINUS; + default: + BUG(); + } +} + +static void build_mtrr2protval(void) +{ + int i; + u64 pat; + + for (i = 0; i < 8; i++) + mtrr2protval[i] = MTRR2PROTVAL_INVALID; + + /* Ignore the invalid MTRR types. */ + mtrr2protval[2] = 0; + mtrr2protval[3] = 0; + + /* + * Use host PAT value to figure out the mapping from guest MTRR + * values to nested page table PAT/PCD/PWT values. We do not + * want to change the host PAT value every time we enter the + * guest. + */ + rdmsrl(MSR_IA32_CR_PAT, pat); + for (i = 0; i < 8; i++) { + u8 mtrr = pat >> (8 * i); + + if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID) + mtrr2protval[mtrr] = __cm_idx2pte(i); + } + + for (i = 0; i < 8; i++) { + if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) { + u8 fallback = fallback_mtrr_type(i); + mtrr2protval[i] = mtrr2protval[fallback]; + BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID); + } + } +} + static __init int svm_hardware_setup(void) { int cpu; @@ -931,6 +989,7 @@ static __init int svm_hardware_setup(void) } else kvm_disable_tdp(); + build_mtrr2protval(); return 0; err: @@ -1080,11 +1139,48 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) { u64 tsc; - tsc = svm_scale_tsc(vcpu, native_read_tsc()); + tsc = svm_scale_tsc(vcpu, rdtsc()); return target_tsc - tsc; } +static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat) +{ + struct kvm_vcpu *vcpu = &svm->vcpu; + + /* Unlike Intel, AMD takes the guest's CR0.CD into account. + * + * AMD does not have IPAT. To emulate it for the case of guests + * with no assigned devices, just set everything to WB. If guests + * have assigned devices, however, we cannot force WB for RAM + * pages only, so use the guest PAT directly. + */ + if (!kvm_arch_has_assigned_device(vcpu->kvm)) + *g_pat = 0x0606060606060606; + else + *g_pat = vcpu->arch.pat; +} + +static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) +{ + u8 mtrr; + + /* + * 1. MMIO: trust guest MTRR, so same as item 3. + * 2. No passthrough: always map as WB, and force guest PAT to WB as well + * 3. Passthrough: can't guarantee the result, try to trust guest. 
+ */ + if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm)) + return 0; + + if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) && + kvm_read_cr0(vcpu) & X86_CR0_CD) + return _PAGE_NOCACHE; + + mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn); + return mtrr2protval[mtrr]; +} + static void init_vmcb(struct vcpu_svm *svm, bool init_event) { struct vmcb_control_area *control = &svm->vmcb->control; @@ -1180,6 +1276,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event) clr_cr_intercept(svm, INTERCEPT_CR3_READ); clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); save->g_pat = svm->vcpu.arch.pat; + svm_set_guest_pat(svm, &save->g_pat); save->cr3 = 0; save->cr4 = 0; } @@ -1574,13 +1671,10 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!vcpu->fpu_active) cr0 |= X86_CR0_TS; - /* - * re-enable caching here because the QEMU bios - * does not do it - this results in some delay at - * reboot - */ - if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED)) - cr0 &= ~(X86_CR0_CD | X86_CR0_NW); + + /* These are emulated via page tables. */ + cr0 &= ~(X86_CR0_CD | X86_CR0_NW); + svm->vmcb->save.cr0 = cr0; mark_dirty(svm->vmcb, VMCB_CR); update_cr0_intercept(svm); @@ -2013,6 +2107,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; vcpu->arch.mmu.shadow_root_level = get_npt_level(); + reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu); vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } @@ -3079,7 +3174,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr_info->index) { case MSR_IA32_TSC: { msr_info->data = svm->vmcb->control.tsc_offset + - svm_scale_tsc(vcpu, native_read_tsc()); + svm_scale_tsc(vcpu, rdtsc()); break; } @@ -3254,6 +3349,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; + case MSR_IA32_CR_PAT: + if (npt_enabled) { + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) + return 1; + vcpu->arch.pat = data; + svm_set_guest_pat(svm, &svm->vmcb->save.g_pat); + mark_dirty(svm->vmcb, VMCB_NPT); + break; + } + /* fall through */ default: return kvm_set_msr_common(vcpu, msr); } @@ -4088,11 +4193,6 @@ static bool svm_has_high_real_mode_segbase(void) return true; } -static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) -{ - return 0; -} - static void svm_cpuid_update(struct kvm_vcpu *vcpu) { } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e856dd566f4c..4a4eec30cc08 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void) { u64 host_tsc, tsc_offset; - rdtscll(host_tsc); + host_tsc = rdtsc(); tsc_offset = vmcs_read64(TSC_OFFSET); return host_tsc + tsc_offset; } @@ -2317,7 +2317,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) { - return target_tsc - native_read_tsc(); + return target_tsc - rdtsc(); } static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) @@ -2443,10 +2443,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | - CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING | - CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING | - 
CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW | - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; + CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | + CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | + CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | + CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; /* * We can allow some features even when not supported by the * hardware. For example, L1 can specify an MSR bitmap - and we @@ -3423,12 +3423,12 @@ static void enter_lmode(struct kvm_vcpu *vcpu) vmx_segment_cache_clear(to_vmx(vcpu)); guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); - if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { + if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { pr_debug_ratelimited("%s: tss fixup for long mode. \n", __func__); vmcs_write32(GUEST_TR_AR_BYTES, - (guest_tr_ar & ~AR_TYPE_MASK) - | AR_TYPE_BUSY_64_TSS); + (guest_tr_ar & ~VMX_AR_TYPE_MASK) + | VMX_AR_TYPE_BUSY_64_TSS); } vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); } @@ -3719,7 +3719,7 @@ static int vmx_get_cpl(struct kvm_vcpu *vcpu) return 0; else { int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); - return AR_DPL(ar); + return VMX_AR_DPL(ar); } } @@ -3847,11 +3847,11 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu) if (cs.unusable) return false; - if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) + if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) return false; if (!cs.s) return false; - if (cs.type & AR_TYPE_WRITEABLE_MASK) { + if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { if (cs.dpl > cs_rpl) return false; } else { @@ -3901,7 +3901,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) return false; if (!var.present) return false; - if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) { + if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { if (var.dpl < rpl) /* DPL < RPL */ return false; } @@ -5759,73 +5759,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); } -static u64 ept_rsvd_mask(u64 spte, int level) -{ - int i; - u64 mask = 0; - - for (i = 51; i > boot_cpu_data.x86_phys_bits; i--) - mask |= (1ULL << i); - - if (level == 4) - /* bits 7:3 reserved */ - mask |= 0xf8; - else if (spte & (1ULL << 7)) - /* - * 1GB/2MB page, bits 29:12 or 20:12 reserved respectively, - * level == 1 if the hypervisor is using the ignored bit 7. 
- */ - mask |= (PAGE_SIZE << ((level - 1) * 9)) - PAGE_SIZE; - else if (level > 1) - /* bits 6:3 reserved */ - mask |= 0x78; - - return mask; -} - -static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, - int level) -{ - printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level); - - /* 010b (write-only) */ - WARN_ON((spte & 0x7) == 0x2); - - /* 110b (write/execute) */ - WARN_ON((spte & 0x7) == 0x6); - - /* 100b (execute-only) and value not supported by logical processor */ - if (!cpu_has_vmx_ept_execute_only()) - WARN_ON((spte & 0x7) == 0x4); - - /* not 000b */ - if ((spte & 0x7)) { - u64 rsvd_bits = spte & ept_rsvd_mask(spte, level); - - if (rsvd_bits != 0) { - printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n", - __func__, rsvd_bits); - WARN_ON(1); - } - - /* bits 5:3 are _not_ reserved for large page or leaf page */ - if ((rsvd_bits & 0x38) == 0) { - u64 ept_mem_type = (spte & 0x38) >> 3; - - if (ept_mem_type == 2 || ept_mem_type == 3 || - ept_mem_type == 7) { - printk(KERN_ERR "%s: ept_mem_type=0x%llx\n", - __func__, ept_mem_type); - WARN_ON(1); - } - } - } -} - static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { - u64 sptes[4]; - int nr_sptes, i, ret; + int ret; gpa_t gpa; gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); @@ -5846,13 +5782,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) return 1; /* It is the real ept misconfig */ - printk(KERN_ERR "EPT: Misconfiguration.\n"); - printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa); - - nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes); - - for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) - ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); + WARN_ON(1); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; @@ -6246,6 +6176,11 @@ static int handle_mwait(struct kvm_vcpu *vcpu) return handle_nop(vcpu); } +static int handle_monitor_trap(struct kvm_vcpu *vcpu) +{ + return 1; +} + static int handle_monitor(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); @@ -6408,8 +6343,12 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) */ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, - u32 vmx_instruction_info, gva_t *ret) + u32 vmx_instruction_info, bool wr, gva_t *ret) { + gva_t off; + bool exn; + struct kvm_segment s; + /* * According to Vol. 3B, "Information for VM Exits Due to Instruction * Execution", on an exit, vmx_instruction_info holds most of the @@ -6434,22 +6373,63 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, /* Addr = segment_base + offset */ /* offset = base + [index * scale] + displacement */ - *ret = vmx_get_segment_base(vcpu, seg_reg); + off = exit_qualification; /* holds the displacement */ if (base_is_valid) - *ret += kvm_register_read(vcpu, base_reg); + off += kvm_register_read(vcpu, base_reg); if (index_is_valid) - *ret += kvm_register_read(vcpu, index_reg)<<scaling; - *ret += exit_qualification; /* holds the displacement */ + off += kvm_register_read(vcpu, index_reg)<<scaling; + vmx_get_segment(vcpu, &s, seg_reg); + *ret = s.base + off; if (addr_size == 1) /* 32 bit */ *ret &= 0xffffffff; - /* - * TODO: throw #GP (and return 1) in various cases that the VM* - * instructions require it - e.g., offset beyond segment limit, - * unusable or unreadable/unwritable segment, non-canonical 64-bit - * address, and so on. Currently these are not checked. - */ + /* Checks for #GP/#SS exceptions. 
*/ + exn = false; + if (is_protmode(vcpu)) { + /* Protected mode: apply checks for segment validity in the + * following order: + * - segment type check (#GP(0) may be thrown) + * - usability check (#GP(0)/#SS(0)) + * - limit check (#GP(0)/#SS(0)) + */ + if (wr) + /* #GP(0) if the destination operand is located in a + * read-only data segment or any code segment. + */ + exn = ((s.type & 0xa) == 0 || (s.type & 8)); + else + /* #GP(0) if the source operand is located in an + * execute-only code segment + */ + exn = ((s.type & 0xa) == 8); + } + if (exn) { + kvm_queue_exception_e(vcpu, GP_VECTOR, 0); + return 1; + } + if (is_long_mode(vcpu)) { + /* Long mode: #GP(0)/#SS(0) if the memory address is in a + * non-canonical form. This is an only check for long mode. + */ + exn = is_noncanonical_address(*ret); + } else if (is_protmode(vcpu)) { + /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. + */ + exn = (s.unusable != 0); + /* Protected mode: #GP(0)/#SS(0) if the memory + * operand is outside the segment limit. + */ + exn = exn || (off + sizeof(u64) > s.limit); + } + if (exn) { + kvm_queue_exception_e(vcpu, + seg_reg == VCPU_SREG_SS ? + SS_VECTOR : GP_VECTOR, + 0); + return 1; + } + return 0; } @@ -6471,7 +6451,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, int maxphyaddr = cpuid_maxphyaddr(vcpu); if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmcs_read32(VMX_INSTRUCTION_INFO), &gva)) + vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, @@ -6999,7 +6979,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) field_value); } else { if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, &gva)) + vmx_instruction_info, true, &gva)) return 1; /* _system ok, as nested_vmx_check_permission verified cpl=0 */ kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, @@ -7036,7 +7016,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) (((vmx_instruction_info) >> 3) & 0xf)); else { if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, &gva)) + vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_64_bit_mode(vcpu) ? 
8 : 4), &e)) { @@ -7128,7 +7108,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) return 1; if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, &vmcs_gva)) + vmx_instruction_info, true, &vmcs_gva)) return 1; /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */ if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, @@ -7184,7 +7164,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), - vmx_instruction_info, &gva)) + vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { @@ -7282,6 +7262,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, + [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, [EXIT_REASON_INVEPT] = handle_invept, [EXIT_REASON_INVVPID] = handle_invvpid, @@ -7542,6 +7523,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) return true; case EXIT_REASON_MWAIT_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); + case EXIT_REASON_MONITOR_TRAP_FLAG: + return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG); case EXIT_REASON_MONITOR_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); case EXIT_REASON_PAUSE_INSTRUCTION: @@ -8632,22 +8615,17 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) u64 ipat = 0; /* For VT-d and EPT combination - * 1. MMIO: always map as UC + * 1. MMIO: guest may want to apply WC, trust it. * 2. EPT with VT-d: * a. VT-d without snooping control feature: can't guarantee the - * result, try to trust guest. + * result, try to trust guest. So the same as item 1. * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. Just set it * to WB to keep consistent with host. So the same as item 3. * 3. 
EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ - if (is_mmio) { - cache = MTRR_TYPE_UNCACHABLE; - goto exit; - } - - if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { + if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) { ipat = VMX_EPT_IPAT_BIT; cache = MTRR_TYPE_WRBACK; goto exit; @@ -8655,7 +8633,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) if (kvm_read_cr0(vcpu) & X86_CR0_CD) { ipat = VMX_EPT_IPAT_BIT; - cache = MTRR_TYPE_UNCACHABLE; + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) + cache = MTRR_TYPE_WRBACK; + else + cache = MTRR_TYPE_UNCACHABLE; goto exit; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bbaf44e8f0d3..1e7e76e14e89 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -29,6 +29,7 @@ #include "cpuid.h" #include "assigned-dev.h" #include "pmu.h" +#include "hyperv.h" #include <linux/clocksource.h> #include <linux/interrupt.h> @@ -221,11 +222,9 @@ static void shared_msr_update(unsigned slot, u32 msr) void kvm_define_shared_msr(unsigned slot, u32 msr) { BUG_ON(slot >= KVM_NR_SHARED_MSRS); + shared_msrs_global.msrs[slot] = msr; if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; - shared_msrs_global.msrs[slot] = msr; - /* we need ensured the shared_msr_global have been updated */ - smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); @@ -526,7 +525,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && - (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { + (pdpte[i] & + vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) { ret = 0; goto out; } @@ -949,6 +949,8 @@ static u32 emulated_msrs[] = { MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, + HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, + HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, MSR_KVM_PV_EOI_EN, @@ -1217,11 +1219,6 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, __func__, base_khz, scaled_khz, shift, *pmultiplier); } -static inline u64 get_kernel_ns(void) -{ - return ktime_get_boot_ns(); -} - #ifdef CONFIG_X86_64 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); #endif @@ -1444,20 +1441,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc); static cycle_t read_tsc(void) { - cycle_t ret; - u64 last; - - /* - * Empirically, a fence (of type that depends on the CPU) - * before rdtsc is enough to ensure that rdtsc is ordered - * with respect to loads. The various CPU manuals are unclear - * as to whether rdtsc can be reordered with later loads, - * but no one has ever seen it happen. 
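The comment being removed here describes the classic fence-then-read pattern that the rdtsc_ordered() call just below encapsulates (the kernel helper picks the appropriate fence for the CPU). A minimal userspace sketch of the same ordering idea, with tsc_read_ordered() as a hypothetical wrapper:

#include <stdint.h>
#include <x86intrin.h>

/* Read the TSC so it cannot appear to be sampled before earlier loads. */
static inline uint64_t tsc_read_ordered(void)
{
        _mm_lfence();           /* complete prior loads before reading the TSC */
        return __rdtsc();
}

On CPUs where LFENCE does not provide this ordering an MFENCE is needed instead, which is one reason the pattern lives behind a single helper rather than being open-coded at every call site.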
- */ - rdtsc_barrier(); - ret = (cycle_t)vget_cycles(); - - last = pvclock_gtod_data.clock.cycle_last; + cycle_t ret = (cycle_t)rdtsc_ordered(); + u64 last = pvclock_gtod_data.clock.cycle_last; if (likely(ret >= last)) return ret; @@ -1646,7 +1631,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) return 1; } if (!use_master_clock) { - host_tsc = native_read_tsc(); + host_tsc = rdtsc(); kernel_ns = get_kernel_ns(); } @@ -1869,123 +1854,6 @@ out: return r; } -static bool kvm_hv_hypercall_enabled(struct kvm *kvm) -{ - return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; -} - -static bool kvm_hv_msr_partition_wide(u32 msr) -{ - bool r = false; - switch (msr) { - case HV_X64_MSR_GUEST_OS_ID: - case HV_X64_MSR_HYPERCALL: - case HV_X64_MSR_REFERENCE_TSC: - case HV_X64_MSR_TIME_REF_COUNT: - r = true; - break; - } - - return r; -} - -static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) -{ - struct kvm *kvm = vcpu->kvm; - - switch (msr) { - case HV_X64_MSR_GUEST_OS_ID: - kvm->arch.hv_guest_os_id = data; - /* setting guest os id to zero disables hypercall page */ - if (!kvm->arch.hv_guest_os_id) - kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; - break; - case HV_X64_MSR_HYPERCALL: { - u64 gfn; - unsigned long addr; - u8 instructions[4]; - - /* if guest os id is not set hypercall should remain disabled */ - if (!kvm->arch.hv_guest_os_id) - break; - if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { - kvm->arch.hv_hypercall = data; - break; - } - gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; - addr = gfn_to_hva(kvm, gfn); - if (kvm_is_error_hva(addr)) - return 1; - kvm_x86_ops->patch_hypercall(vcpu, instructions); - ((unsigned char *)instructions)[3] = 0xc3; /* ret */ - if (__copy_to_user((void __user *)addr, instructions, 4)) - return 1; - kvm->arch.hv_hypercall = data; - mark_page_dirty(kvm, gfn); - break; - } - case HV_X64_MSR_REFERENCE_TSC: { - u64 gfn; - HV_REFERENCE_TSC_PAGE tsc_ref; - memset(&tsc_ref, 0, sizeof(tsc_ref)); - kvm->arch.hv_tsc_page = data; - if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) - break; - gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; - if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, - &tsc_ref, sizeof(tsc_ref))) - return 1; - mark_page_dirty(kvm, gfn); - break; - } - default: - vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " - "data 0x%llx\n", msr, data); - return 1; - } - return 0; -} - -static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) -{ - switch (msr) { - case HV_X64_MSR_APIC_ASSIST_PAGE: { - u64 gfn; - unsigned long addr; - - if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { - vcpu->arch.hv_vapic = data; - if (kvm_lapic_enable_pv_eoi(vcpu, 0)) - return 1; - break; - } - gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; - addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); - if (kvm_is_error_hva(addr)) - return 1; - if (__clear_user((void __user *)addr, PAGE_SIZE)) - return 1; - vcpu->arch.hv_vapic = data; - kvm_vcpu_mark_page_dirty(vcpu, gfn); - if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) - return 1; - break; - } - case HV_X64_MSR_EOI: - return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); - case HV_X64_MSR_ICR: - return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); - case HV_X64_MSR_TPR: - return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); - default: - vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " - "data 0x%llx\n", msr, data); - return 1; - } - - return 0; -} - static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) 
{ gpa_t gpa = data & ~0x3f; @@ -2105,7 +1973,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (guest_cpuid_has_tsc_adjust(vcpu)) { if (!msr_info->host_initiated) { s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; - kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); + adjust_tsc_offset_guest(vcpu, adj); } vcpu->arch.ia32_tsc_adjust_msr = data; } @@ -2224,15 +2092,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: - if (kvm_hv_msr_partition_wide(msr)) { - int r; - mutex_lock(&vcpu->kvm->lock); - r = set_msr_hyperv_pw(vcpu, msr, data); - mutex_unlock(&vcpu->kvm->lock); - return r; - } else - return set_msr_hyperv(vcpu, msr, data); - break; + case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: + case HV_X64_MSR_CRASH_CTL: + return kvm_hv_set_msr_common(vcpu, msr, data, + msr_info->host_initiated); case MSR_IA32_BBL_CR_CTL3: /* Drop writes to this legacy MSR -- see rdmsr * counterpart for further detail. @@ -2315,68 +2178,6 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) return 0; } -static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) -{ - u64 data = 0; - struct kvm *kvm = vcpu->kvm; - - switch (msr) { - case HV_X64_MSR_GUEST_OS_ID: - data = kvm->arch.hv_guest_os_id; - break; - case HV_X64_MSR_HYPERCALL: - data = kvm->arch.hv_hypercall; - break; - case HV_X64_MSR_TIME_REF_COUNT: { - data = - div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); - break; - } - case HV_X64_MSR_REFERENCE_TSC: - data = kvm->arch.hv_tsc_page; - break; - default: - vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); - return 1; - } - - *pdata = data; - return 0; -} - -static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) -{ - u64 data = 0; - - switch (msr) { - case HV_X64_MSR_VP_INDEX: { - int r; - struct kvm_vcpu *v; - kvm_for_each_vcpu(r, v, vcpu->kvm) { - if (v == vcpu) { - data = r; - break; - } - } - break; - } - case HV_X64_MSR_EOI: - return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); - case HV_X64_MSR_ICR: - return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); - case HV_X64_MSR_TPR: - return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); - case HV_X64_MSR_APIC_ASSIST_PAGE: - data = vcpu->arch.hv_vapic; - break; - default: - vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); - return 1; - } - *pdata = data; - return 0; -} - int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { switch (msr_info->index) { @@ -2493,14 +2294,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: - if (kvm_hv_msr_partition_wide(msr_info->index)) { - int r; - mutex_lock(&vcpu->kvm->lock); - r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data); - mutex_unlock(&vcpu->kvm->lock); - return r; - } else - return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data); + case HV_X64_MSR_CRASH_P0 ... 
HV_X64_MSR_CRASH_P4: + case HV_X64_MSR_CRASH_CTL: + return kvm_hv_get_msr_common(vcpu, + msr_info->index, &msr_info->data); break; case MSR_IA32_BBL_CR_CTL3: /* This legacy MSR exists but isn't fully documented in current @@ -2651,6 +2448,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_TSC_DEADLINE_TIMER: case KVM_CAP_ENABLE_CAP_VM: case KVM_CAP_DISABLE_QUIRKS: + case KVM_CAP_SET_BOOT_CPU_ID: #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_PCI_2_3: @@ -2810,7 +2608,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : - native_read_tsc() - vcpu->arch.last_host_tsc; + rdtsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { @@ -2838,7 +2636,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); - vcpu->arch.last_host_tsc = native_read_tsc(); + vcpu->arch.last_host_tsc = rdtsc(); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, @@ -3157,8 +2955,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) cpuid_count(XSTATE_CPUID, index, &size, &offset, &ecx, &edx); memcpy(dest, src + offset, size); - } else - WARN_ON_ONCE(1); + } valid -= feature; } @@ -3818,30 +3615,25 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_ioapic_init(kvm); if (r) { mutex_lock(&kvm->slots_lock); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, - &vpic->dev_master); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, - &vpic->dev_slave); - kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, - &vpic->dev_eclr); + kvm_destroy_pic(vpic); mutex_unlock(&kvm->slots_lock); - kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; - smp_wmb(); - kvm->arch.vpic = vpic; - smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); - kvm_destroy_pic(kvm); + kvm_destroy_pic(vpic); mutex_unlock(&kvm->irq_lock); mutex_unlock(&kvm->slots_lock); + goto create_irqchip_unlock; } + /* Write kvm->irq_routing before kvm->arch.vpic. 
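The one-line comment above states the usual initialize-then-publish rule: everything reachable through the new pointer must be ordered before the store of the pointer itself. A generic sketch of that shape (struct irqchip_state, setup_state() and use_state() are made-up names for illustration only):

#include <asm/barrier.h>        /* smp_wmb()/smp_rmb() */

struct irqchip_state;                           /* illustrative type */
void setup_state(struct irqchip_state *st);     /* illustrative init */
void use_state(struct irqchip_state *st);       /* illustrative user */

struct irqchip_state *published;

void publish(struct irqchip_state *st)
{
        setup_state(st);        /* fully initialize the object first        */
        smp_wmb();              /* order those stores before the pointer    */
        published = st;         /* readers may legitimately see it from now */
}

void consume(void)
{
        struct irqchip_state *st = published;

        if (st) {
                smp_rmb();      /* pair with the writer's smp_wmb()         */
                use_state(st);  /* sees the fully initialized fields        */
        }
}

The same guarantee is often spelled smp_store_release()/smp_load_acquire(); either way the write barrier only helps if the reader orders its side as well.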
*/ + smp_wmb(); + kvm->arch.vpic = vpic; create_irqchip_unlock: mutex_unlock(&kvm->lock); break; @@ -3968,6 +3760,15 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_vm_ioctl_reinject(kvm, &control); break; } + case KVM_SET_BOOT_CPU_ID: + r = 0; + mutex_lock(&kvm->lock); + if (atomic_read(&kvm->online_vcpus) != 0) + r = -EBUSY; + else + kvm->arch.bsp_vcpu_id = arg; + mutex_unlock(&kvm->lock); + break; case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, @@ -5883,66 +5684,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_emulate_halt); -int kvm_hv_hypercall(struct kvm_vcpu *vcpu) -{ - u64 param, ingpa, outgpa, ret; - uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; - bool fast, longmode; - - /* - * hypercall generates UD from non zero cpl and real mode - * per HYPER-V spec - */ - if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 0; - } - - longmode = is_64_bit_mode(vcpu); - - if (!longmode) { - param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | - (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); - ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | - (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); - outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | - (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); - } -#ifdef CONFIG_X86_64 - else { - param = kvm_register_read(vcpu, VCPU_REGS_RCX); - ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); - outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); - } -#endif - - code = param & 0xffff; - fast = (param >> 16) & 0x1; - rep_cnt = (param >> 32) & 0xfff; - rep_idx = (param >> 48) & 0xfff; - - trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); - - switch (code) { - case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: - kvm_vcpu_on_spin(vcpu); - break; - default: - res = HV_STATUS_INVALID_HYPERCALL_CODE; - break; - } - - ret = res | (((u64)rep_done & 0xfff) << 32); - if (longmode) { - kvm_register_write(vcpu, VCPU_REGS_RAX, ret); - } else { - kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); - kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); - } - - return 1; -} - /* * kvm_pv_kick_cpu_op: Kick a vcpu. * @@ -6328,6 +6069,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf) static void process_smi(struct kvm_vcpu *vcpu) { struct kvm_segment cs, ds; + struct desc_ptr dt; char buf[512]; u32 cr0; @@ -6360,6 +6102,10 @@ static void process_smi(struct kvm_vcpu *vcpu) kvm_x86_ops->set_cr4(vcpu, 0); + /* Undocumented: IDT limit is set to zero on entry to SMM. 
*/ + dt.address = dt.size = 0; + kvm_x86_ops->set_idt(vcpu, &dt); + __kvm_set_dr(vcpu, 7, DR7_FIXED_1); cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; @@ -6514,6 +6260,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu_scan_ioapic(vcpu); if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) kvm_vcpu_reload_apic_access_page(vcpu); + if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; + r = 0; + goto out; + } } if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { @@ -6623,7 +6375,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) hw_breakpoint_restore(); vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, - native_read_tsc()); + rdtsc()); vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); @@ -7315,11 +7067,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu = kvm_x86_ops->vcpu_create(kvm, id); - /* - * Activate fpu unconditionally in case the guest needs eager FPU. It will be - * deactivated soon if it doesn't. - */ - kvm_x86_ops->fpu_activate(vcpu); return vcpu; } @@ -7437,7 +7184,7 @@ int kvm_arch_hardware_enable(void) if (ret != 0) return ret; - local_tsc = native_read_tsc(); + local_tsc = rdtsc(); stable = !check_tsc_unstable(); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { @@ -7541,6 +7288,17 @@ void kvm_arch_check_processor_compat(void *rtn) kvm_x86_ops->check_processor_compatibility(rtn); } +bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) +{ + return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; +} +EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); + +bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; +} + bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); @@ -8218,6 +7976,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) kvm_x86_ops->interrupt_allowed(vcpu); } +void kvm_arch_start_assignment(struct kvm *kvm) +{ + atomic_inc(&kvm->arch.assigned_device_count); +} +EXPORT_SYMBOL_GPL(kvm_arch_start_assignment); + +void kvm_arch_end_assignment(struct kvm *kvm) +{ + atomic_dec(&kvm->arch.assigned_device_count); +} +EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); + +bool kvm_arch_has_assigned_device(struct kvm *kvm) +{ + return atomic_read(&kvm->arch.assigned_device_count); +} +EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); + void kvm_arch_register_noncoherent_dma(struct kvm *kvm) { atomic_inc(&kvm->arch.noncoherent_dma_count); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index edc8cdcd786b..2f822cd886c2 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -147,6 +147,16 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu, return kvm_register_write(vcpu, reg, val); } +static inline u64 get_kernel_ns(void) +{ + return ktime_get_boot_ns(); +} + +static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) +{ + return !(kvm->arch.disabled_quirks & quirk); +} + void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); void kvm_set_pending_timer(struct kvm_vcpu *vcpu); diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index f2dc08c003eb..433e5a7dd37f 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -985,23 +985,11 @@ static int lguest_clockevent_set_next_event(unsigned long delta, return 0; } -static void lguest_clockevent_set_mode(enum clock_event_mode mode, - struct 
clock_event_device *evt) -{ - switch (mode) { - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - /* A 0 argument shuts the clock down. */ - hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0); - break; - case CLOCK_EVT_MODE_ONESHOT: - /* This is what we expect. */ - break; - case CLOCK_EVT_MODE_PERIODIC: - BUG(); - case CLOCK_EVT_MODE_RESUME: - break; - } +static int lguest_clockevent_shutdown(struct clock_event_device *evt) +{ + /* A 0 argument shuts the clock down. */ + hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0); + return 0; } /* This describes our primitive timer chip. */ @@ -1009,7 +997,7 @@ static struct clock_event_device lguest_clockevent = { .name = "lguest", .features = CLOCK_EVT_FEAT_ONESHOT, .set_next_event = lguest_clockevent_set_next_event, - .set_mode = lguest_clockevent_set_mode, + .set_state_shutdown = lguest_clockevent_shutdown, .rating = INT_MAX, .mult = 1, .shift = 0, diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 39d6a3db0b96..e912b2f6d36e 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -20,6 +20,7 @@ #include <asm/processor.h> #include <asm/delay.h> #include <asm/timer.h> +#include <asm/mwait.h> #ifdef CONFIG_SMP # include <asm/smp.h> @@ -49,16 +50,14 @@ static void delay_loop(unsigned long loops) /* TSC based delay: */ static void delay_tsc(unsigned long __loops) { - u32 bclock, now, loops = __loops; + u64 bclock, now, loops = __loops; int cpu; preempt_disable(); cpu = smp_processor_id(); - rdtsc_barrier(); - rdtscl(bclock); + bclock = rdtsc_ordered(); for (;;) { - rdtsc_barrier(); - rdtscl(now); + now = rdtsc_ordered(); if ((now - bclock) >= loops) break; @@ -79,14 +78,51 @@ static void delay_tsc(unsigned long __loops) if (unlikely(cpu != smp_processor_id())) { loops -= (now - bclock); cpu = smp_processor_id(); - rdtsc_barrier(); - rdtscl(bclock); + bclock = rdtsc_ordered(); } } preempt_enable(); } /* + * On some AMD platforms, MWAITX has a configurable 32-bit timer, that + * counts with TSC frequency. The input value is the loop of the + * counter, it will exit when the timer expires. + */ +static void delay_mwaitx(unsigned long __loops) +{ + u64 start, end, delay, loops = __loops; + + start = rdtsc_ordered(); + + for (;;) { + delay = min_t(u64, MWAITX_MAX_LOOPS, loops); + + /* + * Use cpu_tss as a cacheline-aligned, seldomly + * accessed per-cpu variable as the monitor target. + */ + __monitorx(this_cpu_ptr(&cpu_tss), 0, 0); + + /* + * AMD, like Intel, supports the EAX hint and EAX=0xf + * means, do not enter any deep C-state and we use it + * here in delay() to minimize wakeup latency. 
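For a sense of scale under the comment above (the clock figure is illustrative): the loop count handed to delay_mwaitx() is in TSC ticks, so on a 2 GHz TSC a 100 us udelay() arrives as roughly 200,000 ticks, far below MWAITX_MAX_LOOPS, and a single timed __mwaitx() wait normally covers the whole delay. The surrounding loop still re-checks the TSC because MWAITX may wake early, for example on an interrupt or a store to the monitored cpu_tss line, as well as when the requested delay exceeds the 32-bit timer range.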
+ */ + __mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE); + + end = rdtsc_ordered(); + + if (loops <= end - start) + break; + + loops -= end - start; + + start = end; + } +} + +/* * Since we calibrate only once at boot, this * function should be set once at boot and not changed */ @@ -94,13 +130,19 @@ static void (*delay_fn)(unsigned long) = delay_loop; void use_tsc_delay(void) { - delay_fn = delay_tsc; + if (delay_fn == delay_loop) + delay_fn = delay_tsc; +} + +void use_mwaitx_delay(void) +{ + delay_fn = delay_mwaitx; } int read_current_timer(unsigned long *timer_val) { if (delay_fn == delay_tsc) { - rdtscll(*timer_val); + *timer_val = rdtsc(); return 0; } return -1; diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index f37e84ab49f3..3d8f2e421466 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c @@ -29,7 +29,6 @@ #include <asm/uaccess.h> #include <asm/traps.h> -#include <asm/desc.h> #include <asm/user.h> #include <asm/fpu/internal.h> @@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info) math_abort(FPU_info, SIGILL); } - code_descriptor = LDT_DESCRIPTOR(FPU_CS); + code_descriptor = FPU_get_ldt_descriptor(FPU_CS); if (SEG_D_SIZE(code_descriptor)) { /* The above test may be wrong, the book is not clear */ /* Segmented 32 bit protected mode */ diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h index 9ccecb61a4fa..5e044d506b7a 100644 --- a/arch/x86/math-emu/fpu_system.h +++ b/arch/x86/math-emu/fpu_system.h @@ -16,9 +16,24 @@ #include <linux/kernel.h> #include <linux/mm.h> -/* s is always from a cpu register, and the cpu does bounds checking - * during register load --> no further bounds checks needed */ -#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) +#include <asm/desc.h> +#include <asm/mmu_context.h> + +static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg) +{ + static struct desc_struct zero_desc; + struct desc_struct ret = zero_desc; + +#ifdef CONFIG_MODIFY_LDT_SYSCALL + seg >>= 3; + mutex_lock(¤t->mm->context.lock); + if (current->mm->context.ldt && seg < current->mm->context.ldt->size) + ret = current->mm->context.ldt->entries[seg]; + mutex_unlock(¤t->mm->context.lock); +#endif + return ret; +} + #define SEG_D_SIZE(x) ((x).b & (3 << 21)) #define SEG_G_BIT(x) ((x).b & (1 << 23)) #define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1) diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c index 6ef5e99380f9..8db26591d91a 100644 --- a/arch/x86/math-emu/get_address.c +++ b/arch/x86/math-emu/get_address.c @@ -20,7 +20,7 @@ #include <linux/stddef.h> #include <asm/uaccess.h> -#include <asm/desc.h> +#include <asm/vm86.h> #include "fpu_system.h" #include "exception.h" @@ -158,7 +158,7 @@ static long pm_address(u_char FPU_modrm, u_char segment, addr->selector = PM_REG_(segment); } - descriptor = LDT_DESCRIPTOR(PM_REG_(segment)); + descriptor = FPU_get_ldt_descriptor(addr->selector); base_address = SEG_BASE_ADDR(descriptor); address = base_address + offset; limit = base_address diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9dc909841739..eef44d9a3f77 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -20,6 +20,7 @@ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... 
*/ #include <asm/fixmap.h> /* VSYSCALL_ADDR */ #include <asm/vsyscall.h> /* emulate_vsyscall */ +#include <asm/vm86.h> /* struct vm86 */ #define CREATE_TRACE_POINTS #include <asm/trace/exceptions.h> @@ -301,14 +302,16 @@ static inline void check_v8086_mode(struct pt_regs *regs, unsigned long address, struct task_struct *tsk) { +#ifdef CONFIG_VM86 unsigned long bit; - if (!v8086_mode(regs)) + if (!v8086_mode(regs) || !tsk->thread.vm86) return; bit = (address - 0xA0000) >> PAGE_SHIFT; if (bit < 32) - tsk->thread.screen_bitmap |= 1 << bit; + tsk->thread.vm86->screen_bitmap |= 1 << bit; +#endif } static bool low_pfn(unsigned long pfn) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 8533b46e6bee..1d8a83df153a 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -30,8 +30,11 @@ /* * Tables translating between page_cache_type_t and pte encoding. * - * Minimal supported modes are defined statically, they are modified - * during bootup if more supported cache modes are available. + * The default values are defined statically as minimal supported mode; + * WC and WT fall back to UC-. pat_init() updates these values to support + * more cache modes, WC and WT, when it is safe to do so. See pat_init() + * for the details. Note, __early_ioremap() used during early boot-time + * takes pgprot_t (pte encoding) and does not use these tables. * * Index into __cachemode2pte_tbl[] is the cachemode. * diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8340e45c891a..68aec42545c2 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end) vaddr = start; pgd_idx = pgd_index(vaddr); + pmd_idx = pmd_index(vaddr); for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) { for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index cc5ccc415cc0..b9c78f3bcd67 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, !PageReserved(pfn_to_page(start_pfn + i))) return 1; - WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); - return 0; } @@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, pgprot_t prot; int retval; void __iomem *ret_addr; - int ram_region; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; @@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, /* * Don't allow anybody to remap normal RAM that we're using.. 
*/ - /* First check if whole region can be identified as RAM or not */ - ram_region = region_is_ram(phys_addr, size); - if (ram_region > 0) { - WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", - (unsigned long int)phys_addr, - (unsigned long int)last_addr); + pfn = phys_addr >> PAGE_SHIFT; + last_pfn = last_addr >> PAGE_SHIFT; + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, + __ioremap_check_ram) == 1) { + WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", + &phys_addr, &last_addr); return NULL; } - /* If could not be identified(-1), check page by page */ - if (ram_region < 0) { - pfn = phys_addr >> PAGE_SHIFT; - last_pfn = last_addr >> PAGE_SHIFT; - if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, - __ioremap_check_ram) == 1) - return NULL; - } /* * Mappings have to be page-aligned */ diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index e1840f3db5b5..9ce5da27b136 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -12,20 +12,6 @@ extern pgd_t early_level4_pgt[PTRS_PER_PGD]; extern struct range pfn_mapped[E820_X_MAX]; -static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; -static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; -static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; - -/* - * This page used as early shadow. We don't use empty_zero_page - * at early stages, stack instrumentation could write some garbage - * to this page. - * Latter we reuse it as zero shadow for large ranges of memory - * that allowed to access, but not instrumented by kasan - * (vmalloc/vmemmap ...). - */ -static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; - static int __init map_range(struct range *range) { unsigned long start; @@ -62,106 +48,6 @@ static void __init kasan_map_early_shadow(pgd_t *pgd) } } -static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr, - unsigned long end) -{ - pte_t *pte = pte_offset_kernel(pmd, addr); - - while (addr + PAGE_SIZE <= end) { - WARN_ON(!pte_none(*pte)); - set_pte(pte, __pte(__pa_nodebug(kasan_zero_page) - | __PAGE_KERNEL_RO)); - addr += PAGE_SIZE; - pte = pte_offset_kernel(pmd, addr); - } - return 0; -} - -static int __init zero_pmd_populate(pud_t *pud, unsigned long addr, - unsigned long end) -{ - int ret = 0; - pmd_t *pmd = pmd_offset(pud, addr); - - while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) { - WARN_ON(!pmd_none(*pmd)); - set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte) - | _KERNPG_TABLE)); - addr += PMD_SIZE; - pmd = pmd_offset(pud, addr); - } - if (addr < end) { - if (pmd_none(*pmd)) { - void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); - if (!p) - return -ENOMEM; - set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE)); - } - ret = zero_pte_populate(pmd, addr, end); - } - return ret; -} - - -static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr, - unsigned long end) -{ - int ret = 0; - pud_t *pud = pud_offset(pgd, addr); - - while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) { - WARN_ON(!pud_none(*pud)); - set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd) - | _KERNPG_TABLE)); - addr += PUD_SIZE; - pud = pud_offset(pgd, addr); - } - - if (addr < end) { - if (pud_none(*pud)) { - void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); - if (!p) - return -ENOMEM; - set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE)); - } - ret = zero_pmd_populate(pud, addr, end); - } - return ret; -} - -static int __init zero_pgd_populate(unsigned long addr, unsigned long end) -{ - int ret = 0; - pgd_t *pgd = 
pgd_offset_k(addr); - - while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) { - WARN_ON(!pgd_none(*pgd)); - set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud) - | _KERNPG_TABLE)); - addr += PGDIR_SIZE; - pgd = pgd_offset_k(addr); - } - - if (addr < end) { - if (pgd_none(*pgd)) { - void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE); - if (!p) - return -ENOMEM; - set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE)); - } - ret = zero_pud_populate(pgd, addr, end); - } - return ret; -} - - -static void __init populate_zero_shadow(const void *start, const void *end) -{ - if (zero_pgd_populate((unsigned long)start, (unsigned long)end)) - panic("kasan: unable to map zero shadow!"); -} - - #ifdef CONFIG_KASAN_INLINE static int kasan_die_handler(struct notifier_block *self, unsigned long val, @@ -213,7 +99,7 @@ void __init kasan_init(void) clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); - populate_zero_shadow((void *)KASAN_SHADOW_START, + kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, kasan_mem_to_shadow((void *)PAGE_OFFSET)); for (i = 0; i < E820_X_MAX; i++) { @@ -223,14 +109,15 @@ void __init kasan_init(void) if (map_range(&pfn_mapped[i])) panic("kasan: unable to allocate shadow!"); } - populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), - kasan_mem_to_shadow((void *)__START_KERNEL_map)); + kasan_populate_zero_shadow( + kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), + kasan_mem_to_shadow((void *)__START_KERNEL_map)); vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext), (unsigned long)kasan_mem_to_shadow(_end), NUMA_NO_NODE); - populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), + kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), (void *)KASAN_SHADOW_END); memset(kasan_zero_page, 0, PAGE_SIZE); diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 9d518d693b4b..844b06d67df4 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_MPX) + return "[mpx]"; + return NULL; +} diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 7a657f58bbea..db1b0bc5017c 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -20,20 +20,6 @@ #define CREATE_TRACE_POINTS #include <asm/trace/mpx.h> -static const char *mpx_mapping_name(struct vm_area_struct *vma) -{ - return "[mpx]"; -} - -static struct vm_operations_struct mpx_vma_ops = { - .name = mpx_mapping_name, -}; - -static int is_mpx_vma(struct vm_area_struct *vma) -{ - return (vma->vm_ops == &mpx_vma_ops); -} - static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) { if (is_64bit_mm(mm)) @@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm) /* * This is really a simplified "vm_mmap". it only handles MPX * bounds tables (the bounds directory is user-allocated). - * - * Later on, we use the vma->vm_ops to uniquely identify these - * VMAs. */ static unsigned long mpx_mmap(unsigned long len) { @@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len) ret = -ENOMEM; goto out; } - vma->vm_ops = &mpx_vma_ops; if (vm_flags & VM_LOCKED) { up_write(&mm->mmap_sem); @@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm, * so stop immediately and return an error. This * probably results in a SIGSEGV. 
*/ - if (!is_mpx_vma(vma)) + if (!(vma->vm_flags & VM_MPX)) return -EINVAL; len = min(vma->vm_end, end) - addr; @@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm, * lots of tables even though we have no actual table * entries in use. */ - while (next && is_mpx_vma(next)) + while (next && (next->vm_flags & VM_MPX)) next = next->vm_next; - while (prev && is_mpx_vma(prev)) + while (prev && (prev->vm_flags & VM_MPX)) prev = prev->vm_prev; /* * We know 'start' and 'end' lie within an area controlled diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index 8ff686aa7e8c..5f169d5d76a8 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -8,6 +8,7 @@ #include <linux/kthread.h> #include <linux/random.h> #include <linux/kernel.h> +#include <linux/init.h> #include <linux/mm.h> #include <linux/vmalloc.h> @@ -256,5 +257,4 @@ static int start_pageattr_test(void) return 0; } - -module_init(start_pageattr_test); +device_initcall(start_pageattr_test); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 727158cb3b3c..2c44c0792301 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -4,7 +4,6 @@ */ #include <linux/highmem.h> #include <linux/bootmem.h> -#include <linux/module.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/interrupt.h> diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 3250f2371aea..90b924acd982 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -117,7 +117,7 @@ static void flush_tlb_func(void *info) } else { unsigned long addr; unsigned long nr_pages = - f->flush_end - f->flush_start / PAGE_SIZE; + (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 579a8fd74be0..be2e7a2b10d7 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog) EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ offsetof(struct bpf_array, map.max_entries)); EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ -#define OFFSET1 44 /* number of bytes to jump */ +#define OFFSET1 47 /* number of bytes to jump */ EMIT2(X86_JBE, OFFSET1); /* jbe out */ label1 = cnt; @@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog) */ EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -#define OFFSET2 33 +#define OFFSET2 36 EMIT2(X86_JA, OFFSET2); /* ja out */ label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ /* prog = array->prog[index]; */ - EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */ - EMIT1(offsetof(struct bpf_array, prog)); + EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */ + offsetof(struct bpf_array, prog)); EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ /* if (prog == NULL) diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 8fd6f44aee83..09d3afc0a181 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -166,7 +166,6 @@ void pcibios_fixup_bus(struct pci_bus *b) { struct pci_dev *dev; - pci_read_bridge_bases(b); list_for_each_entry(dev, &b->devices, bus_list) pcibios_fixup_device_resources(dev); } @@ -673,24 +672,22 @@ int pcibios_add_device(struct pci_dev *dev) return 0; } -int 
pcibios_enable_device(struct pci_dev *dev, int mask) +int pcibios_alloc_irq(struct pci_dev *dev) { - int err; - - if ((err = pci_enable_resources(dev, mask)) < 0) - return err; - - if (!pci_dev_msi_enabled(dev)) - return pcibios_enable_irq(dev); - return 0; + return pcibios_enable_irq(dev); } -void pcibios_disable_device (struct pci_dev *dev) +void pcibios_free_irq(struct pci_dev *dev) { - if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) + if (pcibios_disable_irq) pcibios_disable_irq(dev); } +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + return pci_enable_resources(dev, mask); +} + int pci_ext_cfg_avail(void) { if (raw_pci_ext_ops) diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 9a2b7101ae8a..e58565556703 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -62,19 +62,6 @@ static void pci_fixup_umc_ide(struct pci_dev *d) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide); -static void pci_fixup_ncr53c810(struct pci_dev *d) -{ - /* - * NCR 53C810 returns class code 0 (at least on some systems). - * Fix class to be PCI_CLASS_STORAGE_SCSI - */ - if (!d->class) { - dev_warn(&d->dev, "Fixing NCR 53C810 class code\n"); - d->class = PCI_CLASS_STORAGE_SCSI << 8; - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810); - static void pci_fixup_latency(struct pci_dev *d) { /* diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 27062303c881..0d24e7c10145 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -35,6 +35,9 @@ #define PCIE_CAP_OFFSET 0x100 +/* Quirks for the listed devices */ +#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 + /* Fixed BAR fields */ #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */ #define PCI_FIXED_BAR_0_SIZE 0x04 @@ -210,22 +213,41 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) { struct irq_alloc_info info; int polarity; + int ret; - if (dev->irq_managed && dev->irq > 0) + if (pci_has_managed_irq(dev)) return 0; - if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) - polarity = 0; /* active high */ - else - polarity = 1; /* active low */ + switch (intel_mid_identify_cpu()) { + case INTEL_MID_CPU_CHIP_TANGIER: + polarity = IOAPIC_POL_HIGH; + + /* Special treatment for IRQ0 */ + if (dev->irq == 0) { + /* + * TNG has IRQ0 assigned to eMMC controller. But there + * are also other devices with bogus PCI configuration + * that have IRQ0 assigned. This check ensures that + * eMMC gets it. + */ + if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC) + return -EBUSY; + } + break; + default: + polarity = IOAPIC_POL_LOW; + break; + } + ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity); /* * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to * IOAPIC RTE entries, so we just enable RTE for the device. */ - if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info) < 0) - return -EBUSY; + ret = mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info); + if (ret < 0) + return ret; dev->irq_managed = 1; @@ -234,14 +256,17 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) static void intel_mid_pci_irq_disable(struct pci_dev *dev) { - if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && - dev->irq > 0) { + if (pci_has_managed_irq(dev)) { mp_unmap_irq(dev->irq); dev->irq_managed = 0; + /* + * Don't reset dev->irq here, otherwise + * intel_mid_pci_irq_enable() will fail on next call. 
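Judging from the open-coded sequences they replace in these hunks, the pci_set_managed_irq(), pci_reset_managed_irq() and pci_has_managed_irq() helpers are thin wrappers over the dev->irq and dev->irq_managed fields; a sketch of their presumed shape (not copied from include/linux/pci.h):

static inline void pci_set_managed_irq(struct pci_dev *dev, int irq)
{
        dev->irq = irq;
        dev->irq_managed = 1;
}

static inline void pci_reset_managed_irq(struct pci_dev *dev)
{
        dev->irq = 0;
        dev->irq_managed = 0;
}

static inline bool pci_has_managed_irq(struct pci_dev *dev)
{
        return dev->irq_managed && dev->irq > 0;
}

Note that intel_mid_pci_irq_disable() deliberately clears only irq_managed and leaves dev->irq alone, per the comment above, instead of using the reset helper.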
+ */ } } -struct pci_ops intel_mid_pci_ops = { +static struct pci_ops intel_mid_pci_ops = { .read = pci_read, .write = pci_write, }; diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 9bd115484745..32e70343e6fd 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1202,7 +1202,7 @@ static int pirq_enable_irq(struct pci_dev *dev) struct pci_dev *temp_dev; int irq; - if (dev->irq_managed && dev->irq > 0) + if (pci_has_managed_irq(dev)) return 0; irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, @@ -1230,8 +1230,7 @@ static int pirq_enable_irq(struct pci_dev *dev) } dev = temp_dev; if (irq >= 0) { - dev->irq_managed = 1; - dev->irq = irq; + pci_set_managed_irq(dev, irq); dev_info(&dev->dev, "PCI->APIC IRQ transform: " "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); return 0; @@ -1257,24 +1256,10 @@ static int pirq_enable_irq(struct pci_dev *dev) return 0; } -bool mp_should_keep_irq(struct device *dev) -{ - if (dev->power.is_prepared) - return true; -#ifdef CONFIG_PM - if (dev->power.runtime_status == RPM_SUSPENDING) - return true; -#endif - - return false; -} - static void pirq_disable_irq(struct pci_dev *dev) { - if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && - dev->irq_managed && dev->irq) { + if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) { mp_unmap_irq(dev->irq); - dev->irq = 0; - dev->irq_managed = 0; + pci_reset_managed_irq(dev); } } diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile index f1a6c8e86ddd..184842ef332e 100644 --- a/arch/x86/platform/Makefile +++ b/arch/x86/platform/Makefile @@ -5,6 +5,7 @@ obj-y += efi/ obj-y += geode/ obj-y += goldfish/ obj-y += iris/ +obj-y += intel/ obj-y += intel-mid/ obj-y += intel-quark/ obj-y += olpc/ diff --git a/arch/x86/platform/atom/Makefile b/arch/x86/platform/atom/Makefile index 0a3a40cbc794..40983f5b0858 100644 --- a/arch/x86/platform/atom/Makefile +++ b/arch/x86/platform/atom/Makefile @@ -1 +1,2 @@ -obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o +obj-$(CONFIG_PMC_ATOM) += pmc_atom.o +obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o diff --git a/arch/x86/kernel/pmc_atom.c b/arch/x86/platform/atom/pmc_atom.c index d66a4fe6caee..964ff4fc61f9 100644 --- a/arch/x86/kernel/pmc_atom.c +++ b/arch/x86/platform/atom/pmc_atom.c @@ -15,7 +15,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/device.h> @@ -25,80 +24,149 @@ #include <asm/pmc_atom.h> +struct pmc_bit_map { + const char *name; + u32 bit_mask; +}; + +struct pmc_reg_map { + const struct pmc_bit_map *d3_sts_0; + const struct pmc_bit_map *d3_sts_1; + const struct pmc_bit_map *func_dis; + const struct pmc_bit_map *func_dis_2; + const struct pmc_bit_map *pss; +}; + struct pmc_dev { u32 base_addr; void __iomem *regmap; + const struct pmc_reg_map *map; #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_dir; #endif /* CONFIG_DEBUG_FS */ + bool init; }; static struct pmc_dev pmc_device; static u32 acpi_base_addr; -struct pmc_bit_map { - const char *name; - u32 bit_mask; +static const struct pmc_bit_map d3_sts_0_map[] = { + {"LPSS1_F0_DMA", BIT_LPSS1_F0_DMA}, + {"LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1}, + {"LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2}, + {"LPSS1_F3_HSUART1", BIT_LPSS1_F3_HSUART1}, + {"LPSS1_F4_HSUART2", BIT_LPSS1_F4_HSUART2}, + {"LPSS1_F5_SPI", BIT_LPSS1_F5_SPI}, + {"LPSS1_F6_Reserved", BIT_LPSS1_F6_XXX}, + {"LPSS1_F7_Reserved", BIT_LPSS1_F7_XXX}, + {"SCC_EMMC", BIT_SCC_EMMC}, + {"SCC_SDIO", BIT_SCC_SDIO}, + {"SCC_SDCARD", BIT_SCC_SDCARD}, + {"SCC_MIPI", 
BIT_SCC_MIPI}, + {"HDA", BIT_HDA}, + {"LPE", BIT_LPE}, + {"OTG", BIT_OTG}, + {"USH", BIT_USH}, + {"GBE", BIT_GBE}, + {"SATA", BIT_SATA}, + {"USB_EHCI", BIT_USB_EHCI}, + {"SEC", BIT_SEC}, + {"PCIE_PORT0", BIT_PCIE_PORT0}, + {"PCIE_PORT1", BIT_PCIE_PORT1}, + {"PCIE_PORT2", BIT_PCIE_PORT2}, + {"PCIE_PORT3", BIT_PCIE_PORT3}, + {"LPSS2_F0_DMA", BIT_LPSS2_F0_DMA}, + {"LPSS2_F1_I2C1", BIT_LPSS2_F1_I2C1}, + {"LPSS2_F2_I2C2", BIT_LPSS2_F2_I2C2}, + {"LPSS2_F3_I2C3", BIT_LPSS2_F3_I2C3}, + {"LPSS2_F3_I2C4", BIT_LPSS2_F4_I2C4}, + {"LPSS2_F5_I2C5", BIT_LPSS2_F5_I2C5}, + {"LPSS2_F6_I2C6", BIT_LPSS2_F6_I2C6}, + {"LPSS2_F7_I2C7", BIT_LPSS2_F7_I2C7}, + {}, +}; + +static struct pmc_bit_map byt_d3_sts_1_map[] = { + {"SMB", BIT_SMB}, + {"OTG_SS_PHY", BIT_OTG_SS_PHY}, + {"USH_SS_PHY", BIT_USH_SS_PHY}, + {"DFX", BIT_DFX}, + {}, }; -static const struct pmc_bit_map dev_map[] = { - {"0 - LPSS1_F0_DMA", BIT_LPSS1_F0_DMA}, - {"1 - LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1}, - {"2 - LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2}, - {"3 - LPSS1_F3_HSUART1", BIT_LPSS1_F3_HSUART1}, - {"4 - LPSS1_F4_HSUART2", BIT_LPSS1_F4_HSUART2}, - {"5 - LPSS1_F5_SPI", BIT_LPSS1_F5_SPI}, - {"6 - LPSS1_F6_Reserved", BIT_LPSS1_F6_XXX}, - {"7 - LPSS1_F7_Reserved", BIT_LPSS1_F7_XXX}, - {"8 - SCC_EMMC", BIT_SCC_EMMC}, - {"9 - SCC_SDIO", BIT_SCC_SDIO}, - {"10 - SCC_SDCARD", BIT_SCC_SDCARD}, - {"11 - SCC_MIPI", BIT_SCC_MIPI}, - {"12 - HDA", BIT_HDA}, - {"13 - LPE", BIT_LPE}, - {"14 - OTG", BIT_OTG}, - {"15 - USH", BIT_USH}, - {"16 - GBE", BIT_GBE}, - {"17 - SATA", BIT_SATA}, - {"18 - USB_EHCI", BIT_USB_EHCI}, - {"19 - SEC", BIT_SEC}, - {"20 - PCIE_PORT0", BIT_PCIE_PORT0}, - {"21 - PCIE_PORT1", BIT_PCIE_PORT1}, - {"22 - PCIE_PORT2", BIT_PCIE_PORT2}, - {"23 - PCIE_PORT3", BIT_PCIE_PORT3}, - {"24 - LPSS2_F0_DMA", BIT_LPSS2_F0_DMA}, - {"25 - LPSS2_F1_I2C1", BIT_LPSS2_F1_I2C1}, - {"26 - LPSS2_F2_I2C2", BIT_LPSS2_F2_I2C2}, - {"27 - LPSS2_F3_I2C3", BIT_LPSS2_F3_I2C3}, - {"28 - LPSS2_F3_I2C4", BIT_LPSS2_F4_I2C4}, - {"29 - LPSS2_F5_I2C5", BIT_LPSS2_F5_I2C5}, - {"30 - LPSS2_F6_I2C6", BIT_LPSS2_F6_I2C6}, - {"31 - LPSS2_F7_I2C7", BIT_LPSS2_F7_I2C7}, - {"32 - SMB", BIT_SMB}, - {"33 - OTG_SS_PHY", BIT_OTG_SS_PHY}, - {"34 - USH_SS_PHY", BIT_USH_SS_PHY}, - {"35 - DFX", BIT_DFX}, +static struct pmc_bit_map cht_d3_sts_1_map[] = { + {"SMB", BIT_SMB}, + {"GMM", BIT_STS_GMM}, + {"ISH", BIT_STS_ISH}, + {}, }; -static const struct pmc_bit_map pss_map[] = { - {"0 - GBE", PMC_PSS_BIT_GBE}, - {"1 - SATA", PMC_PSS_BIT_SATA}, - {"2 - HDA", PMC_PSS_BIT_HDA}, - {"3 - SEC", PMC_PSS_BIT_SEC}, - {"4 - PCIE", PMC_PSS_BIT_PCIE}, - {"5 - LPSS", PMC_PSS_BIT_LPSS}, - {"6 - LPE", PMC_PSS_BIT_LPE}, - {"7 - DFX", PMC_PSS_BIT_DFX}, - {"8 - USH_CTRL", PMC_PSS_BIT_USH_CTRL}, - {"9 - USH_SUS", PMC_PSS_BIT_USH_SUS}, - {"10 - USH_VCCS", PMC_PSS_BIT_USH_VCCS}, - {"11 - USH_VCCA", PMC_PSS_BIT_USH_VCCA}, - {"12 - OTG_CTRL", PMC_PSS_BIT_OTG_CTRL}, - {"13 - OTG_VCCS", PMC_PSS_BIT_OTG_VCCS}, - {"14 - OTG_VCCA_CLK", PMC_PSS_BIT_OTG_VCCA_CLK}, - {"15 - OTG_VCCA", PMC_PSS_BIT_OTG_VCCA}, - {"16 - USB", PMC_PSS_BIT_USB}, - {"17 - USB_SUS", PMC_PSS_BIT_USB_SUS}, +static struct pmc_bit_map cht_func_dis_2_map[] = { + {"SMB", BIT_SMB}, + {"GMM", BIT_FD_GMM}, + {"ISH", BIT_FD_ISH}, + {}, +}; + +static const struct pmc_bit_map byt_pss_map[] = { + {"GBE", PMC_PSS_BIT_GBE}, + {"SATA", PMC_PSS_BIT_SATA}, + {"HDA", PMC_PSS_BIT_HDA}, + {"SEC", PMC_PSS_BIT_SEC}, + {"PCIE", PMC_PSS_BIT_PCIE}, + {"LPSS", PMC_PSS_BIT_LPSS}, + {"LPE", PMC_PSS_BIT_LPE}, + {"DFX", PMC_PSS_BIT_DFX}, + {"USH_CTRL", PMC_PSS_BIT_USH_CTRL}, + {"USH_SUS", 
PMC_PSS_BIT_USH_SUS}, + {"USH_VCCS", PMC_PSS_BIT_USH_VCCS}, + {"USH_VCCA", PMC_PSS_BIT_USH_VCCA}, + {"OTG_CTRL", PMC_PSS_BIT_OTG_CTRL}, + {"OTG_VCCS", PMC_PSS_BIT_OTG_VCCS}, + {"OTG_VCCA_CLK", PMC_PSS_BIT_OTG_VCCA_CLK}, + {"OTG_VCCA", PMC_PSS_BIT_OTG_VCCA}, + {"USB", PMC_PSS_BIT_USB}, + {"USB_SUS", PMC_PSS_BIT_USB_SUS}, + {}, +}; + +static const struct pmc_bit_map cht_pss_map[] = { + {"SATA", PMC_PSS_BIT_SATA}, + {"HDA", PMC_PSS_BIT_HDA}, + {"SEC", PMC_PSS_BIT_SEC}, + {"PCIE", PMC_PSS_BIT_PCIE}, + {"LPSS", PMC_PSS_BIT_LPSS}, + {"LPE", PMC_PSS_BIT_LPE}, + {"UFS", PMC_PSS_BIT_CHT_UFS}, + {"UXD", PMC_PSS_BIT_CHT_UXD}, + {"UXD_FD", PMC_PSS_BIT_CHT_UXD_FD}, + {"UX_ENG", PMC_PSS_BIT_CHT_UX_ENG}, + {"USB_SUS", PMC_PSS_BIT_CHT_USB_SUS}, + {"GMM", PMC_PSS_BIT_CHT_GMM}, + {"ISH", PMC_PSS_BIT_CHT_ISH}, + {"DFX_MASTER", PMC_PSS_BIT_CHT_DFX_MASTER}, + {"DFX_CLUSTER1", PMC_PSS_BIT_CHT_DFX_CLUSTER1}, + {"DFX_CLUSTER2", PMC_PSS_BIT_CHT_DFX_CLUSTER2}, + {"DFX_CLUSTER3", PMC_PSS_BIT_CHT_DFX_CLUSTER3}, + {"DFX_CLUSTER4", PMC_PSS_BIT_CHT_DFX_CLUSTER4}, + {"DFX_CLUSTER5", PMC_PSS_BIT_CHT_DFX_CLUSTER5}, + {}, +}; + +static const struct pmc_reg_map byt_reg_map = { + .d3_sts_0 = d3_sts_0_map, + .d3_sts_1 = byt_d3_sts_1_map, + .func_dis = d3_sts_0_map, + .func_dis_2 = byt_d3_sts_1_map, + .pss = byt_pss_map, +}; + +static const struct pmc_reg_map cht_reg_map = { + .d3_sts_0 = d3_sts_0_map, + .d3_sts_1 = cht_d3_sts_1_map, + .func_dis = d3_sts_0_map, + .func_dis_2 = cht_func_dis_2_map, + .pss = cht_pss_map, }; static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset) @@ -111,6 +179,30 @@ static inline void pmc_reg_write(struct pmc_dev *pmc, int reg_offset, u32 val) writel(val, pmc->regmap + reg_offset); } +int pmc_atom_read(int offset, u32 *value) +{ + struct pmc_dev *pmc = &pmc_device; + + if (!pmc->init) + return -ENODEV; + + *value = pmc_reg_read(pmc, offset); + return 0; +} +EXPORT_SYMBOL_GPL(pmc_atom_read); + +int pmc_atom_write(int offset, u32 value) +{ + struct pmc_dev *pmc = &pmc_device; + + if (!pmc->init) + return -ENODEV; + + pmc_reg_write(pmc, offset, value); + return 0; +} +EXPORT_SYMBOL_GPL(pmc_atom_write); + static void pmc_power_off(void) { u16 pm1_cnt_port; @@ -142,37 +234,39 @@ static void pmc_hw_reg_setup(struct pmc_dev *pmc) } #ifdef CONFIG_DEBUG_FS +static void pmc_dev_state_print(struct seq_file *s, int reg_index, + u32 sts, const struct pmc_bit_map *sts_map, + u32 fd, const struct pmc_bit_map *fd_map) +{ + int offset = PMC_REG_BIT_WIDTH * reg_index; + int index; + + for (index = 0; sts_map[index].name; index++) { + seq_printf(s, "Dev: %-2d - %-32s\tState: %s [%s]\n", + offset + index, sts_map[index].name, + fd_map[index].bit_mask & fd ? "Disabled" : "Enabled ", + sts_map[index].bit_mask & sts ? 
"D3" : "D0"); + } +} + static int pmc_dev_state_show(struct seq_file *s, void *unused) { struct pmc_dev *pmc = s->private; - u32 func_dis, func_dis_2, func_dis_index; - u32 d3_sts_0, d3_sts_1, d3_sts_index; - int dev_num, dev_index, reg_index; + const struct pmc_reg_map *m = pmc->map; + u32 func_dis, func_dis_2; + u32 d3_sts_0, d3_sts_1; func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS); func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2); d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0); d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1); - dev_num = ARRAY_SIZE(dev_map); - - for (dev_index = 0; dev_index < dev_num; dev_index++) { - reg_index = dev_index / PMC_REG_BIT_WIDTH; - if (reg_index) { - func_dis_index = func_dis_2; - d3_sts_index = d3_sts_1; - } else { - func_dis_index = func_dis; - d3_sts_index = d3_sts_0; - } - - seq_printf(s, "Dev: %-32s\tState: %s [%s]\n", - dev_map[dev_index].name, - dev_map[dev_index].bit_mask & func_dis_index ? - "Disabled" : "Enabled ", - dev_map[dev_index].bit_mask & d3_sts_index ? - "D3" : "D0"); - } + /* Low part */ + pmc_dev_state_print(s, 0, d3_sts_0, m->d3_sts_0, func_dis, m->func_dis); + + /* High part */ + pmc_dev_state_print(s, 1, d3_sts_1, m->d3_sts_1, func_dis_2, m->func_dis_2); + return 0; } @@ -191,13 +285,14 @@ static const struct file_operations pmc_dev_state_ops = { static int pmc_pss_state_show(struct seq_file *s, void *unused) { struct pmc_dev *pmc = s->private; + const struct pmc_bit_map *map = pmc->map->pss; u32 pss = pmc_reg_read(pmc, PMC_PSS); - int pss_index; + int index; - for (pss_index = 0; pss_index < ARRAY_SIZE(pss_map); pss_index++) { - seq_printf(s, "Island: %-32s\tState: %s\n", - pss_map[pss_index].name, - pss_map[pss_index].bit_mask & pss ? "Off" : "On"); + for (index = 0; map[index].name; index++) { + seq_printf(s, "Island: %-2d - %-32s\tState: %s\n", + index, map[index].name, + map[index].bit_mask & pss ? 
"Off" : "On"); } return 0; } @@ -250,7 +345,7 @@ static void pmc_dbgfs_unregister(struct pmc_dev *pmc) debugfs_remove_recursive(pmc->dbgfs_dir); } -static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev) +static int pmc_dbgfs_register(struct pmc_dev *pmc) { struct dentry *dir, *f; @@ -262,24 +357,18 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev) f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO, dir, pmc, &pmc_dev_state_ops); - if (!f) { - dev_err(&pdev->dev, "dev_state register failed\n"); + if (!f) goto err; - } f = debugfs_create_file("pss_state", S_IFREG | S_IRUGO, dir, pmc, &pmc_pss_state_ops); - if (!f) { - dev_err(&pdev->dev, "pss_state register failed\n"); + if (!f) goto err; - } f = debugfs_create_file("sleep_state", S_IFREG | S_IRUGO, dir, pmc, &pmc_sleep_tmr_ops); - if (!f) { - dev_err(&pdev->dev, "sleep_state register failed\n"); + if (!f) goto err; - } return 0; err: @@ -287,15 +376,16 @@ err: return -ENODEV; } #else -static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev) +static int pmc_dbgfs_register(struct pmc_dev *pmc) { return 0; } #endif /* CONFIG_DEBUG_FS */ -static int pmc_setup_dev(struct pci_dev *pdev) +static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent) { struct pmc_dev *pmc = &pmc_device; + const struct pmc_reg_map *map = (struct pmc_reg_map *)ent->driver_data; int ret; /* Obtain ACPI base address */ @@ -315,32 +405,30 @@ static int pmc_setup_dev(struct pci_dev *pdev) return -ENOMEM; } + pmc->map = map; + /* PMC hardware registers setup */ pmc_hw_reg_setup(pmc); - ret = pmc_dbgfs_register(pmc, pdev); - if (ret) { - iounmap(pmc->regmap); - } + ret = pmc_dbgfs_register(pmc); + if (ret) + dev_warn(&pdev->dev, "debugfs register failed\n"); + pmc->init = true; return ret; } /* * Data for PCI driver interface * - * This data only exists for exporting the supported - * PCI ids via MODULE_DEVICE_TABLE. We do not actually - * register a pci_driver, because lpc_ich will register - * a driver on the same PCI id. + * used by pci_match_id() call below. */ static const struct pci_device_id pmc_pci_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_VLV_PMC) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_reg_map }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_reg_map }, { 0, }, }; -MODULE_DEVICE_TABLE(pci, pmc_pci_ids); - static int __init pmc_atom_init(void) { struct pci_dev *pdev = NULL; @@ -357,15 +445,16 @@ static int __init pmc_atom_init(void) for_each_pci_dev(pdev) { ent = pci_match_id(pmc_pci_ids, pdev); if (ent) - return pmc_setup_dev(pdev); + return pmc_setup_dev(pdev, ent); } /* Device not found. 
*/ return -ENODEV; } -module_init(pmc_atom_init); -/* no module_exit, this driver shouldn't be unloaded */ +device_initcall(pmc_atom_init); +/* MODULE_AUTHOR("Aubrey Li <aubrey.li@linux.intel.com>"); MODULE_DESCRIPTION("Intel Atom SOC Power Management Controller Interface"); MODULE_LICENSE("GPL v2"); +*/ diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index cfba30f27392..e4308fe6afe8 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr) static int __init arch_parse_efi_cmdline(char *str) { + if (!str) { + pr_warn("need at least one option\n"); + return -EINVAL; + } + if (parse_option_str(str, "old_map")) set_bit(EFI_OLD_MEMMAP, &efi.flags); if (parse_option_str(str, "debug")) diff --git a/arch/x86/platform/intel/Makefile b/arch/x86/platform/intel/Makefile new file mode 100644 index 000000000000..b878032fbc82 --- /dev/null +++ b/arch/x86/platform/intel/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c index 82f8d02f0df2..edf2c54bf131 100644 --- a/arch/x86/kernel/iosf_mbi.c +++ b/arch/x86/platform/intel/iosf_mbi.c @@ -30,7 +30,9 @@ #define PCI_DEVICE_ID_BAYTRAIL 0x0F00 #define PCI_DEVICE_ID_BRASWELL 0x2280 #define PCI_DEVICE_ID_QUARK_X1000 0x0958 +#define PCI_DEVICE_ID_TANGIER 0x1170 +static struct pci_dev *mbi_pdev; static DEFINE_SPINLOCK(iosf_mbi_lock); static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset) @@ -38,8 +40,6 @@ static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset) return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE; } -static struct pci_dev *mbi_pdev; /* one mbi device */ - static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr) { int result; @@ -104,7 +104,7 @@ int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr) unsigned long flags; int ret; - /*Access to the GFX unit is handled by GPU code */ + /* Access to the GFX unit is handled by GPU code */ if (port == BT_MBI_UNIT_GFX) { WARN_ON(1); return -EPERM; @@ -127,7 +127,7 @@ int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr) unsigned long flags; int ret; - /*Access to the GFX unit is handled by GPU code */ + /* Access to the GFX unit is handled by GPU code */ if (port == BT_MBI_UNIT_GFX) { WARN_ON(1); return -EPERM; @@ -151,7 +151,7 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask) unsigned long flags; int ret; - /*Access to the GFX unit is handled by GPU code */ + /* Access to the GFX unit is handled by GPU code */ if (port == BT_MBI_UNIT_GFX) { WARN_ON(1); return -EPERM; @@ -240,17 +240,17 @@ static void iosf_sideband_debug_init(void) /* mdr */ d = debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr); - if (IS_ERR_OR_NULL(d)) + if (!d) goto cleanup; /* mcrx */ - debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx); - if (IS_ERR_OR_NULL(d)) + d = debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx); + if (!d) goto cleanup; /* mcr - initiates mailbox tranaction */ - debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops); - if (IS_ERR_OR_NULL(d)) + d = debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops); + if (!d) goto cleanup; return; @@ -292,6 +292,7 @@ static const struct pci_device_id iosf_mbi_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) }, + { 
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_TANGIER) }, { 0, }, }; MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids); @@ -314,10 +315,8 @@ static void __exit iosf_mbi_exit(void) iosf_debugfs_remove(); pci_unregister_driver(&iosf_mbi_pci_driver); - if (mbi_pdev) { - pci_dev_put(mbi_pdev); - mbi_pdev = NULL; - } + pci_dev_put(mbi_pdev); + mbi_pdev = NULL; } module_init(iosf_mbi_init); diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index a244237f3cfa..2b158a9fa1d7 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c @@ -32,8 +32,7 @@ static cycle_t uv_read_rtc(struct clocksource *cs); static int uv_rtc_next_event(unsigned long, struct clock_event_device *); -static void uv_rtc_timer_setup(enum clock_event_mode, - struct clock_event_device *); +static int uv_rtc_shutdown(struct clock_event_device *evt); static struct clocksource clocksource_uv = { .name = RTC_NAME, @@ -44,14 +43,14 @@ static struct clocksource clocksource_uv = { }; static struct clock_event_device clock_event_device_uv = { - .name = RTC_NAME, - .features = CLOCK_EVT_FEAT_ONESHOT, - .shift = 20, - .rating = 400, - .irq = -1, - .set_next_event = uv_rtc_next_event, - .set_mode = uv_rtc_timer_setup, - .event_handler = NULL, + .name = RTC_NAME, + .features = CLOCK_EVT_FEAT_ONESHOT, + .shift = 20, + .rating = 400, + .irq = -1, + .set_next_event = uv_rtc_next_event, + .set_state_shutdown = uv_rtc_shutdown, + .event_handler = NULL, }; static DEFINE_PER_CPU(struct clock_event_device, cpu_ced); @@ -321,24 +320,14 @@ static int uv_rtc_next_event(unsigned long delta, } /* - * Setup the RTC timer in oneshot mode + * Shutdown the RTC timer */ -static void uv_rtc_timer_setup(enum clock_event_mode mode, - struct clock_event_device *evt) +static int uv_rtc_shutdown(struct clock_event_device *evt) { int ced_cpu = cpumask_first(evt->cpumask); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_RESUME: - /* Nothing to do here yet */ - break; - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - uv_rtc_unset_timer(ced_cpu, 1); - break; - } + uv_rtc_unset_timer(ced_cpu, 1); + return 0; } static void uv_rtc_interrupt(void) diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 0d7dd1f5ac36..9ab52791fed5 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -22,6 +22,7 @@ #include <asm/fpu/internal.h> #include <asm/debugreg.h> #include <asm/cpu.h> +#include <asm/mmu_context.h> #ifdef CONFIG_X86_32 __visible unsigned long saved_context_ebx; @@ -153,7 +154,7 @@ static void fix_processor_context(void) syscall_init(); /* This sets MSR_*STAR and related */ #endif load_TR_desc(); /* This does ltr */ - load_LDT(¤t->active_mm->context); /* This does lldt */ + load_mm_ldt(current->active_mm); /* This does lldt */ fpu__resume_cpu(); } diff --git a/arch/x86/ras/Kconfig b/arch/x86/ras/Kconfig new file mode 100644 index 000000000000..10fea5fc821e --- /dev/null +++ b/arch/x86/ras/Kconfig @@ -0,0 +1,11 @@ +config AMD_MCE_INJ + tristate "Simple MCE injection interface for AMD processors" + depends on RAS && EDAC_DECODE_MCE && DEBUG_FS + default n + help + This is a simple debugfs interface to inject MCEs and test different + aspects of the MCE handling code. + + WARNING: Do not even assume this interface is staying stable! 
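
The arch/x86/platform/uv/uv_time.c hunk above (and the Xen timer hunks later in this series) are part of the same clockevents cleanup: the multiplexed set_mode callback is dropped in favour of discrete set_state_* callbacks that each return an int. A minimal, hedged sketch of the resulting callback shape — the "demo" names below are invented for illustration and are not part of this series:

#include <linux/clockchips.h>

/* Sketch of the post-conversion clockevent callbacks; demo_* is hypothetical. */
static int demo_shutdown(struct clock_event_device *evt)
{
	/* Quiesce the hardware timer; the int return lets the core see failures. */
	return 0;
}

static int demo_set_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	/* Program a one-shot expiry 'delta' clock cycles from now. */
	return 0;
}

static struct clock_event_device demo_clockevent = {
	.name			= "demo",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 400,
	.set_state_shutdown	= demo_shutdown,	/* replaces the old CLOCK_EVT_MODE_SHUTDOWN case */
	.set_next_event		= demo_set_next_event,
};

As in the uv_rtc and Xen conversions, states that previously fell through the old set_mode switch with nothing to do simply omit the corresponding set_state_* hook.
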
+ + diff --git a/arch/x86/ras/Makefile b/arch/x86/ras/Makefile new file mode 100644 index 000000000000..dd2c98b84037 --- /dev/null +++ b/arch/x86/ras/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_AMD_MCE_INJ) += mce_amd_inj.o + diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c new file mode 100644 index 000000000000..17e35b5bf779 --- /dev/null +++ b/arch/x86/ras/mce_amd_inj.c @@ -0,0 +1,375 @@ +/* + * A simple MCE injection facility for testing different aspects of the RAS + * code. This driver should be built as module so that it can be loaded + * on production kernels for testing purposes. + * + * This file may be distributed under the terms of the GNU General Public + * License version 2. + * + * Copyright (c) 2010-15: Borislav Petkov <bp@alien8.de> + * Advanced Micro Devices Inc. + */ + +#include <linux/kobject.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/cpu.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <asm/mce.h> + +#include "../kernel/cpu/mcheck/mce-internal.h" + +/* + * Collect all the MCi_XXX settings + */ +static struct mce i_mce; +static struct dentry *dfs_inj; + +static u8 n_banks; + +#define MAX_FLAG_OPT_SIZE 3 + +enum injection_type { + SW_INJ = 0, /* SW injection, simply decode the error */ + HW_INJ, /* Trigger a #MC */ + N_INJ_TYPES, +}; + +static const char * const flags_options[] = { + [SW_INJ] = "sw", + [HW_INJ] = "hw", + NULL +}; + +/* Set default injection to SW_INJ */ +static enum injection_type inj_type = SW_INJ; + +#define MCE_INJECT_SET(reg) \ +static int inj_##reg##_set(void *data, u64 val) \ +{ \ + struct mce *m = (struct mce *)data; \ + \ + m->reg = val; \ + return 0; \ +} + +MCE_INJECT_SET(status); +MCE_INJECT_SET(misc); +MCE_INJECT_SET(addr); + +#define MCE_INJECT_GET(reg) \ +static int inj_##reg##_get(void *data, u64 *val) \ +{ \ + struct mce *m = (struct mce *)data; \ + \ + *val = m->reg; \ + return 0; \ +} + +MCE_INJECT_GET(status); +MCE_INJECT_GET(misc); +MCE_INJECT_GET(addr); + +DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n"); +DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n"); +DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n"); + +/* + * Caller needs to be make sure this cpu doesn't disappear + * from under us, i.e.: get_cpu/put_cpu. + */ +static int toggle_hw_mce_inject(unsigned int cpu, bool enable) +{ + u32 l, h; + int err; + + err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h); + if (err) { + pr_err("%s: error reading HWCR\n", __func__); + return err; + } + + enable ? 
(l |= BIT(18)) : (l &= ~BIT(18)); + + err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h); + if (err) + pr_err("%s: error writing HWCR\n", __func__); + + return err; +} + +static int __set_inj(const char *buf) +{ + int i; + + for (i = 0; i < N_INJ_TYPES; i++) { + if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) { + inj_type = i; + return 0; + } + } + return -EINVAL; +} + +static ssize_t flags_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[MAX_FLAG_OPT_SIZE]; + int n; + + n = sprintf(buf, "%s\n", flags_options[inj_type]); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); +} + +static ssize_t flags_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[MAX_FLAG_OPT_SIZE], *__buf; + int err; + size_t ret; + + if (cnt > MAX_FLAG_OPT_SIZE) + cnt = MAX_FLAG_OPT_SIZE; + + ret = cnt; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt - 1] = 0; + + /* strip whitespace */ + __buf = strstrip(buf); + + err = __set_inj(__buf); + if (err) { + pr_err("%s: Invalid flags value: %s\n", __func__, __buf); + return err; + } + + *ppos += ret; + + return ret; +} + +static const struct file_operations flags_fops = { + .read = flags_read, + .write = flags_write, + .llseek = generic_file_llseek, +}; + +/* + * On which CPU to inject? + */ +MCE_INJECT_GET(extcpu); + +static int inj_extcpu_set(void *data, u64 val) +{ + struct mce *m = (struct mce *)data; + + if (val >= nr_cpu_ids || !cpu_online(val)) { + pr_err("%s: Invalid CPU: %llu\n", __func__, val); + return -EINVAL; + } + m->extcpu = val; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n"); + +static void trigger_mce(void *info) +{ + asm volatile("int $18"); +} + +static void do_inject(void) +{ + u64 mcg_status = 0; + unsigned int cpu = i_mce.extcpu; + u8 b = i_mce.bank; + + if (i_mce.misc) + i_mce.status |= MCI_STATUS_MISCV; + + if (inj_type == SW_INJ) { + mce_inject_log(&i_mce); + return; + } + + /* prep MCE global settings for the injection */ + mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV; + + if (!(i_mce.status & MCI_STATUS_PCC)) + mcg_status |= MCG_STATUS_RIPV; + + get_online_cpus(); + if (!cpu_online(cpu)) + goto err; + + toggle_hw_mce_inject(cpu, true); + + wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS, + (u32)mcg_status, (u32)(mcg_status >> 32)); + + wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b), + (u32)i_mce.status, (u32)(i_mce.status >> 32)); + + wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b), + (u32)i_mce.addr, (u32)(i_mce.addr >> 32)); + + wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b), + (u32)i_mce.misc, (u32)(i_mce.misc >> 32)); + + toggle_hw_mce_inject(cpu, false); + + smp_call_function_single(cpu, trigger_mce, NULL, 0); + +err: + put_online_cpus(); + +} + +/* + * This denotes into which bank we're injecting and triggers + * the injection, at the same time. 
+ */ +static int inj_bank_set(void *data, u64 val) +{ + struct mce *m = (struct mce *)data; + + if (val >= n_banks) { + pr_err("Non-existent MCE bank: %llu\n", val); + return -EINVAL; + } + + m->bank = val; + do_inject(); + + return 0; +} + +MCE_INJECT_GET(bank); + +DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n"); + +static const char readme_msg[] = +"Description of the files and their usages:\n" +"\n" +"Note1: i refers to the bank number below.\n" +"Note2: See respective BKDGs for the exact bit definitions of the files below\n" +"as they mirror the hardware registers.\n" +"\n" +"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n" +"\t attributes of the error which caused the MCE.\n" +"\n" +"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n" +"\t used for error thresholding purposes and its validity is indicated by\n" +"\t MCi_STATUS[MiscV].\n" +"\n" +"addr:\t Error address value to be written to MCi_ADDR. Log address information\n" +"\t associated with the error.\n" +"\n" +"cpu:\t The CPU to inject the error on.\n" +"\n" +"bank:\t Specify the bank you want to inject the error into: the number of\n" +"\t banks in a processor varies and is family/model-specific, therefore, the\n" +"\t supplied value is sanity-checked. Setting the bank value also triggers the\n" +"\t injection.\n" +"\n" +"flags:\t Injection type to be performed. Writing to this file will trigger a\n" +"\t real machine check, an APIC interrupt or invoke the error decoder routines\n" +"\t for AMD processors.\n" +"\n" +"\t Allowed error injection types:\n" +"\t - \"sw\": Software error injection. Decode error to a human-readable \n" +"\t format only. Safe to use.\n" +"\t - \"hw\": Hardware error injection. Causes the #MC exception handler to \n" +"\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n" +"\t is set. 
Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n" +"\t before injecting.\n" +"\n"; + +static ssize_t +inj_readme_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_read_from_buffer(ubuf, cnt, ppos, + readme_msg, strlen(readme_msg)); +} + +static const struct file_operations readme_fops = { + .read = inj_readme_read, +}; + +static struct dfs_node { + char *name; + struct dentry *d; + const struct file_operations *fops; + umode_t perm; +} dfs_fls[] = { + { .name = "status", .fops = &status_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "misc", .fops = &misc_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "addr", .fops = &addr_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "bank", .fops = &bank_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "flags", .fops = &flags_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "cpu", .fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR }, + { .name = "README", .fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH }, +}; + +static int __init init_mce_inject(void) +{ + int i; + u64 cap; + + rdmsrl(MSR_IA32_MCG_CAP, cap); + n_banks = cap & MCG_BANKCNT_MASK; + + dfs_inj = debugfs_create_dir("mce-inject", NULL); + if (!dfs_inj) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) { + dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name, + dfs_fls[i].perm, + dfs_inj, + &i_mce, + dfs_fls[i].fops); + + if (!dfs_fls[i].d) + goto err_dfs_add; + } + + return 0; + +err_dfs_add: + while (--i >= 0) + debugfs_remove(dfs_fls[i].d); + + debugfs_remove(dfs_inj); + dfs_inj = NULL; + + return -ENOMEM; +} + +static void __exit exit_mce_inject(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) + debugfs_remove(dfs_fls[i].d); + + memset(&dfs_fls, 0, sizeof(dfs_fls)); + + debugfs_remove(dfs_inj); + dfs_inj = NULL; +} +module_init(init_mce_inject); +module_exit(exit_mce_inject); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>"); +MODULE_AUTHOR("AMD Inc."); +MODULE_DESCRIPTION("MCE injection facility for RAS testing"); diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h index b9531d343134..755481f14d90 100644 --- a/arch/x86/um/asm/barrier.h +++ b/arch/x86/um/asm/barrier.h @@ -45,17 +45,4 @@ #define read_barrier_depends() do { } while (0) #define smp_read_barrier_depends() do { } while (0) -/* - * Stop RDTSC speculation. This is needed when you need to use RDTSC - * (or get_cycles or vread that possibly accesses the TSC) in a defined - * code region. - * - * (Could use an alternative three way for this if there was one.) - */ -static inline void rdtsc_barrier(void) -{ - alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, - "lfence", X86_FEATURE_LFENCE_RDTSC); -} - #endif diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index e88fda867a33..484145368a24 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -8,7 +8,7 @@ config XEN select PARAVIRT_CLOCK select XEN_HAVE_PVMMU depends on X86_64 || (X86_32 && X86_PAE) - depends on X86_TSC + depends on X86_LOCAL_APIC && X86_TSC help This is the Linux Xen port. 
Enabling this will allow the kernel to boot in a paravirtualized environment under the @@ -17,7 +17,7 @@ config XEN config XEN_DOM0 def_bool y depends on XEN && PCI_XEN && SWIOTLB_XEN - depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI + depends on X86_IO_APIC && ACPI && PCI config XEN_PVHVM def_bool y diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 7322755f337a..4b6e29ac0968 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp) obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ time.o xen-asm.o xen-asm_$(BITS).o \ grant-table.o suspend.o platform-pci-unplug.o \ - p2m.o + p2m.o apic.o obj-$(CONFIG_EVENT_TRACING) += trace.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o -obj-$(CONFIG_XEN_DOM0) += apic.o vga.o +obj-$(CONFIG_XEN_DOM0) += vga.o obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o obj-$(CONFIG_XEN_EFI) += efi.o diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 0b95c9b8283f..d9cfa452da9d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot) pte_t pte; unsigned long pfn; struct page *page; + unsigned char dummy; ptep = lookup_address((unsigned long)v, &level); BUG_ON(ptep == NULL); @@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot) pte = pfn_pte(pfn, prot); + /* + * Careful: update_va_mapping() will fail if the virtual address + * we're poking isn't populated in the page tables. We don't + * need to worry about the direct map (that's always in the page + * tables), but we need to be careful about vmap space. In + * particular, the top level page table can lazily propagate + * entries between processes, so if we've switched mms since we + * vmapped the target in the first place, we might not have the + * top-level page table entry populated. + * + * We disable preemption because we want the same mm active when + * we probe the target and when we issue the hypercall. We'll + * have the same nominal mm, but if we're a kernel thread, lazy + * mm dropping could change our pgd. + * + * Out of an abundance of caution, this uses __get_user() to fault + * in the target address just in case there's some obscure case + * in which the target address isn't readable. + */ + + preempt_disable(); + + pagefault_disable(); /* Avoid warnings due to being atomic. */ + __get_user(dummy, (unsigned char __user __force *)v); + pagefault_enable(); + if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) BUG(); @@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot) BUG(); } else kmap_flush_unused(); + + preempt_enable(); } static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) @@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; int i; + /* + * We need to mark the all aliases of the LDT pages RO. We + * don't need to call vm_flush_aliases(), though, since that's + * only responsible for flushing aliases out the TLBs, not the + * page tables, and Xen will flush the TLB for us if needed. + * + * To avoid confusing future readers: none of this is necessary + * to load the LDT. The hypervisor only checks this when the + * LDT is faulted in due to subsequent descriptor access. 
+ */ + for(i = 0; i < entries; i += entries_per_page) set_aliased_prot(ldt + i, PAGE_KERNEL_RO); } @@ -1175,11 +1215,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_msr = xen_read_msr_safe, .write_msr = xen_write_msr_safe, - .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, - .read_tscp = native_read_tscp, - .iret = xen_iret, #ifdef CONFIG_X86_64 .usergs_sysret32 = xen_sysret32, diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 55da33b1d51c..f1ba6a092854 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -274,30 +274,18 @@ static s64 get_abs_timeout(unsigned long delta) return xen_clocksource_read() + delta; } -static void xen_timerop_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int xen_timerop_shutdown(struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* unsupported */ - WARN_ON(1); - break; - - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_RESUME: - break; - - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - HYPERVISOR_set_timer_op(0); /* cancel timeout */ - break; - } + /* cancel timeout */ + HYPERVISOR_set_timer_op(0); + + return 0; } static int xen_timerop_set_next_event(unsigned long delta, struct clock_event_device *evt) { - WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT); + WARN_ON(!clockevent_state_oneshot(evt)); if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0) BUG(); @@ -310,46 +298,39 @@ static int xen_timerop_set_next_event(unsigned long delta, } static const struct clock_event_device xen_timerop_clockevent = { - .name = "xen", - .features = CLOCK_EVT_FEAT_ONESHOT, + .name = "xen", + .features = CLOCK_EVT_FEAT_ONESHOT, - .max_delta_ns = 0xffffffff, - .min_delta_ns = TIMER_SLOP, + .max_delta_ns = 0xffffffff, + .min_delta_ns = TIMER_SLOP, - .mult = 1, - .shift = 0, - .rating = 500, + .mult = 1, + .shift = 0, + .rating = 500, - .set_mode = xen_timerop_set_mode, - .set_next_event = xen_timerop_set_next_event, + .set_state_shutdown = xen_timerop_shutdown, + .set_next_event = xen_timerop_set_next_event, }; +static int xen_vcpuop_shutdown(struct clock_event_device *evt) +{ + int cpu = smp_processor_id(); + if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) || + HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) + BUG(); + + return 0; +} -static void xen_vcpuop_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int xen_vcpuop_set_oneshot(struct clock_event_device *evt) { int cpu = smp_processor_id(); - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - WARN_ON(1); /* unsupported */ - break; - - case CLOCK_EVT_MODE_ONESHOT: - if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) - BUG(); - break; + if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) + BUG(); - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) || - HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL)) - BUG(); - break; - case CLOCK_EVT_MODE_RESUME: - break; - } + return 0; } static int xen_vcpuop_set_next_event(unsigned long delta, @@ -359,7 +340,7 @@ static int xen_vcpuop_set_next_event(unsigned long delta, struct vcpu_set_singleshot_timer single; int ret; - WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT); + WARN_ON(!clockevent_state_oneshot(evt)); single.timeout_abs_ns = get_abs_timeout(delta); single.flags = VCPU_SSHOTTMR_future; @@ -382,7 +363,8 @@ static const struct clock_event_device xen_vcpuop_clockevent = { .shift = 0, .rating 
= 500, - .set_mode = xen_vcpuop_set_mode, + .set_state_shutdown = xen_vcpuop_shutdown, + .set_state_oneshot = xen_vcpuop_set_oneshot, .set_next_event = xen_vcpuop_set_next_event, }; diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index c20fe29e65f4..2292721b1d10 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -101,17 +101,15 @@ struct dom0_vga_console_info; #ifdef CONFIG_XEN_DOM0 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); -void __init xen_init_apic(void); #else static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) { } -static inline void __init xen_init_apic(void) -{ -} #endif +void __init xen_init_apic(void); + #ifdef CONFIG_XEN_EFI extern void xen_efi_init(void); #else diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index e5b872ba2484..3bd3504a6cc7 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -14,12 +14,15 @@ config XTENSA select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK + select HAVE_DMA_API_DEBUG + select HAVE_DMA_ATTRS select HAVE_FUNCTION_TRACER select HAVE_IRQ_TIME_ACCOUNTING select HAVE_OPROFILE select HAVE_PERF_EVENTS select IRQ_DOMAIN select MODULES_USE_ELF_RELA + select PERF_USE_VMALLOC select VIRT_TO_BUS help Xtensa processors are 32-bit RISC machines designed by Tensilica @@ -61,9 +64,7 @@ config TRACE_IRQFLAGS_SUPPORT def_bool y config MMU - bool - default n if !XTENSA_VARIANT_CUSTOM - default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM + def_bool n config VARIANT_IRQ_SWITCH def_bool n @@ -71,9 +72,6 @@ config VARIANT_IRQ_SWITCH config HAVE_XTENSA_GPIO32 def_bool n -config MAY_HAVE_SMP - def_bool n - menu "Processor type and features" choice @@ -100,7 +98,6 @@ config XTENSA_VARIANT_DC233C config XTENSA_VARIANT_CUSTOM bool "Custom Xtensa processor configuration" - select MAY_HAVE_SMP select HAVE_XTENSA_GPIO32 help Select this variant to use a custom Xtensa processor configuration. @@ -126,10 +123,21 @@ config XTENSA_VARIANT_MMU bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)" depends on XTENSA_VARIANT_CUSTOM default y + select MMU help Build a Conventional Kernel with full MMU support, ie: it supports a TLB with auto-loading, page protection. +config XTENSA_VARIANT_HAVE_PERF_EVENTS + bool "Core variant has Performance Monitor Module" + depends on XTENSA_VARIANT_CUSTOM + default n + help + Enable if core variant has Performance Monitor Module with + External Registers Interface. + + If unsure, say N. 
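
The HAVE_DMA_ATTRS and HAVE_DMA_API_DEBUG selects added to the xtensa Kconfig above pair with the asm/dma-mapping.h rewrite further below: xtensa now routes DMA through a struct dma_map_ops, defaulting to xtensa_dma_map_ops unless a device installs its own ops in dev->archdata.dma_ops (added by the new asm/device.h later in this series). A hedged sketch of how platform code could use that per-device override — my_dma_ops and the probe function are hypothetical and not part of this series:

#include <linux/device.h>
#include <linux/dma-mapping.h>

extern struct dma_map_ops my_dma_ops;	/* assumed to be defined elsewhere */

static int my_platform_probe(struct device *dev)
{
	/* Per-device override; get_dma_ops() falls back to xtensa_dma_map_ops otherwise. */
	dev->archdata.dma_ops = &my_dma_ops;

	return 0;
}

The new dma_alloc_attrs()/dma_free_attrs() wrappers shown below then dispatch through whichever ops get_dma_ops() returns, with the dma-debug hooks enabled when DMA_API_DEBUG is configured.
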
+ config XTENSA_UNALIGNED_USER bool "Unaligned memory access in use space" help @@ -143,7 +151,7 @@ source "kernel/Kconfig.preempt" config HAVE_SMP bool "System Supports SMP (MX)" - depends on MAY_HAVE_SMP + depends on XTENSA_VARIANT_CUSTOM select XTENSA_MX help This option is use to indicate that the system-on-a-chip (SOC) diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 14d15bf1a95b..63c223dff5f1 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += bitsperlong.h generic-y += bug.h generic-y += clkdev.h generic-y += cputime.h -generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += errno.h @@ -19,6 +18,7 @@ generic-y += linkage.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += percpu.h generic-y += preempt.h generic-y += resource.h diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 00b7d46b35b8..ebcd1f6fc8cb 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -29,7 +29,7 @@ * * Locking interrupts looks like this: * - * rsil a15, LOCKLEVEL + * rsil a15, TOPLEVEL * <code> * wsr a15, PS * rsync @@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v) \ unsigned int vval; \ \ __asm__ __volatile__( \ - " rsil a15, "__stringify(LOCKLEVEL)"\n"\ + " rsil a15, "__stringify(TOPLEVEL)"\n"\ " l32i %0, %2, 0\n" \ " " #op " %0, %0, %1\n" \ " s32i %0, %2, 0\n" \ @@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ unsigned int vval; \ \ __asm__ __volatile__( \ - " rsil a15,"__stringify(LOCKLEVEL)"\n" \ + " rsil a15,"__stringify(TOPLEVEL)"\n" \ " l32i %0, %2, 0\n" \ " " #op " %0, %0, %1\n" \ " s32i %0, %2, 0\n" \ @@ -272,7 +272,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) unsigned int vval; __asm__ __volatile__( - " rsil a15,"__stringify(LOCKLEVEL)"\n" + " rsil a15,"__stringify(TOPLEVEL)"\n" " l32i %0, %2, 0\n" " xor %1, %4, %3\n" " and %0, %0, %4\n" @@ -306,7 +306,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) unsigned int vval; __asm__ __volatile__( - " rsil a15,"__stringify(LOCKLEVEL)"\n" + " rsil a15,"__stringify(TOPLEVEL)"\n" " l32i %0, %2, 0\n" " or %0, %0, %1\n" " s32i %0, %2, 0\n" diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 370b26f38414..201e9009efd8 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -34,7 +34,7 @@ __cmpxchg_u32(volatile int *p, int old, int new) return new; #else __asm__ __volatile__( - " rsil a15, "__stringify(LOCKLEVEL)"\n" + " rsil a15, "__stringify(TOPLEVEL)"\n" " l32i %0, %1, 0\n" " bne %0, %2, 1f\n" " s32i %3, %1, 0\n" @@ -123,7 +123,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) #else unsigned long tmp; __asm__ __volatile__( - " rsil a15, "__stringify(LOCKLEVEL)"\n" + " rsil a15, "__stringify(TOPLEVEL)"\n" " l32i %0, %1, 0\n" " s32i %2, %1, 0\n" " wsr a15, ps\n" diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h new file mode 100644 index 000000000000..fe1f5c878493 --- /dev/null +++ b/arch/xtensa/include/asm/device.h @@ -0,0 +1,19 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#ifndef _ASM_XTENSA_DEVICE_H +#define _ASM_XTENSA_DEVICE_H + +struct dma_map_ops; + +struct dev_archdata { + /* DMA operations on that device 
*/ + struct dma_map_ops *dma_ops; +}; + +struct pdev_archdata { +}; + +#endif /* _ASM_XTENSA_DEVICE_H */ diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 1f5f6dc09736..f01cb3044e50 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -1,11 +1,10 @@ /* - * include/asm-xtensa/dma-mapping.h - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003 - 2005 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #ifndef _XTENSA_DMA_MAPPING_H @@ -13,142 +12,67 @@ #include <asm/cache.h> #include <asm/io.h> + +#include <asm-generic/dma-coherent.h> + #include <linux/mm.h> #include <linux/scatterlist.h> #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -/* - * DMA-consistent mapping functions. - */ - -extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long); -extern void consistent_free(void*, size_t, dma_addr_t); -extern void consistent_sync(void*, size_t, int); - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); +extern struct dma_map_ops xtensa_dma_map_ops; -void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - -static inline dma_addr_t -dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - consistent_sync(ptr, size, direction); - return virt_to_phys(ptr); -} - -static inline void -dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) +static inline struct dma_map_ops *get_dma_ops(struct device *dev) { - BUG_ON(direction == DMA_NONE); + if (dev && dev->archdata.dma_ops) + return dev->archdata.dma_ops; + else + return &xtensa_dma_map_ops; } -static inline int -dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction) -{ - int i; - struct scatterlist *sg; - - BUG_ON(direction == DMA_NONE); - - for_each_sg(sglist, sg, nents, i) { - BUG_ON(!sg_page(sg)); +#include <asm-generic/dma-mapping-common.h> - sg->dma_address = sg_phys(sg); - consistent_sync(sg_virt(sg), sg->length, direction); - } +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) +#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) +#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) +#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - return nents; -} - -static inline dma_addr_t -dma_map_page(struct device *dev, struct page *page, unsigned long offset, - size_t size, enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset; -} - -static inline void -dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) { - BUG_ON(direction == DMA_NONE); -} + void *ret; + struct dma_map_ops *ops = get_dma_ops(dev); + if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) + return ret; -static inline void -dma_unmap_sg(struct device *dev, struct 
scatterlist *sg, int nhwentries, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -static inline void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - consistent_sync((void *)bus_to_virt(dma_handle), size, direction); -} + ret = ops->alloc(dev, size, dma_handle, gfp, attrs); + debug_dma_alloc_coherent(dev, size, *dma_handle, ret); -static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) -{ - consistent_sync((void *)bus_to_virt(dma_handle), size, direction); + return ret; } -static inline void -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - - consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction); -} - -static inline void -dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) +static inline void dma_free_attrs(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { + struct dma_map_ops *ops = get_dma_ops(dev); - consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction); -} -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir) -{ - int i; - struct scatterlist *sg; + if (dma_release_from_coherent(dev, get_order(size), vaddr)) + return; - for_each_sg(sglist, sg, nelems, i) - consistent_sync(sg_virt(sg), sg->length, dir); + ops->free(dev, size, vaddr, dma_handle, attrs); + debug_dma_free_coherent(dev, size, vaddr, dma_handle); } -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, nelems, i) - consistent_sync(sg_virt(sg), sg->length, dir); -} static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - return 0; + struct dma_map_ops *ops = get_dma_ops(dev); + + debug_dma_mapping_error(dev, dma_addr); + return ops->mapping_error(dev, dma_addr); } static inline int @@ -168,39 +92,7 @@ dma_set_mask(struct device *dev, u64 mask) return 0; } -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - consistent_sync(vaddr, size, direction); -} - -/* Not supported for now */ -static inline int dma_mmap_coherent(struct device *dev, - struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size) -{ - return -EINVAL; -} - -static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size) -{ - return -EINVAL; -} - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - return NULL; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ -} +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); #endif /* _XTENSA_DMA_MAPPING_H */ diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h index ea36674c6ec5..8e090c709046 100644 --- a/arch/xtensa/include/asm/irqflags.h +++ b/arch/xtensa/include/asm/irqflags.h @@ -6,6 +6,7 @@ * for 
more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #ifndef _XTENSA_IRQFLAGS_H @@ -23,8 +24,27 @@ static inline unsigned long arch_local_save_flags(void) static inline unsigned long arch_local_irq_save(void) { unsigned long flags; - asm volatile("rsil %0, "__stringify(LOCKLEVEL) +#if XTENSA_FAKE_NMI +#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL + unsigned long tmp; + + asm volatile("rsr %0, ps\t\n" + "extui %1, %0, 0, 4\t\n" + "bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n" + "rsil %0, "__stringify(LOCKLEVEL)"\n" + "1:" + : "=a" (flags), "=a" (tmp) :: "memory"); +#else + asm volatile("rsr %0, ps\t\n" + "or %0, %0, %1\t\n" + "xsr %0, ps\t\n" + "rsync" + : "=&a" (flags) : "a" (LOCKLEVEL) : "memory"); +#endif +#else + asm volatile("rsil %0, "__stringify(LOCKLEVEL) : "=a" (flags) :: "memory"); +#endif return flags; } diff --git a/arch/xtensa/include/asm/mm-arch-hooks.h b/arch/xtensa/include/asm/mm-arch-hooks.h deleted file mode 100644 index d2e5cfd3dd02..000000000000 --- a/arch/xtensa/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_XTENSA_MM_ARCH_HOOKS_H -#define _ASM_XTENSA_MM_ARCH_HOOKS_H - -#endif /* _ASM_XTENSA_MM_ARCH_HOOKS_H */ diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index b61bdf0eea25..83e2e4bc01ba 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -1,11 +1,10 @@ /* - * include/asm-xtensa/processor.h - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2008 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #ifndef _XTENSA_PROCESSOR_H @@ -45,6 +44,14 @@ #define STACK_TOP_MAX STACK_TOP /* + * General exception cause assigned to fake NMI. Fake NMI needs to be handled + * differently from other interrupts, but it uses common kernel entry/exit + * code. + */ + +#define EXCCAUSE_MAPPED_NMI 62 + +/* * General exception cause assigned to debug exceptions. Debug exceptions go * to their own vector, rather than the general exception vectors (user, * kernel, double); and their specific causes are reported via DEBUGCAUSE @@ -65,10 +72,30 @@ #define VALID_DOUBLE_EXCEPTION_ADDRESS 64 +#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno) +#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL + +#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level) +#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK) + +#define IS_POW2(v) (((v) & ((v) - 1)) == 0) + +#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT) + /* LOCKLEVEL defines the interrupt level that masks all * general-purpose interrupts. 
*/ +#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \ + defined(XCHAL_PROFILING_INTERRUPT) && \ + PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \ + XCHAL_EXCM_LEVEL > 1 && \ + IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)) +#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1) +#else #define LOCKLEVEL XCHAL_EXCM_LEVEL +#endif +#define TOPLEVEL XCHAL_EXCM_LEVEL +#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL) /* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE * registers diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h index 6a05fcb0a20d..fe06e8ed162b 100644 --- a/arch/xtensa/include/asm/stacktrace.h +++ b/arch/xtensa/include/asm/stacktrace.h @@ -33,4 +33,12 @@ void walk_stackframe(unsigned long *sp, int (*fn)(struct stackframe *frame, void *data), void *data); +void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth, + int (*kfn)(struct stackframe *frame, void *data), + int (*ufn)(struct stackframe *frame, void *data), + void *data); +void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth, + int (*ufn)(struct stackframe *frame, void *data), + void *data); + #endif /* _XTENSA_STACKTRACE_H */ diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 677bfcf4ee5d..28f33a8b7f5f 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h @@ -25,30 +25,39 @@ static inline void spill_registers(void) { #if XCHAL_NUM_AREGS > 16 __asm__ __volatile__ ( - " call12 1f\n" + " call8 1f\n" " _j 2f\n" " retw\n" " .align 4\n" "1:\n" +#if XCHAL_NUM_AREGS == 32 + " _entry a1, 32\n" + " addi a8, a0, 3\n" + " _entry a1, 16\n" + " mov a12, a12\n" + " retw\n" +#else " _entry a1, 48\n" - " addi a12, a0, 3\n" -#if XCHAL_NUM_AREGS > 32 - " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n" + " call12 1f\n" + " retw\n" + " .align 4\n" + "1:\n" + " .rept (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n" " _entry a1, 48\n" " mov a12, a0\n" " .endr\n" -#endif - " _entry a1, 48\n" + " _entry a1, 16\n" #if XCHAL_NUM_AREGS % 12 == 0 - " mov a8, a8\n" -#elif XCHAL_NUM_AREGS % 12 == 4 " mov a12, a12\n" -#elif XCHAL_NUM_AREGS % 12 == 8 +#elif XCHAL_NUM_AREGS % 12 == 4 " mov a4, a4\n" +#elif XCHAL_NUM_AREGS % 12 == 8 + " mov a8, a8\n" #endif " retw\n" +#endif "2:\n" - : : : "a12", "a13", "memory"); + : : : "a8", "a9", "memory"); #else __asm__ __volatile__ ( " mov a12, a12\n" diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index d3a0f0fd56dd..50137bc9e150 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o obj-$(CONFIG_SMP) += smp.o mxhead.o +obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o AFLAGS_head.o += -mtext-section-literals @@ -27,10 +28,11 @@ AFLAGS_head.o += -mtext-section-literals # # Replicate rules in scripts/Makefile.build -sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \ - -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \ - -e 's/\*(\(\.text .*\))/*(.literal \1)/g' \ - -e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g' +sed-y = -e ':a; s/\*(\([^)]*\)\.text\.unlikely/*(\1.literal.unlikely .{text}.unlikely/; ta; ' \ + -e ':b; s/\*(\([^)]*\)\.text\(\.[a-z]*\)/*(\1.{text}\2.literal .{text}\2/; tb; ' \ + -e ':c; s/\*(\([^)]*\)\(\.[a-z]*it\|\.ref\)\.text/*(\1\2.literal \2.{text}/; tc; ' \ + -e ':d; s/\*(\([^)]\+ \|\)\.text/*(\1.literal .{text}/; td; ' \ + -e 
's/\.{text}/.text/g' quiet_cmd__cpp_lds_S = LDS $@ cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \ diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 82bbfa5a05b3..504130357597 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1,6 +1,4 @@ /* - * arch/xtensa/kernel/entry.S - * * Low-level exception handling * * This file is subject to the terms and conditions of the GNU General Public @@ -8,6 +6,7 @@ * for more details. * * Copyright (C) 2004 - 2008 by Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. * * Chris Zankel <chris@zankel.net> * @@ -75,6 +74,27 @@ #endif .endm + + .macro irq_save flags tmp +#if XTENSA_FAKE_NMI +#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL + rsr \flags, ps + extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + bgei \tmp, LOCKLEVEL, 99f + rsil \tmp, LOCKLEVEL +99: +#else + movi \tmp, LOCKLEVEL + rsr \flags, ps + or \flags, \flags, \tmp + xsr \flags, ps + rsync +#endif +#else + rsil \flags, LOCKLEVEL +#endif + .endm + /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */ /* @@ -122,6 +142,7 @@ _user_exception: /* Save SAR and turn off single stepping */ movi a2, 0 + wsr a2, depc # terminate user stack trace with 0 rsr a3, sar xsr a2, icountlevel s32i a3, a1, PT_SAR @@ -301,7 +322,18 @@ _kernel_exception: s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 + _bnei a2, 1, 1f + + /* Copy spill slots of a0 and a1 to imitate movsp + * in order to keep exception stack continuous + */ + l32i a3, a1, PT_SIZE + l32i a0, a1, PT_SIZE + 4 + s32e a3, a1, -16 + s32e a0, a1, -12 1: + l32i a0, a1, PT_AREG0 # restore saved a0 + wsr a0, depc #ifdef KERNEL_STACK_OVERFLOW_CHECK @@ -340,75 +372,88 @@ common_exception: /* It is now save to restore the EXC_TABLE_FIXUP variable. */ - rsr a0, exccause + rsr a2, exccause movi a3, 0 - rsr a2, excsave1 - s32i a0, a1, PT_EXCCAUSE - s32i a3, a2, EXC_TABLE_FIXUP - - /* All unrecoverable states are saved on stack, now, and a1 is valid, - * so we can allow exceptions and interrupts (*) again. - * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) + rsr a0, excsave1 + s32i a2, a1, PT_EXCCAUSE + s32i a3, a0, EXC_TABLE_FIXUP + + /* All unrecoverable states are saved on stack, now, and a1 is valid. + * Now we can allow exceptions again. In case we've got an interrupt + * PS.INTLEVEL is set to LOCKLEVEL disabling furhter interrupts, + * otherwise it's left unchanged. * - * (*) We only allow interrupts if they were previously enabled and - * we're not handling an IRQ + * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) */ rsr a3, ps - addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT - movi a2, LOCKLEVEL + s32i a3, a1, PT_PS # save ps + +#if XTENSA_FAKE_NMI + /* Correct PS needs to be saved in the PT_PS: + * - in case of exception or level-1 interrupt it's in the PS, + * and is already saved. + * - in case of medium level interrupt it's in the excsave2. 
+ */ + movi a0, EXCCAUSE_MAPPED_NMI + extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + beq a2, a0, .Lmedium_level_irq + bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception + beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0 + +.Lmedium_level_irq: + rsr a0, excsave2 + s32i a0, a1, PT_PS # save medium-level interrupt ps + bgei a3, LOCKLEVEL, .Lexception + +.Llevel1_irq: + movi a3, LOCKLEVEL + +.Lexception: + movi a0, 1 << PS_WOE_BIT + or a3, a3, a0 +#else + addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT + movi a0, LOCKLEVEL extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH # a3 = PS.INTLEVEL - moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt + moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt movi a2, 1 << PS_WOE_BIT or a3, a3, a2 - rsr a0, exccause - xsr a3, ps + rsr a2, exccause +#endif - s32i a3, a1, PT_PS # save ps + /* restore return address (or 0 if return to userspace) */ + rsr a0, depc + wsr a3, ps + rsync # PS.WOE => rsync => overflow /* Save lbeg, lend */ - rsr a2, lbeg + rsr a4, lbeg rsr a3, lend - s32i a2, a1, PT_LBEG + s32i a4, a1, PT_LBEG s32i a3, a1, PT_LEND /* Save SCOMPARE1 */ #if XCHAL_HAVE_S32C1I - rsr a2, scompare1 - s32i a2, a1, PT_SCOMPARE1 + rsr a3, scompare1 + s32i a3, a1, PT_SCOMPARE1 #endif /* Save optional registers. */ - save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT + save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT -#ifdef CONFIG_TRACE_IRQFLAGS - l32i a4, a1, PT_DEPC - /* Double exception means we came here with an exception - * while PS.EXCM was set, i.e. interrupts disabled. - */ - bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f - l32i a4, a1, PT_EXCCAUSE - bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f - /* We came here with an interrupt means interrupts were enabled - * and we've just disabled them. - */ - movi a4, trace_hardirqs_off - callx4 a4 -1: -#endif - /* Go to second-level dispatcher. Set up parameters to pass to the * exception handler and call the exception handler. */ rsr a4, excsave1 mov a6, a1 # pass stack frame - mov a7, a0 # pass EXCCAUSE - addx4 a4, a0, a4 + mov a7, a2 # pass EXCCAUSE + addx4 a4, a2, a4 l32i a4, a4, EXC_TABLE_DEFAULT # load handler /* Call the second-level handler */ @@ -419,8 +464,17 @@ common_exception: .global common_exception_return common_exception_return: +#if XTENSA_FAKE_NMI + l32i a2, a1, PT_EXCCAUSE + movi a3, EXCCAUSE_MAPPED_NMI + beq a2, a3, .LNMIexit +#endif 1: - rsil a2, LOCKLEVEL + irq_save a2, a3 +#ifdef CONFIG_TRACE_IRQFLAGS + movi a4, trace_hardirqs_off + callx4 a4 +#endif /* Jump if we are returning from kernel exceptions. */ @@ -445,6 +499,10 @@ common_exception_return: /* Call do_signal() */ +#ifdef CONFIG_TRACE_IRQFLAGS + movi a4, trace_hardirqs_on + callx4 a4 +#endif rsil a2, 0 movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) mov a6, a1 @@ -453,6 +511,10 @@ common_exception_return: 3: /* Reschedule */ +#ifdef CONFIG_TRACE_IRQFLAGS + movi a4, trace_hardirqs_on + callx4 a4 +#endif rsil a2, 0 movi a4, schedule # void schedule (void) callx4 a4 @@ -471,6 +533,12 @@ common_exception_return: j 1b #endif +#if XTENSA_FAKE_NMI +.LNMIexit: + l32i a3, a1, PT_PS + _bbci.l a3, PS_UM_BIT, 4f +#endif + 5: #ifdef CONFIG_DEBUG_TLB_SANITY l32i a4, a1, PT_DEPC @@ -481,16 +549,8 @@ common_exception_return: 6: 4: #ifdef CONFIG_TRACE_IRQFLAGS - l32i a4, a1, PT_DEPC - /* Double exception means we came here with an exception - * while PS.EXCM was set, i.e. interrupts disabled. 
- */ - bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f - l32i a4, a1, PT_EXCCAUSE - bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f - /* We came here with an interrupt means interrupts were enabled - * and we'll reenable them on return. - */ + extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + bgei a4, LOCKLEVEL, 1f movi a4, trace_hardirqs_on callx4 a4 1: @@ -568,12 +628,13 @@ user_exception_exit: * (if we have restored WSBITS-1 frames). */ +2: #if XCHAL_HAVE_THREADPTR l32i a3, a1, PT_THREADPTR wur a3, threadptr #endif -2: j common_exception_exit + j common_exception_exit /* This is the kernel exception exit. * We avoided to do a MOVSP when we entered the exception, but we @@ -1561,6 +1622,13 @@ ENTRY(fast_second_level_miss) rfde 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 + bnez a0, 8b + + /* Even more unlikely case active_mm == 0. + * We can get here with NMI in the middle of context_switch that + * touches vmalloc area. + */ + movi a0, init_mm j 8b #if (DCACHE_WAY_SIZE > PAGE_SIZE) @@ -1820,7 +1888,7 @@ ENDPROC(system_call) mov a12, a0 .endr #endif - _entry a1, 48 + _entry a1, 16 #if XCHAL_NUM_AREGS % 12 == 0 mov a8, a8 #elif XCHAL_NUM_AREGS % 12 == 4 @@ -1844,7 +1912,7 @@ ENDPROC(system_call) ENTRY(_switch_to) - entry a1, 16 + entry a1, 48 mov a11, a3 # and 'next' (a3) @@ -1864,10 +1932,8 @@ ENTRY(_switch_to) /* Disable ints while we manipulate the stack pointer. */ - rsil a14, LOCKLEVEL - rsr a3, excsave1 + irq_save a14, a3 rsync - s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ /* Switch CPENABLE */ @@ -1888,9 +1954,7 @@ ENTRY(_switch_to) */ rsr a3, excsave1 # exc_table - movi a6, 0 addi a7, a5, PT_REGS_OFFSET - s32i a6, a3, EXC_TABLE_FIXUP s32i a7, a3, EXC_TABLE_KSTK /* restore context of the task 'next' */ diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index 3eee94f621eb..6df31cacc4b8 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -28,7 +28,7 @@ #include <asm/uaccess.h> #include <asm/platform.h> -atomic_t irq_err_count; +DECLARE_PER_CPU(unsigned long, nmi_count); asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) { @@ -57,11 +57,16 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) int arch_show_interrupts(struct seq_file *p, int prec) { + unsigned cpu __maybe_unused; #ifdef CONFIG_SMP show_ipi_list(p, prec); #endif - seq_printf(p, "%*s: ", prec, "ERR"); - seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); +#if XTENSA_FAKE_NMI + seq_printf(p, "%*s:", prec, "NMI"); + for_each_online_cpu(cpu) + seq_printf(p, " %10lu", per_cpu(nmi_count, cpu)); + seq_puts(p, " Non-maskable interrupts\n"); +#endif return 0; } @@ -106,6 +111,12 @@ int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_set_chip_and_handler_name(irq, irq_chip, handle_percpu_irq, "timer"); irq_clear_status_flags(irq, IRQ_LEVEL); +#ifdef XCHAL_INTTYPE_MASK_PROFILING + } else if (mask & XCHAL_INTTYPE_MASK_PROFILING) { + irq_set_chip_and_handler_name(irq, irq_chip, + handle_percpu_irq, "profiling"); + irq_set_status_flags(irq, IRQ_LEVEL); +#endif } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */ /* XCHAL_INTTYPE_MASK_NMI */ irq_set_chip_and_handler_name(irq, irq_chip, diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index e8b76b8e4b29..fb75ebf1463a 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -1,6 +1,4 @@ /* - * arch/xtensa/kernel/pci-dma.c - * * DMA coherent memory allocation. * * This program is free software; you can redistribute it and/or modify it @@ -9,6 +7,7 @@ * option) any later version. 
* * Copyright (C) 2002 - 2005 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. * * Based on version for i386. * @@ -25,13 +24,107 @@ #include <asm/io.h> #include <asm/cacheflush.h> +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_BIDIRECTIONAL: + __flush_invalidate_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_FROM_DEVICE: + __invalidate_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_TO_DEVICE: + __flush_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_NONE: + BUG(); + break; + } +} +EXPORT_SYMBOL(dma_cache_sync); + +static void xtensa_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir) +{ + void *vaddr; + + switch (dir) { + case DMA_BIDIRECTIONAL: + case DMA_FROM_DEVICE: + vaddr = bus_to_virt(dma_handle); + __invalidate_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_NONE: + BUG(); + break; + + default: + break; + } +} + +static void xtensa_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir) +{ + void *vaddr; + + switch (dir) { + case DMA_BIDIRECTIONAL: + case DMA_TO_DEVICE: + vaddr = bus_to_virt(dma_handle); + __flush_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_NONE: + BUG(); + break; + + default: + break; + } +} + +static void xtensa_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + xtensa_sync_single_for_cpu(dev, sg_dma_address(s), + sg_dma_len(s), dir); + } +} + +static void xtensa_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + xtensa_sync_single_for_device(dev, sg_dma_address(s), + sg_dma_len(s), dir); + } +} + /* * Note: We assume that the full memory space is always mapped to 'kseg' * Otherwise we have to use page attributes (not implemented). 
*/ -void * -dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) +static void *xtensa_dma_alloc(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t flag, + struct dma_attrs *attrs) { unsigned long ret; unsigned long uncached = 0; @@ -52,20 +145,15 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); + uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; + *handle = virt_to_bus((void *)ret); + __invalidate_dcache_range(ret, size); - if (ret != 0) { - memset((void*) ret, 0, size); - uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR; - *handle = virt_to_bus((void*)ret); - __flush_invalidate_dcache_range(ret, size); - } - - return (void*)uncached; + return (void *)uncached; } -EXPORT_SYMBOL(dma_alloc_coherent); -void dma_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) +static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle, struct dma_attrs *attrs) { unsigned long addr = (unsigned long)vaddr + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; @@ -75,24 +163,79 @@ void dma_free_coherent(struct device *hwdev, size_t size, free_pages(addr, get_order(size)); } -EXPORT_SYMBOL(dma_free_coherent); +static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + dma_addr_t dma_handle = page_to_phys(page) + offset; + + BUG_ON(PageHighMem(page)); + xtensa_sync_single_for_device(dev, dma_handle, size, dir); + return dma_handle; +} -void consistent_sync(void *vaddr, size_t size, int direction) +static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { - switch (direction) { - case PCI_DMA_NONE: - BUG(); - case PCI_DMA_FROMDEVICE: /* invalidate only */ - __invalidate_dcache_range((unsigned long)vaddr, - (unsigned long)size); - break; + xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); +} - case PCI_DMA_TODEVICE: /* writeback only */ - case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ - __flush_invalidate_dcache_range((unsigned long)vaddr, - (unsigned long)size); - break; +static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset, + s->length, dir, attrs); + } + return nents; +} + +static void xtensa_unmap_sg(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + xtensa_unmap_page(dev, sg_dma_address(s), + sg_dma_len(s), dir, attrs); } } -EXPORT_SYMBOL(consistent_sync); + +int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 0; +} + +struct dma_map_ops xtensa_dma_map_ops = { + .alloc = xtensa_dma_alloc, + .free = xtensa_dma_free, + .map_page = xtensa_map_page, + .unmap_page = xtensa_unmap_page, + .map_sg = xtensa_map_sg, + .unmap_sg = xtensa_unmap_sg, + .sync_single_for_cpu = xtensa_sync_single_for_cpu, + .sync_single_for_device = xtensa_sync_single_for_device, + .sync_sg_for_cpu = xtensa_sync_sg_for_cpu, + .sync_sg_for_device = xtensa_sync_sg_for_device, + 
.mapping_error = xtensa_dma_mapping_error, +}; +EXPORT_SYMBOL(xtensa_dma_map_ops); + +#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) + +static int __init xtensa_dma_init(void) +{ + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + return 0; +} +fs_initcall(xtensa_dma_init); diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index b848cc3dc913..d27b4dcf221f 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -210,10 +210,6 @@ subsys_initcall(pcibios_init); void pcibios_fixup_bus(struct pci_bus *bus) { - if (bus->parent) { - /* This is a subordinate bridge */ - pci_read_bridge_bases(bus); - } } void pcibios_set_master(struct pci_dev *dev) diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c new file mode 100644 index 000000000000..54f01188c29c --- /dev/null +++ b/arch/xtensa/kernel/perf_event.c @@ -0,0 +1,454 @@ +/* + * Xtensa Performance Monitor Module driver + * See Tensilica Debug User's Guide for PMU registers documentation. + * + * Copyright (C) 2015 Cadence Design Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> + +#include <asm/processor.h> +#include <asm/stacktrace.h> + +/* Global control/status for all perf counters */ +#define XTENSA_PMU_PMG 0x1000 +/* Perf counter values */ +#define XTENSA_PMU_PM(i) (0x1080 + (i) * 4) +/* Perf counter control registers */ +#define XTENSA_PMU_PMCTRL(i) (0x1100 + (i) * 4) +/* Perf counter status registers */ +#define XTENSA_PMU_PMSTAT(i) (0x1180 + (i) * 4) + +#define XTENSA_PMU_PMG_PMEN 0x1 + +#define XTENSA_PMU_COUNTER_MASK 0xffffffffULL +#define XTENSA_PMU_COUNTER_MAX 0x7fffffff + +#define XTENSA_PMU_PMCTRL_INTEN 0x00000001 +#define XTENSA_PMU_PMCTRL_KRNLCNT 0x00000008 +#define XTENSA_PMU_PMCTRL_TRACELEVEL 0x000000f0 +#define XTENSA_PMU_PMCTRL_SELECT_SHIFT 8 +#define XTENSA_PMU_PMCTRL_SELECT 0x00001f00 +#define XTENSA_PMU_PMCTRL_MASK_SHIFT 16 +#define XTENSA_PMU_PMCTRL_MASK 0xffff0000 + +#define XTENSA_PMU_MASK(select, mask) \ + (((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \ + ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \ + XTENSA_PMU_PMCTRL_TRACELEVEL | \ + XTENSA_PMU_PMCTRL_INTEN) + +#define XTENSA_PMU_PMSTAT_OVFL 0x00000001 +#define XTENSA_PMU_PMSTAT_INTASRT 0x00000010 + +struct xtensa_pmu_events { + /* Array of events currently on this core */ + struct perf_event *event[XCHAL_NUM_PERF_COUNTERS]; + /* Bitmap of used hardware counters */ + unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)]; +}; +static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events); + +static const u32 xtensa_hw_ctl[] = { + [PERF_COUNT_HW_CPU_CYCLES] = XTENSA_PMU_MASK(0, 0x1), + [PERF_COUNT_HW_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0xffff), + [PERF_COUNT_HW_CACHE_REFERENCES] = XTENSA_PMU_MASK(10, 0x1), + [PERF_COUNT_HW_CACHE_MISSES] = XTENSA_PMU_MASK(12, 0x1), + /* Taken and non-taken branches + taken loop ends */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0x490), + /* Instruction-related + other global stall cycles */ + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XTENSA_PMU_MASK(4, 0x1ff), + /* Data-related global stall cycles */ + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = XTENSA_PMU_MASK(3, 0x1ff), +}; + +#define C(_x) PERF_COUNT_HW_CACHE_##_x + 
+static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(10, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(10, 0x2), + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(11, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(11, 0x2), + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(8, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(8, 0x2), + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(9, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(9, 0x8), + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(7, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(7, 0x8), + }, + }, +}; + +static int xtensa_pmu_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + int ret; + + cache_type = (config >> 0) & 0xff; + cache_op = (config >> 8) & 0xff; + cache_result = (config >> 16) & 0xff; + + if (cache_type >= ARRAY_SIZE(xtensa_cache_ctl) || + cache_op >= C(OP_MAX) || + cache_result >= C(RESULT_MAX)) + return -EINVAL; + + ret = xtensa_cache_ctl[cache_type][cache_op][cache_result]; + + if (ret == 0) + return -EINVAL; + + return ret; +} + +static inline uint32_t xtensa_pmu_read_counter(int idx) +{ + return get_er(XTENSA_PMU_PM(idx)); +} + +static inline void xtensa_pmu_write_counter(int idx, uint32_t v) +{ + set_er(v, XTENSA_PMU_PM(idx)); +} + +static void xtensa_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + uint64_t prev_raw_count, new_raw_count; + int64_t delta; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = xtensa_pmu_read_counter(event->hw.idx); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + + delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static bool xtensa_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + bool rc = false; + s64 left; + + if (!is_sampling_event(event)) { + left = XTENSA_PMU_COUNTER_MAX; + } else { + s64 period = hwc->sample_period; + + left = local64_read(&hwc->period_left); + if (left <= -period) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + rc = true; + } else if (left <= 0) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + rc = true; + } + if (left > XTENSA_PMU_COUNTER_MAX) + left = XTENSA_PMU_COUNTER_MAX; + } + + local64_set(&hwc->prev_count, -left); + xtensa_pmu_write_counter(idx, -left); + perf_event_update_userpage(event); + + return rc; +} + +static void xtensa_pmu_enable(struct pmu *pmu) +{ + set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG); +} + +static void xtensa_pmu_disable(struct pmu *pmu) +{ + set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG); +} + +static int xtensa_pmu_event_init(struct perf_event *event) +{ + int ret; + + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) || + xtensa_hw_ctl[event->attr.config] == 0) + return -EINVAL; + event->hw.config = xtensa_hw_ctl[event->attr.config]; + return 0; + + case PERF_TYPE_HW_CACHE: + ret = xtensa_pmu_cache_event(event->attr.config); + if (ret < 0) + return ret; + event->hw.config = ret; + return 0; + + case PERF_TYPE_RAW: + /* Not 'previous counter' select */ + if ((event->attr.config 
& XTENSA_PMU_PMCTRL_SELECT) == + (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT)) + return -EINVAL; + event->hw.config = (event->attr.config & + (XTENSA_PMU_PMCTRL_KRNLCNT | + XTENSA_PMU_PMCTRL_TRACELEVEL | + XTENSA_PMU_PMCTRL_SELECT | + XTENSA_PMU_PMCTRL_MASK)) | + XTENSA_PMU_PMCTRL_INTEN; + return 0; + + default: + return -ENOENT; + } +} + +/* + * Starts/Stops a counter present on the PMU. The PMI handler + * should stop the counter when perf_event_overflow() returns + * !0. ->start() will be used to continue. + */ +static void xtensa_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + xtensa_perf_event_set_period(event, hwc, idx); + } + + hwc->state = 0; + + set_er(hwc->config, XTENSA_PMU_PMCTRL(idx)); +} + +static void xtensa_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!(hwc->state & PERF_HES_STOPPED)) { + set_er(0, XTENSA_PMU_PMCTRL(idx)); + set_er(get_er(XTENSA_PMU_PMSTAT(idx)), + XTENSA_PMU_PMSTAT(idx)); + hwc->state |= PERF_HES_STOPPED; + } + + if ((flags & PERF_EF_UPDATE) && + !(event->hw.state & PERF_HES_UPTODATE)) { + xtensa_perf_event_update(event, &event->hw, idx); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +/* + * Adds/Removes a counter to/from the PMU, can be done inside + * a transaction, see the ->*_txn() methods. + */ +static int xtensa_pmu_add(struct perf_event *event, int flags) +{ + struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (__test_and_set_bit(idx, ev->used_mask)) { + idx = find_first_zero_bit(ev->used_mask, + XCHAL_NUM_PERF_COUNTERS); + if (idx == XCHAL_NUM_PERF_COUNTERS) + return -EAGAIN; + + __set_bit(idx, ev->used_mask); + hwc->idx = idx; + } + ev->event[idx] = event; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + xtensa_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + return 0; +} + +static void xtensa_pmu_del(struct perf_event *event, int flags) +{ + struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); + + xtensa_pmu_stop(event, PERF_EF_UPDATE); + __clear_bit(event->hw.idx, ev->used_mask); + perf_event_update_userpage(event); +} + +static void xtensa_pmu_read(struct perf_event *event) +{ + xtensa_perf_event_update(event, &event->hw, event->hw.idx); +} + +static int callchain_trace(struct stackframe *frame, void *data) +{ + struct perf_callchain_entry *entry = data; + + perf_callchain_store(entry, frame->pc); + return 0; +} + +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + xtensa_backtrace_kernel(regs, PERF_MAX_STACK_DEPTH, + callchain_trace, NULL, entry); +} + +void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + xtensa_backtrace_user(regs, PERF_MAX_STACK_DEPTH, + callchain_trace, entry); +} + +void perf_event_print_debug(void) +{ + unsigned long flags; + unsigned i; + + local_irq_save(flags); + pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(), + get_er(XTENSA_PMU_PMG)); + for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) + pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n", + i, get_er(XTENSA_PMU_PM(i)), + i, get_er(XTENSA_PMU_PMCTRL(i)), + i, get_er(XTENSA_PMU_PMSTAT(i))); + local_irq_restore(flags); +} + +irqreturn_t 
xtensa_pmu_irq_handler(int irq, void *dev_id) +{ + irqreturn_t rc = IRQ_NONE; + struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); + unsigned i; + + for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS); + i < XCHAL_NUM_PERF_COUNTERS; + i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) { + uint32_t v = get_er(XTENSA_PMU_PMSTAT(i)); + struct perf_event *event = ev->event[i]; + struct hw_perf_event *hwc = &event->hw; + u64 last_period; + + if (!(v & XTENSA_PMU_PMSTAT_OVFL)) + continue; + + set_er(v, XTENSA_PMU_PMSTAT(i)); + xtensa_perf_event_update(event, hwc, i); + last_period = hwc->last_period; + if (xtensa_perf_event_set_period(event, hwc, i)) { + struct perf_sample_data data; + struct pt_regs *regs = get_irq_regs(); + + perf_sample_data_init(&data, 0, last_period); + if (perf_event_overflow(event, &data, regs)) + xtensa_pmu_stop(event, 0); + } + + rc = IRQ_HANDLED; + } + return rc; +} + +static struct pmu xtensa_pmu = { + .pmu_enable = xtensa_pmu_enable, + .pmu_disable = xtensa_pmu_disable, + .event_init = xtensa_pmu_event_init, + .add = xtensa_pmu_add, + .del = xtensa_pmu_del, + .start = xtensa_pmu_start, + .stop = xtensa_pmu_stop, + .read = xtensa_pmu_read, +}; + +static void xtensa_pmu_setup(void) +{ + unsigned i; + + set_er(0, XTENSA_PMU_PMG); + for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) { + set_er(0, XTENSA_PMU_PMCTRL(i)); + set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i)); + } +} + +static int xtensa_pmu_notifier(struct notifier_block *self, + unsigned long action, void *data) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + xtensa_pmu_setup(); + break; + + default: + break; + } + + return NOTIFY_OK; +} + +static int __init xtensa_pmu_init(void) +{ + int ret; + int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT); + + perf_cpu_notifier(xtensa_pmu_notifier); +#if XTENSA_FAKE_NMI + enable_irq(irq); +#else + ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU, + "pmu", NULL); + if (ret < 0) + return ret; +#endif + + ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW); + if (ret) + free_irq(irq, NULL); + + return ret; +} +early_initcall(xtensa_pmu_init); diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c index 7d2c317bd98b..7538d802b65a 100644 --- a/arch/xtensa/kernel/stacktrace.c +++ b/arch/xtensa/kernel/stacktrace.c @@ -1,11 +1,12 @@ /* - * arch/xtensa/kernel/stacktrace.c + * Kernel and userspace stack tracing. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2013 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #include <linux/export.h> #include <linux/sched.h> @@ -13,6 +14,170 @@ #include <asm/stacktrace.h> #include <asm/traps.h> +#include <asm/uaccess.h> + +#if IS_ENABLED(CONFIG_OPROFILE) || IS_ENABLED(CONFIG_PERF_EVENTS) + +/* Address of common_exception_return, used to check the + * transition from kernel to user space. + */ +extern int common_exception_return; + +/* A struct that maps to the part of the frame containing the a0 and + * a1 registers. 
+ */ +struct frame_start { + unsigned long a0; + unsigned long a1; +}; + +void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth, + int (*ufn)(struct stackframe *frame, void *data), + void *data) +{ + unsigned long windowstart = regs->windowstart; + unsigned long windowbase = regs->windowbase; + unsigned long a0 = regs->areg[0]; + unsigned long a1 = regs->areg[1]; + unsigned long pc = regs->pc; + struct stackframe frame; + int index; + + if (!depth--) + return; + + frame.pc = pc; + frame.sp = a1; + + if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) + return; + + /* Two steps: + * + * 1. Look through the register window for the + * previous PCs in the call trace. + * + * 2. Look on the stack. + */ + + /* Step 1. */ + /* Rotate WINDOWSTART to move the bit corresponding to + * the current window to the bit #0. + */ + windowstart = (windowstart << WSBITS | windowstart) >> windowbase; + + /* Look for bits that are set, they correspond to + * valid windows. + */ + for (index = WSBITS - 1; (index > 0) && depth; depth--, index--) + if (windowstart & (1 << index)) { + /* Get the PC from a0 and a1. */ + pc = MAKE_PC_FROM_RA(a0, pc); + /* Read a0 and a1 from the + * corresponding position in AREGs. + */ + a0 = regs->areg[index * 4]; + a1 = regs->areg[index * 4 + 1]; + + frame.pc = pc; + frame.sp = a1; + + if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) + return; + } + + /* Step 2. */ + /* We are done with the register window, we need to + * look through the stack. + */ + if (!depth) + return; + + /* Start from the a1 register. */ + /* a1 = regs->areg[1]; */ + while (a0 != 0 && depth--) { + struct frame_start frame_start; + /* Get the location for a1, a0 for the + * previous frame from the current a1. + */ + unsigned long *psp = (unsigned long *)a1; + + psp -= 4; + + /* Check if the region is OK to access. */ + if (!access_ok(VERIFY_READ, psp, sizeof(frame_start))) + return; + /* Copy a1, a0 from user space stack frame. */ + if (__copy_from_user_inatomic(&frame_start, psp, + sizeof(frame_start))) + return; + + pc = MAKE_PC_FROM_RA(a0, pc); + a0 = frame_start.a0; + a1 = frame_start.a1; + + frame.pc = pc; + frame.sp = a1; + + if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) + return; + } +} +EXPORT_SYMBOL(xtensa_backtrace_user); + +void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth, + int (*kfn)(struct stackframe *frame, void *data), + int (*ufn)(struct stackframe *frame, void *data), + void *data) +{ + unsigned long pc = regs->depc > VALID_DOUBLE_EXCEPTION_ADDRESS ? + regs->depc : regs->pc; + unsigned long sp_start, sp_end; + unsigned long a0 = regs->areg[0]; + unsigned long a1 = regs->areg[1]; + + sp_start = a1 & ~(THREAD_SIZE - 1); + sp_end = sp_start + THREAD_SIZE; + + /* Spill the register window to the stack first. */ + spill_registers(); + + /* Read the stack frames one by one and create the PC + * from the a0 and a1 registers saved there. 
+ */ + while (a1 > sp_start && a1 < sp_end && depth--) { + struct stackframe frame; + unsigned long *psp = (unsigned long *)a1; + + frame.pc = pc; + frame.sp = a1; + + if (kernel_text_address(pc) && kfn(&frame, data)) + return; + + if (pc == (unsigned long)&common_exception_return) { + regs = (struct pt_regs *)a1; + if (user_mode(regs)) { + if (ufn == NULL) + return; + xtensa_backtrace_user(regs, depth, ufn, data); + return; + } + a0 = regs->areg[0]; + a1 = regs->areg[1]; + continue; + } + + sp_start = a1; + + pc = MAKE_PC_FROM_RA(a0, pc); + a0 = *(psp - 4); + a1 = *(psp - 3); + } +} +EXPORT_SYMBOL(xtensa_backtrace_kernel); + +#endif void walk_stackframe(unsigned long *sp, int (*fn)(struct stackframe *frame, void *data), diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 9d2f45f010ef..42d441f7898b 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -62,6 +62,7 @@ extern void fast_coprocessor(void); extern void do_illegal_instruction (struct pt_regs*); extern void do_interrupt (struct pt_regs*); +extern void do_nmi(struct pt_regs *); extern void do_unaligned_user (struct pt_regs*); extern void do_multihit (struct pt_regs*, unsigned long); extern void do_page_fault (struct pt_regs*, unsigned long); @@ -146,6 +147,9 @@ COPROCESSOR(6), #if XTENSA_HAVE_COPROCESSOR(7) COPROCESSOR(7), #endif +#if XTENSA_FAKE_NMI +{ EXCCAUSE_MAPPED_NMI, 0, do_nmi }, +#endif { EXCCAUSE_MAPPED_DEBUG, 0, do_debug }, { -1, -1, 0 } @@ -199,6 +203,28 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause) extern void do_IRQ(int, struct pt_regs *); +#if XTENSA_FAKE_NMI + +irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id); + +DEFINE_PER_CPU(unsigned long, nmi_count); + +void do_nmi(struct pt_regs *regs) +{ + struct pt_regs *old_regs; + + if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL) + trace_hardirqs_off(); + + old_regs = set_irq_regs(regs); + nmi_enter(); + ++*this_cpu_ptr(&nmi_count); + xtensa_pmu_irq_handler(0, NULL); + nmi_exit(); + set_irq_regs(old_regs); +} +#endif + void do_interrupt(struct pt_regs *regs) { static const unsigned int_level_mask[] = { @@ -211,8 +237,11 @@ void do_interrupt(struct pt_regs *regs) XCHAL_INTLEVEL6_MASK, XCHAL_INTLEVEL7_MASK, }; - struct pt_regs *old_regs = set_irq_regs(regs); + struct pt_regs *old_regs; + + trace_hardirqs_off(); + old_regs = set_irq_regs(regs); irq_enter(); for (;;) { diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 1b397a902292..abcdb527f18a 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -627,7 +627,11 @@ ENTRY(_Level\level\()InterruptVector) wsr a0, excsave2 rsr a0, epc\level wsr a0, epc1 + .if \level <= LOCKLEVEL movi a0, EXCCAUSE_LEVEL1_INTERRUPT + .else + movi a0, EXCCAUSE_MAPPED_NMI + .endif wsr a0, exccause rsr a0, eps\level # branch to user or kernel vector @@ -682,11 +686,13 @@ ENDPROC(_WindowOverflow4) .align 4 _SimulateUserKernelVectorException: addi a0, a0, (1 << PS_EXCM_BIT) +#if !XTENSA_FAKE_NMI wsr a0, ps +#endif bbsi.l a0, PS_UM_BIT, 1f # branch if user mode - rsr a0, excsave2 # restore a0 + xsr a0, excsave2 # restore a0 j _KernelExceptionVector # simulate kernel vector exception -1: rsr a0, excsave2 # restore a0 +1: xsr a0, excsave2 # restore a0 j _UserExceptionVector # simulate user vector exception #endif diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 83a44a33cfa1..c9784c1b18d8 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -15,6 +15,7 @@ #include <linux/mm.h> #include <linux/module.h> 
#include <linux/hardirq.h> +#include <linux/perf_event.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> @@ -142,6 +143,12 @@ good_area: } up_read(&mm->mmap_sem); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + if (flags & VM_FAULT_MAJOR) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); + else if (flags & VM_FAULT_MINOR) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); + return; /* Something tried to access memory that isn't in our memory map.. diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c index 5f03a593d84f..8f952034e161 100644 --- a/arch/xtensa/oprofile/backtrace.c +++ b/arch/xtensa/oprofile/backtrace.c @@ -2,168 +2,26 @@ * @file backtrace.c * * @remark Copyright 2008 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. * @remark Read the file COPYING * */ #include <linux/oprofile.h> -#include <linux/sched.h> -#include <linux/mm.h> #include <asm/ptrace.h> -#include <asm/uaccess.h> -#include <asm/traps.h> +#include <asm/stacktrace.h> -/* Address of common_exception_return, used to check the - * transition from kernel to user space. - */ -extern int common_exception_return; - -/* A struct that maps to the part of the frame containing the a0 and - * a1 registers. - */ -struct frame_start { - unsigned long a0; - unsigned long a1; -}; - -static void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth) -{ - unsigned long windowstart = regs->windowstart; - unsigned long windowbase = regs->windowbase; - unsigned long a0 = regs->areg[0]; - unsigned long a1 = regs->areg[1]; - unsigned long pc = MAKE_PC_FROM_RA(a0, regs->pc); - int index; - - /* First add the current PC to the trace. */ - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - - /* Two steps: - * - * 1. Look through the register window for the - * previous PCs in the call trace. - * - * 2. Look on the stack. - */ - - /* Step 1. */ - /* Rotate WINDOWSTART to move the bit corresponding to - * the current window to the bit #0. - */ - windowstart = (windowstart << WSBITS | windowstart) >> windowbase; - - /* Look for bits that are set, they correspond to - * valid windows. - */ - for (index = WSBITS - 1; (index > 0) && depth; depth--, index--) - if (windowstart & (1 << index)) { - /* Read a0 and a1 from the - * corresponding position in AREGs. - */ - a0 = regs->areg[index * 4]; - a1 = regs->areg[index * 4 + 1]; - /* Get the PC from a0 and a1. */ - pc = MAKE_PC_FROM_RA(a0, pc); - - /* Add the PC to the trace. */ - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - } - - /* Step 2. */ - /* We are done with the register window, we need to - * look through the stack. - */ - if (depth > 0) { - /* Start from the a1 register. */ - /* a1 = regs->areg[1]; */ - while (a0 != 0 && depth--) { - - struct frame_start frame_start; - /* Get the location for a1, a0 for the - * previous frame from the current a1. - */ - unsigned long *psp = (unsigned long *)a1; - psp -= 4; - - /* Check if the region is OK to access. */ - if (!access_ok(VERIFY_READ, psp, sizeof(frame_start))) - return; - /* Copy a1, a0 from user space stack frame. 
*/ - if (__copy_from_user_inatomic(&frame_start, psp, - sizeof(frame_start))) - return; - - a0 = frame_start.a0; - a1 = frame_start.a1; - pc = MAKE_PC_FROM_RA(a0, pc); - - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - } - } -} - -static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth) +static int xtensa_backtrace_cb(struct stackframe *frame, void *data) { - unsigned long pc = regs->pc; - unsigned long *psp; - unsigned long sp_start, sp_end; - unsigned long a0 = regs->areg[0]; - unsigned long a1 = regs->areg[1]; - - sp_start = a1 & ~(THREAD_SIZE-1); - sp_end = sp_start + THREAD_SIZE; - - /* Spill the register window to the stack first. */ - spill_registers(); - - /* Read the stack frames one by one and create the PC - * from the a0 and a1 registers saved there. - */ - while (a1 > sp_start && a1 < sp_end && depth--) { - pc = MAKE_PC_FROM_RA(a0, pc); - - /* Add the PC to the trace. */ - oprofile_add_trace(pc); - if (pc == (unsigned long) &common_exception_return) { - regs = (struct pt_regs *)a1; - if (user_mode(regs)) { - pc = regs->pc; - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - return xtensa_backtrace_user(regs, depth); - } - a0 = regs->areg[0]; - a1 = regs->areg[1]; - continue; - } - - psp = (unsigned long *)a1; - - a0 = *(psp - 4); - a1 = *(psp - 3); - - if (a1 <= (unsigned long)psp) - return; - - } - return; + oprofile_add_trace(frame->pc); + return 0; } void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth) { if (user_mode(regs)) - xtensa_backtrace_user(regs, depth); + xtensa_backtrace_user(regs, depth, xtensa_backtrace_cb, NULL); else - xtensa_backtrace_kernel(regs, depth); + xtensa_backtrace_kernel(regs, depth, xtensa_backtrace_cb, + xtensa_backtrace_cb, NULL); } diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 8ab021b1f141..976a38594537 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -105,13 +105,17 @@ static char *split_if_spec(char *str, ...) va_start(ap, str); while ((arg = va_arg(ap, char**)) != NULL) { - if (*str == '\0') + if (*str == '\0') { + va_end(ap); return NULL; + } end = strchr(str, ','); if (end != str) *arg = str; - if (end == NULL) + if (end == NULL) { + va_end(ap); return NULL; + } *end++ = '\0'; str = end; } |
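
Two standalone user-space sketches of techniques appearing in the hunks above (illustrative C only, not part of the patch; all names below are hypothetical).

First, the sampling-period arithmetic used by xtensa_perf_event_set_period() and xtensa_perf_event_update() in the new perf_event.c: the counter is preloaded with -left so the 32-bit register overflows after `left` events, and the accumulated count is recovered as a masked 32-bit difference. A minimal sketch, assuming a fake 32-bit counter in place of the PM register:

	#include <stdint.h>
	#include <stdio.h>

	#define COUNTER_MASK 0xffffffffULL

	static uint32_t hw_counter;	/* stands in for the PM register */
	static uint64_t prev_count;	/* last value read from the counter */
	static uint64_t total;		/* accumulated event count */

	static void set_period(int64_t left)
	{
		/* Preload with -left so the counter wraps after `left` events. */
		prev_count = (uint32_t)-left;
		hw_counter = (uint32_t)-left;
	}

	static void update(void)
	{
		uint64_t now = hw_counter;
		/* Masked difference stays correct across the 32-bit wrap. */
		int64_t delta = (now - prev_count) & COUNTER_MASK;

		prev_count = now;
		total += delta;
	}

	int main(void)
	{
		set_period(1000);
		hw_counter += 700;	/* 700 events occur */
		update();
		hw_counter += 300;	/* counter wraps to 0: overflow point */
		update();
		printf("%llu\n", (unsigned long long)total);	/* prints 1000 */
		return 0;
	}

Second, the split_if_spec() hunk at the end adds va_end() on the early-return paths; once va_start() has been called, every exit from the function must pass through va_end(). A sketch of the same pattern with a simple comma-separated field parser (hypothetical helper, not the kernel function):

	#include <stdarg.h>
	#include <stddef.h>
	#include <string.h>

	static char *split_fields(char *str, ...)
	{
		char **arg;
		va_list ap;

		va_start(ap, str);
		while ((arg = va_arg(ap, char **)) != NULL) {
			char *end;

			if (*str == '\0') {
				va_end(ap);	/* early return: clean up first */
				return NULL;
			}
			end = strchr(str, ',');
			if (end != str)
				*arg = str;
			if (end == NULL) {
				va_end(ap);	/* early return: clean up first */
				return NULL;
			}
			*end++ = '\0';
			str = end;
		}
		va_end(ap);
		return str;
	}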