Diffstat (limited to 'arch'): 533 files changed, 8194 insertions(+), 9060 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig index cd5f443865ec..3aff508ffd86 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -895,6 +895,9 @@ config HAVE_ARCH_PREL32_RELOCATIONS architectures, and don't require runtime relocation on relocatable kernels. +config ARCH_USE_MEMREMAP_PROT + bool + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/arch/alpha/include/asm/a.out-core.h b/arch/alpha/include/asm/a.out-core.h deleted file mode 100644 index 1610d078b064..000000000000 --- a/arch/alpha/include/asm/a.out-core.h +++ /dev/null @@ -1,81 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_A_OUT_CORE_H -#define _ASM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include <linux/user.h> -#include <linux/mm_types.h> - -/* - * Fill in the user structure for an ECOFF core dump. - */ -static inline void aout_dump_thread(struct pt_regs *pt, struct user *dump) -{ - /* switch stack follows right below pt_regs: */ - struct switch_stack * sw = ((struct switch_stack *) pt) - 1; - - dump->magic = CMAGIC; - dump->start_code = current->mm->start_code; - dump->start_data = current->mm->start_data; - dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); - dump->u_tsize = ((current->mm->end_code - dump->start_code) - >> PAGE_SHIFT); - dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data) - >> PAGE_SHIFT); - dump->u_ssize = (current->mm->start_stack - dump->start_stack - + PAGE_SIZE-1) >> PAGE_SHIFT; - - /* - * We store the registers in an order/format that is - * compatible with DEC Unix/OSF/1 as this makes life easier - * for gdb. 
- */ - dump->regs[EF_V0] = pt->r0; - dump->regs[EF_T0] = pt->r1; - dump->regs[EF_T1] = pt->r2; - dump->regs[EF_T2] = pt->r3; - dump->regs[EF_T3] = pt->r4; - dump->regs[EF_T4] = pt->r5; - dump->regs[EF_T5] = pt->r6; - dump->regs[EF_T6] = pt->r7; - dump->regs[EF_T7] = pt->r8; - dump->regs[EF_S0] = sw->r9; - dump->regs[EF_S1] = sw->r10; - dump->regs[EF_S2] = sw->r11; - dump->regs[EF_S3] = sw->r12; - dump->regs[EF_S4] = sw->r13; - dump->regs[EF_S5] = sw->r14; - dump->regs[EF_S6] = sw->r15; - dump->regs[EF_A3] = pt->r19; - dump->regs[EF_A4] = pt->r20; - dump->regs[EF_A5] = pt->r21; - dump->regs[EF_T8] = pt->r22; - dump->regs[EF_T9] = pt->r23; - dump->regs[EF_T10] = pt->r24; - dump->regs[EF_T11] = pt->r25; - dump->regs[EF_RA] = pt->r26; - dump->regs[EF_T12] = pt->r27; - dump->regs[EF_AT] = pt->r28; - dump->regs[EF_SP] = rdusp(); - dump->regs[EF_PS] = pt->ps; - dump->regs[EF_PC] = pt->pc; - dump->regs[EF_GP] = pt->gp; - dump->regs[EF_A0] = pt->r16; - dump->regs[EF_A1] = pt->r17; - dump->regs[EF_A2] = pt->r18; - memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_A_OUT_CORE_H */ diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h index 4d17cacd1462..432402c8e47f 100644 --- a/arch/alpha/include/asm/irq.h +++ b/arch/alpha/include/asm/irq.h @@ -56,15 +56,15 @@ #elif defined(CONFIG_ALPHA_DP264) || \ defined(CONFIG_ALPHA_LYNX) || \ - defined(CONFIG_ALPHA_SHARK) || \ - defined(CONFIG_ALPHA_EIGER) + defined(CONFIG_ALPHA_SHARK) # define NR_IRQS 64 #elif defined(CONFIG_ALPHA_TITAN) #define NR_IRQS 80 #elif defined(CONFIG_ALPHA_RAWHIDE) || \ - defined(CONFIG_ALPHA_TAKARA) + defined(CONFIG_ALPHA_TAKARA) || \ + defined(CONFIG_ALPHA_EIGER) # define NR_IRQS 128 #elif defined(CONFIG_ALPHA_WILDFIRE) diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index cf4ac791a592..1fe2b56cb861 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -18,7 +18,6 @@ #define USER_DS ((mm_segment_t) { -0x40000000000UL }) #define get_fs() (current_thread_info()->addr_limit) -#define get_ds() (KERNEL_DS) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #define segment_eq(a, b) ((a).seg == (b).seg) diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 065fb372e355..0d0fddb7e738 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -3,6 +3,7 @@ #define _UAPI_ASM_SOCKET_H #include <asm/sockios.h> +#include <asm/bitsperlong.h> /* For setsockopt(2) */ /* @@ -30,8 +31,8 @@ #define SO_RCVBUFFORCE 0x100b #define SO_RCVLOWAT 0x1010 #define SO_SNDLOWAT 0x1011 -#define SO_RCVTIMEO 0x1012 -#define SO_SNDTIMEO 0x1013 +#define SO_RCVTIMEO_OLD 0x1012 +#define SO_SNDTIMEO_OLD 0x1013 #define SO_ACCEPTCONN 0x1014 #define SO_PROTOCOL 0x1028 #define SO_DOMAIN 0x1029 @@ -51,13 +52,9 @@ #define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP #define SO_PEERSEC 30 #define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 19 @@ -66,9 +63,6 @@ #define SO_MARK 36 -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 40 #define SO_WIFI_STATUS 41 @@ -115,4 +109,41 @@ #define SO_TXTIME 61 #define SCM_TXTIME SO_TXTIME +#define SO_BINDTOIFINDEX 62 + +#define SO_TIMESTAMP_OLD 29 +#define SO_TIMESTAMPNS_OLD 
35 +#define SO_TIMESTAMPING_OLD 37 + +#define SO_TIMESTAMP_NEW 63 +#define SO_TIMESTAMPNS_NEW 64 +#define SO_TIMESTAMPING_NEW 65 + +#define SO_RCVTIMEO_NEW 66 +#define SO_SNDTIMEO_NEW 67 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 5613aa378a83..4341ccf5c0c4 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -630,12 +630,6 @@ static int __hw_perf_event_init(struct perf_event *event) return ev; } - /* The EV67 does not support mode exclusion */ - if (attr->exclude_kernel || attr->exclude_user - || attr->exclude_hv || attr->exclude_idle) { - return -EPERM; - } - /* * We place the event type in event_base here and leave calculation * of the codes to programme the PMU for alpha_pmu_enable() because @@ -771,6 +765,7 @@ static struct pmu pmu = { .start = alpha_pmu_start, .stop = alpha_pmu_stop, .read = alpha_pmu_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index d73dc473fbb9..188fc9256baf 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm) /* Macro for exception fixup code to access integer registers. */ #define dpf_reg(r) \ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ - (r) <= 18 ? (r)+8 : (r)-10]) + (r) <= 18 ? (r)+10 : (r)-10]) asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 1cfe4197146f..2061b652d9c3 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -192,7 +192,6 @@ config NR_CPUS config ARC_SMP_HALT_ON_RESET bool "Enable Halt-on-reset boot mode" - default y if ARC_UBOOT_SUPPORT help In SMP configuration cores can be configured as Halt-on-reset or they could all start at same time. For Halt-on-reset, non @@ -408,6 +407,14 @@ config ARC_HAS_ACCL_REGS (also referred to as r58:r59). These can also be used by gcc as GPR so kernel needs to save/restore per process +config ARC_IRQ_NO_AUTOSAVE + bool "Disable hardware autosave regfile on interrupts" + default n + help + On HS cores, taken interrupt auto saves the regfile on stack. + This is programmable and can be optionally disabled in which case + software INTERRUPT_PROLOGUE/EPILGUE do the needed work + endif # ISA_ARCV2 endmenu # "ARC CPU Configuration" @@ -516,17 +523,6 @@ config ARC_DBG_TLB_PARANOIA endif -config ARC_UBOOT_SUPPORT - bool "Support uboot arg Handling" - help - ARC Linux by default checks for uboot provided args as pointers to - external cmdline or DTB. 
This however breaks in absence of uboot, - when booting from Metaware debugger directly, as the registers are - not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus - registers look like uboot args to kernel which then chokes. - So only enable the uboot arg checking/processing if users are sure - of uboot being in play. - config ARC_BUILTIN_DTB_NAME string "Built in DTB" help diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 6e84060e7c90..621f59407d76 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5 # CONFIG_ARC_HAS_LLSC is not set CONFIG_ARC_KVADDR_SIZE=402 CONFIG_ARC_EMUL_UNALIGNED=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_PREEMPT=y CONFIG_NET=y CONFIG_UNIX=y diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index 1e59a2e9c602..e447ace6fa1c 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_ARC_PLAT_AXS10X=y CONFIG_AXS103=y CONFIG_ISA_ARCV2=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38" CONFIG_PREEMPT=y CONFIG_NET=y diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index b5c3f6c54b03..c82cdb10aaf4 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -15,8 +15,6 @@ CONFIG_AXS103=y CONFIG_ISA_ARCV2=y CONFIG_SMP=y # CONFIG_ARC_TIMERS_64BIT is not set -# CONFIG_ARC_SMP_HALT_ON_RESET is not set -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" CONFIG_PREEMPT=y CONFIG_NET=y diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index f1b86cef0905..a27eafdc8260 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -151,6 +151,14 @@ struct bcr_isa_arcv2 { #endif }; +struct bcr_uarch_build_arcv2 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:8, prod:8, maj:8, min:8; +#else + unsigned int min:8, maj:8, prod:8, pad:8; +#endif +}; + struct bcr_mpy { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index f393b663413e..2ad77fb43639 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -52,6 +52,17 @@ #define cache_line_size() SMP_CACHE_BYTES #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES +/* + * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses + * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit + * alignment for any atomic64_t embedded in buffer. + * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed + * value of 4 (and not 8) in ARC ABI. 
+ */ +#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC) +#define ARCH_SLAB_MINALIGN 8 +#endif + extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern void read_decode_cache_bcr(void); diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 309f4e6721b3..225e7df2d8ed 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -17,6 +17,33 @@ ; ; Now manually save: r12, sp, fp, gp, r25 +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + st.as r9, [sp, -10] ; save r9 in it's final stack slot + sub sp, sp, 12 ; skip JLI, LDI, EI + + PUSH lp_count + PUSHAX lp_start + PUSHAX lp_end + PUSH blink + + PUSH r11 + PUSH r10 + + sub sp, sp, 4 ; skip r9 + + PUSH r8 + PUSH r7 + PUSH r6 + PUSH r5 + PUSH r4 + PUSH r3 + PUSH r2 + PUSH r1 + PUSH r0 +.endif +#endif + #ifdef CONFIG_ARC_HAS_ACCL_REGS PUSH r59 PUSH r58 @@ -86,6 +113,33 @@ POP r59 #endif +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + POP r0 + POP r1 + POP r2 + POP r3 + POP r4 + POP r5 + POP r6 + POP r7 + POP r8 + POP r9 + POP r10 + POP r11 + + POP blink + POPAX lp_end + POPAX lp_start + + POP r9 + mov lp_count, r9 + + add sp, sp, 12 ; skip JLI, LDI, EI + ld.as r9, [sp, -10] ; reload r9 which got clobbered +.endif +#endif + .endm /*------------------------------------------------------------------------*/ diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index c9173c02081c..eabc3efa6c6d 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) " .previous \n" : "+r"(d_char), "+r"(res) : "i"(0) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } @@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) : "g"(-EFAULT), "r"(count) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index cc558a25b8fa..562089d62d9d 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S @@ -209,7 +209,9 @@ restore_regs: ;####### Return from Intr ####### debug_marker_l1: - bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + btst r0, STATUS_DE_BIT ; Z flag set if bit clear + bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set .Lisr_ret_fast_path: ; Handle special case #1: (Entry via Exception, Return via IRQ) diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 8b90d25a15cc..30e090625916 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -17,6 +17,7 @@ #include <asm/entry.h> #include <asm/arcregs.h> #include <asm/cache.h> +#include <asm/irqflags.h> .macro CPU_EARLY_SETUP @@ -47,6 +48,15 @@ sr r5, [ARC_REG_DC_CTRL] 1: + +#ifdef CONFIG_ISA_ARCV2 + ; Unaligned access 
is disabled at reset, so re-enable early as + ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access + ; by default + lr r5, [status32] + bset r5, r5, STATUS_AD_BIT + kflag r5 +#endif .endm .section .init.text, "ax",@progbits @@ -90,15 +100,13 @@ ENTRY(stext) st.ab 0, [r5, 4] 1: -#ifdef CONFIG_ARC_UBOOT_SUPPORT ; Uboot - kernel ABI ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 - ; r1 = magic number (board identity, unused as of now + ; r1 = magic number (always zero as of now) ; r2 = pointer to uboot provided cmdline or external DTB in mem - ; These are handled later in setup_arch() + ; These are handled later in handle_uboot_args() st r0, [@uboot_tag] st r2, [@uboot_arg] -#endif ; setup "current" tsk and optionally cache it in dedicated r25 mov r9, @init_task diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 067ea362fb3e..cf18b3e5a934 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -49,11 +49,13 @@ void arc_init_IRQ(void) *(unsigned int *)&ictrl = 0; +#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ ictrl.save_blink = 1; ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ +#endif WRITE_AUX(AUX_IRQ_CTRL, ictrl); diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index feb90093e6b1..7b2340996cf8 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -199,20 +199,36 @@ static void read_arc_build_cfg_regs(void) cpu->bpu.ret_stk = 4 << bpu.rse; if (cpu->core.family >= 0x54) { - unsigned int exec_ctrl; - READ_BCR(AUX_EXEC_CTRL, exec_ctrl); - cpu->extn.dual_enb = !(exec_ctrl & 1); + struct bcr_uarch_build_arcv2 uarch; - /* dual issue always present for this core */ - cpu->extn.dual = 1; + /* + * The first 0x54 core (uarch maj:min 0:1 or 0:2) was + * dual issue only (HS4x). But next uarch rev (1:0) + * allows it be configured for single issue (HS3x) + * Ensure we fiddle with dual issue only on HS4x + */ + READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch); + + if (uarch.prod == 4) { + unsigned int exec_ctrl; + + /* dual issue hardware always present */ + cpu->extn.dual = 1; + + READ_BCR(AUX_EXEC_CTRL, exec_ctrl); + + /* dual issue hardware enabled ? 
*/ + cpu->extn.dual_enb = !(exec_ctrl & 1); + + } } } READ_BCR(ARC_REG_AP_BCR, ap); if (ap.ver) { cpu->extn.ap_num = 2 << ap.num; - cpu->extn.ap_full = !!ap.min; + cpu->extn.ap_full = !ap.min; } READ_BCR(ARC_REG_SMART_BCR, bcr); @@ -462,43 +478,78 @@ void setup_processor(void) arc_chk_core_config(); } -static inline int is_kernel(unsigned long addr) +static inline bool uboot_arg_invalid(unsigned long addr) { - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) - return 1; - return 0; + /* + * Check that it is a untranslated address (although MMU is not enabled + * yet, it being a high address ensures this is not by fluke) + */ + if (addr < PAGE_OFFSET) + return true; + + /* Check that address doesn't clobber resident kernel image */ + return addr >= (unsigned long)_stext && addr <= (unsigned long)_end; } -void __init setup_arch(char **cmdline_p) +#define IGNORE_ARGS "Ignore U-boot args: " + +/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */ +#define UBOOT_TAG_NONE 0 +#define UBOOT_TAG_CMDLINE 1 +#define UBOOT_TAG_DTB 2 + +void __init handle_uboot_args(void) { -#ifdef CONFIG_ARC_UBOOT_SUPPORT - /* make sure that uboot passed pointer to cmdline/dtb is valid */ - if (uboot_tag && is_kernel((unsigned long)uboot_arg)) - panic("Invalid uboot arg\n"); - - /* See if u-boot passed an external Device Tree blob */ - machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */ - if (!machine_desc) -#endif - { - /* No, so try the embedded one */ + bool use_embedded_dtb = true; + bool append_cmdline = false; + + /* check that we know this tag */ + if (uboot_tag != UBOOT_TAG_NONE && + uboot_tag != UBOOT_TAG_CMDLINE && + uboot_tag != UBOOT_TAG_DTB) { + pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag); + goto ignore_uboot_args; + } + + if (uboot_tag != UBOOT_TAG_NONE && + uboot_arg_invalid((unsigned long)uboot_arg)) { + pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg); + goto ignore_uboot_args; + } + + /* see if U-boot passed an external Device Tree blob */ + if (uboot_tag == UBOOT_TAG_DTB) { + machine_desc = setup_machine_fdt((void *)uboot_arg); + + /* external Device Tree blob is invalid - use embedded one */ + use_embedded_dtb = !machine_desc; + } + + if (uboot_tag == UBOOT_TAG_CMDLINE) + append_cmdline = true; + +ignore_uboot_args: + + if (use_embedded_dtb) { machine_desc = setup_machine_fdt(__dtb_start); if (!machine_desc) panic("Embedded DT invalid\n"); + } - /* - * If we are here, it is established that @uboot_arg didn't - * point to DT blob. Instead if u-boot says it is cmdline, - * append to embedded DT cmdline. - * setup_machine_fdt() would have populated @boot_command_line - */ - if (uboot_tag == 1) { - /* Ensure a whitespace between the 2 cmdlines */ - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); - strlcat(boot_command_line, uboot_arg, - COMMAND_LINE_SIZE); - } + /* + * NOTE: @boot_command_line is populated by setup_machine_fdt() so this + * append processing can only happen after. 
+ */ + if (append_cmdline) { + /* Ensure a whitespace between the 2 cmdlines */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE); } +} + +void __init setup_arch(char **cmdline_p) +{ + handle_uboot_args(); /* Save unparsed command line copy for /proc/cmdline */ *cmdline_p = boot_command_line; diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index d61044dd8b58..ea14b0bf3116 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S @@ -25,15 +25,11 @@ #endif #ifdef CONFIG_ARC_HAS_LL64 -# define PREFETCH_READ(RX) prefetch [RX, 56] -# define PREFETCH_WRITE(RX) prefetchw [RX, 64] # define LOADX(DST,RX) ldd.ab DST, [RX, 8] # define STOREX(SRC,RX) std.ab SRC, [RX, 8] # define ZOLSHFT 5 # define ZOLAND 0x1F #else -# define PREFETCH_READ(RX) prefetch [RX, 28] -# define PREFETCH_WRITE(RX) prefetchw [RX, 32] # define LOADX(DST,RX) ld.ab DST, [RX, 4] # define STOREX(SRC,RX) st.ab SRC, [RX, 4] # define ZOLSHFT 4 @@ -41,8 +37,6 @@ #endif ENTRY_CFI(memcpy) - prefetch [r1] ; Prefetch the read location - prefetchw [r0] ; Prefetch the write location mov.f 0, r2 ;;; if size is zero jz.d [blink] @@ -72,8 +66,6 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy32_64bytes ;; LOOP START LOADX (r6, r1) - PREFETCH_READ (r1) - PREFETCH_WRITE (r3) LOADX (r8, r1) LOADX (r10, r1) LOADX (r4, r1) @@ -117,9 +109,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_1 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 24) or r7, r7, r5 @@ -162,9 +152,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_2 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 16) or r7, r7, r5 @@ -204,9 +192,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_3 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 8) or r7, r7, r5 diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index f25c085b9874..23e00216e5a5 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig @@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK bool "ARC HS Development Kit SOC" depends on ISA_ARCV2 select ARC_HAS_ACCL_REGS + select ARC_IRQ_NO_AUTOSAVE select CLK_HSDK select RESET_HSDK select HAVE_PCI diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 8933f7337e56..b5956a175515 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1401,6 +1401,7 @@ config NR_CPUS config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP + select GENERIC_IRQ_MIGRATION help Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. 
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index b67f5fee1469..dce5be5df97b 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -729,7 +729,7 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; }; &tscadc { diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 172c0224e7f6..b128998097ce 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -651,13 +651,13 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; dual_emac_res_vlan = <1>; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; dual_emac_res_vlan = <2>; }; diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts index d0fd68873689..5b250060f6dd 100644 --- a/arch/arm/boot/dts/am335x-shc.dts +++ b/arch/arm/boot/dts/am335x-shc.dts @@ -215,7 +215,7 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; bus-width = <0x4>; - cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; cd-inverted; max-frequency = <26000000>; vmmc-supply = <&vmmcsd_fixed>; diff --git a/arch/arm/boot/dts/armada-388-clearfog.dtsi b/arch/arm/boot/dts/armada-388-clearfog.dtsi index 1b0d0680c8b6..0d81600ca247 100644 --- a/arch/arm/boot/dts/armada-388-clearfog.dtsi +++ b/arch/arm/boot/dts/armada-388-clearfog.dtsi @@ -93,6 +93,7 @@ bm,pool-long = <2>; bm,pool-short = <1>; buffer-manager = <&bm>; + phys = <&comphy1 1>; phy-mode = "sgmii"; status = "okay"; }; @@ -103,6 +104,7 @@ bm,pool-short = <1>; buffer-manager = <&bm>; managed = "in-band-status"; + phys = <&comphy5 2>; phy-mode = "sgmii"; sfp = <&sfp>; status = "okay"; diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 929459c42760..7b2e2bd6479b 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -335,6 +335,43 @@ #clock-cells = <1>; }; + comphy: phy@18300 { + compatible = "marvell,armada-380-comphy"; + reg = <0x18300 0x100>; + #address-cells = <1>; + #size-cells = <0>; + + comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + + comphy2: phy@2 { + reg = <2>; + #phy-cells = <1>; + }; + + comphy3: phy@3 { + reg = <3>; + #phy-cells = <1>; + }; + + comphy4: phy@4 { + reg = <4>; + #phy-cells = <1>; + }; + + comphy5: phy@5 { + reg = <5>; + #phy-cells = <1>; + }; + }; + coreclk: mvebu-sar@18600 { compatible = "marvell,armada-380-core-clock"; reg = <0x18600 0x04>; diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts index f3ac7483afed..5d04dc68cf57 100644 --- a/arch/arm/boot/dts/armada-xp-db.dts +++ b/arch/arm/boot/dts/armada-xp-db.dts @@ -144,30 +144,32 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "U-Boot"; - reg = <0 0x800000>; - }; - partition@800000 { - label = "Linux"; - reg = <0x800000 0x800000>; - }; - partition@1000000 { - label = "Filesystem"; - reg = <0x1000000 0x3f000000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = 
"U-Boot"; + reg = <0 0x800000>; + }; + partition@800000 { + label = "Linux"; + reg = <0x800000 0x800000>; + }; + partition@1000000 { + label = "Filesystem"; + reg = <0x1000000 0x3f000000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index 1139e9469a83..b4cca507cf13 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts @@ -160,12 +160,15 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; + + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts index bbbb38888bb8..87dcb502f72d 100644 --- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts +++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts @@ -81,49 +81,52 @@ }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "u-boot"; - reg = <0x00000000 0x000e0000>; - read-only; - }; - - partition@e0000 { - label = "u-boot-env"; - reg = <0x000e0000 0x00020000>; - read-only; - }; - - partition@100000 { - label = "u-boot-env2"; - reg = <0x00100000 0x00020000>; - read-only; - }; - - partition@120000 { - label = "zImage"; - reg = <0x00120000 0x00400000>; - }; - - partition@520000 { - label = "initrd"; - reg = <0x00520000 0x00400000>; - }; - partition@e00000 { - label = "boot"; - reg = <0x00e00000 0x3f200000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "u-boot"; + reg = <0x00000000 0x000e0000>; + read-only; + }; + + partition@e0000 { + label = "u-boot-env"; + reg = <0x000e0000 0x00020000>; + read-only; + }; + + partition@100000 { + label = "u-boot-env2"; + reg = <0x00100000 0x00020000>; + read-only; + }; + + partition@120000 { + label = "zImage"; + reg = <0x00120000 0x00400000>; + }; + + partition@520000 { + label = "initrd"; + reg = <0x00520000 0x00400000>; + }; + + partition@e00000 { + label = "boot"; + reg = <0x00e00000 0x3f200000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index 47aa53ba6b92..559659b399d0 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi @@ -476,7 +476,7 @@ clocksource: timer@20000 { compatible = "ti,da830-timer"; reg = <0x20000 0x1000>; - interrupts = <12>, <13>; + interrupts = <21>, <22>; interrupt-names = "tint12", "tint34"; clocks = <&pll0_auxclk>; }; diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts index cc0c3cf89eaa..592111c8d6fd 100644 --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts @@ -443,7 +443,7 @@ }; display-controller@6a000000 { - status = "disabled"; + status = "okay"; port@0 { reg = <0>; diff --git a/arch/arm/boot/dts/imx6q-pistachio.dts b/arch/arm/boot/dts/imx6q-pistachio.dts index 5edf858c8b86..a31b17eaf51c 100644 --- a/arch/arm/boot/dts/imx6q-pistachio.dts +++ b/arch/arm/boot/dts/imx6q-pistachio.dts @@ -103,7 +103,7 @@ power { label = "Power Button"; gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; - 
gpio-key,wakeup; + wakeup-source; linux,code = <KEY_POWER>; }; }; diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts index d8163705363e..4a31a415f88e 100644 --- a/arch/arm/boot/dts/imx6sll-evk.dts +++ b/arch/arm/boot/dts/imx6sll-evk.dts @@ -309,7 +309,7 @@ pinctrl-2 = <&pinctrl_usdhc3_200mhz>; cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; keep-power-in-suspend; - enable-sdio-wakeup; + wakeup-source; vmmc-supply = <®_sd3_vmmc>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 272ff6133ec1..d1375d3650fd 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -467,7 +467,7 @@ }; gpt: gpt@2098000 { - compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt"; + compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt"; reg = <0x02098000 0x4000>; interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6SX_CLK_GPT_BUS>, diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index ed0941292172..ad75959b99c1 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -706,6 +706,7 @@ fsl,tmr-fiper1 = <999999995>; fsl,tmr-fiper2 = <99990>; fsl,max-adj = <499999999>; + fsl,extts-fifo; }; enet0: ethernet@2d10000 { diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index e4645f612712..2ab74860d962 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -274,7 +274,7 @@ compatible = "amlogic,meson6-dwmac", "snps,dwmac"; reg = <0xc9410000 0x10000 0xc1108108 0x4>; - interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>; + interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "macirq"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts index 0872f6e3abf5..d50fc2f60fa3 100644 --- a/arch/arm/boot/dts/meson8b-ec100.dts +++ b/arch/arm/boot/dts/meson8b-ec100.dts @@ -205,8 +205,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; }; diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts index 58669abda259..0f0a46ddf3ff 100644 --- a/arch/arm/boot/dts/meson8b-odroidc1.dts +++ b/arch/arm/boot/dts/meson8b-odroidc1.dts @@ -221,7 +221,6 @@ /* Realtek RTL8211F (0x001cc916) */ eth_phy: ethernet-phy@0 { reg = <0>; - eee-broken-1000t; interrupt-parent = <&gpio_intc>; /* GPIOH_3 */ interrupts = <17 IRQ_TYPE_LEVEL_LOW>; @@ -273,8 +272,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&tflash_vdd>; vqmmc-supply = <&tf_io>; diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts index f5853610b20b..6ac02beb5fa7 100644 --- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts +++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts @@ -206,8 +206,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; }; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index ddc7a7bb33c0..f57acf8f66b9 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -105,7 +105,7 @@ interrupts-extended = < &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 - 
&cpcap 48 1 + &cpcap 48 0 >; interrupt-names = "id_ground", "id_float", "se0conn", "vbusvld", diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index e53d32691308..93b420934e8e 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi @@ -714,11 +714,7 @@ vdda-supply = <&vdac>; - #address-cells = <1>; - #size-cells = <0>; - port { - reg = <0>; venc_out: endpoint { remote-endpoint = <&opa_in>; ti,channels = <1>; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 182a53991c90..826920e6b878 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -814,7 +814,7 @@ /* For debugging, it is often good idea to remove this GPIO. It means you can remove back cover (to reboot by removing battery) and still use the MMC card. */ - cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ + cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ }; /* most boards use vaux3, only some old versions use vmmc2 instead */ diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index 0d9b85317529..e142e6c70a59 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi @@ -370,6 +370,19 @@ compatible = "ti,omap2-onenand"; reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ + /* + * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported + * bootloader set values when booted with v4.19 using both N950 + * and N9 devices (OneNAND Manufacturer: Samsung): + * + * gpmc cs0 before gpmc_cs_program_settings: + * cs0 GPMC_CS_CONFIG1: 0xfd001202 + * cs0 GPMC_CS_CONFIG2: 0x00181800 + * cs0 GPMC_CS_CONFIG3: 0x00030300 + * cs0 GPMC_CS_CONFIG4: 0x18001804 + * cs0 GPMC_CS_CONFIG5: 0x03171d1d + * cs0 GPMC_CS_CONFIG6: 0x97080000 + */ gpmc,sync-read; gpmc,sync-write; gpmc,burst-length = <16>; @@ -379,26 +392,27 @@ gpmc,device-width = <2>; gpmc,mux-add-data = <2>; gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <87>; - gpmc,cs-wr-off-ns = <87>; + gpmc,cs-rd-off-ns = <122>; + gpmc,cs-wr-off-ns = <122>; gpmc,adv-on-ns = <0>; - gpmc,adv-rd-off-ns = <10>; - gpmc,adv-wr-off-ns = <10>; - gpmc,oe-on-ns = <15>; - gpmc,oe-off-ns = <87>; + gpmc,adv-rd-off-ns = <15>; + gpmc,adv-wr-off-ns = <15>; + gpmc,oe-on-ns = <20>; + gpmc,oe-off-ns = <122>; gpmc,we-on-ns = <0>; - gpmc,we-off-ns = <87>; - gpmc,rd-cycle-ns = <112>; - gpmc,wr-cycle-ns = <112>; - gpmc,access-ns = <81>; + gpmc,we-off-ns = <122>; + gpmc,rd-cycle-ns = <148>; + gpmc,wr-cycle-ns = <148>; + gpmc,access-ns = <117>; gpmc,page-burst-access-ns = <15>; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,wait-monitoring-ns = <0>; - gpmc,clk-activation-ns = <5>; - gpmc,wr-data-mux-bus-ns = <30>; - gpmc,wr-access-ns = <81>; - gpmc,sync-clk-ps = <15000>; + gpmc,clk-activation-ns = <10>; + gpmc,wr-data-mux-bus-ns = <40>; + gpmc,wr-access-ns = <117>; + + gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? 
*/ /* * MTD partition table corresponding to Nokia's MeeGo 1.2 diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 04758a2a87f0..67d77eee9433 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts @@ -644,6 +644,17 @@ }; }; +/* Configure pwm clock source for timers 8 & 9 */ +&timer8 { + assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>; + assigned-clock-parents = <&sys_clkin_ck>; +}; + +&timer9 { + assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>; + assigned-clock-parents = <&sys_clkin_ck>; +}; + /* * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for * uart1 wakeirq. diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index bc853ebeda22..61a06f6add3c 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -317,7 +317,8 @@ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { pinctrl-single,pins = < - OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) >; }; @@ -385,7 +386,8 @@ palmas: palmas@48 { compatible = "ti,palmas"; - interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; reg = <0x48>; interrupt-controller; #interrupt-cells = <2>; @@ -651,7 +653,8 @@ pinctrl-names = "default"; pinctrl-0 = <&twl6040_pins>; - interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>; /* audpwron gpio defined in the board specific dts */ diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index 5e21fb430a65..e78d3718f145 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -181,6 +181,13 @@ OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ >; }; + + palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { + pinctrl-single,pins = < + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) + >; + }; }; &omap5_pmx_core { @@ -414,8 +421,11 @@ palmas: palmas@48 { compatible = "ti,palmas"; - interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ reg = <0x48>; + pinctrl-0 = <&palmas_sys_nirq_pins>; + pinctrl-names = "default"; + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; #interrupt-cells = <2>; ti,system-power-controller; diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi index 9c7e309d9c2c..0960348002ad 100644 --- a/arch/arm/boot/dts/omap5-l4.dtsi +++ b/arch/arm/boot/dts/omap5-l4.dtsi @@ -1046,8 +1046,6 @@ <SYSC_IDLE_SMART>, <SYSC_IDLE_SMART_WKUP>; ti,syss-mask = <1>; - ti,no-reset-on-init; - ti,no-idle-on-init; /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */ clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>; clock-names = "fck"; diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi index 3cc33f7ff7fe..3adc158a40bb 100644 --- a/arch/arm/boot/dts/r8a7743.dtsi +++ b/arch/arm/boot/dts/r8a7743.dtsi @@ -1681,15 +1681,12 @@ du: display@feb00000 { compatible = "renesas,du-r8a7743"; - reg = <0 0xfeb00000 0 
0x40000>, - <0 0xfeb90000 0 0x1c>; - reg-names = "du", "lvds.0"; + reg = <0 0xfeb00000 0 0x40000>; interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_MOD 724>, - <&cpg CPG_MOD 723>, - <&cpg CPG_MOD 726>; - clock-names = "du.0", "du.1", "lvds.0"; + <&cpg CPG_MOD 723>; + clock-names = "du.0", "du.1"; status = "disabled"; ports { @@ -1704,6 +1701,33 @@ port@1 { reg = <1>; du_out_lvds0: endpoint { + remote-endpoint = <&lvds0_in>; + }; + }; + }; + }; + + lvds0: lvds@feb90000 { + compatible = "renesas,r8a7743-lvds"; + reg = <0 0xfeb90000 0 0x1c>; + clocks = <&cpg CPG_MOD 726>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 726>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + lvds0_in: endpoint { + remote-endpoint = <&du_out_lvds0>; + }; + }; + port@1 { + reg = <1>; + lvds0_out: endpoint { }; }; }; diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index 4acb501dd3f8..3ed49898f4b2 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi @@ -719,7 +719,6 @@ pm_qos = <&qos_lcdc0>, <&qos_lcdc1>, <&qos_cif0>, - <&qos_cif1>, <&qos_ipp>, <&qos_rga>; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 353d90f99b40..13304b8c5139 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -216,6 +216,7 @@ #clock-cells = <0>; compatible = "fixed-clock"; clock-frequency = <24000000>; + clock-output-names = "osc24M"; }; osc32k: clk-32k { diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts index 5d23667dc2d2..25540b7694d5 100644 --- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts +++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts @@ -53,7 +53,7 @@ aliases { serial0 = &uart0; - /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */ + ethernet0 = &emac; ethernet1 = &sdiowifi; }; diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi index d5f11d6d987e..bc85b6a166c7 100644 --- a/arch/arm/boot/dts/tegra124-nyan.dtsi +++ b/arch/arm/boot/dts/tegra124-nyan.dtsi @@ -13,10 +13,25 @@ stdout-path = "serial0:115200n8"; }; - memory@80000000 { + /* + * Note that recent version of the device tree compiler (starting with + * version 1.4.2) warn about this node containing a reg property, but + * missing a unit-address. However, the bootloader on these Chromebook + * devices relies on the full name of this node to be exactly /memory. + * Adding the unit-address causes the bootloader to create a /memory + * node and write the memory bank configuration to that node, which in + * turn leads the kernel to believe that the device has 2 GiB of + * memory instead of the amount detected by the bootloader. + * + * The name of this node is effectively ABI and must not be changed. 
+ */ + memory { + device_type = "memory"; reg = <0x0 0x80000000 0x0 0x80000000>; }; + /delete-node/ memory@80000000; + host1x@50000000 { hdmi@54280000 { status = "okay"; diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts index 689c8930dce3..b08d561d6748 100644 --- a/arch/arm/boot/dts/vf610-bk4.dts +++ b/arch/arm/boot/dts/vf610-bk4.dts @@ -110,11 +110,11 @@ bus-num = <3>; status = "okay"; spi-slave; + #address-cells = <0>; - slave@0 { + slave { compatible = "lwn,bk4"; spi-max-frequency = <30000000>; - reg = <0>; }; }; diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S index ba8e6a32fdc9..bc53bcaa772e 100644 --- a/arch/arm/crypto/aes-ce-core.S +++ b/arch/arm/crypto/aes-ce-core.S @@ -317,25 +317,27 @@ ENTRY(ce_aes_ctr_encrypt) .Lctrloop: vmov q0, q6 bl aes_encrypt - subs r4, r4, #1 - bmi .Lctrtailblock @ blocks < 0 means tail block - vld1.8 {q3}, [r1]! - veor q3, q0, q3 - vst1.8 {q3}, [r0]! adds r6, r6, #1 @ increment BE ctr rev ip, r6 vmov s27, ip bcs .Lctrcarry - teq r4, #0 + +.Lctrcarrydone: + subs r4, r4, #1 + bmi .Lctrtailblock @ blocks < 0 means tail block + vld1.8 {q3}, [r1]! + veor q3, q0, q3 + vst1.8 {q3}, [r0]! bne .Lctrloop + .Lctrout: - vst1.8 {q6}, [r5] + vst1.8 {q6}, [r5] @ return next CTR value pop {r4-r6, pc} .Lctrtailblock: - vst1.8 {q0}, [r0, :64] @ return just the key stream - pop {r4-r6, pc} + vst1.8 {q0}, [r0, :64] @ return the key stream + b .Lctrout .Lctrcarry: .irp sreg, s26, s25, s24 @@ -344,11 +346,9 @@ ENTRY(ce_aes_ctr_encrypt) adds ip, ip, #1 rev ip, ip vmov \sreg, ip - bcc 0f + bcc .Lctrcarrydone .endr -0: teq r4, #0 - beq .Lctrout - b .Lctrloop + b .Lctrcarrydone ENDPROC(ce_aes_ctr_encrypt) /* diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S index ce45ba0c0687..86be258a803f 100644 --- a/arch/arm/crypto/crct10dif-ce-core.S +++ b/arch/arm/crypto/crct10dif-ce-core.S @@ -2,12 +2,14 @@ // Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions // // Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> +// Copyright (C) 2019 Google LLC <ebiggers@google.com> // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License version 2 as // published by the Free Software Foundation. // +// Derived from the x86 version: // // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions // @@ -54,19 +56,11 @@ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// -// Function API: -// UINT16 crc_t10dif_pcl( -// UINT16 init_crc, //initial CRC value, 16 bits -// const unsigned char *buf, //buffer pointer to calculate CRC on -// UINT64 len //buffer length in bytes (64-bit data) -// ); -// // Reference paper titled "Fast CRC Computation for Generic // Polynomials Using PCLMULQDQ Instruction" // URL: http://www.intel.com/content/dam/www/public/us/en/documents // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf // -// #include <linux/linkage.h> #include <asm/assembler.h> @@ -78,13 +72,14 @@ #endif .text + .arch armv7-a .fpu crypto-neon-fp-armv8 - arg1_low32 .req r0 - arg2 .req r1 - arg3 .req r2 + init_crc .req r0 + buf .req r1 + len .req r2 - qzr .req q13 + fold_consts_ptr .req ip q0l .req d0 q0h .req d1 @@ -102,82 +97,35 @@ q6h .req d13 q7l .req d14 q7h .req d15 - -ENTRY(crc_t10dif_pmull) - vmov.i8 qzr, #0 // init zero register - - // adjust the 16-bit initial_crc value, scale it to 32 bits - lsl arg1_low32, arg1_low32, #16 - - // check if smaller than 256 - cmp arg3, #256 - - // for sizes less than 128, we can't fold 64B at a time... - blt _less_than_128 - - // load the initial crc value - // crc value does not need to be byte-reflected, but it needs - // to be moved to the high part of the register. - // because data will be byte-reflected and will align with - // initial crc at correct place. - vmov s0, arg1_low32 // initial crc - vext.8 q10, qzr, q0, #4 - - // receive the initial 64B data, xor the initial crc value - vld1.64 {q0-q1}, [arg2, :128]! - vld1.64 {q2-q3}, [arg2, :128]! - vld1.64 {q4-q5}, [arg2, :128]! - vld1.64 {q6-q7}, [arg2, :128]! -CPU_LE( vrev64.8 q0, q0 ) -CPU_LE( vrev64.8 q1, q1 ) -CPU_LE( vrev64.8 q2, q2 ) -CPU_LE( vrev64.8 q3, q3 ) -CPU_LE( vrev64.8 q4, q4 ) -CPU_LE( vrev64.8 q5, q5 ) -CPU_LE( vrev64.8 q6, q6 ) -CPU_LE( vrev64.8 q7, q7 ) - - vswp d0, d1 - vswp d2, d3 - vswp d4, d5 - vswp d6, d7 - vswp d8, d9 - vswp d10, d11 - vswp d12, d13 - vswp d14, d15 - - // XOR the initial_crc value - veor.8 q0, q0, q10 - - adr ip, rk3 - vld1.64 {q10}, [ip, :128] // xmm10 has rk3 and rk4 - - // - // we subtract 256 instead of 128 to save one instruction from the loop - // - sub arg3, arg3, #256 - - // at this section of the code, there is 64*x+y (0<=y<64) bytes of - // buffer. The _fold_64_B_loop will fold 64B at a time - // until we have 64+y Bytes of buffer - - - // fold 64B at a time. This section of the code folds 4 vector - // registers in parallel -_fold_64_B_loop: - - .macro fold64, reg1, reg2 - vld1.64 {q11-q12}, [arg2, :128]! - - vmull.p64 q8, \reg1\()h, d21 - vmull.p64 \reg1, \reg1\()l, d20 - vmull.p64 q9, \reg2\()h, d21 - vmull.p64 \reg2, \reg2\()l, d20 - -CPU_LE( vrev64.8 q11, q11 ) -CPU_LE( vrev64.8 q12, q12 ) - vswp d22, d23 - vswp d24, d25 + q8l .req d16 + q8h .req d17 + q9l .req d18 + q9h .req d19 + q10l .req d20 + q10h .req d21 + q11l .req d22 + q11h .req d23 + q12l .req d24 + q12h .req d25 + + FOLD_CONSTS .req q10 + FOLD_CONST_L .req q10l + FOLD_CONST_H .req q10h + + // Fold reg1, reg2 into the next 32 data bytes, storing the result back + // into reg1, reg2. + .macro fold_32_bytes, reg1, reg2 + vld1.64 {q11-q12}, [buf]! 
+ + vmull.p64 q8, \reg1\()h, FOLD_CONST_H + vmull.p64 \reg1, \reg1\()l, FOLD_CONST_L + vmull.p64 q9, \reg2\()h, FOLD_CONST_H + vmull.p64 \reg2, \reg2\()l, FOLD_CONST_L + +CPU_LE( vrev64.8 q11, q11 ) +CPU_LE( vrev64.8 q12, q12 ) + vswp q11l, q11h + vswp q12l, q12h veor.8 \reg1, \reg1, q8 veor.8 \reg2, \reg2, q9 @@ -185,242 +133,248 @@ CPU_LE( vrev64.8 q12, q12 ) veor.8 \reg2, \reg2, q12 .endm - fold64 q0, q1 - fold64 q2, q3 - fold64 q4, q5 - fold64 q6, q7 - - subs arg3, arg3, #128 - - // check if there is another 64B in the buffer to be able to fold - bge _fold_64_B_loop - - // at this point, the buffer pointer is pointing at the last y Bytes - // of the buffer the 64B of folded data is in 4 of the vector - // registers: v0, v1, v2, v3 - - // fold the 8 vector registers to 1 vector register with different - // constants - - adr ip, rk9 - vld1.64 {q10}, [ip, :128]! - - .macro fold16, reg, rk - vmull.p64 q8, \reg\()l, d20 - vmull.p64 \reg, \reg\()h, d21 - .ifnb \rk - vld1.64 {q10}, [ip, :128]! + // Fold src_reg into dst_reg, optionally loading the next fold constants + .macro fold_16_bytes, src_reg, dst_reg, load_next_consts + vmull.p64 q8, \src_reg\()l, FOLD_CONST_L + vmull.p64 \src_reg, \src_reg\()h, FOLD_CONST_H + .ifnb \load_next_consts + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! .endif - veor.8 q7, q7, q8 - veor.8 q7, q7, \reg + veor.8 \dst_reg, \dst_reg, q8 + veor.8 \dst_reg, \dst_reg, \src_reg .endm - fold16 q0, rk11 - fold16 q1, rk13 - fold16 q2, rk15 - fold16 q3, rk17 - fold16 q4, rk19 - fold16 q5, rk1 - fold16 q6 - - // instead of 64, we add 48 to the loop counter to save 1 instruction - // from the loop instead of a cmp instruction, we use the negative - // flag with the jl instruction - adds arg3, arg3, #(128-16) - blt _final_reduction_for_128 - - // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 - // and the rest is in memory. We can fold 16 bytes at a time if y>=16 - // continue folding 16B at a time - -_16B_reduction_loop: - vmull.p64 q8, d14, d20 - vmull.p64 q7, d15, d21 - veor.8 q7, q7, q8 + .macro __adrl, out, sym + movw \out, #:lower16:\sym + movt \out, #:upper16:\sym + .endm - vld1.64 {q0}, [arg2, :128]! -CPU_LE( vrev64.8 q0, q0 ) - vswp d0, d1 - veor.8 q7, q7, q0 - subs arg3, arg3, #16 - - // instead of a cmp instruction, we utilize the flags with the - // jge instruction equivalent of: cmp arg3, 16-16 - // check if there is any more 16B in the buffer to be able to fold - bge _16B_reduction_loop - - // now we have 16+z bytes left to reduce, where 0<= z < 16. - // first, we reduce the data in the xmm7 register - -_final_reduction_for_128: - // check if any more data to fold. If not, compute the CRC of - // the final 128 bits - adds arg3, arg3, #16 - beq _128_done - - // here we are getting data that is less than 16 bytes. - // since we know that there was data before the pointer, we can - // offset the input pointer before the actual point, to receive - // exactly 16 bytes. after that the registers need to be adjusted. 
-_get_last_two_regs: - add arg2, arg2, arg3 - sub arg2, arg2, #16 - vld1.64 {q1}, [arg2] -CPU_LE( vrev64.8 q1, q1 ) - vswp d2, d3 - - // get rid of the extra data that was loaded before - // load the shift constant - adr ip, tbl_shf_table + 16 - sub ip, ip, arg3 - vld1.8 {q0}, [ip] - - // shift v2 to the left by arg3 bytes - vtbl.8 d4, {d14-d15}, d0 - vtbl.8 d5, {d14-d15}, d1 - - // shift v7 to the right by 16-arg3 bytes - vmov.i8 q9, #0x80 - veor.8 q0, q0, q9 - vtbl.8 d18, {d14-d15}, d0 - vtbl.8 d19, {d14-d15}, d1 - - // blend - vshr.s8 q0, q0, #7 // convert to 8-bit mask - vbsl.8 q0, q2, q1 - - // fold 16 Bytes - vmull.p64 q8, d18, d20 - vmull.p64 q7, d19, d21 +// +// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len); +// +// Assumes len >= 16. +// +ENTRY(crc_t10dif_pmull) + + // For sizes less than 256 bytes, we can't fold 128 bytes at a time. + cmp len, #256 + blt .Lless_than_256_bytes + + __adrl fold_consts_ptr, .Lfold_across_128_bytes_consts + + // Load the first 128 data bytes. Byte swapping is necessary to make + // the bit order match the polynomial coefficient order. + vld1.64 {q0-q1}, [buf]! + vld1.64 {q2-q3}, [buf]! + vld1.64 {q4-q5}, [buf]! + vld1.64 {q6-q7}, [buf]! +CPU_LE( vrev64.8 q0, q0 ) +CPU_LE( vrev64.8 q1, q1 ) +CPU_LE( vrev64.8 q2, q2 ) +CPU_LE( vrev64.8 q3, q3 ) +CPU_LE( vrev64.8 q4, q4 ) +CPU_LE( vrev64.8 q5, q5 ) +CPU_LE( vrev64.8 q6, q6 ) +CPU_LE( vrev64.8 q7, q7 ) + vswp q0l, q0h + vswp q1l, q1h + vswp q2l, q2h + vswp q3l, q3h + vswp q4l, q4h + vswp q5l, q5h + vswp q6l, q6h + vswp q7l, q7h + + // XOR the first 16 data *bits* with the initial CRC value. + vmov.i8 q8h, #0 + vmov.u16 q8h[3], init_crc + veor q0h, q0h, q8h + + // Load the constants for folding across 128 bytes. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! + + // Subtract 128 for the 128 data bytes just consumed. Subtract another + // 128 to simplify the termination condition of the following loop. + sub len, len, #256 + + // While >= 128 data bytes remain (not counting q0-q7), fold the 128 + // bytes q0-q7 into them, storing the result back into q0-q7. +.Lfold_128_bytes_loop: + fold_32_bytes q0, q1 + fold_32_bytes q2, q3 + fold_32_bytes q4, q5 + fold_32_bytes q6, q7 + subs len, len, #128 + bge .Lfold_128_bytes_loop + + // Now fold the 112 bytes in q0-q6 into the 16 bytes in q7. + + // Fold across 64 bytes. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! + fold_16_bytes q0, q4 + fold_16_bytes q1, q5 + fold_16_bytes q2, q6 + fold_16_bytes q3, q7, 1 + // Fold across 32 bytes. + fold_16_bytes q4, q6 + fold_16_bytes q5, q7, 1 + // Fold across 16 bytes. + fold_16_bytes q6, q7 + + // Add 128 to get the correct number of data bytes remaining in 0...127 + // (not counting q7), following the previous extra subtraction by 128. + // Then subtract 16 to simplify the termination condition of the + // following loop. + adds len, len, #(128-16) + + // While >= 16 data bytes remain (not counting q7), fold the 16 bytes q7 + // into them, storing the result back into q7. + blt .Lfold_16_bytes_loop_done +.Lfold_16_bytes_loop: + vmull.p64 q8, q7l, FOLD_CONST_L + vmull.p64 q7, q7h, FOLD_CONST_H veor.8 q7, q7, q8 + vld1.64 {q0}, [buf]! 
+CPU_LE( vrev64.8 q0, q0 ) + vswp q0l, q0h veor.8 q7, q7, q0 - -_128_done: - // compute crc of a 128-bit value - vldr d20, rk5 - vldr d21, rk6 // rk5 and rk6 in xmm10 - - // 64b fold - vext.8 q0, qzr, q7, #8 - vmull.p64 q7, d15, d20 + subs len, len, #16 + bge .Lfold_16_bytes_loop + +.Lfold_16_bytes_loop_done: + // Add 16 to get the correct number of data bytes remaining in 0...15 + // (not counting q7), following the previous extra subtraction by 16. + adds len, len, #16 + beq .Lreduce_final_16_bytes + +.Lhandle_partial_segment: + // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first + // 16 bytes are in q7 and the rest are the remaining data in 'buf'. To + // do this without needing a fold constant for each possible 'len', + // redivide the bytes into a first chunk of 'len' bytes and a second + // chunk of 16 bytes, then fold the first chunk into the second. + + // q0 = last 16 original data bytes + add buf, buf, len + sub buf, buf, #16 + vld1.64 {q0}, [buf] +CPU_LE( vrev64.8 q0, q0 ) + vswp q0l, q0h + + // q1 = high order part of second chunk: q7 left-shifted by 'len' bytes. + __adrl r3, .Lbyteshift_table + 16 + sub r3, r3, len + vld1.8 {q2}, [r3] + vtbl.8 q1l, {q7l-q7h}, q2l + vtbl.8 q1h, {q7l-q7h}, q2h + + // q3 = first chunk: q7 right-shifted by '16-len' bytes. + vmov.i8 q3, #0x80 + veor.8 q2, q2, q3 + vtbl.8 q3l, {q7l-q7h}, q2l + vtbl.8 q3h, {q7l-q7h}, q2h + + // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes. + vshr.s8 q2, q2, #7 + + // q2 = second chunk: 'len' bytes from q0 (low-order bytes), + // then '16-len' bytes from q1 (high-order bytes). + vbsl.8 q2, q1, q0 + + // Fold the first chunk into the second chunk, storing the result in q7. + vmull.p64 q0, q3l, FOLD_CONST_L + vmull.p64 q7, q3h, FOLD_CONST_H veor.8 q7, q7, q0 + veor.8 q7, q7, q2 + +.Lreduce_final_16_bytes: + // Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC. + + // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! + + // Fold the high 64 bits into the low 64 bits, while also multiplying by + // x^64. This produces a 128-bit value congruent to x^64 * M(x) and + // whose low 48 bits are 0. + vmull.p64 q0, q7h, FOLD_CONST_H // high bits * x^48 * (x^80 mod G(x)) + veor.8 q0h, q0h, q7l // + low bits * x^64 + + // Fold the high 32 bits into the low 96 bits. This produces a 96-bit + // value congruent to x^64 * M(x) and whose low 48 bits are 0. + vmov.i8 q1, #0 + vmov s4, s3 // extract high 32 bits + vmov s3, s5 // zero high 32 bits + vmull.p64 q1, q1l, FOLD_CONST_L // high 32 bits * x^48 * (x^48 mod G(x)) + veor.8 q0, q0, q1 // + low bits + + // Load G(x) and floor(x^48 / G(x)). + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128] + + // Use Barrett reduction to compute the final CRC value. + vmull.p64 q1, q0h, FOLD_CONST_H // high 32 bits * floor(x^48 / G(x)) + vshr.u64 q1l, q1l, #32 // /= x^32 + vmull.p64 q1, q1l, FOLD_CONST_L // *= G(x) + vshr.u64 q0l, q0l, #48 + veor.8 q0l, q0l, q1l // + low 16 nonzero bits + // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0. 
+ + vmov.u16 r0, q0l[0] + bx lr - // 32b fold - vext.8 q0, q7, qzr, #12 - vmov s31, s3 - vmull.p64 q0, d0, d21 - veor.8 q7, q0, q7 - - // barrett reduction -_barrett: - vldr d20, rk7 - vldr d21, rk8 - - vmull.p64 q0, d15, d20 - vext.8 q0, qzr, q0, #12 - vmull.p64 q0, d1, d21 - vext.8 q0, qzr, q0, #12 - veor.8 q7, q7, q0 - vmov r0, s29 +.Lless_than_256_bytes: + // Checksumming a buffer of length 16...255 bytes -_cleanup: - // scale the result back to 16 bits - lsr r0, r0, #16 - bx lr + __adrl fold_consts_ptr, .Lfold_across_16_bytes_consts -_less_than_128: - teq arg3, #0 - beq _cleanup + // Load the first 16 data bytes. + vld1.64 {q7}, [buf]! +CPU_LE( vrev64.8 q7, q7 ) + vswp q7l, q7h - vmov.i8 q0, #0 - vmov s3, arg1_low32 // get the initial crc value + // XOR the first 16 data *bits* with the initial CRC value. + vmov.i8 q0h, #0 + vmov.u16 q0h[3], init_crc + veor.8 q7h, q7h, q0h - vld1.64 {q7}, [arg2, :128]! -CPU_LE( vrev64.8 q7, q7 ) - vswp d14, d15 - veor.8 q7, q7, q0 + // Load the fold-across-16-bytes constants. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! - cmp arg3, #16 - beq _128_done // exactly 16 left - blt _less_than_16_left - - // now if there is, load the constants - vldr d20, rk1 - vldr d21, rk2 // rk1 and rk2 in xmm10 - - // check if there is enough buffer to be able to fold 16B at a time - subs arg3, arg3, #32 - addlt arg3, arg3, #16 - blt _get_last_two_regs - b _16B_reduction_loop - -_less_than_16_left: - // shl r9, 4 - adr ip, tbl_shf_table + 16 - sub ip, ip, arg3 - vld1.8 {q0}, [ip] - vmov.i8 q9, #0x80 - veor.8 q0, q0, q9 - vtbl.8 d18, {d14-d15}, d0 - vtbl.8 d15, {d14-d15}, d1 - vmov d14, d18 - b _128_done + cmp len, #16 + beq .Lreduce_final_16_bytes // len == 16 + subs len, len, #32 + addlt len, len, #16 + blt .Lhandle_partial_segment // 17 <= len <= 31 + b .Lfold_16_bytes_loop // 32 <= len <= 255 ENDPROC(crc_t10dif_pmull) -// precomputed constants -// these constants are precomputed from the poly: -// 0x8bb70000 (0x8bb7 scaled to 32 bits) + .section ".rodata", "a" .align 4 -// Q = 0x18BB70000 -// rk1 = 2^(32*3) mod Q << 32 -// rk2 = 2^(32*5) mod Q << 32 -// rk3 = 2^(32*15) mod Q << 32 -// rk4 = 2^(32*17) mod Q << 32 -// rk5 = 2^(32*3) mod Q << 32 -// rk6 = 2^(32*2) mod Q << 32 -// rk7 = floor(2^64/Q) -// rk8 = Q - -rk3: .quad 0x9d9d000000000000 -rk4: .quad 0x7cf5000000000000 -rk5: .quad 0x2d56000000000000 -rk6: .quad 0x1368000000000000 -rk7: .quad 0x00000001f65a57f8 -rk8: .quad 0x000000018bb70000 -rk9: .quad 0xceae000000000000 -rk10: .quad 0xbfd6000000000000 -rk11: .quad 0x1e16000000000000 -rk12: .quad 0x713c000000000000 -rk13: .quad 0xf7f9000000000000 -rk14: .quad 0x80a6000000000000 -rk15: .quad 0x044c000000000000 -rk16: .quad 0xe658000000000000 -rk17: .quad 0xad18000000000000 -rk18: .quad 0xa497000000000000 -rk19: .quad 0x6ee3000000000000 -rk20: .quad 0xe7b5000000000000 -rk1: .quad 0x2d56000000000000 -rk2: .quad 0x06df000000000000 - -tbl_shf_table: -// use these values for shift constants for the tbl/tbx instruction -// different alignments result in values as shown: -// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 -// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 -// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 -// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 -// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 -// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 -// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 -// DDQ 0x07060504030201008f8e8d8c8b8a8988 # 
shl 8 (16-8) / shr8 -// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 -// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 -// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 -// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 -// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 -// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 -// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 +// Fold constants precomputed from the polynomial 0x18bb7 +// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 +.Lfold_across_128_bytes_consts: + .quad 0x0000000000006123 // x^(8*128) mod G(x) + .quad 0x0000000000002295 // x^(8*128+64) mod G(x) +// .Lfold_across_64_bytes_consts: + .quad 0x0000000000001069 // x^(4*128) mod G(x) + .quad 0x000000000000dd31 // x^(4*128+64) mod G(x) +// .Lfold_across_32_bytes_consts: + .quad 0x000000000000857d // x^(2*128) mod G(x) + .quad 0x0000000000007acc // x^(2*128+64) mod G(x) +.Lfold_across_16_bytes_consts: + .quad 0x000000000000a010 // x^(1*128) mod G(x) + .quad 0x0000000000001faa // x^(1*128+64) mod G(x) +// .Lfinal_fold_consts: + .quad 0x1368000000000000 // x^48 * (x^48 mod G(x)) + .quad 0x2d56000000000000 // x^48 * (x^80 mod G(x)) +// .Lbarrett_reduction_consts: + .quad 0x0000000000018bb7 // G(x) + .quad 0x00000001f65a57f8 // floor(x^48 / G(x)) + +// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - +// len] is the index vector to shift left by 'len' bytes, and is also {0x80, +// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes. +.Lbyteshift_table: .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c index d428355cf38d..3d6b800b8396 100644 --- a/arch/arm/crypto/crct10dif-ce-glue.c +++ b/arch/arm/crypto/crct10dif-ce-glue.c @@ -21,7 +21,7 @@ #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U -asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u32 len); +asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len); static int crct10dif_init(struct shash_desc *desc) { @@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u16 *crc = shash_desc_ctx(desc); - unsigned int l; - if (!may_use_simd()) { - *crc = crc_t10dif_generic(*crc, data, length); + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull(*crc, data, length); + kernel_neon_end(); } else { - if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - - ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); - - *crc = crc_t10dif_generic(*crc, data, l); - - length -= l; - data += l; - } - if (length > 0) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull(*crc, data, length); - kernel_neon_end(); - } + *crc = crc_t10dif_generic(*crc, data, length); } + return 0; } diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index b9ec44060ed3..a03cf4dfb781 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -212,10 +212,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr 
r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index 3b58300d611c..054aae0edfce 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -93,10 +93,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index fb5d15048c0b..788c17b56ecc 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ b/arch/arm/crypto/sha512-armv4.pl @@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: +.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index b1c334a49cda..710ea309769e 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ b/arch/arm/crypto/sha512-core.S_shipped @@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: +.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index c883fcbe93b6..46d41140df27 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -25,7 +25,6 @@ #ifndef __ASSEMBLY__ struct irqaction; struct pt_regs; -extern void migrate_irqs(void); extern void asm_do_IRQ(unsigned int, struct pt_regs *); void handle_IRQ(unsigned int, struct pt_regs *); diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index ca56537b61bc..50e89869178a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -48,6 +48,7 @@ #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -147,6 +148,13 @@ struct kvm_cpu_context { typedef struct kvm_cpu_context kvm_cpu_context_t; +struct vcpu_reset_state { + unsigned long pc; + unsigned long r0; + bool be; + bool reset; +}; + struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; @@ -186,6 +194,8 @@ struct kvm_vcpu_arch { /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; + struct vcpu_reset_state reset_state; + /* Detect first run of a vcpu */ bool has_run_once; }; diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index c4b1d4fb1797..de2089501b8b 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h @@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm) #define S2_PMD_MASK 
PMD_MASK #define S2_PMD_SIZE PMD_SIZE +static inline bool kvm_stage2_has_pmd(struct kvm *kvm) +{ + return true; +} + #endif /* __ARM_S2_PGTABLE_H_ */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 42aa4a22803c..ae5a0df5316e 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -59,7 +59,6 @@ extern int __put_user_bad(void); * Note that this is actually 0x1,0000,0000 */ #define KERNEL_DS 0x00000000 -#define get_ds() (KERNEL_DS) #ifdef CONFIG_MMU diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 9908dacf9229..844861368cd5 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -31,7 +31,6 @@ #include <linux/smp.h> #include <linux/init.h> #include <linux/seq_file.h> -#include <linux/ratelimit.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/kallsyms.h> @@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void) return nr_irqs; } #endif - -#ifdef CONFIG_HOTPLUG_CPU -static bool migrate_one_irq(struct irq_desc *desc) -{ - struct irq_data *d = irq_desc_get_irq_data(desc); - const struct cpumask *affinity = irq_data_get_affinity_mask(d); - struct irq_chip *c; - bool ret = false; - - /* - * If this is a per-CPU interrupt, or the affinity does not - * include this CPU, then we have nothing to do. - */ - if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) - return false; - - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { - affinity = cpu_online_mask; - ret = true; - } - - c = irq_data_get_irq_chip(d); - if (!c->irq_set_affinity) - pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) - cpumask_copy(irq_data_get_affinity_mask(d), affinity); - - return ret; -} - -/* - * The current CPU has been marked offline. Migrate IRQs off this CPU. - * If the affinity settings do not allow other CPUs, force them onto any - * available CPU. - * - * Note: we must iterate over all IRQs, whether they have an attached - * action structure or not, as we need to get chained interrupts too. 
- */ -void migrate_irqs(void) -{ - unsigned int i; - struct irq_desc *desc; - unsigned long flags; - - local_irq_save(flags); - - for_each_irq_desc(i, desc) { - bool affinity_broken; - - raw_spin_lock(&desc->lock); - affinity_broken = migrate_one_irq(desc); - raw_spin_unlock(&desc->lock); - - if (affinity_broken) - pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", - i, smp_processor_id()); - } - - local_irq_restore(flags); -} -#endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 16601d1442d1..72cc0862a30e 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -150,7 +150,7 @@ void __show_regs(struct pt_regs *regs) if ((domain & domain_mask(DOMAIN_USER)) == domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) segment = "none"; - else if (fs == get_ds()) + else if (fs == KERNEL_DS) segment = "kernel"; else segment = "user"; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 3bf82232b1be..1d6f5ea522f4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -254,7 +254,7 @@ int __cpu_disable(void) /* * OK - migrate IRQs away from this CPU */ - migrate_irqs(); + irq_migrate_all_off_this_cpu(); /* * Flush user cache and TLB mappings, and then remove this CPU diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 222c1635bc7a..e8bd288fd5be 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) reset_coproc_regs(vcpu, table, num); for (num = 1; num < NR_CP15_REGS; num++) - if (vcpu_cp15(vcpu, num) == 0x42424242) - panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); + WARN(vcpu_cp15(vcpu, num) == 0x42424242, + "Didn't reset vcpu_cp15(vcpu, %zi)", num); } diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index 5ed0c3ee33d6..e53327912adc 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -26,6 +26,7 @@ #include <asm/cputype.h> #include <asm/kvm_arm.h> #include <asm/kvm_coproc.h> +#include <asm/kvm_emulate.h> #include <kvm/arm_arch_timer.h> @@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset CP15 registers */ kvm_reset_coprocs(vcpu); + /* + * Additional reset state handling that PSCI may have imposed on us. + * Must be done after all the sys_reg reset. 
+ */ + if (READ_ONCE(vcpu->arch.reset_state.reset)) { + unsigned long target_pc = vcpu->arch.reset_state.pc; + + /* Gracefully handle Thumb2 entry point */ + if (target_pc & 1) { + target_pc &= ~1UL; + vcpu_set_thumb(vcpu); + } + + /* Propagate caller endianness */ + if (vcpu->arch.reset_state.be) + kvm_vcpu_set_be(vcpu); + + *vcpu_pc(vcpu) = target_pc; + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); + + vcpu->arch.reset_state.reset = false; + } + /* Reset arch_timer context */ return kvm_timer_vcpu_reset(vcpu); } diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c index 2e1e540f2e5a..d278fb672d40 100644 --- a/arch/arm/mach-imx/mach-mx21ads.c +++ b/arch/arm/mach-imx/mach-mx21ads.c @@ -205,7 +205,6 @@ static struct regulator_init_data mx21ads_lcd_regulator_init_data = { static struct fixed_voltage_config mx21ads_lcd_regulator_pdata = { .supply_name = "LCD", .microvolts = 3300000, - .enable_high = 1, .init_data = &mx21ads_lcd_regulator_init_data, }; diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c index f5e04047ed13..6dd7f57c332f 100644 --- a/arch/arm/mach-imx/mach-mx27ads.c +++ b/arch/arm/mach-imx/mach-mx27ads.c @@ -237,7 +237,7 @@ static struct fixed_voltage_config mx27ads_lcd_regulator_pdata = { static struct gpiod_lookup_table mx27ads_lcd_regulator_gpiod_table = { .dev_id = "reg-fixed-voltage.0", /* Let's hope ID 0 is what we get */ .table = { - GPIO_LOOKUP("LCD", 0, NULL, GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("LCD", 0, NULL, GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c index e49e06834516..fce4b426c379 100644 --- a/arch/arm/mach-imx/mmdc.c +++ b/arch/arm/mach-imx/mmdc.c @@ -294,13 +294,7 @@ static int mmdc_pmu_event_init(struct perf_event *event) return -EOPNOTSUPP; } - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) + if (event->attr.sample_period) return -EINVAL; if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS) @@ -456,6 +450,7 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc, .start = mmdc_pmu_event_start, .stop = mmdc_pmu_event_stop, .read = mmdc_pmu_event_update, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }, .mmdc_base = mmdc_base, .dev = dev, diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 3b73813c6b04..23e8c93515d4 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -75,8 +75,7 @@ void __init n2100_map_io(void) /* * N2100 PCI. 
*/ -static int __init -n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c index a04e249c654b..d2560fb1e835 100644 --- a/arch/arm/mach-mmp/brownstone.c +++ b/arch/arm/mach-mmp/brownstone.c @@ -149,7 +149,6 @@ static struct regulator_init_data brownstone_v_5vp_data = { static struct fixed_voltage_config brownstone_v_5vp = { .supply_name = "v_5vp", .microvolts = 5000000, - .enable_high = 1, .enabled_at_boot = 1, .init_data = &brownstone_v_5vp_data, }; diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index c4c0a8ea11e4..be30c3c061b4 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -267,7 +267,6 @@ static struct fixed_voltage_config modem_nreset_config = { .supply_name = "modem_nreset", .microvolts = 3300000, .startup_delay = 25000, - .enable_high = 1, .enabled_at_boot = 1, .init_data = &modem_nreset_data, }; @@ -533,7 +532,6 @@ static struct regulator_init_data keybrd_pwr_initdata = { static struct fixed_voltage_config keybrd_pwr_config = { .supply_name = "keybrd_pwr", .microvolts = 5000000, - .enable_high = 1, .init_data = &keybrd_pwr_initdata, }; diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index a8b291f00109..dae514c8276a 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && (cx->mpu_logic_state == PWRDM_POWER_OFF); + /* Enter broadcast mode for periodic timers */ + tick_broadcast_enable(); + + /* Enter broadcast mode for one-shot timers */ tick_broadcast_enter(); /* @@ -218,15 +222,6 @@ fail: return index; } -/* - * For each cpu, setup the broadcast timer because local timers - * stops for the states above C1. 
- */ -static void omap_setup_broadcast_timer(void *arg) -{ - tick_broadcast_enable(); -} - static struct cpuidle_driver omap4_idle_driver = { .name = "omap4_idle", .owner = THIS_MODULE, @@ -319,8 +314,5 @@ int __init omap4_idle_init(void) if (!cpu_clkdm[0] || !cpu_clkdm[1]) return -ENODEV; - /* Configure the broadcast timer on each cpu */ - on_each_cpu(omap_setup_broadcast_timer, NULL, 1); - return cpuidle_register(idle_driver, cpu_online_mask); } diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index f86b72d1d59e..1444b4b4bd9f 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) u32 enable_mask, enable_shift; u32 pipd_mask, pipd_shift; u32 reg; + int ret; if (dsi_id == 0) { enable_mask = OMAP4_DSI1_LANEENABLE_MASK; @@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) return -ENODEV; } - regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, ®); + ret = regmap_read(omap4_dsi_mux_syscon, + OMAP4_DSIPHY_SYSCON_OFFSET, + ®); + if (ret) + return ret; reg &= ~enable_mask; reg &= ~pipd_mask; diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index fc5fb776a710..17558be4bf0a 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -50,6 +50,9 @@ #define OMAP4_NR_BANKS 4 #define OMAP4_NR_IRQS 128 +#define SYS_NIRQ1_EXT_SYS_IRQ_1 7 +#define SYS_NIRQ2_EXT_SYS_IRQ_2 119 + static void __iomem *wakeupgen_base; static void __iomem *sar_base; static DEFINE_RAW_SPINLOCK(wakeupgen_lock); @@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d) irq_chip_unmask_parent(d); } +/* + * The sys_nirq pins bypass peripheral modules and are wired directly + * to MPUSS wakeupgen. They get automatically inverted for GIC. 
+ */ +static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type) +{ + bool inverted = false; + + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + type &= ~IRQ_TYPE_LEVEL_MASK; + type |= IRQ_TYPE_LEVEL_HIGH; + inverted = true; + break; + case IRQ_TYPE_EDGE_FALLING: + type &= ~IRQ_TYPE_EDGE_BOTH; + type |= IRQ_TYPE_EDGE_RISING; + inverted = true; + break; + default: + break; + } + + if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 && + d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2) + pr_warn("wakeupgen: irq%li polarity inverted in dts\n", + d->hwirq); + + return irq_chip_set_type_parent(d, type); +} + #ifdef CONFIG_HOTPLUG_CPU static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); @@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = { .irq_mask = wakeupgen_mask, .irq_unmask = wakeupgen_unmask, .irq_retrigger = irq_chip_retrigger_hierarchy, - .irq_set_type = irq_chip_set_type_parent, + .irq_set_type = wakeupgen_irq_set_type, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 8a5b6ed4ec36..a2ecc5e69abb 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -330,7 +330,6 @@ static struct fixed_voltage_config pandora_vwlan = { .supply_name = "vwlan", .microvolts = 1800000, /* 1.8V */ .startup_delay = 50000, /* 50ms */ - .enable_high = 1, .init_data = &pandora_vmmc3, }; diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 83a7ec4c16d0..c67f92bfa30e 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -20,7 +20,7 @@ #include <linux/delay.h> #include <linux/clk-provider.h> #include <linux/cpu.h> -#include <net/dsa.h> +#include <linux/platform_data/dsa.h> #include <asm/page.h> #include <asm/setup.h> #include <asm/system_misc.h> diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c index a3c1336d30c9..c65ab7db36ad 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c @@ -16,7 +16,7 @@ #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/ethtool.h> -#include <net/dsa.h> +#include <linux/platform_data/dsa.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c index 252efe29bd1a..76b8138d9d79 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c @@ -17,7 +17,7 @@ #include <linux/mv643xx_eth.h> #include <linux/ethtool.h> #include <linux/i2c.h> -#include <net/dsa.h> +#include <linux/platform_data/dsa.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c index f4f1dbe1d91d..5f388a1ed1e4 100644 --- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c @@ -18,7 +18,7 @@ #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/ethtool.h> -#include <net/dsa.h> +#include <linux/platform_data/dsa.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c index d162d4c7f85d..83589a28a491 100644 --- 
a/arch/arm/mach-orion5x/wnr854t-setup.c +++ b/arch/arm/mach-orion5x/wnr854t-setup.c @@ -15,7 +15,7 @@ #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/ethtool.h> -#include <net/dsa.h> +#include <linux/platform_data/dsa.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c index 9250bb2e429c..cea08d4a2597 100644 --- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c +++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c @@ -18,7 +18,7 @@ #include <linux/leds.h> #include <linux/gpio_keys.h> #include <linux/input.h> -#include <net/dsa.h> +#include <linux/platform_data/dsa.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> diff --git a/arch/arm/mach-pxa/cm-x255.c b/arch/arm/mach-pxa/cm-x255.c index fa8e7dd4d898..4401dfcd7e68 100644 --- a/arch/arm/mach-pxa/cm-x255.c +++ b/arch/arm/mach-pxa/cm-x255.c @@ -98,7 +98,7 @@ static unsigned long cmx255_pin_config[] = { }; #if defined(CONFIG_SPI_PXA2XX) -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c index f7081a50dc67..279eeca7add0 100644 --- a/arch/arm/mach-pxa/cm-x270.c +++ b/arch/arm/mach-pxa/cm-x270.c @@ -313,7 +313,7 @@ static inline void cmx270_init_mmc(void) {} #endif #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master cm_x270_spi_info = { +static struct pxa2xx_spi_controller cm_x270_spi_info = { .num_chipselect = 1, .enable_dma = 1, }; diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index c9732cace5e3..7ecf559bd71c 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -530,7 +530,7 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = { }; #if IS_ENABLED(CONFIG_SPI_PXA2XX) -static struct pxa2xx_spi_master corgi_spi_info = { +static struct pxa2xx_spi_controller corgi_spi_info = { .num_chipselect = 3, }; diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index a24783a03827..524d6093e0c7 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -1065,7 +1065,7 @@ struct platform_device pxa93x_device_gpio = { /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. 
* See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ -void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info) +void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info) { struct platform_device *pd; diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 32c1edeb3f14..fa3adb073a0f 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -689,7 +689,7 @@ static inline void em_x270_init_lcd(void) {} #endif #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master em_x270_spi_info = { +static struct pxa2xx_spi_controller em_x270_spi_info = { .num_chipselect = 1, }; @@ -703,7 +703,7 @@ static struct tdo24m_platform_data em_x270_tdo24m_pdata = { .model = TDO35S, }; -static struct pxa2xx_spi_master em_x270_spi_2_info = { +static struct pxa2xx_spi_controller em_x270_spi_2_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -976,7 +976,6 @@ static struct fixed_voltage_config camera_dummy_config = { .supply_name = "camera_vdd", .input_supply = "vcc cam", .microvolts = 2800000, - .enable_high = 0, .init_data = &camera_dummy_initdata, }; diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c index 565965e9acc7..5e110e70ce5a 100644 --- a/arch/arm/mach-pxa/ezx.c +++ b/arch/arm/mach-pxa/ezx.c @@ -714,7 +714,6 @@ static struct regulator_init_data camera_regulator_initdata = { static struct fixed_voltage_config camera_regulator_config = { .supply_name = "camera_vdd", .microvolts = 2800000, - .enable_high = 0, .init_data = &camera_regulator_initdata, }; @@ -730,7 +729,7 @@ static struct gpiod_lookup_table camera_supply_gpiod_table = { .dev_id = "reg-fixed-voltage.1", .table = { GPIO_LOOKUP("gpio-pxa", GPIO50_nCAM_EN, - NULL, GPIO_ACTIVE_HIGH), + NULL, GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index b79b757fdd41..1d6b1d2fb6a9 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/fb.h> +#include <linux/gpio/machine.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/input.h> @@ -629,7 +630,7 @@ static struct spi_board_info tsc2046_board_info[] __initdata = { }, }; -static struct pxa2xx_spi_master pxa_ssp2_master_info = { +static struct pxa2xx_spi_controller pxa_ssp2_master_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -702,9 +703,7 @@ static struct regulator_init_data bq24022_init_data = { .consumer_supplies = bq24022_consumers, }; -static struct gpio bq24022_gpios[] = { - { GPIO96_HX4700_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" }, -}; +static enum gpiod_flags bq24022_gpiod_gflags[] = { GPIOD_OUT_LOW }; static struct gpio_regulator_state bq24022_states[] = { { .value = 100000, .gpios = (0 << 0) }, @@ -714,12 +713,10 @@ static struct gpio_regulator_state bq24022_states[] = { static struct gpio_regulator_config bq24022_info = { .supply_name = "bq24022", - .enable_gpio = GPIO72_HX4700_BQ24022_nCHARGE_EN, - .enable_high = 0, .enabled_at_boot = 0, - .gpios = bq24022_gpios, - .nr_gpios = ARRAY_SIZE(bq24022_gpios), + .gflags = bq24022_gpiod_gflags, + .ngpios = ARRAY_SIZE(bq24022_gpiod_gflags), .states = bq24022_states, .nr_states = ARRAY_SIZE(bq24022_states), @@ -736,6 +733,17 @@ static struct platform_device bq24022 = { }, }; +static struct gpiod_lookup_table bq24022_gpiod_table = { + .dev_id = "gpio-regulator", + .table = { + GPIO_LOOKUP("gpio-pxa", 
GPIO96_HX4700_BQ24022_ISET2, + NULL, GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO72_HX4700_BQ24022_nCHARGE_EN, + "enable", GPIO_ACTIVE_LOW), + { }, + }, +}; + /* * StrataFlash */ @@ -878,6 +886,7 @@ static void __init hx4700_init(void) pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); + gpiod_add_lookup_table(&bq24022_gpiod_table); platform_add_devices(devices, ARRAY_SIZE(devices)); pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup)); diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c index cbaf4f6edcda..7e30452e3840 100644 --- a/arch/arm/mach-pxa/icontrol.c +++ b/arch/arm/mach-pxa/icontrol.c @@ -115,12 +115,12 @@ static struct spi_board_info mcp251x_board_info[] = { } }; -static struct pxa2xx_spi_master pxa_ssp3_spi_master_info = { +static struct pxa2xx_spi_controller pxa_ssp3_spi_master_info = { .num_chipselect = 2, .enable_dma = 1 }; -static struct pxa2xx_spi_master pxa_ssp4_spi_master_info = { +static struct pxa2xx_spi_controller pxa_ssp4_spi_master_info = { .num_chipselect = 2, .enable_dma = 1 }; diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c index 39db4898dc4a..464b8bd2bcb9 100644 --- a/arch/arm/mach-pxa/littleton.c +++ b/arch/arm/mach-pxa/littleton.c @@ -191,7 +191,7 @@ static inline void littleton_init_lcd(void) {}; #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */ #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master littleton_spi_info = { +static struct pxa2xx_spi_controller littleton_spi_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index a1391e113ef4..c1bd0d544981 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c @@ -197,7 +197,7 @@ static struct platform_device sa1111_device = { * (to J5) and poking board registers (as done below). Else it's only useful * for the temperature sensors. 
*/ -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 08b079653c3f..75abc21083eb 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -645,9 +645,8 @@ static struct regulator_init_data bq24022_init_data = { .consumer_supplies = bq24022_consumers, }; -static struct gpio bq24022_gpios[] = { - { EGPIO_MAGICIAN_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" }, -}; + +static enum gpiod_flags bq24022_gpiod_gflags[] = { GPIOD_OUT_LOW }; static struct gpio_regulator_state bq24022_states[] = { { .value = 100000, .gpios = (0 << 0) }, @@ -657,12 +656,10 @@ static struct gpio_regulator_state bq24022_states[] = { static struct gpio_regulator_config bq24022_info = { .supply_name = "bq24022", - .enable_gpio = GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, - .enable_high = 0, .enabled_at_boot = 1, - .gpios = bq24022_gpios, - .nr_gpios = ARRAY_SIZE(bq24022_gpios), + .gflags = bq24022_gpiod_gflags, + .ngpios = ARRAY_SIZE(bq24022_gpiod_gflags), .states = bq24022_states, .nr_states = ARRAY_SIZE(bq24022_states), @@ -679,6 +676,17 @@ static struct platform_device bq24022 = { }, }; +static struct gpiod_lookup_table bq24022_gpiod_table = { + .dev_id = "gpio-regulator", + .table = { + GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2, + NULL, GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, + "enable", GPIO_ACTIVE_LOW), + { }, + }, +}; + /* * fixed regulator for ads7846 */ @@ -932,7 +940,7 @@ struct pxa2xx_spi_chip tsc2046_chip_info = { .gpio_cs = GPIO14_MAGICIAN_TSC2046_CS, }; -static struct pxa2xx_spi_master magician_spi_info = { +static struct pxa2xx_spi_controller magician_spi_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -1027,6 +1035,7 @@ static void __init magician_init(void) regulator_register_always_on(0, "power", pwm_backlight_supply, ARRAY_SIZE(pwm_backlight_supply), 5000000); + gpiod_add_lookup_table(&bq24022_gpiod_table); platform_add_devices(ARRAY_AND_SIZE(devices)); } diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c index ccca9f7575c3..e2e613449660 100644 --- a/arch/arm/mach-pxa/pcm027.c +++ b/arch/arm/mach-pxa/pcm027.c @@ -132,7 +132,7 @@ static struct platform_device smc91x_device = { /* * SPI host and devices */ -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index c2a43d4cfd3e..9450a523cd0b 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c @@ -196,7 +196,7 @@ struct platform_device poodle_locomo_device = { EXPORT_SYMBOL(poodle_locomo_device); #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master poodle_spi_info = { +static struct pxa2xx_spi_controller poodle_spi_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index e1db072756f2..e13bfc9b01d2 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -883,7 +883,6 @@ static struct regulator_init_data audio_va_initdata = { static struct fixed_voltage_config audio_va_config = { .supply_name = "audio_va", .microvolts = 5000000, - .enable_high = 1, .enabled_at_boot = 0, .init_data = &audio_va_initdata, }; diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 
306818e2cf54..8dac824a85df 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -572,7 +572,7 @@ static struct spi_board_info spitz_spi_devices[] = { }, }; -static struct pxa2xx_spi_master spitz_spi_info = { +static struct pxa2xx_spi_controller spitz_spi_info = { .num_chipselect = 3, }; diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c index e0d6c872270a..c28d19b126a7 100644 --- a/arch/arm/mach-pxa/stargate2.c +++ b/arch/arm/mach-pxa/stargate2.c @@ -337,15 +337,15 @@ static struct platform_device stargate2_flash_device = { .num_resources = 1, }; -static struct pxa2xx_spi_master pxa_ssp_master_0_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_0_info = { .num_chipselect = 1, }; -static struct pxa2xx_spi_master pxa_ssp_master_1_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_1_info = { .num_chipselect = 1, }; -static struct pxa2xx_spi_master pxa_ssp_master_2_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_2_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index e8a93c088c35..7439798d58e4 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -813,7 +813,7 @@ static struct platform_device tosa_bt_device = { .dev.platform_data = &tosa_bt_data, }; -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c index e2353e75bb28..ad082e11e2a4 100644 --- a/arch/arm/mach-pxa/z2.c +++ b/arch/arm/mach-pxa/z2.c @@ -607,12 +607,12 @@ static struct spi_board_info spi_board_info[] __initdata = { }, }; -static struct pxa2xx_spi_master pxa_ssp1_master_info = { +static struct pxa2xx_spi_controller pxa_ssp1_master_info = { .num_chipselect = 1, .enable_dma = 1, }; -static struct pxa2xx_spi_master pxa_ssp2_master_info = { +static struct pxa2xx_spi_controller pxa_ssp2_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index c411f79d4cb5..3fd1119c14d5 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -391,7 +391,7 @@ static struct platform_device zeus_sram_device = { }; /* SPI interface on SSP3 */ -static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = { +static struct pxa2xx_spi_controller pxa2xx_spi_ssp3_master_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -426,7 +426,7 @@ static struct gpiod_lookup_table can_regulator_gpiod_table = { .dev_id = "reg-fixed-voltage.0", .table = { GPIO_LOOKUP("gpio-pxa", ZEUS_CAN_SHDN_GPIO, - NULL, GPIO_ACTIVE_HIGH), + NULL, GPIO_ACTIVE_LOW), { }, }, }; @@ -547,7 +547,6 @@ static struct regulator_init_data zeus_ohci_regulator_data = { static struct fixed_voltage_config zeus_ohci_regulator_config = { .supply_name = "vbus2", .microvolts = 5000000, /* 5.0V */ - .enable_high = 1, .startup_delay = 0, .init_data = &zeus_ohci_regulator_data, }; diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index dfa42496ec27..d09c3f236186 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -469,7 +469,6 @@ static struct regulator_consumer_supply assabet_cf_vcc_consumers[] = { static struct fixed_voltage_config assabet_cf_vcc_pdata __initdata = { .supply_name = "cf-power", .microvolts = 3300000, - .enable_high = 1, }; static struct gpiod_lookup_table assabet_cf_vcc_gpio_table = { diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c index 
028e50c6383f..a32c3b631484 100644 --- a/arch/arm/mach-tango/pm.c +++ b/arch/arm/mach-tango/pm.c @@ -3,6 +3,7 @@ #include <linux/suspend.h> #include <asm/suspend.h> #include "smc.h" +#include "pm.h" static int tango_pm_powerdown(unsigned long arg) { @@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = { .valid = suspend_valid_only_mem, }; -static int __init tango_pm_init(void) +void __init tango_pm_init(void) { suspend_set_ops(&tango_pm_ops); - return 0; } - -late_initcall(tango_pm_init); diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h new file mode 100644 index 000000000000..35ea705a0ee2 --- /dev/null +++ b/arch/arm/mach-tango/pm.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifdef CONFIG_SUSPEND +void __init tango_pm_init(void); +#else +#define tango_pm_init NULL +#endif diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c index 677dd7b5efd9..824f90737b04 100644 --- a/arch/arm/mach-tango/setup.c +++ b/arch/arm/mach-tango/setup.c @@ -2,6 +2,7 @@ #include <asm/mach/arch.h> #include <asm/hardware/cache-l2x0.h> #include "smc.h" +#include "pm.h" static void tango_l2c_write(unsigned long val, unsigned int reg) { @@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT") .dt_compat = tango_dt_compat, .l2c_aux_mask = ~0, .l2c_write_sec = tango_l2c_write, + .init_late = tango_pm_init, MACHINE_END diff --git a/arch/arm/mm/cache-l2x0-pmu.c b/arch/arm/mm/cache-l2x0-pmu.c index afe5b4c7b164..99bcd074916a 100644 --- a/arch/arm/mm/cache-l2x0-pmu.c +++ b/arch/arm/mm/cache-l2x0-pmu.c @@ -314,14 +314,6 @@ static int l2x0_pmu_event_init(struct perf_event *event) event->attach_state & PERF_ATTACH_TASK) return -EINVAL; - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; @@ -544,6 +536,7 @@ static __init int l2x0_pmu_init(void) .del = l2x0_pmu_event_del, .event_init = l2x0_pmu_event_init, .attr_groups = l2x0_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; l2x0_pmu_reset(); diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f1e2922e447c..1e3e08a1c456 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev) return; arm_teardown_iommu_dma_ops(dev); + /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ + set_dma_ops(dev, NULL); } diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 25b3ee85066e..c8bfbbfdfcc3 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1083,12 +1083,17 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, /* Arithmatic Operation */ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, - const u8 rn, struct jit_ctx *ctx, u8 op) { + const u8 rn, struct jit_ctx *ctx, u8 op, + bool is_jmp64) { switch (op) { case BPF_JSET: - emit(ARM_AND_R(ARM_IP, rt, rn), ctx); - emit(ARM_AND_R(ARM_LR, rd, rm), ctx); - emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); + if (is_jmp64) { + emit(ARM_AND_R(ARM_IP, rt, rn), ctx); + emit(ARM_AND_R(ARM_LR, rd, rm), ctx); + emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); + } else { + emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx); + } break; case BPF_JEQ: case BPF_JNE: @@ -1096,18 +1101,25 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, case BPF_JGE: case BPF_JLE: case BPF_JLT: - emit(ARM_CMP_R(rd, 
rm), ctx); - _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx); + if (is_jmp64) { + emit(ARM_CMP_R(rd, rm), ctx); + /* Only compare low halve if high halve are equal. */ + _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx); + } else { + emit(ARM_CMP_R(rt, rn), ctx); + } break; case BPF_JSLE: case BPF_JSGT: emit(ARM_CMP_R(rn, rt), ctx); - emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx); + if (is_jmp64) + emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx); break; case BPF_JSLT: case BPF_JSGE: emit(ARM_CMP_R(rt, rn), ctx); - emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx); + if (is_jmp64) + emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx); break; } } @@ -1615,6 +1627,17 @@ exit: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: /* Setup source registers */ rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); @@ -1641,6 +1664,17 @@ exit: case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: if (off == 0) break; rm = tmp2[0]; @@ -1652,7 +1686,8 @@ go_jmp: rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Check for the condition */ - emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code)); + emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code), + BPF_CLASS(code) == BPF_JMP); /* Setup JUMP instruction */ jmp_offset = bpf2a32_offset(i+off, i, ctx); diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index f4e58bcdaa43..13a05f759552 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -62,6 +62,7 @@ #define ARM_INST_ADDS_I 0x02900000 #define ARM_INST_AND_R 0x00000000 +#define ARM_INST_ANDS_R 0x00100000 #define ARM_INST_AND_I 0x02000000 #define ARM_INST_BIC_R 0x01c00000 @@ -172,6 +173,7 @@ #define ARM_ADC_I(rd, rn, imm) _AL3_I(ARM_INST_ADC, rd, rn, imm) #define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm) +#define ARM_ANDS_R(rd, rn, rm) _AL3_R(ARM_INST_ANDS, rd, rn, rm) #define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm) #define ARM_BIC_R(rd, rn, rm) _AL3_R(ARM_INST_BIC, rd, rn, rm) diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index a2399fd66e97..a6c81ce00f52 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -18,7 +18,7 @@ #include <linux/clkdev.h> #include <linux/mv643xx_eth.h> #include <linux/mv643xx_i2c.h> -#include <net/dsa.h> +#include <linux/platform_data/dsa.h> #include <linux/platform_data/dma-mv_xor.h> #include <linux/platform_data/usb-ehci-orion.h> #include <plat/common.h> diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index ed36dcab80f1..f51919974183 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c @@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) if (ssp == NULL) return 
-ENODEV; - iounmap(ssp->mmio_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); @@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) list_del(&ssp->node); mutex_unlock(&ssp_lock); - kfree(ssp); return 0; } diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index 2c118a6ab358..0dc23fc227ed 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or } /* Copy arch-dep-instance from template. */ - memcpy(code, (unsigned char *)optprobe_template_entry, + memcpy(code, (unsigned long *)&optprobe_template_entry, TMPL_END_IDX * sizeof(kprobe_opcode_t)); /* Adjust buffer according to instruction. */ diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index b0b80c0f09f3..b11bba542fac 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S @@ -113,8 +113,7 @@ ENTRY(privcmd_call) /* * Disable userspace access from kernel. This is fine to do it - * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is - * called before. + * unconditionally as no set_fs(KERNEL_DS) is called before. */ uaccess_disable r4 diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index cb44aa290e73..e1d44b903dfc 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -7,7 +7,6 @@ #include <linux/of_address.h> #include <linux/slab.h> #include <linux/types.h> -#include <linux/dma-mapping.h> #include <linux/vmalloc.h> #include <linux/swiotlb.h> diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts index b0c64f75792c..8974b5a1d3b1 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts @@ -188,6 +188,7 @@ reg = <0x3a3>; interrupt-parent = <&r_intc>; interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */ }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 837a03dee875..2abb335145a6 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -390,7 +390,7 @@ }; video-codec@1c0e000 { - compatible = "allwinner,sun50i-h5-video-engine"; + compatible = "allwinner,sun50i-a64-video-engine"; reg = <0x01c0e000 0x1000>; clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, <&ccu CLK_DRAM_VE>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index e14e0ce7e89f..016641a41694 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi @@ -187,8 +187,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index 8cd50b75171d..ade2ee09ae96 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts @@ -305,8 +305,7 @@ max-frequency = <200000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddio_ao3v3>; vqmmc-supply = 
<&vddio_tf>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts index 4cf7f6e80c6a..25105ac96d55 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts @@ -238,8 +238,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 2e1cd5e3a246..1cc9dc68ef00 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -258,8 +258,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&tflash_vdd>; vqmmc-supply = <&tf_io>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi index ce862266b9aa..0be0f2a5d2fe 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi @@ -196,8 +196,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index 93a4acf2c46c..ad4d50bd9d77 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi @@ -154,8 +154,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi index ec09bb5792b7..2d2db783c44c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi @@ -211,8 +211,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vcc_3v3>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts index f1c410e2da2b..796baea7a0bf 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts @@ -131,8 +131,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index db293440e4ca..255cede7b447 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -238,8 +238,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; vqmmc-supply = <&vcc_card>; diff 
--git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 6739697be1de..9cbdb85fb591 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts @@ -183,8 +183,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index a1b31013ab6e..bc811a2faf42 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi @@ -137,8 +137,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index 3c3a667a8df8..3f086ed7de05 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -356,8 +356,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index f7a1cffab4a8..8acfd40090d2 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts @@ -147,8 +147,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index 7212dc4531e4..7fa20a8ede17 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts @@ -170,8 +170,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts index fdeb4176fc33..f86b054a74ae 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts @@ -71,3 +71,20 @@ &duart1 { status = "okay"; }; + +&enetc_port0 { + phy-handle = <&sgmii_phy0>; + phy-connection-type = "sgmii"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + sgmii_phy0: ethernet-phy@2 { + reg = <0x2>; + }; + }; +}; + +&enetc_port1 { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index a8cf92af05fb..2896bbcfa3bb 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -335,5 +335,40 @@ <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>; }; + + pcie@1f0000000 { /* Integrated 
Endpoint Root Complex */ + compatible = "pci-host-ecam-generic"; + reg = <0x01 0xf0000000 0x0 0x100000>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + msi-parent = <&its>; + device_type = "pci"; + bus-range = <0x0 0x0>; + dma-coherent; + msi-map = <0 &its 0x17 0xe>; + iommu-map = <0 &smmu 0x17 0xe>; + /* PF0-6 BAR0 - non-prefetchable memory */ + ranges = <0x82000000 0x0 0x00000000 0x1 0xf8000000 0x0 0x160000 + /* PF0-6 BAR2 - prefetchable memory */ + 0xc2000000 0x0 0x00000000 0x1 0xf8160000 0x0 0x070000 + /* PF0: VF0-1 BAR0 - non-prefetchable memory */ + 0x82000000 0x0 0x00000000 0x1 0xf81d0000 0x0 0x020000 + /* PF0: VF0-1 BAR2 - prefetchable memory */ + 0xc2000000 0x0 0x00000000 0x1 0xf81f0000 0x0 0x020000 + /* PF1: VF0-1 BAR0 - non-prefetchable memory */ + 0x82000000 0x0 0x00000000 0x1 0xf8210000 0x0 0x020000 + /* PF1: VF0-1 BAR2 - prefetchable memory */ + 0xc2000000 0x0 0x00000000 0x1 0xf8230000 0x0 0x020000>; + + enetc_port0: ethernet@0,0 { + compatible = "fsl,enetc"; + reg = <0x000000 0 0 0 0>; + }; + enetc_port1: ethernet@0,1 { + compatible = "fsl,enetc"; + reg = <0x000100 0 0 0 0>; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts index 64acccc4bfcb..f74b13aa5aa5 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts @@ -227,34 +227,34 @@ pinctrl_usdhc1_100mhz: usdhc1-100grp { fsl,pins = < - MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 - MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 - MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 - MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 - MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 - MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 - MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 - MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 - MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 - MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 - MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 >; }; pinctrl_usdhc1_200mhz: usdhc1-200grp { fsl,pins = < - MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 - MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 - MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 - MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 - MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 - MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 - MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 - MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 - MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 - MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 - MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi 
b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 8e9d6d5ed7b2..b6d31499fb43 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -360,6 +360,8 @@ <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, <&clk IMX8MQ_CLK_USDHC1_ROOT>; clock-names = "ipg", "ahb", "per"; + assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>; + assigned-clock-rates = <400000000>; fsl,tuning-start-tap = <20>; fsl,tuning-step = <2>; bus-width = <4>; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts index 5b4a9609e31f..2468762283a5 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts @@ -351,7 +351,7 @@ reg = <0>; pinctrl-names = "default"; pinctrl-0 = <&cp0_copper_eth_phy_reset>; - reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>; + reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>; reset-assert-us = <10000>; }; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 99b7495455a6..838e32cc14c9 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -404,7 +404,7 @@ }; intc: interrupt-controller@9bc0000 { - compatible = "arm,gic-v3"; + compatible = "qcom,msm8996-gic-v3", "arm,gic-v3"; #interrupt-cells = <3>; interrupt-controller; #redistributor-regions = <1>; diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index 8d41b69ec2da..99bccaac31ad 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -37,7 +37,7 @@ }; memory@86200000 { - reg = <0x0 0x86200000 0x0 0x2600000>; + reg = <0x0 0x86200000 0x0 0x2d00000>; no-map; }; diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi index 20745a8528c5..719ed9d9067d 100644 --- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi @@ -1011,6 +1011,9 @@ <&cpg CPG_CORE R8A774A1_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi index afedbf5728ec..0648d12778ed 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi @@ -1262,6 +1262,9 @@ <&cpg CPG_CORE R8A7796_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi index 6dc9b1fef830..4b3730f640ef 100644 --- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi @@ -1068,6 +1068,9 @@ <&cpg CPG_CORE R8A77965_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index bd937d68ca3b..040b36ef0dd2 100644 
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -40,6 +40,7 @@ pinctrl-0 = <&usb30_host_drv>; regulator-name = "vcc_host_5v"; regulator-always-on; + regulator-boot-on; vin-supply = <&vcc_sys>; }; @@ -51,6 +52,7 @@ pinctrl-0 = <&usb20_host_drv>; regulator-name = "vcc_host1_5v"; regulator-always-on; + regulator-boot-on; vin-supply = <&vcc_sys>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index 1ee0dc0d9f10..d1cf404b8708 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts @@ -22,7 +22,7 @@ backlight = <&backlight>; power-supply = <&pp3300_disp>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi index c400be64170e..931640e9aed4 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi @@ -200,6 +200,19 @@ pinctrl-0 = <&bl_en>; pwm-delay-us = <10000>; }; + + gpio_keys: gpio-keys { + compatible = "gpio-keys"; + pinctrl-names = "default"; + pinctrl-0 = <&bt_host_wake_l>; + + wake_on_bt: wake-on-bt { + label = "Wake-on-Bluetooth"; + gpios = <&gpio0 3 GPIO_ACTIVE_LOW>; + linux,code = <KEY_WAKEUP>; + wakeup-source; + }; + }; }; &ppvar_bigcpu { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts index 81e73103fa78..15e254a77391 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts @@ -43,7 +43,7 @@ backlight = <&backlight>; power-supply = <&pp3300_disp>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi index fc50b3ef758c..62ea7d6a7d4a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi @@ -175,6 +175,21 @@ pinctrl-0 = <&dmic_en>; wakeup-delay-ms = <250>; }; + + gpio_keys: gpio-keys { + compatible = "gpio-keys"; + pinctrl-names = "default"; + pinctrl-0 = <&pen_eject_odl>; + + pen-insert { + label = "Pen Insert"; + /* Insert = low, eject = high */ + gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; + linux,code = <SW_PEN_INSERTED>; + linux,input-type = <EV_SW>; + wakeup-source; + }; + }; }; /* pp900_s0 aliases */ @@ -328,20 +343,6 @@ camera: &i2c7 { <400000000>; }; -&gpio_keys { - pinctrl-names = "default"; - pinctrl-0 = <&bt_host_wake_l>, <&pen_eject_odl>; - - pen-insert { - label = "Pen Insert"; - /* Insert = low, eject = high */ - gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; - linux,code = <SW_PEN_INSERTED>; - linux,input-type = <EV_SW>; - wakeup-source; - }; -}; - &i2c_tunnel { google,remote-bus = <0>; }; @@ -437,8 +438,19 @@ camera: &i2c7 { status = "okay"; }; -&wake_on_bt { - gpios = <&gpio1 2 GPIO_ACTIVE_LOW>; +&usb_host0_ohci { + #address-cells = <1>; + #size-cells = <0>; + + qca_bt: bluetooth@1 { + compatible = "usbcf3,e300", "usb4ca,301a"; + reg = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&bt_host_wake_l>; + interrupt-parent = <&gpio1>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "wakeup"; + }; }; /* PINCTRL OVERRIDES */ @@ -455,7 +467,7 @@ camera: &i2c7 { }; &bt_host_wake_l { - rockchip,pins = <1 2 RK_FUNC_GPIO 
&pcfg_pull_up>; + rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_none>; }; &ec_ap_int_l { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index ea607a601a86..da03fa9c5662 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -269,19 +269,6 @@ #clock-cells = <0>; }; - gpio_keys: gpio-keys { - compatible = "gpio-keys"; - pinctrl-names = "default"; - pinctrl-0 = <&bt_host_wake_l>; - - wake_on_bt: wake-on-bt { - label = "Wake-on-Bluetooth"; - gpios = <&gpio0 3 GPIO_ACTIVE_LOW>; - linux,code = <KEY_WAKEUP>; - wakeup-source; - }; - }; - max98357a: max98357a { compatible = "maxim,max98357a"; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts index 0b8f1edbd746..b48a63c3efc3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts @@ -91,7 +91,7 @@ pinctrl-0 = <&lcd_panel_reset>; power-supply = <&vcc3v3_s0>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index e3a375c4cb83..1b151442dac1 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data) beq 10f ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ b 7b -8: mov w7, w8 +8: cbz w8, 91f + mov w7, w8 add w8, w8, #16 9: ext v1.16b, v1.16b, v1.16b, #1 adds w7, w7, #1 bne 9b - eor v0.16b, v0.16b, v1.16b +91: eor v0.16b, v0.16b, v1.16b st1 {v0.16b}, [x0] 10: str w8, [x3] ret diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 68b11aa690e4..5fc6f51908fd 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], abytes -= added; } - while (abytes > AES_BLOCK_SIZE) { + while (abytes >= AES_BLOCK_SIZE) { __aes_arm64_encrypt(key->key_enc, mac, mac, num_rounds(key)); crypto_xor(mac, in, AES_BLOCK_SIZE); @@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], num_rounds(key)); crypto_xor(mac, in, abytes); *macp = abytes; - } else { - *macp = 0; } } } @@ -255,7 +253,7 @@ static int ccm_encrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - err = skcipher_walk_aead_encrypt(&walk, req, true); + err = skcipher_walk_aead_encrypt(&walk, req, false); if (may_use_simd()) { while (walk.nbytes) { @@ -313,7 +311,7 @@ static int ccm_decrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - err = skcipher_walk_aead_decrypt(&walk, req, true); + err = skcipher_walk_aead_decrypt(&walk, req, false); if (may_use_simd()) { while (walk.nbytes) { diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index 67700045a0e0..4c7ce231963c 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -320,8 +320,7 @@ AES_ENTRY(aes_ctr_encrypt) .Lctrtailblock: st1 {v0.16b}, [x0] - ldp x29, x30, [sp], #16 - ret + b .Lctrout .Lctrcarry: umov x7, v4.d[0] /* load upper word of ctr */ diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S index 
e613a87f8b53..8432c8d0dea6 100644 --- a/arch/arm64/crypto/aes-neonbs-core.S +++ b/arch/arm64/crypto/aes-neonbs-core.S @@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 ) 8: next_ctr v0 st1 {v0.16b}, [x24] - cbz x23, 0f + cbz x23, .Lctr_done cond_yield_neon 98b b 99b -0: frame_pop +.Lctr_done: + frame_pop ret /* * If we are handling the tail of the input (x6 != NULL), return the * final keystream block back to the caller. */ +0: cbz x25, 8b + st1 {v0.16b}, [x25] + b 8b 1: cbz x25, 8b st1 {v1.16b}, [x25] b 8b diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S index 021bb9e9784b..706c4e10e9e2 100644 --- a/arch/arm64/crypto/chacha-neon-core.S +++ b/arch/arm64/crypto/chacha-neon-core.S @@ -158,8 +158,8 @@ ENTRY(hchacha_block_neon) mov w3, w2 bl chacha_permute - st1 {v0.16b}, [x1], #16 - st1 {v3.16b}, [x1] + st1 {v0.4s}, [x1], #16 + st1 {v3.4s}, [x1] ldp x29, x30, [sp], #16 ret @@ -532,6 +532,10 @@ ENTRY(chacha_4block_xor_neon) add v3.4s, v3.4s, v19.4s add a2, a2, w8 add a3, a3, w9 +CPU_BE( rev a0, a0 ) +CPU_BE( rev a1, a1 ) +CPU_BE( rev a2, a2 ) +CPU_BE( rev a3, a3 ) ld4r {v24.4s-v27.4s}, [x0], #16 ld4r {v28.4s-v31.4s}, [x0] @@ -552,6 +556,10 @@ ENTRY(chacha_4block_xor_neon) add v7.4s, v7.4s, v23.4s add a6, a6, w8 add a7, a7, w9 +CPU_BE( rev a4, a4 ) +CPU_BE( rev a5, a5 ) +CPU_BE( rev a6, a6 ) +CPU_BE( rev a7, a7 ) // x8[0-3] += s2[0] // x9[0-3] += s2[1] @@ -569,6 +577,10 @@ ENTRY(chacha_4block_xor_neon) add v11.4s, v11.4s, v27.4s add a10, a10, w8 add a11, a11, w9 +CPU_BE( rev a8, a8 ) +CPU_BE( rev a9, a9 ) +CPU_BE( rev a10, a10 ) +CPU_BE( rev a11, a11 ) // x12[0-3] += s3[0] // x13[0-3] += s3[1] @@ -586,6 +598,10 @@ ENTRY(chacha_4block_xor_neon) add v15.4s, v15.4s, v31.4s add a14, a14, w8 add a15, a15, w9 +CPU_BE( rev a12, a12 ) +CPU_BE( rev a13, a13 ) +CPU_BE( rev a14, a14 ) +CPU_BE( rev a15, a15 ) // interleave 32-bit words in state n, n+1 ldp w6, w7, [x2], #64 diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S index 9e82e8e8ed05..e545b42e6a46 100644 --- a/arch/arm64/crypto/crct10dif-ce-core.S +++ b/arch/arm64/crypto/crct10dif-ce-core.S @@ -2,12 +2,14 @@ // Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions // // Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> +// Copyright (C) 2019 Google LLC <ebiggers@google.com> // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License version 2 as // published by the Free Software Foundation. // +// Derived from the x86 version: // // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions // @@ -54,19 +56,11 @@ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// -// Function API: -// UINT16 crc_t10dif_pcl( -// UINT16 init_crc, //initial CRC value, 16 bits -// const unsigned char *buf, //buffer pointer to calculate CRC on -// UINT64 len //buffer length in bytes (64-bit data) -// ); -// // Reference paper titled "Fast CRC Computation for Generic // Polynomials Using PCLMULQDQ Instruction" // URL: http://www.intel.com/content/dam/www/public/us/en/documents // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf // -// #include <linux/linkage.h> #include <asm/assembler.h> @@ -74,14 +68,14 @@ .text .cpu generic+crypto - arg1_low32 .req w19 - arg2 .req x20 - arg3 .req x21 + init_crc .req w19 + buf .req x20 + len .req x21 + fold_consts_ptr .req x22 - vzr .req v13 + fold_consts .req v10 ad .req v14 - bd .req v10 k00_16 .req v15 k32_48 .req v16 @@ -143,11 +137,11 @@ __pmull_p8_core: ext t5.8b, ad.8b, ad.8b, #2 // A2 ext t6.8b, ad.8b, ad.8b, #3 // A3 - pmull t4.8h, t4.8b, bd.8b // F = A1*B + pmull t4.8h, t4.8b, fold_consts.8b // F = A1*B pmull t8.8h, ad.8b, bd1.8b // E = A*B1 - pmull t5.8h, t5.8b, bd.8b // H = A2*B + pmull t5.8h, t5.8b, fold_consts.8b // H = A2*B pmull t7.8h, ad.8b, bd2.8b // G = A*B2 - pmull t6.8h, t6.8b, bd.8b // J = A3*B + pmull t6.8h, t6.8b, fold_consts.8b // J = A3*B pmull t9.8h, ad.8b, bd3.8b // I = A*B3 pmull t3.8h, ad.8b, bd4.8b // K = A*B4 b 0f @@ -157,11 +151,11 @@ __pmull_p8_core: tbl t5.16b, {ad.16b}, perm2.16b // A2 tbl t6.16b, {ad.16b}, perm3.16b // A3 - pmull2 t4.8h, t4.16b, bd.16b // F = A1*B + pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1 - pmull2 t5.8h, t5.16b, bd.16b // H = A2*B + pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2 - pmull2 t6.8h, t6.16b, bd.16b // J = A3*B + pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3 pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4 @@ -203,14 +197,14 @@ __pmull_p8_core: ENDPROC(__pmull_p8_core) .macro __pmull_p8, rq, ad, bd, i - .ifnc \bd, v10 + .ifnc \bd, fold_consts .err .endif mov ad.16b, \ad\().16b .ifb \i - pmull \rq\().8h, \ad\().8b, bd.8b // D = A*B + pmull \rq\().8h, \ad\().8b, \bd\().8b // D = A*B .else - pmull2 \rq\().8h, \ad\().16b, bd.16b // D = A*B + pmull2 \rq\().8h, \ad\().16b, \bd\().16b // D = A*B .endif bl .L__pmull_p8_core\i @@ -219,17 +213,19 @@ ENDPROC(__pmull_p8_core) eor \rq\().16b, \rq\().16b, t6.16b .endm - .macro fold64, p, reg1, reg2 - ldp q11, q12, [arg2], #0x20 + // Fold reg1, reg2 into the next 32 data bytes, storing the result back + // into reg1, reg2. 
+ .macro fold_32_bytes, p, reg1, reg2 + ldp q11, q12, [buf], #0x20 - __pmull_\p v8, \reg1, v10, 2 - __pmull_\p \reg1, \reg1, v10 + __pmull_\p v8, \reg1, fold_consts, 2 + __pmull_\p \reg1, \reg1, fold_consts CPU_LE( rev64 v11.16b, v11.16b ) CPU_LE( rev64 v12.16b, v12.16b ) - __pmull_\p v9, \reg2, v10, 2 - __pmull_\p \reg2, \reg2, v10 + __pmull_\p v9, \reg2, fold_consts, 2 + __pmull_\p \reg2, \reg2, fold_consts CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 ) CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) @@ -240,15 +236,16 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) eor \reg2\().16b, \reg2\().16b, v12.16b .endm - .macro fold16, p, reg, rk - __pmull_\p v8, \reg, v10 - __pmull_\p \reg, \reg, v10, 2 - .ifnb \rk - ldr_l q10, \rk, x8 - __pmull_pre_\p v10 + // Fold src_reg into dst_reg, optionally loading the next fold constants + .macro fold_16_bytes, p, src_reg, dst_reg, load_next_consts + __pmull_\p v8, \src_reg, fold_consts + __pmull_\p \src_reg, \src_reg, fold_consts, 2 + .ifnb \load_next_consts + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts .endif - eor v7.16b, v7.16b, v8.16b - eor v7.16b, v7.16b, \reg\().16b + eor \dst_reg\().16b, \dst_reg\().16b, v8.16b + eor \dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b .endm .macro __pmull_p64, rd, rn, rm, n @@ -260,40 +257,27 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) .endm .macro crc_t10dif_pmull, p - frame_push 3, 128 + frame_push 4, 128 - mov arg1_low32, w0 - mov arg2, x1 - mov arg3, x2 - - movi vzr.16b, #0 // init zero register + mov init_crc, w0 + mov buf, x1 + mov len, x2 __pmull_init_\p - // adjust the 16-bit initial_crc value, scale it to 32 bits - lsl arg1_low32, arg1_low32, #16 - - // check if smaller than 256 - cmp arg3, #256 - - // for sizes less than 128, we can't fold 64B at a time... - b.lt .L_less_than_128_\@ + // For sizes less than 256 bytes, we can't fold 128 bytes at a time. + cmp len, #256 + b.lt .Lless_than_256_bytes_\@ - // load the initial crc value - // crc value does not need to be byte-reflected, but it needs - // to be moved to the high part of the register. - // because data will be byte-reflected and will align with - // initial crc at correct place. - movi v10.16b, #0 - mov v10.s[3], arg1_low32 // initial crc - - // receive the initial 64B data, xor the initial crc value - ldp q0, q1, [arg2] - ldp q2, q3, [arg2, #0x20] - ldp q4, q5, [arg2, #0x40] - ldp q6, q7, [arg2, #0x60] - add arg2, arg2, #0x80 + adr_l fold_consts_ptr, .Lfold_across_128_bytes_consts + // Load the first 128 data bytes. Byte swapping is necessary to make + // the bit order match the polynomial coefficient order. + ldp q0, q1, [buf] + ldp q2, q3, [buf, #0x20] + ldp q4, q5, [buf, #0x40] + ldp q6, q7, [buf, #0x60] + add buf, buf, #0x80 CPU_LE( rev64 v0.16b, v0.16b ) CPU_LE( rev64 v1.16b, v1.16b ) CPU_LE( rev64 v2.16b, v2.16b ) @@ -302,7 +286,6 @@ CPU_LE( rev64 v4.16b, v4.16b ) CPU_LE( rev64 v5.16b, v5.16b ) CPU_LE( rev64 v6.16b, v6.16b ) CPU_LE( rev64 v7.16b, v7.16b ) - CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 ) @@ -312,36 +295,29 @@ CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 ) CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 ) CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) - // XOR the initial_crc value - eor v0.16b, v0.16b, v10.16b - - ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4 - // type of pmull instruction - // will determine which constant to use - __pmull_pre_\p v10 + // XOR the first 16 data *bits* with the initial CRC value. 
+ movi v8.16b, #0 + mov v8.h[7], init_crc + eor v0.16b, v0.16b, v8.16b - // - // we subtract 256 instead of 128 to save one instruction from the loop - // - sub arg3, arg3, #256 + // Load the constants for folding across 128 bytes. + ld1 {fold_consts.2d}, [fold_consts_ptr] + __pmull_pre_\p fold_consts - // at this section of the code, there is 64*x+y (0<=y<64) bytes of - // buffer. The _fold_64_B_loop will fold 64B at a time - // until we have 64+y Bytes of buffer + // Subtract 128 for the 128 data bytes just consumed. Subtract another + // 128 to simplify the termination condition of the following loop. + sub len, len, #256 - // fold 64B at a time. This section of the code folds 4 vector - // registers in parallel -.L_fold_64_B_loop_\@: + // While >= 128 data bytes remain (not counting v0-v7), fold the 128 + // bytes v0-v7 into them, storing the result back into v0-v7. +.Lfold_128_bytes_loop_\@: + fold_32_bytes \p, v0, v1 + fold_32_bytes \p, v2, v3 + fold_32_bytes \p, v4, v5 + fold_32_bytes \p, v6, v7 - fold64 \p, v0, v1 - fold64 \p, v2, v3 - fold64 \p, v4, v5 - fold64 \p, v6, v7 - - subs arg3, arg3, #128 - - // check if there is another 64B in the buffer to be able to fold - b.lt .L_fold_64_B_end_\@ + subs len, len, #128 + b.lt .Lfold_128_bytes_loop_done_\@ if_will_cond_yield_neon stp q0, q1, [sp, #.Lframe_local_offset] @@ -353,228 +329,207 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) ldp q2, q3, [sp, #.Lframe_local_offset + 32] ldp q4, q5, [sp, #.Lframe_local_offset + 64] ldp q6, q7, [sp, #.Lframe_local_offset + 96] - ldr_l q10, rk3, x8 - movi vzr.16b, #0 // init zero register + ld1 {fold_consts.2d}, [fold_consts_ptr] __pmull_init_\p - __pmull_pre_\p v10 + __pmull_pre_\p fold_consts endif_yield_neon - b .L_fold_64_B_loop_\@ - -.L_fold_64_B_end_\@: - // at this point, the buffer pointer is pointing at the last y Bytes - // of the buffer the 64B of folded data is in 4 of the vector - // registers: v0, v1, v2, v3 - - // fold the 8 vector registers to 1 vector register with different - // constants - - ldr_l q10, rk9, x8 - __pmull_pre_\p v10 - - fold16 \p, v0, rk11 - fold16 \p, v1, rk13 - fold16 \p, v2, rk15 - fold16 \p, v3, rk17 - fold16 \p, v4, rk19 - fold16 \p, v5, rk1 - fold16 \p, v6 - - // instead of 64, we add 48 to the loop counter to save 1 instruction - // from the loop instead of a cmp instruction, we use the negative - // flag with the jl instruction - adds arg3, arg3, #(128-16) - b.lt .L_final_reduction_for_128_\@ - - // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 - // and the rest is in memory. We can fold 16 bytes at a time if y>=16 - // continue folding 16B at a time - -.L_16B_reduction_loop_\@: - __pmull_\p v8, v7, v10 - __pmull_\p v7, v7, v10, 2 + b .Lfold_128_bytes_loop_\@ + +.Lfold_128_bytes_loop_done_\@: + + // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7. + + // Fold across 64 bytes. + add fold_consts_ptr, fold_consts_ptr, #16 + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts + fold_16_bytes \p, v0, v4 + fold_16_bytes \p, v1, v5 + fold_16_bytes \p, v2, v6 + fold_16_bytes \p, v3, v7, 1 + // Fold across 32 bytes. + fold_16_bytes \p, v4, v6 + fold_16_bytes \p, v5, v7, 1 + // Fold across 16 bytes. + fold_16_bytes \p, v6, v7 + + // Add 128 to get the correct number of data bytes remaining in 0...127 + // (not counting v7), following the previous extra subtraction by 128. + // Then subtract 16 to simplify the termination condition of the + // following loop. 
+ adds len, len, #(128-16) + + // While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7 + // into them, storing the result back into v7. + b.lt .Lfold_16_bytes_loop_done_\@ +.Lfold_16_bytes_loop_\@: + __pmull_\p v8, v7, fold_consts + __pmull_\p v7, v7, fold_consts, 2 eor v7.16b, v7.16b, v8.16b - - ldr q0, [arg2], #16 + ldr q0, [buf], #16 CPU_LE( rev64 v0.16b, v0.16b ) CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) eor v7.16b, v7.16b, v0.16b - subs arg3, arg3, #16 - - // instead of a cmp instruction, we utilize the flags with the - // jge instruction equivalent of: cmp arg3, 16-16 - // check if there is any more 16B in the buffer to be able to fold - b.ge .L_16B_reduction_loop_\@ - - // now we have 16+z bytes left to reduce, where 0<= z < 16. - // first, we reduce the data in the xmm7 register - -.L_final_reduction_for_128_\@: - // check if any more data to fold. If not, compute the CRC of - // the final 128 bits - adds arg3, arg3, #16 - b.eq .L_128_done_\@ - - // here we are getting data that is less than 16 bytes. - // since we know that there was data before the pointer, we can - // offset the input pointer before the actual point, to receive - // exactly 16 bytes. after that the registers need to be adjusted. -.L_get_last_two_regs_\@: - add arg2, arg2, arg3 - ldr q1, [arg2, #-16] -CPU_LE( rev64 v1.16b, v1.16b ) -CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) - - // get rid of the extra data that was loaded before - // load the shift constant - adr_l x4, tbl_shf_table + 16 - sub x4, x4, arg3 - ld1 {v0.16b}, [x4] - - // shift v2 to the left by arg3 bytes - tbl v2.16b, {v7.16b}, v0.16b - - // shift v7 to the right by 16-arg3 bytes - movi v9.16b, #0x80 - eor v0.16b, v0.16b, v9.16b - tbl v7.16b, {v7.16b}, v0.16b - - // blend - sshr v0.16b, v0.16b, #7 // convert to 8-bit mask - bsl v0.16b, v2.16b, v1.16b - - // fold 16 Bytes - __pmull_\p v8, v7, v10 - __pmull_\p v7, v7, v10, 2 - eor v7.16b, v7.16b, v8.16b - eor v7.16b, v7.16b, v0.16b + subs len, len, #16 + b.ge .Lfold_16_bytes_loop_\@ + +.Lfold_16_bytes_loop_done_\@: + // Add 16 to get the correct number of data bytes remaining in 0...15 + // (not counting v7), following the previous extra subtraction by 16. + adds len, len, #16 + b.eq .Lreduce_final_16_bytes_\@ + +.Lhandle_partial_segment_\@: + // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first + // 16 bytes are in v7 and the rest are the remaining data in 'buf'. To + // do this without needing a fold constant for each possible 'len', + // redivide the bytes into a first chunk of 'len' bytes and a second + // chunk of 16 bytes, then fold the first chunk into the second. + + // v0 = last 16 original data bytes + add buf, buf, len + ldr q0, [buf, #-16] +CPU_LE( rev64 v0.16b, v0.16b ) +CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) -.L_128_done_\@: - // compute crc of a 128-bit value - ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10 - __pmull_pre_\p v10 + // v1 = high order part of second chunk: v7 left-shifted by 'len' bytes. + adr_l x4, .Lbyteshift_table + 16 + sub x4, x4, len + ld1 {v2.16b}, [x4] + tbl v1.16b, {v7.16b}, v2.16b - // 64b fold - ext v0.16b, vzr.16b, v7.16b, #8 - mov v7.d[0], v7.d[1] - __pmull_\p v7, v7, v10 - eor v7.16b, v7.16b, v0.16b + // v3 = first chunk: v7 right-shifted by '16-len' bytes. 
+ movi v3.16b, #0x80 + eor v2.16b, v2.16b, v3.16b + tbl v3.16b, {v7.16b}, v2.16b - // 32b fold - ext v0.16b, v7.16b, vzr.16b, #4 - mov v7.s[3], vzr.s[0] - __pmull_\p v0, v0, v10, 2 - eor v7.16b, v7.16b, v0.16b + // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes. + sshr v2.16b, v2.16b, #7 - // barrett reduction - ldr_l q10, rk7, x8 - __pmull_pre_\p v10 - mov v0.d[0], v7.d[1] + // v2 = second chunk: 'len' bytes from v0 (low-order bytes), + // then '16-len' bytes from v1 (high-order bytes). + bsl v2.16b, v1.16b, v0.16b - __pmull_\p v0, v0, v10 - ext v0.16b, vzr.16b, v0.16b, #12 - __pmull_\p v0, v0, v10, 2 - ext v0.16b, vzr.16b, v0.16b, #12 + // Fold the first chunk into the second chunk, storing the result in v7. + __pmull_\p v0, v3, fold_consts + __pmull_\p v7, v3, fold_consts, 2 eor v7.16b, v7.16b, v0.16b - mov w0, v7.s[1] - -.L_cleanup_\@: - // scale the result back to 16 bits - lsr x0, x0, #16 + eor v7.16b, v7.16b, v2.16b + +.Lreduce_final_16_bytes_\@: + // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC. + + movi v2.16b, #0 // init zero register + + // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts + + // Fold the high 64 bits into the low 64 bits, while also multiplying by + // x^64. This produces a 128-bit value congruent to x^64 * M(x) and + // whose low 48 bits are 0. + ext v0.16b, v2.16b, v7.16b, #8 + __pmull_\p v7, v7, fold_consts, 2 // high bits * x^48 * (x^80 mod G(x)) + eor v0.16b, v0.16b, v7.16b // + low bits * x^64 + + // Fold the high 32 bits into the low 96 bits. This produces a 96-bit + // value congruent to x^64 * M(x) and whose low 48 bits are 0. + ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits + mov v0.s[3], v2.s[0] // zero high 32 bits + __pmull_\p v1, v1, fold_consts // high 32 bits * x^48 * (x^48 mod G(x)) + eor v0.16b, v0.16b, v1.16b // + low bits + + // Load G(x) and floor(x^48 / G(x)). + ld1 {fold_consts.2d}, [fold_consts_ptr] + __pmull_pre_\p fold_consts + + // Use Barrett reduction to compute the final CRC value. + __pmull_\p v1, v0, fold_consts, 2 // high 32 bits * floor(x^48 / G(x)) + ushr v1.2d, v1.2d, #32 // /= x^32 + __pmull_\p v1, v1, fold_consts // *= G(x) + ushr v0.2d, v0.2d, #48 + eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits + // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0. + + umov w0, v0.h[0] frame_pop ret -.L_less_than_128_\@: - cbz arg3, .L_cleanup_\@ +.Lless_than_256_bytes_\@: + // Checksumming a buffer of length 16...255 bytes - movi v0.16b, #0 - mov v0.s[3], arg1_low32 // get the initial crc value + adr_l fold_consts_ptr, .Lfold_across_16_bytes_consts - ldr q7, [arg2], #0x10 + // Load the first 16 data bytes. + ldr q7, [buf], #0x10 CPU_LE( rev64 v7.16b, v7.16b ) CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) - eor v7.16b, v7.16b, v0.16b // xor the initial crc value - - cmp arg3, #16 - b.eq .L_128_done_\@ // exactly 16 left - b.lt .L_less_than_16_left_\@ - - ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10 - __pmull_pre_\p v10 - - // update the counter. subtract 32 instead of 16 to save one - // instruction from the loop - subs arg3, arg3, #32 - b.ge .L_16B_reduction_loop_\@ - - add arg3, arg3, #16 - b .L_get_last_two_regs_\@ - -.L_less_than_16_left_\@: - // shl r9, 4 - adr_l x0, tbl_shf_table + 16 - sub x0, x0, arg3 - ld1 {v0.16b}, [x0] - movi v9.16b, #0x80 - eor v0.16b, v0.16b, v9.16b - tbl v7.16b, {v7.16b}, v0.16b - b .L_128_done_\@ + + // XOR the first 16 data *bits* with the initial CRC value. 
+ movi v0.16b, #0 + mov v0.h[7], init_crc + eor v7.16b, v7.16b, v0.16b + + // Load the fold-across-16-bytes constants. + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts + + cmp len, #16 + b.eq .Lreduce_final_16_bytes_\@ // len == 16 + subs len, len, #32 + b.ge .Lfold_16_bytes_loop_\@ // 32 <= len <= 255 + add len, len, #16 + b .Lhandle_partial_segment_\@ // 17 <= len <= 31 .endm +// +// u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len); +// +// Assumes len >= 16. +// ENTRY(crc_t10dif_pmull_p8) crc_t10dif_pmull p8 ENDPROC(crc_t10dif_pmull_p8) .align 5 +// +// u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len); +// +// Assumes len >= 16. +// ENTRY(crc_t10dif_pmull_p64) crc_t10dif_pmull p64 ENDPROC(crc_t10dif_pmull_p64) -// precomputed constants -// these constants are precomputed from the poly: -// 0x8bb70000 (0x8bb7 scaled to 32 bits) .section ".rodata", "a" .align 4 -// Q = 0x18BB70000 -// rk1 = 2^(32*3) mod Q << 32 -// rk2 = 2^(32*5) mod Q << 32 -// rk3 = 2^(32*15) mod Q << 32 -// rk4 = 2^(32*17) mod Q << 32 -// rk5 = 2^(32*3) mod Q << 32 -// rk6 = 2^(32*2) mod Q << 32 -// rk7 = floor(2^64/Q) -// rk8 = Q - -rk1: .octa 0x06df0000000000002d56000000000000 -rk3: .octa 0x7cf50000000000009d9d000000000000 -rk5: .octa 0x13680000000000002d56000000000000 -rk7: .octa 0x000000018bb7000000000001f65a57f8 -rk9: .octa 0xbfd6000000000000ceae000000000000 -rk11: .octa 0x713c0000000000001e16000000000000 -rk13: .octa 0x80a6000000000000f7f9000000000000 -rk15: .octa 0xe658000000000000044c000000000000 -rk17: .octa 0xa497000000000000ad18000000000000 -rk19: .octa 0xe7b50000000000006ee3000000000000 - -tbl_shf_table: -// use these values for shift constants for the tbl/tbx instruction -// different alignments result in values as shown: -// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 -// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 -// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 -// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 -// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 -// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 -// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 -// DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 -// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 -// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 -// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 -// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 -// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 -// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 -// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 +// Fold constants precomputed from the polynomial 0x18bb7 +// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 +.Lfold_across_128_bytes_consts: + .quad 0x0000000000006123 // x^(8*128) mod G(x) + .quad 0x0000000000002295 // x^(8*128+64) mod G(x) +// .Lfold_across_64_bytes_consts: + .quad 0x0000000000001069 // x^(4*128) mod G(x) + .quad 0x000000000000dd31 // x^(4*128+64) mod G(x) +// .Lfold_across_32_bytes_consts: + .quad 0x000000000000857d // x^(2*128) mod G(x) + .quad 0x0000000000007acc // x^(2*128+64) mod G(x) +.Lfold_across_16_bytes_consts: + .quad 0x000000000000a010 // x^(1*128) mod G(x) + .quad 0x0000000000001faa // x^(1*128+64) mod G(x) +// .Lfinal_fold_consts: + .quad 0x1368000000000000 // x^48 * (x^48 mod G(x)) + .quad 
0x2d56000000000000 // x^48 * (x^80 mod G(x)) +// .Lbarrett_reduction_consts: + .quad 0x0000000000018bb7 // G(x) + .quad 0x00000001f65a57f8 // floor(x^48 / G(x)) + +// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - +// len] is the index vector to shift left by 'len' bytes, and is also {0x80, +// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes. +.Lbyteshift_table: .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c index b461d62023f2..dd325829ee44 100644 --- a/arch/arm64/crypto/crct10dif-ce-glue.c +++ b/arch/arm64/crypto/crct10dif-ce-glue.c @@ -22,10 +22,8 @@ #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U -asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 buf[], u64 len); -asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 buf[], u64 len); - -static u16 (*crc_t10dif_pmull)(u16 init_crc, const u8 buf[], u64 len); +asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len); +asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len); static int crct10dif_init(struct shash_desc *desc) { @@ -35,30 +33,33 @@ static int crct10dif_init(struct shash_desc *desc) return 0; } -static int crct10dif_update(struct shash_desc *desc, const u8 *data, +static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data, unsigned int length) { u16 *crc = shash_desc_ctx(desc); - unsigned int l; - if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - - ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull_p8(*crc, data, length); + kernel_neon_end(); + } else { + *crc = crc_t10dif_generic(*crc, data, length); + } - *crc = crc_t10dif_generic(*crc, data, l); + return 0; +} - length -= l; - data += l; - } +static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + u16 *crc = shash_desc_ctx(desc); - if (length > 0) { - if (may_use_simd()) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull(*crc, data, length); - kernel_neon_end(); - } else { - *crc = crc_t10dif_generic(*crc, data, length); - } + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull_p64(*crc, data, length); + kernel_neon_end(); + } else { + *crc = crc_t10dif_generic(*crc, data, length); } return 0; @@ -72,10 +73,22 @@ static int crct10dif_final(struct shash_desc *desc, u8 *out) return 0; } -static struct shash_alg crc_t10dif_alg = { +static struct shash_alg crc_t10dif_alg[] = {{ .digestsize = CRC_T10DIF_DIGEST_SIZE, .init = crct10dif_init, - .update = crct10dif_update, + .update = crct10dif_update_pmull_p8, + .final = crct10dif_final, + .descsize = CRC_T10DIF_DIGEST_SIZE, + + .base.cra_name = "crct10dif", + .base.cra_driver_name = "crct10dif-arm64-neon", + .base.cra_priority = 100, + .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = CRC_T10DIF_DIGEST_SIZE, + .init = crct10dif_init, + .update = crct10dif_update_pmull_p64, .final = crct10dif_final, .descsize = CRC_T10DIF_DIGEST_SIZE, @@ -84,21 +97,25 @@ static struct shash_alg crc_t10dif_alg = { .base.cra_priority = 200, .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, .base.cra_module = THIS_MODULE, -}; +}}; 
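
The fold and Barrett-reduction constants above are all derived from the CRC-T10DIF generator polynomial G(x) = 0x18bb7. As a point of reference (not part of this patch), a plain bit-at-a-time implementation of the same CRC — essentially what crc_t10dif_generic() computes and what the PMULL code must agree with — can be sketched as below; the function name and the zero initial value are illustrative assumptions.

#include <stddef.h>
#include <stdint.h>

/*
 * Bit-serial CRC-T10DIF reference (illustrative only).
 * G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1,
 * i.e. 0x8bb7 with the implicit x^16 term dropped; MSB-first, no reflection.
 */
static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		/* feed the next message byte into the high bits of the CRC */
		crc ^= (uint16_t)buf[i] << 8;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
					     : (uint16_t)(crc << 1);
	}
	return crc;	/* the initial CRC value is conventionally 0 */
}
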
static int __init crc_t10dif_mod_init(void) { if (elf_hwcap & HWCAP_PMULL) - crc_t10dif_pmull = crc_t10dif_pmull_p64; + return crypto_register_shashes(crc_t10dif_alg, + ARRAY_SIZE(crc_t10dif_alg)); else - crc_t10dif_pmull = crc_t10dif_pmull_p8; - - return crypto_register_shash(&crc_t10dif_alg); + /* only register the first array element */ + return crypto_register_shash(crc_t10dif_alg); } static void __exit crc_t10dif_mod_exit(void) { - crypto_unregister_shash(&crc_t10dif_alg); + if (elf_hwcap & HWCAP_PMULL) + crypto_unregister_shashes(crc_t10dif_alg, + ARRAY_SIZE(crc_t10dif_alg)); + else + crypto_unregister_shash(crc_t10dif_alg); } module_cpu_feature_match(ASIMD, crc_t10dif_mod_init); diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 067d8937d5af..791ad422c427 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -60,10 +60,6 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, struct ghash_key const *k, const char *head); -static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); - asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], const u8 src[], struct ghash_key const *k, u8 ctr[], u32 const rk[], int rounds, @@ -87,11 +83,15 @@ static int ghash_init(struct shash_desc *desc) } static void ghash_do_update(int blocks, u64 dg[], const char *src, - struct ghash_key *key, const char *head) + struct ghash_key *key, const char *head, + void (*simd_update)(int blocks, u64 dg[], + const char *src, + struct ghash_key const *k, + const char *head)) { if (likely(may_use_simd())) { kernel_neon_begin(); - pmull_ghash_update(blocks, dg, src, key, head); + simd_update(blocks, dg, src, key, head); kernel_neon_end(); } else { be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; @@ -119,8 +119,12 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, /* avoid hogging the CPU for too long */ #define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE) -static int ghash_update(struct shash_desc *desc, const u8 *src, - unsigned int len) +static int __ghash_update(struct shash_desc *desc, const u8 *src, + unsigned int len, + void (*simd_update)(int blocks, u64 dg[], + const char *src, + struct ghash_key const *k, + const char *head)) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -146,7 +150,8 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, int chunk = min(blocks, MAX_BLOCKS); ghash_do_update(chunk, ctx->digest, src, key, - partial ? ctx->buf : NULL); + partial ? 
ctx->buf : NULL, + simd_update); blocks -= chunk; src += chunk * GHASH_BLOCK_SIZE; @@ -158,7 +163,19 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, return 0; } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_update_p8(struct shash_desc *desc, const u8 *src, + unsigned int len) +{ + return __ghash_update(desc, src, len, pmull_ghash_update_p8); +} + +static int ghash_update_p64(struct shash_desc *desc, const u8 *src, + unsigned int len) +{ + return __ghash_update(desc, src, len, pmull_ghash_update_p64); +} + +static int ghash_final_p8(struct shash_desc *desc, u8 *dst) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -168,7 +185,28 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); - ghash_do_update(1, ctx->digest, ctx->buf, key, NULL); + ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, + pmull_ghash_update_p8); + } + put_unaligned_be64(ctx->digest[1], dst); + put_unaligned_be64(ctx->digest[0], dst + 8); + + *ctx = (struct ghash_desc_ctx){}; + return 0; +} + +static int ghash_final_p64(struct shash_desc *desc, u8 *dst) +{ + struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); + unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; + + if (partial) { + struct ghash_key *key = crypto_shash_ctx(desc->tfm); + + memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); + + ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, + pmull_ghash_update_p64); } put_unaligned_be64(ctx->digest[1], dst); put_unaligned_be64(ctx->digest[0], dst + 8); @@ -224,7 +262,21 @@ static int ghash_setkey(struct crypto_shash *tfm, return __ghash_setkey(key, inkey, keylen); } -static struct shash_alg ghash_alg = { +static struct shash_alg ghash_alg[] = {{ + .base.cra_name = "ghash", + .base.cra_driver_name = "ghash-neon", + .base.cra_priority = 100, + .base.cra_blocksize = GHASH_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct ghash_key), + .base.cra_module = THIS_MODULE, + + .digestsize = GHASH_DIGEST_SIZE, + .init = ghash_init, + .update = ghash_update_p8, + .final = ghash_final_p8, + .setkey = ghash_setkey, + .descsize = sizeof(struct ghash_desc_ctx), +}, { .base.cra_name = "ghash", .base.cra_driver_name = "ghash-ce", .base.cra_priority = 200, @@ -234,11 +286,11 @@ static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, - .update = ghash_update, - .final = ghash_final, + .update = ghash_update_p64, + .final = ghash_final_p64, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), -}; +}}; static int num_rounds(struct crypto_aes_ctx *ctx) { @@ -301,7 +353,8 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[], int blocks = count / GHASH_BLOCK_SIZE; ghash_do_update(blocks, dg, src, &ctx->ghash_key, - *buf_count ? buf : NULL); + *buf_count ? 
buf : NULL, + pmull_ghash_update_p64); src += blocks * GHASH_BLOCK_SIZE; count %= GHASH_BLOCK_SIZE; @@ -345,7 +398,8 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[]) if (buf_count) { memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count); - ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL); + ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL, + pmull_ghash_update_p64); } } @@ -358,7 +412,8 @@ static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx, lengths.a = cpu_to_be64(req->assoclen * 8); lengths.b = cpu_to_be64(cryptlen * 8); - ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL); + ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL, + pmull_ghash_update_p64); put_unaligned_be64(dg[1], mac); put_unaligned_be64(dg[0], mac + 8); @@ -434,7 +489,7 @@ static int gcm_encrypt(struct aead_request *req) ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg, walk.dst.virt.addr, &ctx->ghash_key, - NULL); + NULL, pmull_ghash_update_p64); err = skcipher_walk_done(&walk, walk.nbytes % (2 * AES_BLOCK_SIZE)); @@ -469,7 +524,8 @@ static int gcm_encrypt(struct aead_request *req) memcpy(buf, dst, nbytes); memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes); - ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head); + ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head, + pmull_ghash_update_p64); err = skcipher_walk_done(&walk, 0); } @@ -558,7 +614,8 @@ static int gcm_decrypt(struct aead_request *req) u8 *src = walk.src.virt.addr; ghash_do_update(blocks, dg, walk.src.virt.addr, - &ctx->ghash_key, NULL); + &ctx->ghash_key, NULL, + pmull_ghash_update_p64); do { __aes_arm64_encrypt(ctx->aes_key.key_enc, @@ -602,7 +659,8 @@ static int gcm_decrypt(struct aead_request *req) memcpy(buf, src, nbytes); memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes); - ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head); + ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head, + pmull_ghash_update_p64); crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv, walk.nbytes); @@ -650,26 +708,30 @@ static int __init ghash_ce_mod_init(void) return -ENODEV; if (elf_hwcap & HWCAP_PMULL) - pmull_ghash_update = pmull_ghash_update_p64; - + ret = crypto_register_shashes(ghash_alg, + ARRAY_SIZE(ghash_alg)); else - pmull_ghash_update = pmull_ghash_update_p8; + /* only register the first array element */ + ret = crypto_register_shash(ghash_alg); - ret = crypto_register_shash(&ghash_alg); if (ret) return ret; if (elf_hwcap & HWCAP_PMULL) { ret = crypto_register_aead(&gcm_aes_alg); if (ret) - crypto_unregister_shash(&ghash_alg); + crypto_unregister_shashes(ghash_alg, + ARRAY_SIZE(ghash_alg)); } return ret; } static void __exit ghash_ce_mod_exit(void) { - crypto_unregister_shash(&ghash_alg); + if (elf_hwcap & HWCAP_PMULL) + crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg)); + else + crypto_unregister_shash(ghash_alg); crypto_unregister_aead(&gcm_aes_alg); } diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 9bca54dda75c..1f4e9ee641c9 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -42,124 +42,131 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) READ_ONCE((v)->counter) -#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) - -#define atomic_add_return_relaxed atomic_add_return_relaxed -#define atomic_add_return_acquire atomic_add_return_acquire -#define atomic_add_return_release atomic_add_return_release -#define atomic_add_return atomic_add_return - -#define 
atomic_sub_return_relaxed atomic_sub_return_relaxed -#define atomic_sub_return_acquire atomic_sub_return_acquire -#define atomic_sub_return_release atomic_sub_return_release -#define atomic_sub_return atomic_sub_return - -#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed -#define atomic_fetch_add_acquire atomic_fetch_add_acquire -#define atomic_fetch_add_release atomic_fetch_add_release -#define atomic_fetch_add atomic_fetch_add - -#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed -#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire -#define atomic_fetch_sub_release atomic_fetch_sub_release -#define atomic_fetch_sub atomic_fetch_sub - -#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed -#define atomic_fetch_and_acquire atomic_fetch_and_acquire -#define atomic_fetch_and_release atomic_fetch_and_release -#define atomic_fetch_and atomic_fetch_and - -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed -#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#define atomic_fetch_andnot_release atomic_fetch_andnot_release -#define atomic_fetch_andnot atomic_fetch_andnot - -#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed -#define atomic_fetch_or_acquire atomic_fetch_or_acquire -#define atomic_fetch_or_release atomic_fetch_or_release -#define atomic_fetch_or atomic_fetch_or - -#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed -#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire -#define atomic_fetch_xor_release atomic_fetch_xor_release -#define atomic_fetch_xor atomic_fetch_xor - -#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) -#define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new)) -#define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new)) -#define atomic_xchg(v, new) xchg(&((v)->counter), (new)) - -#define atomic_cmpxchg_relaxed(v, old, new) \ - cmpxchg_relaxed(&((v)->counter), (old), (new)) -#define atomic_cmpxchg_acquire(v, old, new) \ - cmpxchg_acquire(&((v)->counter), (old), (new)) -#define atomic_cmpxchg_release(v, old, new) \ - cmpxchg_release(&((v)->counter), (old), (new)) -#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new)) - -#define atomic_andnot atomic_andnot +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire +#define arch_atomic_add_return_release arch_atomic_add_return_release +#define arch_atomic_add_return arch_atomic_add_return + +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire +#define arch_atomic_sub_return_release arch_atomic_sub_return_release +#define arch_atomic_sub_return arch_atomic_sub_return + +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire +#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release +#define arch_atomic_fetch_add arch_atomic_fetch_add + +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release +#define arch_atomic_fetch_sub arch_atomic_fetch_sub + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_and_acquire 
arch_atomic_fetch_and_acquire +#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release +#define arch_atomic_fetch_and arch_atomic_fetch_and + +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot + +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire +#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release +#define arch_atomic_fetch_or arch_atomic_fetch_or + +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release +#define arch_atomic_fetch_xor arch_atomic_fetch_xor + +#define arch_atomic_xchg_relaxed(v, new) \ + arch_xchg_relaxed(&((v)->counter), (new)) +#define arch_atomic_xchg_acquire(v, new) \ + arch_xchg_acquire(&((v)->counter), (new)) +#define arch_atomic_xchg_release(v, new) \ + arch_xchg_release(&((v)->counter), (new)) +#define arch_atomic_xchg(v, new) \ + arch_xchg(&((v)->counter), (new)) + +#define arch_atomic_cmpxchg_relaxed(v, old, new) \ + arch_cmpxchg_relaxed(&((v)->counter), (old), (new)) +#define arch_atomic_cmpxchg_acquire(v, old, new) \ + arch_cmpxchg_acquire(&((v)->counter), (old), (new)) +#define arch_atomic_cmpxchg_release(v, old, new) \ + arch_cmpxchg_release(&((v)->counter), (old), (new)) +#define arch_atomic_cmpxchg(v, old, new) \ + arch_cmpxchg(&((v)->counter), (old), (new)) + +#define arch_atomic_andnot arch_atomic_andnot /* - * 64-bit atomic operations. + * 64-bit arch_atomic operations. 
*/ -#define ATOMIC64_INIT ATOMIC_INIT -#define atomic64_read atomic_read -#define atomic64_set atomic_set - -#define atomic64_add_return_relaxed atomic64_add_return_relaxed -#define atomic64_add_return_acquire atomic64_add_return_acquire -#define atomic64_add_return_release atomic64_add_return_release -#define atomic64_add_return atomic64_add_return - -#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed -#define atomic64_sub_return_acquire atomic64_sub_return_acquire -#define atomic64_sub_return_release atomic64_sub_return_release -#define atomic64_sub_return atomic64_sub_return - -#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed -#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire -#define atomic64_fetch_add_release atomic64_fetch_add_release -#define atomic64_fetch_add atomic64_fetch_add - -#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed -#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire -#define atomic64_fetch_sub_release atomic64_fetch_sub_release -#define atomic64_fetch_sub atomic64_fetch_sub - -#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed -#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire -#define atomic64_fetch_and_release atomic64_fetch_and_release -#define atomic64_fetch_and atomic64_fetch_and - -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#define atomic64_fetch_andnot atomic64_fetch_andnot - -#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed -#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire -#define atomic64_fetch_or_release atomic64_fetch_or_release -#define atomic64_fetch_or atomic64_fetch_or - -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed -#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire -#define atomic64_fetch_xor_release atomic64_fetch_xor_release -#define atomic64_fetch_xor atomic64_fetch_xor - -#define atomic64_xchg_relaxed atomic_xchg_relaxed -#define atomic64_xchg_acquire atomic_xchg_acquire -#define atomic64_xchg_release atomic_xchg_release -#define atomic64_xchg atomic_xchg - -#define atomic64_cmpxchg_relaxed atomic_cmpxchg_relaxed -#define atomic64_cmpxchg_acquire atomic_cmpxchg_acquire -#define atomic64_cmpxchg_release atomic_cmpxchg_release -#define atomic64_cmpxchg atomic_cmpxchg - -#define atomic64_andnot atomic64_andnot - -#define atomic64_dec_if_positive atomic64_dec_if_positive +#define ATOMIC64_INIT ATOMIC_INIT +#define arch_atomic64_read arch_atomic_read +#define arch_atomic64_set arch_atomic_set + +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire +#define arch_atomic64_add_return_release arch_atomic64_add_return_release +#define arch_atomic64_add_return arch_atomic64_add_return + +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire +#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release +#define arch_atomic64_sub_return arch_atomic64_sub_return + +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release +#define arch_atomic64_fetch_add arch_atomic64_fetch_add + +#define 
arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub + +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release +#define arch_atomic64_fetch_and arch_atomic64_fetch_and + +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot + +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release +#define arch_atomic64_fetch_or arch_atomic64_fetch_or + +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor + +#define arch_atomic64_xchg_relaxed arch_atomic_xchg_relaxed +#define arch_atomic64_xchg_acquire arch_atomic_xchg_acquire +#define arch_atomic64_xchg_release arch_atomic_xchg_release +#define arch_atomic64_xchg arch_atomic_xchg + +#define arch_atomic64_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed +#define arch_atomic64_cmpxchg_acquire arch_atomic_cmpxchg_acquire +#define arch_atomic64_cmpxchg_release arch_atomic_cmpxchg_release +#define arch_atomic64_cmpxchg arch_atomic_cmpxchg + +#define arch_atomic64_andnot arch_atomic64_andnot + +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive + +#include <asm-generic/atomic-instrumented.h> #endif #endif diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index af7b99005453..e321293e0c89 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -39,7 +39,7 @@ #define ATOMIC_OP(op, asm_op) \ __LL_SC_INLINE void \ -__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ +__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \ { \ unsigned long tmp; \ int result; \ @@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : "Ir" (i)); \ } \ -__LL_SC_EXPORT(atomic_##op); +__LL_SC_EXPORT(arch_atomic_##op); #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ __LL_SC_INLINE int \ -__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ +__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \ { \ unsigned long tmp; \ int result; \ @@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ \ return result; \ } \ -__LL_SC_EXPORT(atomic_##op##_return##name); +__LL_SC_EXPORT(arch_atomic_##op##_return##name); #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ __LL_SC_INLINE int \ -__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ +__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \ { \ unsigned long tmp; \ int val, result; \ @@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ \ return result; \ } \ 
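The arm64 atomic.h hunks above move the whole API into the arch_atomic_*/arch_atomic64_* namespace and then pull in asm-generic/atomic-instrumented.h, which recreates the unprefixed atomic_*() calls as thin shims that run a sanitizer check before forwarding to the arch_ implementation. A minimal sketch of the shape of those shims, assuming the kasan_check_*() hooks used by the asm-generic header of this era (the hook names are an assumption, not part of this diff):

/* Sketch: how the instrumented layer forwards to the renamed arch_ ops. */
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));	/* flag bad or racy accesses to *v */
	return arch_atomic_add_return(i, v);	/* real arm64 implementation */
}

static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_cmpxchg(v, old, new);
}

This is why only the arch_-prefixed names need to exist in the arm64 headers once that include is in place.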
-__LL_SC_EXPORT(atomic_fetch_##op##name); +__LL_SC_EXPORT(arch_atomic_fetch_##op##name); #define ATOMIC_OPS(...) \ ATOMIC_OP(__VA_ARGS__) \ @@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor) #define ATOMIC64_OP(op, asm_op) \ __LL_SC_INLINE void \ -__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ +__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \ { \ long result; \ unsigned long tmp; \ @@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : "Ir" (i)); \ } \ -__LL_SC_EXPORT(atomic64_##op); +__LL_SC_EXPORT(arch_atomic64_##op); #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ __LL_SC_INLINE long \ -__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ +__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\ { \ long result; \ unsigned long tmp; \ @@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ \ return result; \ } \ -__LL_SC_EXPORT(atomic64_##op##_return##name); +__LL_SC_EXPORT(arch_atomic64_##op##_return##name); #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ __LL_SC_INLINE long \ -__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ +__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v)) \ { \ long result, val; \ unsigned long tmp; \ @@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ \ return result; \ } \ -__LL_SC_EXPORT(atomic64_fetch_##op##name); +__LL_SC_EXPORT(arch_atomic64_fetch_##op##name); #define ATOMIC64_OPS(...) \ ATOMIC64_OP(__VA_ARGS__) \ @@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor) #undef ATOMIC64_OP __LL_SC_INLINE long -__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) +__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v)) { long result; unsigned long tmp; @@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) return result; } -__LL_SC_EXPORT(atomic64_dec_if_positive); +__LL_SC_EXPORT(arch_atomic64_dec_if_positive); #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ __LL_SC_INLINE u##sz \ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index a424355240c5..9256a3921e4b 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -25,9 +25,9 @@ #error "please don't include this file directly" #endif -#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op) +#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op) #define ATOMIC_OP(op, asm_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd) #undef ATOMIC_OP #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ -static inline int atomic_fetch_##op##name(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd) #undef ATOMIC_FETCH_OPS #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) 
\ -static inline int atomic_add_return##name(int i, atomic_t *v) \ +static inline int arch_atomic_add_return##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory") #undef ATOMIC_OP_ADD_RETURN -static inline void atomic_and(int i, atomic_t *v) +static inline void arch_atomic_and(int i, atomic_t *v) { register int w0 asm ("w0") = i; register atomic_t *x1 asm ("x1") = v; @@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v) } #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ -static inline int atomic_fetch_and##name(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory") #undef ATOMIC_FETCH_OP_AND -static inline void atomic_sub(int i, atomic_t *v) +static inline void arch_atomic_sub(int i, atomic_t *v) { register int w0 asm ("w0") = i; register atomic_t *x1 asm ("x1") = v; @@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v) } #define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \ -static inline int atomic_sub_return##name(int i, atomic_t *v) \ +static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory") #undef ATOMIC_OP_SUB_RETURN #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \ -static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB( , al, "memory") #undef ATOMIC_FETCH_OP_SUB #undef __LL_SC_ATOMIC -#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op) +#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op) #define ATOMIC64_OP(op, asm_op) \ -static inline void atomic64_##op(long i, atomic64_t *v) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd) #undef ATOMIC64_OP #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ -static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd) #undef ATOMIC64_FETCH_OPS #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ -static inline long atomic64_add_return##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory") #undef ATOMIC64_OP_ADD_RETURN -static inline void atomic64_and(long i, atomic64_t *v) +static inline void arch_atomic64_and(long i, atomic64_t *v) { register long x0 asm ("x0") = i; register atomic64_t *x1 asm ("x1") = v; @@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v) } #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) 
\ -static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v) \ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") #undef ATOMIC64_FETCH_OP_AND -static inline void atomic64_sub(long i, atomic64_t *v) +static inline void arch_atomic64_sub(long i, atomic64_t *v) { register long x0 asm ("x0") = i; register atomic64_t *x1 asm ("x1") = v; @@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) } #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \ -static inline long atomic64_sub_return##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory") #undef ATOMIC64_OP_SUB_RETURN #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ -static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v) \ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory") #undef ATOMIC64_FETCH_OP_SUB -static inline long atomic64_dec_if_positive(atomic64_t *v) +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) { register long x0 asm ("x0") = (long)v; diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 3f9376f1c409..e6ea0f42e097 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -110,10 +110,10 @@ __XCHG_GEN(_mb) }) /* xchg */ -#define xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__) -#define xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__) -#define xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__) -#define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) +#define arch_xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__) +#define arch_xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__) +#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__) +#define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) #define __CMPXCHG_GEN(sfx) \ static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ @@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb) }) /* cmpxchg */ -#define cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__) -#define cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__) -#define cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__) -#define cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__) -#define cmpxchg_local cmpxchg_relaxed +#define arch_cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__) +#define arch_cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__) +#define arch_cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__) +#define arch_cmpxchg(...) 
__cmpxchg_wrapper( _mb, __VA_ARGS__) +#define arch_cmpxchg_local arch_cmpxchg_relaxed /* cmpxchg64 */ -#define cmpxchg64_relaxed cmpxchg_relaxed -#define cmpxchg64_acquire cmpxchg_acquire -#define cmpxchg64_release cmpxchg_release -#define cmpxchg64 cmpxchg -#define cmpxchg64_local cmpxchg_local +#define arch_cmpxchg64_relaxed arch_cmpxchg_relaxed +#define arch_cmpxchg64_acquire arch_cmpxchg_acquire +#define arch_cmpxchg64_release arch_cmpxchg_release +#define arch_cmpxchg64 arch_cmpxchg +#define arch_cmpxchg64_local arch_cmpxchg_local /* cmpxchg_double */ #define system_has_cmpxchg_double() 1 @@ -177,24 +177,24 @@ __CMPXCHG_GEN(_mb) VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \ }) -#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \ -({\ - int __ret;\ - __cmpxchg_double_check(ptr1, ptr2); \ - __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \ - (unsigned long)(n1), (unsigned long)(n2), \ - ptr1); \ - __ret; \ +#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \ +({ \ + int __ret; \ + __cmpxchg_double_check(ptr1, ptr2); \ + __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \ + (unsigned long)(n1), (unsigned long)(n2), \ + ptr1); \ + __ret; \ }) -#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \ -({\ - int __ret;\ - __cmpxchg_double_check(ptr1, ptr2); \ - __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \ - (unsigned long)(n1), (unsigned long)(n2), \ - ptr1); \ - __ret; \ +#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \ +({ \ + int __ret; \ + __cmpxchg_double_check(ptr1, ptr2); \ + __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \ + (unsigned long)(n1), (unsigned long)(n2), \ + ptr1); \ + __ret; \ }) #define __CMPWAIT_CASE(w, sfx, sz) \ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7732d0ba4e60..da3fc7324d68 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -48,6 +48,7 @@ #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -208,6 +209,13 @@ struct kvm_cpu_context { typedef struct kvm_cpu_context kvm_cpu_context_t; +struct vcpu_reset_state { + unsigned long pc; + unsigned long r0; + bool be; + bool reset; +}; + struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; @@ -297,6 +305,9 @@ struct kvm_vcpu_arch { /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ u64 vsesr_el2; + /* Additional reset state */ + struct vcpu_reset_state reset_state; + /* True when deferrable sysregs are loaded on the physical CPU, * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. 
*/ bool sysregs_loaded_on_cpu; diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e1ec947e7c0c..0c656850eeea 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -332,6 +332,17 @@ static inline void *phys_to_virt(phys_addr_t x) #define virt_addr_valid(kaddr) \ (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) +/* + * Given that the GIC architecture permits ITS implementations that can only be + * configured with a LPI table address once, GICv3 systems with many CPUs may + * end up reserving a lot of different regions after a kexec for their LPI + * tables (one per CPU), as we are forced to reuse the same memory after kexec + * (and thus reserve it persistently with EFI beforehand) + */ +#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS) +# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1) +#endif + #include <asm-generic/memory_model.h> #endif diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h index 2ba6c6b9541f..71abfc7612b2 100644 --- a/arch/arm64/include/asm/neon-intrinsics.h +++ b/arch/arm64/include/asm/neon-intrinsics.h @@ -36,4 +36,8 @@ #include <arm_neon.h> #endif +#ifdef CONFIG_CC_IS_CLANG +#pragma clang diagnostic ignored "-Wincompatible-pointer-types" +#endif + #endif /* __ASM_NEON_INTRINSICS_H */ diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h index eee31a9f72a5..e9c1a02c2154 100644 --- a/arch/arm64/include/asm/sync_bitops.h +++ b/arch/arm64/include/asm/sync_bitops.h @@ -15,13 +15,13 @@ * ops which are SMP safe even on a UP kernel. */ -#define sync_set_bit(nr, p) set_bit(nr, p) -#define sync_clear_bit(nr, p) clear_bit(nr, p) -#define sync_change_bit(nr, p) change_bit(nr, p) -#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p) -#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p) -#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p) -#define sync_test_bit(nr, addr) test_bit(nr, addr) -#define sync_cmpxchg cmpxchg +#define sync_set_bit(nr, p) set_bit(nr, p) +#define sync_clear_bit(nr, p) clear_bit(nr, p) +#define sync_change_bit(nr, p) change_bit(nr, p) +#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p) +#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p) +#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p) +#define sync_test_bit(nr, addr) test_bit(nr, addr) +#define arch_sync_cmpxchg arch_cmpxchg #endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 547d7a0c9d05..f1e5c9165809 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -34,7 +34,6 @@ #include <asm/memory.h> #include <asm/extable.h> -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 15d79a8e5e5e..eecf7927dab0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -539,8 +539,7 @@ set_hcr: /* GICv3 system register access */ mrs x0, id_aa64pfr0_el1 ubfx x0, x0, #24, #4 - cmp x0, #1 - b.ne 3f + cbz x0, 3f mrs_s x0, SYS_ICC_SRE_EL2 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index f2c211a6229b..58871333737a 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -120,10 +120,12 @@ 
static int create_dtb(struct kimage *image, { void *buf; size_t buf_size; + size_t cmdline_len; int ret; + cmdline_len = cmdline ? strlen(cmdline) : 0; buf_size = fdt_totalsize(initial_boot_params) - + strlen(cmdline) + DTB_EXTRA_SPACE; + + cmdline_len + DTB_EXTRA_SPACE; for (;;) { buf = vmalloc(buf_size); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9dce33b0e260..ddaea0fd2fa4 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs) } /* - * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a - * We also take into account DIT (bit 24), which is not yet documented, and - * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be - * allocated an EL0 meaning in future. + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. + * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is + * not described in ARM DDI 0487D.a. + * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may + * be allocated an EL0 meaning in future. * Userspace cannot use these until they have an architectural meaning. * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. * We also reserve IL for the kernel; SS is handled dynamically. */ #define SPSR_EL1_AARCH64_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ - GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ + GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5)) #define SPSR_EL1_AARCH32_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) static int valid_compat_regs(struct user_pt_regs *regs) { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4b0e1231625c..009849328289 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p) arm64_memblock_init(); paging_init(); - efi_apply_persistent_mem_reservations(); acpi_table_upgrade(); @@ -340,6 +339,9 @@ void __init setup_arch(char **cmdline_p) smp_init_cpus(); smp_build_mpidr_hash(); + /* Init percpu seeds for random tags after cpus are set up. 
*/ + kasan_init_tags(); + #ifdef CONFIG_ARM64_SW_TTBR0_PAN /* * Make sure init_thread_info.ttbr0 always generates translation diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index b0b1478094b4..421ebf6f7086 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -23,6 +23,7 @@ #include <kvm/arm_psci.h> #include <asm/cpufeature.h> +#include <asm/kprobes.h> #include <asm/kvm_asm.h> #include <asm/kvm_emulate.h> #include <asm/kvm_host.h> @@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) write_sysreg(kvm_get_hyp_vector(), vbar_el1); } +NOKPROBE_SYMBOL(activate_traps_vhe); static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) { @@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void) write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); write_sysreg(vectors, vbar_el1); } +NOKPROBE_SYMBOL(deactivate_traps_vhe); static void __hyp_text __deactivate_traps_nvhe(void) { @@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) return exit_code; } +NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); /* Switch to the guest for legacy non-VHE systems */ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) @@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par, read_sysreg_el2(esr), read_sysreg_el2(far), read_sysreg(hpfar_el2), par, vcpu); } +NOKPROBE_SYMBOL(__hyp_call_panic_vhe); void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) { diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 68d6f7c3b237..b426e2cf973c 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -18,6 +18,7 @@ #include <linux/compiler.h> #include <linux/kvm_host.h> +#include <asm/kprobes.h> #include <asm/kvm_asm.h> #include <asm/kvm_emulate.h> #include <asm/kvm_hyp.h> @@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_save_common_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_save_host_state_vhe); void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_save_common_state(ctxt); __sysreg_save_el2_return_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe); static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) { @@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_restore_common_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe); void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_restore_common_state(ctxt); __sysreg_restore_el2_return_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe); void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) { diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index b72a3dd56204..f16a5f8ff2b4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -32,6 +32,7 @@ #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_coproc.h> +#include <asm/kvm_emulate.h> #include <asm/kvm_mmu.h> /* Maximum phys_shift supported for any VM on this host */ @@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) * This function finds the right table above and sets the registers on * the virtual CPU struct to their architecturally defined reset * values. + * + * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT + * ioctl or as part of handling a request issued by another VCPU in the PSCI + * handling code. 
In the first case, the VCPU will not be loaded, and in the + * second case the VCPU will be loaded. Because this function operates purely + * on the memory-backed values of system registers, we want to do a full put if + * we were loaded (handling a request) and load the values back at the end of + * the function. Otherwise we leave the state alone. In both cases, we + * disable preemption around the vcpu reset as we would otherwise race with + * preempt notifiers which also call put/load. */ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) { const struct kvm_regs *cpu_reset; + int ret = -EINVAL; + bool loaded; + + preempt_disable(); + loaded = (vcpu->cpu != -1); + if (loaded) + kvm_arch_vcpu_put(vcpu); switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { if (!cpu_has_32bit_el1()) - return -EINVAL; + goto out; cpu_reset = &default_regs_reset32; } else { cpu_reset = &default_regs_reset; @@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset system registers */ kvm_reset_sys_regs(vcpu); + /* + * Additional reset state handling that PSCI may have imposed on us. + * Must be done after all the sys_reg reset. + */ + if (vcpu->arch.reset_state.reset) { + unsigned long target_pc = vcpu->arch.reset_state.pc; + + /* Gracefully handle Thumb2 entry point */ + if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { + target_pc &= ~1UL; + vcpu_set_thumb(vcpu); + } + + /* Propagate caller endianness */ + if (vcpu->arch.reset_state.be) + kvm_vcpu_set_be(vcpu); + + *vcpu_pc(vcpu) = target_pc; + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); + + vcpu->arch.reset_state.reset = false; + } + /* Reset PMU */ kvm_pmu_vcpu_reset(vcpu); @@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; /* Reset timer */ - return kvm_timer_vcpu_reset(vcpu); + ret = kvm_timer_vcpu_reset(vcpu); +out: + if (loaded) + kvm_arch_vcpu_load(vcpu, smp_processor_id()); + preempt_enable(); + return ret; } void kvm_set_ipa_limit(void) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e3e37228ae4e..c936aa40c3f4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, return read_zero(vcpu, p); } -static bool trap_undef(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) +/* + * ARMv8.1 mandates at least a trivial LORegion implementation, where all the + * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0 + * system, these registers should UNDEF. LORID_EL1 being a RO register, we + * treat it separately. 
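The reworked kvm_reset_vcpu() above consumes the new vcpu->arch.reset_state, which is meant to be filled in by another vCPU before it raises KVM_REQ_VCPU_RESET (both introduced in the kvm_host.h hunk earlier). A hypothetical caller-side sketch of that hand-off; the helper name and the PSCI framing are assumptions, only the struct fields and the request constant come from this diff:

/* Hypothetical sketch: ask another vCPU to reset itself with a given entry state. */
static void example_request_vcpu_reset(struct kvm_vcpu *target,
				       unsigned long entry_pc,
				       unsigned long context_id, bool be)
{
	struct vcpu_reset_state *rs = &target->arch.reset_state;

	rs->pc = entry_pc;	/* entry point kvm_reset_vcpu() will install */
	rs->r0 = context_id;	/* value it will place in r0/x0 */
	rs->be = be;		/* caller endianness to propagate */
	rs->reset = true;	/* marks the state as pending */

	kvm_make_request(KVM_REQ_VCPU_RESET, target);
	kvm_vcpu_kick(target);	/* make the target pick up the request */
}

kvm_reset_vcpu() then clears reset_state.reset once it has applied the PC, r0 and endianness, as the hunk above shows.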
+ */ +static bool trap_loregion(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) { - kvm_inject_undefined(vcpu); - return false; + u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1, + (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); + + if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { + kvm_inject_undefined(vcpu); + return false; + } + + if (p->is_write && sr == SYS_LORID_EL1) + return write_to_read_only(vcpu, p, r); + + return trap_raz_wi(vcpu, p, r); } static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, @@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) if (val & ptrauth_mask) kvm_debug("ptrauth unsupported for guests, suppressing\n"); val &= ~ptrauth_mask; - } else if (id == SYS_ID_AA64MMFR1_EL1) { - if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) - kvm_debug("LORegions unsupported for guests, suppressing\n"); - - val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); } return val; @@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, - { SYS_DESC(SYS_LORSA_EL1), trap_undef }, - { SYS_DESC(SYS_LOREA_EL1), trap_undef }, - { SYS_DESC(SYS_LORN_EL1), trap_undef }, - { SYS_DESC(SYS_LORC_EL1), trap_undef }, - { SYS_DESC(SYS_LORID_EL1), trap_undef }, + { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, + { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, + { SYS_DESC(SYS_LORN_EL1), trap_loregion }, + { SYS_DESC(SYS_LORC_EL1), trap_loregion }, + { SYS_DESC(SYS_LORID_EL1), trap_loregion }, { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, @@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) table = get_target_table(vcpu->arch.target, true, &num); reset_sys_reg_descs(vcpu, table, num); - for (num = 1; num < NR_SYS_REGS; num++) - if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) - panic("Didn't reset __vcpu_sys_reg(%zi)", num); + for (num = 1; num < NR_SYS_REGS; num++) { + if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242, + "Didn't reset __vcpu_sys_reg(%zi)\n", num)) + break; + } } diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index fcb1f2a6d7c6..99bb8facb5cb 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, } -static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start) +static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start, + unsigned long end) { - pte_t *ptep = pte_offset_kernel(pmdp, 0UL); - unsigned long addr; - unsigned i; + unsigned long addr = start; + pte_t *ptep = pte_offset_kernel(pmdp, start); - for (i = 0; i < PTRS_PER_PTE; i++, ptep++) { - addr = start + i * PAGE_SIZE; + do { note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); - } + } while (ptep++, addr += PAGE_SIZE, addr != end); } -static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start) +static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start, + unsigned long end) { - pmd_t *pmdp = pmd_offset(pudp, 0UL); - unsigned long addr; - unsigned i; + unsigned long next, addr = start; + pmd_t *pmdp = pmd_offset(pudp, start); - for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { + do { pmd_t pmd = READ_ONCE(*pmdp); + next = pmd_addr_end(addr, end); - addr = start + i * PMD_SIZE; if (pmd_none(pmd) || pmd_sect(pmd)) { 
note_page(st, addr, 3, pmd_val(pmd)); } else { BUG_ON(pmd_bad(pmd)); - walk_pte(st, pmdp, addr); + walk_pte(st, pmdp, addr, next); } - } + } while (pmdp++, addr = next, addr != end); } -static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start) +static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start, + unsigned long end) { - pud_t *pudp = pud_offset(pgdp, 0UL); - unsigned long addr; - unsigned i; + unsigned long next, addr = start; + pud_t *pudp = pud_offset(pgdp, start); - for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { + do { pud_t pud = READ_ONCE(*pudp); + next = pud_addr_end(addr, end); - addr = start + i * PUD_SIZE; if (pud_none(pud) || pud_sect(pud)) { note_page(st, addr, 2, pud_val(pud)); } else { BUG_ON(pud_bad(pud)); - walk_pmd(st, pudp, addr); + walk_pmd(st, pudp, addr, next); } - } + } while (pudp++, addr = next, addr != end); } static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start) { - pgd_t *pgdp = pgd_offset(mm, 0UL); - unsigned i; - unsigned long addr; + unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0; + unsigned long next, addr = start; + pgd_t *pgdp = pgd_offset(mm, start); - for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) { + do { pgd_t pgd = READ_ONCE(*pgdp); + next = pgd_addr_end(addr, end); - addr = start + i * PGDIR_SIZE; if (pgd_none(pgd)) { note_page(st, addr, 1, pgd_val(pgd)); } else { BUG_ON(pgd_bad(pgd)); - walk_pud(st, pgdp, addr); + walk_pud(st, pgdp, addr, next); } - } + } while (pgdp++, addr = next, addr != end); } void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 4b55b15707a3..f37a86d2a69d 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -252,8 +252,6 @@ void __init kasan_init(void) memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); - kasan_init_tags(); - /* At this point kasan is fully initialized. 
Enable error messages */ init_task.kasan_depth = 0; pr_info("KernelAddressSanitizer initialized\n"); diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 1542df00b23c..aaddc0217e73 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -362,7 +362,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, const s16 off = insn->off; const s32 imm = insn->imm; const int i = insn - ctx->prog->insnsi; - const bool is64 = BPF_CLASS(code) == BPF_ALU64; + const bool is64 = BPF_CLASS(code) == BPF_ALU64 || + BPF_CLASS(code) == BPF_JMP; const bool isdw = BPF_SIZE(code) == BPF_DW; u8 jmp_cond; s32 jmp_offset; @@ -559,7 +560,17 @@ emit_bswap_uxt: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: - emit(A64_CMP(1, dst, src), ctx); + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + emit(A64_CMP(is64, dst, src), ctx); emit_cond_jmp: jmp_offset = bpf2a64_offset(i + off, i, ctx); check_imm19(jmp_offset); @@ -601,7 +612,8 @@ emit_cond_jmp: emit(A64_B_(jmp_cond, jmp_offset), ctx); break; case BPF_JMP | BPF_JSET | BPF_X: - emit(A64_TST(1, dst, src), ctx); + case BPF_JMP32 | BPF_JSET | BPF_X: + emit(A64_TST(is64, dst, src), ctx); goto emit_cond_jmp; /* IF (dst COND imm) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_K: @@ -614,12 +626,23 @@ emit_cond_jmp: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: - emit_a64_mov_i(1, tmp, imm, ctx); - emit(A64_CMP(1, dst, tmp), ctx); + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_CMP(is64, dst, tmp), ctx); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_K: - emit_a64_mov_i(1, tmp, imm, ctx); - emit(A64_TST(1, dst, tmp), ctx); + case BPF_JMP32 | BPF_JSET | BPF_K: + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_TST(is64, dst, tmp), ctx); goto emit_cond_jmp; /* function call */ case BPF_JMP | BPF_CALL: diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index edfcbb25fd9f..dcea277c09ae 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -45,8 +45,8 @@ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) #define pte_clear(mm, addr, ptep) set_pte((ptep), \ - (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) -#define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) + (((unsigned int) addr & PAGE_OFFSET) ? 
__pte(_PAGE_GLOBAL) : __pte(0))) +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ @@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte) #define pgd_index(address) ((address) >> PGDIR_SHIFT) +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); + /* * Macro to make mark a page protection value as "uncacheable". Note * that "protection" is really a misnomer here as the protection value diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 8f454810514f..21e0bd5293dd 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -49,7 +49,7 @@ struct thread_struct { }; #define INIT_THREAD { \ - .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \ + .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ .sr = DEFAULT_PSR_VALUE, \ } @@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p); #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) #define task_pt_regs(p) \ - ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) + ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) #define cpu_relax() barrier() diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h index ffdc4c47ff43..db2640d5f575 100644 --- a/arch/csky/include/asm/segment.h +++ b/arch/csky/include/asm/segment.h @@ -9,7 +9,6 @@ typedef struct { } mm_segment_t; #define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) -#define get_ds() KERNEL_DS #define USER_DS ((mm_segment_t) { 0x80000000UL }) #define get_fs() (current_thread_info()->addr_limit) diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c index 659253e9989c..d67f9777cfd9 100644 --- a/arch/csky/kernel/dumpstack.c +++ b/arch/csky/kernel/dumpstack.c @@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack) if (task) stack = (unsigned long *)thread_saved_fp(task); else +#ifdef CONFIG_STACKTRACE + asm volatile("mov %0, r8\n":"=r"(stack)::"memory"); +#else stack = (unsigned long *)&stack; +#endif } show_trace(stack); diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 57f1afe19a52..f2f12fff36f7 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c @@ -8,6 +8,7 @@ #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/sched.h> +#include <linux/sched/task_stack.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/uaccess.h> @@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target, static const struct user_regset csky_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, - .n = ELF_NGREG, + .n = sizeof(struct pt_regs) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .get = &gpr_get, diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index ddc4dd79f282..b07a534b3062 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) { unsigned long mask = 1 << cpu; - secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; + secondary_stack = + (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; secondary_hint = mfcr("cr31"); secondary_ccr = mfcr("cr18"); diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c index 
cb7c03e5cd21..8473b6bdf512 100644 --- a/arch/csky/mm/ioremap.c +++ b/arch/csky/mm/ioremap.c @@ -46,3 +46,17 @@ void iounmap(void __iomem *addr) vunmap((void *)((unsigned long)addr & PAGE_MASK)); } EXPORT_SYMBOL(iounmap); + +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + if (!pfn_valid(pfn)) { + vma_prot.pgprot |= _PAGE_SO; + return pgprot_noncached(vma_prot); + } else if (file->f_flags & O_SYNC) { + return pgprot_noncached(vma_prot); + } + + return vma_prot; +} +EXPORT_SYMBOL(phys_mem_access_prot); diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h index 9adbf7e1aaa3..a407978f9f9f 100644 --- a/arch/h8300/include/asm/segment.h +++ b/arch/h8300/include/asm/segment.h @@ -33,12 +33,6 @@ static inline mm_segment_t get_fs(void) return USER_DS; } -static inline mm_segment_t get_ds(void) -{ - /* return the supervisor data space code */ - return KERNEL_DS; -} - #define segment_eq(a, b) ((a).seg == (b).seg) #endif /* __ASSEMBLY__ */ diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 306d469e43da..89782ad3fb88 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -48,7 +48,6 @@ #define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */ #define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */ -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild index 5b819e53c397..b71c5f787783 100644 --- a/arch/ia64/include/uapi/asm/Kbuild +++ b/arch/ia64/include/uapi/asm/Kbuild @@ -2,3 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_64.h generic-y += kvm_para.h +generic-y += socket.h diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h deleted file mode 100644 index c872c4e6bafb..000000000000 --- a/arch/ia64/include/uapi/asm/socket.h +++ /dev/null @@ -1,120 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _ASM_IA64_SOCKET_H -#define _ASM_IA64_SOCKET_H - -/* - * Socket related defines. - * - * Based on <asm-i386/socket.h>. 
- * - * Modified 1998-2000 - * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co - */ - -#include <asm/sockios.h> - -/* For setsockopt(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -#define SO_REUSEPORT 15 -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 -#define SO_GET_FILTER SO_ATTACH_FILTER - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 - -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#define SO_MARK 36 - -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - -#define SO_PROTOCOL 38 -#define SO_DOMAIN 39 - -#define SO_RXQ_OVFL 40 - -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS -#define SO_PEEK_OFF 42 - -/* Instruct lower device to use last 4-bytes of skb data as FCS */ -#define SO_NOFCS 43 - -#define SO_LOCK_FILTER 44 - -#define SO_SELECT_ERR_QUEUE 45 - -#define SO_BUSY_POLL 46 - -#define SO_MAX_PACING_RATE 47 - -#define SO_BPF_EXTENSIONS 48 - -#define SO_INCOMING_CPU 49 - -#define SO_ATTACH_BPF 50 -#define SO_DETACH_BPF SO_DETACH_FILTER - -#define SO_ATTACH_REUSEPORT_CBPF 51 -#define SO_ATTACH_REUSEPORT_EBPF 52 - -#define SO_CNX_ADVICE 53 - -#define SCM_TIMESTAMPING_OPT_STATS 54 - -#define SO_MEMINFO 55 - -#define SO_INCOMING_NAPI_ID 56 - -#define SO_COOKIE 57 - -#define SCM_TIMESTAMPING_PKTINFO 58 - -#define SO_PEERGROUPS 59 - -#define SO_ZEROCOPY 60 - -#define SO_TXTIME 61 -#define SCM_TXTIME SO_TXTIME - -#endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index f00ca53f8c14..482513b9af2c 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200) cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200) KBUILD_AFLAGS += $(cpuflags-y) -KBUILD_CFLAGS += $(cpuflags-y) -pipe +KBUILD_CFLAGS += $(cpuflags-y) + +KBUILD_CFLAGS += -pipe -ffreestanding + ifdef CONFIG_MMU # without -fno-strength-reduce the 53c7xx.c driver fails ;-( KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2 diff --git a/arch/m68k/apollo/Makefile b/arch/m68k/apollo/Makefile index 76a057962c38..01856a858fda 100644 --- a/arch/m68k/apollo/Makefile +++ b/arch/m68k/apollo/Makefile @@ -1,5 +1,5 @@ # -# Makefile for Linux arch/m68k/amiga source directory +# Makefile for Linux arch/m68k/apollo source directory # obj-y := config.o dn_ints.o diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c index bd96702a1ad0..4fcc4b1df1c0 100644 --- a/arch/m68k/atari/config.c +++ b/arch/m68k/atari/config.c @@ -148,7 +148,7 @@ int __init atari_parse_bootinfo(const struct bi_record *record) /* Parse the Atari-specific switches= option. 
*/ static int __init atari_switches_setup(char *str) { - char switches[strlen(str) + 1]; + char switches[COMMAND_LINE_SIZE]; char *p; int ovsc_shift; char *args = switches; diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c index ad1185c68df7..6b3ab583c698 100644 --- a/arch/m68k/coldfire/m5272.c +++ b/arch/m68k/coldfire/m5272.c @@ -127,7 +127,7 @@ static struct fixed_phy_status nettel_fixed_phy_status __initdata = { static int __init init_BSP(void) { m5272_uarts_init(); - fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status, -1); + fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status); return 0; } diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig index 131b4101ae5d..1ba10d57ddb1 100644 --- a/arch/m68k/configs/amcore_defconfig +++ b/arch/m68k/configs/amcore_defconfig @@ -55,27 +55,7 @@ CONFIG_MTD_UCLINUX=y CONFIG_MTD_PLATRAM=y CONFIG_BLK_DEV_RAM=y CONFIG_NETDEVICES=y -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_DM9000=y -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_WLAN is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index bfd4648e76e3..525421ae277d 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -38,7 +38,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -304,7 +303,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -342,7 +340,6 @@ CONFIG_BLK_DEV_GAYLE=y CONFIG_BLK_DEV_BUDDHA=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -399,44 +396,12 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_A2065=y CONFIG_ARIADNE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is 
not set CONFIG_XSURF100=y CONFIG_HYDRA=y CONFIG_APNE=y CONFIG_ZORRO8390=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -612,6 +577,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -621,6 +587,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 81112af1e478..db0e654a88d5 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -34,7 +34,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -300,7 +299,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -327,7 +325,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -378,35 +375,6 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -569,6 +537,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -578,6 +547,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m 
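Returning to the atari config.c hunk a few files up: replacing the char switches[strlen(str) + 1] VLA with a fixed COMMAND_LINE_SIZE buffer works because the string being parsed comes from the kernel command line, whose length is already capped by that constant. A hedged sketch of the pattern, with a hypothetical helper and a bounded copy added purely for illustration (the hunk itself only changes the array declaration):

/* Sketch: parsing a command-line option without a variable-length array.
 * Assumes str originates from the kernel command line and is therefore
 * shorter than COMMAND_LINE_SIZE; the helper below is hypothetical. */
static int __init example_switches_setup(char *str)
{
	char buf[COMMAND_LINE_SIZE];	/* fixed-size stack buffer, no VLA */

	strlcpy(buf, str, sizeof(buf));	/* bounded copy instead of strcpy() */
	/* ... tokenise buf and apply each switch ... */
	return 0;
}
__setup("switches=", example_switches_setup);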
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 6d4b6023a2f0..1451168eb789 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -41,7 +41,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -307,7 +306,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -342,7 +340,6 @@ CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_FALCON_IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -394,37 +391,9 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_ATARILANCE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set CONFIG_NE2000=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set CONFIG_SMC91X=y -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -590,6 +559,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -599,6 +569,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 3306dff09d3c..b0d3609f5bb3 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -31,7 +31,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -297,7 +296,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -324,7 +322,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -376,35 +373,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH 
is not set -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_BVME6000_NET=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -561,6 +530,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -570,6 +540,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index c15e15b68d39..4ed7c151347c 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -33,7 +33,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -299,7 +298,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -326,7 +324,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -377,36 +374,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_HPLANCE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# 
CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -571,6 +539,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -580,6 +549,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 1a0ce0d11267..0dc544e1ce1f 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -32,7 +32,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -301,7 +300,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -333,7 +331,6 @@ CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_MAC_IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -393,39 +390,10 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_MACMACE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_MAC89x0=y -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set CONFIG_MACSONIC=y -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set CONFIG_MAC8390=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -593,6 +561,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -602,6 +571,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 9758839b74bd..5a7b7b0d6e72 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -52,7 +52,6 @@ CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m 
CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -321,7 +320,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -366,7 +364,6 @@ CONFIG_BLK_DEV_MAC_IDE=y CONFIG_BLK_DEV_Q40IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -437,9 +434,6 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_A2065=y CONFIG_ARIADNE=y CONFIG_ATARILANCE=y @@ -447,43 +441,17 @@ CONFIG_HPLANCE=y CONFIG_MVME147_NET=y CONFIG_SUN3LANCE=y CONFIG_MACMACE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_MAC89x0=y -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_BVME6000_NET=y CONFIG_MVME16x_NET=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set CONFIG_MACSONIC=y -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set CONFIG_XSURF100=y CONFIG_HYDRA=y CONFIG_MAC8390=y CONFIG_NE2000=y CONFIG_APNE=y CONFIG_ZORRO8390=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set CONFIG_SMC91X=y -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -675,6 +643,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -684,6 +653,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index f5526731ccdb..71eb9be1803b 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -30,7 +30,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -296,7 +295,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -323,7 +321,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -375,36 +372,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set 
CONFIG_MVME147_NET=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -561,6 +529,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -570,6 +539,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 5db58ff4b107..ea2ebd4241c0 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -31,7 +31,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -297,7 +296,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -324,7 +322,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -376,35 +373,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_MVME16x_NET=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set 
-# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -561,6 +530,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -570,6 +540,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index b645230da128..cef6dc47c725 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -32,7 +32,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -298,7 +297,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -332,7 +330,6 @@ CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_Q40IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -383,40 +380,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set CONFIG_NE2000=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -584,6 +548,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -593,6 +558,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 4afe2100947e..69f2282dc4e9 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -28,7 +28,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set 
CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -294,7 +293,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -321,7 +319,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -373,36 +370,8 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_SUN3LANCE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_SUN3_82586=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -563,6 +532,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -572,6 +542,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index bd22893d0dc3..e91267e868b2 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -28,7 +28,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -294,7 +293,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -321,7 +319,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -373,36 +370,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_SUN3LANCE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not 
set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -563,6 +531,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -572,6 +541,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 38049357d6d3..40712e49381b 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -155,18 +155,22 @@ out: static int __init nfhd_init(void) { u32 blocks, bsize; + int ret; int i; nfhd_id = nf_get_id("XHDI"); if (!nfhd_id) return -ENODEV; - major_num = register_blkdev(major_num, "nfhd"); - if (major_num <= 0) { + ret = register_blkdev(major_num, "nfhd"); + if (ret < 0) { pr_warn("nfhd: unable to get major number\n"); - return major_num; + return ret; } + if (!major_num) + major_num = ret; + for (i = NFHD_DEV_OFFSET; i < 24; i++) { if (nfhd_get_capacity(i, 0, &blocks, &bsize)) continue; diff --git a/arch/m68k/include/asm/a.out-core.h b/arch/m68k/include/asm/a.out-core.h deleted file mode 100644 index ae91ea6bb303..000000000000 --- a/arch/m68k/include/asm/a.out-core.h +++ /dev/null @@ -1,68 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_A_OUT_CORE_H -#define _ASM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include <linux/user.h> -#include <linux/elfcore.h> -#include <linux/mm_types.h> - -/* - * fill in the user structure for an a.out core dump - */ -static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) -{ - struct switch_stack *sw; - -/* changed the size calculations - should hopefully work better. 
lbt */ - dump->magic = CMAGIC; - dump->start_code = 0; - dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); - dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; - dump->u_dsize = ((unsigned long) (current->mm->brk + - (PAGE_SIZE-1))) >> PAGE_SHIFT; - dump->u_dsize -= dump->u_tsize; - dump->u_ssize = 0; - - if (dump->start_stack < TASK_SIZE) - dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; - - dump->u_ar0 = offsetof(struct user, regs); - sw = ((struct switch_stack *)regs) - 1; - dump->regs.d1 = regs->d1; - dump->regs.d2 = regs->d2; - dump->regs.d3 = regs->d3; - dump->regs.d4 = regs->d4; - dump->regs.d5 = regs->d5; - dump->regs.d6 = sw->d6; - dump->regs.d7 = sw->d7; - dump->regs.a0 = regs->a0; - dump->regs.a1 = regs->a1; - dump->regs.a2 = regs->a2; - dump->regs.a3 = sw->a3; - dump->regs.a4 = sw->a4; - dump->regs.a5 = sw->a5; - dump->regs.a6 = sw->a6; - dump->regs.d0 = regs->d0; - dump->regs.orig_d0 = regs->orig_d0; - dump->regs.stkadj = regs->stkadj; - dump->regs.sr = regs->sr; - dump->regs.pc = regs->pc; - dump->regs.fmtvec = (regs->format << 12) | regs->vector; - /* dump floating point stuff */ - dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_A_OUT_CORE_H */ diff --git a/arch/m68k/include/asm/macints.h b/arch/m68k/include/asm/macints.h index cddb2d3ea49b..4da172bd048c 100644 --- a/arch/m68k/include/asm/macints.h +++ b/arch/m68k/include/asm/macints.h @@ -121,7 +121,4 @@ #define SLOT2IRQ(x) (x + 47) #define IRQ2SLOT(x) (x - 47) -#define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */ -#define INT_TICKS 246 /* to make sched_time = 99.902... HZ */ - #endif /* asm/macints.h */ diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index 0b4cc1e079b5..c6686559e9b7 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h @@ -45,16 +45,9 @@ static inline void set_fs(mm_segment_t val) : /* no outputs */ : "r" (val.seg) : "memory"); } -static inline mm_segment_t get_ds(void) -{ - /* return the supervisor data space code */ - return KERNEL_DS; -} - #else #define USER_DS MAKE_MM_SEG(TASK_SIZE) #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #endif diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index e2a9421c5797..87e7f3639839 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -651,7 +651,8 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, regs->vector = formatvec & 0xfff; } else { struct switch_stack *sw = (struct switch_stack *)regs - 1; - unsigned long buf[fsize / 2]; /* yes, twice as much */ + /* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */ + unsigned long buf[sizeof(((struct frame *)0)->un) / 2]; /* that'll make sure that expansion won't crap over data */ if (copy_from_user(buf + fsize / 4, fp, fsize)) diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 71c4735a31ee..1423e1fe0261 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -410,9 +410,8 @@ void mac_poweroff(void) void mac_reset(void) { - if (macintosh_config->adb_type == MAC_ADB_II) { - unsigned long flags; - + if (macintosh_config->adb_type == MAC_ADB_II && + macintosh_config->ident != MAC_MODEL_SE30) { /* need ROMBASE in booter */ /* indeed, plus need to MAP THE ROM !! 
*/ @@ -422,17 +421,8 @@ void mac_reset(void) /* works on some */ rom_reset = (void *) (mac_bi_data.rombase + 0xa); - if (macintosh_config->ident == MAC_MODEL_SE30) { - /* - * MSch: Machines known to crash on ROM reset ... - */ - } else { - local_irq_save(flags); - - rom_reset(); - - local_irq_restore(flags); - } + local_irq_disable(); + rom_reset(); #ifdef CONFIG_ADB_CUDA } else if (macintosh_config->adb_type == MAC_ADB_EGRET || macintosh_config->adb_type == MAC_ADB_CUDA) { diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index acdabbeecfd2..0b0289459173 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -189,7 +189,6 @@ void __init via_init(void) /* * SE/30: disable video IRQ - * XXX: testing for SE/30 VBL */ if (macintosh_config->ident == MAC_MODEL_SE30) { @@ -197,13 +196,18 @@ void __init via_init(void) via1[vBufB] |= 0x40; } - /* - * Set the RTC bits to a known state: all lines to outputs and - * RTC disabled (yes that's 0 to enable and 1 to disable). - */ - - via1[vDirB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk | VIA1B_vRTCData); - via1[vBufB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk); + switch (macintosh_config->adb_type) { + case MAC_ADB_IOP: + case MAC_ADB_II: + case MAC_ADB_PB1: + /* + * Set the RTC bits to a known state: all lines to outputs and + * RTC disabled (yes that's 0 to enable and 1 to disable). + */ + via1[vDirB] |= VIA1B_vRTCEnb | VIA1B_vRTCClk | VIA1B_vRTCData; + via1[vBufB] |= VIA1B_vRTCEnb | VIA1B_vRTCClk; + break; + } /* Everything below this point is VIA2/RBV only... */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index dbfea093a7c7..bff2a71c828a 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -42,7 +42,6 @@ # define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) # endif -# define get_ds() (KERNEL_DS) # define get_fs() (current_thread_info()->addr_limit) # define set_fs(val) (current_thread_info()->addr_limit = (val)) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 616fc853a46d..3d7f1153155f 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -207,7 +207,6 @@ config ATH79 select COMMON_CLK select CLKDEV_LOOKUP select IRQ_MIPS_CPU - select MIPS_MACHINE select SYS_HAS_CPU_MIPS32_R2 select SYS_HAS_EARLY_PRINTK select SYS_SUPPORTS_32BIT_KERNEL @@ -392,7 +391,7 @@ config MACH_INGENIC select GPIOLIB select COMMON_CLK select GENERIC_IRQ_CHIP - select BUILTIN_DTB + select BUILTIN_DTB if MIPS_NO_APPENDED_DTB select USE_OF select LIBFDT @@ -677,6 +676,7 @@ config SGI_IP27 select DEFAULT_SGI_PARTITION select SYS_HAS_EARLY_PRINTK select HAVE_PCI + select IRQ_MIPS_CPU select NR_CPUS_DEFAULT_64 select SYS_HAS_CPU_R10000 select SYS_SUPPORTS_64BIT_KERNEL @@ -1125,7 +1125,6 @@ config DMA_NONCOHERENT bool select ARCH_HAS_DMA_MMAP_PGPROT select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_SYNC_DMA_FOR_CPU select NEED_DMA_MAP_STATE select ARCH_HAS_DMA_COHERENT_TO_PFN select DMA_NONCOHERENT_CACHE_SYNC @@ -1404,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT please say 'N' here. If you want a high-performance kernel to run on new Loongson 3 machines only, please say 'Y' here. +config CPU_LOONGSON3_WORKAROUNDS + bool "Old Loongson 3 LLSC Workarounds" + default y if SMP + depends on CPU_LOONGSON3 + help + Loongson 3 processors have the llsc issues which require workarounds. + Without workarounds the system may hang unexpectedly. + + Newer Loongson 3 will fix these issues and no workarounds are needed. 
+ The workarounds have no significant side effect on them but may + decrease the performance of the system so this option should be + disabled unless the kernel is intended to be run on old systems. + + If unsure, please say Y. + config CPU_LOONGSON2E bool "Loongson 2E" depends on SYS_HAS_CPU_LOONGSON2E @@ -1542,6 +1556,7 @@ config CPU_MIPS64_R6 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_HUGEPAGES select CPU_SUPPORTS_MSA select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32 select HAVE_KVM @@ -1867,7 +1882,7 @@ config CPU_LOONGSON2 config CPU_LOONGSON1 bool select CPU_MIPS32 - select CPU_MIPSR1 + select CPU_MIPSR2 select CPU_HAS_PREFETCH select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL @@ -1929,9 +1944,11 @@ config SYS_HAS_CPU_MIPS32_R3_5 config SYS_HAS_CPU_MIPS32_R5 bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT config SYS_HAS_CPU_MIPS32_R6 bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT config SYS_HAS_CPU_MIPS64_R1 bool @@ -1941,6 +1958,7 @@ config SYS_HAS_CPU_MIPS64_R2 config SYS_HAS_CPU_MIPS64_R6 bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT config SYS_HAS_CPU_R3000 bool @@ -1977,6 +1995,7 @@ config SYS_HAS_CPU_R8000 config SYS_HAS_CPU_R10000 bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT config SYS_HAS_CPU_RM7000 bool @@ -2005,6 +2024,7 @@ config SYS_HAS_CPU_BMIPS4380 config SYS_HAS_CPU_BMIPS5000 bool select SYS_HAS_CPU_BMIPS + select ARCH_HAS_SYNC_DMA_FOR_CPU config SYS_HAS_CPU_XLR bool diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 5b174c3d0de3..8f4486c4415b 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -233,6 +233,8 @@ toolchain-crc := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mcrc) cflags-$(toolchain-crc) += -DTOOLCHAIN_SUPPORTS_CRC toolchain-dsp := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mdsp) cflags-$(toolchain-dsp) += -DTOOLCHAIN_SUPPORTS_DSP +toolchain-ginv := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mginv) +cflags-$(toolchain-ginv) += -DTOOLCHAIN_SUPPORTS_GINV # # Firmware support diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index f09262e0a72f..10ff07b7721e 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -683,7 +683,7 @@ static int __init ar7_register_devices(void) if (ar7_has_high_cpmac()) { res = fixed_phy_add(PHY_POLL, cpmac_high.id, - &fixed_phy_status, -1); + &fixed_phy_status); if (!res) { cpmac_get_mac(1, cpmac_high_data.dev_addr); @@ -696,7 +696,7 @@ static int __init ar7_register_devices(void) } else cpmac_low_data.phy_mask = 0xffffffff; - res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status, -1); + res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status); if (!res) { cpmac_get_mac(0, cpmac_low_data.dev_addr); res = platform_device_register(&cpmac_low); diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig index 191c3910eac5..7367416642cb 100644 --- a/arch/mips/ath79/Kconfig +++ b/arch/mips/ath79/Kconfig @@ -1,79 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 if ATH79 -menu "Atheros AR71XX/AR724X/AR913X machine selection" - -config ATH79_MACH_AP121 - bool "Atheros AP121 reference board" - select SOC_AR933X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - select ATH79_DEV_WMAC - help - Say 'Y' here if you want your kernel to support the - Atheros AP121 reference board. 
- -config ATH79_MACH_AP136 - bool "Atheros AP136 reference board" - select SOC_QCA955X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - select ATH79_DEV_WMAC - help - Say 'Y' here if you want your kernel to support the - Atheros AP136 reference board. - -config ATH79_MACH_AP81 - bool "Atheros AP81 reference board" - select SOC_AR913X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - select ATH79_DEV_WMAC - help - Say 'Y' here if you want your kernel to support the - Atheros AP81 reference board. - -config ATH79_MACH_DB120 - bool "Atheros DB120 reference board" - select SOC_AR934X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - select ATH79_DEV_WMAC - help - Say 'Y' here if you want your kernel to support the - Atheros DB120 reference board. - -config ATH79_MACH_PB44 - bool "Atheros PB44 reference board" - select SOC_AR71XX - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - help - Say 'Y' here if you want your kernel to support the - Atheros PB44 reference board. - -config ATH79_MACH_UBNT_XM - bool "Ubiquiti Networks XM (rev 1.0) board" - select SOC_AR724X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - help - Say 'Y' here if you want your kernel to support the - Ubiquiti Networks XM (rev 1.0) board. - -endmenu - config SOC_AR71XX select HAVE_PCI def_bool n diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile index fcc382cfc770..e18d9a2ecf62 100644 --- a/arch/mips/ath79/Makefile +++ b/arch/mips/ath79/Makefile @@ -8,27 +8,6 @@ # under the terms of the GNU General Public License version 2 as published # by the Free Software Foundation. 
-obj-y := prom.o setup.o irq.o common.o clock.o +obj-y := prom.o setup.o common.o clock.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_PCI) += pci.o - -# -# Devices -# -obj-y += dev-common.o -obj-$(CONFIG_ATH79_DEV_GPIO_BUTTONS) += dev-gpio-buttons.o -obj-$(CONFIG_ATH79_DEV_LEDS_GPIO) += dev-leds-gpio.o -obj-$(CONFIG_ATH79_DEV_SPI) += dev-spi.o -obj-$(CONFIG_ATH79_DEV_USB) += dev-usb.o -obj-$(CONFIG_ATH79_DEV_WMAC) += dev-wmac.o - -# -# Machines -# -obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o -obj-$(CONFIG_ATH79_MACH_AP136) += mach-ap136.o -obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o -obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o -obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o -obj-$(CONFIG_ATH79_MACH_UBNT_XM) += mach-ubnt-xm.o diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c index cf9158e3c2d9..d4ca97e2ec6c 100644 --- a/arch/mips/ath79/clock.c +++ b/arch/mips/ath79/clock.c @@ -26,7 +26,6 @@ #include <asm/mach-ath79/ath79.h> #include <asm/mach-ath79/ar71xx_regs.h> #include "common.h" -#include "machtypes.h" #define AR71XX_BASE_FREQ 40000000 #define AR724X_BASE_FREQ 40000000 @@ -37,24 +36,63 @@ static struct clk_onecell_data clk_data = { .clk_num = ARRAY_SIZE(clks), }; -static struct clk *__init ath79_add_sys_clkdev( - const char *id, unsigned long rate) +static const char * const clk_names[ATH79_CLK_END] = { + [ATH79_CLK_CPU] = "cpu", + [ATH79_CLK_DDR] = "ddr", + [ATH79_CLK_AHB] = "ahb", + [ATH79_CLK_REF] = "ref", + [ATH79_CLK_MDIO] = "mdio", +}; + +static const char * __init ath79_clk_name(int type) { - struct clk *clk; - int err; + BUG_ON(type >= ARRAY_SIZE(clk_names) || !clk_names[type]); + return clk_names[type]; +} - clk = clk_register_fixed_rate(NULL, id, NULL, 0, rate); +static void __init __ath79_set_clk(int type, const char *name, struct clk *clk) +{ if (IS_ERR(clk)) - panic("failed to allocate %s clock structure", id); + panic("failed to allocate %s clock structure", clk_names[type]); - err = clk_register_clkdev(clk, id, NULL); - if (err) - panic("unable to register %s clock device", id); + clks[type] = clk; + clk_register_clkdev(clk, name, NULL); +} +static struct clk * __init ath79_set_clk(int type, unsigned long rate) +{ + const char *name = ath79_clk_name(type); + struct clk *clk; + + clk = clk_register_fixed_rate(NULL, name, NULL, 0, rate); + __ath79_set_clk(type, name, clk); return clk; } -static void __init ar71xx_clocks_init(void) +static struct clk * __init ath79_set_ff_clk(int type, const char *parent, + unsigned int mult, unsigned int div) +{ + const char *name = ath79_clk_name(type); + struct clk *clk; + + clk = clk_register_fixed_factor(NULL, name, parent, 0, mult, div); + __ath79_set_clk(type, name, clk); + return clk; +} + +static unsigned long __init ath79_setup_ref_clk(unsigned long rate) +{ + struct clk *clk = clks[ATH79_CLK_REF]; + + if (clk) + rate = clk_get_rate(clk); + else + clk = ath79_set_clk(ATH79_CLK_REF, rate); + + return rate; +} + +static void __init ar71xx_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -64,9 +102,9 @@ static void __init ar71xx_clocks_init(void) u32 freq; u32 div; - ref_rate = AR71XX_BASE_FREQ; + ref_rate = ath79_setup_ref_clk(AR71XX_BASE_FREQ); - pll = ath79_pll_rr(AR71XX_PLL_REG_CPU_CONFIG); + pll = __raw_readl(pll_base + AR71XX_PLL_REG_CPU_CONFIG); div = ((pll >> AR71XX_PLL_FB_SHIFT) & AR71XX_PLL_FB_MASK) + 1; freq = div * ref_rate; @@ -80,31 +118,17 @@ static void __init ar71xx_clocks_init(void) div = (((pll >> AR71XX_AHB_DIV_SHIFT) & AR71XX_AHB_DIV_MASK) + 
1) * 2; ahb_rate = cpu_rate / div; - ath79_add_sys_clkdev("ref", ref_rate); - clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate); - clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate); - clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate); - - clk_add_alias("wdt", NULL, "ahb", NULL); - clk_add_alias("uart", NULL, "ahb", NULL); + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); } -static struct clk * __init ath79_reg_ffclk(const char *name, - const char *parent_name, unsigned int mult, unsigned int div) +static void __init ar724x_clocks_init(void __iomem *pll_base) { - struct clk *clk; - - clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div); - if (IS_ERR(clk)) - panic("failed to allocate %s clock structure", name); - - return clk; -} - -static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base) -{ - u32 pll; u32 mult, div, ddr_div, ahb_div; + u32 pll; + + ath79_setup_ref_clk(AR71XX_BASE_FREQ); pll = __raw_readl(pll_base + AR724X_PLL_REG_CPU_CONFIG); @@ -114,30 +138,14 @@ static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base) ddr_div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1; ahb_div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2; - clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref", mult, div); - clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref", mult, div * ddr_div); - clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref", mult, div * ahb_div); -} - -static void __init ar724x_clocks_init(void) -{ - struct clk *ref_clk; - - ref_clk = ath79_add_sys_clkdev("ref", AR724X_BASE_FREQ); - - ar724x_clk_init(ref_clk, ath79_pll_base); - - /* just make happy plat_time_init() from arch/mips/ath79/setup.c */ - clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL); - clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL); - clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL); - - clk_add_alias("wdt", NULL, "ahb", NULL); - clk_add_alias("uart", NULL, "ahb", NULL); + ath79_set_ff_clk(ATH79_CLK_CPU, "ref", mult, div); + ath79_set_ff_clk(ATH79_CLK_DDR, "ref", mult, div * ddr_div); + ath79_set_ff_clk(ATH79_CLK_AHB, "ref", mult, div * ahb_div); } -static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base) +static void __init ar933x_clocks_init(void __iomem *pll_base) { + unsigned long ref_rate; u32 clock_ctrl; u32 ref_div; u32 ninit_mul; @@ -146,6 +154,15 @@ static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base) u32 cpu_div; u32 ddr_div; u32 ahb_div; + u32 t; + + t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP); + if (t & AR933X_BOOTSTRAP_REF_CLK_40) + ref_rate = (40 * 1000 * 1000); + else + ref_rate = (25 * 1000 * 1000); + + ath79_setup_ref_clk(ref_rate); clock_ctrl = __raw_readl(pll_base + AR933X_PLL_CLOCK_CTRL_REG); if (clock_ctrl & AR933X_PLL_CLOCK_CTRL_BYPASS) { @@ -186,37 +203,12 @@ static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base) AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK) + 1; } - clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref", - ninit_mul, ref_div * out_div * cpu_div); - clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref", - ninit_mul, ref_div * out_div * ddr_div); - clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref", - ninit_mul, ref_div * out_div * ahb_div); -} - -static void __init ar933x_clocks_init(void) -{ - struct clk *ref_clk; - unsigned long ref_rate; - u32 t; - - t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP); - if (t & 
AR933X_BOOTSTRAP_REF_CLK_40) - ref_rate = (40 * 1000 * 1000); - else - ref_rate = (25 * 1000 * 1000); - - ref_clk = ath79_add_sys_clkdev("ref", ref_rate); - - ar9330_clk_init(ref_clk, ath79_pll_base); - - /* just make happy plat_time_init() from arch/mips/ath79/setup.c */ - clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL); - clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL); - clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL); - - clk_add_alias("wdt", NULL, "ahb", NULL); - clk_add_alias("uart", NULL, "ref", NULL); + ath79_set_ff_clk(ATH79_CLK_CPU, "ref", ninit_mul, + ref_div * out_div * cpu_div); + ath79_set_ff_clk(ATH79_CLK_DDR, "ref", ninit_mul, + ref_div * out_div * ddr_div); + ath79_set_ff_clk(ATH79_CLK_AHB, "ref", ninit_mul, + ref_div * out_div * ahb_div); } static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac, @@ -239,7 +231,7 @@ static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac, return ret; } -static void __init ar934x_clocks_init(void) +static void __init ar934x_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -258,6 +250,8 @@ static void __init ar934x_clocks_init(void) else ref_rate = 25 * 1000 * 1000; + ref_rate = ath79_setup_ref_clk(ref_rate); + pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL2_REG); if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) { out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) & @@ -270,7 +264,7 @@ static void __init ar934x_clocks_init(void) AR934X_SRIF_DPLL1_REFDIV_MASK; frac = 1 << 18; } else { - pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG); + pll = __raw_readl(pll_base + AR934X_PLL_CPU_CONFIG_REG); out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) & AR934X_PLL_CPU_CONFIG_OUTDIV_MASK; ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) & @@ -297,7 +291,7 @@ static void __init ar934x_clocks_init(void) AR934X_SRIF_DPLL1_REFDIV_MASK; frac = 1 << 18; } else { - pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG); + pll = __raw_readl(pll_base + AR934X_PLL_DDR_CONFIG_REG); out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) & AR934X_PLL_DDR_CONFIG_OUTDIV_MASK; ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) & @@ -312,7 +306,7 @@ static void __init ar934x_clocks_init(void) ddr_pll = ar934x_get_pll_freq(ref_rate, ref_div, nint, nfrac, frac, out_div); - clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG); + clk_ctrl = __raw_readl(pll_base + AR934X_PLL_CPU_DDR_CLK_CTRL_REG); postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) & AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK; @@ -344,18 +338,18 @@ static void __init ar934x_clocks_init(void) else ahb_rate = cpu_pll / (postdiv + 1); - ath79_add_sys_clkdev("ref", ref_rate); - clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate); - clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate); - clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate); + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); - clk_add_alias("wdt", NULL, "ref", NULL); - clk_add_alias("uart", NULL, "ref", NULL); + clk_ctrl = __raw_readl(pll_base + AR934X_PLL_SWITCH_CLOCK_CONTROL_REG); + if (clk_ctrl & AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL) + ath79_set_clk(ATH79_CLK_MDIO, 100 * 1000 * 1000); iounmap(dpll_base); } -static void __init qca953x_clocks_init(void) +static void __init qca953x_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -371,7 +365,9 @@ static void __init 
qca953x_clocks_init(void) else ref_rate = 25 * 1000 * 1000; - pll = ath79_pll_rr(QCA953X_PLL_CPU_CONFIG_REG); + ref_rate = ath79_setup_ref_clk(ref_rate); + + pll = __raw_readl(pll_base + QCA953X_PLL_CPU_CONFIG_REG); out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) & QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) & @@ -385,7 +381,7 @@ static void __init qca953x_clocks_init(void) cpu_pll += frac * (ref_rate >> 6) / ref_div; cpu_pll /= (1 << out_div); - pll = ath79_pll_rr(QCA953X_PLL_DDR_CONFIG_REG); + pll = __raw_readl(pll_base + QCA953X_PLL_DDR_CONFIG_REG); out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) & QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) & @@ -399,7 +395,7 @@ static void __init qca953x_clocks_init(void) ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4); ddr_pll /= (1 << out_div); - clk_ctrl = ath79_pll_rr(QCA953X_PLL_CLK_CTRL_REG); + clk_ctrl = __raw_readl(pll_base + QCA953X_PLL_CLK_CTRL_REG); postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) & QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK; @@ -431,16 +427,12 @@ static void __init qca953x_clocks_init(void) else ahb_rate = cpu_pll / (postdiv + 1); - ath79_add_sys_clkdev("ref", ref_rate); - ath79_add_sys_clkdev("cpu", cpu_rate); - ath79_add_sys_clkdev("ddr", ddr_rate); - ath79_add_sys_clkdev("ahb", ahb_rate); - - clk_add_alias("wdt", NULL, "ref", NULL); - clk_add_alias("uart", NULL, "ref", NULL); + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); } -static void __init qca955x_clocks_init(void) +static void __init qca955x_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -456,7 +448,9 @@ static void __init qca955x_clocks_init(void) else ref_rate = 25 * 1000 * 1000; - pll = ath79_pll_rr(QCA955X_PLL_CPU_CONFIG_REG); + ref_rate = ath79_setup_ref_clk(ref_rate); + + pll = __raw_readl(pll_base + QCA955X_PLL_CPU_CONFIG_REG); out_div = (pll >> QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) & QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) & @@ -470,7 +464,7 @@ static void __init qca955x_clocks_init(void) cpu_pll += frac * ref_rate / (ref_div * (1 << 6)); cpu_pll /= (1 << out_div); - pll = ath79_pll_rr(QCA955X_PLL_DDR_CONFIG_REG); + pll = __raw_readl(pll_base + QCA955X_PLL_DDR_CONFIG_REG); out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) & QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) & @@ -484,7 +478,7 @@ static void __init qca955x_clocks_init(void) ddr_pll += frac * ref_rate / (ref_div * (1 << 10)); ddr_pll /= (1 << out_div); - clk_ctrl = ath79_pll_rr(QCA955X_PLL_CLK_CTRL_REG); + clk_ctrl = __raw_readl(pll_base + QCA955X_PLL_CLK_CTRL_REG); postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) & QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK; @@ -516,16 +510,12 @@ static void __init qca955x_clocks_init(void) else ahb_rate = cpu_pll / (postdiv + 1); - ath79_add_sys_clkdev("ref", ref_rate); - clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate); - clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate); - clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate); - - clk_add_alias("wdt", NULL, "ref", NULL); - clk_add_alias("uart", NULL, "ref", NULL); + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); } -static void __init qca956x_clocks_init(void) 
+static void __init qca956x_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -551,13 +541,15 @@ static void __init qca956x_clocks_init(void) else ref_rate = 25 * 1000 * 1000; - pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG_REG); + ref_rate = ath79_setup_ref_clk(ref_rate); + + pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG_REG); out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) & QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) & QCA956X_PLL_CPU_CONFIG_REFDIV_MASK; - pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG1_REG); + pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG1_REG); nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) & QCA956X_PLL_CPU_CONFIG1_NINT_MASK; hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) & @@ -570,12 +562,12 @@ static void __init qca956x_clocks_init(void) cpu_pll += (hfrac >> 13) * ref_rate / ref_div; cpu_pll /= (1 << out_div); - pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG_REG); + pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG_REG); out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) & QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) & QCA956X_PLL_DDR_CONFIG_REFDIV_MASK; - pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG1_REG); + pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG1_REG); nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) & QCA956X_PLL_DDR_CONFIG1_NINT_MASK; hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) & @@ -588,7 +580,7 @@ static void __init qca956x_clocks_init(void) ddr_pll += (hfrac >> 13) * ref_rate / ref_div; ddr_pll /= (1 << out_div); - clk_ctrl = ath79_pll_rr(QCA956X_PLL_CLK_CTRL_REG); + clk_ctrl = __raw_readl(pll_base + QCA956X_PLL_CLK_CTRL_REG); postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) & QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK; @@ -620,72 +612,19 @@ static void __init qca956x_clocks_init(void) else ahb_rate = cpu_pll / (postdiv + 1); - ath79_add_sys_clkdev("ref", ref_rate); - ath79_add_sys_clkdev("cpu", cpu_rate); - ath79_add_sys_clkdev("ddr", ddr_rate); - ath79_add_sys_clkdev("ahb", ahb_rate); - - clk_add_alias("wdt", NULL, "ref", NULL); - clk_add_alias("uart", NULL, "ref", NULL); -} - -void __init ath79_clocks_init(void) -{ - if (soc_is_ar71xx()) - ar71xx_clocks_init(); - else if (soc_is_ar724x() || soc_is_ar913x()) - ar724x_clocks_init(); - else if (soc_is_ar933x()) - ar933x_clocks_init(); - else if (soc_is_ar934x()) - ar934x_clocks_init(); - else if (soc_is_qca953x()) - qca953x_clocks_init(); - else if (soc_is_qca955x()) - qca955x_clocks_init(); - else if (soc_is_qca956x() || soc_is_tp9343()) - qca956x_clocks_init(); - else - BUG(); -} - -unsigned long __init -ath79_get_sys_clk_rate(const char *id) -{ - struct clk *clk; - unsigned long rate; - - clk = clk_get(NULL, id); - if (IS_ERR(clk)) - panic("unable to get %s clock, err=%d", id, (int) PTR_ERR(clk)); - - rate = clk_get_rate(clk); - clk_put(clk); - - return rate; + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); } -#ifdef CONFIG_OF static void __init ath79_clocks_init_dt(struct device_node *np) { - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); -} - -CLK_OF_DECLARE(ar7100, "qca,ar7100-pll", ath79_clocks_init_dt); -CLK_OF_DECLARE(ar7240, "qca,ar7240-pll", ath79_clocks_init_dt); -CLK_OF_DECLARE(ar9340, "qca,ar9340-pll", ath79_clocks_init_dt); -CLK_OF_DECLARE(ar9550, "qca,qca9550-pll", ath79_clocks_init_dt); - -static void __init 
ath79_clocks_init_dt_ng(struct device_node *np) -{ struct clk *ref_clk; void __iomem *pll_base; ref_clk = of_clk_get(np, 0); - if (IS_ERR(ref_clk)) { - pr_err("%pOF: of_clk_get failed\n", np); - goto err; - } + if (!IS_ERR(ref_clk)) + clks[ATH79_CLK_REF] = ref_clk; pll_base = of_iomap(np, 0); if (!pll_base) { @@ -693,14 +632,24 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np) goto err_clk; } - if (of_device_is_compatible(np, "qca,ar9130-pll")) - ar724x_clk_init(ref_clk, pll_base); + if (of_device_is_compatible(np, "qca,ar7100-pll")) + ar71xx_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,ar7240-pll") || + of_device_is_compatible(np, "qca,ar9130-pll")) + ar724x_clocks_init(pll_base); else if (of_device_is_compatible(np, "qca,ar9330-pll")) - ar9330_clk_init(ref_clk, pll_base); - else { - pr_err("%pOF: could not find any appropriate clk_init()\n", np); - goto err_iounmap; - } + ar933x_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,ar9340-pll")) + ar934x_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,qca9530-pll")) + qca953x_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,qca9550-pll")) + qca955x_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,qca9560-pll")) + qca956x_clocks_init(pll_base); + + if (!clks[ATH79_CLK_MDIO]) + clks[ATH79_CLK_MDIO] = clks[ATH79_CLK_REF]; if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) { pr_err("%pOF: could not register clk provider\n", np); @@ -714,10 +663,13 @@ err_iounmap: err_clk: clk_put(ref_clk); - -err: - return; } -CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt_ng); -CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt_ng); -#endif + +CLK_OF_DECLARE(ar7100_clk, "qca,ar7100-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar7240_clk, "qca,ar7240-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9340_clk, "qca,ar9340-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9530_clk, "qca,qca9530-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9550_clk, "qca,qca9550-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9560_clk, "qca,qca9560-pll", ath79_clocks_init_dt); diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h index 870c6b2e97e8..25b96f59e8e8 100644 --- a/arch/mips/ath79/common.h +++ b/arch/mips/ath79/common.h @@ -19,11 +19,6 @@ #define ATH79_MEM_SIZE_MIN (2 * 1024 * 1024) #define ATH79_MEM_SIZE_MAX (256 * 1024 * 1024) -void ath79_clocks_init(void); -unsigned long ath79_get_sys_clk_rate(const char *id); - void ath79_ddr_ctrl_init(void); -void ath79_gpio_init(void); - #endif /* __ATH79_COMMON_H */ diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c deleted file mode 100644 index 9d0172a4dc69..000000000000 --- a/arch/mips/ath79/dev-common.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X common devices - * - * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * Parts of this file are based on Atheros' 2.6.15 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/platform_data/gpio-ath79.h> -#include <linux/serial_8250.h> -#include <linux/clk.h> -#include <linux/err.h> - -#include <asm/mach-ath79/ath79.h> -#include <asm/mach-ath79/ar71xx_regs.h> -#include "common.h" -#include "dev-common.h" - -static struct resource ath79_uart_resources[] = { - { - .start = AR71XX_UART_BASE, - .end = AR71XX_UART_BASE + AR71XX_UART_SIZE - 1, - .flags = IORESOURCE_MEM, - }, -}; - -#define AR71XX_UART_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP) -static struct plat_serial8250_port ath79_uart_data[] = { - { - .mapbase = AR71XX_UART_BASE, - .irq = ATH79_MISC_IRQ(3), - .flags = AR71XX_UART_FLAGS, - .iotype = UPIO_MEM32, - .regshift = 2, - }, { - /* terminating entry */ - } -}; - -static struct platform_device ath79_uart_device = { - .name = "serial8250", - .id = PLAT8250_DEV_PLATFORM, - .resource = ath79_uart_resources, - .num_resources = ARRAY_SIZE(ath79_uart_resources), - .dev = { - .platform_data = ath79_uart_data - }, -}; - -static struct resource ar933x_uart_resources[] = { - { - .start = AR933X_UART_BASE, - .end = AR933X_UART_BASE + AR71XX_UART_SIZE - 1, - .flags = IORESOURCE_MEM, - }, - { - .start = ATH79_MISC_IRQ(3), - .end = ATH79_MISC_IRQ(3), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ar933x_uart_device = { - .name = "ar933x-uart", - .id = -1, - .resource = ar933x_uart_resources, - .num_resources = ARRAY_SIZE(ar933x_uart_resources), -}; - -void __init ath79_register_uart(void) -{ - unsigned long uart_clk_rate; - - uart_clk_rate = ath79_get_sys_clk_rate("uart"); - - if (soc_is_ar71xx() || - soc_is_ar724x() || - soc_is_ar913x() || - soc_is_ar934x() || - soc_is_qca955x()) { - ath79_uart_data[0].uartclk = uart_clk_rate; - platform_device_register(&ath79_uart_device); - } else if (soc_is_ar933x()) { - platform_device_register(&ar933x_uart_device); - } else { - BUG(); - } -} - -void __init ath79_register_wdt(void) -{ - struct resource res; - - memset(&res, 0, sizeof(res)); - - res.flags = IORESOURCE_MEM; - res.start = AR71XX_RESET_BASE + AR71XX_RESET_REG_WDOG_CTRL; - res.end = res.start + 0x8 - 1; - - platform_device_register_simple("ath79-wdt", -1, &res, 1); -} - -static struct ath79_gpio_platform_data ath79_gpio_pdata; - -static struct resource ath79_gpio_resources[] = { - { - .flags = IORESOURCE_MEM, - .start = AR71XX_GPIO_BASE, - .end = AR71XX_GPIO_BASE + AR71XX_GPIO_SIZE - 1, - }, - { - .start = ATH79_MISC_IRQ(2), - .end = ATH79_MISC_IRQ(2), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ath79_gpio_device = { - .name = "ath79-gpio", - .id = -1, - .resource = ath79_gpio_resources, - .num_resources = ARRAY_SIZE(ath79_gpio_resources), - .dev = { - .platform_data = &ath79_gpio_pdata - }, -}; - -void __init ath79_gpio_init(void) -{ - if (soc_is_ar71xx()) { - ath79_gpio_pdata.ngpios = AR71XX_GPIO_COUNT; - } else if (soc_is_ar7240()) { - ath79_gpio_pdata.ngpios = AR7240_GPIO_COUNT; - } else if (soc_is_ar7241() || soc_is_ar7242()) { - ath79_gpio_pdata.ngpios = AR7241_GPIO_COUNT; - } else if (soc_is_ar913x()) { - ath79_gpio_pdata.ngpios = AR913X_GPIO_COUNT; - } else if (soc_is_ar933x()) { - ath79_gpio_pdata.ngpios = AR933X_GPIO_COUNT; - } else if (soc_is_ar934x()) { - ath79_gpio_pdata.ngpios = AR934X_GPIO_COUNT; - ath79_gpio_pdata.oe_inverted = 1; - } else if (soc_is_qca955x()) { - ath79_gpio_pdata.ngpios = QCA955X_GPIO_COUNT; - ath79_gpio_pdata.oe_inverted = 1; - } else { - BUG(); - } - - 
platform_device_register(&ath79_gpio_device); -} diff --git a/arch/mips/ath79/dev-common.h b/arch/mips/ath79/dev-common.h deleted file mode 100644 index 0f514e1affce..000000000000 --- a/arch/mips/ath79/dev-common.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X common devices - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_DEV_COMMON_H -#define _ATH79_DEV_COMMON_H - -void ath79_register_uart(void); -void ath79_register_wdt(void); - -#endif /* _ATH79_DEV_COMMON_H */ diff --git a/arch/mips/ath79/dev-gpio-buttons.c b/arch/mips/ath79/dev-gpio-buttons.c deleted file mode 100644 index 366b35fb164d..000000000000 --- a/arch/mips/ath79/dev-gpio-buttons.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X GPIO button support - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include "linux/init.h" -#include "linux/slab.h" -#include <linux/platform_device.h> - -#include "dev-gpio-buttons.h" - -void __init ath79_register_gpio_keys_polled(int id, - unsigned poll_interval, - unsigned nbuttons, - struct gpio_keys_button *buttons) -{ - struct platform_device *pdev; - struct gpio_keys_platform_data pdata; - struct gpio_keys_button *p; - int err; - - p = kmemdup(buttons, nbuttons * sizeof(*p), GFP_KERNEL); - if (!p) - return; - - pdev = platform_device_alloc("gpio-keys-polled", id); - if (!pdev) - goto err_free_buttons; - - memset(&pdata, 0, sizeof(pdata)); - pdata.poll_interval = poll_interval; - pdata.nbuttons = nbuttons; - pdata.buttons = p; - - err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (err) - goto err_put_pdev; - - err = platform_device_add(pdev); - if (err) - goto err_put_pdev; - - return; - -err_put_pdev: - platform_device_put(pdev); - -err_free_buttons: - kfree(p); -} diff --git a/arch/mips/ath79/dev-gpio-buttons.h b/arch/mips/ath79/dev-gpio-buttons.h deleted file mode 100644 index 481847ac1cba..000000000000 --- a/arch/mips/ath79/dev-gpio-buttons.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X GPIO button support - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#ifndef _ATH79_DEV_GPIO_BUTTONS_H -#define _ATH79_DEV_GPIO_BUTTONS_H - -#include <linux/input.h> -#include <linux/gpio_keys.h> - -void ath79_register_gpio_keys_polled(int id, - unsigned poll_interval, - unsigned nbuttons, - struct gpio_keys_button *buttons); - -#endif /* _ATH79_DEV_GPIO_BUTTONS_H */ diff --git a/arch/mips/ath79/dev-leds-gpio.c b/arch/mips/ath79/dev-leds-gpio.c deleted file mode 100644 index dcb1debcefb8..000000000000 --- a/arch/mips/ath79/dev-leds-gpio.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X common GPIO LEDs support - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/platform_device.h> - -#include "dev-leds-gpio.h" - -void __init ath79_register_leds_gpio(int id, - unsigned num_leds, - struct gpio_led *leds) -{ - struct platform_device *pdev; - struct gpio_led_platform_data pdata; - struct gpio_led *p; - int err; - - p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL); - if (!p) - return; - - pdev = platform_device_alloc("leds-gpio", id); - if (!pdev) - goto err_free_leds; - - memset(&pdata, 0, sizeof(pdata)); - pdata.num_leds = num_leds; - pdata.leds = p; - - err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (err) - goto err_put_pdev; - - err = platform_device_add(pdev); - if (err) - goto err_put_pdev; - - return; - -err_put_pdev: - platform_device_put(pdev); - -err_free_leds: - kfree(p); -} diff --git a/arch/mips/ath79/dev-leds-gpio.h b/arch/mips/ath79/dev-leds-gpio.h deleted file mode 100644 index 6e5d8851ebcf..000000000000 --- a/arch/mips/ath79/dev-leds-gpio.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X common GPIO LEDs support - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_DEV_LEDS_GPIO_H -#define _ATH79_DEV_LEDS_GPIO_H - -#include <linux/leds.h> - -void ath79_register_leds_gpio(int id, - unsigned num_leds, - struct gpio_led *leds); - -#endif /* _ATH79_DEV_LEDS_GPIO_H */ diff --git a/arch/mips/ath79/dev-spi.c b/arch/mips/ath79/dev-spi.c deleted file mode 100644 index aa30163efbfd..000000000000 --- a/arch/mips/ath79/dev-spi.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X SPI controller device - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include <linux/platform_device.h> -#include <asm/mach-ath79/ar71xx_regs.h> -#include "dev-spi.h" - -static struct resource ath79_spi_resources[] = { - { - .start = AR71XX_SPI_BASE, - .end = AR71XX_SPI_BASE + AR71XX_SPI_SIZE - 1, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device ath79_spi_device = { - .name = "ath79-spi", - .id = -1, - .resource = ath79_spi_resources, - .num_resources = ARRAY_SIZE(ath79_spi_resources), -}; - -void __init ath79_register_spi(struct ath79_spi_platform_data *pdata, - struct spi_board_info const *info, - unsigned n) -{ - spi_register_board_info(info, n); - ath79_spi_device.dev.platform_data = pdata; - platform_device_register(&ath79_spi_device); -} diff --git a/arch/mips/ath79/dev-spi.h b/arch/mips/ath79/dev-spi.h deleted file mode 100644 index d732565ca736..000000000000 --- a/arch/mips/ath79/dev-spi.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X SPI controller device - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_DEV_SPI_H -#define _ATH79_DEV_SPI_H - -#include <linux/spi/spi.h> -#include <asm/mach-ath79/ath79_spi_platform.h> - -void ath79_register_spi(struct ath79_spi_platform_data *pdata, - struct spi_board_info const *info, - unsigned n); - -#endif /* _ATH79_DEV_SPI_H */ diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c deleted file mode 100644 index 8227265bcc2d..000000000000 --- a/arch/mips/ath79/dev-usb.c +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Atheros AR7XXX/AR9XXX USB Host Controller device - * - * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * Parts of this file are based on Atheros' 2.6.15 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/dma-mapping.h> -#include <linux/platform_device.h> -#include <linux/usb/ehci_pdriver.h> -#include <linux/usb/ohci_pdriver.h> - -#include <asm/mach-ath79/ath79.h> -#include <asm/mach-ath79/ar71xx_regs.h> -#include "common.h" -#include "dev-usb.h" - -static u64 ath79_usb_dmamask = DMA_BIT_MASK(32); - -static struct usb_ohci_pdata ath79_ohci_pdata = { -}; - -static struct usb_ehci_pdata ath79_ehci_pdata_v1 = { - .has_synopsys_hc_bug = 1, -}; - -static struct usb_ehci_pdata ath79_ehci_pdata_v2 = { - .caps_offset = 0x100, - .has_tt = 1, -}; - -static void __init ath79_usb_register(const char *name, int id, - unsigned long base, unsigned long size, - int irq, const void *data, - size_t data_size) -{ - struct resource res[2]; - struct platform_device *pdev; - - memset(res, 0, sizeof(res)); - - res[0].flags = IORESOURCE_MEM; - res[0].start = base; - res[0].end = base + size - 1; - - res[1].flags = IORESOURCE_IRQ; - res[1].start = irq; - res[1].end = irq; - - pdev = platform_device_register_resndata(NULL, name, id, - res, ARRAY_SIZE(res), - data, data_size); - - if (IS_ERR(pdev)) { - pr_err("ath79: unable to register USB at %08lx, err=%d\n", - base, (int) PTR_ERR(pdev)); - return; - } - - pdev->dev.dma_mask = &ath79_usb_dmamask; - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); -} - -#define AR71XX_USB_RESET_MASK (AR71XX_RESET_USB_HOST | \ - AR71XX_RESET_USB_PHY | \ - AR71XX_RESET_USB_OHCI_DLL) - -static void __init ath79_usb_setup(void) -{ - void __iomem *usb_ctrl_base; - - ath79_device_reset_set(AR71XX_USB_RESET_MASK); - mdelay(1000); - ath79_device_reset_clear(AR71XX_USB_RESET_MASK); - - usb_ctrl_base = ioremap(AR71XX_USB_CTRL_BASE, AR71XX_USB_CTRL_SIZE); - - /* Turning on the Buff and Desc swap bits */ - __raw_writel(0xf0000, usb_ctrl_base + AR71XX_USB_CTRL_REG_CONFIG); - - /* WAR for HW bug. Here it adjusts the duration between two SOFS */ - __raw_writel(0x20c00, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ); - - iounmap(usb_ctrl_base); - - mdelay(900); - - ath79_usb_register("ohci-platform", -1, - AR71XX_OHCI_BASE, AR71XX_OHCI_SIZE, - ATH79_MISC_IRQ(6), - &ath79_ohci_pdata, sizeof(ath79_ohci_pdata)); - - ath79_usb_register("ehci-platform", -1, - AR71XX_EHCI_BASE, AR71XX_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v1, sizeof(ath79_ehci_pdata_v1)); -} - -static void __init ar7240_usb_setup(void) -{ - void __iomem *usb_ctrl_base; - - ath79_device_reset_clear(AR7240_RESET_OHCI_DLL); - ath79_device_reset_set(AR7240_RESET_USB_HOST); - - mdelay(1000); - - ath79_device_reset_set(AR7240_RESET_OHCI_DLL); - ath79_device_reset_clear(AR7240_RESET_USB_HOST); - - usb_ctrl_base = ioremap(AR7240_USB_CTRL_BASE, AR7240_USB_CTRL_SIZE); - - /* WAR for HW bug. 
Here it adjusts the duration between two SOFS */ - __raw_writel(0x3, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ); - - iounmap(usb_ctrl_base); - - ath79_usb_register("ohci-platform", -1, - AR7240_OHCI_BASE, AR7240_OHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ohci_pdata, sizeof(ath79_ohci_pdata)); -} - -static void __init ar724x_usb_setup(void) -{ - ath79_device_reset_set(AR724X_RESET_USBSUS_OVERRIDE); - mdelay(10); - - ath79_device_reset_clear(AR724X_RESET_USB_HOST); - mdelay(10); - - ath79_device_reset_clear(AR724X_RESET_USB_PHY); - mdelay(10); - - ath79_usb_register("ehci-platform", -1, - AR724X_EHCI_BASE, AR724X_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -static void __init ar913x_usb_setup(void) -{ - ath79_device_reset_set(AR913X_RESET_USBSUS_OVERRIDE); - mdelay(10); - - ath79_device_reset_clear(AR913X_RESET_USB_HOST); - mdelay(10); - - ath79_device_reset_clear(AR913X_RESET_USB_PHY); - mdelay(10); - - ath79_usb_register("ehci-platform", -1, - AR913X_EHCI_BASE, AR913X_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -static void __init ar933x_usb_setup(void) -{ - ath79_device_reset_set(AR933X_RESET_USBSUS_OVERRIDE); - mdelay(10); - - ath79_device_reset_clear(AR933X_RESET_USB_HOST); - mdelay(10); - - ath79_device_reset_clear(AR933X_RESET_USB_PHY); - mdelay(10); - - ath79_usb_register("ehci-platform", -1, - AR933X_EHCI_BASE, AR933X_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -static void __init ar934x_usb_setup(void) -{ - u32 bootstrap; - - bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP); - if (bootstrap & AR934X_BOOTSTRAP_USB_MODE_DEVICE) - return; - - ath79_device_reset_set(AR934X_RESET_USBSUS_OVERRIDE); - udelay(1000); - - ath79_device_reset_clear(AR934X_RESET_USB_PHY); - udelay(1000); - - ath79_device_reset_clear(AR934X_RESET_USB_PHY_ANALOG); - udelay(1000); - - ath79_device_reset_clear(AR934X_RESET_USB_HOST); - udelay(1000); - - ath79_usb_register("ehci-platform", -1, - AR934X_EHCI_BASE, AR934X_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -static void __init qca955x_usb_setup(void) -{ - ath79_usb_register("ehci-platform", 0, - QCA955X_EHCI0_BASE, QCA955X_EHCI_SIZE, - ATH79_IP3_IRQ(0), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); - - ath79_usb_register("ehci-platform", 1, - QCA955X_EHCI1_BASE, QCA955X_EHCI_SIZE, - ATH79_IP3_IRQ(1), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -void __init ath79_register_usb(void) -{ - if (soc_is_ar71xx()) - ath79_usb_setup(); - else if (soc_is_ar7240()) - ar7240_usb_setup(); - else if (soc_is_ar7241() || soc_is_ar7242()) - ar724x_usb_setup(); - else if (soc_is_ar913x()) - ar913x_usb_setup(); - else if (soc_is_ar933x()) - ar933x_usb_setup(); - else if (soc_is_ar934x()) - ar934x_usb_setup(); - else if (soc_is_qca955x()) - qca955x_usb_setup(); - else - BUG(); -} diff --git a/arch/mips/ath79/dev-usb.h b/arch/mips/ath79/dev-usb.h deleted file mode 100644 index 4b86a69ca080..000000000000 --- a/arch/mips/ath79/dev-usb.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X USB Host Controller support - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#ifndef _ATH79_DEV_USB_H -#define _ATH79_DEV_USB_H - -void ath79_register_usb(void); - -#endif /* _ATH79_DEV_USB_H */ diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c deleted file mode 100644 index da190b1b87ce..000000000000 --- a/arch/mips/ath79/dev-wmac.c +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Atheros AR913X/AR933X SoC built-in WMAC device support - * - * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com> - * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * Parts of this file are based on Atheros 2.6.15/2.6.31 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/platform_device.h> -#include <linux/ath9k_platform.h> - -#include <asm/mach-ath79/ath79.h> -#include <asm/mach-ath79/ar71xx_regs.h> -#include "dev-wmac.h" - -static struct ath9k_platform_data ath79_wmac_data; - -static struct resource ath79_wmac_resources[] = { - { - /* .start and .end fields are filled dynamically */ - .flags = IORESOURCE_MEM, - }, { - /* .start and .end fields are filled dynamically */ - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ath79_wmac_device = { - .name = "ath9k", - .id = -1, - .resource = ath79_wmac_resources, - .num_resources = ARRAY_SIZE(ath79_wmac_resources), - .dev = { - .platform_data = &ath79_wmac_data, - }, -}; - -static void __init ar913x_wmac_setup(void) -{ - /* reset the WMAC */ - ath79_device_reset_set(AR913X_RESET_AMBA2WMAC); - mdelay(10); - - ath79_device_reset_clear(AR913X_RESET_AMBA2WMAC); - mdelay(10); - - ath79_wmac_resources[0].start = AR913X_WMAC_BASE; - ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1; - ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2); - ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2); -} - - -static int ar933x_wmac_reset(void) -{ - ath79_device_reset_set(AR933X_RESET_WMAC); - ath79_device_reset_clear(AR933X_RESET_WMAC); - - return 0; -} - -static int ar933x_r1_get_wmac_revision(void) -{ - return ath79_soc_rev; -} - -static void __init ar933x_wmac_setup(void) -{ - u32 t; - - ar933x_wmac_reset(); - - ath79_wmac_device.name = "ar933x_wmac"; - - ath79_wmac_resources[0].start = AR933X_WMAC_BASE; - ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1; - ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2); - ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2); - - t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP); - if (t & AR933X_BOOTSTRAP_REF_CLK_40) - ath79_wmac_data.is_clk_25mhz = false; - else - ath79_wmac_data.is_clk_25mhz = true; - - if (ath79_soc_rev == 1) - ath79_wmac_data.get_mac_revision = ar933x_r1_get_wmac_revision; - - ath79_wmac_data.external_reset = ar933x_wmac_reset; -} - -static void ar934x_wmac_setup(void) -{ - u32 t; - - ath79_wmac_device.name = "ar934x_wmac"; - - ath79_wmac_resources[0].start = AR934X_WMAC_BASE; - ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1; - ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1); - ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1); - - t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP); - if (t & AR934X_BOOTSTRAP_REF_CLK_40) - ath79_wmac_data.is_clk_25mhz = false; - else - ath79_wmac_data.is_clk_25mhz = true; -} - -static void qca955x_wmac_setup(void) -{ - u32 t; - - ath79_wmac_device.name = 
"qca955x_wmac"; - - ath79_wmac_resources[0].start = QCA955X_WMAC_BASE; - ath79_wmac_resources[0].end = QCA955X_WMAC_BASE + QCA955X_WMAC_SIZE - 1; - ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1); - ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1); - - t = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP); - if (t & QCA955X_BOOTSTRAP_REF_CLK_40) - ath79_wmac_data.is_clk_25mhz = false; - else - ath79_wmac_data.is_clk_25mhz = true; -} - -void __init ath79_register_wmac(u8 *cal_data) -{ - if (soc_is_ar913x()) - ar913x_wmac_setup(); - else if (soc_is_ar933x()) - ar933x_wmac_setup(); - else if (soc_is_ar934x()) - ar934x_wmac_setup(); - else if (soc_is_qca955x()) - qca955x_wmac_setup(); - else - BUG(); - - if (cal_data) - memcpy(ath79_wmac_data.eeprom_data, cal_data, - sizeof(ath79_wmac_data.eeprom_data)); - - platform_device_register(&ath79_wmac_device); -} diff --git a/arch/mips/ath79/dev-wmac.h b/arch/mips/ath79/dev-wmac.h deleted file mode 100644 index c9cd8709f090..000000000000 --- a/arch/mips/ath79/dev-wmac.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Atheros AR913X/AR933X SoC built-in WMAC device support - * - * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_DEV_WMAC_H -#define _ATH79_DEV_WMAC_H - -void ath79_register_wmac(u8 *cal_data); - -#endif /* _ATH79_DEV_WMAC_H */ diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c deleted file mode 100644 index 2dfff1f19004..000000000000 --- a/arch/mips/ath79/irq.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Atheros AR71xx/AR724x/AR913x specific interrupt handling - * - * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com> - * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/irqchip.h> -#include <linux/of_irq.h> - -#include <asm/irq_cpu.h> -#include <asm/mipsregs.h> - -#include <asm/mach-ath79/ath79.h> -#include <asm/mach-ath79/ar71xx_regs.h> -#include "common.h" -#include "machtypes.h" - - -static void ar934x_ip2_irq_dispatch(struct irq_desc *desc) -{ - u32 status; - - status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS); - - if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) { - ath79_ddr_wb_flush(3); - generic_handle_irq(ATH79_IP2_IRQ(0)); - } else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) { - ath79_ddr_wb_flush(4); - generic_handle_irq(ATH79_IP2_IRQ(1)); - } else { - spurious_interrupt(); - } -} - -static void ar934x_ip2_irq_init(void) -{ - int i; - - for (i = ATH79_IP2_IRQ_BASE; - i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++) - irq_set_chip_and_handler(i, &dummy_irq_chip, - handle_level_irq); - - irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch); -} - -static void qca955x_ip2_irq_dispatch(struct irq_desc *desc) -{ - u32 status; - - status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS); - status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL; - - if (status == 0) { - spurious_interrupt(); - return; - } - - if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) { - /* TODO: flush DDR? */ - generic_handle_irq(ATH79_IP2_IRQ(0)); - } - - if (status & QCA955X_EXT_INT_WMAC_ALL) { - /* TODO: flush DDR? */ - generic_handle_irq(ATH79_IP2_IRQ(1)); - } -} - -static void qca955x_ip3_irq_dispatch(struct irq_desc *desc) -{ - u32 status; - - status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS); - status &= QCA955X_EXT_INT_PCIE_RC2_ALL | - QCA955X_EXT_INT_USB1 | - QCA955X_EXT_INT_USB2; - - if (status == 0) { - spurious_interrupt(); - return; - } - - if (status & QCA955X_EXT_INT_USB1) { - /* TODO: flush DDR? */ - generic_handle_irq(ATH79_IP3_IRQ(0)); - } - - if (status & QCA955X_EXT_INT_USB2) { - /* TODO: flush DDR? */ - generic_handle_irq(ATH79_IP3_IRQ(1)); - } - - if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) { - /* TODO: flush DDR? 
*/ - generic_handle_irq(ATH79_IP3_IRQ(2)); - } -} - -static void qca955x_irq_init(void) -{ - int i; - - for (i = ATH79_IP2_IRQ_BASE; - i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++) - irq_set_chip_and_handler(i, &dummy_irq_chip, - handle_level_irq); - - irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch); - - for (i = ATH79_IP3_IRQ_BASE; - i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++) - irq_set_chip_and_handler(i, &dummy_irq_chip, - handle_level_irq); - - irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch); -} - -void __init arch_init_irq(void) -{ - unsigned irq_wb_chan2 = -1; - unsigned irq_wb_chan3 = -1; - bool misc_is_ar71xx; - - if (mips_machtype == ATH79_MACH_GENERIC_OF) { - irqchip_init(); - return; - } - - if (soc_is_ar71xx() || soc_is_ar724x() || - soc_is_ar913x() || soc_is_ar933x()) { - irq_wb_chan2 = 3; - irq_wb_chan3 = 2; - } else if (soc_is_ar934x()) { - irq_wb_chan3 = 2; - } - - ath79_cpu_irq_init(irq_wb_chan2, irq_wb_chan3); - - if (soc_is_ar71xx() || soc_is_ar913x()) - misc_is_ar71xx = true; - else if (soc_is_ar724x() || - soc_is_ar933x() || - soc_is_ar934x() || - soc_is_qca955x()) - misc_is_ar71xx = false; - else - BUG(); - ath79_misc_irq_init( - ath79_reset_base + AR71XX_RESET_REG_MISC_INT_STATUS, - ATH79_CPU_IRQ(6), ATH79_MISC_IRQ_BASE, misc_is_ar71xx); - - if (soc_is_ar934x()) - ar934x_ip2_irq_init(); - else if (soc_is_qca955x()) - qca955x_irq_init(); -} diff --git a/arch/mips/ath79/mach-ap121.c b/arch/mips/ath79/mach-ap121.c deleted file mode 100644 index 1bf73f2a069d..000000000000 --- a/arch/mips/ath79/mach-ap121.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Atheros AP121 board support - * - * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" -#include "dev-wmac.h" - -#define AP121_GPIO_LED_WLAN 0 -#define AP121_GPIO_LED_USB 1 - -#define AP121_GPIO_BTN_JUMPSTART 11 -#define AP121_GPIO_BTN_RESET 12 - -#define AP121_KEYS_POLL_INTERVAL 20 /* msecs */ -#define AP121_KEYS_DEBOUNCE_INTERVAL (3 * AP121_KEYS_POLL_INTERVAL) - -#define AP121_CAL_DATA_ADDR 0x1fff1000 - -static struct gpio_led ap121_leds_gpio[] __initdata = { - { - .name = "ap121:green:usb", - .gpio = AP121_GPIO_LED_USB, - .active_low = 0, - }, - { - .name = "ap121:green:wlan", - .gpio = AP121_GPIO_LED_WLAN, - .active_low = 0, - }, -}; - -static struct gpio_keys_button ap121_gpio_keys[] __initdata = { - { - .desc = "jumpstart button", - .type = EV_KEY, - .code = KEY_WPS_BUTTON, - .debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP121_GPIO_BTN_JUMPSTART, - .active_low = 1, - }, - { - .desc = "reset button", - .type = EV_KEY, - .code = KEY_RESTART, - .debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP121_GPIO_BTN_RESET, - .active_low = 1, - } -}; - -static struct spi_board_info ap121_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "mx25l1606e", - } -}; - -static struct ath79_spi_platform_data ap121_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -static void __init ap121_setup(void) -{ - u8 *cal_data = (u8 *) KSEG1ADDR(AP121_CAL_DATA_ADDR); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(ap121_leds_gpio), - ap121_leds_gpio); - ath79_register_gpio_keys_polled(-1, AP121_KEYS_POLL_INTERVAL, - ARRAY_SIZE(ap121_gpio_keys), - ap121_gpio_keys); - - ath79_register_spi(&ap121_spi_data, ap121_spi_info, - ARRAY_SIZE(ap121_spi_info)); - ath79_register_usb(); - ath79_register_wmac(cal_data); -} - -MIPS_MACHINE(ATH79_MACH_AP121, "AP121", "Atheros AP121 reference board", - ap121_setup); diff --git a/arch/mips/ath79/mach-ap136.c b/arch/mips/ath79/mach-ap136.c deleted file mode 100644 index 07eac58c3641..000000000000 --- a/arch/mips/ath79/mach-ap136.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Qualcomm Atheros AP136 reference board support - * - * Copyright (c) 2012 Qualcomm Atheros - * Copyright (c) 2012-2013 Gabor Juhos <juhosg@openwrt.org> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * - */ - -#include <linux/pci.h> -#include <linux/ath9k_platform.h> - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" -#include "dev-wmac.h" -#include "pci.h" - -#define AP136_GPIO_LED_STATUS_RED 14 -#define AP136_GPIO_LED_STATUS_GREEN 19 -#define AP136_GPIO_LED_USB 4 -#define AP136_GPIO_LED_WLAN_2G 13 -#define AP136_GPIO_LED_WLAN_5G 12 -#define AP136_GPIO_LED_WPS_RED 15 -#define AP136_GPIO_LED_WPS_GREEN 20 - -#define AP136_GPIO_BTN_WPS 16 -#define AP136_GPIO_BTN_RFKILL 21 - -#define AP136_KEYS_POLL_INTERVAL 20 /* msecs */ -#define AP136_KEYS_DEBOUNCE_INTERVAL (3 * AP136_KEYS_POLL_INTERVAL) - -#define AP136_WMAC_CALDATA_OFFSET 0x1000 -#define AP136_PCIE_CALDATA_OFFSET 0x5000 - -static struct gpio_led ap136_leds_gpio[] __initdata = { - { - .name = "qca:green:status", - .gpio = AP136_GPIO_LED_STATUS_GREEN, - .active_low = 1, - }, - { - .name = "qca:red:status", - .gpio = AP136_GPIO_LED_STATUS_RED, - .active_low = 1, - }, - { - .name = "qca:green:wps", - .gpio = AP136_GPIO_LED_WPS_GREEN, - .active_low = 1, - }, - { - .name = "qca:red:wps", - .gpio = AP136_GPIO_LED_WPS_RED, - .active_low = 1, - }, - { - .name = "qca:red:wlan-2g", - .gpio = AP136_GPIO_LED_WLAN_2G, - .active_low = 1, - }, - { - .name = "qca:red:usb", - .gpio = AP136_GPIO_LED_USB, - .active_low = 1, - } -}; - -static struct gpio_keys_button ap136_gpio_keys[] __initdata = { - { - .desc = "WPS button", - .type = EV_KEY, - .code = KEY_WPS_BUTTON, - .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP136_GPIO_BTN_WPS, - .active_low = 1, - }, - { - .desc = "RFKILL button", - .type = EV_KEY, - .code = KEY_RFKILL, - .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP136_GPIO_BTN_RFKILL, - .active_low = 1, - }, -}; - -static struct spi_board_info ap136_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "mx25l6405d", - } -}; - -static struct ath79_spi_platform_data ap136_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -#ifdef CONFIG_PCI -static struct ath9k_platform_data ap136_ath9k_data; - -static int ap136_pci_plat_dev_init(struct pci_dev *dev) -{ - if (dev->bus->number == 1 && (PCI_SLOT(dev->devfn)) == 0) - dev->dev.platform_data = &ap136_ath9k_data; - - return 0; -} - -static void __init ap136_pci_init(u8 *eeprom) -{ - memcpy(ap136_ath9k_data.eeprom_data, eeprom, - sizeof(ap136_ath9k_data.eeprom_data)); - - ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init); - ath79_register_pci(); -} -#else -static inline void ap136_pci_init(u8 *eeprom) {} -#endif /* CONFIG_PCI */ - -static void __init ap136_setup(void) -{ - u8 *art = (u8 *) KSEG1ADDR(0x1fff0000); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio), - ap136_leds_gpio); - ath79_register_gpio_keys_polled(-1, AP136_KEYS_POLL_INTERVAL, - ARRAY_SIZE(ap136_gpio_keys), - ap136_gpio_keys); - ath79_register_spi(&ap136_spi_data, ap136_spi_info, - ARRAY_SIZE(ap136_spi_info)); - ath79_register_usb(); - ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET); - ap136_pci_init(art + AP136_PCIE_CALDATA_OFFSET); -} - -MIPS_MACHINE(ATH79_MACH_AP136_010, "AP136-010", - "Atheros AP136-010 reference board", - ap136_setup); diff --git a/arch/mips/ath79/mach-ap81.c b/arch/mips/ath79/mach-ap81.c deleted file mode 100644 index 1c78d497f930..000000000000 --- a/arch/mips/ath79/mach-ap81.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Atheros AP81 board support - * - * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2009 
Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include "machtypes.h" -#include "dev-wmac.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" - -#define AP81_GPIO_LED_STATUS 1 -#define AP81_GPIO_LED_AOSS 3 -#define AP81_GPIO_LED_WLAN 6 -#define AP81_GPIO_LED_POWER 14 - -#define AP81_GPIO_BTN_SW4 12 -#define AP81_GPIO_BTN_SW1 21 - -#define AP81_KEYS_POLL_INTERVAL 20 /* msecs */ -#define AP81_KEYS_DEBOUNCE_INTERVAL (3 * AP81_KEYS_POLL_INTERVAL) - -#define AP81_CAL_DATA_ADDR 0x1fff1000 - -static struct gpio_led ap81_leds_gpio[] __initdata = { - { - .name = "ap81:green:status", - .gpio = AP81_GPIO_LED_STATUS, - .active_low = 1, - }, { - .name = "ap81:amber:aoss", - .gpio = AP81_GPIO_LED_AOSS, - .active_low = 1, - }, { - .name = "ap81:green:wlan", - .gpio = AP81_GPIO_LED_WLAN, - .active_low = 1, - }, { - .name = "ap81:green:power", - .gpio = AP81_GPIO_LED_POWER, - .active_low = 1, - } -}; - -static struct gpio_keys_button ap81_gpio_keys[] __initdata = { - { - .desc = "sw1", - .type = EV_KEY, - .code = BTN_0, - .debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP81_GPIO_BTN_SW1, - .active_low = 1, - } , { - .desc = "sw4", - .type = EV_KEY, - .code = BTN_1, - .debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP81_GPIO_BTN_SW4, - .active_low = 1, - } -}; - -static struct spi_board_info ap81_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "m25p64", - } -}; - -static struct ath79_spi_platform_data ap81_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -static void __init ap81_setup(void) -{ - u8 *cal_data = (u8 *) KSEG1ADDR(AP81_CAL_DATA_ADDR); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(ap81_leds_gpio), - ap81_leds_gpio); - ath79_register_gpio_keys_polled(-1, AP81_KEYS_POLL_INTERVAL, - ARRAY_SIZE(ap81_gpio_keys), - ap81_gpio_keys); - ath79_register_spi(&ap81_spi_data, ap81_spi_info, - ARRAY_SIZE(ap81_spi_info)); - ath79_register_wmac(cal_data); - ath79_register_usb(); -} - -MIPS_MACHINE(ATH79_MACH_AP81, "AP81", "Atheros AP81 reference board", - ap81_setup); diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c deleted file mode 100644 index 9423f5aed287..000000000000 --- a/arch/mips/ath79/mach-db120.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Atheros DB120 reference board support - * - * Copyright (c) 2011 Qualcomm Atheros - * Copyright (c) 2011 Gabor Juhos <juhosg@openwrt.org> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * - */ - -#include <linux/pci.h> -#include <linux/ath9k_platform.h> - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" -#include "dev-wmac.h" -#include "pci.h" - -#define DB120_GPIO_LED_WLAN_5G 12 -#define DB120_GPIO_LED_WLAN_2G 13 -#define DB120_GPIO_LED_STATUS 14 -#define DB120_GPIO_LED_WPS 15 - -#define DB120_GPIO_BTN_WPS 16 - -#define DB120_KEYS_POLL_INTERVAL 20 /* msecs */ -#define DB120_KEYS_DEBOUNCE_INTERVAL (3 * DB120_KEYS_POLL_INTERVAL) - -#define DB120_WMAC_CALDATA_OFFSET 0x1000 -#define DB120_PCIE_CALDATA_OFFSET 0x5000 - -static struct gpio_led db120_leds_gpio[] __initdata = { - { - .name = "db120:green:status", - .gpio = DB120_GPIO_LED_STATUS, - .active_low = 1, - }, - { - .name = "db120:green:wps", - .gpio = DB120_GPIO_LED_WPS, - .active_low = 1, - }, - { - .name = "db120:green:wlan-5g", - .gpio = DB120_GPIO_LED_WLAN_5G, - .active_low = 1, - }, - { - .name = "db120:green:wlan-2g", - .gpio = DB120_GPIO_LED_WLAN_2G, - .active_low = 1, - }, -}; - -static struct gpio_keys_button db120_gpio_keys[] __initdata = { - { - .desc = "WPS button", - .type = EV_KEY, - .code = KEY_WPS_BUTTON, - .debounce_interval = DB120_KEYS_DEBOUNCE_INTERVAL, - .gpio = DB120_GPIO_BTN_WPS, - .active_low = 1, - }, -}; - -static struct spi_board_info db120_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "s25sl064a", - } -}; - -static struct ath79_spi_platform_data db120_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -#ifdef CONFIG_PCI -static struct ath9k_platform_data db120_ath9k_data; - -static int db120_pci_plat_dev_init(struct pci_dev *dev) -{ - switch (PCI_SLOT(dev->devfn)) { - case 0: - dev->dev.platform_data = &db120_ath9k_data; - break; - } - - return 0; -} - -static void __init db120_pci_init(u8 *eeprom) -{ - memcpy(db120_ath9k_data.eeprom_data, eeprom, - sizeof(db120_ath9k_data.eeprom_data)); - - ath79_pci_set_plat_dev_init(db120_pci_plat_dev_init); - ath79_register_pci(); -} -#else -static inline void db120_pci_init(u8 *eeprom) {} -#endif /* CONFIG_PCI */ - -static void __init db120_setup(void) -{ - u8 *art = (u8 *) KSEG1ADDR(0x1fff0000); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(db120_leds_gpio), - db120_leds_gpio); - ath79_register_gpio_keys_polled(-1, DB120_KEYS_POLL_INTERVAL, - ARRAY_SIZE(db120_gpio_keys), - db120_gpio_keys); - ath79_register_spi(&db120_spi_data, db120_spi_info, - ARRAY_SIZE(db120_spi_info)); - ath79_register_usb(); - ath79_register_wmac(art + DB120_WMAC_CALDATA_OFFSET); - db120_pci_init(art + DB120_PCIE_CALDATA_OFFSET); -} - -MIPS_MACHINE(ATH79_MACH_DB120, "DB120", "Atheros DB120 reference board", - db120_setup); diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c deleted file mode 100644 index 75fb96ca61db..000000000000 --- a/arch/mips/ath79/mach-pb44.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Atheros PB44 reference board support - * - * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/i2c.h> -#include <linux/gpio/machine.h> -#include <linux/platform_data/pcf857x.h> - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" -#include "pci.h" - -#define PB44_GPIO_I2C_SCL 0 -#define PB44_GPIO_I2C_SDA 1 - -#define PB44_GPIO_EXP_BASE 16 -#define PB44_GPIO_SW_RESET (PB44_GPIO_EXP_BASE + 6) -#define PB44_GPIO_SW_JUMP (PB44_GPIO_EXP_BASE + 8) -#define PB44_GPIO_LED_JUMP1 (PB44_GPIO_EXP_BASE + 9) -#define PB44_GPIO_LED_JUMP2 (PB44_GPIO_EXP_BASE + 10) - -#define PB44_KEYS_POLL_INTERVAL 20 /* msecs */ -#define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL) - -static struct gpiod_lookup_table pb44_i2c_gpiod_table = { - .dev_id = "i2c-gpio.0", - .table = { - GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA, - NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), - GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SCL, - NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), - }, -}; - -static struct platform_device pb44_i2c_gpio_device = { - .name = "i2c-gpio", - .id = 0, - .dev = { - .platform_data = NULL, - } -}; - -static struct pcf857x_platform_data pb44_pcf857x_data = { - .gpio_base = PB44_GPIO_EXP_BASE, -}; - -static struct i2c_board_info pb44_i2c_board_info[] __initdata = { - { - I2C_BOARD_INFO("pcf8575", 0x20), - .platform_data = &pb44_pcf857x_data, - }, -}; - -static struct gpio_led pb44_leds_gpio[] __initdata = { - { - .name = "pb44:amber:jump1", - .gpio = PB44_GPIO_LED_JUMP1, - .active_low = 1, - }, { - .name = "pb44:green:jump2", - .gpio = PB44_GPIO_LED_JUMP2, - .active_low = 1, - }, -}; - -static struct gpio_keys_button pb44_gpio_keys[] __initdata = { - { - .desc = "soft_reset", - .type = EV_KEY, - .code = KEY_RESTART, - .debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL, - .gpio = PB44_GPIO_SW_RESET, - .active_low = 1, - } , { - .desc = "jumpstart", - .type = EV_KEY, - .code = KEY_WPS_BUTTON, - .debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL, - .gpio = PB44_GPIO_SW_JUMP, - .active_low = 1, - } -}; - -static struct spi_board_info pb44_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "m25p64", - }, -}; - -static struct ath79_spi_platform_data pb44_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -static void __init pb44_init(void) -{ - gpiod_add_lookup_table(&pb44_i2c_gpiod_table); - i2c_register_board_info(0, pb44_i2c_board_info, - ARRAY_SIZE(pb44_i2c_board_info)); - platform_device_register(&pb44_i2c_gpio_device); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(pb44_leds_gpio), - pb44_leds_gpio); - ath79_register_gpio_keys_polled(-1, PB44_KEYS_POLL_INTERVAL, - ARRAY_SIZE(pb44_gpio_keys), - pb44_gpio_keys); - ath79_register_spi(&pb44_spi_data, pb44_spi_info, - ARRAY_SIZE(pb44_spi_info)); - ath79_register_usb(); - ath79_register_pci(); -} - -MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board", - pb44_init); diff --git a/arch/mips/ath79/mach-ubnt-xm.c b/arch/mips/ath79/mach-ubnt-xm.c deleted file mode 100644 index 4a3c60694c75..000000000000 --- a/arch/mips/ath79/mach-ubnt-xm.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Ubiquiti Networks XM (rev 1.0) board support - * - * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com> - * - * Derived from: mach-pb44.c - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/ath9k_platform.h> - -#include <asm/mach-ath79/irq.h> - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "pci.h" - -#define UBNT_XM_GPIO_LED_L1 0 -#define UBNT_XM_GPIO_LED_L2 1 -#define UBNT_XM_GPIO_LED_L3 11 -#define UBNT_XM_GPIO_LED_L4 7 - -#define UBNT_XM_GPIO_BTN_RESET 12 - -#define UBNT_XM_KEYS_POLL_INTERVAL 20 -#define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL) - -#define UBNT_XM_EEPROM_ADDR (u8 *) KSEG1ADDR(0x1fff1000) - -static struct gpio_led ubnt_xm_leds_gpio[] __initdata = { - { - .name = "ubnt-xm:red:link1", - .gpio = UBNT_XM_GPIO_LED_L1, - .active_low = 0, - }, { - .name = "ubnt-xm:orange:link2", - .gpio = UBNT_XM_GPIO_LED_L2, - .active_low = 0, - }, { - .name = "ubnt-xm:green:link3", - .gpio = UBNT_XM_GPIO_LED_L3, - .active_low = 0, - }, { - .name = "ubnt-xm:green:link4", - .gpio = UBNT_XM_GPIO_LED_L4, - .active_low = 0, - }, -}; - -static struct gpio_keys_button ubnt_xm_gpio_keys[] __initdata = { - { - .desc = "reset", - .type = EV_KEY, - .code = KEY_RESTART, - .debounce_interval = UBNT_XM_KEYS_DEBOUNCE_INTERVAL, - .gpio = UBNT_XM_GPIO_BTN_RESET, - .active_low = 1, - } -}; - -static struct spi_board_info ubnt_xm_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "mx25l6405d", - } -}; - -static struct ath79_spi_platform_data ubnt_xm_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -#ifdef CONFIG_PCI -static struct ath9k_platform_data ubnt_xm_eeprom_data; - -static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev) -{ - switch (PCI_SLOT(dev->devfn)) { - case 0: - dev->dev.platform_data = &ubnt_xm_eeprom_data; - break; - } - - return 0; -} - -static void __init ubnt_xm_pci_init(void) -{ - memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR, - sizeof(ubnt_xm_eeprom_data.eeprom_data)); - - ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init); - ath79_register_pci(); -} -#else -static inline void ubnt_xm_pci_init(void) {} -#endif /* CONFIG_PCI */ - -static void __init ubnt_xm_init(void) -{ - ath79_register_leds_gpio(-1, ARRAY_SIZE(ubnt_xm_leds_gpio), - ubnt_xm_leds_gpio); - - ath79_register_gpio_keys_polled(-1, UBNT_XM_KEYS_POLL_INTERVAL, - ARRAY_SIZE(ubnt_xm_gpio_keys), - ubnt_xm_gpio_keys); - - ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info, - ARRAY_SIZE(ubnt_xm_spi_info)); - - ubnt_xm_pci_init(); -} - -MIPS_MACHINE(ATH79_MACH_UBNT_XM, - "UBNT-XM", - "Ubiquiti Networks XM (rev 1.0) board", - ubnt_xm_init); diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h deleted file mode 100644 index a13db3d15c8f..000000000000 --- a/arch/mips/ath79/machtypes.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X machine type definitions - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#ifndef _ATH79_MACHTYPE_H -#define _ATH79_MACHTYPE_H - -#include <asm/mips_machine.h> - -enum ath79_mach_type { - ATH79_MACH_GENERIC_OF = -1, /* Device tree board */ - ATH79_MACH_GENERIC = 0, - ATH79_MACH_AP121, /* Atheros AP121 reference board */ - ATH79_MACH_AP136_010, /* Atheros AP136-010 reference board */ - ATH79_MACH_AP81, /* Atheros AP81 reference board */ - ATH79_MACH_DB120, /* Atheros DB120 reference board */ - ATH79_MACH_PB44, /* Atheros PB44 reference board */ - ATH79_MACH_UBNT_XM, /* Ubiquiti Networks XM board rev 1.0 */ -}; - -#endif /* _ATH79_MACHTYPE_H */ diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c deleted file mode 100644 index b816cb4a25ff..000000000000 --- a/arch/mips/ath79/pci.c +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Atheros AR71XX/AR724X specific PCI setup code - * - * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com> - * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * Parts of this file are based on Atheros' 2.6.15 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/resource.h> -#include <linux/platform_device.h> -#include <asm/mach-ath79/ar71xx_regs.h> -#include <asm/mach-ath79/ath79.h> -#include <asm/mach-ath79/irq.h> -#include "pci.h" - -static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev); -static const struct ath79_pci_irq *ath79_pci_irq_map; -static unsigned ath79_pci_nr_irqs; - -static const struct ath79_pci_irq ar71xx_pci_irq_map[] = { - { - .slot = 17, - .pin = 1, - .irq = ATH79_PCI_IRQ(0), - }, { - .slot = 18, - .pin = 1, - .irq = ATH79_PCI_IRQ(1), - }, { - .slot = 19, - .pin = 1, - .irq = ATH79_PCI_IRQ(2), - } -}; - -static const struct ath79_pci_irq ar724x_pci_irq_map[] = { - { - .slot = 0, - .pin = 1, - .irq = ATH79_PCI_IRQ(0), - } -}; - -static const struct ath79_pci_irq qca955x_pci_irq_map[] = { - { - .bus = 0, - .slot = 0, - .pin = 1, - .irq = ATH79_PCI_IRQ(0), - }, - { - .bus = 1, - .slot = 0, - .pin = 1, - .irq = ATH79_PCI_IRQ(1), - }, -}; - -int pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin) -{ - int irq = -1; - int i; - - if (ath79_pci_nr_irqs == 0 || - ath79_pci_irq_map == NULL) { - if (soc_is_ar71xx()) { - ath79_pci_irq_map = ar71xx_pci_irq_map; - ath79_pci_nr_irqs = ARRAY_SIZE(ar71xx_pci_irq_map); - } else if (soc_is_ar724x() || - soc_is_ar9342() || - soc_is_ar9344()) { - ath79_pci_irq_map = ar724x_pci_irq_map; - ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map); - } else if (soc_is_qca955x()) { - ath79_pci_irq_map = qca955x_pci_irq_map; - ath79_pci_nr_irqs = ARRAY_SIZE(qca955x_pci_irq_map); - } else { - pr_crit("pci %s: invalid irq map\n", - pci_name((struct pci_dev *) dev)); - return irq; - } - } - - for (i = 0; i < ath79_pci_nr_irqs; i++) { - const struct ath79_pci_irq *entry; - - entry = &ath79_pci_irq_map[i]; - if (entry->bus == dev->bus->number && - entry->slot == slot && - entry->pin == pin) { - irq = entry->irq; - break; - } - } - - if (irq < 0) - pr_crit("pci %s: no irq found for pin %u\n", - pci_name((struct pci_dev *) dev), pin); - else - pr_info("pci %s: using irq %d for pin %u\n", - pci_name((struct pci_dev *) dev), irq, pin); - - return irq; -} - -int pcibios_plat_dev_init(struct pci_dev *dev) -{ - if (ath79_pci_plat_dev_init) - return ath79_pci_plat_dev_init(dev); - - return 0; -} - 
-void __init ath79_pci_set_irq_map(unsigned nr_irqs, - const struct ath79_pci_irq *map) -{ - ath79_pci_nr_irqs = nr_irqs; - ath79_pci_irq_map = map; -} - -void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev)) -{ - ath79_pci_plat_dev_init = func; -} - -static struct platform_device * -ath79_register_pci_ar71xx(void) -{ - struct platform_device *pdev; - struct resource res[4]; - - memset(res, 0, sizeof(res)); - - res[0].name = "cfg_base"; - res[0].flags = IORESOURCE_MEM; - res[0].start = AR71XX_PCI_CFG_BASE; - res[0].end = AR71XX_PCI_CFG_BASE + AR71XX_PCI_CFG_SIZE - 1; - - res[1].flags = IORESOURCE_IRQ; - res[1].start = ATH79_CPU_IRQ(2); - res[1].end = ATH79_CPU_IRQ(2); - - res[2].name = "io_base"; - res[2].flags = IORESOURCE_IO; - res[2].start = 0; - res[2].end = 0; - - res[3].name = "mem_base"; - res[3].flags = IORESOURCE_MEM; - res[3].start = AR71XX_PCI_MEM_BASE; - res[3].end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1; - - pdev = platform_device_register_simple("ar71xx-pci", -1, - res, ARRAY_SIZE(res)); - return pdev; -} - -static struct platform_device * -ath79_register_pci_ar724x(int id, - unsigned long cfg_base, - unsigned long ctrl_base, - unsigned long crp_base, - unsigned long mem_base, - unsigned long mem_size, - unsigned long io_base, - int irq) -{ - struct platform_device *pdev; - struct resource res[6]; - - memset(res, 0, sizeof(res)); - - res[0].name = "cfg_base"; - res[0].flags = IORESOURCE_MEM; - res[0].start = cfg_base; - res[0].end = cfg_base + AR724X_PCI_CFG_SIZE - 1; - - res[1].name = "ctrl_base"; - res[1].flags = IORESOURCE_MEM; - res[1].start = ctrl_base; - res[1].end = ctrl_base + AR724X_PCI_CTRL_SIZE - 1; - - res[2].flags = IORESOURCE_IRQ; - res[2].start = irq; - res[2].end = irq; - - res[3].name = "mem_base"; - res[3].flags = IORESOURCE_MEM; - res[3].start = mem_base; - res[3].end = mem_base + mem_size - 1; - - res[4].name = "io_base"; - res[4].flags = IORESOURCE_IO; - res[4].start = io_base; - res[4].end = io_base; - - res[5].name = "crp_base"; - res[5].flags = IORESOURCE_MEM; - res[5].start = crp_base; - res[5].end = crp_base + AR724X_PCI_CRP_SIZE - 1; - - pdev = platform_device_register_simple("ar724x-pci", id, - res, ARRAY_SIZE(res)); - return pdev; -} - -int __init ath79_register_pci(void) -{ - struct platform_device *pdev = NULL; - - if (soc_is_ar71xx()) { - pdev = ath79_register_pci_ar71xx(); - } else if (soc_is_ar724x()) { - pdev = ath79_register_pci_ar724x(-1, - AR724X_PCI_CFG_BASE, - AR724X_PCI_CTRL_BASE, - AR724X_PCI_CRP_BASE, - AR724X_PCI_MEM_BASE, - AR724X_PCI_MEM_SIZE, - 0, - ATH79_CPU_IRQ(2)); - } else if (soc_is_ar9342() || - soc_is_ar9344()) { - u32 bootstrap; - - bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP); - if ((bootstrap & AR934X_BOOTSTRAP_PCIE_RC) == 0) - return -ENODEV; - - pdev = ath79_register_pci_ar724x(-1, - AR724X_PCI_CFG_BASE, - AR724X_PCI_CTRL_BASE, - AR724X_PCI_CRP_BASE, - AR724X_PCI_MEM_BASE, - AR724X_PCI_MEM_SIZE, - 0, - ATH79_IP2_IRQ(0)); - } else if (soc_is_qca9558()) { - pdev = ath79_register_pci_ar724x(0, - QCA955X_PCI_CFG_BASE0, - QCA955X_PCI_CTRL_BASE0, - QCA955X_PCI_CRP_BASE0, - QCA955X_PCI_MEM_BASE0, - QCA955X_PCI_MEM_SIZE, - 0, - ATH79_IP2_IRQ(0)); - - pdev = ath79_register_pci_ar724x(1, - QCA955X_PCI_CFG_BASE1, - QCA955X_PCI_CTRL_BASE1, - QCA955X_PCI_CRP_BASE1, - QCA955X_PCI_MEM_BASE1, - QCA955X_PCI_MEM_SIZE, - 1, - ATH79_IP3_IRQ(2)); - } else { - /* No PCI support */ - return -ENODEV; - } - - if (!pdev) - pr_err("unable to register PCI controller device\n"); - - return pdev ? 
0 : -ENODEV; -} diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h deleted file mode 100644 index 1d00a3803c37..000000000000 --- a/arch/mips/ath79/pci.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Atheros AR71XX/AR724X PCI support - * - * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com> - * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> - * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_PCI_H -#define _ATH79_PCI_H - -struct ath79_pci_irq { - int bus; - u8 slot; - u8 pin; - int irq; -}; - -#ifdef CONFIG_PCI -void ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map); -void ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev)); -int ath79_register_pci(void); -#else -static inline void -ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map) {} -static inline void -ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *)) {} -static inline int ath79_register_pci(void) { return 0; } -#endif - -#endif /* _ATH79_PCI_H */ diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 9728abcb18fa..4a70c5de8c92 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -19,6 +19,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/of_fdt.h> +#include <linux/irqchip.h> #include <asm/bootinfo.h> #include <asm/idle.h> @@ -31,8 +32,6 @@ #include <asm/mach-ath79/ath79.h> #include <asm/mach-ath79/ar71xx_regs.h> #include "common.h" -#include "dev-common.h" -#include "machtypes.h" #define ATH79_SYS_TYPE_LEN 64 @@ -235,25 +234,21 @@ void __init plat_mem_setup(void) else if (fw_passed_dtb) __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb)); - if (mips_machtype != ATH79_MACH_GENERIC_OF) { - ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE, - AR71XX_RESET_SIZE); - ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, - AR71XX_PLL_SIZE); - ath79_detect_sys_type(); - ath79_ddr_ctrl_init(); + ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE, + AR71XX_RESET_SIZE); + ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, + AR71XX_PLL_SIZE); + ath79_detect_sys_type(); + ath79_ddr_ctrl_init(); - detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); - - /* OF machines should use the reset driver */ - _machine_restart = ath79_restart; - } + detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); + _machine_restart = ath79_restart; _machine_halt = ath79_halt; pm_power_off = ath79_halt; } -static void __init ath79_of_plat_time_init(void) +void __init plat_time_init(void) { struct device_node *np; struct clk *clk; @@ -283,61 +278,12 @@ static void __init ath79_of_plat_time_init(void) clk_put(clk); } -void __init plat_time_init(void) -{ - unsigned long cpu_clk_rate; - unsigned long ahb_clk_rate; - unsigned long ddr_clk_rate; - unsigned long ref_clk_rate; - - if (IS_ENABLED(CONFIG_OF) && mips_machtype == ATH79_MACH_GENERIC_OF) { - ath79_of_plat_time_init(); - return; - } - - ath79_clocks_init(); - - cpu_clk_rate = ath79_get_sys_clk_rate("cpu"); - ahb_clk_rate = ath79_get_sys_clk_rate("ahb"); - ddr_clk_rate = ath79_get_sys_clk_rate("ddr"); - ref_clk_rate = ath79_get_sys_clk_rate("ref"); - - pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n", - cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000, - ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 
1000, - ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000, - ref_clk_rate / 1000000, (ref_clk_rate / 1000) % 1000); - - mips_hpt_frequency = cpu_clk_rate / 2; -} - -static int __init ath79_setup(void) +void __init arch_init_irq(void) { - if (mips_machtype == ATH79_MACH_GENERIC_OF) - return 0; - - ath79_gpio_init(); - ath79_register_uart(); - ath79_register_wdt(); - - mips_machine_setup(); - - return 0; + irqchip_init(); } -arch_initcall(ath79_setup); - void __init device_tree_init(void) { unflatten_and_copy_device_tree(); } - -MIPS_MACHINE(ATH79_MACH_GENERIC, - "Generic", - "Generic AR71XX/AR724X/AR913X based board", - NULL); - -MIPS_MACHINE(ATH79_MACH_GENERIC_OF, - "DTB", - "Generic AR71XX/AR724X/AR913X based board (DT)", - NULL); diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c index 977990a609ba..67b6a78d670b 100644 --- a/arch/mips/bcm47xx/buttons.c +++ b/arch/mips/bcm47xx/buttons.c @@ -147,7 +147,7 @@ bcm47xx_buttons_buffalo_whr_g125[] __initconst = { static const struct gpio_keys_button bcm47xx_buttons_buffalo_whr_g54s[] __initconst = { BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON), - BCM47XX_GPIO_KEY(4, KEY_RESTART), + BCM47XX_GPIO_KEY_H(4, KEY_RESTART), BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode swtich */ }; diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c index d85fcdac8bf0..167c42c71e79 100644 --- a/arch/mips/bcm47xx/leds.c +++ b/arch/mips/bcm47xx/leds.c @@ -152,11 +152,11 @@ bcm47xx_leds_buffalo_whr_g125[] __initconst = { static const struct gpio_led bcm47xx_leds_buffalo_whr_g54s[] __initconst = { - BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(2, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(3, "unk", "internal", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(1, "green", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(2, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(3, "green", "internal", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(6, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(7, "red", "diag", 1, LEDS_GPIO_DEFSTATE_OFF), }; static const struct gpio_led diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index fe3773539eff..82627c264964 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -274,7 +274,7 @@ static int __init bcm47xx_register_bus_complete(void) bcm47xx_leds_register(); bcm47xx_workarounds(); - fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status, -1); + fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status); return 0; } device_initcall(bcm47xx_register_bus_complete); diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index 07b4c65a88a4..8e73d65f3480 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = { static int shared_device_registered; +static u64 enet_dmamask = DMA_BIT_MASK(32); + static struct resource enet0_res[] = { { .start = -1, /* filled at runtime */ @@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = { .resource = enet0_res, .dev = { .platform_data = &enet0_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = { .resource = enet1_res, .dev = { .platform_data = &enet1_pd, + .dma_mask = &enet_dmamask, + 
.coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = { .resource = enetsw_res, .dev = { .platform_data = &enetsw_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts index 0fa3dd1819ff..dda0559cef50 100644 --- a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts +++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts @@ -180,14 +180,28 @@ ethernet@0 { phy-handle = <&phy2>; cavium,alt-phy-handle = <&phy100>; + rx-delay = <0>; + tx-delay = <0>; + fixed-link { + speed = <1000>; + full-duplex; + }; }; ethernet@1 { phy-handle = <&phy3>; cavium,alt-phy-handle = <&phy101>; + rx-delay = <0>; + tx-delay = <0>; + fixed-link { + speed = <1000>; + full-duplex; + }; }; ethernet@2 { phy-handle = <&phy4>; cavium,alt-phy-handle = <&phy102>; + rx-delay = <0>; + tx-delay = <0>; }; ethernet@3 { compatible = "cavium,octeon-3860-pip-port"; diff --git a/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts index 243e5dc444fb..962f37fbc7db 100644 --- a/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts +++ b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts @@ -33,12 +33,18 @@ interface@0 { ethernet@0 { phy-handle = <&phy7>; + rx-delay = <0>; + tx-delay = <0x10>; }; ethernet@1 { phy-handle = <&phy6>; + rx-delay = <0>; + tx-delay = <0x10>; }; ethernet@2 { phy-handle = <&phy5>; + rx-delay = <0>; + tx-delay = <0x10>; }; }; }; diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index 50cff3cbcc6d..4f7b1fa31cf5 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts @@ -76,7 +76,7 @@ status = "okay"; pinctrl-names = "default"; - pinctrl-0 = <&pins_uart2>; + pinctrl-0 = <&pins_uart3>; }; &uart4 { @@ -196,9 +196,9 @@ bias-disable; }; - pins_uart2: uart2 { - function = "uart2"; - groups = "uart2-data", "uart2-hwflow"; + pins_uart3: uart3 { + function = "uart3"; + groups = "uart3-data", "uart3-hwflow"; bias-disable; }; diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi index 6fb16fd24035..2beb78a62b7d 100644 --- a/arch/mips/boot/dts/ingenic/jz4740.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi @@ -161,7 +161,7 @@ #dma-cells = <2>; interrupt-parent = <&intc>; - interrupts = <29>; + interrupts = <20>; clocks = <&cgu JZ4740_CLK_DMA>; diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts index 2152b7ba65fb..cc8dbea0911f 100644 --- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts +++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts @@ -90,11 +90,11 @@ interrupts = <0>; }; - axi_i2c: i2c@10A00000 { + axi_i2c: i2c@10a00000 { compatible = "xlnx,xps-iic-2.00.a"; interrupt-parent = <&axi_intc>; interrupts = <4>; - reg = < 0x10A00000 0x10000 >; + reg = < 0x10a00000 0x10000 >; clocks = <&ext>; xlnx,clk-freq = <0x5f5e100>; xlnx,family = "Artix7"; @@ -106,9 +106,9 @@ #address-cells = <1>; #size-cells = <0>; - ad7420@4B { + ad7420@4b { compatible = "adi,adt7420"; - reg = <0x4B>; + reg = <0x4b>; }; } ; }; diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index ab8362e04461..2e2d45bc850d 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -31,6 +31,7 @@ * network ports from the rest of the cvmx-helper files. 
*/ +#include <linux/bug.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-bootinfo.h> @@ -210,56 +211,18 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port) { cvmx_helper_link_info_t result; + WARN(!octeon_is_simulation(), + "Using deprecated link status - please update your DT"); + /* Unless we fix it later, all links are defaulted to down */ result.u64 = 0; - /* - * This switch statement should handle all ports that either don't use - * Marvell PHYS, or don't support in-band status. - */ - switch (cvmx_sysinfo_get()->board_type) { - case CVMX_BOARD_TYPE_SIM: + if (octeon_is_simulation()) { /* The simulator gives you a simulated 1Gbps full duplex link */ result.s.link_up = 1; result.s.full_duplex = 1; result.s.speed = 1000; return result; - case CVMX_BOARD_TYPE_EBH3100: - case CVMX_BOARD_TYPE_CN3010_EVB_HS5: - case CVMX_BOARD_TYPE_CN3005_EVB_HS5: - case CVMX_BOARD_TYPE_CN3020_EVB_HS5: - /* Port 1 on these boards is always Gigabit */ - if (ipd_port == 1) { - result.s.link_up = 1; - result.s.full_duplex = 1; - result.s.speed = 1000; - return result; - } - /* Fall through to the generic code below */ - break; - case CVMX_BOARD_TYPE_CUST_NB5: - /* Port 1 on these boards is always Gigabit */ - if (ipd_port == 1) { - result.s.link_up = 1; - result.s.full_duplex = 1; - result.s.speed = 1000; - return result; - } - break; - case CVMX_BOARD_TYPE_BBGW_REF: - /* Port 1 on these boards is always Gigabit */ - if (ipd_port == 2) { - /* Port 2 is not hooked up */ - result.u64 = 0; - return result; - } else { - /* Ports 0 and 1 connect to the switch */ - result.s.link_up = 1; - result.s.full_duplex = 1; - result.s.speed = 1000; - return result; - } - break; } if (OCTEON_IS_MODEL(OCTEON_CN3XXX) @@ -358,45 +321,6 @@ int __cvmx_helper_board_interface_probe(int interface, int supported_ports) } /** - * Enable packet input/output from the hardware. This function is - * called after by cvmx_helper_packet_hardware_enable() to - * perform board specific initialization. For most boards - * nothing is needed. - * - * @interface: Interface to enable - * - * Returns Zero on success, negative on failure - */ -int __cvmx_helper_board_hardware_enable(int interface) -{ - if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) { - if (interface == 0) { - /* Different config for switch port */ - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0); - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0); - /* - * Boards with gigabit WAN ports need a - * different setting that is compatible with - * 100 Mbit settings - */ - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), - 0xc); - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), - 0xc); - } - } else if (cvmx_sysinfo_get()->board_type == - CVMX_BOARD_TYPE_UBNT_E100) { - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0); - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0x10); - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0); - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0x10); - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(2, interface), 0); - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(2, interface), 0x10); - } - return 0; -} - -/** * Get the clock type used for the USB block based on board type. * Used by the USB code for auto configuration of clock type. 
* diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index 38e0444e57e8..de391541d6f7 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c @@ -30,6 +30,7 @@ * Helper functions for common, but complicated tasks. * */ +#include <linux/bug.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-config.h> @@ -43,7 +44,6 @@ #include <asm/octeon/cvmx-helper-board.h> #include <asm/octeon/cvmx-pip-defs.h> -#include <asm/octeon/cvmx-smix-defs.h> #include <asm/octeon/cvmx-asxx-defs.h> /* Port count per interface */ @@ -317,22 +317,6 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface) return CVMX_HELPER_INTERFACE_MODE_DISABLED; } - if (interface == 0 - && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5 - && cvmx_sysinfo_get()->board_rev_major == 1) { - /* - * Lie about interface type of CN3005 board. This - * board has a switch on port 1 like the other - * evaluation boards, but it is connected over RGMII - * instead of GMII. Report GMII mode so that the - * speed is forced to 1 Gbit full duplex. Other than - * some initial configuration (which does not use the - * output of this function) there is no difference in - * setup between GMII and RGMII modes. - */ - return CVMX_HELPER_INTERFACE_MODE_GMII; - } - /* Interface 1 is always disabled on CN31XX and CN30XX */ if ((interface == 1) && (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX) @@ -778,7 +762,6 @@ static int __cvmx_helper_packet_hardware_enable(int interface) result = __cvmx_helper_loop_enable(interface); break; } - result |= __cvmx_helper_board_hardware_enable(interface); return result; } @@ -1026,7 +1009,6 @@ int cvmx_helper_initialize_packet_io_global(void) int result = 0; int interface; union cvmx_l2c_cfg l2c_cfg; - union cvmx_smix_en smix_en; const int num_interfaces = cvmx_helper_get_number_of_interfaces(); /* @@ -1046,24 +1028,6 @@ int cvmx_helper_initialize_packet_io_global(void) l2c_cfg.s.rfb_arb_mode = 0; cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64); - /* Make sure SMI/MDIO is enabled so we can query PHYs */ - smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0)); - if (!smix_en.s.en) { - smix_en.s.en = 1; - cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64); - } - - /* Newer chips actually have two SMI/MDIO interfaces */ - if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) && - !OCTEON_IS_MODEL(OCTEON_CN58XX) && - !OCTEON_IS_MODEL(OCTEON_CN50XX)) { - smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1)); - if (!smix_en.s.en) { - smix_en.s.en = 1; - cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64); - } - } - cvmx_pko_initialize_global(); for (interface = 0; interface < num_interfaces; interface++) { result |= cvmx_helper_interface_probe(interface); @@ -1136,6 +1100,7 @@ cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port) if (index == 0) result = __cvmx_helper_rgmii_link_get(ipd_port); else { + WARN(1, "Using deprecated link status - please update your DT"); result.s.full_duplex = 1; result.s.link_up = 1; result.s.speed = 1000; diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c index 2d68a39f1443..13f6c7716b1e 100644 --- a/arch/mips/cavium-octeon/oct_ilm.c +++ b/arch/mips/cavium-octeon/oct_ilm.c @@ -63,31 +63,11 @@ static int reset_statistics(void *data, u64 value) DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n"); -static int init_debufs(void) +static void init_debugfs(void) { - struct dentry *show_dentry; dir = 
debugfs_create_dir("oct_ilm", 0); - if (!dir) { - pr_err("oct_ilm: failed to create debugfs entry oct_ilm\n"); - return -1; - } - - show_dentry = debugfs_create_file("statistics", 0222, dir, NULL, - &oct_ilm_ops); - if (!show_dentry) { - pr_err("oct_ilm: failed to create debugfs entry oct_ilm/statistics\n"); - return -1; - } - - show_dentry = debugfs_create_file("reset", 0222, dir, NULL, - &reset_statistics_ops); - if (!show_dentry) { - pr_err("oct_ilm: failed to create debugfs entry oct_ilm/reset\n"); - return -1; - } - - return 0; - + debugfs_create_file("statistics", 0222, dir, NULL, &oct_ilm_ops); + debugfs_create_file("reset", 0222, dir, NULL, &reset_statistics_ops); } static void init_latency_info(struct latency_info *li, int startup) @@ -169,11 +149,7 @@ static __init int oct_ilm_module_init(void) int rc; int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM; - rc = init_debufs(); - if (rc) { - WARN(1, "Could not create debugfs entries"); - return rc; - } + init_debugfs(); rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD, "oct_ilm", 0); diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index 1f9ba60f7375..51685f893eab 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -458,6 +458,23 @@ static bool __init octeon_has_88e1145(void) !OCTEON_IS_MODEL(OCTEON_CN56XX); } +static bool __init octeon_has_fixed_link(int ipd_port) +{ + switch (cvmx_sysinfo_get()->board_type) { + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + case CVMX_BOARD_TYPE_CN3010_EVB_HS5: + case CVMX_BOARD_TYPE_CN3020_EVB_HS5: + case CVMX_BOARD_TYPE_CUST_NB5: + case CVMX_BOARD_TYPE_EBH3100: + /* Port 1 on these boards is always gigabit. */ + return ipd_port == 1; + case CVMX_BOARD_TYPE_BBGW_REF: + /* Ports 0 and 1 connect to the switch. */ + return ipd_port == 0 || ipd_port == 1; + } + return false; +} + static void __init octeon_fdt_set_phy(int eth, int phy_addr) { const __be32 *phy_handle; @@ -586,12 +603,52 @@ static void __init octeon_fdt_rm_ethernet(int node) fdt_nop_node(initial_boot_params, node); } +static void __init _octeon_rx_tx_delay(int eth, int rx_delay, int tx_delay) +{ + fdt_setprop_inplace_cell(initial_boot_params, eth, "rx-delay", + rx_delay); + fdt_setprop_inplace_cell(initial_boot_params, eth, "tx-delay", + tx_delay); +} + +static void __init octeon_rx_tx_delay(int eth, int iface, int port) +{ + switch (cvmx_sysinfo_get()->board_type) { + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + if (iface == 0) { + if (port == 0) { + /* + * Boards with gigabit WAN ports need a + * different setting that is compatible with + * 100 Mbit settings + */ + _octeon_rx_tx_delay(eth, 0xc, 0x0c); + return; + } else if (port == 1) { + /* Different config for switch port. 
*/ + _octeon_rx_tx_delay(eth, 0x0, 0x0); + return; + } + } + break; + case CVMX_BOARD_TYPE_UBNT_E100: + if (iface == 0 && port <= 2) { + _octeon_rx_tx_delay(eth, 0x0, 0x10); + return; + } + break; + } + fdt_nop_property(initial_boot_params, eth, "rx-delay"); + fdt_nop_property(initial_boot_params, eth, "tx-delay"); +} + static void __init octeon_fdt_pip_port(int iface, int i, int p, int max) { char name_buffer[20]; int eth; int phy_addr; int ipd_port; + int fixed_link; snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p); eth = fdt_subnode_offset(initial_boot_params, iface, name_buffer); @@ -609,6 +666,13 @@ static void __init octeon_fdt_pip_port(int iface, int i, int p, int max) phy_addr = cvmx_helper_board_get_mii_address(ipd_port); octeon_fdt_set_phy(eth, phy_addr); + + fixed_link = fdt_subnode_offset(initial_boot_params, eth, "fixed-link"); + if (fixed_link < 0) + WARN_ON(octeon_has_fixed_link(ipd_port)); + else if (!octeon_has_fixed_link(ipd_port)) + fdt_nop_node(initial_boot_params, fixed_link); + octeon_rx_tx_delay(eth, i, p); } static void __init octeon_fdt_pip_iface(int pip, int idx) diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig index c3cac29e8414..2bb02ea9fb4e 100644 --- a/arch/mips/configs/xway_defconfig +++ b/arch/mips/configs/xway_defconfig @@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_LANTIQ=y CONFIG_PCI_LANTIQ=y -CONFIG_XRX200_PHY_FW=y CONFIG_CPU_MIPS32_R2=y CONFIG_MIPS_VPE_LOADER=y CONFIG_NR_CPUS=2 diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index f15d5db5dd67..87b86cdf126a 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -3,7 +3,6 @@ generated-y += syscall_table_32_o32.h generated-y += syscall_table_64_n32.h generated-y += syscall_table_64_n64.h generated-y += syscall_table_64_o32.h -generic-(CONFIG_GENERIC_CSUM) += checksum.h generic-y += current.h generic-y += device.h generic-y += dma-contiguous.h diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 43fcd35e2957..94096299fc56 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ if (kernel_uses_llsc) { \ int temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ if (kernel_uses_llsc) { \ int temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ if (kernel_uses_llsc) { \ int temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ if (kernel_uses_llsc) { \ long temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ if (kernel_uses_llsc) { \ long temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ if (kernel_uses_llsc) { \ long temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ diff --git 
a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index a5eb1bb199a7..b865e317a14f 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -105,6 +105,20 @@ */ #define STYPE_SYNC_MB 0x10 +/* + * stype 0x14 - A completion barrier specific to global invalidations + * + * When a sync instruction of this type completes any preceding GINVI or GINVT + * operation has been globalized & completed on all coherent CPUs. Anything + * that the GINV* instruction should invalidate will have been invalidated on + * all coherent CPUs when this instruction completes. It is implementation + * specific whether the GINV* instructions themselves will ensure completion, + * or this sync type will. + * + * In systems implementing global invalidates (ie. with Config5.GI == 2 or 3) + * this sync type also requires that previous SYNCI operations have completed. + */ +#define STYPE_GINV 0x14 #ifdef CONFIG_CPU_HAS_SYNC #define __sync() \ @@ -222,6 +236,47 @@ #define __smp_mb__before_atomic() __smp_mb__before_llsc() #define __smp_mb__after_atomic() smp_llsc_mb() +/* + * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load, + * store or pref) in between an ll & sc can cause the sc instruction to + * erroneously succeed, breaking atomicity. Whilst it's unusual to write code + * containing such sequences, this bug bites harder than we might otherwise + * expect due to reordering & speculation: + * + * 1) A memory access appearing prior to the ll in program order may actually + * be executed after the ll - this is the reordering case. + * + * In order to avoid this we need to place a memory barrier (ie. a sync + * instruction) prior to every ll instruction, in between it & any earlier + * memory access instructions. Many of these cases are already covered by + * smp_mb__before_llsc() but for the remaining cases, typically ones in + * which multiple CPUs may operate on a memory location but ordering is not + * usually guaranteed, we use loongson_llsc_mb() below. + * + * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later. + * + * 2) If a conditional branch exists between an ll & sc with a target outside + * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg() + * or similar, then misprediction of the branch may allow speculative + * execution of memory accesses from outside of the ll-sc loop. + * + * In order to avoid this we need a memory barrier (ie. a sync instruction) + * at each affected branch target, for which we also use loongson_llsc_mb() + * defined below. + * + * This case affects all current Loongson 3 CPUs. 
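To make the barrier placement described above concrete, here is a minimal illustrative sketch (not part of the patch) of an ll/sc retry loop with loongson_llsc_mb() applied per case 1; it mirrors the set_bit()/atomic_*() hunks elsewhere in this series, with simplified operand constraints (the kernel proper uses GCC_OFF_SMALL_ASM()).

static inline void sketch_set_mask(unsigned int mask, volatile unsigned int *p)
{
	unsigned int temp;

	loongson_llsc_mb();	/* case 1: order earlier accesses before the ll */
	do {
		__asm__ __volatile__(
		"	.set	push			\n"
		"	.set	mips32r2		\n"
		"	ll	%0, %1			\n"
		"	or	%0, %2			\n"
		"	sc	%0, %1			\n"
		"	.set	pop			\n"
		: "=&r" (temp), "+m" (*p)
		: "ir" (mask));
	} while (unlikely(!temp));
}

Any branch out of such a loop (case 2) would likewise need loongson_llsc_mb() at its target.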
+ */ +#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */ +#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") +#else +#define loongson_llsc_mb() do { } while (0) +#endif + +static inline void sync_ginv(void) +{ + asm volatile("sync\t%0" :: "i"(STYPE_GINV)); +} + #include <asm-generic/barrier.h> #endif /* __ASM_BARRIER_H */ diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index c4675957b21b..830c93a010c3 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { + loongson_llsc_mb(); do { __asm__ __volatile__( " " __LL "%0, %1 # set_bit \n" @@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { + loongson_llsc_mb(); do { __asm__ __volatile__( " .set push \n" @@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) : "ir" (~(1UL << bit))); #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { + loongson_llsc_mb(); do { __asm__ __volatile__( " " __LL "%0, %1 # clear_bit \n" @@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { + loongson_llsc_mb(); do { __asm__ __volatile__( " .set push \n" @@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); unsigned long temp; + loongson_llsc_mb(); do { __asm__ __volatile__( " .set push \n" diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h index 4812d1fed0c2..d687b40b9fbb 100644 --- a/arch/mips/include/asm/cacheflush.h +++ b/arch/mips/include/asm/cacheflush.h @@ -25,7 +25,6 @@ * * MIPS specific flush operations: * - * - flush_cache_sigtramp() flush signal trampoline * - flush_icache_all() flush the entire instruction cache * - flush_data_cache_page() flushes a page from the data cache * - __flush_icache_user_range(start, end) flushes range of user instructions @@ -110,7 +109,6 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len); -extern void (*flush_cache_sigtramp)(unsigned long addr); extern void (*flush_icache_all)(void); extern void (*local_flush_data_cache_page)(void * addr); extern void (*flush_data_cache_page)(unsigned long addr); diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 638de0c25249..f345a873742d 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -36,6 +36,8 @@ */ extern unsigned long __cmpxchg_called_with_bad_pointer(void) __compiletime_error("Bad argument size for cmpxchg"); +extern unsigned long __cmpxchg64_unsupported(void) + __compiletime_error("cmpxchg64 not available; cpu_has_64bits may be false"); extern unsigned long __xchg_called_with_bad_pointer(void) __compiletime_error("Bad argument size for xchg"); @@ -204,12 +206,102 @@ static inline unsigned long 
__cmpxchg(volatile void *ptr, unsigned long old, cmpxchg((ptr), (o), (n)); \ }) #else -#include <asm-generic/cmpxchg-local.h> -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#ifndef CONFIG_SMP -#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) -#endif -#endif + +# include <asm-generic/cmpxchg-local.h> +# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +# ifdef CONFIG_SMP + +static inline unsigned long __cmpxchg64(volatile void *ptr, + unsigned long long old, + unsigned long long new) +{ + unsigned long long tmp, ret; + unsigned long flags; + + /* + * The assembly below has to combine 32 bit values into a 64 bit + * register, and split 64 bit values from one register into two. If we + * were to take an interrupt in the middle of this we'd only save the + * least significant 32 bits of each register & probably clobber the + * most significant 32 bits of the 64 bit values we're using. In order + * to avoid this we must disable interrupts. + */ + local_irq_save(flags); + + asm volatile( + " .set push \n" + " .set " MIPS_ISA_ARCH_LEVEL " \n" + /* Load 64 bits from ptr */ + "1: lld %L0, %3 # __cmpxchg64 \n" + /* + * Split the 64 bit value we loaded into the 2 registers that hold the + * ret variable. + */ + " dsra %M0, %L0, 32 \n" + " sll %L0, %L0, 0 \n" + /* + * Compare ret against old, breaking out of the loop if they don't + * match. + */ + " bne %M0, %M4, 2f \n" + " bne %L0, %L4, 2f \n" + /* + * Combine the 32 bit halves from the 2 registers that hold the new + * variable into a single 64 bit register. + */ +# if MIPS_ISA_REV >= 2 + " move %L1, %L5 \n" + " dins %L1, %M5, 32, 32 \n" +# else + " dsll %L1, %L5, 32 \n" + " dsrl %L1, %L1, 32 \n" + " .set noat \n" + " dsll $at, %M5, 32 \n" + " or %L1, %L1, $at \n" + " .set at \n" +# endif + /* Attempt to store new at ptr */ + " scd %L1, %2 \n" + /* If we failed, loop! */ + "\t" __scbeqz " %L1, 1b \n" + " .set pop \n" + "2: \n" + : "=&r"(ret), + "=&r"(tmp), + "=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr) + : GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr), + "r" (old), + "r" (new) + : "memory"); + + local_irq_restore(flags); + return ret; +} + +# define cmpxchg64(ptr, o, n) ({ \ + unsigned long long __old = (__typeof__(*(ptr)))(o); \ + unsigned long long __new = (__typeof__(*(ptr)))(n); \ + __typeof__(*(ptr)) __res; \ + \ + /* \ + * We can only use cmpxchg64 if we know that the CPU supports \ + * 64-bits, ie. lld & scd. Our call to __cmpxchg64_unsupported \ + * will cause a build error unless cpu_has_64bits is a \ + * compile-time constant 1. \ + */ \ + if (cpu_has_64bits && kernel_uses_llsc) \ + __res = __cmpxchg64((ptr), __old, __new); \ + else \ + __res = __cmpxchg64_unsupported(); \ + \ + __res; \ +}) + +# else /* !CONFIG_SMP */ +# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) +# endif /* !CONFIG_SMP */ +#endif /* !CONFIG_64BIT */ #undef __scbeqz diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 701e525641b8..6998a9796499 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -591,6 +591,19 @@ #endif /* CONFIG_MIPS_MT_SMP */ /* + * We only enable MMID support for configurations which natively support 64 bit + * atomics because getting good performance from the allocator relies upon + * efficient atomic64_*() functions. 
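As a minimal illustrative sketch (not part of the patch) of how the MMID/ASID split is meant to be consumed, assuming the cpu_has_mmid test above together with the cpu_context(), cpu_asid() and write_c0_memorymapid() helpers added later in this series:

static inline void sketch_load_mm_context(struct mm_struct *next, unsigned int cpu)
{
	if (cpu_has_mmid)
		/* MMIDs are global; just program the MemoryMapID register. */
		write_c0_memorymapid(cpu_context(cpu, next) &
				     cpu_asid_mask(&cpu_data[cpu]));
	else
		/* Classic per-CPU ASIDs live in EntryHi. */
		write_c0_entryhi(cpu_asid(cpu, next));
	mtc0_tlbw_hazard();
}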
+ */ +#ifndef cpu_has_mmid +# ifdef CONFIG_GENERIC_ATOMIC64 +# define cpu_has_mmid 0 +# else +# define cpu_has_mmid __isa_ge_and_opt(6, MIPS_CPU_MMID) +# endif +#endif + +/* * Guest capabilities */ #ifndef cpu_guest_has_conf1 diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 532b49b1dbb3..6ad7d3cabd91 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -422,6 +422,7 @@ enum cpu_type_enum { MBIT_ULL(55) /* CPU shares FTLB entries with another */ #define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \ MBIT_ULL(56) /* CPU has perf counters implemented per TC (MIPSMT ASE) */ +#define MIPS_CPU_MMID MBIT_ULL(57) /* CPU supports MemoryMapIDs */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index c14d798f3888..b83b0397462d 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -50,6 +50,7 @@ "i" (-EFAULT) \ : "memory"); \ } else if (cpu_has_llsc) { \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ @@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "i" (-EFAULT) : "memory"); } else if (cpu_has_llsc) { + loongson_llsc_mb(); __asm__ __volatile__( "# futex_atomic_cmpxchg_inatomic \n" " .set push \n" @@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); + loongson_llsc_mb(); } else return -ENOSYS; diff --git a/arch/mips/include/asm/ginvt.h b/arch/mips/include/asm/ginvt.h new file mode 100644 index 000000000000..49c6dbe37338 --- /dev/null +++ b/arch/mips/include/asm/ginvt.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MIPS_ASM_GINVT_H__ +#define __MIPS_ASM_GINVT_H__ + +#include <asm/mipsregs.h> + +enum ginvt_type { + GINVT_FULL, + GINVT_VA, + GINVT_MMID, +}; + +#ifdef TOOLCHAIN_SUPPORTS_GINV +# define _ASM_SET_GINV ".set ginv\n" +#else +_ASM_MACRO_1R1I(ginvt, rs, type, + _ASM_INSN_IF_MIPS(0x7c0000bd | (__rs << 21) | (\\type << 8)) + _ASM_INSN32_IF_MM(0x0000717c | (__rs << 16) | (\\type << 9))); +# define _ASM_SET_GINV +#endif + +static inline void ginvt(unsigned long addr, enum ginvt_type type) +{ + asm volatile( + ".set push\n" + _ASM_SET_GINV + " ginvt %0, %1\n" + ".set pop" + : /* no outputs */ + : "r"(addr), "i"(type) + : "memory"); +} + +static inline void ginvt_full(void) +{ + ginvt(0, GINVT_FULL); +} + +static inline void ginvt_va(unsigned long addr) +{ + addr &= PAGE_MASK << 1; + ginvt(addr, GINVT_VA); +} + +static inline void ginvt_mmid(void) +{ + ginvt(0, GINVT_MMID); +} + +static inline void ginvt_va_mmid(unsigned long addr) +{ + addr &= PAGE_MASK << 1; + ginvt(addr, GINVT_VA | GINVT_MMID); +} + +#endif /* __MIPS_ASM_GINVT_H__ */ diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 9d3610be2323..f0b862a83816 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -41,7 +41,7 @@ static inline unsigned long arch_local_irq_save(void) " .set push \n" " .set reorder \n" " .set noat \n" -#if defined(CONFIG_CPU_LOONGSON3) +#if defined(CONFIG_CPU_LOONGSON3) || defined (CONFIG_CPU_LOONGSON1) " mfc0 %[flags], $12 \n" " di \n" #else diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h index 73dcd63b8243..47e8827e9564 100644 --- a/arch/mips/include/asm/mach-ath79/ath79.h +++ b/arch/mips/include/asm/mach-ath79/ath79.h @@ -178,8 +178,4 @@ static inline u32 ath79_reset_rr(unsigned 
reg) void ath79_device_reset_set(u32 mask); void ath79_device_reset_clear(u32 mask); -void ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3); -void ath79_misc_irq_init(void __iomem *regs, int irq, - int irq_base, bool is_ar71xx); - #endif /* __ASM_MACH_ATH79_H */ diff --git a/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h b/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h deleted file mode 100644 index aa71216edf99..000000000000 --- a/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller - * - * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_SPI_PLATFORM_H -#define _ATH79_SPI_PLATFORM_H - -struct ath79_spi_platform_data { - unsigned bus_num; - unsigned num_chipselect; -}; - -#endif /* _ATH79_SPI_PLATFORM_H */ diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h index b0b7261ff3ad..fd91c58aaf7d 100644 --- a/arch/mips/include/asm/mach-ip27/irq.h +++ b/arch/mips/include/asm/mach-ip27/irq.h @@ -10,13 +10,15 @@ #ifndef __ASM_MACH_IP27_IRQ_H #define __ASM_MACH_IP27_IRQ_H -/* - * A hardwired interrupt number is completely stupid for this system - a - * large configuration might have thousands if not tenthousands of - * interrupts. - */ #define NR_IRQS 256 #include_next <irq.h> +#define IP27_HUB_PEND0_IRQ (MIPS_CPU_IRQ_BASE + 2) +#define IP27_HUB_PEND1_IRQ (MIPS_CPU_IRQ_BASE + 3) +#define IP27_RT_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 4) + +#define IP27_HUB_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8) +#define IP27_HUB_IRQ_COUNT 128 + #endif /* __ASM_MACH_IP27_IRQ_H */ diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h index 2ed3094dee07..1cd6a23a84f2 100644 --- a/arch/mips/include/asm/mach-ip27/mmzone.h +++ b/arch/mips/include/asm/mach-ip27/mmzone.h @@ -8,20 +8,11 @@ #define pa_to_nid(addr) NASID_TO_COMPACT_NODEID(NASID_GET(addr)) -#define LEVELS_PER_SLICE 128 - -struct slice_data { - unsigned long irq_enable_mask[2]; - int level_to_irq[LEVELS_PER_SLICE]; -}; - struct hub_data { kern_vars_t kern_vars; DECLARE_BITMAP(h_bigwin_used, HUB_NUM_BIG_WINDOW); cpumask_t h_cpus; unsigned long slice_map; - unsigned long irq_alloc_mask[2]; - struct slice_data slice[2]; }; struct node_data { diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h index 8f8fa43ba095..15d1de2300fe 100644 --- a/arch/mips/include/asm/mach-loongson32/platform.h +++ b/arch/mips/include/asm/mach-loongson32/platform.h @@ -17,19 +17,15 @@ extern struct platform_device ls1x_uart_pdev; extern struct platform_device ls1x_cpufreq_pdev; -extern struct platform_device ls1x_dma_pdev; extern struct platform_device ls1x_eth0_pdev; extern struct platform_device ls1x_eth1_pdev; extern struct platform_device ls1x_ehci_pdev; extern struct platform_device ls1x_gpio0_pdev; extern struct platform_device ls1x_gpio1_pdev; -extern struct platform_device ls1x_nand_pdev; extern struct platform_device ls1x_rtc_pdev; extern struct platform_device ls1x_wdt_pdev; void __init ls1x_clk_init(void); -void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata); -void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata); void __init ls1x_rtc_set_extclk(struct platform_device 
*pdev); void __init ls1x_serial_set_uartclk(struct platform_device *pdev); diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 402b80af91aa..1e6966e8527e 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -667,6 +667,7 @@ #define MIPS_CONF5_FRE (_ULCAST_(1) << 8) #define MIPS_CONF5_UFE (_ULCAST_(1) << 9) #define MIPS_CONF5_CA2 (_ULCAST_(1) << 14) +#define MIPS_CONF5_MI (_ULCAST_(1) << 17) #define MIPS_CONF5_CRCP (_ULCAST_(1) << 18) #define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27) #define MIPS_CONF5_EVA (_ULCAST_(1) << 28) @@ -1247,6 +1248,13 @@ __asm__(".macro parse_r var r\n\t" ENC \ ".endm") +/* Instructions with 1 register operand & 1 immediate operand */ +#define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \ + __asm__(".macro " #OP " " #R1 ", " #I2 "\n\t" \ + "parse_r __" #R1 ", \\" #R1 "\n\t" \ + ENC \ + ".endm") + /* Instructions with 2 register operands */ #define _ASM_MACRO_2R(OP, R1, R2, ENC) \ __asm__(".macro " #OP " " #R1 ", " #R2 "\n\t" \ @@ -1603,6 +1611,9 @@ do { \ #define read_c0_xcontextconfig() __read_ulong_c0_register($4, 3) #define write_c0_xcontextconfig(val) __write_ulong_c0_register($4, 3, val) +#define read_c0_memorymapid() __read_32bit_c0_register($4, 5) +#define write_c0_memorymapid(val) __write_32bit_c0_register($4, 5, val) + #define read_c0_pagemask() __read_32bit_c0_register($5, 0) #define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val) diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index 88a108ce62c1..5df0238f639b 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h @@ -7,7 +7,11 @@ #include <linux/wait.h> typedef struct { - u64 asid[NR_CPUS]; + union { + u64 asid[NR_CPUS]; + atomic64_t mmid; + }; + void *vdso; /* lock to be held whilst modifying fp_bd_emupage_allocmap */ diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index a589585be21b..cddead91acd4 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -17,8 +17,10 @@ #include <linux/smp.h> #include <linux/slab.h> +#include <asm/barrier.h> #include <asm/cacheflush.h> #include <asm/dsemul.h> +#include <asm/ginvt.h> #include <asm/hazards.h> #include <asm/tlbflush.h> #include <asm-generic/mm_hooks.h> @@ -73,6 +75,19 @@ extern unsigned long pgd_current[]; #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ /* + * The ginvt instruction will invalidate wired entries when its type field + * targets anything other than the entire TLB. That means that if we were to + * allow the kernel to create wired entries with the MMID of current->active_mm + * then those wired entries could be invalidated when we later use ginvt to + * invalidate TLB entries with that MMID. + * + * In order to prevent ginvt from trashing wired entries, we reserve one MMID + * for use by the kernel when creating wired entries. This MMID will never be + * assigned to a struct mm, and we'll never target it with a ginvt instruction. + */ +#define MMID_KERNEL_WIRED 0 + +/* * All unused by hardware upper bits will be considered * as a software asid extension. 
*/ @@ -88,7 +103,23 @@ static inline u64 asid_first_version(unsigned int cpu) return ~asid_version_mask(cpu) + 1; } -#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) +static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm) +{ + if (cpu_has_mmid) + return atomic64_read(&mm->context.mmid); + + return mm->context.asid[cpu]; +} + +static inline void set_cpu_context(unsigned int cpu, + struct mm_struct *mm, u64 ctx) +{ + if (cpu_has_mmid) + atomic64_set(&mm->context.mmid, ctx); + else + mm->context.asid[cpu] = ctx; +} + #define asid_cache(cpu) (cpu_data[cpu].asid_cache) #define cpu_asid(cpu, mm) \ (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu])) @@ -97,21 +128,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } - -/* Normal, classic MIPS get_new_mmu_context */ -static inline void -get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) -{ - u64 asid = asid_cache(cpu); - - if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { - if (cpu_has_vtag_icache) - flush_icache_all(); - local_flush_tlb_all(); /* start new asid cycle */ - } - - cpu_context(cpu, mm) = asid_cache(cpu) = asid; -} +extern void get_new_mmu_context(struct mm_struct *mm); +extern void check_mmu_context(struct mm_struct *mm); +extern void check_switch_mmu_context(struct mm_struct *mm); /* * Initialize the context related info for a new mm_struct @@ -122,8 +141,12 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) { int i; - for_each_possible_cpu(i) - cpu_context(i, mm) = 0; + if (cpu_has_mmid) { + set_cpu_context(0, mm, 0); + } else { + for_each_possible_cpu(i) + set_cpu_context(i, mm, 0); + } mm->context.bd_emupage_allocmap = NULL; spin_lock_init(&mm->context.bd_emupage_lock); @@ -140,11 +163,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, local_irq_save(flags); htw_stop(); - /* Check if our ASID is of an older version and thus invalid */ - if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu)) - get_new_mmu_context(next, cpu); - write_c0_entryhi(cpu_asid(cpu, next)); - TLBMISS_HANDLER_SETUP_PGD(next->pgd); + check_switch_mmu_context(next); /* * Mark current->active_mm as not "active" anymore. @@ -166,55 +185,55 @@ static inline void destroy_context(struct mm_struct *mm) dsemul_mm_cleanup(mm); } +#define activate_mm(prev, next) switch_mm(prev, next, current) #define deactivate_mm(tsk, mm) do { } while (0) -/* - * After we have set current->mm to a new value, this activates - * the context for the new mm so we see the new mappings. - */ -static inline void -activate_mm(struct mm_struct *prev, struct mm_struct *next) -{ - unsigned long flags; - unsigned int cpu = smp_processor_id(); - - local_irq_save(flags); - - htw_stop(); - /* Unconditionally get a new ASID. */ - get_new_mmu_context(next, cpu); - - write_c0_entryhi(cpu_asid(cpu, next)); - TLBMISS_HANDLER_SETUP_PGD(next->pgd); - - /* mark mmu ownership change */ - cpumask_clear_cpu(cpu, mm_cpumask(prev)); - cpumask_set_cpu(cpu, mm_cpumask(next)); - htw_start(); - - local_irq_restore(flags); -} - -/* - * If mm is currently active_mm, we can't really drop it. Instead, - * we will get a new one for it. 
- */ static inline void -drop_mmu_context(struct mm_struct *mm, unsigned cpu) +drop_mmu_context(struct mm_struct *mm) { unsigned long flags; + unsigned int cpu; + u32 old_mmid; + u64 ctx; local_irq_save(flags); - htw_stop(); - if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { - get_new_mmu_context(mm, cpu); + cpu = smp_processor_id(); + ctx = cpu_context(cpu, mm); + + if (!ctx) { + /* no-op */ + } else if (cpu_has_mmid) { + /* + * Globally invalidating TLB entries associated with the MMID + * is pretty cheap using the GINVT instruction, so we'll do + * that rather than incur the overhead of allocating a new + * MMID. The latter would be especially difficult since MMIDs + * are global & other CPUs may be actively using ctx. + */ + htw_stop(); + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu])); + mtc0_tlbw_hazard(); + ginvt_mmid(); + sync_ginv(); + write_c0_memorymapid(old_mmid); + instruction_hazard(); + htw_start(); + } else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { + /* + * mm is currently active, so we can't really drop it. + * Instead we bump the ASID. + */ + htw_stop(); + get_new_mmu_context(mm); write_c0_entryhi(cpu_asid(cpu, mm)); + htw_start(); } else { /* will get a new context next time */ - cpu_context(cpu, mm) = 0; + set_cpu_context(cpu, mm, 0); } - htw_start(); + local_irq_restore(flags); } diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h index b4d19c21b62c..d7fdcf0a0088 100644 --- a/arch/mips/include/asm/octeon/cvmx-helper-board.h +++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h @@ -119,18 +119,6 @@ extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port); extern int __cvmx_helper_board_interface_probe(int interface, int supported_ports); -/** - * Enable packet input/output from the hardware. This function is - * called after by cvmx_helper_packet_hardware_enable() to - * perform board specific initialization. For most boards - * nothing is needed. - * - * @interface: Interface to enable - * - * Returns Zero on success, negative on failure - */ -extern int __cvmx_helper_board_hardware_enable(int interface); - enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void); #endif /* __CVMX_HELPER_BOARD_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-smix-defs.h b/arch/mips/include/asm/octeon/cvmx-smix-defs.h deleted file mode 100644 index 7a928230b0c0..000000000000 --- a/arch/mips/include/asm/octeon/cvmx-smix-defs.h +++ /dev/null @@ -1,276 +0,0 @@ -/***********************license start*************** - * Author: Cavium Networks - * - * Contact: support@caviumnetworks.com - * This file is part of the OCTEON SDK - * - * Copyright (c) 2003-2012 Cavium Networks - * - * This file is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, Version 2, as - * published by the Free Software Foundation. - * - * This file is distributed in the hope that it will be useful, but - * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or - * NONINFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this file; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * or visit http://www.gnu.org/licenses/. 
- * - * This file may also be available under a different license from Cavium. - * Contact Cavium Networks for more information - ***********************license end**************************************/ - -#ifndef __CVMX_SMIX_DEFS_H__ -#define __CVMX_SMIX_DEFS_H__ - -static inline uint64_t CVMX_SMIX_CLK(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003818ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256; -} - -static inline uint64_t CVMX_SMIX_CMD(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003800ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256; -} - -static inline uint64_t CVMX_SMIX_EN(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003820ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256; -} - -static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case 
OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003810ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256; -} - -static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003808ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256; -} - -union cvmx_smix_clk { - uint64_t u64; - struct cvmx_smix_clk_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_25_63:39; - uint64_t mode:1; - uint64_t reserved_21_23:3; - uint64_t sample_hi:5; - uint64_t sample_mode:1; - uint64_t reserved_14_14:1; - uint64_t clk_idle:1; - uint64_t preamble:1; - uint64_t sample:4; - uint64_t phase:8; -#else - uint64_t phase:8; - uint64_t sample:4; - uint64_t preamble:1; - uint64_t clk_idle:1; - uint64_t reserved_14_14:1; - uint64_t sample_mode:1; - uint64_t sample_hi:5; - uint64_t reserved_21_23:3; - uint64_t mode:1; - uint64_t reserved_25_63:39; -#endif - } s; - struct cvmx_smix_clk_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_21_63:43; - uint64_t sample_hi:5; - uint64_t sample_mode:1; - uint64_t reserved_14_14:1; - uint64_t clk_idle:1; - uint64_t preamble:1; - uint64_t sample:4; - uint64_t phase:8; -#else - uint64_t phase:8; - uint64_t sample:4; - uint64_t preamble:1; - uint64_t clk_idle:1; - uint64_t reserved_14_14:1; - uint64_t sample_mode:1; - uint64_t sample_hi:5; - uint64_t reserved_21_63:43; -#endif - } cn30xx; -}; - -union cvmx_smix_cmd { - uint64_t u64; - struct cvmx_smix_cmd_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_18_63:46; - uint64_t phy_op:2; - uint64_t reserved_13_15:3; - uint64_t phy_adr:5; - uint64_t reserved_5_7:3; - uint64_t reg_adr:5; -#else - uint64_t reg_adr:5; - uint64_t reserved_5_7:3; - uint64_t phy_adr:5; - uint64_t reserved_13_15:3; - uint64_t phy_op:2; - uint64_t reserved_18_63:46; -#endif - } s; - struct cvmx_smix_cmd_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_17_63:47; - uint64_t phy_op:1; - uint64_t reserved_13_15:3; - uint64_t phy_adr:5; - uint64_t reserved_5_7:3; - uint64_t reg_adr:5; -#else - uint64_t reg_adr:5; - uint64_t reserved_5_7:3; - uint64_t phy_adr:5; - uint64_t reserved_13_15:3; - uint64_t phy_op:1; - uint64_t reserved_17_63:47; -#endif - } cn30xx; -}; - -union cvmx_smix_en { - uint64_t u64; - struct cvmx_smix_en_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t en:1; -#else - uint64_t en:1; - uint64_t reserved_1_63:63; -#endif - } s; -}; - -union cvmx_smix_rd_dat { - uint64_t u64; - struct cvmx_smix_rd_dat_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_18_63:46; - uint64_t pending:1; - uint64_t val:1; - uint64_t dat:16; -#else - uint64_t dat:16; - uint64_t val:1; - uint64_t 
pending:1; - uint64_t reserved_18_63:46; -#endif - } s; -}; - -union cvmx_smix_wr_dat { - uint64_t u64; - struct cvmx_smix_wr_dat_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_18_63:46; - uint64_t pending:1; - uint64_t val:1; - uint64_t dat:16; -#else - uint64_t dat:16; - uint64_t val:1; - uint64_t pending:1; - uint64_t reserved_18_63:46; -#endif - } s; -}; - -#endif diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h index 3206245d1ed6..23574c27eb40 100644 --- a/arch/mips/include/asm/pci/bridge.h +++ b/arch/mips/include/asm/pci/bridge.h @@ -45,18 +45,21 @@ #ifndef __ASSEMBLY__ -/* - * All accesses to bridge hardware registers must be done - * using 32-bit loads and stores. - */ -typedef u32 bridgereg_t; +#define ATE_V 0x01 +#define ATE_CO 0x02 +#define ATE_PREC 0x04 +#define ATE_PREF 0x08 +#define ATE_BAR 0x10 + +#define ATE_PFNSHIFT 12 +#define ATE_TIDSHIFT 8 +#define ATE_RMFSHIFT 48 -typedef u64 bridge_ate_t; +#define mkate(xaddr, xid, attr) (((xaddr) & 0x0000fffffffff000ULL) | \ + ((xid)<<ATE_TIDSHIFT) | \ + (attr)) -/* pointers to bridge ATEs - * are always "pointer to volatile" - */ -typedef volatile bridge_ate_t *bridge_ate_p; +#define BRIDGE_INTERNAL_ATES 128 /* * It is generally preferred that hardware registers on the bridge @@ -65,7 +68,7 @@ typedef volatile bridge_ate_t *bridge_ate_p; * Generated from Bridge spec dated 04oct95 */ -typedef volatile struct bridge_s { +struct bridge_regs { /* Local Registers 0x000000-0x00FFFF */ /* standard widget configuration 0x000000-0x000057 */ @@ -86,105 +89,105 @@ typedef volatile struct bridge_s { #define b_wid_tflush b_widget.w_tflush /* bridge-specific widget configuration 0x000058-0x00007F */ - bridgereg_t _pad_000058; - bridgereg_t b_wid_aux_err; /* 0x00005C */ - bridgereg_t _pad_000060; - bridgereg_t b_wid_resp_upper; /* 0x000064 */ - bridgereg_t _pad_000068; - bridgereg_t b_wid_resp_lower; /* 0x00006C */ - bridgereg_t _pad_000070; - bridgereg_t b_wid_tst_pin_ctrl; /* 0x000074 */ - bridgereg_t _pad_000078[2]; + u32 _pad_000058; + u32 b_wid_aux_err; /* 0x00005C */ + u32 _pad_000060; + u32 b_wid_resp_upper; /* 0x000064 */ + u32 _pad_000068; + u32 b_wid_resp_lower; /* 0x00006C */ + u32 _pad_000070; + u32 b_wid_tst_pin_ctrl; /* 0x000074 */ + u32 _pad_000078[2]; /* PMU & Map 0x000080-0x00008F */ - bridgereg_t _pad_000080; - bridgereg_t b_dir_map; /* 0x000084 */ - bridgereg_t _pad_000088[2]; + u32 _pad_000080; + u32 b_dir_map; /* 0x000084 */ + u32 _pad_000088[2]; /* SSRAM 0x000090-0x00009F */ - bridgereg_t _pad_000090; - bridgereg_t b_ram_perr; /* 0x000094 */ - bridgereg_t _pad_000098[2]; + u32 _pad_000090; + u32 b_ram_perr; /* 0x000094 */ + u32 _pad_000098[2]; /* Arbitration 0x0000A0-0x0000AF */ - bridgereg_t _pad_0000A0; - bridgereg_t b_arb; /* 0x0000A4 */ - bridgereg_t _pad_0000A8[2]; + u32 _pad_0000A0; + u32 b_arb; /* 0x0000A4 */ + u32 _pad_0000A8[2]; /* Number In A Can 0x0000B0-0x0000BF */ - bridgereg_t _pad_0000B0; - bridgereg_t b_nic; /* 0x0000B4 */ - bridgereg_t _pad_0000B8[2]; + u32 _pad_0000B0; + u32 b_nic; /* 0x0000B4 */ + u32 _pad_0000B8[2]; /* PCI/GIO 0x0000C0-0x0000FF */ - bridgereg_t _pad_0000C0; - bridgereg_t b_bus_timeout; /* 0x0000C4 */ + u32 _pad_0000C0; + u32 b_bus_timeout; /* 0x0000C4 */ #define b_pci_bus_timeout b_bus_timeout - bridgereg_t _pad_0000C8; - bridgereg_t b_pci_cfg; /* 0x0000CC */ - bridgereg_t _pad_0000D0; - bridgereg_t b_pci_err_upper; /* 0x0000D4 */ - bridgereg_t _pad_0000D8; - bridgereg_t b_pci_err_lower; /* 0x0000DC */ - bridgereg_t _pad_0000E0[8]; + u32 
_pad_0000C8; + u32 b_pci_cfg; /* 0x0000CC */ + u32 _pad_0000D0; + u32 b_pci_err_upper; /* 0x0000D4 */ + u32 _pad_0000D8; + u32 b_pci_err_lower; /* 0x0000DC */ + u32 _pad_0000E0[8]; #define b_gio_err_lower b_pci_err_lower #define b_gio_err_upper b_pci_err_upper /* Interrupt 0x000100-0x0001FF */ - bridgereg_t _pad_000100; - bridgereg_t b_int_status; /* 0x000104 */ - bridgereg_t _pad_000108; - bridgereg_t b_int_enable; /* 0x00010C */ - bridgereg_t _pad_000110; - bridgereg_t b_int_rst_stat; /* 0x000114 */ - bridgereg_t _pad_000118; - bridgereg_t b_int_mode; /* 0x00011C */ - bridgereg_t _pad_000120; - bridgereg_t b_int_device; /* 0x000124 */ - bridgereg_t _pad_000128; - bridgereg_t b_int_host_err; /* 0x00012C */ + u32 _pad_000100; + u32 b_int_status; /* 0x000104 */ + u32 _pad_000108; + u32 b_int_enable; /* 0x00010C */ + u32 _pad_000110; + u32 b_int_rst_stat; /* 0x000114 */ + u32 _pad_000118; + u32 b_int_mode; /* 0x00011C */ + u32 _pad_000120; + u32 b_int_device; /* 0x000124 */ + u32 _pad_000128; + u32 b_int_host_err; /* 0x00012C */ struct { - bridgereg_t __pad; /* 0x0001{30,,,68} */ - bridgereg_t addr; /* 0x0001{34,,,6C} */ + u32 __pad; /* 0x0001{30,,,68} */ + u32 addr; /* 0x0001{34,,,6C} */ } b_int_addr[8]; /* 0x000130 */ - bridgereg_t _pad_000170[36]; + u32 _pad_000170[36]; /* Device 0x000200-0x0003FF */ struct { - bridgereg_t __pad; /* 0x0002{00,,,38} */ - bridgereg_t reg; /* 0x0002{04,,,3C} */ + u32 __pad; /* 0x0002{00,,,38} */ + u32 reg; /* 0x0002{04,,,3C} */ } b_device[8]; /* 0x000200 */ struct { - bridgereg_t __pad; /* 0x0002{40,,,78} */ - bridgereg_t reg; /* 0x0002{44,,,7C} */ + u32 __pad; /* 0x0002{40,,,78} */ + u32 reg; /* 0x0002{44,,,7C} */ } b_wr_req_buf[8]; /* 0x000240 */ struct { - bridgereg_t __pad; /* 0x0002{80,,,88} */ - bridgereg_t reg; /* 0x0002{84,,,8C} */ + u32 __pad; /* 0x0002{80,,,88} */ + u32 reg; /* 0x0002{84,,,8C} */ } b_rrb_map[2]; /* 0x000280 */ #define b_even_resp b_rrb_map[0].reg /* 0x000284 */ #define b_odd_resp b_rrb_map[1].reg /* 0x00028C */ - bridgereg_t _pad_000290; - bridgereg_t b_resp_status; /* 0x000294 */ - bridgereg_t _pad_000298; - bridgereg_t b_resp_clear; /* 0x00029C */ + u32 _pad_000290; + u32 b_resp_status; /* 0x000294 */ + u32 _pad_000298; + u32 b_resp_clear; /* 0x00029C */ - bridgereg_t _pad_0002A0[24]; + u32 _pad_0002A0[24]; char _pad_000300[0x10000 - 0x000300]; /* Internal Address Translation Entry RAM 0x010000-0x0103FF */ union { - bridge_ate_t wr; /* write-only */ + u64 wr; /* write-only */ struct { - bridgereg_t _p_pad; - bridgereg_t rd; /* read-only */ + u32 _p_pad; + u32 rd; /* read-only */ } hi; } b_int_ate_ram[128]; @@ -192,8 +195,8 @@ typedef volatile struct bridge_s { /* Internal Address Translation Entry RAM LOW 0x011000-0x0113FF */ struct { - bridgereg_t _p_pad; - bridgereg_t rd; /* read-only */ + u32 _p_pad; + u32 rd; /* read-only */ } b_int_ate_ram_lo[128]; char _pad_011400[0x20000 - 0x011400]; @@ -212,7 +215,7 @@ typedef volatile struct bridge_s { } f[8]; } b_type0_cfg_dev[8]; /* 0x020000 */ - /* PCI Type 1 Configuration Space 0x028000-0x028FFF */ + /* PCI Type 1 Configuration Space 0x028000-0x028FFF */ union { /* make all access sizes available. 
*/ u8 c[0x1000 / 1]; u16 s[0x1000 / 2]; @@ -233,7 +236,7 @@ typedef volatile struct bridge_s { u8 _pad_030007[0x04fff8]; /* 0x030008-0x07FFFF */ /* External Address Translation Entry RAM 0x080000-0x0FFFFF */ - bridge_ate_t b_ext_ate_ram[0x10000]; + u64 b_ext_ate_ram[0x10000]; /* Reserved 0x100000-0x1FFFFF */ char _pad_100000[0x200000-0x100000]; @@ -259,13 +262,13 @@ typedef volatile struct bridge_s { u32 l[0x400000 / 4]; /* read-only */ u64 d[0x400000 / 8]; /* read-only */ } b_external_flash; /* 0xC00000 */ -} bridge_t; +}; /* * Field formats for Error Command Word and Auxiliary Error Command Word * of bridge. */ -typedef struct bridge_err_cmdword_s { +struct bridge_err_cmdword { union { u32 cmd_word; struct { @@ -282,7 +285,7 @@ typedef struct bridge_err_cmdword_s { rsvd:8; } berr_st; } berr_un; -} bridge_err_cmdword_t; +}; #define berr_field berr_un.berr_st #endif /* !__ASSEMBLY__ */ @@ -290,7 +293,7 @@ typedef struct bridge_err_cmdword_s { /* * The values of these macros can and should be crosschecked * regularly against the offsets of the like-named fields - * within the "bridge_t" structure above. + * within the bridge_regs structure above. */ /* Byte offset macros for Bridge internal registers */ @@ -797,49 +800,14 @@ typedef struct bridge_err_cmdword_s { #define PCI64_ATTR_RMF_MASK 0x00ff000000000000 #define PCI64_ATTR_RMF_SHFT 48 -#ifndef __ASSEMBLY__ -/* Address translation entry for mapped pci32 accesses */ -typedef union ate_u { - u64 ent; - struct ate_s { - u64 rmf:16; - u64 addr:36; - u64 targ:4; - u64 reserved:3; - u64 barrier:1; - u64 prefetch:1; - u64 precise:1; - u64 coherent:1; - u64 valid:1; - } field; -} ate_t; -#endif /* !__ASSEMBLY__ */ - -#define ATE_V 0x01 -#define ATE_CO 0x02 -#define ATE_PREC 0x04 -#define ATE_PREF 0x08 -#define ATE_BAR 0x10 - -#define ATE_PFNSHIFT 12 -#define ATE_TIDSHIFT 8 -#define ATE_RMFSHIFT 48 - -#define mkate(xaddr, xid, attr) ((xaddr) & 0x0000fffffffff000ULL) | \ - ((xid)<<ATE_TIDSHIFT) | \ - (attr) - -#define BRIDGE_INTERNAL_ATES 128 - struct bridge_controller { struct pci_controller pc; struct resource mem; struct resource io; struct resource busn; - bridge_t *base; + struct bridge_regs *base; nasid_t nasid; unsigned int widget_id; - unsigned int irq_cpu; u64 baddr; unsigned int pci_int[8]; }; @@ -847,8 +815,14 @@ struct bridge_controller { #define BRIDGE_CONTROLLER(bus) \ ((struct bridge_controller *)((bus)->sysdata)) -extern void register_bridge_irq(unsigned int irq); -extern int request_bridge_irq(struct bridge_controller *bc); +#define bridge_read(bc, reg) __raw_readl(&bc->base->reg) +#define bridge_write(bc, reg, val) __raw_writel(val, &bc->base->reg) +#define bridge_set(bc, reg, val) \ + __raw_writel(__raw_readl(&bc->base->reg) | (val), &bc->base->reg) +#define bridge_clr(bc, reg, val) \ + __raw_writel(__raw_readl(&bc->base->reg) & ~(val), &bc->base->reg) + +extern int request_bridge_irq(struct bridge_controller *bc, int pin); extern struct pci_ops bridge_pci_ops; diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 57933fc8fd98..4ccb465ef3f2 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -17,6 +17,7 @@ #include <asm/pgtable-64.h> #endif +#include <asm/cmpxchg.h> #include <asm/io.h> #include <asm/pgtable-bits.h> @@ -204,49 +205,11 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) * Make sure the buddy is global too (if it's !none, * it better already be global) */ -#ifdef CONFIG_SMP - /* - * For SMP, multiple CPUs can race, so we need to do - * this 
atomically. - */ - unsigned long page_global = _PAGE_GLOBAL; - unsigned long tmp; - - if (kernel_uses_llsc && R10000_LLSC_WAR) { - __asm__ __volatile__ ( - " .set push \n" - " .set arch=r4000 \n" - " .set noreorder \n" - "1:" __LL "%[tmp], %[buddy] \n" - " bnez %[tmp], 2f \n" - " or %[tmp], %[tmp], %[global] \n" - __SC "%[tmp], %[buddy] \n" - " beqzl %[tmp], 1b \n" - " nop \n" - "2: \n" - " .set pop \n" - : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) - : [global] "r" (page_global)); - } else if (kernel_uses_llsc) { - __asm__ __volatile__ ( - " .set push \n" - " .set "MIPS_ISA_ARCH_LEVEL" \n" - " .set noreorder \n" - "1:" __LL "%[tmp], %[buddy] \n" - " bnez %[tmp], 2f \n" - " or %[tmp], %[tmp], %[global] \n" - __SC "%[tmp], %[buddy] \n" - " beqz %[tmp], 1b \n" - " nop \n" - "2: \n" - " .set pop \n" - : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) - : [global] "r" (page_global)); - } -#else /* !CONFIG_SMP */ - if (pte_none(*buddy)) - pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; -#endif /* CONFIG_SMP */ +# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32) + cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL); +# else + cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL); +# endif } #endif } diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index b7123f9c0785..65618ff1280c 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -29,6 +29,7 @@ struct plat_smp_ops { int (*boot_secondary)(int cpu, struct task_struct *idle); void (*smp_setup)(void); void (*prepare_cpus)(unsigned int max_cpus); + void (*prepare_boot_cpu)(void); #ifdef CONFIG_HOTPLUG_CPU int (*cpu_disable)(void); void (*cpu_die)(unsigned int cpu); diff --git a/arch/mips/include/asm/sn/addrs.h b/arch/mips/include/asm/sn/addrs.h index 66814f8ba8e8..837d23e24976 100644 --- a/arch/mips/include/asm/sn/addrs.h +++ b/arch/mips/include/asm/sn/addrs.h @@ -27,16 +27,11 @@ #ifndef __ASSEMBLY__ -#define PS_UINT_CAST (unsigned long) #define UINT64_CAST (unsigned long) -#define HUBREG_CAST (volatile hubreg_t *) - #else /* __ASSEMBLY__ */ -#define PS_UINT_CAST #define UINT64_CAST -#define HUBREG_CAST #endif /* __ASSEMBLY__ */ @@ -256,42 +251,23 @@ * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S(). * They're always safe. 
*/ -#define LOCAL_HUB_ADDR(_x) (HUBREG_CAST (IALIAS_BASE + (_x))) -#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \ - 0x800000 + (_x))) -#ifdef CONFIG_SGI_IP27 -#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \ - 0x800000 + (_x))) -#endif /* CONFIG_SGI_IP27 */ +#define LOCAL_HUB_ADDR(_x) (IALIAS_BASE + (_x)) +#define REMOTE_HUB_ADDR(_n, _x) ((NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x))) #ifndef __ASSEMBLY__ -#define HUB_L(_a) *(_a) -#define HUB_S(_a, _d) *(_a) = (_d) +#define LOCAL_HUB_PTR(_x) ((u64 *)LOCAL_HUB_ADDR((_x))) +#define REMOTE_HUB_PTR(_n, _x) ((u64 *)REMOTE_HUB_ADDR((_n), (_x))) -#define LOCAL_HUB_L(_r) HUB_L(LOCAL_HUB_ADDR(_r)) -#define LOCAL_HUB_S(_r, _d) HUB_S(LOCAL_HUB_ADDR(_r), (_d)) -#define REMOTE_HUB_L(_n, _r) HUB_L(REMOTE_HUB_ADDR((_n), (_r))) -#define REMOTE_HUB_S(_n, _r, _d) HUB_S(REMOTE_HUB_ADDR((_n), (_r)), (_d)) -#define REMOTE_HUB_PI_L(_n, _sn, _r) HUB_L(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r))) -#define REMOTE_HUB_PI_S(_n, _sn, _r, _d) HUB_S(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)), (_d)) +#define LOCAL_HUB_L(_r) __raw_readq(LOCAL_HUB_PTR(_r)) +#define LOCAL_HUB_S(_r, _d) __raw_writeq((_d), LOCAL_HUB_PTR(_r)) +#define REMOTE_HUB_L(_n, _r) __raw_readq(REMOTE_HUB_PTR((_n), (_r))) +#define REMOTE_HUB_S(_n, _r, _d) __raw_writeq((_d), \ + REMOTE_HUB_PTR((_n), (_r))) #endif /* !__ASSEMBLY__ */ /* - * The following macros are used to get to a hub/bridge register, given - * the base of the register space. - */ -#define HUB_REG_PTR(_base, _off) \ - (HUBREG_CAST((__psunsigned_t)(_base) + (__psunsigned_t)(_off))) - -#define HUB_REG_PTR_L(_base, _off) \ - HUB_L(HUB_REG_PTR((_base), (_off))) - -#define HUB_REG_PTR_S(_base, _off, _data) \ - HUB_S(HUB_REG_PTR((_base), (_off)), (_data)) - -/* * Software structure locations -- permanently fixed * See diagram in kldir.h */ @@ -387,44 +363,14 @@ #define SYMMON_STK_END(nasid) (SYMMON_STK_ADDR(nasid, 0) + KLD_SYMMON_STK(nasid)->size) -/* loading symmon 4k below UNIX. the arcs loader needs the topaddr for a - * relocatable program - */ -#define UNIX_DEBUG_LOADADDR 0x300000 -#define SYMMON_LOADADDR(nasid) \ - TO_NODE(nasid, PHYS_TO_K0(UNIX_DEBUG_LOADADDR - 0x1000)) - -#define FREEMEM_OFFSET(nasid) KLD_FREEMEM(nasid)->offset -#define FREEMEM_ADDR(nasid) SYMMON_STK_END(nasid) -/* - * XXX - * Fix this. FREEMEM_ADDR should be aware of if symmon is loaded. 
- * Also, it should take into account what prom thinks to be a safe - * address - PHYS_TO_K0(NODE_OFFSET(nasid) + FREEMEM_OFFSET(nasid)) - */ -#define FREEMEM_SIZE(nasid) KLD_FREEMEM(nasid)->size - -#define PI_ERROR_OFFSET(nasid) KLD_PI_ERROR(nasid)->offset -#define PI_ERROR_ADDR(nasid) \ - TO_NODE_UNCAC((nasid), PI_ERROR_OFFSET(nasid)) -#define PI_ERROR_SIZE(nasid) KLD_PI_ERROR(nasid)->size - #define NODE_OFFSET_TO_K0(_nasid, _off) \ PHYS_TO_K0((NODE_OFFSET(_nasid) + (_off)) | CAC_BASE) #define NODE_OFFSET_TO_K1(_nasid, _off) \ TO_UNCAC((NODE_OFFSET(_nasid) + (_off)) | UNCAC_BASE) -#define K0_TO_NODE_OFFSET(_k0addr) \ - ((__psunsigned_t)(_k0addr) & NODE_ADDRSPACE_MASK) #define KERN_VARS_ADDR(nasid) KLD_KERN_VARS(nasid)->pointer #define KERN_VARS_SIZE(nasid) KLD_KERN_VARS(nasid)->size -#define KERN_XP_ADDR(nasid) KLD_KERN_XP(nasid)->pointer -#define KERN_XP_SIZE(nasid) KLD_KERN_XP(nasid)->size - -#define GPDA_ADDR(nasid) TO_NODE_CAC(nasid, GPDA_OFFSET) - #endif /* !__ASSEMBLY__ */ diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h index 471e6870d876..3f1fb1454749 100644 --- a/arch/mips/include/asm/sn/arch.h +++ b/arch/mips/include/asm/sn/arch.h @@ -17,8 +17,6 @@ #include <asm/sn/sn0/arch.h> #endif -typedef u64 hubreg_t; - #define cputonasid(cpu) (sn_cpu_info[(cpu)].p_nasid) #define cputoslice(cpu) (sn_cpu_info[(cpu)].p_slice) #define makespnum(_nasid, _slice) \ diff --git a/arch/mips/include/asm/sn/io.h b/arch/mips/include/asm/sn/io.h index d5174d04538c..211f1e83b523 100644 --- a/arch/mips/include/asm/sn/io.h +++ b/arch/mips/include/asm/sn/io.h @@ -44,7 +44,7 @@ IIO_ITTE_PUT((nasid), HUB_PIO_MAP_TO_MEM, \ (bigwin), IIO_ITTE_INVALID_WIDGET, 0) -#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_ADDR((nasid), IIO_ITTE(bigwin)) +#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_PTR((nasid), IIO_ITTE(bigwin)) /* * Macro which takes the widget number, and returns the diff --git a/arch/mips/include/asm/sn/sn0/addrs.h b/arch/mips/include/asm/sn/sn0/addrs.h index 6b53070f400f..f13df84edfdd 100644 --- a/arch/mips/include/asm/sn/sn0/addrs.h +++ b/arch/mips/include/asm/sn/sn0/addrs.h @@ -134,11 +134,6 @@ #define CALIAS_BASE CAC_BASE - - -#define BRIDGE_REG_PTR(_base, _off) ((volatile bridgereg_t *) \ - ((__psunsigned_t)(_base) + (__psunsigned_t)(_off))) - #define SN0_WIDGET_BASE(_nasid, _wid) (NODE_SWIN_BASE((_nasid), (_wid))) /* Turn on sable logging for the processors whose bits are set. 
*/ diff --git a/arch/mips/include/asm/tlbflush.h b/arch/mips/include/asm/tlbflush.h index 40a361092491..9789e7a32def 100644 --- a/arch/mips/include/asm/tlbflush.h +++ b/arch/mips/include/asm/tlbflush.h @@ -14,7 +14,6 @@ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages */ extern void local_flush_tlb_all(void); -extern void local_flush_tlb_mm(struct mm_struct *mm); extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void local_flush_tlb_kernel_range(unsigned long start, @@ -23,6 +22,8 @@ extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); extern void local_flush_tlb_one(unsigned long vaddr); +#include <asm/mmu_context.h> + #ifdef CONFIG_SMP extern void flush_tlb_all(void); @@ -36,7 +37,7 @@ extern void flush_tlb_one(unsigned long vaddr); #else /* CONFIG_SMP */ #define flush_tlb_all() local_flush_tlb_all() -#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_mm(mm) drop_mmu_context(mm) #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end) #define flush_tlb_kernel_range(vmaddr,end) \ local_flush_tlb_kernel_range(vmaddr, end) diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index d43c1dc6ef15..62b298c50905 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -69,7 +69,6 @@ extern u64 __ua_limit; #define USER_DS ((mm_segment_t) { __UA_LIMIT }) #endif -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 71370fb3ceef..eb9f33f8a8b3 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -11,6 +11,7 @@ #define _UAPI_ASM_SOCKET_H #include <asm/sockios.h> +#include <asm/bitsperlong.h> /* * For setsockopt(2) @@ -38,8 +39,8 @@ #define SO_RCVBUF 0x1002 /* Receive buffer. 
*/ #define SO_SNDLOWAT 0x1003 /* send low-water mark */ #define SO_RCVLOWAT 0x1004 /* receive low-water mark */ -#define SO_SNDTIMEO 0x1005 /* send timeout */ -#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_SNDTIMEO_OLD 0x1005 /* send timeout */ +#define SO_RCVTIMEO_OLD 0x1006 /* receive timeout */ #define SO_ACCEPTCONN 0x1009 #define SO_PROTOCOL 0x1028 /* protocol type */ #define SO_DOMAIN 0x1029 /* domain/socket family */ @@ -65,21 +66,14 @@ #define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP #define SO_PEERSEC 30 #define SO_SNDBUFFORCE 31 #define SO_RCVBUFFORCE 33 #define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #define SO_MARK 36 -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 40 #define SO_WIFI_STATUS 41 @@ -126,4 +120,41 @@ #define SO_TXTIME 61 #define SCM_TXTIME SO_TXTIME +#define SO_BINDTOIFINDEX 62 + +#define SO_TIMESTAMP_OLD 29 +#define SO_TIMESTAMPNS_OLD 35 +#define SO_TIMESTAMPING_OLD 37 + +#define SO_TIMESTAMP_NEW 63 +#define SO_TIMESTAMPNS_NEW 64 +#define SO_TIMESTAMPING_NEW 65 + +#define SO_RCVTIMEO_NEW 66 +#define SO_SNDTIMEO_NEW 67 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? 
SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c index afb40f8bce96..7e63c54eb8d2 100644 --- a/arch/mips/jz4740/setup.c +++ b/arch/mips/jz4740/setup.c @@ -31,7 +31,6 @@ #define JZ4740_EMC_SDRAM_CTRL 0x80 - static void __init jz4740_detect_mem(void) { void __iomem *jz_emc_base; @@ -66,15 +65,22 @@ static unsigned long __init get_board_mach_type(const void *fdt) void __init plat_mem_setup(void) { int offset; + void *dtb; jz4740_reset_init(); - __dt_setup_arch(__dtb_start); - offset = fdt_path_offset(__dtb_start, "/memory"); + if (__dtb_start != __dtb_end) + dtb = __dtb_start; + else + dtb = (void *)fw_passed_dtb; + + __dt_setup_arch(dtb); + + offset = fdt_path_offset(dtb, "/memory"); if (offset < 0) jz4740_detect_mem(); - mips_machtype = get_board_mach_type(__dtb_start); + mips_machtype = get_board_mach_type(dtb); } void __init device_tree_init(void) diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c index 0b9535bc2c53..6b2a4a902a98 100644 --- a/arch/mips/kernel/cmpxchg.c +++ b/arch/mips/kernel/cmpxchg.c @@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) { - u32 mask, old32, new32, load32; + u32 mask, old32, new32, load32, load; volatile u32 *ptr32; unsigned int shift; - u8 load; /* Check that ptr is naturally aligned */ WARN_ON((unsigned long)ptr & (size - 1)); diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 95b18a194f53..d5e335e6846a 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -872,10 +872,19 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) static inline unsigned int decode_config5(struct cpuinfo_mips *c) { - unsigned int config5; + unsigned int config5, max_mmid_width; + unsigned long asid_mask; config5 = read_c0_config5(); config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE); + + if (cpu_has_mips_r6) { + if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid) + config5 |= MIPS_CONF5_MI; + else + config5 &= ~MIPS_CONF5_MI; + } + write_c0_config5(config5); if (config5 & MIPS_CONF5_EVA) @@ -894,6 +903,50 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) if (config5 & MIPS_CONF5_CRCP) elf_hwcap |= HWCAP_MIPS_CRC32; + if (cpu_has_mips_r6) { + /* Ensure the write to config5 above takes effect */ + back_to_back_c0_hazard(); + + /* Check whether we successfully enabled MMID support */ + config5 = read_c0_config5(); + if (config5 & MIPS_CONF5_MI) + c->options |= MIPS_CPU_MMID; + + /* + * Warn if we've hardcoded cpu_has_mmid to a value unsuitable + * for the CPU we're running on, or if CPUs in an SMP system + * have inconsistent MMID support. + */ + WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI)); + + if (cpu_has_mmid) { + write_c0_memorymapid(~0ul); + back_to_back_c0_hazard(); + asid_mask = read_c0_memorymapid(); + + /* + * We maintain a bitmap to track MMID allocation, and + * need a sensible upper bound on the size of that + * bitmap. The initial CPU with MMID support (I6500) + * supports 16 bit MMIDs, which gives us an 8KiB + * bitmap. The architecture recommends that hardware + * support 32 bit MMIDs, which would give us a 512MiB + * bitmap - that's too big in most cases. 
+ * + * Cap MMID width at 16 bits for now & we can revisit + * this if & when hardware supports anything wider. + */ + max_mmid_width = 16; + if (asid_mask > GENMASK(max_mmid_width - 1, 0)) { + pr_info("Capping MMID width at %d bits", + max_mmid_width); + asid_mask = GENMASK(max_mmid_width - 1, 0); + } + + set_cpu_asid_mask(c, asid_mask); + } + } + return config5 & MIPS_CONF_M; } diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 2ea0ec95efe9..4b5e1f2bfbce 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -86,7 +86,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return -EFAULT; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); set_fs(old_fs); @@ -111,7 +111,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, ip -= 4; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); set_fs(old_fs); @@ -135,7 +135,7 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, return -EFAULT; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); set_fs(old_fs); diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index ba150c755fcc..85b6c60f285d 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void) void __init init_IRQ(void) { int i; + unsigned int order = get_order(IRQ_STACK_SIZE); for (i = 0; i < NR_IRQS; i++) irq_set_noprobe(i); @@ -62,8 +63,7 @@ void __init init_IRQ(void) arch_init_irq(); for_each_possible_cpu(i) { - int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE; - void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages); + void *s = (void *)__get_free_pages(GFP_KERNEL, order); irq_stack[i] = s; pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i, diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 149100e1bc7c..6e574c02e4c3 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -212,7 +212,7 @@ void kgdb_call_nmi_hook(void *ignored) mm_segment_t old_fs; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); kgdb_nmicallback(raw_smp_processor_id(), NULL); @@ -318,7 +318,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, /* Kernel mode. Set correct address limit */ old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); if (atomic_read(&kgdb_active) != -1) kgdb_nmicallback(smp_processor_id(), regs); diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 8f5bd04f320a..537e8d091874 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -382,8 +382,8 @@ void mips_cm_error_report(void) sc_bit ? 
"True" : "False", cm2_cmd[cmd_bits], sport_bits); } - pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error, - cm2_causes[cause], buf); + pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error, + cm2_causes[cause], buf); pr_err("CM_ADDR =%08llx\n", cm_addr); pr_err("CM_OTHER=%08llx %s\n", cm_other, cm2_causes[ocause]); } else { /* CM3 */ @@ -457,5 +457,5 @@ void mips_cm_error_report(void) } /* reprime cause register */ - write_gcr_error_cause(0); + write_gcr_error_cause(cm_error); } diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index c50c89a978f1..b4d210bfcdae 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -2351,23 +2351,10 @@ DEFINE_SHOW_ATTRIBUTE(mipsr2_clear); static int __init mipsr2_init_debugfs(void) { - struct dentry *mipsr2_emul; - - if (!mips_debugfs_dir) - return -ENODEV; - - mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO, - mips_debugfs_dir, NULL, - &mipsr2_emul_fops); - if (!mipsr2_emul) - return -ENOMEM; - - mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO, - mips_debugfs_dir, NULL, - &mipsr2_clear_fops); - if (!mipsr2_emul) - return -ENOMEM; - + debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL, + &mipsr2_emul_fops); + debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir, + NULL, &mipsr2_clear_fops); return 0; } diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 6829a064aac8..339870ed92f7 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) static int get_frame_info(struct mips_frame_info *info) { bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); - union mips_instruction insn, *ip, *ip_end; + union mips_instruction insn, *ip; const unsigned int max_insns = 128; unsigned int last_insn_size = 0; unsigned int i; @@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info) if (!ip) goto err; - ip_end = (void *)ip + info->func_size; - - for (i = 0; i < max_insns && ip < ip_end; i++) { + for (i = 0; i < max_insns; i++) { ip = (void *)ip + last_insn_size; + if (is_mmips && mm_insn_16bit(ip->halfword[0])) { insn.word = ip->halfword[0] << 16; last_insn_size = 2; diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c index 2703f218202e..0a9bd7b0983b 100644 --- a/arch/mips/kernel/segment.c +++ b/arch/mips/kernel/segment.c @@ -95,18 +95,9 @@ static const struct file_operations segments_fops = { static int __init segments_info(void) { - struct dentry *segments; - - if (cpu_has_segments) { - if (!mips_debugfs_dir) - return -ENODEV; - - segments = debugfs_create_file("segments", S_IRUGO, - mips_debugfs_dir, NULL, - &segments_fops); - if (!segments) - return -ENOMEM; - } + if (cpu_has_segments) + debugfs_create_file("segments", S_IRUGO, mips_debugfs_dir, NULL, + &segments_fops); return 0; } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8c6c48ed786a..5151532ad959 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -384,7 +384,8 @@ static void __init bootmem_init(void) init_initrd(); reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end)); - memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT); + memblock_reserve(PHYS_OFFSET, + (reserved_end << PAGE_SHIFT) - PHYS_OFFSET); /* * max_low_pfn is not a number of pages. 
The number of pages @@ -1010,12 +1011,7 @@ unsigned long fw_passed_dtb; struct dentry *mips_debugfs_dir; static int __init debugfs_mips(void) { - struct dentry *d; - - d = debugfs_create_dir("mips", NULL); - if (!d) - return -ENOMEM; - mips_debugfs_dir = d; + mips_debugfs_dir = debugfs_create_dir("mips", NULL); return 0; } arch_initcall(debugfs_mips); diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index d84b9066b465..bc4bb3c6bd00 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -39,6 +39,7 @@ #include <linux/atomic.h> #include <asm/cpu.h> +#include <asm/ginvt.h> #include <asm/processor.h> #include <asm/idle.h> #include <asm/r4k-timer.h> @@ -443,6 +444,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* preload SMP state for boot cpu */ void smp_prepare_boot_cpu(void) { + if (mp_ops->prepare_boot_cpu) + mp_ops->prepare_boot_cpu(); set_cpu_possible(0, true); set_cpu_online(0, true); } @@ -482,12 +485,21 @@ static void flush_tlb_all_ipi(void *info) void flush_tlb_all(void) { + if (cpu_has_mmid) { + htw_stop(); + ginvt_full(); + sync_ginv(); + instruction_hazard(); + htw_start(); + return; + } + on_each_cpu(flush_tlb_all_ipi, NULL, 1); } static void flush_tlb_mm_ipi(void *mm) { - local_flush_tlb_mm((struct mm_struct *)mm); + drop_mmu_context((struct mm_struct *)mm); } /* @@ -530,17 +542,22 @@ void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); - if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + if (cpu_has_mmid) { + /* + * No need to worry about other CPUs - the ginvt in + * drop_mmu_context() will be globalized. + */ + } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_on_other_tlbs(flush_tlb_mm_ipi, mm); } else { unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != smp_processor_id() && cpu_context(cpu, mm)) - cpu_context(cpu, mm) = 0; + set_cpu_context(cpu, mm, 0); } } - local_flush_tlb_mm(mm); + drop_mmu_context(mm); preempt_enable(); } @@ -561,9 +578,26 @@ static void flush_tlb_range_ipi(void *info) void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; + unsigned long addr; + u32 old_mmid; preempt_disable(); - if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + if (cpu_has_mmid) { + htw_stop(); + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(cpu_asid(0, mm)); + mtc0_tlbw_hazard(); + addr = round_down(start, PAGE_SIZE * 2); + end = round_up(end, PAGE_SIZE * 2); + do { + ginvt_va_mmid(addr); + sync_ginv(); + addr += PAGE_SIZE * 2; + } while (addr < end); + write_c0_memorymapid(old_mmid); + instruction_hazard(); + htw_start(); + } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { struct flush_tlb_data fd = { .vma = vma, .addr1 = start, @@ -571,6 +605,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l }; smp_on_other_tlbs(flush_tlb_range_ipi, &fd); + local_flush_tlb_range(vma, start, end); } else { unsigned int cpu; int exec = vma->vm_flags & VM_EXEC; @@ -583,10 +618,10 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l * mm has been completely unused by that CPU. 
*/ if (cpu != smp_processor_id() && cpu_context(cpu, mm)) - cpu_context(cpu, mm) = !exec; + set_cpu_context(cpu, mm, !exec); } + local_flush_tlb_range(vma, start, end); } - local_flush_tlb_range(vma, start, end); preempt_enable(); } @@ -616,14 +651,28 @@ static void flush_tlb_page_ipi(void *info) void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { + u32 old_mmid; + preempt_disable(); - if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { + if (cpu_has_mmid) { + htw_stop(); + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(cpu_asid(0, vma->vm_mm)); + mtc0_tlbw_hazard(); + ginvt_va_mmid(page); + sync_ginv(); + write_c0_memorymapid(old_mmid); + instruction_hazard(); + htw_start(); + } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) || + (current->mm != vma->vm_mm)) { struct flush_tlb_data fd = { .vma = vma, .addr1 = page, }; smp_on_other_tlbs(flush_tlb_page_ipi, &fd); + local_flush_tlb_page(vma, page); } else { unsigned int cpu; @@ -635,10 +684,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) * by that CPU. */ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) - cpu_context(cpu, vma->vm_mm) = 1; + set_cpu_context(cpu, vma->vm_mm, 1); } + local_flush_tlb_page(vma, page); } - local_flush_tlb_page(vma, page); preempt_enable(); } diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c index eaed550e79a2..ab4e3e1b138d 100644 --- a/arch/mips/kernel/spinlock_test.c +++ b/arch/mips/kernel/spinlock_test.c @@ -118,23 +118,10 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); static int __init spinlock_test(void) { - struct dentry *d; - - if (!mips_debugfs_dir) - return -ENODEV; - - d = debugfs_create_file("spin_single", S_IRUGO, - mips_debugfs_dir, NULL, - &fops_ss); - if (!d) - return -ENOMEM; - - d = debugfs_create_file("spin_multi", S_IRUGO, - mips_debugfs_dir, NULL, - &fops_multi); - if (!d) - return -ENOMEM; - + debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL, + &fops_ss); + debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, + &fops_multi); return 0; } device_initcall(spinlock_test); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index c91097f7b32f..42d411125690 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1077,7 +1077,7 @@ asmlinkage void do_tr(struct pt_regs *regs) seg = get_fs(); if (!user_mode(regs)) - set_fs(get_ds()); + set_fs(KERNEL_DS); prev_state = exception_enter(); current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; @@ -2223,7 +2223,9 @@ void per_cpu_trap_init(bool is_boot_cpu) cp0_fdc_irq = -1; } - if (!cpu_data[cpu].asid_cache) + if (cpu_has_mmid) + cpu_data[cpu].asid_cache = 0; + else if (!cpu_data[cpu].asid_cache) cpu_data[cpu].asid_cache = asid_first_version(cpu); mmgrab(&init_mm); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 595ca9c85111..76e33f940971 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -89,6 +89,7 @@ #include <asm/fpu.h> #include <asm/fpu_emulator.h> #include <asm/inst.h> +#include <asm/mmu_context.h> #include <linux/uaccess.h> #define STR(x) __STR(x) @@ -2374,18 +2375,10 @@ sigbus: #ifdef CONFIG_DEBUG_FS static int __init debugfs_unaligned(void) { - struct dentry *d; - - if (!mips_debugfs_dir) - return -ENODEV; - d = debugfs_create_u32("unaligned_instructions", S_IRUGO, - mips_debugfs_dir, &unaligned_instructions); - if (!d) - return -ENOMEM; - d = 
debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, - mips_debugfs_dir, &unaligned_action); - if (!d) - return -ENOMEM; + debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir, + &unaligned_instructions); + debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, + mips_debugfs_dir, &unaligned_action); return 0; } arch_initcall(debugfs_unaligned); diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index ec9ed23bca7f..0074427b04fb 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1016,10 +1016,10 @@ static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu, */ preempt_disable(); cpu = smp_processor_id(); - get_new_mmu_context(kern_mm, cpu); + get_new_mmu_context(kern_mm); for_each_possible_cpu(i) if (i != cpu) - cpu_context(i, kern_mm) = 0; + set_cpu_context(i, kern_mm, 0); preempt_enable(); } kvm_write_c0_guest_entryhi(cop0, entryhi); @@ -1090,8 +1090,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, if (i == cpu) continue; if (user) - cpu_context(i, user_mm) = 0; - cpu_context(i, kern_mm) = 0; + set_cpu_context(i, user_mm, 0); + set_cpu_context(i, kern_mm, 0); } preempt_enable(); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 3734cd58895e..6d0517ac18e5 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1723,6 +1723,11 @@ static int __init kvm_mips_init(void) { int ret; + if (cpu_has_mmid) { + pr_warn("KVM does not yet support MMIDs. KVM Disabled\n"); + return -EOPNOTSUPP; + } + ret = kvm_mips_entry_setup(); if (ret) return ret; diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index 6a0d7040d882..73daa6ad33af 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -1056,11 +1056,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) */ if (current->flags & PF_VCPU) { mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; - if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu)) - get_new_mmu_context(mm, cpu); - write_c0_entryhi(cpu_asid(cpu, mm)); - TLBMISS_HANDLER_SETUP_PGD(mm->pgd); + check_switch_mmu_context(mm); kvm_mips_suspend_mm(cpu); ehb(); } @@ -1074,11 +1070,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu) if (current->flags & PF_VCPU) { /* Restore normal Linux process memory map */ - if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu))) - get_new_mmu_context(current->mm, cpu); - write_c0_entryhi(cpu_asid(cpu, current->mm)); - TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd); + check_switch_mmu_context(current->mm); kvm_mips_resume_mm(cpu); ehb(); } @@ -1106,14 +1098,14 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu, kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN); kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER); for_each_possible_cpu(i) { - cpu_context(i, kern_mm) = 0; - cpu_context(i, user_mm) = 0; + set_cpu_context(i, kern_mm, 0); + set_cpu_context(i, user_mm, 0); } /* Generate new ASID for current mode */ if (reload_asid) { mm = KVM_GUEST_KERNEL_MODE(vcpu) ? 
kern_mm : user_mm; - get_new_mmu_context(mm, cpu); + get_new_mmu_context(mm); htw_stop(); write_c0_entryhi(cpu_asid(cpu, mm)); TLBMISS_HANDLER_SETUP_PGD(mm->pgd); @@ -1219,7 +1211,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run, if (gasid != vcpu->arch.last_user_gasid) { kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER); for_each_possible_cpu(i) - cpu_context(i, user_mm) = 0; + set_cpu_context(i, user_mm, 0); vcpu->arch.last_user_gasid = gasid; } } @@ -1228,9 +1220,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run, * Check if ASID is stale. This may happen due to a TLB flush request or * a lazy user MM invalidation. */ - if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu)) - get_new_mmu_context(mm, cpu); + check_mmu_context(mm); } static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) @@ -1266,11 +1256,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) cpu = smp_processor_id(); /* Restore normal Linux process memory map */ - if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu))) - get_new_mmu_context(current->mm, cpu); - write_c0_entryhi(cpu_asid(cpu, current->mm)); - TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd); + check_switch_mmu_context(current->mm); kvm_mips_resume_mm(cpu); htw_start(); diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 74805035edc8..dde20887a70d 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -2454,10 +2454,10 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu) * Root ASID dealiases guest GPA mappings in the root TLB. * Allocate new root ASID if needed. */ - if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask) - || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu)) - get_new_mmu_context(gpa_mm, cpu); + if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)) + get_new_mmu_context(gpa_mm); + else + check_mmu_context(gpa_mm); } } diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig index 188de95d6dbd..6c6802e482c9 100644 --- a/arch/mips/lantiq/Kconfig +++ b/arch/mips/lantiq/Kconfig @@ -52,8 +52,4 @@ config PCI_LANTIQ bool "PCI Support" depends on SOC_XWAY && PCI -config XRX200_PHY_FW - bool "XRX200 PHY firmware loader" - depends on SOC_XWAY - endif diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c index 577ec81b557d..3deab9a77718 100644 --- a/arch/mips/lantiq/xway/vmmc.c +++ b/arch/mips/lantiq/xway/vmmc.c @@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_device *pdev) dma_addr_t dma; cp1_base = - (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE, - &dma, GFP_ATOMIC)); + (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE, + &dma, GFP_KERNEL)); gpio_count = of_gpio_count(pdev->dev.of_node); while (gpio_count > 0) { diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index 781ad96b78c4..83ed37298e66 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c @@ -10,6 +10,7 @@ #include <asm/hazards.h> #include <asm/mipsregs.h> +#include <asm/mmu_context.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlbdebug.h> @@ -73,12 +74,13 @@ static inline const char *msk2str(unsigned int mask) static void dump_tlb(int first, int last) { - unsigned long s_entryhi, entryhi, asid; + unsigned long s_entryhi, entryhi, asid, mmid; unsigned long long entrylo0, entrylo1, pa; unsigned int s_index, s_pagemask, s_guestctl1 = 0; unsigned int pagemask, guestctl1 = 0, c0, c1, i; unsigned long asidmask = 
cpu_asid_mask(&current_cpu_data); int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4); + unsigned long uninitialized_var(s_mmid); #ifdef CONFIG_32BIT bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA); int pwidth = xpa ? 11 : 8; @@ -92,7 +94,12 @@ static void dump_tlb(int first, int last) s_pagemask = read_c0_pagemask(); s_entryhi = read_c0_entryhi(); s_index = read_c0_index(); - asid = s_entryhi & asidmask; + + if (cpu_has_mmid) + asid = s_mmid = read_c0_memorymapid(); + else + asid = s_entryhi & asidmask; + if (cpu_has_guestid) s_guestctl1 = read_c0_guestctl1(); @@ -105,6 +112,12 @@ static void dump_tlb(int first, int last) entryhi = read_c0_entryhi(); entrylo0 = read_c0_entrylo0(); entrylo1 = read_c0_entrylo1(); + + if (cpu_has_mmid) + mmid = read_c0_memorymapid(); + else + mmid = entryhi & asidmask; + if (cpu_has_guestid) guestctl1 = read_c0_guestctl1(); @@ -124,8 +137,7 @@ static void dump_tlb(int first, int last) * leave only a single G bit set after a machine check exception * due to duplicate TLB entry. */ - if (!((entrylo0 | entrylo1) & ENTRYLO_G) && - (entryhi & asidmask) != asid) + if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid)) continue; /* @@ -138,7 +150,7 @@ static void dump_tlb(int first, int last) pr_cont("va=%0*lx asid=%0*lx", vwidth, (entryhi & ~0x1fffUL), - asidwidth, entryhi & asidmask); + asidwidth, mmid); if (cpu_has_guestid) pr_cont(" gid=%02lx", (guestctl1 & MIPS_GCTL1_RID) diff --git a/arch/mips/loongson32/Kconfig b/arch/mips/loongson32/Kconfig index 462b126f45aa..6dacc1438906 100644 --- a/arch/mips/loongson32/Kconfig +++ b/arch/mips/loongson32/Kconfig @@ -15,7 +15,6 @@ config LOONGSON1_LS1B select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_HIGHMEM - select SYS_SUPPORTS_MIPS16 select SYS_HAS_EARLY_PRINTK select USE_GENERIC_EARLY_PRINTK_8250 select COMMON_CLK @@ -31,7 +30,6 @@ config LOONGSON1_LS1C select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_HIGHMEM - select SYS_SUPPORTS_MIPS16 select SYS_HAS_EARLY_PRINTK select USE_GENERIC_EARLY_PRINTK_8250 select COMMON_CLK diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform index a0dbb3b2f2de..333215593092 100644 --- a/arch/mips/loongson32/Platform +++ b/arch/mips/loongson32/Platform @@ -1,4 +1,4 @@ -cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32 -Wa,--trap +cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32r2 -Wa,--trap platform-$(CONFIG_MACH_LOONGSON32) += loongson32/ cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32 -load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80100000 +load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80200000 diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c index ac584c5823d0..0bf355c8bcb2 100644 --- a/arch/mips/loongson32/common/platform.c +++ b/arch/mips/loongson32/common/platform.c @@ -81,42 +81,6 @@ struct platform_device ls1x_cpufreq_pdev = { }, }; -/* DMA */ -static struct resource ls1x_dma_resources[] = { - [0] = { - .start = LS1X_DMAC_BASE, - .end = LS1X_DMAC_BASE + SZ_4 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = LS1X_DMA0_IRQ, - .end = LS1X_DMA0_IRQ, - .flags = IORESOURCE_IRQ, - }, - [2] = { - .start = LS1X_DMA1_IRQ, - .end = LS1X_DMA1_IRQ, - .flags = IORESOURCE_IRQ, - }, - [3] = { - .start = LS1X_DMA2_IRQ, - .end = LS1X_DMA2_IRQ, - .flags = IORESOURCE_IRQ, - }, -}; - -struct platform_device ls1x_dma_pdev = { - .name = "ls1x-dma", - .id = -1, - .num_resources = ARRAY_SIZE(ls1x_dma_resources), -
.resource = ls1x_dma_resources, -}; - -void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata) -{ - ls1x_dma_pdev.dev.platform_data = pdata; -} - /* Synopsys Ethernet GMAC */ static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = { .phy_mask = 0, @@ -291,33 +255,6 @@ struct platform_device ls1x_gpio1_pdev = { .resource = ls1x_gpio1_resources, }; -/* NAND Flash */ -static struct resource ls1x_nand_resources[] = { - [0] = { - .start = LS1X_NAND_BASE, - .end = LS1X_NAND_BASE + SZ_32 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - /* DMA channel 0 is dedicated to NAND */ - .start = LS1X_DMA_CHANNEL0, - .end = LS1X_DMA_CHANNEL0, - .flags = IORESOURCE_DMA, - }, -}; - -struct platform_device ls1x_nand_pdev = { - .name = "ls1x-nand", - .id = -1, - .num_resources = ARRAY_SIZE(ls1x_nand_resources), - .resource = ls1x_nand_resources, -}; - -void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata) -{ - ls1x_nand_pdev.dev.platform_data = pdata; -} - /* USB EHCI */ static u64 ls1x_ehci_dmamask = DMA_BIT_MASK(32); diff --git a/arch/mips/loongson32/ls1b/board.c b/arch/mips/loongson32/ls1b/board.c index 01aceaace314..447b15fc0a2b 100644 --- a/arch/mips/loongson32/ls1b/board.c +++ b/arch/mips/loongson32/ls1b/board.c @@ -16,30 +16,6 @@ #include <nand.h> #include <platform.h> -struct plat_ls1x_dma ls1x_dma_pdata = { - .nr_channels = 3, -}; - -static struct mtd_partition ls1x_nand_parts[] = { - { - .name = "kernel", - .offset = 0, - .size = SZ_16M, - }, - { - .name = "rootfs", - .offset = MTDPART_OFS_APPEND, - .size = MTDPART_SIZ_FULL, - }, -}; - -struct plat_ls1x_nand ls1x_nand_pdata = { - .parts = ls1x_nand_parts, - .nr_parts = ARRAY_SIZE(ls1x_nand_parts), - .hold_cycle = 0x2, - .wait_cycle = 0xc, -}; - static const struct gpio_led ls1x_gpio_leds[] __initconst = { { .name = "LED9", @@ -64,13 +40,11 @@ static const struct gpio_led_platform_data ls1x_led_pdata __initconst = { static struct platform_device *ls1b_platform_devices[] __initdata = { &ls1x_uart_pdev, &ls1x_cpufreq_pdev, - &ls1x_dma_pdev, &ls1x_eth0_pdev, &ls1x_eth1_pdev, &ls1x_ehci_pdev, &ls1x_gpio0_pdev, &ls1x_gpio1_pdev, - &ls1x_nand_pdev, &ls1x_rtc_pdev, &ls1x_wdt_pdev, }; @@ -78,8 +52,6 @@ static struct platform_device *ls1b_platform_devices[] __initdata = { static int __init ls1b_platform_init(void) { ls1x_serial_set_uartclk(&ls1x_uart_pdev); - ls1x_dma_set_platdata(&ls1x_dma_pdata); - ls1x_nand_set_platdata(&ls1x_nand_pdata); gpio_led_register_device(-1, &ls1x_led_pdata); diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform index 0fce4608aa88..c1a4d4dc4665 100644 --- a/arch/mips/loongson64/Platform +++ b/arch/mips/loongson64/Platform @@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS endif cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap + +# +# Some versions of binutils, not currently mainline as of 2019/02/04, support +# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction +# to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a +# description). +# +# We disable this in order to prevent the assembler meddling with the +# instruction that labels refer to, ie. if we label an ll instruction: +# +# 1: ll v0, 0(a0) +# +# ...then with the assembler fix applied the label may actually point at a sync +# instruction inserted by the assembler, and if we were using the label in an +# exception table the table would no longer contain the address of the ll +# instruction. +# +# Avoid this by explicitly disabling that assembler behaviour. 
If upstream +# binutils does not merge support for the flag then we can revisit & remove +# this later - for now it ensures vendor toolchains don't cause problems. +# +cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,) + # # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a # as MIPS64 R2; older versions as just R1. This leaves the possibility open diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c index a60715e11306..b26892ce871c 100644 --- a/arch/mips/loongson64/common/reset.c +++ b/arch/mips/loongson64/common/reset.c @@ -59,7 +59,12 @@ static void loongson_poweroff(void) { #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE mach_prepare_shutdown(); - unreachable(); + + /* + * It needs a wait loop here, but mips/kernel/reset.c already calls + * a generic delay loop, machine_hang(), so simply return. + */ + return; #else void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c index 58798f527356..387724860fa6 100644 --- a/arch/mips/math-emu/me-debugfs.c +++ b/arch/mips/math-emu/me-debugfs.c @@ -189,32 +189,21 @@ static int __init debugfs_fpuemu(void) { struct dentry *fpuemu_debugfs_base_dir; struct dentry *fpuemu_debugfs_inst_dir; - struct dentry *d, *reset_file; - - if (!mips_debugfs_dir) - return -ENODEV; fpuemu_debugfs_base_dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir); - if (!fpuemu_debugfs_base_dir) - return -ENOMEM; - reset_file = debugfs_create_file("fpuemustats_clear", 0444, - mips_debugfs_dir, NULL, - &fpuemustats_clear_fops); - if (!reset_file) - return -ENOMEM; + debugfs_create_file("fpuemustats_clear", 0444, mips_debugfs_dir, NULL, + &fpuemustats_clear_fops); #define FPU_EMU_STAT_OFFSET(m) \ offsetof(struct mips_fpu_emulator_stats, m) #define FPU_STAT_CREATE(m) \ do { \ - d = debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \ + debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \ (void *)FPU_EMU_STAT_OFFSET(m), \ &fops_fpuemu_stat); \ - if (!d) \ - return -ENOMEM; \ } while (0) FPU_STAT_CREATE(emulated); @@ -233,8 +222,6 @@ do { \ fpuemu_debugfs_inst_dir = debugfs_create_dir("instructions", fpuemu_debugfs_base_dir); - if (!fpuemu_debugfs_inst_dir) - return -ENOMEM; #define FPU_STAT_CREATE_EX(m) \ do { \ @@ -242,11 +229,9 @@ do { \ \ adjust_instruction_counter_name(name, #m); \ \ - d = debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \ + debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \ (void *)FPU_EMU_STAT_OFFSET(m), \ &fops_fpuemu_stat); \ - if (!d) \ - return -ENOMEM; \ } while (0) FPU_STAT_CREATE_EX(abs_s); diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 3e5bb203c95a..f34d7ff5eb60 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -3,9 +3,19 @@ # Makefile for the Linux/MIPS-specific parts of the memory manager. 
# -obj-y += cache.o extable.o fault.o \ - gup.o init.o mmap.o page.o page-funcs.o \ - pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o +obj-y += cache.o +obj-y += context.o +obj-y += extable.o +obj-y += fault.o +obj-y += gup.o +obj-y += init.o +obj-y += mmap.o +obj-y += page.o +obj-y += page-funcs.o +obj-y += pgtable.o +obj-y += tlbex.o +obj-y += tlbex-fault.o +obj-y += tlb-funcs.o ifdef CONFIG_CPU_MICROMIPS obj-y += uasm-micromips.o diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index 0e45b061e514..8064821e9805 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -128,23 +128,6 @@ static void octeon_flush_icache_range(unsigned long start, unsigned long end) /** - * Flush the icache for a trampoline. These are used for interrupt - * and exception hooking. - * - * @addr: Address to flush - */ -static void octeon_flush_cache_sigtramp(unsigned long addr) -{ - struct vm_area_struct *vma; - - down_read(&current->mm->mmap_sem); - vma = find_vma(current->mm, addr); - octeon_flush_icache_all_cores(vma); - up_read(&current->mm->mmap_sem); -} - - -/** * Flush a range out of a vma * * @vma: VMA to flush @@ -289,7 +272,6 @@ void octeon_cache_init(void) flush_cache_mm = octeon_flush_cache_mm; flush_cache_page = octeon_flush_cache_page; flush_cache_range = octeon_flush_cache_range; - flush_cache_sigtramp = octeon_flush_cache_sigtramp; flush_icache_all = octeon_flush_icache_all; flush_data_cache_page = octeon_flush_data_cache_page; flush_icache_range = octeon_flush_icache_range; diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 01848cdf2074..0ca401ddf3b7 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -274,30 +274,6 @@ static void r3k_flush_data_cache_page(unsigned long addr) { } -static void r3k_flush_cache_sigtramp(unsigned long addr) -{ - unsigned long flags; - - pr_debug("csigtramp[%08lx]\n", addr); - - flags = read_c0_status(); - - write_c0_status(flags&~ST0_IEC); - - /* Fill the TLB to avoid an exception with caches isolated. */ - asm( "lw\t$0, 0x000(%0)\n\t" - "lw\t$0, 0x004(%0)\n\t" - : : "r" (addr) ); - - write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC); - - asm( "sb\t$0, 0x000(%0)\n\t" - "sb\t$0, 0x004(%0)\n\t" - : : "r" (addr) ); - - write_c0_status(flags); -} - static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size) { BUG(); @@ -331,7 +307,6 @@ void r3k_cache_init(void) __flush_kernel_vmap_range = r3k_flush_kernel_vmap_range; - flush_cache_sigtramp = r3k_flush_cache_sigtramp; local_flush_data_cache_page = local_r3k_flush_data_cache_page; flush_data_cache_page = r3k_flush_data_cache_page; diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index d0b64df51eb2..5166e38cd1c6 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -540,6 +540,9 @@ static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type) unsigned int i; const cpumask_t *mask = cpu_present_mask; + if (cpu_has_mmid) + return cpu_context(0, mm) != 0; + /* cpu_sibling_map[] undeclared when !CONFIG_SMP */ #ifdef CONFIG_SMP /* @@ -697,10 +700,7 @@ static inline void local_r4k_flush_cache_page(void *args) } if (exec) { if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) { - int cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); } else vaddr ?
r4k_blast_icache_page(addr) : r4k_blast_icache_user_page(addr); @@ -937,119 +937,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) } #endif /* CONFIG_DMA_NONCOHERENT */ -struct flush_cache_sigtramp_args { - struct mm_struct *mm; - struct page *page; - unsigned long addr; -}; - -/* - * While we're protected against bad userland addresses we don't care - * very much about what happens in that case. Usually a segmentation - * fault will dump the process later on anyway ... - */ -static void local_r4k_flush_cache_sigtramp(void *args) -{ - struct flush_cache_sigtramp_args *fcs_args = args; - unsigned long addr = fcs_args->addr; - struct page *page = fcs_args->page; - struct mm_struct *mm = fcs_args->mm; - int map_coherent = 0; - void *vaddr; - - unsigned long ic_lsize = cpu_icache_line_size(); - unsigned long dc_lsize = cpu_dcache_line_size(); - unsigned long sc_lsize = cpu_scache_line_size(); - - /* - * If owns no valid ASID yet, cannot possibly have gotten - * this page into the cache. - */ - if (!has_valid_asid(mm, R4K_HIT)) - return; - - if (mm == current->active_mm) { - vaddr = NULL; - } else { - /* - * Use kmap_coherent or kmap_atomic to do flushes for - * another ASID than the current one. - */ - map_coherent = (cpu_has_dc_aliases && - page_mapcount(page) && - !Page_dcache_dirty(page)); - if (map_coherent) - vaddr = kmap_coherent(page, addr); - else - vaddr = kmap_atomic(page); - addr = (unsigned long)vaddr + (addr & ~PAGE_MASK); - } - - R4600_HIT_CACHEOP_WAR_IMPL; - if (!cpu_has_ic_fills_f_dc) { - if (dc_lsize) - vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1)) - : protected_writeback_dcache_line( - addr & ~(dc_lsize - 1)); - if (!cpu_icache_snoops_remote_store && scache_size) - vaddr ? flush_scache_line(addr & ~(sc_lsize - 1)) - : protected_writeback_scache_line( - addr & ~(sc_lsize - 1)); - } - if (ic_lsize) - vaddr ? 
flush_icache_line(addr & ~(ic_lsize - 1)) - : protected_flush_icache_line(addr & ~(ic_lsize - 1)); - - if (vaddr) { - if (map_coherent) - kunmap_coherent(); - else - kunmap_atomic(vaddr); - } - - if (MIPS4K_ICACHE_REFILL_WAR) { - __asm__ __volatile__ ( - ".set push\n\t" - ".set noat\n\t" - ".set "MIPS_ISA_LEVEL"\n\t" -#ifdef CONFIG_32BIT - "la $at,1f\n\t" -#endif -#ifdef CONFIG_64BIT - "dla $at,1f\n\t" -#endif - "cache %0,($at)\n\t" - "nop; nop; nop\n" - "1:\n\t" - ".set pop" - : - : "i" (Hit_Invalidate_I)); - } - if (MIPS_CACHE_SYNC_WAR) - __asm__ __volatile__ ("sync"); -} - -static void r4k_flush_cache_sigtramp(unsigned long addr) -{ - struct flush_cache_sigtramp_args args; - int npages; - - down_read(&current->mm->mmap_sem); - - npages = get_user_pages_fast(addr, 1, 0, &args.page); - if (npages < 1) - goto out; - - args.mm = current->mm; - args.addr = addr; - - r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args); - - put_page(args.page); -out: - up_read(&current->mm->mmap_sem); -} - static void r4k_flush_icache_all(void) { if (cpu_has_vtag_icache) @@ -1978,7 +1865,6 @@ void r4k_cache_init(void) __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range; - flush_cache_sigtramp = r4k_flush_cache_sigtramp; flush_icache_all = r4k_flush_icache_all; local_flush_data_cache_page = local_r4k_flush_data_cache_page; flush_data_cache_page = r4k_flush_data_cache_page; @@ -2033,7 +1919,6 @@ void r4k_cache_init(void) /* I$ fills from D$ just by emptying the write buffers */ flush_cache_page = (void *)b5k_instruction_hazard; flush_cache_range = (void *)b5k_instruction_hazard; - flush_cache_sigtramp = (void *)b5k_instruction_hazard; local_flush_data_cache_page = (void *)b5k_instruction_hazard; flush_data_cache_page = (void *)b5k_instruction_hazard; flush_icache_range = (void *)b5k_instruction_hazard; @@ -2052,7 +1937,6 @@ void r4k_cache_init(void) flush_cache_mm = (void *)cache_noop; flush_cache_page = (void *)cache_noop; flush_cache_range = (void *)cache_noop; - flush_cache_sigtramp = (void *)cache_noop; flush_icache_all = (void *)cache_noop; flush_data_cache_page = (void *)cache_noop; local_flush_data_cache_page = (void *)cache_noop; diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c index 5f6c099a9457..b7c8a9d79c35 100644 --- a/arch/mips/mm/c-tx39.c +++ b/arch/mips/mm/c-tx39.c @@ -290,25 +290,6 @@ static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) } } -static void tx39_flush_cache_sigtramp(unsigned long addr) -{ - unsigned long ic_lsize = current_cpu_data.icache.linesz; - unsigned long dc_lsize = current_cpu_data.dcache.linesz; - unsigned long config; - unsigned long flags; - - protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); - - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - protected_flush_icache_line(addr & ~(ic_lsize - 1)); - write_c0_conf(config); - local_irq_restore(flags); -} - static __init void tx39_probe_cache(void) { unsigned long config; @@ -368,7 +349,6 @@ void tx39_cache_init(void) flush_icache_range = (void *) tx39h_flush_icache_all; local_flush_icache_range = (void *) tx39h_flush_icache_all; - flush_cache_sigtramp = (void *) tx39h_flush_icache_all; local_flush_data_cache_page = (void *) tx39h_flush_icache_all; flush_data_cache_page = (void *) tx39h_flush_icache_all; @@ -397,7 +377,6 @@ void tx39_cache_init(void) __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range; - flush_cache_sigtramp = tx39_flush_cache_sigtramp; local_flush_data_cache_page 
= local_tx39_flush_data_cache_page; flush_data_cache_page = tx39_flush_data_cache_page; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 55099fbff4e6..3da216988672 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -47,7 +47,6 @@ void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range); /* MIPS specific cache operations */ -void (*flush_cache_sigtramp)(unsigned long addr); void (*local_flush_data_cache_page)(void * addr); void (*flush_data_cache_page)(unsigned long addr); void (*flush_icache_all)(void); diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c new file mode 100644 index 000000000000..b25564090939 --- /dev/null +++ b/arch/mips/mm/context.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/atomic.h> +#include <linux/mmu_context.h> +#include <linux/percpu.h> +#include <linux/spinlock.h> + +static DEFINE_RAW_SPINLOCK(cpu_mmid_lock); + +static atomic64_t mmid_version; +static unsigned int num_mmids; +static unsigned long *mmid_map; + +static DEFINE_PER_CPU(u64, reserved_mmids); +static cpumask_t tlb_flush_pending; + +static bool asid_versions_eq(int cpu, u64 a, u64 b) +{ + return ((a ^ b) & asid_version_mask(cpu)) == 0; +} + +void get_new_mmu_context(struct mm_struct *mm) +{ + unsigned int cpu; + u64 asid; + + /* + * This function is specific to ASIDs, and should not be called when + * MMIDs are in use. + */ + if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid)) + return; + + cpu = smp_processor_id(); + asid = asid_cache(cpu); + + if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { + if (cpu_has_vtag_icache) + flush_icache_all(); + local_flush_tlb_all(); /* start new asid cycle */ + } + + set_cpu_context(cpu, mm, asid); + asid_cache(cpu) = asid; +} +EXPORT_SYMBOL_GPL(get_new_mmu_context); + +void check_mmu_context(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + + /* + * This function is specific to ASIDs, and should not be called when + * MMIDs are in use. + */ + if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid)) + return; + + /* Check if our ASID is of an older version and thus invalid */ + if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu))) + get_new_mmu_context(mm); +} +EXPORT_SYMBOL_GPL(check_mmu_context); + +static void flush_context(void) +{ + u64 mmid; + int cpu; + + /* Update the list of reserved MMIDs and the MMID bitmap */ + bitmap_clear(mmid_map, 0, num_mmids); + + /* Reserve an MMID for kmap/wired entries */ + __set_bit(MMID_KERNEL_WIRED, mmid_map); + + for_each_possible_cpu(cpu) { + mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0); + + /* + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * MMID, as this is the only trace we have of + * the process it is still running. + */ + if (mmid == 0) + mmid = per_cpu(reserved_mmids, cpu); + + __set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map); + per_cpu(reserved_mmids, cpu) = mmid; + } + + /* + * Queue a TLB invalidation for each CPU to perform on next + * context-switch + */ + cpumask_setall(&tlb_flush_pending); +} + +static bool check_update_reserved_mmid(u64 mmid, u64 newmmid) +{ + bool hit; + int cpu; + + /* + * Iterate over the set of reserved MMIDs looking for a match. + * If we find one, then we can update our mm to use newmmid + * (i.e. 
the same MMID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old MMID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved MMID in a future + * generation. + */ + hit = false; + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_mmids, cpu) == mmid) { + hit = true; + per_cpu(reserved_mmids, cpu) = newmmid; + } + } + + return hit; +} + +static u64 get_new_mmid(struct mm_struct *mm) +{ + static u32 cur_idx = MMID_KERNEL_WIRED + 1; + u64 mmid, version, mmid_mask; + + mmid = cpu_context(0, mm); + version = atomic64_read(&mmid_version); + mmid_mask = cpu_asid_mask(&boot_cpu_data); + + if (!asid_versions_eq(0, mmid, 0)) { + u64 newmmid = version | (mmid & mmid_mask); + + /* + * If our current MMID was active during a rollover, we + * can continue to use it and this was just a false alarm. + */ + if (check_update_reserved_mmid(mmid, newmmid)) { + mmid = newmmid; + goto set_context; + } + + /* + * We had a valid MMID in a previous life, so try to re-use + * it if possible. + */ + if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) { + mmid = newmmid; + goto set_context; + } + } + + /* Allocate a free MMID */ + mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx); + if (mmid != num_mmids) + goto reserve_mmid; + + /* We're out of MMIDs, so increment the global version */ + version = atomic64_add_return_relaxed(asid_first_version(0), + &mmid_version); + + /* Note currently active MMIDs & mark TLBs as requiring flushes */ + flush_context(); + + /* We have more MMIDs than CPUs, so this will always succeed */ + mmid = find_first_zero_bit(mmid_map, num_mmids); + +reserve_mmid: + __set_bit(mmid, mmid_map); + cur_idx = mmid; + mmid |= version; +set_context: + set_cpu_context(0, mm, mmid); + return mmid; +} + +void check_switch_mmu_context(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + u64 ctx, old_active_mmid; + unsigned long flags; + + if (!cpu_has_mmid) { + check_mmu_context(mm); + write_c0_entryhi(cpu_asid(cpu, mm)); + goto setup_pgd; + } + + /* + * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's + * unnecessary. + * + * The memory ordering here is subtle. If our active_mmids is non-zero + * and the MMID matches the current version, then we update the CPU's + * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover + * means that either: + * + * - We get a zero back from the cmpxchg and end up waiting on + * cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises + * with the rollover and so we are forced to see the updated + * generation. + * + * - We get a valid MMID back from the cmpxchg, which means the + * relaxed xchg in flush_context will treat us as reserved + * because atomic RmWs are totally ordered for a given location. + */ + ctx = cpu_context(cpu, mm); + old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache); + if (!old_active_mmid || + !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) || + !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) { + raw_spin_lock_irqsave(&cpu_mmid_lock, flags); + + ctx = cpu_context(cpu, mm); + if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version))) + ctx = get_new_mmid(mm); + + WRITE_ONCE(cpu_data[cpu].asid_cache, ctx); + raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags); + } + + /* + * Invalidate the local TLB if needed. 
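
The allocator above hands out MMIDs of the form (version | index): the low bits index the mmid_map bitmap, the high bits are a generation counter, and exhausting the bitmap bumps the generation and recycles every index. A minimal user-space model of just that version/index scheme follows; the sizes are hypothetical and it deliberately omits the per-CPU reserved-MMID preservation and the TLB-flush broadcast that the real code needs.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical sizes: only 4 hardware IDs, so rollovers happen fast. */
	#define ID_BITS      2
	#define NUM_IDS      (1u << ID_BITS)
	#define ID_MASK      (NUM_IDS - 1)
	#define VERSION_INC  NUM_IDS            /* version lives above the ID bits */

	static uint64_t version = VERSION_INC;
	static uint8_t  id_used[NUM_IDS];       /* stand-in for the mmid_map bitmap */

	/* Allocate a fresh (version | index) value, bumping the version and
	 * clearing the bitmap when the current generation is exhausted. */
	static uint64_t alloc_id(void)
	{
		for (unsigned int i = 0; i < NUM_IDS; i++) {
			if (!id_used[i]) {
				id_used[i] = 1;
				return version | i;
			}
		}
		/* Rollover: new generation, all indices free again. */
		version += VERSION_INC;
		memset(id_used, 0, sizeof(id_used));
		id_used[0] = 1;
		return version | 0;
	}

	/* An ID is only valid while its version matches the current one. */
	static int id_current(uint64_t id)
	{
		return (id & ~(uint64_t)ID_MASK) == version;
	}

	int main(void)
	{
		uint64_t a = alloc_id(), b = alloc_id();

		printf("a=%#llx b=%#llx\n",
		       (unsigned long long)a, (unsigned long long)b);
		for (int i = 0; i < 3; i++)
			alloc_id();                     /* force a rollover */
		printf("a still current? %d\n", id_current(a));  /* 0: needs a new ID */
		return 0;
	}

Shrinking ID_BITS makes a rollover happen after a handful of allocations, which shows why an old ID has to be re-validated against the current version before it is reused.
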
Note that we must only clear our + * bit in tlb_flush_pending after this is complete, so that the + * cpu_has_shared_ftlb_entries case below isn't misled. + */ + if (cpumask_test_cpu(cpu, &tlb_flush_pending)) { + if (cpu_has_vtag_icache) + flush_icache_all(); + local_flush_tlb_all(); + cpumask_clear_cpu(cpu, &tlb_flush_pending); + } + + write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data)); + + /* + * If this CPU shares FTLB entries with its siblings and one or more of + * those siblings hasn't yet invalidated its TLB following a version + * increase then we need to invalidate any TLB entries for our MMID + * that we might otherwise pick up from a sibling. + * + * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in + * CONFIG_SMP=n kernels. + */ +#ifdef CONFIG_SMP + if (cpu_has_shared_ftlb_entries && + cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) { + /* Ensure we operate on the new MMID */ + mtc0_tlbw_hazard(); + + /* + * Invalidate all TLB entries associated with the new + * MMID, and wait for the invalidation to complete. + */ + ginvt_mmid(); + sync_ginv(); + } +#endif + +setup_pgd: + TLBMISS_HANDLER_SETUP_PGD(mm->pgd); +} +EXPORT_SYMBOL_GPL(check_switch_mmu_context); + +static int mmid_init(void) +{ + if (!cpu_has_mmid) + return 0; + + /* + * Expect allocation after rollover to fail if we don't have at least + * one more MMID than CPUs. + */ + num_mmids = asid_first_version(0); + WARN_ON(num_mmids <= num_possible_cpus()); + + atomic64_set(&mmid_version, asid_first_version(0)); + mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map), + GFP_KERNEL); + if (!mmid_map) + panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids); + + /* Reserve an MMID for kmap/wired entries */ + __set_bit(MMID_KERNEL_WIRED, mmid_map); + + pr_info("MMID allocator initialised with %u entries\n", num_mmids); + return 0; +} +early_initcall(mmid_init); diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index cb38461391cb..b57465733e87 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -120,13 +120,8 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, if (PageHighMem(page)) { void *addr; - if (offset + len > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset >> PAGE_SHIFT; - offset &= ~PAGE_MASK; - } + if (offset + len > PAGE_SIZE) len = PAGE_SIZE - offset; - } addr = kmap_atomic(page); dma_sync_virt(addr + offset, len, dir); @@ -145,12 +140,14 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, dma_sync_phys(paddr, size, dir); } +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir) { if (cpu_needs_post_dma_flush(dev)) dma_sync_phys(paddr, size, dir); } +#endif void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index b521d8e2d359..c3b45e248806 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -84,6 +84,7 @@ void setup_zero_pages(void) static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) { enum fixed_addresses idx; + unsigned int uninitialized_var(old_mmid); unsigned long vaddr, flags, entrylo; unsigned long old_ctx; pte_t pte; @@ -110,6 +111,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) write_c0_entryhi(vaddr & (PAGE_MASK << 1)); write_c0_entrylo0(entrylo); write_c0_entrylo1(entrylo); + 
if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(MMID_KERNEL_WIRED); + } #ifdef CONFIG_XPA if (cpu_has_xpa) { entrylo = (pte.pte_low & _PFNX_MASK); @@ -124,6 +129,8 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) tlb_write_indexed(); tlbw_use_hazard(); write_c0_entryhi(old_ctx); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); local_irq_restore(flags); return (void*) vaddr; diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c index 2a116084216f..9507421de335 100644 --- a/arch/mips/mm/sc-debugfs.c +++ b/arch/mips/mm/sc-debugfs.c @@ -55,20 +55,11 @@ static const struct file_operations sc_prefetch_fops = { static int __init sc_debugfs_init(void) { - struct dentry *dir, *file; - - if (!mips_debugfs_dir) - return -ENODEV; + struct dentry *dir; dir = debugfs_create_dir("l2cache", mips_debugfs_dir); - if (IS_ERR(dir)) - return PTR_ERR(dir); - - file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, - NULL, &sc_prefetch_fops); - if (!file) - return -ENOMEM; - + debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, NULL, + &sc_prefetch_fops); return 0; } late_initcall(sc_debugfs_init); diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index 6f589e0112ce..50f207591b6d 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -67,18 +67,6 @@ void local_flush_tlb_all(void) local_irq_restore(flags); } -void local_flush_tlb_mm(struct mm_struct *mm) -{ - int cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) { -#ifdef DEBUG_TLB - printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm)); -#endif - drop_mmu_context(mm, cpu); - } -} - void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -117,7 +105,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, } write_c0_entryhi(oldpid); } else { - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); } local_irq_restore(flags); } diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 0596505770db..c13e46ced425 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -104,23 +104,6 @@ void local_flush_tlb_all(void) } EXPORT_SYMBOL(local_flush_tlb_all); -/* All entries common to a mm share an asid. To effectively flush - these entries, we just bump the asid. */ -void local_flush_tlb_mm(struct mm_struct *mm) -{ - int cpu; - - preempt_disable(); - - cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) { - drop_mmu_context(mm, cpu); - } - - preempt_enable(); -} - void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -137,14 +120,23 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, if (size <= (current_cpu_data.tlbsizeftlbsets ? 
current_cpu_data.tlbsize / 8 : current_cpu_data.tlbsize / 2)) { - int oldpid = read_c0_entryhi(); + unsigned long old_entryhi, uninitialized_var(old_mmid); int newpid = cpu_asid(cpu, mm); + old_entryhi = read_c0_entryhi(); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(newpid); + } + htw_stop(); while (start < end) { int idx; - write_c0_entryhi(start | newpid); + if (cpu_has_mmid) + write_c0_entryhi(start); + else + write_c0_entryhi(start | newpid); start += (PAGE_SIZE << 1); mtc0_tlbw_hazard(); tlb_probe(); @@ -160,10 +152,12 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, tlb_write_indexed(); } tlbw_use_hazard(); - write_c0_entryhi(oldpid); + write_c0_entryhi(old_entryhi); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); htw_start(); } else { - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); } flush_micro_tlb(); local_irq_restore(flags); @@ -220,15 +214,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) int cpu = smp_processor_id(); if (cpu_context(cpu, vma->vm_mm) != 0) { - unsigned long flags; - int oldpid, newpid, idx; + unsigned long uninitialized_var(old_mmid); + unsigned long flags, old_entryhi; + int idx; - newpid = cpu_asid(cpu, vma->vm_mm); page &= (PAGE_MASK << 1); local_irq_save(flags); - oldpid = read_c0_entryhi(); + old_entryhi = read_c0_entryhi(); htw_stop(); - write_c0_entryhi(page | newpid); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_entryhi(page); + write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm)); + } else { + write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm)); + } mtc0_tlbw_hazard(); tlb_probe(); tlb_probe_hazard(); @@ -244,7 +244,9 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) tlbw_use_hazard(); finish: - write_c0_entryhi(oldpid); + write_c0_entryhi(old_entryhi); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); htw_start(); flush_micro_tlb_vm(vma); local_irq_restore(flags); @@ -307,9 +309,13 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) local_irq_save(flags); htw_stop(); - pid = read_c0_entryhi() & cpu_asid_mask(¤t_cpu_data); address &= (PAGE_MASK << 1); - write_c0_entryhi(address | pid); + if (cpu_has_mmid) { + write_c0_entryhi(address); + } else { + pid = read_c0_entryhi() & cpu_asid_mask(¤t_cpu_data); + write_c0_entryhi(address | pid); + } pgdp = pgd_offset(vma->vm_mm, address); mtc0_tlbw_hazard(); tlb_probe(); @@ -375,12 +381,17 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, #ifdef CONFIG_XPA panic("Broken for XPA kernels"); #else + unsigned int uninitialized_var(old_mmid); unsigned long flags; unsigned long wired; unsigned long old_pagemask; unsigned long old_ctx; local_irq_save(flags); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(MMID_KERNEL_WIRED); + } /* Save old context and create impossible VPN2 value */ old_ctx = read_c0_entryhi(); htw_stop(); @@ -398,6 +409,8 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, tlbw_use_hazard(); write_c0_entryhi(old_ctx); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); tlbw_use_hazard(); /* What is the hazard here? 
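
The tlb-r4k.c hunks above repeat one pattern: save EntryHi, plus MemoryMapId when the CPU has MMIDs, program the new values, run the indexed TLB operation, then restore both registers. A kernel-style sketch of that pattern factored into helpers is below; the helpers themselves are hypothetical (the patch open-codes this in every routine), and the register accessors are the ones already visible in these hunks, so this only builds inside the MIPS kernel tree.

	/* Hypothetical helpers; the patch open-codes this in each TLB routine. */
	struct tlb_ctx {
		unsigned long entryhi;
		unsigned int mmid;
	};

	static inline void save_tlb_ctx(struct tlb_ctx *ctx)
	{
		ctx->entryhi = read_c0_entryhi();
		if (cpu_has_mmid)
			ctx->mmid = read_c0_memorymapid();
	}

	static inline void program_tlb_ctx(unsigned long vaddr, unsigned int id)
	{
		if (cpu_has_mmid) {
			/* With MMIDs the address and the ID live in separate registers. */
			write_c0_entryhi(vaddr);
			write_c0_memorymapid(id);
		} else {
			/* Classic ASIDs share EntryHi with the VPN. */
			write_c0_entryhi(vaddr | id);
		}
	}

	static inline void restore_tlb_ctx(const struct tlb_ctx *ctx)
	{
		write_c0_entryhi(ctx->entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(ctx->mmid);
	}
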
*/ htw_start(); write_c0_pagemask(old_pagemask); diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index e86e2e55ad3e..c1e9e144007e 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c @@ -50,14 +50,6 @@ void local_flush_tlb_all(void) local_irq_restore(flags); } -void local_flush_tlb_mm(struct mm_struct *mm) -{ - int cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) - drop_mmu_context(mm, cpu); -} - void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -75,7 +67,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_irq_save(flags); if (size > TFP_TLB_SIZE / 2) { - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); goto out_restore; } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 37b1cb246332..65b6e85447b1 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, * to mimic that here by taking a load/istream page * fault. */ + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(p, 0); UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); uasm_i_jr(p, ptr); @@ -1646,6 +1648,8 @@ static void iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) { #ifdef CONFIG_SMP + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(p, 0); # ifdef CONFIG_PHYS_ADDR_T_64BIT if (cpu_has_64bits) uasm_i_lld(p, pte, 0, ptr); @@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void) #endif uasm_l_nopage_tlbl(&l, p); + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(&p, 0); build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_0 & 1) { @@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void) #endif uasm_l_nopage_tlbs(&l, p); + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(&p, 0); build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_1 & 1) { @@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void) #endif uasm_l_nopage_tlbm(&l, p); + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(&p, 0); build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_1 & 1) { diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index b16710a8a9e7..0effd3cba9a7 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -79,8 +79,6 @@ enum reg_val_type { REG_64BIT_32BIT, /* 32-bit compatible, need truncation for 64-bit ops. */ REG_32BIT, - /* 32-bit zero extended. */ - REG_32BIT_ZERO_EX, /* 32-bit no sign/zero extension needed. */ REG_32BIT_POS }; @@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) const struct bpf_prog *prog = ctx->skf; int stack_adjust = ctx->stack_size; int store_offset = stack_adjust - 8; + enum reg_val_type td; int r0 = MIPS_R_V0; - if (dest_reg == MIPS_R_RA && - get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) + if (dest_reg == MIPS_R_RA) { /* Don't let zero extended value escape. 
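
In the eBPF JIT change above, the `sll dst, dst, 0` the JIT emits is MIPS64's idiom for putting a register back into canonical 32-bit form: SLL is defined as a 32-bit operation whose result is sign-extended into the full 64-bit register, which is what the "/* sign extend */" comments refer to. A standalone C model of that semantics:

	#include <stdint.h>
	#include <stdio.h>

	/* Models MIPS64 "sll rd, rs, 0": the low 32 bits are kept and
	 * sign-extended into the full 64-bit register. */
	static uint64_t sll_zero(uint64_t v)
	{
		return (uint64_t)(int64_t)(int32_t)(uint32_t)v;
	}

	int main(void)
	{
		/* A zero-extended 32-bit value with bit 31 set is not in the
		 * canonical sign-extended form that MIPS64 32-bit compares
		 * and branches expect... */
		uint64_t zero_ext = 0x0000000080000000ull;

		/* ...but one pass through sll 0 fixes it up. */
		printf("%#018llx -> %#018llx\n",
		       (unsigned long long)zero_ext,
		       (unsigned long long)sll_zero(zero_ext));
		return 0;
	}

Running it prints 0x0000000080000000 -> 0xffffffff80000000, the canonical representation the JIT wants before 32-bit operations.
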
*/ - emit_instr(ctx, sll, r0, r0, 0); + td = get_reg_val_type(ctx, prog->len, BPF_REG_0); + if (td == REG_64BIT) + emit_instr(ctx, sll, r0, r0, 0); + } if (ctx->flags & EBPF_SAVE_RA) { emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); @@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) + if (td == REG_64BIT) /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); if (insn->imm == 1) { @@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (src < 0 || dst < 0) return -EINVAL; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } did_move = false; ts = get_reg_val_type(ctx, this_idx, insn->src_reg); - if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { + if (ts == REG_64BIT) { int tmp_reg = MIPS_R_AT; if (bpf_op == BPF_MOV) { @@ -1254,8 +1255,7 @@ jeq_common: if (insn->imm == 64 && td == REG_32BIT) emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - if (insn->imm != 64 && - (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { + if (insn->imm != 64 && td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -1819,7 +1819,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* Update the icache */ flush_icache_range((unsigned long)ctx.target, - (unsigned long)(ctx.target + ctx.idx * sizeof(u32))); + (unsigned long)&ctx.target[ctx.idx]); if (bpf_jit_enable > 1) /* Dump JIT code */ diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 8185a2bfaf09..c4f976593061 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o # # These are still pretty much in the old state, watch, go blind. # +obj-$(CONFIG_ATH79) += fixup-ath79.o obj-$(CONFIG_LASAT) += pci-lasat.o obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o diff --git a/arch/mips/pci/fixup-ath79.c b/arch/mips/pci/fixup-ath79.c new file mode 100644 index 000000000000..9e651a4af05e --- /dev/null +++ b/arch/mips/pci/fixup-ath79.c @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2018 John Crispin <john@phrozen.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/pci.h> +//#include <linux/of_irq.h> +#include <linux/of_pci.h> + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return PCIBIOS_SUCCESSFUL; +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return of_irq_parse_and_map_pci(dev, slot, pin); +} diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c index a1d2c4ae0d1b..df95b0da08f2 100644 --- a/arch/mips/pci/ops-bridge.c +++ b/arch/mips/pci/ops-bridge.c @@ -44,7 +44,7 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * value) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int slot = PCI_SLOT(devfn); int fn = PCI_FUNC(devfn); volatile void *addr; @@ -56,11 +56,11 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_DEVICE_NOT_FOUND; /* - * IOC3 is fucking fucked beyond belief ... Don't even give the + * IOC3 is broken beyond belief ... Don't even give the * generic PCI code a chance to look at it for real ... */ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) - goto oh_my_gawd; + goto is_ioc3; addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)]; @@ -73,21 +73,16 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn, return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; -oh_my_gawd: +is_ioc3: /* - * IOC3 is fucking fucked beyond belief ... Don't even give the - * generic PCI code a chance to look at the wrong register. + * IOC3 special handling */ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) { *value = emulate_ioc3_cfg(where, size); return PCIBIOS_SUCCESSFUL; } - /* - * IOC3 is fucking fucked beyond belief ... Don't try to access - * anything but 32-bit words ... - */ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; if (get_dbe(cf, (u32 *) addr)) @@ -104,7 +99,7 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * value) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int busno = bus->number; int slot = PCI_SLOT(devfn); int fn = PCI_FUNC(devfn); @@ -112,19 +107,19 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn, u32 cf, shift, mask; int res; - bridge->b_pci_cfg = (busno << 16) | (slot << 11); + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID]; if (get_dbe(cf, (u32 *) addr)) return PCIBIOS_DEVICE_NOT_FOUND; /* - * IOC3 is fucking fucked beyond belief ... Don't even give the + * IOC3 is broken beyond belief ... Don't even give the * generic PCI code a chance to look at it for real ... */ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) - goto oh_my_gawd; + goto is_ioc3; - bridge->b_pci_cfg = (busno << 16) | (slot << 11); + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))]; if (size == 1) @@ -136,22 +131,17 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn, return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; -oh_my_gawd: +is_ioc3: /* - * IOC3 is fucking fucked beyond belief ... Don't even give the - * generic PCI code a chance to look at the wrong register. 
+ * IOC3 special handling */ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) { *value = emulate_ioc3_cfg(where, size); return PCIBIOS_SUCCESSFUL; } - /* - * IOC3 is fucking fucked beyond belief ... Don't try to access - * anything but 32-bit words ... - */ - bridge->b_pci_cfg = (busno << 16) | (slot << 11); + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); addr = &bridge->b_type1_cfg.c[(fn << 8) | where]; if (get_dbe(cf, (u32 *) addr)) @@ -177,7 +167,7 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int slot = PCI_SLOT(devfn); int fn = PCI_FUNC(devfn); volatile void *addr; @@ -189,11 +179,11 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_DEVICE_NOT_FOUND; /* - * IOC3 is fucking fucked beyond belief ... Don't even give the + * IOC3 is broken beyond belief ... Don't even give the * generic PCI code a chance to look at it for real ... */ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) - goto oh_my_gawd; + goto is_ioc3; addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)]; @@ -210,19 +200,14 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; -oh_my_gawd: +is_ioc3: /* - * IOC3 is fucking fucked beyond belief ... Don't even give the - * generic PCI code a chance to touch the wrong register. + * IOC3 special handling */ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) return PCIBIOS_SUCCESSFUL; - /* - * IOC3 is fucking fucked beyond belief ... Don't try to access - * anything but 32-bit words ... - */ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; if (get_dbe(cf, (u32 *) addr)) @@ -243,7 +228,7 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int slot = PCI_SLOT(devfn); int fn = PCI_FUNC(devfn); int busno = bus->number; @@ -251,17 +236,17 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn, u32 cf, shift, mask, smask; int res; - bridge->b_pci_cfg = (busno << 16) | (slot << 11); + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID]; if (get_dbe(cf, (u32 *) addr)) return PCIBIOS_DEVICE_NOT_FOUND; /* - * IOC3 is fucking fucked beyond belief ... Don't even give the + * IOC3 is broken beyond belief ... Don't even give the * generic PCI code a chance to look at it for real ... */ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) - goto oh_my_gawd; + goto is_ioc3; addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))]; @@ -278,19 +263,14 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; -oh_my_gawd: +is_ioc3: /* - * IOC3 is fucking fucked beyond belief ... Don't even give the - * generic PCI code a chance to touch the wrong register. + * IOC3 special handling */ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) return PCIBIOS_SUCCESSFUL; - /* - * IOC3 is fucking fucked beyond belief ... Don't try to access - * anything but 32-bit words ... 
- */ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; if (get_dbe(cf, (u32 *) addr)) diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c index c94a66070a60..3c177b4d0609 100644 --- a/arch/mips/pci/pci-ip27.c +++ b/arch/mips/pci/pci-ip27.c @@ -24,22 +24,11 @@ #define MAX_PCI_BUSSES 40 /* - * Max #PCI devices (like scsi controllers) we handle on a bus. - */ -#define MAX_DEVICES_PER_PCIBUS 8 - -/* * XXX: No kmalloc available when we do our crosstalk scan, * we should try to move it later in the boot process. */ static struct bridge_controller bridges[MAX_PCI_BUSSES]; -/* - * Translate from irq to software PCI bus number and PCI slot. - */ -struct bridge_controller *irq_to_bridge[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS]; -int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS]; - extern struct pci_ops bridge_pci_ops; int bridge_probe(nasid_t nasid, int widget_id, int masterwid) @@ -47,7 +36,6 @@ int bridge_probe(nasid_t nasid, int widget_id, int masterwid) unsigned long offset = NODE_OFFSET(nasid); struct bridge_controller *bc; static int num_bridges = 0; - bridge_t *bridge; int slot; pci_set_flags(PCI_PROBE_ONLY); @@ -78,7 +66,6 @@ int bridge_probe(nasid_t nasid, int widget_id, int masterwid) bc->io.end = ~0UL; bc->io.flags = IORESOURCE_IO; - bc->irq_cpu = smp_processor_id(); bc->widget_id = widget_id; bc->nasid = nasid; @@ -87,45 +74,43 @@ int bridge_probe(nasid_t nasid, int widget_id, int masterwid) /* * point to this bridge */ - bridge = (bridge_t *) RAW_NODE_SWIN_BASE(nasid, widget_id); + bc->base = (struct bridge_regs *)RAW_NODE_SWIN_BASE(nasid, widget_id); /* * Clear all pending interrupts. */ - bridge->b_int_rst_stat = BRIDGE_IRR_ALL_CLR; + bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR); /* * Until otherwise set up, assume all interrupts are from slot 0 */ - bridge->b_int_device = 0x0; + bridge_write(bc, b_int_device, 0x0); /* * swap pio's to pci mem and io space (big windows) */ - bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP | - BRIDGE_CTRL_MEM_SWAP; + bridge_set(bc, b_wid_control, BRIDGE_CTRL_IO_SWAP | + BRIDGE_CTRL_MEM_SWAP); #ifdef CONFIG_PAGE_SIZE_4KB - bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE; + bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE); #else /* 16kB or larger */ - bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE; + bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE); #endif /* * Hmm... IRIX sets additional bits in the address which * are documented as reserved in the bridge docs. 
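
The ops-bridge.c and pci-ip27.c hunks above replace direct dereferences of the mapped bridge register block with bridge_read()/bridge_write()/bridge_set()/bridge_clr(). Their real definitions live in <asm/pci/bridge.h>, which is not part of these hunks; the following is only a plausible sketch of their shape, built on the __raw_readq()/__raw_writeq() accessors that do appear elsewhere in this series.

	/* Sketch only -- the real definitions are in <asm/pci/bridge.h>. */
	#define bridge_read(bc, reg) \
		__raw_readq(&(bc)->base->reg)
	#define bridge_write(bc, reg, val) \
		__raw_writeq((val), &(bc)->base->reg)
	#define bridge_set(bc, reg, val) \
		bridge_write(bc, reg, bridge_read(bc, reg) | (val))
	#define bridge_clr(bc, reg, val) \
		bridge_write(bc, reg, bridge_read(bc, reg) & ~(val))

Whatever the exact definitions, routing every access through an explicit accessor keeps each register access a full-width MMIO operation and gives one place to hook byte swapping or tracing later, instead of relying on volatile struct members.
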
*/ - bridge->b_wid_int_upper = 0x8000 | (masterwid << 16); - bridge->b_wid_int_lower = 0x01800090; /* PI_INT_PEND_MOD off*/ - bridge->b_dir_map = (masterwid << 20); /* DMA */ - bridge->b_int_enable = 0; + bridge_write(bc, b_wid_int_upper, 0x8000 | (masterwid << 16)); + bridge_write(bc, b_wid_int_lower, 0x01800090); /* PI_INT_PEND_MOD off*/ + bridge_write(bc, b_dir_map, (masterwid << 20)); /* DMA */ + bridge_write(bc, b_int_enable, 0); for (slot = 0; slot < 8; slot ++) { - bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR; + bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR); bc->pci_int[slot] = -1; } - bridge->b_wid_tflush; /* wait until Bridge PIO complete */ - - bc->base = bridge; + bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */ register_pci_controller(&bc->pc); @@ -168,16 +153,12 @@ int pcibios_plat_dev_init(struct pci_dev *dev) irq = bc->pci_int[slot]; if (irq == -1) { - irq = request_bridge_irq(bc); + irq = request_bridge_irq(bc, slot); if (irq < 0) return irq; bc->pci_int[slot] = irq; } - - irq_to_bridge[irq] = bc; - irq_to_slot[irq] = slot; - dev->irq = irq; return 0; @@ -206,7 +187,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) static inline void pci_disable_swapping(struct pci_dev *dev) { struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int slot = PCI_SLOT(dev->devfn); /* Turn off byte swapping */ diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 5017d5843c5a..fc29b85cfa92 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void) if (octeon_has_feature(OCTEON_FEATURE_PCIE)) return 0; + if (!octeon_is_pci_host()) { + pr_notice("Not in host mode, PCI Controller not initialized\n"); + return 0; + } + /* Point pcibios_map_irq() to the PCI version of it */ octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; @@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void) else octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; - if (!octeon_is_pci_host()) { - pr_notice("Not in host mode, PCI Controller not initialized\n"); - return 0; - } - /* PCI I/O and PCI MEM values */ set_io_port_base(OCTEON_PCI_IOSPACE_BASE); ioport_resource.start = 0; diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c index e1fa5972a81d..648f5eb2ba68 100644 --- a/arch/mips/ralink/bootrom.c +++ b/arch/mips/ralink/bootrom.c @@ -35,13 +35,7 @@ static const struct file_operations bootrom_file_ops = { static int bootrom_setup(void) { - if (!debugfs_create_file("bootrom", 0444, - NULL, NULL, &bootrom_file_ops)) { - pr_err("Failed to create bootrom debugfs file\n"); - - return -EINVAL; - } - + debugfs_create_file("bootrom", 0444, NULL, NULL, &bootrom_file_ops); return 0; } diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile index 73502fda13ee..27c14ede191e 100644 --- a/arch/mips/sgi-ip27/Makefile +++ b/arch/mips/sgi-ip27/Makefile @@ -3,10 +3,9 @@ # Makefile for the IP27 specific kernel interface routines under Linux. 
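
The sc-debugfs.c and bootrom.c hunks above drop all error handling around debugfs, in line with the debugfs guidance that the return values of debugfs_create_*() need not be checked: the calls accept and propagate error pointers, and a missing debugfs entry is never fatal to the driver. A minimal sketch of the resulting idiom; the "foo" names are illustrative only.

	#include <linux/debugfs.h>
	#include <linux/init.h>
	#include <linux/types.h>

	static u32 foo_counter;

	static int __init foo_debugfs_init(void)
	{
		struct dentry *dir;

		/* No error handling: if debugfs is disabled or the directory
		 * cannot be created, the call below copes with the error
		 * pointer and the rest of the driver keeps working. */
		dir = debugfs_create_dir("foo", NULL);
		debugfs_create_u32("counter", 0444, dir, &foo_counter);
		return 0;
	}
	late_initcall(foo_debugfs_init);
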
# -obj-y := ip27-berr.o ip27-irq.o ip27-irqno.o ip27-init.o ip27-klconfig.o \ +obj-y := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o \ ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o ip27-timer.o \ ip27-hubio.o ip27-xtalk.o obj-$(CONFIG_EARLY_PRINTK) += ip27-console.o -obj-$(CONFIG_PCI) += ip27-irq-pci.o obj-$(CONFIG_SMP) += ip27-smp.o diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c index 2abe016a0ffc..68fb0cb89d9d 100644 --- a/arch/mips/sgi-ip27/ip27-hubio.c +++ b/arch/mips/sgi-ip27/ip27-hubio.c @@ -63,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, * after we write it. */ IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); - (void) HUB_L(IIO_ITTE_GET(nasid, i)); + __raw_readq(IIO_ITTE_GET(nasid, i)); return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); } @@ -135,7 +135,7 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits) **/ static void hub_set_piomode(nasid_t nasid) { - hubreg_t ii_iowa; + u64 ii_iowa; hubii_wcr_t ii_wcr; unsigned i; diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c index e501c43c02db..6074efeff894 100644 --- a/arch/mips/sgi-ip27/ip27-init.c +++ b/arch/mips/sgi-ip27/ip27-init.c @@ -52,13 +52,10 @@ EXPORT_SYMBOL_GPL(sn_cpu_info); extern void pcibr_setup(cnodeid_t); -extern void xtalk_probe_node(cnodeid_t nid); - static void per_hub_init(cnodeid_t cnode) { struct hub_data *hub = hub_data(cnode); nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); - int i; cpumask_set_cpu(smp_processor_id(), &hub->h_cpus); @@ -71,7 +68,6 @@ static void per_hub_init(cnodeid_t cnode) REMOTE_HUB_S(nasid, IIO_ICTO, 0xff); hub_rtc_init(cnode); - xtalk_probe_node(cnode); #ifdef CONFIG_REPLICATE_EXHANDLERS /* @@ -90,24 +86,6 @@ static void per_hub_init(cnodeid_t cnode) __flush_cache_all(); } #endif - - /* - * Some interrupts are reserved by hardware or by software convention. - * Mark these as reserved right away so they won't be used accidentally - * later. - */ - for (i = 0; i <= BASE_PCI_IRQ; i++) { - __set_bit(i, hub->irq_alloc_mask); - LOCAL_HUB_CLR_INTR(INT_PEND0_BASELVL + i); - } - - __set_bit(IP_PEND0_6_63, hub->irq_alloc_mask); - LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63); - - for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) { - __set_bit(i, hub->irq_alloc_mask); - LOCAL_HUB_CLR_INTR(INT_PEND1_BASELVL + i); - } } void per_cpu_init(void) @@ -116,8 +94,6 @@ void per_cpu_init(void) int slice = LOCAL_HUB_L(PI_CPU_NUM); cnodeid_t cnode = get_compact_nodeid(); struct hub_data *hub = hub_data(cnode); - struct slice_data *si = hub->slice + slice; - int i; if (test_and_set_bit(slice, &hub->slice_map)) return; @@ -126,22 +102,14 @@ void per_cpu_init(void) per_hub_init(cnode); - for (i = 0; i < LEVELS_PER_SLICE; i++) - si->level_to_irq[i] = -1; - - /* - * We use this so we can find the local hub's data as fast as only - * possible. - */ - cpu_data[cpu].data = si; - cpu_time_init(); install_ipi(); /* Install our NMI handler if symmon hasn't installed one. 
*/ install_cpu_nmi_handler(cputoslice(cpu)); - set_c0_status(SRB_DEV0 | SRB_DEV1); + enable_percpu_irq(IP27_HUB_PEND0_IRQ, IRQ_TYPE_NONE); + enable_percpu_irq(IP27_HUB_PEND1_IRQ, IRQ_TYPE_NONE); } /* @@ -177,7 +145,7 @@ extern void ip27_reboot_setup(void); void __init plat_mem_setup(void) { - hubreg_t p, e, n_mode; + u64 p, e, n_mode; nasid_t nid; ip27_reboot_setup(); @@ -215,7 +183,6 @@ void __init plat_mem_setup(void) #endif ioc3_eth_init(); - per_cpu_init(); set_io_port_base(IO_BASE); } diff --git a/arch/mips/sgi-ip27/ip27-irq-pci.c b/arch/mips/sgi-ip27/ip27-irq-pci.c deleted file mode 100644 index cd449e90b917..000000000000 --- a/arch/mips/sgi-ip27/ip27-irq-pci.c +++ /dev/null @@ -1,266 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ip27-irq.c: Highlevel interrupt handling for IP27 architecture. - * - * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org) - * Copyright (C) 1999, 2000 Silicon Graphics, Inc. - * Copyright (C) 1999 - 2001 Kanoj Sarcar - */ - -#undef DEBUG - -#include <linux/irq.h> -#include <linux/errno.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/types.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/timex.h> -#include <linux/smp.h> -#include <linux/random.h> -#include <linux/kernel.h> -#include <linux/kernel_stat.h> -#include <linux/delay.h> -#include <linux/bitops.h> - -#include <asm/bootinfo.h> -#include <asm/io.h> -#include <asm/mipsregs.h> - -#include <asm/processor.h> -#include <asm/pci/bridge.h> -#include <asm/sn/addrs.h> -#include <asm/sn/agent.h> -#include <asm/sn/arch.h> -#include <asm/sn/hub.h> -#include <asm/sn/intr.h> - -/* - * Linux has a controller-independent x86 interrupt architecture. - * every controller has a 'controller-template', that is used - * by the main code to do the right thing. Each driver-visible - * interrupt source is transparently wired to the appropriate - * controller. Thus drivers need not be aware of the - * interrupt-controller. - * - * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC, - * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC. - * (IO-APICs assumed to be messaging to Pentium local-APICs) - * - * the code is designed to be easily extended with new/different - * interrupt controllers, without having to do assembly magic. 
- */ - -extern struct bridge_controller *irq_to_bridge[]; -extern int irq_to_slot[]; - -/* - * use these macros to get the encoded nasid and widget id - * from the irq value - */ -#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)] -#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i] - -static inline int alloc_level(int cpu, int irq) -{ - struct hub_data *hub = hub_data(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - int level; - - level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE); - if (level >= LEVELS_PER_SLICE) - panic("Cpu %d flooded with devices", cpu); - - __set_bit(level, hub->irq_alloc_mask); - si->level_to_irq[level] = irq; - - return level; -} - -static inline int find_level(cpuid_t *cpunum, int irq) -{ - int cpu, i; - - for_each_online_cpu(cpu) { - struct slice_data *si = cpu_data[cpu].data; - - for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) - if (si->level_to_irq[i] == irq) { - *cpunum = cpu; - - return i; - } - } - - panic("Could not identify cpu/level for irq %d", irq); -} - -static int intr_connect_level(int cpu, int bit) -{ - nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - - set_bit(bit, si->irq_enable_mask); - - if (!cputoslice(cpu)) { - REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); - } else { - REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); - } - - return 0; -} - -static int intr_disconnect_level(int cpu, int bit) -{ - nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - - clear_bit(bit, si->irq_enable_mask); - - if (!cputoslice(cpu)) { - REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); - } else { - REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); - } - - return 0; -} - -/* Startup one of the (PCI ...) IRQs routes over a bridge. */ -static unsigned int startup_bridge_irq(struct irq_data *d) -{ - struct bridge_controller *bc; - bridgereg_t device; - bridge_t *bridge; - int pin, swlevel; - cpuid_t cpu; - - pin = SLOT_FROM_PCI_IRQ(d->irq); - bc = IRQ_TO_BRIDGE(d->irq); - bridge = bc->base; - - pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", d->irq, pin); - /* - * "map" irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - swlevel = find_level(&cpu, d->irq); - bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8)); - bridge->b_int_enable |= (1 << pin); - bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */ - - /* - * Enable sending of an interrupt clear packt to the hub on a high to - * low transition of the interrupt pin. - * - * IRIX sets additional bits in the address which are documented as - * reserved in the bridge docs. - */ - bridge->b_int_mode |= (1UL << pin); - - /* - * We assume the bridge to have a 1:1 mapping between devices - * (slots) and intr pins. - */ - device = bridge->b_int_device; - device &= ~(7 << (pin*3)); - device |= (pin << (pin*3)); - bridge->b_int_device = device; - - bridge->b_wid_tflush; - - intr_connect_level(cpu, swlevel); - - return 0; /* Never anything pending. */ -} - -/* Shutdown one of the (PCI ...) IRQs routes over a bridge. 
*/ -static void shutdown_bridge_irq(struct irq_data *d) -{ - struct bridge_controller *bc = IRQ_TO_BRIDGE(d->irq); - bridge_t *bridge = bc->base; - int pin, swlevel; - cpuid_t cpu; - - pr_debug("bridge_shutdown: irq 0x%x\n", d->irq); - pin = SLOT_FROM_PCI_IRQ(d->irq); - - /* - * map irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - swlevel = find_level(&cpu, d->irq); - intr_disconnect_level(cpu, swlevel); - - bridge->b_int_enable &= ~(1 << pin); - bridge->b_wid_tflush; -} - -static inline void enable_bridge_irq(struct irq_data *d) -{ - cpuid_t cpu; - int swlevel; - - swlevel = find_level(&cpu, d->irq); /* Criminal offence */ - intr_connect_level(cpu, swlevel); -} - -static inline void disable_bridge_irq(struct irq_data *d) -{ - cpuid_t cpu; - int swlevel; - - swlevel = find_level(&cpu, d->irq); /* Criminal offence */ - intr_disconnect_level(cpu, swlevel); -} - -static struct irq_chip bridge_irq_type = { - .name = "bridge", - .irq_startup = startup_bridge_irq, - .irq_shutdown = shutdown_bridge_irq, - .irq_mask = disable_bridge_irq, - .irq_unmask = enable_bridge_irq, -}; - -void register_bridge_irq(unsigned int irq) -{ - irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); -} - -int request_bridge_irq(struct bridge_controller *bc) -{ - int irq = allocate_irqno(); - int swlevel, cpu; - nasid_t nasid; - - if (irq < 0) - return irq; - - /* - * "map" irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - cpu = bc->irq_cpu; - swlevel = alloc_level(cpu, irq); - if (unlikely(swlevel < 0)) { - free_irqno(irq); - - return -EAGAIN; - } - - /* Make sure it's not already pending when we connect it. */ - nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - REMOTE_HUB_CLR_INTR(nasid, swlevel); - - intr_connect_level(cpu, swlevel); - - register_bridge_irq(irq); - - return irq; -} diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 0dde6164a06f..710a59764b01 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -7,67 +7,234 @@ * Copyright (C) 1999 - 2001 Kanoj Sarcar */ -#undef DEBUG - -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/errno.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/types.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/ioport.h> -#include <linux/timex.h> -#include <linux/smp.h> -#include <linux/random.h> #include <linux/kernel.h> -#include <linux/kernel_stat.h> -#include <linux/delay.h> #include <linux/bitops.h> -#include <asm/bootinfo.h> #include <asm/io.h> -#include <asm/mipsregs.h> - -#include <asm/processor.h> +#include <asm/irq_cpu.h> +#include <asm/pci/bridge.h> #include <asm/sn/addrs.h> #include <asm/sn/agent.h> #include <asm/sn/arch.h> #include <asm/sn/hub.h> #include <asm/sn/intr.h> -/* - * Linux has a controller-independent x86 interrupt architecture. - * every controller has a 'controller-template', that is used - * by the main code to do the right thing. Each driver-visible - * interrupt source is transparently wired to the appropriate - * controller. Thus drivers need not be aware of the - * interrupt-controller. - * - * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC, - * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC. - * (IO-APICs assumed to be messaging to Pentium local-APICs) - * - * the code is designed to be easily extended with new/different - * interrupt controllers, without having to do assembly magic. 
- */ +struct hub_irq_data { + struct bridge_controller *bc; + u64 *irq_mask[2]; + cpuid_t cpu; + int bit; + int pin; +}; -extern asmlinkage void ip27_irq(void); +static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT); -/* - * Find first bit set - */ -static int ms1bit(unsigned long x) +static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask); + +static inline int alloc_level(void) +{ + int level; + +again: + level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT); + if (level >= IP27_HUB_IRQ_COUNT) + return -ENOSPC; + + if (test_and_set_bit(level, hub_irq_map)) + goto again; + + return level; +} + +static void enable_hub_irq(struct irq_data *d) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu); + + set_bit(hd->bit, mask); + __raw_writeq(mask[0], hd->irq_mask[0]); + __raw_writeq(mask[1], hd->irq_mask[1]); +} + +static void disable_hub_irq(struct irq_data *d) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu); + + clear_bit(hd->bit, mask); + __raw_writeq(mask[0], hd->irq_mask[0]); + __raw_writeq(mask[1], hd->irq_mask[1]); +} + +static unsigned int startup_bridge_irq(struct irq_data *d) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + struct bridge_controller *bc; + nasid_t nasid; + u32 device; + int pin; + + if (!hd) + return -EINVAL; + + pin = hd->pin; + bc = hd->bc; + + nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(hd->cpu)); + bridge_write(bc, b_int_addr[pin].addr, + (0x20000 | hd->bit | (nasid << 8))); + bridge_set(bc, b_int_enable, (1 << pin)); + bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */ + + /* + * Enable sending of an interrupt clear packt to the hub on a high to + * low transition of the interrupt pin. + * + * IRIX sets additional bits in the address which are documented as + * reserved in the bridge docs. + */ + bridge_set(bc, b_int_mode, (1UL << pin)); + + /* + * We assume the bridge to have a 1:1 mapping between devices + * (slots) and intr pins. + */ + device = bridge_read(bc, b_int_device); + device &= ~(7 << (pin*3)); + device |= (pin << (pin*3)); + bridge_write(bc, b_int_device, device); + + bridge_read(bc, b_wid_tflush); + + enable_hub_irq(d); + + return 0; /* Never anything pending. */ +} + +static void shutdown_bridge_irq(struct irq_data *d) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + struct bridge_controller *bc; + int pin = hd->pin; + + if (!hd) + return; + + disable_hub_irq(d); + + bc = hd->bc; + bridge_clr(bc, b_int_enable, (1 << pin)); + bridge_read(bc, b_wid_tflush); +} + +static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask) +{ + nasid_t nasid; + int cpu; + + cpu = cpumask_first_and(mask, cpu_online_mask); + nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + hd->cpu = cpu; + if (!cputoslice(cpu)) { + hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A); + hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A); + } else { + hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B); + hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B); + } + + /* Make sure it's not already pending when we connect it. 
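
alloc_level() above scans hub_irq_map with find_first_zero_bit() and then claims the slot with test_and_set_bit(), retrying when the claim fails, so two CPUs racing for the same free level cannot both win it. A small user-space model of that allocation loop using C11 atomics; the size and names are stand-ins.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define LEVEL_COUNT 128                       /* stands in for IP27_HUB_IRQ_COUNT */

	static atomic_bool level_used[LEVEL_COUNT];   /* stands in for hub_irq_map */

	/* Scan for a free level, then claim it atomically; if another thread
	 * grabbed the same slot between the scan and the claim, rescan. */
	static int alloc_level(void)
	{
		int i;
	again:
		for (i = 0; i < LEVEL_COUNT; i++)
			if (!atomic_load(&level_used[i]))
				break;
		if (i == LEVEL_COUNT)
			return -1;                    /* mirrors -ENOSPC */
		if (atomic_exchange(&level_used[i], true))
			goto again;
		return i;
	}

	static void free_level(int level)
	{
		atomic_store(&level_used[level], false);
	}

	int main(void)
	{
		int a = alloc_level(), b = alloc_level();

		printf("allocated levels %d and %d\n", a, b);       /* 0 and 1 */
		free_level(a);
		printf("freed slot comes back: %d\n", alloc_level()); /* 0 again */
		return 0;
	}
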
*/ + REMOTE_HUB_CLR_INTR(nasid, hd->bit); +} + +static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask, + bool force) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + + if (!hd) + return -EINVAL; + + if (irqd_is_started(d)) + disable_hub_irq(d); + + setup_hub_mask(hd, mask); + + if (irqd_is_started(d)) + startup_bridge_irq(d); + + irq_data_update_effective_affinity(d, cpumask_of(hd->cpu)); + + return 0; +} + +static struct irq_chip hub_irq_type = { + .name = "HUB", + .irq_startup = startup_bridge_irq, + .irq_shutdown = shutdown_bridge_irq, + .irq_mask = disable_hub_irq, + .irq_unmask = enable_hub_irq, + .irq_set_affinity = set_affinity_hub_irq, +}; + +int request_bridge_irq(struct bridge_controller *bc, int pin) { - int b = 0, s; + struct hub_irq_data *hd; + struct hub_data *hub; + struct irq_desc *desc; + int swlevel; + int irq; + + hd = kzalloc(sizeof(*hd), GFP_KERNEL); + if (!hd) + return -ENOMEM; + + swlevel = alloc_level(); + if (unlikely(swlevel < 0)) { + kfree(hd); + return -EAGAIN; + } + irq = swlevel + IP27_HUB_IRQ_BASE; + + hd->bc = bc; + hd->bit = swlevel; + hd->pin = pin; + irq_set_chip_data(irq, hd); + + /* use CPU connected to nearest hub */ + hub = hub_data(NASID_TO_COMPACT_NODEID(bc->nasid)); + setup_hub_mask(hd, &hub->h_cpus); - s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s; - s = 8; if (x >> 8 == 0) s = 0; b += s; x >>= s; - s = 4; if (x >> 4 == 0) s = 0; b += s; x >>= s; - s = 2; if (x >> 2 == 0) s = 0; b += s; x >>= s; - s = 1; if (x >> 1 == 0) s = 0; b += s; + desc = irq_to_desc(irq); + desc->irq_common_data.node = bc->nasid; + cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus); - return b; + return irq; +} + +void ip27_hub_irq_init(void) +{ + int i; + + for (i = IP27_HUB_IRQ_BASE; + i < (IP27_HUB_IRQ_BASE + IP27_HUB_IRQ_COUNT); i++) + irq_set_chip_and_handler(i, &hub_irq_type, handle_level_irq); + + /* + * Some interrupts are reserved by hardware or by software convention. + * Mark these as reserved right away so they won't be used accidentally + * later. + */ + for (i = 0; i <= BASE_PCI_IRQ; i++) + set_bit(i, hub_irq_map); + + set_bit(IP_PEND0_6_63, hub_irq_map); + + for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) + set_bit(i, hub_irq_map); } /* @@ -82,23 +249,19 @@ static int ms1bit(unsigned long x) * Kanoj 05.13.00 */ -static void ip27_do_irq_mask0(void) +static void ip27_do_irq_mask0(struct irq_desc *desc) { - int irq, swlevel; - hubreg_t pend0, mask0; cpuid_t cpu = smp_processor_id(); - int pi_int_mask0 = - (cputoslice(cpu) == 0) ? 
PI_INT_MASK0_A : PI_INT_MASK0_B; + unsigned long *mask = per_cpu(irq_enable_mask, cpu); + u64 pend0; /* copied from Irix intpend0() */ pend0 = LOCAL_HUB_L(PI_INT_PEND0); - mask0 = LOCAL_HUB_L(pi_int_mask0); - pend0 &= mask0; /* Pick intrs we should look at */ + pend0 &= mask[0]; /* Pick intrs we should look at */ if (!pend0) return; - swlevel = ms1bit(pend0); #ifdef CONFIG_SMP if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ); @@ -108,106 +271,66 @@ static void ip27_do_irq_mask0(void) scheduler_ipi(); } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); - irq_enter(); generic_smp_call_function_interrupt(); - irq_exit(); } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); - irq_enter(); generic_smp_call_function_interrupt(); - irq_exit(); } else #endif - { - /* "map" swlevel to irq */ - struct slice_data *si = cpu_data[cpu].data; - - irq = si->level_to_irq[swlevel]; - do_IRQ(irq); - } + generic_handle_irq(__ffs(pend0) + IP27_HUB_IRQ_BASE); LOCAL_HUB_L(PI_INT_PEND0); } -static void ip27_do_irq_mask1(void) +static void ip27_do_irq_mask1(struct irq_desc *desc) { - int irq, swlevel; - hubreg_t pend1, mask1; cpuid_t cpu = smp_processor_id(); - int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B; - struct slice_data *si = cpu_data[cpu].data; + unsigned long *mask = per_cpu(irq_enable_mask, cpu); + u64 pend1; /* copied from Irix intpend0() */ pend1 = LOCAL_HUB_L(PI_INT_PEND1); - mask1 = LOCAL_HUB_L(pi_int_mask1); - pend1 &= mask1; /* Pick intrs we should look at */ + pend1 &= mask[1]; /* Pick intrs we should look at */ if (!pend1) return; - swlevel = ms1bit(pend1); - /* "map" swlevel to irq */ - irq = si->level_to_irq[swlevel]; - LOCAL_HUB_CLR_INTR(swlevel); - do_IRQ(irq); + generic_handle_irq(__ffs(pend1) + IP27_HUB_IRQ_BASE + 64); LOCAL_HUB_L(PI_INT_PEND1); } -static void ip27_prof_timer(void) -{ - panic("CPU %d got a profiling interrupt", smp_processor_id()); -} - -static void ip27_hub_error(void) -{ - panic("CPU %d got a hub error interrupt", smp_processor_id()); -} - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned long pending = read_c0_cause() & read_c0_status(); - extern unsigned int rt_timer_irq; - - if (pending & CAUSEF_IP4) - do_IRQ(rt_timer_irq); - else if (pending & CAUSEF_IP2) /* PI_INT_PEND_0 or CC_PEND_{A|B} */ - ip27_do_irq_mask0(); - else if (pending & CAUSEF_IP3) /* PI_INT_PEND_1 */ - ip27_do_irq_mask1(); - else if (pending & CAUSEF_IP5) - ip27_prof_timer(); - else if (pending & CAUSEF_IP6) - ip27_hub_error(); -} - -void __init arch_init_irq(void) -{ -} - void install_ipi(void) { - int slice = LOCAL_HUB_L(PI_CPU_NUM); int cpu = smp_processor_id(); - struct slice_data *si = cpu_data[cpu].data; - struct hub_data *hub = hub_data(cpu_to_node(cpu)); + unsigned long *mask = per_cpu(irq_enable_mask, cpu); + int slice = LOCAL_HUB_L(PI_CPU_NUM); int resched, call; resched = CPU_RESCHED_A_IRQ + slice; - __set_bit(resched, hub->irq_alloc_mask); - __set_bit(resched, si->irq_enable_mask); + set_bit(resched, mask); LOCAL_HUB_CLR_INTR(resched); call = CPU_CALL_A_IRQ + slice; - __set_bit(call, hub->irq_alloc_mask); - __set_bit(call, si->irq_enable_mask); + set_bit(call, mask); LOCAL_HUB_CLR_INTR(call); if (slice == 0) { - LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]); - LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]); + LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]); + LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]); } else { - LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]); - 
LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]); + LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]); + LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]); } } + +void __init arch_init_irq(void) +{ + mips_cpu_irq_init(); + ip27_hub_irq_init(); + + irq_set_percpu_devid(IP27_HUB_PEND0_IRQ); + irq_set_chained_handler(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0); + irq_set_percpu_devid(IP27_HUB_PEND1_IRQ); + irq_set_chained_handler(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1); +} diff --git a/arch/mips/sgi-ip27/ip27-irqno.c b/arch/mips/sgi-ip27/ip27-irqno.c deleted file mode 100644 index 957ab58e1c00..000000000000 --- a/arch/mips/sgi-ip27/ip27-irqno.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/types.h> - -#include <asm/barrier.h> - -static DECLARE_BITMAP(irq_map, NR_IRQS); - -int allocate_irqno(void) -{ - int irq; - -again: - irq = find_first_zero_bit(irq_map, NR_IRQS); - - if (irq >= NR_IRQS) - return -ENOSPC; - - if (test_and_set_bit(irq, irq_map)) - goto again; - - return irq; -} - -/* - * Allocate the 16 legacy interrupts for i8259 devices. This happens early - * in the kernel initialization so treating allocation failure as BUG() is - * ok. - */ -void __init alloc_legacy_irqno(void) -{ - int i; - - for (i = 0; i <= 16; i++) - BUG_ON(test_and_set_bit(i, irq_map)); -} - -void free_irqno(unsigned int irq) -{ - smp_mb__before_atomic(); - clear_bit(irq, irq_map); - smp_mb__after_atomic(); -} diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 813d13f92957..fb077a947575 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -44,7 +44,7 @@ static int is_fine_dirmode(void) return ((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE; } -static hubreg_t get_region(cnodeid_t cnode) +static u64 get_region(cnodeid_t cnode) { if (fine_mode) return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT; @@ -52,9 +52,9 @@ static hubreg_t get_region(cnodeid_t cnode) return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT; } -static hubreg_t region_mask; +static u64 region_mask; -static void gen_region_mask(hubreg_t *region_mask) +static void gen_region_mask(u64 *region_mask) { cnodeid_t cnode; @@ -154,11 +154,11 @@ static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b) } if (router_a == NULL) { - printk("node_distance: router_a NULL\n"); + pr_info("node_distance: router_a NULL\n"); return -1; } if (router_b == NULL) { - printk("node_distance: router_b NULL\n"); + pr_info("node_distance: router_b NULL\n"); return -1; } @@ -203,17 +203,17 @@ static void __init dump_topology(void) klrou_t *router; cnodeid_t row, col; - printk("************** Topology ********************\n"); + pr_info("************** Topology ********************\n"); - printk(" "); + pr_info(" "); for_each_online_node(col) - printk("%02d ", col); - printk("\n"); + pr_cont("%02d ", col); + pr_cont("\n"); for_each_online_node(row) { - printk("%02d ", row); + pr_info("%02d ", row); for_each_online_node(col) - printk("%2d ", node_distance(row, col)); - printk("\n"); + pr_cont("%2d ", node_distance(row, col)); + pr_cont("\n"); } for_each_online_node(cnode) { @@ -230,7 +230,7 @@ static void __init dump_topology(void) do { if (brd->brd_flags & DUPLICATE_BOARD) continue; - printk("Router %d:", router_num); + 
pr_cont("Router %d:", router_num); router_num++; router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]); @@ -244,11 +244,11 @@ static void __init dump_topology(void) router->rou_port[port].port_offset); if (dest_brd->brd_type == KLTYPE_IP27) - printk(" %d", dest_brd->brd_nasid); + pr_cont(" %d", dest_brd->brd_nasid); if (dest_brd->brd_type == KLTYPE_ROUTER) - printk(" r"); + pr_cont(" r"); } - printk("\n"); + pr_cont("\n"); } while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) ); } @@ -373,7 +373,7 @@ static void __init szmem(void) if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) > (slot0sz << PAGE_SHIFT)) { - printk("Ignoring slot %d onwards on node %d\n", + pr_info("Ignoring slot %d onwards on node %d\n", slot, node); slot = MAX_MEM_SLOTS; continue; diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c index 8ac2bfa35fb6..3aae388561d9 100644 --- a/arch/mips/sgi-ip27/ip27-nmi.c +++ b/arch/mips/sgi-ip27/ip27-nmi.c @@ -62,75 +62,75 @@ void nmi_cpu_eframe_save(nasid_t nasid, int slice) (TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) + slice * IP27_NMI_KREGS_CPU_SIZE); - printk("NMI nasid %d: slice %d\n", nasid, slice); + pr_emerg("NMI nasid %d: slice %d\n", nasid, slice); /* * Saved main processor registers */ for (i = 0; i < 32; ) { if ((i % 4) == 0) - printk("$%2d :", i); - printk(" %016lx", nr->gpr[i]); + pr_emerg("$%2d :", i); + pr_cont(" %016lx", nr->gpr[i]); i++; if ((i % 4) == 0) - printk("\n"); + pr_cont("\n"); } - printk("Hi : (value lost)\n"); - printk("Lo : (value lost)\n"); + pr_emerg("Hi : (value lost)\n"); + pr_emerg("Lo : (value lost)\n"); /* * Saved cp0 registers */ - printk("epc : %016lx %pS\n", nr->epc, (void *) nr->epc); - printk("%s\n", print_tainted()); - printk("ErrEPC: %016lx %pS\n", nr->error_epc, (void *) nr->error_epc); - printk("ra : %016lx %pS\n", nr->gpr[31], (void *) nr->gpr[31]); - printk("Status: %08lx ", nr->sr); + pr_emerg("epc : %016lx %pS\n", nr->epc, (void *)nr->epc); + pr_emerg("%s\n", print_tainted()); + pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc); + pr_emerg("ra : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]); + pr_emerg("Status: %08lx ", nr->sr); if (nr->sr & ST0_KX) - printk("KX "); + pr_cont("KX "); if (nr->sr & ST0_SX) - printk("SX "); + pr_cont("SX "); if (nr->sr & ST0_UX) - printk("UX "); + pr_cont("UX "); switch (nr->sr & ST0_KSU) { case KSU_USER: - printk("USER "); + pr_cont("USER "); break; case KSU_SUPERVISOR: - printk("SUPERVISOR "); + pr_cont("SUPERVISOR "); break; case KSU_KERNEL: - printk("KERNEL "); + pr_cont("KERNEL "); break; default: - printk("BAD_MODE "); + pr_cont("BAD_MODE "); break; } if (nr->sr & ST0_ERL) - printk("ERL "); + pr_cont("ERL "); if (nr->sr & ST0_EXL) - printk("EXL "); + pr_cont("EXL "); if (nr->sr & ST0_IE) - printk("IE "); - printk("\n"); + pr_cont("IE "); + pr_cont("\n"); - printk("Cause : %08lx\n", nr->cause); - printk("PrId : %08x\n", read_c0_prid()); - printk("BadVA : %016lx\n", nr->badva); - printk("CErr : %016lx\n", nr->cache_err); - printk("NMI_SR: %016lx\n", nr->nmi_sr); + pr_emerg("Cause : %08lx\n", nr->cause); + pr_emerg("PrId : %08x\n", read_c0_prid()); + pr_emerg("BadVA : %016lx\n", nr->badva); + pr_emerg("CErr : %016lx\n", nr->cache_err); + pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr); - printk("\n"); + pr_emerg("\n"); } void nmi_dump_hub_irq(nasid_t nasid, int slice) { - hubreg_t mask0, mask1, pend0, pend1; + u64 mask0, mask1, pend0, pend1; if (slice == 0) { /* Slice A */ mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A); @@ 
-143,9 +143,9 @@ void nmi_dump_hub_irq(nasid_t nasid, int slice) pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0); pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1); - printk("PI_INT_MASK0: %16Lx PI_INT_MASK1: %16Lx\n", mask0, mask1); - printk("PI_INT_PEND0: %16Lx PI_INT_PEND1: %16Lx\n", pend0, pend1); - printk("\n\n"); + pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1); + pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1); + pr_emerg("\n\n"); } /* diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index 545446dfe7fa..20b81209c6b8 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -177,7 +177,7 @@ static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action) ip27_send_ipi_single(i, action); } -static void ip27_init_secondary(void) +static void ip27_init_cpu(void) { per_cpu_init(); } @@ -235,9 +235,10 @@ static void __init ip27_prepare_cpus(unsigned int max_cpus) const struct plat_smp_ops ip27_smp_ops = { .send_ipi_single = ip27_send_ipi_single, .send_ipi_mask = ip27_send_ipi_mask, - .init_secondary = ip27_init_secondary, + .init_secondary = ip27_init_cpu, .smp_finish = ip27_smp_finish, .boot_secondary = ip27_boot_secondary, .smp_setup = ip27_smp_setup, .prepare_cpus = ip27_prepare_cpus, + .prepare_boot_cpu = ip27_init_cpu, }; diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index 9d55247533a5..9b4b9ac621a3 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -38,20 +38,6 @@ #include <asm/sn/sn0/hubio.h> #include <asm/pci/bridge.h> -static void enable_rt_irq(struct irq_data *d) -{ -} - -static void disable_rt_irq(struct irq_data *d) -{ -} - -static struct irq_chip rt_irq_type = { - .name = "SN HUB RT timer", - .irq_mask = disable_rt_irq, - .irq_unmask = enable_rt_irq, -}; - static int rt_next_event(unsigned long delta, struct clock_event_device *evt) { unsigned int cpu = smp_processor_id(); @@ -65,8 +51,6 @@ static int rt_next_event(unsigned long delta, struct clock_event_device *evt) return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? 
-ETIME : 0; } -unsigned int rt_timer_irq; - static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent); static DEFINE_PER_CPU(char [11], hub_rt_name); @@ -87,6 +71,7 @@ static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id) struct irqaction hub_rt_irqaction = { .handler = hub_rt_counter_handler, + .percpu_dev_id = &hub_rt_clockevent, .flags = IRQF_PERCPU | IRQF_TIMER, .name = "hub-rt", }; @@ -107,7 +92,6 @@ void hub_rt_clock_event_init(void) unsigned int cpu = smp_processor_id(); struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); unsigned char *name = per_cpu(hub_rt_name, cpu); - int irq = rt_timer_irq; sprintf(name, "hub-rt %d", cpu); cd->name = name; @@ -118,29 +102,19 @@ void hub_rt_clock_event_init(void) cd->min_delta_ns = clockevent_delta2ns(0x300, cd); cd->min_delta_ticks = 0x300; cd->rating = 200; - cd->irq = irq; + cd->irq = IP27_RT_TIMER_IRQ; cd->cpumask = cpumask_of(cpu); cd->set_next_event = rt_next_event; clockevents_register_device(cd); + + enable_percpu_irq(IP27_RT_TIMER_IRQ, IRQ_TYPE_NONE); } static void __init hub_rt_clock_event_global_init(void) { - int irq; - - do { - smp_wmb(); - irq = rt_timer_irq; - if (irq) - break; - - irq = allocate_irqno(); - if (irq < 0) - panic("Allocation of irq number for timer failed"); - } while (xchg(&rt_timer_irq, irq)); - - irq_set_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq); - setup_irq(irq, &hub_rt_irqaction); + irq_set_handler(IP27_RT_TIMER_IRQ, handle_percpu_devid_irq); + irq_set_percpu_devid(IP27_RT_TIMER_IRQ); + setup_percpu_irq(IP27_RT_TIMER_IRQ, &hub_rt_irqaction); } static u64 hub_rt_read(struct clocksource *cs) @@ -194,8 +168,6 @@ void cpu_time_init(void) panic("No information about myself?"); printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed); - - set_c0_status(SRB_TIMOCLK); } void hub_rtc_init(cnodeid_t cnode) diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c index 4fe5678ba74d..ce06aaa115ae 100644 --- a/arch/mips/sgi-ip27/ip27-xtalk.c +++ b/arch/mips/sgi-ip27/ip27-xtalk.c @@ -99,7 +99,7 @@ static int xbow_probe(nasid_t nasid) return 0; } -void xtalk_probe_node(cnodeid_t nid) +static void xtalk_probe_node(cnodeid_t nid) { volatile u64 hubreg; nasid_t nasid; @@ -133,3 +133,14 @@ void xtalk_probe_node(cnodeid_t nid) break; } } + +static int __init xtalk_init(void) +{ + cnodeid_t cnode; + + for_each_online_node(cnode) + xtalk_probe_node(cnode); + + return 0; +} +arch_initcall(xtalk_init); diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index f6fd340e39c2..0ede4deb8181 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -8,6 +8,7 @@ ccflags-vdso := \ $(filter -E%,$(KBUILD_CFLAGS)) \ $(filter -mmicromips,$(KBUILD_CFLAGS)) \ $(filter -march=%,$(KBUILD_CFLAGS)) \ + $(filter -m%-float,$(KBUILD_CFLAGS)) \ -D__VDSO__ ifdef CONFIG_CC_IS_CLANG @@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) -$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 +$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) @@ -169,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) -$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 +$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) diff --git a/arch/nds32/include/asm/uaccess.h 
b/arch/nds32/include/asm/uaccess.h index 53dcb49b0b12..116598b47c4d 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -37,7 +37,6 @@ extern int fixup_exception(struct pt_regs *regs); #define KERNEL_DS ((mm_segment_t) { ~0UL }) #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define user_addr_max get_fs diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c index ab7ab46234b1..9712fd474f2c 100644 --- a/arch/nds32/kernel/process.c +++ b/arch/nds32/kernel/process.c @@ -121,7 +121,7 @@ void show_regs(struct pt_regs *regs) regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]); pr_info(" IRQs o%s Segment %s\n", interrupts_enabled(regs) ? "n" : "ff", - segment_eq(get_fs(), get_ds())? "kernel" : "user"); + segment_eq(get_fs(), KERNEL_DS)? "kernel" : "user"); } EXPORT_SYMBOL(show_regs); diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index e0ea10806491..e83f831a76f9 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -26,7 +26,6 @@ #define USER_DS MAKE_MM_SEG(0x80000000UL) #define KERNEL_DS MAKE_MM_SEG(0) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(seg) (current_thread_info()->addr_limit = (seg)) diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index a44682c8adc3..45afd9ab78c1 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -42,7 +42,6 @@ */ #define KERNEL_DS (~0UL) -#define get_ds() (KERNEL_DS) #define USER_DS (TASK_SIZE) #define get_fs() (current_thread_info()->addr_limit) diff --git a/arch/parisc/boot/Makefile b/arch/parisc/boot/Makefile index cad68a584884..41cce0706f80 100644 --- a/arch/parisc/boot/Makefile +++ b/arch/parisc/boot/Makefile @@ -2,12 +2,6 @@ # Makefile for the linux parisc-specific parts of the boot image creator. # -COMPILE_VERSION := __linux_compile_version_id__`hostname | \ - tr -c '[0-9A-Za-z]' '_'`__`date | \ - tr -c '[0-9A-Za-z]' '_'`_t - -ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. - targets := image targets += bzImage subdir- := compressed diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 44a9f97194aa..d5bd94247371 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -2,8 +2,6 @@ #ifndef _PARISC_DMA_MAPPING_H #define _PARISC_DMA_MAPPING_H -#include <asm/cacheflush.h> - /* ** We need to support 4 different coherent dma models with one binary: ** @@ -28,48 +26,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return hppa_dma_ops; } -static inline void * -parisc_walk_tree(struct device *dev) -{ - struct device *otherdev; - if(likely(dev->platform_data != NULL)) - return dev->platform_data; - /* OK, just traverse the bus to find it */ - for(otherdev = dev->parent; otherdev; - otherdev = otherdev->parent) { - if(otherdev->platform_data) { - dev->platform_data = otherdev->platform_data; - break; - } - } - return dev->platform_data; -} - -#define GET_IOC(dev) ({ \ - void *__pdata = parisc_walk_tree(dev); \ - __pdata ? 
HBA_DATA(__pdata)->iommu : NULL; \ -}) - -#ifdef CONFIG_IOMMU_CCIO -struct parisc_device; -struct ioc; -void * ccio_get_iommu(const struct parisc_device *dev); -int ccio_request_resource(const struct parisc_device *dev, - struct resource *res); -int ccio_allocate_resource(const struct parisc_device *dev, - struct resource *res, unsigned long size, - unsigned long min, unsigned long max, unsigned long align); -#else /* !CONFIG_IOMMU_CCIO */ -#define ccio_get_iommu(dev) NULL -#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res) -#define ccio_allocate_resource(dev, res, size, min, max, align) \ - allocate_resource(&iomem_resource, res, size, min, max, \ - align, NULL, NULL) -#endif /* !CONFIG_IOMMU_CCIO */ - -#ifdef CONFIG_IOMMU_SBA -struct parisc_device; -void * sba_get_iommu(struct parisc_device *dev); -#endif - #endif diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index 1a1235a9d533..7f7039516e53 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h @@ -22,6 +22,7 @@ typedef struct { unsigned int irq_stack_usage; #ifdef CONFIG_SMP unsigned int irq_resched_count; + unsigned int irq_call_count; #endif unsigned int irq_unaligned_count; unsigned int irq_fpassist_count; diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 3328fd17c19d..f14465b84de4 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -56,7 +56,7 @@ struct pci_hba_data { #define DINO_MAX_LMMIO_RESOURCES 3 unsigned long lmmio_space_offset; /* CPU view - PCI view */ - void * iommu; /* IOMMU this device is under */ + struct ioc *iommu; /* IOMMU this device is under */ /* REVISIT - spinlock to protect resources? */ #define HBA_NAME_SIZE 16 @@ -66,8 +66,6 @@ struct pci_hba_data { char gmmio_name[HBA_NAME_SIZE]; }; -#define HBA_DATA(d) ((struct pci_hba_data *) (d)) - /* ** We support 2^16 I/O ports per HBA. 
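A recurring cleanup in this part of the series is the removal of get_ds(): every per-architecture definition deleted here expanded to KERNEL_DS, so callers such as the nds32 show_regs() hunk simply substitute the constant. A minimal sketch of the pattern, using only the segment_eq()/get_fs()/KERNEL_DS helpers these headers already provide (the wrapper name is invented for illustration):

	/* Illustrative only: get_ds() always evaluated to KERNEL_DS, so the
	 * comparison can use the constant directly. */
	static inline bool running_with_kernel_addr_limit(void)
	{
		return segment_eq(get_fs(), KERNEL_DS);	/* was: segment_eq(get_fs(), get_ds()) */
	}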
These are set up in the form ** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 5b187d40d604..19bb2e46cd36 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -44,6 +44,7 @@ int pdc_model_sysmodel(char *name); int pdc_model_cpuid(unsigned long *cpu_id); int pdc_model_versions(unsigned long *versions, int id); int pdc_model_capabilities(unsigned long *capabilities); +int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no); int pdc_cache_info(struct pdc_cache_info *cache); int pdc_spaceid_bits(unsigned long *space_bits); #ifndef CONFIG_PA20 diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h index bce9ee1c1c99..24355ed1453a 100644 --- a/arch/parisc/include/asm/pdcpat.h +++ b/arch/parisc/include/asm/pdcpat.h @@ -67,6 +67,10 @@ #define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */ +/* PDC PAT COMPLEX */ + +#define PDC_PAT_COMPLEX 66L + /* PDC PAT CPU -- CPU configuration within the protection domain */ #define PDC_PAT_CPU 67L diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 30ac2865ea73..ebbb9ffe038c 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -16,7 +16,6 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h index 593eeb573138..15211723ebf5 100644 --- a/arch/parisc/include/uapi/asm/pdc.h +++ b/arch/parisc/include/uapi/asm/pdc.h @@ -60,6 +60,8 @@ #define PDC_MODEL_NVA_UNSUPPORTED (3 << 4) #define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */ #define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */ +#define PDC_MODEL_GET_PLATFORM_INFO 10 /* returns platform info */ +#define PDC_MODEL_GET_INSTALL_KERNEL 11 /* returns kernel for installation */ #define PA89_INSTRUCTION_SET 0x4 /* capabilities returned */ #define PA90_INSTRUCTION_SET 0x8 @@ -99,7 +101,7 @@ #define PDC_TOD 9 /* time-of-day clock (TOD) */ #define PDC_TOD_READ 0 /* read TOD */ #define PDC_TOD_WRITE 1 /* write TOD */ - +#define PDC_TOD_CALIBRATE 2 /* calibrate timers */ #define PDC_STABLE 10 /* stable storage (sprockets) */ #define PDC_STABLE_READ 0 @@ -109,15 +111,22 @@ #define PDC_STABLE_INITIALIZE 4 #define PDC_NVOLATILE 11 /* often not implemented */ +#define PDC_NVOLATILE_READ 0 +#define PDC_NVOLATILE_WRITE 1 +#define PDC_NVOLATILE_RETURN_SIZE 2 +#define PDC_NVOLATILE_VERIFY_CONTENTS 3 +#define PDC_NVOLATILE_INITIALIZE 4 #define PDC_ADD_VALID 12 /* Memory validation PDC call */ #define PDC_ADD_VALID_VERIFY 0 /* Make PDC_ADD_VALID verify region */ +#define PDC_DEBUG 14 /* Obsolete */ + #define PDC_INSTR 15 /* get instr to invoke PDCE_CHECK() */ #define PDC_PROC 16 /* (sprockets) */ -#define PDC_CONFIG 16 /* (sprockets) */ +#define PDC_CONFIG 17 /* (sprockets) */ #define PDC_CONFIG_DECONFIG 0 #define PDC_CONFIG_DRECONFIG 1 #define PDC_CONFIG_DRETURN_CONFIG 2 @@ -167,6 +176,15 @@ #define PDC_SOFT_POWER_INFO 0 /* return info about the soft power switch */ #define PDC_SOFT_POWER_ENABLE 1 /* enable/disable soft power switch */ +#define PDC_ALLOC 24 /* allocate static storage for PDC & IODC */ + +#define PDC_CRASH_PREP 25 /* Prepare system for crash dump */ +#define PDC_CRASH_DUMP 0 /* Do platform specific preparations for dump */ 
+#define PDC_CRASH_LOG_CEC_ERROR 1 /* Dump hardware registers */ + +#define PDC_SCSI_PARMS 26 /* Get and set SCSI parameters */ +#define PDC_SCSI_GET_PARMS 0 /* Get SCSI parameters for I/O device */ +#define PDC_SCSI_SET_PARMS 1 /* Set SCSI parameters for I/O device */ /* HVERSION dependent */ @@ -260,6 +278,10 @@ #define PDC_PCI_READ_MON_TYPE 15 #define PDC_PCI_WRITE_MON_TYPE 16 +#define PDC_RELOCATE 149 /* (sprockets) */ +#define PDC_RELOCATE_GET_RELOCINFO 0 +#define PDC_RELOCATE_CHECKSUM 1 +#define PDC_RELOCATE_RELOCATE 2 /* Get SCSI Interface Card info: SDTR, SCSI ID, mode (SE vs LVD) */ #define PDC_INITIATOR 163 diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 061b9cf2a779..16e428f03526 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -3,6 +3,7 @@ #define _UAPI_ASM_SOCKET_H #include <asm/sockios.h> +#include <asm/bitsperlong.h> /* For setsockopt(2) */ #define SOL_SOCKET 0xffff @@ -21,8 +22,8 @@ #define SO_RCVBUFFORCE 0x100b #define SO_SNDLOWAT 0x1003 #define SO_RCVLOWAT 0x1004 -#define SO_SNDTIMEO 0x1005 -#define SO_RCVTIMEO 0x1006 +#define SO_SNDTIMEO_OLD 0x1005 +#define SO_RCVTIMEO_OLD 0x1006 #define SO_ERROR 0x1007 #define SO_TYPE 0x1008 #define SO_PROTOCOL 0x1028 @@ -34,10 +35,6 @@ #define SO_BSDCOMPAT 0x400e #define SO_PASSCRED 0x4010 #define SO_PEERCRED 0x4011 -#define SO_TIMESTAMP 0x4012 -#define SCM_TIMESTAMP SO_TIMESTAMP -#define SO_TIMESTAMPNS 0x4013 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x4016 @@ -58,9 +55,6 @@ #define SO_MARK 0x401f -#define SO_TIMESTAMPING 0x4020 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 0x4021 #define SO_WIFI_STATUS 0x4022 @@ -107,4 +101,40 @@ #define SO_TXTIME 0x4036 #define SCM_TXTIME SO_TXTIME +#define SO_BINDTOIFINDEX 0x4037 + +#define SO_TIMESTAMP_OLD 0x4012 +#define SO_TIMESTAMPNS_OLD 0x4013 +#define SO_TIMESTAMPING_OLD 0x4020 + +#define SO_TIMESTAMP_NEW 0x4038 +#define SO_TIMESTAMPNS_NEW 0x4039 +#define SO_TIMESTAMPING_NEW 0x403A + +#define SO_RCVTIMEO_NEW 0x4040 +#define SO_SNDTIMEO_NEW 0x4041 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index e6f3b49f2fd7..7a17551ea31e 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -569,6 +569,30 @@ int pdc_model_capabilities(unsigned long *capabilities) } /** + * pdc_model_platform_info - Returns machine product and serial number. 
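The parisc socket.h hunk above renames the timestamp and timeout options to *_OLD, adds *_NEW siblings, and lets non-kernel code pick between them at compile time: on a 64-bit ABI (__BITS_PER_LONG == 64, hence the new asm/bitsperlong.h include) the old numbers are kept, while on 32-bit ABIs the choice follows whether time_t is still the same width as __kernel_long_t. A hypothetical userspace check, not part of the patch, showing how the selection resolves (the header set is an assumption):

	#include <stdio.h>
	#include <time.h>                /* time_t */
	#include <linux/posix_types.h>   /* __kernel_long_t */
	#include <asm/socket.h>          /* the uapi header patched above */

	int main(void)
	{
		/* Prints 0x4012 (SO_TIMESTAMP_OLD) on 64-bit parisc or on a
		 * 32-bit ABI with 32-bit time_t; 0x4038 (SO_TIMESTAMP_NEW)
		 * once time_t is wider than __kernel_long_t. */
		printf("SO_TIMESTAMP = %#x\n", (unsigned int)SO_TIMESTAMP);
		return 0;
	}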
+ * @orig_prod_num: Return buffer for original product number. + * @current_prod_num: Return buffer for current product number. + * @serial_no: Return buffer for serial number. + * + * Returns strings containing the original and current product numbers and the + * serial number of the system. + */ +int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, + char *serial_no) +{ + int retval; + unsigned long flags; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_GET_PLATFORM_INFO, + __pa(orig_prod_num), __pa(current_prod_num), __pa(serial_no)); + convert_to_wide(pdc_result); + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} + +/** * pdc_cache_info - Return cache and TLB information. * @cache_info: The return buffer. * diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 0ca254085a66..23040a67583e 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -117,7 +117,10 @@ int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest) return -EINVAL; /* whatever mask they set, we just allow one CPU */ - cpu_dest = cpumask_first_and(dest, cpu_online_mask); + cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1), + dest, cpu_online_mask); + if (cpu_dest >= nr_cpu_ids) + cpu_dest = cpumask_first_and(dest, cpu_online_mask); return cpu_dest; } @@ -175,10 +178,16 @@ int arch_show_interrupts(struct seq_file *p, int prec) # endif #endif #ifdef CONFIG_SMP - seq_printf(p, "%*s: ", prec, "RES"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); - seq_puts(p, " Rescheduling interrupts\n"); + if (num_online_cpus() > 1) { + seq_printf(p, "%*s: ", prec, "RES"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); + seq_puts(p, " Rescheduling interrupts\n"); + seq_printf(p, "%*s: ", prec, "CAL"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); + seq_puts(p, " Function call interrupts\n"); + } #endif seq_printf(p, "%*s: ", prec, "UAH"); for_each_online_cpu(j) diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 82bd0d0927ce..7f4d042856b5 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -242,6 +242,7 @@ static int __init processor_probe(struct parisc_device *dev) void __init collect_boot_cpu_data(void) { unsigned long cr16_seed; + char orig_prod_num[64], current_prod_num[64], serial_no[64]; memset(&boot_cpu_data, 0, sizeof(boot_cpu_data)); @@ -301,6 +302,14 @@ void __init collect_boot_cpu_data(void) _parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) || (boot_cpu_data.cpu_type == mako2); #endif + + if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) { + printk(KERN_INFO "product %s, original product %s, S/N: %s\n", + current_prod_num, orig_prod_num, serial_no); + add_device_randomness(orig_prod_num, strlen(orig_prod_num)); + add_device_randomness(current_prod_num, strlen(current_prod_num)); + add_device_randomness(serial_no, strlen(serial_no)); + } } diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2582df1c529b..0964c236e3e5 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + if (test_thread_flag(TIF_SYSCALL_TRACE)) 
{ + int rc = tracehook_report_syscall_entry(regs); + /* - * Tracing decided this syscall should not happen or the - * debugger stored an invalid system call number. Skip - * the system call and the system call restart handling. + * As tracesys_next does not set %r28 to -ENOSYS + * when %r20 is set to -1, initialize it here. */ - regs->gr[20] = -1UL; - goto out; + regs->gr[28] = -ENOSYS; + + if (rc) { + /* + * A nonzero return code from + * tracehook_report_syscall_entry() tells us + * to prevent the syscall execution. Skip + * the syscall call and the syscall restart handling. + * + * Note that the tracer may also just change + * regs->gr[20] to an invalid syscall number, + * that is handled by tracesys_next. + */ + regs->gr[20] = -1UL; + return -1; + } } /* Do the secure computing check after ptrace. */ @@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) regs->gr[24] & 0xffffffff, regs->gr[23] & 0xffffffff); -out: /* * Sign extend the syscall number to 64bit since it may have been * modified by a compat ptrace call diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index f2cf86ac279b..15dd9e21be7e 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -40,6 +40,7 @@ #include <linux/sched/clock.h> #include <linux/start_kernel.h> +#include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/pdc.h> diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 5e26dbede5fc..d9e2d69c9e48 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -155,6 +155,7 @@ ipi_interrupt(int irq, void *dev_id) case IPI_CALL_FUNC: smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); + inc_irq_stat(irq_call_count); generic_smp_call_function_interrupt(); break; diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 472a818e8c17..7e1ccafadf57 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -218,7 +218,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) return; } - oops_in_progress = 1; + bust_spinlocks(1); oops_enter(); @@ -396,7 +396,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o { static DEFINE_SPINLOCK(terminate_lock); - oops_in_progress = 1; + bust_spinlocks(1); set_eiem(0); local_irq_disable(); diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 059187a3ded7..d0b166256f1a 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -79,36 +79,6 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly; physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly; int npmem_ranges __read_mostly; -/* - * get_memblock() allocates pages via memblock. - * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it - * doesn't allocate from bottom to top which is needed because we only created - * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code. 
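The parisc mm/init.c hunks here retire the hand-rolled get_memblock() helper: once memblock_set_bottom_up(true) is in effect, plain memblock_alloc() already returns low, zeroed memory that sits inside the initial mapping created by the assembly bootup code. A compressed sketch of the resulting pattern (the function name and error string are mine; the calls mirror the map_pages()/pagetable_init() hunks that follow):

	/* Illustrative kernel-context sketch of the converted allocation path. */
	static void __init example_early_alloc(void)
	{
		pmd_t *pmd;

		/* Only the first KERNEL_INITIAL_SIZE is mapped at this point,
		 * so make memblock hand out memory from the bottom up. */
		memblock_set_bottom_up(true);

		/* memblock_alloc() zeroes the memory; panicking on NULL
		 * replaces the old get_memblock() failure path. */
		pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER, PAGE_SIZE << PMD_ORDER);
		if (!pmd)
			panic("%s: pmd allocation failed\n", __func__);
	}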
- */ -static void * __init get_memblock(unsigned long size) -{ - static phys_addr_t search_addr __initdata; - phys_addr_t phys; - - if (!search_addr) - search_addr = PAGE_ALIGN(__pa((unsigned long) &_end)); - search_addr = ALIGN(search_addr, size); - while (!memblock_is_region_memory(search_addr, size) || - memblock_is_region_reserved(search_addr, size)) { - search_addr += size; - } - phys = search_addr; - - if (phys) - memblock_reserve(phys, size); - else - panic("get_memblock() failed.\n"); - - memset(__va(phys), 0, size); - - return __va(phys); -} - #ifdef CONFIG_64BIT #define MAX_MEM (~0UL) #else /* !CONFIG_64BIT */ @@ -321,6 +291,13 @@ static void __init setup_bootmem(void) max_pfn = start_pfn + npages; } + /* + * We can't use memblock top-down allocations because we only + * created the initial mapping up to KERNEL_INITIAL_SIZE in + * the assembly bootup code. + */ + memblock_set_bottom_up(true); + /* IOMMU is always used to access "high mem" on those boxes * that can support enough mem that a PCI device couldn't * directly DMA to any physical addresses. @@ -442,7 +419,10 @@ static void __init map_pages(unsigned long start_vaddr, */ if (!pmd) { - pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER); + pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER, + PAGE_SIZE << PMD_ORDER); + if (!pmd) + panic("pmd allocation failed.\n"); pmd = (pmd_t *) __pa(pmd); } @@ -461,7 +441,10 @@ static void __init map_pages(unsigned long start_vaddr, pg_table = (pte_t *)pmd_address(*pmd); if (!pg_table) { - pg_table = (pte_t *) get_memblock(PAGE_SIZE); + pg_table = memblock_alloc(PAGE_SIZE, + PAGE_SIZE); + if (!pg_table) + panic("page table allocation failed\n"); pg_table = (pte_t *) __pa(pg_table); } @@ -700,7 +683,10 @@ static void __init pagetable_init(void) } #endif - empty_zero_page = get_memblock(PAGE_SIZE); + empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!empty_zero_page) + panic("zero page allocation failed.\n"); + } static void __init gateway_init(void) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 2e6ada28da64..d8c8d7c9df15 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud) static inline int pud_present(pud_t pud) { - return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); + return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); } extern struct page *pud_page(pud_t pud); @@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd) static inline int pgd_present(pgd_t pgd) { - return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); + return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); } static inline pte_t pgd_pte(pgd_t pgd) @@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, #define pmd_move_must_withdraw pmd_move_must_withdraw struct spinlock; -static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, - struct spinlock *old_pmd_ptl, - struct vm_area_struct *vma) -{ - if (radix_enabled()) - return false; - /* - * Archs like ppc64 use pgtable to store per pmd - * specific information. So when we switch the pmd, - * we should also withdraw and deposit the pgtable - */ - return true; -} - - +extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl, + struct vm_area_struct *vma); +/* + * Hash translation mode use the deposited table to store hash pte + * slot information. 
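One subtlety in the book3s/64 pgtable.h hunk above: pud_present() and pgd_present() return int, while the present bit lives in the upper half of the big-endian raw value, so returning the masked 64-bit quantity directly can narrow to zero. The added !! collapses the flag to 0/1 before the implicit conversion. A minimal standalone illustration (the bit position used here is an assumption for demonstration, not taken from the hunk):

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in for a page-table flag stored above bit 31. */
	#define EXAMPLE_PRESENT_BIT 0x8000000000000000ULL

	static int present_truncating(uint64_t raw)
	{
		return raw & EXAMPLE_PRESENT_BIT;	/* high bits lost when narrowed to int */
	}

	static int present_fixed(uint64_t raw)
	{
		return !!(raw & EXAMPLE_PRESENT_BIT);	/* what the hunk above does */
	}

	int main(void)
	{
		uint64_t raw = EXAMPLE_PRESENT_BIT;

		printf("%d %d\n", present_truncating(raw), present_fixed(raw));	/* prints: 0 1 */
		return 0;
	}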
+ */ #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit static inline bool arch_needs_pgtable_deposit(void) { diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 19a8834e0398..f9513ad38fa6 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -337,6 +337,7 @@ #define PPC_INST_DIVWU 0x7c000396 #define PPC_INST_DIVD 0x7c0003d2 #define PPC_INST_RLWINM 0x54000000 +#define PPC_INST_RLWINM_DOT 0x54000001 #define PPC_INST_RLWIMI 0x50000000 #define PPC_INST_RLDICL 0x78000000 #define PPC_INST_RLDICR 0x78000004 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index e3a731793ea2..4d6d905e9138 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -28,7 +28,6 @@ #define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) #endif -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.addr_limit) static inline void set_fs(mm_segment_t fs) diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index 94de465e0920..12aa0c43e775 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -11,8 +11,8 @@ #define SO_RCVLOWAT 16 #define SO_SNDLOWAT 17 -#define SO_RCVTIMEO 18 -#define SO_SNDTIMEO 19 +#define SO_RCVTIMEO_OLD 18 +#define SO_SNDTIMEO_OLD 19 #define SO_PASSCRED 20 #define SO_PEERCRED 21 diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index f3c31f5e1026..ecd31569a120 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -400,3 +400,25 @@ void arch_report_meminfo(struct seq_file *m) atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); } #endif /* CONFIG_PROC_FS */ + +/* + * For hash translation mode, we use the deposited table to store hash slot + * information and they are stored at PTRS_PER_PMD offset from related pmd + * location. Hence a pmd move requires deposit and withdraw. + * + * For radix translation with split pmd ptl, we store the deposited table in the + * pmd page. Hence if we have different pmd page we need to withdraw during pmd + * move. + * + * With hash we use deposited table always irrespective of anon or not. + * With radix we use deposited table only for anonymous mapping. 
+ */ +int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl, + struct vm_area_struct *vma) +{ + if (radix_enabled()) + return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); + + return true; +} diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index c2d5192ed64f..549e9490ff2a 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -165,6 +165,10 @@ #define PPC_RLWINM(d, a, i, mb, me) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \ ___PPC_RS(a) | __PPC_SH(i) | \ __PPC_MB(mb) | __PPC_ME(me)) +#define PPC_RLWINM_DOT(d, a, i, mb, me) EMIT(PPC_INST_RLWINM_DOT | \ + ___PPC_RA(d) | ___PPC_RS(a) | \ + __PPC_SH(i) | __PPC_MB(mb) | \ + __PPC_ME(me)) #define PPC_RLWIMI(d, a, i, mb, me) EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \ ___PPC_RS(a) | __PPC_SH(i) | \ __PPC_MB(mb) | __PPC_ME(me)) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 7ce57657d3b8..4194d3cfb60c 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -768,36 +768,58 @@ emit_clear: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_X: true_cond = COND_GT; goto cond_branch; case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_X: true_cond = COND_LT; goto cond_branch; case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_X: true_cond = COND_GE; goto cond_branch; case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_X: true_cond = COND_LE; goto cond_branch; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_X: true_cond = COND_EQ; goto cond_branch; case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_X: true_cond = COND_NE; goto cond_branch; case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_X: true_cond = COND_NE; /* Fall through */ @@ -809,18 +831,44 @@ cond_branch: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: /* unsigned comparison */ - PPC_CMPLD(dst_reg, src_reg); + if (BPF_CLASS(code) == BPF_JMP32) + PPC_CMPLW(dst_reg, src_reg); + else + PPC_CMPLD(dst_reg, src_reg); break; case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | 
BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: /* signed comparison */ - PPC_CMPD(dst_reg, src_reg); + if (BPF_CLASS(code) == BPF_JMP32) + PPC_CMPW(dst_reg, src_reg); + else + PPC_CMPD(dst_reg, src_reg); break; case BPF_JMP | BPF_JSET | BPF_X: - PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg); + case BPF_JMP32 | BPF_JSET | BPF_X: + if (BPF_CLASS(code) == BPF_JMP) { + PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, + src_reg); + } else { + int tmp_reg = b2p[TMP_REG_1]; + + PPC_AND(tmp_reg, dst_reg, src_reg); + PPC_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0, + 31); + } break; case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JEQ | BPF_K: @@ -828,43 +876,87 @@ cond_branch: case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + { + bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32; + /* * Need sign-extended load, so only positive * values can be used as imm in cmpldi */ - if (imm >= 0 && imm < 32768) - PPC_CMPLDI(dst_reg, imm); - else { + if (imm >= 0 && imm < 32768) { + if (is_jmp32) + PPC_CMPLWI(dst_reg, imm); + else + PPC_CMPLDI(dst_reg, imm); + } else { /* sign-extending load */ PPC_LI32(b2p[TMP_REG_1], imm); /* ... but unsigned comparison */ - PPC_CMPLD(dst_reg, b2p[TMP_REG_1]); + if (is_jmp32) + PPC_CMPLW(dst_reg, + b2p[TMP_REG_1]); + else + PPC_CMPLD(dst_reg, + b2p[TMP_REG_1]); } break; + } case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + { + bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32; + /* * signed comparison, so any 16-bit value * can be used in cmpdi */ - if (imm >= -32768 && imm < 32768) - PPC_CMPDI(dst_reg, imm); - else { + if (imm >= -32768 && imm < 32768) { + if (is_jmp32) + PPC_CMPWI(dst_reg, imm); + else + PPC_CMPDI(dst_reg, imm); + } else { PPC_LI32(b2p[TMP_REG_1], imm); - PPC_CMPD(dst_reg, b2p[TMP_REG_1]); + if (is_jmp32) + PPC_CMPW(dst_reg, + b2p[TMP_REG_1]); + else + PPC_CMPD(dst_reg, + b2p[TMP_REG_1]); } break; + } case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: /* andi does not sign-extend the immediate */ if (imm >= 0 && imm < 32768) /* PPC_ANDI is _only/always_ dot-form */ PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm); else { - PPC_LI32(b2p[TMP_REG_1], imm); - PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, - b2p[TMP_REG_1]); + int tmp_reg = b2p[TMP_REG_1]; + + PPC_LI32(tmp_reg, imm); + if (BPF_CLASS(code) == BPF_JMP) { + PPC_AND_DOT(tmp_reg, dst_reg, + tmp_reg); + } else { + PPC_AND(tmp_reg, dst_reg, + tmp_reg); + PPC_RLWINM_DOT(tmp_reg, tmp_reg, + 0, 0, 31); + } } break; } @@ -1093,6 +1185,7 @@ skip_codegen_passes: bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE)); if (!fp->is_func || extra_pass) { + bpf_prog_fill_jited_linfo(fp, addrs); out_addrs: kfree(addrs); kfree(jit_data); diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 72238eedc360..d2b8e6061933 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1306,15 +1306,6 @@ static int h_24x7_event_init(struct perf_event *event) return -EINVAL; } - /* unsupported modes and filters */ - if (event->attr.exclude_user || - 
event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - /* no branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; @@ -1577,6 +1568,7 @@ static struct pmu h_24x7_pmu = { .start_txn = h_24x7_event_start_txn, .commit_txn = h_24x7_event_commit_txn, .cancel_txn = h_24x7_event_cancel_txn, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static int hv_24x7_init(void) diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index 43fabb3cae0f..735e77b09cdb 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -232,15 +232,6 @@ static int h_gpci_event_init(struct perf_event *event) return -EINVAL; } - /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - /* no branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; @@ -285,6 +276,7 @@ static struct pmu h_gpci_pmu = { .start = h_gpci_event_start, .stop = h_gpci_event_stop, .read = h_gpci_event_update, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static int hv_gpci_init(void) diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index f292a3f284f1..b1c37cc3fa98 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -473,15 +473,6 @@ static int nest_imc_event_init(struct perf_event *event) if (event->hw.sample_period) return -EINVAL; - /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; @@ -748,15 +739,6 @@ static int core_imc_event_init(struct perf_event *event) if (event->hw.sample_period) return -EINVAL; - /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; @@ -1069,6 +1051,7 @@ static int update_pmu_ops(struct imc_pmu *pmu) pmu->pmu.stop = imc_event_stop; pmu->pmu.read = imc_event_update; pmu->pmu.attr_groups = pmu->attr_groups; + pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group; switch (pmu->domain) { diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7db3119f8a5b..145373f0e5dc 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) pnv_pci_ioda2_setup_dma_pe(phb, pe); #ifdef CONFIG_IOMMU_API + iommu_register_group(&pe->table_group, + pe->phb->hose->global_number, pe->pe_number); pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); #endif } diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 45fb70b4bfa7..ef9448a907c6 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, return 0; pe = &phb->ioda.pe_array[pdn->pe_number]; + if (!pe->table_group.group) + return 0; iommu_add_device(&pe->table_group, dev); 
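The three powerpc PMU hunks above (hv-24x7, hv-gpci, imc) all make the same trade: instead of each event_init() rejecting the attr.exclude_* flags by hand, the pmu advertises PERF_PMU_CAP_NO_EXCLUDE and lets the perf core refuse exclusion requests centrally. A condensed before/after of the driver side (the example_* names are mine; the core-side check is not shown here):

	/* Before (condensed from the removed lines above): each driver did this. */
	static int example_event_init_check(struct perf_event *event)
	{
		if (event->attr.exclude_user || event->attr.exclude_kernel ||
		    event->attr.exclude_hv || event->attr.exclude_idle ||
		    event->attr.exclude_host || event->attr.exclude_guest)
			return -EINVAL;
		return 0;
	}

	/* After: the driver only declares the limitation once. */
	static struct pmu example_pmu = {
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
		/* .event_init, .add, .del, ... unchanged */
	};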
return 0; case BUS_NOTIFY_DEL_DEVICE: diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 7d6457ab5d34..bba281b1fe1b 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) { unsigned long ret[PLPAR_HCALL_BUFSIZE]; uint64_t rc, token; + uint64_t saved = 0; /* * When the hypervisor cannot map all the requested memory in a single @@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p) rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, p->blocks, BIND_ANY_ADDR, token); token = ret[0]; + if (!saved) + saved = ret[1]; cond_resched(); } while (rc == H_BUSY); @@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) return -ENXIO; } - p->bound_addr = ret[1]; + p->bound_addr = saved; dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 515fc3cc9687..bd149905a5b5 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -49,6 +49,7 @@ config RISCV select RISCV_TIMER select GENERIC_IRQ_MULTI_HANDLER select ARCH_HAS_PTE_SPECIAL + select HAVE_EBPF_JIT if 64BIT config MMU def_bool y diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 4b594f2e4f7e..c6342e638ef7 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -77,7 +77,7 @@ KBUILD_IMAGE := $(boot)/Image.gz head-y := arch/riscv/kernel/head.o -core-y += arch/riscv/kernel/ arch/riscv/mm/ +core-y += arch/riscv/kernel/ arch/riscv/mm/ arch/riscv/net/ libs-y += arch/riscv/lib/ diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index 2fa2942be221..470755cb7558 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -35,6 +35,12 @@ #define _PAGE_SPECIAL _PAGE_SOFT #define _PAGE_TABLE _PAGE_PRESENT +/* + * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to + * distinguish them from swapped out pages + */ +#define _PAGE_PROT_NONE _PAGE_READ + #define _PAGE_PFN_SHIFT 10 /* Set of bits to preserve across pte_modify() */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 16301966d65b..a8179a8c1491 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -44,7 +44,7 @@ /* Page protection bits */ #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) -#define PAGE_NONE __pgprot(0) +#define PAGE_NONE __pgprot(_PAGE_PROT_NONE) #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) @@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; static inline int pmd_present(pmd_t pmd) { - return (pmd_val(pmd) & _PAGE_PRESENT); + return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } static inline int pmd_none(pmd_t pmd) @@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr) static inline int pte_present(pte_t pte) { - return (pte_val(pte) & _PAGE_PRESENT); + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } static inline int pte_none(pte_t pte) @@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, * * Format of swap PTE: * bit 0: _PAGE_PRESENT (zero) - * bit 1: reserved for future use (zero) + * bit 1: _PAGE_PROT_NONE (zero) * bits 2 to 6: swap type * bits 7 to XLEN-1: swap 
offset */ diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 637b896894fc..a00168b980d2 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -41,7 +41,6 @@ #define KERNEL_DS (~0UL) #define USER_DS (TASK_SIZE) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 1e1395d63dab..65df1dfdc303 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -18,8 +18,6 @@ #include <asm/cache.h> #include <asm/thread_info.h> -#define MAX_BYTES_PER_LONG 0x10 - OUTPUT_ARCH(riscv) ENTRY(_start) @@ -76,6 +74,8 @@ SECTIONS *(.sbss*) } + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) + EXCEPTION_TABLE(0x10) NOTES @@ -83,10 +83,6 @@ SECTIONS *(.rel.dyn*) } - BSS_SECTION(MAX_BYTES_PER_LONG, - MAX_BYTES_PER_LONG, - MAX_BYTES_PER_LONG) - _end = .; STABS_DEBUG diff --git a/arch/riscv/net/Makefile b/arch/riscv/net/Makefile new file mode 100644 index 000000000000..a132220cc582 --- /dev/null +++ b/arch/riscv/net/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c new file mode 100644 index 000000000000..80b12aa5e10d --- /dev/null +++ b/arch/riscv/net/bpf_jit_comp.c @@ -0,0 +1,1602 @@ +// SPDX-License-Identifier: GPL-2.0 +/* BPF JIT compiler for RV64G + * + * Copyright(c) 2019 Björn Töpel <bjorn.topel@gmail.com> + * + */ + +#include <linux/bpf.h> +#include <linux/filter.h> +#include <asm/cacheflush.h> + +enum { + RV_REG_ZERO = 0, /* The constant value 0 */ + RV_REG_RA = 1, /* Return address */ + RV_REG_SP = 2, /* Stack pointer */ + RV_REG_GP = 3, /* Global pointer */ + RV_REG_TP = 4, /* Thread pointer */ + RV_REG_T0 = 5, /* Temporaries */ + RV_REG_T1 = 6, + RV_REG_T2 = 7, + RV_REG_FP = 8, + RV_REG_S1 = 9, /* Saved registers */ + RV_REG_A0 = 10, /* Function argument/return values */ + RV_REG_A1 = 11, /* Function arguments */ + RV_REG_A2 = 12, + RV_REG_A3 = 13, + RV_REG_A4 = 14, + RV_REG_A5 = 15, + RV_REG_A6 = 16, + RV_REG_A7 = 17, + RV_REG_S2 = 18, /* Saved registers */ + RV_REG_S3 = 19, + RV_REG_S4 = 20, + RV_REG_S5 = 21, + RV_REG_S6 = 22, + RV_REG_S7 = 23, + RV_REG_S8 = 24, + RV_REG_S9 = 25, + RV_REG_S10 = 26, + RV_REG_S11 = 27, + RV_REG_T3 = 28, /* Temporaries */ + RV_REG_T4 = 29, + RV_REG_T5 = 30, + RV_REG_T6 = 31, +}; + +#define RV_REG_TCC RV_REG_A6 +#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */ + +static const int regmap[] = { + [BPF_REG_0] = RV_REG_A5, + [BPF_REG_1] = RV_REG_A0, + [BPF_REG_2] = RV_REG_A1, + [BPF_REG_3] = RV_REG_A2, + [BPF_REG_4] = RV_REG_A3, + [BPF_REG_5] = RV_REG_A4, + [BPF_REG_6] = RV_REG_S1, + [BPF_REG_7] = RV_REG_S2, + [BPF_REG_8] = RV_REG_S3, + [BPF_REG_9] = RV_REG_S4, + [BPF_REG_FP] = RV_REG_S5, + [BPF_REG_AX] = RV_REG_T0, +}; + +enum { + RV_CTX_F_SEEN_TAIL_CALL = 0, + RV_CTX_F_SEEN_CALL = RV_REG_RA, + RV_CTX_F_SEEN_S1 = RV_REG_S1, + RV_CTX_F_SEEN_S2 = RV_REG_S2, + RV_CTX_F_SEEN_S3 = RV_REG_S3, + RV_CTX_F_SEEN_S4 = RV_REG_S4, + RV_CTX_F_SEEN_S5 = RV_REG_S5, + RV_CTX_F_SEEN_S6 = RV_REG_S6, +}; + +struct rv_jit_context { + struct bpf_prog *prog; + u32 *insns; /* RV insns */ + int ninsns; + int epilogue_offset; + int *offset; /* BPF to RV */ + unsigned long flags; + int stack_size; +}; + +struct rv_jit_data { + struct bpf_binary_header *header; + u8 *image; + struct rv_jit_context ctx; +}; + +static u8 bpf_to_rv_reg(int bpf_reg, struct 
rv_jit_context *ctx) +{ + u8 reg = regmap[bpf_reg]; + + switch (reg) { + case RV_CTX_F_SEEN_S1: + case RV_CTX_F_SEEN_S2: + case RV_CTX_F_SEEN_S3: + case RV_CTX_F_SEEN_S4: + case RV_CTX_F_SEEN_S5: + case RV_CTX_F_SEEN_S6: + __set_bit(reg, &ctx->flags); + } + return reg; +}; + +static bool seen_reg(int reg, struct rv_jit_context *ctx) +{ + switch (reg) { + case RV_CTX_F_SEEN_CALL: + case RV_CTX_F_SEEN_S1: + case RV_CTX_F_SEEN_S2: + case RV_CTX_F_SEEN_S3: + case RV_CTX_F_SEEN_S4: + case RV_CTX_F_SEEN_S5: + case RV_CTX_F_SEEN_S6: + return test_bit(reg, &ctx->flags); + } + return false; +} + +static void mark_call(struct rv_jit_context *ctx) +{ + __set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags); +} + +static bool seen_call(struct rv_jit_context *ctx) +{ + return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags); +} + +static void mark_tail_call(struct rv_jit_context *ctx) +{ + __set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags); +} + +static bool seen_tail_call(struct rv_jit_context *ctx) +{ + return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags); +} + +static u8 rv_tail_call_reg(struct rv_jit_context *ctx) +{ + mark_tail_call(ctx); + + if (seen_call(ctx)) { + __set_bit(RV_CTX_F_SEEN_S6, &ctx->flags); + return RV_REG_S6; + } + return RV_REG_A6; +} + +static void emit(const u32 insn, struct rv_jit_context *ctx) +{ + if (ctx->insns) + ctx->insns[ctx->ninsns] = insn; + + ctx->ninsns++; +} + +static u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd, u8 opcode) +{ + return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | + (rd << 7) | opcode; +} + +static u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode) +{ + return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) | + opcode; +} + +static u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode) +{ + u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f; + + return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | + (imm4_0 << 7) | opcode; +} + +static u32 rv_sb_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode) +{ + u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4); + u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10); + + return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | + (imm4_1 << 7) | opcode; +} + +static u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode) +{ + return (imm31_12 << 12) | (rd << 7) | opcode; +} + +static u32 rv_uj_insn(u32 imm20_1, u8 rd, u8 opcode) +{ + u32 imm; + + imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) | + ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11); + + return (imm << 12) | (rd << 7) | opcode; +} + +static u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1, + u8 funct3, u8 rd, u8 opcode) +{ + u8 funct7 = (funct5 << 2) | (aq << 1) | rl; + + return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode); +} + +static u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b); +} + +static u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x13); +} + +static u32 rv_addw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b); +} + +static u32 rv_add(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 0, rd, 0x33); +} + +static u32 rv_subw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b); +} + +static u32 rv_sub(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33); +} + +static u32 rv_and(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 7, rd, 0x33); +} + +static u32 rv_or(u8 rd, u8 
rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 6, rd, 0x33); +} + +static u32 rv_xor(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 4, rd, 0x33); +} + +static u32 rv_mulw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b); +} + +static u32 rv_mul(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 0, rd, 0x33); +} + +static u32 rv_divuw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b); +} + +static u32 rv_divu(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 5, rd, 0x33); +} + +static u32 rv_remuw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b); +} + +static u32 rv_remu(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 7, rd, 0x33); +} + +static u32 rv_sllw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b); +} + +static u32 rv_sll(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 1, rd, 0x33); +} + +static u32 rv_srlw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b); +} + +static u32 rv_srl(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 5, rd, 0x33); +} + +static u32 rv_sraw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b); +} + +static u32 rv_sra(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33); +} + +static u32 rv_lui(u8 rd, u32 imm31_12) +{ + return rv_u_insn(imm31_12, rd, 0x37); +} + +static u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 1, rd, 0x13); +} + +static u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 7, rd, 0x13); +} + +static u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 6, rd, 0x13); +} + +static u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 4, rd, 0x13); +} + +static u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b); +} + +static u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b); +} + +static u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 5, rd, 0x13); +} + +static u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b); +} + +static u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13); +} + +static u32 rv_jal(u8 rd, u32 imm20_1) +{ + return rv_uj_insn(imm20_1, rd, 0x6f); +} + +static u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x67); +} + +static u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 0, 0x63); +} + +static u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 6, 0x63); +} + +static u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 7, 0x63); +} + +static u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 1, 0x63); +} + +static u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 4, 0x63); +} + +static u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 5, 0x63); +} + +static u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23); +} + +static u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23); +} + +static u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23); +} + +static u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2) +{ + return 
rv_s_insn(imm11_0, rs2, rs1, 3, 0x23); +} + +static u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 4, rd, 0x03); +} + +static u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 5, rd, 0x03); +} + +static u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 6, rd, 0x03); +} + +static u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 3, rd, 0x03); +} + +static u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) +{ + return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f); +} + +static u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) +{ + return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f); +} + +static bool is_12b_int(s64 val) +{ + return -(1 << 11) <= val && val < (1 << 11); +} + +static bool is_13b_int(s64 val) +{ + return -(1 << 12) <= val && val < (1 << 12); +} + +static bool is_21b_int(s64 val) +{ + return -(1L << 20) <= val && val < (1L << 20); +} + +static bool is_32b_int(s64 val) +{ + return -(1L << 31) <= val && val < (1L << 31); +} + +static int is_12b_check(int off, int insn) +{ + if (!is_12b_int(off)) { + pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", + insn, (int)off); + return -1; + } + return 0; +} + +static int is_13b_check(int off, int insn) +{ + if (!is_13b_int(off)) { + pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", + insn, (int)off); + return -1; + } + return 0; +} + +static int is_21b_check(int off, int insn) +{ + if (!is_21b_int(off)) { + pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", + insn, (int)off); + return -1; + } + return 0; +} + +static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) +{ + /* Note that the immediate from the add is sign-extended, + * which means that we need to compensate this by adding 2^12, + * when the 12th bit is set. A simpler way of doing this, and + * getting rid of the check, is to just add 2**11 before the + * shift. The "Loading a 32-Bit constant" example from the + * "Computer Organization and Design, RISC-V edition" book by + * Patterson/Hennessy highlights this fact. + * + * This also means that we need to process LSB to MSB. 
+ */ + s64 upper = (val + (1 << 11)) >> 12, lower = val & 0xfff; + int shift; + + if (is_32b_int(val)) { + if (upper) + emit(rv_lui(rd, upper), ctx); + + if (!upper) { + emit(rv_addi(rd, RV_REG_ZERO, lower), ctx); + return; + } + + emit(rv_addiw(rd, rd, lower), ctx); + return; + } + + shift = __ffs(upper); + upper >>= shift; + shift += 12; + + emit_imm(rd, upper, ctx); + + emit(rv_slli(rd, rd, shift), ctx); + if (lower) + emit(rv_addi(rd, rd, lower), ctx); +} + +static int rv_offset(int bpf_to, int bpf_from, struct rv_jit_context *ctx) +{ + int from = ctx->offset[bpf_from] - 1, to = ctx->offset[bpf_to]; + + return (to - from) << 2; +} + +static int epilogue_offset(struct rv_jit_context *ctx) +{ + int to = ctx->epilogue_offset, from = ctx->ninsns; + + return (to - from) << 2; +} + +static void __build_epilogue(u8 reg, struct rv_jit_context *ctx) +{ + int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8; + + if (seen_reg(RV_REG_RA, ctx)) { + emit(rv_ld(RV_REG_RA, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + emit(rv_ld(RV_REG_FP, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + if (seen_reg(RV_REG_S1, ctx)) { + emit(rv_ld(RV_REG_S1, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S2, ctx)) { + emit(rv_ld(RV_REG_S2, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S3, ctx)) { + emit(rv_ld(RV_REG_S3, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S4, ctx)) { + emit(rv_ld(RV_REG_S4, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S5, ctx)) { + emit(rv_ld(RV_REG_S5, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S6, ctx)) { + emit(rv_ld(RV_REG_S6, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + + emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx); + /* Set return value. */ + emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx); + emit(rv_jalr(RV_REG_ZERO, reg, 0), ctx); +} + +static void emit_zext_32(u8 reg, struct rv_jit_context *ctx) +{ + emit(rv_slli(reg, reg, 32), ctx); + emit(rv_srli(reg, reg, 32), ctx); +} + +static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) +{ + int tc_ninsn, off, start_insn = ctx->ninsns; + u8 tcc = rv_tail_call_reg(ctx); + + /* a0: &ctx + * a1: &array + * a2: index + * + * if (index >= array->map.max_entries) + * goto out; + */ + tc_ninsn = insn ? 
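The key trick in emit_imm() is the bias described in the comment above: ADDIW sign-extends its 12-bit immediate, so the value handed to LUI is pre-rounded by 2^11 to absorb a possibly negative low part. A minimal user-space sketch of the 32-bit split (hypothetical helper names, not code from this patch) makes the compensation concrete:

    #include <assert.h>
    #include <stdint.h>

    static void split_imm32(int32_t val, int32_t *hi20, int32_t *lo12)
    {
            /* upper part for LUI: round by 2^11 before shifting */
            *hi20 = (int32_t)(((int64_t)val + (1 << 11)) >> 12);
            /* lower part as ADDIW sees it: a sign-extended 12-bit value */
            *lo12 = ((val & 0xfff) ^ 0x800) - 0x800;
    }

    int main(void)
    {
            int32_t hi, lo;

            split_imm32(0x12345fff, &hi, &lo);
            /* LUI materializes hi << 12 (0x12346000), ADDIW then adds -1 */
            assert(hi == 0x12346 && lo == -1);
            assert(((int64_t)hi << 12) + lo == 0x12345fff);
            return 0;
    }

For 64-bit constants emit_imm() recurses on the remaining upper bits, shifts them into place with SLLI and adds the low part, applying the same rounding at each level.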
ctx->offset[insn] - ctx->offset[insn - 1] : + ctx->offset[0]; + emit_zext_32(RV_REG_A2, ctx); + + off = offsetof(struct bpf_array, map.max_entries); + if (is_12b_check(off, insn)) + return -1; + emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx); + off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + if (is_13b_check(off, insn)) + return -1; + emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx); + + /* if (--TCC < 0) + * goto out; + */ + emit(rv_addi(RV_REG_T1, tcc, -1), ctx); + off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + if (is_13b_check(off, insn)) + return -1; + emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx); + + /* prog = array->ptrs[index]; + * if (!prog) + * goto out; + */ + emit(rv_slli(RV_REG_T2, RV_REG_A2, 3), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_A1), ctx); + off = offsetof(struct bpf_array, ptrs); + if (is_12b_check(off, insn)) + return -1; + emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx); + off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + if (is_13b_check(off, insn)) + return -1; + emit(rv_beq(RV_REG_T2, RV_REG_ZERO, off >> 1), ctx); + + /* goto *(prog->bpf_func + 4); */ + off = offsetof(struct bpf_prog, bpf_func); + if (is_12b_check(off, insn)) + return -1; + emit(rv_ld(RV_REG_T3, off, RV_REG_T2), ctx); + emit(rv_addi(RV_REG_T3, RV_REG_T3, 4), ctx); + emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx); + __build_epilogue(RV_REG_T3, ctx); + return 0; +} + +static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn, + struct rv_jit_context *ctx) +{ + u8 code = insn->code; + + switch (code) { + case BPF_JMP | BPF_JA: + case BPF_JMP | BPF_CALL: + case BPF_JMP | BPF_EXIT: + case BPF_JMP | BPF_TAIL_CALL: + break; + default: + *rd = bpf_to_rv_reg(insn->dst_reg, ctx); + } + + if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) || + code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) || + code & BPF_LDX || code & BPF_STX) + *rs = bpf_to_rv_reg(insn->src_reg, ctx); +} + +static int rv_offset_check(int *rvoff, s16 off, int insn, + struct rv_jit_context *ctx) +{ + *rvoff = rv_offset(insn + off, insn, ctx); + return is_13b_check(*rvoff, insn); +} + +static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) +{ + emit(rv_addi(RV_REG_T2, *rd, 0), ctx); + emit_zext_32(RV_REG_T2, ctx); + emit(rv_addi(RV_REG_T1, *rs, 0), ctx); + emit_zext_32(RV_REG_T1, ctx); + *rd = RV_REG_T2; + *rs = RV_REG_T1; +} + +static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) +{ + emit(rv_addiw(RV_REG_T2, *rd, 0), ctx); + emit(rv_addiw(RV_REG_T1, *rs, 0), ctx); + *rd = RV_REG_T2; + *rs = RV_REG_T1; +} + +static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx) +{ + emit(rv_addi(RV_REG_T2, *rd, 0), ctx); + emit_zext_32(RV_REG_T2, ctx); + emit_zext_32(RV_REG_T1, ctx); + *rd = RV_REG_T2; +} + +static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx) +{ + emit(rv_addiw(RV_REG_T2, *rd, 0), ctx); + *rd = RV_REG_T2; +} + +static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + bool extra_pass) +{ + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 || + BPF_CLASS(insn->code) == BPF_JMP; + int rvoff, i = insn - ctx->prog->insnsi; + u8 rd = -1, rs = -1, code = insn->code; + s16 off = insn->off; + s32 imm = insn->imm; + + init_regs(&rd, &rs, insn, ctx); + + switch (code) { + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_X: + case BPF_ALU64 | BPF_MOV | BPF_X: + emit(is64 ? 
rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + + /* dst = dst OP src */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + emit(rv_and(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + emit(rv_or(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + emit(rv_xor(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_X: + emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_X: + emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_LSH | BPF_X: + emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx); + break; + + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + emit(is64 ? 
rv_sub(rd, RV_REG_ZERO, rd) : + rv_subw(rd, RV_REG_ZERO, rd), ctx); + break; + + /* dst = BSWAP##imm(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + { + int shift = 64 - imm; + + emit(rv_slli(rd, rd, shift), ctx); + emit(rv_srli(rd, rd, shift), ctx); + break; + } + case BPF_ALU | BPF_END | BPF_FROM_BE: + emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + if (imm == 16) + goto out_be; + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + if (imm == 32) + goto out_be; + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); +out_be: + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + + emit(rv_addi(rd, RV_REG_T2, 0), ctx); + break; + + /* dst = imm */ + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_MOV | BPF_K: + emit_imm(rd, imm, ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + + /* dst = dst OP imm */ + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + if (is_12b_int(imm)) { + emit(is64 ? rv_addi(rd, rd, imm) : + rv_addiw(rd, rd, imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? rv_add(rd, rd, RV_REG_T1) : + rv_addw(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + if (is_12b_int(-imm)) { + emit(is64 ? rv_addi(rd, rd, -imm) : + rv_addiw(rd, rd, -imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? 
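RV64I has no single byte-reverse instruction, so the BPF_FROM_BE case open-codes the swap: one byte at a time is peeled off into T1, accumulated into T2 with a shift, and the sequence stops early for 16- and 32-bit swaps. The same computation in plain C (a sketch of the emitted semantics, not code from the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Reverse the low 'bits' (16/32/64) of x, mirroring the T1/T2 loop above. */
    static uint64_t bswap_low(uint64_t x, int bits)
    {
            uint64_t acc = 0;
            int i;

            for (i = 0; i < bits / 8 - 1; i++) {
                    acc = (acc + (x & 0xff)) << 8;  /* T2 = (T2 + T1) << 8 */
                    x >>= 8;                        /* rd >>= 8            */
            }
            return acc + (x & 0xff);                /* final byte, no shift */
    }

    int main(void)
    {
            assert(bswap_low(0x1234, 16) == 0x3412);
            assert(bswap_low(0x12345678, 32) == 0x78563412);
            return 0;
    }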
rv_sub(rd, rd, RV_REG_T1) : + rv_subw(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + if (is_12b_int(imm)) { + emit(rv_andi(rd, rd, imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(rv_and(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + if (is_12b_int(imm)) { + emit(rv_ori(rd, rd, imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(rv_or(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + if (is_12b_int(imm)) { + emit(rv_xori(rd, rd, imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(rv_xor(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_K: + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? rv_mul(rd, rd, RV_REG_T1) : + rv_mulw(rd, rd, RV_REG_T1), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? rv_divu(rd, rd, RV_REG_T1) : + rv_divuw(rd, rd, RV_REG_T1), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? rv_remu(rd, rd, RV_REG_T1) : + rv_remuw(rd, rd, RV_REG_T1), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx); + break; + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + emit(is64 ? 
rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx); + break; + + /* JUMP off */ + case BPF_JMP | BPF_JA: + rvoff = rv_offset(i + off, i, ctx); + if (!is_21b_int(rvoff)) { + pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", + i, rvoff); + return -1; + } + + emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx); + break; + + /* IF (dst COND src) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_beq(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bltu(rs, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bltu(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bgeu(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bgeu(rs, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bne(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_sext_32_rd_rs(&rd, &rs, ctx); + emit(rv_blt(rs, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_sext_32_rd_rs(&rd, &rs, ctx); + emit(rv_blt(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_sext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bge(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_sext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bge(rs, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_and(RV_REG_T1, rd, rs), ctx); + emit(rv_bne(RV_REG_T1, RV_REG_ZERO, rvoff >> 1), ctx); + break; + + /* IF (dst COND imm) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_beq(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bltu(RV_REG_T1, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: 
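Every conditional-jump case below follows the same pattern: rv_offset() yields a byte distance between JITed instructions, is_13b_check()/is_21b_check() verify it fits the B-type (±4 KiB) or J-type (±1 MiB) immediate, and the value passed to the emitter is shifted right by one because the hardware encodes branch offsets in halfword units with bit 0 implicit. A small sketch of that bookkeeping (the instruction count is invented purely for illustration):

    #include <assert.h>

    int main(void)
    {
            int ninsns = 300;               /* RISC-V instructions to skip */
            int rvoff = ninsns * 4;         /* byte offset, as rv_offset() returns */

            assert(rvoff >= -(1 << 12) && rvoff < (1 << 12)); /* is_13b_int() */
            assert((rvoff >> 1) == ninsns * 2);  /* imm[12:1] field for rv_beq() etc. */
            return 0;
    }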
+ if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bltu(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bgeu(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bgeu(RV_REG_T1, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bne(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_sext_32_rd(&rd, ctx); + emit(rv_blt(RV_REG_T1, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_sext_32_rd(&rd, ctx); + emit(rv_blt(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_sext_32_rd(&rd, ctx); + emit(rv_bge(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_sext_32_rd(&rd, ctx); + emit(rv_bge(RV_REG_T1, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx); + emit(rv_bne(RV_REG_T1, RV_REG_ZERO, rvoff >> 1), ctx); + break; + + /* function call */ + case BPF_JMP | BPF_CALL: + { + bool fixed; + int i, ret; + u64 addr; + + mark_call(ctx); + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr, + &fixed); + if (ret < 0) + return ret; + if (fixed) { + emit_imm(RV_REG_T1, addr, ctx); + } else { + i = ctx->ninsns; + emit_imm(RV_REG_T1, addr, ctx); + for (i = ctx->ninsns - i; i < 8; i++) { + /* nop */ + emit(rv_addi(RV_REG_ZERO, RV_REG_ZERO, 0), + ctx); + } + } + emit(rv_jalr(RV_REG_RA, RV_REG_T1, 0), ctx); + rd = bpf_to_rv_reg(BPF_REG_0, ctx); + emit(rv_addi(rd, RV_REG_A0, 0), ctx); + break; + } + /* tail call */ + case BPF_JMP | BPF_TAIL_CALL: + if (emit_bpf_tail_call(i, ctx)) + return -1; + break; + + /* function return */ + case BPF_JMP | BPF_EXIT: + if (i == ctx->prog->len - 1) + break; + + rvoff = epilogue_offset(ctx); + if (is_21b_check(rvoff, i)) + return -1; + emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx); + break; + + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + { + struct bpf_insn insn1 = insn[1]; + u64 imm64; + + imm64 = (u64)insn1.imm << 32 | (u32)imm; + emit_imm(rd, imm64, ctx); + return 1; + } + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + if 
(is_12b_int(off)) { + emit(rv_lbu(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit(rv_lbu(rd, 0, RV_REG_T1), ctx); + break; + case BPF_LDX | BPF_MEM | BPF_H: + if (is_12b_int(off)) { + emit(rv_lhu(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit(rv_lhu(rd, 0, RV_REG_T1), ctx); + break; + case BPF_LDX | BPF_MEM | BPF_W: + if (is_12b_int(off)) { + emit(rv_lwu(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit(rv_lwu(rd, 0, RV_REG_T1), ctx); + break; + case BPF_LDX | BPF_MEM | BPF_DW: + if (is_12b_int(off)) { + emit(rv_ld(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit(rv_ld(rd, 0, RV_REG_T1), ctx); + break; + + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_B: + emit_imm(RV_REG_T1, imm, ctx); + if (is_12b_int(off)) { + emit(rv_sb(rd, off, RV_REG_T1), ctx); + break; + } + + emit_imm(RV_REG_T2, off, ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx); + break; + + case BPF_ST | BPF_MEM | BPF_H: + emit_imm(RV_REG_T1, imm, ctx); + if (is_12b_int(off)) { + emit(rv_sh(rd, off, RV_REG_T1), ctx); + break; + } + + emit_imm(RV_REG_T2, off, ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx); + break; + case BPF_ST | BPF_MEM | BPF_W: + emit_imm(RV_REG_T1, imm, ctx); + if (is_12b_int(off)) { + emit(rv_sw(rd, off, RV_REG_T1), ctx); + break; + } + + emit_imm(RV_REG_T2, off, ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit(rv_sw(RV_REG_T2, 0, RV_REG_T1), ctx); + break; + case BPF_ST | BPF_MEM | BPF_DW: + emit_imm(RV_REG_T1, imm, ctx); + if (is_12b_int(off)) { + emit(rv_sd(rd, off, RV_REG_T1), ctx); + break; + } + + emit_imm(RV_REG_T2, off, ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit(rv_sd(RV_REG_T2, 0, RV_REG_T1), ctx); + break; + + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_B: + if (is_12b_int(off)) { + emit(rv_sb(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit(rv_sb(RV_REG_T1, 0, rs), ctx); + break; + case BPF_STX | BPF_MEM | BPF_H: + if (is_12b_int(off)) { + emit(rv_sh(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit(rv_sh(RV_REG_T1, 0, rs), ctx); + break; + case BPF_STX | BPF_MEM | BPF_W: + if (is_12b_int(off)) { + emit(rv_sw(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit(rv_sw(RV_REG_T1, 0, rs), ctx); + break; + case BPF_STX | BPF_MEM | BPF_DW: + if (is_12b_int(off)) { + emit(rv_sd(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit(rv_sd(RV_REG_T1, 0, rs), ctx); + break; + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + if (off) { + if (is_12b_int(off)) { + emit(rv_addi(RV_REG_T1, rd, off), ctx); + } else { + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + } + + rd = RV_REG_T1; + } + + emit(BPF_SIZE(code) == BPF_W ? 
+ rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0) : + rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0), ctx); + break; + default: + pr_err("bpf-jit: unknown opcode %02x\n", code); + return -EINVAL; + } + + return 0; +} + +static void build_prologue(struct rv_jit_context *ctx) +{ + int stack_adjust = 0, store_offset, bpf_stack_adjust; + + if (seen_reg(RV_REG_RA, ctx)) + stack_adjust += 8; + stack_adjust += 8; /* RV_REG_FP */ + if (seen_reg(RV_REG_S1, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S2, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S3, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S4, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S5, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S6, ctx)) + stack_adjust += 8; + + stack_adjust = round_up(stack_adjust, 16); + bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); + stack_adjust += bpf_stack_adjust; + + store_offset = stack_adjust - 8; + + /* First instruction is always setting the tail-call-counter + * (TCC) register. This instruction is skipped for tail calls. + */ + emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx); + + emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx); + + if (seen_reg(RV_REG_RA, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_RA), ctx); + store_offset -= 8; + } + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_FP), ctx); + store_offset -= 8; + if (seen_reg(RV_REG_S1, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S1), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S2, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S2), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S3, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S3), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S4, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S4), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S5, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S5), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S6, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S6), ctx); + store_offset -= 8; + } + + emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx); + + if (bpf_stack_adjust) + emit(rv_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust), ctx); + + /* Program contains calls and tail calls, so RV_REG_TCC need + * to be saved across calls. 
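__build_epilogue() above restores the callee-saved registers in the same order, so the save and restore offsets stay in sync. For reference, a sketch of the frame build_prologue() sets up for a hypothetical program that uses RA, S1 and a 40-byte BPF stack (register usage and stack depth invented for illustration; the layout itself follows the emitted stores):

    /*
     *   old SP ->  +--------------------+  <- FP (= SP + stack_adjust)
     *              | saved RA           |
     *              | saved FP           |
     *              | saved S1           |
     *              | pad to 16 bytes    |
     *              +--------------------+  <- S5 (= SP + bpf_stack_adjust)
     *              | BPF stack, 48 B    |
     *   new SP ->  +--------------------+
     *
     * stack_adjust = round_up(3 * 8, 16) + round_up(40, 16) = 32 + 48 = 80
     */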
+ */ + if (seen_tail_call(ctx) && seen_call(ctx)) + emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx); + + ctx->stack_size = stack_adjust; +} + +static void build_epilogue(struct rv_jit_context *ctx) +{ + __build_epilogue(RV_REG_RA, ctx); +} + +static int build_body(struct rv_jit_context *ctx, bool extra_pass) +{ + const struct bpf_prog *prog = ctx->prog; + int i; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + ret = emit_insn(insn, ctx, extra_pass); + if (ret > 0) { + i++; + if (ctx->insns == NULL) + ctx->offset[i] = ctx->ninsns; + continue; + } + if (ctx->insns == NULL) + ctx->offset[i] = ctx->ninsns; + if (ret) + return ret; + } + return 0; +} + +static void bpf_fill_ill_insns(void *area, unsigned int size) +{ + memset(area, 0, size); +} + +static void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + bool tmp_blinded = false, extra_pass = false; + struct bpf_prog *tmp, *orig_prog = prog; + struct rv_jit_data *jit_data; + struct rv_jit_context *ctx; + unsigned int image_size; + + if (!prog->jit_requested) + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } + + jit_data = prog->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + prog = orig_prog; + goto out; + } + prog->aux->jit_data = jit_data; + } + + ctx = &jit_data->ctx; + + if (ctx->offset) { + extra_pass = true; + image_size = sizeof(u32) * ctx->ninsns; + goto skip_init_ctx; + } + + ctx->prog = prog; + ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); + if (!ctx->offset) { + prog = orig_prog; + goto out_offset; + } + + /* First pass generates the ctx->offset, but does not emit an image. */ + if (build_body(ctx, extra_pass)) { + prog = orig_prog; + goto out_offset; + } + build_prologue(ctx); + ctx->epilogue_offset = ctx->ninsns; + build_epilogue(ctx); + + /* Allocate image, now that we know the size. */ + image_size = sizeof(u32) * ctx->ninsns; + jit_data->header = bpf_jit_binary_alloc(image_size, &jit_data->image, + sizeof(u32), + bpf_fill_ill_insns); + if (!jit_data->header) { + prog = orig_prog; + goto out_offset; + } + + /* Second, real pass, that acutally emits the image. */ + ctx->insns = (u32 *)jit_data->image; +skip_init_ctx: + ctx->ninsns = 0; + + build_prologue(ctx); + if (build_body(ctx, extra_pass)) { + bpf_jit_binary_free(jit_data->header); + prog = orig_prog; + goto out_offset; + } + build_epilogue(ctx); + + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, image_size, 2, ctx->insns); + + prog->bpf_func = (void *)ctx->insns; + prog->jited = 1; + prog->jited_len = image_size; + + bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); + + if (!prog->is_func || extra_pass) { +out_offset: + kfree(ctx->offset); + kfree(jit_data); + prog->aux->jit_data = NULL; + } +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? + tmp : orig_prog); + return prog; +} diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c index d592e0d90d9f..f902215e9cd9 100644 --- a/arch/s390/boot/als.c +++ b/arch/s390/boot/als.c @@ -7,6 +7,7 @@ #include <asm/facility.h> #include <asm/lowcore.h> #include <asm/sclp.h> +#include "boot.h" /* * The code within this file will be called very early. 
It may _not_ @@ -58,7 +59,7 @@ static void u16_to_decimal(char *str, u16 val) *str = '\0'; } -static void print_missing_facilities(void) +void print_missing_facilities(void) { static char als_str[80] = "Missing facilities: "; unsigned long val; @@ -90,7 +91,6 @@ static void print_missing_facilities(void) } strcat(als_str, "\n"); sclp_early_printk(als_str); - sclp_early_printk("See Principles of Operations for facility bits\n"); } static void facility_mismatch(void) @@ -98,6 +98,7 @@ static void facility_mismatch(void) sclp_early_printk("The Linux kernel requires more recent processor hardware\n"); print_machine_type(); print_missing_facilities(); + sclp_early_printk("See Principles of Operations for facility bits\n"); disabled_wait(0x8badcccc); } @@ -105,20 +106,7 @@ void verify_facilities(void) { int i; - for (i = 0; i < ARRAY_SIZE(S390_lowcore.stfle_fac_list); i++) - S390_lowcore.stfle_fac_list[i] = 0; - asm volatile( - " stfl 0(0)\n" - : "=m" (S390_lowcore.stfl_fac_list)); - S390_lowcore.stfle_fac_list[0] = (u64)S390_lowcore.stfl_fac_list << 32; - if (S390_lowcore.stfl_fac_list & 0x01000000) { - register unsigned long reg0 asm("0") = ARRAY_SIZE(als) - 1; - - asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */ - : "+d" (reg0) - : "a" (&S390_lowcore.stfle_fac_list) - : "memory", "cc"); - } + __stfle(S390_lowcore.stfle_fac_list, ARRAY_SIZE(S390_lowcore.stfle_fac_list)); for (i = 0; i < ARRAY_SIZE(als); i++) { if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i]) facility_mismatch(); diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h index fc41e2277ea8..82bc06346e05 100644 --- a/arch/s390/boot/boot.h +++ b/arch/s390/boot/boot.h @@ -6,6 +6,8 @@ void startup_kernel(void); void detect_memory(void); void store_ipl_parmblock(void); void setup_boot_command_line(void); +void parse_boot_command_line(void); void setup_memory_end(void); +void print_missing_facilities(void); #endif /* BOOT_BOOT_H */ diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index 9dab596be98e..36beb56de021 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/kernel.h> #include <linux/init.h> #include <linux/ctype.h> #include <asm/ebcdic.h> #include <asm/sclp.h> #include <asm/sections.h> #include <asm/boot_data.h> +#include <asm/facility.h> #include "boot.h" char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; @@ -143,8 +145,66 @@ void setup_boot_command_line(void) append_ipl_block_parm(); } +static void modify_facility(unsigned long nr, bool clear) +{ + if (clear) + __clear_facility(nr, S390_lowcore.stfle_fac_list); + else + __set_facility(nr, S390_lowcore.stfle_fac_list); +} + +static void check_cleared_facilities(void) +{ + unsigned long als[] = { FACILITIES_ALS }; + int i; + + for (i = 0; i < ARRAY_SIZE(als); i++) { + if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i]) { + sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n"); + print_missing_facilities(); + break; + } + } +} + +static void modify_fac_list(char *str) +{ + unsigned long val, endval; + char *endp; + bool clear; + + while (*str) { + clear = false; + if (*str == '!') { + clear = true; + str++; + } + val = simple_strtoull(str, &endp, 0); + if (str == endp) + break; + str = endp; + if (*str == '-') { + str++; + endval = simple_strtoull(str, &endp, 0); + if (str == endp) + break; + str = endp; + while (val <= endval) { + modify_facility(val, clear); + val++; + } + } else { + modify_facility(val, 
clear); + } + if (*str != ',') + break; + str++; + } + check_cleared_facilities(); +} + static char command_line_buf[COMMAND_LINE_SIZE] __section(.data); -static void parse_mem_opt(void) +void parse_boot_command_line(void) { char *param, *val; bool enabled; @@ -165,12 +225,14 @@ static void parse_mem_opt(void) if (!rc && !enabled) noexec_disabled = 1; } + + if (!strcmp(param, "facilities")) + modify_fac_list(val); } } void setup_memory_end(void) { - parse_mem_opt(); #ifdef CONFIG_CRASH_DUMP if (!OLDMEM_BASE && early_ipl_block_valid && early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP && diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 4d441317cdeb..bdfc5549a299 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -53,6 +53,7 @@ void startup_kernel(void) sclp_early_read_info(); store_ipl_parmblock(); setup_boot_command_line(); + parse_boot_command_line(); setup_memory_end(); detect_memory(); if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) { diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c index 25aca07898ba..b11e8108773a 100644 --- a/arch/s390/boot/string.c +++ b/arch/s390/boot/string.c @@ -2,6 +2,7 @@ #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/errno.h> +#undef CONFIG_KASAN #include "../lib/string.c" int strncmp(const char *cs, const char *ct, size_t count) diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 5346b5a80bb6..0d15383d0ff1 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -38,7 +38,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key, /* check for weak keys */ if (!des_ekey(tmp, key) && - (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -228,7 +228,7 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key, if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], DES_KEY_SIZE)) && - (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h index 52348e0a812e..05f3f9aee5fc 100644 --- a/arch/s390/hypfs/hypfs.h +++ b/arch/s390/hypfs/hypfs.h @@ -43,7 +43,7 @@ int hypfs_diag0c_init(void); void hypfs_diag0c_exit(void); /* Set Partition-Resource Parameter */ -int hypfs_sprp_init(void); +void hypfs_sprp_init(void); void hypfs_sprp_exit(void); /* debugfs interface */ @@ -69,9 +69,9 @@ struct hypfs_dbfs_file { struct dentry *dentry; }; -extern int hypfs_dbfs_init(void); +extern void hypfs_dbfs_init(void); extern void hypfs_dbfs_exit(void); -extern int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df); +extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df); extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df); #endif /* _HYPFS_H_ */ diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c index b9bdf5c1918e..f4c7dbfaf8ee 100644 --- a/arch/s390/hypfs/hypfs_dbfs.c +++ b/arch/s390/hypfs/hypfs_dbfs.c @@ -78,14 +78,11 @@ static const struct file_operations dbfs_ops = { .unlocked_ioctl = dbfs_ioctl, }; -int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df) +void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df) { df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, &dbfs_ops); - if (IS_ERR(df->dentry)) - return PTR_ERR(df->dentry); mutex_init(&df->lock); - return 
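The new early parameter parsed above lets the facility list reported by stfle be overridden before the decompressed kernel ever looks at it: each comma-separated entry is a bit number or an inclusive range, and a leading '!' clears instead of sets. The bit numbers below are made up purely to illustrate the grammar accepted by modify_fac_list():

    facilities=!11            clear facility bit 11
    facilities=76,77          force-set bits 76 and 77
    facilities=!130-134,57    clear bits 130..134, set bit 57

If an entry clears a facility that the architecture level set requires, check_cleared_facilities() only prints a warning and lists the missing facilities; the boot continues.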
0; } void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df) @@ -93,10 +90,9 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df) debugfs_remove(df->dentry); } -int hypfs_dbfs_init(void) +void hypfs_dbfs_init(void) { dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); - return PTR_ERR_OR_ZERO(dbfs_dir); } void hypfs_dbfs_exit(void) diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 3452e18bb1ca..f0bc4dc3e9bf 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -440,11 +440,10 @@ __init int hypfs_diag_init(void) pr_err("The hardware system does not support hypfs\n"); return -ENODATA; } - if (diag204_info_type == DIAG204_INFO_EXT) { - rc = hypfs_dbfs_create_file(&dbfs_file_d204); - if (rc) - return rc; - } + + if (diag204_info_type == DIAG204_INFO_EXT) + hypfs_dbfs_create_file(&dbfs_file_d204); + if (MACHINE_IS_LPAR) { rc = diag224_get_name_table(); if (rc) { diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c index cebf05150cc1..72e3140fafb5 100644 --- a/arch/s390/hypfs/hypfs_diag0c.c +++ b/arch/s390/hypfs/hypfs_diag0c.c @@ -54,8 +54,7 @@ static void *diag0c_store(unsigned int *count) if (!cpu_vec) goto fail_put_online_cpus; /* Note: Diag 0c needs 8 byte alignment and real storage */ - diag0c_data = kzalloc(sizeof(struct hypfs_diag0c_hdr) + - cpu_count * sizeof(struct hypfs_diag0c_entry), + diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count), GFP_KERNEL | GFP_DMA); if (!diag0c_data) goto fail_kfree_cpu_vec; @@ -125,7 +124,8 @@ int __init hypfs_diag0c_init(void) { if (!MACHINE_IS_VM) return 0; - return hypfs_dbfs_create_file(&dbfs_file_0c); + hypfs_dbfs_create_file(&dbfs_file_0c); + return 0; } /* diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c index 601b70786dc8..7d9fb496d155 100644 --- a/arch/s390/hypfs/hypfs_sprp.c +++ b/arch/s390/hypfs/hypfs_sprp.c @@ -137,11 +137,11 @@ static struct hypfs_dbfs_file hypfs_sprp_file = { .unlocked_ioctl = hypfs_sprp_ioctl, }; -int hypfs_sprp_init(void) +void hypfs_sprp_init(void) { if (!sclp.has_sprp) - return 0; - return hypfs_dbfs_create_file(&hypfs_sprp_file); + return; + hypfs_dbfs_create_file(&hypfs_sprp_file); } void hypfs_sprp_exit(void) diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index c4b7b681e055..42f2375c203e 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -279,7 +279,8 @@ int hypfs_vm_init(void) guest_query = local_guest; else return -EACCES; - return hypfs_dbfs_create_file(&dbfs_file_2fc); + hypfs_dbfs_create_file(&dbfs_file_2fc); + return 0; } void hypfs_vm_exit(void) diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index c681329fdeec..ccad1398abd4 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -456,9 +456,8 @@ static int __init hypfs_init(void) { int rc; - rc = hypfs_dbfs_init(); - if (rc) - return rc; + hypfs_dbfs_init(); + if (hypfs_diag_init()) { rc = -ENODATA; goto fail_dbfs_exit; @@ -467,10 +466,7 @@ static int __init hypfs_init(void) rc = -ENODATA; goto fail_hypfs_diag_exit; } - if (hypfs_sprp_init()) { - rc = -ENODATA; - goto fail_hypfs_vm_exit; - } + hypfs_sprp_init(); if (hypfs_diag0c_init()) { rc = -ENODATA; goto fail_hypfs_sprp_exit; @@ -489,7 +485,6 @@ fail_hypfs_diag0c_exit: hypfs_diag0c_exit(); fail_hypfs_sprp_exit: hypfs_sprp_exit(); -fail_hypfs_vm_exit: hypfs_vm_exit(); fail_hypfs_diag_exit: hypfs_diag_exit(); diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h new file mode 100644 index 
000000000000..649b9fc60685 --- /dev/null +++ b/arch/s390/include/asm/cpu_mcf.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Counter facility support definitions for the Linux perf + * + * Copyright IBM Corp. 2019 + * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com> + */ +#ifndef _ASM_S390_CPU_MCF_H +#define _ASM_S390_CPU_MCF_H + +#include <linux/perf_event.h> +#include <asm/cpu_mf.h> + +enum cpumf_ctr_set { + CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */ + CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */ + CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */ + CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */ + CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */ + + /* Maximum number of counter sets */ + CPUMF_CTR_SET_MAX, +}; + +#define CPUMF_LCCTL_ENABLE_SHIFT 16 +#define CPUMF_LCCTL_ACTCTL_SHIFT 0 +static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = { + [CPUMF_CTR_SET_BASIC] = 0x02, + [CPUMF_CTR_SET_USER] = 0x04, + [CPUMF_CTR_SET_CRYPTO] = 0x08, + [CPUMF_CTR_SET_EXT] = 0x01, + [CPUMF_CTR_SET_MT_DIAG] = 0x20, +}; + +static inline void ctr_set_enable(u64 *state, int ctr_set) +{ + *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT; +} +static inline void ctr_set_disable(u64 *state, int ctr_set) +{ + *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT); +} +static inline void ctr_set_start(u64 *state, int ctr_set) +{ + *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT; +} +static inline void ctr_set_stop(u64 *state, int ctr_set) +{ + *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT); +} + +static inline void ctr_set_multiple_enable(u64 *state, u64 ctrsets) +{ + *state |= ctrsets << CPUMF_LCCTL_ENABLE_SHIFT; +} + +static inline void ctr_set_multiple_disable(u64 *state, u64 ctrsets) +{ + *state &= ~(ctrsets << CPUMF_LCCTL_ENABLE_SHIFT); +} + +static inline void ctr_set_multiple_start(u64 *state, u64 ctrsets) +{ + *state |= ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT; +} + +static inline void ctr_set_multiple_stop(u64 *state, u64 ctrsets) +{ + *state &= ~(ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT); +} + +static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest) +{ + switch (set) { + case CPUMF_CTR_SET_BASIC: + return stcctm(BASIC, range, dest); + case CPUMF_CTR_SET_USER: + return stcctm(PROBLEM_STATE, range, dest); + case CPUMF_CTR_SET_CRYPTO: + return stcctm(CRYPTO_ACTIVITY, range, dest); + case CPUMF_CTR_SET_EXT: + return stcctm(EXTENDED, range, dest); + case CPUMF_CTR_SET_MT_DIAG: + return stcctm(MT_DIAG_CLEARING, range, dest); + case CPUMF_CTR_SET_MAX: + return 3; + } + return 3; +} + +struct cpu_cf_events { + struct cpumf_ctr_info info; + atomic_t ctr_set[CPUMF_CTR_SET_MAX]; + atomic64_t alert; + u64 state, tx_state; + unsigned int flags; + unsigned int txn_flags; +}; +DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events); + +bool kernel_cpumcf_avail(void); +int __kernel_cpumcf_begin(void); +unsigned long kernel_cpumcf_alert(int clear); +void __kernel_cpumcf_end(void); + +static inline int kernel_cpumcf_begin(void) +{ + if (!cpum_cf_avail()) + return -ENODEV; + + preempt_disable(); + return __kernel_cpumcf_begin(); +} +static inline void kernel_cpumcf_end(void) +{ + __kernel_cpumcf_end(); + preempt_enable(); +} + +/* Return true if store counter set multiple instruction is available */ +static inline int stccm_avail(void) +{ + return test_facility(142); +} + +#endif /* _ASM_S390_CPU_MCF_H */ diff --git a/arch/s390/include/asm/cpu_mf-insn.h b/arch/s390/include/asm/cpu_mf-insn.h new file mode 
100644 index 000000000000..a68b362e0964 --- /dev/null +++ b/arch/s390/include/asm/cpu_mf-insn.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Support for CPU-MF instructions + * + * Copyright IBM Corp. 2019 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ +#ifndef _ASM_S390_CPU_MF_INSN_H +#define _ASM_S390_CPU_MF_INSN_H + +#ifdef __ASSEMBLY__ + +/* Macro to generate the STCCTM instruction with a customized + * M3 field designating the counter set. + */ +.macro STCCTM r1 m3 db2 + .insn rsy,0xeb0000000017,\r1,\m3 & 0xf,\db2 +.endm + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index bf2cbff926ef..ae3e3221d4b5 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -12,6 +12,8 @@ #include <linux/errno.h> #include <asm/facility.h> +asm(".include \"asm/cpu_mf-insn.h\"\n"); + #define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */ #define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */ #define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */ @@ -209,17 +211,25 @@ static inline int ecctr(u64 ctr, u64 *val) return cc; } -/* Store CPU counter multiple for the MT utilization counter set */ -static inline int stcctm5(u64 num, u64 *val) +/* Store CPU counter multiple for a particular counter set */ +enum stcctm_ctr_set { + EXTENDED = 0, + BASIC = 1, + PROBLEM_STATE = 2, + CRYPTO_ACTIVITY = 3, + MT_DIAG = 5, + MT_DIAG_CLEARING = 9, /* clears loss-of-MT-ctr-data alert */ +}; +static inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest) { int cc; asm volatile ( - " .insn rsy,0xeb0000000017,%2,5,%1\n" + " STCCTM %2,%3,%1\n" " ipm %0\n" " srl %0,28\n" : "=d" (cc) - : "Q" (*val), "d" (num) + : "Q" (*dest), "d" (range), "i" (set) : "cc", "memory"); return cc; } diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index cdbaad50c7c7..19562be22b7e 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -32,6 +32,7 @@ enum diag_stat_enum { DIAG_STAT_X2FC, DIAG_STAT_X304, DIAG_STAT_X308, + DIAG_STAT_X318, DIAG_STAT_X500, NR_DIAG_STAT }; @@ -293,6 +294,17 @@ struct diag26c_mac_resp { u8 res[2]; } __aligned(8); +#define CPNC_LINUX 0x4 +union diag318_info { + unsigned long val; + struct { + unsigned int cpnc : 8; + unsigned int cpvc_linux : 24; + unsigned char cpvc_distro[3]; + unsigned char zero; + }; +}; + int diag204(unsigned long subcode, unsigned long size, void *addr); int diag224(void *ptr); int diag26c(void *req, void *resp, enum diag26c_sc subcode); diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 8ea270fdc7fb..5a3c95b11952 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -81,5 +81,30 @@ static inline void ftrace_generate_call_insn(struct ftrace_insn *insn, #endif } +/* + * Even though the system call numbers are identical for s390/s390x a + * different system call table is used for compat tasks. This may lead + * to e.g. incorrect or missing trace event sysfs files. + * Therefore simply do not trace compat system calls at all. + * See kernel/trace/trace_syscalls.c. 
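The new cpu_mcf.h helpers above keep one u64 per CPU that mirrors the counter-facility control: each counter set contributes a fixed flag from cpumf_ctr_ctl[], placed either at the enable position (shift 16) or the activation position (shift 0), and ctr_stcctm() translates the set into the M3 value of the reworked stcctm(). A small stand-alone sketch with the basic counter set (the driver code is illustrative only; the constants are taken from the definitions above):

    #include <assert.h>
    #include <stdint.h>

    #define CPUMF_LCCTL_ENABLE_SHIFT 16
    #define CPUMF_LCCTL_ACTCTL_SHIFT 0

    int main(void)
    {
            uint64_t state = 0;

            /* ctr_set_enable() + ctr_set_start() for the basic set (flag 0x02) */
            state |= 0x02ULL << CPUMF_LCCTL_ENABLE_SHIFT;
            state |= 0x02ULL << CPUMF_LCCTL_ACTCTL_SHIFT;
            assert(state == 0x20002);

            /* ctr_stcctm(CPUMF_CTR_SET_USER, ...) issues STCCTM with
             * M3 = PROBLEM_STATE = 2, selecting the problem-state set. */
            return 0;
    }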
+ */ +#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS +static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) +{ + return is_compat_task(); +} + +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + /* + * Skip __s390_ and __s390x_ prefix - due to compat wrappers + * and aliasing some symbols of 64 bit system call functions + * may get the __s390_ prefix instead of the __s390x_ prefix. + */ + return !strcmp(sym + 7, name) || !strcmp(sym + 8, name); +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_FTRACE_H */ diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index e2d3e6c43395..e548ec1ec12c 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -10,6 +10,12 @@ #define JUMP_LABEL_NOP_SIZE 6 #define JUMP_LABEL_NOP_OFFSET 2 +#if __GNUC__ < 9 +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "X" +#else +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "jdd" +#endif + /* * We use a brcl 0,2 instruction for jump labels at compile time so it * can be easily distinguished from a hotpatch generated instruction. @@ -20,9 +26,9 @@ static inline bool arch_static_branch(struct static_key *key, bool branch) ".pushsection __jump_table,\"aw\"\n" ".balign 8\n" ".long 0b-.,%l[label]-.\n" - ".quad %0-.\n" + ".quad %0+%1-.\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: return true; @@ -34,9 +40,9 @@ static inline bool arch_static_branch_jump(struct static_key *key, bool branch) ".pushsection __jump_table,\"aw\"\n" ".balign 8\n" ".long 0b-.,%l[label]-.\n" - ".quad %0-.\n" + ".quad %0+%1-.\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: return true; diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 10fe982f2b4b..4e0efebc56a9 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -148,7 +148,6 @@ struct zpci_dev { enum pci_bus_speed max_bus_speed; struct dentry *debugfs_dev; - struct dentry *debugfs_perf; struct s390_domain *s390_domain; /* s390 IOMMU domain data */ }; diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index b9c0e361748b..560d8f766ddf 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -12,7 +12,6 @@ #include <linux/perf_event.h> #include <linux/device.h> -#include <asm/cpu_mf.h> /* Per-CPU flags for PMU states */ #define PMU_F_RESERVED 0x1000 @@ -55,6 +54,7 @@ struct perf_sf_sde_regs { #define PERF_CPUM_SF_MAX_CTR 2 #define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ #define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ +#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */ #define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ #define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ #define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ diff --git a/arch/s390/include/asm/pnet.h b/arch/s390/include/asm/pnet.h index 6e278584f8f1..5739276b458d 100644 --- a/arch/s390/include/asm/pnet.h +++ b/arch/s390/include/asm/pnet.h @@ -11,13 +11,5 @@ #include <linux/device.h> #include <linux/types.h> -#define PNETIDS_LEN 64 /* Total utility string length in bytes - * to cover up to 4 PNETIDs of 16 bytes - * for up to 4 device ports - */ -#define MAX_PNETID_LEN 
16 /* Max.length of a single port PNETID */ -#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN) - /* Max. # of ports with a PNETID */ - int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid); #endif /* _ASM_S390_PNET_H */ diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index d46edde7e458..db5ef22c46e4 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -361,8 +361,8 @@ struct qdio_initialize { unsigned long); int scan_threshold; unsigned long int_parm; - void **input_sbal_addr_array; - void **output_sbal_addr_array; + struct qdio_buffer **input_sbal_addr_array; + struct qdio_buffer **output_sbal_addr_array; struct qdio_outbuf_state *output_sbal_state_array; }; diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 0cd4bda85eb1..ef4c9dec06a4 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -78,6 +78,7 @@ struct sclp_info { unsigned char has_skey : 1; unsigned char has_kss : 1; unsigned char has_gisaf : 1; + unsigned char has_diag318 : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h index 116cc15a4b8a..70d87db54e62 100644 --- a/arch/s390/include/asm/string.h +++ b/arch/s390/include/asm/string.h @@ -12,15 +12,21 @@ #include <linux/types.h> #endif -#define __HAVE_ARCH_MEMCHR /* inline & arch function */ -#define __HAVE_ARCH_MEMCMP /* arch function */ #define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */ #define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */ -#define __HAVE_ARCH_MEMSCAN /* inline & arch function */ #define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */ #define __HAVE_ARCH_MEMSET16 /* arch function */ #define __HAVE_ARCH_MEMSET32 /* arch function */ #define __HAVE_ARCH_MEMSET64 /* arch function */ + +void *memcpy(void *dest, const void *src, size_t n); +void *memset(void *s, int c, size_t n); +void *memmove(void *dest, const void *src, size_t n); + +#ifndef CONFIG_KASAN +#define __HAVE_ARCH_MEMCHR /* inline & arch function */ +#define __HAVE_ARCH_MEMCMP /* arch function */ +#define __HAVE_ARCH_MEMSCAN /* inline & arch function */ #define __HAVE_ARCH_STRCAT /* inline & arch function */ #define __HAVE_ARCH_STRCMP /* arch function */ #define __HAVE_ARCH_STRCPY /* inline & arch function */ @@ -35,9 +41,6 @@ /* Prototypes for non-inlined arch strings functions. 
*/ int memcmp(const void *s1, const void *s2, size_t n); -void *memcpy(void *dest, const void *src, size_t n); -void *memset(void *s, int c, size_t n); -void *memmove(void *dest, const void *src, size_t n); int strcmp(const char *s1, const char *s2); size_t strlcat(char *dest, const char *src, size_t n); size_t strlcpy(char *dest, const char *src, size_t size); @@ -45,6 +48,7 @@ char *strncat(char *dest, const char *src, size_t n); char *strncpy(char *dest, const char *src, size_t n); char *strrchr(const char *s, int c); char *strstr(const char *s1, const char *s2); +#endif /* !CONFIG_KASAN */ #undef __HAVE_ARCH_STRCHR #undef __HAVE_ARCH_STRNCHR @@ -95,6 +99,7 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t count) #if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY)) +#ifdef __HAVE_ARCH_MEMCHR static inline void *memchr(const void * s, int c, size_t n) { register int r0 asm("0") = (char) c; @@ -109,7 +114,9 @@ static inline void *memchr(const void * s, int c, size_t n) : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); return (void *) ret; } +#endif +#ifdef __HAVE_ARCH_MEMSCAN static inline void *memscan(void *s, int c, size_t n) { register int r0 asm("0") = (char) c; @@ -121,7 +128,9 @@ static inline void *memscan(void *s, int c, size_t n) : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); return (void *) ret; } +#endif +#ifdef __HAVE_ARCH_STRCAT static inline char *strcat(char *dst, const char *src) { register int r0 asm("0") = 0; @@ -137,7 +146,9 @@ static inline char *strcat(char *dst, const char *src) : "d" (r0), "0" (0) : "cc", "memory" ); return ret; } +#endif +#ifdef __HAVE_ARCH_STRCPY static inline char *strcpy(char *dst, const char *src) { register int r0 asm("0") = 0; @@ -150,7 +161,9 @@ static inline char *strcpy(char *dst, const char *src) : "cc", "memory"); return ret; } +#endif +#ifdef __HAVE_ARCH_STRLEN static inline size_t strlen(const char *s) { register unsigned long r0 asm("0") = 0; @@ -162,7 +175,9 @@ static inline size_t strlen(const char *s) : "+d" (r0), "+a" (tmp) : : "cc", "memory"); return r0 - (unsigned long) s; } +#endif +#ifdef __HAVE_ARCH_STRNLEN static inline size_t strnlen(const char * s, size_t n) { register int r0 asm("0") = 0; @@ -175,6 +190,7 @@ static inline size_t strnlen(const char * s, size_t n) : "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory"); return end - s; } +#endif #else /* IN_ARCH_STRING_C */ void *memchr(const void * s, int c, size_t n); void *memscan(void *s, int c, size_t n); diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index bd2545977ad3..007fcb9aeeb8 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -31,7 +31,6 @@ #define USER_DS (2) #define USER_DS_SACF (3) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.mm_segment) #define segment_eq(a,b) (((a) & 2) == ((b) & 2)) diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h index 266a72320e05..0c05a673811c 100644 --- a/arch/s390/include/asm/vx-insn.h +++ b/arch/s390/include/asm/vx-insn.h @@ -363,23 +363,23 @@ .endm /* VECTOR LOAD MULTIPLE */ -.macro VLM vfrom, vto, disp, base +.macro VLM vfrom, vto, disp, base, hint=3 VX_NUM v1, \vfrom VX_NUM v3, \vto GR_NUM b2, \base /* Base register */ .word 0xE700 | ((v1&15) << 4) | (v3&15) .word (b2 << 12) | (\disp) - MRXBOPC 0, 0x36, v1, v3 + MRXBOPC \hint, 0x36, v1, v3 .endm /* VECTOR STORE MULTIPLE */ -.macro VSTM vfrom, vto, disp, base +.macro VSTM vfrom, vto, disp, base, hint=3 
VX_NUM v1, \vfrom VX_NUM v3, \vto GR_NUM b2, \base /* Base register */ .word 0xE700 | ((v1&15) << 4) | (v3&15) .word (b2 << 12) | (\disp) - MRXBOPC 0, 0x3E, v1, v3 + MRXBOPC \hint, 0x3E, v1, v3 .endm /* VECTOR PERMUTE */ diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index da3e0d48abbc..6b0f30b14642 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -3,3 +3,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generated-y += unistd_64.h +generic-y += socket.h diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h deleted file mode 100644 index 39d901476ee5..000000000000 --- a/arch/s390/include/uapi/asm/socket.h +++ /dev/null @@ -1,117 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * S390 version - * - * Derived from "include/asm-i386/socket.h" - */ - -#ifndef _ASM_SOCKET_H -#define _ASM_SOCKET_H - -#include <asm/sockios.h> - -/* For setsockopt(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -#define SO_REUSEPORT 15 -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 -#define SO_GET_FILTER SO_ATTACH_FILTER - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 - -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#define SO_MARK 36 - -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - -#define SO_PROTOCOL 38 -#define SO_DOMAIN 39 - -#define SO_RXQ_OVFL 40 - -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS -#define SO_PEEK_OFF 42 - -/* Instruct lower device to use last 4-bytes of skb data as FCS */ -#define SO_NOFCS 43 - -#define SO_LOCK_FILTER 44 - -#define SO_SELECT_ERR_QUEUE 45 - -#define SO_BUSY_POLL 46 - -#define SO_MAX_PACING_RATE 47 - -#define SO_BPF_EXTENSIONS 48 - -#define SO_INCOMING_CPU 49 - -#define SO_ATTACH_BPF 50 -#define SO_DETACH_BPF SO_DETACH_FILTER - -#define SO_ATTACH_REUSEPORT_CBPF 51 -#define SO_ATTACH_REUSEPORT_EBPF 52 - -#define SO_CNX_ADVICE 53 - -#define SCM_TIMESTAMPING_OPT_STATS 54 - -#define SO_MEMINFO 55 - -#define SO_INCOMING_NAPI_ID 56 - -#define SO_COOKIE 57 - -#define SCM_TIMESTAMPING_PKTINFO 58 - -#define SO_PEERGROUPS 59 - -#define SO_ZEROCOPY 60 - -#define SO_TXTIME 61 -#define SCM_TXTIME SO_TXTIME - -#endif /* _ASM_SOCKET_H */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 4e188a7be501..8a62c7f72e1b 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -77,8 +77,10 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o -obj-$(CONFIG_PERF_EVENTS) += 
perf_event.o perf_cpum_cf.o perf_cpum_sf.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o +obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o +obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_diag.o obj-$(CONFIG_TRACEPOINTS) += trace.o diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index d374f9b218b4..0ebf08c3b35e 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -1056,12 +1056,6 @@ int debug_register_view(debug_info_t *id, struct debug_view *view) mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH); pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, id, &debug_file_ops); - if (!pde) { - pr_err("Registering view %s/%s failed due to out of " - "memory\n", id->name, view->name); - rc = -1; - goto out; - } spin_lock_irqsave(&id->lock, flags); for (i = 0; i < DEBUG_MAX_VIEWS; i++) { if (!id->views[i]) diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index 53a5316cc4b7..7edaa733a77f 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -45,6 +45,7 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = { [DIAG_STAT_X2FC] = { .code = 0x2fc, .name = "Guest Performance Data" }, [DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" }, [DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" }, + [DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" }, [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" }, }; diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index a8c7789b246b..d6edf45f93b9 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -164,8 +164,6 @@ static noinline __init void setup_lowcore_early(void) static noinline __init void setup_facility_list(void) { - stfle(S390_lowcore.stfle_fac_list, - ARRAY_SIZE(S390_lowcore.stfle_fac_list)); memcpy(S390_lowcore.alt_stfle_fac_list, S390_lowcore.stfle_fac_list, sizeof(S390_lowcore.alt_stfle_fac_list)); diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 57bba24b1c27..56491e636eab 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -27,8 +27,6 @@ ENTRY(startup_continue) mvc 0(16,%r1),__LC_BOOT_CLOCK larl %r13,.LPG1 # get base lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers - lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area - # move IPL device to lowcore larl %r0,boot_vdso_data stg %r0,__LC_VDSO_PER_CPU # diff --git a/arch/s390/kernel/kdebugfs.c b/arch/s390/kernel/kdebugfs.c index 2c46bd6c6fd2..33130c7daf55 100644 --- a/arch/s390/kernel/kdebugfs.c +++ b/arch/s390/kernel/kdebugfs.c @@ -9,8 +9,6 @@ EXPORT_SYMBOL(arch_debugfs_dir); static int __init arch_kdebugfs_init(void) { arch_debugfs_dir = debugfs_create_dir("s390", NULL); - if (IS_ERR(arch_debugfs_dir)) - arch_debugfs_dir = NULL; return 0; } postcore_initcall(arch_kdebugfs_init); diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index d5523adeddbf..e1c54d28713a 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -10,73 +10,11 @@ #include <linux/kernel.h> #include <linux/kernel_stat.h> -#include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/init.h> #include <linux/export.h> -#include <asm/ctl_reg.h> -#include <asm/irq.h> -#include <asm/cpu_mf.h> - -enum cpumf_ctr_set { - CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */ - CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */ - 
CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */ - CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */ - CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */ - - /* Maximum number of counter sets */ - CPUMF_CTR_SET_MAX, -}; - -#define CPUMF_LCCTL_ENABLE_SHIFT 16 -#define CPUMF_LCCTL_ACTCTL_SHIFT 0 -static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = { - [CPUMF_CTR_SET_BASIC] = 0x02, - [CPUMF_CTR_SET_USER] = 0x04, - [CPUMF_CTR_SET_CRYPTO] = 0x08, - [CPUMF_CTR_SET_EXT] = 0x01, - [CPUMF_CTR_SET_MT_DIAG] = 0x20, -}; - -static void ctr_set_enable(u64 *state, int ctr_set) -{ - *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT; -} -static void ctr_set_disable(u64 *state, int ctr_set) -{ - *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT); -} -static void ctr_set_start(u64 *state, int ctr_set) -{ - *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT; -} -static void ctr_set_stop(u64 *state, int ctr_set) -{ - *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT); -} - -/* Local CPUMF event structure */ -struct cpu_hw_events { - struct cpumf_ctr_info info; - atomic_t ctr_set[CPUMF_CTR_SET_MAX]; - u64 state, tx_state; - unsigned int flags; - unsigned int txn_flags; -}; -static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { - .ctr_set = { - [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0), - [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0), - [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0), - [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0), - [CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0), - }, - .state = 0, - .flags = 0, - .txn_flags = 0, -}; +#include <asm/cpu_mcf.h> static enum cpumf_ctr_set get_counter_set(u64 event) { @@ -98,11 +36,11 @@ static enum cpumf_ctr_set get_counter_set(u64 event) static int validate_ctr_version(const struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuhw; + struct cpu_cf_events *cpuhw; int err = 0; u16 mtdiag_ctl; - cpuhw = &get_cpu_var(cpu_hw_events); + cpuhw = &get_cpu_var(cpu_cf_events); /* check required version for counter sets */ switch (hwc->config_base) { @@ -135,7 +73,7 @@ static int validate_ctr_version(const struct hw_perf_event *hwc) * Thus, the counters can only be used if SMT is on and the * counter set is enabled and active. */ - mtdiag_ctl = cpumf_state_ctl[CPUMF_CTR_SET_MT_DIAG]; + mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]; if (!((cpuhw->info.auth_ctl & mtdiag_ctl) && (cpuhw->info.enable_ctl & mtdiag_ctl) && (cpuhw->info.act_ctl & mtdiag_ctl))) @@ -143,28 +81,28 @@ static int validate_ctr_version(const struct hw_perf_event *hwc) break; } - put_cpu_var(cpu_hw_events); + put_cpu_var(cpu_cf_events); return err; } static int validate_ctr_auth(const struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuhw; + struct cpu_cf_events *cpuhw; u64 ctrs_state; int err = 0; - cpuhw = &get_cpu_var(cpu_hw_events); + cpuhw = &get_cpu_var(cpu_cf_events); /* Check authorization for cpu counter sets. * If the particular CPU counter set is not authorized, * return with -ENOENT in order to fall back to other * PMUs that might suffice the event request. 
*/ - ctrs_state = cpumf_state_ctl[hwc->config_base]; + ctrs_state = cpumf_ctr_ctl[hwc->config_base]; if (!(ctrs_state & cpuhw->info.auth_ctl)) err = -ENOENT; - put_cpu_var(cpu_hw_events); + put_cpu_var(cpu_cf_events); return err; } @@ -175,7 +113,7 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc) */ static void cpumf_pmu_enable(struct pmu *pmu) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); int err; if (cpuhw->flags & PMU_F_ENABLED) @@ -198,7 +136,7 @@ static void cpumf_pmu_enable(struct pmu *pmu) */ static void cpumf_pmu_disable(struct pmu *pmu) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); int err; u64 inactive; @@ -222,86 +160,13 @@ static atomic_t num_events = ATOMIC_INIT(0); /* Used to avoid races in calling reserve/release_cpumf_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); -/* CPU-measurement alerts for the counter facility */ -static void cpumf_measurement_alert(struct ext_code ext_code, - unsigned int alert, unsigned long unused) -{ - struct cpu_hw_events *cpuhw; - - if (!(alert & CPU_MF_INT_CF_MASK)) - return; - - inc_irq_stat(IRQEXT_CMC); - cpuhw = this_cpu_ptr(&cpu_hw_events); - - /* Measurement alerts are shared and might happen when the PMU - * is not reserved. Ignore these alerts in this case. */ - if (!(cpuhw->flags & PMU_F_RESERVED)) - return; - - /* counter authorization change alert */ - if (alert & CPU_MF_INT_CF_CACA) - qctri(&cpuhw->info); - - /* loss of counter data alert */ - if (alert & CPU_MF_INT_CF_LCDA) - pr_err("CPU[%i] Counter data was lost\n", smp_processor_id()); - - /* loss of MT counter data alert */ - if (alert & CPU_MF_INT_CF_MTDA) - pr_warn("CPU[%i] MT counter data was lost\n", - smp_processor_id()); -} - -#define PMC_INIT 0 -#define PMC_RELEASE 1 -static void setup_pmc_cpu(void *flags) -{ - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); - - switch (*((int *) flags)) { - case PMC_INIT: - memset(&cpuhw->info, 0, sizeof(cpuhw->info)); - qctri(&cpuhw->info); - cpuhw->flags |= PMU_F_RESERVED; - break; - - case PMC_RELEASE: - cpuhw->flags &= ~PMU_F_RESERVED; - break; - } - - /* Disable CPU counter sets */ - lcctl(0); -} - -/* Initialize the CPU-measurement facility */ -static int reserve_pmc_hardware(void) -{ - int flags = PMC_INIT; - - on_each_cpu(setup_pmc_cpu, &flags, 1); - irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); - - return 0; -} - -/* Release the CPU-measurement facility */ -static void release_pmc_hardware(void) -{ - int flags = PMC_RELEASE; - - on_each_cpu(setup_pmc_cpu, &flags, 1); - irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); -} - /* Release the PMU if event is the last perf event */ static void hw_perf_event_destroy(struct perf_event *event) { if (!atomic_add_unless(&num_events, -1, 1)) { mutex_lock(&pmc_reserve_mutex); if (atomic_dec_return(&num_events) == 0) - release_pmc_hardware(); + __kernel_cpumcf_end(); mutex_unlock(&pmc_reserve_mutex); } } @@ -332,7 +197,7 @@ static int __hw_perf_event_init(struct perf_event *event) struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; enum cpumf_ctr_set set; - int err; + int err = 0; u64 ev; switch (attr->type) { @@ -402,12 +267,14 @@ static int __hw_perf_event_init(struct perf_event *event) /* Initialize for using the CPU-measurement counter facility */ if (!atomic_inc_not_zero(&num_events)) { mutex_lock(&pmc_reserve_mutex); - if (atomic_read(&num_events) == 
0 && reserve_pmc_hardware()) + if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin()) err = -EBUSY; else atomic_inc(&num_events); mutex_unlock(&pmc_reserve_mutex); } + if (err) + return err; event->destroy = hw_perf_event_destroy; /* Finally, validate version and authorization of the counter set */ @@ -488,7 +355,7 @@ static void cpumf_pmu_read(struct perf_event *event) static void cpumf_pmu_start(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); struct hw_perf_event *hwc = &event->hw; if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) @@ -519,7 +386,7 @@ static void cpumf_pmu_start(struct perf_event *event, int flags) static void cpumf_pmu_stop(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); struct hw_perf_event *hwc = &event->hw; if (!(hwc->state & PERF_HES_STOPPED)) { @@ -540,7 +407,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags) static int cpumf_pmu_add(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); /* Check authorization for the counter set to which this * counter belongs. @@ -564,7 +431,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags) static void cpumf_pmu_del(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); cpumf_pmu_stop(event, PERF_EF_UPDATE); @@ -592,7 +459,7 @@ static void cpumf_pmu_del(struct perf_event *event, int flags) */ static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ @@ -612,7 +479,7 @@ static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) static void cpumf_pmu_cancel_txn(struct pmu *pmu) { unsigned int txn_flags; - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ @@ -633,7 +500,7 @@ static void cpumf_pmu_cancel_txn(struct pmu *pmu) */ static int cpumf_pmu_commit_txn(struct pmu *pmu) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); u64 state; WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ @@ -671,54 +538,17 @@ static struct pmu cpumf_pmu = { .cancel_txn = cpumf_pmu_cancel_txn, }; -static int cpumf_pmf_setup(unsigned int cpu, int flags) -{ - local_irq_disable(); - setup_pmc_cpu(&flags); - local_irq_enable(); - return 0; -} - -static int s390_pmu_online_cpu(unsigned int cpu) -{ - return cpumf_pmf_setup(cpu, PMC_INIT); -} - -static int s390_pmu_offline_cpu(unsigned int cpu) -{ - return cpumf_pmf_setup(cpu, PMC_RELEASE); -} - static int __init cpumf_pmu_init(void) { int rc; - if (!cpum_cf_avail()) + if (!kernel_cpumcf_avail()) return -ENODEV; - /* clear bit 15 of cr0 to unauthorize problem-state to - * extract measurement counters */ - ctl_clear_bit(0, 48); - - /* register handler for measurement-alert interruptions */ - rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, - cpumf_measurement_alert); - if (rc) { - pr_err("Registering for 
CPU-measurement alerts " - "failed with rc=%i\n", rc); - return rc; - } - cpumf_pmu.attr_groups = cpumf_cf_event_group(); rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); - if (rc) { + if (rc) pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); - unregister_external_irq(EXT_IRQ_MEASURE_ALERT, - cpumf_measurement_alert); - return rc; - } - return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE, - "perf/s390/cf:online", - s390_pmu_online_cpu, s390_pmu_offline_cpu); + return rc; } -early_initcall(cpumf_pmu_init); +subsys_initcall(cpumf_pmu_init); diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c new file mode 100644 index 000000000000..3bced89caffb --- /dev/null +++ b/arch/s390/kernel/perf_cpum_cf_common.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CPU-Measurement Counter Facility Support - Common Layer + * + * Copyright IBM Corp. 2019 + * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com> + */ +#define KMSG_COMPONENT "cpum_cf_common" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/kernel_stat.h> +#include <linux/percpu.h> +#include <linux/notifier.h> +#include <linux/init.h> +#include <linux/export.h> +#include <asm/ctl_reg.h> +#include <asm/irq.h> +#include <asm/cpu_mcf.h> + +/* Per-CPU event structure for the counter facility */ +DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = { + .ctr_set = { + [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0), + [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0), + [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0), + [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0), + [CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0), + }, + .alert = ATOMIC64_INIT(0), + .state = 0, + .flags = 0, + .txn_flags = 0, +}; +/* Indicator whether the CPU-Measurement Counter Facility Support is ready */ +static bool cpum_cf_initalized; + +/* CPU-measurement alerts for the counter facility */ +static void cpumf_measurement_alert(struct ext_code ext_code, + unsigned int alert, unsigned long unused) +{ + struct cpu_cf_events *cpuhw; + + if (!(alert & CPU_MF_INT_CF_MASK)) + return; + + inc_irq_stat(IRQEXT_CMC); + cpuhw = this_cpu_ptr(&cpu_cf_events); + + /* Measurement alerts are shared and might happen when the PMU + * is not reserved. Ignore these alerts in this case. 
*/ + if (!(cpuhw->flags & PMU_F_RESERVED)) + return; + + /* counter authorization change alert */ + if (alert & CPU_MF_INT_CF_CACA) + qctri(&cpuhw->info); + + /* loss of counter data alert */ + if (alert & CPU_MF_INT_CF_LCDA) + pr_err("CPU[%i] Counter data was lost\n", smp_processor_id()); + + /* loss of MT counter data alert */ + if (alert & CPU_MF_INT_CF_MTDA) + pr_warn("CPU[%i] MT counter data was lost\n", + smp_processor_id()); + + /* store alert for special handling by in-kernel users */ + atomic64_or(alert, &cpuhw->alert); +} + +#define PMC_INIT 0 +#define PMC_RELEASE 1 +static void cpum_cf_setup_cpu(void *flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + + switch (*((int *) flags)) { + case PMC_INIT: + memset(&cpuhw->info, 0, sizeof(cpuhw->info)); + qctri(&cpuhw->info); + cpuhw->flags |= PMU_F_RESERVED; + break; + + case PMC_RELEASE: + cpuhw->flags &= ~PMU_F_RESERVED; + break; + } + + /* Disable CPU counter sets */ + lcctl(0); +} + +bool kernel_cpumcf_avail(void) +{ + return cpum_cf_initalized; +} +EXPORT_SYMBOL(kernel_cpumcf_avail); + + +/* Reserve/release functions for sharing perf hardware */ +static DEFINE_SPINLOCK(cpumcf_owner_lock); +static void *cpumcf_owner; + +/* Initialize the CPU-measurement counter facility */ +int __kernel_cpumcf_begin(void) +{ + int flags = PMC_INIT; + int err = 0; + + spin_lock(&cpumcf_owner_lock); + if (cpumcf_owner) + err = -EBUSY; + else + cpumcf_owner = __builtin_return_address(0); + spin_unlock(&cpumcf_owner_lock); + if (err) + return err; + + on_each_cpu(cpum_cf_setup_cpu, &flags, 1); + irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); + + return 0; +} +EXPORT_SYMBOL(__kernel_cpumcf_begin); + +/* Obtain the CPU-measurement alerts for the counter facility */ +unsigned long kernel_cpumcf_alert(int clear) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + unsigned long alert; + + alert = atomic64_read(&cpuhw->alert); + if (clear) + atomic64_set(&cpuhw->alert, 0); + + return alert; +} +EXPORT_SYMBOL(kernel_cpumcf_alert); + +/* Release the CPU-measurement counter facility */ +void __kernel_cpumcf_end(void) +{ + int flags = PMC_RELEASE; + + on_each_cpu(cpum_cf_setup_cpu, &flags, 1); + irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + + spin_lock(&cpumcf_owner_lock); + cpumcf_owner = NULL; + spin_unlock(&cpumcf_owner_lock); +} +EXPORT_SYMBOL(__kernel_cpumcf_end); + +static int cpum_cf_setup(unsigned int cpu, int flags) +{ + local_irq_disable(); + cpum_cf_setup_cpu(&flags); + local_irq_enable(); + return 0; +} + +static int cpum_cf_online_cpu(unsigned int cpu) +{ + return cpum_cf_setup(cpu, PMC_INIT); +} + +static int cpum_cf_offline_cpu(unsigned int cpu) +{ + return cpum_cf_setup(cpu, PMC_RELEASE); +} + +static int __init cpum_cf_init(void) +{ + int rc; + + if (!cpum_cf_avail()) + return -ENODEV; + + /* clear bit 15 of cr0 to unauthorize problem-state to + * extract measurement counters */ + ctl_clear_bit(0, 48); + + /* register handler for measurement-alert interruptions */ + rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, + cpumf_measurement_alert); + if (rc) { + pr_err("Registering for CPU-measurement alerts " + "failed with rc=%i\n", rc); + return rc; + } + + rc = cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE, + "perf/s390/cf:online", + cpum_cf_online_cpu, cpum_cf_offline_cpu); + if (!rc) + cpum_cf_initalized = true; + + return rc; +} +early_initcall(cpum_cf_init); diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c new file mode 100644 index 
000000000000..c6fad208c2fa --- /dev/null +++ b/arch/s390/kernel/perf_cpum_cf_diag.c @@ -0,0 +1,693 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance event support for s390x - CPU-measurement Counter Sets + * + * Copyright IBM Corp. 2019 + * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com> + * Thomas Richer <tmricht@linux.ibm.com> + */ +#define KMSG_COMPONENT "cpum_cf_diag" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/kernel_stat.h> +#include <linux/percpu.h> +#include <linux/notifier.h> +#include <linux/init.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/processor.h> + +#include <asm/ctl_reg.h> +#include <asm/irq.h> +#include <asm/cpu_mcf.h> +#include <asm/timex.h> +#include <asm/debug.h> + +#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */ + +static unsigned int cf_diag_cpu_speed; +static debug_info_t *cf_diag_dbg; + +struct cf_diag_csd { /* Counter set data per CPU */ + size_t used; /* Bytes used in data/start */ + unsigned char start[PAGE_SIZE]; /* Counter set at event start */ + unsigned char data[PAGE_SIZE]; /* Counter set at event delete */ +}; +DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd); + +/* Counter sets are stored as data stream in a page sized memory buffer and + * exported to user space via raw data attached to the event sample data. + * Each counter set starts with an eight byte header consisting of: + * - a two byte eye catcher (0xfeef) + * - a one byte counter set number + * - a two byte counter set size (indicates the number of counters in this set) + * - a three byte reserved value (must be zero) to make the header the same + * size as a counter value. + * All counter values are eight byte in size. + * + * All counter sets are followed by a 64 byte trailer. + * The trailer consists of a: + * - flag field indicating valid fields when corresponding bit set + * - the counter facility first and second version number + * - the CPU speed if nonzero + * - the time stamp the counter sets have been collected + * - the time of day (TOD) base value + * - the machine type. + * + * The counter sets are saved when the process is prepared to be executed on a + * CPU and saved again when the process is going to be removed from a CPU. + * The difference of both counter sets are calculated and stored in the event + * sample data area. + */ + +struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */ + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int set:16; /* 16-31 Counter set identifier */ + unsigned int ctr:16; /* 32-47 Number of stored counters */ + unsigned int res1:16; /* 48-63 Reserved */ +}; + +struct cf_trailer_entry { /* CPU-M CF_DIAG trailer (64 byte) */ + /* 0 - 7 */ + union { + struct { + unsigned int clock_base:1; /* TOD clock base set */ + unsigned int speed:1; /* CPU speed set */ + /* Measurement alerts */ + unsigned int mtda:1; /* Loss of MT ctr. data alert */ + unsigned int caca:1; /* Counter auth. 
change alert */ + unsigned int lcda:1; /* Loss of counter data alert */ + }; + unsigned long flags; /* 0-63 All indicators */ + }; + /* 8 - 15 */ + unsigned int cfvn:16; /* 64-79 Ctr First Version */ + unsigned int csvn:16; /* 80-95 Ctr Second Version */ + unsigned int cpu_speed:32; /* 96-127 CPU speed */ + /* 16 - 23 */ + unsigned long timestamp; /* 128-191 Timestamp (TOD) */ + /* 24 - 55 */ + union { + struct { + unsigned long progusage1; + unsigned long progusage2; + unsigned long progusage3; + unsigned long tod_base; + }; + unsigned long progusage[4]; + }; + /* 56 - 63 */ + unsigned int mach_type:16; /* Machine type */ + unsigned int res1:16; /* Reserved */ + unsigned int res2:32; /* Reserved */ +}; + +/* Create the trailer data at the end of a page. */ +static void cf_diag_trailer(struct cf_trailer_entry *te) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cpuid cpuid; + + te->cfvn = cpuhw->info.cfvn; /* Counter version numbers */ + te->csvn = cpuhw->info.csvn; + + get_cpu_id(&cpuid); /* Machine type */ + te->mach_type = cpuid.machine; + te->cpu_speed = cf_diag_cpu_speed; + if (te->cpu_speed) + te->speed = 1; + te->clock_base = 1; /* Save clock base */ + memcpy(&te->tod_base, &tod_clock_base[1], 8); + store_tod_clock((__u64 *)&te->timestamp); +} + +/* + * Change the CPUMF state to active. + * Enable and activate the CPU-counter sets according + * to the per-cpu control state. + */ +static void cf_diag_enable(struct pmu *pmu) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + int err; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s pmu %p cpu %d flags %#x state %#llx\n", + __func__, pmu, smp_processor_id(), cpuhw->flags, + cpuhw->state); + if (cpuhw->flags & PMU_F_ENABLED) + return; + + err = lcctl(cpuhw->state); + if (err) { + pr_err("Enabling the performance measuring unit " + "failed with rc=%x\n", err); + return; + } + cpuhw->flags |= PMU_F_ENABLED; +} + +/* + * Change the CPUMF state to inactive. + * Disable and enable (inactive) the CPU-counter sets according + * to the per-cpu control state. + */ +static void cf_diag_disable(struct pmu *pmu) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + u64 inactive; + int err; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s pmu %p cpu %d flags %#x state %#llx\n", + __func__, pmu, smp_processor_id(), cpuhw->flags, + cpuhw->state); + if (!(cpuhw->flags & PMU_F_ENABLED)) + return; + + inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); + err = lcctl(inactive); + if (err) { + pr_err("Disabling the performance measuring unit " + "failed with rc=%x\n", err); + return; + } + cpuhw->flags &= ~PMU_F_ENABLED; +} + +/* Number of perf events counting hardware events */ +static atomic_t cf_diag_events = ATOMIC_INIT(0); + +/* Release the PMU if event is the last perf event */ +static void cf_diag_perf_event_destroy(struct perf_event *event) +{ + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d cf_diag_events %d\n", + __func__, event, event->cpu, + atomic_read(&cf_diag_events)); + if (atomic_dec_return(&cf_diag_events) == 0) + __kernel_cpumcf_end(); +} + +/* Setup the event. Test for authorized counter sets and only include counter + * sets which are authorized at the time of the setup. Including unauthorized + * counter sets result in specification exception (and panic). 
+ */ +static int __hw_perf_event_init(struct perf_event *event) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct perf_event_attr *attr = &event->attr; + enum cpumf_ctr_set i; + int err = 0; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d authorized %#x\n", __func__, + event, event->cpu, cpuhw->info.auth_ctl); + + event->hw.config = attr->config; + event->hw.config_base = 0; + local64_set(&event->count, 0); + + /* Add all authorized counter sets to config_base */ + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) + if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i]) + event->hw.config_base |= cpumf_ctr_ctl[i]; + + /* No authorized counter sets, nothing to count/sample */ + if (!event->hw.config_base) { + err = -EINVAL; + goto out; + } + + /* Set sample_period to indicate sampling */ + event->hw.sample_period = attr->sample_period; + local64_set(&event->hw.period_left, event->hw.sample_period); + event->hw.last_period = event->hw.sample_period; +out: + debug_sprintf_event(cf_diag_dbg, 5, "%s err %d config_base %#lx\n", + __func__, err, event->hw.config_base); + return err; +} + +static int cf_diag_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + int err = -ENOENT; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d config %#llx " + "sample_type %#llx cf_diag_events %d\n", __func__, + event, event->cpu, attr->config, attr->sample_type, + atomic_read(&cf_diag_events)); + + if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG || + event->attr.type != PERF_TYPE_RAW) + goto out; + + /* Raw events are used to access counters directly, + * hence do not permit excludes. + * This event is usesless without PERF_SAMPLE_RAW to return counter set + * values as raw data. + */ + if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv || + !(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) { + err = -EOPNOTSUPP; + goto out; + } + + /* Initialize for using the CPU-measurement counter facility */ + if (atomic_inc_return(&cf_diag_events) == 1) { + if (__kernel_cpumcf_begin()) { + atomic_dec(&cf_diag_events); + err = -EBUSY; + goto out; + } + } + event->destroy = cf_diag_perf_event_destroy; + + err = __hw_perf_event_init(event); + if (unlikely(err)) + event->destroy(event); +out: + debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err); + return err; +} + +static void cf_diag_read(struct perf_event *event) +{ + debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event); +} + +/* Return the maximum possible counter set size (in number of 8 byte counters) + * depending on type and model number. + */ +static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset, + struct cpumf_ctr_info *info) +{ + size_t ctrset_size = 0; + + switch (ctrset) { + case CPUMF_CTR_SET_BASIC: + if (info->cfvn >= 1) + ctrset_size = 6; + break; + case CPUMF_CTR_SET_USER: + if (info->cfvn == 1) + ctrset_size = 6; + else if (info->cfvn >= 3) + ctrset_size = 2; + break; + case CPUMF_CTR_SET_CRYPTO: + ctrset_size = 16; + break; + case CPUMF_CTR_SET_EXT: + if (info->csvn == 1) + ctrset_size = 32; + else if (info->csvn == 2) + ctrset_size = 48; + else if (info->csvn >= 3) + ctrset_size = 128; + break; + case CPUMF_CTR_SET_MT_DIAG: + if (info->csvn > 3) + ctrset_size = 48; + break; + case CPUMF_CTR_SET_MAX: + break; + } + + return ctrset_size; +} + +/* Calculate memory needed to store all counter sets together with header and + * trailer data. 
This is independend of the counter set authorization which + * can vary depending on the configuration. + */ +static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info) +{ + size_t max_size = sizeof(struct cf_trailer_entry); + enum cpumf_ctr_set i; + + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { + size_t size = cf_diag_ctrset_size(i, info); + + if (size) + max_size += size * sizeof(u64) + + sizeof(struct cf_ctrset_entry); + } + debug_sprintf_event(cf_diag_dbg, 5, "%s max_size %zu\n", __func__, + max_size); + + return max_size; +} + +/* Read a counter set. The counter set number determines which counter set and + * the CPUM-CF first and second version number determine the number of + * available counters in this counter set. + * Each counter set starts with header containing the counter set number and + * the number of 8 byte counters. + * + * The functions returns the number of bytes occupied by this counter set + * including the header. + * If there is no counter in the counter set, this counter set is useless and + * zero is returned on this case. + */ +static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset, + size_t room) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + size_t ctrset_size, need = 0; + int rc = 3; /* Assume write failure */ + + ctrdata->def = CF_DIAG_CTRSET_DEF; + ctrdata->set = ctrset; + ctrdata->res1 = 0; + ctrset_size = cf_diag_ctrset_size(ctrset, &cpuhw->info); + + if (ctrset_size) { /* Save data */ + need = ctrset_size * sizeof(u64) + sizeof(*ctrdata); + if (need <= room) + rc = ctr_stcctm(ctrset, ctrset_size, + (u64 *)(ctrdata + 1)); + if (rc != 3) + ctrdata->ctr = ctrset_size; + else + need = 0; + } + + debug_sprintf_event(cf_diag_dbg, 6, + "%s ctrset %d ctrset_size %zu cfvn %d csvn %d" + " need %zd rc:%d\n", + __func__, ctrset, ctrset_size, cpuhw->info.cfvn, + cpuhw->info.csvn, need, rc); + return need; +} + +/* Read out all counter sets and save them in the provided data buffer. + * The last 64 byte host an artificial trailer entry. + */ +static size_t cf_diag_getctr(void *data, size_t sz, unsigned long auth) +{ + struct cf_trailer_entry *trailer; + size_t offset = 0, done; + int i; + + memset(data, 0, sz); + sz -= sizeof(*trailer); /* Always room for trailer */ + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { + struct cf_ctrset_entry *ctrdata = data + offset; + + if (!(auth & cpumf_ctr_ctl[i])) + continue; /* Counter set not authorized */ + + done = cf_diag_getctrset(ctrdata, i, sz - offset); + offset += done; + debug_sprintf_event(cf_diag_dbg, 6, + "%s ctrset %d offset %zu done %zu\n", + __func__, i, offset, done); + } + trailer = data + offset; + cf_diag_trailer(trailer); + return offset + sizeof(*trailer); +} + +/* Calculate the difference for each counter in a counter set. */ +static void cf_diag_diffctrset(u64 *pstart, u64 *pstop, int counters) +{ + for (; --counters >= 0; ++pstart, ++pstop) + if (*pstop >= *pstart) + *pstop -= *pstart; + else + *pstop = *pstart - *pstop; +} + +/* Scan the counter sets and calculate the difference of each counter + * in each set. The result is the increment of each counter during the + * period the counter set has been activated. + * + * Return true on success. 
+ */ +static int cf_diag_diffctr(struct cf_diag_csd *csd, unsigned long auth) +{ + struct cf_trailer_entry *trailer_start, *trailer_stop; + struct cf_ctrset_entry *ctrstart, *ctrstop; + size_t offset = 0; + + auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1; + do { + ctrstart = (struct cf_ctrset_entry *)(csd->start + offset); + ctrstop = (struct cf_ctrset_entry *)(csd->data + offset); + + if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) { + pr_err("cpum_cf_diag counter set compare error " + "in set %i\n", ctrstart->set); + return 0; + } + auth &= ~cpumf_ctr_ctl[ctrstart->set]; + if (ctrstart->def == CF_DIAG_CTRSET_DEF) { + cf_diag_diffctrset((u64 *)(ctrstart + 1), + (u64 *)(ctrstop + 1), ctrstart->ctr); + offset += ctrstart->ctr * sizeof(u64) + + sizeof(*ctrstart); + } + debug_sprintf_event(cf_diag_dbg, 6, + "%s set %d ctr %d offset %zu auth %lx\n", + __func__, ctrstart->set, ctrstart->ctr, + offset, auth); + } while (ctrstart->def && auth); + + /* Save time_stamp from start of event in stop's trailer */ + trailer_start = (struct cf_trailer_entry *)(csd->start + offset); + trailer_stop = (struct cf_trailer_entry *)(csd->data + offset); + trailer_stop->progusage[0] = trailer_start->timestamp; + + return 1; +} + +/* Create perf event sample with the counter sets as raw data. The sample + * is then pushed to the event subsystem and the function checks for + * possible event overflows. If an event overflow occurs, the PMU is + * stopped. + * + * Return non-zero if an event overflow occurred. + */ +static int cf_diag_push_sample(struct perf_event *event, + struct cf_diag_csd *csd) +{ + struct perf_sample_data data; + struct perf_raw_record raw; + struct pt_regs regs; + int overflow; + + /* Setup perf sample */ + perf_sample_data_init(&data, 0, event->hw.last_period); + memset(®s, 0, sizeof(regs)); + memset(&raw, 0, sizeof(raw)); + + if (event->attr.sample_type & PERF_SAMPLE_CPU) + data.cpu_entry.cpu = event->cpu; + if (event->attr.sample_type & PERF_SAMPLE_RAW) { + raw.frag.size = csd->used; + raw.frag.data = csd->data; + raw.size = csd->used; + data.raw = &raw; + } + + overflow = perf_event_overflow(event, &data, ®s); + debug_sprintf_event(cf_diag_dbg, 6, + "%s event %p cpu %d sample_type %#llx raw %d " + "ov %d\n", __func__, event, event->cpu, + event->attr.sample_type, raw.size, overflow); + if (overflow) + event->pmu->stop(event, 0); + + perf_event_update_userpage(event); + return overflow; +} + +static void cf_diag_start(struct perf_event *event, int flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd); + struct hw_perf_event *hwc = &event->hw; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d flags %#x hwc-state %#x\n", + __func__, event, event->cpu, flags, hwc->state); + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + /* (Re-)enable and activate all counter sets */ + lcctl(0); /* Reset counter sets */ + hwc->state = 0; + ctr_set_multiple_enable(&cpuhw->state, hwc->config_base); + lcctl(cpuhw->state); /* Enable counter sets */ + csd->used = cf_diag_getctr(csd->start, sizeof(csd->start), + event->hw.config_base); + ctr_set_multiple_start(&cpuhw->state, hwc->config_base); + /* Function cf_diag_enable() starts the counter sets. 
*/ +} + +static void cf_diag_stop(struct perf_event *event, int flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd); + struct hw_perf_event *hwc = &event->hw; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d flags %#x hwc-state %#x\n", + __func__, event, event->cpu, flags, hwc->state); + + /* Deactivate all counter sets */ + ctr_set_multiple_stop(&cpuhw->state, hwc->config_base); + local64_inc(&event->count); + csd->used = cf_diag_getctr(csd->data, sizeof(csd->data), + event->hw.config_base); + if (cf_diag_diffctr(csd, event->hw.config_base)) + cf_diag_push_sample(event, csd); + hwc->state |= PERF_HES_STOPPED; +} + +static int cf_diag_add(struct perf_event *event, int flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + int err = 0; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d flags %#x cpuhw:%p\n", + __func__, event, event->cpu, flags, cpuhw); + + if (cpuhw->flags & PMU_F_IN_USE) { + err = -EAGAIN; + goto out; + } + + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + cpuhw->flags |= PMU_F_IN_USE; + if (flags & PERF_EF_START) + cf_diag_start(event, PERF_EF_RELOAD); +out: + debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err); + return err; +} + +static void cf_diag_del(struct perf_event *event, int flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d flags %#x\n", + __func__, event, event->cpu, flags); + + cf_diag_stop(event, PERF_EF_UPDATE); + ctr_set_multiple_stop(&cpuhw->state, event->hw.config_base); + ctr_set_multiple_disable(&cpuhw->state, event->hw.config_base); + cpuhw->flags &= ~PMU_F_IN_USE; +} + +CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG); + +static struct attribute *cf_diag_events_attr[] = { + CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG), + NULL, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *cf_diag_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cf_diag_events_group = { + .name = "events", + .attrs = cf_diag_events_attr, +}; +static struct attribute_group cf_diag_format_group = { + .name = "format", + .attrs = cf_diag_format_attr, +}; +static const struct attribute_group *cf_diag_attr_groups[] = { + &cf_diag_events_group, + &cf_diag_format_group, + NULL, +}; + +/* Performance monitoring unit for s390x */ +static struct pmu cf_diag = { + .task_ctx_nr = perf_sw_context, + .pmu_enable = cf_diag_enable, + .pmu_disable = cf_diag_disable, + .event_init = cf_diag_event_init, + .add = cf_diag_add, + .del = cf_diag_del, + .start = cf_diag_start, + .stop = cf_diag_stop, + .read = cf_diag_read, + + .attr_groups = cf_diag_attr_groups +}; + +/* Get the CPU speed, try sampling facility first and CPU attributes second. */ +static void cf_diag_get_cpu_speed(void) +{ + if (cpum_sf_avail()) { /* Sampling facility first */ + struct hws_qsi_info_block si; + + memset(&si, 0, sizeof(si)); + if (!qsi(&si)) { + cf_diag_cpu_speed = si.cpu_speed; + return; + } + } + + if (test_facility(34)) { /* CPU speed extract static part */ + unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0); + + if (mhz != -1UL) + cf_diag_cpu_speed = mhz & 0xffffffff; + } +} + +/* Initialize the counter set PMU to generate complete counter set data as + * event raw data. This relies on the CPU Measurement Counter Facility device + * already being loaded and initialized. 
+ */ +static int __init cf_diag_init(void) +{ + struct cpumf_ctr_info info; + size_t need; + int rc; + + if (!kernel_cpumcf_avail() || !stccm_avail() || qctri(&info)) + return -ENODEV; + cf_diag_get_cpu_speed(); + + /* Make sure the counter set data fits into predefined buffer. */ + need = cf_diag_ctrset_maxsize(&info); + if (need > sizeof(((struct cf_diag_csd *)0)->start)) { + pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n", + need); + return -ENOMEM; + } + + /* Setup s390dbf facility */ + cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128); + if (!cf_diag_dbg) { + pr_err("Registration of s390dbf(cpum_cf_diag) failed\n"); + return -ENOMEM; + } + debug_register_view(cf_diag_dbg, &debug_sprintf_view); + + rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW); + if (rc) { + debug_unregister_view(cf_diag_dbg, &debug_sprintf_view); + debug_unregister(cf_diag_dbg); + pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n", + rc); + } + return rc; +} +arch_initcall(cf_diag_init); diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index d63fb3c56b8a..b45238c89728 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -6,6 +6,7 @@ #include <linux/slab.h> #include <linux/perf_event.h> +#include <asm/cpu_mf.h> /* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */ diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index bfabeb1889cc..1266194afb02 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1600,7 +1600,7 @@ static void aux_sdb_init(unsigned long sdb) /* * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling - * @cpu: On which to allocate, -1 means current + * @event: Event the buffer is setup for, event->cpu == -1 means current * @pages: Array of pointers to buffer pages passed from perf core * @nr_pages: Total pages * @snapshot: Flag for snapshot mode @@ -1612,8 +1612,8 @@ static void aux_sdb_init(unsigned long sdb) * * Return the private AUX buffer structure if success or NULL if fails. 
*/ -static void *aux_buffer_setup(int cpu, void **pages, int nr_pages, - bool snapshot) +static void *aux_buffer_setup(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { struct sf_buffer *sfb; struct aux_buffer *aux; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7ed90a759135..12934e8fbb91 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -369,7 +369,7 @@ void __init arch_call_rest_init(void) : : [_frame] "a" (frame)); } -static void __init setup_lowcore(void) +static void __init setup_lowcore_dat_off(void) { struct lowcore *lc; @@ -380,19 +380,16 @@ static void __init setup_lowcore(void) lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc)); lc->restart_psw.mask = PSW_KERNEL_BITS; lc->restart_psw.addr = (unsigned long) restart_int_handler; - lc->external_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_MCHECK; + lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->external_new_psw.addr = (unsigned long) ext_int_handler; lc->svc_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; lc->svc_new_psw.addr = (unsigned long) system_call; - lc->program_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_MCHECK; + lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->program_new_psw.addr = (unsigned long) pgm_check_handler; lc->mcck_new_psw.mask = PSW_KERNEL_BITS; lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler; - lc->io_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_MCHECK; + lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->io_new_psw.addr = (unsigned long) io_int_handler; lc->clock_comparator = clock_comparator_max; lc->nodat_stack = ((unsigned long) &init_thread_union) @@ -452,6 +449,16 @@ static void __init setup_lowcore(void) lowcore_ptr[0] = lc; } +static void __init setup_lowcore_dat_on(void) +{ + __ctl_clear_bit(0, 28); + S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT; + __ctl_set_bit(0, 28); +} + static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, @@ -794,18 +801,9 @@ static void __init reserve_kernel(void) { unsigned long start_pfn = PFN_UP(__pa(_end)); -#ifdef CONFIG_DMA_API_DEBUG - /* - * DMA_API_DEBUG code stumbles over addresses from the - * range [PARMAREA_END, _stext]. Mark the memory as reserved - * so it is not used for CONFIG_DMA_API_DEBUG=y. - */ - memblock_reserve(0, PFN_PHYS(start_pfn)); -#else memblock_reserve(0, PARMAREA_END); memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn) - (unsigned long)_stext); -#endif } static void __init setup_memory(void) @@ -990,6 +988,25 @@ static void __init setup_task_size(void) } /* + * Issue diagnose 318 to set the control program name and + * version codes. + */ +static void __init setup_control_program_code(void) +{ + union diag318_info diag318_info = { + .cpnc = CPNC_LINUX, + .cpvc_linux = 0, + .cpvc_distro = {0}, + }; + + if (!sclp.has_diag318) + return; + + diag_stat_inc(DIAG_STAT_X318); + asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val)); +} + +/* * Setup function called from init/main.c just after the banner * was printed. 
*/ @@ -1033,6 +1050,7 @@ void __init setup_arch(char **cmdline_p) os_info_init(); setup_ipl(); setup_task_size(); + setup_control_program_code(); /* Do some memory reservations *before* memory is added to memblock */ reserve_memory_end(); @@ -1072,7 +1090,7 @@ void __init setup_arch(char **cmdline_p) #endif setup_resources(); - setup_lowcore(); + setup_lowcore_dat_off(); smp_fill_possible_mask(); cpu_detect_mhz_feature(); cpu_init(); @@ -1085,6 +1103,12 @@ void __init setup_arch(char **cmdline_p) */ paging_init(); + /* + * After paging_init created the kernel page table, the new PSWs + * in lowcore can now run with DAT enabled. + */ + setup_lowcore_dat_on(); + /* Setup default console */ conmode_default(); set_preferred_console(); diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index 537f97fde37f..993100c31d65 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S @@ -30,10 +30,10 @@ .section .text ENTRY(swsusp_arch_suspend) lg %r1,__LC_NODAT_STACK - aghi %r1,-STACK_FRAME_OVERHEAD stmg %r6,%r15,__SF_GPRS(%r1) + aghi %r1,-STACK_FRAME_OVERHEAD stg %r15,__SF_BACKCHAIN(%r1) - lgr %r1,%r15 + lgr %r15,%r1 /* Store FPU registers */ brasl %r14,save_fpu_regs @@ -124,13 +124,13 @@ ENTRY(swsusp_arch_resume) lghi %r2,1 brasl %r14,arch_set_page_states - /* Deactivate DAT */ - stnsm __SF_EMPTY(%r15),0xfb - /* Set prefix page to zero */ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) spx __SF_EMPTY(%r15) + /* Deactivate DAT */ + stnsm __SF_EMPTY(%r15),0xfb + /* Restore saved image */ larl %r1,restore_pblist lg %r1,0(%r1) diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index 12f80d1f0415..2ac3c9b56a13 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c @@ -545,8 +545,6 @@ static __init int stsi_init_debugfs(void) int lvl, i; stsi_root = debugfs_create_dir("stsi", arch_debugfs_dir); - if (IS_ERR_OR_NULL(stsi_root)) - return 0; lvl = stsi(NULL, 0, 0, 0); if (lvl > 0) stsi_0_0_0 = lvl; diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index f24395a01918..98f850e00008 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -69,7 +69,7 @@ static void update_mt_scaling(void) u64 delta, fac, mult, div; int i; - stcctm5(smp_cpu_mtid + 1, cycles_new); + stcctm(MT_DIAG, smp_cpu_mtid + 1, cycles_new); cycles_old = this_cpu_ptr(mt_cycles); fac = 1; mult = div = 0; @@ -432,6 +432,6 @@ void vtime_init(void) __this_cpu_write(mt_scaling_jiffies, jiffies); __this_cpu_write(mt_scaling_mult, 1); __this_cpu_write(mt_scaling_div, 1); - stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles)); + stcctm(MT_DIAG, smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles)); } } diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index a153257bf7d9..d62fa148558b 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->crycbd = 0; apie_h = vcpu->arch.sie_block->eca & ECA_APIE; - if (!apie_h && !key_msk) + if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0)) return 0; if (!crycb_addr) diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index a10e11f7a5f7..0e30e6e43b0c 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c @@ -43,11 +43,13 @@ static inline char *__strnend(const char *s, size_t n) * * returns the length of @s */ +#ifdef __HAVE_ARCH_STRLEN size_t strlen(const char *s) { return __strend(s) - s; } EXPORT_SYMBOL(strlen); +#endif /** * strnlen - Find the length of a length-limited string @@ -56,11 +58,13 @@ 
EXPORT_SYMBOL(strlen); * * returns the minimum of the length of @s and @n */ +#ifdef __HAVE_ARCH_STRNLEN size_t strnlen(const char *s, size_t n) { return __strnend(s, n) - s; } EXPORT_SYMBOL(strnlen); +#endif /** * strcpy - Copy a %NUL terminated string @@ -69,6 +73,7 @@ EXPORT_SYMBOL(strnlen); * * returns a pointer to @dest */ +#ifdef __HAVE_ARCH_STRCPY char *strcpy(char *dest, const char *src) { register int r0 asm("0") = 0; @@ -81,6 +86,7 @@ char *strcpy(char *dest, const char *src) return ret; } EXPORT_SYMBOL(strcpy); +#endif /** * strlcpy - Copy a %NUL terminated string into a sized buffer @@ -93,6 +99,7 @@ EXPORT_SYMBOL(strcpy); * of course, the buffer size is zero). It does not pad * out the result like strncpy() does. */ +#ifdef __HAVE_ARCH_STRLCPY size_t strlcpy(char *dest, const char *src, size_t size) { size_t ret = __strend(src) - src; @@ -105,6 +112,7 @@ size_t strlcpy(char *dest, const char *src, size_t size) return ret; } EXPORT_SYMBOL(strlcpy); +#endif /** * strncpy - Copy a length-limited, %NUL-terminated string @@ -115,6 +123,7 @@ EXPORT_SYMBOL(strlcpy); * The result is not %NUL-terminated if the source exceeds * @n bytes. */ +#ifdef __HAVE_ARCH_STRNCPY char *strncpy(char *dest, const char *src, size_t n) { size_t len = __strnend(src, n) - src; @@ -123,6 +132,7 @@ char *strncpy(char *dest, const char *src, size_t n) return dest; } EXPORT_SYMBOL(strncpy); +#endif /** * strcat - Append one %NUL-terminated string to another @@ -131,6 +141,7 @@ EXPORT_SYMBOL(strncpy); * * returns a pointer to @dest */ +#ifdef __HAVE_ARCH_STRCAT char *strcat(char *dest, const char *src) { register int r0 asm("0") = 0; @@ -146,6 +157,7 @@ char *strcat(char *dest, const char *src) return ret; } EXPORT_SYMBOL(strcat); +#endif /** * strlcat - Append a length-limited, %NUL-terminated string to another @@ -153,6 +165,7 @@ EXPORT_SYMBOL(strcat); * @src: The string to append to it * @n: The size of the destination buffer. */ +#ifdef __HAVE_ARCH_STRLCAT size_t strlcat(char *dest, const char *src, size_t n) { size_t dsize = __strend(dest) - dest; @@ -170,6 +183,7 @@ size_t strlcat(char *dest, const char *src, size_t n) return res; } EXPORT_SYMBOL(strlcat); +#endif /** * strncat - Append a length-limited, %NUL-terminated string to another @@ -182,6 +196,7 @@ EXPORT_SYMBOL(strlcat); * Note that in contrast to strncpy, strncat ensures the result is * terminated. 
*/ +#ifdef __HAVE_ARCH_STRNCAT char *strncat(char *dest, const char *src, size_t n) { size_t len = __strnend(src, n) - src; @@ -192,6 +207,7 @@ char *strncat(char *dest, const char *src, size_t n) return dest; } EXPORT_SYMBOL(strncat); +#endif /** * strcmp - Compare two strings @@ -202,6 +218,7 @@ EXPORT_SYMBOL(strncat); * < 0 if @s1 is less than @s2 * > 0 if @s1 is greater than @s2 */ +#ifdef __HAVE_ARCH_STRCMP int strcmp(const char *s1, const char *s2) { register int r0 asm("0") = 0; @@ -219,12 +236,14 @@ int strcmp(const char *s1, const char *s2) return ret; } EXPORT_SYMBOL(strcmp); +#endif /** * strrchr - Find the last occurrence of a character in a string * @s: The string to be searched * @c: The character to search for */ +#ifdef __HAVE_ARCH_STRRCHR char *strrchr(const char *s, int c) { size_t len = __strend(s) - s; @@ -237,6 +256,7 @@ char *strrchr(const char *s, int c) return NULL; } EXPORT_SYMBOL(strrchr); +#endif static inline int clcle(const char *s1, unsigned long l1, const char *s2, unsigned long l2) @@ -261,6 +281,7 @@ static inline int clcle(const char *s1, unsigned long l1, * @s1: The string to be searched * @s2: The string to search for */ +#ifdef __HAVE_ARCH_STRSTR char *strstr(const char *s1, const char *s2) { int l1, l2; @@ -280,6 +301,7 @@ char *strstr(const char *s1, const char *s2) return NULL; } EXPORT_SYMBOL(strstr); +#endif /** * memchr - Find a character in an area of memory. @@ -290,6 +312,7 @@ EXPORT_SYMBOL(strstr); * returns the address of the first occurrence of @c, or %NULL * if @c is not found */ +#ifdef __HAVE_ARCH_MEMCHR void *memchr(const void *s, int c, size_t n) { register int r0 asm("0") = (char) c; @@ -304,6 +327,7 @@ void *memchr(const void *s, int c, size_t n) return (void *) ret; } EXPORT_SYMBOL(memchr); +#endif /** * memcmp - Compare two areas of memory @@ -311,6 +335,7 @@ EXPORT_SYMBOL(memchr); * @s2: Another area of memory * @count: The size of the area. */ +#ifdef __HAVE_ARCH_MEMCMP int memcmp(const void *s1, const void *s2, size_t n) { int ret; @@ -321,6 +346,7 @@ int memcmp(const void *s1, const void *s2, size_t n) return ret; } EXPORT_SYMBOL(memcmp); +#endif /** * memscan - Find a character in an area of memory. 
@@ -331,6 +357,7 @@ EXPORT_SYMBOL(memcmp); * returns the address of the first occurrence of @c, or 1 byte past * the area if @c is not found */ +#ifdef __HAVE_ARCH_MEMSCAN void *memscan(void *s, int c, size_t n) { register int r0 asm("0") = (char) c; @@ -342,3 +369,4 @@ void *memscan(void *s, int c, size_t n) return (void *) ret; } EXPORT_SYMBOL(memscan); +#endif diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index eba2def3414d..0b5622714c12 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -28,12 +28,7 @@ #include <asm/cpcmd.h> #include <asm/setup.h> -#define DCSS_LOADSHR 0x00 -#define DCSS_LOADNSR 0x04 #define DCSS_PURGESEG 0x08 -#define DCSS_FINDSEG 0x0c -#define DCSS_LOADNOLY 0x10 -#define DCSS_SEGEXT 0x18 #define DCSS_LOADSHRX 0x20 #define DCSS_LOADNSRX 0x24 #define DCSS_FINDSEGX 0x2c @@ -53,20 +48,6 @@ struct qout64 { struct qrange range[6]; }; -struct qrange_old { - unsigned int start; /* last byte type */ - unsigned int end; /* last byte reserved */ -}; - -/* output area format for the Diag x'64' old subcode x'18' */ -struct qout64_old { - int segstart; - int segend; - int segcnt; - int segrcnt; - struct qrange_old range[6]; -}; - struct qin64 { char qopcode; char rsrv1[3]; @@ -95,52 +76,10 @@ static DEFINE_MUTEX(dcss_lock); static LIST_HEAD(dcss_list); static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", "EW/EN-MIXED" }; -static int loadshr_scode, loadnsr_scode; -static int segext_scode, purgeseg_scode; -static int scode_set; - -/* set correct Diag x'64' subcodes. */ -static int -dcss_set_subcodes(void) -{ - char *name = kmalloc(8, GFP_KERNEL | GFP_DMA); - unsigned long rx, ry; - int rc; - - if (name == NULL) - return -ENOMEM; - - rx = (unsigned long) name; - ry = DCSS_FINDSEGX; - - strcpy(name, "dummy"); - diag_stat_inc(DIAG_STAT_X064); - asm volatile( - " diag %0,%1,0x64\n" - "0: ipm %2\n" - " srl %2,28\n" - " j 2f\n" - "1: la %2,3\n" - "2:\n" - EX_TABLE(0b, 1b) - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory"); - - kfree(name); - /* Diag x'64' new subcodes are supported, set to new subcodes */ - if (rc != 3) { - loadshr_scode = DCSS_LOADSHRX; - loadnsr_scode = DCSS_LOADNSRX; - purgeseg_scode = DCSS_PURGESEG; - segext_scode = DCSS_SEGEXTX; - return 0; - } - /* Diag x'64' new subcodes are not supported, set to old subcodes */ - loadshr_scode = DCSS_LOADNOLY; - loadnsr_scode = DCSS_LOADNSR; - purgeseg_scode = DCSS_PURGESEG; - segext_scode = DCSS_SEGEXT; - return 0; -} +static int loadshr_scode = DCSS_LOADSHRX; +static int loadnsr_scode = DCSS_LOADNSRX; +static int purgeseg_scode = DCSS_PURGESEG; +static int segext_scode = DCSS_SEGEXTX; /* * Create the 8 bytes, ebcdic VM segment name from @@ -196,32 +135,15 @@ dcss_diag(int *func, void *parameter, unsigned long rx, ry; int rc; - if (scode_set == 0) { - rc = dcss_set_subcodes(); - if (rc < 0) - return rc; - scode_set = 1; - } rx = (unsigned long) parameter; ry = (unsigned long) *func; - /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ diag_stat_inc(DIAG_STAT_X064); - if (*func > DCSS_SEGEXT) - asm volatile( - " diag %0,%1,0x64\n" - " ipm %2\n" - " srl %2,28\n" - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); - /* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */ - else - asm volatile( - " sam31\n" - " diag %0,%1,0x64\n" - " sam64\n" - " ipm %2\n" - " srl %2,28\n" - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); + asm volatile( + " diag %0,%1,0x64\n" + " ipm %2\n" + " srl %2,28\n" + : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); *ret1 = rx; *ret2 = 
ry; return rc; @@ -271,31 +193,6 @@ query_segment_type (struct dcss_segment *seg) goto out_free; } - /* Only old format of output area of Diagnose x'64' is supported, - copy data for the new format. */ - if (segext_scode == DCSS_SEGEXT) { - struct qout64_old *qout_old; - qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA); - if (qout_old == NULL) { - rc = -ENOMEM; - goto out_free; - } - memcpy(qout_old, qout, sizeof(struct qout64_old)); - qout->segstart = (unsigned long) qout_old->segstart; - qout->segend = (unsigned long) qout_old->segend; - qout->segcnt = qout_old->segcnt; - qout->segrcnt = qout_old->segrcnt; - - if (qout->segcnt > 6) - qout->segrcnt = 6; - for (i = 0; i < qout->segrcnt; i++) { - qout->range[i].start = - (unsigned long) qout_old->range[i].start; - qout->range[i].end = - (unsigned long) qout_old->range[i].end; - } - kfree(qout_old); - } if (qout->segcnt > 6) { rc = -EOPNOTSUPP; goto out_free; @@ -410,11 +307,9 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long if (rc < 0) goto out_free; - if (loadshr_scode == DCSS_LOADSHRX) { - if (segment_overlaps_others(seg)) { - rc = -EBUSY; - goto out_free; - } + if (segment_overlaps_others(seg)) { + rc = -EBUSY; + goto out_free; } rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1); @@ -472,11 +367,11 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *addr = seg->start_addr; *end = seg->end; if (do_nonshared) - pr_info("DCSS %s of range %p to %p and type %s loaded as " + pr_info("DCSS %s of range %px to %px and type %s loaded as " "exclusive-writable\n", name, (void*) seg->start_addr, (void*) seg->end, segtype_string[seg->vm_segtype]); else { - pr_info("DCSS %s of range %p to %p and type %s loaded in " + pr_info("DCSS %s of range %px to %px and type %s loaded in " "shared access mode\n", name, (void*) seg->start_addr, (void*) seg->end, segtype_string[seg->vm_segtype]); } diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c index bac5c27d11fc..01892dcf4029 100644 --- a/arch/s390/mm/kasan_init.c +++ b/arch/s390/mm/kasan_init.c @@ -226,8 +226,6 @@ static void __init kasan_enable_dat(void) static void __init kasan_early_detect_facilities(void) { - __stfle(S390_lowcore.stfle_fac_list, - ARRAY_SIZE(S390_lowcore.stfle_fac_list)); if (test_facility(8)) { has_edat = true; __ctl_set_bit(0, 23); diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 0a7627cdb34e..687f2a4d3459 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -29,14 +29,6 @@ static unsigned long stack_maxrandom_size(void) return STACK_RND_MASK << PAGE_SHIFT; } -/* - * Top of mmap area (just below the process stack). - * - * Leave at least a ~32 MB hole. - */ -#define MIN_GAP (32*1024*1024) -#define MAX_GAP (STACK_TOP/6*5) - static inline int mmap_is_legacy(struct rlimit *rlim_stack) { if (current->personality & ADDR_COMPAT_LAYOUT) @@ -60,13 +52,26 @@ static inline unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) { unsigned long gap = rlim_stack->rlim_cur; + unsigned long pad = stack_maxrandom_size() + stack_guard_gap; + unsigned long gap_min, gap_max; + + /* Values close to RLIM_INFINITY can overflow. */ + if (gap + pad > gap) + gap += pad; + + /* + * Top of mmap area (just below the process stack). + * Leave at least a ~32 MB hole. 
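+ * The rlimit-derived gap has stack_maxrandom_size() + stack_guard_gap
+ * added to it (with an overflow check for limits near RLIM_INFINITY)
+ * and is then clamped to the [gap_min, gap_max] range computed below.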
+ */ + gap_min = 32 * 1024 * 1024UL; + gap_max = (STACK_TOP / 6) * 5; + + if (gap < gap_min) + gap = gap_min; + else if (gap > gap_max) + gap = gap_max; - if (gap < MIN_GAP) - gap = MIN_GAP; - else if (gap > MAX_GAP) - gap = MAX_GAP; - gap &= PAGE_MASK; - return STACK_TOP - stack_maxrandom_size() - rnd - gap; + return PAGE_ALIGN(STACK_TOP - gap - rnd); } unsigned long diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index f2cc7da473e4..689b66f29fc6 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -318,7 +318,6 @@ pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, } return old; } -EXPORT_SYMBOL(ptep_modify_prot_start); void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) @@ -337,7 +336,6 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, } preempt_enable(); } -EXPORT_SYMBOL(ptep_modify_prot_commit); static inline void pmdp_idte_local(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 3ff758eeb71d..51dd0267d014 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1110,103 +1110,145 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i mask = 0xf000; /* j */ goto branch_oc; case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */ + case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */ mask = 0x2000; /* jh */ goto branch_ks; case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */ + case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */ mask = 0x4000; /* jl */ goto branch_ks; case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */ + case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */ mask = 0xa000; /* jhe */ goto branch_ks; case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */ + case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */ mask = 0xc000; /* jle */ goto branch_ks; case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */ + case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */ mask = 0x2000; /* jh */ goto branch_ku; case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */ + case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */ mask = 0x4000; /* jl */ goto branch_ku; case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */ + case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */ mask = 0xa000; /* jhe */ goto branch_ku; case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */ + case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */ mask = 0xc000; /* jle */ goto branch_ku; case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */ + case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */ mask = 0x7000; /* jne */ goto branch_ku; case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */ + case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */ mask = 0x8000; /* je */ goto branch_ku; case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */ + case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */ mask = 0x7000; /* jnz */ - /* lgfi %w1,imm (load sign extend imm) */ - EMIT6_IMM(0xc0010000, REG_W1, imm); - /* ngr %w1,%dst */ - EMIT4(0xb9800000, REG_W1, dst_reg); + if (BPF_CLASS(insn->code) == BPF_JMP32) { + /* llilf %w1,imm (load zero extend imm) */ + EMIT6_IMM(0xc00f0000, REG_W1, imm); + /* nr %w1,%dst */ + EMIT2(0x1400, REG_W1, dst_reg); + } else { + /* lgfi %w1,imm (load sign extend imm) */ 
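+ /* BPF_JMP tests the full 64-bit dst register, so the immediate is
+  * sign-extended before the 64-bit AND below; the BPF_JMP32 branch
+  * above zero-extends and uses a 32-bit AND instead.
+  */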
+ EMIT6_IMM(0xc0010000, REG_W1, imm); + /* ngr %w1,%dst */ + EMIT4(0xb9800000, REG_W1, dst_reg); + } goto branch_oc; case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */ + case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */ mask = 0x2000; /* jh */ goto branch_xs; case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */ + case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */ mask = 0x4000; /* jl */ goto branch_xs; case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */ + case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */ mask = 0xa000; /* jhe */ goto branch_xs; case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */ + case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */ mask = 0xc000; /* jle */ goto branch_xs; case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */ + case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */ mask = 0x2000; /* jh */ goto branch_xu; case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */ + case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */ mask = 0x4000; /* jl */ goto branch_xu; case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */ + case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */ mask = 0xa000; /* jhe */ goto branch_xu; case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */ + case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */ mask = 0xc000; /* jle */ goto branch_xu; case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */ + case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */ mask = 0x7000; /* jne */ goto branch_xu; case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */ + case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */ mask = 0x8000; /* je */ goto branch_xu; case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */ + case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */ + { + bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; + mask = 0x7000; /* jnz */ - /* ngrk %w1,%dst,%src */ - EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg); + /* nrk or ngrk %w1,%dst,%src */ + EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000), + REG_W1, dst_reg, src_reg); goto branch_oc; branch_ks: + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; /* lgfi %w1,imm (load sign extend imm) */ EMIT6_IMM(0xc0010000, REG_W1, imm); - /* cgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask); + /* crj or cgrj %dst,%w1,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), + dst_reg, REG_W1, i, off, mask); break; branch_ku: + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; /* lgfi %w1,imm (load sign extend imm) */ EMIT6_IMM(0xc0010000, REG_W1, imm); - /* clgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask); + /* clrj or clgrj %dst,%w1,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), + dst_reg, REG_W1, i, off, mask); break; branch_xs: - /* cgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask); + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; + /* crj or cgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), + dst_reg, src_reg, i, off, mask); break; branch_xu: - /* clgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask); + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; + /* clrj or clgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 
0x0077 : 0x0065), + dst_reg, src_reg, i, off, mask); break; branch_oc: /* brc mask,jmp_off (branch instruction needs 4 bytes) */ jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4); EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off); break; + } default: /* too complex, give up */ pr_err("Unknown opcode %02x\n", insn->code); return -1; diff --git a/arch/s390/net/pnet.c b/arch/s390/net/pnet.c index e22f1b10a6c7..79211bec0fc8 100644 --- a/arch/s390/net/pnet.c +++ b/arch/s390/net/pnet.c @@ -12,6 +12,15 @@ #include <asm/ccwgroup.h> #include <asm/ccwdev.h> #include <asm/pnet.h> +#include <asm/ebcdic.h> + +#define PNETIDS_LEN 64 /* Total utility string length in bytes + * to cover up to 4 PNETIDs of 16 bytes + * for up to 4 device ports + */ +#define MAX_PNETID_LEN 16 /* Max.length of a single port PNETID */ +#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN) + /* Max. # of ports with a PNETID */ /* * Get the PNETIDs from a device. @@ -40,6 +49,7 @@ static int pnet_ids_by_device(struct device *dev, u8 *pnetids) if (!util_str) return -ENOMEM; memcpy(pnetids, util_str, PNETIDS_LEN); + EBCASC(pnetids, PNETIDS_LEN); kfree(util_str); return 0; } @@ -47,6 +57,7 @@ static int pnet_ids_by_device(struct device *dev, u8 *pnetids) struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); memcpy(pnetids, zdev->util_str, sizeof(zdev->util_str)); + EBCASC(pnetids, sizeof(zdev->util_str)); return 0; } return -EOPNOTSUPP; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index a966d7bfac57..dc9bc82c072c 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -285,7 +285,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, struct zpci_dev *zdev = to_zpci(pdev); int idx; - if (!pci_resource_len(pdev, bar)) + if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT) return NULL; idx = zdev->bars[bar].map_idx; @@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq) if (ai == -1UL) break; inc_irq_stat(IRQIO_MSI); + airq_iv_lock(aibv, ai); generic_handle_irq(airq_iv_get_data(aibv, ai)); + airq_iv_unlock(aibv, ai); } } } @@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) zdev->aisb = aisb; /* Create adapter interrupt vector */ - zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA); + zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK); if (!zdev->aibv) return -ENOMEM; @@ -482,6 +484,15 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) } } +#ifdef CONFIG_PCI_IOV +static struct resource iov_res = { + .name = "PCI IOV res", + .start = 0, + .end = -1, + .flags = IORESOURCE_MEM, +}; +#endif + static void zpci_map_resources(struct pci_dev *pdev) { resource_size_t len; @@ -495,6 +506,17 @@ static void zpci_map_resources(struct pci_dev *pdev) (resource_size_t __force) pci_iomap(pdev, i, 0); pdev->resource[i].end = pdev->resource[i].start + len - 1; } + +#ifdef CONFIG_PCI_IOV + i = PCI_IOV_RESOURCES; + + for (; i < PCI_SRIOV_NUM_BARS + PCI_IOV_RESOURCES; i++) { + len = pci_resource_len(pdev, i); + if (!len) + continue; + pdev->resource[i].parent = &iov_res; + } +#endif } static void zpci_unmap_resources(struct pci_dev *pdev) diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c index 04388a254ffb..6b48ca7760a7 100644 --- a/arch/s390/pci/pci_debug.c +++ b/arch/s390/pci/pci_debug.c @@ -172,21 +172,14 @@ static const struct file_operations debugfs_pci_perf_fops = { void zpci_debug_init_device(struct zpci_dev *zdev, const char *name) { zdev->debugfs_dev = debugfs_create_dir(name, debugfs_root); - if (IS_ERR(zdev->debugfs_dev)) - 
zdev->debugfs_dev = NULL; - - zdev->debugfs_perf = debugfs_create_file("statistics", - S_IFREG | S_IRUGO | S_IWUSR, - zdev->debugfs_dev, zdev, - &debugfs_pci_perf_fops); - if (IS_ERR(zdev->debugfs_perf)) - zdev->debugfs_perf = NULL; + + debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR, + zdev->debugfs_dev, zdev, &debugfs_pci_perf_fops); } void zpci_debug_exit_device(struct zpci_dev *zdev) { - debugfs_remove(zdev->debugfs_perf); - debugfs_remove(zdev->debugfs_dev); + debugfs_remove_recursive(zdev->debugfs_dev); } int __init zpci_debug_init(void) diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 22b4106b8084..5495efa07335 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -630,7 +630,6 @@ static struct regulator_init_data cn12_power_init_data = { static struct fixed_voltage_config cn12_power_info = { .supply_name = "CN12 SD/MMC Vdd", .microvolts = 3300000, - .enable_high = 1, .init_data = &cn12_power_init_data, }; @@ -671,7 +670,6 @@ static struct regulator_init_data sdhi0_power_init_data = { static struct fixed_voltage_config sdhi0_power_info = { .supply_name = "CN11 SD/MMC Vdd", .microvolts = 3300000, - .enable_high = 1, .init_data = &sdhi0_power_init_data, }; diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile index 01d0f7fb14cc..2563d1e532e2 100644 --- a/arch/sh/boot/dts/Makefile +++ b/arch/sh/boot/dts/Makefile @@ -1,3 +1,3 @@ ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") -obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o +obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o endif diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h index 101c13c0c6ad..33d1d28057cb 100644 --- a/arch/sh/include/asm/segment.h +++ b/arch/sh/include/asm/segment.h @@ -26,7 +26,6 @@ typedef struct { #define segment_eq(a, b) ((a).seg == (b).seg) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index 56499ea39fd3..4884315daff4 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c @@ -53,7 +53,7 @@ static int des_set_key(struct crypto_tfm *tfm, const u8 *key, * weak key detection code. 
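 * (des_ekey() returns 0 when the key is weak; with
 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS set, that case is rejected with
 * -EINVAL below.)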
*/ ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -209,7 +209,7 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key, if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 5153798051fb..d6d8413eca83 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -25,7 +25,6 @@ #define KERNEL_DS ((mm_segment_t) { 0 }) #define USER_DS ((mm_segment_t) { -1 }) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 87ae9ffb1521..bf9d330073b2 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -31,7 +31,6 @@ #define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) -#define get_ds() (KERNEL_DS) #define segment_eq(a, b) ((a).seg == (b).seg) diff --git a/arch/sparc/include/uapi/asm/posix_types.h b/arch/sparc/include/uapi/asm/posix_types.h index fec499d6efb7..f139e0048628 100644 --- a/arch/sparc/include/uapi/asm/posix_types.h +++ b/arch/sparc/include/uapi/asm/posix_types.h @@ -19,6 +19,16 @@ typedef unsigned short __kernel_old_gid_t; typedef int __kernel_suseconds_t; #define __kernel_suseconds_t __kernel_suseconds_t +typedef long __kernel_long_t; +typedef unsigned long __kernel_ulong_t; +#define __kernel_long_t __kernel_long_t + +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_suseconds_t tv_usec; +}; +#define __kernel_old_timeval __kernel_old_timeval + #else /* sparc 32 bit */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 7ea35e5601b6..88fe4f978aca 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -3,6 +3,7 @@ #define _ASM_SOCKET_H #include <asm/sockios.h> +#include <asm/bitsperlong.h> /* For setsockopt(2) */ #define SOL_SOCKET 0xffff @@ -20,8 +21,8 @@ #define SO_BSDCOMPAT 0x0400 #define SO_RCVLOWAT 0x0800 #define SO_SNDLOWAT 0x1000 -#define SO_RCVTIMEO 0x2000 -#define SO_SNDTIMEO 0x4000 +#define SO_RCVTIMEO_OLD 0x2000 +#define SO_SNDTIMEO_OLD 0x4000 #define SO_ACCEPTCONN 0x8000 #define SO_SNDBUF 0x1001 @@ -33,7 +34,6 @@ #define SO_PROTOCOL 0x1028 #define SO_DOMAIN 0x1029 - /* Linux specific, keep the same. 
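 * (The SO_TIMESTAMP* and SO_RCVTIMEO/SO_SNDTIMEO options are now
 * provided through the *_OLD/*_NEW pairs; the userspace selection
 * between them, keyed on the size of time_t, sits at the end of
 * this header.)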
*/ #define SO_NO_CHECK 0x000b #define SO_PRIORITY 0x000c @@ -45,19 +45,12 @@ #define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 0x001c -#define SO_TIMESTAMP 0x001d -#define SCM_TIMESTAMP SO_TIMESTAMP #define SO_PEERSEC 0x001e #define SO_PASSSEC 0x001f -#define SO_TIMESTAMPNS 0x0021 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #define SO_MARK 0x0022 -#define SO_TIMESTAMPING 0x0023 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 0x0024 #define SO_WIFI_STATUS 0x0025 @@ -104,9 +97,47 @@ #define SO_TXTIME 0x003f #define SCM_TXTIME SO_TXTIME +#define SO_BINDTOIFINDEX 0x0041 + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 #define SO_SECURITY_ENCRYPTION_NETWORK 0x5004 +#define SO_TIMESTAMP_OLD 0x001d +#define SO_TIMESTAMPNS_OLD 0x0021 +#define SO_TIMESTAMPING_OLD 0x0023 + +#define SO_TIMESTAMP_NEW 0x0046 +#define SO_TIMESTAMPNS_NEW 0x0042 +#define SO_TIMESTAMPING_NEW 0x0043 + +#define SO_RCVTIMEO_NEW 0x0044 +#define SO_SNDTIMEO_NEW 0x0045 + +#if !defined(__KERNEL__) + + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _ASM_SOCKET_H */ diff --git a/arch/um/include/asm/a.out-core.h b/arch/um/include/asm/a.out-core.h deleted file mode 100644 index 995643b18309..000000000000 --- a/arch/um/include/asm/a.out-core.h +++ /dev/null @@ -1,27 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#ifndef __UM_A_OUT_CORE_H -#define __UM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include <linux/user.h> - -/* - * fill in the user structure for an a.out core dump - */ -static inline void aout_dump_thread(struct pt_regs *regs, struct user *u) -{ -} - -#endif /* __KERNEL__ */ -#endif /* __UM_A_OUT_CORE_H */ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 13c988009b2d..90b562a34d65 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -14,7 +14,6 @@ config X86_32 select ARCH_WANT_IPC_PARSE_VERSION select CLKSRC_I8253 select CLONE_BACKWARDS - select HAVE_AOUT select HAVE_GENERIC_DMA_COHERENT select MODULES_USE_ELF_REL select OLD_SIGACTION @@ -1511,6 +1510,7 @@ config AMD_MEM_ENCRYPT bool "AMD Secure Memory Encryption (SME) support" depends on X86_64 && CPU_SUP_AMD select DYNAMIC_PHYSICAL_MASK + select ARCH_USE_MEMREMAP_PROT ---help--- Say yes to enable support for the encryption of system memory. This requires an AMD processor that supports Secure Memory @@ -1530,10 +1530,6 @@ config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT If set to N, then the encryption of system memory can be activated with the mem_encrypt=on command line option. -config ARCH_USE_MEMREMAP_PROT - def_bool y - depends on AMD_MEM_ENCRYPT - # Common NUMA Features config NUMA bool "Numa Memory Allocation and Scheduler Support" @@ -2844,6 +2840,7 @@ config IA32_EMULATION config IA32_AOUT tristate "IA32 a.out support" depends on IA32_EMULATION + depends on BROKEN ---help--- Support old a.out binaries in the 32bit emulation. diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 0723dff17e6c..15d0fbe27872 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -40,16 +40,6 @@ config EARLY_PRINTK_DBGP with klogd/syslogd or the X server. You should normally say N here, unless you want to debug such a crash. You need usb debug device. -config EARLY_PRINTK_EFI - bool "Early printk via the EFI framebuffer" - depends on EFI && EARLY_PRINTK - select FONT_SUPPORT - ---help--- - Write kernel log output directly into the EFI framebuffer. - - This is useful for kernel debugging when your machine crashes very - early before the console code is initialized. 
- config EARLY_PRINTK_USB_XDBC bool "Early printk via the xHCI debug port" depends on EARLY_PRINTK && PCI diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index f105ae8651c9..f62e347862cc 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -602,10 +602,12 @@ ENTRY(trampoline_32bit_src) 3: /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */ pushl %ecx + pushl %edx movl $MSR_EFER, %ecx rdmsr btsl $_EFER_LME, %eax wrmsr + popl %edx popl %ecx /* Enable PAE and LA57 (if required) paging modes */ diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index 2a356b948720..3ea71b871813 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad( } static void crypto_aegis128_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS128_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS128_BLOCK_SIZE) { + ops->crypt_blocks(state, + round_down(walk->nbytes, AEGIS128_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv); crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_aesni_process_crypt(&state, req, ops); + crypto_aegis128_aesni_process_crypt(&state, &walk, ops); crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c index dbe8bb980da1..1b1b39c66c5e 100644 --- a/arch/x86/crypto/aegis128l-aesni-glue.c +++ b/arch/x86/crypto/aegis128l-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad( } static void crypto_aegis128l_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS128L_BLOCK_SIZE - 1; - - if (chunksize > 0) - 
ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) { + ops->crypt_blocks(state, round_down(walk->nbytes, + AEGIS128L_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv); crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis128l_aesni_process_crypt(&state, req, ops); + crypto_aegis128l_aesni_process_crypt(&state, &walk, ops); crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c index 8bebda2de92f..6227ca3220a0 100644 --- a/arch/x86/crypto/aegis256-aesni-glue.c +++ b/arch/x86/crypto/aegis256-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad( } static void crypto_aegis256_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS256_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS256_BLOCK_SIZE) { + ops->crypt_blocks(state, + round_down(walk->nbytes, AEGIS256_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis256_aesni_init(&state, ctx->key, req->iv); crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis256_aesni_process_crypt(&state, req, ops); + crypto_aegis256_aesni_process_crypt(&state, &walk, ops); crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 1321700d6647..1e3d2102033a 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -175,26 +175,18 @@ asmlinkage void aesni_gcm_finalize(void *ctx, struct gcm_context_data *gdata, u8 *auth_tag, unsigned long auth_tag_len); -static struct aesni_gcm_tfm_s { -void (*init)(void *ctx, - struct 
gcm_context_data *gdata, - u8 *iv, - u8 *hash_subkey, const u8 *aad, - unsigned long aad_len); -void (*enc_update)(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, - unsigned long plaintext_len); -void (*dec_update)(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, - unsigned long ciphertext_len); -void (*finalize)(void *ctx, - struct gcm_context_data *gdata, - u8 *auth_tag, unsigned long auth_tag_len); +static const struct aesni_gcm_tfm_s { + void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv, + u8 *hash_subkey, const u8 *aad, unsigned long aad_len); + void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, + const u8 *in, unsigned long plaintext_len); + void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, + const u8 *in, unsigned long ciphertext_len); + void (*finalize)(void *ctx, struct gcm_context_data *gdata, + u8 *auth_tag, unsigned long auth_tag_len); } *aesni_gcm_tfm; -struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { +static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { .init = &aesni_gcm_init, .enc_update = &aesni_gcm_enc_update, .dec_update = &aesni_gcm_dec_update, @@ -243,7 +235,7 @@ asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { +static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { .init = &aesni_gcm_init_avx_gen2, .enc_update = &aesni_gcm_enc_update_avx_gen2, .dec_update = &aesni_gcm_dec_update_avx_gen2, @@ -288,7 +280,7 @@ asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { +static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { .init = &aesni_gcm_init_avx_gen4, .enc_update = &aesni_gcm_enc_update_avx_gen4, .dec_update = &aesni_gcm_dec_update_avx_gen4, @@ -778,7 +770,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned long auth_tag_len = crypto_aead_authsize(tfm); - struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; + const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; struct gcm_context_data data AESNI_ALIGN_ATTR; struct scatter_walk dst_sg_walk = {}; unsigned long left = req->cryptlen; @@ -821,11 +813,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0); } - src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); - scatterwalk_start(&src_sg_walk, src_sg); - if (req->src != req->dst) { - dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen); - scatterwalk_start(&dst_sg_walk, dst_sg); + if (left) { + src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); + scatterwalk_start(&src_sg_walk, src_sg); + if (req->src != req->dst) { + dst_sg = scatterwalk_ffwd(dst_start, req->dst, + req->assoclen); + scatterwalk_start(&dst_sg_walk, dst_sg); + } } kernel_fpu_begin(); diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S index de04d3e98d8d..3d873e67749d 100644 --- a/arch/x86/crypto/crct10dif-pcl-asm_64.S +++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -43,609 +43,291 @@ # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
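# The reduction below works by "folding": CRC arithmetic is carry-less
# arithmetic in GF(2)[x] modulo G(x), so for data split as A(x)*x^N + B(x),
# where A(x) lies N bits ahead of B(x),
#
#   (A(x)*x^N + B(x)) mod G(x) = (A(x)*(x^N mod G(x)) + B(x)) mod G(x).
#
# Each fold step therefore multiplies a 16-byte register by a precomputed
# x^N mod G(x) constant (two pclmulqdq operations, one per 64-bit half) and
# XORs the product into the data N bits further on, until only 16 bytes
# remain and a final Barrett reduction yields the 16-bit CRC.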
-######################################################################## -# Function API: -# UINT16 crc_t10dif_pcl( -# UINT16 init_crc, //initial CRC value, 16 bits -# const unsigned char *buf, //buffer pointer to calculate CRC on -# UINT64 len //buffer length in bytes (64-bit data) -# ); # # Reference paper titled "Fast CRC Computation for Generic # Polynomials Using PCLMULQDQ Instruction" # URL: http://www.intel.com/content/dam/www/public/us/en/documents # /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf # -# #include <linux/linkage.h> .text -#define arg1 %rdi -#define arg2 %rsi -#define arg3 %rdx - -#define arg1_low32 %edi +#define init_crc %edi +#define buf %rsi +#define len %rdx + +#define FOLD_CONSTS %xmm10 +#define BSWAP_MASK %xmm11 + +# Fold reg1, reg2 into the next 32 data bytes, storing the result back into +# reg1, reg2. +.macro fold_32_bytes offset, reg1, reg2 + movdqu \offset(buf), %xmm9 + movdqu \offset+16(buf), %xmm12 + pshufb BSWAP_MASK, %xmm9 + pshufb BSWAP_MASK, %xmm12 + movdqa \reg1, %xmm8 + movdqa \reg2, %xmm13 + pclmulqdq $0x00, FOLD_CONSTS, \reg1 + pclmulqdq $0x11, FOLD_CONSTS, %xmm8 + pclmulqdq $0x00, FOLD_CONSTS, \reg2 + pclmulqdq $0x11, FOLD_CONSTS, %xmm13 + pxor %xmm9 , \reg1 + xorps %xmm8 , \reg1 + pxor %xmm12, \reg2 + xorps %xmm13, \reg2 +.endm + +# Fold src_reg into dst_reg. +.macro fold_16_bytes src_reg, dst_reg + movdqa \src_reg, %xmm8 + pclmulqdq $0x11, FOLD_CONSTS, \src_reg + pclmulqdq $0x00, FOLD_CONSTS, %xmm8 + pxor %xmm8, \dst_reg + xorps \src_reg, \dst_reg +.endm -ENTRY(crc_t10dif_pcl) +# +# u16 crc_t10dif_pcl(u16 init_crc, const *u8 buf, size_t len); +# +# Assumes len >= 16. +# .align 16 +ENTRY(crc_t10dif_pcl) - # adjust the 16-bit initial_crc value, scale it to 32 bits - shl $16, arg1_low32 - - # Allocate Stack Space - mov %rsp, %rcx - sub $16*2, %rsp - # align stack to 16 byte boundary - and $~(0x10 - 1), %rsp - - # check if smaller than 256 - cmp $256, arg3 - - # for sizes less than 128, we can't fold 64B at a time... - jl _less_than_128 - - - # load the initial crc value - movd arg1_low32, %xmm10 # initial crc - - # crc value does not need to be byte-reflected, but it needs - # to be moved to the high part of the register. - # because data will be byte-reflected and will align with - # initial crc at correct place. - pslldq $12, %xmm10 - - movdqa SHUF_MASK(%rip), %xmm11 - # receive the initial 64B data, xor the initial crc value - movdqu 16*0(arg2), %xmm0 - movdqu 16*1(arg2), %xmm1 - movdqu 16*2(arg2), %xmm2 - movdqu 16*3(arg2), %xmm3 - movdqu 16*4(arg2), %xmm4 - movdqu 16*5(arg2), %xmm5 - movdqu 16*6(arg2), %xmm6 - movdqu 16*7(arg2), %xmm7 - - pshufb %xmm11, %xmm0 - # XOR the initial_crc value - pxor %xmm10, %xmm0 - pshufb %xmm11, %xmm1 - pshufb %xmm11, %xmm2 - pshufb %xmm11, %xmm3 - pshufb %xmm11, %xmm4 - pshufb %xmm11, %xmm5 - pshufb %xmm11, %xmm6 - pshufb %xmm11, %xmm7 - - movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4 - #imm value of pclmulqdq instruction - #will determine which constant to use - - ################################################################# - # we subtract 256 instead of 128 to save one instruction from the loop - sub $256, arg3 - - # at this section of the code, there is 64*x+y (0<=y<64) bytes of - # buffer. The _fold_64_B_loop will fold 64B at a time - # until we have 64+y Bytes of buffer - - - # fold 64B at a time. 
This section of the code folds 4 xmm - # registers in parallel -_fold_64_B_loop: - - # update the buffer pointer - add $128, arg2 # buf += 64# - - movdqu 16*0(arg2), %xmm9 - movdqu 16*1(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm0, %xmm8 - movdqa %xmm1, %xmm13 - pclmulqdq $0x0 , %xmm10, %xmm0 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0 , %xmm10, %xmm1 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm0 - xorps %xmm8 , %xmm0 - pxor %xmm12, %xmm1 - xorps %xmm13, %xmm1 - - movdqu 16*2(arg2), %xmm9 - movdqu 16*3(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm2, %xmm8 - movdqa %xmm3, %xmm13 - pclmulqdq $0x0, %xmm10, %xmm2 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0, %xmm10, %xmm3 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm2 - xorps %xmm8 , %xmm2 - pxor %xmm12, %xmm3 - xorps %xmm13, %xmm3 - - movdqu 16*4(arg2), %xmm9 - movdqu 16*5(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm4, %xmm8 - movdqa %xmm5, %xmm13 - pclmulqdq $0x0, %xmm10, %xmm4 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0, %xmm10, %xmm5 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm4 - xorps %xmm8 , %xmm4 - pxor %xmm12, %xmm5 - xorps %xmm13, %xmm5 - - movdqu 16*6(arg2), %xmm9 - movdqu 16*7(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm6 , %xmm8 - movdqa %xmm7 , %xmm13 - pclmulqdq $0x0 , %xmm10, %xmm6 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0 , %xmm10, %xmm7 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm6 - xorps %xmm8 , %xmm6 - pxor %xmm12, %xmm7 - xorps %xmm13, %xmm7 - - sub $128, arg3 - - # check if there is another 64B in the buffer to be able to fold - jge _fold_64_B_loop - ################################################################## - - - add $128, arg2 - # at this point, the buffer pointer is pointing at the last y Bytes - # of the buffer the 64B of folded data is in 4 of the xmm - # registers: xmm0, xmm1, xmm2, xmm3 - - - # fold the 8 xmm registers to 1 xmm register with different constants - - movdqa rk9(%rip), %xmm10 - movdqa %xmm0, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm0 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm0, %xmm7 - - movdqa rk11(%rip), %xmm10 - movdqa %xmm1, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm1 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm1, %xmm7 - - movdqa rk13(%rip), %xmm10 - movdqa %xmm2, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm2 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - pxor %xmm2, %xmm7 - - movdqa rk15(%rip), %xmm10 - movdqa %xmm3, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm3 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm3, %xmm7 - - movdqa rk17(%rip), %xmm10 - movdqa %xmm4, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm4 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - pxor %xmm4, %xmm7 - - movdqa rk19(%rip), %xmm10 - movdqa %xmm5, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm5 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm5, %xmm7 - - movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2 - #imm value of pclmulqdq instruction - #will determine which constant to use - movdqa %xmm6, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm6 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - pxor %xmm6, %xmm7 - - - # instead of 64, we add 48 to the loop counter to save 1 instruction - # from the loop instead of a cmp instruction, we use the negative - # flag with the jl instruction - add $128-16, arg3 - jl _final_reduction_for_128 - - # now we have 16+y bytes left to reduce. 
16 Bytes is in register xmm7 - # and the rest is in memory. We can fold 16 bytes at a time if y>=16 - # continue folding 16B at a time - -_16B_reduction_loop: + movdqa .Lbswap_mask(%rip), BSWAP_MASK + + # For sizes less than 256 bytes, we can't fold 128 bytes at a time. + cmp $256, len + jl .Lless_than_256_bytes + + # Load the first 128 data bytes. Byte swapping is necessary to make the + # bit order match the polynomial coefficient order. + movdqu 16*0(buf), %xmm0 + movdqu 16*1(buf), %xmm1 + movdqu 16*2(buf), %xmm2 + movdqu 16*3(buf), %xmm3 + movdqu 16*4(buf), %xmm4 + movdqu 16*5(buf), %xmm5 + movdqu 16*6(buf), %xmm6 + movdqu 16*7(buf), %xmm7 + add $128, buf + pshufb BSWAP_MASK, %xmm0 + pshufb BSWAP_MASK, %xmm1 + pshufb BSWAP_MASK, %xmm2 + pshufb BSWAP_MASK, %xmm3 + pshufb BSWAP_MASK, %xmm4 + pshufb BSWAP_MASK, %xmm5 + pshufb BSWAP_MASK, %xmm6 + pshufb BSWAP_MASK, %xmm7 + + # XOR the first 16 data *bits* with the initial CRC value. + pxor %xmm8, %xmm8 + pinsrw $7, init_crc, %xmm8 + pxor %xmm8, %xmm0 + + movdqa .Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS + + # Subtract 128 for the 128 data bytes just consumed. Subtract another + # 128 to simplify the termination condition of the following loop. + sub $256, len + + # While >= 128 data bytes remain (not counting xmm0-7), fold the 128 + # bytes xmm0-7 into them, storing the result back into xmm0-7. +.Lfold_128_bytes_loop: + fold_32_bytes 0, %xmm0, %xmm1 + fold_32_bytes 32, %xmm2, %xmm3 + fold_32_bytes 64, %xmm4, %xmm5 + fold_32_bytes 96, %xmm6, %xmm7 + add $128, buf + sub $128, len + jge .Lfold_128_bytes_loop + + # Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7. + + # Fold across 64 bytes. + movdqa .Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS + fold_16_bytes %xmm0, %xmm4 + fold_16_bytes %xmm1, %xmm5 + fold_16_bytes %xmm2, %xmm6 + fold_16_bytes %xmm3, %xmm7 + # Fold across 32 bytes. + movdqa .Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS + fold_16_bytes %xmm4, %xmm6 + fold_16_bytes %xmm5, %xmm7 + # Fold across 16 bytes. + movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS + fold_16_bytes %xmm6, %xmm7 + + # Add 128 to get the correct number of data bytes remaining in 0...127 + # (not counting xmm7), following the previous extra subtraction by 128. + # Then subtract 16 to simplify the termination condition of the + # following loop. + add $128-16, len + + # While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes + # xmm7 into them, storing the result back into xmm7. + jl .Lfold_16_bytes_loop_done +.Lfold_16_bytes_loop: movdqa %xmm7, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm7 - pclmulqdq $0x0 , %xmm10, %xmm8 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 + pclmulqdq $0x00, FOLD_CONSTS, %xmm8 pxor %xmm8, %xmm7 - movdqu (arg2), %xmm0 - pshufb %xmm11, %xmm0 + movdqu (buf), %xmm0 + pshufb BSWAP_MASK, %xmm0 pxor %xmm0 , %xmm7 - add $16, arg2 - sub $16, arg3 - # instead of a cmp instruction, we utilize the flags with the - # jge instruction equivalent of: cmp arg3, 16-16 - # check if there is any more 16B in the buffer to be able to fold - jge _16B_reduction_loop - - #now we have 16+z bytes left to reduce, where 0<= z < 16. - #first, we reduce the data in the xmm7 register - - -_final_reduction_for_128: - # check if any more data to fold. If not, compute the CRC of - # the final 128 bits - add $16, arg3 - je _128_done - - # here we are getting data that is less than 16 bytes. 
- # since we know that there was data before the pointer, we can - # offset the input pointer before the actual point, to receive - # exactly 16 bytes. after that the registers need to be adjusted. -_get_last_two_xmms: + add $16, buf + sub $16, len + jge .Lfold_16_bytes_loop + +.Lfold_16_bytes_loop_done: + # Add 16 to get the correct number of data bytes remaining in 0...15 + # (not counting xmm7), following the previous extra subtraction by 16. + add $16, len + je .Lreduce_final_16_bytes + +.Lhandle_partial_segment: + # Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first 16 + # bytes are in xmm7 and the rest are the remaining data in 'buf'. To do + # this without needing a fold constant for each possible 'len', redivide + # the bytes into a first chunk of 'len' bytes and a second chunk of 16 + # bytes, then fold the first chunk into the second. + movdqa %xmm7, %xmm2 - movdqu -16(arg2, arg3), %xmm1 - pshufb %xmm11, %xmm1 + # xmm1 = last 16 original data bytes + movdqu -16(buf, len), %xmm1 + pshufb BSWAP_MASK, %xmm1 - # get rid of the extra data that was loaded before - # load the shift constant - lea pshufb_shf_table+16(%rip), %rax - sub arg3, %rax + # xmm2 = high order part of second chunk: xmm7 left-shifted by 'len' bytes. + lea .Lbyteshift_table+16(%rip), %rax + sub len, %rax movdqu (%rax), %xmm0 - - # shift xmm2 to the left by arg3 bytes pshufb %xmm0, %xmm2 - # shift xmm7 to the right by 16-arg3 bytes - pxor mask1(%rip), %xmm0 + # xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes. + pxor .Lmask1(%rip), %xmm0 pshufb %xmm0, %xmm7 + + # xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes), + # then '16-len' bytes from xmm2 (high-order bytes). pblendvb %xmm2, %xmm1 #xmm0 is implicit - # fold 16 Bytes - movdqa %xmm1, %xmm2 + # Fold the first chunk into the second chunk, storing the result in xmm7. movdqa %xmm7, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm7 - pclmulqdq $0x0 , %xmm10, %xmm8 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 + pclmulqdq $0x00, FOLD_CONSTS, %xmm8 pxor %xmm8, %xmm7 - pxor %xmm2, %xmm7 + pxor %xmm1, %xmm7 -_128_done: - # compute crc of a 128-bit value - movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10 - movdqa %xmm7, %xmm0 +.Lreduce_final_16_bytes: + # Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC - #64b fold - pclmulqdq $0x1, %xmm10, %xmm7 - pslldq $8 , %xmm0 - pxor %xmm0, %xmm7 + # Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. + movdqa .Lfinal_fold_consts(%rip), FOLD_CONSTS - #32b fold + # Fold the high 64 bits into the low 64 bits, while also multiplying by + # x^64. This produces a 128-bit value congruent to x^64 * M(x) and + # whose low 48 bits are 0. movdqa %xmm7, %xmm0 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x)) + pslldq $8, %xmm0 + pxor %xmm0, %xmm7 # + low bits * x^64 - pand mask2(%rip), %xmm0 - - psrldq $12, %xmm7 - pclmulqdq $0x10, %xmm10, %xmm7 - pxor %xmm0, %xmm7 - - #barrett reduction -_barrett: - movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10 + # Fold the high 32 bits into the low 96 bits. This produces a 96-bit + # value congruent to x^64 * M(x) and whose low 48 bits are 0. 
movdqa %xmm7, %xmm0 - pclmulqdq $0x01, %xmm10, %xmm7 - pslldq $4, %xmm7 - pclmulqdq $0x11, %xmm10, %xmm7 + pand .Lmask2(%rip), %xmm0 # zero high 32 bits + psrldq $12, %xmm7 # extract high 32 bits + pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x)) + pxor %xmm0, %xmm7 # + low bits - pslldq $4, %xmm7 - pxor %xmm0, %xmm7 - pextrd $1, %xmm7, %eax + # Load G(x) and floor(x^48 / G(x)). + movdqa .Lbarrett_reduction_consts(%rip), FOLD_CONSTS -_cleanup: - # scale the result back to 16 bits - shr $16, %eax - mov %rcx, %rsp + # Use Barrett reduction to compute the final CRC value. + movdqa %xmm7, %xmm0 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x)) + psrlq $32, %xmm7 # /= x^32 + pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # *= G(x) + psrlq $48, %xmm0 + pxor %xmm7, %xmm0 # + low 16 nonzero bits + # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0. + + pextrw $0, %xmm0, %eax ret -######################################################################## - .align 16 -_less_than_128: - - # check if there is enough buffer to be able to fold 16B at a time - cmp $32, arg3 - jl _less_than_32 - movdqa SHUF_MASK(%rip), %xmm11 +.Lless_than_256_bytes: + # Checksumming a buffer of length 16...255 bytes - # now if there is, load the constants - movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 + # Load the first 16 data bytes. + movdqu (buf), %xmm7 + pshufb BSWAP_MASK, %xmm7 + add $16, buf - movd arg1_low32, %xmm0 # get the initial crc value - pslldq $12, %xmm0 # align it to its correct place - movdqu (arg2), %xmm7 # load the plaintext - pshufb %xmm11, %xmm7 # byte-reflect the plaintext + # XOR the first 16 data *bits* with the initial CRC value. + pxor %xmm0, %xmm0 + pinsrw $7, init_crc, %xmm0 pxor %xmm0, %xmm7 - - # update the buffer pointer - add $16, arg2 - - # update the counter. subtract 32 instead of 16 to save one - # instruction from the loop - sub $32, arg3 - - jmp _16B_reduction_loop - - -.align 16 -_less_than_32: - # mov initial crc to the return value. this is necessary for - # zero-length buffers. - mov arg1_low32, %eax - test arg3, arg3 - je _cleanup - - movdqa SHUF_MASK(%rip), %xmm11 - - movd arg1_low32, %xmm0 # get the initial crc value - pslldq $12, %xmm0 # align it to its correct place - - cmp $16, arg3 - je _exact_16_left - jl _less_than_16_left - - movdqu (arg2), %xmm7 # load the plaintext - pshufb %xmm11, %xmm7 # byte-reflect the plaintext - pxor %xmm0 , %xmm7 # xor the initial crc value - add $16, arg2 - sub $16, arg3 - movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 - jmp _get_last_two_xmms - - -.align 16 -_less_than_16_left: - # use stack space to load data less than 16 bytes, zero-out - # the 16B in memory first. 
- - pxor %xmm1, %xmm1 - mov %rsp, %r11 - movdqa %xmm1, (%r11) - - cmp $4, arg3 - jl _only_less_than_4 - - # backup the counter value - mov arg3, %r9 - cmp $8, arg3 - jl _less_than_8_left - - # load 8 Bytes - mov (arg2), %rax - mov %rax, (%r11) - add $8, %r11 - sub $8, arg3 - add $8, arg2 -_less_than_8_left: - - cmp $4, arg3 - jl _less_than_4_left - - # load 4 Bytes - mov (arg2), %eax - mov %eax, (%r11) - add $4, %r11 - sub $4, arg3 - add $4, arg2 -_less_than_4_left: - - cmp $2, arg3 - jl _less_than_2_left - - # load 2 Bytes - mov (arg2), %ax - mov %ax, (%r11) - add $2, %r11 - sub $2, arg3 - add $2, arg2 -_less_than_2_left: - cmp $1, arg3 - jl _zero_left - - # load 1 Byte - mov (arg2), %al - mov %al, (%r11) -_zero_left: - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - # shl r9, 4 - lea pshufb_shf_table+16(%rip), %rax - sub %r9, %rax - movdqu (%rax), %xmm0 - pxor mask1(%rip), %xmm0 - - pshufb %xmm0, %xmm7 - jmp _128_done - -.align 16 -_exact_16_left: - movdqu (arg2), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - jmp _128_done - -_only_less_than_4: - cmp $3, arg3 - jl _only_less_than_3 - - # load 3 Bytes - mov (arg2), %al - mov %al, (%r11) - - mov 1(arg2), %al - mov %al, 1(%r11) - - mov 2(arg2), %al - mov %al, 2(%r11) - - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - psrldq $5, %xmm7 - - jmp _barrett -_only_less_than_3: - cmp $2, arg3 - jl _only_less_than_2 - - # load 2 Bytes - mov (arg2), %al - mov %al, (%r11) - - mov 1(arg2), %al - mov %al, 1(%r11) - - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - psrldq $6, %xmm7 - - jmp _barrett -_only_less_than_2: - - # load 1 Byte - mov (arg2), %al - mov %al, (%r11) - - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - psrldq $7, %xmm7 - - jmp _barrett - + movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS + cmp $16, len + je .Lreduce_final_16_bytes # len == 16 + sub $32, len + jge .Lfold_16_bytes_loop # 32 <= len <= 255 + add $16, len + jmp .Lhandle_partial_segment # 17 <= len <= 31 ENDPROC(crc_t10dif_pcl) .section .rodata, "a", @progbits .align 16 -# precomputed constants -# these constants are precomputed from the poly: -# 0x8bb70000 (0x8bb7 scaled to 32 bits) -# Q = 0x18BB70000 -# rk1 = 2^(32*3) mod Q << 32 -# rk2 = 2^(32*5) mod Q << 32 -# rk3 = 2^(32*15) mod Q << 32 -# rk4 = 2^(32*17) mod Q << 32 -# rk5 = 2^(32*3) mod Q << 32 -# rk6 = 2^(32*2) mod Q << 32 -# rk7 = floor(2^64/Q) -# rk8 = Q -rk1: -.quad 0x2d56000000000000 -rk2: -.quad 0x06df000000000000 -rk3: -.quad 0x9d9d000000000000 -rk4: -.quad 0x7cf5000000000000 -rk5: -.quad 0x2d56000000000000 -rk6: -.quad 0x1368000000000000 -rk7: -.quad 0x00000001f65a57f8 -rk8: -.quad 0x000000018bb70000 - -rk9: -.quad 0xceae000000000000 -rk10: -.quad 0xbfd6000000000000 -rk11: -.quad 0x1e16000000000000 -rk12: -.quad 0x713c000000000000 -rk13: -.quad 0xf7f9000000000000 -rk14: -.quad 0x80a6000000000000 -rk15: -.quad 0x044c000000000000 -rk16: -.quad 0xe658000000000000 -rk17: -.quad 0xad18000000000000 -rk18: -.quad 0xa497000000000000 -rk19: -.quad 0x6ee3000000000000 -rk20: -.quad 0xe7b5000000000000 - +# Fold constants precomputed from the polynomial 0x18bb7 +# G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 +.Lfold_across_128_bytes_consts: + .quad 0x0000000000006123 # x^(8*128) mod G(x) + .quad 0x0000000000002295 # x^(8*128+64) mod G(x) 
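+# (Each pair is consumed by the fold macros above: pclmulqdq $0x00
+# multiplies the low 64 bits of a data register by the first constant,
+# and pclmulqdq $0x11 multiplies the high 64 bits by the second.)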
+.Lfold_across_64_bytes_consts: + .quad 0x0000000000001069 # x^(4*128) mod G(x) + .quad 0x000000000000dd31 # x^(4*128+64) mod G(x) +.Lfold_across_32_bytes_consts: + .quad 0x000000000000857d # x^(2*128) mod G(x) + .quad 0x0000000000007acc # x^(2*128+64) mod G(x) +.Lfold_across_16_bytes_consts: + .quad 0x000000000000a010 # x^(1*128) mod G(x) + .quad 0x0000000000001faa # x^(1*128+64) mod G(x) +.Lfinal_fold_consts: + .quad 0x1368000000000000 # x^48 * (x^48 mod G(x)) + .quad 0x2d56000000000000 # x^48 * (x^80 mod G(x)) +.Lbarrett_reduction_consts: + .quad 0x0000000000018bb7 # G(x) + .quad 0x00000001f65a57f8 # floor(x^48 / G(x)) .section .rodata.cst16.mask1, "aM", @progbits, 16 .align 16 -mask1: -.octa 0x80808080808080808080808080808080 +.Lmask1: + .octa 0x80808080808080808080808080808080 .section .rodata.cst16.mask2, "aM", @progbits, 16 .align 16 -mask2: -.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF +.Lmask2: + .octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF + +.section .rodata.cst16.bswap_mask, "aM", @progbits, 16 +.align 16 +.Lbswap_mask: + .octa 0x000102030405060708090A0B0C0D0E0F -.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16 +.section .rodata.cst32.byteshift_table, "aM", @progbits, 32 .align 16 -SHUF_MASK: -.octa 0x000102030405060708090A0B0C0D0E0F - -.section .rodata.cst32.pshufb_shf_table, "aM", @progbits, 32 -.align 32 -pshufb_shf_table: -# use these values for shift constants for the pshufb instruction -# different alignments result in values as shown: -# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 -# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 -# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 -# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 -# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 -# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 -# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 -# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 -# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 -# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 -# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 -# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 -# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 -# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 -# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 -.octa 0x8f8e8d8c8b8a89888786858483828100 -.octa 0x000e0d0c0b0a09080706050403020100 +# For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - len] +# is the index vector to shift left by 'len' bytes, and is also {0x80, ..., +# 0x80} XOR the index vector to shift right by '16 - len' bytes. 
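+# For example, for len = 2 the vector starting at &byteshift_table[14] is
+# {0x8e, 0x8f, 0x00, 0x01, ..., 0x0d}: as a pshufb mask it moves every byte
+# up two positions and zeroes the bottom two (a left shift by 2 bytes), and
+# XORed with {0x80, ..., 0x80} it becomes {0x0e, 0x0f, 0x80, ..., 0x8d},
+# which keeps only the two highest source bytes (a right shift by 14 bytes).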
+.Lbyteshift_table: + .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 + .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f + .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 + .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0 diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c index cd4df9322501..0e785c0b2354 100644 --- a/arch/x86/crypto/crct10dif-pclmul_glue.c +++ b/arch/x86/crypto/crct10dif-pclmul_glue.c @@ -33,18 +33,12 @@ #include <asm/cpufeatures.h> #include <asm/cpu_device_id.h> -asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf, - size_t len); +asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len); struct chksum_desc_ctx { __u16 crc; }; -/* - * Steps through buffer one byte at at time, calculates reflected - * crc using table. - */ - static int chksum_init(struct shash_desc *desc) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); @@ -59,7 +53,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - if (irq_fpu_usable()) { + if (length >= 16 && irq_fpu_usable()) { kernel_fpu_begin(); ctx->crc = crc_t10dif_pcl(ctx->crc, data, length); kernel_fpu_end(); @@ -79,7 +73,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out) static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, u8 *out) { - if (irq_fpu_usable()) { + if (len >= 16 && irq_fpu_usable()) { kernel_fpu_begin(); *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len); kernel_fpu_end(); diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c index 0dccdda1eb3a..7e600f8bcdad 100644 --- a/arch/x86/crypto/morus1280_glue.c +++ b/arch/x86/crypto/morus1280_glue.c @@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad( static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state, struct morus1280_ops ops, - struct aead_request *req) + struct skcipher_walk *walk) { - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1); - cursor_src += base; - cursor_dst += base; - chunksize &= MORUS1280_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); + while (walk->nbytes >= MORUS1280_BLOCK_SIZE) { + ops.crypt_blocks(state, walk->src.virt.addr, + walk->dst.virt.addr, + round_down(walk->nbytes, + MORUS1280_BLOCK_SIZE)); + skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr, + walk->nbytes); + skcipher_walk_done(walk, 0); } } @@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); struct morus1280_state state; + struct skcipher_walk walk; + + ops.skcipher_walk_init(&walk, req, true); kernel_fpu_begin(); ctx->ops->init(&state, &ctx->key, req->iv); crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus1280_glue_process_crypt(&state, ops, req); + crypto_morus1280_glue_process_crypt(&state, ops, &walk); ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); 
kernel_fpu_end(); diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c index 7b58fe4d9bd1..cb3a81732016 100644 --- a/arch/x86/crypto/morus640_glue.c +++ b/arch/x86/crypto/morus640_glue.c @@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad( static void crypto_morus640_glue_process_crypt(struct morus640_state *state, struct morus640_ops ops, - struct aead_request *req) + struct skcipher_walk *walk) { - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS640_BLOCK_SIZE - 1); - cursor_src += base; - cursor_dst += base; - chunksize &= MORUS640_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); + while (walk->nbytes >= MORUS640_BLOCK_SIZE) { + ops.crypt_blocks(state, walk->src.virt.addr, + walk->dst.virt.addr, + round_down(walk->nbytes, MORUS640_BLOCK_SIZE)); + skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr, + walk->nbytes); + skcipher_walk_done(walk, 0); } } @@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct morus640_ctx *ctx = crypto_aead_ctx(tfm); struct morus640_state state; + struct skcipher_walk walk; + + ops.skcipher_walk_init(&walk, req, true); kernel_fpu_begin(); ctx->ops->init(&state, &ctx->key, req->iv); crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus640_glue_process_crypt(&state, ops, req); + crypto_morus640_glue_process_crypt(&state, ops, &walk); ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S index c88c670cb5fc..e6add74d78a5 100644 --- a/arch/x86/crypto/poly1305-sse2-x86_64.S +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S @@ -272,6 +272,10 @@ ENTRY(poly1305_block_sse2) dec %rcx jnz .Ldoblock + # Zeroing of key material + mov %rcx,0x00(%rsp) + mov %rcx,0x08(%rsp) + add $0x10,%rsp pop %r12 pop %rbx diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index d50bb4dc0650..62f317c9113a 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -253,15 +253,6 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config) return -EOPNOTSUPP; } -static const struct perf_event_attr ibs_notsupp = { - .exclude_user = 1, - .exclude_kernel = 1, - .exclude_hv = 1, - .exclude_idle = 1, - .exclude_host = 1, - .exclude_guest = 1, -}; - static int perf_ibs_init(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -282,9 +273,6 @@ static int perf_ibs_init(struct perf_event *event) if (event->pmu != &perf_ibs->pmu) return -ENOENT; - if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp)) - return -EINVAL; - if (config & ~perf_ibs->config_mask) return -EINVAL; @@ -537,6 +525,7 @@ static struct perf_ibs perf_ibs_fetch = { .start = perf_ibs_start, .stop = perf_ibs_stop, .read = perf_ibs_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }, .msr = MSR_AMD64_IBSFETCHCTL, .config_mask = IBS_FETCH_CONFIG_MASK, diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c 
index 3210fee27e7f..7635c23f7d82 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -223,11 +223,6 @@ static int perf_iommu_event_init(struct perf_event *event) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EINVAL; - /* IOMMU counters do not have usr/os/guest/host bits */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_host || event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; @@ -414,6 +409,7 @@ static const struct pmu iommu_pmu __initconst = { .read = perf_iommu_read, .task_ctx_nr = perf_invalid_context, .attr_groups = amd_iommu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static __init int init_one_iommu(unsigned int idx) diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c index 2aefacf5c5b2..c5ff084551c6 100644 --- a/arch/x86/events/amd/power.c +++ b/arch/x86/events/amd/power.c @@ -136,14 +136,7 @@ static int pmu_event_init(struct perf_event *event) return -ENOENT; /* Unsupported modes and filters. */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - /* no sampling */ - event->attr.sample_period) + if (event->attr.sample_period) return -EINVAL; if (cfg != AMD_POWER_EVENTSEL_PKG) @@ -226,6 +219,7 @@ static struct pmu pmu_class = { .start = pmu_event_start, .stop = pmu_event_stop, .read = pmu_event_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static int power_cpu_exit(unsigned int cpu) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 398df6eaa109..79cfd3b30ceb 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -201,11 +201,6 @@ static int amd_uncore_event_init(struct perf_event *event) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EINVAL; - /* NB and Last level cache counters do not have usr/os/guest/host bits */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_host || event->attr.exclude_guest) - return -EINVAL; - /* and we do not enable counter overflow interrupts */ hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; hwc->idx = -1; @@ -307,6 +302,7 @@ static struct pmu amd_nb_pmu = { .start = amd_uncore_start, .stop = amd_uncore_stop, .read = amd_uncore_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static struct pmu amd_llc_pmu = { @@ -317,6 +313,7 @@ static struct pmu amd_llc_pmu = { .start = amd_uncore_start, .stop = amd_uncore_stop, .read = amd_uncore_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 374a19712e20..b684f0294f35 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2278,6 +2278,19 @@ void perf_check_microcode(void) x86_pmu.check_microcode(); } +static int x86_pmu_check_period(struct perf_event *event, u64 value) +{ + if (x86_pmu.check_period && x86_pmu.check_period(event, value)) + return -EINVAL; + + if (value && x86_pmu.limit_period) { + if (x86_pmu.limit_period(event, value) > value) + return -EINVAL; + } + + return 0; +} + static struct pmu pmu = { .pmu_enable = x86_pmu_enable, .pmu_disable = x86_pmu_disable, @@ -2302,6 +2315,7 @@ static struct pmu pmu = { .event_idx = x86_pmu_event_idx, .sched_task = x86_pmu_sched_task, .task_ctx_size = sizeof(struct x86_perf_task_context), + 
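x86_pmu_check_period() above is the x86 wiring for the new ->check_period callback; the user-visible path it guards is the PERF_EVENT_IOC_PERIOD ioctl. A hedged userspace sketch of that path follows (event choice and period values are illustrative; whether an update is refused depends on the PMU — on Intel parts, for instance, shrinking a branch-instructions event's period to 1 would turn it into a BTS event and can now be rejected with EINVAL):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		unsigned long long new_period = 1;	/* period 1 + branches = BTS-style */
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 100000;		/* starts as a normal sampling event */
		attr.exclude_kernel = 1;

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		/* The PMU's ->check_period callback vets the new value; EINVAL is possible. */
		if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period) < 0)
			perror("PERF_EVENT_IOC_PERIOD");

		close(fd);
		return 0;
	}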
.check_period = x86_pmu_check_period, }; void arch_perf_update_userpage(struct perf_event *event, diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index a01ef1b0f883..7cdd7b13bbda 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -77,10 +77,12 @@ static size_t buf_size(struct page *page) } static void * -bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) +bts_buffer_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool overwrite) { struct bts_buffer *buf; struct page *page; + int cpu = event->cpu; int node = (cpu == -1) ? cpu : cpu_to_node(cpu); unsigned long offset; size_t size = nr_pages << PAGE_SHIFT; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 40e12cfc87f6..17096d3cd616 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -18,6 +18,7 @@ #include <asm/hardirq.h> #include <asm/intel-family.h> #include <asm/apic.h> +#include <asm/cpu_device_id.h> #include "../perf_event.h" @@ -3206,16 +3207,27 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; - /* - * If PMU counter has PEBS enabled it is not enough to disable counter - * on a guest entry since PEBS memory write can overshoot guest entry - * and corrupt guest memory. Disabling PEBS solves the problem. - */ - arr[1].msr = MSR_IA32_PEBS_ENABLE; - arr[1].host = cpuc->pebs_enabled; - arr[1].guest = 0; + if (x86_pmu.flags & PMU_FL_PEBS_ALL) + arr[0].guest &= ~cpuc->pebs_enabled; + else + arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK); + *nr = 1; + + if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) { + /* + * If PMU counter has PEBS enabled it is not enough to + * disable counter on a guest entry since PEBS memory + * write can overshoot guest entry and corrupt guest + * memory. Disabling PEBS solves the problem. + * + * Don't do this if the CPU already enforces it. + */ + arr[1].msr = MSR_IA32_PEBS_ENABLE; + arr[1].host = cpuc->pebs_enabled; + arr[1].guest = 0; + *nr = 2; + } - *nr = 2; return arr; } @@ -3559,6 +3571,14 @@ static void free_excl_cntrs(int cpu) static void intel_pmu_cpu_dying(int cpu) { + fini_debug_store_on_cpu(cpu); + + if (x86_pmu.counter_freezing) + disable_counter_freeze(); +} + +static void intel_pmu_cpu_dead(int cpu) +{ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); struct intel_shared_regs *pc; @@ -3570,11 +3590,6 @@ static void intel_pmu_cpu_dying(int cpu) } free_excl_cntrs(cpu); - - fini_debug_store_on_cpu(cpu); - - if (x86_pmu.counter_freezing) - disable_counter_freeze(); } static void intel_pmu_sched_task(struct perf_event_context *ctx, @@ -3584,6 +3599,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, intel_pmu_lbr_sched_task(ctx, sched_in); } +static int intel_pmu_check_period(struct perf_event *event, u64 value) +{ + return intel_pmu_has_bts_period(event, value) ? 
-EINVAL : 0; +} + PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(ldlat, "config1:0-15"); @@ -3663,6 +3683,9 @@ static __initconst const struct x86_pmu core_pmu = { .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, + .cpu_dead = intel_pmu_cpu_dead, + + .check_period = intel_pmu_check_period, }; static struct attribute *intel_pmu_attrs[]; @@ -3703,8 +3726,12 @@ static __initconst const struct x86_pmu intel_pmu = { .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, + .cpu_dead = intel_pmu_cpu_dead, + .guest_get_msrs = intel_guest_get_msrs, .sched_task = intel_pmu_sched_task, + + .check_period = intel_pmu_check_period, }; static __init void intel_clovertown_quirk(void) @@ -3733,36 +3760,62 @@ static __init void intel_clovertown_quirk(void) x86_pmu.pebs_constraints = NULL; } -static int intel_snb_pebs_broken(int cpu) +static const struct x86_cpu_desc isolation_ucodes[] = { + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE, 3, 0x0000001f), + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e), + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015), + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037), + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000023), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 5, 0x0e000002), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP, 3, 0x0000007c), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 9, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 10, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 11, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 12, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 13, 0x0000004e), + {} +}; + +static void intel_check_pebs_isolation(void) { - u32 rev = UINT_MAX; /* default to broken for unknown models */ + x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes); +} - switch (cpu_data(cpu).x86_model) { - case INTEL_FAM6_SANDYBRIDGE: - rev = 0x28; - break; +static __init void intel_pebs_isolation_quirk(void) +{ + WARN_ON_ONCE(x86_pmu.check_microcode); + x86_pmu.check_microcode = intel_check_pebs_isolation; + intel_check_pebs_isolation(); +} - case INTEL_FAM6_SANDYBRIDGE_X: - switch (cpu_data(cpu).x86_stepping) { - case 6: rev = 0x618; break; - case 7: rev = 0x70c; break; - } - } +static const struct x86_cpu_desc pebs_ucodes[] = { + INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028), + INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618), + INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c), + {} +}; - return (cpu_data(cpu).microcode < rev); +static bool intel_snb_pebs_broken(void) +{ + return !x86_cpu_has_min_microcode_rev(pebs_ucodes); } static void 
intel_snb_check_microcode(void) { - int pebs_broken = 0; - int cpu; - - for_each_online_cpu(cpu) { - if ((pebs_broken = intel_snb_pebs_broken(cpu))) - break; - } - - if (pebs_broken == x86_pmu.pebs_broken) + if (intel_snb_pebs_broken() == x86_pmu.pebs_broken) return; /* @@ -3879,23 +3932,22 @@ static __init void intel_nehalem_quirk(void) } } -static bool intel_glp_counter_freezing_broken(int cpu) -{ - u32 rev = UINT_MAX; /* default to broken for unknown stepping */ - - switch (cpu_data(cpu).x86_stepping) { - case 1: - rev = 0x28; - break; - case 8: - rev = 0x6; - break; - } +static const struct x86_cpu_desc counter_freezing_ucodes[] = { + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X, 1, 0x00000028), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006), + {} +}; - return (cpu_data(cpu).microcode < rev); +static bool intel_counter_freezing_broken(void) +{ + return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes); } -static __init void intel_glp_counter_freezing_quirk(void) +static __init void intel_counter_freezing_quirk(void) { /* Check if it's already disabled */ if (disable_counter_freezing) @@ -3905,7 +3957,7 @@ static __init void intel_glp_counter_freezing_quirk(void) * If the system starts with the wrong ucode, leave the * counter-freezing feature permanently disabled. */ - if (intel_glp_counter_freezing_broken(raw_smp_processor_id())) { + if (intel_counter_freezing_broken()) { pr_info("PMU counter freezing disabled due to CPU errata," "please upgrade microcode\n"); x86_pmu.counter_freezing = false; @@ -4256,6 +4308,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_ATOM_GOLDMONT_X: + x86_add_quirk(intel_counter_freezing_quirk); memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, @@ -4282,7 +4335,7 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_ATOM_GOLDMONT_PLUS: - x86_add_quirk(intel_glp_counter_freezing_quirk); + x86_add_quirk(intel_counter_freezing_quirk); memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs, @@ -4425,6 +4478,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_HASWELL_ULT: case INTEL_FAM6_HASWELL_GT3E: x86_add_quirk(intel_ht_bug); + x86_add_quirk(intel_pebs_isolation_quirk); x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); @@ -4456,6 +4510,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_BROADWELL_XEON_D: case INTEL_FAM6_BROADWELL_GT3E: case INTEL_FAM6_BROADWELL_X: + x86_add_quirk(intel_pebs_isolation_quirk); x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); @@ -4518,6 +4573,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_KABYLAKE_MOBILE: case INTEL_FAM6_KABYLAKE_DESKTOP: + x86_add_quirk(intel_pebs_isolation_quirk); x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); diff 
--git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index d2e780705c5a..94a4b7fc75d0 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -280,13 +280,7 @@ static int cstate_pmu_event_init(struct perf_event *event) return -ENOENT; /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) /* no sampling */ + if (event->attr.sample_period) /* no sampling */ return -EINVAL; if (event->cpu < 0) @@ -437,7 +431,7 @@ static struct pmu cstate_core_pmu = { .start = cstate_pmu_event_start, .stop = cstate_pmu_event_stop, .read = cstate_pmu_event_update, - .capabilities = PERF_PMU_CAP_NO_INTERRUPT, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, .module = THIS_MODULE, }; @@ -451,7 +445,7 @@ static struct pmu cstate_pkg_pmu = { .start = cstate_pmu_event_start, .stop = cstate_pmu_event_stop, .read = cstate_pmu_event_update, - .capabilities = PERF_PMU_CAP_NO_INTERRUPT, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, .module = THIS_MODULE, }; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index e9acf1d2e7b2..10c99ce1fead 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1628,6 +1628,8 @@ void __init intel_ds_init(void) x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; + if (x86_pmu.version <= 4) + x86_pmu.pebs_no_isolation = 1; if (x86_pmu.pebs) { char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; int format = x86_pmu.intel_cap.pebs_format; diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 9494ca68fd9d..fb3a2f13fc70 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -1114,10 +1114,11 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages, * Return: Our private PT buffer structure. 
*/ static void * -pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot) +pt_buffer_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { struct pt_buffer *buf; - int node, ret; + int node, ret, cpu = event->cpu; if (!nr_pages) return NULL; @@ -1222,7 +1223,8 @@ static int pt_event_addr_filters_validate(struct list_head *filters) static void pt_event_addr_filters_sync(struct perf_event *event) { struct perf_addr_filters_head *head = perf_event_addr_filters(event); - unsigned long msr_a, msr_b, *offs = event->addr_filters_offs; + unsigned long msr_a, msr_b; + struct perf_addr_filter_range *fr = event->addr_filter_ranges; struct pt_filters *filters = event->hw.addr_filters; struct perf_addr_filter *filter; int range = 0; @@ -1231,12 +1233,12 @@ static void pt_event_addr_filters_sync(struct perf_event *event) return; list_for_each_entry(filter, &head->list, entry) { - if (filter->path.dentry && !offs[range]) { + if (filter->path.dentry && !fr[range].start) { msr_a = msr_b = 0; } else { /* apply the offset */ - msr_a = filter->offset + offs[range]; - msr_b = filter->size + msr_a - 1; + msr_a = fr[range].start; + msr_b = msr_a + fr[range].size - 1; } filters->filter[range].msr_a = msr_a; diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 91039ffed633..94dc564146ca 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -397,13 +397,7 @@ static int rapl_pmu_event_init(struct perf_event *event) return -EINVAL; /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) /* no sampling */ + if (event->attr.sample_period) /* no sampling */ return -EINVAL; /* must be done before validate_group */ @@ -699,6 +693,7 @@ static int __init init_rapl_pmus(void) rapl_pmus->pmu.stop = rapl_pmu_event_stop; rapl_pmus->pmu.read = rapl_pmu_event_read; rapl_pmus->pmu.module = THIS_MODULE; + rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; return 0; } diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 27a461414b30..d516161c00c4 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -695,14 +695,6 @@ static int uncore_pmu_event_init(struct perf_event *event) if (pmu->func_id < 0) return -ENOENT; - /* - * Uncore PMU does measure at all privilege level all the time. - * So it doesn't make sense to specify any exclude bits. 
- */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_hv || event->attr.exclude_idle) - return -EINVAL; - /* Sampling not supported yet */ if (hwc->sample_period) return -EINVAL; @@ -800,6 +792,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu) .stop = uncore_pmu_event_stop, .read = uncore_pmu_event_read, .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; } else { pmu->pmu = *pmu->type->pmu; diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 2593b0d7aeee..b12517fae77a 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -397,13 +397,7 @@ static int snb_uncore_imc_event_init(struct perf_event *event) return -EINVAL; /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) /* no sampling */ + if (event->attr.sample_period) /* no sampling */ return -EINVAL; /* @@ -497,6 +491,7 @@ static struct pmu snb_uncore_imc_pmu = { .start = uncore_pmu_event_start, .stop = uncore_pmu_event_stop, .read = uncore_pmu_event_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static struct intel_uncore_ops snb_uncore_imc_ops = { diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c07bee31abe8..b10e04387f38 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = { .id_table = snbep_uncore_pci_ids, }; +#define NODE_ID_MASK 0x7 + /* * build pci bus to socket mapping */ @@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); if (err) break; - nodeid = config; + nodeid = config & NODE_ID_MASK; /* get the Node ID mapping */ err = pci_read_config_dword(ubox_dev, idmap_loc, &config); if (err) diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 1b9f85abf9bc..a878e6286e4a 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -160,13 +160,7 @@ static int msr_event_init(struct perf_event *event) return -ENOENT; /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) /* no sampling */ + if (event->attr.sample_period) /* no sampling */ return -EINVAL; if (cfg >= PERF_MSR_EVENT_MAX) @@ -256,7 +250,7 @@ static struct pmu pmu_msr = { .start = msr_event_start, .stop = msr_event_stop, .read = msr_event_update, - .capabilities = PERF_PMU_CAP_NO_INTERRUPT, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, }; static int __init msr_init(void) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 78d7b7031bfc..7e75f474b076 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -601,13 +601,14 @@ struct x86_pmu { /* * Intel DebugStore bits */ - unsigned int bts :1, - bts_active :1, - pebs :1, - pebs_active :1, - pebs_broken :1, - pebs_prec_dist :1, - pebs_no_tlb :1; + unsigned int bts :1, + bts_active :1, + pebs :1, + pebs_active :1, + pebs_broken :1, + pebs_prec_dist :1, + pebs_no_tlb :1, + pebs_no_isolation :1; int pebs_record_size; int pebs_buffer_size; 
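The exclude_* checks deleted from the event_init() paths above (IBS, IOMMU, power, AMD uncore, cstate, RAPL, Intel uncore, msr) all encoded the same fact: these PMUs cannot filter by privilege level. PERF_PMU_CAP_NO_EXCLUDE lets the core enforce that once instead of in every driver. The sketch below only illustrates the idea; the helper names are made up and the actual core-side check in kernel/events/core.c differs in detail:

	/* Assumes kernel context (<linux/perf_event.h>, <linux/errno.h>); illustrative only. */
	static bool event_has_any_exclude_bit(const struct perf_event_attr *attr)
	{
		return attr->exclude_user || attr->exclude_kernel ||
		       attr->exclude_hv   || attr->exclude_idle  ||
		       attr->exclude_host || attr->exclude_guest;
	}

	static int perf_try_init_event_sketch(struct pmu *pmu, struct perf_event *event)
	{
		/* PMU advertises it cannot honour exclusion: reject centrally. */
		if ((pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE) &&
		    event_has_any_exclude_bit(&event->attr))
			return -EINVAL;

		return pmu->event_init(event);
	}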
void (*drain_pebs)(struct pt_regs *regs); @@ -646,6 +647,11 @@ struct x86_pmu { * Intel host/guest support (KVM) */ struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); + + /* + * Check period value for PERF_EVENT_IOC_PERIOD ioctl. + */ + int (*check_period) (struct perf_event *event, u64 period); }; struct x86_perf_task_context { @@ -857,7 +863,7 @@ static inline int amd_pmu_init(void) #ifdef CONFIG_CPU_SUP_INTEL -static inline bool intel_pmu_has_bts(struct perf_event *event) +static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) { struct hw_perf_event *hwc = &event->hw; unsigned int hw_event, bts_event; @@ -868,7 +874,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); - return hw_event == bts_event && hwc->sample_period == 1; + return hw_event == bts_event && period == 1; +} + +static inline bool intel_pmu_has_bts(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + return intel_pmu_has_bts_period(event, hwc->sample_period); } int intel_pmu_save_and_restart(struct perf_event *event); diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index f65b78d32f5e..3c135084e1eb 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -39,82 +39,10 @@ static int load_aout_binary(struct linux_binprm *); static int load_aout_library(struct file *); -#ifdef CONFIG_COREDUMP -static int aout_core_dump(struct coredump_params *); - -static unsigned long get_dr(int n) -{ - struct perf_event *bp = current->thread.ptrace_bps[n]; - return bp ? bp->hw.info.address : 0; -} - -/* - * fill in the user structure for a core dump.. - */ -static void dump_thread32(struct pt_regs *regs, struct user32 *dump) -{ - u32 fs, gs; - memset(dump, 0, sizeof(*dump)); - -/* changed the size calculations - should hopefully work better. 
lbt */ - dump->magic = CMAGIC; - dump->start_code = 0; - dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); - dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; - dump->u_dsize = ((unsigned long) - (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; - dump->u_dsize -= dump->u_tsize; - dump->u_debugreg[0] = get_dr(0); - dump->u_debugreg[1] = get_dr(1); - dump->u_debugreg[2] = get_dr(2); - dump->u_debugreg[3] = get_dr(3); - dump->u_debugreg[6] = current->thread.debugreg6; - dump->u_debugreg[7] = current->thread.ptrace_dr7; - - if (dump->start_stack < 0xc0000000) { - unsigned long tmp; - - tmp = (unsigned long) (0xc0000000 - dump->start_stack); - dump->u_ssize = tmp >> PAGE_SHIFT; - } - - dump->regs.ebx = regs->bx; - dump->regs.ecx = regs->cx; - dump->regs.edx = regs->dx; - dump->regs.esi = regs->si; - dump->regs.edi = regs->di; - dump->regs.ebp = regs->bp; - dump->regs.eax = regs->ax; - dump->regs.ds = current->thread.ds; - dump->regs.es = current->thread.es; - savesegment(fs, fs); - dump->regs.fs = fs; - savesegment(gs, gs); - dump->regs.gs = gs; - dump->regs.orig_eax = regs->orig_ax; - dump->regs.eip = regs->ip; - dump->regs.cs = regs->cs; - dump->regs.eflags = regs->flags; - dump->regs.esp = regs->sp; - dump->regs.ss = regs->ss; - -#if 1 /* FIXME */ - dump->u_fpvalid = 0; -#else - dump->u_fpvalid = dump_fpu(regs, &dump->i387); -#endif -} - -#endif - static struct linux_binfmt aout_format = { .module = THIS_MODULE, .load_binary = load_aout_binary, .load_shlib = load_aout_library, -#ifdef CONFIG_COREDUMP - .core_dump = aout_core_dump, -#endif - .min_coredump = PAGE_SIZE }; static int set_brk(unsigned long start, unsigned long end) @@ -126,91 +54,6 @@ static int set_brk(unsigned long start, unsigned long end) return vm_brk(start, end - start); } -#ifdef CONFIG_COREDUMP -/* - * These are the only things you should do on a core-file: use only these - * macros to write out all the necessary info. - */ - -#include <linux/coredump.h> - -#define START_DATA(u) (u.u_tsize << PAGE_SHIFT) -#define START_STACK(u) (u.start_stack) - -/* - * Routine writes a core dump image in the current directory. - * Currently only a stub-function. - * - * Note that setuid/setgid files won't make a core-dump if the uid/gid - * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable" - * field, which also makes sure the core-dumps won't be recursive if the - * dumping of the process results in another error.. - */ - -static int aout_core_dump(struct coredump_params *cprm) -{ - mm_segment_t fs; - int has_dumped = 0; - unsigned long dump_start, dump_size; - struct user32 dump; - - fs = get_fs(); - set_fs(KERNEL_DS); - has_dumped = 1; - strncpy(dump.u_comm, current->comm, sizeof(current->comm)); - dump.u_ar0 = offsetof(struct user32, regs); - dump.signal = cprm->siginfo->si_signo; - dump_thread32(cprm->regs, &dump); - - /* - * If the size of the dump file exceeds the rlimit, then see - * what would happen if we wrote the stack, but not the data - * area. - */ - if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) - dump.u_dsize = 0; - - /* Make sure we have enough room to write the stack and data areas. 
*/ - if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) - dump.u_ssize = 0; - - /* make sure we actually have a data and stack area to dump */ - set_fs(USER_DS); - if (!access_ok((void *) (unsigned long)START_DATA(dump), - dump.u_dsize << PAGE_SHIFT)) - dump.u_dsize = 0; - if (!access_ok((void *) (unsigned long)START_STACK(dump), - dump.u_ssize << PAGE_SHIFT)) - dump.u_ssize = 0; - - set_fs(KERNEL_DS); - /* struct user */ - if (!dump_emit(cprm, &dump, sizeof(dump))) - goto end_coredump; - /* Now dump all of the user data. Include malloced stuff as well */ - if (!dump_skip(cprm, PAGE_SIZE - sizeof(dump))) - goto end_coredump; - /* now we start writing out the user space info */ - set_fs(USER_DS); - /* Dump the data area */ - if (dump.u_dsize != 0) { - dump_start = START_DATA(dump); - dump_size = dump.u_dsize << PAGE_SHIFT; - if (!dump_emit(cprm, (void *)dump_start, dump_size)) - goto end_coredump; - } - /* Now prepare to dump the stack area */ - if (dump.u_ssize != 0) { - dump_start = START_STACK(dump); - dump_size = dump.u_ssize << PAGE_SHIFT; - if (!dump_emit(cprm, (void *)dump_start, dump_size)) - goto end_coredump; - } -end_coredump: - set_fs(fs); - return has_dumped; -} -#endif /* * create_aout_tables() parses the env- and arg-strings in new user diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h deleted file mode 100644 index 7d3ece8bfb61..000000000000 --- a/arch/x86/include/asm/a.out-core.h +++ /dev/null @@ -1,67 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_X86_A_OUT_CORE_H -#define _ASM_X86_A_OUT_CORE_H - -#ifdef __KERNEL__ -#ifdef CONFIG_X86_32 - -#include <linux/user.h> -#include <linux/elfcore.h> -#include <linux/mm_types.h> - -#include <asm/debugreg.h> - -/* - * fill in the user structure for an a.out core dump - */ -static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) -{ -/* changed the size calculations - should hopefully work better. 
lbt */ - dump->magic = CMAGIC; - dump->start_code = 0; - dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); - dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; - dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1))) - >> PAGE_SHIFT; - dump->u_dsize -= dump->u_tsize; - dump->u_ssize = 0; - aout_dump_debugregs(dump); - - if (dump->start_stack < TASK_SIZE) - dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) - >> PAGE_SHIFT; - - dump->regs.bx = regs->bx; - dump->regs.cx = regs->cx; - dump->regs.dx = regs->dx; - dump->regs.si = regs->si; - dump->regs.di = regs->di; - dump->regs.bp = regs->bp; - dump->regs.ax = regs->ax; - dump->regs.ds = (u16)regs->ds; - dump->regs.es = (u16)regs->es; - dump->regs.fs = (u16)regs->fs; - dump->regs.gs = get_user_gs(regs); - dump->regs.orig_ax = regs->orig_ax; - dump->regs.ip = regs->ip; - dump->regs.cs = (u16)regs->cs; - dump->regs.flags = regs->flags; - dump->regs.sp = regs->sp; - dump->regs.ss = (u16)regs->ss; - - dump->u_fpvalid = dump_fpu(regs, &dump->i387); -} - -#endif /* CONFIG_X86_32 */ -#endif /* __KERNEL__ */ -#endif /* _ASM_X86_A_OUT_CORE_H */ diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 0660e14690c8..4c74073a19cc 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -94,13 +94,12 @@ static inline int alternatives_text_reserved(void *start, void *end) #define alt_total_slen alt_end_marker"b-661b" #define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f" -#define __OLDINSTR(oldinstr, num) \ +#define OLDINSTR(oldinstr, num) \ + "# ALT: oldnstr\n" \ "661:\n\t" oldinstr "\n662:\n" \ + "# ALT: padding\n" \ ".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \ - "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n" - -#define OLDINSTR(oldinstr, num) \ - __OLDINSTR(oldinstr, num) \ + "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n" \ alt_end_marker ":\n" /* @@ -116,11 +115,23 @@ static inline int alternatives_text_reserved(void *start, void *end) * additionally longer than the first replacement alternative. 
*/ #define OLDINSTR_2(oldinstr, num1, num2) \ + "# ALT: oldinstr2\n" \ "661:\n\t" oldinstr "\n662:\n" \ + "# ALT: padding2\n" \ ".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \ "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \ alt_end_marker ":\n" +#define OLDINSTR_3(oldinsn, n1, n2, n3) \ + "# ALT: oldinstr3\n" \ + "661:\n\t" oldinsn "\n662:\n" \ + "# ALT: padding3\n" \ + ".skip -((" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \ + " - (" alt_slen ")) > 0) * " \ + "(" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \ + " - (" alt_slen ")), 0x90\n" \ + alt_end_marker ":\n" + #define ALTINSTR_ENTRY(feature, num) \ " .long 661b - .\n" /* label */ \ " .long " b_replacement(num)"f - .\n" /* new instruction */ \ @@ -129,8 +140,9 @@ static inline int alternatives_text_reserved(void *start, void *end) " .byte " alt_rlen(num) "\n" /* replacement len */ \ " .byte " alt_pad_len "\n" /* pad len */ -#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ - b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t" +#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ + "# ALT: replacement " #num "\n" \ + b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n" /* alternative assembly primitive: */ #define ALTERNATIVE(oldinstr, newinstr, feature) \ @@ -153,6 +165,19 @@ static inline int alternatives_text_reserved(void *start, void *end) ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ ".popsection\n" +#define ALTERNATIVE_3(oldinsn, newinsn1, feat1, newinsn2, feat2, newinsn3, feat3) \ + OLDINSTR_3(oldinsn, 1, 2, 3) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feat1, 1) \ + ALTINSTR_ENTRY(feat2, 2) \ + ALTINSTR_ENTRY(feat3, 3) \ + ".popsection\n" \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(newinsn1, feat1, 1) \ + ALTINSTR_REPLACEMENT(newinsn2, feat2, 2) \ + ALTINSTR_REPLACEMENT(newinsn3, feat3, 3) \ + ".popsection\n" + /* * Alternative instructions for different CPU types or capabilities. * diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h index baeba0567126..3417110574c1 100644 --- a/arch/x86/include/asm/cpu_device_id.h +++ b/arch/x86/include/asm/cpu_device_id.h @@ -11,4 +11,32 @@ extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); +/* + * Match specific microcode revisions. + * + * vendor/family/model/stepping must be all set. + * + * Only checks against the boot CPU. When mixed-stepping configs are + * valid for a CPU model, add a quirk for every valid stepping and + * do the fine-tuning in the quirk handler. 
+ */ + +struct x86_cpu_desc { + __u8 x86_family; + __u8 x86_vendor; + __u8 x86_model; + __u8 x86_stepping; + __u32 x86_microcode_rev; +}; + +#define INTEL_CPU_DESC(mod, step, rev) { \ + .x86_family = 6, \ + .x86_vendor = X86_VENDOR_INTEL, \ + .x86_model = mod, \ + .x86_stepping = step, \ + .x86_microcode_rev = rev, \ +} + +extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table); + #endif diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 107283b1eb1e..606a4b6a9812 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -170,7 +170,6 @@ static inline bool efi_runtime_supported(void) return false; } -extern struct console early_efi_console; extern void parse_efi_setup(u64 phys_addr, u32 data_len); extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 705dafc2d11a..2bdbbbcfa393 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -841,7 +841,7 @@ union hv_gpa_page_range { * count is equal with how many entries of union hv_gpa_page_range can * be populated into the input parameter page. */ -#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) / \ +#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) / \ sizeof(union hv_gpa_page_range)) struct hv_guest_mapping_flush_list { diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index d9a9993af882..9f15384c504a 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -52,6 +52,8 @@ #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 +#define INTEL_FAM6_ICELAKE_MOBILE 0x7E + /* "Small Core" Processors (Atom) */ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4660ce90de7f..180373360e34 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -299,6 +299,7 @@ union kvm_mmu_extended_role { unsigned int cr4_smap:1; unsigned int cr4_smep:1; unsigned int cr4_la57:1; + unsigned int maxphyaddr:6; }; }; @@ -397,6 +398,7 @@ struct kvm_mmu { void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte); hpa_t root_hpa; + gpa_t root_cr3; union kvm_mmu_role mmu_role; u8 root_level; u8 shadow_root_level; diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 91e4cf189914..5cc3930cb465 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -217,6 +217,8 @@ static __always_inline unsigned long long rdtsc(void) */ static __always_inline unsigned long long rdtsc_ordered(void) { + DECLARE_ARGS(val, low, high); + /* * The RDTSC instruction is not ordered relative to memory * access. The Intel SDM and the AMD APM are both vague on this @@ -227,9 +229,19 @@ static __always_inline unsigned long long rdtsc_ordered(void) * ordering guarantees as reading from a global memory location * that some other imaginary CPU is updating continuously with a * time stamp. + * + * Thus, use the preferred barrier on the respective CPU, aiming for + * RDTSCP as the default. */ - barrier_nospec(); - return rdtsc(); + asm volatile(ALTERNATIVE_3("rdtsc", + "mfence; rdtsc", X86_FEATURE_MFENCE_RDTSC, + "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC, + "rdtscp", X86_FEATURE_RDTSCP) + : EAX_EDX_RET(val, low, high) + /* RDTSCP clobbers ECX with MSR_TSC_AUX. 
*/ + :: "ecx"); + + return EAX_EDX_VAL(val, low, high); } static inline unsigned long long native_read_pmc(int counter) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 40616e805292..2779ace16d23 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { - native_set_pmd(pmdp, pmd); + set_pmd(pmdp, pmd); } static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index dbaed55c1c24..232f856e0db0 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -67,16 +67,30 @@ static __always_inline void refcount_dec(refcount_t *r) static __always_inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) { - return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", + bool ret = GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO, r->refs.counter, e, "er", i, "cx"); + + if (ret) { + smp_acquire__after_ctrl_dep(); + return true; + } + + return false; } static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) { - return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", - REFCOUNT_CHECK_LT_ZERO, - r->refs.counter, e, "cx"); + bool ret = GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", + REFCOUNT_CHECK_LT_ZERO, + r->refs.counter, e, "cx"); + + if (ret) { + smp_acquire__after_ctrl_dep(); + return true; + } + + return false; } static __always_inline __must_check diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 780f2b42c8ef..5e49a0acb5ee 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -25,7 +25,6 @@ #define KERNEL_DS MAKE_MM_SEG(-1UL) #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.addr_limit) static inline void set_fs(mm_segment_t fs) { @@ -284,7 +283,7 @@ do { \ __put_user_goto(x, ptr, "l", "k", "ir", label); \ break; \ case 8: \ - __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \ + __put_user_goto_u64(x, ptr, label); \ break; \ default: \ __put_user_bad(); \ @@ -431,8 +430,10 @@ do { \ ({ \ __label__ __pu_label; \ int __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __pu_val; \ + __pu_val = x; \ __uaccess_begin(); \ - __put_user_size((x), (ptr), (size), __pu_label); \ + __put_user_size(__pu_val, (ptr), (size), __pu_label); \ __pu_err = 0; \ __pu_label: \ __uaccess_end(); \ diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index e652a7cc6186..3f697a9e3f59 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h @@ -48,7 +48,8 @@ enum { BIOS_STATUS_SUCCESS = 0, BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, BIOS_STATUS_EINVAL = -EINVAL, - BIOS_STATUS_UNAVAIL = -EBUSY + BIOS_STATUS_UNAVAIL = -EBUSY, + BIOS_STATUS_ABORT = -EINTR, }; /* Address map parameters */ @@ -167,4 +168,9 @@ extern long system_serial_number; extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ +/* + * EFI runtime lock; cf. 
firmware/efi/runtime-wrappers.c for details + */ +extern struct semaphore __efi_uv_runtime_lock; + #endif /* _ASM_X86_UV_BIOS_H */ diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild index f6648e9928b3..efe701b7c6ce 100644 --- a/arch/x86/include/uapi/asm/Kbuild +++ b/arch/x86/include/uapi/asm/Kbuild @@ -3,3 +3,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generated-y += unistd_64.h generated-y += unistd_x32.h +generic-y += socket.h diff --git a/arch/x86/include/uapi/asm/socket.h b/arch/x86/include/uapi/asm/socket.h deleted file mode 100644 index 6b71384b9d8b..000000000000 --- a/arch/x86/include/uapi/asm/socket.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/socket.h> diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index ebeac487a20c..9a79c7808f9c 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -11,6 +11,7 @@ #include <linux/stop_machine.h> #include <linux/slab.h> #include <linux/kdebug.h> +#include <linux/kprobes.h> #include <asm/text-patching.h> #include <asm/alternative.h> #include <asm/sections.h> @@ -393,10 +394,10 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, continue; } - DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d", + DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d", a->cpuid >> 5, a->cpuid & 0x1f, - instr, a->instrlen, + instr, instr, a->instrlen, replacement, a->replacementlen, a->padlen); DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr); @@ -764,8 +765,8 @@ int poke_int3_handler(struct pt_regs *regs) regs->ip = (unsigned long) bp_int3_handler; return 1; - } +NOKPROBE_SYMBOL(poke_int3_handler); /** * text_poke_bp() -- update instructions on live kernel on SMP diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 69f6bbb41be0..01004bfb1a1b 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -819,11 +819,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c) static void init_amd_zn(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_ZEN); - /* - * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects - * all up to and including B1. - */ - if (c->x86_model <= 1 && c->x86_stepping <= 1) + + /* Fix erratum 1076: CPB feature bit not being set in CPUID. 
*/ + if (!cpu_has(c, X86_FEATURE_CPB)) set_cpu_cap(c, X86_FEATURE_CPB); } diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 01874d54f4fd..2da82eff0eb4 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -798,15 +798,25 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) if (task_spec_ssb_force_disable(task)) return -EPERM; task_clear_spec_ssb_disable(task); + task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_DISABLE: task_set_spec_ssb_disable(task); + task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_FORCE_DISABLE: task_set_spec_ssb_disable(task); task_set_spec_ssb_force_disable(task); + task_clear_spec_ssb_noexec(task); + task_update_spec_tif(task); + break; + case PR_SPEC_DISABLE_NOEXEC: + if (task_spec_ssb_force_disable(task)) + return -EPERM; + task_set_spec_ssb_disable(task); + task_set_spec_ssb_noexec(task); task_update_spec_tif(task); break; default: @@ -885,6 +895,8 @@ static int ssb_prctl_get(struct task_struct *task) case SPEC_STORE_BYPASS_PRCTL: if (task_spec_ssb_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ssb_noexec(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; if (task_spec_ssb_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c index 3fed38812eea..6dd78d8235e4 100644 --- a/arch/x86/kernel/cpu/match.c +++ b/arch/x86/kernel/cpu/match.c @@ -48,3 +48,34 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match) return NULL; } EXPORT_SYMBOL(x86_match_cpu); + +static const struct x86_cpu_desc * +x86_match_cpu_with_stepping(const struct x86_cpu_desc *match) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + const struct x86_cpu_desc *m; + + for (m = match; m->x86_family | m->x86_model; m++) { + if (c->x86_vendor != m->x86_vendor) + continue; + if (c->x86 != m->x86_family) + continue; + if (c->x86_model != m->x86_model) + continue; + if (c->x86_stepping != m->x86_stepping) + continue; + return m; + } + return NULL; +} + +bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table) +{ + const struct x86_cpu_desc *res = x86_match_cpu_with_stepping(table); + + if (!res || res->x86_microcode_rev > boot_cpu_data.microcode) + return false; + + return true; +} +EXPORT_SYMBOL_GPL(x86_cpu_has_min_microcode_rev); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 672c7225cb1b..6ce290c506d9 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, quirk_no_way_out(i, m, regs); if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + m->bank = i; mce_read_aux(m, i); *msg = tmp; return 1; diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 374a52fa5296..9b33904251a9 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -388,10 +388,6 @@ static int __init setup_early_printk(char *buf) if (!strncmp(buf, "xen", 3)) early_console_register(&xenboot_console, keep); #endif -#ifdef CONFIG_EARLY_PRINTK_EFI - if (!strncmp(buf, "efi", 3)) - early_console_register(&early_efi_console, keep); -#endif #ifdef CONFIG_EARLY_PRINTK_USB_XDBC if (!strncmp(buf, "xdbc", 4)) early_xdbc_parse_parameter(buf + 4); diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 
8257a59704ae..3e3789c8f8e1 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -269,7 +269,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) return ret; } -static int is_ftrace_caller(unsigned long ip) +static nokprobe_inline int is_ftrace_caller(unsigned long ip) { if (ip == ftrace_update_func) return 1; @@ -299,6 +299,7 @@ int ftrace_int3_handler(struct pt_regs *regs) return 1; } +NOKPROBE_SYMBOL(ftrace_int3_handler); static int ftrace_write(unsigned long ip, const char *val, int size) { diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 4ba75afba527..a034cb808e7e 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -1028,6 +1028,13 @@ NOKPROBE_SYMBOL(kprobe_fault_handler); int __init arch_populate_kprobe_blacklist(void) { + int ret; + + ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); + if (ret) + return ret; + return kprobe_add_area_blacklist((unsigned long)__entry_text_start, (unsigned long)__entry_text_end); } diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 6adf6e6c2933..f14262952015 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -97,6 +97,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) } asm ( + ".pushsection .rodata\n" "optprobe_template_func:\n" ".global optprobe_template_entry\n" "optprobe_template_entry:\n" @@ -136,8 +137,7 @@ asm ( #endif ".global optprobe_template_end\n" "optprobe_template_end:\n" - ".type optprobe_template_func, @function\n" - ".size optprobe_template_func, .-optprobe_template_func\n"); + ".popsection\n"); void optprobe_template_func(void); STACK_FRAME_NON_STANDARD(optprobe_template_func); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 90ae0ca51083..58ac7be52c7a 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -255,6 +255,18 @@ void arch_setup_new_exec(void) /* If cpuid was previously disabled for this task, re-enable it. */ if (test_thread_flag(TIF_NOCPUID)) enable_cpuid(); + + /* + * Don't inherit TIF_SSBD across exec boundary when + * PR_SPEC_DISABLE_NOEXEC is used. + */ + if (test_thread_flag(TIF_SSBD) && + task_spec_ssb_noexec(current)) { + clear_thread_flag(TIF_SSBD); + task_clear_spec_ssb_disable(current); + task_clear_spec_ssb_noexec(current); + speculation_ctrl_update(task_thread_info(current)->flags); + } } static inline void switch_to_bitmap(struct thread_struct *prev, diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 9b7c4ca8f0a7..e289ce1332ab 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -111,6 +111,7 @@ void ist_enter(struct pt_regs *regs) /* This code is a bit fragile. Test it. */ RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); } +NOKPROBE_SYMBOL(ist_enter); void ist_exit(struct pt_regs *regs) { diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index bbffa6c54697..c07958b59f50 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? 
F(INTEL_PT) : 0; + unsigned f_la57 = 0; /* cpuid 1.edx */ const u32 kvm_cpuid_1_edx_x86_features = @@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, // TSC_ADJUST is emulated entry->ebx |= F(TSC_ADJUST); entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; + f_la57 = entry->ecx & F(LA57); cpuid_mask(&entry->ecx, CPUID_7_ECX); + /* Set LA57 based on hardware capability. */ + entry->ecx |= f_la57; entry->ecx |= f_umip; /* PKU is not yet implemented for shadow paging. */ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index da9c42349b1f..f2d1d230d5b8 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, &invalid_list); mmu->root_hpa = INVALID_PAGE; } + mmu->root_cr3 = 0; } kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); @@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); } else BUG(); + vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); return 0; } @@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu_page *sp; u64 pdptr, pm_mask; - gfn_t root_gfn; + gfn_t root_gfn, root_cr3; int i; - root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; + root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); + root_gfn = root_cr3 >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) return 1; @@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu->root_hpa = root; - return 0; + goto set_root_cr3; } /* @@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); } +set_root_cr3: + vcpu->arch.mmu->root_cr3 = root_cr3; + return 0; } @@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, struct kvm_mmu_root_info root; struct kvm_mmu *mmu = vcpu->arch.mmu; - root.cr3 = mmu->get_cr3(vcpu); + root.cr3 = mmu->root_cr3; root.hpa = mmu->root_hpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { @@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, } mmu->root_hpa = root.hpa; + mmu->root_cr3 = root.cr3; return i < KVM_MMU_NUM_PREV_ROOTS; } @@ -4770,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) ext.cr4_pse = !!is_pse(vcpu); ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); + ext.maxphyaddr = cpuid_maxphyaddr(vcpu); ext.valid = 1; @@ -5516,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.root_mmu.root_cr3 = 0; vcpu->arch.root_mmu.translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.guest_mmu.root_cr3 = 0; vcpu->arch.guest_mmu.translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d8ea4ebd79e7..d737a51a53ca 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2473,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct 
kvm_vcpu *vcpu, (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) return -EINVAL; + if (!nested_cpu_has_preemption_timer(vmcs12) && + nested_cpu_has_save_preemption_timer(vmcs12)) + return -EINVAL; + if (nested_cpu_has_ept(vmcs12) && !valid_ept_address(vcpu, vmcs12->ept_pointer)) return -EINVAL; @@ -5557,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, * secondary cpu-based controls. Do not include those that * depend on CPUID bits, they are added later by vmx_cpuid_update. */ - rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, - msrs->secondary_ctls_low, - msrs->secondary_ctls_high); + if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, + msrs->secondary_ctls_low, + msrs->secondary_ctls_high); + msrs->secondary_ctls_low = 0; msrs->secondary_ctls_high &= SECONDARY_EXEC_DESC | diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 95d618045001..30a6bcd735ec 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -863,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, if (!entry_only) j = find_msr(&m->host, msr); - if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { + if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || + (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; @@ -1193,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) return; - /* - * First handle the simple case where no cmpxchg is necessary; just - * allow posting non-urgent interrupts. - * - * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change - * PI.NDST: pi_post_block will do it for us and the wakeup_handler - * expects the VCPU to be on the blocked_vcpu_list that matches - * PI.NDST. - */ - if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || - vcpu->cpu == cpu) { - pi_clear_sn(pi_desc); - return; - } - /* The full case. */ do { old.control = new.control = pi_desc->control; @@ -1222,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) new.sn = 0; } while (cmpxchg64(&pi_desc->control, old.control, new.control) != old.control); + + /* + * Clear SN before reading the bitmap. The VT-d firmware + * writes the bitmap and reads SN atomically (5.2.3 in the + * spec), so it doesn't really have a memory barrier that + * pairs with this, but we cannot do that and we need one. 
+ */ + smp_mb__after_atomic(); + + if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS)) + pi_set_on(pi_desc); } /* diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 99328954c2fc..0ac0a64c7790 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); } -static inline void pi_clear_sn(struct pi_desc *pi_desc) +static inline void pi_set_sn(struct pi_desc *pi_desc) { - return clear_bit(POSTED_INTR_SN, + return set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } -static inline void pi_set_sn(struct pi_desc *pi_desc) +static inline void pi_set_on(struct pi_desc *pi_desc) { - return set_bit(POSTED_INTR_SN, - (unsigned long *)&pi_desc->control); + set_bit(POSTED_INTR_ON, + (unsigned long *)&pi_desc->control); } static inline void pi_clear_on(struct pi_desc *pi_desc) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e67ecf25e690..941f932373d0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7801,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) * 1) We should set ->mode before checking ->requests. Please see * the comment in kvm_vcpu_exiting_guest_mode(). * - * 2) For APICv, we should set ->mode before checking PIR.ON. This + * 2) For APICv, we should set ->mode before checking PID.ON. This * pairs with the memory barrier implicit in pi_test_and_set_on * (see vmx_deliver_posted_interrupt). * diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 6521134057e8..3c4568f8fb28 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -117,67 +117,12 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup, } EXPORT_SYMBOL_GPL(ex_handler_fprestore); -/* Helper to check whether a uaccess fault indicates a kernel bug. */ -static bool bogus_uaccess(struct pt_regs *regs, int trapnr, - unsigned long fault_addr) -{ - /* This is the normal case: #PF with a fault address in userspace. */ - if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX) - return false; - - /* - * This code can be reached for machine checks, but only if the #MC - * handler has already decided that it looks like a candidate for fixup. - * This e.g. happens when attempting to access userspace memory which - * the CPU can't access because of uncorrectable bad memory. - */ - if (trapnr == X86_TRAP_MC) - return false; - - /* - * There are two remaining exception types we might encounter here: - * - #PF for faulting accesses to kernel addresses - * - #GP for faulting accesses to noncanonical addresses - * Complain about anything else. - */ - if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) { - WARN(1, "unexpected trap %d in uaccess\n", trapnr); - return false; - } - - /* - * This is a faulting memory access in kernel space, on a kernel - * address, in a usercopy function. This can e.g. be caused by improper - * use of helpers like __put_user and by improper attempts to access - * userspace addresses in KERNEL_DS regions. - * The one (semi-)legitimate exception are probe_kernel_{read,write}(), - * which can be invoked from places like kgdb, /dev/mem (for reading) - * and privileged BPF code (for reading). 
- * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag - * to tell us that faulting on kernel addresses, and even noncanonical - * addresses, in a userspace accessor does not necessarily imply a - * kernel bug, root might just be doing weird stuff. - */ - if (current->kernel_uaccess_faults_ok) - return false; - - /* This is bad. Refuse the fixup so that we go into die(). */ - if (trapnr == X86_TRAP_PF) { - pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n", - fault_addr); - } else { - pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n"); - } - return true; -} - __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup, struct pt_regs *regs, int trapnr, unsigned long error_code, unsigned long fault_addr) { - if (bogus_uaccess(regs, trapnr, fault_addr)) - return false; + WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?"); regs->ip = ex_fixup_addr(fixup); return true; } @@ -188,8 +133,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup, unsigned long error_code, unsigned long fault_addr) { - if (bogus_uaccess(regs, trapnr, fault_addr)) - return false; /* Special hack for uaccess_err */ current->thread.uaccess_err = 1; regs->ip = ex_fixup_addr(fixup); diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 5378d10f1d31..0029604af8a4 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -705,7 +705,7 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size) return arch_memremap_can_ram_remap(phys_addr, size, 0); } -#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT +#ifdef CONFIG_AMD_MEM_ENCRYPT /* Remap memory with encryption */ void __init *early_memremap_encrypted(resource_size_t phys_addr, unsigned long size) @@ -747,7 +747,7 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr, return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP); } -#endif /* CONFIG_ARCH_USE_MEMREMAP_PROT */ +#endif /* CONFIG_AMD_MEM_ENCRYPT */ static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 4f8972311a77..14e6119838a6 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn) #endif +/* + * See set_mce_nospec(). + * + * Machine check recovery code needs to change cache mode of poisoned pages to + * UC to avoid speculative access logging another error. But passing the + * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a + * speculative access. So we cheat and flip the top bit of the address. This + * works fine for the code that updates the page tables. But at the end of the + * process we need to flush the TLB and cache and the non-canonical address + * causes a #GP fault when used by the INVLPG and CLFLUSH instructions. + * + * But in the common case we already have a canonical address. This code + * will fix the top bit if needed and is a no-op otherwise. 
+ */ +static inline unsigned long fix_addr(unsigned long addr) +{ +#ifdef CONFIG_X86_64 + return (long)(addr << 1) >> 1; +#else + return addr; +#endif +} + static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) { if (cpa->flags & CPA_PAGES_ARRAY) { @@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data) unsigned int i; for (i = 0; i < cpa->numpages; i++) - __flush_tlb_one_kernel(__cpa_addr(cpa, i)); + __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i))); } static void cpa_flush(struct cpa_data *data, int cache) @@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache) * Only flush present addresses: */ if (pte && (pte_val(*pte) & _PAGE_PRESENT)) - clflush_cache_range_opt((void *)addr, PAGE_SIZE); + clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE); } mb(); } @@ -1627,29 +1650,6 @@ out: return ret; } -/* - * Machine check recovery code needs to change cache mode of poisoned - * pages to UC to avoid speculative access logging another error. But - * passing the address of the 1:1 mapping to set_memory_uc() is a fine - * way to encourage a speculative access. So we cheat and flip the top - * bit of the address. This works fine for the code that updates the - * page tables. But at the end of the process we need to flush the cache - * and the non-canonical address causes a #GP fault when used by the - * CLFLUSH instruction. - * - * But in the common case we already have a canonical address. This code - * will fix the top bit if needed and is a no-op otherwise. - */ -static inline unsigned long make_addr_canonical_again(unsigned long addr) -{ -#ifdef CONFIG_X86_64 - return (long)(addr << 1) >> 1; -#else - return addr; -#endif -} - - static int change_page_attr_set_clr(unsigned long *addr, int numpages, pgprot_t mask_set, pgprot_t mask_clr, int force_split, int in_flag, diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 5542303c43d9..afabf597c855 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -881,20 +881,41 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: /* cmp dst_reg, src_reg */ - EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39, - add_2reg(0xC0, dst_reg, src_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_2mod(0x48, dst_reg, src_reg)); + else if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: /* test dst_reg, src_reg */ - EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85, - add_2reg(0xC0, dst_reg, src_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_2mod(0x48, dst_reg, src_reg)); + else if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: /* test dst_reg, imm32 */ - EMIT1(add_1mod(0x48, dst_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + 
EMIT1(add_1mod(0x40, dst_reg)); EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); goto emit_cond_jmp; @@ -908,8 +929,21 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: /* cmp dst_reg, imm8/32 */ - EMIT1(add_1mod(0x48, dst_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); if (is_imm8(imm32)) EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 8f6cc71e0848..0d9cdffce6ac 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -2072,7 +2072,18 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: - case BPF_JMP | BPF_JSGE | BPF_X: { + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = sstk ? IA32_ECX : src_lo; @@ -2081,25 +2092,35 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } if (sstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), - STACK_VAR(src_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EBX), + STACK_VAR(src_hi)); } - /* cmp dreg_hi,sreg_hi */ - EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); - EMIT2(IA32_JNE, 2); + if (is_jmp64) { + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 2); + } /* cmp dreg_lo,sreg_lo */ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); goto emit_cond_jmp; } - case BPF_JMP | BPF_JSET | BPF_X: { + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = sstk ? 
IA32_ECX : src_lo; @@ -2108,15 +2129,21 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } if (sstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), - STACK_VAR(src_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EBX), + STACK_VAR(src_hi)); } /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); @@ -2126,32 +2153,39 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); goto emit_cond_jmp; } - case BPF_JMP | BPF_JSET | BPF_K: { - u32 hi; + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = IA32_ECX; u8 sreg_hi = IA32_EBX; + u32 hi; if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } - hi = imm32 & (1<<31) ? (u32)~0 : 0; /* mov ecx,imm32 */ - EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); - /* mov ebx,imm32 */ - EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); + EMIT2_off32(0xC7, add_1reg(0xC0, sreg_lo), imm32); /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); - /* and dreg_hi,sreg_hi */ - EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); - /* or dreg_lo,dreg_hi */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + if (is_jmp64) { + hi = imm32 & (1 << 31) ? (u32)~0 : 0; + /* mov ebx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, sreg_hi), hi); + /* and dreg_hi,sreg_hi */ + EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); + /* or dreg_lo,dreg_hi */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + } goto emit_cond_jmp; } case BPF_JMP | BPF_JEQ | BPF_K: @@ -2163,29 +2197,44 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: - case BPF_JMP | BPF_JSGE | BPF_K: { - u32 hi; + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = IA32_ECX; u8 sreg_hi = IA32_EBX; + u32 hi; if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } - hi = imm32 & (1<<31) ? 
(u32)~0 : 0; /* mov ecx,imm32 */ EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); - /* mov ebx,imm32 */ - EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); - - /* cmp dreg_hi,sreg_hi */ - EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); - EMIT2(IA32_JNE, 2); + if (is_jmp64) { + hi = imm32 & (1 << 31) ? (u32)~0 : 0; + /* mov ebx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 2); + } /* cmp dreg_lo,sreg_lo */ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile index e4dc3862d423..fe29f3f5d384 100644 --- a/arch/x86/platform/efi/Makefile +++ b/arch/x86/platform/efi/Makefile @@ -3,5 +3,4 @@ OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o -obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c deleted file mode 100644 index 7138bc7a265c..000000000000 --- a/arch/x86/platform/efi/early_printk.c +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Copyright (C) 2013 Intel Corporation; author Matt Fleming - * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2. - */ - -#include <linux/console.h> -#include <linux/efi.h> -#include <linux/font.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <asm/setup.h> - -static const struct font_desc *font; -static u32 efi_x, efi_y; -static void *efi_fb; -static bool early_efi_keep; - -/* - * efi earlyprintk need use early_ioremap to map the framebuffer. - * But early_ioremap is not usable for earlyprintk=efi,keep, ioremap should - * be used instead. ioremap will be available after paging_init() which is - * earlier than initcall callbacks. Thus adding this early initcall function - * early_efi_map_fb to map the whole efi framebuffer. - */ -static __init int early_efi_map_fb(void) -{ - u64 base, size; - - if (!early_efi_keep) - return 0; - - base = boot_params.screen_info.lfb_base; - if (boot_params.screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) - base |= (u64)boot_params.screen_info.ext_lfb_base << 32; - size = boot_params.screen_info.lfb_size; - efi_fb = ioremap(base, size); - - return efi_fb ? 0 : -ENOMEM; -} -early_initcall(early_efi_map_fb); - -/* - * early_efi_map maps efi framebuffer region [start, start + len -1] - * In case earlyprintk=efi,keep we have the whole framebuffer mapped already - * so just return the offset efi_fb + start. 
- */ -static __ref void *early_efi_map(unsigned long start, unsigned long len) -{ - u64 base; - - base = boot_params.screen_info.lfb_base; - if (boot_params.screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) - base |= (u64)boot_params.screen_info.ext_lfb_base << 32; - - if (efi_fb) - return (efi_fb + start); - else - return early_ioremap(base + start, len); -} - -static __ref void early_efi_unmap(void *addr, unsigned long len) -{ - if (!efi_fb) - early_iounmap(addr, len); -} - -static void early_efi_clear_scanline(unsigned int y) -{ - unsigned long *dst; - u16 len; - - len = boot_params.screen_info.lfb_linelength; - dst = early_efi_map(y*len, len); - if (!dst) - return; - - memset(dst, 0, len); - early_efi_unmap(dst, len); -} - -static void early_efi_scroll_up(void) -{ - unsigned long *dst, *src; - u16 len; - u32 i, height; - - len = boot_params.screen_info.lfb_linelength; - height = boot_params.screen_info.lfb_height; - - for (i = 0; i < height - font->height; i++) { - dst = early_efi_map(i*len, len); - if (!dst) - return; - - src = early_efi_map((i + font->height) * len, len); - if (!src) { - early_efi_unmap(dst, len); - return; - } - - memmove(dst, src, len); - - early_efi_unmap(src, len); - early_efi_unmap(dst, len); - } -} - -static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h) -{ - const u32 color_black = 0x00000000; - const u32 color_white = 0x00ffffff; - const u8 *src; - u8 s8; - int m; - - src = font->data + c * font->height; - s8 = *(src + h); - - for (m = 0; m < 8; m++) { - if ((s8 >> (7 - m)) & 1) - *dst = color_white; - else - *dst = color_black; - dst++; - } -} - -static void -early_efi_write(struct console *con, const char *str, unsigned int num) -{ - struct screen_info *si; - unsigned int len; - const char *s; - void *dst; - - si = &boot_params.screen_info; - len = si->lfb_linelength; - - while (num) { - unsigned int linemax; - unsigned int h, count = 0; - - for (s = str; *s && *s != '\n'; s++) { - if (count == num) - break; - count++; - } - - linemax = (si->lfb_width - efi_x) / font->width; - if (count > linemax) - count = linemax; - - for (h = 0; h < font->height; h++) { - unsigned int n, x; - - dst = early_efi_map((efi_y + h) * len, len); - if (!dst) - return; - - s = str; - n = count; - x = efi_x; - - while (n-- > 0) { - early_efi_write_char(dst + x*4, *s, h); - x += font->width; - s++; - } - - early_efi_unmap(dst, len); - } - - num -= count; - efi_x += count * font->width; - str += count; - - if (num > 0 && *s == '\n') { - efi_x = 0; - efi_y += font->height; - str++; - num--; - } - - if (efi_x + font->width > si->lfb_width) { - efi_x = 0; - efi_y += font->height; - } - - if (efi_y + font->height > si->lfb_height) { - u32 i; - - efi_y -= font->height; - early_efi_scroll_up(); - - for (i = 0; i < font->height; i++) - early_efi_clear_scanline(efi_y + i); - } - } -} - -static __init int early_efi_setup(struct console *con, char *options) -{ - struct screen_info *si; - u16 xres, yres; - u32 i; - - si = &boot_params.screen_info; - xres = si->lfb_width; - yres = si->lfb_height; - - /* - * early_efi_write_char() implicitly assumes a framebuffer with - * 32-bits per pixel. 
- */ - if (si->lfb_depth != 32) - return -ENODEV; - - font = get_default_font(xres, yres, -1, -1); - if (!font) - return -ENODEV; - - efi_y = rounddown(yres, font->height) - font->height; - for (i = 0; i < (yres - efi_y) / font->height; i++) - early_efi_scroll_up(); - - /* early_console_register will unset CON_BOOT in case ,keep */ - if (!(con->flags & CON_BOOT)) - early_efi_keep = true; - return 0; -} - -struct console early_efi_console = { - .name = "earlyefi", - .write = early_efi_write, - .setup = early_efi_setup, - .flags = CON_PRINTBUFFER, - .index = -1, -}; diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 17456a1d3f04..458a0e2bcc57 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -304,7 +304,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) * - Not within any part of the kernel * - Not the BIOS reserved area (E820_TYPE_RESERVED, E820_TYPE_NVS, etc) */ -static bool can_free_region(u64 start, u64 size) +static __init bool can_free_region(u64 start, u64 size) { if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end)) return false; @@ -717,7 +717,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr) * "efi_mm" cannot be used to check if the page fault had occurred * in the firmware context because efi=old_map doesn't use efi_pgd. */ - if (efi_rts_work.efi_rts_id == NONE) + if (efi_rts_work.efi_rts_id == EFI_NONE) return; /* @@ -742,7 +742,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr) * because this case occurs *very* rarely and hence could be improved * on a need by basis. */ - if (efi_rts_work.efi_rts_id == RESET_SYSTEM) { + if (efi_rts_work.efi_rts_id == EFI_RESET_SYSTEM) { pr_info("efi_reset_system() buggy! Reboot through BIOS\n"); machine_real_restart(MRR_BIOS); return; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c index 96f438d4b026..1421d5330b2c 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c @@ -44,7 +44,6 @@ static struct fixed_voltage_config bcm43xx_vmmc = { */ .microvolts = 2000000, /* 1.8V */ .startup_delay = 250 * 1000, /* 250ms */ - .enable_high = 1, /* active high */ .enabled_at_boot = 0, /* disabled at boot */ .init_data = &bcm43xx_vmmc_data, }; diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 4a6a5a26c582..eb33432f2f24 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c @@ -29,7 +29,8 @@ struct uv_systab *uv_systab; -s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) +static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, + u64 a4, u64 a5) { struct uv_systab *tab = uv_systab; s64 ret; @@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) return ret; } + +s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) +{ + s64 ret; + + if (down_interruptible(&__efi_uv_runtime_lock)) + return BIOS_STATUS_ABORT; + + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); + up(&__efi_uv_runtime_lock); + + return ret; +} EXPORT_SYMBOL_GPL(uv_bios_call); s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, @@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, unsigned long bios_flags; s64 ret; + if (down_interruptible(&__efi_uv_runtime_lock)) + return 
BIOS_STATUS_ABORT; + local_irq_save(bios_flags); - ret = uv_bios_call(which, a1, a2, a3, a4, a5); + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); local_irq_restore(bios_flags); + up(&__efi_uv_runtime_lock); + return ret; } diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index ab14e6f73ca4..a9e80e44178c 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -16,7 +16,6 @@ config 64BIT config X86_32 def_bool !64BIT - select HAVE_AOUT select ARCH_32BIT_OFF_T select ARCH_WANT_IPC_PARSE_VERSION select MODULES_USE_ELF_REL diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index e255683cd520..809f39ce08c0 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -25,6 +25,7 @@ generic-y += percpu.h generic-y += preempt.h generic-y += rwsem.h generic-y += sections.h +generic-y += socket.h generic-y += topology.h generic-y += trace_clock.h generic-y += vga.h diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h index dfdf9fae1f84..7f6cf4151843 100644 --- a/arch/xtensa/include/asm/asm-uaccess.h +++ b/arch/xtensa/include/asm/asm-uaccess.h @@ -32,8 +32,6 @@ #define KERNEL_DS 0 #define USER_DS 1 -#define get_ds (KERNEL_DS) - /* * get_fs reads current->thread.current_ds into a register. * On Entry: diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 4b2480304bc3..6792928ba84a 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -32,7 +32,6 @@ #define KERNEL_DS ((mm_segment_t) { 0 }) #define USER_DS ((mm_segment_t) { 1 }) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild index 960bf1e4be53..6b43e5049ff7 100644 --- a/arch/xtensa/include/uapi/asm/Kbuild +++ b/arch/xtensa/include/uapi/asm/Kbuild @@ -2,3 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generic-y += kvm_para.h +generic-y += socket.h diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h deleted file mode 100644 index 1de07a7f7680..000000000000 --- a/arch/xtensa/include/uapi/asm/socket.h +++ /dev/null @@ -1,122 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * include/asm-xtensa/socket.h - * - * Copied from i386. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ - -#ifndef _XTENSA_SOCKET_H -#define _XTENSA_SOCKET_H - -#include <asm/sockios.h> - -/* For setsockoptions(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -#define SO_REUSEPORT 15 -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ - -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ - -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 -#define SO_GET_FILTER SO_ATTACH_FILTER - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#define SO_MARK 36 - -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - -#define SO_PROTOCOL 38 -#define SO_DOMAIN 39 - -#define SO_RXQ_OVFL 40 - -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS -#define SO_PEEK_OFF 42 - -/* Instruct lower device to use last 4-bytes of skb data as FCS */ -#define SO_NOFCS 43 - -#define SO_LOCK_FILTER 44 - -#define SO_SELECT_ERR_QUEUE 45 - -#define SO_BUSY_POLL 46 - -#define SO_MAX_PACING_RATE 47 - -#define SO_BPF_EXTENSIONS 48 - -#define SO_INCOMING_CPU 49 - -#define SO_ATTACH_BPF 50 -#define SO_DETACH_BPF SO_DETACH_FILTER - -#define SO_ATTACH_REUSEPORT_CBPF 51 -#define SO_ATTACH_REUSEPORT_EBPF 52 - -#define SO_CNX_ADVICE 53 - -#define SCM_TIMESTAMPING_OPT_STATS 54 - -#define SO_MEMINFO 55 - -#define SO_INCOMING_NAPI_ID 56 - -#define SO_COOKIE 57 - -#define SCM_TIMESTAMPING_PKTINFO 58 - -#define SO_PEERGROUPS 59 - -#define SO_ZEROCOPY 60 - -#define SO_TXTIME 61 -#define SCM_TXTIME SO_TXTIME - -#endif /* _XTENSA_SOCKET_H */
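
The arch/x86/kernel/cpu/bugs.c and arch/x86/kernel/process.c hunks above wire up a new PR_SPEC_DISABLE_NOEXEC mode for the speculative-store-bypass prctl: the mitigation is enabled for the calling task and then dropped automatically at the next execve(). Below is a minimal userspace sketch of how a launcher might use it; it assumes the PR_SPEC_* and PR_*_SPECULATION_CTRL constants from include/uapi/linux/prctl.h (the numeric fallbacks are spelled out in case older headers lack them) and is an illustration, not part of the patch set.

/*
 * Sketch: request the SSB mitigation in "noexec" mode before exec'ing an
 * untrusted helper.  arch_setup_new_exec() (see the process.c hunk above)
 * clears TIF_SSBD and the task speculation flags again across the execve().
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_GET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#endif
#ifndef PR_SET_SPECULATION_CTRL
#define PR_SET_SPECULATION_CTRL	53
#endif
#ifndef PR_SPEC_STORE_BYPASS
#define PR_SPEC_STORE_BYPASS	0
#endif
#ifndef PR_SPEC_DISABLE_NOEXEC
#define PR_SPEC_DISABLE_NOEXEC	(1UL << 4)	/* assumed to match linux/prctl.h of this series */
#endif

int main(int argc, char *argv[])
{
	/* Disable speculative store bypass until the next execve(). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE_NOEXEC, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* ssb_prctl_get() should now report the NOEXEC state. */
	printf("ssb state: 0x%x\n",
	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));

	/* The mitigation is cleared again when the new image starts. */
	if (argc > 1)
		execvp(argv[1], &argv[1]);
	return 0;
}

Kernels without this series reject the new mode, which the perror() path above reports.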