Diffstat (limited to 'arch')
57 files changed, 430 insertions, 170 deletions
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 0c2e6cd399b6..78ff56665ef1 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -455,13 +455,13 @@ choice Please adjust DEBUG_UART_PHYS and DEBUG_UART_BASE configuration options based on your needs. - config DEBUG_MVEBU_UART - bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)" + config DEBUG_MVEBU_UART0 + bool "Kernel low-level debugging messages via MVEBU UART0 (old bootloaders)" depends on ARCH_MVEBU select DEBUG_UART_8250 help Say Y here if you want kernel low-level debugging support - on MVEBU based platforms. + on MVEBU based platforms on UART0. This option should be used with the old bootloaders that left the internal registers mapped at @@ -474,13 +474,28 @@ choice when u-boot hands over to the kernel, the system silently crashes, with no serial output at all. - config DEBUG_MVEBU_UART_ALTERNATE - bool "Kernel low-level debugging messages via MVEBU UART (new bootloaders)" + config DEBUG_MVEBU_UART0_ALTERNATE + bool "Kernel low-level debugging messages via MVEBU UART0 (new bootloaders)" depends on ARCH_MVEBU select DEBUG_UART_8250 help Say Y here if you want kernel low-level debugging support - on MVEBU based platforms. + on MVEBU based platforms on UART0. + + This option should be used with the new bootloaders + that remap the internal registers at 0xf1000000. + + If the wrong DEBUG_MVEBU_UART* option is selected, + when u-boot hands over to the kernel, the system + silently crashes, with no serial output at all. + + config DEBUG_MVEBU_UART1_ALTERNATE + bool "Kernel low-level debugging messages via MVEBU UART1 (new bootloaders)" + depends on ARCH_MVEBU + select DEBUG_UART_8250 + help + Say Y here if you want kernel low-level debugging support + on MVEBU based platforms on UART1. This option should be used with the new bootloaders that remap the internal registers at 0xf1000000. 
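For reference, the three debug-UART options resolve to the following physical addresses, matching the DEBUG_UART_PHYS defaults added in the next hunks of this patch (UART0 sits at offset 0x12000 and UART1 at offset 0x12100 from the internal-register base):

    DEBUG_MVEBU_UART0            (old bootloaders, regs at 0xd0000000) -> 0xd0012000
    DEBUG_MVEBU_UART0_ALTERNATE  (new bootloaders, regs at 0xf1000000) -> 0xf1012000
    DEBUG_MVEBU_UART1_ALTERNATE  (new bootloaders, regs at 0xf1000000) -> 0xf1012100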
@@ -1301,7 +1316,7 @@ config DEBUG_UART_PHYS default 0xc8000000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN default 0xc8000003 if ARCH_IXP4XX && CPU_BIG_ENDIAN default 0xd0000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX - default 0xd0012000 if DEBUG_MVEBU_UART + default 0xd0012000 if DEBUG_MVEBU_UART0 default 0xc81004c0 if DEBUG_MESON_UARTAO default 0xd4017000 if DEBUG_MMP_UART2 default 0xd4018000 if DEBUG_MMP_UART3 @@ -1315,7 +1330,8 @@ config DEBUG_UART_PHYS default 0xe8008000 if DEBUG_R7S72100_SCIF2 default 0xf0000be0 if ARCH_EBSA110 default 0xf040ab00 if DEBUG_BRCMSTB_UART - default 0xf1012000 if DEBUG_MVEBU_UART_ALTERNATE + default 0xf1012000 if DEBUG_MVEBU_UART0_ALTERNATE + default 0xf1012100 if DEBUG_MVEBU_UART1_ALTERNATE default 0xf1012000 if ARCH_DOVE || ARCH_MV78XX0 || \ ARCH_ORION5X default 0xf7fc9000 if DEBUG_BERLIN_UART @@ -1397,8 +1413,9 @@ config DEBUG_UART_VIRT default 0xfeb30c00 if DEBUG_KEYSTONE_UART0 default 0xfeb31000 if DEBUG_KEYSTONE_UART1 default 0xfec02000 if DEBUG_SOCFPGA_UART + default 0xfec12000 if DEBUG_MVEBU_UART0 || DEBUG_MVEBU_UART0_ALTERNATE + default 0xfec12100 if DEBUG_MVEBU_UART1_ALTERNATE default 0xfec10000 if DEBUG_SIRFATLAS7_UART0 - default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE default 0xfec20000 if DEBUG_DAVINCI_DMx_UART0 default 0xfec20000 if DEBUG_SIRFATLAS7_UART1 default 0xfec60000 if DEBUG_SIRFPRIMA2_UART1 diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts index 1466580be295..70b1943a86b1 100644 --- a/arch/arm/boot/dts/armada-370-db.dts +++ b/arch/arm/boot/dts/armada-370-db.dts @@ -203,27 +203,3 @@ compatible = "linux,spdif-dir"; }; }; - -&pinctrl { - /* - * These pins might be muxed as I2S by - * the bootloader, but it conflicts - * with the real I2S pins that are - * muxed using i2s_pins. We must mux - * those pins to a function other than - * I2S. - */ - pinctrl-0 = <&hog_pins1 &hog_pins2>; - pinctrl-names = "default"; - - hog_pins1: hog-pins1 { - marvell,pins = "mpp6", "mpp8", "mpp10", - "mpp12", "mpp13"; - marvell,function = "gpio"; - }; - - hog_pins2: hog-pins2 { - marvell,pins = "mpp5", "mpp7", "mpp9"; - marvell,function = "gpo"; - }; -}; diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 2328fe752e9c..bc393b7e5ece 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -338,6 +338,7 @@ CONFIG_USB=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_MVEBU=y CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_EXYNOS=y CONFIG_USB_EHCI_TEGRA=y CONFIG_USB_EHCI_HCD_STI=y CONFIG_USB_EHCI_HCD_PLATFORM=y diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 705bb7620673..0c3f5a0dafd3 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -413,6 +413,7 @@ #define __NR_getrandom (__NR_SYSCALL_BASE+384) #define __NR_memfd_create (__NR_SYSCALL_BASE+385) #define __NR_bpf (__NR_SYSCALL_BASE+386) +#define __NR_execveat (__NR_SYSCALL_BASE+387) /* * The following SWIs are ARM private. 
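With __NR_execveat wired up here for ARM as syscall 387 (ia64 and powerpc follow later in this patch), userspace can execute a program named by a file descriptor. A minimal sketch of invoking it through the raw syscall number, since libc wrappers did not exist at the time; __NR_execveat and AT_EMPTY_PATH are assumed to come from kernel headers new enough to carry this patch:

#define _GNU_SOURCE
#include <fcntl.h>	/* O_PATH, AT_EMPTY_PATH */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char *argv[] = { "true", NULL };
	char *envp[] = { NULL };
	int fd = open("/bin/true", O_PATH);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Empty pathname + AT_EMPTY_PATH: execute the file fd itself refers to. */
	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
	perror("execveat");	/* reached only if the call failed */
	return 1;
}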
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index e51833f8cc38..05745eb838c5 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -396,6 +396,7 @@ CALL(sys_getrandom) /* 385 */ CALL(sys_memfd_create) CALL(sys_bpf) + CALL(sys_execveat) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c index 6e4379c67cbc..592dda3f21ff 100644 --- a/arch/arm/kernel/perf_regs.c +++ b/arch/arm/kernel/perf_regs.c @@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task) { return PERF_SAMPLE_REGS_ABI_32; } + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index f9c863911038..715ae19bc7c8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -1046,6 +1046,15 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "model name\t: %s rev %d (%s)\n", cpu_name, cpuid & 15, elf_platform); +#if defined(CONFIG_SMP) + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), + (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); +#else + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); +#endif /* dump out the processor features */ seq_puts(m, "Features\t: "); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 5e6052e18850..86ef244c5a24 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void) void __init smp_cpus_done(unsigned int max_cpus) { + int cpu; + unsigned long bogosum = 0; + + for_each_online_cpu(cpu) + bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; + + printk(KERN_INFO "SMP: Total of %d processors activated " + "(%lu.%02lu BogoMIPS).\n", + num_online_cpus(), + bogosum / (500000/HZ), + (bogosum / (5000/HZ)) % 100); + hyp_mode_check(); } diff --git a/arch/arm/mach-bcm/platsmp-brcmstb.c b/arch/arm/mach-bcm/platsmp-brcmstb.c index 31c87a284a34..e209e6fc7caf 100644 --- a/arch/arm/mach-bcm/platsmp-brcmstb.c +++ b/arch/arm/mach-bcm/platsmp-brcmstb.c @@ -17,6 +17,7 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> +#include <linux/jiffies.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/printk.h> @@ -94,10 +95,35 @@ static u32 pwr_ctrl_rd(u32 cpu) return readl_relaxed(base); } -static void pwr_ctrl_wr(u32 cpu, u32 val) +static void pwr_ctrl_set(unsigned int cpu, u32 val, u32 mask) { void __iomem *base = pwr_ctrl_get_base(cpu); - writel(val, base); + writel((readl(base) & mask) | val, base); +} + +static void pwr_ctrl_clr(unsigned int cpu, u32 val, u32 mask) +{ + void __iomem *base = pwr_ctrl_get_base(cpu); + writel((readl(base) & mask) & ~val, base); +} + +#define POLL_TMOUT_MS 500 +static int pwr_ctrl_wait_tmout(unsigned int cpu, u32 set, u32 mask) +{ + const unsigned long timeo = jiffies + msecs_to_jiffies(POLL_TMOUT_MS); + u32 tmp; + + do { + tmp = pwr_ctrl_rd(cpu) & mask; + if (!set == !tmp) + return 0; + } while (time_before(jiffies, timeo)); + + tmp = pwr_ctrl_rd(cpu) & mask; + if (!set == !tmp) + return 0; + + return -ETIMEDOUT; } static void cpu_rst_cfg_set(u32 cpu, int set) @@ -139,15 +165,22 @@ static void brcmstb_cpu_power_on(u32 cpu) * The secondary 
cores power was cut, so we must go through * power-on initialization. */ - u32 tmp; + pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, 0xffffff00); + pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1); + pwr_ctrl_set(cpu, ZONE_RESERVED_1_MASK, -1); - /* Request zone power up */ - pwr_ctrl_wr(cpu, ZONE_PWR_UP_REQ_MASK); + pwr_ctrl_set(cpu, ZONE_MAN_MEM_PWR_MASK, -1); - /* Wait for the power up FSM to complete */ - do { - tmp = pwr_ctrl_rd(cpu); - } while (!(tmp & ZONE_PWR_ON_STATE_MASK)); + if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_MEM_PWR_STATE_MASK)) + panic("ZONE_MEM_PWR_STATE_MASK set timeout"); + + pwr_ctrl_set(cpu, ZONE_MAN_CLKEN_MASK, -1); + + if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_DPG_PWR_STATE_MASK)) + panic("ZONE_DPG_PWR_STATE_MASK set timeout"); + + pwr_ctrl_clr(cpu, ZONE_MAN_ISO_CNTL_MASK, -1); + pwr_ctrl_set(cpu, ZONE_MAN_RESET_CNTL_MASK, -1); } static int brcmstb_cpu_get_power_state(u32 cpu) @@ -174,25 +207,33 @@ static void brcmstb_cpu_die(u32 cpu) static int brcmstb_cpu_kill(u32 cpu) { - u32 tmp; + /* + * Ordinarily, the hardware forbids power-down of CPU0 (which is good + * because it is the boot CPU), but this is not true when using BPCM + * manual mode. Consequently, we must avoid turning off CPU0 here to + * ensure that TI2C master reset will work. + */ + if (cpu == 0) { + pr_warn("SMP: refusing to power off CPU0\n"); + return 1; + } while (per_cpu_sw_state_rd(cpu)) ; - /* Program zone reset */ - pwr_ctrl_wr(cpu, ZONE_RESET_STATE_MASK | ZONE_BLK_RST_ASSERT_MASK | - ZONE_PWR_DN_REQ_MASK); + pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1); + pwr_ctrl_clr(cpu, ZONE_MAN_RESET_CNTL_MASK, -1); + pwr_ctrl_clr(cpu, ZONE_MAN_CLKEN_MASK, -1); + pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, -1); + pwr_ctrl_clr(cpu, ZONE_MAN_MEM_PWR_MASK, -1); - /* Verify zone reset */ - tmp = pwr_ctrl_rd(cpu); - if (!(tmp & ZONE_RESET_STATE_MASK)) - pr_err("%s: Zone reset bit for CPU %d not asserted!\n", - __func__, cpu); + if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_MEM_PWR_STATE_MASK)) + panic("ZONE_MEM_PWR_STATE_MASK clear timeout"); - /* Wait for power down */ - do { - tmp = pwr_ctrl_rd(cpu); - } while (!(tmp & ZONE_PWR_OFF_STATE_MASK)); + pwr_ctrl_clr(cpu, ZONE_RESERVED_1_MASK, -1); + + if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_DPG_PWR_STATE_MASK)) + panic("ZONE_DPG_PWR_STATE_MASK clear timeout"); /* Flush pipeline before resetting CPU */ mb(); diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.h b/arch/arm/mach-mvebu/mvebu-soc-id.h index c16bb68ca81f..e124a0b82a3e 100644 --- a/arch/arm/mach-mvebu/mvebu-soc-id.h +++ b/arch/arm/mach-mvebu/mvebu-soc-id.h @@ -20,10 +20,28 @@ #define MV78XX0_A0_REV 0x1 #define MV78XX0_B0_REV 0x2 +/* Armada 370 ID */ +#define ARMADA_370_DEV_ID 0x6710 + +/* Armada 370 Revision */ +#define ARMADA_370_A1_REV 0x1 + +/* Armada 375 ID */ +#define ARMADA_375_DEV_ID 0x6720 + /* Armada 375 */ #define ARMADA_375_Z1_REV 0x0 #define ARMADA_375_A0_REV 0x3 +/* Armada 38x ID */ +#define ARMADA_380_DEV_ID 0x6810 +#define ARMADA_385_DEV_ID 0x6820 +#define ARMADA_388_DEV_ID 0x6828 + +/* Armada 38x Revision */ +#define ARMADA_38x_Z1_REV 0x0 +#define ARMADA_38x_A0_REV 0x4 + #ifdef CONFIG_ARCH_MVEBU int mvebu_get_soc_id(u32 *dev, u32 *rev); #else diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index 1b4fafe524ff..d107b9386bda 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -51,6 +51,11 @@ config ARCH_R7S72100 bool "RZ/A1H (R7S72100)" select SYS_SUPPORTS_SH_MTU2 +config ARCH_R8A73A4 + bool "R-Mobile APE6 (R8A73A40)" + select ARCH_RMOBILE + select RENESAS_IRQC + 
config ARCH_R8A7740 bool "R-Mobile A1 (R8A77400)" select ARCH_RMOBILE diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c index 6b4c1f313cc9..3855fb024fdb 100644 --- a/arch/arm/mach-shmobile/clock-sh73a0.c +++ b/arch/arm/mach-shmobile/clock-sh73a0.c @@ -553,6 +553,7 @@ enum { MSTP001, MSTP314, MSTP313, MSTP312, MSTP311, MSTP304, MSTP303, MSTP302, MSTP301, MSTP300, MSTP411, MSTP410, MSTP403, + MSTP508, MSTP_NR }; #define MSTP(_parent, _reg, _bit, _flags) \ @@ -597,6 +598,7 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */ [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */ [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ + [MSTP508] = MSTP(&div4_clks[DIV4_HP], SMSTPCR5, 8, 0), /* INTCA0 */ }; /* The lookups structure below includes duplicate entries for some clocks @@ -677,6 +679,14 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */ CLKDEV_DEV_ID("e6828000.i2c", &mstp_clks[MSTP410]), /* I2C4 */ CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ + CLKDEV_DEV_ID("renesas_intc_irqpin.0", &mstp_clks[MSTP508]), /* INTCA0 */ + CLKDEV_DEV_ID("e6900000.irqpin", &mstp_clks[MSTP508]), /* INTCA0 */ + CLKDEV_DEV_ID("renesas_intc_irqpin.1", &mstp_clks[MSTP508]), /* INTCA0 */ + CLKDEV_DEV_ID("e6900004.irqpin", &mstp_clks[MSTP508]), /* INTCA0 */ + CLKDEV_DEV_ID("renesas_intc_irqpin.2", &mstp_clks[MSTP508]), /* INTCA0 */ + CLKDEV_DEV_ID("e6900008.irqpin", &mstp_clks[MSTP508]), /* INTCA0 */ + CLKDEV_DEV_ID("renesas_intc_irqpin.3", &mstp_clks[MSTP508]), /* INTCA0 */ + CLKDEV_DEV_ID("e690000c.irqpin", &mstp_clks[MSTP508]), /* INTCA0 */ /* ICK */ CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c index 3dd6edd9bd1d..c35b91d92190 100644 --- a/arch/arm/mach-shmobile/setup-rcar-gen2.c +++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c @@ -52,15 +52,13 @@ void __init rcar_gen2_timer_init(void) { #if defined(CONFIG_ARM_ARCH_TIMER) || defined(CONFIG_COMMON_CLK) u32 mode = rcar_gen2_read_mode_pins(); - bool is_e2 = (bool)of_find_compatible_node(NULL, NULL, - "renesas,r8a7794"); #endif #ifdef CONFIG_ARM_ARCH_TIMER void __iomem *base; int extal_mhz = 0; u32 freq; - if (is_e2) { + if (of_machine_is_compatible("renesas,r8a7794")) { freq = 260000000 / 8; /* ZS / 8 */ /* CNTVOFF has to be initialized either from non-secure * Hypervisor mode or secure Monitor mode with SCR.NS==1. 
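The setup-rcar-gen2.c change above replaces a cached of_find_compatible_node() test with of_machine_is_compatible(). Besides being shorter, the old idiom had two subtle problems: it matched "renesas,r8a7794" on any node anywhere in the device tree rather than on the machine's root node, and it leaked the node reference the lookup returned. A rough sketch of what the helper does internally (illustrative only, not the kernel's exact implementation):

#include <linux/of.h>

/* Illustrative sketch: test the root node's compatible list. */
static bool machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);	/* drop the reference taken above */
	}
	return rc != 0;
}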
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c index 3f761f839043..9fc280e24ef4 100644 --- a/arch/arm/mach-shmobile/smp-r8a7779.c +++ b/arch/arm/mach-shmobile/smp-r8a7779.c @@ -56,7 +56,7 @@ static struct rcar_sysc_ch *r8a7779_ch_cpu[4] = { [3] = &r8a7779_ch_cpu3, }; -#ifdef CONFIG_HAVE_ARM_TWD +#if defined(CONFIG_HAVE_ARM_TWD) && !defined(CONFIG_ARCH_MULTIPLATFORM) static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, R8A7779_SCU_BASE + 0x600, 29); void __init r8a7779_register_twd(void) { diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 59424937e52b..9fe8e241335c 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u static const char units[] = "KMGTPE"; u64 prot = val & pg_level[level].mask; - if (addr < USER_PGTABLES_CEILING) - return; - if (!st->level) { st->level = level; st->current_prot = prot; @@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m) pgd_t *pgd = swapper_pg_dir; struct pg_state st; unsigned long addr; - unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE; + unsigned i; memset(&st, 0, sizeof(st)); st.seq = m; st.marker = address_markers; - pgd += pgdoff; - - for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) { + for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { addr = i * PGDIR_SIZE; if (!pgd_none(*pgd)) { walk_pud(&st, pgd, addr); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 98ad9c79ea0e..2495c8cb47ba 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = { .start = (unsigned long)_stext, .end = (unsigned long)__init_begin, #ifdef CONFIG_ARM_LPAE - .mask = ~PMD_SECT_RDONLY, - .prot = PMD_SECT_RDONLY, + .mask = ~L_PMD_SECT_RDONLY, + .prot = L_PMD_SECT_RDONLY, #else .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index cda7c40999b6..4e6ef896c619 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1329,8 +1329,8 @@ static void __init kmap_init(void) static void __init map_lowmem(void) { struct memblock_region *reg; - unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); - unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); + phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); + phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); /* Map all the lowmem memory banks. 
*/ for_each_memblock(memory, reg) { diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index b1fa4e614718..fbe0ca31a99c 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -21,6 +21,7 @@ #include <asm/barrier.h> +#include <linux/bug.h> #include <linux/init.h> #include <linux/types.h> diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index ace70682499b..8e797b2fcc01 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -39,6 +39,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64pfr0; u64 reg_id_aa64pfr1; + u32 reg_id_dfr0; u32 reg_id_isar0; u32 reg_id_isar1; u32 reg_id_isar2; @@ -51,6 +52,10 @@ struct cpuinfo_arm64 { u32 reg_id_mmfr3; u32 reg_id_pfr0; u32 reg_id_pfr1; + + u32 reg_mvfr0; + u32 reg_mvfr1; + u32 reg_mvfr2; }; DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data); diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 286b1bec547c..f9be30ea1cbd 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -31,6 +31,7 @@ #include <asm/fpsimd.h> #include <asm/hw_breakpoint.h> +#include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include <asm/types.h> @@ -123,9 +124,6 @@ struct task_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Prepare to copy thread state - unlazy all lazy status */ -#define prepare_to_copy(tsk) do { } while (0) - unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 49c9aefd24a5..b780c6c76eec 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -44,7 +44,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 386 +#define __NR_compat_syscalls 387 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 57b641747534..07d435cf2eea 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) * If we have AArch32, we care about 32-bit features for compat. These * registers should be RES0 otherwise. */ + diff |= CHECK(id_dfr0, boot, cur, cpu); diff |= CHECK(id_isar0, boot, cur, cpu); diff |= CHECK(id_isar1, boot, cur, cpu); diff |= CHECK(id_isar2, boot, cur, cpu); @@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) diff |= CHECK(id_pfr0, boot, cur, cpu); diff |= CHECK(id_pfr1, boot, cur, cpu); + diff |= CHECK(mvfr0, boot, cur, cpu); + diff |= CHECK(mvfr1, boot, cur, cpu); + diff |= CHECK(mvfr2, boot, cur, cpu); + /* * Mismatched CPU features are a recipe for disaster. Don't even * pretend to support them. 
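The CHECK() lines above extend the boot-versus-secondary CPU sanity comparison to the AArch32 debug (ID_DFR0) and media/VFP (MVFR0-2) ID registers. The macro body lies outside this hunk; conceptually it compares one saved register field across the two cpuinfo snapshots and flags any mismatch, along these lines (hypothetical sketch, not the kernel's exact macro):

/* Hypothetical sketch of the per-register mismatch check. */
#define CHECK(field, boot, cur, cpu)					\
({									\
	int __mismatch = ((boot)->reg_##field != (cur)->reg_##field);	\
	if (__mismatch)							\
		pr_warn("CPU%d: " #field " differs from boot CPU\n",	\
			(cpu));						\
	__mismatch;							\
})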
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); + info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1); info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); @@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); + info->reg_mvfr0 = read_cpuid(MVFR0_EL1); + info->reg_mvfr1 = read_cpuid(MVFR1_EL1); + info->reg_mvfr2 = read_cpuid(MVFR2_EL1); + cpuinfo_detect_icache_policy(info); check_local_cpu_errata(); diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 6fac253bc783..2bb4347d0edf 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -326,6 +326,7 @@ void __init efi_idmap_init(void) /* boot time idmap_pg_dir is incomplete, so fill in missing parts */ efi_setup_idmap(); + early_memunmap(memmap.map, memmap.map_end - memmap.map); } static int __init remap_region(efi_memory_desc_t *md, void **new) @@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void) } mapsize = memmap.map_end - memmap.map; - early_memunmap(memmap.map, mapsize); if (efi_runtime_disabled()) { pr_info("EFI runtime services will be disabled.\n"); diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index fd027b101de5..9b6f71db2709 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -25,6 +25,7 @@ #include <linux/mm.h> #include <linux/moduleloader.h> #include <linux/vmalloc.h> +#include <asm/alternative.h> #include <asm/insn.h> #include <asm/sections.h> diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 6762ad705587..3f62b35fb6f1 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task) else return PERF_SAMPLE_REGS_ABI_64; } + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index b80991166754..20fe2932ad0c 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p) request_standard_resources(); efi_idmap_init(); + early_ioremap_reset(); unflatten_device_tree(); diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 4f93c67e63de..14944e5b28da 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -25,6 +25,7 @@ #include <asm/cacheflush.h> #include <asm/cpu_ops.h> #include <asm/cputype.h> +#include <asm/io.h> #include <asm/smp_plat.h> extern void secondary_holding_pen(void); diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index 6f4bac969bf7..23eada79439c 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -7,6 +7,7 @@ */ #include <linux/device.h> +#include <linux/delay.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index f3b51b57740a..95c39b95e97e 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ 
-11,7 +11,7 @@ -#define NR_syscalls 318 /* length of syscall table */ +#define NR_syscalls 319 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 4c2240c1b0cb..461079560c78 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -331,5 +331,6 @@ #define __NR_getrandom 1339 #define __NR_memfd_create 1340 #define __NR_bpf 1341 +#define __NR_execveat 1342 #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 615ef81def49..e795cb848154 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) } /* wrapper to silence section mismatch warning */ -int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) +int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) { return _acpi_map_lsapic(handle, physid, pcpu); } -EXPORT_SYMBOL(acpi_map_lsapic); +EXPORT_SYMBOL(acpi_map_cpu); -int acpi_unmap_lsapic(int cpu) +int acpi_unmap_cpu(int cpu) { ia64_cpu_to_sapicid[cpu] = -1; set_cpu_present(cpu, false); @@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu) return (0); } - -EXPORT_SYMBOL(acpi_unmap_lsapic); +EXPORT_SYMBOL(acpi_unmap_cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ #ifdef CONFIG_ACPI_NUMA diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index f5e96dffc63c..fcf8b8cbca0b 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1779,6 +1779,7 @@ sys_call_table: data8 sys_getrandom data8 sys_memfd_create // 1340 data8 sys_bpf + data8 sys_execveat .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c index 51d5bb90d3e5..a223691dff4f 100644 --- a/arch/nios2/kernel/cpuinfo.c +++ b/arch/nios2/kernel/cpuinfo.c @@ -72,6 +72,7 @@ void __init setup_cpuinfo(void) cpuinfo.has_div = fcpu_has(cpu, "altr,has-div"); cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul"); cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx"); + cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu"); if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div) err_cpu("DIV"); diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index 83bca17d1008..0bdfd13ff98b 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S @@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt) GET_THREAD_INFO r1 ldw r4, TI_PREEMPT_COUNT(r1) bne r4, r0, restore_all - -need_resched: ldw r4, TI_FLAGS(r1) /* ? Need resched set */ BTBZ r10, r4, TIF_NEED_RESCHED, restore_all ldw r4, PT_ESTATUS(sp) /* ? 
Interrupts off */ andi r10, r4, ESTATUS_EPIE beq r10, r0, restore_all - movia r4, PREEMPT_ACTIVE - stw r4, TI_PREEMPT_COUNT(r1) - rdctl r10, status /* enable intrs again */ - ori r10, r10 ,STATUS_PIE - wrctl status, r10 - PUSH r1 - call schedule - POP r1 - mov r4, r0 - stw r4, TI_PREEMPT_COUNT(r1) - rdctl r10, status /* disable intrs */ - andi r10, r10, %lo(~STATUS_PIE) - wrctl status, r10 - br need_resched -#else - br restore_all + call preempt_schedule_irq #endif + br restore_all /*********************************************************************** * A few syscall wrappers diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 19c36cba37c4..a46f5f45570c 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size); extern void reserve_crashkernel(void); extern void machine_kexec_mask_interrupts(void); +static inline bool kdump_in_progress(void) +{ + return crashing_cpu >= 0; +} + #else /* !CONFIG_KEXEC */ static inline void crash_kexec_secondary(struct pt_regs *regs) { } @@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler) return 0; } +static inline bool kdump_in_progress(void) +{ + return false; +} + #endif /* CONFIG_KEXEC */ #endif /* ! __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index ce9577d693be..91062eef582f 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp) SYSCALL_SPU(getrandom) SYSCALL_SPU(memfd_create) SYSCALL_SPU(bpf) +COMPAT_SYS(execveat) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index e0da021caa00..36b79c31eedd 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 362 +#define __NR_syscalls 363 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index f55351f2e66e..ef5b5b1f3123 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -384,5 +384,6 @@ #define __NR_getrandom 359 #define __NR_memfd_create 360 #define __NR_bpf 361 +#define __NR_execveat 362 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 879b3aacac32..f96d1ec24189 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image) * using debugger IPI. 
*/ - if (crashing_cpu == -1) + if (!kdump_in_progress()) kexec_prepare_cpus(); pr_debug("kexec: Starting switchover sequence.\n"); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 8ec017cb4446..8b2d2dc8ef10 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -700,6 +700,7 @@ void start_secondary(void *unused) smp_store_cpu_info(cpu); set_dec(tb_ticks_per_jiffy); preempt_disable(); + cpu_callin_map[cpu] = 1; if (smp_ops->setup_cpu) smp_ops->setup_cpu(cpu); @@ -738,14 +739,6 @@ void start_secondary(void *unused) notify_cpu_starting(cpu); set_cpu_online(cpu, true); - /* - * CPU must be marked active and online before we signal back to the - * master, because the scheduler needs to see the cpu_online and - * cpu_active bits set. - */ - smp_wmb(); - cpu_callin_map[cpu] = 1; - local_irq_enable(); cpu_startup_entry(CPUHP_ONLINE); diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 469751d92004..b5682fd6c984 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -43,6 +43,7 @@ #include <asm/trace.h> #include <asm/firmware.h> #include <asm/plpar_wrappers.h> +#include <asm/kexec.h> #include <asm/fadump.h> #include "pseries.h" @@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void) * out to the user, but at least this will stop us from * continuing on further and creating an even more * difficult to debug situation. + * + * There is a known problem when kdump'ing, if cpus are offline + * the above call will fail. Rather than panicking again, keep + * going and hope the kdump kernel is also little endian, which + * it usually is. */ - if (rc) + if (rc && !kdump_in_progress()) panic("Could not enable big endian exceptions"); } #endif diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index 87bc86821bc9..d195a87ca542 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common @@ -3,6 +3,7 @@ config UML default y select HAVE_ARCH_AUDITSYSCALL select HAVE_UID16 + select HAVE_FUTEX_CMPXCHG if FUTEX select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_IO diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 5b016e2498f3..3db07f30636f 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -51,6 +51,7 @@ targets += cpustr.h $(obj)/cpustr.h: $(obj)/mkcpustr FORCE $(call if_changed,cpustr) endif +clean-files += cpustr.h # --------------------------------------------------------------------------- diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index fd0f848938cc..5a4a089e8b1f 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o -obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o @@ -46,6 +45,7 @@ endif ifeq ($(avx2_supported),yes) obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o + obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ endif aes-i586-y := aes-i586-asm_32.o aes_glue.o diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 2df2a0298f5a..a916c4a61165 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ 
b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -208,7 +208,7 @@ ddq_add_8: .if (klen == KEY_128) .if (load_keys) - vmovdqa 3*16(p_keys), xkeyA + vmovdqa 3*16(p_keys), xkey4 .endif .else vmovdqa 3*16(p_keys), xkeyA @@ -224,7 +224,7 @@ ddq_add_8: add $(16*by), p_in .if (klen == KEY_128) - vmovdqa 4*16(p_keys), xkey4 + vmovdqa 4*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 4*16(p_keys), xkey4 @@ -234,7 +234,12 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* key 3 */ + /* key 3 */ + .if (klen == KEY_128) + vaesenc xkey4, var_xdata, var_xdata + .else + vaesenc xkeyA, var_xdata, var_xdata + .endif .set i, (i +1) .endr @@ -243,13 +248,18 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkey4, var_xdata, var_xdata /* key 4 */ + /* key 4 */ + .if (klen == KEY_128) + vaesenc xkeyB, var_xdata, var_xdata + .else + vaesenc xkey4, var_xdata, var_xdata + .endif .set i, (i +1) .endr .if (klen == KEY_128) .if (load_keys) - vmovdqa 6*16(p_keys), xkeyB + vmovdqa 6*16(p_keys), xkey8 .endif .else vmovdqa 6*16(p_keys), xkeyB @@ -267,12 +277,17 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkeyB, var_xdata, var_xdata /* key 6 */ + /* key 6 */ + .if (klen == KEY_128) + vaesenc xkey8, var_xdata, var_xdata + .else + vaesenc xkeyB, var_xdata, var_xdata + .endif .set i, (i +1) .endr .if (klen == KEY_128) - vmovdqa 8*16(p_keys), xkey8 + vmovdqa 8*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 8*16(p_keys), xkey8 @@ -288,7 +303,7 @@ ddq_add_8: .if (klen == KEY_128) .if (load_keys) - vmovdqa 9*16(p_keys), xkeyA + vmovdqa 9*16(p_keys), xkey12 .endif .else vmovdqa 9*16(p_keys), xkeyA @@ -297,7 +312,12 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkey8, var_xdata, var_xdata /* key 8 */ + /* key 8 */ + .if (klen == KEY_128) + vaesenc xkeyB, var_xdata, var_xdata + .else + vaesenc xkey8, var_xdata, var_xdata + .endif .set i, (i +1) .endr @@ -306,7 +326,12 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* key 9 */ + /* key 9 */ + .if (klen == KEY_128) + vaesenc xkey12, var_xdata, var_xdata + .else + vaesenc xkeyA, var_xdata, var_xdata + .endif .set i, (i +1) .endr @@ -412,7 +437,6 @@ ddq_add_8: /* main body of aes ctr load */ .macro do_aes_ctrmain key_len - cmp $16, num_bytes jb .Ldo_return2\key_len diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index e7e9682a33e9..f556c4843aa1 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void) /* * Load per CPU data from GDT. LSL is faster than RDTSCP and - * works on all CPUs. + * works on all CPUs. This is volatile so that it orders + * correctly wrt barrier() and to keep gcc from cleverly + * hoisting it out of the calling function. 
*/ - asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); + asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); return p; } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 4433a4be8171..d1626364a28a 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) } /* wrapper to silence section mismatch warning */ -int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) +int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) { return _acpi_map_lsapic(handle, physid, pcpu); } -EXPORT_SYMBOL(acpi_map_lsapic); +EXPORT_SYMBOL(acpi_map_cpu); -int acpi_unmap_lsapic(int cpu) +int acpi_unmap_cpu(int cpu) { #ifdef CONFIG_ACPI_NUMA set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE); @@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu) return (0); } - -EXPORT_SYMBOL(acpi_unmap_lsapic); +EXPORT_SYMBOL(acpi_unmap_cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index e27b49d7c922..80091ae54c2b 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -66,3 +66,4 @@ targets += capflags.c $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE $(call if_changed,mkcapflags) endif +clean-files += capflags.c diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh index e2b22df964cd..36d99a337b49 100644 --- a/arch/x86/kernel/cpu/mkcapflags.sh +++ b/arch/x86/kernel/cpu/mkcapflags.sh @@ -28,7 +28,7 @@ function dump_array() # If the /* comment */ starts with a quote string, grab that. VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')" [ -z "$VALUE" ] && VALUE="\"$NAME\"" - [ "$VALUE" == '""' ] && continue + [ "$VALUE" = '""' ] && continue # Name is uppercase, VALUE is all lowercase VALUE="$(echo "$VALUE" | tr A-Z a-z)" diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 18eb78bbdd10..863d9b02563e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -17,7 +17,7 @@ #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) #define UNCORE_PCI_DEV_IDX(data) (data & 0xff) #define UNCORE_EXTRA_PCI_DEV 0xff -#define UNCORE_EXTRA_PCI_DEV_MAX 2 +#define UNCORE_EXTRA_PCI_DEV_MAX 3 /* support up to 8 sockets */ #define UNCORE_SOCKET_MAX 8 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index 745b158e9a65..21af6149edf2 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void) enum { SNBEP_PCI_QPI_PORT0_FILTER, SNBEP_PCI_QPI_PORT1_FILTER, + HSWEP_PCI_PCU_3, }; static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) @@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void) { if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; + + /* Detect 6-8 core systems with only two SBOXes */ + if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) { + u32 capid4; + + pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3], + 0x94, &capid4); + if (((capid4 >> 6) & 0x3) == 0) + hswep_uncore_sbox.num_boxes = 2; + } + uncore_msr_uncores = hswep_msr_uncores; } @@ -2287,6 
+2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = { .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, SNBEP_PCI_QPI_PORT1_FILTER), }, + { /* PCU.3 (for Capability registers) */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + HSWEP_PCI_PCU_3), + }, { /* end: all zeroes */ } }; diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index e309cc5c276e..781861cc5ee8 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task) { return PERF_SAMPLE_REGS_ABI_32; } + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} #else /* CONFIG_X86_64 */ #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ (1ULL << PERF_REG_X86_ES) | \ @@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task) else return PERF_SAMPLE_REGS_ABI_64; } + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy) +{ + struct pt_regs *user_regs = task_pt_regs(current); + + /* + * If we're in an NMI that interrupted task_pt_regs setup, then + * we can't sample user regs at all. This check isn't really + * sufficient, though, as we could be in an NMI inside an interrupt + * that happened during task_pt_regs setup. + */ + if (regs->sp > (unsigned long)&user_regs->r11 && + regs->sp <= (unsigned long)(user_regs + 1)) { + regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; + regs_user->regs = NULL; + return; + } + + /* + * RIP, flags, and the argument registers are usually saved. + * orig_ax is probably okay, too. + */ + regs_user_copy->ip = user_regs->ip; + regs_user_copy->cx = user_regs->cx; + regs_user_copy->dx = user_regs->dx; + regs_user_copy->si = user_regs->si; + regs_user_copy->di = user_regs->di; + regs_user_copy->r8 = user_regs->r8; + regs_user_copy->r9 = user_regs->r9; + regs_user_copy->r10 = user_regs->r10; + regs_user_copy->r11 = user_regs->r11; + regs_user_copy->orig_ax = user_regs->orig_ax; + regs_user_copy->flags = user_regs->flags; + + /* + * Don't even try to report the "rest" regs. + */ + regs_user_copy->bx = -1; + regs_user_copy->bp = -1; + regs_user_copy->r12 = -1; + regs_user_copy->r13 = -1; + regs_user_copy->r14 = -1; + regs_user_copy->r15 = -1; + + /* + * For this to be at all useful, we need a reasonable guess for + * sp and the ABI. Be careful: we're in NMI context, and we're + * considering current to be the current task, so we should + * be careful not to look at any other percpu variables that might + * change during context switches. + */ + if (IS_ENABLED(CONFIG_IA32_EMULATION) && + task_thread_info(current)->status & TS_COMPAT) { + /* Easy case: we're in a compat syscall. */ + regs_user->abi = PERF_SAMPLE_REGS_ABI_32; + regs_user_copy->sp = user_regs->sp; + regs_user_copy->cs = user_regs->cs; + regs_user_copy->ss = user_regs->ss; + } else if (user_regs->orig_ax != -1) { + /* + * We're probably in a 64-bit syscall. + * Warning: this code is severely racy. At least it's better + * than just blindly copying user_regs. + */ + regs_user->abi = PERF_SAMPLE_REGS_ABI_64; + regs_user_copy->sp = this_cpu_read(old_rsp); + regs_user_copy->cs = __USER_CS; + regs_user_copy->ss = __USER_DS; + regs_user_copy->cx = -1; /* usually contains garbage */ + } else { + /* We're probably in an interrupt or exception. 
*/ + regs_user->abi = user_64bit_mode(user_regs) ? + PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32; + regs_user_copy->sp = user_regs->sp; + regs_user_copy->cs = user_regs->cs; + regs_user_copy->ss = user_regs->ss; + } + + regs_user->regs = regs_user_copy; +} #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 2480978b31cc..1313ae6b478b 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -28,7 +28,7 @@ /* Verify next sizeof(t) bytes can be on the same instruction */ #define validate_next(t, insn, n) \ - ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr) + ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) #define __get_next(t, insn) \ ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index a97ee0801475..08a7d313538a 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping( static unsigned long __init get_new_step_size(unsigned long step_size) { /* - * Explain why we shift by 5 and why we don't have to worry about - * 'step_size << 5' overflowing: - * - * initial mapped size is PMD_SIZE (2M). + * Initial mapped size is PMD_SIZE (2M). * We can not set step_size to be PUD_SIZE (1G) yet. * In worse case, when we cross the 1G boundary, and * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k) - * to map 1G range with PTE. Use 5 as shift for now. + * to map 1G range with PTE. Hence we use one less than the + * difference of page table level shifts. * - * Don't need to worry about overflow, on 32bit, when step_size - * is 0, round_down() returns 0 for start, and that turns it - * into 0x100000000ULL. + * Don't need to worry about overflow in the top-down case, on 32bit, + * when step_size is 0, round_down() returns 0 for start, and that + * turns it into 0x100000000ULL. + * In the bottom-up case, round_up(x, 0) returns 0 though too, which + * needs to be taken into consideration by the code below. */ - return step_size << 5; + return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); } /** @@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start, unsigned long step_size; unsigned long addr; unsigned long mapped_ram_size = 0; - unsigned long new_mapped_ram_size; /* xen has big range in reserved near end of ram, skip it at first.*/ addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE); @@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start, start = map_start; } else start = map_start; - new_mapped_ram_size = init_range_memory_mapping(start, + mapped_ram_size += init_range_memory_mapping(start, last_start); last_start = start; min_pfn_mapped = last_start >> PAGE_SHIFT; - /* only increase step_size after big range get mapped */ - if (new_mapped_ram_size > mapped_ram_size) + if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); - mapped_ram_size += new_mapped_ram_size; } if (real_end < map_end) @@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start, static void __init memory_map_bottom_up(unsigned long map_start, unsigned long map_end) { - unsigned long next, new_mapped_ram_size, start; + unsigned long next, start; unsigned long mapped_ram_size = 0; /* step_size need to be small so pgt_buf from BRK could cover it */ unsigned long step_size = PMD_SIZE; @@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start, * for page table. 
*/ while (start < map_end) { - if (map_end - start > step_size) { + if (step_size && map_end - start > step_size) { next = round_up(start + 1, step_size); if (next > map_end) next = map_end; - } else + } else { next = map_end; + } - new_mapped_ram_size = init_range_memory_mapping(start, next); + mapped_ram_size += init_range_memory_mapping(start, next); start = next; - if (new_mapped_ram_size > mapped_ram_size) + if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); - mapped_ram_size += new_mapped_ram_size; } } diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c index 531d4269e2e3..bd16d6c370ec 100644 --- a/arch/x86/um/sys_call_table_32.c +++ b/arch/x86/um/sys_call_table_32.c @@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void); extern asmlinkage void sys_ni_syscall(void); -const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { +const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { /* * Smells like a compiler bug -- it doesn't work * when the & below is removed. diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c index 20c3649d0691..5cdfa9db2217 100644 --- a/arch/x86/um/sys_call_table_64.c +++ b/arch/x86/um/sys_call_table_64.c @@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void); extern void sys_ni_syscall(void); -const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { +const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { /* * Smells like a compiler bug -- it doesn't work * when the & below is removed. diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 009495b9ab4b..1c9f750c3859 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c @@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image) struct linux_binprm; -/* Put the vdso above the (randomized) stack with another randomized offset. - This way there is no hole in the middle of address space. - To save memory make sure it is still in the same PTE as the stack top. - This doesn't give that many random bits. - - Only used for the 64-bit and x32 vdsos. */ +/* + * Put the vdso above the (randomized) stack with another randomized + * offset. This way there is no hole in the middle of address space. + * To save memory make sure it is still in the same PTE as the stack + * top. This doesn't give that many random bits. + * + * Note that this algorithm is imperfect: the distribution of the vdso + * start address within a PMD is biased toward the end. + * + * Only used for the 64-bit and x32 vdsos. + */ static unsigned long vdso_addr(unsigned long start, unsigned len) { #ifdef CONFIG_X86_32 @@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) #else unsigned long addr, end; unsigned offset; - end = (start + PMD_SIZE - 1) & PMD_MASK; + + /* + * Round up the start address. It can start out unaligned as a result + * of stack start randomization. + */ + start = PAGE_ALIGN(start); + + /* Round the lowest possible end address up to a PMD boundary. 
*/ + end = (start + len + PMD_SIZE - 1) & PMD_MASK; if (end >= TASK_SIZE_MAX) end = TASK_SIZE_MAX; end -= len; - /* This loses some more bits than a modulo, but is cheaper */ - offset = get_random_int() & (PTRS_PER_PTE - 1); - addr = start + (offset << PAGE_SHIFT); - if (addr >= end) - addr = end; + + if (end > start) { + offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); + addr = start + (offset << PAGE_SHIFT); + } else { + addr = start; + } /* - * page-align it here so that get_unmapped_area doesn't - * align it wrongfully again to the next page. addr can come in 4K - * unaligned here as a result of stack start randomization. + * Forcibly align the final address in case we have a hardware + * issue that requires alignment for performance reasons. */ - addr = PAGE_ALIGN(addr); addr = align_vdso_addr(addr); return addr;
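The rewritten vdso_addr() above draws the page offset uniformly from the whole usable range with a modulo, where the old mask-and-clamp approach biased a large fraction of results onto end. A standalone sketch of the new selection step, with the random value supplied by the caller (illustrative; PAGE_SHIFT and the page-alignment helpers behave as in the kernel, e.g. PAGE_SHIFT = 12 for 4K pages):

/* Sketch: choose a page-aligned load address uniformly in [start, end]. */
static unsigned long pick_addr(unsigned long start, unsigned long end,
			       unsigned int rnd)
{
	unsigned long npages;

	if (end <= start)
		return start;

	/* number of page-aligned candidates, inclusive of both bounds */
	npages = ((end - start) >> PAGE_SHIFT) + 1;
	return start + ((unsigned long)(rnd % npages) << PAGE_SHIFT);
}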