Diffstat (limited to 'arch')
62 files changed, 407 insertions, 159 deletions
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 413fd94b5301..68be9017593d 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -397,8 +397,7 @@ dtb_check_done: add sp, sp, r6 #endif - tst r4, #1 - bleq cache_clean_flush + bl cache_clean_flush adr r0, BSYM(restart) add r0, r0, r6 @@ -1047,6 +1046,8 @@ cache_clean_flush: b call_cache_fn __armv4_mpu_cache_flush: + tst r4, #1 + movne pc, lr mov r2, #1 mov r3, #0 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache @@ -1064,6 +1065,8 @@ __armv4_mpu_cache_flush: mov pc, lr __fa526_cache_flush: + tst r4, #1 + movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache mcr p15, 0, r1, c7, c5, 0 @ flush I cache @@ -1072,13 +1075,16 @@ __fa526_cache_flush: __armv6_mmu_cache_flush: mov r1, #0 - mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D + tst r4, #1 + mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB - mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified + mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified mcr p15, 0, r1, c7, c10, 4 @ drain WB mov pc, lr __armv7_mmu_cache_flush: + tst r4, #1 + bne iflush mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 tst r10, #0xf << 16 @ hierarchical cache (ARMv7) mov r10, #0 @@ -1139,6 +1145,8 @@ iflush: mov pc, lr __armv5tej_mmu_cache_flush: + tst r4, #1 + movne pc, lr 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache bne 1b mcr p15, 0, r0, c7, c5, 0 @ flush I cache @@ -1146,6 +1154,8 @@ __armv5tej_mmu_cache_flush: mov pc, lr __armv4_mmu_cache_flush: + tst r4, #1 + movne pc, lr mov r2, #64*1024 @ default: 32K dcache size (*2) mov r11, #32 @ default: 32 byte line size mrc p15, 0, r3, c0, c0, 1 @ read cache type @@ -1179,6 +1189,8 @@ no_cache_id: __armv3_mmu_cache_flush: __armv3_mpu_cache_flush: + tst r4, #1 + movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 mov pc, lr diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index e2156a583de7..c4b968f0feb5 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -489,7 +489,7 @@ reg = <0x00060000 0x00020000>; }; partition@4 { - label = "NAND.u-boot-spl"; + label = "NAND.u-boot-spl-os"; reg = <0x00080000 0x00040000>; }; partition@5 { diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index e7ac47fa6615..a521ac0a7d5a 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -291,8 +291,8 @@ dcdc3: regulator-dcdc3 { compatible = "ti,tps65218-dcdc3"; regulator-name = "vdcdc3"; - regulator-min-microvolt = <1350000>; - regulator-max-microvolt = <1350000>; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts index 859ff3d620ee..87aa4f3b8b3d 100644 --- a/arch/arm/boot/dts/am437x-sk-evm.dts +++ b/arch/arm/boot/dts/am437x-sk-evm.dts @@ -363,8 +363,8 @@ dcdc3: regulator-dcdc3 { compatible = "ti,tps65218-dcdc3"; regulator-name = "vdds_ddr"; - regulator-min-microvolt = <1350000>; - regulator-max-microvolt = <1350000>; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index ac3e4859935f..f7e9bba10bd6 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ 
b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -358,8 +358,8 @@ dcdc3: regulator-dcdc3 { compatible = "ti,tps65218-dcdc3"; regulator-name = "vdcdc3"; - regulator-min-microvolt = <1350000>; - regulator-max-microvolt = <1350000>; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm/boot/dts/sama5d31.dtsi b/arch/arm/boot/dts/sama5d31.dtsi index 7997dc9863ed..883878b32971 100644 --- a/arch/arm/boot/dts/sama5d31.dtsi +++ b/arch/arm/boot/dts/sama5d31.dtsi @@ -12,5 +12,5 @@ #include "sama5d3_uart.dtsi" / { - compatible = "atmel,samad31", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d33.dtsi b/arch/arm/boot/dts/sama5d33.dtsi index 39f832253caf..4b4434aca351 100644 --- a/arch/arm/boot/dts/sama5d33.dtsi +++ b/arch/arm/boot/dts/sama5d33.dtsi @@ -10,5 +10,5 @@ #include "sama5d3_gmac.dtsi" / { - compatible = "atmel,samad33", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d33", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d34.dtsi b/arch/arm/boot/dts/sama5d34.dtsi index 89cda2c0da39..aa01573fdee9 100644 --- a/arch/arm/boot/dts/sama5d34.dtsi +++ b/arch/arm/boot/dts/sama5d34.dtsi @@ -12,5 +12,5 @@ #include "sama5d3_mci2.dtsi" / { - compatible = "atmel,samad34", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d34", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d35.dtsi b/arch/arm/boot/dts/sama5d35.dtsi index d20cd71b5f0e..16c39f4c96a4 100644 --- a/arch/arm/boot/dts/sama5d35.dtsi +++ b/arch/arm/boot/dts/sama5d35.dtsi @@ -14,5 +14,5 @@ #include "sama5d3_tcb1.dtsi" / { - compatible = "atmel,samad35", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d35", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi index db58cad6acd3..e85139ef40af 100644 --- a/arch/arm/boot/dts/sama5d36.dtsi +++ b/arch/arm/boot/dts/sama5d36.dtsi @@ -16,5 +16,5 @@ #include "sama5d3_uart.dtsi" / { - compatible = "atmel,samad36", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d36", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi index 962dc28dc37b..cfcd200b0c17 100644 --- a/arch/arm/boot/dts/sama5d3xcm.dtsi +++ b/arch/arm/boot/dts/sama5d3xcm.dtsi @@ -8,7 +8,7 @@ */ / { - compatible = "atmel,samad3xcm", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d3xcm", "atmel,sama5d3", "atmel,sama5"; chosen { bootargs = "console=ttyS0,115200 rootfstype=ubifs ubi.mtd=5 root=ubi0:rootfs"; diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c index 6478626e3ff6..d0d39f150fab 100644 --- a/arch/arm/mach-mvebu/board-v7.c +++ b/arch/arm/mach-mvebu/board-v7.c @@ -188,7 +188,7 @@ static void __init thermal_quirk(void) static void __init mvebu_dt_init(void) { - if (of_machine_is_compatible("plathome,openblocks-ax3-4")) + if (of_machine_is_compatible("marvell,armadaxp")) i2c_quirk(); if (of_machine_is_compatible("marvell,a375-db")) { external_abort_quirk(); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index ae69809a9e47..7eb94e6fc376 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS config KUSER_HELPERS bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS + depends on MMU default y help Warning: disabling this option may break user programs. 
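
The arch/arm/boot/compressed/head.S hunks above move the "tst r4, #1" test out of the caller (which previously did "bleq cache_clean_flush") and into each per-CPU flush routine, so cache_clean_flush is now always called and bit 0 of r4 decides, per implementation, whether the D-cache clean is skipped. Below is a minimal C sketch of that dispatch pattern, mirroring the ARMv6/ARMv7 paths (the older cores simply return early); the names are hypothetical and not part of the patch.

/* Illustrative only -- a C analogue of the gating added in head.S.
 * skip_dcache_clean stands in for bit 0 of r4 being set.
 */
#include <stdbool.h>
#include <stdio.h>

static void clean_invalidate_dcache(void) { puts("clean+invalidate D-cache"); }
static void invalidate_icache_btb(void)   { puts("invalidate I-cache+BTB"); }

/* Roughly what __armv7_mmu_cache_flush does after the patch:
 * with the flag set it branches straight to the "iflush" path,
 * otherwise it cleans the D-cache hierarchy first.
 */
static void cache_clean_flush(bool skip_dcache_clean)
{
	if (!skip_dcache_clean)
		clean_invalidate_dcache();
	invalidate_icache_btb();
}

int main(void)
{
	cache_clean_flush(false);	/* normal path: full clean + invalidate */
	cache_clean_flush(true);	/* r4 bit 0 set: I-side maintenance only */
	return 0;
}

Pushing the check into the callees keeps the I-cache/BTB maintenance that the ARMv6/ARMv7 variants still perform even when the D-cache clean is unnecessary.
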
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index b61a3bcc2fa8..e048f6198d68 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -497,6 +497,34 @@ static void orion_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) #define orion_gpio_dbg_show NULL #endif +static void orion_gpio_unmask_irq(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + u32 reg_val; + u32 mask = d->mask; + + irq_gc_lock(gc); + reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask); + reg_val |= mask; + irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask); + irq_gc_unlock(gc); +} + +static void orion_gpio_mask_irq(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + u32 mask = d->mask; + u32 reg_val; + + irq_gc_lock(gc); + reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask); + reg_val &= ~mask; + irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask); + irq_gc_unlock(gc); +} + void __init orion_gpio_init(struct device_node *np, int gpio_base, int ngpio, void __iomem *base, int mask_offset, @@ -565,8 +593,8 @@ void __init orion_gpio_init(struct device_node *np, ct = gc->chip_types; ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF; ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; - ct->chip.irq_mask = irq_gc_mask_clr_bit; - ct->chip.irq_unmask = irq_gc_mask_set_bit; + ct->chip.irq_mask = orion_gpio_mask_irq; + ct->chip.irq_unmask = orion_gpio_unmask_irq; ct->chip.irq_set_type = gpio_irq_set_type; ct->chip.name = ochip->chip.label; @@ -575,8 +603,8 @@ void __init orion_gpio_init(struct device_node *np, ct->regs.ack = GPIO_EDGE_CAUSE_OFF; ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; ct->chip.irq_ack = irq_gc_ack_clr_bit; - ct->chip.irq_mask = irq_gc_mask_clr_bit; - ct->chip.irq_unmask = irq_gc_mask_set_bit; + ct->chip.irq_mask = orion_gpio_mask_irq; + ct->chip.irq_unmask = orion_gpio_unmask_irq; ct->chip.irq_set_type = gpio_irq_set_type; ct->handler = handle_edge_irq; ct->chip.name = ochip->chip.label; diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index ccc7087d3c4e..a62cd077457b 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x) * virt_to_page(k) convert a _valid_ virtual address to struct page * * virt_addr_valid(k) indicates whether a virtual address is valid */ -#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET +#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index 619b1dd7bcde..d18a44940968 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -54,18 +54,17 @@ ENTRY(efi_stub_entry) b.eq efi_load_fail /* - * efi_entry() will have relocated the kernel image if necessary - * and we return here with device tree address in x0 and the kernel - * entry point stored at *image_addr. Save those values in registers - * which are callee preserved. + * efi_entry() will have copied the kernel image if necessary and we + * return here with device tree address in x0 and the kernel entry + * point stored at *image_addr. Save those values in registers which + * are callee preserved. 
*/ mov x20, x0 // DTB address ldr x0, [sp, #16] // relocated _text address mov x21, x0 /* - * Flush dcache covering current runtime addresses - * of kernel text/data. Then flush all of icache. + * Calculate size of the kernel Image (same for original and copy). */ adrp x1, _text add x1, x1, #:lo12:_text @@ -73,9 +72,24 @@ ENTRY(efi_stub_entry) add x2, x2, #:lo12:_edata sub x1, x2, x1 + /* + * Flush the copied Image to the PoC, and ensure it is not shadowed by + * stale icache entries from before relocation. + */ bl __flush_dcache_area ic ialluis + /* + * Ensure that the rest of this function (in the original Image) is + * visible when the caches are disabled. The I-cache can't have stale + * entries for the VA range of the current image, so no maintenance is + * necessary. + */ + adr x0, efi_stub_entry + adr x1, efi_stub_entry_end + sub x1, x1, x0 + bl __flush_dcache_area + /* Turn off Dcache and MMU */ mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 @@ -105,4 +119,5 @@ efi_load_fail: ldp x29, x30, [sp], #32 ret +efi_stub_entry_end: ENDPROC(efi_stub_entry) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index e007714ded04..8cd27fedc8b6 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -163,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg) * which ends with "dsb; isb" pair guaranteeing global * visibility. */ - atomic_set(&pp->cpu_count, -1); + /* Notify other processors with an additional increment. */ + atomic_inc(&pp->cpu_count); } else { - while (atomic_read(&pp->cpu_count) != -1) + while (atomic_read(&pp->cpu_count) <= num_online_cpus()) cpu_relax(); isb(); } diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 6e0ed93d51fe..c17967fdf5f6 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 ) sub x1, x1, #2 4: adds x1, x1, #1 b.mi 5f - strb wzr, [x0] +USER(9f, strb wzr, [x0] ) 5: mov x0, #0 ret ENDPROC(__clear_user) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 0bf90d26e745..f4f8b500f74c 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -202,7 +202,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, - unsigned long end, unsigned long phys, + unsigned long end, phys_addr_t phys, int map_io) { pud_t *pud; diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index e194f957ca8c..fdbff44e5482 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -20,9 +20,15 @@ #define WORD_INSN ".word" #endif +#ifdef CONFIG_CPU_MICROMIPS +#define NOP_INSN "nop32" +#else +#define NOP_INSN "nop" +#endif + static __always_inline bool arch_static_branch(struct static_key *key) { - asm_volatile_goto("1:\tnop\n\t" + asm_volatile_goto("1:\t" NOP_INSN "\n\t" "nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h index 7d28f95b0512..6d69332f21ec 100644 --- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h @@ -41,10 +41,8 @@ #define cpu_has_mcheck 0 #define cpu_has_mdmx 0 #define cpu_has_mips16 0 -#define cpu_has_mips32r1 0 #define cpu_has_mips32r2 0 #define cpu_has_mips3d 0 -#define cpu_has_mips64r1 0 #define cpu_has_mips64r2 0 #define cpu_has_mipsmt 0 
#define cpu_has_prefetch 0 diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index a10951090234..133678ab4eb8 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -301,7 +301,8 @@ do { \ __get_kernel_common((x), size, __gu_ptr); \ else \ __get_user_common((x), size, __gu_ptr); \ - } \ + } else \ + (x) = 0; \ \ __gu_err; \ }) @@ -316,6 +317,7 @@ do { \ " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ + " move %1, $0 \n" \ " j 2b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ @@ -630,6 +632,7 @@ do { \ " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ + " move %1, $0 \n" \ " j 2b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ @@ -773,10 +776,11 @@ extern void __put_user_unaligned_unknown(void); "jal\t" #destination "\n\t" #endif -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS -#define DADDI_SCRATCH "$0" -#else +#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \ + defined(CONFIG_CPU_HAS_PREFETCH)) #define DADDI_SCRATCH "$3" +#else +#define DADDI_SCRATCH "$0" #endif extern size_t __copy_user(void *__to, const void *__from, size_t __n); diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 94c4a0c0a577..d5a4f380b019 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -757,31 +757,34 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2e"); + set_isa(c, MIPS_CPU_ISA_III); break; case PRID_REV_LOONGSON2F: c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2f"); + set_isa(c, MIPS_CPU_ISA_III); break; case PRID_REV_LOONGSON3A: c->cputype = CPU_LOONGSON3; - c->writecombine = _CACHE_UNCACHED_ACCELERATED; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; case PRID_REV_LOONGSON3B_R1: case PRID_REV_LOONGSON3B_R2: c->cputype = CPU_LOONGSON3; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3b"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; } - set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC | MIPS_CPU_32FPR; c->tlbsize = 64; + c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; case PRID_IMP_LOONGSON_32: /* Loongson-1 */ decode_configs(c); diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 6001610cfe55..dda800e9e731 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -18,31 +18,53 @@ #ifdef HAVE_JUMP_LABEL -#define J_RANGE_MASK ((1ul << 28) - 1) +/* + * Define parameters for the standard MIPS and the microMIPS jump + * instruction encoding respectively: + * + * - the ISA bit of the target, either 0 or 1 respectively, + * + * - the amount the jump target address is shifted right to fit in the + * immediate field of the machine instruction, either 2 or 1, + * + * - the mask determining the size of the jump region relative to the + * delay-slot instruction, either 256MB or 128MB, + * + * - the jump target alignment, either 4 or 2 bytes. 
+ */ +#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS) +#define J_RANGE_SHIFT (2 - J_ISA_BIT) +#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1) +#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1) void arch_jump_label_transform(struct jump_entry *e, enum jump_label_type type) { + union mips_instruction *insn_p; union mips_instruction insn; - union mips_instruction *insn_p = - (union mips_instruction *)(unsigned long)e->code; - /* Jump only works within a 256MB aligned region. */ - BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK)); + insn_p = (union mips_instruction *)msk_isa16_mode(e->code); + + /* Jump only works within an aligned region its delay slot is in. */ + BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK)); - /* Target must have 4 byte alignment. */ - BUG_ON((e->target & 3) != 0); + /* Target must have the right alignment and ISA must be preserved. */ + BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); if (type == JUMP_LABEL_ENABLE) { - insn.j_format.opcode = j_op; - insn.j_format.target = (e->target & J_RANGE_MASK) >> 2; + insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op; + insn.j_format.target = e->target >> J_RANGE_SHIFT; } else { insn.word = 0; /* nop */ } get_online_cpus(); mutex_lock(&text_mutex); - *insn_p = insn; + if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) { + insn_p->halfword[0] = insn.word >> 16; + insn_p->halfword[1] = insn.word; + } else + *insn_p = insn; flush_icache_range((unsigned long)insn_p, (unsigned long)insn_p + sizeof(*insn_p)); diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index c17ef80cf65a..5d3238af9b5c 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -503,6 +503,7 @@ STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) .Ldone\@: jr ra + nop .if __memcpy == 1 END(memcpy) .set __memcpy, 0 diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c index 37ed184398c6..42323bcc5d28 100644 --- a/arch/mips/loongson/loongson-3/numa.c +++ b/arch/mips/loongson/loongson-3/numa.c @@ -33,6 +33,7 @@ static struct node_data prealloc__node_data[MAX_NUMNODES]; unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; +EXPORT_SYMBOL(__node_distances); struct node_data *__node_data[MAX_NUMNODES]; EXPORT_SYMBOL(__node_data); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index fa6ebd4bc9e9..c3917e251f59 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -299,6 +299,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) local_irq_save(flags); + htw_stop(); pid = read_c0_entryhi() & ASID_MASK; address &= (PAGE_MASK << 1); write_c0_entryhi(address | pid); @@ -346,6 +347,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) tlb_write_indexed(); } tlbw_use_hazard(); + htw_start(); flush_itlb_vm(vma); local_irq_restore(flags); } @@ -422,6 +424,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, local_irq_save(flags); /* Save old context and create impossible VPN2 value */ + htw_stop(); old_ctx = read_c0_entryhi(); old_pagemask = read_c0_pagemask(); wired = read_c0_wired(); @@ -443,6 +446,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, write_c0_entryhi(old_ctx); write_c0_pagemask(old_pagemask); + htw_start(); out: local_irq_restore(flags); return ret; diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c index 6854ed5097d2..83a1dfd8f0e3 100644 --- a/arch/mips/oprofile/backtrace.c +++ 
b/arch/mips/oprofile/backtrace.c @@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame, /* This marks the end of the previous function, which means we overran. */ break; - stack_size = (unsigned) stack_adjustment; + stack_size = (unsigned long) stack_adjustment; } else if (is_ra_save_ins(&ip)) { int ra_slot = ip.i_format.simmediate; if (ra_slot < 0) diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index a95c00f5fb96..a304bcc37e4f 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -107,6 +107,7 @@ static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth) } unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; +EXPORT_SYMBOL(__node_distances); static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b) { diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 4006964d8e12..a5cb070b54bf 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -9,6 +9,8 @@ #include <asm/errno.h> #include <asm-generic/uaccess-unaligned.h> +#include <linux/bug.h> + #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -28,11 +30,6 @@ * that put_user is the same as __put_user, etc. */ -extern int __get_kernel_bad(void); -extern int __get_user_bad(void); -extern int __put_kernel_bad(void); -extern int __put_user_bad(void); - static inline long access_ok(int type, const void __user * addr, unsigned long size) { @@ -43,8 +40,8 @@ static inline long access_ok(int type, const void __user * addr, #define get_user __get_user #if !defined(CONFIG_64BIT) -#define LDD_KERNEL(ptr) __get_kernel_bad(); -#define LDD_USER(ptr) __get_user_bad(); +#define LDD_KERNEL(ptr) BUILD_BUG() +#define LDD_USER(ptr) BUILD_BUG() #define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr) #define STD_USER(x, ptr) __put_user_asm64(x,ptr) #define ASM_WORD_INSN ".word\t" @@ -94,7 +91,7 @@ struct exception_data { case 2: __get_kernel_asm("ldh",ptr); break; \ case 4: __get_kernel_asm("ldw",ptr); break; \ case 8: LDD_KERNEL(ptr); break; \ - default: __get_kernel_bad(); break; \ + default: BUILD_BUG(); break; \ } \ } \ else { \ @@ -103,7 +100,7 @@ struct exception_data { case 2: __get_user_asm("ldh",ptr); break; \ case 4: __get_user_asm("ldw",ptr); break; \ case 8: LDD_USER(ptr); break; \ - default: __get_user_bad(); break; \ + default: BUILD_BUG(); break; \ } \ } \ \ @@ -136,7 +133,7 @@ struct exception_data { case 2: __put_kernel_asm("sth",__x,ptr); break; \ case 4: __put_kernel_asm("stw",__x,ptr); break; \ case 8: STD_KERNEL(__x,ptr); break; \ - default: __put_kernel_bad(); break; \ + default: BUILD_BUG(); break; \ } \ } \ else { \ @@ -145,7 +142,7 @@ struct exception_data { case 2: __put_user_asm("sth",__x,ptr); break; \ case 4: __put_user_asm("stw",__x,ptr); break; \ case 8: STD_USER(__x,ptr); break; \ - default: __put_user_bad(); break; \ + default: BUILD_BUG(); break; \ } \ } \ \ diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h index 75196b415d3f..e0a23c7bdd43 100644 --- a/arch/parisc/include/uapi/asm/bitsperlong.h +++ b/arch/parisc/include/uapi/asm/bitsperlong.h @@ -1,13 +1,7 @@ #ifndef __ASM_PARISC_BITSPERLONG_H #define __ASM_PARISC_BITSPERLONG_H -/* - * using CONFIG_* outside of __KERNEL__ is wrong, - * __LP64__ was also removed from headers, so what - * is the right approach on parisc? 
- * -arnd - */ -#if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__) +#if defined(__LP64__) #define __BITS_PER_LONG 64 #define SHIFT_PER_LONG 6 #else diff --git a/arch/parisc/include/uapi/asm/msgbuf.h b/arch/parisc/include/uapi/asm/msgbuf.h index fe88f2649418..342138983914 100644 --- a/arch/parisc/include/uapi/asm/msgbuf.h +++ b/arch/parisc/include/uapi/asm/msgbuf.h @@ -1,6 +1,8 @@ #ifndef _PARISC_MSGBUF_H #define _PARISC_MSGBUF_H +#include <asm/bitsperlong.h> + /* * The msqid64_ds structure for parisc architecture, copied from sparc. * Note extra padding because this structure is passed back and forth @@ -13,15 +15,15 @@ struct msqid64_ds { struct ipc64_perm msg_perm; -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad1; #endif __kernel_time_t msg_stime; /* last msgsnd time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad2; #endif __kernel_time_t msg_rtime; /* last msgrcv time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad3; #endif __kernel_time_t msg_ctime; /* last change time */ diff --git a/arch/parisc/include/uapi/asm/sembuf.h b/arch/parisc/include/uapi/asm/sembuf.h index 1e59ffd3bd1e..f01d89e30d73 100644 --- a/arch/parisc/include/uapi/asm/sembuf.h +++ b/arch/parisc/include/uapi/asm/sembuf.h @@ -1,6 +1,8 @@ #ifndef _PARISC_SEMBUF_H #define _PARISC_SEMBUF_H +#include <asm/bitsperlong.h> + /* * The semid64_ds structure for parisc architecture. * Note extra padding because this structure is passed back and forth @@ -13,11 +15,11 @@ struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad1; #endif __kernel_time_t sem_otime; /* last semop time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad2; #endif __kernel_time_t sem_ctime; /* last change time */ diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h index 0a3eada1863b..8496c38560c6 100644 --- a/arch/parisc/include/uapi/asm/shmbuf.h +++ b/arch/parisc/include/uapi/asm/shmbuf.h @@ -1,6 +1,8 @@ #ifndef _PARISC_SHMBUF_H #define _PARISC_SHMBUF_H +#include <asm/bitsperlong.h> + /* * The shmid64_ds structure for parisc architecture. * Note extra padding because this structure is passed back and forth @@ -13,19 +15,19 @@ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad1; #endif __kernel_time_t shm_atime; /* last attach time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad2; #endif __kernel_time_t shm_dtime; /* last detach time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad3; #endif __kernel_time_t shm_ctime; /* last change time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad4; #endif size_t shm_segsz; /* size of segment (bytes) */ @@ -36,23 +38,16 @@ struct shmid64_ds { unsigned int __unused2; }; -#ifdef CONFIG_64BIT -/* The 'unsigned int' (formerly 'unsigned long') data types below will - * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on - * a wide kernel, but if some of these values are meant to contain pointers - * they may need to be 'long long' instead. 
-PB XXX FIXME - */ -#endif struct shminfo64 { - unsigned int shmmax; - unsigned int shmmin; - unsigned int shmmni; - unsigned int shmseg; - unsigned int shmall; - unsigned int __unused1; - unsigned int __unused2; - unsigned int __unused3; - unsigned int __unused4; + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; }; #endif /* _PARISC_SHMBUF_H */ diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index 10df7079f4cd..e26043b73f5d 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h @@ -85,7 +85,7 @@ struct siginfo; /* Type of a signal handler. */ -#ifdef CONFIG_64BIT +#if defined(__LP64__) /* function pointers on 64-bit parisc are pointers to little structs and the * compiler doesn't support code which changes or tests the address of * the function in the little struct. This is really ugly -PB diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 8667f18be238..5f5c0373de63 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -833,8 +833,9 @@ #define __NR_seccomp (__NR_Linux + 338) #define __NR_getrandom (__NR_Linux + 339) #define __NR_memfd_create (__NR_Linux + 340) +#define __NR_bpf (__NR_Linux + 341) -#define __NR_Linux_syscalls (__NR_memfd_create + 1) +#define __NR_Linux_syscalls (__NR_bpf + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index b563d9c8268b..fe4f0b89bf8f 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -286,11 +286,11 @@ ENTRY_COMP(msgsnd) ENTRY_COMP(msgrcv) ENTRY_SAME(msgget) /* 190 */ - ENTRY_SAME(msgctl) - ENTRY_SAME(shmat) + ENTRY_COMP(msgctl) + ENTRY_COMP(shmat) ENTRY_SAME(shmdt) ENTRY_SAME(shmget) - ENTRY_SAME(shmctl) /* 195 */ + ENTRY_COMP(shmctl) /* 195 */ ENTRY_SAME(ni_syscall) /* streams1 */ ENTRY_SAME(ni_syscall) /* streams2 */ ENTRY_SAME(lstat64) @@ -323,7 +323,7 @@ ENTRY_SAME(epoll_ctl) /* 225 */ ENTRY_SAME(epoll_wait) ENTRY_SAME(remap_file_pages) - ENTRY_SAME(semtimedop) + ENTRY_COMP(semtimedop) ENTRY_COMP(mq_open) ENTRY_SAME(mq_unlink) /* 230 */ ENTRY_COMP(mq_timedsend) @@ -436,6 +436,7 @@ ENTRY_SAME(seccomp) ENTRY_SAME(getrandom) ENTRY_SAME(memfd_create) /* 340 */ + ENTRY_SAME(bpf) /* Nothing yet */ diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index de40b48b460e..da08ed088157 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -361,7 +361,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, cascade_data->virq = virt_msir; msi->cascade_array[irq_index] = cascade_data; - ret = request_irq(virt_msir, fsl_msi_cascade, 0, + ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD, "fsl-msi-cascade", cascade_data); if (ret) { dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n", diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 765c1776ec9f..0e69b7e7a439 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -22,7 +22,7 @@ int atomic_add_return(int, atomic_t *); int atomic_cmpxchg(atomic_t *, int, int); -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +int atomic_xchg(atomic_t *, int); int 
__atomic_add_unless(atomic_t *, int, int); void atomic_set(atomic_t *, int); diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h index 32c29a133f9d..d38b52dca216 100644 --- a/arch/sparc/include/asm/cmpxchg_32.h +++ b/arch/sparc/include/asm/cmpxchg_32.h @@ -11,22 +11,14 @@ #ifndef __ARCH_SPARC_CMPXCHG__ #define __ARCH_SPARC_CMPXCHG__ -static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) -{ - __asm__ __volatile__("swap [%2], %0" - : "=&r" (val) - : "0" (val), "r" (m) - : "memory"); - return val; -} - +unsigned long __xchg_u32(volatile u32 *m, u32 new); void __xchg_called_with_bad_pointer(void); static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) { switch (size) { case 4: - return xchg_u32(ptr, x); + return __xchg_u32(ptr, x); } __xchg_called_with_bad_pointer(); return x; diff --git a/arch/sparc/include/uapi/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h index a34ad079487e..4c7c12d69bea 100644 --- a/arch/sparc/include/uapi/asm/swab.h +++ b/arch/sparc/include/uapi/asm/swab.h @@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr) { __u16 ret; - __asm__ __volatile__ ("lduha [%1] %2, %0" + __asm__ __volatile__ ("lduha [%2] %3, %0" : "=r" (ret) - : "r" (addr), "i" (ASI_PL)); + : "m" (*addr), "r" (addr), "i" (ASI_PL)); return ret; } #define __arch_swab16p __arch_swab16p @@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr) { __u32 ret; - __asm__ __volatile__ ("lduwa [%1] %2, %0" + __asm__ __volatile__ ("lduwa [%2] %3, %0" : "=r" (ret) - : "r" (addr), "i" (ASI_PL)); + : "m" (*addr), "r" (addr), "i" (ASI_PL)); return ret; } #define __arch_swab32p __arch_swab32p @@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr) { __u64 ret; - __asm__ __volatile__ ("ldxa [%1] %2, %0" + __asm__ __volatile__ ("ldxa [%2] %3, %0" : "=r" (ret) - : "r" (addr), "i" (ASI_PL)); + : "m" (*addr), "r" (addr), "i" (ASI_PL)); return ret; } #define __arch_swab64p __arch_swab64p diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index 8f76f23dac38..f9c6813c132d 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c @@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) { unsigned long csr_reg, csr, csr_error_bits; irqreturn_t ret = IRQ_NONE; - u16 stat; + u32 stat; csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL; csr = upa_readq(csr_reg); @@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) pbm->name); ret = IRQ_HANDLED; } - pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat); + pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat); if (stat & (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT | @@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) PCI_STATUS_SIG_SYSTEM_ERROR)) { printk("%s: PCI bus error, PCI_STATUS[%04x]\n", pbm->name, stat); - pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff); + pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff); ret = IRQ_HANDLED; } return ret; diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 302c476413d5..da6f1a7fc4db 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -816,13 +816,17 @@ void arch_send_call_function_single_ipi(int cpu) void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); + irq_enter(); 
generic_smp_call_function_interrupt(); + irq_exit(); } void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); + irq_enter(); generic_smp_call_function_single_interrupt(); + irq_exit(); } static void tsb_sync(void *info) diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index a7c418ac26af..71cd65ab200c 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -45,6 +45,19 @@ ATOMIC_OP(add, +=) #undef ATOMIC_OP +int atomic_xchg(atomic_t *v, int new) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(ATOMIC_HASH(v), flags); + ret = v->counter; + v->counter = new; + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); + return ret; +} +EXPORT_SYMBOL(atomic_xchg); + int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -137,3 +150,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) return (unsigned long)prev; } EXPORT_SYMBOL(__cmpxchg_u32); + +unsigned long __xchg_u32(volatile u32 *ptr, u32 new) +{ + unsigned long flags; + u32 prev; + + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); + prev = *ptr; + *ptr = new; + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); + + return (unsigned long)prev; +} +EXPORT_SYMBOL(__xchg_u32); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ded8a6774ac9..41a503c15862 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -144,7 +144,7 @@ config INSTRUCTION_DECODER config PERF_EVENTS_INTEL_UNCORE def_bool y - depends on PERF_EVENTS && SUP_SUP_INTEL && PCI + depends on PERF_EVENTS && CPU_SUP_INTEL && PCI config OUTPUT_FORMAT string diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 704f58aa79cd..be1e07d4b596 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -76,8 +76,10 @@ suffix-$(CONFIG_KERNEL_XZ) := xz suffix-$(CONFIG_KERNEL_LZO) := lzo suffix-$(CONFIG_KERNEL_LZ4) := lz4 +RUN_SIZE = $(shell objdump -h vmlinux | \ + perl $(srctree)/arch/x86/tools/calc_run_size.pl) quiet_cmd_mkpiggy = MKPIGGY $@ - cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false ) + cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) targets += piggy.S $(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index cbed1407a5cd..1d7fbbcc196d 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -207,7 +207,8 @@ relocated: * Do the decompression, and jump to the new kernel.. */ /* push arguments for decompress_kernel: */ - pushl $z_output_len /* decompressed length */ + pushl $z_run_size /* size of kernel with .bss and .brk */ + pushl $z_output_len /* decompressed length, end of relocs */ leal z_extract_offset_negative(%ebx), %ebp pushl %ebp /* output address */ pushl $z_input_len /* input_len */ @@ -217,7 +218,7 @@ relocated: pushl %eax /* heap area */ pushl %esi /* real mode pointer */ call decompress_kernel /* returns kernel location in %eax */ - addl $24, %esp + addl $28, %esp /* * Jump to the decompressed kernel. diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 2884e0c3e8a5..6b1766c6c082 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -402,13 +402,16 @@ relocated: * Do the decompression, and jump to the new kernel.. 
*/ pushq %rsi /* Save the real mode argument */ + movq $z_run_size, %r9 /* size of kernel with .bss and .brk */ + pushq %r9 movq %rsi, %rdi /* real mode address */ leaq boot_heap(%rip), %rsi /* malloc area for uncompression */ leaq input_data(%rip), %rdx /* input_data */ movl $z_input_len, %ecx /* input_len */ movq %rbp, %r8 /* output target address */ - movq $z_output_len, %r9 /* decompressed length */ + movq $z_output_len, %r9 /* decompressed length, end of relocs */ call decompress_kernel /* returns kernel location in %rax */ + popq %r9 popq %rsi /* diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 57ab74df7eea..30dd59a9f0b4 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -358,7 +358,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, unsigned char *input_data, unsigned long input_len, unsigned char *output, - unsigned long output_len) + unsigned long output_len, + unsigned long run_size) { real_mode = rmode; @@ -381,8 +382,14 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; - output = choose_kernel_location(input_data, input_len, - output, output_len); + /* + * The memory hole needed for the kernel is the larger of either + * the entire decompressed kernel plus relocation table, or the + * entire decompressed kernel plus .bss and .brk sections. + */ + output = choose_kernel_location(input_data, input_len, output, + output_len > run_size ? output_len + : run_size); /* Validate memory location choices. */ if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index b669ab65bf6c..d8222f213182 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c @@ -36,11 +36,13 @@ int main(int argc, char *argv[]) uint32_t olen; long ilen; unsigned long offs; + unsigned long run_size; FILE *f = NULL; int retval = 1; - if (argc < 2) { - fprintf(stderr, "Usage: %s compressed_file\n", argv[0]); + if (argc < 3) { + fprintf(stderr, "Usage: %s compressed_file run_size\n", + argv[0]); goto bail; } @@ -74,6 +76,7 @@ int main(int argc, char *argv[]) offs += olen >> 12; /* Add 8 bytes for each 32K block */ offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */ offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ + run_size = atoi(argv[2]); printf(".section \".rodata..compressed\",\"a\",@progbits\n"); printf(".globl z_input_len\n"); @@ -85,6 +88,8 @@ int main(int argc, char *argv[]) /* z_extract_offset_negative allows simplification of head_32.S */ printf(".globl z_extract_offset_negative\n"); printf("z_extract_offset_negative = -0x%lx\n", offs); + printf(".globl z_run_size\n"); + printf("z_run_size = %lu\n", run_size); printf(".globl input_data, input_data_end\n"); printf("input_data:\n"); diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 8cd27e08e23c..8cd1cc3bc835 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -150,6 +150,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) } void cpu_disable_common(void); +void cpu_die_common(unsigned int cpu); void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4b4f78c9ba19..cfa9b5b2c27a 100644 --- 
a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -146,6 +146,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); static int __init x86_xsave_setup(char *s) { + if (strlen(s)) + return 0; setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); setup_clear_cpu_cap(X86_FEATURE_XSAVES); diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c index 7aa1acc79789..06674473b0e6 100644 --- a/arch/x86/kernel/cpu/microcode/amd_early.c +++ b/arch/x86/kernel/cpu/microcode/amd_early.c @@ -108,12 +108,13 @@ static size_t compute_container_size(u8 *data, u32 total_size) * load_microcode_amd() to save equivalent cpu table and microcode patches in * kernel heap memory. */ -static void apply_ucode_in_initrd(void *ucode, size_t size) +static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch) { struct equiv_cpu_entry *eq; size_t *cont_sz; u32 *header; u8 *data, **cont; + u8 (*patch)[PATCH_MAX_SIZE]; u16 eq_id = 0; int offset, left; u32 rev, eax, ebx, ecx, edx; @@ -123,10 +124,12 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); cont_sz = (size_t *)__pa_nodebug(&container_size); cont = (u8 **)__pa_nodebug(&container); + patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch); #else new_rev = &ucode_new_rev; cont_sz = &container_size; cont = &container; + patch = &amd_ucode_patch; #endif data = ucode; @@ -213,9 +216,9 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) rev = mc->hdr.patch_id; *new_rev = rev; - /* save ucode patch */ - memcpy(amd_ucode_patch, mc, - min_t(u32, header[1], PATCH_MAX_SIZE)); + if (save_patch) + memcpy(patch, mc, + min_t(u32, header[1], PATCH_MAX_SIZE)); } } @@ -246,7 +249,7 @@ void __init load_ucode_amd_bsp(void) *data = cp.data; *size = cp.size; - apply_ucode_in_initrd(cp.data, cp.size); + apply_ucode_in_initrd(cp.data, cp.size, true); } #ifdef CONFIG_X86_32 @@ -263,7 +266,7 @@ void load_ucode_amd_ap(void) size_t *usize; void **ucode; - mc = (struct microcode_amd *)__pa(amd_ucode_patch); + mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch); if (mc->hdr.patch_id && mc->hdr.processor_rev_id) { __apply_microcode_amd(mc); return; @@ -275,7 +278,7 @@ void load_ucode_amd_ap(void) if (!*ucode || !*usize) return; - apply_ucode_in_initrd(*ucode, *usize); + apply_ucode_in_initrd(*ucode, *usize, false); } static void __init collect_cpu_sig_on_bsp(void *arg) @@ -339,7 +342,7 @@ void load_ucode_amd_ap(void) * AP has a different equivalence ID than BSP, looks like * mixed-steppings silicon so go through the ucode blob anew. */ - apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size); + apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false); } } #endif @@ -347,7 +350,9 @@ void load_ucode_amd_ap(void) int __init save_microcode_in_initrd_amd(void) { unsigned long cont; + int retval = 0; enum ucode_state ret; + u8 *cont_va; u32 eax; if (!container) @@ -355,13 +360,15 @@ int __init save_microcode_in_initrd_amd(void) #ifdef CONFIG_X86_32 get_bsp_sig(); - cont = (unsigned long)container; + cont = (unsigned long)container; + cont_va = __va(container); #else /* * We need the physical address of the container for both bitness since * boot_params.hdr.ramdisk_image is a physical address. 
*/ - cont = __pa(container); + cont = __pa(container); + cont_va = container; #endif /* @@ -372,6 +379,8 @@ int __init save_microcode_in_initrd_amd(void) if (relocated_ramdisk) container = (u8 *)(__va(relocated_ramdisk) + (cont - boot_params.hdr.ramdisk_image)); + else + container = cont_va; if (ucode_new_rev) pr_info("microcode: updated early to new patch_level=0x%08x\n", @@ -382,7 +391,7 @@ int __init save_microcode_in_initrd_amd(void) ret = load_microcode_amd(eax, container, container_size); if (ret != UCODE_OK) - return -EINVAL; + retval = -EINVAL; /* * This will be freed any msec now, stash patches for the current @@ -391,5 +400,5 @@ int __init save_microcode_in_initrd_amd(void) container = NULL; container_size = 0; - return 0; + return retval; } diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index dd9d6190b08d..2ce9051174e6 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -465,6 +465,14 @@ static void mc_bp_resume(void) if (uci->valid && uci->mc) microcode_ops->apply_microcode(cpu); + else if (!uci->mc) + /* + * We might resume and not have applied late microcode but still + * have a newer patch stashed from the early loader. We don't + * have it in uci->mc so we have to load it the same way we're + * applying patches early on the APs. + */ + load_ucode_ap(); } static struct syscore_ops mc_syscore_ops = { diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c index 5f28a64e71ea..2c017f242a78 100644 --- a/arch/x86/kernel/cpu/microcode/core_early.c +++ b/arch/x86/kernel/cpu/microcode/core_early.c @@ -124,7 +124,7 @@ void __init load_ucode_bsp(void) static bool check_loader_disabled_ap(void) { #ifdef CONFIG_X86_32 - return __pa_nodebug(dis_ucode_ldr); + return *((bool *)__pa_nodebug(&dis_ucode_ldr)); #else return dis_ucode_ldr; #endif diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index adf138eac85c..f9ed429d6e4f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -486,14 +486,17 @@ static struct attribute_group snbep_uncore_qpi_format_group = { .attrs = snbep_uncore_qpi_formats_attr, }; -#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ - .init_box = snbep_uncore_msr_init_box, \ +#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ .disable_box = snbep_uncore_msr_disable_box, \ .enable_box = snbep_uncore_msr_enable_box, \ .disable_event = snbep_uncore_msr_disable_event, \ .enable_event = snbep_uncore_msr_enable_event, \ .read_counter = uncore_msr_read_counter +#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ + __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \ + .init_box = snbep_uncore_msr_init_box \ + static struct intel_uncore_ops snbep_uncore_msr_ops = { SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), }; @@ -1919,6 +1922,30 @@ static struct intel_uncore_type hswep_uncore_cbox = { .format_group = &hswep_uncore_cbox_format_group, }; +/* + * Write SBOX Initialization register bit by bit to avoid spurious #GPs + */ +static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + + if (msr) { + u64 init = SNBEP_PMON_BOX_CTL_INT; + u64 flags = 0; + int i; + + for_each_set_bit(i, (unsigned long *)&init, 64) { + flags |= (1ULL << i); + wrmsrl(msr, flags); + } + } +} + +static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = { + __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), + .init_box = 
hswep_uncore_sbox_msr_init_box +}; + static struct attribute *hswep_uncore_sbox_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -1944,7 +1971,7 @@ static struct intel_uncore_type hswep_uncore_sbox = { .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, .msr_offset = HSWEP_SBOX_MSR_OFFSET, - .ops = &snbep_uncore_msr_ops, + .ops = &hswep_uncore_sbox_msr_ops, .format_group = &hswep_uncore_sbox_format_group, }; @@ -2025,13 +2052,27 @@ static struct intel_uncore_type hswep_uncore_imc = { SNBEP_UNCORE_PCI_COMMON_INIT(), }; +static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8}; + +static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count); + pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); + + return count; +} + static struct intel_uncore_ops hswep_uncore_irp_ops = { .init_box = snbep_uncore_pci_init_box, .disable_box = snbep_uncore_pci_disable_box, .enable_box = snbep_uncore_pci_enable_box, .disable_event = ivbep_uncore_irp_disable_event, .enable_event = ivbep_uncore_irp_enable_event, - .read_counter = ivbep_uncore_irp_read_counter, + .read_counter = hswep_uncore_irp_read_counter, }; static struct intel_uncore_type hswep_uncore_irp = { diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 749b0e423419..e510618b2e91 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -1484,7 +1484,7 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) */ if (work & _TIF_NOHZ) { user_exit(); - work &= ~TIF_NOHZ; + work &= ~_TIF_NOHZ; } #ifdef CONFIG_SECCOMP diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 4d2128ac70bd..668d8f2a8781 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1303,10 +1303,14 @@ static void __ref remove_cpu_from_maps(int cpu) numa_remove_cpu(cpu); } +static DEFINE_PER_CPU(struct completion, die_complete); + void cpu_disable_common(void) { int cpu = smp_processor_id(); + init_completion(&per_cpu(die_complete, smp_processor_id())); + remove_siblinginfo(cpu); /* It's now safe to remove this processor from the online map */ @@ -1316,8 +1320,6 @@ void cpu_disable_common(void) fixup_irqs(); } -static DEFINE_PER_CPU(struct completion, die_complete); - int native_cpu_disable(void) { int ret; @@ -1327,16 +1329,21 @@ int native_cpu_disable(void) return ret; clear_local_APIC(); - init_completion(&per_cpu(die_complete, smp_processor_id())); cpu_disable_common(); return 0; } +void cpu_die_common(unsigned int cpu) +{ + wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ); +} + void native_cpu_die(unsigned int cpu) { /* We don't do anything here: idle task is faking death itself. 
*/ - wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ); + + cpu_die_common(cpu); /* They ack this in play_dead() by setting CPU_DEAD */ if (per_cpu(cpu_state, cpu) == CPU_DEAD) { diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index 7609e0e421ec..1318f75d56e4 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -41,9 +41,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst, while (((unsigned long)src & 6) && len >= 2) { __u16 val16; - *errp = __get_user(val16, (const __u16 __user *)src); - if (*errp) - return isum; + if (__get_user(val16, (const __u16 __user *)src)) + goto out_err; *(__u16 *)dst = val16; isum = (__force __wsum)add32_with_carry( diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 4cb8763868fc..4e5dfec750fc 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1123,7 +1123,7 @@ void mark_rodata_ro(void) unsigned long end = (unsigned long) &__end_rodata_hpage_align; unsigned long text_end = PFN_ALIGN(&__stop___ex_table); unsigned long rodata_end = PFN_ALIGN(&__end_rodata); - unsigned long all_end = PFN_ALIGN(&_end); + unsigned long all_end; printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10); @@ -1134,7 +1134,16 @@ void mark_rodata_ro(void) /* * The rodata/data/bss/brk section (but not the kernel text!) * should also be not-executable. + * + * We align all_end to PMD_SIZE because the existing mapping + * is a full PMD. If we would align _brk_end to PAGE_SIZE we + * split the PMD and the reminder between _brk_end and the end + * of the PMD will remain mapped executable. + * + * Any PMD which was setup after the one which covers _brk_end + * has been zapped already via cleanup_highmem(). */ + all_end = roundup((unsigned long)_brk_end, PMD_SIZE); set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); rodata_test(); diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl new file mode 100644 index 000000000000..23210baade2d --- /dev/null +++ b/arch/x86/tools/calc_run_size.pl @@ -0,0 +1,39 @@ +#!/usr/bin/perl +# +# Calculate the amount of space needed to run the kernel, including room for +# the .bss and .brk sections. +# +# Usage: +# objdump -h a.out | perl calc_run_size.pl +use strict; + +my $mem_size = 0; +my $file_offset = 0; + +my $sections=" *[0-9]+ \.(?:bss|brk) +"; +while (<>) { + if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) { + my $size = hex($1); + my $offset = hex($2); + $mem_size += $size; + if ($file_offset == 0) { + $file_offset = $offset; + } elsif ($file_offset != $offset) { + # BFD linker shows the same file offset in ELF. + # Gold linker shows them as consecutive. + next if ($file_offset + $mem_size == $offset + $size); + + printf STDERR "file_offset: 0x%lx\n", $file_offset; + printf STDERR "mem_size: 0x%lx\n", $mem_size; + printf STDERR "offset: 0x%lx\n", $offset; + printf STDERR "size: 0x%lx\n", $size; + + die ".bss and .brk are non-contiguous\n"; + } + } +} + +if ($file_offset == 0) { + die "Never found .bss or .brk file offset\n"; +} +printf("%d\n", $mem_size + $file_offset); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 8650cdb53209..4c071aeb8417 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -510,6 +510,9 @@ static void xen_cpu_die(unsigned int cpu) current->state = TASK_UNINTERRUPTIBLE; schedule_timeout(HZ/10); } + + cpu_die_common(cpu); + xen_smp_intr_free(cpu); xen_uninit_lock_cpu(cpu); xen_teardown_timer(cpu); |
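
The x86 boot changes above introduce z_run_size: calc_run_size.pl derives it from the .bss/.brk section headers, mkpiggy emits it alongside z_output_len, head_32.S/head_64.S pass it as an extra argument, and decompress_kernel() then hands choose_kernel_location() the larger of output_len (decompressed image plus relocation table) and run_size (image plus .bss and .brk). Below is a standalone sketch of that sizing decision; illustrative C with example numbers only, not taken from a real build.

/* Illustrative only: the memory-hole sizing rule added to
 * decompress_kernel() -- reserve whichever is larger, the decompressed
 * image plus relocations (output_len) or the image plus .bss/.brk
 * (run_size, i.e. z_run_size from mkpiggy).
 */
#include <stdio.h>

static unsigned long kernel_hole_size(unsigned long output_len,
				      unsigned long run_size)
{
	return output_len > run_size ? output_len : run_size;
}

int main(void)
{
	unsigned long output_len = 23UL << 20;	/* hypothetical: image + relocs */
	unsigned long run_size   = 27UL << 20;	/* hypothetical: image + .bss + .brk */

	printf("reserve %lu bytes for the kernel\n",
	       kernel_hole_size(output_len, run_size));
	return 0;
}
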