Diffstat (limited to 'arch')
-rw-r--r-- arch/arc/Kconfig | 2
-rw-r--r-- arch/arc/include/asm/cache.h | 9
-rw-r--r-- arch/arc/include/asm/delay.h | 4
-rw-r--r-- arch/arc/include/asm/entry-arcv2.h | 2
-rw-r--r-- arch/arc/include/asm/module.h | 4
-rw-r--r-- arch/arc/include/asm/ptrace.h | 2
-rw-r--r-- arch/arc/include/asm/setup.h | 1
-rw-r--r-- arch/arc/kernel/head.S | 14
-rw-r--r-- arch/arc/kernel/intc-arcv2.c | 6
-rw-r--r-- arch/arc/kernel/intc-compact.c | 4
-rw-r--r-- arch/arc/kernel/mcip.c | 59
-rw-r--r-- arch/arc/kernel/module.c | 4
-rw-r--r-- arch/arc/kernel/smp.c | 25
-rw-r--r-- arch/arc/kernel/unaligned.c | 3
-rw-r--r-- arch/arc/mm/cache.c | 155
-rw-r--r-- arch/arc/mm/init.c | 5
-rw-r--r-- arch/arm/boot/dts/Makefile | 1
-rw-r--r-- arch/arm/boot/dts/am335x-icev2.dts | 1
-rw-r--r-- arch/arm/boot/dts/bcm-nsp.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/da850-evm.dts | 1
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/dra72-evm-revc.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts | 11
-rw-r--r-- arch/arm/boot/dts/omap5.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/qcom-mdm9615.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31-hummingbird.dts | 4
-rw-r--r-- arch/arm/boot/dts/sun6i-a31.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts | 2
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 4
-rw-r--r-- arch/arm/configs/s3c2410_defconfig | 6
-rw-r--r-- arch/arm/include/asm/cputype.h | 3
-rw-r--r-- arch/arm/include/asm/ftrace.h | 18
-rw-r--r-- arch/arm/include/asm/virt.h | 5
-rw-r--r-- arch/arm/include/uapi/asm/types.h (renamed from arch/arm/include/asm/types.h) | 6
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c | 16
-rw-r--r-- arch/arm/kernel/smp_tlb.c | 7
-rw-r--r-- arch/arm/kvm/arm.c | 3
-rw-r--r-- arch/arm/mach-omap1/dma.c | 16
-rw-r--r-- arch/arm/mach-omap2/pdata-quirks.c | 2
-rw-r--r-- arch/arm/mach-ux500/pm.c | 4
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | 4
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/exynos/exynos5433.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts | 2
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 6
-rw-r--r-- arch/arm64/crypto/aes-modes.S | 88
-rw-r--r-- arch/arm64/include/asm/assembler.h | 36
-rw-r--r-- arch/arm64/include/asm/memory.h | 2
-rw-r--r-- arch/arm64/include/asm/virt.h | 9
-rw-r--r-- arch/arm64/include/uapi/asm/ptrace.h | 1
-rw-r--r-- arch/arm64/kernel/entry.S | 2
-rw-r--r-- arch/arm64/kernel/ptrace.c | 16
-rw-r--r-- arch/arm64/kernel/topology.c | 8
-rw-r--r-- arch/arm64/kernel/traps.c | 28
-rw-r--r-- arch/arm64/mm/hugetlbpage.c | 2
-rw-r--r-- arch/arm64/mm/init.c | 2
-rw-r--r-- arch/frv/include/asm/atomic.h | 35
-rw-r--r-- arch/mn10300/include/asm/switch_to.h | 2
-rw-r--r-- arch/parisc/include/asm/bitops.h | 8
-rw-r--r-- arch/parisc/include/uapi/asm/bitsperlong.h | 2
-rw-r--r-- arch/parisc/include/uapi/asm/swab.h | 5
-rw-r--r-- arch/powerpc/Kconfig | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-4k.h | 5
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash.h | 4
-rw-r--r-- arch/powerpc/include/asm/cpu_has_feature.h | 2
-rw-r--r-- arch/powerpc/include/asm/hugetlb.h | 14
-rw-r--r-- arch/powerpc/include/asm/mmu.h | 2
-rw-r--r-- arch/powerpc/include/asm/module.h | 4
-rw-r--r-- arch/powerpc/include/asm/nohash/pgtable.h | 5
-rw-r--r-- arch/powerpc/include/asm/page.h | 3
-rw-r--r-- arch/powerpc/include/asm/perf_event_server.h | 1
-rw-r--r-- arch/powerpc/include/asm/pgtable-be-types.h | 8
-rw-r--r-- arch/powerpc/include/asm/pgtable-types.h | 7
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h | 10
-rw-r--r-- arch/powerpc/include/asm/stackprotector.h | 40
-rw-r--r-- arch/powerpc/kernel/Makefile | 4
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r-- arch/powerpc/kernel/eeh.c | 10
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 2
-rw-r--r-- arch/powerpc/kernel/entry_32.S | 6
-rw-r--r-- arch/powerpc/kernel/module_64.c | 8
-rw-r--r-- arch/powerpc/kernel/process.c | 6
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 3
-rw-r--r-- arch/powerpc/kernel/ptrace.c | 14
-rw-r--r-- arch/powerpc/mm/hash_utils_64.c | 4
-rw-r--r-- arch/powerpc/mm/hugetlbpage-hash64.c | 5
-rw-r--r-- arch/powerpc/mm/hugetlbpage.c | 31
-rw-r--r-- arch/powerpc/mm/init-common.c | 13
-rw-r--r-- arch/powerpc/mm/pgtable-book3s64.c | 18
-rw-r--r-- arch/powerpc/mm/pgtable-radix.c | 4
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 2
-rw-r--r-- arch/powerpc/perf/power9-events-list.h | 2
-rw-r--r-- arch/powerpc/perf/power9-pmu.c | 2
-rw-r--r-- arch/powerpc/sysdev/xics/icp-opal.c | 31
-rw-r--r-- arch/s390/configs/default_defconfig | 27
-rw-r--r-- arch/s390/configs/gcov_defconfig | 50
-rw-r--r-- arch/s390/configs/performance_defconfig | 33
-rw-r--r-- arch/s390/defconfig | 5
-rw-r--r-- arch/s390/include/asm/ctl_reg.h | 4
-rw-r--r-- arch/s390/kernel/ptrace.c | 8
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 4
-rw-r--r-- arch/s390/mm/pgtable.c | 7
-rw-r--r-- arch/sparc/include/asm/mmu_context_64.h | 8
-rw-r--r-- arch/sparc/kernel/irq_64.c | 2
-rw-r--r-- arch/sparc/kernel/sstate.c | 6
-rw-r--r-- arch/sparc/kernel/traps_64.c | 73
-rw-r--r-- arch/tile/kernel/ptrace.c | 2
-rw-r--r-- arch/x86/boot/string.c | 1
-rw-r--r-- arch/x86/boot/string.h | 9
-rw-r--r-- arch/x86/crypto/aesni-intel_glue.c | 3
-rw-r--r-- arch/x86/entry/entry_32.S | 30
-rw-r--r-- arch/x86/entry/entry_64.S | 11
-rw-r--r-- arch/x86/events/amd/ibs.c | 2
-rw-r--r-- arch/x86/events/core.c | 4
-rw-r--r-- arch/x86/events/intel/core.c | 9
-rw-r--r-- arch/x86/events/intel/cstate.c | 2
-rw-r--r-- arch/x86/events/intel/ds.c | 6
-rw-r--r-- arch/x86/events/intel/rapl.c | 61
-rw-r--r-- arch/x86/events/intel/uncore.c | 233
-rw-r--r-- arch/x86/events/intel/uncore_snbep.c | 2
-rw-r--r-- arch/x86/include/asm/intel-family.h | 2
-rw-r--r-- arch/x86/include/asm/microcode.h | 1
-rw-r--r-- arch/x86/include/asm/microcode_intel.h | 15
-rw-r--r-- arch/x86/include/asm/processor.h | 18
-rw-r--r-- arch/x86/include/asm/stacktrace.h | 2
-rw-r--r-- arch/x86/include/asm/switch_to.h | 10
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 9
-rw-r--r-- arch/x86/kernel/cpu/common.c | 2
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 11
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 31
-rw-r--r-- arch/x86/kernel/cpu/microcode/amd.c | 5
-rw-r--r-- arch/x86/kernel/cpu/microcode/core.c | 22
-rw-r--r-- arch/x86/kernel/cpu/microcode/intel.c | 79
-rw-r--r-- arch/x86/kernel/fpu/core.c | 4
-rw-r--r-- arch/x86/kernel/hpet.c | 1
-rw-r--r-- arch/x86/kernel/tsc.c | 1
-rw-r--r-- arch/x86/kernel/unwind_frame.c | 30
-rw-r--r-- arch/x86/kvm/emulate.c | 70
-rw-r--r-- arch/x86/kvm/lapic.c | 6
-rw-r--r-- arch/x86/kvm/lapic.h | 1
-rw-r--r-- arch/x86/kvm/x86.c | 7
-rw-r--r-- arch/x86/mm/mpx.c | 2
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 2
-rw-r--r-- arch/x86/pci/acpi.c | 10
-rw-r--r-- arch/x86/platform/efi/efi.c | 66
-rw-r--r-- arch/x86/platform/efi/efi_64.c | 16
-rw-r--r-- arch/x86/platform/efi/quirks.c | 4
-rw-r--r-- arch/x86/platform/intel-mid/device_libs/Makefile | 2
-rw-r--r-- arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_spidev.c) | 4
-rw-r--r-- arch/xtensa/kernel/setup.c | 2
153 files changed, 1263 insertions, 741 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c75d29077e4a..283099c9560a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -29,7 +29,7 @@ config ARC
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_MEMBLOCK
- select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+ select HAVE_MOD_ARCH_SPECIFIC
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HANDLE_DOMAIN_IRQ
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3410ff6a62d..5008021fba98 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_IC_PTAG_HI 0x1F
/* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE 0x1
+#define IC_CTRL_DIS 0x1
/* Data cache related Auxiliary registers */
#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_DC_PTAG_HI 0x5F
/* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH 0x40
-#define DC_CTRL_FLUSH_STATUS 0x100
+#define DC_CTRL_DIS 0x001
+#define DC_CTRL_INV_MODE_FLUSH 0x040
+#define DC_CTRL_FLUSH_STATUS 0x100
/*System-level cache (L2 cache) related Auxiliary registers */
#define ARC_REG_SLC_CFG 0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_SLC_RGN_END 0x916
/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS 0x001
#define SLC_CTRL_IM 0x040
-#define SLC_CTRL_DISABLE 0x001
#define SLC_CTRL_BUSY 0x100
#define SLC_CTRL_RGN_OP_INV 0x200
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index a36e8601114d..d5da2115d78a 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
" lp 1f \n"
" nop \n"
"1: \n"
- : : "r"(loops));
+ :
+ : "r"(loops)
+ : "lp_count");
}
extern void __bad_udelay(void);
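
The new "lp_count" clobber is the substance of this hunk: ARC's zero-overhead-loop hardware treats LP_COUNT as an ordinary core register that GCC may also allocate, so without declaring it clobbered the compiler could keep a live value there across the asm. A minimal sketch of the resulting __delay(), assuming the usual "mov lp_count, %0" load that sits just above the hunk context:

	static inline void __delay(unsigned long loops)
	{
		__asm__ __volatile__(
		"	mov lp_count, %0	\n"	/* loop count loaded here */
		"	lp  1f			\n"	/* ZOL hardware decrements it */
		"	nop			\n"
		"1:				\n"
		:
		: "r"(loops)
		: "lp_count");			/* tell GCC the register is written */
	}
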
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e6f4b7..aee1a77934cf 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
;
; Now manually save: r12, sp, fp, gp, r25
+ PUSH r30
PUSH r12
; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
POPAX AUX_USER_SP
1:
POP r12
+ POP r30
.endm
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 6e91d8b339c3..567590ea8f6c 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -14,13 +14,13 @@
#include <asm-generic/module.h>
-#ifdef CONFIG_ARC_DW2_UNWIND
struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
void *unw_info;
int unw_sec_idx;
+#endif
const char *secstr;
};
-#endif
#define MODULE_PROC_FAMILY "ARC700"
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da1fcfd..47111d565a95 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@ struct pt_regs {
unsigned long fp;
unsigned long sp; /* user/kernel sp depending on where we came from */
- unsigned long r12;
+ unsigned long r12, r30;
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index cb954cdab070..c568a9df82b1 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
void setup_processor(void);
void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
/* Helpers used in arc_*_mumbojumbo routines */
#define IS_AVAIL1(v, s) ((v) ? s : "")
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd867fdff..8b90d25a15cc 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@ ENTRY(stext)
GET_CPU_ID r5
cmp r5, 0
mov.nz r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
- ; Non-Master can proceed as system would be booted sufficiently
- jnz first_lines_of_secondary
-#else
+ bz .Lmaster_proceed
+
; Non-Masters wait for Master to boot enough and bring them up
- jnz arc_platform_smp_wait_to_boot
-#endif
- ; Master falls thru
+ ; when they resume, tail-call to entry point
+ mov blink, @first_lines_of_secondary
+ j arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
#endif
; Clear BSS before updating any globals
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 994dca7014db..ecef0fb0b66c 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
static void arcv2_irq_mask(struct irq_data *data)
{
- write_aux_reg(AUX_IRQ_SELECT, data->irq);
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 0);
}
static void arcv2_irq_unmask(struct irq_data *data)
{
- write_aux_reg(AUX_IRQ_SELECT, data->irq);
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 1);
}
void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
- write_aux_reg(AUX_IRQ_SELECT, data->irq);
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
/*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index ce9deb953ca9..8c1fd5c00782 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
- ienb &= ~(1 << data->irq);
+ ienb &= ~(1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
- ienb |= (1 << data->irq);
+ ienb |= (1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
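
Both intc hunks (and the arcv2 ones above) fix the same class of bug: data->irq is the Linux virtual IRQ number, an arbitrary handle handed out by the IRQ core, whereas data->hwirq is the controller-local line that AUX_IRQ_SELECT / AUX_IENABLE actually index; the two only coincide by luck on simple setups. A sketch of the idiomatic form, using the stock accessor:

	static void example_irq_mask(struct irq_data *data)
	{
		irq_hw_number_t hw = irqd_to_hwirq(data);	/* == data->hwirq */

		write_aux_reg(AUX_IRQ_SELECT, hw);	/* program the hardware line, */
		write_aux_reg(AUX_IRQ_ENABLE, 0);	/* never the Linux virq */
	}
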
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 560c4afc2af4..9f6b68fd4f3b 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -10,6 +10,7 @@
#include <linux/smp.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
@@ -92,11 +93,10 @@ static void mcip_probe_n_setup(void)
READ_BCR(ARC_REG_MCIP_BCR, mp);
sprintf(smp_cpuinfo_buf,
- "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+ "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
mp.ver, mp.num_cores,
IS_AVAIL1(mp.ipi, "IPI "),
IS_AVAIL1(mp.idu, "IDU "),
- IS_AVAIL1(mp.llm, "LLM "),
IS_AVAIL1(mp.dbg, "DEBUG "),
IS_AVAIL1(mp.gfrc, "GFRC"));
@@ -174,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
-#ifdef CONFIG_SMP
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
bool force)
@@ -204,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
return IRQ_SET_MASK_OK;
}
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+ /*
+ * By default send all common interrupts to all available online CPUs.
+ * The affinity of common interrupts in IDU must be set manually since
+ * in some cases the kernel will not call irq_set_affinity() by itself:
+ * 1. When the kernel is not configured with SMP support.
+ * 2. When the kernel is configured with SMP support, but the upper-level
+ * interrupt controller does not support setting the affinity and
+ * cannot propagate it to the IDU.
+ */
+ idu_irq_set_affinity(data, cpu_online_mask, false);
+ idu_irq_unmask(data);
+}
static struct irq_chip idu_irq_chip = {
.name = "MCIP IDU Intc",
.irq_mask = idu_irq_mask,
.irq_unmask = idu_irq_unmask,
+ .irq_enable = idu_irq_enable,
#ifdef CONFIG_SMP
.irq_set_affinity = idu_irq_set_affinity,
#endif
@@ -221,10 +235,13 @@ static irq_hw_number_t idu_first_hwirq;
static void idu_cascade_isr(struct irq_desc *desc)
{
struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *core_chip = irq_desc_get_chip(desc);
irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
+ chained_irq_enter(core_chip, desc);
generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+ chained_irq_exit(core_chip, desc);
}
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -239,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
- irq_hw_number_t hwirq = *out_hwirq = intspec[0];
- int distri = intspec[1];
- unsigned long flags;
-
+ /*
+ * Ignore the interrupt distribution mode for common IDU interrupts
+ * (intspec[1]), since setting the affinity from a Device Tree value
+ * is deprecated on ARC.
+ */
+ *out_hwirq = intspec[0];
*out_type = IRQ_TYPE_NONE;
- /* XXX: validate distribution scheme again online cpu mask */
- if (distri == 0) {
- /* 0 - Round Robin to all cpus, otherwise 1 bit per core */
- raw_spin_lock_irqsave(&mcip_lock, flags);
- idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
- idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
- raw_spin_unlock_irqrestore(&mcip_lock, flags);
- } else {
- /*
- * DEST based distribution for Level Triggered intr can only
- * have 1 CPU, so generalize it to always contain 1 cpu
- */
- int cpu = ffs(distri);
-
- if (cpu != fls(distri))
- pr_warn("IDU irq %lx distri mode set to cpu %x\n",
- hwirq, cpu);
-
- raw_spin_lock_irqsave(&mcip_lock, flags);
- idu_set_dest(hwirq, cpu);
- idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
- raw_spin_unlock_irqrestore(&mcip_lock, flags);
- }
-
return 0;
}
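
The chained_irq_enter()/chained_irq_exit() pair added to idu_cascade_isr() is the standard shape for a cascaded handler: it performs whatever ack/mask and eoi/unmask the parent chip requires around the demux, so a fasteoi-type parent is not left without an EOI (which would silence all further IDU interrupts). In miniature, a generic cascade looks like this (a sketch, assuming the domain was stashed as handler data at setup time):

	static void example_cascade_isr(struct irq_desc *desc)
	{
		struct irq_domain *domain = irq_desc_get_handler_data(desc);
		struct irq_chip *chip = irq_desc_get_chip(desc);
		irq_hw_number_t hw = irqd_to_hwirq(irq_desc_get_irq_data(desc));

		chained_irq_enter(chip, desc);			/* ack/mask parent */
		generic_handle_irq(irq_find_mapping(domain, hw)); /* demux to child */
		chained_irq_exit(chip, desc);			/* eoi/unmask parent */
	}
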
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 42e964db2967..3d99a6091332 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
#ifdef CONFIG_ARC_DW2_UNWIND
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
- mod->arch.secstr = secstr;
#endif
+ mod->arch.secstr = secstr;
return 0;
}
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
+#ifdef CONFIG_ARC_DW2_UNWIND
if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
module->arch.unw_sec_idx = tgtsec;
+#endif
return 0;
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d972c9d..2afbafadb6ab 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
*/
static volatile int wake_flag;
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f) f
+#define __boot_write(f, v) f = v
+
+#else
+
+#define __boot_read(f) arc_read_uncached_32(&f)
+#define __boot_write(f, v) arc_write_uncached_32(&f, v)
+
+#endif
+
static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
BUG_ON(cpu == 0);
- wake_flag = cpu;
+
+ __boot_write(wake_flag, cpu);
}
void arc_platform_smp_wait_to_boot(int cpu)
{
- while (wake_flag != cpu)
+ /* for halt-on-reset, we've waited already */
+ if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+ return;
+
+ while (__boot_read(wake_flag) != cpu)
;
- wake_flag = 0;
- __asm__ __volatile__("j @first_lines_of_secondary \n");
+ __boot_write(wake_flag, 0);
}
-
const char *arc_platform_smp_cpuinfo(void)
{
return plat_smp_ops.info ? : "";
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index abd961f3e763..91ebe382147f 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
if (state.fault)
goto fault;
+ /* clear any remnants of the delay slot */
if (delay_mode(regs)) {
- regs->ret = regs->bta;
+ regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index ec86ac0e3321..d408fa21a07c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
static int l2_line_sz;
static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
/*
* For ARC700 MMUv3 I-cache and D-cache flushes
- * Also reused for HS38 aliasing I-cache configuration
+ * - ARC700 programming model requires paddr and vaddr be passed in separate
+ * AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+ * caches actually alias or not.
+ * - For HS38, only the aliasing I-cache configuration uses the PTAG reg
+ * (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
*/
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
__after_dc_op(op);
}
+static inline void __dc_disable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ __dc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
#else
#define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
#endif
}
+noinline static void slc_entire_op(const int op)
+{
+ unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+ ctrl = read_aux_reg(r);
+
+ if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+ else
+ ctrl |= SLC_CTRL_IM;
+
+ write_aux_reg(r, ctrl);
+
+ write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+ /* Important to wait for flush to complete */
+ while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ slc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
/***********************************************************
* Exported APIs
*/
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
return 0;
}
-void arc_cache_init(void)
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ * Non-Masters need not be accessing caches at that time
+ * - They are either HALT_ON_RESET and kick started much later or
+ * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ * doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ * otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ * Coherency transactions
+ */
+noinline void __init arc_ioc_setup(void)
{
- unsigned int __maybe_unused cpu = smp_processor_id();
- char str[256];
+ unsigned int ap_sz;
- printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ /* Flush + invalidate + disable L1 dcache */
+ __dc_disable();
+
+ /* Flush + invalidate SLC */
+ if (read_aux_reg(ARC_REG_SLC_BCR))
+ slc_entire_op(OP_FLUSH_N_INV);
+
+ /* IOC Aperture start: TBD: handle non-default CONFIG_LINUX_LINK_BASE */
+ write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/*
- * Only master CPU needs to execute rest of function:
- * - Assume SMP so all cores will have same cache config so
- * any geomtry checks will be same for all
- * - IOC setup / dma callbacks only need to be setup once
+ * IOC Aperture size:
+ * decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+ * TBD: fix for PGU + 1GB of low mem
+ * TBD: fix for PAE
*/
- if (cpu)
- return;
+ ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+ write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+
+ write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+ write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+ /* Re-enable L1 dcache */
+ __dc_enable();
+}
+
+void __init arc_cache_init_master(void)
+{
+ unsigned int __maybe_unused cpu = smp_processor_id();
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
}
}
- if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
-
- /* IM set : flush before invalidate */
- write_aux_reg(ARC_REG_SLC_CTRL,
- read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+ /* Note that SLC disable not formally supported till HS 3.0 */
+ if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+ arc_slc_disable();
- write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-
- /* Important to wait for flush to complete */
- while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
- write_aux_reg(ARC_REG_SLC_CTRL,
- read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
- }
+ if (is_isa_arcv2() && ioc_enable)
+ arc_ioc_setup();
if (is_isa_arcv2() && ioc_enable) {
- /* IO coherency base - 0x8z */
- write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
- /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
- write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
- /* Enable partial writes */
- write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
- /* Enable IO coherency */
- write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
-
__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
__dma_cache_inv = __dma_cache_inv_ioc;
__dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
__dma_cache_wback = __dma_cache_wback_l1;
}
}
+
+void __ref arc_cache_init(void)
+{
+ unsigned int __maybe_unused cpu = smp_processor_id();
+ char str[256];
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+ /*
+ * Only master CPU needs to execute rest of function:
+ * - Assume SMP so all cores will have same cache config so
+ * any geometry checks will be same for all
+ * - IOC setup / dma callbacks only need to be setup once
+ */
+ if (!cpu)
+ arc_cache_init_master();
+}
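
The aperture-size encoding above is easy to verify by hand: the register is decoded as 2 ^ (SIZE + 2) KB, so the value to program is log2 of the memory size in KB, minus 2. A worked check for the 512 MB that the deleted hard-coded path assumed:

	/* sketch: 512 MB of low memory, mirroring arc_ioc_setup() */
	unsigned int ap_sz = order_base_2((512UL << 20) / SZ_1K) - 2;
	/* order_base_2(524288) = 19, so ap_sz = 17 = 0x11,
	 * and 2 ^ (0x11 + 2) KB = 2^19 KB = 512 MB -- the old constant. */
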
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 399e2f223d25..8c9415ed6280 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
#endif
+long __init arc_get_mem_sz(void)
+{
+ return low_mem_sz;
+}
+
/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
static int __init setup_mem_sz(char *str)
{
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 7327250f0bb6..f10fe8526239 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -846,6 +846,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-a83t-allwinner-h8homlet-v2.dtb \
sun8i-a83t-cubietruck-plus.dtb \
sun8i-h3-bananapi-m2-plus.dtb \
+ sun8i-h3-nanopi-m1.dtb \
sun8i-h3-nanopi-neo.dtb \
sun8i-h3-orangepi-2.dtb \
sun8i-h3-orangepi-lite.dtb \
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index 1463df3b5b19..8ed46f9d79b7 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -170,7 +170,6 @@
AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */
>;
};
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index b6142bda661e..15f07f9af3b3 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -160,7 +160,7 @@
axi {
compatible = "simple-bus";
- ranges = <0x00000000 0x18000000 0x0011c40a>;
+ ranges = <0x00000000 0x18000000 0x0011c40c>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 41de15fe15a2..78492a0bbbab 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -99,6 +99,7 @@
#size-cells = <1>;
compatible = "m25p64";
spi-max-frequency = <30000000>;
+ m25p,fast-read;
reg = <0>;
partition@0 {
label = "U-Boot-SPL";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 1faf24acd521..5ba161679e01 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1378,6 +1378,7 @@
phy-names = "sata-phy";
clocks = <&sata_ref_clk>;
ti,hwmods = "sata";
+ ports-implemented = <0x1>;
};
rtc: rtc@48838000 {
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
index c3d939c9666c..3f808a47df03 100644
--- a/arch/arm/boot/dts/dra72-evm-revc.dts
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -75,6 +75,6 @@
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
- ti,min-output-imepdance;
+ ti,min-output-impedance;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index 34887a10c5f1..47ba97229a48 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -319,8 +319,6 @@
compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
"fsl,imx-audio-sgtl5000";
model = "imx6q-nitrogen6_max-sgtl5000";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_sgtl5000>;
ssi-controller = <&ssi1>;
audio-codec = <&codec>;
audio-routing =
@@ -402,6 +400,8 @@
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sgtl5000>;
reg = <0x0a>;
clocks = <&clks IMX6QDL_CLK_CKO>;
VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
index d80f21abea62..31d4cc62dbc7 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
@@ -250,8 +250,6 @@
compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
"fsl,imx-audio-sgtl5000";
model = "imx6q-nitrogen6_som2-sgtl5000";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_sgtl5000>;
ssi-controller = <&ssi1>;
audio-codec = <&codec>;
audio-routing =
@@ -320,6 +318,8 @@
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sgtl5000>;
reg = <0x0a>;
clocks = <&clks IMX6QDL_CLK_CKO>;
VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
index da8598402ab8..38faa90007d7 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
@@ -158,7 +158,7 @@
&mmc1 {
interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
pinctrl-names = "default";
- pinctrl-0 = <&mmc1_pins &mmc1_cd>;
+ pinctrl-0 = <&mmc1_pins>;
wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */
vmmc-supply = <&vmmc1>;
@@ -193,7 +193,8 @@
OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
- OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/
+ OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
+ OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
>;
};
@@ -242,12 +243,6 @@
OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */
>;
};
-
- mmc1_cd: pinmux_mmc1_cd {
- pinctrl-single,pins = <
- OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
- >;
- };
};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 7cd92babc41a..0844737b72b2 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -988,6 +988,7 @@
phy-names = "sata-phy";
clocks = <&sata_ref_clk>;
ti,hwmods = "sata";
+ ports-implemented = <0x1>;
};
dss: dss@58000000 {
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index 5ae4ec59e6ea..c852b69229c9 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -357,7 +357,7 @@
};
amba {
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 735914f6ae44..7cae328398b1 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -140,6 +140,10 @@
cpu-supply = <&reg_dcdc3>;
};
+&de {
+ status = "okay";
+};
+
&ehci0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 2b26175d55d1..e78faaf9243c 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -234,6 +234,7 @@
de: display-engine {
compatible = "allwinner,sun6i-a31-display-engine";
allwinner,pipelines = <&fe0>;
+ status = "disabled";
};
soc@01c00000 {
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
index 5ea4915f6d75..10d307408f23 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
@@ -56,7 +56,7 @@
};
&pio {
- mmc2_pins_nrst: mmc2@0 {
+ mmc2_pins_nrst: mmc2-rst-pin {
allwinner,pins = "PC16";
allwinner,function = "gpio_out";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index b01a43851294..028d2b70e3b5 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -471,7 +471,7 @@ CONFIG_MESON_WATCHDOG=y
CONFIG_DW_WATCHDOG=y
CONFIG_DIGICOLOR_WATCHDOG=y
CONFIG_BCM2835_WDT=y
-CONFIG_BCM47XX_WATCHDOG=y
+CONFIG_BCM47XX_WDT=y
CONFIG_BCM7038_WDT=m
CONFIG_BCM_KONA_WDT=y
CONFIG_MFD_ACT8945A=y
@@ -893,7 +893,7 @@ CONFIG_BCM2835_MBOX=y
CONFIG_RASPBERRYPI_FIRMWARE=y
CONFIG_EFI_VARS=m
CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_CONFIG_BCM47XX_NVRAM=y
+CONFIG_BCM47XX_NVRAM=y
CONFIG_BCM47XX_SPROM=y
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 4364040ed696..1e6c48dd7b11 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 522b5feb4eaa..b62eaeb147aa 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -94,6 +94,9 @@
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION 0x510002d0
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index bfe2a2f5a644..22b73112b75f 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
#define ftrace_return_address(n) return_address(n)
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ if (!strcmp(sym, "sys_mmap2"))
+ sym = "sys_mmap_pgoff";
+ else if (!strcmp(sym, "sys_statfs64_wrapper"))
+ sym = "sys_statfs64";
+ else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+ sym = "sys_fstatfs64";
+ else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+ sym = "sys_fadvise64_64";
+
+ /* Ignore case since sym may start with "SyS" instead of "sys" */
+ return !strcasecmp(sym, name);
+}
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* _ASM_ARM_FTRACE */
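
The hook is needed because several ARM syscalls live behind wrapper symbols whose kallsyms names differ from the generic syscall metadata names; remapping the known wrappers and then comparing case-insensitively lets the ftrace syscall tracer find them. Illustrative calls (a sketch; the results follow from the code above):

	arch_syscall_match_sym_name("sys_mmap2", "sys_mmap_pgoff");	/* true: wrapper remapped */
	arch_syscall_match_sym_name("SyS_read", "sys_read");		/* true: case ignored */
	arch_syscall_match_sym_name("sys_read", "sys_write");		/* false */
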
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index a2e75b84e2ae..6dae1956c74d 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
return false;
}
+static inline bool has_vhe(void)
+{
+ return false;
+}
+
/* The section containing the hypervisor idmap text */
extern char __hyp_idmap_text_start[];
extern char __hyp_idmap_text_end[];
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/uapi/asm/types.h
index a53cdb8f068c..9435a42f575e 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/uapi/asm/types.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_TYPES_H
-#define _ASM_TYPES_H
+#ifndef _UAPI_ASM_TYPES_H
+#define _UAPI_ASM_TYPES_H
#include <asm-generic/int-ll64.h>
@@ -37,4 +37,4 @@
#define __UINTPTR_TYPE__ unsigned long
#endif
-#endif /* _ASM_TYPES_H */
+#endif /* _UAPI_ASM_TYPES_H */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 188180b5523d..be3b3fbd382f 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void)
return 0;
}
+ /*
+ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+ * whenever a WFI is issued, even if the core is not powered down, in
+ * violation of the architecture. When DBGPRSR.SPD is set, accesses to
+ * breakpoint and watchpoint registers are treated as undefined, so
+ * this results in boot time and runtime failures when these are
+ * accessed and we unexpectedly take a trap.
+ *
+ * It's not clear if/how this can be worked around, so we blacklist
+ * Scorpion CPUs to avoid these issues.
+ */
+ if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
+ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+ return 0;
+ }
+
has_ossr = core_has_os_save_restore();
/* Determine how many BRPs/WRPs are available. */
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 22313cb53362..9af0701f7094 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -9,6 +9,7 @@
*/
#include <linux/preempt.h>
#include <linux/smp.h>
+#include <linux/uaccess.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
static inline void ipi_flush_tlb_page(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
+ unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+
+ uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
static inline void ipi_flush_tlb_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
+ unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+
+ uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_range(void *arg)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 11676787ad49..9d7446456e0c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
+ if (is_kernel_in_hyp_mode())
+ kvm_timer_init_vhe();
+
kvm_arm_init_debug();
}
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index f6ba589cd312..c821c1d5610e 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -32,7 +32,6 @@
#include "soc.h"
#define OMAP1_DMA_BASE (0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT 17
static u32 enable_1510_mode;
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
goto exit_iounmap;
}
- d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
-
/* Valid attributes for omap1 plus processors */
if (cpu_is_omap15xx())
d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
d->dev_caps |= CLEAR_CSR_ON_READ;
d->dev_caps |= IS_WORD_16;
- if (cpu_is_omap15xx())
- d->chan_count = 9;
- else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
- if (!(d->dev_caps & ENABLE_1510_MODE))
- d->chan_count = 16;
+ /* available logical channels */
+ if (cpu_is_omap15xx()) {
+ d->lch_count = 9;
+ } else {
+ if (d->dev_caps & ENABLE_1510_MODE)
+ d->lch_count = 9;
else
- d->chan_count = 9;
+ d->lch_count = 16;
}
p = dma_plat_info;
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 477910a48448..70c004794880 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = {
.nshutdown_gpio = 162,
.dev_name = "/dev/ttyO1",
.flow_cntrl = 1,
- .baud_rate = 300000,
+ .baud_rate = 3000000,
};
static struct platform_device wl128x_device = {
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index 8538910db202..a970e7fcba9e 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
*/
bool prcmu_is_cpu_in_wfi(int cpu)
{
- return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
- PRCM_ARM_WFI_STANDBY_WFI0;
+ return readl(PRCM_ARM_WFI_STANDBY) &
+ (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
}
/*
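
The ux500 change fixes a classic precedence bug: '?:' binds more loosely than '&', so the old statement parsed as

	return (readl(PRCM_ARM_WFI_STANDBY) & cpu) ?
			PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0;

which for cpu 0 masked with zero and so always returned the WFI0 bit constant, and for cpu 1 tested the wrong bit. The parenthesized version masks the status register with the per-CPU WFI bit, which is what the function name promises.
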
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 238fbeacd330..5d28e1cdc998 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -137,6 +137,10 @@
};
};
+&scpi_clocks {
+ status = "disabled";
+};
+
&uart_AO {
status = "okay";
pinctrl-0 = <&uart_ao_a_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 596240c38a9c..b35307321b63 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -55,7 +55,7 @@
mboxes = <&mailbox 1 &mailbox 2>;
shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
- clocks {
+ scpi_clocks: clocks {
compatible = "arm,scpi-clocks";
scpi_dvfs: scpi_clocks@0 {
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index 64226d5ae471..135890cd8a85 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -1367,7 +1367,7 @@
};
amba {
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index 358089687a69..ef1b9e573af0 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -27,7 +27,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 68a908334c7b..54dc28351c8c 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -72,7 +72,7 @@
<1 10 0xf08>;
};
- amba_apu {
+ amba_apu: amba_apu@0 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
@@ -175,7 +175,7 @@
};
i2c0: i2c@ff020000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 17 4>;
@@ -185,7 +185,7 @@
};
i2c1: i2c@ff030000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 18 4>;
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index c53dbeae79f2..838dad5c209f 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
cbz w6, .Lcbcencloop
ld1 {v0.16b}, [x5] /* get iv */
- enc_prepare w3, x2, x5
+ enc_prepare w3, x2, x6
.Lcbcencloop:
ld1 {v1.16b}, [x1], #16 /* get next pt block */
eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
- encrypt_block v0, w3, x2, x5, w6
+ encrypt_block v0, w3, x2, x6, w7
st1 {v0.16b}, [x0], #16
subs w4, w4, #1
bne .Lcbcencloop
+ st1 {v0.16b}, [x5] /* return iv */
ret
AES_ENDPROC(aes_cbc_encrypt)
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
cbz w6, .LcbcdecloopNx
ld1 {v7.16b}, [x5] /* get iv */
- dec_prepare w3, x2, x5
+ dec_prepare w3, x2, x6
.LcbcdecloopNx:
#if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
.Lcbcdecloop:
ld1 {v1.16b}, [x1], #16 /* get next ct block */
mov v0.16b, v1.16b /* ...and copy to v0 */
- decrypt_block v0, w3, x2, x5, w6
+ decrypt_block v0, w3, x2, x6, w7
eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
mov v7.16b, v1.16b /* ct is next iv */
st1 {v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
bne .Lcbcdecloop
.Lcbcdecout:
FRAME_POP
+ st1 {v7.16b}, [x5] /* return iv */
ret
AES_ENDPROC(aes_cbc_decrypt)
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
AES_ENTRY(aes_ctr_encrypt)
FRAME_PUSH
- cbnz w6, .Lctrfirst /* 1st time around? */
- umov x5, v4.d[1] /* keep swabbed ctr in reg */
- rev x5, x5
-#if INTERLEAVE >= 2
- cmn w5, w4 /* 32 bit overflow? */
- bcs .Lctrinc
- add x5, x5, #1 /* increment BE ctr */
- b .LctrincNx
-#else
- b .Lctrinc
-#endif
-.Lctrfirst:
+ cbz w6, .Lctrnotfirst /* 1st time around? */
enc_prepare w3, x2, x6
ld1 {v4.16b}, [x5]
- umov x5, v4.d[1] /* keep swabbed ctr in reg */
- rev x5, x5
+
+.Lctrnotfirst:
+ umov x8, v4.d[1] /* keep swabbed ctr in reg */
+ rev x8, x8
#if INTERLEAVE >= 2
- cmn w5, w4 /* 32 bit overflow? */
+ cmn w8, w4 /* 32 bit overflow? */
bcs .Lctrloop
.LctrloopNx:
subs w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
#if INTERLEAVE == 2
mov v0.8b, v4.8b
mov v1.8b, v4.8b
- rev x7, x5
- add x5, x5, #1
+ rev x7, x8
+ add x8, x8, #1
ins v0.d[1], x7
- rev x7, x5
- add x5, x5, #1
+ rev x7, x8
+ add x8, x8, #1
ins v1.d[1], x7
ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
st1 {v0.16b-v1.16b}, [x0], #32
#else
ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
- dup v7.4s, w5
+ dup v7.4s, w8
mov v0.16b, v4.16b
add v7.4s, v7.4s, v8.4s
mov v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
eor v2.16b, v7.16b, v2.16b
eor v3.16b, v5.16b, v3.16b
st1 {v0.16b-v3.16b}, [x0], #64
- add x5, x5, #INTERLEAVE
+ add x8, x8, #INTERLEAVE
#endif
- cbz w4, .LctroutNx
-.LctrincNx:
- rev x7, x5
+ rev x7, x8
ins v4.d[1], x7
+ cbz w4, .Lctrout
b .LctrloopNx
-.LctroutNx:
- sub x5, x5, #1
- rev x7, x5
- ins v4.d[1], x7
- b .Lctrout
.Lctr1x:
adds w4, w4, #INTERLEAVE
beq .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
.Lctrloop:
mov v0.16b, v4.16b
encrypt_block v0, w3, x2, x6, w7
+
+ adds x8, x8, #1 /* increment BE ctr */
+ rev x7, x8
+ ins v4.d[1], x7
+ bcs .Lctrcarry /* overflow? */
+
+.Lctrcarrydone:
subs w4, w4, #1
bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
ld1 {v3.16b}, [x1], #16
eor v3.16b, v0.16b, v3.16b
st1 {v3.16b}, [x0], #16
- beq .Lctrout
-.Lctrinc:
- adds x5, x5, #1 /* increment BE ctr */
- rev x7, x5
- ins v4.d[1], x7
- bcc .Lctrloop /* no overflow? */
- umov x7, v4.d[0] /* load upper word of ctr */
- rev x7, x7 /* ... to handle the carry */
- add x7, x7, #1
- rev x7, x7
- ins v4.d[0], x7
- b .Lctrloop
+ bne .Lctrloop
+
+.Lctrout:
+ st1 {v4.16b}, [x5] /* return next CTR value */
+ FRAME_POP
+ ret
+
.Lctrhalfblock:
ld1 {v3.8b}, [x1]
eor v3.8b, v0.8b, v3.8b
st1 {v3.8b}, [x0]
-.Lctrout:
FRAME_POP
ret
+
+.Lctrcarry:
+ umov x7, v4.d[0] /* load upper word of ctr */
+ rev x7, x7 /* ... to handle the carry */
+ add x7, x7, #1
+ rev x7, x7
+ ins v4.d[0], x7
+ b .Lctrcarrydone
AES_ENDPROC(aes_ctr_encrypt)
.ltorg
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 446f6c46d4b1..3a4301163e04 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -164,22 +164,25 @@ lr .req x30 // link register
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC.
+ * <symbol> is within the range +/- 4 GB of the PC when running
+ * in core kernel context. In module context, a movz/movk sequence
+ * is used, since modules may be loaded far away from the kernel
+ * when KASLR is in effect.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
- * @tmp: optional scratch register to be used if <dst> == sp, which
- * is not allowed in an adrp instruction
*/
- .macro adr_l, dst, sym, tmp=
- .ifb \tmp
+ .macro adr_l, dst, sym
+#ifndef MODULE
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
- .else
- adrp \tmp, \sym
- add \dst, \tmp, :lo12:\sym
- .endif
+#else
+ movz \dst, #:abs_g3:\sym
+ movk \dst, #:abs_g2_nc:\sym
+ movk \dst, #:abs_g1_nc:\sym
+ movk \dst, #:abs_g0_nc:\sym
+#endif
.endm
/*
@@ -190,6 +193,7 @@ lr .req x30 // link register
* the address
*/
.macro ldr_l, dst, sym, tmp=
+#ifndef MODULE
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
@@ -197,6 +201,15 @@ lr .req x30 // link register
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
+#else
+ .ifb \tmp
+ adr_l \dst, \sym
+ ldr \dst, [\dst]
+ .else
+ adr_l \tmp, \sym
+ ldr \dst, [\tmp]
+ .endif
+#endif
.endm
/*
@@ -206,8 +219,13 @@ lr .req x30 // link register
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
+#ifndef MODULE
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
+#else
+ adr_l \tmp, \sym
+ str \src, [\tmp]
+#endif
.endm
/*
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index bfe632808d77..90c39a662379 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index fea10736b11f..439f6b5d31f6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -47,6 +47,7 @@
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/sysreg.h>
+#include <asm/cpufeature.h>
/*
* __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
return read_sysreg(CurrentEL) == CurrentEL_EL2;
}
+static inline bool has_vhe(void)
+{
+ if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ return true;
+
+ return false;
+}
+
#ifdef CONFIG_ARM64_VHE
extern void verify_cpu_run_el(void);
#else
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933ed441..d1ff83dfe5de 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
__uint128_t vregs[32];
__u32 fpsr;
__u32 fpcr;
+ __u32 __reserved[2];
};
struct user_hwdebug_state {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 923841ffe4a9..43512d4d7df2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -683,7 +683,7 @@ el0_inv:
mov x0, sp
mov x1, #BAD_SYNC
mov x2, x25
- bl bad_mode
+ bl bad_el0_sync
b ret_to_user
ENDPROC(el0_sync)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fc35e06ccaac..a22161ccf447 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
/* (address, ctrl) registers */
limit = regset->n * regset->size;
while (count && offset < limit) {
+ if (count < PTRACE_HBP_ADDR_SZ)
+ return -EINVAL;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
offset, offset + PTRACE_HBP_ADDR_SZ);
if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
return ret;
offset += PTRACE_HBP_ADDR_SZ;
+ if (!count)
+ break;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
offset, offset + PTRACE_HBP_CTRL_SZ);
if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct user_pt_regs newregs;
+ struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct user_fpsimd_state newstate;
+ struct user_fpsimd_state newstate =
+ target->thread.fpsimd_state.user_fpsimd;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- unsigned long tls;
+ unsigned long tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- int syscallno, ret;
+ int syscallno = task_pt_regs(target)->syscallno;
+ int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
const void __user *ubuf)
{
int ret;
- compat_ulong_t tls;
+ compat_ulong_t tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
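
All of these ptrace hunks share one idea: user_regset_copyin() honours pos/count, so a SETREGSET request shorter than the regset overwrites only a prefix of the kernel-side buffer, and when that buffer starts out uninitialized on the stack, the untouched tail is written back into the task as stack garbage. Seeding the buffer from the live state turns a partial write into a read-modify-write; condensed (a sketch of the pattern, not additional code from the patch):

	struct user_fpsimd_state newstate =
		target->thread.fpsimd_state.user_fpsimd;	/* seed every field */

	/* a short write now updates only the leading bytes; the rest keeps
	 * the task's current values instead of uninitialized stack */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
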
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 23e9e13bd2aa..655e65f38f31 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -11,6 +11,7 @@
* for more details.
*/
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {
static int __init register_cpufreq_notifier(void)
{
- if (cap_parsing_failed)
+ /*
+ * on ACPI-based systems we need to use the default cpu capacity
+ * until we have the necessary code to parse the cpu capacity, so
+ * skip registering cpufreq notifier.
+ */
+ if (!acpi_disabled || cap_parsing_failed)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5b830be79c01..659b2e6b6cf7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
}
/*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
- siginfo_t info;
- void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
+
+ die("Oops - bad mode", regs, 0);
+ local_irq_disable();
+ panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+ siginfo_t info;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+ console_verbose();
+
+ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+ smp_processor_id(), esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
info.si_code = ILL_ILLOPC;
info.si_addr = pc;
- arm64_notify_die("Oops - bad mode", regs, &info, 0);
+ current->thread.fault_address = 0;
+ current->thread.fault_code = 0;
+
+ force_sig_info(info.si_signo, &info, current);
}
void __pte_error(const char *file, int line, unsigned long val)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 964b7549af5c..e25584d72396 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -239,7 +239,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
ncontig = find_num_contig(vma->vm_mm, addr, cpte,
*cpte, &pgsize);
for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
- changed = ptep_set_access_flags(vma, addr, cpte,
+ changed |= ptep_set_access_flags(vma, addr, cpte,
pfn_pte(pfn,
hugeprot),
dirty);
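
The one-character hugetlb fix matters because the loop walks every pte of a contiguous range: with plain '=', only the final iteration's result survived, so a flags change on an earlier pte could be reported as "unchanged" and the follow-up maintenance skipped. The accumulate-over-a-loop shape, in miniature (update_one() is a hypothetical stand-in):

	int i, changed = 0;

	for (i = 0; i < ncontig; i++)
		changed |= update_one(i);	/* '=' would keep only the last result */
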
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 716d1226ba69..380ebe705093 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -404,6 +404,8 @@ void __init mem_init(void)
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
swiotlb_init(1);
+ else
+ swiotlb_force = SWIOTLB_NO_FORCE;
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e264fc7..e93c9494503a 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+ long long c, old;
+
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ old = atomic64_cmpxchg(v, c, c + i);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long c, old, dec;
+
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
#define ATOMIC_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
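
Both helpers added to the frv atomic.h above follow the standard
compare-and-swap retry loop: read the value, stop if the guard condition
holds, otherwise attempt the exchange and retry if another CPU won the race.
A user-space sketch of the same algorithm with C11 atomics (illustrative,
not the frv API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* add_unless(v, a, u): add 'a' to *v unless *v == u; true if added. */
static bool add_unless(_Atomic long long *v, long long a, long long u)
{
	long long c = atomic_load(v);

	while (c != u) {
		/* On failure this reloads 'c' with the current value,
		 * so the loop retries against fresh state. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	}
	return false;
}

int main(void)
{
	_Atomic long long v = 5;

	printf("%d %lld\n", add_unless(&v, 1, 0), (long long)v);	/* 1 6 */
	printf("%d %lld\n", add_unless(&v, 1, 6), (long long)v);	/* 0 6 */
	return 0;
}
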
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311735c8..67e333aa7629 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
struct task_struct;
struct thread_struct;
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
struct fpu_state_struct;
extern asmlinkage void fpu_save(struct fpu_state_struct *);
#define switch_fpu(prev, next) \
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 3f9406d9b9d6..da87943328a5 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -6,7 +6,7 @@
#endif
#include <linux/compiler.h>
-#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
@@ -17,6 +17,12 @@
* to include/asm-i386/bitops.h or kerneldoc
*/
+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
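
SHIFT_PER_LONG is just log2(BITS_PER_LONG): 64 = 2^6 and 32 = 2^5. Moving
the definition here keeps the uapi header free of kernel-only symbols while
the bitops code can still split a bit number into a word index plus an
in-word offset. A small sketch of that decomposition (assumed layout, not
the parisc code itself):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define SHIFT_PER_LONG	(BITS_PER_LONG == 64 ? 6 : 5)

int main(void)
{
	unsigned long bit  = 200;			/* bitmap bit number */
	unsigned long word = bit >> SHIFT_PER_LONG;	/* which long        */
	unsigned long off  = bit & (BITS_PER_LONG - 1);	/* which bit in it   */

	/* On a 64-bit box: bit 200 -> word 3, offset 8 */
	printf("bit %lu -> word %lu, offset %lu\n", bit, word, off);
	return 0;
}
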
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
index e0a23c7bdd43..07fa7e50bdc0 100644
--- a/arch/parisc/include/uapi/asm/bitsperlong.h
+++ b/arch/parisc/include/uapi/asm/bitsperlong.h
@@ -3,10 +3,8 @@
#if defined(__LP64__)
#define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
#else
#define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
#endif
#include <asm-generic/bitsperlong.h>
diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
index e78403b129ef..928e1bbac98f 100644
--- a/arch/parisc/include/uapi/asm/swab.h
+++ b/arch/parisc/include/uapi/asm/swab.h
@@ -1,6 +1,7 @@
#ifndef _PARISC_SWAB_H
#define _PARISC_SWAB_H
+#include <asm/bitsperlong.h>
#include <linux/types.h>
#include <linux/compiler.h>
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
}
#define __arch_swab32 __arch_swab32
-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
/*
** From "PA-RISC 2.0 Architecture", HP Professional Books.
** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
return x;
}
#define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */
#endif /* _PARISC_SWAB_H */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a8ee573fe610..281f4f1fcd1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -164,7 +164,6 @@ config PPC
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_KERNEL_GZIP
- select HAVE_CC_STACKPROTECTOR
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
bool "Build a relocatable kernel"
depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
select NONSTATIC_KERNEL
+ select MODULE_REL_CRCS if MODVERSIONS
help
This builds a kernel image that is capable of running at the
location the kernel is loaded at. For ppc32, there are no
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 1c64bc6330bc..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -36,12 +36,13 @@
#ifdef CONFIG_HUGETLB_PAGE
static inline int hash__hugepd_ok(hugepd_t hpd)
{
+ unsigned long hpdval = hpd_val(hpd);
/*
* If it is not a pte and has the hugepd shift mask
* set, then it is a hugepd directory pointer
*/
- if (!(hpd.pd & _PAGE_PTE) &&
- ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+ if (!(hpdval & _PAGE_PTE) &&
+ ((hpdval & HUGEPD_SHIFT_MASK) != 0))
return true;
return false;
}
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f61cad3de4e6..4c935f7504f7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index b312b152461b..6e834caa3720 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
{
int i;
+#ifndef __clang__ /* clang can't cope with this */
BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
if (!static_key_initialized) {
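
The #ifndef __clang__ guard is needed because clang does not fold
__builtin_constant_p() through the always_inline parameter the way gcc does,
so the assertion would fire on every call. The mechanism being guarded is a
compile-time assert; a simplified stand-in for the kernel's BUILD_BUG_ON
(gcc/clang builtins assumed):

#include <stdio.h>

/* Negative array size when 'cond' is true => compile error. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	BUILD_BUG_ON(sizeof(int) < 2);		/* constant and false: fine */
	BUILD_BUG_ON(!__builtin_constant_p(7));	/* 7 is constant: fine      */
	/* BUILD_BUG_ON(1); */			/* would break the build    */
	printf("built fine\n");
	return 0;
}
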
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index ede215167d1a..7f4025a6c69e 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
* We have only four bits to encode, MMU page size
*/
BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
- return __va(hpd.pd & HUGEPD_ADDR_MASK);
+ return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
- return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+ return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}
static inline unsigned int hugepd_shift(hugepd_t hpd)
@@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
{
BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
- return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+ return (pte_t *)__va(hpd_val(hpd) &
+ ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
- return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+ return (pte_t *)((hpd_val(hpd) &
+ ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+ return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
- return hpd.pd & HUGEPD_SHIFT_MASK;
+ return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index a34c764ca8dd..233a7e8cc8e3 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
{
int i;
+#ifndef __clang__ /* clang can't cope with this */
BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cc12c61ef315..53885512b8d3 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
}
#endif
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 172849727054..0cd8a3852763 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd.pd & 0x4) != 0);
+ return ((hpd_val(hpd) & 0x4) != 0);
#else
- return (hpd.pd > 0);
+ /* We clear the top bit to indicate hugepd */
+ return ((hpd_val(hpd) & PD_HUGE) == 0);
#endif
}
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 56398e7e6100..47120bf2670c 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -294,15 +294,12 @@ extern long long virt_phys_offset;
#include <asm/pgtable-types.h>
#endif
-typedef struct { signed long pd; } hugepd_t;
#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep) (0)
#define pgd_huge(pgd) (0)
#endif /* CONFIG_HUGETLB_PAGE */
-#define __hugepd(x) ((hugepd_t) { (x) })
-
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index e157489ee7a1..ae0a23091a9b 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -65,6 +65,7 @@ struct power_pmu {
#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
+#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
/*
* Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 49c0a5a80efa..9c0f5db5cf46 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
return pmd_raw(old) == prev;
}
+typedef struct { __be64 pdbe; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
+
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return be64_to_cpu(x.pdbe);
+}
+
#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index e7f4f3e0fcde..8bd3b13fe2fb 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
}
#endif
+typedef struct { unsigned long pd; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return x.pd;
+}
+
#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
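
Taken together with the pgtable-be-types.h hunk above, this completes the
hugepd_t conversion: the raw hpd.pd field is gone and every reader goes
through hpd_val(), so big-endian page-table configs can store the value as
__be64 without touching callers. A stripped-down sketch of the accessor
pattern (user space, with htobe64/be64toh standing in for the kernel's
cpu_to_be64/be64_to_cpu; glibc <endian.h> assumed):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Storage format is a private detail of the wrapper type. */
typedef struct { uint64_t pdbe; } hugepd_t;	/* held big-endian */

#define __hugepd(x)	((hugepd_t){ htobe64(x) })

static inline uint64_t hpd_val(hugepd_t h)
{
	return be64toh(h.pdbe);		/* callers see CPU-endian bits */
}

int main(void)
{
	hugepd_t h = __hugepd(0x1234);

	printf("val=0x%llx\n", (unsigned long long)hpd_val(h));
	return 0;
}
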
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c56ea8c84abb..c4ced1d01d57 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -157,7 +157,7 @@
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
-#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MFTMR 0x7c0002dc
#define PPC_INST_MSGSND 0x7c00019c
#define PPC_INST_MSGCLR 0x7c0001dc
@@ -174,13 +174,13 @@
#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFMCI 0x4c00004c
#define PPC_INST_MFSPR_DSCR 0x7c1102a6
-#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
-#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
-#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
-#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MFVSRD 0x7c000066
#define PPC_INST_MTVSRD 0x7c000166
#define PPC_INST_SLBFEE 0x7c0007a7
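
These *_MASK constants are used to match fetched instruction words against
an opcode pattern with (insn & MASK) == PATTERN; clearing bit 0 of each mask
makes the comparison ignore that bit, so the two encodings that differ only
there are both accepted. The matching idiom in miniature (values taken from
the DSCR lines above):

#include <stdint.h>
#include <stdio.h>

#define PATTERN	0x7c1102a6u	/* PPC_INST_MFSPR_DSCR */
#define MASK	0xfc1ffffeu	/* bit 0 is now "don't care" */

static int matches(uint32_t insn)
{
	return (insn & MASK) == (PATTERN & MASK);
}

int main(void)
{
	printf("%d %d %d\n",
	       matches(0x7c1102a6),	/* exact match: 1           */
	       matches(0x7c1102a7),	/* differs only in bit 0: 1 */
	       matches(0x7c1103a6));	/* the mtspr form: 0        */
	return 0;
}
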
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644
index 6720190eabec..000000000000
--- a/arch/powerpc/include/asm/stackprotector.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function. The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
- unsigned long canary;
-
- /* Try to get a semi random initial value. */
- get_random_bytes(&canary, sizeof(canary));
- canary ^= mftb();
- canary ^= LINUX_VERSION_CODE;
-
- current->stack_canary = canary;
- __stack_chk_guard = current->stack_canary;
-}
-
-#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 23f8082d7bfa..f4c2b52e58b3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0601e6a7297c..195a9fc8f81c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,9 +91,6 @@ int main(void)
DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
#endif
-#ifdef CONFIG_CC_STACKPROTECTOR
- DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 8180bfd7ab93..9de7f79e702b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*
* For pHyp, we have to enable IO for log retrieval. Otherwise,
* 0xFFs are always returned from PCI config space.
+ *
+ * When the @severity is EEH_LOG_PERM, the PE is going to be
+ * removed. Prior to that, the drivers for devices included in
+ * the PE will be closed. The drivers rely on a working IO path
+ * to bring the devices to a quiet state. Otherwise, PCI traffic
+ * from those devices after they are removed is likely to cause
+ * another unexpected EEH error.
*/
if (!(pe->type & EEH_PE_PHB)) {
- if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+ severity == EEH_LOG_PERM)
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
/*
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d88573bdd090..b94887165a10 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
struct eeh_pe *pe = (struct eeh_pe *)data;
- bool *clear_sw_state = flag;
+ bool clear_sw_state = *(bool *)flag;
int i, rc = 1;
for (i = 0; rc && i < 3; i++)
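
The eeh_driver fix above is a classic void-pointer slip: 'flag' carries the
address of a bool, and the old code bound it to a bool pointer, so later
truth tests checked "was a pointer passed" rather than the value the caller
set. The difference in a tiny sketch:

#include <stdbool.h>
#include <stdio.h>

static void callback(void *flag)
{
	bool *p = flag;			/* old style: truthy for any arg */
	bool clear = *(bool *)flag;	/* fixed: the caller's value     */

	printf("pointer-test=%d value=%d\n", p != NULL, clear);
}

int main(void)
{
	bool off = false;

	callback(&off);		/* prints pointer-test=1 value=0 */
	return 0;
}
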
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 5742dbdbee46..3841d749a430 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
- lwz r0,TSK_STACK_CANARY(r2)
- lis r4,__stack_chk_guard@ha
- stw r0,__stack_chk_guard@l(r4)
-#endif
+
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index bb1807184bad..0b0f89685b67 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
for (end = (void *)vers + size; vers < end; vers++)
if (vers->name[0] == '.') {
memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
- /* The TOC symbol has no CRC computed. To avoid CRC
- * check failing, we must force it to the expected
- * value (see CRC check in module.c).
- */
- if (!strcmp(vers->name, "TOC."))
- vers->crc = -(unsigned long)reloc_start;
-#endif
}
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 04885cec24df..5dd056df0baa 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -64,12 +64,6 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ec47a939cbdd..ac83eb04a8b8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
+ if (!PHANDLE_VALID(cpu_pkg))
+ return;
+
prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
prom.cpu = be32_to_cpu(rval);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e4744ff38a17..925a4ef90559 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
flush_fp_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ buf[32] = target->thread.fp_state.fpscr;
+
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
if (!ret)
@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
+ for (i = 0; i < 32; i++)
+ buf[i] = target->thread.TS_CKFPR(i);
+ buf[32] = target->thread.ckfp_state.fpscr;
+
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
if (!ret)
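
All four ptrace hunks apply the same rule: user_regset_copyin() may fill
only part of the local buffer (pos/count can describe a partial write), so
the buffer must first be seeded with the task's current register values or
the untouched tail is garbage when it is written back. The read-modify-write
discipline, modelled in user space (copyin() is a simplified stand-in):

#include <stdio.h>
#include <string.h>

#define NREGS 4

static long task_regs[NREGS] = { 10, 11, 12, 13 };

/* Simplified user_regset_copyin(): update 'count' regs from 'pos'. */
static void copyin(long *buf, int pos, int count, const long *src)
{
	memcpy(buf + pos, src, count * sizeof(long));
}

int main(void)
{
	long buf[NREGS];
	long new_val = 99;
	int i;

	memcpy(buf, task_regs, sizeof(buf));	/* seed with current state */
	copyin(buf, 2, 1, &new_val);		/* partial update of reg 2 */
	memcpy(task_regs, buf, sizeof(buf));	/* write back              */

	for (i = 0; i < NREGS; i++)
		printf("%ld ", task_regs[i]);	/* 10 11 99 13 */
	printf("\n");
	return 0;
}
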
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 80334937e14f..67e19a0821be 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int create_section_mapping(unsigned long start, unsigned long end)
+int hash__create_section_mapping(unsigned long start, unsigned long end)
{
int rc = htab_bolt_mapping(start, end, __pa(start),
pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
return rc;
}
-int remove_section_mapping(unsigned long start, unsigned long end)
+int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
int rc = htab_remove_mapping(start, end, mmu_linear_psize,
mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index d5026f3800b6..37b5f91e381b 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
int hugepd_ok(hugepd_t hpd)
{
bool is_hugepd;
+ unsigned long hpdval;
+
+ hpdval = hpd_val(hpd);
/*
* We should not find this format in the page directory; warn otherwise.
*/
- is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+ is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
WARN(is_hugepd, "Found wrong page directory format\n");
return 0;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 289df38fb7e0..8c3389cbcd12 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
-#define hugepd_none(hpd) ((hpd).pd == 0)
+#define hugepd_none(hpd) (hpd_val(hpd) == 0)
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
for (i = 0; i < num_hugepd; i++, hpdp++) {
if (unlikely(!hugepd_none(*hpdp)))
break;
- else
+ else {
#ifdef CONFIG_PPC_BOOK3S_64
- hpdp->pd = __pa(new) |
- (shift_to_mmu_psize(pshift) << 2);
+ *hpdp = __hugepd(__pa(new) |
+ (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
- hpdp->pd = __pa(new) |
- (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
- _PMD_PAGE_512K) |
- _PMD_PRESENT;
+ *hpdp = __hugepd(__pa(new) |
+ (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+ _PMD_PAGE_512K) | _PMD_PRESENT);
#else
/* We use the old format for PPC_FSL_BOOK3E */
- hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+ *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
+ }
}
/* If we bailed from the for loop early, an error occurred, clean up */
if (i < num_hugepd) {
for (i = i - 1 ; i >= 0; i--, hpdp--)
- hpdp->pd = 0;
+ *hpdp = __hugepd(0);
kmem_cache_free(cachep, new);
}
spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
return;
for (i = 0; i < num_hugepd; i++, hpdp++)
- hpdp->pd = 0;
+ *hpdp = __hugepd(0);
if (shift >= pdshift)
hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
* If pdshift and shift have the same value, we don't
* use the pgt cache for hugepd.
*/
- if (pdshift > shift) {
+ if (pdshift > shift)
pgtable_cache_add(pdshift - shift, NULL);
- if (!PGT_CACHE(pdshift - shift))
- panic("hugetlbpage_init(): could not create "
- "pgtable cache for %d bit pagesize\n", shift);
- }
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
else if (!hugepte_cache) {
/*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
else if (mmu_psize_defs[MMU_PAGE_2M].shift)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
- else
- panic("%s: Unable to set default huge page size\n", __func__);
-
return 0;
}
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index a175cd82ae8c..f2108c40e697 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
+ if (!new)
+ panic("Could not allocate pgtable cache for order %d", shift);
+
kfree(name);
pgtable_cache[shift - 1] = new;
+
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
@@ -88,7 +92,7 @@ void pgtable_cache_init(void)
{
pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
- if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+ if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
/*
* In all current configs, when the PUD index exists it's the
@@ -97,11 +101,4 @@ void pgtable_cache_init(void)
*/
if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
- if (!PGT_CACHE(PGD_INDEX_SIZE))
- panic("Couldn't allocate pgd cache");
- if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
- panic("Couldn't allocate pmd pgtable caches");
- if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
- panic("Couldn't allocate pud pgtable caches");
}
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index ebf9782bacf9..653ff6c74ebe 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
else if (mmu_hash_ops.hpte_clear_all)
mmu_hash_ops.hpte_clear_all();
}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return -ENODEV;
+
+ return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return -ENODEV;
+
+ return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index cfa53ccc8baf..34f1a0dbc898 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
if (!pmdp)
return -ENOMEM;
if (map_page_size == PMD_SIZE) {
- ptep = (pte_t *)pudp;
+ ptep = pmdp_ptep(pmdp);
goto set_the_pte;
}
ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
}
pmdp = pmd_offset(pudp, ea);
if (map_page_size == PMD_SIZE) {
- ptep = (pte_t *)pudp;
+ ptep = pmdp_ptep(pmdp);
goto set_the_pte;
}
if (!pmd_present(*pmdp)) {
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fd3e4034c04d..270eb9b74e2e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
*/
if (TRAP(regs) != 0xf00)
use_siar = 0;
+ else if ((ppmu->flags & PPMU_NO_SIAR))
+ use_siar = 0;
else if (marked)
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 6447dc1c3d89..929b56d47ad9 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e)
EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
EVENT(PM_CMPLU_STALL, 0x1e054)
EVENT(PM_INST_CMPL, 0x00002)
-EVENT(PM_BRU_CMPL, 0x40060)
+EVENT(PM_BRU_CMPL, 0x10012)
EVENT(PM_BR_MPRED_CMPL, 0x400f6)
/* All L1 D cache load references counted at finish, gated by reject */
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 346010e8d463..7332634e18c9 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = {
.bhrb_filter_map = power9_bhrb_filter_map,
.get_constraint = isa207_get_constraint,
.disable_pmc = isa207_disable_pmc,
- .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
+ .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power9_generic_events),
.generic_events = power9_generic_events,
.cache_events = &power9_cache_events,
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index d38e86fd5720..60c57657c772 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -20,6 +20,7 @@
#include <asm/xics.h>
#include <asm/io.h>
#include <asm/opal.h>
+#include <asm/kvm_ppc.h>
static void icp_opal_teardown_cpu(void)
{
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
* Should we be flagging idle loop instead?
* Or creating some task to be scheduled?
*/
- opal_int_eoi((0x00 << 24) | XICS_IPI);
+ if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
+ force_external_irq_replay();
+}
+
+static unsigned int icp_opal_get_xirr(void)
+{
+ unsigned int kvm_xirr;
+ __be32 hw_xirr;
+ int64_t rc;
+
+ /* Handle an interrupt latched by KVM first */
+ kvm_xirr = kvmppc_get_xics_latch();
+ if (kvm_xirr)
+ return kvm_xirr;
+
+ /* Then ask OPAL */
+ rc = opal_int_get_xirr(&hw_xirr, false);
+ if (rc < 0)
+ return 0;
+ return be32_to_cpu(hw_xirr);
}
static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
unsigned int xirr;
unsigned int vec;
unsigned int irq;
- int64_t rc;
- rc = opal_int_get_xirr(&xirr, false);
- if (rc < 0)
- return 0;
- xirr = be32_to_cpu(xirr);
+ xirr = icp_opal_get_xirr();
vec = xirr & 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
xics_mask_unknown_vec(vec);
/* We might learn about it later, so EOI it */
- opal_int_eoi(xirr);
+ if (opal_int_eoi(xirr) > 0)
+ force_external_irq_replay();
return 0;
}
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index e659daffe368..e00975361fec 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -69,7 +69,7 @@ CONFIG_CMA=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZPOOL=m
+CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
@@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
@@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
@@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_IMA=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 95ceac50bc65..f05d2d6e1087 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
+# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
+CONFIG_LIVEPATCH=y
CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
@@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
-CONFIG_RBTREE_TEST=m
-CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
@@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_CORDIC=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index bc7b176f5795..2cf87343b590 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_CORDIC=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 2d40ef0a6295..d00e368fb5e6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
+CONFIG_DEVKMEM=y
CONFIG_RAW_DRIVER=m
CONFIG_VIRTIO_BALLOON=y
CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..8e136b88cdf4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
" lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+ : \
+ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
+ : "memory"); \
}
#define __ctl_store(array, low, high) { \
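
The added "memory" clobber tells the compiler that the lctlg sequence may
observe or affect arbitrary memory, so pending stores must be flushed before
the asm and cached values reloaded after it; without the clobber the
compiler may reorder or elide accesses around the control-register load.
The same idiom, reduced to a portable compiler barrier (gcc/clang extended
asm assumed):

#include <stdio.h>

static int shared;

int main(void)
{
	shared = 1;
	/* Empty asm with a "memory" clobber: the store above cannot be
	 * sunk below it, and loads below cannot be hoisted above it. */
	asm volatile("" ::: "memory");
	printf("%d\n", shared);
	return 0;
}
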
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7447ba509c30..12020b55887b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
+ if (MACHINE_HAS_VX)
+ convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+ else
+ memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index bec71e902be3..6484a250021e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
+ sizeof(S390_lowcore.stfle_fac_list));
if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
ret = -EFAULT;
kfree(mach);
@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* Populate the facility mask initially. */
memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
+ sizeof(S390_lowcore.stfle_fac_list));
for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
if (i < kvm_s390_fac_list_mask_size())
kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7a1897c51c54..d56ef26d4681 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
return pgste;
}
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
pgste_t pgste, pte_t old, pte_t new)
{
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
} else {
*ptep = new;
}
+ return old;
}
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_direct(mm, addr, ptep);
- ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_lazy(mm, addr, ptep);
- ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b84be675e507..d0317993e947 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
static inline void tsb_context_switch(struct mm_struct *mm)
{
__tsb_context_switch(__pa(mm->pgd),
- &mm->context.tsb_block[0],
+ &mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- (mm->context.tsb_block[1].tsb ?
- &mm->context.tsb_block[1] :
+ (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+ &mm->context.tsb_block[MM_TSB_HUGE] :
NULL)
#else
NULL
#endif
- , __pa(&mm->context.tsb_descr[0]));
+ , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
}
void tsb_grow(struct mm_struct *mm,
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 3bebf395252c..4d0248aa0928 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
unsigned long order = get_order(size);
unsigned long p;
- p = __get_free_pages(GFP_KERNEL, order);
+ p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c
index c59af546f522..3caed4023589 100644
--- a/arch/sparc/kernel/sstate.c
+++ b/arch/sparc/kernel/sstate.c
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
"Linux powering off";
static const char rebooting_msg[32] __attribute__((aligned(32))) =
"Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
- "Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+ "Linux panicking";
static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
{
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
{
- do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+ do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
return NOTIFY_DONE;
}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4bc10e44d1ca..dfc97a47c9a0 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
atomic_inc(&sun4v_resum_oflow_cnt);
}
+/* Given a set of registers, get the virtual address that was being accessed
+ * by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+ unsigned int insn;
+
+ if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+ return compute_effective_address(regs, insn,
+ (insn >> 25) & 0x1f);
+ }
+ return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+ struct sun4v_error_entry *ent)
+{
+
+ unsigned int attrs = ent->err_attrs;
+
+ if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+ unsigned long addr = ent->err_raddr;
+ siginfo_t info;
+
+ if (addr == ~(u64)0) {
+ /* This seems highly unlikely to ever occur */
+ pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+ } else {
+ unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+ PAGE_SIZE);
+
+ /* Break the unfortunate news. */
+ pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+ addr);
+ pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu pages.\n",
+ page_cnt);
+
+ while (page_cnt-- > 0) {
+ if (pfn_valid(addr >> PAGE_SHIFT))
+ get_page(pfn_to_page(addr >> PAGE_SHIFT));
+ addr += PAGE_SIZE;
+ }
+ }
+ info.si_signo = SIGKILL;
+ info.si_errno = 0;
+ info.si_trapno = 0;
+ force_sig_info(info.si_signo, &info, current);
+
+ return true;
+ }
+ if (attrs & SUN4V_ERR_ATTRS_PIO) {
+ siginfo_t info;
+
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+ force_sig_info(info.si_signo, &info, current);
+
+ return true;
+ }
+
+ /* Default to doing nothing */
+ return false;
+}
+
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
* Log the event, clear the first word of the entry, and die.
*/
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
put_cpu();
+ if (!(regs->tstate & TSTATE_PRIV) &&
+ sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+ /* DON'T PANIC: This userspace error was handled. */
+ return;
+ }
+
#ifdef CONFIG_PCI
/* Check for the special PCI poke sequence. */
if (pci_poke_in_progress && pci_poke_cpu == cpu) {
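
sun4v_get_vaddr() above recovers the fault address by re-reading the
trapping instruction from user memory and handing it to
compute_effective_address(); the (insn >> 25) & 0x1f step pulls the 5-bit rd
field out of bits 29:25 of the SPARC instruction word. The field extraction
on its own (encoding value chosen for illustration):

#include <stdint.h>
#include <stdio.h>

/* SPARC format-3: op[31:30] rd[29:25] op3[24:19] rs1[18:14] ... */
static unsigned int insn_rd(uint32_t insn)
{
	return (insn >> 25) & 0x1f;
}

int main(void)
{
	uint32_t insn = 0xd05a4000;	/* an ldx encoding with rd = %o0 */

	printf("rd = %u\n", insn_rd(insn));	/* prints rd = 8 */
	return 0;
}
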
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index d89b7011667c..e279572824b1 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct pt_regs regs;
+ struct pt_regs regs = *task_pt_regs(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
sizeof(regs));
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index cc3bd583dce1..9e240fcba784 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include "ctype.h"
+#include "string.h"
int memcmp(const void *s1, const void *s2, size_t len)
{
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 725e820602b1..113588ddb43f 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
#define memset(d,c,l) __builtin_memset(d,c,l)
#define memcmp __builtin_memcmp
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+ unsigned int base);
+
#endif /* BOOT_STRING_H */
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 31c34ee131f3..6ef688a1ef3e 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1020,7 +1020,8 @@ struct {
const char *basename;
struct simd_skcipher_alg *simd;
} aesni_simd_skciphers2[] = {
-#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
+#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
+ IS_BUILTIN(CONFIG_CRYPTO_PCBC)
{
.algname = "pcbc(aes)",
.drvname = "pcbc-aes-aesni",
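
The new guard is subtle: IS_ENABLED() is true for both =y and =m, so a
built-in aesni-intel would try to register pcbc(aes) wrappers whose template
lives in a module that may never be loaded; the replacement accepts =m only
when this file is itself being built as a module. Roughly what the
<linux/kconfig.h> helpers reduce to (simplified model, not the real
token-pasting implementation):

#include <stdio.h>

/* Kbuild convention: =y defines FOO, =m defines FOO_MODULE. */
#define CONFIG_CRYPTO_PCBC_MODULE 1	/* pretend pcbc is =m here */

#ifdef CONFIG_CRYPTO_PCBC
# define PCBC_BUILTIN 1
#else
# define PCBC_BUILTIN 0
#endif

#ifdef CONFIG_CRYPTO_PCBC_MODULE
# define PCBC_AS_MODULE 1
#else
# define PCBC_AS_MODULE 0
#endif

#define PCBC_IS_BUILTIN	PCBC_BUILTIN			 /* IS_BUILTIN() */
#define PCBC_IS_ENABLED	(PCBC_BUILTIN || PCBC_AS_MODULE) /* IS_ENABLED() */

int main(void)
{
	/* enabled=1 builtin=0: "enabled" alone is not safe for builtins */
	printf("enabled=%d builtin=%d\n", PCBC_IS_ENABLED, PCBC_IS_BUILTIN);
	return 0;
}
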
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 701d29f8e4d3..57f7ec35216e 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -255,23 +255,6 @@ ENTRY(__switch_to_asm)
END(__switch_to_asm)
/*
- * The unwinder expects the last frame on the stack to always be at the same
- * offset from the end of the page, which allows it to validate the stack.
- * Calling schedule_tail() directly would break that convention because it's an
- * asmlinkage function so its argument has to be pushed on the stack. This
- * wrapper creates a proper "end of stack" frame header before the call.
- */
-ENTRY(schedule_tail_wrapper)
- FRAME_BEGIN
-
- pushl %eax
- call schedule_tail
- popl %eax
-
- FRAME_END
- ret
-ENDPROC(schedule_tail_wrapper)
-/*
* A newly forked process directly context switches into this address.
*
* eax: prev task we switched from
@@ -279,15 +262,24 @@ ENDPROC(schedule_tail_wrapper)
* edi: kernel thread arg
*/
ENTRY(ret_from_fork)
- call schedule_tail_wrapper
+ FRAME_BEGIN /* help unwinder find end of stack */
+
+ /*
+ * schedule_tail() is asmlinkage so we have to put its 'prev' argument
+ * on the stack.
+ */
+ pushl %eax
+ call schedule_tail
+ popl %eax
testl %ebx, %ebx
jnz 1f /* kernel threads are uncommon */
2:
/* When we fork, we trace the syscall return in the child, too. */
- movl %esp, %eax
+ leal FRAME_OFFSET(%esp), %eax
call syscall_return_slowpath
+ FRAME_END
jmp restore_all
/* kernel thread */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 5b219707c2f2..044d18ebc43c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,6 +36,7 @@
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
+#include <asm/frame.h>
#include <linux/err.h>
.code64
@@ -408,17 +409,19 @@ END(__switch_to_asm)
* r12: kernel thread arg
*/
ENTRY(ret_from_fork)
+ FRAME_BEGIN /* help unwinder find end of stack */
movq %rax, %rdi
- call schedule_tail /* rdi: 'prev' task parameter */
+ call schedule_tail /* rdi: 'prev' task parameter */
- testq %rbx, %rbx /* from kernel_thread? */
- jnz 1f /* kernel threads are uncommon */
+ testq %rbx, %rbx /* from kernel_thread? */
+ jnz 1f /* kernel threads are uncommon */
2:
- movq %rsp, %rdi
+ leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
call syscall_return_slowpath /* returns with IRQs disabled */
TRACE_IRQS_ON /* user mode is traced as IRQS on */
SWAPGS
+ FRAME_END
jmp restore_regs_and_iret
1:
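Both entry paths now open a frame with FRAME_BEGIN, which (with CONFIG_FRAME_POINTER=y) pushes one extra word below the saved pt_regs; FRAME_OFFSET compensates so syscall_return_slowpath() still receives the real pt_regs pointer. A rough C model of the adjustment, where the one-word offset is an assumption tied to the frame-pointer config:

	/* Sketch only: FRAME_BEGIN pushed one word, so pt_regs now sits
	 * FRAME_OFFSET bytes above the stack pointer rather than at it. */
	static struct pt_regs *forked_child_regs(unsigned long sp)
	{
		return (struct pt_regs *)(sp + FRAME_OFFSET);	/* was: (void *)sp */
	}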
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 05612a2529c8..496e60391fac 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
* all online cpus.
*/
cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- "perf/x86/amd/ibs:STARTING",
+ "perf/x86/amd/ibs:starting",
x86_pmu_amd_ibs_starting_cpu,
x86_pmu_amd_ibs_dying_cpu);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 019c5887b698..1635c0c8df23 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
+
+ /* There's no sense in having PEBS for non-sampling events: */
+ if (!is_sampling_event(event))
+ return -EINVAL;
}
/*
* check that PEBS LBR correction does not conflict with
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 86138267b68a..eb1484c86bb4 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
+ struct cpu_hw_events *sibling;
struct intel_excl_cntrs *c;
- c = per_cpu(cpu_hw_events, i).excl_cntrs;
+ sibling = &per_cpu(cpu_hw_events, i);
+ c = sibling->excl_cntrs;
if (c && c->core_id == core_id) {
cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
cpuc->excl_cntrs = c;
- cpuc->excl_thread_id = 1;
+ if (!sibling->excl_thread_id)
+ cpuc->excl_thread_id = 1;
break;
}
}
@@ -3987,7 +3990,7 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
}
- x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+ x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
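The 1ULL literal matters once num_counters can reach 32: shifting a plain 32-bit int by 32 is undefined behavior in C, so the old expression could truncate or corrupt intel_ctrl. A standalone illustration:

	#include <stdint.h>

	static uint64_t counter_mask(unsigned int num_counters)
	{
		/* 1 << 32 on a 32-bit int is undefined; widening the constant
		 * first keeps the shift well defined for up to 63 counters. */
		return (1ULL << num_counters) - 1;
	}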
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index fec8a461bdef..1076c9a77292 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -434,6 +434,7 @@ static struct pmu cstate_core_pmu = {
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .module = THIS_MODULE,
};
static struct pmu cstate_pkg_pmu = {
@@ -447,6 +448,7 @@ static struct pmu cstate_pkg_pmu = {
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .module = THIS_MODULE,
};
static const struct cstate_model nhm_cstates __initconst = {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index be202390bbd3..9dfeeeca0ea8 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
continue;
/* log dropped samples number */
- if (error[bit])
+ if (error[bit]) {
perf_log_lost_samples(event, error[bit]);
+ if (perf_event_account_interrupt(event))
+ x86_pmu_stop(event, 0);
+ }
+
if (counts[bit]) {
__intel_pmu_pebs_event(event, iregs, base,
top, bit, counts[bit]);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index bd34124449b0..22ef4f72cf32 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
- return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+ unsigned int pkgid = topology_logical_package_id(cpu);
+
+ /*
+ * The unsigned check also catches the '-1' return value for
+ * non-existent mappings in the topology map.
+ */
+ return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}
static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
/* must be done before validate_group */
pmu = cpu_to_rapl_pmu(event->cpu);
+ if (!pmu)
+ return -EINVAL;
event->cpu = pmu->cpu;
event->pmu_private = pmu;
event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
+ if (!pmu) {
+ pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+ if (!pmu)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&pmu->lock);
+ INIT_LIST_HEAD(&pmu->active_list);
+ pmu->pmu = &rapl_pmus->pmu;
+ pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+ rapl_hrtimer_init(pmu);
+
+ rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+ }
+
/*
* Check if there is an online cpu in the package which collects rapl
* events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
return 0;
}
-static int rapl_cpu_prepare(unsigned int cpu)
-{
- struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
- if (pmu)
- return 0;
-
- pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
- if (!pmu)
- return -ENOMEM;
-
- raw_spin_lock_init(&pmu->lock);
- INIT_LIST_HEAD(&pmu->active_list);
- pmu->pmu = &rapl_pmus->pmu;
- pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
- pmu->cpu = -1;
- rapl_hrtimer_init(pmu);
- rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
- return 0;
-}
-
static int rapl_check_hw_unit(bool apply_quirk)
{
u64 msr_rapl_power_unit_bits;
@@ -697,6 +698,7 @@ static int __init init_rapl_pmus(void)
rapl_pmus->pmu.start = rapl_pmu_event_start;
rapl_pmus->pmu.stop = rapl_pmu_event_stop;
rapl_pmus->pmu.read = rapl_pmu_event_read;
+ rapl_pmus->pmu.module = THIS_MODULE;
return 0;
}
@@ -802,29 +804,21 @@ static int __init rapl_pmu_init(void)
/*
* Install callbacks. Core will call them for each online cpu.
*/
-
- ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
- rapl_cpu_prepare, NULL);
- if (ret)
- goto out;
-
ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
"perf/x86/rapl:online",
rapl_cpu_online, rapl_cpu_offline);
if (ret)
- goto out1;
+ goto out;
ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
if (ret)
- goto out2;
+ goto out1;
rapl_advertise();
return 0;
-out2:
- cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out1:
- cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
pr_warn("Initialization failed (%d), disabled\n", ret);
cleanup_rapl_pmus();
@@ -835,7 +829,6 @@ module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
- cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
perf_pmu_unregister(&rapl_pmus->pmu);
cleanup_rapl_pmus();
}
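Several hunks above lean on the same idiom: topology_logical_package_id() reports an unmapped CPU as -1, and storing that into an unsigned variable turns it into a huge value, so a single `pkgid < maxpkg` comparison rejects both out-of-range ids and the error marker. In isolation:

	static int pkg_index_valid(int id, unsigned int maxpkg)
	{
		unsigned int pkgid = id;	/* -1 wraps to UINT_MAX */

		return pkgid < maxpkg;		/* false for -1 and for id >= maxpkg */
	}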
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 97c246f84dea..1ab45976474d 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
- return pmu->boxes[topology_logical_package_id(cpu)];
+ unsigned int pkgid = topology_logical_package_id(cpu);
+
+ /*
+ * The unsigned check also catches the '-1' return value for
+ * non-existent mappings in the topology map.
+ */
+ return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -733,6 +739,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
.start = uncore_pmu_event_start,
.stop = uncore_pmu_event_stop,
.read = uncore_pmu_event_read,
+ .module = THIS_MODULE,
};
} else {
pmu->pmu = *pmu->type->pmu;
@@ -763,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
pmu->registered = false;
}
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
- struct intel_uncore_pmu *pmu = type->pmus;
- struct intel_uncore_box *box;
- int i, pkg;
-
- if (pmu) {
- pkg = topology_physical_package_id(cpu);
- for (i = 0; i < type->num_boxes; i++, pmu++) {
- box = pmu->boxes[pkg];
- if (box)
- uncore_box_exit(box);
- }
- }
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
- struct intel_uncore_type **types;
-
- for (types = uncore_msr_uncores; *types; types++)
- __uncore_exit_boxes(*types++, smp_processor_id());
-}
-
static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
int pkg;
@@ -1057,86 +1040,6 @@ static void uncore_pci_exit(void)
}
}
-static int uncore_cpu_dying(unsigned int cpu)
-{
- struct intel_uncore_type *type, **types = uncore_msr_uncores;
- struct intel_uncore_pmu *pmu;
- struct intel_uncore_box *box;
- int i, pkg;
-
- pkg = topology_logical_package_id(cpu);
- for (; *types; types++) {
- type = *types;
- pmu = type->pmus;
- for (i = 0; i < type->num_boxes; i++, pmu++) {
- box = pmu->boxes[pkg];
- if (box && atomic_dec_return(&box->refcnt) == 0)
- uncore_box_exit(box);
- }
- }
- return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
- struct intel_uncore_type *type, **types = uncore_msr_uncores;
- struct intel_uncore_pmu *pmu;
- struct intel_uncore_box *box;
- int i, pkg, ncpus = 1;
-
- if (first_init) {
- /*
- * On init we get the number of online cpus in the package
- * and set refcount for all of them.
- */
- ncpus = cpumask_weight(topology_core_cpumask(cpu));
- }
-
- pkg = topology_logical_package_id(cpu);
- for (; *types; types++) {
- type = *types;
- pmu = type->pmus;
- for (i = 0; i < type->num_boxes; i++, pmu++) {
- box = pmu->boxes[pkg];
- if (!box)
- continue;
- /* The first cpu on a package activates the box */
- if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
- uncore_box_init(box);
- }
- }
-
- return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
- struct intel_uncore_type *type, **types = uncore_msr_uncores;
- struct intel_uncore_pmu *pmu;
- struct intel_uncore_box *box;
- int i, pkg;
-
- pkg = topology_logical_package_id(cpu);
- for (; *types; types++) {
- type = *types;
- pmu = type->pmus;
- for (i = 0; i < type->num_boxes; i++, pmu++) {
- if (pmu->boxes[pkg])
- continue;
- /* First cpu of a package allocates the box */
- box = uncore_alloc_box(type, cpu_to_node(cpu));
- if (!box)
- return -ENOMEM;
- box->pmu = pmu;
- box->pkgid = pkg;
- pmu->boxes[pkg] = box;
- }
- }
- return 0;
-}
-
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
int new_cpu)
{
@@ -1176,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
static int uncore_event_cpu_offline(unsigned int cpu)
{
- int target;
+ struct intel_uncore_type *type, **types = uncore_msr_uncores;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, pkg, target;
/* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
- return 0;
-
+ goto unref;
/* Find a new cpu to collect uncore events */
target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
@@ -1193,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
uncore_change_context(uncore_msr_uncores, cpu, target);
uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+ /* Clear the references */
+ pkg = topology_logical_package_id(cpu);
+ for (; *types; types++) {
+ type = *types;
+ pmu = type->pmus;
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ box = pmu->boxes[pkg];
+ if (box && atomic_dec_return(&box->refcnt) == 0)
+ uncore_box_exit(box);
+ }
+ }
return 0;
}
+static int allocate_boxes(struct intel_uncore_type **types,
+ unsigned int pkg, unsigned int cpu)
+{
+ struct intel_uncore_box *box, *tmp;
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ LIST_HEAD(allocated);
+ int i;
+
+ /* Try to allocate all required boxes */
+ for (; *types; types++) {
+ type = *types;
+ pmu = type->pmus;
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ if (pmu->boxes[pkg])
+ continue;
+ box = uncore_alloc_box(type, cpu_to_node(cpu));
+ if (!box)
+ goto cleanup;
+ box->pmu = pmu;
+ box->pkgid = pkg;
+ list_add(&box->active_list, &allocated);
+ }
+ }
+ /* Install them in the pmus */
+ list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+ list_del_init(&box->active_list);
+ box->pmu->boxes[pkg] = box;
+ }
+ return 0;
+
+cleanup:
+ list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+ list_del_init(&box->active_list);
+ kfree(box);
+ }
+ return -ENOMEM;
+}
+
static int uncore_event_cpu_online(unsigned int cpu)
{
- int target;
+ struct intel_uncore_type *type, **types = uncore_msr_uncores;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, ret, pkg, target;
+
+ pkg = topology_logical_package_id(cpu);
+ ret = allocate_boxes(types, pkg, cpu);
+ if (ret)
+ return ret;
+
+ for (; *types; types++) {
+ type = *types;
+ pmu = type->pmus;
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ box = pmu->boxes[pkg];
+ if (box && atomic_inc_return(&box->refcnt) == 1)
+ uncore_box_init(box);
+ }
+ }
/*
* Check if there is an online cpu in the package
@@ -1388,38 +1363,16 @@ static int __init intel_uncore_init(void)
if (cret && pret)
return -ENODEV;
- /*
- * Install callbacks. Core will call them for each online cpu.
- *
- * The first online cpu of each package allocates and takes
- * the refcounts for all other online cpus in that package.
- * If msrs are not enabled no allocation is required and
- * uncore_cpu_prepare() is not called for each online cpu.
- */
- if (!cret) {
- ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
- "perf/x86/intel/uncore:prepare",
- uncore_cpu_prepare, NULL);
- if (ret)
- goto err;
- } else {
- cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
- "perf/x86/intel/uncore:prepare",
- uncore_cpu_prepare, NULL);
- }
- first_init = 1;
- cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
- "perf/x86/uncore:starting",
- uncore_cpu_starting, uncore_cpu_dying);
- first_init = 0;
- cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
- "perf/x86/uncore:online",
- uncore_event_cpu_online, uncore_event_cpu_offline);
+ /* Install hotplug callbacks to setup the targets for each package */
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+ "perf/x86/intel/uncore:online",
+ uncore_event_cpu_online,
+ uncore_event_cpu_offline);
+ if (ret)
+ goto err;
return 0;
err:
- /* Undo box->init_box() */
- on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
return ret;
@@ -1428,9 +1381,7 @@ module_init(intel_uncore_init);
static void __exit intel_uncore_exit(void)
{
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
- cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
}
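allocate_boxes() uses an allocate-then-publish pattern: every box is staged on a private list first, and only once all allocations have succeeded are they installed where other code can see them; on -ENOMEM the staged list is simply freed, leaving no half-installed state. A generic sketch of the same shape (struct item and the slots array are hypothetical; kernel list and slab headers assumed):

	struct item {
		int idx;
		struct list_head node;
	};

	static int stage_then_publish(struct item **slots, int n)
	{
		struct item *it, *tmp;
		LIST_HEAD(staged);
		int i;

		for (i = 0; i < n; i++) {
			if (slots[i])
				continue;
			it = kzalloc(sizeof(*it), GFP_KERNEL);
			if (!it)
				goto cleanup;
			it->idx = i;
			list_add(&it->node, &staged);
		}
		/* Every allocation succeeded: publish only now. */
		list_for_each_entry_safe(it, tmp, &staged, node) {
			list_del_init(&it->node);
			slots[it->idx] = it;
		}
		return 0;

	cleanup:
		list_for_each_entry_safe(it, tmp, &staged, node) {
			list_del_init(&it->node);
			kfree(it);
		}
		return -ENOMEM;
	}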
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index e6832be714bc..dae2fedc1601 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
void hswep_uncore_cpu_init(void)
{
- int pkg = topology_phys_to_logical_pkg(0);
+ int pkg = boot_cpu_data.logical_proc_id;
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 34a46dc076d3..8167fdb67ae8 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -57,7 +57,7 @@
#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avoton/Rangeley */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 38711df3bcb5..2266f864b747 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
#else
static inline int __init microcode_init(void) { return 0; };
static inline void __init load_ucode_bsp(void) { }
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 195becc6f780..e793fc9a9b20 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -52,6 +52,21 @@ struct extended_sigtable {
#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
+static inline u32 intel_get_microcode_revision(void)
+{
+ u32 rev, dummy;
+
+ native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+
+ /* As documented in the SDM: Do a CPUID 1 here */
+ native_cpuid_eax(1);
+
+ /* get the current revision from MSR 0x8B */
+ native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
+
+ return rev;
+}
+
#ifdef CONFIG_MICROCODE_INTEL
extern void __init load_ucode_intel_bsp(void);
extern void load_ucode_intel_ap(void);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index eaf100508c36..1be64da0384e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -219,6 +219,24 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
: "memory");
}
+#define native_cpuid_reg(reg) \
+static inline unsigned int native_cpuid_##reg(unsigned int op) \
+{ \
+ unsigned int eax = op, ebx, ecx = 0, edx; \
+ \
+ native_cpuid(&eax, &ebx, &ecx, &edx); \
+ \
+ return reg; \
+}
+
+/*
+ * Native CPUID functions returning a single datum.
+ */
+native_cpuid_reg(eax)
+native_cpuid_reg(ebx)
+native_cpuid_reg(ecx)
+native_cpuid_reg(edx)
+
static inline void load_cr3(pgd_t *pgdir)
{
write_cr3(__pa(pgdir));
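The native_cpuid_reg() macro stamps out four single-register accessors (native_cpuid_eax/ebx/ecx/edx) that bypass paravirt indirection, which is what very early microcode code needs. A usage sketch, with the field layout taken from the CPUID leaf-1 signature format:

	static unsigned int base_cpu_family(void)
	{
		/* %eax of CPUID leaf 1 holds the stepping/model/family
		 * signature; the generated helper forces %ecx to 0 internally. */
		unsigned int sig = native_cpuid_eax(1);

		return (sig >> 8) & 0xf;	/* base family field */
	}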
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index a3269c897ec5..2e41c50ddf47 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -58,7 +58,7 @@ get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
if (task == current)
return __builtin_frame_address(0);
- return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp;
+ return &((struct inactive_task_frame *)task->thread.sp)->bp;
}
#else
static inline unsigned long *
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 5cb436acd463..fcc5cd387fd1 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -36,7 +36,10 @@ static inline void prepare_switch_to(struct task_struct *prev,
asmlinkage void ret_from_fork(void);
-/* data that is pointed to by thread.sp */
+/*
+ * This is the structure pointed to by thread.sp for an inactive task. The
+ * order of the fields must match the code in __switch_to_asm().
+ */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
unsigned long r15;
@@ -48,6 +51,11 @@ struct inactive_task_frame {
unsigned long di;
#endif
unsigned long bx;
+
+ /*
+ * These two fields must be together. They form a stack frame header,
+ * needed by get_frame_pointer().
+ */
unsigned long bp;
unsigned long ret_addr;
};
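The new comment pins down why bp and ret_addr must stay adjacent: together they look exactly like the frame header a `push %bp; call` sequence leaves on the stack, which is what lets the stacktrace code hand the unwinder &frame->bp for a sleeping task. Spelled out as a sketch:

	/* Sketch: for an inactive task, thread.sp points at the frame set up
	 * by __switch_to_asm(); treating &frame->bp as a frame pointer works
	 * only because ret_addr immediately follows bp. */
	static unsigned long *sleeping_task_frame_pointer(struct task_struct *task)
	{
		struct inactive_task_frame *frame =
			(struct inactive_task_frame *)task->thread.sp;

		return &frame->bp;
	}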
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 945e512a112a..52f352b063fd 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ir_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -2115,6 +2117,7 @@ static inline void __init check_timer(void)
if (idx != -1 && irq_trigger(idx))
unmask_ioapic_irq(irq_get_chip_data(0));
}
+ irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
if (timer_irq_works()) {
if (disable_timer_pin_1 > 0)
@@ -2136,6 +2139,7 @@ static inline void __init check_timer(void)
* legacy devices should be connected to IO APIC #0
*/
replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+ irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
legacy_pic->unmask(0);
if (timer_irq_works()) {
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 71cae73a5076..1d3167269a67 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -309,15 +309,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
/* get information required for multi-node processors */
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
- u32 eax, ebx, ecx, edx;
- cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
- node_id = ecx & 7;
-
- /* get compute unit information */
- smp_num_siblings = ((ebx >> 8) & 3) + 1;
- c->x86_max_cores /= smp_num_siblings;
- c->cpu_core_id = ebx & 0xff;
+ node_id = cpuid_ecx(0x8000001e) & 7;
/*
* We may have multiple LLCs if L3 caches exist, so check if we
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index dc1697ca5191..9bab7a8a4293 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1221,7 +1221,7 @@ static __init int setup_disablecpuid(char *arg)
{
int bit;
- if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+ if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
setup_clear_cpu_cap(bit);
else
return 0;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fcd484d2bb03..203f860d2ab3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -14,6 +14,7 @@
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
+#include <asm/microcode_intel.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -78,14 +79,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
- unsigned lower_word;
-
- wrmsr(MSR_IA32_UCODE_REV, 0, 0);
- /* Required by the SDM */
- sync_core();
- rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
- }
+ if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+ c->microcode = intel_get_microcode_revision();
/*
* Atom erratum AAE44/AAF40/AAG38/AAH41:
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 00ef43233e03..537c6647d84c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
{
unsigned long when = jiffies + interval;
unsigned long flags;
local_irq_save(flags);
- if (timer_pending(t)) {
- if (time_before(when, t->expires))
- mod_timer(t, when);
- } else {
- t->expires = round_jiffies(when);
- add_timer_on(t, smp_processor_id());
- }
+ if (!timer_pending(t) || time_before(when, t->expires))
+ mod_timer(t, round_jiffies(when));
local_irq_restore(flags);
}
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
done:
__this_cpu_write(mce_next_interval, iv);
- __restart_timer(t, iv);
+ __start_timer(t, iv);
}
/*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned long iv = __this_cpu_read(mce_next_interval);
- __restart_timer(t, interval);
+ __start_timer(t, interval);
if (interval < iv)
__this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
}
}
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
{
unsigned long iv = check_interval * HZ;
if (mca_cfg.ignore_ce || !iv)
return;
- per_cpu(mce_next_interval, cpu) = iv;
-
- t->expires = round_jiffies(jiffies + iv);
- add_timer_on(t, cpu);
+ this_cpu_write(mce_next_interval, iv);
+ __start_timer(t, iv);
}
static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
unsigned int cpu = smp_processor_id();
setup_pinned_timer(t, mce_timer_fn, cpu);
- mce_start_timer(cpu, t);
+ mce_start_timer(t);
}
/* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
static int mce_cpu_online(unsigned int cpu)
{
- struct timer_list *t = &per_cpu(mce_timer, cpu);
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
int ret;
mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
return ret;
}
mce_reenable_cpu();
- mce_start_timer(cpu, t);
+ mce_start_timer(t);
return 0;
}
static int mce_cpu_pre_down(unsigned int cpu)
{
- struct timer_list *t = &per_cpu(mce_timer, cpu);
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
mce_disable_cpu();
del_timer_sync(t);
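__start_timer() works because mod_timer() already handles both states: it rearms a pending timer and starts an idle one, so the old add_timer_on()/mod_timer() split collapses into one guarded call. A minimal restatement of the guard, which is the interesting part:

	static void start_or_advance(struct timer_list *t, unsigned long when)
	{
		/* Arm an idle timer, or pull a pending one earlier; never
		 * push an already-pending timer later. */
		if (!timer_pending(t) || time_before(when, t->expires))
			mod_timer(t, round_jiffies(when));
	}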
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 6a31e2691f3a..079e81733a58 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
reget:
if (!get_builtin_microcode(&cp, family)) {
#ifdef CONFIG_BLK_DEV_INITRD
- cp = find_cpio_data(ucode_path, (void *)initrd_start,
- initrd_end - initrd_start, NULL);
+ if (!initrd_gone)
+ cp = find_cpio_data(ucode_path, (void *)initrd_start,
+ initrd_end - initrd_start, NULL);
#endif
if (!(cp.data && cp.size)) {
/*
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 2af69d27da62..73102d932760 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -46,6 +46,8 @@
static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr = true;
+bool initrd_gone;
+
LIST_HEAD(microcode_cache);
/*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
static int __init save_microcode_in_initrd(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
+ int ret = -EINVAL;
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
if (c->x86 >= 6)
- return save_microcode_in_initrd_intel();
+ ret = save_microcode_in_initrd_intel();
break;
case X86_VENDOR_AMD:
if (c->x86 >= 0x10)
- return save_microcode_in_initrd_amd(c->x86);
+ ret = save_microcode_in_initrd_amd(c->x86);
break;
default:
break;
}
- return -EINVAL;
+ initrd_gone = true;
+
+ return ret;
}
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
* has the virtual address of the beginning of the initrd. It also
* possibly relocates the ramdisk. In either case, initrd_start contains
* the updated address so use that instead.
+ *
+ * initrd_gone is for the hotplug case where we've thrown out initrd
+ * already.
*/
- if (!use_pa && initrd_start)
- start = initrd_start;
+ if (!use_pa) {
+ if (initrd_gone)
+ return (struct cpio_data){ NULL, 0, "" };
+ if (initrd_start)
+ start = initrd_start;
+ }
return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index b624b54912e1..8325d8a09ab0 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -41,7 +41,7 @@
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
struct microcode_intel *intel_ucode_patch;
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -150,7 +150,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
{
struct ucode_patch *p;
- p = kzalloc(size, GFP_KERNEL);
+ p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -368,26 +368,6 @@ next:
return patch;
}
-static void cpuid_1(void)
-{
- /*
- * According to the Intel SDM, Volume 3, 9.11.7:
- *
- * CPUID returns a value in a model specific register in
- * addition to its usual register return values. The
- * semantics of CPUID cause it to deposit an update ID value
- * in the 64-bit model-specific register at address 08BH
- * (IA32_BIOS_SIGN_ID). If no update is present in the
- * processor, the value in the MSR remains unmodified.
- *
- * Use native_cpuid -- this code runs very early and we don't
- * want to mess with paravirt.
- */
- unsigned int eax = 1, ebx, ecx = 0, edx;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-}
-
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
unsigned int val[2];
@@ -410,15 +390,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
csig.pf = 1 << ((val[1] >> 18) & 7);
}
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_1();
-
- /* get the current revision from MSR 0x8B */
- native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
- csig.rev = val[1];
+ csig.rev = intel_get_microcode_revision();
uci->cpu_sig = csig;
uci->valid = 1;
@@ -602,7 +575,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
struct microcode_intel *mc;
- unsigned int val[2];
+ u32 rev;
mc = uci->mc;
if (!mc)
@@ -610,21 +583,16 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
/* write microcode via MSR 0x79 */
native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_1();
- /* get the current revision from MSR 0x8B */
- native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
- if (val[1] != mc->hdr.rev)
+ rev = intel_get_microcode_revision();
+ if (rev != mc->hdr.rev)
return -1;
#ifdef CONFIG_X86_64
/* Flush global tlb. This is precaution. */
flush_tlb_early();
#endif
- uci->cpu_sig.rev = val[1];
+ uci->cpu_sig.rev = rev;
if (early)
print_ucode(uci);
@@ -639,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
struct ucode_cpu_info uci;
struct cpio_data cp;
- /*
- * AP loading didn't find any microcode patch, no need to save anything.
- */
- if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
- return 0;
-
if (!load_builtin_intel_microcode(&cp))
cp = find_microcode_in_initrd(ucode_path, false);
@@ -660,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
return 0;
}
-
/*
* @res_patch, output: a pointer to the patch we found.
*/
@@ -804,8 +765,8 @@ static int apply_microcode_intel(int cpu)
struct microcode_intel *mc;
struct ucode_cpu_info *uci;
struct cpuinfo_x86 *c;
- unsigned int val[2];
static int prev_rev;
+ u32 rev;
/* We should bind the task to the CPU */
if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -822,33 +783,28 @@ static int apply_microcode_intel(int cpu)
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
- wrmsrl(MSR_IA32_UCODE_REV, 0);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_1();
- /* get the current revision from MSR 0x8B */
- rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+ rev = intel_get_microcode_revision();
- if (val[1] != mc->hdr.rev) {
+ if (rev != mc->hdr.rev) {
pr_err("CPU%d update to revision 0x%x failed\n",
cpu, mc->hdr.rev);
return -1;
}
- if (val[1] != prev_rev) {
+ if (rev != prev_rev) {
pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
- val[1],
+ rev,
mc->hdr.date & 0xffff,
mc->hdr.date >> 24,
(mc->hdr.date >> 16) & 0xff);
- prev_rev = val[1];
+ prev_rev = rev;
}
c = &cpu_data(cpu);
- uci->cpu_sig.rev = val[1];
- c->microcode = val[1];
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
return 0;
}
@@ -860,7 +816,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
int new_rev = uci->cpu_sig.rev;
unsigned int leftover = size;
- unsigned int curr_mc_size = 0;
+ unsigned int curr_mc_size = 0, new_mc_size = 0;
unsigned int csig, cpf;
while (leftover) {
@@ -901,6 +857,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
+ new_mc_size = mc_size;
mc = NULL; /* trigger new vmalloc */
}
@@ -926,7 +883,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
* permanent memory. So it will be loaded early when a CPU is hot added
* or resumes.
*/
- save_mc_for_early(new_mc, curr_mc_size);
+ save_mc_for_early(new_mc, new_mc_size);
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
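The kzalloc() change in __alloc_microcode_buf() is a classic size-mixup fix: `size` is the length of the microcode blob, not of the bookkeeping struct, so passing it could over-allocate or, worse, under-allocate the struct. The corrected shape, assuming the blob is duplicated separately by the surrounding code (the kmemdup() call here is that assumption):

	p = kzalloc(sizeof(*p), GFP_KERNEL);		/* the struct itself */
	if (!p)
		return ERR_PTR(-ENOMEM);

	p->data = kmemdup(data, size, GFP_KERNEL);	/* the 'size'-byte blob */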
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index e4e97a5355ce..de7234401275 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -9,6 +9,7 @@
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
#include <asm/traps.h>
#include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
* it will #GP. Make sure it is replaced after the memset().
*/
if (static_cpu_has(X86_FEATURE_XSAVES))
- state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+ state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+ xfeatures_mask;
if (static_cpu_has(X86_FEATURE_FXSR))
fpstate_init_fxstate(&state->fxsave);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 85e87b46c318..dc6ba5bda9fc 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
} else {
struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+ irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
disable_irq(hdev->irq);
irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index be3a49ee0356..e41af597aed8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
crystal_khz = 24000; /* 24.0 MHz */
break;
case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_ATOM_DENVERTON:
crystal_khz = 25000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT:
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 4443e499f279..23d15565d02a 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -6,6 +6,21 @@
#define FRAME_HEADER_SIZE (sizeof(long) * 2)
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+ unsigned long val; \
+ if (task == current) \
+ val = READ_ONCE(x); \
+ else \
+ val = READ_ONCE_NOCHECK(x); \
+ val; \
+})
+
static void unwind_dump(struct unwind_state *state, unsigned long *sp)
{
static bool dumped_before = false;
@@ -48,7 +63,8 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
if (state->regs && user_mode(state->regs))
return 0;
- addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
+ addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+ addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
addr_p);
return __kernel_text_address(addr) ? addr : 0;
@@ -162,7 +178,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (state->regs)
next_bp = (unsigned long *)state->regs->bp;
else
- next_bp = (unsigned long *)*state->bp;
+ next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
/* is the next frame pointer an encoded pointer to pt_regs? */
regs = decode_frame_pointer(next_bp);
@@ -207,6 +223,16 @@ bool unwind_next_frame(struct unwind_state *state)
return true;
bad_address:
+ /*
+ * When unwinding a non-current task, the task might actually be
+ * running on another CPU, in which case it could be modifying its
+ * stack while we're reading it. This is generally not a problem and
+ * can be ignored as long as the caller understands that unwinding
+ * another task will not always succeed.
+ */
+ if (state->task != current)
+ goto the_end;
+
if (state->regs) {
printk_deferred_once(KERN_WARNING
"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 56628a44668b..cedbba0f3402 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr,
+ void *data,
+ unsigned int size)
+{
+ int rc;
+ ulong linear;
+
+ rc = linearize(ctxt, addr, size, true, &linear);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
&ctxt->exception);
}
-/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
rpl = selector & 3;
- /* NULL selector is not valid for TR, CS and SS (except for long mode) */
- if ((seg == VCPU_SREG_CS
- || (seg == VCPU_SREG_SS
- && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
- || seg == VCPU_SREG_TR)
- && null_selector)
- goto exception;
-
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
- if (null_selector) /* for NULL selector skip all following checks */
+ /* NULL selector is not valid for TR, CS and (except for long mode) SS */
+ if (null_selector) {
+ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+ goto exception;
+
+ if (seg == VCPU_SREG_SS) {
+ if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+ goto exception;
+
+ /*
+ * ctxt->ops->set_segment expects the CPL to be in
+ * SS.DPL, so fake an expand-up 32-bit data segment.
+ */
+ seg_desc.type = 3;
+ seg_desc.p = 1;
+ seg_desc.s = 1;
+ seg_desc.dpl = cpl;
+ seg_desc.d = 1;
+ seg_desc.g = 1;
+ }
+
+ /* Skip all following checks */
goto load;
+ }
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
+
+ /*
+ * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+ * they can load it at CPL<3 (Intel's manual says only LSS can,
+ * but it's wrong).
+ *
+ * However, the Intel manual says that putting IST=1/DPL=3 in
+ * an interrupt gate will result in SS=3 (the AMD manual instead
+ * says it doesn't), so allow SS=3 in __load_segment_descriptor
+ * and only forbid it here.
+ */
+ if (seg == VCPU_SREG_SS && selector == 3 &&
+ ctxt->mode == X86EMUL_MODE_PROT64)
+ return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
@@ -3685,8 +3727,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
- return segmented_write(ctxt, ctxt->dst.addr.mem,
- &desc_ptr, 2 + ctxt->op_bytes);
+ return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+ &desc_ptr, 2 + ctxt->op_bytes);
}
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
else
size = offsetof(struct fxregs_state, xmm_space[0]);
- return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+ return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
}
static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
if (rc != X86EMUL_CONTINUE)
return rc;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5fe290c1b7d8..2f6ef5121a4c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2426,3 +2426,9 @@ void kvm_lapic_init(void)
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}
+
+void kvm_lapic_exit(void)
+{
+ static_key_deferred_flush(&apic_hw_disabled);
+ static_key_deferred_flush(&apic_sw_disabled);
+}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index e0c80233b3e1..ff8039d61672 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -110,6 +110,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
void kvm_lapic_init(void);
+void kvm_lapic_exit(void);
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2f22810a7e0c..e52c9088660f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
memcpy(dest, xsave, XSAVE_HDR_OFFSET);
/* Set XSTATE_BV */
+ xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
/*
@@ -3342,6 +3343,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
switch (cap->cap) {
case KVM_CAP_HYPERV_SYNIC:
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return -EINVAL;
return kvm_hv_activate_synic(vcpu);
default:
return -EINVAL;
@@ -6045,6 +6048,7 @@ out:
void kvm_arch_exit(void)
{
+ kvm_lapic_exit();
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -6168,7 +6172,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+ return emulator_write_emulated(ctxt, rip, instruction, 3,
+ &ctxt->exception);
}
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 324e5713d386..af59f808742f 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
*/
- if (info->si_addr == (void *)-1) {
+ if (info->si_addr == (void __user *)-1) {
err = -EINVAL;
goto err_out;
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e76d1af60f7a..bb660e53cbd6 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1172,6 +1172,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)image;
prog->jited = 1;
+ } else {
+ prog = orig_prog;
}
out_addrs:
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 3cd69832d7f4..3961103e9176 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
},
},
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
+ {
+ .callback = set_nouse_crs,
+ .ident = "Supermicro X8DTH",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
+ DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
+ },
+ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
{
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 936a488d6cf6..274dfc481849 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -210,6 +210,70 @@ int __init efi_memblock_x86_reserve_range(void)
return 0;
}
+#define OVERFLOW_ADDR_SHIFT (64 - EFI_PAGE_SHIFT)
+#define OVERFLOW_ADDR_MASK (U64_MAX << OVERFLOW_ADDR_SHIFT)
+#define U64_HIGH_BIT (~(U64_MAX >> 1))
+
+static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
+{
+ u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
+ u64 end_hi = 0;
+ char buf[64];
+
+ if (md->num_pages == 0) {
+ end = 0;
+ } else if (md->num_pages > EFI_PAGES_MAX ||
+ EFI_PAGES_MAX - md->num_pages <
+ (md->phys_addr >> EFI_PAGE_SHIFT)) {
+ end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
+ >> OVERFLOW_ADDR_SHIFT;
+
+ if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
+ end_hi += 1;
+ } else {
+ return true;
+ }
+
+ pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
+
+ if (end_hi) {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end_hi, end);
+ } else {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end);
+ }
+ return false;
+}
+
+static void __init efi_clean_memmap(void)
+{
+ efi_memory_desc_t *out = efi.memmap.map;
+ const efi_memory_desc_t *in = out;
+ const efi_memory_desc_t *end = efi.memmap.map_end;
+ int i, n_removal;
+
+ for (i = n_removal = 0; in < end; i++) {
+ if (efi_memmap_entry_valid(in, i)) {
+ if (out != in)
+ memcpy(out, in, efi.memmap.desc_size);
+ out = (void *)out + efi.memmap.desc_size;
+ } else {
+ n_removal++;
+ }
+ in = (void *)in + efi.memmap.desc_size;
+ }
+
+ if (n_removal > 0) {
+ u64 size = efi.memmap.nr_map - n_removal;
+
+ pr_warn("Removing %d invalid memory map entries.\n", n_removal);
+ efi_memmap_install(efi.memmap.phys_map, size);
+ }
+}
+
void __init efi_print_memmap(void)
{
efi_memory_desc_t *md;
@@ -472,6 +536,8 @@ void __init efi_init(void)
}
}
+ efi_clean_memmap();
+
if (efi_enabled(EFI_DBG))
efi_print_memmap();
}
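efi_memmap_entry_valid() guards against descriptors whose page count pushes the end address past 2^64. A standalone model of the test (EFI_PAGE_SHIFT is 12; the EFI_PAGES_MAX definition below mirrors what the kernel is assumed to use):

	#include <stdbool.h>
	#include <stdint.h>

	#define EFI_PAGE_SHIFT	12
	#define EFI_PAGES_MAX	(UINT64_MAX >> EFI_PAGE_SHIFT)

	static bool range_valid(uint64_t phys_addr, uint64_t num_pages)
	{
		/* Non-empty, and (phys_addr >> 12) + num_pages must not
		 * exceed the number of addressable 4K pages. */
		return num_pages != 0 &&
		       num_pages <= EFI_PAGES_MAX &&
		       EFI_PAGES_MAX - num_pages >= (phys_addr >> EFI_PAGE_SHIFT);
	}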
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 319148bd4b05..2f25a363068c 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
efi_scratch.use_pgd = true;
/*
+ * Certain firmware versions are way too sentimental and still believe
+ * they are exclusive and unquestionable owners of the first physical page,
+ * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+ * (but then write-access it later during SetVirtualAddressMap()).
+ *
+ * Create a 1:1 mapping for this page, to avoid triple faults during early
+ * boot with such firmware. We are free to hand this page to the BIOS,
+ * as trim_bios_range() will reserve the first page and isolate it away
+ * from memory allocators anyway.
+ */
+ if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+ pr_err("Failed to create 1:1 mapping for the first page!\n");
+ return 1;
+ }
+
+ /*
* When making calls to the firmware everything needs to be 1:1
* mapped and addressable with 32-bit pointers. Map the kernel
* text and allocate a new stack because we can't rely on the
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 10aca63a50d7..30031d5293c4 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -214,7 +214,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
new_size = efi.memmap.desc_size * num_entries;
- new_phys = memblock_alloc(new_size, 0);
+ new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {
pr_err("Could not allocate boot services memmap\n");
return;
@@ -355,7 +355,7 @@ void __init efi_free_boot_services(void)
}
new_size = efi.memmap.desc_size * num_entries;
- new_phys = memblock_alloc(new_size, 0);
+ new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {
pr_err("Failed to allocate new EFI memmap\n");
return;
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 61b5ed2b7d40..90e4f2a6625b 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -15,7 +15,7 @@ obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
# SPI Devices
-obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
# I2C Devices
obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
index 30c601b399ee..27186ad654c9 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
@@ -11,6 +11,7 @@
* of the License.
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/sfi.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -34,6 +35,9 @@ static void __init *spidev_platform_data(void *info)
{
struct spi_board_info *spi_info = info;
+ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+ return ERR_PTR(-ENODEV);
+
spi_info->mode = SPI_MODE_0;
spi_info->controller_data = &spidev_spi_chip;
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 848e8568fb3c..8fd4be610607 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
void cpu_reset(void)
{
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
local_irq_disable();
/*
* We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must