Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/dts/stm32f429.dtsi        |  1
-rw-r--r--  arch/arm/boot/dts/stm32f7-pinctrl.dtsi  | 82
-rw-r--r--  arch/arm/kernel/unwind.c                | 25
-rw-r--r--  arch/arm/mach-sa1100/jornada720_ssp.c   |  5
-rw-r--r--  arch/arm/vfp/entry.S                    |  7
-rw-r--r--  arch/arm/vfp/vfphw.S                    |  6
-rw-r--r--  arch/parisc/include/asm/pgtable.h       |  8
-rw-r--r--  arch/parisc/kernel/kexec.c              |  2
-rw-r--r--  arch/riscv/kernel/pi/Makefile           |  2
-rw-r--r--  arch/riscv/kernel/vmlinux.lds.S         |  7
-rw-r--r--  arch/x86/events/core.c                  |  6
-rw-r--r--  arch/x86/events/intel/ds.c              | 56
-rw-r--r--  arch/x86/include/asm/perf_event.h       |  3
-rw-r--r--  arch/x86/kernel/amd_nb.c                |  2
-rw-r--r--  arch/x86/lib/retpoline.S                |  4
15 files changed, 170 insertions(+), 46 deletions(-)
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index c9e05e3540d6..00bf53f99c29 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -387,6 +387,7 @@
interrupt-names = "tx", "rx0", "rx1", "sce";
resets = <&rcc STM32F4_APB1_RESET(CAN2)>;
clocks = <&rcc 0 STM32F4_APB1_CLOCK(CAN2)>;
+ st,can-secondary;
st,gcan = <&gcan>;
status = "disabled";
};
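[Note: the new "st,can-secondary" property marks this bxCAN instance as the secondary controller, which shares filter banks with the primary. As a hedged illustration only (not the actual bxcan driver code), a platform driver could pick the flag up from its node with the standard of_property_read_bool() helper:]

/*
 * Hypothetical sketch, not the in-tree bxcan driver: probing the new
 * "st,can-secondary" flag from the device node.
 */
#include <linux/of.h>
#include <linux/platform_device.h>

static int stm32_can_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	bool secondary = of_property_read_bool(np, "st,can-secondary");

	/* A secondary bxCAN instance shares filter banks with the primary. */
	dev_info(&pdev->dev, "bxCAN role: %s\n",
		 secondary ? "secondary" : "primary");
	return 0;
}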
diff --git a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
index c8e6c52fb248..9f65403295ca 100644
--- a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi
@@ -283,6 +283,88 @@
slew-rate = <2>;
};
};
+
+ can1_pins_a: can1-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 12, AF9)>; /* CAN1_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 11, AF9)>; /* CAN1_RX */
+ bias-pull-up;
+ };
+ };
+
+ can1_pins_b: can1-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 9, AF9)>; /* CAN1_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 8, AF9)>; /* CAN1_RX */
+ bias-pull-up;
+ };
+ };
+
+ can1_pins_c: can1-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('D', 1, AF9)>; /* CAN1_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 0, AF9)>; /* CAN1_RX */
+ bias-pull-up;
+
+ };
+ };
+
+ can1_pins_d: can1-3 {
+ pins1 {
+ pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('H', 14, AF9)>; /* CAN1_RX */
+ bias-pull-up;
+
+ };
+ };
+
+ can2_pins_a: can2-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 6, AF9)>; /* CAN2_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 5, AF9)>; /* CAN2_RX */
+ bias-pull-up;
+ };
+ };
+
+ can2_pins_b: can2-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 13, AF9)>; /* CAN2_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 12, AF9)>; /* CAN2_RX */
+ bias-pull-up;
+ };
+ };
+
+ can3_pins_a: can3-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 15, AF11)>; /* CAN3_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 8, AF11)>; /* CAN3_RX */
+ bias-pull-up;
+ };
+ };
+
+ can3_pins_b: can3-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 4, AF11)>; /* CAN3_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 3, AF11)>; /* CAN3_RX */
+ bias-pull-up;
+ };
+ };
};
};
};
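[Note: all of these entries use the STM32_PINMUX() macro from include/dt-bindings/pinctrl/stm32-pinfunc.h. A sketch of the encoding, paraphrased from that header; the AF9 value rests on the assumption that alternate functions are offset by one, with GPIO being 0:]

/* Paraphrase of the dt-bindings encoding: bank/line in the upper bits,
 * mux mode in the low byte. */
#define PIN_NO(port, line)             (((port) - 'A') * 0x10 + (line))
#define STM32_PINMUX(port, line, mode) ((PIN_NO(port, line) << 8) | (mode))
#define AF9 0xa	/* assumption: AF0 is 0x1, so AFn == n + 1 */

/* PA12 as CAN1_TX: bank A, line 12, alternate function 9 */
_Static_assert(STM32_PINMUX('A', 12, AF9) == 0xc0a, "PA12/AF9 encoding");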
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 53be7ea6181b..9d2192156087 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -308,6 +308,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
return URC_OK;
}
+static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
+{
+ unsigned long bytes = 0;
+ unsigned long insn;
+ unsigned long result = 0;
+
+ /*
+ * unwind_get_byte() will advance `ctrl` one instruction at a time, so
+ * loop until we get an instruction byte where bit 7 is not set.
+ *
+ * Note: This decodes a maximum of 4 bytes to output 28 bits data where
+ * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence
+ * it is sufficient for unwinding the stack.
+ */
+ do {
+ insn = unwind_get_byte(ctrl);
+ result |= (insn & 0x7f) << (bytes * 7);
+ bytes++;
+ } while (!!(insn & 0x80) && (bytes != sizeof(result)));
+
+ return result;
+}
+
/*
* Execute the current unwind instruction.
*/
@@ -361,7 +384,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if (ret)
goto error;
} else if (insn == 0xb2) {
- unsigned long uleb128 = unwind_get_byte(ctrl);
+ unsigned long uleb128 = unwind_decode_uleb128(ctrl);
ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
} else {
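[Note: the old code read a single byte as the operand of the 0xb2 "vsp = vsp + 0x204 + (uleb128 << 2)" opcode, truncating multi-byte ULEB128 values. Below is a minimal standalone model of the fixed decoder, runnable in userspace. One caveat: sizeof(unsigned long) is 4 on ARM32, so the kernel loop caps at 4 bytes; on a 64-bit host this model would allow 8.]

#include <assert.h>
#include <stddef.h>

/* Standalone model of unwind_decode_uleb128(), fed from a byte array. */
static unsigned long uleb128_decode(const unsigned char *p, size_t *len)
{
	unsigned long bytes = 0, result = 0;
	unsigned char insn;

	do {
		insn = p[bytes];
		result |= (unsigned long)(insn & 0x7f) << (bytes * 7);
		bytes++;
	} while ((insn & 0x80) && bytes != sizeof(result));

	*len = bytes;
	return result;
}

int main(void)
{
	/* 0xe8 has bit 7 set, so decoding continues: 0x68 | (0x07 << 7) */
	const unsigned char buf[] = { 0xe8, 0x07 };
	size_t len;

	assert(uleb128_decode(buf, &len) == 0x3e8 && len == 2); /* 1000 */
	return 0;
}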
diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
index 67f72ca984b2..1956b095e699 100644
--- a/arch/arm/mach-sa1100/jornada720_ssp.c
+++ b/arch/arm/mach-sa1100/jornada720_ssp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* arch/arm/mac-sa1100/jornada720_ssp.c
*
* Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
@@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags;
/**
* jornada_ssp_reverse - reverses input byte
+ * @byte: input byte to reverse
*
* we need to reverse all data we receive from the mcu due to its physical location
* returns : 01110111 -> 11101110
@@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse);
/**
* jornada_ssp_byte - waits for ready ssp bus and sends byte
+ * @byte: input byte to transmit
*
* waits for fifo buffer to clear and then transmits, if it doesn't then we will
* timeout after <timeout> rounds. Needs mcu running before its called.
@@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte);
/**
* jornada_ssp_inout - decide if input is command or trading byte
+ * @byte: input byte to send (may be %TXDUMMY)
*
* returns : (jornada_ssp_byte(byte)) on success
* : %-ETIMEDOUT on timeout failure
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 7483ef8bccda..62206ef25037 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -23,6 +23,9 @@
@
ENTRY(do_vfp)
mov r1, r10
- mov r3, r9
- b vfp_entry
+ str lr, [sp, #-8]!
+ add r3, sp, #4
+ str r9, [r3]
+ bl vfp_entry
+ ldr pc, [sp], #8
ENDPROC(do_vfp)
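[Note: a reading of the new do_vfp stack frame, reconstructed from the diff rather than taken from any in-tree comment: lr and r9 now travel on the stack instead of in registers, so vfp_entry can be a real call and the vfphw.S paths below can pop their return address.]

/* Model of the 8-byte frame do_vfp now carves out (reconstruction). */
struct do_vfp_frame {
	unsigned long saved_lr;	/* sp+0: restored by "ldr pc, [sp], #8" */
	unsigned long saved_r9;	/* sp+4: vfphw.S does "mov sp, r3; pop {lr}" */
};
/* r3 = &frame.saved_r9 is passed to vfp_entry in place of "mov r3, r9". */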
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 4d8478264d82..a4610d0f3215 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -172,13 +172,14 @@ vfp_hw_state_valid:
@ out before setting an FPEXC that
@ stops us reading stuff
VFPFMXR FPEXC, r1 @ Restore FPEXC last
+ mov sp, r3 @ we think we have handled things
+ pop {lr}
sub r2, r2, #4 @ Retry current instruction - if Thumb
str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
@ else it's one 32-bit instruction, so
@ always subtract 4 from the following
@ instruction address.
- mov lr, r3 @ we think we have handled things
local_bh_enable_and_ret:
adr r0, .
mov r1, #SOFTIRQ_DISABLE_OFFSET
@@ -209,8 +210,9 @@ skip:
process_exception:
DBGSTR "bounce"
+ mov sp, r3 @ setup for a return to the user code.
+ pop {lr}
mov r2, sp @ nothing stacked - regdump is at TOS
- mov lr, r3 @ setup for a return to the user code.
@ Now call the C code to package up the bounce to the support code
@ r0 holds the trigger instruction
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index e2950f5db7c9..e715df5385d6 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -413,12 +413,12 @@ extern void paging_init (void);
* For the 64bit version, the offset is extended by 32bit.
*/
#define __swp_type(x) ((x).val & 0x1f)
-#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \
- (((x).val >> 8) & ~0x7) )
+#define __swp_offset(x) ( (((x).val >> 5) & 0x7) | \
+ (((x).val >> 10) << 3) )
#define __swp_entry(type, offset) ((swp_entry_t) { \
((type) & 0x1f) | \
- ((offset & 0x7) << 6) | \
- ((offset & ~0x7) << 8) })
+ ((offset & 0x7) << 5) | \
+ ((offset >> 3) << 10) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
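[Note: the rewritten macros pack the low three offset bits into bits 5-7 and the remaining bits at bit 10 and above, leaving pte bits 8-9 untouched. A userspace model follows; it is an assumption that mirrors the macros above, not kernel code, and it demonstrates that the new packing round-trips losslessly:]

#include <assert.h>

#define SWP_ENTRY(type, off)	(((type) & 0x1fUL) | \
				 (((off) & 0x7UL) << 5) | \
				 (((off) >> 3) << 10))
#define SWP_TYPE(val)	((val) & 0x1f)
#define SWP_OFFSET(val)	((((val) >> 5) & 0x7) | (((val) >> 10) << 3))

int main(void)
{
	unsigned long off;

	/* Every offset must survive an encode/decode round trip. */
	for (off = 0; off < (1UL << 20); off++)
		assert(SWP_OFFSET(SWP_ENTRY(0x11, off)) == off &&
		       SWP_TYPE(SWP_ENTRY(0x11, off)) == 0x11);
	return 0;
}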
diff --git a/arch/parisc/kernel/kexec.c b/arch/parisc/kernel/kexec.c
index 5eb7f30edc1f..db57345a9daf 100644
--- a/arch/parisc/kernel/kexec.c
+++ b/arch/parisc/kernel/kexec.c
@@ -4,6 +4,8 @@
#include <linux/console.h>
#include <linux/kexec.h>
#include <linux/delay.h>
+#include <linux/reboot.h>
+
#include <asm/cacheflush.h>
#include <asm/sections.h>
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index 5d7cb991f2b8..7b593d44c712 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -22,7 +22,7 @@ KCOV_INSTRUMENT := n
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
--remove-section=.note.gnu.property \
- --prefix-alloc-sections=.init
+ --prefix-alloc-sections=.init.pi
$(obj)/%.pi.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index f03b5697f8e0..e5f9f4677bbf 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -84,11 +84,8 @@ SECTIONS
__init_data_begin = .;
INIT_DATA_SECTION(16)
- /* Those sections result from the compilation of kernel/pi/string.c */
- .init.pidata : {
- *(.init.srodata.cst8*)
- *(.init__bug_table*)
- *(.init.sdata*)
+ .init.pi : {
+ *(.init.pi*)
}
.init.bss : {
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d096b04bf80e..9d248703cbdd 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1703,10 +1703,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
perf_sample_data_init(&data, 0, event->hw.last_period);
- if (has_branch_stack(event)) {
- data.br_stack = &cpuc->lbr_stack;
- data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
- }
+ if (has_branch_stack(event))
+ perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
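[Note: perf_sample_save_brstack() replaces the open-coded assignment and, unlike it, also accounts the branch stack in the sample's dynamic size. Roughly the shape of the helper, paraphrased from include/linux/perf_event.h of this era; treat the details as an assumption, not a verbatim copy:]

static inline void perf_sample_save_brstack(struct perf_sample_data *data,
					    struct perf_event *event,
					    struct perf_branch_stack *brs)
{
	int size = sizeof(u64);			/* nr */

	if (branch_sample_hw_index(event))
		size += sizeof(u64);		/* hw_idx */
	size += brs->nr * sizeof(struct perf_branch_entry);

	data->br_stack = brs;
	data->dyn_size += size;		/* sample sized up front */
	data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}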
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index a2e566e53076..df88576d6b2a 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1229,12 +1229,14 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
struct perf_event *event, bool add)
{
struct pmu *pmu = event->pmu;
+
/*
* Make sure we get updated with the first PEBS
* event. It will trigger also during removal, but
* that does not hurt:
*/
- bool update = cpuc->n_pebs == 1;
+ if (cpuc->n_pebs == 1)
+ cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
if (needed_cb != pebs_needs_sched_cb(cpuc)) {
if (!needed_cb)
@@ -1242,7 +1244,7 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
else
perf_sched_cb_dec(pmu);
- update = true;
+ cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
}
/*
@@ -1252,24 +1254,13 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
if (x86_pmu.intel_cap.pebs_baseline && add) {
u64 pebs_data_cfg;
- /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
- if (cpuc->n_pebs == 1) {
- cpuc->pebs_data_cfg = 0;
- cpuc->pebs_record_size = sizeof(struct pebs_basic);
- }
-
pebs_data_cfg = pebs_update_adaptive_cfg(event);
-
- /* Update pebs_record_size if new event requires more data. */
- if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
- cpuc->pebs_data_cfg |= pebs_data_cfg;
- adaptive_pebs_record_size_update();
- update = true;
- }
+ /*
+ * Be sure to update the thresholds when we change the record.
+ */
+ if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
+ cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
}
-
- if (update)
- pebs_update_threshold(cpuc);
}
void intel_pmu_pebs_add(struct perf_event *event)
@@ -1326,9 +1317,17 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
wrmsrl(base + idx, value);
}
+static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
+{
+ if (cpuc->n_pebs == cpuc->n_large_pebs &&
+ cpuc->n_pebs != cpuc->n_pebs_via_pt)
+ intel_pmu_drain_pebs_buffer();
+}
+
void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
unsigned int idx = hwc->idx;
@@ -1344,11 +1343,22 @@ void intel_pmu_pebs_enable(struct perf_event *event)
if (x86_pmu.intel_cap.pebs_baseline) {
hwc->config |= ICL_EVENTSEL_ADAPTIVE;
- if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
- wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
- cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
+ if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+ /*
+ * drain_pebs() assumes uniform record size;
+ * hence we need to drain when changing said
+ * size.
+ */
+ intel_pmu_drain_large_pebs(cpuc);
+ adaptive_pebs_record_size_update();
+ wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
+ cpuc->active_pebs_data_cfg = pebs_data_cfg;
}
}
+ if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
+ cpuc->pebs_data_cfg = pebs_data_cfg;
+ pebs_update_threshold(cpuc);
+ }
if (idx >= INTEL_PMC_IDX_FIXED) {
if (x86_pmu.intel_cap.pebs_format < 5)
@@ -1391,9 +1401,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- if (cpuc->n_pebs == cpuc->n_large_pebs &&
- cpuc->n_pebs != cpuc->n_pebs_via_pt)
- intel_pmu_drain_pebs_buffer();
+ intel_pmu_drain_large_pebs(cpuc);
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
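[Note: the net effect of these hunks is that pebs_update_state() no longer recomputes thresholds directly; it records the need by setting the software-only PEBS_UPDATE_DS_SW bit, and intel_pmu_pebs_enable() strips that bit before touching the MSR, then updates the thresholds once. A runnable toy model of the pattern; the names mirror the kernel's, the behavior is simplified:]

#include <stdint.h>
#include <stdio.h>

#define UPDATE_DS_SW (1ULL << 63)	/* stolen top bit, never written to HW */

static uint64_t pebs_data_cfg;		/* requested cfg + SW flag */
static uint64_t active_pebs_data_cfg;	/* last value "written to the MSR" */

static void request_cfg(uint64_t hw_bits)
{
	pebs_data_cfg |= hw_bits | UPDATE_DS_SW;	/* defer the work */
}

static void enable_event(void)
{
	uint64_t hw_cfg = pebs_data_cfg & ~UPDATE_DS_SW;

	if (hw_cfg != active_pebs_data_cfg) {
		printf("wrmsr(PEBS_DATA_CFG, %#llx)\n",
		       (unsigned long long)hw_cfg);
		active_pebs_data_cfg = hw_cfg;
	}
	if (pebs_data_cfg & UPDATE_DS_SW) {
		pebs_data_cfg = hw_cfg;		/* consume the SW flag */
		printf("pebs_update_threshold()\n");
	}
}

int main(void)
{
	request_cfg(0x1);	/* e.g. a MEMINFO-style hardware bit */
	enable_event();		/* one MSR write + one threshold update */
	enable_event();		/* idempotent: nothing left to do */
	return 0;
}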
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8fc15ed5e60b..abf09882f58b 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -121,6 +121,9 @@
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24
+/* Steal the highest bit of pebs_data_cfg for SW usage */
+#define PEBS_UPDATE_DS_SW BIT_ULL(63)
+
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 4266b64631a4..7e331e8f3692 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -36,6 +36,7 @@
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
+#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);
@@ -79,6 +80,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{}
};
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 27ef53fab6bd..b3b1e376dce8 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -144,8 +144,8 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
*/
.align 64
.skip 63, 0xcc
-SYM_FUNC_START_NOALIGN(zen_untrain_ret);
-
+SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ ANNOTATE_NOENDBR
/*
* As executed from zen_untrain_ret, this is:
*