Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile              |   8
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c         |  16
-rw-r--r--  arch/powerpc/kernel/crash_dump.c          |   2
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c           |   2
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c         |  12
-rw-r--r--  arch/powerpc/kernel/eeh.c                 |   2
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c      |   2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S      |  55
-rw-r--r--  arch/powerpc/kernel/fadump.c              | 101
-rw-r--r--  arch/powerpc/kernel/idle.c                |   2
-rw-r--r--  arch/powerpc/kernel/interrupt.c           |   8
-rw-r--r--  arch/powerpc/kernel/io-workarounds.c      | 197
-rw-r--r--  arch/powerpc/kernel/io.c                  |  58
-rw-r--r--  arch/powerpc/kernel/iommu.c               |   7
-rw-r--r--  arch/powerpc/kernel/irq.c                 |  44
-rw-r--r--  arch/powerpc/kernel/jump_label.c          |   2
-rw-r--r--  arch/powerpc/kernel/kgdb.c                |   2
-rw-r--r--  arch/powerpc/kernel/kprobes.c             |  20
-rw-r--r--  arch/powerpc/kernel/misc_32.S             |  26
-rw-r--r--  arch/powerpc/kernel/misc_64.S             |   8
-rw-r--r--  arch/powerpc/kernel/module_32.c           |   2
-rw-r--r--  arch/powerpc/kernel/module_64.c           |  88
-rw-r--r--  arch/powerpc/kernel/of_platform.c         | 102
-rw-r--r--  arch/powerpc/kernel/optprobes.c           |   2
-rw-r--r--  arch/powerpc/kernel/pci_32.c              |   5
-rw-r--r--  arch/powerpc/kernel/proc_powerpc.c        |  38
-rw-r--r--  arch/powerpc/kernel/process.c             |  14
-rw-r--r--  arch/powerpc/kernel/prom.c                |   7
-rw-r--r--  arch/powerpc/kernel/prom_init.c           | 144
-rw-r--r--  arch/powerpc/kernel/ptrace/ptrace.c       |   2
-rw-r--r--  arch/powerpc/kernel/rtas.c                | 104
-rw-r--r--  arch/powerpc/kernel/secure_boot.c         |   5
-rw-r--r--  arch/powerpc/kernel/security.c            |   2
-rw-r--r--  arch/powerpc/kernel/secvar-sysfs.c        |  24
-rw-r--r--  arch/powerpc/kernel/setup-common.c        |  22
-rw-r--r--  arch/powerpc/kernel/setup_32.c            |  10
-rw-r--r--  arch/powerpc/kernel/setup_64.c            |   5
-rw-r--r--  arch/powerpc/kernel/smp.c                 |  11
-rw-r--r--  arch/powerpc/kernel/static_call.c         |  60
-rw-r--r--  arch/powerpc/kernel/switch.S              |   1
-rw-r--r--  arch/powerpc/kernel/syscalls/syscall.tbl  |   5
-rw-r--r--  arch/powerpc/kernel/sysfs.c               |   1
-rw-r--r--  arch/powerpc/kernel/time.c                |  72
-rw-r--r--  arch/powerpc/kernel/trace/Makefile        |  11
-rw-r--r--  arch/powerpc/kernel/trace/ftrace.c        | 310
-rw-r--r--  arch/powerpc/kernel/trace/ftrace_64_pg.c  |  85
-rw-r--r--  arch/powerpc/kernel/trace/ftrace_entry.S  | 244
-rw-r--r--  arch/powerpc/kernel/traps.c               |   3
-rw-r--r--  arch/powerpc/kernel/udbg.c                |   6
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c          |  23
-rw-r--r--  arch/powerpc/kernel/vdso.c                | 131
-rw-r--r--  arch/powerpc/kernel/vdso/Makefile         |  14
-rw-r--r--  arch/powerpc/kernel/vdso/cacheflush.S     |   2
-rw-r--r--  arch/powerpc/kernel/vdso/datapage.S       |   4
-rw-r--r--  arch/powerpc/kernel/vdso/getrandom.S      |   2
-rw-r--r--  arch/powerpc/kernel/vdso/gettimeofday.S   |   5
-rw-r--r--  arch/powerpc/kernel/vdso/vdso32.lds.S     |   4
-rw-r--r--  arch/powerpc/kernel/vdso/vdso64.lds.S     |   4
-rw-r--r--  arch/powerpc/kernel/vdso/vgetrandom.c     |   4
-rw-r--r--  arch/powerpc/kernel/vdso/vgettimeofday.c  |  14
-rw-r--r--  arch/powerpc/kernel/vdso32_wrapper.S      |   2
-rw-r--r--  arch/powerpc/kernel/vdso64_wrapper.S      |   2
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S         |  12
-rw-r--r--  arch/powerpc/kernel/watchdog.c            |   3
64 files changed, 1003 insertions(+), 1182 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index f43c1198768c..fb2b95267022 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -70,7 +70,7 @@ obj-y := cputable.o syscalls.o switch.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
- of_platform.o prom_parse.o firmware.o \
+ prom_parse.o firmware.o \
hw_breakpoint_constraints.o interrupt.o \
kdebugfs.o stacktrace.o syscall.o
obj-y += ptrace/
@@ -126,7 +126,7 @@ obj-$(CONFIG_PPC_BOOK3S_32) += head_book3s_32.o
obj-$(CONFIG_44x) += head_44x.o
obj-$(CONFIG_PPC_8xx) += head_8xx.o
obj-$(CONFIG_PPC_85xx) += head_85xx.o
-extra-y += vmlinux.lds
+always-$(KBUILD_BUILTIN) += vmlinux.lds
obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o
@@ -152,8 +152,6 @@ obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
-obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
-
obj-y += trace/
ifneq ($(CONFIG_PPC_INDIRECT_PIO),y)
@@ -162,9 +160,7 @@ endif
obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o
-ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)$(CONFIG_PPC_BOOK3S),)
obj-y += ppc_save_regs.o
-endif
obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o
obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 131a8cc10dbe..b3048f6d3822 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -334,8 +334,6 @@ int main(void)
#endif /* ! CONFIG_PPC64 */
/* datapage offsets for use by vdso */
- OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data);
- OFFSET(VDSO_RNG_DATA_OFFSET, vdso_arch_data, rng_data);
OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec);
#ifdef CONFIG_PPC64
OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size);
@@ -347,8 +345,6 @@ int main(void)
#else
OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, syscall_map);
#endif
- OFFSET(VDSO_CLOCKMODE_OFFSET, vdso_arch_data, data[0].clock_mode);
- DEFINE(VDSO_CLOCKMODE_TIMENS, VDSO_CLOCKMODE_TIMENS);
#ifdef CONFIG_BUG
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
@@ -597,7 +593,6 @@ int main(void)
HSTATE_FIELD(HSTATE_DABR, dabr);
HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode);
- DEFINE(IPI_PRIORITY, IPI_PRIORITY);
OFFSET(KVM_SPLIT_RPR, kvm_split_mode, rpr);
OFFSET(KVM_SPLIT_PMMAR, kvm_split_mode, pmmar);
OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar);
@@ -677,5 +672,16 @@ int main(void)
DEFINE(BPT_SIZE, BPT_SIZE);
#endif
+#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
+ DEFINE(FTRACE_OOL_STUB_SIZE, sizeof(struct ftrace_ool_stub));
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ OFFSET(FTRACE_OPS_FUNC, ftrace_ops, func);
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ OFFSET(FTRACE_OPS_DIRECT_CALL, ftrace_ops, direct_call);
+#endif
+#endif
+
return 0;
}
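For context (not part of the patch): the OFFSET()/DEFINE() entries added above become assembler-visible constants via the asm-offsets mechanism. A minimal sketch of those macros, paraphrased from include/linux/kbuild.h, with the compiler output then post-processed into asm-offsets.h:

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))
/* After post-processing, ftrace_entry.S can use FTRACE_OPS_FUNC etc. as plain #defines. */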
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 2086fa6cdc25..103b6605dd68 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/of.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/kdump.h>
#include <asm/firmware.h>
#include <linux/uio.h>
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index f0ae39e77e37..4d64a5db50f3 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -136,7 +136,7 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_controller *phb = pci_bus_to_host(pdev->bus);
- if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+ if (!phb->controller_ops.iommu_bypass_supported)
return false;
return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index af4263594eb2..3af6c06af02f 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -867,7 +867,7 @@ bool __init dt_cpu_ftrs_init(void *fdt)
using_dt_cpu_ftrs = false;
/* Setup and verify the FDT, if it fails we just bail */
- if (!early_init_dt_verify(fdt))
+ if (!early_init_dt_verify(fdt, __pa(fdt)))
return false;
if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
@@ -1087,12 +1087,10 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
/* Count and allocate space for cpu features */
of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
&nr_dt_cpu_features);
- dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
- if (!dt_cpu_features)
- panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
- __func__,
- sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
- PAGE_SIZE);
+ dt_cpu_features =
+ memblock_alloc_or_panic(
+ sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
+ PAGE_SIZE);
cpufeatures_setup_start(isa);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 83fe99861eb1..ca7f7bb2b478 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1509,6 +1509,8 @@ int eeh_pe_configure(struct eeh_pe *pe)
/* Invalid PE ? */
if (!pe)
return -ENODEV;
+ else
+ ret = eeh_ops->configure_bridge(pe);
return ret;
}
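A simplified sketch of the resulting function after this hunk (the ret declaration and blank lines are assumed from context, not shown in the diff):

int eeh_pe_configure(struct eeh_pe *pe)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;
	else
		ret = eeh_ops->configure_bridge(pe);

	return ret;
}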
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index d4b8aff20815..247ab2acaccc 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -9,7 +9,7 @@
#include <linux/of_fdt.h>
#include <asm/epapr_hcalls.h>
#include <asm/cacheflush.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/machdep.h>
#include <asm/inst.h>
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 195b075d116c..b7229430ca94 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -2537,27 +2537,8 @@ EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)
-
-#ifdef CONFIG_CBE_RAS
-INT_DEFINE_BEGIN(cbe_system_error)
- IVEC=0x1200
- IHSRR=1
-INT_DEFINE_END(cbe_system_error)
-
-EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
- GEN_INT_ENTRY cbe_system_error, virt=0
-EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
-EXC_VIRT_NONE(0x5200, 0x100)
-EXC_COMMON_BEGIN(cbe_system_error_common)
- GEN_COMMON cbe_system_error
- addi r3,r1,STACK_INT_FRAME_REGS
- bl CFUNC(cbe_system_error_exception)
- b interrupt_return_hsrr
-
-#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
-#endif
/**
* Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
@@ -2708,26 +2689,8 @@ EXC_COMMON_BEGIN(denorm_exception_common)
b interrupt_return_hsrr
-#ifdef CONFIG_CBE_RAS
-INT_DEFINE_BEGIN(cbe_maintenance)
- IVEC=0x1600
- IHSRR=1
-INT_DEFINE_END(cbe_maintenance)
-
-EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
- GEN_INT_ENTRY cbe_maintenance, virt=0
-EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
-EXC_VIRT_NONE(0x5600, 0x100)
-EXC_COMMON_BEGIN(cbe_maintenance_common)
- GEN_COMMON cbe_maintenance
- addi r3,r1,STACK_INT_FRAME_REGS
- bl CFUNC(cbe_maintenance_exception)
- b interrupt_return_hsrr
-
-#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
-#endif
INT_DEFINE_BEGIN(altivec_assist)
@@ -2755,26 +2718,8 @@ EXC_COMMON_BEGIN(altivec_assist_common)
b interrupt_return_srr
-#ifdef CONFIG_CBE_RAS
-INT_DEFINE_BEGIN(cbe_thermal)
- IVEC=0x1800
- IHSRR=1
-INT_DEFINE_END(cbe_thermal)
-
-EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
- GEN_INT_ENTRY cbe_thermal, virt=0
-EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
-EXC_VIRT_NONE(0x5800, 0x100)
-EXC_COMMON_BEGIN(cbe_thermal_common)
- GEN_COMMON cbe_thermal
- addi r3,r1,STACK_INT_FRAME_REGS
- bl CFUNC(cbe_thermal_exception)
- b interrupt_return_hsrr
-
-#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
-#endif
#ifdef CONFIG_PPC_WATCHDOG
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index a612e7513a4f..8ca49e40c473 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -33,6 +33,7 @@
#include <asm/fadump-internal.h>
#include <asm/setup.h>
#include <asm/interrupt.h>
+#include <asm/prom.h>
/*
* The CPU who acquired the lock to trigger the fadump crash should
@@ -78,26 +79,38 @@ static struct cma *fadump_cma;
* But for some reason even if it fails we still have the memory reservation
* with us and we can still continue doing fadump.
*/
-static int __init fadump_cma_init(void)
+void __init fadump_cma_init(void)
{
- unsigned long long base, size;
+ unsigned long long base, size, end;
int rc;
- if (!fw_dump.fadump_enabled)
- return 0;
-
+ if (!fw_dump.fadump_supported || !fw_dump.fadump_enabled ||
+ fw_dump.dump_active)
+ return;
/*
* Do not use CMA if user has provided fadump=nocma kernel parameter.
- * Return 1 to continue with fadump old behaviour.
*/
- if (fw_dump.nocma)
- return 1;
+ if (fw_dump.nocma || !fw_dump.boot_memory_size)
+ return;
+ /*
+ * [base, end) should be reserved during early init in
+ * fadump_reserve_mem(). No need to check this here as
+ * cma_init_reserved_mem() already checks for overlap.
+ * Here we give the aligned chunk of this reserved memory to CMA.
+ */
base = fw_dump.reserve_dump_area_start;
size = fw_dump.boot_memory_size;
+ end = base + size;
- if (!size)
- return 0;
+ base = ALIGN(base, CMA_MIN_ALIGNMENT_BYTES);
+ end = ALIGN_DOWN(end, CMA_MIN_ALIGNMENT_BYTES);
+ size = end - base;
+
+ if (end <= base) {
+ pr_warn("%s: Too less memory to give to CMA\n", __func__);
+ return;
+ }
rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
if (rc) {
@@ -108,7 +121,7 @@ static int __init fadump_cma_init(void)
* blocked from production system usage. Hence return 1,
* so that we can continue with fadump.
*/
- return 1;
+ return;
}
/*
@@ -120,15 +133,13 @@ static int __init fadump_cma_init(void)
/*
* So we now have successfully initialized cma area for fadump.
*/
- pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx "
+ pr_info("Initialized [0x%llx, %luMB] cma area from [0x%lx, %luMB] "
"bytes of memory reserved for firmware-assisted dump\n",
- cma_get_size(fadump_cma),
- (unsigned long)cma_get_base(fadump_cma) >> 20,
- fw_dump.reserve_dump_area_size);
- return 1;
+ cma_get_base(fadump_cma), cma_get_size(fadump_cma) >> 20,
+ fw_dump.reserve_dump_area_start,
+ fw_dump.boot_memory_size >> 20);
+ return;
}
-#else
-static int __init fadump_cma_init(void) { return 1; }
#endif /* CONFIG_CMA */
/*
@@ -143,7 +154,7 @@ void __init fadump_append_bootargs(void)
if (!fw_dump.dump_active || !fw_dump.param_area_supported || !fw_dump.param_area)
return;
- if (fw_dump.param_area >= fw_dump.boot_mem_top) {
+ if (fw_dump.param_area < fw_dump.boot_mem_top) {
if (memblock_reserve(fw_dump.param_area, COMMAND_LINE_SIZE)) {
pr_warn("WARNING: Can't use additional parameters area!\n");
fw_dump.param_area = 0;
@@ -279,10 +290,8 @@ static void __init fadump_show_config(void)
if (!fw_dump.fadump_supported)
return;
- pr_debug("Fadump enabled : %s\n",
- (fw_dump.fadump_enabled ? "yes" : "no"));
- pr_debug("Dump Active : %s\n",
- (fw_dump.dump_active ? "yes" : "no"));
+ pr_debug("Fadump enabled : %s\n", str_yes_no(fw_dump.fadump_enabled));
+ pr_debug("Dump Active : %s\n", str_yes_no(fw_dump.dump_active));
pr_debug("Dump section sizes:\n");
pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size);
@@ -558,13 +567,6 @@ int __init fadump_reserve_mem(void)
if (!fw_dump.dump_active) {
fw_dump.boot_memory_size =
PAGE_ALIGN(fadump_calculate_reserve_size());
-#ifdef CONFIG_CMA
- if (!fw_dump.nocma) {
- fw_dump.boot_memory_size =
- ALIGN(fw_dump.boot_memory_size,
- CMA_MIN_ALIGNMENT_BYTES);
- }
-#endif
bootmem_min = fw_dump.ops->fadump_get_bootmem_min();
if (fw_dump.boot_memory_size < bootmem_min) {
@@ -637,8 +639,6 @@ int __init fadump_reserve_mem(void)
pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
(size >> 20), base, (memblock_phys_mem_size() >> 20));
-
- ret = fadump_cma_init();
}
return ret;
@@ -750,7 +750,7 @@ u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
* prstatus.pr_pid = ????
*/
elf_core_copy_regs(&prstatus.pr_reg, regs);
- buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
+ buf = append_elf_note(buf, NN_PRSTATUS, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
return buf;
}
@@ -1586,6 +1586,12 @@ static void __init fadump_init_files(void)
return;
}
+ if (fw_dump.param_area) {
+ rc = sysfs_create_file(fadump_kobj, &bootargs_append_attr.attr);
+ if (rc)
+ pr_err("unable to create bootargs_append sysfs file (%d)\n", rc);
+ }
+
debugfs_create_file("fadump_region", 0444, arch_debugfs_dir, NULL,
&fadump_region_fops);
@@ -1740,7 +1746,7 @@ err_out:
* Reserve memory to store additional parameters to be passed
* for fadump/capture kernel.
*/
-static void __init fadump_setup_param_area(void)
+void __init fadump_setup_param_area(void)
{
phys_addr_t range_start, range_end;
@@ -1748,7 +1754,7 @@ static void __init fadump_setup_param_area(void)
return;
/* This memory can't be used by PFW or bootloader as it is shared across kernels */
- if (radix_enabled()) {
+ if (early_radix_enabled()) {
/*
* Anywhere in the upper half should be good enough as all memory
* is accessible in real mode.
@@ -1757,31 +1763,31 @@ static void __init fadump_setup_param_area(void)
range_end = memblock_end_of_DRAM();
} else {
/*
- * Passing additional parameters is supported for hash MMU only
- * if the first memory block size is 768MB or higher.
+ * Memory range for passing additional parameters for HASH MMU
+ * must meet the following conditions:
+ * 1. The first memory block size must be higher than the
+ * minimum RMA (MIN_RMA) size. Bootloader can use memory
+ * upto RMA size. So it should be avoided.
+ * 2. The range should be between MIN_RMA and RMA size (ppc64_rma_size)
+ * 3. It must not overlap with the fadump reserved area.
*/
- if (ppc64_rma_size < 0x30000000)
+ if (ppc64_rma_size < MIN_RMA*1024*1024)
return;
- /*
- * 640 MB to 768 MB is not used by PFW/bootloader. So, try reserving
- * memory for passing additional parameters in this range to avoid
- * being stomped on by PFW/bootloader.
- */
- range_start = 0x2A000000;
- range_end = range_start + 0x4000000;
+ range_start = MIN_RMA * 1024 * 1024;
+ range_end = min(ppc64_rma_size, fw_dump.boot_mem_top);
}
fw_dump.param_area = memblock_phys_alloc_range(COMMAND_LINE_SIZE,
COMMAND_LINE_SIZE,
range_start,
range_end);
- if (!fw_dump.param_area || sysfs_create_file(fadump_kobj, &bootargs_append_attr.attr)) {
+ if (!fw_dump.param_area) {
pr_warn("WARNING: Could not setup area to pass additional parameters!\n");
return;
}
- memset(phys_to_virt(fw_dump.param_area), 0, COMMAND_LINE_SIZE);
+ memset((void *)fw_dump.param_area, 0, COMMAND_LINE_SIZE);
}
/*
@@ -1807,7 +1813,6 @@ int __init setup_fadump(void)
}
/* Initialize the kernel dump memory structure and register with f/w */
else if (fw_dump.reserve_dump_area_size) {
- fadump_setup_param_area();
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
register_fadump();
}
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 30b56c67fa61..e527cd3ef128 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -97,7 +97,7 @@ void power4_idle(void)
/*
* Register the sysctl to set/clear powersave_nap.
*/
-static struct ctl_table powersave_nap_ctl_table[] = {
+static const struct ctl_table powersave_nap_ctl_table[] = {
{
.procname = "powersave-nap",
.data = &powersave_nap,
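Only the table's qualifier changes here; registration is untouched. A hedged sketch of the typical registration pattern for such a const table (assumed for illustration, not quoted from idle.c):

static int __init register_powersave_nap_sysctl(void)
{
	register_sysctl("kernel", powersave_nap_ctl_table);
	return 0;
}
device_initcall(register_powersave_nap_sysctl);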
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index af62ec974b97..e0c681d0b076 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -25,6 +25,10 @@
unsigned long global_dbcr0[NR_CPUS];
#endif
+#if defined(CONFIG_PREEMPT_DYNAMIC)
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
@@ -185,7 +189,7 @@ again:
ti_flags = read_thread_flags();
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable();
- if (ti_flags & _TIF_NEED_RESCHED) {
+ if (ti_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)) {
schedule();
} else {
/*
@@ -396,7 +400,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
/* Returning to a kernel context with local irqs enabled. */
WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
- if (IS_ENABLED(CONFIG_PREEMPT)) {
+ if (need_irq_preemption()) {
/* Return to preemptible kernel context */
if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
if (preempt_count() == 0)
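The hunk depends on a need_irq_preemption() helper paired with the sk_dynamic_irqentry_exit_cond_resched static key defined above. A hedged sketch of how such a helper is usually wired up under PREEMPT_DYNAMIC (the real definition lives in a header elsewhere in the series):

static inline bool need_irq_preemption(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC))
		return static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched);
	return IS_ENABLED(CONFIG_PREEMPTION);
}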
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
deleted file mode 100644
index c877f074d174..000000000000
--- a/arch/powerpc/kernel/io-workarounds.c
+++ /dev/null
@@ -1,197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Support PCI IO workaround
- *
- * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
- * IBM, Corp.
- * (C) Copyright 2007-2008 TOSHIBA CORPORATION
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/sched/mm.h> /* for init_mm */
-#include <linux/pgtable.h>
-
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/ppc-pci.h>
-#include <asm/io-workarounds.h>
-#include <asm/pte-walk.h>
-
-
-#define IOWA_MAX_BUS 8
-
-static struct iowa_bus iowa_busses[IOWA_MAX_BUS];
-static unsigned int iowa_bus_count;
-
-static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
-{
- int i, j;
- struct resource *res;
- unsigned long vstart, vend;
-
- for (i = 0; i < iowa_bus_count; i++) {
- struct iowa_bus *bus = &iowa_busses[i];
- struct pci_controller *phb = bus->phb;
-
- if (vaddr) {
- vstart = (unsigned long)phb->io_base_virt;
- vend = vstart + phb->pci_io_size - 1;
- if ((vaddr >= vstart) && (vaddr <= vend))
- return bus;
- }
-
- if (paddr)
- for (j = 0; j < 3; j++) {
- res = &phb->mem_resources[j];
- if (paddr >= res->start && paddr <= res->end)
- return bus;
- }
- }
-
- return NULL;
-}
-
-#ifdef CONFIG_PPC_INDIRECT_MMIO
-struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
-{
- struct iowa_bus *bus;
- int token;
-
- token = PCI_GET_ADDR_TOKEN(addr);
-
- if (token && token <= iowa_bus_count)
- bus = &iowa_busses[token - 1];
- else {
- unsigned long vaddr, paddr;
-
- vaddr = (unsigned long)PCI_FIX_ADDR(addr);
- if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
- return NULL;
-
- paddr = ppc_find_vmap_phys(vaddr);
-
- bus = iowa_pci_find(vaddr, paddr);
-
- if (bus == NULL)
- return NULL;
- }
-
- return bus;
-}
-#else /* CONFIG_PPC_INDIRECT_MMIO */
-struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
-{
- return NULL;
-}
-#endif /* !CONFIG_PPC_INDIRECT_MMIO */
-
-#ifdef CONFIG_PPC_INDIRECT_PIO
-struct iowa_bus *iowa_pio_find_bus(unsigned long port)
-{
- unsigned long vaddr = (unsigned long)pci_io_base + port;
- return iowa_pci_find(vaddr, 0);
-}
-#else
-struct iowa_bus *iowa_pio_find_bus(unsigned long port)
-{
- return NULL;
-}
-#endif
-
-#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
-static ret iowa_##name at \
-{ \
- struct iowa_bus *bus; \
- bus = iowa_##space##_find_bus(aa); \
- if (bus && bus->ops && bus->ops->name) \
- return bus->ops->name al; \
- return __do_##name al; \
-}
-
-#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
-static void iowa_##name at \
-{ \
- struct iowa_bus *bus; \
- bus = iowa_##space##_find_bus(aa); \
- if (bus && bus->ops && bus->ops->name) { \
- bus->ops->name al; \
- return; \
- } \
- __do_##name al; \
-}
-
-#include <asm/io-defs.h>
-
-#undef DEF_PCI_AC_RET
-#undef DEF_PCI_AC_NORET
-
-static const struct ppc_pci_io iowa_pci_io = {
-
-#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name,
-#define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name,
-
-#include <asm/io-defs.h>
-
-#undef DEF_PCI_AC_RET
-#undef DEF_PCI_AC_NORET
-
-};
-
-#ifdef CONFIG_PPC_INDIRECT_MMIO
-void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
- pgprot_t prot, void *caller)
-{
- struct iowa_bus *bus;
- void __iomem *res = __ioremap_caller(addr, size, prot, caller);
- int busno;
-
- bus = iowa_pci_find(0, (unsigned long)addr);
- if (bus != NULL) {
- busno = bus - iowa_busses;
- PCI_SET_ADDR_TOKEN(res, busno + 1);
- }
- return res;
-}
-#endif /* !CONFIG_PPC_INDIRECT_MMIO */
-
-bool io_workaround_inited;
-
-/* Enable IO workaround */
-static void io_workaround_init(void)
-{
- if (io_workaround_inited)
- return;
- ppc_pci_io = iowa_pci_io;
- io_workaround_inited = true;
-}
-
-/* Register new bus to support workaround */
-void iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops,
- int (*initfunc)(struct iowa_bus *, void *), void *data)
-{
- struct iowa_bus *bus;
- struct device_node *np = phb->dn;
-
- io_workaround_init();
-
- if (iowa_bus_count >= IOWA_MAX_BUS) {
- pr_err("IOWA:Too many pci bridges, "
- "workarounds disabled for %pOF\n", np);
- return;
- }
-
- bus = &iowa_busses[iowa_bus_count];
- bus->phb = phb;
- bus->ops = ops;
- bus->private = data;
-
- if (initfunc)
- if ((*initfunc)(bus, data))
- return;
-
- iowa_bus_count++;
-
- pr_debug("IOWA:[%d]Add bus, %pOF.\n", iowa_bus_count-1, np);
-}
-
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 6af535905984..bcc201c01514 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -31,13 +31,14 @@ void _insb(const volatile u8 __iomem *port, void *buf, long count)
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
tmp = *(const volatile u8 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
- asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
+ data_barrier(tmp);
}
EXPORT_SYMBOL(_insb);
@@ -47,75 +48,80 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
*(volatile u8 __force *)port = *tbuf++;
} while (--count != 0);
- asm volatile("sync");
+ mb();
}
EXPORT_SYMBOL(_outsb);
-void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
+void _insw(const volatile u16 __iomem *port, void *buf, long count)
{
u16 *tbuf = buf;
u16 tmp;
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
tmp = *(const volatile u16 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
- asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
+ data_barrier(tmp);
}
-EXPORT_SYMBOL(_insw_ns);
+EXPORT_SYMBOL(_insw);
-void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
+void _outsw(volatile u16 __iomem *port, const void *buf, long count)
{
const u16 *tbuf = buf;
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
*(volatile u16 __force *)port = *tbuf++;
} while (--count != 0);
- asm volatile("sync");
+ mb();
}
-EXPORT_SYMBOL(_outsw_ns);
+EXPORT_SYMBOL(_outsw);
-void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
+void _insl(const volatile u32 __iomem *port, void *buf, long count)
{
u32 *tbuf = buf;
u32 tmp;
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
tmp = *(const volatile u32 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
- asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
+ data_barrier(tmp);
}
-EXPORT_SYMBOL(_insl_ns);
+EXPORT_SYMBOL(_insl);
-void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
+void _outsl(volatile u32 __iomem *port, const void *buf, long count)
{
const u32 *tbuf = buf;
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
*(volatile u32 __force *)port = *tbuf++;
} while (--count != 0);
- asm volatile("sync");
+ mb();
}
-EXPORT_SYMBOL(_outsl_ns);
+EXPORT_SYMBOL(_outsl);
#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
@@ -127,7 +133,7 @@ _memset_io(volatile void __iomem *addr, int c, unsigned long n)
lc |= lc << 8;
lc |= lc << 16;
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
while(n && !IO_CHECK_ALIGN(p, 4)) {
*((volatile u8 *)p) = c;
p++;
@@ -143,7 +149,7 @@ _memset_io(volatile void __iomem *addr, int c, unsigned long n)
p++;
n--;
}
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
}
EXPORT_SYMBOL(_memset_io);
@@ -152,7 +158,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
{
void *vsrc = (void __force *) src;
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
*((u8 *)dest) = *((volatile u8 *)vsrc);
eieio();
@@ -174,7 +180,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
dest++;
n--;
}
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
}
EXPORT_SYMBOL(_memcpy_fromio);
@@ -182,7 +188,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
void *vdest = (void __force *) dest;
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
*((volatile u8 *)vdest) = *((u8 *)src);
src++;
@@ -201,6 +207,6 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
vdest++;
n--;
}
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
}
EXPORT_SYMBOL(_memcpy_toio);
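For reference, the helpers these hunks switch to expand to the same instructions as the open-coded asm they replace. Approximate definitions, paraphrased from the powerpc barrier and io headers:

#define mb()		__asm__ __volatile__ ("sync" : : : "memory")
#define data_barrier(x)	__asm__ __volatile__ ("twi 0,%0,0; isync" : : "r" (x) : "memory")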
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 76381e14e800..244eb4857e7f 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
@@ -687,7 +688,7 @@ void iommu_table_clear(struct iommu_table *tbl)
void iommu_table_reserve_pages(struct iommu_table *tbl,
unsigned long res_start, unsigned long res_end)
{
- int i;
+ unsigned long i;
WARN_ON_ONCE(res_end < res_start);
/*
@@ -769,8 +770,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
iommu_table_clear(tbl);
if (!welcomed) {
- printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
- novmerge ? "disabled" : "enabled");
+ pr_info("IOMMU table initialized, virtual merging %s\n",
+ str_disabled_enabled(novmerge));
welcomed = 1;
}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2e1600a8bbbb..a0e8b998c9b5 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -89,69 +89,69 @@ int arch_show_interrupts(struct seq_file *p, int prec)
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
if (tau_initialized) {
- seq_printf(p, "%*s: ", prec, "TAU");
+ seq_printf(p, "%*s:", prec, "TAU");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", tau_interrupts(j));
+ seq_put_decimal_ull_width(p, " ", tau_interrupts(j), 10);
seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
- seq_printf(p, "%*s: ", prec, "LOC");
+ seq_printf(p, "%*s:", prec, "LOC");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).timer_irqs_event, 10);
seq_printf(p, " Local timer interrupts for timer event device\n");
- seq_printf(p, "%*s: ", prec, "BCT");
+ seq_printf(p, "%*s:", prec, "BCT");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).broadcast_irqs_event, 10);
seq_printf(p, " Broadcast timer interrupts for timer event device\n");
- seq_printf(p, "%*s: ", prec, "LOC");
+ seq_printf(p, "%*s:", prec, "LOC");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).timer_irqs_others, 10);
seq_printf(p, " Local timer interrupts for others\n");
- seq_printf(p, "%*s: ", prec, "SPU");
+ seq_printf(p, "%*s:", prec, "SPU");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).spurious_irqs, 10);
seq_printf(p, " Spurious interrupts\n");
- seq_printf(p, "%*s: ", prec, "PMI");
+ seq_printf(p, "%*s:", prec, "PMI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).pmu_irqs, 10);
seq_printf(p, " Performance monitoring interrupts\n");
- seq_printf(p, "%*s: ", prec, "MCE");
+ seq_printf(p, "%*s:", prec, "MCE");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).mce_exceptions, 10);
seq_printf(p, " Machine check exceptions\n");
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_HVMODE)) {
- seq_printf(p, "%*s: ", prec, "HMI");
+ seq_printf(p, "%*s:", prec, "HMI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
+ seq_put_decimal_ull_width(p, " ", paca_ptrs[j]->hmi_irqs, 10);
seq_printf(p, " Hypervisor Maintenance Interrupts\n");
}
#endif
- seq_printf(p, "%*s: ", prec, "NMI");
+ seq_printf(p, "%*s:", prec, "NMI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).sreset_irqs, 10);
seq_printf(p, " System Reset interrupts\n");
#ifdef CONFIG_PPC_WATCHDOG
- seq_printf(p, "%*s: ", prec, "WDG");
+ seq_printf(p, "%*s:", prec, "WDG");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).soft_nmi_irqs, 10);
seq_printf(p, " Watchdog soft-NMI interrupts\n");
#endif
#ifdef CONFIG_PPC_DOORBELL
if (cpu_has_feature(CPU_FTR_DBELL)) {
- seq_printf(p, "%*s: ", prec, "DBL");
+ seq_printf(p, "%*s:", prec, "DBL");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
+ seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).doorbell_irqs, 10);
seq_printf(p, " Doorbell interrupts\n");
}
#endif
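The conversion keeps the 10-character, right-aligned per-CPU counters in /proc/interrupts while avoiding printf format parsing for every CPU; the separating space simply moves in front of the number. Side by side, assuming both lines print the same counter:

seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);			/* old */
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).timer_irqs_event, 10);	/* new */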
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index 5277cf582c16..2659e1ac8604 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/inst.h>
void arch_jump_label_transform(struct jump_entry *entry,
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 7a8bc03a00af..5081334b7bd2 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -21,7 +21,7 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/debug.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <linux/slab.h>
#include <asm/inst.h>
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index f8aa91bc3b17..c0d9f12cb441 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/set_memory.h>
#include <linux/execmem.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
@@ -105,24 +105,22 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
return addr;
}
-static bool arch_kprobe_on_func_entry(unsigned long offset)
+static bool arch_kprobe_on_func_entry(unsigned long addr, unsigned long offset)
{
-#ifdef CONFIG_PPC64_ELF_ABI_V2
-#ifdef CONFIG_KPROBES_ON_FTRACE
- return offset <= 16;
-#else
- return offset <= 8;
-#endif
-#else
+ unsigned long ip = ftrace_location(addr);
+
+ if (ip)
+ return offset <= (ip - addr);
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
+ return offset <= 8;
return !offset;
-#endif
}
/* XXX try and fold the magic of kprobe_lookup_name() in this */
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
bool *on_func_entry)
{
- *on_func_entry = arch_kprobe_on_func_entry(offset);
+ *on_func_entry = arch_kprobe_on_func_entry(addr, offset);
return (kprobe_opcode_t *)(addr + offset);
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 033cd00aa0fc..acb727f54e9d 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -28,32 +28,6 @@
.text
/*
- * This returns the high 64 bits of the product of two 64-bit numbers.
- */
-_GLOBAL(mulhdu)
- cmpwi r6,0
- cmpwi cr1,r3,0
- mr r10,r4
- mulhwu r4,r4,r5
- beq 1f
- mulhwu r0,r10,r6
- mullw r7,r10,r5
- addc r7,r0,r7
- addze r4,r4
-1: beqlr cr1 /* all done if high part of A is 0 */
- mullw r9,r3,r5
- mulhwu r10,r3,r5
- beq 2f
- mullw r0,r3,r6
- mulhwu r8,r3,r6
- addc r7,r0,r7
- adde r4,r4,r8
- addze r10,r10
-2: addc r4,r4,r9
- addze r3,r10
- blr
-
-/*
* reloc_got2 runs through the .got2 section adding an offset
* to each entry.
*/
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 91123e102db4..a997c7f43dc0 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -74,7 +74,7 @@ _GLOBAL(rmci_off)
blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
-#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
+#ifdef CONFIG_PPC_PMAC
/*
* Do an IO access in real mode
@@ -137,7 +137,7 @@ _GLOBAL(real_writeb)
sync
isync
blr
-#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
+#endif // CONFIG_PPC_PMAC
#ifdef CONFIG_PPC_PASEMI
@@ -174,7 +174,7 @@ _GLOBAL(real_205_writeb)
#endif /* CONFIG_PPC_PASEMI */
-#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
+#ifdef CONFIG_CPU_FREQ_PMAC64
/*
* SCOM access functions for 970 (FX only for now)
*
@@ -243,7 +243,7 @@ _GLOBAL(scom970_write)
/* restore interrupts */
mtmsrd r5,1
blr
-#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
+#endif // CONFIG_CPU_FREQ_PMAC64
/* kexec_wait(phys_cpu)
*
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 816a63fd71fb..f930e3395a7f 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -18,7 +18,7 @@
#include <linux/bug.h>
#include <linux/sort.h>
#include <asm/setup.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
/* Count how many different relocations (different symbol, different
addend) */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index e9bab599d0c2..126bf3b06ab7 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <asm/module.h>
#include <asm/firmware.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>
@@ -205,7 +205,9 @@ static int relacmp(const void *_x, const void *_y)
/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
- const Elf64_Shdr *sechdrs)
+ const Elf64_Shdr *sechdrs,
+ char *secstrings,
+ struct module *me)
{
/* One extra reloc so it's always 0-addr terminated */
unsigned long relocs = 1;
@@ -241,13 +243,21 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
}
}
-#ifdef CONFIG_DYNAMIC_FTRACE
- /* make the trampoline to the ftrace_caller */
- relocs++;
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- /* an additional one for ftrace_regs_caller */
- relocs++;
-#endif
+ /* stubs for ftrace_caller and ftrace_regs_caller */
+ relocs += IS_ENABLED(CONFIG_DYNAMIC_FTRACE) + IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS);
+
+#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
+ /* stubs for the function tracer */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (!strcmp(secstrings + sechdrs[i].sh_name, "__patchable_function_entries")) {
+ me->arch.ool_stub_count = sechdrs[i].sh_size / sizeof(unsigned long);
+ me->arch.ool_stub_index = 0;
+ relocs += roundup(me->arch.ool_stub_count * sizeof(struct ftrace_ool_stub),
+ sizeof(struct ppc64_stub_entry)) /
+ sizeof(struct ppc64_stub_entry);
+ break;
+ }
+ }
#endif
pr_debug("Looks like a total of %lu stubs, max\n", relocs);
@@ -355,6 +365,24 @@ static void dedotify_versions(struct modversion_info *vers,
}
}
+/* Same as normal versions, remove a leading dot if present. */
+static void dedotify_ext_version_names(char *str_seq, unsigned long size)
+{
+ unsigned long out = 0;
+ unsigned long in;
+ char last = '\0';
+
+ for (in = 0; in < size; in++) {
+ /* Skip one leading dot */
+ if (last == '\0' && str_seq[in] == '.')
+ in++;
+ last = str_seq[in];
+ str_seq[out++] = last;
+ }
+ /* Zero the trailing portion of the names table for robustness */
+ memset(&str_seq[out], 0, size - out);
+}
+
/*
* Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
* seem to be defined (value set later).
@@ -424,10 +452,12 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
me->arch.toc_section = i;
if (sechdrs[i].sh_addralign < 8)
sechdrs[i].sh_addralign = 8;
- }
- else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
+ } else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0)
dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size);
+ else if (strcmp(secstrings + sechdrs[i].sh_name, "__version_ext_names") == 0)
+ dedotify_ext_version_names((void *)hdr + sechdrs[i].sh_offset,
+ sechdrs[i].sh_size);
if (sechdrs[i].sh_type == SHT_SYMTAB)
dedotify((void *)hdr + sechdrs[i].sh_offset,
@@ -460,7 +490,7 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
#endif
/* Override the stubs size */
- sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
+ sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs, secstrings, me);
return 0;
}
@@ -1085,6 +1115,37 @@ int module_trampoline_target(struct module *mod, unsigned long addr,
return 0;
}
+static int setup_ftrace_ool_stubs(const Elf64_Shdr *sechdrs, unsigned long addr, struct module *me)
+{
+#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
+ unsigned int i, total_stubs, num_stubs;
+ struct ppc64_stub_entry *stub;
+
+ total_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stub);
+ num_stubs = roundup(me->arch.ool_stub_count * sizeof(struct ftrace_ool_stub),
+ sizeof(struct ppc64_stub_entry)) / sizeof(struct ppc64_stub_entry);
+
+ /* Find the next available entry */
+ stub = (void *)sechdrs[me->arch.stubs_section].sh_addr;
+ for (i = 0; stub_func_addr(stub[i].funcdata); i++)
+ if (WARN_ON(i >= total_stubs))
+ return -1;
+
+ if (WARN_ON(i + num_stubs > total_stubs))
+ return -1;
+
+ stub += i;
+ me->arch.ool_stubs = (struct ftrace_ool_stub *)stub;
+
+ /* reserve stubs */
+ for (i = 0; i < num_stubs; i++)
+ if (patch_u32((void *)&stub->funcdata, PPC_RAW_NOP()))
+ return -1;
+#endif
+
+ return 0;
+}
+
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
mod->arch.tramp = stub_for_addr(sechdrs,
@@ -1103,6 +1164,9 @@ int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
if (!mod->arch.tramp)
return -ENOENT;
+ if (setup_ftrace_ool_stubs(sechdrs, mod->arch.tramp, mod))
+ return -ENOENT;
+
return 0;
}
#endif
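A worked example of the dedotify_ext_version_names() helper added above, using invented names purely for illustration (NUL bytes written as \0):

char names[] = ".foo\0.bar\0";			/* hypothetical input */
dedotify_ext_version_names(names, sizeof(names));
/* names now holds "foo\0bar\0"; the freed trailing bytes are zeroed. */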
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
deleted file mode 100644
index adc76fa58d1e..000000000000
--- a/arch/powerpc/kernel/of_platform.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- * and Arnd Bergmann, IBM Corp.
- */
-
-#undef DEBUG
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/mod_devicetable.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/atomic.h>
-
-#include <asm/errno.h>
-#include <asm/topology.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-#include <asm/eeh.h>
-
-#ifdef CONFIG_PPC_OF_PLATFORM_PCI
-
-/* The probing of PCI controllers from of_platform is currently
- * 64 bits only, mostly due to gratuitous differences between
- * the 32 and 64 bits PCI code on PowerPC and the 32 bits one
- * lacking some bits needed here.
- */
-
-static int of_pci_phb_probe(struct platform_device *dev)
-{
- struct pci_controller *phb;
-
- /* Check if we can do that ... */
- if (ppc_md.pci_setup_phb == NULL)
- return -ENODEV;
-
- pr_info("Setting up PCI bus %pOF\n", dev->dev.of_node);
-
- /* Alloc and setup PHB data structure */
- phb = pcibios_alloc_controller(dev->dev.of_node);
- if (!phb)
- return -ENODEV;
-
- /* Setup parent in sysfs */
- phb->parent = &dev->dev;
-
- /* Setup the PHB using arch provided callback */
- if (ppc_md.pci_setup_phb(phb)) {
- pcibios_free_controller(phb);
- return -ENODEV;
- }
-
- /* Process "ranges" property */
- pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0);
-
- /* Init pci_dn data structures */
- pci_devs_phb_init_dynamic(phb);
-
- /* Create EEH PE for the PHB */
- eeh_phb_pe_create(phb);
-
- /* Scan the bus */
- pcibios_scan_phb(phb);
- if (phb->bus == NULL)
- return -ENXIO;
-
- /* Claim resources. This might need some rework as well depending
- * whether we are doing probe-only or not, like assigning unassigned
- * resources etc...
- */
- pcibios_claim_one_bus(phb->bus);
-
- /* Add probed PCI devices to the device model */
- pci_bus_add_devices(phb->bus);
-
- return 0;
-}
-
-static const struct of_device_id of_pci_phb_ids[] = {
- { .type = "pci", },
- { .type = "pcix", },
- { .type = "pcie", },
- { .type = "pciex", },
- { .type = "ht", },
- {}
-};
-
-static struct platform_driver of_pci_phb_driver = {
- .probe = of_pci_phb_probe,
- .driver = {
- .name = "of-pci",
- .of_match_table = of_pci_phb_ids,
- },
-};
-
-builtin_platform_driver(of_pci_phb_driver);
-
-#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index c0b351d61058..2e83702bf9ba 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -13,7 +13,7 @@
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index ce0c8623e563..f8a3bd8cfae4 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -213,11 +213,8 @@ pci_create_OF_bus_map(void)
struct property* of_prop;
struct device_node *dn;
- of_prop = memblock_alloc(sizeof(struct property) + 256,
+ of_prop = memblock_alloc_or_panic(sizeof(struct property) + 256,
SMP_CACHE_BYTES);
- if (!of_prop)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct property) + 256);
dn = of_find_node_by_path("/");
if (dn) {
memset(of_prop, -1, sizeof(struct property) + 256);
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index b109cd7b5d01..d083b4517065 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -4,17 +4,20 @@
*/
#include <linux/init.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/string.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/rtas.h>
+#include <asm/systemcfg.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
static loff_t page_map_seek(struct file *file, loff_t off, int whence)
{
@@ -33,10 +36,9 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
if ((vma->vm_end - vma->vm_start) > PAGE_SIZE)
return -EINVAL;
- remap_pfn_range(vma, vma->vm_start,
- __pa(pde_data(file_inode(file))) >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot);
- return 0;
+ return remap_pfn_range(vma, vma->vm_start,
+ __pa(pde_data(file_inode(file))) >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot);
}
static const struct proc_ops page_map_proc_ops = {
@@ -45,13 +47,35 @@ static const struct proc_ops page_map_proc_ops = {
.proc_mmap = page_map_mmap,
};
+static union {
+ struct systemcfg data;
+ u8 page[PAGE_SIZE];
+} systemcfg_data_store __page_aligned_data;
+struct systemcfg *systemcfg = &systemcfg_data_store.data;
static int __init proc_ppc64_init(void)
{
struct proc_dir_entry *pde;
+ strscpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
+ systemcfg->version.major = SYSTEMCFG_MAJOR;
+ systemcfg->version.minor = SYSTEMCFG_MINOR;
+ systemcfg->processor = mfspr(SPRN_PVR);
+ /*
+ * Fake the old platform number for pSeries and add
+ * in LPAR bit if necessary
+ */
+ systemcfg->platform = 0x100;
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ systemcfg->platform |= 1;
+ systemcfg->physicalMemorySize = memblock_phys_mem_size();
+ systemcfg->dcache_size = ppc64_caches.l1d.size;
+ systemcfg->dcache_line_size = ppc64_caches.l1d.line_size;
+ systemcfg->icache_size = ppc64_caches.l1i.size;
+ systemcfg->icache_line_size = ppc64_caches.l1i.line_size;
+
pde = proc_create_data("powerpc/systemcfg", S_IFREG | 0444, NULL,
- &page_map_proc_ops, vdso_data);
+ &page_map_proc_ops, systemcfg);
if (!pde)
return 1;
proc_set_size(pde, PAGE_SIZE);
@@ -60,7 +84,7 @@ static int __init proc_ppc64_init(void)
}
__initcall(proc_ppc64_init);
-#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_PPC64_PROC_SYSTEMCFG */
/*
* Create the ppc64 and ppc64/rtas directories early. This allows us to
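An illustrative, hypothetical userspace consumer of the systemcfg page exported above; the /proc path and the eye-catcher come from the patch, everything else is an example only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/proc/powerpc/systemcfg", O_RDONLY);
	char *cfg = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

	if (fd >= 0 && cfg != MAP_FAILED)
		printf("%.15s\n", cfg);		/* "SYSTEMCFG:PPC64" */
	return 0;
}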
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ff61a3e7984c..855e09886503 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -54,7 +54,7 @@
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#endif
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
@@ -1000,7 +1000,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
WARN_ON(tm_suspend_disabled);
- TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
+ TM_DEBUG("---- tm_reclaim on pid %d (NIP=%lx, "
"ccr=%lx, msr=%lx, trap=%lx)\n",
tsk->pid, thr->regs->nip,
thr->regs->ccr, thr->regs->msr,
@@ -1008,7 +1008,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
- TM_DEBUG("--- tm_reclaim on pid %d complete\n",
+ TM_DEBUG("---- tm_reclaim on pid %d complete\n",
tsk->pid);
out_and_saveregs:
@@ -1960,8 +1960,8 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
* address of _start and the second entry is the TOC
* value we need to use.
*/
- __get_user(entry, (unsigned long __user *)start);
- __get_user(toc, (unsigned long __user *)start+1);
+ get_user(entry, (unsigned long __user *)start);
+ get_user(toc, (unsigned long __user *)start+1);
/* Check whether the e_entry function descriptor entries
* need to be relocated before we can use them.
@@ -2367,14 +2367,14 @@ void __no_sanitize_address show_stack(struct task_struct *tsk,
(sp + STACK_INT_FRAME_REGS);
lr = regs->link;
- printk("%s--- interrupt: %lx at %pS\n",
+ printk("%s---- interrupt: %lx at %pS\n",
loglvl, regs->trap, (void *)regs->nip);
// Detect the case of an empty pt_regs at the very base
// of the stack and suppress showing it in full.
if (!empty_user_regs(regs, tsk)) {
__show_regs(regs);
- printk("%s--- interrupt: %lx\n", loglvl, regs->trap);
+ printk("%s---- interrupt: %lx\n", loglvl, regs->trap);
}
firstframe = 1;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 0be07ed407c7..9ed9dde7d231 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -791,7 +791,7 @@ void __init early_init_devtree(void *params)
DBG(" -> early_init_devtree(%px)\n", params);
/* Too early to BUG_ON(), do it by hand */
- if (!early_init_dt_verify(params))
+ if (!early_init_dt_verify(params, __pa(params)))
panic("BUG: Failed verifying flat device tree, bad version?");
of_scan_flat_dt(early_init_dt_scan_model, NULL);
@@ -860,7 +860,7 @@ void __init early_init_devtree(void *params)
*/
if (fadump_reserve_mem() == 0)
#endif
- reserve_crashkernel();
+ arch_reserve_crashkernel();
early_reserve_mem();
if (memory_limit > memblock_phys_mem_size())
@@ -908,6 +908,9 @@ void __init early_init_devtree(void *params)
mmu_early_init_devtree();
+ /* Setup param area for passing additional parameters to fadump capture kernel. */
+ fadump_setup_param_area();
+
#ifdef CONFIG_PPC_POWERNV
/* Scan and build the list of machine check recoverable ranges */
of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index fbb68fc28ed3..827c958677f8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1061,7 +1061,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.virt_base = cpu_to_be32(0xffffffff),
.virt_size = cpu_to_be32(0xffffffff),
.load_base = cpu_to_be32(0xffffffff),
- .min_rma = cpu_to_be32(512), /* 512MB min RMA */
+ .min_rma = cpu_to_be32(MIN_RMA),
.min_load = cpu_to_be32(0xffffffff), /* full client load */
.min_rma_percent = 0, /* min RMA percentage of total RAM */
.max_pft_size = 48, /* max log_2(hash table size) */
@@ -2792,91 +2792,6 @@ static void __init flatten_device_tree(void)
dt_struct_start, dt_struct_end);
}
-#ifdef CONFIG_PPC_MAPLE
-/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
- * The values are bad, and it doesn't even have the right number of cells. */
-static void __init fixup_device_tree_maple(void)
-{
- phandle isa;
- u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
- u32 isa_ranges[6];
- char *name;
-
- name = "/ht@0/isa@4";
- isa = call_prom("finddevice", 1, 1, ADDR(name));
- if (!PHANDLE_VALID(isa)) {
- name = "/ht@0/isa@6";
- isa = call_prom("finddevice", 1, 1, ADDR(name));
- rloc = 0x01003000; /* IO space; PCI device = 6 */
- }
- if (!PHANDLE_VALID(isa))
- return;
-
- if (prom_getproplen(isa, "ranges") != 12)
- return;
- if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
- == PROM_ERROR)
- return;
-
- if (isa_ranges[0] != 0x1 ||
- isa_ranges[1] != 0xf4000000 ||
- isa_ranges[2] != 0x00010000)
- return;
-
- prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
-
- isa_ranges[0] = 0x1;
- isa_ranges[1] = 0x0;
- isa_ranges[2] = rloc;
- isa_ranges[3] = 0x0;
- isa_ranges[4] = 0x0;
- isa_ranges[5] = 0x00010000;
- prom_setprop(isa, name, "ranges",
- isa_ranges, sizeof(isa_ranges));
-}
-
-#define CPC925_MC_START 0xf8000000
-#define CPC925_MC_LENGTH 0x1000000
-/* The values for memory-controller don't have right number of cells */
-static void __init fixup_device_tree_maple_memory_controller(void)
-{
- phandle mc;
- u32 mc_reg[4];
- char *name = "/hostbridge@f8000000";
- u32 ac, sc;
-
- mc = call_prom("finddevice", 1, 1, ADDR(name));
- if (!PHANDLE_VALID(mc))
- return;
-
- if (prom_getproplen(mc, "reg") != 8)
- return;
-
- prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
- prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
- if ((ac != 2) || (sc != 2))
- return;
-
- if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
- return;
-
- if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
- return;
-
- prom_printf("Fixing up bogus hostbridge on Maple...\n");
-
- mc_reg[0] = 0x0;
- mc_reg[1] = CPC925_MC_START;
- mc_reg[2] = 0x0;
- mc_reg[3] = CPC925_MC_LENGTH;
- prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
-}
-#else
-#define fixup_device_tree_maple()
-#define fixup_device_tree_maple_memory_controller()
-#endif
-
-#ifdef CONFIG_PPC_CHRP
/*
* Pegasos and BriQ lacks the "ranges" property in the isa node
* Pegasos needs decimal IRQ 14/15, not hexadecimal
@@ -2927,12 +2842,8 @@ static void __init fixup_device_tree_chrp(void)
}
}
}
-#else
-#define fixup_device_tree_chrp()
-#endif
-#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
-static void __init fixup_device_tree_pmac(void)
+static void __init fixup_device_tree_pmac64(void)
{
phandle u3, i2c, mpic;
u32 u3_rev;
@@ -2971,11 +2882,27 @@ static void __init fixup_device_tree_pmac(void)
prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
&parent, sizeof(parent));
}
-#else
-#define fixup_device_tree_pmac()
-#endif
-#ifdef CONFIG_PPC_EFIKA
+static void __init fixup_device_tree_pmac(void)
+{
+ __be32 val = 1;
+ char type[8];
+ phandle node;
+
+ // Some pmacs are missing #size-cells on escc or i2s nodes
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = '\0';
+ prom_getprop(node, "device_type", type, sizeof(type));
+ if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s"))
+ continue;
+
+ if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
+ continue;
+
+ prom_setprop(node, NULL, "#size-cells", &val, sizeof(val));
+ }
+}
+
/*
* The MPC5200 FEC driver requires an phy-handle property to tell it how
* to talk to the phy. If the phy-handle property is missing, then this
@@ -3107,11 +3034,7 @@ static void __init fixup_device_tree_efika(void)
/* Make sure ethernet phy-handle property exists */
fixup_device_tree_efika_add_phy();
}
-#else
-#define fixup_device_tree_efika()
-#endif
-#ifdef CONFIG_PPC_PASEMI_NEMO
/*
* CFE supplied on Nemo is broken in several ways, biggest
* problem is that it reassigns ISA interrupts to unused mpic ints.
@@ -3187,18 +3110,23 @@ static void __init fixup_device_tree_pasemi(void)
prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
}
-#else /* !CONFIG_PPC_PASEMI_NEMO */
-static inline void fixup_device_tree_pasemi(void) { }
-#endif
static void __init fixup_device_tree(void)
{
- fixup_device_tree_maple();
- fixup_device_tree_maple_memory_controller();
- fixup_device_tree_chrp();
- fixup_device_tree_pmac();
- fixup_device_tree_efika();
- fixup_device_tree_pasemi();
+ if (IS_ENABLED(CONFIG_PPC_CHRP))
+ fixup_device_tree_chrp();
+
+ if (IS_ENABLED(CONFIG_PPC_PMAC))
+ fixup_device_tree_pmac();
+
+ if (IS_ENABLED(CONFIG_PPC_PMAC) && IS_ENABLED(CONFIG_PPC64))
+ fixup_device_tree_pmac64();
+
+ if (IS_ENABLED(CONFIG_PPC_EFIKA))
+ fixup_device_tree_efika();
+
+ if (IS_ENABLED(CONFIG_PPC_PASEMI_NEMO))
+ fixup_device_tree_pasemi();
}
static void __init prom_find_boot_cpu(void)
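The prom_init.c hunk above replaces per-platform #ifdef stubs with IS_ENABLED() checks. A minimal sketch of the pattern, using a hypothetical fixup and Kconfig symbol (not from this patch): IS_ENABLED() folds to a compile-time 0 or 1, so the untaken call is removed by dead-code elimination while the callee is still parsed and type-checked in every configuration.

#include <linux/init.h>
#include <linux/kconfig.h>

/* Hypothetical platform fixup, only to illustrate the pattern. */
static void __init fixup_foo_platform(void)
{
	/* platform-specific device tree repair would go here */
}

static void __init apply_devtree_fixups(void)
{
	/* folds to 'if (0)' or 'if (1)' at compile time */
	if (IS_ENABLED(CONFIG_FOO_PLATFORM))
		fixup_foo_platform();
}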
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 727ed4a14545..c6997df63287 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -215,7 +215,7 @@ static int do_seccomp(struct pt_regs *regs)
* have already loaded -ENOSYS into r3, or seccomp has put
* something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
*/
- if (__secure_computing(NULL))
+ if (__secure_computing())
return -1;
/*
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index f7e86e09c49f..e61245c4468e 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -92,12 +92,12 @@ struct rtas_function {
* Per-function locks for sequence-based RTAS functions.
*/
static DEFINE_MUTEX(rtas_ibm_activate_firmware_lock);
-static DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock);
-static DEFINE_MUTEX(rtas_ibm_get_indices_lock);
static DEFINE_MUTEX(rtas_ibm_lpar_perftools_lock);
-static DEFINE_MUTEX(rtas_ibm_physical_attestation_lock);
-static DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock);
+DEFINE_MUTEX(rtas_ibm_physical_attestation_lock);
DEFINE_MUTEX(rtas_ibm_get_vpd_lock);
+DEFINE_MUTEX(rtas_ibm_get_indices_lock);
+DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock);
+DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock);
static struct rtas_function rtas_function_table[] __ro_after_init = {
[RTAS_FNIDX__CHECK_EXCEPTION] = {
@@ -798,66 +798,6 @@ void __init udbg_init_rtas_panel(void)
udbg_putc = call_rtas_display_status_delay;
}
-#ifdef CONFIG_UDBG_RTAS_CONSOLE
-
-/* If you think you're dying before early_init_dt_scan_rtas() does its
- * work, you can hard code the token values for your firmware here and
- * hardcode rtas.base/entry etc.
- */
-static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
-static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;
-
-static void udbg_rtascon_putc(char c)
-{
- int tries;
-
- if (!rtas.base)
- return;
-
- /* Add CRs before LFs */
- if (c == '\n')
- udbg_rtascon_putc('\r');
-
- /* if there is more than one character to be displayed, wait a bit */
- for (tries = 0; tries < 16; tries++) {
- if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
- break;
- udelay(1000);
- }
-}
-
-static int udbg_rtascon_getc_poll(void)
-{
- int c;
-
- if (!rtas.base)
- return -1;
-
- if (rtas_call(rtas_getchar_token, 0, 2, &c))
- return -1;
-
- return c;
-}
-
-static int udbg_rtascon_getc(void)
-{
- int c;
-
- while ((c = udbg_rtascon_getc_poll()) == -1)
- ;
-
- return c;
-}
-
-
-void __init udbg_init_rtas_console(void)
-{
- udbg_putc = udbg_rtascon_putc;
- udbg_getc = udbg_rtascon_getc;
- udbg_getc_poll = udbg_rtascon_getc_poll;
-}
-#endif /* CONFIG_UDBG_RTAS_CONSOLE */
-
void rtas_progress(char *s, unsigned short hex)
{
struct device_node *root;
@@ -1390,21 +1330,14 @@ bool __ref rtas_busy_delay(int status)
*/
ms = clamp(ms, 1U, 1000U);
/*
- * The delay hint is an order-of-magnitude suggestion, not
- * a minimum. It is fine, possibly even advantageous, for
- * us to pause for less time than hinted. For small values,
- * use usleep_range() to ensure we don't sleep much longer
- * than actually needed.
- *
- * See Documentation/timers/timers-howto.rst for
- * explanation of the threshold used here. In effect we use
- * usleep_range() for 9900 and 9901, msleep() for
- * 9902-9905.
+ * The delay hint is an order-of-magnitude suggestion, not a
+ * minimum. It is fine, possibly even advantageous, for us to
+ * pause for less time than hinted. To make sure pause time will
+ * not be way longer than requested independent of HZ
+ * configuration, use fsleep(). See fsleep() for details of
+ * used sleeping functions.
*/
- if (ms <= 20)
- usleep_range(ms * 100, ms * 1000);
- else
- msleep(ms);
+ fsleep(ms * 1000);
break;
case RTAS_BUSY:
ret = true;
@@ -2142,21 +2075,6 @@ int __init early_init_dt_scan_rtas(unsigned long node,
rtas.size = *sizep;
}
-#ifdef CONFIG_UDBG_RTAS_CONSOLE
- basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
- if (basep)
- rtas_putchar_token = *basep;
-
- basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
- if (basep)
- rtas_getchar_token = *basep;
-
- if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
- rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
- udbg_init_rtas_console();
-
-#endif
-
/* break now */
return 1;
}
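In the rtas_busy_delay() hunk above, fsleep() takes its duration in microseconds and internally picks a suitable sleeping primitive for that magnitude, so the millisecond hint only needs clamping and a unit conversion. A hedged sketch of the call pattern, using a hypothetical helper name:

#include <linux/delay.h>
#include <linux/minmax.h>

static void pause_for_busy_hint(unsigned int ms)
{
	ms = clamp(ms, 1U, 1000U);	/* same bounds as rtas_busy_delay() */
	fsleep(ms * 1000);		/* fsleep() expects microseconds */
}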
diff --git a/arch/powerpc/kernel/secure_boot.c b/arch/powerpc/kernel/secure_boot.c
index 9e0efb657f39..3a28795b4ed8 100644
--- a/arch/powerpc/kernel/secure_boot.c
+++ b/arch/powerpc/kernel/secure_boot.c
@@ -5,6 +5,7 @@
*/
#include <linux/types.h>
#include <linux/of.h>
+#include <linux/string_choices.h>
#include <asm/secure_boot.h>
static struct device_node *get_ppc_fw_sb_node(void)
@@ -38,7 +39,7 @@ bool is_ppc_secureboot_enabled(void)
of_node_put(node);
out:
- pr_info("Secure boot mode %s\n", enabled ? "enabled" : "disabled");
+ pr_info("Secure boot mode %s\n", str_enabled_disabled(enabled));
return enabled;
}
@@ -62,7 +63,7 @@ bool is_ppc_trustedboot_enabled(void)
of_node_put(node);
out:
- pr_info("Trusted boot mode %s\n", enabled ? "enabled" : "disabled");
+ pr_info("Trusted boot mode %s\n", str_enabled_disabled(enabled));
return enabled;
}
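str_enabled_disabled() comes from <linux/string_choices.h> and simply maps a bool to "enabled" or "disabled", replacing the open-coded ternary. A minimal, hypothetical usage sketch:

#include <linux/printk.h>
#include <linux/string_choices.h>
#include <linux/types.h>

static void report_feature(bool enabled)
{
	pr_info("Example feature %s\n", str_enabled_disabled(enabled));
}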
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 4856e1a5161c..fbb7ebd8aa08 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -14,7 +14,7 @@
#include <linux/debugfs.h>
#include <asm/asm-prototypes.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/security_features.h>
#include <asm/sections.h>
#include <asm/setup.h>
diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c
index fbeb1cbac01b..afb690a172b4 100644
--- a/arch/powerpc/kernel/secvar-sysfs.c
+++ b/arch/powerpc/kernel/secvar-sysfs.c
@@ -52,7 +52,7 @@ static ssize_t size_show(struct kobject *kobj, struct kobj_attribute *attr,
}
static ssize_t data_read(struct file *filep, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
char *data;
@@ -85,7 +85,7 @@ data_fail:
}
static ssize_t update_write(struct file *filep, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
int rc;
@@ -104,11 +104,11 @@ static struct kobj_attribute format_attr = __ATTR_RO(format);
static struct kobj_attribute size_attr = __ATTR_RO(size);
-static struct bin_attribute data_attr = __BIN_ATTR_RO(data, 0);
+static struct bin_attribute data_attr __ro_after_init = __BIN_ATTR_RO(data, 0);
-static struct bin_attribute update_attr = __BIN_ATTR_WO(update, 0);
+static struct bin_attribute update_attr __ro_after_init = __BIN_ATTR_WO(update, 0);
-static struct bin_attribute *secvar_bin_attrs[] = {
+static const struct bin_attribute *const secvar_bin_attrs[] = {
&data_attr,
&update_attr,
NULL,
@@ -121,7 +121,7 @@ static struct attribute *secvar_attrs[] = {
static const struct attribute_group secvar_attr_group = {
.attrs = secvar_attrs,
- .bin_attrs = secvar_bin_attrs,
+ .bin_attrs_new = secvar_bin_attrs,
};
__ATTRIBUTE_GROUPS(secvar_attr);
@@ -130,7 +130,7 @@ static const struct kobj_type secvar_ktype = {
.default_groups = secvar_attr_groups,
};
-static int update_kobj_size(void)
+static __init int update_kobj_size(void)
{
u64 varsize;
@@ -145,7 +145,7 @@ static int update_kobj_size(void)
return 0;
}
-static int secvar_sysfs_config(struct kobject *kobj)
+static __init int secvar_sysfs_config(struct kobject *kobj)
{
struct attribute_group config_group = {
.name = "config",
@@ -158,7 +158,7 @@ static int secvar_sysfs_config(struct kobject *kobj)
return 0;
}
-static int add_var(const char *name)
+static __init int add_var(const char *name)
{
struct kobject *kobj;
int rc;
@@ -181,7 +181,7 @@ static int add_var(const char *name)
return 0;
}
-static int secvar_sysfs_load(void)
+static __init int secvar_sysfs_load(void)
{
u64 namesize = 0;
char *name;
@@ -209,7 +209,7 @@ static int secvar_sysfs_load(void)
return rc;
}
-static int secvar_sysfs_load_static(void)
+static __init int secvar_sysfs_load_static(void)
{
const char * const *name_ptr = secvar_ops->var_names;
int rc;
@@ -224,7 +224,7 @@ static int secvar_sysfs_load_static(void)
return 0;
}
-static int secvar_sysfs_init(void)
+static __init int secvar_sysfs_init(void)
{
u64 max_size;
int rc;
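The secvar-sysfs.c hunks above move to the constified binary-attribute API: read/write callbacks take a const struct bin_attribute, and const attribute arrays hang off the group's .bin_attrs_new member. A hedged sketch mirroring that combination, with hypothetical names:

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static ssize_t blob_read(struct file *filep, struct kobject *kobj,
			 const struct bin_attribute *attr, char *buf,
			 loff_t off, size_t count)
{
	return 0;	/* would copy up to @count bytes at @off into @buf */
}

static const struct bin_attribute blob_attr = __BIN_ATTR_RO(blob, 0);

static const struct bin_attribute *const example_bin_attrs[] = {
	&blob_attr,
	NULL,
};

static const struct attribute_group example_group = {
	.bin_attrs_new = example_bin_attrs,
};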
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 943430077375..68d47c53876c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -67,6 +67,7 @@
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>
#include <asm/mce.h>
+#include <asm/systemcfg.h>
#include "setup.h"
@@ -457,11 +458,8 @@ void __init smp_setup_cpu_maps(void)
DBG("smp_setup_cpu_maps()\n");
- cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
+ cpu_to_phys_id = memblock_alloc_or_panic(nr_cpu_ids * sizeof(u32),
__alignof__(u32));
- if (!cpu_to_phys_id)
- panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
- __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
for_each_node_by_type(dn, "cpu") {
const __be32 *intserv;
@@ -560,7 +558,9 @@ void __init smp_setup_cpu_maps(void)
out:
of_node_put(dn);
}
- vdso_data->processorCount = num_present_cpus();
+#endif
+#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
+ systemcfg->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
/* Initialize CPU <=> thread mapping/
@@ -831,8 +831,8 @@ static int __init check_cache_coherency(void)
if (devtree_coherency != KERNEL_COHERENCY) {
printk(KERN_ERR
"kernel coherency:%s != device tree_coherency:%s\n",
- KERNEL_COHERENCY ? "on" : "off",
- devtree_coherency ? "on" : "off");
+ str_on_off(KERNEL_COHERENCY),
+ str_on_off(devtree_coherency));
BUG();
}
@@ -957,8 +957,6 @@ void __init setup_arch(char **cmdline_p)
/* Parse memory topology */
mem_topology_setup();
- /* Set max_mapnr before paging_init() */
- set_max_mapnr(max_pfn);
high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
/*
@@ -997,9 +995,11 @@ void __init setup_arch(char **cmdline_p)
initmem_init();
/*
- * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must
- * be called after initmem_init(), so that pageblock_order is initialised.
+ * Reserve large chunks of memory for use by CMA for fadump, KVM and
+ * hugetlb. These must be called after initmem_init(), so that
+ * pageblock_order is initialised.
*/
+ fadump_cma_init();
kvm_cma_reserve();
gigantic_hugetlb_cma_reserve();
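memblock_alloc_or_panic() folds the allocation and the panic-on-failure path into one call, which is why the explicit NULL check disappears in the setup-common.c hunk above. A hedged sketch with a hypothetical caller:

#include <linux/cache.h>
#include <linux/init.h>
#include <linux/memblock.h>

static void * __init alloc_early_table(size_t size)
{
	/* panics, reporting the caller, if the early allocation fails */
	return memblock_alloc_or_panic(size, SMP_CACHE_BYTES);
}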
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index e515c1f7d8d3..5a1bf501fbe1 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -40,7 +40,7 @@
#include <asm/time.h>
#include <asm/serial.h>
#include <asm/udbg.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/kdump.h>
@@ -140,13 +140,7 @@ arch_initcall(ppc_init);
static void *__init alloc_stack(void)
{
- void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN);
-
- if (!ptr)
- panic("cannot allocate %d bytes for stack at %pS\n",
- THREAD_SIZE, (void *)_RET_IP_);
-
- return ptr;
+ return memblock_alloc_or_panic(THREAD_SIZE, THREAD_ALIGN);
}
void __init irqstack_early_init(void)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 22f83fbbc762..7284c8021eeb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -60,7 +60,7 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
@@ -892,7 +892,7 @@ unsigned long memory_block_size_bytes(void)
}
#endif
-#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
+#ifdef CONFIG_PPC_INDIRECT_PIO
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
@@ -920,6 +920,7 @@ static int __init disable_hardlockup_detector(void)
hardlockup_detector_disable();
#else
if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ check_kvm_guest();
if (is_kvm_guest())
hardlockup_detector_disable();
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 4ab9b8cee77a..5ac7084eebc0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -61,6 +61,7 @@
#include <asm/ftrace.h>
#include <asm/kup.h>
#include <asm/fadump.h>
+#include <asm/systemcfg.h>
#include <trace/events/ipi.h>
@@ -1186,8 +1187,8 @@ int generic_cpu_disable(void)
return -EBUSY;
set_cpu_online(cpu, false);
-#ifdef CONFIG_PPC64
- vdso_data->processorCount--;
+#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
+ systemcfg->processorCount--;
#endif
/* Update affinity of all IRQs previously aimed at this CPU */
irq_migrate_all_off_this_cpu();
@@ -1642,10 +1643,12 @@ void start_secondary(void *unused)
secondary_cpu_time_init();
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
if (system_state == SYSTEM_RUNNING)
- vdso_data->processorCount++;
+ systemcfg->processorCount++;
+#endif
+#ifdef CONFIG_PPC64
vdso_getcpu_init();
#endif
set_numa_node(numa_cpu_lookup_table[cpu]);
diff --git a/arch/powerpc/kernel/static_call.c b/arch/powerpc/kernel/static_call.c
index 1502b7e439ca..ec3101f95e53 100644
--- a/arch/powerpc/kernel/static_call.c
+++ b/arch/powerpc/kernel/static_call.c
@@ -2,32 +2,60 @@
#include <linux/memory.h>
#include <linux/static_call.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
{
int err;
bool is_ret0 = (func == __static_call_return0);
- unsigned long target = (unsigned long)(is_ret0 ? tramp + PPC_SCT_RET0 : func);
- bool is_short = is_offset_in_branch_range((long)target - (long)tramp);
-
- if (!tramp)
- return;
+ unsigned long _tramp = (unsigned long)tramp;
+ unsigned long _func = (unsigned long)func;
+ unsigned long _ret0 = _tramp + PPC_SCT_RET0;
+ bool is_short = is_offset_in_branch_range((long)func - (long)(site ? : tramp));
mutex_lock(&text_mutex);
- if (func && !is_short) {
- err = patch_ulong(tramp + PPC_SCT_DATA, target);
- if (err)
- goto out;
+ if (site && tail) {
+ if (!func)
+ err = patch_instruction(site, ppc_inst(PPC_RAW_BLR()));
+ else if (is_ret0)
+ err = patch_branch(site, _ret0, 0);
+ else if (is_short)
+ err = patch_branch(site, _func, 0);
+ else if (tramp)
+ err = patch_branch(site, _tramp, 0);
+ else
+ err = 0;
+ } else if (site) {
+ if (!func)
+ err = patch_instruction(site, ppc_inst(PPC_RAW_NOP()));
+ else if (is_ret0)
+ err = patch_instruction(site, ppc_inst(PPC_RAW_LI(_R3, 0)));
+ else if (is_short)
+ err = patch_branch(site, _func, BRANCH_SET_LINK);
+ else if (tramp)
+ err = patch_branch(site, _tramp, BRANCH_SET_LINK);
+ else
+ err = 0;
+ } else if (tramp) {
+ if (func && !is_short) {
+ err = patch_ulong(tramp + PPC_SCT_DATA, _func);
+ if (err)
+ goto out;
+ }
+
+ if (!func)
+ err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
+ else if (is_ret0)
+ err = patch_branch(tramp, _ret0, 0);
+ else if (is_short)
+ err = patch_branch(tramp, _func, 0);
+ else
+ err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP()));
+ } else {
+ err = 0;
}
- if (!func)
- err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
- else if (is_short)
- err = patch_branch(tramp, target, 0);
- else
- err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP()));
out:
mutex_unlock(&text_mutex);
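The static_call hunk above decides between short and long sequences based on whether the target is reachable by a direct PowerPC branch, which encodes a signed 26-bit, word-aligned byte offset (roughly +/- 32 MB). A standalone sketch of that reachability test, written from the ISA definition rather than copied from the kernel header:

#include <stdbool.h>

static bool offset_fits_direct_branch(long offset)
{
	/* 26-bit signed immediate; the low two bits must be zero */
	return offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3);
}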
diff --git a/arch/powerpc/kernel/switch.S b/arch/powerpc/kernel/switch.S
index 608c0ce7cec6..59e3ee99db0e 100644
--- a/arch/powerpc/kernel/switch.S
+++ b/arch/powerpc/kernel/switch.S
@@ -39,7 +39,6 @@ flush_branch_caches:
// Flush the link stack
.rept 64
- ANNOTATE_INTRA_FUNCTION_CALL
bl .+4
.endr
b 1f
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index ebae8415dfbb..9a084bdb8926 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -553,3 +553,8 @@
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index b842c83ab497..6b3dd6decdf9 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/smp.h>
+#include <asm/time.h>
#include <asm/pmc.h>
#include <asm/firmware.h>
#include <asm/idle.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0ff9f038e800..8224381c1dba 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -71,11 +71,11 @@
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/mce.h>
+#include <asm/systemcfg.h>
/* powerpc clocksource/clockevent code */
#include <linux/clockchips.h>
-#include <linux/timekeeper_internal.h>
static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
@@ -901,6 +901,38 @@ void secondary_cpu_time_init(void)
register_decrementer_clockevent(smp_processor_id());
}
+/*
+ * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
+ * result.
+ */
+static __init void div128_by_32(u64 dividend_high, u64 dividend_low,
+ unsigned int divisor, struct div_result *dr)
+{
+ unsigned long a, b, c, d;
+ unsigned long w, x, y, z;
+ u64 ra, rb, rc;
+
+ a = dividend_high >> 32;
+ b = dividend_high & 0xffffffff;
+ c = dividend_low >> 32;
+ d = dividend_low & 0xffffffff;
+
+ w = a / divisor;
+ ra = ((u64)(a - (w * divisor)) << 32) + b;
+
+ rb = ((u64)do_div(ra, divisor) << 32) + c;
+ x = ra;
+
+ rc = ((u64)do_div(rb, divisor) << 32) + d;
+ y = rb;
+
+ do_div(rc, divisor);
+ z = rc;
+
+ dr->result_high = ((u64)w << 32) + x;
+ dr->result_low = ((u64)y << 32) + z;
+}
+
/* This function is only called on the boot processor */
void __init time_init(void)
{
@@ -950,7 +982,10 @@ void __init time_init(void)
sys_tz.tz_dsttime = 0;
}
- vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
+ vdso_k_arch_data->tb_ticks_per_sec = tb_ticks_per_sec;
+#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
+ systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
+#endif
/* initialise and enable the large decrementer (if we have one) */
set_decrementer_max();
@@ -971,39 +1006,6 @@ void __init time_init(void)
enable_sched_clock_irqtime();
}
-/*
- * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
- * result.
- */
-void div128_by_32(u64 dividend_high, u64 dividend_low,
- unsigned divisor, struct div_result *dr)
-{
- unsigned long a, b, c, d;
- unsigned long w, x, y, z;
- u64 ra, rb, rc;
-
- a = dividend_high >> 32;
- b = dividend_high & 0xffffffff;
- c = dividend_low >> 32;
- d = dividend_low & 0xffffffff;
-
- w = a / divisor;
- ra = ((u64)(a - (w * divisor)) << 32) + b;
-
- rb = ((u64) do_div(ra, divisor) << 32) + c;
- x = ra;
-
- rc = ((u64) do_div(rb, divisor) << 32) + d;
- y = rb;
-
- do_div(rc, divisor);
- z = rc;
-
- dr->result_high = ((u64)w << 32) + x;
- dr->result_low = ((u64)y << 32) + z;
-
-}
-
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
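div128_by_32() above is ordinary base-2^32 long division: each 32-bit "digit" of the dividend is divided and the remainder carried into the next. A hedged, userspace-flavoured sketch of the same steps (the kernel version uses do_div() rather than plain '/' and '%'; the cross-check assumes a compiler with unsigned __int128, e.g. GCC or Clang):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct div_result { uint64_t result_high, result_low; };

static void div128_by_32(uint64_t hi, uint64_t lo, uint32_t divisor,
			 struct div_result *dr)
{
	uint64_t a = hi >> 32, b = hi & 0xffffffff;
	uint64_t c = lo >> 32, d = lo & 0xffffffff;
	uint64_t w, x, y, z, ra, rb, rc;

	w = a / divisor;			/* most significant digit */
	ra = ((a % divisor) << 32) + b;		/* carry remainder down */
	x = ra / divisor;
	rb = ((ra % divisor) << 32) + c;
	y = rb / divisor;
	rc = ((rb % divisor) << 32) + d;
	z = rc / divisor;			/* final remainder is dropped */

	dr->result_high = (w << 32) + x;
	dr->result_low  = (y << 32) + z;
}

int main(void)
{
	uint64_t hi = 0x0123456789abcdefULL, lo = 0xfedcba9876543210ULL;
	uint32_t div = 1000000007u;
	unsigned __int128 n = ((unsigned __int128)hi << 64) | lo;
	struct div_result dr;

	div128_by_32(hi, lo, div, &dr);
	assert(dr.result_high == (uint64_t)((n / div) >> 64));
	assert(dr.result_low == (uint64_t)(n / div));
	printf("quotient = %#llx%016llx\n",
	       (unsigned long long)dr.result_high,
	       (unsigned long long)dr.result_low);
	return 0;
}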
diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
index 125f4ca588b9..d6c3885453bd 100644
--- a/arch/powerpc/kernel/trace/Makefile
+++ b/arch/powerpc/kernel/trace/Makefile
@@ -9,12 +9,15 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_ftrace_64_pg.o = $(CC_FLAGS_FTRACE)
endif
-obj32-$(CONFIG_FUNCTION_TRACER) += ftrace.o ftrace_entry.o
-ifdef CONFIG_MPROFILE_KERNEL
-obj64-$(CONFIG_FUNCTION_TRACER) += ftrace.o ftrace_entry.o
+ifdef CONFIG_FUNCTION_TRACER
+obj32-y += ftrace.o ftrace_entry.o
+ifeq ($(CONFIG_MPROFILE_KERNEL)$(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY),)
+obj64-y += ftrace_64_pg.o ftrace_64_pg_entry.o
else
-obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_pg.o ftrace_64_pg_entry.o
+obj64-y += ftrace.o ftrace_entry.o
+endif
endif
+
obj-$(CONFIG_TRACING) += trace_clock.o
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index d8d6b4fd9a14..6dca92d5a6e8 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -23,7 +23,7 @@
#include <linux/list.h>
#include <asm/cacheflush.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>
@@ -37,8 +37,12 @@ unsigned long ftrace_call_adjust(unsigned long addr)
if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
return 0;
- if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
+ if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY) &&
+ !IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
addr += MCOUNT_INSN_SIZE;
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
+ addr += MCOUNT_INSN_SIZE;
+ }
return addr;
}
@@ -82,7 +86,7 @@ static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_
{
int ret = ftrace_validate_inst(ip, old);
- if (!ret)
+ if (!ret && !ppc_inst_equal(old, new))
ret = patch_instruction((u32 *)ip, new);
return ret;
@@ -106,28 +110,66 @@ static unsigned long find_ftrace_tramp(unsigned long ip)
return 0;
}
+#ifdef CONFIG_MODULES
+static unsigned long ftrace_lookup_module_stub(unsigned long ip, unsigned long addr)
+{
+ struct module *mod = NULL;
+
+ scoped_guard(rcu)
+ mod = __module_text_address(ip);
+ if (!mod)
+ pr_err("No module loaded at addr=%lx\n", ip);
+
+ return (addr == (unsigned long)ftrace_caller ? mod->arch.tramp : mod->arch.tramp_regs);
+}
+#else
+static unsigned long ftrace_lookup_module_stub(unsigned long ip, unsigned long addr)
+{
+ return 0;
+}
+#endif
+
+static unsigned long ftrace_get_ool_stub(struct dyn_ftrace *rec)
+{
+#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
+ return rec->arch.ool_stub;
+#else
+ BUILD_BUG();
+#endif
+}
+
static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst)
{
- unsigned long ip = rec->ip;
+ unsigned long ip;
unsigned long stub;
- if (is_offset_in_branch_range(addr - ip)) {
+ if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
+ ip = ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE; /* second instruction in stub */
+ else
+ ip = rec->ip;
+
+ if (!is_offset_in_branch_range(addr - ip) && addr != FTRACE_ADDR &&
+ addr != FTRACE_REGS_ADDR) {
+ /* This can only happen with ftrace direct */
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)) {
+ pr_err("0x%lx (0x%lx): Unexpected target address 0x%lx\n",
+ ip, rec->ip, addr);
+ return -EINVAL;
+ }
+ addr = FTRACE_ADDR;
+ }
+
+ if (is_offset_in_branch_range(addr - ip))
/* Within range */
stub = addr;
-#ifdef CONFIG_MODULES
- } else if (rec->arch.mod) {
- /* Module code would be going to one of the module stubs */
- stub = (addr == (unsigned long)ftrace_caller ? rec->arch.mod->arch.tramp :
- rec->arch.mod->arch.tramp_regs);
-#endif
- } else if (core_kernel_text(ip)) {
+ else if (core_kernel_text(ip))
/* We would be branching to one of our ftrace stubs */
stub = find_ftrace_tramp(ip);
- if (!stub) {
- pr_err("0x%lx: No ftrace stubs reachable\n", ip);
- return -EINVAL;
- }
- } else {
+ else
+ stub = ftrace_lookup_module_stub(ip, addr);
+
+ if (!stub) {
+ pr_err("0x%lx (0x%lx): No ftrace stubs reachable\n", ip, rec->ip);
return -EINVAL;
}
@@ -135,6 +177,145 @@ static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_
return 0;
}
+static int ftrace_init_ool_stub(struct module *mod, struct dyn_ftrace *rec)
+{
+#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
+ static int ool_stub_text_index, ool_stub_text_end_index, ool_stub_inittext_index;
+ int ret = 0, ool_stub_count, *ool_stub_index;
+ ppc_inst_t inst;
+ /*
+ * See ftrace_entry.S if changing the below instruction sequence, as we rely on
+ * decoding the last branch instruction here to recover the correct function ip.
+ */
+ struct ftrace_ool_stub *ool_stub, ool_stub_template = {
+ .insn = {
+ PPC_RAW_MFLR(_R0),
+ PPC_RAW_NOP(), /* bl ftrace_caller */
+ PPC_RAW_MTLR(_R0),
+ PPC_RAW_NOP() /* b rec->ip + 4 */
+ }
+ };
+
+ WARN_ON(rec->arch.ool_stub);
+
+ if (is_kernel_inittext(rec->ip)) {
+ ool_stub = ftrace_ool_stub_inittext;
+ ool_stub_index = &ool_stub_inittext_index;
+ ool_stub_count = ftrace_ool_stub_inittext_count;
+ } else if (is_kernel_text(rec->ip)) {
+ /*
+ * ftrace records are sorted, so we first use up the stub area within .text
+ * (ftrace_ool_stub_text) before using the area at the end of .text
+ * (ftrace_ool_stub_text_end), unless the stub is out of range of the record.
+ */
+ if (ool_stub_text_index >= ftrace_ool_stub_text_count ||
+ !is_offset_in_branch_range((long)rec->ip -
+ (long)&ftrace_ool_stub_text[ool_stub_text_index])) {
+ ool_stub = ftrace_ool_stub_text_end;
+ ool_stub_index = &ool_stub_text_end_index;
+ ool_stub_count = ftrace_ool_stub_text_end_count;
+ } else {
+ ool_stub = ftrace_ool_stub_text;
+ ool_stub_index = &ool_stub_text_index;
+ ool_stub_count = ftrace_ool_stub_text_count;
+ }
+#ifdef CONFIG_MODULES
+ } else if (mod) {
+ ool_stub = mod->arch.ool_stubs;
+ ool_stub_index = &mod->arch.ool_stub_index;
+ ool_stub_count = mod->arch.ool_stub_count;
+#endif
+ } else {
+ return -EINVAL;
+ }
+
+ ool_stub += (*ool_stub_index)++;
+
+ if (WARN_ON(*ool_stub_index > ool_stub_count))
+ return -EINVAL;
+
+ if (!is_offset_in_branch_range((long)rec->ip - (long)&ool_stub->insn[0]) ||
+ !is_offset_in_branch_range((long)(rec->ip + MCOUNT_INSN_SIZE) -
+ (long)&ool_stub->insn[3])) {
+ pr_err("%s: ftrace ool stub out of range (%p -> %p).\n",
+ __func__, (void *)rec->ip, (void *)&ool_stub->insn[0]);
+ return -EINVAL;
+ }
+
+ rec->arch.ool_stub = (unsigned long)&ool_stub->insn[0];
+
+ /* bl ftrace_caller */
+ if (!mod)
+ ret = ftrace_get_call_inst(rec, (unsigned long)ftrace_caller, &inst);
+#ifdef CONFIG_MODULES
+ else
+ /*
+ * We can't use ftrace_get_call_inst() since that uses
+ * __module_text_address(rec->ip) to look up the module.
+ * But, since the module is not fully formed at this stage,
+ * the lookup fails. We know the target though, so generate
+ * the branch inst directly.
+ */
+ inst = ftrace_create_branch_inst(ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE,
+ mod->arch.tramp, 1);
+#endif
+ ool_stub_template.insn[1] = ppc_inst_val(inst);
+
+ /* b rec->ip + 4 */
+ if (!ret && create_branch(&inst, &ool_stub->insn[3], rec->ip + MCOUNT_INSN_SIZE, 0))
+ return -EINVAL;
+ ool_stub_template.insn[3] = ppc_inst_val(inst);
+
+ if (!ret)
+ ret = patch_instructions((u32 *)ool_stub, (u32 *)&ool_stub_template,
+ sizeof(ool_stub_template), false);
+
+ return ret;
+#else /* !CONFIG_PPC_FTRACE_OUT_OF_LINE */
+ BUILD_BUG();
+#endif
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+static const struct ftrace_ops *powerpc_rec_get_ops(struct dyn_ftrace *rec)
+{
+ const struct ftrace_ops *ops = NULL;
+
+ if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+ ops = ftrace_find_unique_ops(rec);
+ WARN_ON_ONCE(!ops);
+ }
+
+ if (!ops)
+ ops = &ftrace_list_ops;
+
+ return ops;
+}
+
+static int ftrace_rec_set_ops(struct dyn_ftrace *rec, const struct ftrace_ops *ops)
+{
+ if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
+ return patch_ulong((void *)(ftrace_get_ool_stub(rec) - sizeof(unsigned long)),
+ (unsigned long)ops);
+ else
+ return patch_ulong((void *)(rec->ip - MCOUNT_INSN_SIZE - sizeof(unsigned long)),
+ (unsigned long)ops);
+}
+
+static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
+{
+ return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
+}
+
+static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
+{
+ return ftrace_rec_set_ops(rec, powerpc_rec_get_ops(rec));
+}
+#else
+static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
+static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
@@ -147,18 +328,33 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
ppc_inst_t old, new;
- int ret;
+ unsigned long ip = rec->ip;
+ int ret = 0;
/* This can only ever be called during module load */
- if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(rec->ip)))
+ if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(ip)))
return -EINVAL;
old = ppc_inst(PPC_RAW_NOP());
- ret = ftrace_get_call_inst(rec, addr, &new);
+ if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
+ ip = ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE; /* second instruction in stub */
+ ret = ftrace_get_call_inst(rec, (unsigned long)ftrace_caller, &old);
+ }
+
+ ret |= ftrace_get_call_inst(rec, addr, &new);
+
+ if (!ret)
+ ret = ftrace_modify_code(ip, old, new);
+
+ ret = ftrace_rec_update_ops(rec);
if (ret)
return ret;
- return ftrace_modify_code(rec->ip, old, new);
+ if (!ret && IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
+ ret = ftrace_modify_code(rec->ip, ppc_inst(PPC_RAW_NOP()),
+ ppc_inst(PPC_RAW_BRANCH((long)ftrace_get_ool_stub(rec) - (long)rec->ip)));
+
+ return ret;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
@@ -191,6 +387,13 @@ void ftrace_replace_code(int enable)
new_addr = ftrace_get_addr_new(rec);
update = ftrace_update_record(rec, enable);
+ if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) && update != FTRACE_UPDATE_IGNORE) {
+ ip = ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE;
+ ret = ftrace_get_call_inst(rec, (unsigned long)ftrace_caller, &nop_inst);
+ if (ret)
+ goto out;
+ }
+
switch (update) {
case FTRACE_UPDATE_IGNORE:
default:
@@ -198,16 +401,19 @@ void ftrace_replace_code(int enable)
case FTRACE_UPDATE_MODIFY_CALL:
ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
ret |= ftrace_get_call_inst(rec, addr, &call_inst);
+ ret |= ftrace_rec_update_ops(rec);
old = call_inst;
new = new_call_inst;
break;
case FTRACE_UPDATE_MAKE_NOP:
ret = ftrace_get_call_inst(rec, addr, &call_inst);
+ ret |= ftrace_rec_set_nop_ops(rec);
old = call_inst;
new = nop_inst;
break;
case FTRACE_UPDATE_MAKE_CALL:
ret = ftrace_get_call_inst(rec, new_addr, &call_inst);
+ ret |= ftrace_rec_update_ops(rec);
old = nop_inst;
new = call_inst;
break;
@@ -215,6 +421,24 @@ void ftrace_replace_code(int enable)
if (!ret)
ret = ftrace_modify_code(ip, old, new);
+
+ if (!ret && IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) &&
+ (update == FTRACE_UPDATE_MAKE_NOP || update == FTRACE_UPDATE_MAKE_CALL)) {
+ /* Update the actual ftrace location */
+ call_inst = ppc_inst(PPC_RAW_BRANCH((long)ftrace_get_ool_stub(rec) -
+ (long)rec->ip));
+ nop_inst = ppc_inst(PPC_RAW_NOP());
+ ip = rec->ip;
+
+ if (update == FTRACE_UPDATE_MAKE_NOP)
+ ret = ftrace_modify_code(ip, call_inst, nop_inst);
+ else
+ ret = ftrace_modify_code(ip, nop_inst, call_inst);
+
+ if (ret)
+ goto out;
+ }
+
if (ret)
goto out;
}
@@ -234,20 +458,27 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
/* Verify instructions surrounding the ftrace location */
if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
/* Expect nops */
- ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP()));
+ if (!IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
+ ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP()));
if (!ret)
ret = ftrace_validate_inst(ip, ppc_inst(PPC_RAW_NOP()));
} else if (IS_ENABLED(CONFIG_PPC32)) {
/* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */
ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
- if (!ret)
- ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)));
+ if (ret)
+ return ret;
+ ret = ftrace_modify_code(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)),
+ ppc_inst(PPC_RAW_NOP()));
} else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
/* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */
ret = ftrace_read_inst(ip - 4, &old);
if (!ret && !ppc_inst_equal(old, ppc_inst(PPC_RAW_MFLR(_R0)))) {
+			/* GCC v5.x emits the additional 'std' instruction, GCC v6.x doesn't */
ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
- ret |= ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)));
+ if (ret)
+ return ret;
+ ret = ftrace_modify_code(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)),
+ ppc_inst(PPC_RAW_NOP()));
}
} else {
return -EINVAL;
@@ -256,13 +487,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
if (ret)
return ret;
- if (!core_kernel_text(ip)) {
- if (!mod) {
- pr_err("0x%lx: No module provided for non-kernel address\n", ip);
- return -EFAULT;
- }
- rec->arch.mod = mod;
- }
+ /* Set up out-of-line stub */
+ if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
+ return ftrace_init_ool_stub(mod, rec);
/* Nop-out the ftrace location */
new = ppc_inst(PPC_RAW_NOP());
@@ -302,6 +529,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ppc_inst_t old, new;
int ret;
+ /*
+ * When using CALL_OPS, the function to call is associated with the
+ * call site, and we don't have a global function pointer to update.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
+ return 0;
+
old = ppc_inst_read((u32 *)&ftrace_call);
new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
ret = ftrace_modify_code(ip, old, new);
@@ -421,8 +655,7 @@ int __init ftrace_dyn_arch_init(void)
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- unsigned long sp = fregs->regs.gpr[1];
- int bit;
+ unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1];
if (unlikely(ftrace_graph_is_dead()))
goto out;
@@ -430,15 +663,10 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
- bit = ftrace_test_recursion_trylock(ip, parent_ip);
- if (bit < 0)
- goto out;
-
- if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
+ if (!function_graph_enter_regs(parent_ip, ip, 0, (unsigned long *)sp, fregs))
parent_ip = ppc_function_entry(return_to_handler);
- ftrace_test_recursion_unlock(bit);
out:
- fregs->regs.link = parent_ip;
+ arch_ftrace_regs(fregs)->regs.link = parent_ip;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
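The module-stub lookup added above runs __module_text_address() under an RCU read-side section via scoped_guard(rcu) from <linux/cleanup.h>, instead of explicit rcu_read_lock()/rcu_read_unlock() calls. A minimal, hypothetical sketch of the pattern:

#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/rcupdate.h>

static struct module *module_owning_text(unsigned long addr)
{
	struct module *mod;

	scoped_guard(rcu)
		mod = __module_text_address(addr);

	return mod;	/* NULL when addr is not inside module text */
}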
diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.c b/arch/powerpc/kernel/trace/ftrace_64_pg.c
index 12fab1803bcf..5c6e545d1708 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_pg.c
+++ b/arch/powerpc/kernel/trace/ftrace_64_pg.c
@@ -23,7 +23,7 @@
#include <linux/list.h>
#include <asm/cacheflush.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>
@@ -116,6 +116,18 @@ static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
}
#ifdef CONFIG_MODULES
+static struct module *ftrace_lookup_module(struct dyn_ftrace *rec)
+{
+ struct module *mod;
+
+ scoped_guard(rcu)
+ mod = __module_text_address(rec->ip);
+ if (!mod)
+ pr_err("No module loaded at addr=%lx\n", rec->ip);
+
+ return mod;
+}
+
static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
@@ -124,6 +136,12 @@ __ftrace_make_nop(struct module *mod,
unsigned long ip = rec->ip;
ppc_inst_t op, pop;
+ if (!mod) {
+ mod = ftrace_lookup_module(rec);
+ if (!mod)
+ return -EINVAL;
+ }
+
/* read where this goes */
if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
@@ -366,27 +384,6 @@ int ftrace_make_nop(struct module *mod,
return -EINVAL;
}
- /*
- * Out of range jumps are called from modules.
- * We should either already have a pointer to the module
- * or it has been passed in.
- */
- if (!rec->arch.mod) {
- if (!mod) {
- pr_err("No module loaded addr=%lx\n", addr);
- return -EFAULT;
- }
- rec->arch.mod = mod;
- } else if (mod) {
- if (mod != rec->arch.mod) {
- pr_err("Record mod %p not equal to passed in mod %p\n",
- rec->arch.mod, mod);
- return -EINVAL;
- }
- /* nothing to do if mod == rec->arch.mod */
- } else
- mod = rec->arch.mod;
-
return __ftrace_make_nop(mod, rec, addr);
}
@@ -411,7 +408,10 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
ppc_inst_t op[2];
void *ip = (void *)rec->ip;
unsigned long entry, ptr, tramp;
- struct module *mod = rec->arch.mod;
+ struct module *mod = ftrace_lookup_module(rec);
+
+ if (!mod)
+ return -EINVAL;
/* read where this goes */
if (copy_inst_from_kernel_nofault(op, ip))
@@ -533,16 +533,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return -EINVAL;
}
- /*
- * Out of range jumps are called from modules.
- * Being that we are converting from nop, it had better
- * already have a module defined.
- */
- if (!rec->arch.mod) {
- pr_err("No module loaded\n");
- return -EINVAL;
- }
-
return __ftrace_make_call(rec, addr);
}
@@ -555,7 +545,10 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
ppc_inst_t op;
unsigned long ip = rec->ip;
unsigned long entry, ptr, tramp;
- struct module *mod = rec->arch.mod;
+ struct module *mod = ftrace_lookup_module(rec);
+
+ if (!mod)
+ return -EINVAL;
/* If we never set up ftrace trampolines, then bail */
if (!mod->arch.tramp || !mod->arch.tramp_regs) {
@@ -668,14 +661,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EINVAL;
}
- /*
- * Out of range jumps are called from modules.
- */
- if (!rec->arch.mod) {
- pr_err("No module loaded\n");
- return -EINVAL;
- }
-
return __ftrace_modify_call(rec, old_addr, addr);
}
#endif
@@ -800,10 +785,10 @@ int ftrace_disable_ftrace_graph_caller(void)
* in current thread info. Return the address we want to divert to.
*/
static unsigned long
-__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
+__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp,
+ struct ftrace_regs *fregs)
{
unsigned long return_hooker;
- int bit;
if (unlikely(ftrace_graph_is_dead()))
goto out;
@@ -811,16 +796,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
- bit = ftrace_test_recursion_trylock(ip, parent);
- if (bit < 0)
- goto out;
-
return_hooker = ppc_function_entry(return_to_handler);
- if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
+ if (!function_graph_enter_regs(parent, ip, 0, (unsigned long *)sp, fregs))
parent = return_hooker;
- ftrace_test_recursion_unlock(bit);
out:
return parent;
}
@@ -829,13 +809,14 @@ out:
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
+ arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip,
+ arch_ftrace_regs(fregs)->regs.gpr[1], fregs);
}
#else
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
unsigned long sp)
{
- return __prepare_ftrace_return(parent, ip, sp);
+ return __prepare_ftrace_return(parent, ip, sp, NULL);
}
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/powerpc/kernel/trace/ftrace_entry.S b/arch/powerpc/kernel/trace/ftrace_entry.S
index 76dbe9fd2c0f..3565c67fc638 100644
--- a/arch/powerpc/kernel/trace/ftrace_entry.S
+++ b/arch/powerpc/kernel/trace/ftrace_entry.S
@@ -39,13 +39,37 @@
/* Create our stack frame + pt_regs */
PPC_STLU r1,-SWITCH_FRAME_SIZE(r1)
+ .if \allregs == 1
+ SAVE_GPRS(11, 12, r1)
+ .endif
+
+ /* Get the _mcount() call site out of LR */
+ mflr r11
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ /* Load the ftrace_op */
+ PPC_LL r12, -(MCOUNT_INSN_SIZE*2 + SZL)(r11)
+
+ /* Load direct_call from the ftrace_op */
+ PPC_LL r12, FTRACE_OPS_DIRECT_CALL(r12)
+ PPC_LCMPI r12, 0
+ .if \allregs == 1
+ bne .Lftrace_direct_call_regs
+ .else
+ bne .Lftrace_direct_call
+ .endif
+#endif
+
+ /* Save the previous LR in pt_regs->link */
+ PPC_STL r0, _LINK(r1)
+ /* Also save it in A's stack frame */
+ PPC_STL r0, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE+LRSAVE(r1)
+
/* Save all gprs to pt_regs */
SAVE_GPR(0, r1)
SAVE_GPRS(3, 10, r1)
#ifdef CONFIG_PPC64
- /* Save the original return address in A's stack frame */
- std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
/* Ok to continue? */
lbz r3, PACA_FTRACE_ENABLED(r13)
cmpdi r3, 0
@@ -54,9 +78,9 @@
.if \allregs == 1
SAVE_GPR(2, r1)
- SAVE_GPRS(11, 31, r1)
+ SAVE_GPRS(13, 31, r1)
.else
-#ifdef CONFIG_LIVEPATCH_64
+#if defined(CONFIG_LIVEPATCH_64) || defined(CONFIG_PPC_FTRACE_OUT_OF_LINE)
SAVE_GPR(14, r1)
#endif
.endif
@@ -67,80 +91,143 @@
.if \allregs == 1
/* Load special regs for save below */
+ mfcr r7
mfmsr r8
mfctr r9
mfxer r10
- mfcr r11
.else
/* Clear MSR to flag as ftrace_caller versus frace_regs_caller */
li r8, 0
.endif
- /* Get the _mcount() call site out of LR */
- mflr r7
- /* Save it as pt_regs->nip */
- PPC_STL r7, _NIP(r1)
- /* Also save it in B's stackframe header for proper unwind */
- PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
- /* Save the read LR in pt_regs->link */
- PPC_STL r0, _LINK(r1)
-
#ifdef CONFIG_PPC64
/* Save callee's TOC in the ABI compliant location */
std r2, STK_GOT(r1)
LOAD_PACA_TOC() /* get kernel TOC in r2 */
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ /* r11 points to the instruction following the call to ftrace */
+ PPC_LL r5, -(MCOUNT_INSN_SIZE*2 + SZL)(r11)
+ PPC_LL r12, FTRACE_OPS_FUNC(r5)
+ mtctr r12
+#else /* !CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS */
+#ifdef CONFIG_PPC64
LOAD_REG_ADDR(r3, function_trace_op)
ld r5,0(r3)
#else
lis r3,function_trace_op@ha
lwz r5,function_trace_op@l(r3)
#endif
-
-#ifdef CONFIG_LIVEPATCH_64
- mr r14, r7 /* remember old NIP */
#endif
- /* Calculate ip from nip-4 into r3 for call below */
- subi r3, r7, MCOUNT_INSN_SIZE
-
- /* Put the original return address in r4 as parent_ip */
- mr r4, r0
-
/* Save special regs */
PPC_STL r8, _MSR(r1)
.if \allregs == 1
+ PPC_STL r7, _CCR(r1)
PPC_STL r9, _CTR(r1)
PPC_STL r10, _XER(r1)
- PPC_STL r11, _CCR(r1)
.endif
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ /* Clear orig_gpr3 to later detect ftrace_direct call */
+ li r7, 0
+ PPC_STL r7, ORIG_GPR3(r1)
+#endif
+
+#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
+ /* Save our real return address in nvr for return */
+ .if \allregs == 0
+ SAVE_GPR(15, r1)
+ .endif
+ mr r15, r11
+ /*
+ * We want the ftrace location in the function, but our lr (in r11)
+ * points at the 'mtlr r0' instruction in the out of line stub. To
+ * recover the ftrace location, we read the branch instruction in the
+ * stub, and adjust our lr by the branch offset.
+ *
+ * See ftrace_init_ool_stub() for the profile sequence.
+ */
+ lwz r8, MCOUNT_INSN_SIZE(r11)
+ slwi r8, r8, 6
+ srawi r8, r8, 6
+ add r3, r11, r8
+ /*
+ * Override our nip to point past the branch in the original function.
+ * This allows reliable stack trace and the ftrace stack tracer to work as-is.
+ */
+ addi r11, r3, MCOUNT_INSN_SIZE
+#else
+ /* Calculate ip from nip-4 into r3 for call below */
+ subi r3, r11, MCOUNT_INSN_SIZE
+#endif
+
+ /* Save NIP as pt_regs->nip */
+ PPC_STL r11, _NIP(r1)
+ /* Also save it in B's stackframe header for proper unwind */
+ PPC_STL r11, LRSAVE+SWITCH_FRAME_SIZE(r1)
+#if defined(CONFIG_LIVEPATCH_64) || defined(CONFIG_PPC_FTRACE_OUT_OF_LINE)
+ mr r14, r11 /* remember old NIP */
+#endif
+
+ /* Put the original return address in r4 as parent_ip */
+ mr r4, r0
+
/* Load &pt_regs in r6 for call below */
addi r6, r1, STACK_INT_FRAME_REGS
.endm
.macro ftrace_regs_exit allregs
- /* Load ctr with the possibly modified NIP */
- PPC_LL r3, _NIP(r1)
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ /* Check orig_gpr3 to detect ftrace_direct call */
+ PPC_LL r3, ORIG_GPR3(r1)
+ PPC_LCMPI cr1, r3, 0
mtctr r3
+#endif
+ /* Restore possibly modified LR */
+ PPC_LL r0, _LINK(r1)
+
+#ifndef CONFIG_PPC_FTRACE_OUT_OF_LINE
+ /* Load ctr with the possibly modified NIP */
+ PPC_LL r3, _NIP(r1)
#ifdef CONFIG_LIVEPATCH_64
cmpd r14, r3 /* has NIP been altered? */
#endif
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ beq cr1,2f
+ mtlr r3
+ b 3f
+#endif
+2: mtctr r3
+ mtlr r0
+3:
+
+#else /* !CONFIG_PPC_FTRACE_OUT_OF_LINE */
+ /* Load LR with the possibly modified NIP */
+ PPC_LL r3, _NIP(r1)
+ cmpd r14, r3 /* has NIP been altered? */
+ bne- 1f
+
+ mr r3, r15
+1: mtlr r3
+ .if \allregs == 0
+ REST_GPR(15, r1)
+ .endif
+#endif
+
/* Restore gprs */
.if \allregs == 1
REST_GPRS(2, 31, r1)
.else
REST_GPRS(3, 10, r1)
-#ifdef CONFIG_LIVEPATCH_64
+#if defined(CONFIG_LIVEPATCH_64) || defined(CONFIG_PPC_FTRACE_OUT_OF_LINE)
REST_GPR(14, r1)
#endif
.endif
- /* Restore possibly modified LR */
- PPC_LL r0, _LINK(r1)
- mtlr r0
-
#ifdef CONFIG_PPC64
/* Restore callee's TOC */
ld r2, STK_GOT(r1)
@@ -153,23 +240,46 @@
/* Based on the cmpd above, if the NIP was altered handle livepatch */
bne- livepatch_handler
#endif
- bctr /* jump after _mcount site */
+
+ /* jump after _mcount site */
+#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ bnectr cr1
+#endif
+ /*
+ * Return with blr to keep the link stack balanced. The function profiling sequence
+ * uses 'mtlr r0' to restore LR.
+ */
+ blr
+#else
+ bctr
+#endif
.endm
-_GLOBAL(ftrace_regs_caller)
- ftrace_regs_entry 1
- /* ftrace_call(r3, r4, r5, r6) */
+.macro ftrace_regs_func allregs
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ bctrl
+#else
+ .if \allregs == 1
.globl ftrace_regs_call
ftrace_regs_call:
+ .else
+.globl ftrace_call
+ftrace_call:
+ .endif
+ /* ftrace_call(r3, r4, r5, r6) */
bl ftrace_stub
+#endif
+.endm
+
+_GLOBAL(ftrace_regs_caller)
+ ftrace_regs_entry 1
+ ftrace_regs_func 1
ftrace_regs_exit 1
_GLOBAL(ftrace_caller)
ftrace_regs_entry 0
- /* ftrace_call(r3, r4, r5, r6) */
-.globl ftrace_call
-ftrace_call:
- bl ftrace_stub
+ ftrace_regs_func 0
ftrace_regs_exit 0
_GLOBAL(ftrace_stub)
@@ -177,6 +287,11 @@ _GLOBAL(ftrace_stub)
#ifdef CONFIG_PPC64
ftrace_no_trace:
+#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
+ REST_GPR(3, r1)
+ addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+ blr
+#else
mflr r3
mtctr r3
REST_GPR(3, r1)
@@ -184,6 +299,22 @@ ftrace_no_trace:
mtlr r0
bctr
#endif
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+.Lftrace_direct_call_regs:
+ mtctr r12
+ REST_GPRS(11, 12, r1)
+ addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+ bctr
+.Lftrace_direct_call:
+ mtctr r12
+ addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+ bctr
+SYM_FUNC_START(ftrace_stub_direct_tramp)
+ blr
+SYM_FUNC_END(ftrace_stub_direct_tramp)
+#endif
#ifdef CONFIG_LIVEPATCH_64
/*
@@ -194,11 +325,17 @@ ftrace_no_trace:
* We get here when a function A, calls another function B, but B has
* been live patched with a new function C.
*
- * On entry:
- * - we have no stack frame and can not allocate one
+ * On entry, we have no stack frame and can not allocate one.
+ *
+ * With PPC_FTRACE_OUT_OF_LINE=n, on entry:
* - LR points back to the original caller (in A)
* - CTR holds the new NIP in C
* - r0, r11 & r12 are free
+ *
+ * With PPC_FTRACE_OUT_OF_LINE=y, on entry:
+ * - r0 points back to the original caller (in A)
+ * - LR holds the new NIP in C
+ * - r11 & r12 are free
*/
livepatch_handler:
ld r12, PACA_THREAD_INFO(r13)
@@ -208,18 +345,23 @@ livepatch_handler:
addi r11, r11, 24
std r11, TI_livepatch_sp(r12)
- /* Save toc & real LR on livepatch stack */
- std r2, -24(r11)
- mflr r12
- std r12, -16(r11)
-
/* Store stack end marker */
lis r12, STACK_END_MAGIC@h
ori r12, r12, STACK_END_MAGIC@l
std r12, -8(r11)
- /* Put ctr in r12 for global entry and branch there */
+ /* Save toc & real LR on livepatch stack */
+ std r2, -24(r11)
+#ifndef CONFIG_PPC_FTRACE_OUT_OF_LINE
+ mflr r12
+ std r12, -16(r11)
mfctr r12
+#else
+ std r0, -16(r11)
+ mflr r12
+ /* Put ctr in r12 for global entry and branch there */
+ mtctr r12
+#endif
bctrl
/*
@@ -308,6 +450,14 @@ _GLOBAL(return_to_handler)
blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
+SYM_DATA(ftrace_ool_stub_text_count, .long CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE)
+
+SYM_START(ftrace_ool_stub_text, SYM_L_GLOBAL, .balign SZL)
+ .space CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE * FTRACE_OOL_STUB_SIZE
+SYM_CODE_END(ftrace_ool_stub_text)
+#endif
+
.pushsection ".tramp.ftrace.text","aw",@progbits;
.globl ftrace_tramp_text
ftrace_tramp_text:
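The lwz/slwi/srawi/add sequence in the entry code above decodes the stub's final instruction, a plain I-form 'b' with AA = LK = 0: shifting the raw word left and then arithmetically right by 6 bits sign-extends the 26-bit byte offset. The assembly adds that offset to LR, which points one instruction before the branch, so it lands on the ftrace location (branch target minus 4). A hedged C rendering of the generic target computation:

#include <stdint.h>

static uint64_t b_insn_target(uint64_t insn_addr, uint32_t insn)
{
	/* keep LI||AA||LK and sign-extend; AA and LK are zero for a plain 'b' */
	int32_t offset = (int32_t)(insn << 6) >> 6;

	return insn_addr + offset;
}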
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index edf5cabe5dfd..cb8e9357383e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -263,10 +263,9 @@ static int __die(const char *str, struct pt_regs *regs, long err)
{
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
- printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
+ printk("%s PAGE_SIZE=%luK%s %s%s%s%s %s\n",
IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
PAGE_SIZE / 1024, get_mmu_str(),
- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 4b99208f5adc..862b22b2b616 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -36,12 +36,6 @@ void __init udbg_early_init(void)
#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL)
/* RTAS panel debug */
udbg_init_rtas_panel();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE)
- /* RTAS console debug */
- udbg_init_rtas_console();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
- /* Maple real mode debug */
- udbg_init_maple_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
udbg_init_pas_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX)
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 313802aff571..dfe8ed2192e8 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -205,29 +205,6 @@ void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride)
udbg_use_uart();
}
-#ifdef CONFIG_PPC_MAPLE
-
-#define UDBG_UART_MAPLE_ADDR ((void __iomem *)0xf40003f8)
-
-static u8 udbg_uart_in_maple(unsigned int reg)
-{
- return real_readb(UDBG_UART_MAPLE_ADDR + reg);
-}
-
-static void udbg_uart_out_maple(unsigned int reg, u8 val)
-{
- real_writeb(val, UDBG_UART_MAPLE_ADDR + reg);
-}
-
-void __init udbg_init_maple_realmode(void)
-{
- udbg_uart_in = udbg_uart_in_maple;
- udbg_uart_out = udbg_uart_out_maple;
- udbg_use_uart();
-}
-
-#endif /* CONFIG_PPC_MAPLE */
-
#ifdef CONFIG_PPC_PASEMI
#define UDBG_UART_PAS_ADDR ((void __iomem *)0xfcff03f8UL)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index ee4b9d676cff..219d67bcf747 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -16,9 +16,8 @@
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
-#include <linux/memblock.h>
#include <linux/syscalls.h>
-#include <linux/time_namespace.h>
+#include <linux/vdso_datastore.h>
#include <vdso/datapage.h>
#include <asm/syscall.h>
@@ -33,6 +32,8 @@
#include <asm/vdso_datapage.h>
#include <asm/setup.h>
+static_assert(__VDSO_PAGES == VDSO_NR_PAGES);
+
/* The alignment of the vDSO */
#define VDSO_ALIGNMENT (1 << 16)
@@ -41,23 +42,6 @@ extern char vdso64_start, vdso64_end;
long sys_ni_syscall(void);
-/*
- * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
- * Once the early boot kernel code no longer needs to muck around
- * with it, it will become dynamically allocated
- */
-static union {
- struct vdso_arch_data data;
- u8 page[PAGE_SIZE];
-} vdso_data_store __page_aligned_data;
-struct vdso_arch_data *vdso_data = &vdso_data_store.data;
-
-enum vvar_pages {
- VVAR_DATA_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_NR_PAGES,
-};
-
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
unsigned long text_size)
{
@@ -96,14 +80,6 @@ static void vdso_close(const struct vm_special_mapping *sm, struct vm_area_struc
mm->context.vdso = NULL;
}
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf);
-
-static struct vm_special_mapping vvar_spec __ro_after_init = {
- .name = "[vvar]",
- .fault = vvar_fault,
-};
-
static struct vm_special_mapping vdso32_spec __ro_after_init = {
.name = "[vdso]",
.mremap = vdso32_mremap,
@@ -116,70 +92,6 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
.close = vdso_close,
};
-#ifdef CONFIG_TIME_NS
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
- return ((struct vdso_arch_data *)vvar_page)->data;
-}
-
-/*
- * The vvar mapping contains data for a specific time namespace, so when a task
- * changes namespace we must unmap its vvar data for the old namespace.
- * Subsequent faults will map in data for the new namespace.
- *
- * For more details see timens_setup_vdso_data().
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
- struct mm_struct *mm = task->mm;
- VMA_ITERATOR(vmi, mm, 0);
- struct vm_area_struct *vma;
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma) {
- if (vma_is_special_mapping(vma, &vvar_spec))
- zap_vma_pages(vma);
- }
- mmap_read_unlock(mm);
-
- return 0;
-}
-#endif
-
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *timens_page = find_timens_vvar_page(vma);
- unsigned long pfn;
-
- switch (vmf->pgoff) {
- case VVAR_DATA_PAGE_OFFSET:
- if (timens_page)
- pfn = page_to_pfn(timens_page);
- else
- pfn = virt_to_pfn(vdso_data);
- break;
-#ifdef CONFIG_TIME_NS
- case VVAR_TIMENS_PAGE_OFFSET:
- /*
- * If a task belongs to a time namespace then a namespace
- * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
- * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
- * offset.
- * See also the comment near timens_setup_vdso_data().
- */
- if (!timens_page)
- return VM_FAULT_SIGBUS;
- pfn = virt_to_pfn(vdso_data);
- break;
-#endif /* CONFIG_TIME_NS */
- default:
- return VM_FAULT_SIGBUS;
- }
-
- return vmf_insert_pfn(vma, vmf->address, pfn);
-}
-
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
@@ -188,7 +100,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
{
unsigned long vdso_size, vdso_base, mappings_size;
struct vm_special_mapping *vdso_spec;
- unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
+ unsigned long vvar_size = VDSO_NR_PAGES * PAGE_SIZE;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -214,9 +126,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
/* Add required alignment. */
vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
- vma = _install_special_mapping(mm, vdso_base, vvar_size,
- VM_READ | VM_MAYREAD | VM_IO |
- VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
+ vma = vdso_install_vvar_mapping(mm, vdso_base);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -296,10 +206,10 @@ static void __init vdso_setup_syscall_map(void)
for (i = 0; i < NR_syscalls; i++) {
if (sys_call_table[i] != (void *)&sys_ni_syscall)
- vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
+ vdso_k_arch_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
if (IS_ENABLED(CONFIG_COMPAT) &&
compat_sys_call_table[i] != (void *)&sys_ni_syscall)
- vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
+ vdso_k_arch_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
}
}
@@ -349,29 +259,10 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
static int __init vdso_init(void)
{
#ifdef CONFIG_PPC64
- /*
- * Fill up the "systemcfg" stuff for backward compatibility
- */
- strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
- vdso_data->version.major = SYSTEMCFG_MAJOR;
- vdso_data->version.minor = SYSTEMCFG_MINOR;
- vdso_data->processor = mfspr(SPRN_PVR);
- /*
- * Fake the old platform number for pSeries and add
- * in LPAR bit if necessary
- */
- vdso_data->platform = 0x100;
- if (firmware_has_feature(FW_FEATURE_LPAR))
- vdso_data->platform |= 1;
- vdso_data->physicalMemorySize = memblock_phys_mem_size();
- vdso_data->dcache_size = ppc64_caches.l1d.size;
- vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
- vdso_data->icache_size = ppc64_caches.l1i.size;
- vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
- vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
- vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
- vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
- vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
+ vdso_k_arch_data->dcache_block_size = ppc64_caches.l1d.block_size;
+ vdso_k_arch_data->icache_block_size = ppc64_caches.l1i.block_size;
+ vdso_k_arch_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
+ vdso_k_arch_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
#endif /* CONFIG_PPC64 */
vdso_setup_syscall_map();
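
The syscall map filled in by vdso_setup_syscall_map() packs one bit per syscall number, most-significant bit first within each 32-bit word, and now lives in vdso_k_arch_data rather than the old arch-private vdso_data. A minimal sketch of how a consumer of that map could test a single entry using the same encoding; the helper name is hypothetical and not part of the kernel API:

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Word nr >> 5 covers syscalls 32*(nr >> 5) .. 32*(nr >> 5) + 31;
	 * syscall nr occupies bit 31 - (nr & 0x1f) of that word, matching the
	 * "0x80000000UL >> (i & 0x1f)" encoding used above.
	 */
	static bool syscall_is_implemented(const uint32_t *syscall_map, unsigned int nr)
	{
		return syscall_map[nr >> 5] & (UINT32_C(0x80000000) >> (nr & 0x1f));
	}
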
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index 31ca5a547004..8834dfe9d727 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -3,7 +3,7 @@
# List of files in the vdso, has to be asm only for now
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
obj-vdso32 = sigtramp32-32.o gettimeofday-32.o datapage-32.o cacheflush-32.o note-32.o getcpu-32.o
obj-vdso64 = sigtramp64-64.o gettimeofday-64.o datapage-64.o cacheflush-64.o note-64.o getcpu-64.o
@@ -50,14 +50,18 @@ ldflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld)
ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
# Filter flags that clang will warn are unused for linking
-ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CFLAGS))
+ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
CC32FLAGS := -m32
-CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc
- # This flag is supported by clang for 64-bit but not 32-bit so it will cause
- # an unused command line flag warning for this file.
+CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc -mpcrel
ifdef CONFIG_CC_IS_CLANG
+# This flag is supported by clang for 64-bit but not 32-bit so it will cause
+# an unused command line flag warning for this file.
CC32FLAGSREMOVE += -fno-stack-clash-protection
+# -mstack-protector-guard values from the 64-bit build are not valid for the
+# 32-bit one. clang validates the values passed to these arguments during
+# parsing, even when -fno-stack-protector is passed afterwards.
+CC32FLAGSREMOVE += -mstack-protector-guard%
endif
LD32FLAGS := -Wl,-soname=linux-vdso32.so.1
AS32FLAGS := -D__VDSO32__
diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S
index 3b2479bd2f9a..488d3ade11e6 100644
--- a/arch/powerpc/kernel/vdso/cacheflush.S
+++ b/arch/powerpc/kernel/vdso/cacheflush.S
@@ -30,7 +30,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
#ifdef CONFIG_PPC64
mflr r12
.cfi_register lr,r12
- get_realdatapage r10, r11
+ get_datapage r10 vdso_u_arch_data
mtlr r12
.cfi_restore lr
#endif
diff --git a/arch/powerpc/kernel/vdso/datapage.S b/arch/powerpc/kernel/vdso/datapage.S
index 2b19b6201a33..d23b2e8e2a34 100644
--- a/arch/powerpc/kernel/vdso/datapage.S
+++ b/arch/powerpc/kernel/vdso/datapage.S
@@ -28,7 +28,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
mflr r12
.cfi_register lr,r12
mr. r4,r3
- get_realdatapage r3, r11
+ get_datapage r3 vdso_u_arch_data
mtlr r12
#ifdef __powerpc64__
addi r3,r3,CFG_SYSCALL_MAP64
@@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
.cfi_startproc
mflr r12
.cfi_register lr,r12
- get_realdatapage r3, r11
+ get_datapage r3 vdso_u_arch_data
#ifndef __powerpc64__
lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
#endif
diff --git a/arch/powerpc/kernel/vdso/getrandom.S b/arch/powerpc/kernel/vdso/getrandom.S
index f3bbf931931c..a80d9fb436f7 100644
--- a/arch/powerpc/kernel/vdso/getrandom.S
+++ b/arch/powerpc/kernel/vdso/getrandom.S
@@ -31,8 +31,6 @@
PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1)
.cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT
#endif
- get_realdatapage r8, r11
- addi r8, r8, VDSO_RNG_DATA_OFFSET
bl CFUNC(DOTSYM(\funct))
PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S
index 5540d7021fa2..79c967212444 100644
--- a/arch/powerpc/kernel/vdso/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso/gettimeofday.S
@@ -32,11 +32,10 @@
PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1)
.cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT
#endif
- get_datapage r5
.ifeq \call_time
- addi r5, r5, VDSO_DATA_OFFSET
+ get_datapage r5 vdso_u_time_data
.else
- addi r4, r5, VDSO_DATA_OFFSET
+ get_datapage r4 vdso_u_time_data
.endif
bl CFUNC(DOTSYM(\funct))
PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
index 7b41d5d256e8..72a1012b8a20 100644
--- a/arch/powerpc/kernel/vdso/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
@@ -6,6 +6,7 @@
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
+#include <vdso/datapage.h>
#ifdef __LITTLE_ENDIAN__
OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
@@ -16,7 +17,8 @@ OUTPUT_ARCH(powerpc:common)
SECTIONS
{
- PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE);
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S
index 9481e4b892ed..32102a05eaa7 100644
--- a/arch/powerpc/kernel/vdso/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso64.lds.S
@@ -6,6 +6,7 @@
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
+#include <vdso/datapage.h>
#ifdef __LITTLE_ENDIAN__
OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
@@ -16,7 +17,8 @@ OUTPUT_ARCH(powerpc:common64)
SECTIONS
{
- PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE);
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/powerpc/kernel/vdso/vgetrandom.c b/arch/powerpc/kernel/vdso/vgetrandom.c
index 5f855d45fb7b..cc79b960a541 100644
--- a/arch/powerpc/kernel/vdso/vgetrandom.c
+++ b/arch/powerpc/kernel/vdso/vgetrandom.c
@@ -8,7 +8,7 @@
#include <linux/types.h>
ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state,
- size_t opaque_len, const struct vdso_rng_data *vd)
+ size_t opaque_len)
{
- return __cvdso_getrandom_data(vd, buffer, len, flags, opaque_state, opaque_len);
+ return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len);
}
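
With the vdso_rng_data pointer no longer threaded through from the assembly stub, __c_kernel_getrandom() simply forwards to the generic __cvdso_getrandom(), which locates the rng data page itself. A hedged, userspace-only sketch of the path that ultimately exercises this code: it goes through libc's getrandom(), which recent glibc may service via the vDSO entry point instead of the syscall. Purely illustrative, not part of this patch:

	#include <stdio.h>
	#include <stddef.h>
	#include <sys/random.h>

	int main(void)
	{
		unsigned char buf[16];

		/* glibc may satisfy this from the vDSO's __kernel_getrandom. */
		if (getrandom(buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
			return 1;
		for (size_t i = 0; i < sizeof(buf); i++)
			printf("%02x", buf[i]);
		putchar('\n');
		return 0;
	}
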
diff --git a/arch/powerpc/kernel/vdso/vgettimeofday.c b/arch/powerpc/kernel/vdso/vgettimeofday.c
index 55a287c9a736..6f5167d81af5 100644
--- a/arch/powerpc/kernel/vdso/vgettimeofday.c
+++ b/arch/powerpc/kernel/vdso/vgettimeofday.c
@@ -7,43 +7,43 @@
#ifdef __powerpc64__
int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_gettime_data(vd, clock, ts);
}
int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_getres_data(vd, clock_id, res);
}
#else
int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_gettime32_data(vd, clock, ts);
}
int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_gettime_data(vd, clock, ts);
}
int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_getres_time32_data(vd, clock_id, res);
}
#endif
int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_gettimeofday_data(vd, tv, tz);
}
-__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_data *vd)
+__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_time_data *vd)
{
return __cvdso_time_data(vd, time);
}
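
These C bodies keep their trailing data-page argument, now typed as struct vdso_time_data, while the exported vDSO symbols stay two- and three-argument functions because the assembly stubs in gettimeofday.S load the pointer before tail-calling here. A hedged, userspace-only sketch contrasting the vDSO fast path (reached through libc) with a raw syscall; the program is illustrative, not part of the kernel:

	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		struct timespec fast, slow;

		clock_gettime(CLOCK_MONOTONIC, &fast);               /* vDSO path */
		syscall(SYS_clock_gettime, CLOCK_MONOTONIC, &slow);  /* kernel entry */

		printf("vdso:    %lld.%09ld\n", (long long)fast.tv_sec, fast.tv_nsec);
		printf("syscall: %lld.%09ld\n", (long long)slow.tv_sec, slow.tv_nsec);
		return 0;
	}
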
diff --git a/arch/powerpc/kernel/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32_wrapper.S
index 10f92f265d51..20bca3548b44 100644
--- a/arch/powerpc/kernel/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32_wrapper.S
@@ -2,7 +2,7 @@
#include <linux/linkage.h>
#include <asm/page.h>
- __PAGE_ALIGNED_DATA
+ .section ".data..ro_after_init", "aw"
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
diff --git a/arch/powerpc/kernel/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64_wrapper.S
index 839d1a61411d..1912936fa227 100644
--- a/arch/powerpc/kernel/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64_wrapper.S
@@ -2,7 +2,7 @@
#include <linux/linkage.h>
#include <asm/page.h>
- __PAGE_ALIGNED_DATA
+ .section ".data..ro_after_init", "aw"
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
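
Both wrappers move the embedded vDSO images from generic page-aligned data into .data..ro_after_init, the section backing the C attribute __ro_after_init: writable while the kernel boots (so the images can still be patched, e.g. for feature fixups, during vdso_init()), read-only once init completes. A minimal C-level sketch of the same attribute, with hypothetical names:

	#include <linux/cache.h>
	#include <linux/init.h>

	/* Hypothetical example: written once during boot, then mapped read-only. */
	static unsigned long example_tunable __ro_after_init;

	static int __init example_setup(void)
	{
		example_tunable = 42;	/* still writable at __init time */
		return 0;
	}
	early_initcall(example_setup);
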
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 7ab4e2fb28b1..de6ee7d35cff 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -1,10 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_PPC64
-#define PROVIDE32(x) PROVIDE(__unused__##x)
-#else
-#define PROVIDE32(x) PROVIDE(x)
-#endif
-
#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
@@ -127,7 +121,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_etext = .;
- PROVIDE32 (etext = .);
/* Read-only data */
RO_DATA(PAGE_SIZE)
@@ -265,14 +258,13 @@ SECTIONS
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
_sinittext = .;
INIT_TEXT
-
+ *(.tramp.ftrace.init);
/*
*.init.text might be RO so we must ensure this section ends on
* a page boundary.
*/
. = ALIGN(PAGE_SIZE);
_einittext = .;
- *(.tramp.ftrace.init);
} :text
/* .exit.text is discarded at runtime, not link time,
@@ -395,7 +387,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_edata = .;
- PROVIDE32 (edata = .);
/*
* And finally the bss
@@ -405,7 +396,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_end = . ;
- PROVIDE32 (end = .);
DWARF_DEBUG
ELF_DETAILS
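
With the PROVIDE32 aliases removed, only the underscore-prefixed linker symbols (_etext, _edata, _end) remain on both 32- and 64-bit builds, and kernel code consumes them through the declarations in asm-generic/sections.h. A hedged sketch of typical usage; the helper name is hypothetical:

	#include <linux/types.h>
	#include <asm/sections.h>	/* declares _stext, _etext, _edata, _end */

	static inline bool addr_in_kernel_text(unsigned long addr)
	{
		return addr >= (unsigned long)_stext && addr < (unsigned long)_etext;
	}
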
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 8c464a5d8246..2429cb1c7baa 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -495,8 +495,7 @@ static void start_watchdog(void *arg)
*this_cpu_ptr(&wd_timer_tb) = get_tb();
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
+ hrtimer_setup(hrtimer, watchdog_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
HRTIMER_MODE_REL_PINNED);
}
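
hrtimer_setup() folds the old hrtimer_init() call and the open-coded ->function assignment into a single initializer, which is the tree-wide replacement pattern applied here. A hedged sketch of the same pattern outside the watchdog; callback, period, and names are illustrative only:

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
	{
		/* Periodic callback: re-arm for another interval, like the watchdog. */
		hrtimer_forward_now(timer, ms_to_ktime(100));
		return HRTIMER_RESTART;
	}

	static void example_start(struct hrtimer *timer)
	{
		/* One call instead of hrtimer_init() + timer->function = ... */
		hrtimer_setup(timer, example_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_start(timer, ms_to_ktime(100), HRTIMER_MODE_REL_PINNED);
	}
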